index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/eclipse-jifa/backend/master/src/main/java/org/eclipse/jifa/master/service | Create_ds/eclipse-jifa/backend/master/src/main/java/org/eclipse/jifa/master/service/sql/WorkerSQL.java | /********************************************************************************
* Copyright (c) 2020 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.master.service.sql;
/**
 * SQL statements operating on the {@code worker} table.
 */
public interface WorkerSQL {

    /** All registered workers. */
    String SELECT_ALL = "SELECT * FROM worker";

    /** Looks a single worker up by its host IP. */
    String SELECT_BY_IP = "SELECT * FROM worker WHERE host_ip = ?";

    /**
     * The one worker with the most spare capacity (largest {@code max_load - current_load});
     * ties go to the least recently modified row.
     */
    String SELECT_MOST_IDLE =
            "SELECT * FROM worker ORDER BY (max_load - current_load) DESC, last_modified_time ASC LIMIT 1";

    /**
     * Up to 20 workers whose disk usage is at or above 75%, fullest first —
     * candidates for the disk cleanup task.
     */
    String SELECT_FOR_DISK_CLEANUP =
            "SELECT * FROM worker WHERE disk_total > 0 AND disk_used / disk_total >= 0.75 ORDER BY disk_used DESC LIMIT 20";

    /** Records a worker's current load. */
    String UPDATE_LOAD = "UPDATE worker SET current_load = ? WHERE host_ip = ?";

    /** Records a worker's disk statistics. */
    String UPDATE_DISK_USAGE = "UPDATE worker SET disk_total= ?, disk_used= ? WHERE host_ip = ?";
}
| 3,200 |
0 | Create_ds/eclipse-jifa/backend/master/src/main/java/org/eclipse/jifa/master/service | Create_ds/eclipse-jifa/backend/master/src/main/java/org/eclipse/jifa/master/service/sql/SQL.java | /********************************************************************************
* Copyright (c) 2020 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.master.service.sql;
/**
 * SQL fragments shared across the master service layer.
 */
public interface SQL {

    /** Result-set column label produced by {@code COUNT(*)} aggregate queries. */
    String COUNT_NAME = "COUNT(*)";

    /** Trivial statement for places that only need a constant-true query. */
    String SELECT_TRUE = "SELECT TRUE";
}
| 3,201 |
0 | Create_ds/eclipse-jifa/backend/master/src/main/java/org/eclipse/jifa/master/service | Create_ds/eclipse-jifa/backend/master/src/main/java/org/eclipse/jifa/master/service/sql/MasterSQL.java | /********************************************************************************
* Copyright (c) 2020 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.master.service.sql;
/**
 * SQL statements operating on the {@code master} table.
 */
public interface MasterSQL {

    /** Looks a master record up by host IP. Keyword casing is preserved as written. */
    String SELECT = "SELECT * FROM master where host_ip = ?";
}
| 3,202 |
0 | Create_ds/eclipse-jifa/backend/master/src/main/java/org/eclipse/jifa/master/service | Create_ds/eclipse-jifa/backend/master/src/main/java/org/eclipse/jifa/master/service/sql/AdminSQL.java | /********************************************************************************
* Copyright (c) 2020 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.master.service.sql;
/**
 * SQL statements operating on the {@code admin} table.
 */
public interface AdminSQL {

    /** All administrators. */
    String QUERY_ALL = "SELECT * FROM admin";

    /** Checks whether the given user is an administrator. */
    String SELECT_BY_USER_ID = "SELECT * FROM admin WHERE user_id = ?";

    /** Registers a user as an administrator. */
    String INSERT = "INSERT INTO admin(user_id) VALUES(?)";
}
| 3,203 |
0 | Create_ds/eclipse-jifa/backend/master/src/main/java/org/eclipse/jifa/master/service | Create_ds/eclipse-jifa/backend/master/src/main/java/org/eclipse/jifa/master/service/sql/FileSQL.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.master.service.sql;
/**
 * SQL statements operating on the {@code file} table.
 */
public interface FileSQL {

    // Registers a new file record; transfer/cas state are supplied by the caller.
    String INSERT = "INSERT INTO file(user_id, original_name, name, type, size, host_ip, transfer_state, shared, " +
            "downloadable, in_shared_disk, deleted, cas_state) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";

    // Pages through a user's live files of one type, newest first (LIMIT offset, count).
    String SELECT_BY_USER_ID_AND_TYPE =
            "SELECT * FROM file WHERE user_id = ? AND type = ? AND deleted = false ORDER BY creation_time DESC LIMIT ?, ?";

    String COUNT_BY_USER_ID_AND_TYPE = "SELECT COUNT(*) FROM file WHERE user_id = ? AND type = ? AND deleted = false";

    // Same as above, additionally filtering by a LIKE pattern on name or display_name.
    String SELECT_BY_USER_ID_AND_TYPE_AND_EXPECTED_NAME =
            "SELECT * FROM file WHERE user_id = ? AND type = ? AND (name like ? OR display_name like ?) AND deleted = " +
            "false ORDER BY creation_time DESC LIMIT ?, ?";

    String COUNT_BY_USER_ID_AND_TYPE_AND_EXPECTED_NAME = "SELECT COUNT(*) FROM file WHERE user_id = ? AND type = ? " +
            "AND (name like ? OR display_name like ?) AND deleted = false";

    String SELECT_FILE_BY_NAME = "SELECT * FROM file WHERE name = ?";

    // Records the outcome of a transfer together with the final file size.
    String UPDATE_TRANSFER_RESULT = "UPDATE file SET transfer_state = ?, size = ? WHERE name = ?";

    String SET_SHARED = "UPDATE file SET shared = 1 WHERE name = ?";

    /**
     * To accept user requests while the disk cleaning task is running, the
     * cas_state column is flipped with compare-and-swap style updates. Its
     * values are as follows:
     * <p>
     * cas_state(0) - the file is not currently in use
     * cas_state(1) - the file is currently being used by users
     * cas_state(2) - the file is about to be deleted
     */
    String UPDATE_FILE_AS_USED =
            "UPDATE file SET cas_state = 1 WHERE name = ? and deleted = false AND (cas_state = 0 OR cas_state = 1)";

    String UPDATE_FILE_AS_UNUSED =
            "UPDATE file SET cas_state = 0 WHERE name = ? and deleted = false AND (cas_state = 1 OR cas_state = 0)";

    // Marks up to 10 of the oldest idle, fully-transferred files on a host as pending deletion.
    String UPDATE_AS_PENDING_DELETE_BY_HOST = "UPDATE file SET cas_state = 2 WHERE host_ip = ? AND deleted = false AND " +
            "cas_state = 0 AND transfer_state != 'IN_PROGRESS' ORDER BY creation_time ASC LIMIT 10";

    // Same transition keyed by file name (ORDER BY/LIMIT kept for symmetry with the by-host variant).
    String UPDATE_AS_PENDING_DELETE_BY_FILE_NAME = "UPDATE file SET cas_state = 2 WHERE name = ? AND deleted = false AND " +
            "cas_state = 0 AND transfer_state != 'IN_PROGRESS' ORDER BY creation_time ASC LIMIT 10";

    String SELECT_PENDING_DELETE_BY_HOST = "SELECT * FROM file WHERE host_ip = ? AND cas_state = 2 AND deleted = false";

    String SELECT_PENDING_DELETE_BY_FILE_NAME = "SELECT * FROM file WHERE name = ? AND cas_state = 2 AND deleted = false";

    // Soft delete: flags the row and records who deleted it and when.
    String DELETE_FILE_BY_NAME =
            "UPDATE file SET deleted = true, deleter = ?, deleted_time = now(), cas_state = 0 WHERE name= ? AND deleted = false";

    String UPDATE_DISPLAY_NAME = "UPDATE file SET display_name = ? WHERE name = ?";

    String SELECT_FILES_FOR_SYNC =
            "SELECT * FROM file WHERE host_ip = ? AND transfer_state = 'SUCCESS' AND deleted = false AND type != 'JINSIGHT'";

    // Files older than 7 days with no active job referencing them — cleanup candidates.
    String SELECT_DATED_FILES =
            "SELECT f.* FROM file f LEFT JOIN active_job aj ON f.name=aj.target " +
            "WHERE aj.target is null and " +
            "f.deleted=0 and " +
            "f.cas_state=0 and " +
            "f.transfer_state='SUCCESS' and " +
            "f.last_modified_time < now() - interval 7 day " +
            "LIMIT 50";

    // Transfers stuck IN_PROGRESS for over a day are considered timed out.
    String SELECT_TIMEOUT_IN_PROGRESS_FILE = "SELECT * FROM file WHERE transfer_state = 'IN_PROGRESS' AND " +
            "last_modified_time < now() - INTERVAL 1 day ";

    // Flips such a timed-out transfer to ERROR.
    String UPDATE_IN_PROGRESS_FILE_AS_ERROR_BY_NAME = "UPDATE file SET transfer_state = 'ERROR' WHERE name = ? and transfer_state = 'IN_PROGRESS'";
}
| 3,204 |
0 | Create_ds/eclipse-jifa/backend/worker/src/test/java/org/eclipse/jifa | Create_ds/eclipse-jifa/backend/worker/src/test/java/org/eclipse/jifa/worker/FakeHooks.java | /********************************************************************************
* Copyright (c) 2020 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker;
import io.vertx.core.json.JsonObject;
import org.eclipse.jifa.common.JifaHooks;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * A {@link JifaHooks} implementation for tests that records how many times
 * {@link #init(JsonObject)} has been invoked.
 */
public class FakeHooks implements JifaHooks {

    /** Invocation counter for init; cleared via {@link #reset()}. */
    static AtomicInteger initTriggered = new AtomicInteger(0);

    public FakeHooks() {
        // NOTE(review): presumably instantiated by name via the "hooks.className"
        // config entry (see TestWorker) — confirm against Worker's hook loading.
    }

    @Override
    public void init(JsonObject config) {
        initTriggered.incrementAndGet();
    }

    /** Clears the counter between test runs. */
    public static void reset() {
        initTriggered.set(0);
    }

    /** @return how many times init has run since the last reset */
    public static int countInitTriggered() {
        return initTriggered.get();
    }
}
| 3,205 |
0 | Create_ds/eclipse-jifa/backend/worker/src/test/java/org/eclipse/jifa | Create_ds/eclipse-jifa/backend/worker/src/test/java/org/eclipse/jifa/worker/TestWorker.java | /********************************************************************************
* Copyright (c) 2020 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker;
import io.vertx.core.DeploymentOptions;
import io.vertx.core.Vertx;
import io.vertx.core.json.JsonObject;
import io.vertx.ext.unit.Async;
import io.vertx.ext.unit.TestContext;
import io.vertx.ext.unit.junit.VertxUnitRunner;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.ServerSocket;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
@RunWith(VertxUnitRunner.class)
public class TestWorker {

    private static final Logger LOGGER = LoggerFactory.getLogger(TestWorker.class);

    @Before
    public void setup(TestContext context) throws Exception {
        FakeHooks.reset();
    }

    /** Deploys the worker verticle with a minimal config and undeploys it again. */
    @Test
    public void testStartupBasicConfig(TestContext context) throws Exception {
        deployAndUndeploy(context, baseConfig(), null);
    }

    /** Same cycle with FakeHooks configured; verifies init ran exactly once. */
    @Test
    public void testStartWithHook(TestContext context) throws Exception {
        Map<String, Object> cfg = baseConfig();
        cfg.put("hooks.className", FakeHooks.class.getName());
        deployAndUndeploy(context, cfg,
                          () -> context.assertEquals(FakeHooks.countInitTriggered(), 1));
    }

    @After
    public void reset() {
        Worker.resetCount();
    }

    /** Builds the configuration shared by both tests: free port, auth disabled, small cache. */
    private Map<String, Object> baseConfig() throws IOException {
        Map<String, Object> cfg = new HashMap<>();
        cfg.put("server.port", findRandomPort());
        cfg.put("server.host", "127.0.0.1");
        cfg.put("api.prefix", "/jifa-api");

        Map<String, Object> auth = new HashMap<>();
        auth.put("enabled", false);
        cfg.put("basicAuth", auth);

        Map<String, Object> cache = new HashMap<>();
        cache.put("expireAfterAccess", 10);
        cache.put("expireAfterAccessTimeUnit", "MINUTES");
        cfg.put("cacheConfig", cache);
        return cfg;
    }

    /**
     * Deploys the Worker verticle with the given config, waits for startup
     * (signalled through the latch handed to Worker.setCount), undeploys it,
     * runs the optional post-undeploy check, and blocks until the cycle ends.
     * The "done" latch is counted down on every exit path so a failure cannot
     * leave the test thread blocked forever (the original hung on failure).
     */
    private void deployAndUndeploy(TestContext context, Map<String, Object> cfg,
                                   Runnable afterUndeploy) throws Exception {
        Async async = context.async();
        Vertx vertx = Vertx.vertx();
        CountDownLatch count = new CountDownLatch(1);
        CountDownLatch done = new CountDownLatch(1);
        Worker.setCount(count);
        vertx.deployVerticle(Worker.class.getName(),
                             new DeploymentOptions().setConfig(new JsonObject(cfg)).setInstances(1),
                             res -> {
                                 if (res.succeeded()) {
                                     try {
                                         count.await();
                                     } catch (InterruptedException e) {
                                         Thread.currentThread().interrupt(); // preserve interrupt status
                                         context.fail(e);
                                         done.countDown();
                                         return;
                                     }
                                     vertx.undeploy(res.result(), res2 -> {
                                         if (res2.succeeded()) {
                                             if (afterUndeploy != null) {
                                                 afterUndeploy.run();
                                             }
                                             done.countDown();
                                             async.complete();
                                         } else {
                                             context.fail(res2.cause());
                                             done.countDown();
                                         }
                                     });
                                 } else {
                                     context.fail(res.cause());
                                     done.countDown();
                                 }
                             });
        done.await();
    }

    /** Finds a currently free TCP port by binding an ephemeral socket and closing it. */
    int findRandomPort() throws IOException {
        try (ServerSocket socket = new ServerSocket(0)) {
            return socket.getLocalPort();
        }
    }
}
| 3,206 |
0 | Create_ds/eclipse-jifa/backend/worker/src/test/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/test/java/org/eclipse/jifa/worker/route/TestRoutes.java | /********************************************************************************
* Copyright (c) 2020 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route;
import com.google.common.io.Files;
import com.sun.management.HotSpotDiagnosticMXBean;
import io.vertx.ext.unit.TestContext;
import io.vertx.ext.unit.junit.VertxUnitRunner;
import org.apache.commons.io.FileUtils;
import org.eclipse.jifa.common.enums.FileTransferState;
import org.eclipse.jifa.common.enums.FileType;
import org.eclipse.jifa.worker.Worker;
import org.eclipse.jifa.worker.WorkerGlobal;
import org.eclipse.jifa.worker.support.FileSupport;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.lang.management.ManagementFactory;
@RunWith(VertxUnitRunner.class)
public class TestRoutes {

    private static final Logger LOGGER = LoggerFactory.getLogger(TestRoutes.class);

    /**
     * Boots the worker and takes a real heap dump of the current JVM so the
     * route suites have a registered file to analyze.
     */
    @Before
    public void setup(TestContext context) throws Exception {
        // start worker
        Worker.main(new String[]{});

        // prepare heap dump file and register it with the worker's file store
        HotSpotDiagnosticMXBean mxBean = ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);
        String name = "test_dump_" + System.currentTimeMillis() + ".hprof";
        Base.TEST_HEAP_DUMP_FILENAME = name;
        mxBean.dumpHeap(name, false);
        FileSupport.initInfoFile(FileType.HEAP_DUMP, name, name);
        Files.move(new File(name), new File(FileSupport.filePath(FileType.HEAP_DUMP, name)));
        FileSupport.updateTransferState(FileType.HEAP_DUMP, name, FileTransferState.SUCCESS);
    }

    @Test
    public void testRoutes(TestContext context) throws Exception {
        FileRouteSuite.test(context);
        HeapDumpRouteSuite.test(context);
    }

    @After
    public void tearDown(TestContext context) {
        try {
            // removed stray System.out.println(context) debug leftover
            FileUtils.deleteDirectory(new File(WorkerGlobal.workspace()));
            WorkerGlobal.VERTX.close(context.asyncAssertSuccess());
        } catch (Throwable t) {
            // best-effort cleanup: log but never fail the run from tearDown
            LOGGER.error("Error", t);
        }
    }
}
| 3,207 |
0 | Create_ds/eclipse-jifa/backend/worker/src/test/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/test/java/org/eclipse/jifa/worker/route/FileRouteSuite.java | /********************************************************************************
* Copyright (c) 2020 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route;
import com.google.gson.reflect.TypeToken;
import io.vertx.ext.unit.Async;
import io.vertx.ext.unit.TestContext;
import org.eclipse.jifa.common.enums.FileType;
import org.eclipse.jifa.common.vo.FileInfo;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.worker.Worker;
import org.eclipse.jifa.worker.WorkerGlobal;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.Type;
import static org.eclipse.jifa.common.util.GsonHolder.GSON;
public class FileRouteSuite extends Base {

    private static final Logger LOGGER = LoggerFactory.getLogger(FileRouteSuite.class);

    /**
     * Lists heap-dump files via GET /files and asserts that at least one
     * entry exists (the dump prepared in TestRoutes#setup).
     */
    public static void test(TestContext context) {
        Async async = context.async();
        LOGGER.info("port = {}, host = {}, uri = {}", WorkerGlobal.PORT, WorkerGlobal.HOST, uri("/files"));
        CLIENT.get(WorkerGlobal.PORT, WorkerGlobal.HOST, uri("/files"))
              .addQueryParam("type", FileType.HEAP_DUMP.name())
              .addQueryParam("page", "1")
              .addQueryParam("pageSize", "10")
              .send(ar -> {
                  context.assertTrue(ar.succeeded(), ar.cause() != null ? ar.cause().getMessage() : "");
                  Type type = new TypeToken<PageView<FileInfo>>() {
                  }.getType();
                  PageView<FileInfo> view = GSON.fromJson(ar.result().bodyAsString(), type);
                  context.assertTrue(view.getData().size() > 0, ar.result().bodyAsString());
                  async.complete();
              });
        // Block until the request completes (mirrors HeapDumpRouteSuite.test) so
        // the caller can safely run the next suite afterwards.
        async.awaitSuccess();
    }
}
| 3,208 |
0 | Create_ds/eclipse-jifa/backend/worker/src/test/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/test/java/org/eclipse/jifa/worker/route/Base.java | /********************************************************************************
* Copyright (c) 2020 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route;
import io.vertx.core.Vertx;
import io.vertx.ext.web.client.WebClient;
import org.eclipse.jifa.worker.Constant;
import org.eclipse.jifa.worker.WorkerGlobal;
/**
 * Shared plumbing for the worker route test suites.
 */
public class Base {

    /** HTTP client shared by all suites; never reassigned, hence final. */
    static final WebClient CLIENT = WebClient.create(Vertx.vertx());

    /** Name of the heap dump created in TestRoutes#setup; read by HeapDumpRouteSuite. */
    static String TEST_HEAP_DUMP_FILENAME;

    /**
     * Prefixes the given route with the configured API prefix.
     *
     * @param uri route path beginning with '/'
     * @return the full request path
     */
    public static String uri(String uri) {
        return WorkerGlobal.stringConfig(Constant.ConfigKey.API_PREFIX) + uri;
    }
}
| 3,209 |
0 | Create_ds/eclipse-jifa/backend/worker/src/test/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/test/java/org/eclipse/jifa/worker/route/HeapDumpRouteSuite.java | /********************************************************************************
* Copyright (c) 2020 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route;
import com.google.gson.reflect.TypeToken;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.HttpMethod;
import io.vertx.ext.unit.Async;
import io.vertx.ext.unit.TestContext;
import io.vertx.ext.web.client.HttpRequest;
import io.vertx.ext.web.client.HttpResponse;
import org.eclipse.jifa.common.enums.ProgressState;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.common.vo.Progress;
import org.eclipse.jifa.common.vo.Result;
import org.eclipse.jifa.hda.api.Model;
import org.eclipse.jifa.worker.WorkerGlobal;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.Type;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.eclipse.jifa.common.Constant.HTTP_GET_OK_STATUS_CODE;
import static org.eclipse.jifa.common.Constant.HTTP_POST_CREATED_STATUS_CODE;
import static org.eclipse.jifa.common.util.GsonHolder.GSON;
/**
 * Drives the heap-dump analysis HTTP routes end to end: triggers the analysis,
 * polls until it completes, then issues a request against every query route,
 * feeding object ids/addresses harvested from earlier responses into later ones.
 * The route order matters — do not reorder the calls in {@link #test}.
 */
public class HeapDumpRouteSuite extends Base {

    private static final Logger LOGGER = LoggerFactory.getLogger(HeapDumpRouteSuite.class);

    // Shared with the static helpers below; assigned at the start of test(...).
    static TestContext context;

    public static void test(TestContext c) throws Exception {
        context = c;
        // Scratchpad carrying ids from one response into subsequent requests.
        Holder holder = new Holder();
        testGet("/isFirstAnalysis",
                (PostProcessor) resp -> {
                    Type type = new TypeToken<Result<Boolean>>() {
                    }.getType();
                    Result<Boolean> result = GSON.fromJson(resp.bodyAsString(), type);
                    context.assertTrue(result.getResult(), resp.bodyAsString());
                });
        testPost("/analyze");
        // Poll the analysis progress every 200 ms until it reports SUCCESS.
        AtomicBoolean success = new AtomicBoolean();
        while (!success.get()) {
            testGet("/progressOfAnalysis",
                    (PostProcessor) resp -> {
                        Progress progress = GSON.fromJson(resp.bodyAsString(), Progress.class);
                        ProgressState state = progress.getState();
                        context.assertTrue(state == ProgressState.IN_PROGRESS || state == ProgressState.SUCCESS,
                                           resp.bodyAsString());
                        if (state == ProgressState.SUCCESS) {
                            success.set(true);
                        }
                    });
            Thread.sleep(200);
        }
        // overview
        testGet("/details");
        testGet("/biggestObjects");
        // class loader
        testGet("/classLoaderExplorer/summary");
        testGet("/classLoaderExplorer/classLoader",
                req -> req.addQueryParam("page", "1")
                          .addQueryParam("pageSize", "10"),
                resp -> {
                    Type type = new TypeToken<PageView<Model.ClassLoader.Item>>() {
                    }.getType();
                    PageView<Model.ClassLoader.Item> result = GSON.fromJson(resp.bodyAsString(), type);
                    // remember a real object id for all id-based routes below
                    holder.id = result.getData().get(0).getObjectId();
                });
        testGet("/classLoaderExplorer/children",
                (PreProcessor) req -> req.addQueryParam("classLoaderId", String.valueOf(holder.id))
                                         .addQueryParam("page", "1")
                                         .addQueryParam("pageSize", "10"));
        // class reference
        testGet("/classReference/inbounds/class",
                (PreProcessor) req -> req.addQueryParam("objectId", String.valueOf(holder.id)));
        testGet("/classReference/outbounds/class",
                (PreProcessor) req -> req.addQueryParam("objectId", String.valueOf(holder.id)));
        // direct byte buffer
        testGet("/directByteBuffer/summary");
        testGet("/directByteBuffer/records",
                (PreProcessor) req -> req.addQueryParam("page", "1")
                                         .addQueryParam("pageSize", "10"));
        // dominator tree
        testGet("/dominatorTree/roots",
                (PreProcessor) req -> req.addQueryParam("page", "1")
                                         .addQueryParam("pageSize", "10")
                                         .addQueryParam("sortBy", "id")
                                         .addQueryParam("ascendingOrder", "true")
                                         .addQueryParam("grouping", "NONE"));
        testGet("/dominatorTree/children",
                (PreProcessor) req -> req.addQueryParam("page", "1")
                                         .addQueryParam("sortBy", "id")
                                         .addQueryParam("ascendingOrder", "true")
                                         .addQueryParam("pageSize", "10")
                                         .addQueryParam("grouping", "NONE")
                                         .addQueryParam("idPathInResultTree", "[" + holder.id + "]")
                                         .addQueryParam("parentObjectId", String.valueOf(holder.id)));
        // gc root
        testGet("/GCRoots");
        testGet("/GCRoots/classes",
                (PreProcessor) req -> req.addQueryParam("page", "1")
                                         .addQueryParam("pageSize", "10")
                                         .addQueryParam("rootTypeIndex", "1"));
        testGet("/GCRoots/class/objects",
                (PreProcessor) req -> req.addQueryParam("page", "1")
                                         .addQueryParam("pageSize", "10")
                                         .addQueryParam("rootTypeIndex", "1")
                                         .addQueryParam("classIndex", "1"));
        // histogram
        testGet("/histogram",
                (PreProcessor) req -> req.addQueryParam("page", "1")
                                         .addQueryParam("pageSize", "10")
                                         .addQueryParam("sortBy", "id")
                                         .addQueryParam("ascendingOrder", "true")
                                         .addQueryParam("groupingBy", "BY_CLASS"));
        // inspector
        testGet("/inspector/objectView",
                req -> req.addQueryParam("objectId", String.valueOf(holder.id)),
                resp -> {
                    Model.InspectorView view = GSON.fromJson(resp.bodyAsString(), Model.InspectorView.class);
                    // remember the address for the addressToId round-trip below
                    holder.objectAddress = view.getObjectAddress();
                });
        testGet("/inspector/addressToId",
                (PreProcessor) req -> req.addQueryParam("objectAddress", String.valueOf(holder.objectAddress)));
        testGet("/inspector/value",
                (PreProcessor) req -> req.addQueryParam("objectId", String.valueOf(holder.id)));
        testGet("/inspector/fields",
                (PreProcessor) req -> req.addQueryParam("objectId", String.valueOf(holder.id))
                                         .addQueryParam("page", "1")
                                         .addQueryParam("pageSize", "10"));
        testGet("/inspector/staticFields",
                (PreProcessor) req -> req.addQueryParam("objectId", String.valueOf(holder.id))
                                         .addQueryParam("page", "1")
                                         .addQueryParam("pageSize", "10"));
        // leak report
        testGet("/leak/report");
        // object list
        testGet("/outbounds",
                (PreProcessor) req -> req.addQueryParam("objectId", String.valueOf(holder.id))
                                         .addQueryParam("page", "1")
                                         .addQueryParam("pageSize", "10"));
        testGet("/inbounds",
                (PreProcessor) req -> req.addQueryParam("objectId", String.valueOf(holder.id))
                                         .addQueryParam("page", "1")
                                         .addQueryParam("pageSize", "10"));
        // object
        testGet("/object",
                (PreProcessor) req -> req.addQueryParam("objectId", String.valueOf(holder.id)));
        // oql
        testGet("/oql",
                (PreProcessor) req -> req.addQueryParam("oql", "select * from java.lang.String")
                                         .addQueryParam("page", "1")
                                         .addQueryParam("sortBy", "id")
                                         .addQueryParam("ascendingOrder", "true")
                                         .addQueryParam("pageSize", "10"));
        // sql
        testGet("/sql",
                (PreProcessor) req -> req.addQueryParam("sql", "select * from java.lang.String")
                                         .addQueryParam("page", "1")
                                         .addQueryParam("sortBy", "id")
                                         .addQueryParam("ascendingOrder", "true")
                                         .addQueryParam("pageSize", "10"));
        // path to gc roots
        testGet("/pathToGCRoots",
                (PreProcessor) req -> req.addQueryParam("origin", String.valueOf(holder.id))
                                         .addQueryParam("skip", "0")
                                         .addQueryParam("count", "10"));
        // system property
        testGet("/systemProperties");
        // thread
        testGet("/threadsSummary");
        testGet("/threads",
                req -> req.addQueryParam("page", "1")
                          .addQueryParam("sortBy", "id")
                          .addQueryParam("ascendingOrder", "true")
                          .addQueryParam("pageSize", "10"),
                resp -> {
                    Type type = new TypeToken<PageView<Model.Thread.Item>>() {
                    }.getType();
                    PageView<Model.Thread.Item> result = GSON.fromJson(resp.bodyAsString(), type);
                    // switch to a thread object id for the stack-trace routes
                    holder.id = result.getData().get(0).getObjectId();
                }
        );
        testGet("/stackTrace",
                (PreProcessor) req -> req.addQueryParam("objectId", String.valueOf(holder.id)));
        testGet("/locals",
                (PreProcessor) req -> req.addQueryParam("objectId", String.valueOf(holder.id))
                                         .addQueryParam("depth", "1")
                                         .addQueryParam("firstNonNativeFrame", "false"));
        // unreachable objects
        testGet("/unreachableObjects/summary");
        testGet("/unreachableObjects/records",
                (PreProcessor) req -> req.addQueryParam("page", "1")
                                         .addQueryParam("pageSize", "10"));
    }

    // Convenience overloads for the common GET cases.
    static void testGet(String uri) {
        testGet(uri, null, null);
    }

    static void testGet(String uri, PreProcessor processor) {
        testGet(uri, processor, null);
    }

    static void testGet(String uri, PostProcessor postProcessor) {
        testGet(uri, null, postProcessor);
    }

    static void testGet(String uri, PreProcessor processor, PostProcessor postProcessor) {
        test(uri, HttpMethod.GET, processor, postProcessor);
    }

    static void testPost(String uri) {
        test(uri, HttpMethod.POST, null, null);
    }

    /**
     * Sends one request against the heap-dump API, asserts the expected
     * status code (200 for GET, 201 for POST), lets the optional processors
     * mutate the request / inspect the response, and blocks until done.
     */
    static void test(String uri, HttpMethod method, PreProcessor processor, PostProcessor postProcessor) {
        LOGGER.info("test {}", uri);
        Async async = context.async();
        LOGGER.info("method = {}, port = {}, host = {}, uri = {}", method, WorkerGlobal.PORT, WorkerGlobal.HOST,
                    uri("/heap-dump/" + TEST_HEAP_DUMP_FILENAME + uri));
        HttpRequest<Buffer> request =
                CLIENT.request(method, WorkerGlobal.PORT, WorkerGlobal.HOST,
                               uri("/heap-dump/" + TEST_HEAP_DUMP_FILENAME + uri));
        if (processor != null) {
            processor.process(request);
        }
        request.send(
                ar -> {
                    context.assertTrue(ar.succeeded(), ar.cause() != null ? ar.cause().getMessage() : "");
                    LOGGER.debug("{}: {} - {}", uri, ar.result().statusCode(), ar.result().bodyAsString());
                    context.assertEquals(ar.result().statusCode(),
                                         method == HttpMethod.GET ? HTTP_GET_OK_STATUS_CODE : HTTP_POST_CREATED_STATUS_CODE,
                                         ar.result().bodyAsString());
                    if (postProcessor != null) {
                        postProcessor.process(ar.result());
                    }
                    LOGGER.info("{}: {}", uri, ar.result().bodyAsString());
                    async.complete();
                }
        );
        async.awaitSuccess();
    }

    /** Mutates a request before it is sent (query parameters etc.). */
    interface PreProcessor {
        void process(HttpRequest<Buffer> request);
    }

    /** Inspects a response after the status code has been verified. */
    interface PostProcessor {
        void process(HttpResponse<Buffer> resp);
    }

    /** Mutable scratchpad for ids discovered while walking the routes. */
    static class Holder {
        int id;
        long objectAddress;
    }
}
| 3,210 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/WorkerGlobal.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker;
import io.vertx.core.Vertx;
import io.vertx.core.json.JsonObject;
import org.eclipse.jifa.common.JifaHooks;
import org.eclipse.jifa.worker.support.FileSupport;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import static org.eclipse.jifa.common.util.Assertion.ASSERT;
/**
 * Process-wide state for a worker instance: the Vert.x instance, bind
 * address/port, parsed configuration, workspace directory and optional hooks.
 * {@link #init} and {@link #reset} are idempotent and guarded by the class lock.
 */
public class WorkerGlobal {

    // Fixed: the original passed Worker.class here, mislabelling this class's log records.
    private static final Logger LOGGER = LoggerFactory.getLogger(WorkerGlobal.class);

    public static Vertx VERTX;

    public static String HOST;

    public static int PORT;

    private static JsonObject CONFIG;

    private static String WORKSPACE;

    private static JifaHooks HOOKS;

    private static boolean initialized;

    /** Clears all global state; no-op when not initialized. */
    static synchronized void reset() {
        if (!initialized) {
            return;
        }
        VERTX = null;
        HOST = null;
        PORT = 0;
        CONFIG = null;
        WORKSPACE = null; // previously left stale across reset/init cycles
        HOOKS = null;
        initialized = false;
    }

    /**
     * Stores the globals and ensures the workspace directory exists
     * (creating it if necessary); no-op when already initialized.
     */
    static synchronized void init(Vertx vertx, String host, int port, JsonObject config, JifaHooks hooks) {
        if (initialized) {
            return;
        }
        VERTX = vertx;
        HOST = host;
        PORT = port;
        CONFIG = config;
        HOOKS = hooks;
        WORKSPACE = CONFIG.getString(Constant.ConfigKey.WORKSPACE, org.eclipse.jifa.common.Constant.DEFAULT_WORKSPACE);
        LOGGER.debug("Workspace: {}", WORKSPACE);
        File workspaceDir = new File(WORKSPACE);
        if (workspaceDir.exists()) {
            ASSERT.isTrue(workspaceDir.isDirectory(), "Workspace must be directory");
        } else {
            ASSERT.isTrue(workspaceDir.mkdirs(),
                          () -> "Can not create workspace: " + workspaceDir.getAbsolutePath());
        }
        FileSupport.init();
        initialized = true;
    }

    /** @return the string value for a single top-level config key */
    public static String stringConfig(String key) {
        return CONFIG.getString(key);
    }

    /** Resolves a nested string value; every key but the last addresses a JSON object. */
    public static String stringConfig(String... keys) {
        JsonObject o = CONFIG;
        for (int i = 0; i < keys.length - 1; i++) {
            o = o.getJsonObject(keys[i]);
        }
        return o.getString(keys[keys.length - 1]);
    }

    /** Resolves a nested integer value; every key but the last addresses a JSON object. */
    public static int intConfig(String... keys) {
        JsonObject o = CONFIG;
        for (int i = 0; i < keys.length - 1; i++) {
            o = o.getJsonObject(keys[i]);
        }
        return o.getInteger(keys[keys.length - 1]);
    }

    /**
     * Resolves a nested boolean value; returns false when the immediate
     * parent object is absent (intermediate levels must still exist).
     */
    public static boolean booleanConfig(String... keys) {
        JsonObject o = CONFIG;
        for (int i = 0; i < keys.length - 1; i++) {
            o = o.getJsonObject(keys[i]);
        }
        if (o == null) {
            return false;
        }
        return o.getBoolean(keys[keys.length - 1]);
    }

    public static String workspace() {
        return WORKSPACE;
    }

    public static JifaHooks hooks() {
        return HOOKS;
    }
}
| 3,211 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/Constant.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker;
/**
 * Worker-side constant definitions, extending the shared common constants.
 */
public interface Constant extends org.eclipse.jifa.common.Constant {
    // System properties and default resource names used at bootstrap (see Worker.main).
    interface Misc {
        String VERTX_CONFIG_PROP = "jifa.vertx.config";
        String WORKER_CONFIG_PROP = "jifa.worker.config";
        String DEFAULT_VERTX_CONFIG_FILE = "vertx-config.json";
        String DEFAULT_WORKER_CONFIG_FILE = "worker-config.json";
        String DEFAULT_HOST = "0.0.0.0";
        String WEB_ROOT_KEY = "jifa.webroot";
    }
    // JSON keys for heap-dump analysis results.
    interface Heap {
        String TOTAL_SIZE_KEY = "totalSize";
        String SHALLOW_HEAP_KEY = "shallowHeap";
        String RETAINED_HEAP_KEY = "retainedHeap";
    }
    interface File {
        // Suffix of the per-file metadata JSON stored alongside each uploaded file.
        String INFO_FILE_SUFFIX = "-info.json";
    }
    // Keys looked up in the worker configuration JSON.
    interface ConfigKey {
        String BASIC_AUTH = "basicAuth";
        String ENABLED = "enabled";
        String WORKSPACE = "workspace";
        String API_PREFIX = "api.prefix";
        String SERVER_HOST_KEY = "server.host";
        String SERVER_PORT_KEY = "server.port";
        String USERNAME = "username";
        String PASSWORD = "password";
        String HOOKS_NAME_KEY = "hooks.className";
        String SERVER_UPLOAD_DIR_KEY = "server.uploadDir";
    }
    // Keys of the optional analyzer cache configuration section.
    interface CacheConfig {
        String CACHE_CONFIG = "cacheConfig";
        String EXPIRE_AFTER_ACCESS = "expireAfterAccess";
        String EXPIRE_AFTER_ACCESS_TIME_UNIT = "expireAfterAccessTimeUnit";
    }
}
| 3,212 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/Worker.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker;
import com.google.common.base.Strings;
import io.vertx.core.*;
import io.vertx.core.http.HttpServer;
import io.vertx.core.json.JsonObject;
import io.vertx.ext.auth.User;
import io.vertx.ext.web.Router;
import io.vertx.ext.web.handler.*;
import org.apache.commons.io.FileUtils;
import org.eclipse.jifa.common.JifaException;
import org.eclipse.jifa.common.JifaHooks;
import org.eclipse.jifa.common.util.FileUtil;
import org.eclipse.jifa.worker.route.RouteFiller;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.net.ServerSocket;
import java.nio.charset.Charset;
import java.util.concurrent.CountDownLatch;
import static org.eclipse.jifa.worker.Constant.ConfigKey.*;
import static org.eclipse.jifa.worker.Constant.Misc.*;
import static org.eclipse.jifa.worker.WorkerGlobal.stringConfig;
/**
 * The worker verticle: boots an HTTP server per CPU core, wires up routing,
 * static file serving, CORS, optional basic auth and user hooks.
 */
public class Worker extends AbstractVerticle {

    private static final Logger LOGGER = LoggerFactory.getLogger(Worker.class);

    // One count-down per verticle instance; main() blocks until every
    // instance has finished starting its HTTP server.
    private static CountDownLatch count = new CountDownLatch(Runtime.getRuntime().availableProcessors());

    private static long startTime;

    public static void main(String[] args) throws InterruptedException, IOException {
        startTime = System.currentTimeMillis();
        JsonObject vertxConfig = readConfig(VERTX_CONFIG_PROP, DEFAULT_VERTX_CONFIG_FILE);
        Vertx vertx = Vertx.vertx(new VertxOptions(vertxConfig));
        JsonObject jifaConfig = readConfig(WORKER_CONFIG_PROP, DEFAULT_WORKER_CONFIG_FILE);
        // Force-disable basic auth for a standalone worker.
        // Fixed: guard against a config file that omits the basicAuth section
        // entirely (used to throw an NPE).
        JsonObject basicAuth = jifaConfig.getJsonObject(BASIC_AUTH);
        if (basicAuth != null) {
            basicAuth.put(ENABLED, false);
        }
        vertx.deployVerticle(Worker.class.getName(), new DeploymentOptions().setConfig(jifaConfig).setInstances(
            Runtime.getRuntime().availableProcessors()));
        count.await();
    }

    /**
     * Picks a free TCP port by binding an ephemeral socket and releasing it.
     * Fixed: the socket is now closed even when an exception occurs
     * (try-with-resources instead of a manual close).
     */
    private static int randomPort() {
        try (ServerSocket socket = new ServerSocket(0)) {
            return socket.getLocalPort();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Loads the bundled default config resource, then merges in the optional
     * customized config file pointed to by the given system property.
     */
    private static JsonObject readConfig(String key, String def) throws IOException {
        // Read default config first
        JsonObject config = new JsonObject(FileUtil.content(Worker.class.getClassLoader().getResourceAsStream(def)));
        // Merge the config items if customized config file is set.
        String v = System.getProperty(key);
        if (!Strings.isNullOrEmpty(v)) {
            JsonObject customConfig = new JsonObject(FileUtils.readFileToString(new File(v), Charset.defaultCharset()));
            config.mergeIn(customConfig);
        }
        return config;
    }

    // test support
    public static void setCount(CountDownLatch count) {
        Worker.count = count;
    }

    // test support
    public static void resetCount() {
        count = new CountDownLatch(Runtime.getRuntime().availableProcessors());
    }

    /**
     * Instantiates the hook class named in the configuration, falling back to
     * a no-op implementation when none is configured or instantiation fails.
     */
    @SuppressWarnings("unchecked")
    JifaHooks findHooks() {
        JifaHooks hook = null;
        if (config().containsKey(HOOKS_NAME_KEY)) {
            String className = config().getString(HOOKS_NAME_KEY);
            try {
                LOGGER.info("applying hooks: " + className);
                Class<JifaHooks> clazz = (Class<JifaHooks>) Class.forName(className);
                hook = clazz.getConstructor().newInstance();
                hook.init(config());
            } catch (ReflectiveOperationException e) {
                LOGGER.warn("could not start hook class: " + className + ", due to error", e);
            }
        }
        return hook != null ? hook : new JifaHooks.EmptyHooks();
    }

    // Installs a basic-auth handler that checks credentials against the
    // configured username/password.
    private void setupBasicAuthHandler(Router router) {
        AuthenticationHandler authHandler = BasicAuthHandler.create((authInfo, resultHandler) -> {
            Promise<User> promise = Promise.promise();
            if (stringConfig(BASIC_AUTH, USERNAME).equals(authInfo.getString(USERNAME)) &&
                stringConfig(BASIC_AUTH, PASSWORD).equals(authInfo.getString(PASSWORD))) {
                promise.complete(User.create(authInfo));
            } else {
                promise.fail(new JifaException("Illegal User"));
            }
            resultHandler.handle(promise.future());
        });
        router.route().handler(authHandler);
    }

    @Override
    public void start() {
        String host = config().containsKey(SERVER_HOST_KEY) ? config().getString(SERVER_HOST_KEY) : DEFAULT_HOST;
        int port = config().containsKey(SERVER_PORT_KEY) ? config().getInteger(SERVER_PORT_KEY) : randomPort();
        String staticRoot = System.getProperty(WEB_ROOT_KEY, "webroot");
        String uploadDir =
            config().containsKey(SERVER_UPLOAD_DIR_KEY) ? config().getString(SERVER_UPLOAD_DIR_KEY) : null;
        JifaHooks hooks = findHooks();
        vertx.executeBlocking(event -> {
            WorkerGlobal.init(vertx, host, port, config(), hooks);
            HttpServer server = vertx.createHttpServer(hooks.serverOptions(vertx));
            Router router = Router.router(vertx);
            // body handler always needs to be first so it can read the body
            if (uploadDir == null) {
                router.post().handler(BodyHandler.create());
            } else {
                router.post().handler(BodyHandler.create(uploadDir));
            }
            hooks.beforeRoutes(vertx, router);
            File webRoot = new File(staticRoot);
            if (webRoot.exists() && webRoot.isDirectory()) {
                StaticHandler staticHandler = StaticHandler.create();
                staticHandler.setAllowRootFileSystemAccess(true);
                staticHandler.setWebRoot(staticRoot);
                // serve every non-api path statically
                String staticPattern = "^(?!" + WorkerGlobal.stringConfig(Constant.ConfigKey.API_PREFIX) + ").*$";
                router.routeWithRegex(staticPattern)
                      .handler(staticHandler)
                      // route to "/" if not found
                      .handler(context -> context.reroute("/"));
            }
            // cors
            router.route().handler(CorsHandler.create("*"));
            // basic auth
            if (WorkerGlobal.booleanConfig(BASIC_AUTH, Constant.ConfigKey.ENABLED)) {
                setupBasicAuthHandler(router);
            }
            // Fixed: a second BodyHandler used to be registered here, causing
            // POST bodies to be processed twice; the handler registered at the
            // top of this method already covers all POST routes.
            new RouteFiller(router).fill();
            hooks.afterRoutes(vertx, router);
            server.requestHandler(router);
            server.listen(port, host, ar -> {
                if (ar.succeeded()) {
                    event.complete();
                } else {
                    event.fail(ar.cause());
                }
            });
        }, ar -> {
            if (ar.succeeded()) {
                LOGGER.info("Jifa-Worker startup successfully in {} ms, verticle count = {}, http server = {}:{}",
                            System.currentTimeMillis() - startTime,
                            Runtime.getRuntime().availableProcessors(),
                            WorkerGlobal.HOST,
                            WorkerGlobal.PORT);
                count.countDown();
            } else {
                LOGGER.error("Failed to start Jifa' worker side", ar.cause());
                System.exit(-1);
            }
        });
    }

    @Override
    public void stop() {
        WorkerGlobal.reset();
    }
}
| 3,213 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/RouteFiller.java | /********************************************************************************
* Copyright (c) 2020, 2022 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route;
import io.vertx.ext.web.Route;
import io.vertx.ext.web.Router;
import org.eclipse.jifa.common.ErrorCode;
import org.eclipse.jifa.common.JifaException;
import org.eclipse.jifa.common.util.HTTPRespGuarder;
import org.eclipse.jifa.worker.route.gclog.GCLogBaseRoute;
import org.eclipse.jifa.worker.Constant;
import org.eclipse.jifa.worker.WorkerGlobal;
import org.eclipse.jifa.worker.route.heapdump.HeapBaseRoute;
import org.eclipse.jifa.worker.route.threaddump.ThreadDumpBaseRoute;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.lang.reflect.Parameter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * Discovers the worker's route classes, instantiates them reflectively and
 * registers every @RouteMeta-annotated method on the Vert.x router.
 */
public class RouteFiller {
    private static final Logger LOGGER = LoggerFactory.getLogger(RouteFiller.class);
    private Router router;
    public RouteFiller(Router router) {
        this.router = router;
    }
    // Registers the fixed route classes plus every heap-dump, GC-log and
    // thread-dump route class reported by their respective base routes.
    public void fill() {
        try {
            register(FileRoute.class);
            register(AnalysisRoute.class);
            register(SystemRoute.class);
            for (Class<? extends HeapBaseRoute> route : HeapBaseRoute.routes()) {
                register(route);
            }
            for (Class<? extends GCLogBaseRoute> route: GCLogBaseRoute.routes()){
                register(route);
            }
            for (Class<? extends ThreadDumpBaseRoute> route: ThreadDumpBaseRoute.routes()){
                register(route);
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    // Computes all URI prefixes for a route class by walking its class
    // hierarchy and combining every @MappingPrefix value found.
    private String[] buildPrefixes(Class<?> clazz) {
        ArrayList<String> prefixes = new ArrayList<>();
        buildPrefix(prefixes, "", clazz);
        return prefixes.toArray(new String[0]);
    }
    // Recursively prepends ancestor prefixes; when the hierarchy is exhausted
    // (clazz == null) the configured API prefix is prepended and the result
    // collected. Classes without @MappingPrefix contribute nothing.
    private void buildPrefix(ArrayList<String> prefixes, String prevPrefix, Class<?> clazz) {
        if (clazz == null) {
            String rootPrefix = WorkerGlobal.stringConfig(Constant.ConfigKey.API_PREFIX);
            prefixes.add(rootPrefix + prevPrefix);
            return;
        }
        MappingPrefix anno = clazz.getDeclaredAnnotation(MappingPrefix.class);
        if (anno == null) {
            buildPrefix(prefixes, prevPrefix, clazz.getSuperclass());
        } else {
            for (int i = 0; i < anno.value().length; i++) {
                buildPrefix(prefixes, anno.value()[i] + prevPrefix, clazz.getSuperclass());
            }
        }
    }
    // Instantiates the route class via its (possibly non-public) no-arg
    // constructor and registers each declared method under every prefix.
    private void register(Class<? extends BaseRoute> clazz) throws Exception {
        Constructor<? extends BaseRoute> constructor = clazz.getDeclaredConstructor();
        constructor.setAccessible(true);
        BaseRoute thisObject = constructor.newInstance();
        String[] prefixes = buildPrefixes(clazz);
        Method[] methods = clazz.getDeclaredMethods();
        for (String prefix : prefixes) {
            for (Method method : methods) {
                registerMethodRoute(thisObject, prefix, method);
            }
        }
    }
    // Registers one @RouteMeta method as a blocking route handler. Arguments
    // are resolved per request by RouterAnnotationProcessor; any resolution or
    // invocation failure is turned into an HTTP error response.
    private void registerMethodRoute(BaseRoute thisObject, String prefix, Method method) {
        RouteMeta meta = method.getAnnotation(RouteMeta.class);
        if (meta == null) {
            return;
        }
        String fullPath = prefix + meta.path();
        Route route = router.route(meta.method().toVertx(), fullPath);
        Arrays.stream(meta.contentType()).forEach(route::produces);
        method.setAccessible(true);
        LOGGER.debug("Route: path = {}, method = {}", fullPath, method.toGenericString());
        route.blockingHandler(rc -> {
            try {
                // pre-process
                if (meta.contentType().length > 0) {
                    rc.response().putHeader("content-type", String.join(";", meta.contentType()));
                }
                List<Object> arguments = new ArrayList<>();
                Parameter[] params = method.getParameters();
                for (Parameter param : params) {
                    // Each processor recognizes one parameter style; the first
                    // match wins. An unrecognized parameter is a coding error.
                    if (!RouterAnnotationProcessor.processParamKey(arguments, rc, method, param) &&
                        !RouterAnnotationProcessor.processParamMap(arguments, rc, method, param) &&
                        !RouterAnnotationProcessor.processPagingRequest(arguments, rc, method, param) &&
                        !RouterAnnotationProcessor.processHttpServletRequest(arguments, rc, method, param) &&
                        !RouterAnnotationProcessor.processHttpServletResponse(arguments, rc, method, param) &&
                        !RouterAnnotationProcessor.processPromise(arguments, rc, method, param) &&
                        !RouterAnnotationProcessor.processRoutingContext(arguments, rc, method, param)
                    ) {
                        throw new JifaException(ErrorCode.ILLEGAL_ARGUMENT,
                                                "Illegal parameter meta, method = " + method);
                    }
                }
                method.invoke(thisObject, arguments.toArray());
            } catch (Throwable t) {
                HTTPRespGuarder.fail(rc, t);
            }
        }, false);
    }
}
| 3,214 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/MappingPrefix.java | /********************************************************************************
* Copyright (c) 2020 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface MappingPrefix {
    /**
     * One or more URI prefixes prepended to every {@code @RouteMeta} path
     * declared in the annotated route class; consumed by RouteFiller, which
     * also prepends the configured API prefix.
     */
    String[] value() default "";
}
| 3,215 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/SystemRoute.java | /********************************************************************************
* Copyright (c) 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.vo.DiskUsage;
import org.eclipse.jifa.worker.support.FileSupport;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@SuppressWarnings("unused")
class SystemRoute extends BaseRoute {

    private static final Logger LOGGER = LoggerFactory.getLogger(SystemRoute.class);

    /**
     * Reports this worker's disk usage (total and used space, in MB).
     */
    @RouteMeta(path = "/system/diskUsage")
    void diskUsage(Promise<DiskUsage> promise) {
        // NOTE: recomputed on every request; consider caching.
        long totalMb = FileSupport.getTotalDiskSpace();
        long usedMb = FileSupport.getUsedDiskSpace();
        assert totalMb >= usedMb;
        LOGGER.info("Disk total {}MB, used {}MB", totalMb, usedMb);
        promise.complete(new DiskUsage(totalMb, usedMb));
    }

    /**
     * Trivial reachability endpoint; completes immediately with no payload.
     */
    @RouteMeta(path = "/system/ping")
    void ping(Promise<Void> promise) {
        promise.complete();
    }
}
| 3,216 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/AnalysisRoute.java | /********************************************************************************
* Copyright (c) 2020, 2022 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route;
import io.vertx.core.Promise;
import io.vertx.core.http.HttpServerRequest;
import org.eclipse.jifa.common.enums.FileType;
import org.eclipse.jifa.common.util.FileUtil;
import org.eclipse.jifa.common.vo.Progress;
import org.eclipse.jifa.common.vo.Result;
import org.eclipse.jifa.worker.Constant;
import org.eclipse.jifa.worker.WorkerGlobal;
import org.eclipse.jifa.worker.support.Analyzer;
import org.eclipse.jifa.worker.support.FileSupport;
import java.io.File;
import java.util.Map;
/**
 * Routes common to all analysis types. The file-type segment of the URI
 * ("heap-dump", "gc-log", "thread-dump") selects the analyzer behavior.
 */
@MappingPrefix(
    value = {
        "/heap-dump/:file",
        "/gc-log/:file",
        "/thread-dump/:file"}
)
class AnalysisRoute extends BaseRoute {

    private final Analyzer helper = Analyzer.getInstance();

    /**
     * Derives the file type from the request URI: the path segment directly
     * after the configured API prefix is interpreted as the type tag.
     */
    // TODO: not good enough
    private FileType typeOf(HttpServerRequest request) {
        String uri = request.uri();
        String apiPrefix = WorkerGlobal.stringConfig(Constant.ConfigKey.API_PREFIX);
        int end = uri.indexOf("/", apiPrefix.length() + 1);
        return FileType.getByTag(uri.substring(apiPrefix.length() + 1, end));
    }

    /** Whether the given file has never been analyzed before. */
    @RouteMeta(path = "/isFirstAnalysis")
    void isFirstAnalysis(HttpServerRequest request, Promise<Result<Boolean>> promise, @ParamKey("file") String file) {
        promise.complete(new Result<>(helper.isFirstAnalysis(typeOf(request), file)));
    }

    /**
     * Starts analysis of the given file with the optional request options.
     * Fixed: the {@code mandatory} array declared three entries for two keys;
     * it now matches {@code keys} element for element.
     */
    @RouteMeta(path = "/analyze", method = HttpMethod.POST)
    void analyze(HttpServerRequest request, Promise<Void> promise, @ParamKey("file") String file,
                 @ParamMap(keys = {"keep_unreachable_objects", "strictness"},
                         mandatory = {false, false}) Map<String, String> options) {
        helper.analyze(promise, typeOf(request), file, options);
    }

    /** Polls the progress of an in-flight analysis. */
    @RouteMeta(path = "/progressOfAnalysis")
    void poll(HttpServerRequest request, Promise<Progress> promise, @ParamKey("file") String file) {
        promise.complete(helper.pollProgress(typeOf(request), file));
    }

    /** Delegates release of the file's analysis context to the analyzer. */
    @RouteMeta(path = "/release", method = HttpMethod.POST)
    void release(HttpServerRequest request, Promise<Void> promise, @ParamKey("file") String file) {
        helper.release(file);
        promise.complete();
    }

    /** Delegates cleanup of the file's analysis data to the analyzer. */
    @RouteMeta(path = "/clean", method = HttpMethod.POST)
    void clean(HttpServerRequest request, Promise<Void> promise, @ParamKey("file") String file) {
        helper.clean(typeOf(request), file);
        promise.complete();
    }

    /** Returns the content of the analysis error log for the file. */
    @RouteMeta(path = "/errorLog")
    void failedLog(HttpServerRequest request, Promise<Result<String>> promise, @ParamKey("file") String file) {
        promise.complete(new Result<>(FileUtil.content(new File(FileSupport.errorLogPath(typeOf(request), file)))));
    }
}
| 3,217 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/ParamKey.java | /********************************************************************************
* Copyright (c) 2020 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.PARAMETER)
public @interface ParamKey {
    /**
     * Name of the HTTP request parameter bound to the annotated method
     * parameter (resolved by RouterAnnotationProcessor.processParamKey).
     */
    String value();
    /**
     * When true (the default) the request fails if the parameter is absent;
     * when false a missing parameter is passed through as null.
     */
    boolean mandatory() default true;
}
| 3,218 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/RouterAnnotationProcessor.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route;
import com.google.gson.Gson;
import io.vertx.core.Promise;
import io.vertx.core.http.HttpServerRequest;
import io.vertx.core.http.HttpServerResponse;
import io.vertx.ext.web.RoutingContext;
import org.eclipse.jifa.common.ErrorCode;
import org.eclipse.jifa.common.JifaException;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.util.HTTPRespGuarder;
import org.eclipse.jifa.gclog.diagnoser.AnalysisConfig;
import java.lang.reflect.Method;
import java.lang.reflect.Parameter;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import static org.eclipse.jifa.common.util.Assertion.ASSERT;
class RouterAnnotationProcessor {
private static Map<Class<?>, Function<String, ?>> converter = new HashMap<>();
static {
converter.put(String.class, s -> s);
converter.put(Integer.class, Integer::parseInt);
converter.put(int.class, Integer::parseInt);
converter.put(Long.class, Long::parseLong);
converter.put(long.class, Long::parseLong);
converter.put(Float.class, Float::parseFloat);
converter.put(float.class, Float::parseFloat);
converter.put(Double.class, Double::parseDouble);
converter.put(double.class, Double::parseDouble);
converter.put(Boolean.class, Boolean::parseBoolean);
converter.put(boolean.class, Boolean::parseBoolean);
converter.put(int[].class, s -> new Gson().fromJson(s, int[].class));
converter.put(String[].class, s -> new Gson().fromJson(s, String[].class));
converter.put(AnalysisConfig.class, s -> new Gson().fromJson(s, AnalysisConfig.class));
}
static boolean processParamKey(List<Object> arguments, RoutingContext context, Method method, Parameter param) {
// param key
ParamKey paramKey = param.getAnnotation(ParamKey.class);
if (paramKey != null) {
String value = context.request().getParam(paramKey.value());
ASSERT.isTrue(!paramKey.mandatory() || value != null, ErrorCode.ILLEGAL_ARGUMENT,
() -> "Miss request parameter, key = " + paramKey.value());
arguments.add(value != null ? convert(method, param, context.request().getParam(paramKey.value())) : null);
return true;
}
return false;
}
static boolean processParamMap(List<Object> arguments, RoutingContext context, Method method, Parameter param) {
ParamMap paramMap = param.getAnnotation(ParamMap.class);
if (paramMap != null) {
Map<String, String> map = new HashMap<>();
String[] keys = paramMap.keys();
boolean[] mandatory = paramMap.mandatory();
for (int j = 0; j < keys.length; j++) {
String key = keys[j];
String value = context.request().getParam(key);
ASSERT.isTrue(!mandatory[j] || value != null, ErrorCode.ILLEGAL_ARGUMENT,
() -> "Miss request parameter, key = " + key);
if (value != null) {
map.put(key, value);
}
}
arguments.add(map);
return true;
}
return false;
}
static boolean processPagingRequest(List<Object> arguments, RoutingContext context, Method method,
Parameter param) {
if (param.getType() == PagingRequest.class) {
int page;
int pageSize;
try {
page = Integer.parseInt(context.request().getParam("page"));
pageSize = Integer.parseInt(context.request().getParam("pageSize"));
ASSERT.isTrue(page >= 1 && pageSize >= 1, ErrorCode.ILLEGAL_ARGUMENT,
"must greater than 1");
} catch (Exception e) {
throw new JifaException(ErrorCode.ILLEGAL_ARGUMENT, "Paging parameter(page or pageSize) is illegal, " +
e.getMessage());
}
arguments.add(new PagingRequest(page, pageSize));
return true;
}
return false;
}
static boolean processHttpServletRequest(List<Object> arguments, RoutingContext context, Method method,
Parameter param) {
if (param.getType().equals(HttpServerRequest.class)) {
arguments.add(context.request());
return true;
}
return false;
}
static boolean processHttpServletResponse(List<Object> arguments, RoutingContext context, Method method,
Parameter param) {
if (param.getType().equals(HttpServerResponse.class)) {
arguments.add(context.response());
return true;
}
return false;
}
static boolean processPromise(List<Object> arguments, RoutingContext context, Method method, Parameter param) {
if (param.getType().equals(Promise.class)) {
arguments.add(newPromise(context));
return true;
}
return false;
}
static boolean processRoutingContext(List<Object> arguments, RoutingContext context, Method method,
Parameter param) {
if (param.getType().equals(RoutingContext.class)) {
arguments.add(context);
return true;
}
return false;
}
private static Object convert(Method m, Parameter p, String value) {
Class<?> type = p.getType();
Function<String, ?> f;
if (type.isEnum()) {
f = s -> {
for (Object e : type.getEnumConstants()) {
if (((Enum) e).name().equalsIgnoreCase(value)) {
return e;
}
}
throw new JifaException(ErrorCode.ILLEGAL_ARGUMENT,
"Illegal parameter value, parameter = " + p + ", value = " + value);
};
} else {
f = converter.get(type);
}
ASSERT.notNull(f, () -> "Unsupported parameter type, method = " + m + ", parameter = " + p);
return f.apply(value);
}
private static <T> Promise<T> newPromise(io.vertx.ext.web.RoutingContext rc) {
Promise<T> promise = Promise.promise();
promise.future().onComplete(
event -> {
if (event.succeeded()) {
HTTPRespGuarder.ok(rc, event.result());
} else {
HTTPRespGuarder.fail(rc, event.cause());
}
}
);
return promise;
}
} | 3,219 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/FileRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route;
import io.vertx.core.Promise;
import io.vertx.core.http.HttpServerResponse;
import io.vertx.ext.web.FileUpload;
import io.vertx.ext.web.RoutingContext;
import org.apache.logging.log4j.util.Strings;
import org.eclipse.jifa.common.ErrorCode;
import org.eclipse.jifa.common.JifaException;
import org.eclipse.jifa.common.enums.FileTransferState;
import org.eclipse.jifa.common.enums.FileType;
import org.eclipse.jifa.common.enums.ProgressState;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.util.HTTPRespGuarder;
import org.eclipse.jifa.common.util.PageViewBuilder;
import org.eclipse.jifa.common.vo.FileInfo;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.common.vo.TransferProgress;
import org.eclipse.jifa.common.vo.TransferringFile;
import org.eclipse.jifa.worker.WorkerGlobal;
import org.eclipse.jifa.worker.support.FileSupport;
import org.eclipse.jifa.worker.support.TransferListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.List;
import static org.eclipse.jifa.common.Constant.*;
import static org.eclipse.jifa.common.util.Assertion.ASSERT;
import static org.eclipse.jifa.common.util.GsonHolder.GSON;
class FileRoute extends BaseRoute {
private static final Logger LOGGER = LoggerFactory.getLogger(FileRoute.class);
    // Lists files of the given type, newest first, paged.
    @RouteMeta(path = "/files")
    void list(Promise<PageView<FileInfo>> promise, @ParamKey("type") FileType type, PagingRequest paging) {
        List<FileInfo> info = FileSupport.info(type);
        info.sort((i1, i2) -> Long.compare(i2.getCreationTime(), i1.getCreationTime()));
        promise.complete(PageViewBuilder.build(info, paging));
    }
    // Returns the metadata of a single file.
    @RouteMeta(path = "/file")
    void file(Promise<FileInfo> promise, @ParamKey("type") FileType type, @ParamKey("name") String name) {
        promise.complete(FileSupport.info(type, name));
    }
    // Deletes the named file of the given type from the workspace.
    @RouteMeta(path = "/file/delete", method = HttpMethod.POST)
    void delete(Promise<Void> promise, @ParamKey("type") FileType type, @ParamKey("name") String name) {
        FileSupport.delete(type, name);
        promise.complete();
    }
@RouteMeta(path = "/publicKey")
void publicKeys(Promise<String> promise) {
if (FileSupport.PUB_KEYS.size() > 0) {
promise.complete(FileSupport.PUB_KEYS.get(0));
} else {
promise.complete(EMPTY_STRING);
}
}
private String decorateFileName(String fileName) {
return System.currentTimeMillis() + "-" + fileName;
}
private String extractFileName(String path) {
return path.substring(path.lastIndexOf(File.separatorChar) + 1);
}
    /**
     * Starts downloading a file from an HTTP(S) URL into the workspace.
     * The stored name defaults to a timestamp-decorated version of the URL's
     * trailing path component unless the caller supplies one.
     */
    @RouteMeta(path = "/file/transferByURL", method = HttpMethod.POST)
    void transferByURL(Promise<TransferringFile> promise, @ParamKey("type") FileType fileType,
                       @ParamKey("url") String url, @ParamKey(value = "fileName", mandatory = false) String fileName) {
        String originalName;
        try {
            originalName = extractFileName(new URL(url).getPath());
        } catch (MalformedURLException e) {
            LOGGER.warn("invalid url: {}", url);
            throw new JifaException(ErrorCode.ILLEGAL_ARGUMENT, e);
        }
        fileName = Strings.isNotBlank(fileName) ? fileName : decorateFileName(originalName);
        TransferListener listener = FileSupport.createTransferListener(fileType, originalName, fileName);
        FileSupport.transferByURL(url, fileType, fileName, listener, promise);
    }
    /**
     * Starts transferring an object from Aliyun OSS into the workspace.
     * The stored name defaults to a timestamp-decorated version of the
     * object name's trailing component unless the caller supplies one.
     */
    @RouteMeta(path = "/file/transferByOSS", method = HttpMethod.POST)
    void transferByOSS(Promise<TransferringFile> promise, @ParamKey("type") FileType fileType,
                       @ParamKey("endpoint") String endpoint, @ParamKey("accessKeyId") String accessKeyId,
                       @ParamKey("accessKeySecret") String accessKeySecret, @ParamKey("bucketName") String bucketName,
                       @ParamKey("objectName") String objectName,
                       @ParamKey(value = "fileName", mandatory = false) String fileName) {
        String originalName = extractFileName(objectName);
        fileName = Strings.isNotBlank(fileName) ? fileName : decorateFileName(originalName);
        TransferListener listener = FileSupport.createTransferListener(fileType, originalName, fileName);
        FileSupport.transferByOSS(endpoint, accessKeyId, accessKeySecret, bucketName, objectName,
                                  fileType, fileName, listener, promise);
    }
    /**
     * Starts transferring an object from an S3-compatible store into the
     * workspace; mirrors transferByOSS but with S3 credential parameters.
     */
    @RouteMeta(path = "/file/transferByS3", method = HttpMethod.POST)
    void transferByS3(Promise<TransferringFile> promise, @ParamKey("type") FileType fileType,
                      @ParamKey("endpoint") String endpoint, @ParamKey("accessKey") String accessKey,
                      @ParamKey("keySecret") String keySecret, @ParamKey("bucketName") String bucketName,
                      @ParamKey("objectName") String objectName,
                      @ParamKey(value = "fileName", mandatory = false) String fileName) {
        String originalName = extractFileName(objectName);
        fileName = Strings.isNotBlank(fileName) ? fileName : decorateFileName(originalName);
        TransferListener listener = FileSupport.createTransferListener(fileType, originalName, fileName);
        FileSupport.transferByS3(endpoint, accessKey, keySecret, bucketName, objectName,
                                 fileType, fileName, listener, promise);
    }
@RouteMeta(path = "/file/transferBySCP", method = HttpMethod.POST)
void transferBySCP(Promise<TransferringFile> promise, @ParamKey("type") FileType fileType,
@ParamKey("hostname") String hostname, @ParamKey("path") String path,
@ParamKey("user") String user, @ParamKey("usePublicKey") boolean usePublicKey,
@ParamKey(value = "password", mandatory = false) String password,
@ParamKey(value = "fileName", mandatory = false) String fileName) {
if (!usePublicKey) {
ASSERT.isTrue(password != null && password.length() > 0,
"Must provide password if you don't use public key");
}
String originalName = extractFileName(path);
fileName = Strings.isNotBlank(fileName) ? fileName : decorateFileName(extractFileName(path));
TransferListener listener = FileSupport.createTransferListener(fileType, originalName, fileName);
// do transfer
if (usePublicKey) {
FileSupport.transferBySCP(user, hostname, path, fileType, fileName, listener, promise);
} else {
FileSupport.transferBySCP(user, password, hostname, path, fileType, fileName, listener, promise);
}
}
@RouteMeta(path = "/file/transferByFileSystem", method = HttpMethod.POST)
void transferByFileSystem(Promise<TransferringFile> promise, @ParamKey("type") FileType fileType,
@ParamKey("path") String path, @ParamKey("move") boolean move) {
File src = new File(path);
ASSERT.isTrue(src.exists() && !src.isDirectory(), "Illegal path");
String originalName = extractFileName(path);
String fileName = decorateFileName(originalName);
promise.complete(new TransferringFile(fileName));
TransferListener listener = FileSupport.createTransferListener(fileType, originalName, fileName);
listener.setTotalSize(src.length());
listener.updateState(ProgressState.IN_PROGRESS);
if (move) {
WorkerGlobal.VERTX.fileSystem().moveBlocking(path, FileSupport.filePath(fileType, fileName));
} else {
WorkerGlobal.VERTX.fileSystem().copyBlocking(path, FileSupport.filePath(fileType, fileName));
}
listener.setTransferredSize(listener.getTotalSize());
listener.updateState(ProgressState.SUCCESS);
}
    /**
     * Reports the progress of a file transfer.
     * <p>
     * Two sources are consulted, in order:
     * 1) a live {@code TransferListener} for an in-flight transfer;
     * 2) otherwise, the persisted {@code FileInfo} of an already-finished (or failed) transfer.
     *
     * @param promise completed with the current progress snapshot
     * @param type    file category
     * @param name    target file name used when the transfer was created
     */
    @RouteMeta(path = "/file/transferProgress")
    void transferProgress(Promise<TransferProgress> promise, @ParamKey("type") FileType type,
                          @ParamKey("name") String name) {
        TransferListener listener = FileSupport.getTransferListener(name);
        if (listener != null) {
            // Live transfer: build the snapshot from the in-memory listener.
            TransferProgress progress = new TransferProgress();
            progress.setTotalSize(listener.getTotalSize());
            progress.setTransferredSize(listener.getTransferredSize());
            progress.setMessage(listener.getErrorMsg());
            if (listener.getTotalSize() > 0) {
                // Guard against division by zero when the total size is unknown.
                progress.setPercent((double) listener.getTransferredSize() / (double) listener.getTotalSize());
            }
            progress.setState(listener.getState());
            if (progress.getState() == ProgressState.SUCCESS || progress.getState() == ProgressState.ERROR) {
                // Terminal state reached: drop the listener so later polls fall through
                // to the persisted FileInfo branch below.
                FileSupport.removeTransferListener(name);
            }
            promise.complete(progress);
        } else {
            FileInfo info = FileSupport.infoOrNull(type, name);
            if (info == null) {
                // Neither a live listener nor a record exists: report ERROR.
                TransferProgress progress = new TransferProgress();
                progress.setState(ProgressState.ERROR);
                promise.complete(progress);
                return;
            }
            if (info.getTransferState() == FileTransferState.IN_PROGRESS
                || info.getTransferState() == FileTransferState.NOT_STARTED) {
                // No listener but the record still claims an active transfer — this is
                // inconsistent (e.g. worker restart mid-transfer); repair it to ERROR.
                LOGGER.warn("Illegal file {} state", name);
                info.setTransferState(FileTransferState.ERROR);
                FileSupport.save(info);
            }
            TransferProgress progress = new TransferProgress();
            progress.setState(info.getTransferState().toProgressState());
            if (progress.getState() == ProgressState.SUCCESS) {
                progress.setPercent(1.0);
                progress.setTotalSize(info.getSize());
                progress.setTransferredSize(info.getSize());
            }
            promise.complete(progress);
        }
    }
@RouteMeta(path = "/file/sync", method = HttpMethod.POST)
void sync(Promise<Void> promise, @ParamKey("files") String files, @ParamKey("cleanStale") boolean cleanStale) {
promise.complete();
FileInfo[] fileInfos = GSON.fromJson(files, FileInfo[].class);
FileSupport.sync(fileInfos, cleanStale);
}
    /**
     * Handles a multipart file upload. Only the first uploaded part is kept and
     * moved into managed storage; any additional parts are deleted.
     *
     * @param context  the routing context carrying the multipart uploads
     * @param type     file category
     * @param fileName optional explicit target name; derived from the upload when blank
     */
    @RouteMeta(path = "/file/upload", method = HttpMethod.POST)
    void upload(RoutingContext context, @ParamKey("type") FileType type,
                @ParamKey(value = "fileName", mandatory = false) String fileName) {
        FileUpload[] uploads = context.fileUploads().toArray(new FileUpload[0]);
        try {
            if (uploads.length > 0) {
                // only process the first file
                FileUpload file = uploads[0];
                if (fileName == null || fileName.isBlank()) {
                    fileName = decorateFileName(file.fileName());
                }
                TransferListener listener = FileSupport.createTransferListener(type, file.fileName(), fileName);
                listener.updateState(ProgressState.IN_PROGRESS);
                try {
                    // Move the temp upload into its final location, then record success.
                    // NOTE(review): if moveBlocking throws, the transfer state is never
                    // set to ERROR before the listener is removed — confirm intended.
                    context.vertx().fileSystem()
                           .moveBlocking(file.uploadedFileName(), FileSupport.filePath(type, fileName));
                    FileSupport.updateTransferState(type, fileName, FileTransferState.SUCCESS);
                } finally {
                    // The upload is synchronous, so the listener is no longer needed.
                    FileSupport.removeTransferListener(fileName);
                }
            }
            HTTPRespGuarder.ok(context);
        } finally {
            // remove other files
            for (int i = 1; i < uploads.length; i++) {
                context.vertx().fileSystem().deleteBlocking(uploads[i].uploadedFileName());
            }
        }
    }
    /**
     * Streams a stored file back to the client as an attachment.
     *
     * @param context  the routing context whose response receives the file
     * @param fileType file category
     * @param name     stored file name; must exist on this worker
     */
    @RouteMeta(path = "/file/download", contentType = {CONTENT_TYPE_FILE_FORM})
    void download(RoutingContext context, @ParamKey("type") FileType fileType, @ParamKey("name") String name) {
        File file = new File(FileSupport.filePath(fileType, name));
        ASSERT.isTrue(file.exists(), "File doesn't exist!");
        HttpServerResponse response = context.response();
        // NOTE(review): the filename is unquoted; names containing spaces or semicolons
        // would produce a malformed Content-Disposition header — consider RFC 6266 quoting.
        response.putHeader(HEADER_CONTENT_DISPOSITION, "attachment;filename=" + file.getName());
        response.sendFile(file.getAbsolutePath(), event -> {
            // End the response explicitly if sendFile has not already done so.
            if (!response.ended()) {
                response.end();
            }
        });
    }
@RouteMeta(path = "/file/getOrGenInfo", method = HttpMethod.POST)
void getOrGenInfo(Promise<FileInfo> promise, @ParamKey("fileType") FileType fileType,
@ParamKey("filename") String name) {
promise.complete(FileSupport.getOrGenInfo(fileType, name));
}
@RouteMeta(path = "/file/batchDelete", method = HttpMethod.POST)
void batchDelete(Promise<Void> promise, @ParamKey("files") String files) {
promise.complete();
FileInfo[] fileInfos = GSON.fromJson(files, FileInfo[].class);
FileSupport.delete(fileInfos);
}
}
| 3,220 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/HttpMethod.java | /********************************************************************************
* Copyright (c) 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route;
/**
 * HTTP methods supported by worker routes, each mapped onto its Vert.x counterpart.
 */
public enum HttpMethod {
    GET(io.vertx.core.http.HttpMethod.GET),
    POST(io.vertx.core.http.HttpMethod.POST);

    // The equivalent Vert.x core method, used when registering routes.
    private final io.vertx.core.http.HttpMethod vertxMethod;

    HttpMethod(io.vertx.core.http.HttpMethod vertxMethod) {
        this.vertxMethod = vertxMethod;
    }

    /**
     * @return the Vert.x representation of this method
     */
    public io.vertx.core.http.HttpMethod toVertx() {
        return vertxMethod;
    }
}
| 3,221 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/ParamMap.java | /********************************************************************************
* Copyright (c) 2020 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import static java.lang.annotation.ElementType.PARAMETER;
/**
 * Binds several named request parameters to a single handler argument.
 * <p>
 * {@link #keys()} and {@link #mandatory()} are parallel arrays: the parameter
 * named {@code keys()[i]} is required iff {@code mandatory()[i]} is true.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(PARAMETER)
public @interface ParamMap {
    // Request parameter names to collect.
    String[] keys();

    // Whether the parameter at the same index in keys() is required.
    boolean[] mandatory();
}
| 3,222 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/BaseRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route;
/**
 * Common supertype of all worker HTTP route handler classes
 * (e.g. the heap-dump and thread-dump route hierarchies).
 */
public abstract class BaseRoute {
}
| 3,223 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/RouteMeta.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Declares the HTTP binding of a route handler method: its path, HTTP method
 * (GET by default) and response content type(s).
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @interface RouteMeta {
    // HTTP method used to register the route; defaults to GET.
    HttpMethod method() default HttpMethod.GET;

    // Route path; presumably relative to any enclosing @MappingPrefix — confirm with router setup.
    String path();

    // Response content type(s); defaults to JSON.
    String[] contentType() default {"application/json; charset=utf-8"};
}
| 3,224 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/threaddump/ThreadDumpBaseRoute.java | /********************************************************************************
* Copyright (c) 2022 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.threaddump;
import org.eclipse.jifa.worker.route.BaseRoute;
import org.eclipse.jifa.worker.route.MappingPrefix;
import java.util.ArrayList;
import java.util.List;
/**
 * Base route for thread-dump analysis APIs, mounted under "/thread-dump/:file".
 */
@MappingPrefix("/thread-dump/:file")
public class ThreadDumpBaseRoute extends BaseRoute {

    // Registry of thread-dump sub-routes, built once at class load.
    // Declared final for consistency with HeapBaseRoute's ROUTES field.
    private static final List<Class<? extends ThreadDumpBaseRoute>> ROUTES = new ArrayList<>();

    static {
        ROUTES.add(ThreadDumpRoute.class);
    }

    /**
     * @return the route classes to register for thread-dump analysis
     */
    public static List<Class<? extends ThreadDumpBaseRoute>> routes() {
        return ROUTES;
    }
}
| 3,225 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/threaddump/ThreadDumpRoute.java | /********************************************************************************
* Copyright (c) 2022 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.threaddump;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.tda.ThreadDumpAnalyzer;
import org.eclipse.jifa.tda.enums.MonitorState;
import org.eclipse.jifa.tda.enums.ThreadType;
import org.eclipse.jifa.tda.vo.Content;
import org.eclipse.jifa.tda.vo.Overview;
import org.eclipse.jifa.tda.vo.VFrame;
import org.eclipse.jifa.tda.vo.VMonitor;
import org.eclipse.jifa.tda.vo.VThread;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
import org.eclipse.jifa.worker.support.Analyzer;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
 * HTTP routes exposing thread-dump analysis results. Each handler resolves the
 * analyzer for the requested file via {@code Analyzer.threadDumpAnalyzerOf} and
 * delegates directly to it.
 */
public class ThreadDumpRoute extends ThreadDumpBaseRoute {

    /** Summary view of the whole dump. */
    @RouteMeta(path = "/overview")
    public void overview(Promise<Overview> promise, @ParamKey("file") String file) {
        promise.complete(Analyzer.threadDumpAnalyzerOf(file).overview());
    }

    /** One page of children of the given call-site tree node. */
    @RouteMeta(path = "/callSiteTree")
    public void callSiteTree(Promise<PageView<VFrame>> promise,
                             @ParamKey("file") String file,
                             @ParamKey("parentId") int parentId,
                             PagingRequest paging) {
        promise.complete(Analyzer.threadDumpAnalyzerOf(file).callSiteTree(parentId, paging));
    }

    /** One page of threads, optionally filtered by name and/or type. */
    @RouteMeta(path = "/threads")
    public void threads(Promise<PageView<VThread>> promise,
                        @ParamKey("file") String file,
                        @ParamKey(value = "name", mandatory = false) String name,
                        @ParamKey(value = "type", mandatory = false) ThreadType type,
                        PagingRequest paging) {
        promise.complete(Analyzer.threadDumpAnalyzerOf(file).threads(name, type, paging));
    }

    /** One page of threads belonging to the given thread group. */
    @RouteMeta(path = "/threadsOfGroup")
    public void threadsOfGroup(Promise<PageView<VThread>> promise,
                               @ParamKey("file") String file,
                               @ParamKey(value = "groupName") String groupName,
                               PagingRequest paging) {
        promise.complete(Analyzer.threadDumpAnalyzerOf(file).threadsOfGroup(groupName, paging));
    }

    /** Raw dump lines for a single thread. */
    @RouteMeta(path = "/rawContentOfThread")
    public void rawContentOfThread(Promise<List<String>> promise,
                                   @ParamKey("file") String file,
                                   @ParamKey("id") int id) throws IOException {
        promise.complete(Analyzer.threadDumpAnalyzerOf(file).rawContentOfThread(id));
    }

    /** A window of raw dump content starting at the given line. */
    @RouteMeta(path = "/content")
    public void content(Promise<Content> promise,
                        @ParamKey("file") String file,
                        @ParamKey("lineNo") int lineNo,
                        @ParamKey("lineLimit") int lineLimit) throws IOException {
        promise.complete(Analyzer.threadDumpAnalyzerOf(file).content(lineNo, lineLimit));
    }

    /** One page of monitors found in the dump. */
    @RouteMeta(path = "/monitors")
    public void monitors(Promise<PageView<VMonitor>> promise, @ParamKey("file") String file, PagingRequest paging) {
        promise.complete(Analyzer.threadDumpAnalyzerOf(file).monitors(paging));
    }

    /** Per-state thread counts for a single monitor. */
    @RouteMeta(path = "/threadCountsByMonitor")
    public void threadCountsByMonitor(Promise<Map<MonitorState, Integer>> promise, @ParamKey("file") String file,
                                      @ParamKey("id") int id) {
        promise.complete(Analyzer.threadDumpAnalyzerOf(file).threadCountsByMonitor(id));
    }

    /** One page of threads interacting with a monitor in the given state. */
    @RouteMeta(path = "/threadsByMonitor")
    public void threadsByMonitor(Promise<PageView<VThread>> promise, @ParamKey("file") String file,
                                 @ParamKey("id") int id, @ParamKey("state") MonitorState state,
                                 PagingRequest paging) {
        promise.complete(Analyzer.threadDumpAnalyzerOf(file).threadsByMonitor(id, state, paging));
    }
}
| 3,226 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/HeapBaseRoute.java | /********************************************************************************
* Copyright (c) 2020, 2022 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import org.eclipse.jifa.hda.api.HeapDumpAnalyzer;
import org.eclipse.jifa.common.listener.ProgressListener;
import org.eclipse.jifa.worker.route.BaseRoute;
import org.eclipse.jifa.worker.route.MappingPrefix;
import org.eclipse.jifa.worker.support.Analyzer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@MappingPrefix("/heap-dump/:file")
public class HeapBaseRoute extends BaseRoute {
private static final List<Class<? extends HeapBaseRoute>> ROUTES = new ArrayList<>();
static {
ROUTES.add(OverviewRoute.class);
ROUTES.add(ObjectRoute.class);
ROUTES.add(InspectorRoute.class);
ROUTES.add(DominatorTreeRoute.class);
ROUTES.add(HistogramRoute.class);
ROUTES.add(UnreachableObjectsRoute.class);
ROUTES.add(ClassLoaderRoute.class);
ROUTES.add(DuplicatedClassesRoute.class);
ROUTES.add(SystemPropertyRoute.class);
ROUTES.add(ThreadRoute.class);
ROUTES.add(ObjectListRoute.class);
ROUTES.add(ClassReferenceRoute.class);
ROUTES.add(OQLRoute.class);
ROUTES.add(CalciteSQLRoute.class);
ROUTES.add(DirectByteBufferRoute.class);
ROUTES.add(GCRootRoute.class);
ROUTES.add(PathToGCRootsRoute.class);
ROUTES.add(CompareRoute.class);
ROUTES.add(LeakRoute.class);
ROUTES.add(MergePathToGCRootsRoute.class);
ROUTES.add(StringsRoute.class);
}
public static List<Class<? extends HeapBaseRoute>> routes() {
return ROUTES;
}
public static HeapDumpAnalyzer analyzerOf(String dump) {
return Analyzer.getOrBuildHeapDumpAnalyzer(dump, Collections.emptyMap(), ProgressListener.NoOpProgressListener);
}
}
| 3,227 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/PathToGCRootsRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
import static org.eclipse.jifa.common.util.Assertion.ASSERT;
import static org.eclipse.jifa.hda.api.Model.GCRootPath;
/**
 * Route for paging through the path-to-GC-roots result of a single object.
 */
class PathToGCRootsRoute extends HeapBaseRoute {

    @RouteMeta(path = "/pathToGCRoots")
    void path(Promise<GCRootPath.Item> promise, @ParamKey("file") String file, @ParamKey("origin") int origin,
              @ParamKey("skip") int skip, @ParamKey("count") int count) {
        // Reject negative ids/offsets and non-positive page sizes up front.
        ASSERT.isTrue(origin >= 0).isTrue(skip >= 0).isTrue(count > 0);
        GCRootPath.Item item = analyzerOf(file).getPathToGCRoots(origin, skip, count);
        promise.complete(item);
    }
}
| 3,228 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/CompareRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.enums.FileTransferState;
import org.eclipse.jifa.common.enums.FileType;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.util.PageViewBuilder;
import org.eclipse.jifa.common.vo.FileInfo;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
import org.eclipse.jifa.worker.support.FileSupport;
import java.io.File;
import java.util.stream.Collectors;
import static org.eclipse.jifa.common.enums.FileType.HEAP_DUMP;
import static org.eclipse.jifa.hda.api.Model.Comparison;
/**
 * Routes for comparing one heap dump against a baseline dump.
 * <p>
 * Consistency fixes: uses the statically imported {@code HEAP_DUMP} uniformly
 * (previously mixed with {@code FileType.HEAP_DUMP}), calls the inherited
 * {@code analyzerOf} without the class qualifier, and extracts the duplicated
 * baseline-path expression into a helper.
 */
class CompareRoute extends HeapBaseRoute {

    // Resolves the on-disk path of the baseline heap dump.
    private static java.nio.file.Path baselinePath(String baseline) {
        return new File(FileSupport.filePath(HEAP_DUMP, baseline)).toPath();
    }

    /**
     * Lists candidate baseline dumps: every successfully transferred heap dump
     * except the source itself, newest first, paged.
     * NOTE(review): the "expected" parameter is accepted but unused — confirm intent.
     */
    @RouteMeta(path = "/compare/files")
    void files(Promise<PageView<FileInfo>> promise, @ParamKey("file") String source,
               @ParamKey(value = "expected", mandatory = false) String expected, PagingRequest pagingRequest) {
        promise.complete(PageViewBuilder.build(FileSupport.info(HEAP_DUMP).stream().filter(
            fileInfo -> !fileInfo.getName().equals(source) && fileInfo.getTransferState() == FileTransferState.SUCCESS)
                                                          .sorted((i1, i2) -> Long.compare(i2.getCreationTime(),
                                                                                           i1.getCreationTime()))
                                                          .collect(Collectors.toList()), pagingRequest));
    }

    /** Summary of the comparison between the target dump and the baseline. */
    @RouteMeta(path = "/compare/summary")
    void summary(Promise<Comparison.Summary> promise, @ParamKey("file") String target,
                 @ParamKey("baseline") String baseline) {
        promise.complete(analyzerOf(target).getSummaryOfComparison(baselinePath(baseline)));
    }

    /** One page of comparison records between the target dump and the baseline. */
    @RouteMeta(path = "/compare/records")
    void record(Promise<PageView<Comparison.Item>> promise, @ParamKey("file") String target,
                @ParamKey("baseline") String baseline, PagingRequest pagingRequest) {
        promise.complete(analyzerOf(target).getItemsOfComparison(baselinePath(baseline),
                                                                 pagingRequest.getPage(),
                                                                 pagingRequest.getPageSize()));
    }
}
| 3,229 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/InspectorRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.hda.api.Model;
import org.eclipse.jifa.worker.route.MappingPrefix;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
@MappingPrefix("/inspector")
class InspectorRoute extends HeapBaseRoute {
@RouteMeta(path = "/addressToId")
void addressToId(Promise<Integer> promise, @ParamKey("file") String file, @ParamKey("objectAddress") long address) {
promise.complete(analyzerOf(file).mapAddressToId(address));
}
@RouteMeta(path = "/value")
void value(Promise<String> promise, @ParamKey("file") String file, @ParamKey("objectId") int objectId) {
promise.complete(analyzerOf(file).getObjectValue(objectId));
}
@RouteMeta(path = "/objectView")
void objectView(Promise<Model.InspectorView> promise, @ParamKey("file") String file,
@ParamKey("objectId") int objectId) {
promise.complete(analyzerOf(file).getInspectorView(objectId));
}
@RouteMeta(path = "/fields")
void fields(Promise<PageView<Model.FieldView>> promise, @ParamKey("file") String file,
@ParamKey("objectId") int objectId, PagingRequest pagingRequest) {
promise.complete(analyzerOf(file).getFields(objectId,
pagingRequest.getPage(), pagingRequest.getPageSize()));
}
@RouteMeta(path = "/staticFields")
void staticFields(Promise<PageView<Model.FieldView>> promise, @ParamKey("file") String file,
@ParamKey("objectId") int objectId, PagingRequest pagingRequest) {
promise.complete(analyzerOf(file).getStaticFields(objectId, pagingRequest.getPage(),
pagingRequest.getPageSize()));
}
}
| 3,230 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/StringsRoute.java | /********************************************************************************
* Copyright (c) 2022, Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.hda.api.Model;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
/**
 * Route for searching string instances in a heap dump.
 */
class StringsRoute extends HeapBaseRoute {

    /** One page of heap strings matching the given pattern. */
    @RouteMeta(path = "/findStrings")
    void strings(Promise<PageView<Model.TheString.Item>> promise, @ParamKey("file") String file,
                 @ParamKey("pattern") String pattern, PagingRequest pagingRequest) {
        int page = pagingRequest.getPage();
        int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getStrings(pattern, page, pageSize));
    }
}
| 3,231 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/CalciteSQLRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.hda.api.Model;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
/**
 * Route executing Calcite SQL queries against a heap dump.
 */
class CalciteSQLRoute extends HeapBaseRoute {

    /**
     * Runs the query and completes the promise with one page of results.
     *
     * @param promise        completed with the query result page
     * @param file           heap dump file name
     * @param sql            SQL text (local name fixed from the misleading "oql";
     *                       the HTTP parameter key "sql" is unchanged)
     * @param sortBy         optional sort column
     * @param ascendingOrder sort direction when sortBy is given
     * @param pagingRequest  page number and size
     */
    @RouteMeta(path = "/sql")
    void calciteSql(Promise<Model.CalciteSQLResult> promise, @ParamKey("file") String file,
                    @ParamKey("sql") String sql, @ParamKey(value = "sortBy", mandatory = false) String sortBy,
                    @ParamKey(value = "ascendingOrder", mandatory = false) boolean ascendingOrder,
                    PagingRequest pagingRequest) {
        promise.complete(analyzerOf(file).getCalciteSQLResult(sql, sortBy, ascendingOrder,
                                                              pagingRequest.getPage(),
                                                              pagingRequest.getPageSize()));
    }
}
| 3,232 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/ClassReferenceRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.hda.api.Model;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
/**
 * Routes for browsing class-grouped inbound and outbound reference trees.
 */
class ClassReferenceRoute extends HeapBaseRoute {

    /** Root node of the inbound class-reference tree for the given object. */
    @RouteMeta(path = "/classReference/inbounds/class")
    void inboundsClassInfo(Promise<Model.ClassReferrer.Item> promise, @ParamKey("file") String file,
                           @ParamKey("objectId") int objectId) {
        Model.ClassReferrer.Item item = analyzerOf(file).getInboundClassOfClassReference(objectId);
        promise.complete(item);
    }

    /** Root node of the outbound class-reference tree for the given object. */
    @RouteMeta(path = "/classReference/outbounds/class")
    void outboundsClassInfo(Promise<Model.ClassReferrer.Item> promise, @ParamKey("file") String file,
                            @ParamKey("objectId") int objectId) {
        Model.ClassReferrer.Item item = analyzerOf(file).getOutboundClassOfClassReference(objectId);
        promise.complete(item);
    }

    /** One page of inbound children for the given set of objects. */
    @RouteMeta(path = "/classReference/inbounds/children")
    void inboundsChildren(Promise<PageView<Model.ClassReferrer.Item>> promise, @ParamKey("file") String file,
                          PagingRequest pagingRequest,
                          @ParamKey("objectIds") int[] objectIds) {
        int page = pagingRequest.getPage();
        int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getInboundsOfClassReference(objectIds, page, pageSize));
    }

    /** One page of outbound children for the given set of objects. */
    @RouteMeta(path = "/classReference/outbounds/children")
    void outboundsChildren(Promise<PageView<Model.ClassReferrer.Item>> promise, @ParamKey("file") String file,
                           PagingRequest pagingRequest,
                           @ParamKey("objectIds") int[] objectIds) {
        int page = pagingRequest.getPage();
        int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getOutboundsOfClassReference(objectIds, page, pageSize));
    }
}
| 3,233 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/DirectByteBufferRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
import static org.eclipse.jifa.hda.api.Model.DirectByteBuffer;
/**
 * Routes exposing direct byte buffer statistics of a heap dump.
 */
class DirectByteBufferRoute extends HeapBaseRoute {

    /** Aggregate summary of all direct byte buffers. */
    @RouteMeta(path = "/directByteBuffer/summary")
    void summary(Promise<DirectByteBuffer.Summary> promise, @ParamKey("file") String file) {
        DirectByteBuffer.Summary summary = analyzerOf(file).getSummaryOfDirectByteBuffers();
        promise.complete(summary);
    }

    /** One page of individual direct byte buffer records. */
    @RouteMeta(path = "/directByteBuffer/records")
    void record(Promise<PageView<DirectByteBuffer.Item>> promise, @ParamKey("file") String file,
                PagingRequest pagingRequest) {
        int page = pagingRequest.getPage();
        int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getDirectByteBuffers(page, pageSize));
    }
}
| 3,234 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/MergePathToGCRootsRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import com.google.gson.Gson;
import io.vertx.core.Promise;
import io.vertx.ext.web.RoutingContext;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.worker.route.HttpMethod;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
import static org.eclipse.jifa.hda.api.Model.GCRootPath;
/**
 * HTTP routes for the "merge paths to GC roots" heap dump analysis.
 * Targets can be addressed either by a class id (GET) or by an explicit set of
 * object ids POSTed as a JSON int array in the request body.
 */
class MergePathToGCRootsRoute extends HeapBaseRoute {

    /** Roots of the merged GC-root path tree for all instances of one class. */
    @RouteMeta(path = "/mergePathToGCRoots/roots/byClassId")
    void rootsByClassId(Promise<PageView<GCRootPath.MergePathToGCRootsTreeNode>> promise,
                        @ParamKey("file") String file,
                        @ParamKey("classId") int classId,
                        @ParamKey("grouping") GCRootPath.Grouping grouping,
                        PagingRequest pagingRequest) {
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file)
                                 .getRootsOfMergePathToGCRootsByClassId(classId, grouping, page, pageSize));
    }

    /** Children of a node in the merged path tree, identified by its id path from the root. */
    @RouteMeta(path = "/mergePathToGCRoots/children/byClassId")
    void childrenByClassId(Promise<PageView<GCRootPath.MergePathToGCRootsTreeNode>> promise,
                           @ParamKey("file") String file,
                           @ParamKey("grouping") GCRootPath.Grouping grouping,
                           PagingRequest pagingRequest,
                           @ParamKey("classId") int classId,
                           @ParamKey("objectIdPathInGCPathTree") int[] objectIdPathInGCPathTree) {
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file)
                                 .getChildrenOfMergePathToGCRootsByClassId(classId, objectIdPathInGCPathTree,
                                                                           grouping, page, pageSize));
    }

    /** Same as {@link #rootsByClassId} but for an arbitrary object id set from the POST body. */
    @RouteMeta(path = "/mergePathToGCRoots/roots/byObjectIds", method = HttpMethod.POST)
    void rootsByObjectId(Promise<PageView<GCRootPath.MergePathToGCRootsTreeNode>> promise,
                         @ParamKey("file") String file,
                         @ParamKey("grouping") GCRootPath.Grouping grouping,
                         RoutingContext context,
                         PagingRequest pagingRequest) {
        // The object id set is carried in the request body as a JSON int array.
        final int[] objectIds = new Gson().fromJson(context.getBodyAsString(), int[].class);
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file)
                                 .getRootsOfMergePathToGCRootsByObjectIds(objectIds, grouping, page, pageSize));
    }

    /** Same as {@link #childrenByClassId} but for an arbitrary object id set from the POST body. */
    @RouteMeta(path = "/mergePathToGCRoots/children/byObjectIds", method = HttpMethod.POST)
    void childrenByObjectId(Promise<PageView<GCRootPath.MergePathToGCRootsTreeNode>> promise,
                            @ParamKey("file") String file,
                            @ParamKey("grouping") GCRootPath.Grouping grouping,
                            PagingRequest pagingRequest,
                            RoutingContext context,
                            @ParamKey("objectIdPathInGCPathTree") int[] objectIdPathInGCPathTree) {
        final int[] objectIds = new Gson().fromJson(context.getBodyAsString(), int[].class);
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file)
                                 .getChildrenOfMergePathToGCRootsByObjectIds(objectIds, objectIdPathInGCPathTree,
                                                                             grouping, page, pageSize));
    }
}
| 3,235 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/SystemPropertyRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
import java.util.Map;
/**
 * HTTP route exposing the system properties recorded in a heap dump.
 */
class SystemPropertyRoute extends HeapBaseRoute {

    /** Returns the dumped VM's system properties as a key/value map. */
    @RouteMeta(path = "/systemProperties")
    void systemProperty(Promise<Map<String, String>> promise, @ParamKey("file") String file) {
        Map<String, String> properties = analyzerOf(file).getSystemProperties();
        promise.complete(properties);
    }
}
| 3,236 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/ObjectListRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.hda.api.Model;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
import static org.eclipse.jifa.common.util.Assertion.ASSERT;
/**
 * HTTP routes listing the outbound and inbound references of a single heap object.
 */
class ObjectListRoute extends HeapBaseRoute {

    /** One page of objects referenced by the given object. */
    @RouteMeta(path = "/outbounds")
    void outbounds(Promise<PageView<Model.JavaObject>> promise, @ParamKey("file") String file,
                   PagingRequest pagingRequest,
                   @ParamKey("objectId") int objectId) {
        // Object ids are non-negative indexes assigned by the analyzer.
        ASSERT.isTrue(objectId >= 0, "Object id must be greater than or equal to 0");
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getOutboundOfObject(objectId, page, pageSize));
    }

    /** One page of objects referencing the given object. */
    @RouteMeta(path = "/inbounds")
    void inbounds(Promise<PageView<Model.JavaObject>> promise, @ParamKey("file") String file,
                  PagingRequest pagingRequest,
                  @ParamKey("objectId") int objectId) {
        ASSERT.isTrue(objectId >= 0, "Object id must be greater than or equal to 0");
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getInboundOfObject(objectId, page, pageSize));
    }
}
| 3,237 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/OQLRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.hda.api.Model;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
/**
 * HTTP route executing an OQL query against a heap dump.
 */
class OQLRoute extends HeapBaseRoute {

    /**
     * Runs {@code oql} and returns one page of the (optionally sorted) result.
     * {@code sortBy}/{@code ascendingOrder} are optional request parameters.
     */
    @RouteMeta(path = "/oql")
    void oql(Promise<Model.OQLResult> promise, @ParamKey("file") String file,
             @ParamKey("oql") String oql, @ParamKey(value = "sortBy", mandatory = false) String sortBy,
             @ParamKey(value = "ascendingOrder", mandatory = false) boolean ascendingOrder,
             PagingRequest pagingRequest) {
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getOQLResult(oql, sortBy, ascendingOrder, page, pageSize));
    }
}
| 3,238 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/OverviewRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.hda.api.Model;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
import java.util.List;
/**
 * HTTP routes for the heap dump overview page.
 */
class OverviewRoute extends HeapBaseRoute {

    /** Returns general details of the dump (as computed by the analyzer). */
    @RouteMeta(path = "/details")
    void details(Promise<Model.Overview.Details> promise, @ParamKey("file") String file) {
        promise.complete(analyzerOf(file).getDetails());
    }

    /** Returns the biggest objects in the dump. */
    @RouteMeta(path = "/biggestObjects")
    void biggestObjects(Promise<List<Model.Overview.BigObject>> promise, @ParamKey("file") String file) {
        List<Model.Overview.BigObject> bigObjects = analyzerOf(file).getBigObjects();
        promise.complete(bigObjects);
    }
}
| 3,239 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/DuplicatedClassesRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.common.vo.support.SearchType;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
import static org.eclipse.jifa.hda.api.Model.DuplicatedClass;
/**
 * HTTP routes for the duplicated classes view of a heap dump.
 */
class DuplicatedClassesRoute extends HeapBaseRoute {

    /** One page of duplicated class records, optionally filtered by a search. */
    @RouteMeta(path = "/duplicatedClasses/classes")
    void classRecords(Promise<PageView<DuplicatedClass.ClassItem>> promise, @ParamKey("file") String file,
                      @ParamKey(value = "searchText", mandatory = false) String searchText,
                      @ParamKey(value = "searchType", mandatory = false) SearchType searchType,
                      PagingRequest pagingRequest) {
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getDuplicatedClasses(searchText, searchType, page, pageSize));
    }

    /** One page of the class loaders that loaded the duplicated class at {@code index}. */
    @RouteMeta(path = "/duplicatedClasses/classLoaders")
    void classLoaderRecords(Promise<PageView<DuplicatedClass.ClassLoaderItem>> promise, @ParamKey("file") String file,
                            @ParamKey("index") int index,
                            PagingRequest pagingRequest) {
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getClassloadersOfDuplicatedClass(index, page, pageSize));
    }
}
| 3,240 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/ClassLoaderRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.hda.api.Model;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
/**
 * HTTP routes for the class loader explorer view of a heap dump.
 */
class ClassLoaderRoute extends HeapBaseRoute {

    /** Aggregate summary over all class loaders in the dump. */
    @RouteMeta(path = "/classLoaderExplorer/summary")
    void summary(Promise<Model.ClassLoader.Summary> promise, @ParamKey("file") String file) {
        promise.complete(analyzerOf(file).getSummaryOfClassLoaders());
    }

    /** One page of top-level class loader records. */
    @RouteMeta(path = "/classLoaderExplorer/classLoader")
    void classLoaders(Promise<PageView<Model.ClassLoader.Item>> promise, @ParamKey("file") String file,
                      PagingRequest pagingRequest) {
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getClassLoaders(page, pageSize));
    }

    /** One page of children (e.g. defined classes) of the given class loader. */
    @RouteMeta(path = "/classLoaderExplorer/children")
    void children(Promise<PageView<Model.ClassLoader.Item>> promise, @ParamKey("file") String file,
                  @ParamKey("classLoaderId") int classLoaderId, PagingRequest pagingRequest) {
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getChildrenOfClassLoader(classLoaderId, page, pageSize));
    }
}
| 3,241 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/UnreachableObjectsRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.hda.api.Model;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
/**
 * HTTP routes for the unreachable objects view of a heap dump.
 */
class UnreachableObjectsRoute extends HeapBaseRoute {

    /** Aggregate summary of all unreachable objects. */
    @RouteMeta(path = "/unreachableObjects/summary")
    void summary(Promise<Model.UnreachableObject.Summary> promise, @ParamKey("file") String file) {
        promise.complete(analyzerOf(file).getSummaryOfUnreachableObjects());
    }

    /** One page of unreachable object records. */
    @RouteMeta(path = "/unreachableObjects/records")
    void records(Promise<PageView<Model.UnreachableObject.Item>> promise, @ParamKey("file") String file,
                 PagingRequest pagingRequest) {
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getUnreachableObjects(page, pageSize));
    }
}
| 3,242 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/GCRootRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.hda.api.Model;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
import java.util.List;
import static org.eclipse.jifa.hda.api.Model.GCRoot;
/**
 * HTTP routes for browsing GC roots of a heap dump:
 * root types, the classes under a root type, and the objects of one such class.
 */
class GCRootRoute extends HeapBaseRoute {

    /** All GC root type entries. */
    @RouteMeta(path = "/GCRoots")
    void roots(Promise<List<GCRoot.Item>> promise, @ParamKey("file") String file) {
        List<GCRoot.Item> gcRoots = analyzerOf(file).getGCRoots();
        promise.complete(gcRoots);
    }

    /** One page of classes under the GC root type at {@code rootTypeIndex}. */
    @RouteMeta(path = "/GCRoots/classes")
    void classes(Promise<PageView<GCRoot.Item>> promise, @ParamKey("file") String file,
                 @ParamKey("rootTypeIndex") int rootTypeIndex, PagingRequest pagingRequest) {
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getClassesOfGCRoot(rootTypeIndex, page, pageSize));
    }

    /** One page of objects of the class at {@code classIndex} under the given root type. */
    @RouteMeta(path = "/GCRoots/class/objects")
    void objects(Promise<PageView<Model.JavaObject>> promise, @ParamKey("file") String file,
                 @ParamKey("rootTypeIndex") int rootTypeIndex, @ParamKey("classIndex") int classIndex,
                 PagingRequest pagingRequest) {
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getObjectsOfGCRoot(rootTypeIndex, classIndex, page, pageSize));
    }
}
| 3,243 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/HistogramRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.common.vo.support.SearchType;
import org.eclipse.jifa.hda.api.Model;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
/**
 * HTTP routes for the histogram view of a heap dump.
 * Records can be grouped, sorted, and filtered by an optional text search.
 */
class HistogramRoute extends HeapBaseRoute {

    /** One page of top-level histogram records. Optional params are null/false when absent. */
    @RouteMeta(path = "/histogram")
    void histogram(Promise<PageView<Model.Histogram.Item>> promise, @ParamKey("file") String file,
                   @ParamKey("groupingBy") Model.Histogram.Grouping groupingBy,
                   @ParamKey(value = "ids", mandatory = false) int[] ids,
                   @ParamKey(value = "sortBy", mandatory = false) String sortBy,
                   @ParamKey(value = "ascendingOrder", mandatory = false) boolean ascendingOrder,
                   @ParamKey(value = "searchText", mandatory = false) String searchText,
                   @ParamKey(value = "searchType", mandatory = false) SearchType searchType,
                   PagingRequest pagingRequest) {
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getHistogram(groupingBy, ids, sortBy, ascendingOrder,
                                                       searchText, searchType, page, pageSize));
    }

    /** One page of children of the histogram record identified by {@code parentObjectId}. */
    @RouteMeta(path = "/histogram/children")
    void children(Promise<PageView<Model.Histogram.Item>> promise, @ParamKey("file") String file,
                  @ParamKey("groupingBy") Model.Histogram.Grouping groupingBy,
                  @ParamKey(value = "ids", mandatory = false) int[] ids,
                  @ParamKey(value = "sortBy", mandatory = false) String sortBy,
                  @ParamKey(value = "ascendingOrder", mandatory = false) boolean ascendingOrder,
                  @ParamKey("parentObjectId") int parentObjectId,
                  PagingRequest pagingRequest) {
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getChildrenOfHistogram(groupingBy, ids, sortBy, ascendingOrder,
                                                                 parentObjectId, page, pageSize));
    }

    /** One page of the concrete objects belonging to the class at {@code classId}. */
    @RouteMeta(path = "/histogram/objects")
    void objects(Promise<PageView<Model.JavaObject>> promise, @ParamKey("file") String file,
                 @ParamKey("classId") int classId, PagingRequest pagingRequest) {
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getHistogramObjects(classId, page, pageSize));
    }
}
| 3,244 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/ThreadRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.common.vo.support.SearchType;
import org.eclipse.jifa.hda.api.Model;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
import java.util.List;
/**
 * HTTP routes for the thread view of a heap dump: thread list, stack traces,
 * and the local variables of a stack frame.
 */
class ThreadRoute extends HeapBaseRoute {

    /** Summary over threads matching the (optional) search criteria. */
    @RouteMeta(path = "/threadsSummary")
    void threadsSummary(Promise<Model.Thread.Summary> promise, @ParamKey("file") String file,
                        @ParamKey(value = "searchText", mandatory = false) String searchText,
                        @ParamKey(value = "searchType", mandatory = false) SearchType searchType) {
        promise.complete(analyzerOf(file).getSummaryOfThreads(searchText, searchType));
    }

    /** One page of thread records, optionally sorted and filtered. */
    @RouteMeta(path = "/threads")
    void threads(Promise<PageView<Model.Thread.Item>> promise, @ParamKey("file") String file,
                 @ParamKey(value = "sortBy", mandatory = false) String sortBy,
                 @ParamKey(value = "ascendingOrder", mandatory = false) boolean ascendingOrder,
                 @ParamKey(value = "searchText", mandatory = false) String searchText,
                 @ParamKey(value = "searchType", mandatory = false) SearchType searchType,
                 PagingRequest paging) {
        final int page = paging.getPage();
        final int pageSize = paging.getPageSize();
        promise.complete(analyzerOf(file).getThreads(sortBy, ascendingOrder, searchText, searchType,
                                                     page, pageSize));
    }

    /** Stack trace of the thread object identified by {@code objectId}. */
    @RouteMeta(path = "/stackTrace")
    void stackTrace(Promise<List<Model.Thread.StackFrame>> promise, @ParamKey("file") String file,
                    @ParamKey("objectId") int objectId) {
        List<Model.Thread.StackFrame> frames = analyzerOf(file).getStackTrace(objectId);
        promise.complete(frames);
    }

    /** Local variables of the frame at {@code depth} in the given thread's stack. */
    @RouteMeta(path = "/locals")
    void locals(Promise<List<Model.Thread.LocalVariable>> promise, @ParamKey("file") String file,
                @ParamKey("objectId") int objectId, @ParamKey("depth") int depth,
                @ParamKey("firstNonNativeFrame") boolean firstNonNativeFrame) {
        promise.complete(analyzerOf(file).getLocalVariables(objectId, depth, firstNonNativeFrame));
    }
}
| 3,245 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/ObjectRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.hda.api.Model;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
import static org.eclipse.jifa.common.util.Assertion.ASSERT;
/**
 * HTTP route returning detailed information about a single heap object.
 */
class ObjectRoute extends HeapBaseRoute {

    /** Looks up the object identified by the (non-negative) analyzer-assigned id. */
    @RouteMeta(path = "/object")
    void info(Promise<Model.JavaObject> promise, @ParamKey("file") String file, @ParamKey("objectId") int objectId) {
        ASSERT.isTrue(objectId >= 0, "Object id must be greater than or equal to 0");
        Model.JavaObject object = analyzerOf(file).getObjectInfo(objectId);
        promise.complete(object);
    }
}
| 3,246 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/DominatorTreeRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.common.vo.support.SearchType;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
import static org.eclipse.jifa.hda.api.Model.DominatorTree;
/**
 * HTTP routes for the dominator tree view of a heap dump.
 */
class DominatorTreeRoute extends HeapBaseRoute {

    /** One page of dominator tree roots, optionally sorted and filtered by search. */
    @RouteMeta(path = "/dominatorTree/roots")
    void roots(Promise<PageView<? extends DominatorTree.Item>> promise, @ParamKey("file") String file,
               @ParamKey("grouping") DominatorTree.Grouping grouping,
               @ParamKey(value = "sortBy", mandatory = false) String sortBy,
               @ParamKey(value = "ascendingOrder", mandatory = false) boolean ascendingOrder,
               @ParamKey(value = "searchText", mandatory = false) String searchText,
               @ParamKey(value = "searchType", mandatory = false) SearchType searchType,
               PagingRequest pagingRequest) {
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getRootsOfDominatorTree(grouping, sortBy, ascendingOrder,
                                                                  searchText, searchType, page, pageSize));
    }

    /**
     * One page of children of a dominator tree node; {@code idPathInResultTree}
     * (optional) locates the parent within the previously returned result tree.
     */
    @RouteMeta(path = "/dominatorTree/children")
    void children(Promise<PageView<? extends DominatorTree.Item>> promise, @ParamKey("file") String file,
                  @ParamKey("grouping") DominatorTree.Grouping grouping,
                  @ParamKey(value = "sortBy", mandatory = false) String sortBy,
                  @ParamKey(value = "ascendingOrder", mandatory = false) boolean ascendingOrder,
                  PagingRequest pagingRequest,
                  @ParamKey("parentObjectId") int parentObjectId, @ParamKey(value = "idPathInResultTree",
                  mandatory = false) int[] idPathInResultTree) {
        final int page = pagingRequest.getPage();
        final int pageSize = pagingRequest.getPageSize();
        promise.complete(analyzerOf(file).getChildrenOfDominatorTree(grouping, sortBy, ascendingOrder,
                                                                     parentObjectId, idPathInResultTree,
                                                                     page, pageSize));
    }
}
| 3,247 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/heapdump/LeakRoute.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.heapdump;
import io.vertx.core.Promise;
import org.eclipse.jifa.hda.api.Model;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
/**
 * HTTP route returning the leak suspects report for a heap dump.
 */
class LeakRoute extends HeapBaseRoute {

    /** Completes with the analyzer-generated leak report. */
    @RouteMeta(path = "/leak/report")
    void report(Promise<Model.LeakReport> promise, @ParamKey("file") String file) {
        Model.LeakReport report = analyzerOf(file).getLeakReport();
        promise.complete(report);
    }
}
| 3,248 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/gclog/GCLogBaseRoute.java | /********************************************************************************
* Copyright (c) 2022 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.gclog;
import org.eclipse.jifa.worker.route.BaseRoute;
import org.eclipse.jifa.worker.route.MappingPrefix;
import java.util.ArrayList;
import java.util.List;
@MappingPrefix("/gc-log/:file")
/**
 * Base class of GC log routes; mounts all GC log HTTP handlers under the
 * {@code /gc-log/:file} prefix and keeps the registry of route classes.
 */
@MappingPrefix("/gc-log/:file")
public class GCLogBaseRoute extends BaseRoute {

    // Fix: the registry field was a mutable, non-final static reference; declare it
    // final so it cannot be reassigned after class initialization.
    private static final List<Class<? extends GCLogBaseRoute>> ROUTES = new ArrayList<>();

    static {
        ROUTES.add(GCLogRoute.class);
    }

    /**
     * @return the registered GC log route classes; callers should treat the
     *         returned list as read-only.
     */
    public static List<Class<? extends GCLogBaseRoute>> routes() {
        return ROUTES;
    }
}
| 3,249 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/route/gclog/GCLogRoute.java | /********************************************************************************
* Copyright (c) 2022 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.route.gclog;
import org.eclipse.jifa.gclog.diagnoser.AnalysisConfig;
import org.eclipse.jifa.gclog.diagnoser.GlobalDiagnoser;
import org.eclipse.jifa.gclog.vo.GCEventVO;
import org.eclipse.jifa.gclog.model.modeInfo.GCLogMetadata;
import org.eclipse.jifa.gclog.model.GCModel;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.request.PagingRequest;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.gclog.model.modeInfo.VmOptions;
import org.eclipse.jifa.gclog.vo.*;
import org.eclipse.jifa.worker.route.HttpMethod;
import org.eclipse.jifa.worker.route.ParamKey;
import org.eclipse.jifa.worker.route.RouteMeta;
import org.eclipse.jifa.worker.support.Analyzer;
import java.util.List;
import java.util.Map;
public class GCLogRoute extends org.eclipse.jifa.worker.route.gclog.GCLogBaseRoute {
/** Returns the metadata of the GC log model (opened on demand, then cached by Analyzer). */
@RouteMeta(path = "/metadata")
void metadata(Promise<GCLogMetadata> promise, @ParamKey("file") String file) {
    promise.complete(Analyzer.getOrOpenGCLogModel(file).getGcModelMetadata());
}
/** Object statistics over the [start, end] time window of the log. */
@RouteMeta(path = "/objectStatistics")
void objectStats(Promise<ObjectStatistics> promise, @ParamKey("file") String file,
                 @ParamKey("start") double start,
                 @ParamKey("end") double end) {
    TimeRange range = new TimeRange(start, end);
    promise.complete(Analyzer.getOrOpenGCLogModel(file).getObjectStatistics(range));
}
/** Memory statistics over the [start, end] time window of the log. */
@RouteMeta(path = "/memoryStatistics")
void memoryStats(Promise<MemoryStatistics> promise, @ParamKey("file") String file,
                 @ParamKey("start") double start,
                 @ParamKey("end") double end) {
    TimeRange range = new TimeRange(start, end);
    promise.complete(Analyzer.getOrOpenGCLogModel(file).getMemoryStatistics(range));
}
/**
 * Pause-time distribution, bucketed by the given partition boundaries, over the
 * [start, end] time window.
 * NOTE(review): this overloads {@code pauseStats} (same name as the /pauseStatistics
 * handler); the route is selected by the @RouteMeta path, not the method name.
 */
@RouteMeta(path = "/pauseDistribution")
void pauseStats(Promise<Map<String, int[]>> promise, @ParamKey("file") String file,
                @ParamKey("start") double start,
                @ParamKey("end") double end,
                @ParamKey("partitions") int[] partitions) {
    TimeRange range = new TimeRange(start, end);
    promise.complete(Analyzer.getOrOpenGCLogModel(file).getPauseDistribution(range, partitions));
}
@RouteMeta(path = "/pauseStatistics")
void pauseStats(Promise<PauseStatistics> promise, @ParamKey("file") String file,
@ParamKey("start") double start,
@ParamKey("end") double end) {
final GCModel model = Analyzer.getOrOpenGCLogModel(file);
promise.complete(model.getPauseStatistics(new TimeRange(start, end)));
}
@RouteMeta(path = "/phaseStatistics")
void phaseStats(Promise<PhaseStatistics> promise, @ParamKey("file") String file,
@ParamKey("start") double start,
@ParamKey("end") double end) {
final GCModel model = Analyzer.getOrOpenGCLogModel(file);
promise.complete(model.getPhaseStatistics(new TimeRange(start, end)));
}
@RouteMeta(path = "/gcDetails")
void detail(Promise<PageView<GCEventVO>> promise, @ParamKey("file") String file,
@ParamKey(value = "eventType", mandatory = false) String eventType,
@ParamKey(value = "gcCause", mandatory = false) String gcCause,
@ParamKey(value = "logTimeLow", mandatory = false) Double logTimeLow,
@ParamKey(value = "logTimeHigh", mandatory = false) Double logTimeHigh,
@ParamKey(value = "pauseTimeLow", mandatory = false) Double pauseTimeLow,
// time range of config is ignored for the time being
@ParamKey("config") AnalysisConfig config,
PagingRequest pagingRequest) {
final GCModel model = Analyzer.getOrOpenGCLogModel(file);
GCModel.GCDetailFilter filter = new GCModel.GCDetailFilter(eventType, gcCause, logTimeLow, logTimeHigh, pauseTimeLow);
promise.complete(model.getGCDetails(pagingRequest, filter, config));
}
@RouteMeta(path = "/vmOptions", method = HttpMethod.GET)
void getVMOptions(Promise<VmOptions.VmOptionResult> promise,
@ParamKey("file") String file) {
final GCModel model = Analyzer.getOrOpenGCLogModel(file);
VmOptions options = model.getVmOptions();
promise.complete(options == null ? null : options.getVmOptionResult());
}
@RouteMeta(path = "/timeGraphData", method = HttpMethod.GET)
void getTimeGraphData(Promise<Map<String, List<Object[]>>> promise,
@ParamKey("file") String file,
@ParamKey("dataTypes") String[] dateTypes) {
final GCModel model = Analyzer.getOrOpenGCLogModel(file);
promise.complete(model.getTimeGraphData(dateTypes));
}
@RouteMeta(path = "/vmOptions", method = HttpMethod.POST)
void setVMOptions(Promise<Void> promise,
@ParamKey("file") String file, @ParamKey("options") String options) {
final GCModel model = Analyzer.getOrOpenGCLogModel(file);
model.setVmOptions(new VmOptions(options));
promise.complete();
}
@RouteMeta(path = "/diagnoseInfo", method = HttpMethod.GET)
void getDiagnoseInfo(Promise<GlobalDiagnoser.GlobalAbnormalInfo> promise,
@ParamKey("file") String file,
@ParamKey("config") AnalysisConfig config) {
final GCModel model = Analyzer.getOrOpenGCLogModel(file);
promise.complete(model.getGlobalAbnormalInfo(config));
}
}
| 3,250 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/support/Analyzer.java | /********************************************************************************
* Copyright (c) 2020, 2022 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.support;
import org.eclipse.jifa.gclog.model.GCModel;
import org.eclipse.jifa.gclog.parser.GCLogAnalyzer;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import io.vertx.core.Promise;
import org.eclipse.jifa.common.JifaException;
import org.eclipse.jifa.common.enums.FileType;
import org.eclipse.jifa.common.enums.ProgressState;
import org.eclipse.jifa.common.util.ErrorUtil;
import org.eclipse.jifa.common.util.FileUtil;
import org.eclipse.jifa.common.listener.DefaultProgressListener;
import org.eclipse.jifa.hda.api.HeapDumpAnalyzer;
import org.eclipse.jifa.common.listener.ProgressListener;
import org.eclipse.jifa.tda.ThreadDumpAnalyzer;
import org.eclipse.jifa.worker.Worker;
import org.eclipse.jifa.worker.WorkerGlobal;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.ServiceLoader;
import java.util.concurrent.TimeUnit;
import static org.eclipse.jifa.common.enums.FileType.*;
import static org.eclipse.jifa.common.util.Assertion.ASSERT;
import static org.eclipse.jifa.worker.Constant.CacheConfig.*;
/**
 * Singleton coordinating analysis of the worker's supported artifacts
 * (heap dumps, GC logs, thread dumps).
 *
 * <p>Parsed analyzer/model instances are kept in a Guava cache keyed by file
 * name; per-file {@link ProgressListener}s track analyses that are currently
 * in flight so progress can be polled and duplicate work avoided.
 */
public class Analyzer {
    // Heap-dump analyzer implementation, discovered once via ServiceLoader at class-init time.
    public static final HeapDumpAnalyzer.Provider HEAP_DUMP_ANALYZER_PROVIDER;
    private static final Logger LOGGER = LoggerFactory.getLogger(Analyzer.class);
    static {
        try {
            Iterator<HeapDumpAnalyzer.Provider> iterator =
                ServiceLoader.load(HeapDumpAnalyzer.Provider.class, Worker.class.getClassLoader()).iterator();
            // At least one provider implementation must be present on the classpath;
            // only the first one found is used.
            ASSERT.isTrue(iterator.hasNext());
            HEAP_DUMP_ANALYZER_PROVIDER = iterator.next();
        } catch (Throwable t) {
            LOGGER.error("Init analyzer failed", t);
            // Fail class loading hard: the worker cannot operate without a provider.
            throw new Error(t);
        }
    }
    // Progress listeners of analyses currently in flight, keyed by file name.
    // Guarded by this instance's monitor (all accessors are synchronized).
    private final Map<String, ProgressListener> listeners;
    // Cache of finished analysis results (HeapDumpAnalyzer / GCModel / ThreadDumpAnalyzer),
    // keyed by file name. Soft values allow reclamation under memory pressure.
    private final Cache<String, Object> cache;
    private Analyzer() {
        listeners = new HashMap<>();
        cache = CacheBuilder
            .newBuilder()
            .softValues()
            .recordStats()
            // NOTE(review): this configures expire-after-WRITE using config keys named
            // EXPIRE_AFTER_ACCESS — confirm whether expireAfterAccess was intended.
            .expireAfterWrite(
                WorkerGlobal.intConfig(CACHE_CONFIG, EXPIRE_AFTER_ACCESS),
                TimeUnit.valueOf(WorkerGlobal.stringConfig(CACHE_CONFIG, EXPIRE_AFTER_ACCESS_TIME_UNIT))
            )
            .build();
    }
    /**
     * Returns the cached value for {@code key}, building and caching it on first use.
     *
     * <p>Uses double-checked locking on the interned key string so that at most one
     * thread builds a given key at a time. Note that interning means the lock is
     * shared process-wide by any equal string.
     */
    private static <T> T getOrBuild(String key, Builder<T> builder) {
        T result = getInstance().getCacheValueIfPresent(key);
        if (result != null) {
            return result;
        }
        synchronized (key.intern()) {
            // Re-check under the lock: another thread may have built it meanwhile.
            result = getInstance().getCacheValueIfPresent(key);
            if (result != null) {
                return result;
            }
            try {
                result = builder.build(key);
            } catch (Throwable t) {
                throw new JifaException(t);
            }
            getInstance().putCacheValue(key, result);
            return result;
        }
    }
    /**
     * Returns the (possibly cached) heap-dump analyzer for {@code dump}, building
     * it with the given options and progress listener if not yet cached.
     */
    public static HeapDumpAnalyzer getOrBuildHeapDumpAnalyzer(String dump, Map<String, String> options,
                                                              ProgressListener listener) {
        return getOrBuild(dump, key -> HEAP_DUMP_ANALYZER_PROVIDER
            .provide(new File(FileSupport.filePath(HEAP_DUMP, dump)).toPath(), options, listener));
    }
    public static Analyzer getInstance() {
        return Singleton.INSTANCE;
    }
    /**
     * Whether {@code file} has never been analyzed: no index file, no error log,
     * and no analysis currently in progress. Only supported for heap dumps.
     */
    public boolean isFirstAnalysis(FileType fileType, String file) {
        switch (fileType) {
            case HEAP_DUMP:
                return !new File(FileSupport.indexPath(fileType, file)).exists() &&
                       !new File(FileSupport.errorLogPath(fileType, file)).exists() &&
                       getFileListener(file) == null;
            default:
                throw new IllegalArgumentException(fileType.name());
        }
    }
    /**
     * Kicks off analysis of {@code fileName} if it is neither cached nor previously
     * failed. The promise is completed immediately (fire-and-forget); the analysis
     * itself runs to completion on the calling thread afterwards, and failures are
     * recorded in the file's error log rather than propagated.
     */
    public void analyze(Promise<Void> promise, FileType fileType, String fileName, Map<String, String> options) {
        ProgressListener progressListener;
        if (getCacheValueIfPresent(fileName) != null ||
            new File(FileSupport.errorLogPath(fileType, fileName)).exists()) {
            // Already analyzed (successfully or not) — nothing to do.
            promise.complete();
            return;
        }
        progressListener = new DefaultProgressListener();
        // putFileListener returns false when another analysis of the same file is
        // already running; in that case we skip the work.
        boolean success = putFileListener(fileName, progressListener);
        promise.complete();
        if (success) {
            try {
                switch (fileType) {
                    case HEAP_DUMP:
                        getOrBuildHeapDumpAnalyzer(fileName, options, progressListener);
                        break;
                    case GC_LOG:
                        getOrOpenGCLogModel(fileName,progressListener);
                        break;
                    case THREAD_DUMP:
                        threadDumpAnalyzerOf(fileName, progressListener);
                        break;
                    default:
                        break;
                }
            } catch (Exception e) {
                LOGGER.error("task failed due to {}", ErrorUtil.toString(e));
                LOGGER.error(progressListener.log());
                // Persist the progress log plus the stack trace so pollProgress can
                // later report the failure.
                File log = new File(FileSupport.errorLogPath(fileType, fileName));
                FileUtil.write(log, progressListener.log(), false);
                FileUtil.write(log, ErrorUtil.toString(e), true);
            } finally {
                removeFileListener(fileName);
            }
        }
    }
    /**
     * Removes cached results and on-disk artifacts (error log, index, kryo file)
     * for {@code fileName}. Index/kryo files are kept while an analysis is still
     * in progress.
     */
    public void clean(FileType fileType, String fileName) {
        clearCacheValue(fileName);
        File errorLog = new File(FileSupport.errorLogPath(fileType, fileName));
        if (errorLog.exists()) {
            ASSERT.isTrue(errorLog.delete(), "Delete error log failed");
        }
        if (getFileListener(fileName) != null) {
            // Analysis still running — leave its working files alone.
            return;
        }
        File index = new File(FileSupport.indexPath(fileType, fileName));
        if (index.exists()) {
            ASSERT.isTrue(index.delete(), "Delete index file failed");
        }
        File kryo = new File(FileSupport.filePath(fileType, fileName, fileName + ".kryo"));
        if (kryo.exists()) {
            ASSERT.isTrue(kryo.delete(), "Delete kryo file failed");
        }
    }
    /** Drops only the in-memory cached result for {@code fileName}; disk artifacts stay. */
    public void release(String fileName) {
        clearCacheValue(fileName);
    }
    /**
     * Current analysis progress for {@code fileName}: IN_PROGRESS while a listener
     * is registered, otherwise SUCCESS/ERROR/NOT_STARTED derived from the cache and
     * the error-log file.
     */
    public org.eclipse.jifa.common.vo.Progress pollProgress(FileType fileType, String fileName) {
        ProgressListener progressListener = getFileListener(fileName);
        if (progressListener == null) {
            org.eclipse.jifa.common.vo.Progress progress = buildProgressIfFinished(fileType, fileName);
            return progress;
        } else {
            org.eclipse.jifa.common.vo.Progress progress = new org.eclipse.jifa.common.vo.Progress();
            progress.setState(ProgressState.IN_PROGRESS);
            progress.setMessage(progressListener.log());
            progress.setPercent(progressListener.percent());
            return progress;
        }
    }
    @SuppressWarnings("unchecked")
    private synchronized <T> T getCacheValueIfPresent(String key) {
        return (T) cache.getIfPresent(key);
    }
    private synchronized void putCacheValue(String key, Object value) {
        cache.put(key, value);
        LOGGER.info("Put cache: {}", key);
    }
    private synchronized void clearCacheValue(String key) {
        Object value = cache.getIfPresent(key);
        if (value instanceof HeapDumpAnalyzer) {
            // Heap-dump analyzers hold native/disk resources that must be released.
            ((HeapDumpAnalyzer) value).dispose();
        }
        cache.invalidate(key);
        LOGGER.info("Clear cache: {}", key);
    }
    private synchronized ProgressListener getFileListener(String fileName) {
        return listeners.get(fileName);
    }
    /** Registers a listener; returns false if one is already registered for the file. */
    private synchronized boolean putFileListener(String fileName, ProgressListener listener) {
        if (listeners.containsKey(fileName)) {
            return false;
        }
        listeners.put(fileName, listener);
        return true;
    }
    private synchronized void removeFileListener(String fileName) {
        listeners.remove(fileName);
    }
    // Builds a terminal Progress (SUCCESS / ERROR / NOT_STARTED) for a file that has
    // no in-flight analysis.
    private org.eclipse.jifa.common.vo.Progress buildProgressIfFinished(FileType fileType, String fileName) {
        if (getCacheValueIfPresent(fileName) != null) {
            org.eclipse.jifa.common.vo.Progress result = new org.eclipse.jifa.common.vo.Progress();
            result.setPercent(1);
            result.setState(ProgressState.SUCCESS);
            return result;
        }
        File failed = new File(FileSupport.errorLogPath(fileType, fileName));
        if (failed.exists()) {
            org.eclipse.jifa.common.vo.Progress result = new org.eclipse.jifa.common.vo.Progress();
            result.setState(ProgressState.ERROR);
            result.setMessage(FileUtil.content(failed));
            return result;
        }
        org.eclipse.jifa.common.vo.Progress result = new org.eclipse.jifa.common.vo.Progress();
        result.setState(ProgressState.NOT_STARTED);
        return result;
    }
    /** Builds the value to cache for a key; may throw anything (wrapped into JifaException). */
    interface Builder<T> {
        T build(String key) throws Throwable;
    }
    // Initialization-on-demand holder idiom for the singleton.
    private static class Singleton {
        static Analyzer INSTANCE = new Analyzer();
    }
    /** Returns the (possibly cached) GC log model for {@code info}, without progress reporting. */
    public static GCModel getOrOpenGCLogModel(String info) {
        return getOrOpenGCLogModel(info, ProgressListener.NoOpProgressListener);
    }
    private static GCModel getOrOpenGCLogModel(String gclogFile, ProgressListener listener) {
        return getOrBuild(gclogFile,
                          key -> new GCLogAnalyzer(new File(FileSupport.filePath(GC_LOG, gclogFile)),
                                                   listener).parse());
    }
    /** Returns the (possibly cached) thread-dump analyzer, without progress reporting. */
    public static ThreadDumpAnalyzer threadDumpAnalyzerOf(String threadDumpFile) {
        return threadDumpAnalyzerOf(threadDumpFile, ProgressListener.NoOpProgressListener);
    }
    public static ThreadDumpAnalyzer threadDumpAnalyzerOf(String threadDumpFile,
                                                          ProgressListener listener) {
        return getOrBuild(threadDumpFile,
                          key -> ThreadDumpAnalyzer
                              .build(new File(FileSupport.filePath(THREAD_DUMP, threadDumpFile)).toPath(), listener));
    }
}
| 3,251 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/support/TransferListener.java | /********************************************************************************
* Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.support;
import lombok.Data;
import org.eclipse.jifa.common.enums.FileTransferState;
import org.eclipse.jifa.common.enums.FileType;
import org.eclipse.jifa.common.enums.ProgressState;
import org.eclipse.jifa.worker.support.FileSupport;
/**
 * Tracks the state and byte counts of a single file transfer into the worker.
 *
 * <p>Creating an instance initializes the on-disk info file for the target;
 * state changes are mirrored into that info file via {@link FileSupport}.
 * Getters/setters are generated by Lombok's {@code @Data}.
 */
@Data
public class TransferListener {
    private ProgressState state;
    private long totalSize;
    private long transferredSize;
    private String errorMsg;
    private FileType fileType;
    private String fileName;

    public TransferListener(FileType fileType, String originalName, String fileName) {
        this.fileType = fileType;
        this.fileName = fileName;
        this.state = ProgressState.NOT_STARTED;
        FileSupport.initInfoFile(fileType, originalName, fileName);
    }

    /**
     * Moves this listener to {@code newState}, persisting the corresponding
     * transfer state (and, on success, the final size) to the info file.
     * No-op when the state is unchanged.
     */
    public synchronized void updateState(ProgressState newState) {
        if (this.state == newState) {
            return;
        }
        switch (newState) {
            case SUCCESS:
                FileSupport.updateTransferState(fileType, fileName, FileTransferState.SUCCESS);
                totalSize = FileSupport.info(fileType, fileName).getSize();
                break;
            case ERROR:
                FileSupport.updateTransferState(fileType, fileName, FileTransferState.ERROR);
                break;
            case IN_PROGRESS:
                // Only the first NOT_STARTED -> IN_PROGRESS transition is persisted.
                if (this.state == ProgressState.NOT_STARTED) {
                    FileSupport.updateTransferState(fileType, fileName, FileTransferState.IN_PROGRESS);
                }
                break;
            default:
                break;
        }
        this.state = newState;
    }

    /** Adds {@code delta} bytes to the running transferred-byte counter. */
    public synchronized void addTransferredSize(long delta) {
        this.transferredSize += delta;
    }
}
| 3,252 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/support/BinFunction.java | /********************************************************************************
* Copyright (c) 2020 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.support;
/**
 * A function accepting two arguments and producing a result, whose body may
 * throw a checked exception (unlike {@link java.util.function.BiFunction}).
 *
 * @param <A> type of the first argument
 * @param <B> type of the second argument
 * @param <R> type of the result
 */
@FunctionalInterface
public interface BinFunction<A, B, R> {

    /**
     * Applies this function to the given arguments.
     *
     * @throws Exception if the computation fails
     */
    R apply(A a, B b) throws Exception;
}
| 3,253 |
0 | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker | Create_ds/eclipse-jifa/backend/worker/src/main/java/org/eclipse/jifa/worker/support/FileSupport.java | /********************************************************************************
* Copyright (c) 2020, 2022 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
********************************************************************************/
package org.eclipse.jifa.worker.support;
import com.aliyun.oss.OSSClient;
import com.aliyun.oss.event.ProgressEventType;
import com.aliyun.oss.model.DownloadFileRequest;
import com.aliyun.oss.model.ObjectMetadata;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.SdkClientException;
import com.amazonaws.auth.*;
import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.services.s3.model.ResponseHeaderOverrides;
import com.amazonaws.services.s3.model.S3Object;
import io.vertx.core.Promise;
import net.schmizz.sshj.SSHClient;
import net.schmizz.sshj.common.StreamCopier;
import net.schmizz.sshj.xfer.FileSystemFile;
import net.schmizz.sshj.xfer.scp.SCPDownloadClient;
import net.schmizz.sshj.xfer.scp.SCPFileTransfer;
import org.apache.commons.io.FileUtils;
import org.eclipse.jifa.common.Constant;
import org.eclipse.jifa.common.ErrorCode;
import org.eclipse.jifa.common.JifaException;
import org.eclipse.jifa.common.enums.FileTransferState;
import org.eclipse.jifa.common.enums.FileType;
import org.eclipse.jifa.common.enums.ProgressState;
import org.eclipse.jifa.common.util.FileUtil;
import org.eclipse.jifa.common.vo.FileInfo;
import org.eclipse.jifa.common.vo.TransferringFile;
import org.eclipse.jifa.worker.WorkerGlobal;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import static org.eclipse.jifa.common.util.Assertion.ASSERT;
import static org.eclipse.jifa.common.util.GsonHolder.GSON;
import static org.eclipse.jifa.worker.Constant.File.INFO_FILE_SUFFIX;
public class FileSupport {
public static final List<String> PUB_KEYS = new ArrayList<>();
private static final String ERROR_LOG = "error.log";
private static final Logger LOGGER = LoggerFactory.getLogger(FileSupport.class);
private static final Map<String, TransferListener> transferListeners = new ConcurrentHashMap<>();
private static String[] keyLocations() {
final String base = System.getProperty("user.home") + File.separator + ".ssh" + File.separator;
return new String[]{base + "jifa-ssh-key"};
}
private static String[] pubKeyLocations() {
final String base = System.getProperty("user.home") + File.separator + ".ssh" + File.separator;
return new String[]{base + "jifa-ssh-key.pub"};
}
public static void init() {
for (FileType type : FileType.values()) {
File file = new File(WorkerGlobal.workspace() + File.separator + type.getTag());
if (file.exists()) {
ASSERT.isTrue(file.isDirectory(), String.format("%s must be directory", file.getAbsolutePath()));
} else {
ASSERT.isTrue(file.mkdirs(), String.format("Can not create %s ", file.getAbsolutePath()));
}
}
for (String loc : pubKeyLocations()) {
File f = new File(loc);
if (f.exists() && f.length() > 0) {
PUB_KEYS.add(FileUtil.content(f));
}
}
}
public static void initInfoFile(FileType type, String originalName, String name) {
ASSERT.isTrue(new File(dirPath(type, name)).mkdirs(), "Make directory failed");
FileInfo info = buildInitFileInfo(type, originalName, name);
try {
FileUtils.write(infoFile(type, name), GSON.toJson(info), Charset.defaultCharset());
} catch (IOException e) {
LOGGER.error("Write file information failed", e);
throw new JifaException(e);
}
}
private static FileInfo buildInitFileInfo(FileType type, String originalName, String name) {
FileInfo info = new FileInfo();
info.setOriginalName(originalName);
info.setName(name);
info.setSize(0);
info.setType(type);
info.setTransferState(FileTransferState.NOT_STARTED);
info.setDownloadable(false);
info.setCreationTime(System.currentTimeMillis());
return info;
}
public static List<FileInfo> info(FileType type) {
List<FileInfo> infoList = new ArrayList<>();
File dir = new File(dirPath(type));
ASSERT.isTrue(dir.isDirectory(), ErrorCode.SANITY_CHECK);
File[] subDirs = dir.listFiles(File::isDirectory);
if (subDirs == null) {
return infoList;
}
for (File subDir : subDirs) {
String infoFileName = subDir.getName() + INFO_FILE_SUFFIX;
File[] files = subDir.listFiles((d, name) -> infoFileName.equals(name));
if (files != null && files.length == 1) {
File infoFile = files[0];
try {
FileInfo info = GSON.fromJson(FileUtils.readFileToString(infoFile, Charset.defaultCharset()),
FileInfo.class);
ensureValidFileInfo(info);
infoList.add(info);
} catch (Exception e) {
LOGGER.error("Read file information failed: {}", infoFile.getAbsolutePath(), e);
// should not throw exception here
}
}
}
return infoList;
}
private static void ensureValidFileInfo(FileInfo info) {
ASSERT.notNull(info)
.notNull(info.getOriginalName())
.notNull(info.getName())
.notNull(info.getType())
.notNull(info.getTransferState())
.isTrue(info.getSize() >= 0)
.isTrue(info.getCreationTime() > 0);
}
public static FileInfo getOrGenInfo(FileType type, String name) {
File file = new File(FileSupport.filePath(type, name));
ASSERT.isTrue(file.exists(), ErrorCode.FILE_DOES_NOT_EXIST);
File infoFile = infoFile(type, name);
if (infoFile.exists()) {
return info(type, name);
}
FileInfo fileInfo = buildInitFileInfo(type, name, name);
fileInfo.setCreationTime(file.lastModified());
fileInfo.setTransferState(FileTransferState.SUCCESS);
fileInfo.setSize(file.length());
save(fileInfo);
return fileInfo;
}
public static FileInfo info(FileType type, String name) {
File infoFile = infoFile(type, name);
FileInfo fileInfo;
try {
fileInfo = GSON.fromJson(FileUtils.readFileToString(infoFile, Charset.defaultCharset()), FileInfo.class);
ensureValidFileInfo(fileInfo);
} catch (IOException e) {
LOGGER.error("Read file information failed", e);
throw new JifaException(e);
}
return fileInfo;
}
public static FileInfo infoOrNull(FileType type, String name) {
try {
return info(type, name);
} catch (Exception e) {
return null;
}
}
public static void save(FileInfo info) {
try {
FileUtils
.write(infoFile(info.getType(), info.getName()), GSON.toJson(info), Charset.defaultCharset());
} catch (IOException e) {
LOGGER.error("Save file information failed", e);
throw new JifaException(e);
}
}
public static void delete(FileType type, String name) {
try {
FileUtils.deleteDirectory(new File(dirPath(type, name)));
} catch (IOException e) {
LOGGER.error("Delete file failed", e);
throw new JifaException(e);
}
}
public static void delete(FileInfo[] fileInfos) {
for (FileInfo fileInfo : fileInfos) {
try {
delete(fileInfo.getType(), fileInfo.getName());
} catch (Throwable t) {
LOGGER.error("Delete file failed", t);
}
}
}
public static void sync(FileInfo[] fileInfos, boolean cleanStale) {
Map<FileType, List<String>> files = new HashMap<>(){{
for (FileType ft : FileType.values()) {
// In case no files returned
this.put(ft, new ArrayList<>());
}
}};
for (FileInfo fi : fileInfos) {
files.get(fi.getType()).add(fi.getName());
}
long lastModified = System.currentTimeMillis() - Constant.STALE_THRESHOLD;
for (FileType ft : files.keySet()) {
List<String> names = files.get(ft);
File[] listFiles = new File(dirPath(ft)).listFiles();
if (listFiles == null) {
continue;
}
for (File lf : listFiles) {
if (names.contains(lf.getName())) {
continue;
}
LOGGER.info("{} is not synchronized", lf.getName());
if (cleanStale && lf.lastModified() < lastModified) {
LOGGER.info("Delete stale file {}", lf.getName());
delete(ft, lf.getName());
}
}
}
}
public static void updateTransferState(FileType type, String name, FileTransferState state) {
FileInfo info = info(type, name);
info.setTransferState(state);
if (state == FileTransferState.SUCCESS) {
// for worker, file is downloadable after transferred
info.setSize(new File(FileSupport.filePath(type, name)).length());
info.setDownloadable(true);
}
save(info);
}
private static String dirPath(FileType type) {
return WorkerGlobal.workspace() + File.separator + type.getTag();
}
public static String dirPath(FileType type, String name) {
String defaultDirPath = dirPath(type) + File.separator + name;
return WorkerGlobal.hooks().mapDirPath(type, name, defaultDirPath);
}
private static String infoFilePath(FileType type, String name) {
return dirPath(type, name) + File.separator + name + INFO_FILE_SUFFIX;
}
private static File infoFile(FileType type, String name) {
return new File(infoFilePath(type, name));
}
public static String filePath(FileType type, String name) {
return filePath(type, name, name);
}
public static String filePath(FileType type, String name, String childrenName) {
String defaultFilePath = dirPath(type, name) + File.separator + childrenName;
return WorkerGlobal.hooks().mapFilePath(type, name, childrenName, defaultFilePath);
}
public static String errorLogPath(FileType fileType, String file) {
return FileSupport.filePath(fileType, file, ERROR_LOG);
}
public static String indexPath(FileType fileType, String file) {
String indexFileNamePrefix;
int i = file.lastIndexOf('.');
if (i >= 0) {
indexFileNamePrefix = file.substring(0, i + 1);
} else {
indexFileNamePrefix = file + '.';
}
String defaultIndexPath = FileSupport.filePath(fileType, file, indexFileNamePrefix + "index");
return WorkerGlobal.hooks().mapIndexPath(fileType, file, defaultIndexPath);
}
public static TransferListener createTransferListener(FileType fileType, String originalName, String fileName) {
TransferListener listener = new TransferListener(fileType, originalName, fileName);
transferListeners.put(fileName, listener);
return listener;
}
public static void removeTransferListener(String fileName) {
transferListeners.remove(fileName);
}
public static TransferListener getTransferListener(String fileName) {
return transferListeners.get(fileName);
}
public static void transferBySCP(String user, String hostname, String src, FileType fileType, String fileName,
TransferListener transferProgressListener, Promise<TransferringFile> promise) {
transferBySCP(user, null, hostname, src, fileType, fileName, transferProgressListener, promise);
}
public static void transferBySCP(String user, String pwd, String hostname, String src, FileType fileType,
String fileName, TransferListener transferProgressListener,
Promise<TransferringFile> promise) {
transferProgressListener.updateState(ProgressState.IN_PROGRESS);
SSHClient ssh = new SSHClient();
ssh.addHostKeyVerifier((h, port, key) -> true);
try {
ssh.connect(hostname);
if (pwd != null) {
ssh.authPassword(user, pwd);
} else {
ssh.authPublickey(user, keyLocations());
}
SCPFileTransfer transfer = ssh.newSCPFileTransfer();
transfer.setTransferListener(new net.schmizz.sshj.xfer.TransferListener() {
@Override
public net.schmizz.sshj.xfer.TransferListener directory(String name) {
return this;
}
@Override
public StreamCopier.Listener file(String name, long size) {
transferProgressListener.setTotalSize(size);
return transferProgressListener::setTransferredSize;
}
});
SCPDownloadClient downloadClient = transfer.newSCPDownloadClient();
promise.complete(new TransferringFile(fileName));
// do not copy dir now
downloadClient.setRecursiveMode(false);
downloadClient.copy(src, new FileSystemFile(FileSupport.filePath(fileType, fileName)));
transferProgressListener.updateState(ProgressState.SUCCESS);
} catch (Exception e) {
LOGGER.error("SSH transfer failed");
handleTransferError(fileName, transferProgressListener, promise, e);
} finally {
try {
ssh.disconnect();
} catch (IOException e) {
LOGGER.error("SSH disconnect failed", e);
}
}
}
public static void transferByURL(String url, FileType fileType, String fileName, TransferListener listener,
Promise<TransferringFile> promise) {
InputStream in = null;
OutputStream out = null;
String filePath = FileSupport.filePath(fileType, fileName);
try {
URLConnection conn = new URL(url).openConnection();
listener.updateState(ProgressState.IN_PROGRESS);
promise.complete(new TransferringFile(fileName));
listener.setTotalSize(Math.max(conn.getContentLength(), 0));
in = conn.getInputStream();
out = new FileOutputStream(filePath);
byte[] buffer = new byte[8192];
int length;
while ((length = in.read(buffer)) > 0) {
out.write(buffer, 0, length);
listener.addTransferredSize(length);
}
listener.updateState(ProgressState.SUCCESS);
} catch (Exception e) {
LOGGER.error("URL transfer failed");
handleTransferError(fileName, listener, promise, e);
} finally {
try {
if (in != null) {
in.close();
}
if (out != null) {
out.close();
}
} catch (IOException e) {
LOGGER.error("Close stream failed", e);
}
}
}
public static void transferByOSS(String endpoint, String accessKeyId, String accessKeySecret, String bucketName,
String objectName, FileType fileType, String fileName,
TransferListener transferProgressListener,
Promise<TransferringFile> promise) {
OSSClient ossClient = null;
try {
ossClient = new OSSClient(endpoint, accessKeyId, accessKeySecret);
ObjectMetadata meta = ossClient.getObjectMetadata(bucketName, objectName);
transferProgressListener.setTotalSize(meta.getContentLength());
promise.complete(new TransferringFile(fileName));
DownloadFileRequest downloadFileRequest = new DownloadFileRequest(bucketName, objectName);
downloadFileRequest.setDownloadFile(new File(FileSupport.filePath(fileType, fileName)).getAbsolutePath());
// 128m per thread now
downloadFileRequest.setPartSize(128 * 1024 * 1024);
downloadFileRequest.setTaskNum(Runtime.getRuntime().availableProcessors());
downloadFileRequest.setEnableCheckpoint(true);
downloadFileRequest.withProgressListener(progressEvent -> {
long bytes = progressEvent.getBytes();
ProgressEventType eventType = progressEvent.getEventType();
switch (eventType) {
case TRANSFER_STARTED_EVENT:
transferProgressListener.updateState(ProgressState.IN_PROGRESS);
break;
case RESPONSE_BYTE_TRANSFER_EVENT:
transferProgressListener.addTransferredSize(bytes);
break;
case TRANSFER_FAILED_EVENT:
transferProgressListener.updateState(ProgressState.ERROR);
break;
default:
break;
}
});
ossClient.downloadFile(downloadFileRequest);
transferProgressListener.updateState(ProgressState.SUCCESS);
} catch (Throwable t) {
LOGGER.error("OSS transfer failed");
handleTransferError(fileName, transferProgressListener, promise, t);
} finally {
if (ossClient != null) {
ossClient.shutdown();
}
}
}
public static void transferByS3(String region, String accessKey, String secretKey, String bucketName,
String objectName, FileType fileType, String fileName,
TransferListener transferProgressListener,
Promise<TransferringFile> promise) {
AmazonS3 s3Client = null;
try {
AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
ClientConfiguration clientConfig = new ClientConfiguration();
clientConfig.setProtocol(Protocol.HTTPS);
s3Client = AmazonS3ClientBuilder.standard()
.withCredentials(new AWSStaticCredentialsProvider(credentials))
.withCredentials(new InstanceProfileCredentialsProvider(false))
.withClientConfiguration(clientConfig)
.withRegion(region)
.withPathStyleAccessEnabled(true)
.build();
GetObjectRequest getObjectRequest = new GetObjectRequest(bucketName, objectName)
.withGeneralProgressListener(progressEvent -> {
long bytes = progressEvent.getBytes();
switch (progressEvent.getEventType()) {
case TRANSFER_STARTED_EVENT:
transferProgressListener.updateState(ProgressState.IN_PROGRESS);
break;
case RESPONSE_BYTE_TRANSFER_EVENT:
transferProgressListener.addTransferredSize(bytes);
break;
case TRANSFER_FAILED_EVENT:
transferProgressListener.updateState(ProgressState.ERROR);
break;
default:
break;
}
});
com.amazonaws.services.s3.model.ObjectMetadata objectMetadata =
s3Client.getObjectMetadata(bucketName, objectName);
transferProgressListener.setTotalSize(objectMetadata.getContentLength());
promise.complete(new TransferringFile(fileName));
s3Client.getObject(getObjectRequest, new File(FileSupport.filePath(fileType, fileName)));
transferProgressListener.updateState(ProgressState.SUCCESS);
} catch (Throwable t) {
LOGGER.error("S3 transfer failed");
handleTransferError(fileName, transferProgressListener, promise, t);
} finally {
if (s3Client != null) {
s3Client.shutdown();
}
}
}
    /**
     * Centralized error handling for OSS/S3 transfers.
     *
     * If the promise was already completed (the caller was notified and the transfer had begun),
     * the listener is switched to ERROR and the root cause recorded so it can be surfaced to the
     * user. Otherwise the transfer failed before it really started, so the partially written
     * local file and its listener are discarded.
     *
     * Always rethrows the failure wrapped in a JifaException.
     */
    private static void handleTransferError(String fileName, TransferListener transferProgressListener,
                                            Promise<TransferringFile> promise, Throwable t) {
        if (promise.future().isComplete()) {
            transferProgressListener.updateState(ProgressState.ERROR);
            // Walk to the root cause; its toString is usually the most informative message.
            Throwable cause = t;
            while (cause.getCause() != null) {
                cause = cause.getCause();
            }
            transferProgressListener.setErrorMsg(cause.toString());
        } else {
            // NOTE(review): the not-yet-completed promise is never failed here; presumably the
            // JifaException thrown below reaches the caller instead — confirm.
            FileSupport.delete(transferProgressListener.getFileType(), fileName);
            removeTransferListener(fileName);
        }
        throw new JifaException(ErrorCode.TRANSFER_ERROR, t);
    }
public static long getTotalDiskSpace() {
return new File(System.getProperty("user.home")).getTotalSpace() >> 20;
}
public static long getUsedDiskSpace() {
return FileUtils.sizeOfDirectory(new File(System.getProperty("user.home"))) >> 20;
}
}
| 3,254 |
0 | Create_ds/accumulo-examples/spark/src/main/java/org/apache/accumulo | Create_ds/accumulo-examples/spark/src/main/java/org/apache/accumulo/spark/CopyPlus5K.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.spark;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.hadoop.mapreduce.AccumuloFileOutputFormat;
import org.apache.accumulo.hadoop.mapreduce.AccumuloInputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.spark.Partitioner;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Spark example: reads 100 rows from an Accumulo input table, adds 5000 to each value, and
 * writes the results to an output table either with a {@code BatchWriter} ("batch" mode) or by
 * bulk-importing RFiles produced by Spark ("bulk" mode).
 */
public class CopyPlus5K {

  private static final Logger log = LoggerFactory.getLogger(CopyPlus5K.class);

  /**
   * Partitions keys by row against a fixed list of sorted split points, mimicking how an
   * Accumulo table with the same splits would assign rows to tablets.
   */
  public static class AccumuloRangePartitioner extends Partitioner {

    private static final long serialVersionUID = 1L;
    private List<String> splits;

    AccumuloRangePartitioner(String... listSplits) {
      this.splits = Arrays.asList(listSplits);
    }

    @Override
    public int getPartition(Object o) {
      // binarySearch returns -(insertionPoint) - 1 on a miss; convert to the insertion point
      int index = Collections.binarySearch(splits, ((Key) o).getRow().toString());
      index = index < 0 ? (index + 1) * -1 : index;
      return index;
    }

    @Override
    public int numPartitions() {
      // n split points divide the row space into n + 1 ranges
      return splits.size() + 1;
    }
  }

  /**
   * Deletes any leftover HDFS output and Accumulo tables from previous runs, recreates both
   * tables, and seeds the input table with 100 rows (values 0..99).
   */
  private static void cleanupAndCreateTables(Properties props) throws Exception {
    FileSystem hdfs = FileSystem.get(new Configuration());
    if (hdfs.exists(rootPath)) {
      hdfs.delete(rootPath, true);
    }
    try (AccumuloClient client = Accumulo.newClient().from(props).build()) {
      if (client.tableOperations().exists(inputTable)) {
        client.tableOperations().delete(inputTable);
      }
      if (client.tableOperations().exists(outputTable)) {
        client.tableOperations().delete(outputTable);
      }
      // Create tables
      try {
        client.tableOperations().create(inputTable);
      } catch (TableExistsException e) {
        log.error("Something went wrong. Table '{}' should have been deleted prior to creation "
            + "attempt!", inputTable);
        return;
      }
      try {
        client.tableOperations().create(outputTable);
      } catch (TableExistsException e) {
        // Bug fix: this message previously logged inputTable even though the failure was on
        // outputTable.
        log.error("Something went wrong. Table '{}' should have been deleted prior to creation "
            + "attempt!", outputTable);
        return;
      }
      // Write data to input table
      try (BatchWriter bw = client.createBatchWriter(inputTable)) {
        for (int i = 0; i < 100; i++) {
          Mutation m = new Mutation(String.format("%03d", i));
          m.at().family("cf1").qualifier("cq1").put("" + i);
          bw.addMutation(m);
        }
      }
    }
  }

  private static final String inputTable = "spark_example_input";
  private static final String outputTable = "spark_example_output";
  private static final Path rootPath = new Path("/spark_example/");

  public static void main(String[] args) throws Exception {
    if ((!args[0].equals("batch") && !args[0].equals("bulk")) || args[1].isEmpty()) {
      System.out.println("Usage: ./run.sh [batch|bulk] /path/to/accumulo-client.properties");
      System.exit(1);
    }
    // Read client properties from file
    final Properties props = Accumulo.newClientProperties().from(args[1]).build();
    cleanupAndCreateTables(props);
    SparkConf conf = new SparkConf();
    conf.setAppName("CopyPlus5K");
    // KryoSerializer is needed for serializing Accumulo Key when partitioning data for bulk import
    conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
    conf.registerKryoClasses(new Class[] {Key.class, Value.class, Properties.class});
    try (JavaSparkContext sc = new JavaSparkContext(conf)) {
      Job job = Job.getInstance();
      // Read input from Accumulo
      AccumuloInputFormat.configure().clientProperties(props).table(inputTable).store(job);
      JavaPairRDD<Key,Value> data = sc.newAPIHadoopRDD(job.getConfiguration(),
          AccumuloInputFormat.class, Key.class, Value.class);
      // Add 5K to all values
      JavaPairRDD<Key,Value> dataPlus5K = data
          .mapValues(v -> new Value("" + (Integer.parseInt(v.toString()) + 5_000)));
      if (args[0].equals("batch")) {
        // Write output using batch writer
        dataPlus5K.foreachPartition(iter -> {
          // Intentionally created an Accumulo client for each partition to avoid attempting to
          // serialize it and send it to each remote process.
          try (AccumuloClient client = Accumulo.newClient().from(props).build();
              BatchWriter bw = client.createBatchWriter(outputTable)) {
            iter.forEachRemaining(kv -> {
              Key key = kv._1;
              Value val = kv._2;
              Mutation m = new Mutation(key.getRow());
              m.at().family(key.getColumnFamily()).qualifier(key.getColumnQualifier())
                  .visibility(key.getColumnVisibility()).timestamp(key.getTimestamp()).put(val);
              try {
                bw.addMutation(m);
              } catch (MutationsRejectedException e) {
                e.printStackTrace();
              }
            });
          }
        });
      } else if (args[0].equals("bulk")) {
        // Write output using bulk import
        // Create HDFS directory for bulk import
        FileSystem hdfs = FileSystem.get(new Configuration());
        hdfs.mkdirs(rootPath);
        Path outputDir = new Path(rootPath.toString() + "/output");
        // Write Spark output to HDFS
        AccumuloFileOutputFormat.configure().outputPath(outputDir).store(job);
        Partitioner partitioner = new AccumuloRangePartitioner("3", "7");
        JavaPairRDD<Key,Value> partData = dataPlus5K
            .repartitionAndSortWithinPartitions(partitioner);
        partData.saveAsNewAPIHadoopFile(outputDir.toString(), Key.class, Value.class,
            AccumuloFileOutputFormat.class);
        // Bulk import into Accumulo
        try (AccumuloClient client = Accumulo.newClient().from(props).build()) {
          client.tableOperations().importDirectory(outputDir.toString()).to(outputTable).load();
        }
      } else {
        // Unreachable given the argument check above, but kept as a defensive fallback
        System.out.println("Unknown method to write output: " + args[0]);
        System.exit(1);
      }
    }
  }
}
| 3,255 |
0 | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples/ExamplesIT.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples;
import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
import java.io.BufferedWriter;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.time.Duration;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.user.AgeOffFilter;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.client.RandomBatchScanner;
import org.apache.accumulo.examples.client.ReadWriteExample;
import org.apache.accumulo.examples.client.RowOperations;
import org.apache.accumulo.examples.client.SequentialBatchWriter;
import org.apache.accumulo.examples.combiner.StatsCombiner;
import org.apache.accumulo.examples.constraints.MaxMutationSize;
import org.apache.accumulo.examples.helloworld.Insert;
import org.apache.accumulo.examples.helloworld.Read;
import org.apache.accumulo.examples.isolation.InterferenceTest;
import org.apache.accumulo.examples.mapreduce.RegexExample;
import org.apache.accumulo.examples.mapreduce.RowHash;
import org.apache.accumulo.examples.mapreduce.TableToFile;
import org.apache.accumulo.examples.mapreduce.TeraSortIngest;
import org.apache.accumulo.examples.mapreduce.WordCount;
import org.apache.accumulo.examples.shard.ContinuousQuery;
import org.apache.accumulo.examples.shard.Index;
import org.apache.accumulo.examples.shard.Query;
import org.apache.accumulo.examples.shard.Reverse;
import org.apache.accumulo.harness.AccumuloClusterHarness;
import org.apache.accumulo.minicluster.MemoryUnit;
import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
import org.apache.accumulo.test.TestIngest;
import org.apache.accumulo.test.TestIngest.IngestParams;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
/**
 * End-to-end integration tests that run the example programs from this repository against a
 * MiniAccumuloCluster, exercising both their library entry points and their CLI main methods.
 */
public class ExamplesIT extends AccumuloClusterHarness {

  private static final BatchWriterConfig bwc = new BatchWriterConfig();
  private static final String auths = "A,B";

  private AccumuloClient c;
  private BatchWriter bw;
  private IteratorSetting is;
  private String dir;
  private FileSystem fs;
  private Authorizations origAuths;

  @Override
  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopConf) {
    // 128MB * 3
    cfg.setDefaultMemory(cfg.getDefaultMemory() * 3, MemoryUnit.BYTE);
    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, "false");
  }

  /**
   * Creates a client, writes an accumulo-client.properties file for the example CLIs to read,
   * and grants the test authorizations used by the examples.
   */
  @BeforeEach
  public void setupTest() throws Exception {
    c = Accumulo.newClient().from(getClientProps()).build();
    String user = c.whoami();
    String instance = getClientInfo().getInstanceName();
    String keepers = getClientInfo().getZooKeepers();
    AuthenticationToken token = getAdminToken();
    if (token instanceof PasswordToken) {
      String passwd = new String(((PasswordToken) getAdminToken()).getPassword(), UTF_8);
      writeClientPropsFile(getClientPropsFile(), instance, keepers, user, passwd);
    } else {
      fail("Unknown token type: " + token);
    }
    fs = getCluster().getFileSystem();
    dir = new Path(cluster.getTemporaryPath(), getClass().getName()).toString();
    origAuths = c.securityOperations().getUserAuthorizations(user);
    c.securityOperations().changeUserAuthorizations(user, new Authorizations(auths.split(",")));
  }

  /** Restores the user's original authorizations and releases client resources. */
  @AfterEach
  public void teardownTest() throws Exception {
    if (bw != null) {
      bw.close();
    }
    if (null != origAuths) {
      c.securityOperations().changeUserAuthorizations(getAdminPrincipal(), origAuths);
    }
    c.close();
  }

  /** Writes a minimal accumulo-client.properties with password authentication. */
  public static void writeClientPropsFile(String file, String instance, String keepers, String user,
      String password) throws IOException {
    try (BufferedWriter writer = Files.newBufferedWriter(Paths.get(file))) {
      writer.write("instance.name=" + instance + "\n");
      writer.write("instance.zookeepers=" + keepers + "\n");
      writer.write("auth.type=password\n");
      writer.write("auth.principal=" + user + "\n");
      writer.write("auth.token=" + password + "\n");
    }
  }

  private String getClientPropsFile() {
    return System.getProperty("user.dir") + "/target/accumulo-client.properties";
  }

  @Override
  protected Duration defaultTimeout() {
    return Duration.ofMinutes(6);
  }

  @Test
  public void testAgeoffFilter() throws Exception {
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    is = new IteratorSetting(10, AgeOffFilter.class);
    AgeOffFilter.setTTL(is, 1000L);
    c.tableOperations().attachIterator(tableName, is);
    sleepUninterruptibly(500, TimeUnit.MILLISECONDS); // let zookeeper updates propagate.
    bw = c.createBatchWriter(tableName, bwc);
    Mutation m = new Mutation("foo");
    m.put("a", "b", "c");
    bw.addMutation(m);
    bw.close();
    sleepUninterruptibly(1, TimeUnit.SECONDS);
    try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
      // the entry is older than the 1s TTL, so the filter must suppress it
      assertTrue(scanner.stream().findAny().isEmpty());
    }
  }

  @Test
  public void testStatsCombiner() throws Exception {
    String table = getUniqueNames(1)[0];
    c.tableOperations().create(table);
    is = new IteratorSetting(10, StatsCombiner.class);
    StatsCombiner.setCombineAllColumns(is, true);
    StatsCombiner.setRadix(is, 10);
    assertTrue(is.getOptions().containsKey(StatsCombiner.RADIX_OPTION));
    c.tableOperations().attachIterator(table, is);
    bw = c.createBatchWriter(table, bwc);
    // Write two mutations otherwise the NativeMap would dedupe them into a single update
    Mutation m = new Mutation("foo");
    m.put("a", "b", "1");
    bw.addMutation(m);
    m = new Mutation("foo");
    m.put("a", "b", "3");
    bw.addMutation(m);
    bw.flush();
    try (Scanner scanner = c.createScanner(table, Authorizations.EMPTY)) {
      Iterator<Entry<Key,Value>> iter = scanner.iterator();
      assertTrue(iter.hasNext(), "Iterator had no results");
      Entry<Key,Value> e = iter.next();
      // combined stats are min,max,sum,count
      assertEquals("1,3,4,2", e.getValue().toString(), "Results ");
      assertFalse(iter.hasNext(), "Iterator had additional results");
      m = new Mutation("foo");
      m.put("a", "b", "0,20,20,2");
      bw.addMutation(m);
      bw.close();
    }
    try (Scanner scanner = c.createScanner(table, Authorizations.EMPTY)) {
      Iterator<Entry<Key,Value>> iter = scanner.iterator();
      assertTrue(iter.hasNext(), "Iterator had no results");
      Entry<Key,Value> e = iter.next();
      assertEquals("0,20,24,4", e.getValue().toString(), "Results ");
      assertFalse(iter.hasNext(), "Iterator had additional results");
    }
  }

  @Test
  public void testShardedIndex() throws Exception {
    File src = new File(System.getProperty("user.dir") + "/src");
    assumeTrue(src.exists());
    String[] names = getUniqueNames(3);
    final String shard = names[0], index = names[1];
    c.tableOperations().create(shard);
    c.tableOperations().create(index);
    bw = c.createBatchWriter(shard, bwc);
    Index.index(30, src, "\\W+", bw);
    bw.close();
    List<String> found;
    try (BatchScanner bs = c.createBatchScanner(shard, Authorizations.EMPTY, 4)) {
      found = Query.query(bs, Arrays.asList("foo", "bar"), null);
    }
    // should find ourselves
    assertTrue(found.stream().anyMatch(file -> file.endsWith("/ExamplesIT.java")));
    String[] args = new String[] {"-c", getClientPropsFile(), "--shardTable", shard, "--doc2Term",
        index};
    // create a reverse index
    goodExec(Reverse.class, args);
    args = new String[] {"-c", getClientPropsFile(), "--shardTable", shard, "--doc2Term", index,
        "--terms", "5", "--count", "1000"};
    // run some queries
    goodExec(ContinuousQuery.class, args);
  }

  @Test
  public void testMaxMutationConstraint() throws Exception {
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    c.tableOperations().addConstraint(tableName, MaxMutationSize.class.getName());
    IngestParams params = new IngestParams(c.properties(), tableName, 1);
    params.cols = 1000;
    try {
      TestIngest.ingest(c, params);
      // Bug fix: previously the test passed vacuously when no exception was thrown; the
      // oversized mutation must be rejected by the constraint.
      fail("Expected ingest to be rejected by MaxMutationSize constraint");
    } catch (MutationsRejectedException ex) {
      assertEquals(1, ex.getConstraintViolationSummaries().size());
    }
  }

  @Test
  public void testTeraSortAndRead() throws Exception {
    assumeTrue(getAdminToken() instanceof PasswordToken);
    String tableName = getUniqueNames(1)[0];
    String[] args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv",
        "10", "-xv", "10", "-t", tableName, "-c", getClientPropsFile(), "--splits", "4"};
    goodExec(TeraSortIngest.class, args);
    Path output = new Path(dir, "tmp/nines");
    if (fs.exists(output)) {
      fs.delete(output, true);
    }
    args = new String[] {"-c", getClientPropsFile(), "-t", tableName, "--rowRegex", ".*999.*",
        "--output", output.toString()};
    goodExec(RegexExample.class, args);
    args = new String[] {"-c", getClientPropsFile(), "-t", tableName, "--column", "c:"};
    goodExec(RowHash.class, args);
    output = new Path(dir, "tmp/tableFile");
    if (fs.exists(output)) {
      fs.delete(output, true);
    }
    args = new String[] {"-c", getClientPropsFile(), "-t", tableName, "--output",
        output.toString()};
    goodExec(TableToFile.class, args);
  }

  @Test
  public void testWordCount() throws Exception {
    assumeTrue(getAdminToken() instanceof PasswordToken);
    Path readme = new Path(new Path(System.getProperty("user.dir")), "README.md");
    if (!new File(readme.toString()).exists()) {
      fail("README.md does not exist!");
    }
    fs.copyFromLocalFile(readme, new Path(dir + "/tmp/wc/README.md"));
    String[] args = new String[] {"-c", getClientPropsFile(), "-i", dir + "/tmp/wc", "-t",
        getUniqueNames(1)[0]};
    goodExec(WordCount.class, args);
  }

  @Test
  public void testInsertWithBatchWriterAndReadData() throws Exception {
    String[] args;
    args = new String[] {"-c", getClientPropsFile()};
    goodExec(Insert.class, args);
    goodExec(Read.class, args);
  }

  @Test
  public void testIsolatedScansWithInterference() throws Exception {
    String[] args;
    args = new String[] {"-c", getClientPropsFile(), "-t", getUniqueNames(1)[0], "--iterations",
        "100000", "--isolated"};
    goodExec(InterferenceTest.class, args);
  }

  @Test
  public void testScansWithInterference() throws Exception {
    String[] args;
    args = new String[] {"-c", getClientPropsFile(), "-t", getUniqueNames(1)[0], "--iterations",
        "100000"};
    goodExec(InterferenceTest.class, args);
  }

  @Test
  public void testRowOperations() throws Exception {
    goodExec(RowOperations.class, "-c", getClientPropsFile());
  }

  @Test
  public void testReadWriteAndDelete() throws Exception {
    goodExec(ReadWriteExample.class, "-c", getClientPropsFile());
  }

  @Test
  public void testBatch() throws Exception {
    goodExec(SequentialBatchWriter.class, "-c", getClientPropsFile());
    goodExec(RandomBatchScanner.class, "-c", getClientPropsFile());
  }

  /** Runs the given example main class in-cluster and asserts a zero exit code. */
  private void goodExec(Class<?> theClass, String... args) throws IOException {
    Entry<Integer,String> pair;
    // We're already slurping stdout into memory (not redirecting to file). Might as well add it
    // to error message.
    pair = getClusterControl().execWithStdout(theClass, args);
    assertEquals(0, pair.getKey().intValue(), "stdout=" + pair.getValue());
  }
}
| 3,256 |
0 | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples/constraints/NumericValueConstraintTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.constraints;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.junit.jupiter.api.Test;
import com.google.common.collect.Iterables;
/** Unit tests for {@link NumericValueConstraint}. */
public class NumericValueConstraintTest {

  private final NumericValueConstraint nvc = new NumericValueConstraint();

  @Test
  public void testCheck() {
    // A purely numeric value must produce no violations (null result)
    Mutation valid = new Mutation("r");
    valid.put("cf", "cq", new Value("1234".getBytes()));
    assertNull(nvc.check(null, valid));

    // Two offending updates in one mutation still collapse to a single violation code
    Mutation invalid = new Mutation("r");
    invalid.put("cf", "cq", new Value("foo1234".getBytes()));
    invalid.put("cf2", "cq2", new Value("foo1234".getBytes()));
    short code = Iterables.getOnlyElement(nvc.check(null, invalid)).shortValue();
    assertEquals(NumericValueConstraint.NON_NUMERIC_VALUE, code);
  }

  @Test
  public void testGetViolationDescription() {
    // The known violation code maps to its message; unknown codes map to null
    assertEquals(NumericValueConstraint.VIOLATION_MESSAGE,
        nvc.getViolationDescription(NumericValueConstraint.NON_NUMERIC_VALUE));
    assertNull(nvc.getViolationDescription((short) 2));
  }
}
| 3,257 |
0 | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraintTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.constraints;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.junit.jupiter.api.Test;
import com.google.common.collect.ImmutableList;
/** Unit tests for {@link AlphaNumKeyConstraint}. */
public class AlphaNumKeyConstraintTest {

  private final AlphaNumKeyConstraint ankc = new AlphaNumKeyConstraint();

  @Test
  public void test() {
    // Fully alphanumeric row/family/qualifier: no violations reported
    Mutation clean = new Mutation("Row1");
    clean.put("Colf2", "ColQ3", new Value("value".getBytes()));
    assertNull(ankc.check(null, clean));

    // Violations must come back ordered: row first, then family, then qualifier
    Mutation dirty = new Mutation("Row#1");
    dirty.put("Colf$2", "Colq%3", new Value("value".getBytes()));
    assertEquals(
        ImmutableList.of(AlphaNumKeyConstraint.NON_ALPHA_NUM_ROW,
            AlphaNumKeyConstraint.NON_ALPHA_NUM_COLF, AlphaNumKeyConstraint.NON_ALPHA_NUM_COLQ),
        ankc.check(null, dirty));
  }

  @Test
  public void testGetViolationDescription() {
    // Each known violation code maps to its message; unknown codes map to null
    assertEquals(AlphaNumKeyConstraint.ROW_VIOLATION_MESSAGE,
        ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_ROW));
    assertEquals(AlphaNumKeyConstraint.COLF_VIOLATION_MESSAGE,
        ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_COLF));
    assertEquals(AlphaNumKeyConstraint.COLQ_VIOLATION_MESSAGE,
        ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_COLQ));
    assertNull(ankc.getViolationDescription((short) 4));
  }
}
| 3,258 |
0 | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.dirlist;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import java.util.ArrayList;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.core.util.Pair;
import org.apache.accumulo.examples.cli.BatchWriterOpts;
import org.apache.accumulo.examples.cli.ScannerOpts;
import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
import org.apache.accumulo.test.functional.ConfigurableMacBase;
import org.apache.hadoop.conf.Configuration;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
/**
 * Integration test for the dirlist example's FileCount: seeds a small directory tree into a
 * table and verifies the per-directory count entries FileCount produces.
 */
public class CountIT extends ConfigurableMacBase {
  private AccumuloClient client;
  private String tableName;
  @Override
  protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
    // native maps are not available in the test environment
    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, "false");
  }
  // Seeds a fresh table with 3 directories and 3 files (one file written twice with different
  // timestamps, which still counts as a single file).
  @BeforeEach
  public void setupInstance() throws Exception {
    tableName = getUniqueNames(1)[0];
    client = Accumulo.newClient().from(getClientProperties()).build();
    client.tableOperations().create(tableName);
    try (BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig())) {
      ColumnVisibility cv = new ColumnVisibility();
      // / has 1 dir
      // /local has 2 dirs 1 file
      // /local/user1 has 2 files
      bw.addMutation(Ingest.buildMutation(cv, "/local", true, false, true, 272, 12345, null));
      bw.addMutation(Ingest.buildMutation(cv, "/local/user1", true, false, true, 272, 12345, null));
      bw.addMutation(Ingest.buildMutation(cv, "/local/user2", true, false, true, 272, 12345, null));
      bw.addMutation(
          Ingest.buildMutation(cv, "/local/file", false, false, false, 1024, 12345, null));
      bw.addMutation(
          Ingest.buildMutation(cv, "/local/file", false, false, false, 1024, 23456, null));
      bw.addMutation(
          Ingest.buildMutation(cv, "/local/user1/file1", false, false, false, 2024, 12345, null));
      bw.addMutation(
          Ingest.buildMutation(cv, "/local/user1/file2", false, false, false, 1028, 23456, null));
    }
  }
  @AfterEach
  public void teardown() {
    client.close();
  }
  @Test
  public void test() throws Exception {
    ScannerOpts scanOpts = new ScannerOpts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    FileCount fc = new FileCount(client, tableName, Authorizations.EMPTY, new ColumnVisibility(),
        scanOpts, bwOpts);
    // expected "dir:counts" values are dirCount,fileCount,recursiveDirCount,recursiveFileCount
    ArrayList<Pair<String,String>> expected = new ArrayList<>();
    expected.add(new Pair<>(QueryUtil.getRow("").toString(), "1,0,3,3"));
    expected.add(new Pair<>(QueryUtil.getRow("/local").toString(), "2,1,2,3"));
    expected.add(new Pair<>(QueryUtil.getRow("/local/user1").toString(), "0,2,0,2"));
    expected.add(new Pair<>(QueryUtil.getRow("/local/user2").toString(), "0,0,0,0"));
    int actualCount = 0;
    try (Scanner scanner = client.createScanner(tableName, new Authorizations())) {
      scanner.fetchColumn("dir", "counts");
      // no counts exist until FileCount runs
      assertFalse(scanner.iterator().hasNext());
      fc.run();
      for (Entry<Key,Value> e : scanner) {
        assertEquals(e.getKey().getRow().toString(), expected.get(actualCount).getFirst());
        assertEquals(e.getValue().toString(), expected.get(actualCount).getSecond());
        actualCount++;
      }
    }
    assertEquals(expected.size(), actualCount);
  }
}
| 3,259 |
0 | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples/filedata/ChunkCombinerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.filedata;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
public class ChunkCombinerTest {
  /**
   * Minimal in-memory SortedKeyValueIterator backed by a SortedMap, used to feed the iterator
   * under test without a running tablet server. Only inclusive column-family filtering is
   * supported.
   */
  public static class MapIterator implements SortedKeyValueIterator<Key,Value> {
    private Iterator<Entry<Key,Value>> iter;
    // current entry; null means exhausted (hasTop() == false)
    private Entry<Key,Value> entry;
    Collection<ByteSequence> columnFamilies;
    private final SortedMap<Key,Value> map;
    private Range range;
    @Override
    public MapIterator deepCopy(IteratorEnvironment env) {
      // copies share the backing map but iterate independently
      return new MapIterator(map);
    }
    private MapIterator(SortedMap<Key,Value> map) {
      this.map = map;
      iter = map.entrySet().iterator();
      this.range = new Range();
      if (iter.hasNext())
        entry = iter.next();
      else
        entry = null;
    }
    @Override
    public Key getTopKey() {
      return entry.getKey();
    }
    @Override
    public Value getTopValue() {
      return entry.getValue();
    }
    @Override
    public boolean hasTop() {
      return entry != null;
    }
    @Override
    public void next() {
      // advance to the next entry whose column family passes the filter; an empty
      // columnFamilies collection means "accept all families"
      entry = null;
      while (iter.hasNext()) {
        entry = iter.next();
        if (columnFamilies.size() > 0
            && !columnFamilies.contains(entry.getKey().getColumnFamilyData())) {
          entry = null;
          continue;
        }
        // a key past the range end terminates iteration
        if (range.afterEndKey(entry.getKey()))
          entry = null;
        break;
      }
    }
    @Override
    public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) {
      if (!inclusive) {
        throw new IllegalArgumentException("can only do inclusive colf filtering");
      }
      this.columnFamilies = columnFamilies;
      this.range = range;
      Key key = range.getStartKey();
      if (key == null) {
        key = new Key();
      }
      // position at the first entry at or after the range start that passes the filter
      iter = map.tailMap(key).entrySet().iterator();
      next();
      while (hasTop() && range.beforeStartKey(getTopKey())) {
        next();
      }
    }
    @Override
    public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
        IteratorEnvironment env) {
      // never used in these tests; instances are constructed directly from a map
      throw new UnsupportedOperationException();
    }
  }
private static TreeMap<Key,Value> allRows;
private static TreeMap<Key,Value> allCRows;
private static TreeMap<Key,Value> allCOnlyRows;
private static TreeMap<Key,Value> badrow;
@BeforeAll
protected static void setUp() {
TreeMap<Key,Value> row1 = new TreeMap<>();
TreeMap<Key,Value> row2 = new TreeMap<>();
TreeMap<Key,Value> row3 = new TreeMap<>();
allRows = new TreeMap<>();
TreeMap<Key,Value> cRow1 = new TreeMap<>();
TreeMap<Key,Value> cRow2 = new TreeMap<>();
TreeMap<Key,Value> cRow3 = new TreeMap<>();
allCRows = new TreeMap<>();
TreeMap<Key,Value> cOnlyRow1 = new TreeMap<>();
TreeMap<Key,Value> cOnlyRow2 = new TreeMap<>();
TreeMap<Key,Value> cOnlyRow3 = new TreeMap<>();
allCOnlyRows = new TreeMap<>();
badrow = new TreeMap<>();
String refs = FileDataIngest.REFS_CF.toString();
String fileext = FileDataIngest.REFS_FILE_EXT;
String filename = FileDataIngest.REFS_ORIG_FILE;
String chunk_cf = FileDataIngest.CHUNK_CF.toString();
row1.put(new Key("row1", refs, "hash1\0" + fileext, "C"), new Value("jpg".getBytes()));
row1.put(new Key("row1", refs, "hash1\0" + filename, "D"), new Value("foo1.jpg".getBytes()));
row1.put(new Key("row1", chunk_cf, "0000", "A"), new Value("V1".getBytes()));
row1.put(new Key("row1", chunk_cf, "0000", "B"), new Value("V1".getBytes()));
row1.put(new Key("row1", chunk_cf, "0001", "A"), new Value("V2".getBytes()));
row1.put(new Key("row1", chunk_cf, "0001", "B"), new Value("V2".getBytes()));
cRow1.put(new Key("row1", refs, "hash1\0" + fileext, "C"), new Value("jpg".getBytes()));
cRow1.put(new Key("row1", refs, "hash1\0" + filename, "D"), new Value("foo1.jpg".getBytes()));
cRow1.put(new Key("row1", chunk_cf, "0000", "(C)|(D)"), new Value("V1".getBytes()));
cRow1.put(new Key("row1", chunk_cf, "0001", "(C)|(D)"), new Value("V2".getBytes()));
cOnlyRow1.put(new Key("row1", chunk_cf, "0000", "(C)|(D)"), new Value("V1".getBytes()));
cOnlyRow1.put(new Key("row1", chunk_cf, "0001", "(C)|(D)"), new Value("V2".getBytes()));
row2.put(new Key("row2", refs, "hash1\0" + fileext, "A"), new Value("jpg".getBytes()));
row2.put(new Key("row2", refs, "hash1\0" + filename, "B"), new Value("foo1.jpg".getBytes()));
row2.put(new Key("row2", chunk_cf, "0000", "A|B"), new Value("V1".getBytes()));
row2.put(new Key("row2", chunk_cf, "0000", "A"), new Value("V1".getBytes()));
row2.put(new Key("row2", chunk_cf, "0000", "(A)|(B)"), new Value("V1".getBytes()));
row2.put(new Key("row2a", chunk_cf, "0000", "C"), new Value("V1".getBytes()));
cRow2.put(new Key("row2", refs, "hash1\0" + fileext, "A"), new Value("jpg".getBytes()));
cRow2.put(new Key("row2", refs, "hash1\0" + filename, "B"), new Value("foo1.jpg".getBytes()));
cRow2.put(new Key("row2", chunk_cf, "0000", "(A)|(B)"), new Value("V1".getBytes()));
cOnlyRow2.put(new Key("row2", chunk_cf, "0000", "(A)|(B)"), new Value("V1".getBytes()));
row3.put(new Key("row3", refs, "hash1\0w", "(A&B)|(C&(D|E))"), new Value("".getBytes()));
row3.put(new Key("row3", refs, "hash1\0x", "A&B"), new Value("".getBytes()));
row3.put(new Key("row3", refs, "hash1\0y", "(A&B)"), new Value("".getBytes()));
row3.put(new Key("row3", refs, "hash1\0z", "(F|G)&(D|E)"), new Value("".getBytes()));
row3.put(new Key("row3", chunk_cf, "0000", "(A&B)|(C&(D|E))", 10), new Value("V1".getBytes()));
row3.put(new Key("row3", chunk_cf, "0000", "A&B", 20), new Value("V1".getBytes()));
row3.put(new Key("row3", chunk_cf, "0000", "(A&B)", 10), new Value("V1".getBytes()));
row3.put(new Key("row3", chunk_cf, "0000", "(F|G)&(D|E)", 10), new Value("V1".getBytes()));
cRow3.put(new Key("row3", refs, "hash1\0w", "(A&B)|(C&(D|E))"), new Value("".getBytes()));
cRow3.put(new Key("row3", refs, "hash1\0x", "A&B"), new Value("".getBytes()));
cRow3.put(new Key("row3", refs, "hash1\0y", "(A&B)"), new Value("".getBytes()));
cRow3.put(new Key("row3", refs, "hash1\0z", "(F|G)&(D|E)"), new Value("".getBytes()));
cRow3.put(new Key("row3", chunk_cf, "0000", "((F|G)&(D|E))|(A&B)|(C&(D|E))", 20),
new Value("V1".getBytes()));
cOnlyRow3.put(new Key("row3", chunk_cf, "0000", "((F|G)&(D|E))|(A&B)|(C&(D|E))", 20),
new Value("V1".getBytes()));
badrow.put(new Key("row1", chunk_cf, "0000", "A"), new Value("V1".getBytes()));
badrow.put(new Key("row1", chunk_cf, "0000", "B"), new Value("V2".getBytes()));
allRows.putAll(row1);
allRows.putAll(row2);
allRows.putAll(row3);
allCRows.putAll(cRow1);
allCRows.putAll(cRow2);
allCRows.putAll(cRow3);
allCOnlyRows.putAll(cOnlyRow1);
allCOnlyRows.putAll(cOnlyRow2);
allCOnlyRows.putAll(cOnlyRow3);
}
private static final Collection<ByteSequence> emptyColfs = new HashSet<>();
@Test
public void test1() throws IOException {
runTest(false, allRows, allCRows, emptyColfs);
runTest(true, allRows, allCRows, emptyColfs);
runTest(false, allRows, allCOnlyRows, Collections.singleton(FileDataIngest.CHUNK_CF_BS));
runTest(true, allRows, allCOnlyRows, Collections.singleton(FileDataIngest.CHUNK_CF_BS));
try {
runTest(true, badrow, null, emptyColfs);
assertNotNull(null);
} catch (RuntimeException e) {
assertNull(null);
}
}
private void runTest(boolean reseek, TreeMap<Key,Value> source, TreeMap<Key,Value> result,
Collection<ByteSequence> cols) throws IOException {
MapIterator src = new MapIterator(source);
SortedKeyValueIterator<Key,Value> iter = new ChunkCombiner();
iter.init(src, null, null);
iter = iter.deepCopy(null);
iter.seek(new Range(), cols, true);
TreeMap<Key,Value> seen = new TreeMap<>();
while (iter.hasTop()) {
assertFalse(seen.containsKey(iter.getTopKey()), "already contains " + iter.getTopKey());
seen.put(new Key(iter.getTopKey()), new Value(iter.getTopValue()));
if (reseek)
iter.seek(new Range(iter.getTopKey().followingKey(PartialKey.ROW_COLFAM_COLQUAL), true,
null, true), cols, true);
else
iter.next();
}
assertEquals(result, seen);
}
}
| 3,260 |
0 | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.filedata;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.KeyValue;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
/**
 * Unit tests for {@link ChunkInputStream}: reading well-formed chunk streams with buffers smaller
 * and larger than the chunk size, error behavior on malformed chunk data, mixed chunk sizes, and
 * single-byte reads.
 */
public class ChunkInputStreamTest {
  // logger category fixed to this test class (was ChunkInputStream.class)
  private static final Logger log = LoggerFactory.getLogger(ChunkInputStreamTest.class);
  // well-formed data: refs entries followed by correctly numbered chunks
  private List<Entry<Key,Value>> data;
  // malformed data: missing/out-of-order chunk indexes and stray column families
  private List<Entry<Key,Value>> baddata;
  // data with more than one chunk size for the same row
  private List<Entry<Key,Value>> multidata;

  @BeforeEach
  public void setupData() {
    data = new ArrayList<>();
    addData(data, "a", "refs", "id\0ext", "A&B", "ext");
    addData(data, "a", "refs", "id\0name", "A&B", "name");
    addData(data, "a", "~chunk", 100, 0, "A&B", "asdfjkl;");
    addData(data, "a", "~chunk", 100, 1, "A&B", "");
    addData(data, "b", "refs", "id\0ext", "A&B", "ext");
    addData(data, "b", "refs", "id\0name", "A&B", "name");
    addData(data, "b", "~chunk", 100, 0, "A&B", "qwertyuiop");
    addData(data, "b", "~chunk", 100, 0, "B&C", "qwertyuiop");
    addData(data, "b", "~chunk", 100, 1, "A&B", "");
    addData(data, "b", "~chunk", 100, 1, "B&C", "");
    addData(data, "b", "~chunk", 100, 1, "D", "");
    addData(data, "c", "~chunk", 100, 0, "A&B", "asdfjkl;");
    addData(data, "c", "~chunk", 100, 1, "A&B", "asdfjkl;");
    addData(data, "c", "~chunk", 100, 2, "A&B", "");
    addData(data, "d", "~chunk", 100, 0, "A&B", "");
    addData(data, "e", "~chunk", 100, 0, "A&B", "asdfjkl;");
    addData(data, "e", "~chunk", 100, 1, "A&B", "");
    baddata = new ArrayList<>();
    addData(baddata, "a", "~chunk", 100, 0, "A", "asdfjkl;");
    addData(baddata, "b", "~chunk", 100, 0, "B", "asdfjkl;");
    addData(baddata, "b", "~chunk", 100, 2, "C", "");
    addData(baddata, "c", "~chunk", 100, 0, "D", "asdfjkl;");
    addData(baddata, "c", "~chunk", 100, 2, "E", "");
    addData(baddata, "d", "~chunk", 100, 0, "F", "asdfjkl;");
    addData(baddata, "d", "~chunk", 100, 1, "G", "");
    addData(baddata, "d", "~zzzzz", "colq", "H", "");
    addData(baddata, "e", "~chunk", 100, 0, "I", "asdfjkl;");
    addData(baddata, "e", "~chunk", 100, 1, "J", "");
    addData(baddata, "e", "~chunk", 100, 2, "I", "asdfjkl;");
    addData(baddata, "f", "~chunk", 100, 2, "K", "asdfjkl;");
    addData(baddata, "g", "~chunk", 100, 0, "L", "");
    multidata = new ArrayList<>();
    addData(multidata, "a", "~chunk", 100, 0, "A&B", "asdfjkl;");
    addData(multidata, "a", "~chunk", 100, 1, "A&B", "");
    addData(multidata, "a", "~chunk", 200, 0, "B&C", "asdfjkl;");
    addData(multidata, "b", "~chunk", 100, 0, "A&B", "asdfjkl;");
    addData(multidata, "b", "~chunk", 200, 0, "B&C", "asdfjkl;");
    addData(multidata, "b", "~chunk", 200, 1, "B&C", "asdfjkl;");
    addData(multidata, "c", "~chunk", 100, 0, "A&B", "asdfjkl;");
    addData(multidata, "c", "~chunk", 100, 1, "B&C", "");
  }

  /** Adds a single key/value entry with a plain string column qualifier. */
  private static void addData(List<Entry<Key,Value>> data, String row, String cf, String cq,
      String vis, String value) {
    data.add(new KeyValue(new Key(row, cf, cq, vis), value.getBytes()));
  }

  /**
   * Adds a chunk entry whose column qualifier encodes the chunk size and chunk index as two
   * 4-byte big-endian ints (the on-disk chunk CQ format used by FileDataIngest).
   */
  private static void addData(List<Entry<Key,Value>> data, String row, String cf, int chunkSize,
      int chunkCount, String vis, String value) {
    Text chunkCQ = new Text(FileDataIngest.intToBytes(chunkSize));
    chunkCQ.append(FileDataIngest.intToBytes(chunkCount), 0, 4);
    data.add(new KeyValue(new Key(new Text(row), new Text(cf), chunkCQ, new Text(vis)),
        value.getBytes()));
  }

  @Test
  public void testExceptionOnMultipleSetSourceWithoutClose() throws IOException {
    PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(data.iterator());
    try (ChunkInputStream cis = new ChunkInputStream(pi)) {
      // setting a source while one is already active must fail
      assertThrows(IOException.class, () -> cis.setSource(pi));
    }
  }

  @Test
  public void testExceptionOnGetVisBeforeClose() throws IOException {
    PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(data.iterator());
    try (ChunkInputStream cis = new ChunkInputStream(pi)) {
      // visibilities are only available after the stream is closed
      assertThrows(RuntimeException.class, cis::getVisibilities);
      cis.close();
      cis.getVisibilities();
      // NOTE(review): the try-with-resources closes cis a second time here — this test
      // assumes close() is idempotent; confirm against ChunkInputStream.
    }
  }

  @Test
  public void testReadIntoBufferSmallerThanChunks() throws IOException {
    byte[] b = new byte[5];
    int read;
    PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(data.iterator());
    // row "a": 8 bytes delivered across two reads
    try (ChunkInputStream cis = new ChunkInputStream(pi)) {
      assertEquals(read = cis.read(b), 5);
      assertEquals(new String(b, 0, read), "asdfj");
      assertEquals(read = cis.read(b), 3);
      assertEquals(new String(b, 0, read), "kl;");
      assertEquals(read = cis.read(b), -1);
    }
    // row "b": duplicate chunks under multiple visibilities read once, all vis collected
    try (ChunkInputStream cis = new ChunkInputStream(pi)) {
      assertEquals(read = cis.read(b), 5);
      assertEquals(new String(b, 0, read), "qwert");
      assertEquals(read = cis.read(b), 5);
      assertEquals(new String(b, 0, read), "yuiop");
      assertEquals(read = cis.read(b), -1);
      assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
    }
    // row "c": two full chunks concatenated
    try (ChunkInputStream cis = new ChunkInputStream(pi)) {
      assertEquals(read = cis.read(b), 5);
      assertEquals(new String(b, 0, read), "asdfj");
      assertEquals(read = cis.read(b), 5);
      assertEquals(new String(b, 0, read), "kl;as");
      assertEquals(read = cis.read(b), 5);
      assertEquals(new String(b, 0, read), "dfjkl");
      assertEquals(read = cis.read(b), 1);
      assertEquals(new String(b, 0, read), ";");
      assertEquals(read = cis.read(b), -1);
      assertEquals(cis.getVisibilities().toString(), "[A&B]");
    }
    // row "d": empty file
    try (ChunkInputStream cis = new ChunkInputStream(pi)) {
      assertEquals(read = cis.read(b), -1);
    }
    // row "e": final file
    try (ChunkInputStream cis = new ChunkInputStream(pi)) {
      assertEquals(read = cis.read(b), 5);
      assertEquals(new String(b, 0, read), "asdfj");
      assertEquals(read = cis.read(b), 3);
      assertEquals(new String(b, 0, read), "kl;");
      assertEquals(read = cis.read(b), -1);
    }
    assertFalse(pi.hasNext());
  }

  @Test
  public void testReadIntoBufferLargerThanChunks() throws IOException {
    byte[] b = new byte[20];
    int read;
    PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(data.iterator());
    try (ChunkInputStream cis = new ChunkInputStream(pi)) {
      assertEquals(read = cis.read(b), 8);
      assertEquals(new String(b, 0, read), "asdfjkl;");
      assertEquals(read = cis.read(b), -1);
    }
    try (ChunkInputStream cis = new ChunkInputStream(pi)) {
      assertEquals(read = cis.read(b), 10);
      assertEquals(new String(b, 0, read), "qwertyuiop");
      assertEquals(read = cis.read(b), -1);
      assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
    }
    try (ChunkInputStream cis = new ChunkInputStream(pi)) {
      assertEquals(read = cis.read(b), 16);
      assertEquals(new String(b, 0, read), "asdfjkl;asdfjkl;");
      assertEquals(read = cis.read(b), -1);
      assertEquals(cis.getVisibilities().toString(), "[A&B]");
    }
    try (ChunkInputStream cis = new ChunkInputStream(pi)) {
      assertEquals(read = cis.read(b), -1);
    }
    try (ChunkInputStream cis = new ChunkInputStream(pi)) {
      assertEquals(read = cis.read(b), 8);
      assertEquals(new String(b, 0, read), "asdfjkl;");
      assertEquals(read = cis.read(b), -1);
    }
    assertFalse(pi.hasNext());
  }

  /** Asserts that reading from {@code cis} fails with an {@link IOException}. */
  private static void assumeExceptionOnRead(ChunkInputStream cis, byte[] b) {
    var e = assertThrows(IOException.class, () -> cis.read(b));
    log.debug("EXCEPTION {}", e.getMessage());
  }

  /** Asserts that closing {@code cis} fails with an {@link IOException}. */
  private static void assumeExceptionOnClose(ChunkInputStream cis) {
    var e = assertThrows(IOException.class, cis::close);
    log.debug("EXCEPTION {}", e.getMessage());
  }

  @Test
  public void testBadData() throws IOException {
    ChunkInputStream cis = new ChunkInputStream();
    byte[] b = new byte[20];
    int read;
    final PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(baddata.iterator());
    cis.setSource(pi);
    assumeExceptionOnRead(cis, b);
    assumeExceptionOnClose(cis);
    // can still get visibilities after exception -- bad?
    assertEquals(cis.getVisibilities().toString(), "[A]");
    cis.setSource(pi);
    assumeExceptionOnRead(cis, b);
    assumeExceptionOnClose(cis);
    assertEquals(cis.getVisibilities().toString(), "[B, C]");
    cis.setSource(pi);
    assumeExceptionOnRead(cis, b);
    assumeExceptionOnClose(cis);
    assertEquals(cis.getVisibilities().toString(), "[D, E]");
    // row "d": chunks are well-formed, but a trailing ~zzzzz entry follows
    cis.setSource(pi);
    assertEquals(read = cis.read(b), 8);
    assertEquals(new String(b, 0, read), "asdfjkl;");
    assertEquals(read = cis.read(b), -1);
    assertEquals(cis.getVisibilities().toString(), "[F, G]");
    cis.close();
    cis.setSource(pi);
    assumeExceptionOnRead(cis, b);
    cis.close();
    assertEquals(cis.getVisibilities().toString(), "[I, J]");
    assertThrows(IOException.class, () -> cis.setSource(pi));
    assumeExceptionOnClose(cis);
    assertEquals(cis.getVisibilities().toString(), "[K]");
    cis.setSource(pi);
    assertEquals(read = cis.read(b), -1);
    assertEquals(cis.getVisibilities().toString(), "[L]");
    cis.close();
    assertFalse(pi.hasNext());
    final PeekingIterator<Entry<Key,Value>> pi2 = Iterators.peekingIterator(baddata.iterator());
    cis.setSource(pi2);
    assumeExceptionOnClose(cis);
  }

  @Test
  public void testBadDataWithoutClosing() throws IOException {
    ChunkInputStream cis = new ChunkInputStream();
    byte[] b = new byte[20];
    int read;
    final PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(baddata.iterator());
    cis.setSource(pi);
    assumeExceptionOnRead(cis, b);
    // can still get visibilities after exception -- bad?
    assertEquals(cis.getVisibilities().toString(), "[A]");
    cis.setSource(pi);
    assumeExceptionOnRead(cis, b);
    assertEquals(cis.getVisibilities().toString(), "[B, C]");
    cis.setSource(pi);
    assumeExceptionOnRead(cis, b);
    assertEquals(cis.getVisibilities().toString(), "[D, E]");
    cis.setSource(pi);
    assertEquals(read = cis.read(b), 8);
    assertEquals(new String(b, 0, read), "asdfjkl;");
    assertEquals(read = cis.read(b), -1);
    assertEquals(cis.getVisibilities().toString(), "[F, G]");
    cis.close();
    cis.setSource(pi);
    assumeExceptionOnRead(cis, b);
    assertEquals(cis.getVisibilities().toString(), "[I, J]");
    // setting a new source without closing the previous one must fail
    assertThrows(IOException.class, () -> cis.setSource(pi));
    assertEquals(cis.getVisibilities().toString(), "[K]");
    cis.setSource(pi);
    assertEquals(read = cis.read(b), -1);
    assertEquals(cis.getVisibilities().toString(), "[L]");
    cis.close();
    assertFalse(pi.hasNext());
    final PeekingIterator<Entry<Key,Value>> pi2 = Iterators.peekingIterator(baddata.iterator());
    cis.setSource(pi2);
    assumeExceptionOnClose(cis);
  }

  @Test
  public void testMultipleChunkSizes() throws IOException {
    ChunkInputStream cis = new ChunkInputStream();
    byte[] b = new byte[20];
    int read;
    PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(multidata.iterator());
    // row "a": complete 100-byte-chunk file; the trailing 200-byte chunk starts the next file
    cis.setSource(pi);
    assertEquals(read = cis.read(b), 8);
    assertEquals(read = cis.read(b), -1);
    cis.close();
    assertEquals(cis.getVisibilities().toString(), "[A&B]");
    // row "b": chunk size changes mid-file, which is an error
    cis.setSource(pi);
    assumeExceptionOnRead(cis, b);
    assertEquals(cis.getVisibilities().toString(), "[A&B]");
    cis.setSource(pi);
    assertEquals(read = cis.read(b), 8);
    assertEquals(new String(b, 0, read), "asdfjkl;");
    assertEquals(read = cis.read(b), -1);
    cis.close();
    assertEquals(cis.getVisibilities().toString(), "[A&B, B&C]");
    assertFalse(pi.hasNext());
  }

  @Test
  public void testSingleByteRead() throws IOException {
    ChunkInputStream cis = new ChunkInputStream();
    PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(data.iterator());
    cis.setSource(pi);
    assertEquals((byte) 'a', (byte) cis.read());
    assertEquals((byte) 's', (byte) cis.read());
    assertEquals((byte) 'd', (byte) cis.read());
    assertEquals((byte) 'f', (byte) cis.read());
    assertEquals((byte) 'j', (byte) cis.read());
    assertEquals((byte) 'k', (byte) cis.read());
    assertEquals((byte) 'l', (byte) cis.read());
    assertEquals((byte) ';', (byte) cis.read());
    assertEquals(cis.read(), -1);
    cis.close();
    assertEquals(cis.getVisibilities().toString(), "[A&B]");
  }
}
| 3,261 |
0 | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputFormatIT.java | /// *
// * Licensed to the Apache Software Foundation (ASF) under one or more
// * contributor license agreements. See the NOTICE file distributed with
// * this work for additional information regarding copyright ownership.
// * The ASF licenses this file to You under the Apache License, Version 2.0
// * (the "License"); you may not use this file except in compliance with
// * the License. You may obtain a copy of the License at
// *
// * http://www.apache.org/licenses/LICENSE-2.0
// *
// * Unless required by applicable law or agreed to in writing, software
// * distributed under the License is distributed on an "AS IS" BASIS,
// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// * See the License for the specific language governing permissions and
// * limitations under the License.
// */
//
// NOTE(review): this integration test is commented out in its entirety. It appears to rely on
// deprecated mapreduce client APIs (see the @SuppressWarnings("deprecation") on run() and the
// ChunkInputFormat.setZooKeeperInstance/setConnectorInfo calls below) -- confirm whether it
// should be ported to the current API or removed instead of being kept as dead commented-out
// code.
// package org.apache.accumulo.examples.filedata;
//
// import static org.junit.jupiter.api.Assertions.assertEquals;
// import static org.junit.jupiter.api.Assertions.assertFalse;
// import static org.junit.jupiter.api.Assertions.assertNotNull;
// import static org.junit.jupiter.api.Assertions.fail;
//
// import java.io.File;
// import java.io.IOException;
// import java.io.InputStream;
// import java.util.ArrayList;
// import java.util.List;
// import java.util.Map.Entry;
//
// import org.apache.accumulo.core.client.Accumulo;
// import org.apache.accumulo.core.client.AccumuloClient;
// import org.apache.accumulo.core.client.BatchWriter;
// import org.apache.accumulo.core.client.BatchWriterConfig;
// import org.apache.accumulo.core.conf.Property;
// import org.apache.accumulo.core.data.Key;
// import org.apache.accumulo.core.data.Mutation;
// import org.apache.accumulo.core.data.Value;
// import org.apache.accumulo.core.security.Authorizations;
// import org.apache.accumulo.core.security.ColumnVisibility;
// import org.apache.accumulo.harness.AccumuloClusterHarness;
// import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
// import org.apache.hadoop.conf.Configuration;
// import org.apache.hadoop.conf.Configured;
// import org.apache.hadoop.mapreduce.Job;
// import org.apache.hadoop.mapreduce.Mapper;
// import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
// import org.apache.hadoop.util.Tool;
// import org.apache.hadoop.util.ToolRunner;
// import org.junit.jupiter.api.AfterEach;
// import org.junit.jupiter.api.BeforeAll;
// import org.junit.jupiter.api.BeforeEach;
// import org.junit.jupiter.api.Test;
//
// import com.google.common.collect.ArrayListMultimap;
// import com.google.common.collect.Multimap;
//
// public class ChunkInputFormatIT extends AccumuloClusterHarness {
// @Override
// public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
// cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, "false");
// }
//
// // track errors in the map reduce job; jobs insert a dummy error for the map and cleanup tasks
/// (to
// // ensure test correctness), so error tests should check to see if there is at least one error
// // (could be more depending on the test) rather than zero
// private static final Multimap<String,AssertionError> assertionErrors =
/// ArrayListMultimap.create();
//
// private static final Authorizations AUTHS = new Authorizations("A", "B", "C", "D");
//
// private static List<Entry<Key,Value>> data;
// private static List<Entry<Key,Value>> baddata;
//
// private AccumuloClient client;
// private String tableName;
//
// @BeforeEach
// public void setupInstance() throws Exception {
// client = Accumulo.newClient().from(getClientProps()).build();
// tableName = getUniqueNames(1)[0];
// client.securityOperations().changeUserAuthorizations(client.whoami(), AUTHS);
// }
//
// @AfterEach
// public void teardown() {
// client.close();
// }
//
// @BeforeAll
// public static void setupClass() {
// System.setProperty("hadoop.tmp.dir", System.getProperty("user.dir") + "/target/hadoop-tmp");
//
// data = new ArrayList<>();
// ChunkInputStreamIT.addData(data, "a", "refs", "ida\0ext", "A&B", "ext");
// ChunkInputStreamIT.addData(data, "a", "refs", "ida\0name", "A&B", "name");
// ChunkInputStreamIT.addData(data, "a", "~chunk", 100, 0, "A&B", "asdfjkl;");
// ChunkInputStreamIT.addData(data, "a", "~chunk", 100, 1, "A&B", "");
// ChunkInputStreamIT.addData(data, "b", "refs", "ida\0ext", "A&B", "ext");
// ChunkInputStreamIT.addData(data, "b", "refs", "ida\0name", "A&B", "name");
// ChunkInputStreamIT.addData(data, "b", "~chunk", 100, 0, "A&B", "qwertyuiop");
// ChunkInputStreamIT.addData(data, "b", "~chunk", 100, 0, "B&C", "qwertyuiop");
// ChunkInputStreamIT.addData(data, "b", "~chunk", 100, 1, "A&B", "");
// ChunkInputStreamIT.addData(data, "b", "~chunk", 100, 1, "B&C", "");
// ChunkInputStreamIT.addData(data, "b", "~chunk", 100, 1, "D", "");
// baddata = new ArrayList<>();
// ChunkInputStreamIT.addData(baddata, "c", "refs", "ida\0ext", "A&B", "ext");
// ChunkInputStreamIT.addData(baddata, "c", "refs", "ida\0name", "A&B", "name");
// }
//
// public static void entryEquals(Entry<Key,Value> e1, Entry<Key,Value> e2) {
// assertEquals(e1.getKey(), e2.getKey());
// assertEquals(e1.getValue(), e2.getValue());
// }
//
// public static class CIFTester extends Configured implements Tool {
// public static class TestMapper
// extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
// int count = 0;
//
// @Override
// protected void map(List<Entry<Key,Value>> key, InputStream value, Context context)
// throws IOException {
// String table = context.getConfiguration().get("MRTester_tableName");
// assertNotNull(table);
//
// byte[] b = new byte[20];
// int read;
// try (value) {
// switch (count) {
// case 0:
// assertEquals(key.size(), 2);
// entryEquals(key.get(0), data.get(0));
// entryEquals(key.get(1), data.get(1));
// assertEquals(read = value.read(b), 8);
// assertEquals(new String(b, 0, read), "asdfjkl;");
// assertEquals(read = value.read(b), -1);
// break;
// case 1:
// assertEquals(key.size(), 2);
// entryEquals(key.get(0), data.get(4));
// entryEquals(key.get(1), data.get(5));
// assertEquals(read = value.read(b), 10);
// assertEquals(new String(b, 0, read), "qwertyuiop");
// assertEquals(read = value.read(b), -1);
// break;
// default:
// fail();
// }
// } catch (AssertionError e) {
// assertionErrors.put(table, e);
// }
// count++;
// }
//
// @Override
// protected void cleanup(Context context) {
// String table = context.getConfiguration().get("MRTester_tableName");
// assertNotNull(table);
//
// try {
// assertEquals(2, count);
// } catch (AssertionError e) {
// assertionErrors.put(table, e);
// }
// }
// }
//
// public static class TestNoClose
// extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
// int count = 0;
//
// @Override
// protected void map(List<Entry<Key,Value>> key, InputStream value, Context context)
// throws IOException, InterruptedException {
// String table = context.getConfiguration().get("MRTester_tableName");
// assertNotNull(table);
//
// byte[] b = new byte[5];
// int read;
// try {
// switch (count) {
// case 0:
// assertEquals(read = value.read(b), 5);
// assertEquals(new String(b, 0, read), "asdfj");
// break;
// default:
// fail();
// }
// } catch (AssertionError e) {
// assertionErrors.put(table, e);
// }
// count++;
// try {
// context.nextKeyValue();
// fail();
// } catch (IOException ioe) {
// assertionErrors.put(table + "_map_ioexception", new AssertionError(toString(), ioe));
// }
// }
// }
//
// public static class TestBadData
// extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
// @Override
// protected void map(List<Entry<Key,Value>> key, InputStream value, Context context) {
// String table = context.getConfiguration().get("MRTester_tableName");
// assertNotNull(table);
//
// byte[] b = new byte[20];
// try {
// assertEquals(key.size(), 2);
// entryEquals(key.get(0), baddata.get(0));
// entryEquals(key.get(1), baddata.get(1));
// } catch (AssertionError e) {
// assertionErrors.put(table, e);
// }
// try {
// assertFalse(value.read(b) > 0);
// try {
// fail();
// } catch (AssertionError e) {
// assertionErrors.put(table, e);
// }
// } catch (Exception e) {
// // expected, ignore
// }
// try {
// value.close();
// try {
// fail();
// } catch (AssertionError e) {
// assertionErrors.put(table, e);
// }
// } catch (Exception e) {
// // expected, ignore
// }
// }
// }
//
// @SuppressWarnings("deprecation")
// @Override
// public int run(String[] args) throws Exception {
// if (args.length != 2) {
// throw new IllegalArgumentException(
// "Usage : " + CIFTester.class.getName() + " <table> <mapperClass>");
// }
//
// String table = args[0];
// assertionErrors.put(table, new AssertionError("Dummy"));
// assertionErrors.put(table + "_map_ioexception", new AssertionError("Dummy_ioexception"));
// getConf().set("MRTester_tableName", table);
//
// Job job = Job.getInstance(getConf());
// job.setJobName(this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
// job.setJarByClass(this.getClass());
//
// job.setInputFormatClass(ChunkInputFormat.class);
//
// ChunkInputFormat.setZooKeeperInstance(job, getCluster().getClientConfig());
// ChunkInputFormat.setConnectorInfo(job, getAdminPrincipal(), getAdminToken());
// ChunkInputFormat.setInputTableName(job, table);
// ChunkInputFormat.setScanAuthorizations(job, AUTHS);
//
// @SuppressWarnings("unchecked")
// Class<? extends Mapper<?,?,?,?>> forName = (Class<? extends Mapper<?,?,?,?>>) Class
// .forName(args[1]);
// job.setMapperClass(forName);
// job.setMapOutputKeyClass(Key.class);
// job.setMapOutputValueClass(Value.class);
// job.setOutputFormatClass(NullOutputFormat.class);
//
// job.setNumReduceTasks(0);
//
// job.waitForCompletion(true);
//
// return job.isSuccessful() ? 0 : 1;
// }
//
// public static int main(String... args) throws Exception {
// Configuration conf = new Configuration();
// conf.set("mapreduce.framework.name", "local");
// conf.set("mapreduce.cluster.local.dir",
// new File(System.getProperty("user.dir"), "target/mapreduce-tmp").getAbsolutePath());
// return ToolRunner.run(conf, new CIFTester(), args);
// }
// }
//
// @Test
// public void test() throws Exception {
// client.tableOperations().create(tableName);
// BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
//
// for (Entry<Key,Value> e : data) {
// Key k = e.getKey();
// Mutation m = new Mutation(k.getRow());
// m.put(k.getColumnFamily(), k.getColumnQualifier(),
// new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
// bw.addMutation(m);
// }
// bw.close();
//
// assertEquals(0, CIFTester.main(tableName, CIFTester.TestMapper.class.getName()));
// assertEquals(1, assertionErrors.get(tableName).size());
// }
//
// @Test
// public void testErrorOnNextWithoutClose() throws Exception {
// client.tableOperations().create(tableName);
// BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
//
// for (Entry<Key,Value> e : data) {
// Key k = e.getKey();
// Mutation m = new Mutation(k.getRow());
// m.put(k.getColumnFamily(), k.getColumnQualifier(),
// new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
// bw.addMutation(m);
// }
// bw.close();
//
// assertEquals(1, CIFTester.main(tableName, CIFTester.TestNoClose.class.getName()));
// assertEquals(1, assertionErrors.get(tableName).size());
// // this should actually exist, in addition to the dummy entry
// assertEquals(2, assertionErrors.get(tableName + "_map_ioexception").size());
// }
//
// @Test
// public void testInfoWithoutChunks() throws Exception {
// client.tableOperations().create(tableName);
// BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
// for (Entry<Key,Value> e : baddata) {
// Key k = e.getKey();
// Mutation m = new Mutation(k.getRow());
// m.put(k.getColumnFamily(), k.getColumnQualifier(),
// new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
// bw.addMutation(m);
// }
// bw.close();
//
// assertEquals(0, CIFTester.main(tableName, CIFTester.TestBadData.class.getName()));
// assertEquals(1, assertionErrors.get(tableName).size());
// }
// }
| 3,262 |
0 | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples/filedata/KeyUtilTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.filedata;
import static org.junit.jupiter.api.Assertions.assertEquals;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
public class KeyUtilTest {

  /**
   * Round-trips the given parts through {@link KeyUtil#buildNullSepText(String...)} and
   * {@link KeyUtil#splitNullSepText(Text)} and verifies both the encoded length and that the
   * split reproduces the original parts.
   *
   * @param s the string parts to join with null separators and split back apart
   */
  public static void checkSeps(String... s) {
    Text t = KeyUtil.buildNullSepText(s);
    String[] rets = KeyUtil.splitNullSepText(t);

    int length = 0;
    for (String str : s)
      length += str.length();

    // JUnit's assertEquals convention is (expected, actual); the encoded text holds every part
    // plus one null separator byte between each adjacent pair of parts.
    assertEquals(length + s.length - 1, t.getLength());
    assertEquals(s.length, rets.length);

    for (int i = 0; i < s.length; i++)
      assertEquals(s[i], rets[i]);
  }

  @Test
  public void testNullSep() {
    // Exercise empty parts at the front, middle, end, and alone.
    checkSeps("abc", "d", "", "efgh");
    checkSeps("ab", "");
    checkSeps("abcde");
    checkSeps("");
    checkSeps("", "");
  }
}
| 3,263 |
0 | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamIT.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.filedata;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.KeyValue;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.harness.AccumuloClusterHarness;
import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
public class ChunkInputStreamIT extends AccumuloClusterHarness {
@Override
public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, "false");
}
private static final Authorizations AUTHS = new Authorizations("A", "B", "C", "D");
private AccumuloClient client;
private String tableName;
private List<Entry<Key,Value>> data;
@BeforeEach
public void setupInstance() throws Exception {
client = Accumulo.newClient().from(getClientProps()).build();
tableName = getUniqueNames(1)[0];
client.securityOperations().changeUserAuthorizations(client.whoami(), AUTHS);
}
@AfterEach
public void teardown() {
client.close();
}
@BeforeEach
public void setupData() {
data = new ArrayList<>();
addData(data, "a", "refs", "id\0ext", "A&B", "ext");
addData(data, "a", "refs", "id\0name", "A&B", "name");
addData(data, "a", "~chunk", 100, 0, "A&B", "asdfjkl;");
addData(data, "a", "~chunk", 100, 1, "A&B", "");
addData(data, "b", "refs", "id\0ext", "A&B", "ext");
addData(data, "b", "refs", "id\0name", "A&B", "name");
addData(data, "b", "~chunk", 100, 0, "A&B", "qwertyuiop");
addData(data, "b", "~chunk", 100, 0, "B&C", "qwertyuiop");
addData(data, "b", "~chunk", 100, 1, "A&B", "");
addData(data, "b", "~chunk", 100, 1, "B&C", "");
addData(data, "b", "~chunk", 100, 1, "D", "");
addData(data, "c", "~chunk", 100, 0, "A&B", "asdfjkl;");
addData(data, "c", "~chunk", 100, 1, "A&B", "asdfjkl;");
addData(data, "c", "~chunk", 100, 2, "A&B", "");
addData(data, "d", "~chunk", 100, 0, "A&B", "");
addData(data, "e", "~chunk", 100, 0, "A&B", "asdfjkl;");
addData(data, "e", "~chunk", 100, 1, "A&B", "");
List<Entry<Key,Value>> baddata = new ArrayList<>();
addData(baddata, "a", "~chunk", 100, 0, "A", "asdfjkl;");
addData(baddata, "b", "~chunk", 100, 0, "B", "asdfjkl;");
addData(baddata, "b", "~chunk", 100, 2, "C", "");
addData(baddata, "c", "~chunk", 100, 0, "D", "asdfjkl;");
addData(baddata, "c", "~chunk", 100, 2, "E", "");
addData(baddata, "d", "~chunk", 100, 0, "F", "asdfjkl;");
addData(baddata, "d", "~chunk", 100, 1, "G", "");
addData(baddata, "d", "~zzzzz", "colq", "H", "");
addData(baddata, "e", "~chunk", 100, 0, "I", "asdfjkl;");
addData(baddata, "e", "~chunk", 100, 1, "J", "");
addData(baddata, "e", "~chunk", 100, 2, "I", "asdfjkl;");
addData(baddata, "f", "~chunk", 100, 2, "K", "asdfjkl;");
addData(baddata, "g", "~chunk", 100, 0, "L", "");
List<Entry<Key,Value>> multidata = new ArrayList<>();
addData(multidata, "a", "~chunk", 100, 0, "A&B", "asdfjkl;");
addData(multidata, "a", "~chunk", 100, 1, "A&B", "");
addData(multidata, "a", "~chunk", 200, 0, "B&C", "asdfjkl;");
addData(multidata, "b", "~chunk", 100, 0, "A&B", "asdfjkl;");
addData(multidata, "b", "~chunk", 200, 0, "B&C", "asdfjkl;");
addData(multidata, "b", "~chunk", 200, 1, "B&C", "asdfjkl;");
addData(multidata, "c", "~chunk", 100, 0, "A&B", "asdfjkl;");
addData(multidata, "c", "~chunk", 100, 1, "B&C", "");
}
static void addData(List<Entry<Key,Value>> data, String row, String cf, String cq, String vis,
String value) {
data.add(new KeyValue(new Key(row, cf, cq, vis), value.getBytes()));
}
static void addData(List<Entry<Key,Value>> data, String row, String cf, int chunkSize,
int chunkCount, String vis, String value) {
Text chunkCQ = new Text(FileDataIngest.intToBytes(chunkSize));
chunkCQ.append(FileDataIngest.intToBytes(chunkCount), 0, 4);
data.add(new KeyValue(new Key(new Text(row), new Text(cf), chunkCQ, new Text(vis)),
value.getBytes()));
}
@Test
public void testWithAccumulo() throws AccumuloException, AccumuloSecurityException,
TableExistsException, TableNotFoundException, IOException {
client.tableOperations().create(tableName);
try (BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig())) {
for (Entry<Key,Value> e : data) {
Key k = e.getKey();
Mutation m = new Mutation(k.getRow());
m.put(k.getColumnFamily(), k.getColumnQualifier(),
new ColumnVisibility(k.getColumnVisibility()), e.getValue());
bw.addMutation(m);
}
}
try (Scanner scan = client.createScanner(tableName, AUTHS)) {
byte[] b = new byte[20];
int read;
PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(scan.iterator());
try (ChunkInputStream cis = new ChunkInputStream(pi)) {
assertEquals(8, read = cis.read(b));
assertEquals("asdfjkl;", new String(b, 0, read));
assertEquals(-1, cis.read(b));
}
try (ChunkInputStream cis = new ChunkInputStream(pi)) {
assertEquals(10, read = cis.read(b));
assertEquals("qwertyuiop", new String(b, 0, read));
assertEquals(-1, cis.read(b));
assertEquals("[A&B, B&C, D]", cis.getVisibilities().toString());
}
try (ChunkInputStream cis = new ChunkInputStream(pi)) {
assertEquals(16, read = cis.read(b));
assertEquals("asdfjkl;asdfjkl;", new String(b, 0, read));
assertEquals(-1, cis.read(b));
assertEquals("[A&B]", cis.getVisibilities().toString());
}
try (ChunkInputStream cis = new ChunkInputStream(pi)) {
assertEquals(-1, cis.read(b));
}
try (ChunkInputStream cis = new ChunkInputStream(pi)) {
assertEquals(8, read = cis.read(b));
assertEquals("asdfjkl;", new String(b, 0, read));
assertEquals(-1, cis.read(b));
}
assertFalse(pi.hasNext());
}
}
}
| 3,264 |
0 | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.mapreduce;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.security.MessageDigest;
import java.time.Duration;
import java.util.Base64;
import java.util.Collections;
import java.util.Map.Entry;
import java.util.Properties;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.conf.ClientProperty;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.ExamplesIT;
import org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl;
import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
import org.apache.accumulo.test.functional.ConfigurableMacBase;
import org.apache.hadoop.conf.Configuration;
import org.junit.jupiter.api.Test;
public class MapReduceIT extends ConfigurableMacBase {

  @Override
  protected Duration defaultTimeout() {
    return Duration.ofMinutes(1);
  }

  @Override
  protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
    // Use the Java in-memory map; native maps may not be built in the test environment.
    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, "false");
  }

  public static final String hadoopTmpDirArg = "-Dhadoop.tmp.dir=" + System.getProperty("user.dir")
      + "/target/hadoop-tmp";

  static final String tablename = "mapredf";
  static final String input_cf = "cf-HASHTYPE";
  static final String input_cq = "cq-NOTHASHED";
  static final String input_cfcq = input_cf + ":" + input_cq;
  static final String output_cq = "cq-MD4BASE64";
  static final String output_cfcq = input_cf + ":" + output_cq;

  /**
   * Writes ten rows, runs the {@link RowHash} MapReduce job over them, and verifies that the
   * output column contains the base64-encoded MD5 of each input value.
   */
  @Test
  public void test() throws Exception {
    String confFile = System.getProperty("user.dir") + "/target/accumulo-client.properties";
    Properties props = getClientProperties();
    String instance = ClientProperty.INSTANCE_NAME.getValue(props);
    String keepers = ClientProperty.INSTANCE_ZOOKEEPERS.getValue(props);
    ExamplesIT.writeClientPropsFile(confFile, instance, keepers, "root", ROOT_PASSWORD);
    try (AccumuloClient client = Accumulo.newClient().from(props).build()) {
      client.tableOperations().create(tablename);
      try (BatchWriter bw = client.createBatchWriter(tablename)) {
        for (int i = 0; i < 10; i++) {
          Mutation m = new Mutation("" + i);
          m.put(input_cf, input_cq, "row" + i);
          bw.addMutation(m);
        }
      }
      MiniAccumuloClusterImpl.ProcessInfo hash = getCluster().exec(RowHash.class,
          Collections.singletonList(hadoopTmpDirArg), "-c", confFile, "-t", tablename, "--column",
          input_cfcq);
      assertEquals(0, hash.getProcess().waitFor());

      try (Scanner s = client.createScanner(tablename, Authorizations.EMPTY)) {
        s.fetchColumn(input_cf, output_cq);
        int i = 0;
        // digest() resets the MessageDigest, so one instance can be reused per row.
        MessageDigest md = MessageDigest.getInstance("MD5");
        for (Entry<Key,Value> entry : s) {
          byte[] check = Base64.getEncoder().encode(md.digest(("row" + i).getBytes()));
          // assertEquals takes (expected, actual); previously the arguments were reversed,
          // which produced misleading failure messages.
          assertEquals(new String(check), entry.getValue().toString());
          i++;
        }
      }
    }
  }
}
| 3,265 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/Common.java | package org.apache.accumulo.examples;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.NamespaceExistsException;
import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.client.admin.NewTableConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class Common {

  private static final Logger log = LoggerFactory.getLogger(Common.class);

  public static final String NAMESPACE = "examples";

  public static final String TABLE_EXISTS_MSG = "Table already exists. User may wish to delete "
      + "existing table and re-run example. Table name: ";
  public static final String NAMESPACE_EXISTS_MSG = "Namespace already exists. User can ignore "
      + "this message and continue. Namespace: ";

  /**
   * Creates a table using a default {@link NewTableConfiguration}, first creating the table's
   * namespace when the name has the form "namespace.tablename".
   *
   * @param client
   *          AccumuloClient instance
   * @param table
   *          name of the table to create, optionally prefixed with "namespace."
   */
  public static void createTableWithNamespace(final AccumuloClient client, final String table)
      throws AccumuloException, AccumuloSecurityException {
    createTableWithNamespace(client, table, new NewTableConfiguration());
  }

  /**
   * Creates a table with the supplied configuration. When the table name contains exactly one
   * dot and a non-empty prefix, that prefix is created as a namespace first; an already-existing
   * namespace or table is logged rather than treated as an error.
   *
   * @param client
   *          AccumuloClient instance
   * @param table
   *          name of the table to create, optionally prefixed with "namespace."
   * @param newTableConfig
   *          configuration applied when the table is created
   */
  public static void createTableWithNamespace(final AccumuloClient client, final String table,
      final NewTableConfiguration newTableConfig)
      throws AccumuloException, AccumuloSecurityException {
    final String[] parts = table.split("\\.");
    final boolean hasNamespacePrefix = parts.length == 2 && !parts[0].isEmpty();

    if (hasNamespacePrefix) {
      try {
        client.namespaceOperations().create(parts[0]);
      } catch (NamespaceExistsException e) {
        // Existing namespace is expected when examples are re-run; just inform the user.
        log.info(NAMESPACE_EXISTS_MSG + parts[0]);
      }
    }

    try {
      client.tableOperations().create(table, newTableConfig);
    } catch (TableExistsException e) {
      // An existing table likely holds stale example data, so warn rather than info.
      log.warn(TABLE_EXISTS_MSG + table);
    }
  }
}
| 3,266 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/helloworld/Read.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.helloworld;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Reads all data between two rows
*/
public class Read {

  private static final Logger log = LoggerFactory.getLogger(Read.class);

  /**
   * Scans the hello world table between rows "row_0" and "row_1002" (inclusive start), logging
   * every entry at TRACE and a completion message at INFO.
   */
  public static void main(String[] args) throws TableNotFoundException {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(Read.class.getName(), args);

    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build();
        Scanner scan = client.createScanner(Insert.HELLO_TABLE, Authorizations.EMPTY)) {
      scan.setRange(new Range(new Key("row_0"), new Key("row_1002")));
      for (Entry<Key,Value> e : scan) {
        Key key = e.getKey();
        // Parameterized logging skips the string concatenation entirely when TRACE is disabled.
        log.trace("{} {} {} {}", key.getRow(), key.getColumnFamily(), key.getColumnQualifier(),
            e.getValue());
      }
      log.info("Scan complete");
    }
  }
}
| 3,267 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/helloworld/Insert.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.helloworld;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Inserts 10K rows (50K entries) into accumulo with each row having 5 entries.
*/
public class Insert {

  private static final Logger log = LoggerFactory.getLogger(Insert.class);

  // Fully qualified (namespace.table) name of the hello world example table.
  static final String HELLO_TABLE = Common.NAMESPACE + ".hellotable";

  /**
   * Inserts 10K rows (50K entries) into accumulo with each row having 5 entries. Progress is
   * logged at TRACE every 100 rows.
   */
  public static void main(String[] args)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(Insert.class.getName(), args);

    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
      Common.createTableWithNamespace(client, HELLO_TABLE);
      try (BatchWriter bw = client.createBatchWriter(HELLO_TABLE)) {
        log.trace("writing ...");
        for (int i = 0; i < 10000; i++) {
          Mutation m = new Mutation(String.format("row_%d", i));
          for (int j = 0; j < 5; j++) {
            m.put("colfam", String.format("colqual_%d", j),
                new Value((String.format("value_%d_%d", i, j)).getBytes()));
          }
          bw.addMutation(m);
          if (i % 100 == 0) {
            // Parameterized logging is the SLF4J idiom; the message is just the row count.
            log.trace("{}", i);
          }
        }
      }
    }
  }
}
| 3,268 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/constraints/ConstraintsCommon.java | package org.apache.accumulo.examples.constraints;
import org.apache.accumulo.examples.Common;
/**
 * Shared constants for the constraint examples. Declared as an empty enum so the type cannot be
 * instantiated.
 */
public enum ConstraintsCommon {
  ; // no enum constants: this type is a constants-only namespace

  // Fully qualified (namespace.table) name of the table the constraint examples write to.
  public static final String CONSTRAINTS_TABLE = Common.NAMESPACE + ".testConstraints";
  // SLF4J message template used when logging a rejected mutation's constraint class.
  public static final String CONSTRAINT_VIOLATED_MSG = "Constraint violated: {}";
}
| 3,269 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraint.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.constraints;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.ColumnUpdate;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.data.constraints.Constraint;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is an accumulo constraint that ensures all fields of a key are alpha numeric.
*/
public class AlphaNumKeyConstraint implements Constraint {
private static final Logger log = LoggerFactory.getLogger(AlphaNumKeyConstraint.class);
static final short NON_ALPHA_NUM_ROW = 1;
static final short NON_ALPHA_NUM_COLF = 2;
static final short NON_ALPHA_NUM_COLQ = 3;
static final String ROW_VIOLATION_MESSAGE = "Row was not alpha numeric";
static final String COLF_VIOLATION_MESSAGE = "Column family was not alpha numeric";
static final String COLQ_VIOLATION_MESSAGE = "Column qualifier was not alpha numeric";
private boolean isNotAlphaNum(byte[] bytes) {
for (byte b : bytes) {
boolean ok = ((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9'));
if (!ok)
return true;
}
return false;
}
private Set<Short> addViolation(Set<Short> violations, short violation) {
if (violations == null) {
violations = new LinkedHashSet<>();
}
violations.add(violation);
return violations;
}
@Override
public List<Short> check(Environment env, Mutation mutation) {
Set<Short> violations = null;
if (isNotAlphaNum(mutation.getRow()))
violations = addViolation(violations, NON_ALPHA_NUM_ROW);
Collection<ColumnUpdate> updates = mutation.getUpdates();
for (ColumnUpdate columnUpdate : updates) {
if (isNotAlphaNum(columnUpdate.getColumnFamily()))
violations = addViolation(violations, NON_ALPHA_NUM_COLF);
if (isNotAlphaNum(columnUpdate.getColumnQualifier()))
violations = addViolation(violations, NON_ALPHA_NUM_COLQ);
}
return null == violations ? null : new ArrayList<>(violations);
}
@Override
public String getViolationDescription(short violationCode) {
switch (violationCode) {
case NON_ALPHA_NUM_ROW:
return ROW_VIOLATION_MESSAGE;
case NON_ALPHA_NUM_COLF:
return COLF_VIOLATION_MESSAGE;
case NON_ALPHA_NUM_COLQ:
return COLQ_VIOLATION_MESSAGE;
}
return null;
}
public static void main(String[] args)
throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
ClientOpts opts = new ClientOpts();
opts.parseArgs(AlphaNumKeyConstraint.class.getName(), args);
try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
Common.createTableWithNamespace(client, ConstraintsCommon.CONSTRAINTS_TABLE);
/*
* Add the {@link AlphaNumKeyConstraint} to the table. Be sure to use the fully qualified
* class name.
*/
int num = client.tableOperations().addConstraint(ConstraintsCommon.CONSTRAINTS_TABLE,
"org.apache.accumulo.examples.constraints.AlphaNumKeyConstraint");
log.info("Attempting to write non alpha numeric data to testConstraints");
try (BatchWriter bw = client.createBatchWriter(ConstraintsCommon.CONSTRAINTS_TABLE)) {
Mutation m = new Mutation("r1--$$@@%%");
m.put("cf1", "cq1", new Value(("value1").getBytes()));
bw.addMutation(m);
} catch (MutationsRejectedException e) {
e.getConstraintViolationSummaries().forEach(violationSummary -> log
.error(ConstraintsCommon.CONSTRAINT_VIOLATED_MSG, violationSummary.constrainClass));
}
client.tableOperations().removeConstraint(ConstraintsCommon.CONSTRAINTS_TABLE, num);
}
}
}
| 3,270 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/constraints/NumericValueConstraint.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.constraints;
import java.util.Collection;
import java.util.List;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.ColumnUpdate;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.data.constraints.Constraint;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is an accumulo constraint that ensures values are numeric strings.
*/
public class NumericValueConstraint implements Constraint {
private static final Logger log = LoggerFactory.getLogger(NumericValueConstraint.class);
static final short NON_NUMERIC_VALUE = 1;
static final String VIOLATION_MESSAGE = "Value is not numeric";
private static final List<Short> VIOLATION_LIST = List.of(NON_NUMERIC_VALUE);
private boolean isNumeric(byte[] bytes) {
for (byte b : bytes) {
boolean ok = (b >= '0' && b <= '9');
if (!ok)
return false;
}
return true;
}
@Override
public List<Short> check(Environment env, Mutation mutation) {
Collection<ColumnUpdate> updates = mutation.getUpdates();
for (ColumnUpdate columnUpdate : updates) {
if (!isNumeric(columnUpdate.getValue()))
return VIOLATION_LIST;
}
return null;
}
@Override
public String getViolationDescription(short violationCode) {
if (violationCode == NON_NUMERIC_VALUE) {
return VIOLATION_MESSAGE;
}
return null;
}
public static void main(String[] args)
throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
ClientOpts opts = new ClientOpts();
opts.parseArgs(NumericValueConstraint.class.getName(), args);
try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
Common.createTableWithNamespace(client, ConstraintsCommon.CONSTRAINTS_TABLE);
/*
* Add the {@link NumericValueConstraint} constraint to the table. Be sure to use the fully
* qualified class name
*/
int num = client.tableOperations().addConstraint(ConstraintsCommon.CONSTRAINTS_TABLE,
"org.apache.accumulo.examples.constraints.NumericValueConstraint");
log.info("Attempting to write non-numeric data to testConstraints");
try (BatchWriter bw = client.createBatchWriter(ConstraintsCommon.CONSTRAINTS_TABLE)) {
Mutation m = new Mutation("r1");
m.put("cf1", "cq1", new Value(("value1--$$@@%%").getBytes()));
bw.addMutation(m);
} catch (MutationsRejectedException e) {
e.getConstraintViolationSummaries()
.forEach(m -> log.error(ConstraintsCommon.CONSTRAINT_VIOLATED_MSG, m.constrainClass));
}
client.tableOperations().removeConstraint(ConstraintsCommon.CONSTRAINTS_TABLE, num);
}
}
}
| 3,271 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/constraints/MaxMutationSize.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.constraints;
import java.util.Collections;
import java.util.List;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.data.constraints.Constraint;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Ensure that mutations are a reasonable size: we must be able to fit several in memory at a time.
*/
public class MaxMutationSize implements Constraint {
private static final Logger log = LoggerFactory.getLogger(MaxMutationSize.class);
static final long MAX_SIZE = Runtime.getRuntime().maxMemory() >> 8;
static final List<Short> empty = Collections.emptyList();
static final List<Short> violations = Collections.singletonList((short) 0);
@Override
public String getViolationDescription(short violationCode) {
return String.format("mutation exceeded maximum size of %d", MAX_SIZE);
}
@Override
public List<Short> check(Environment env, Mutation mutation) {
if (mutation.estimatedMemoryUsed() < MAX_SIZE)
return empty;
return violations;
}
public static void main(String[] args)
throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
ClientOpts opts = new ClientOpts();
opts.parseArgs(MaxMutationSize.class.getName(), args);
try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
Common.createTableWithNamespace(client, ConstraintsCommon.CONSTRAINTS_TABLE);
/*
* Add the {@link MaxMutationSize} constraint to the table. Be sure to use the fully qualified
* class name
*/
int num = client.tableOperations().addConstraint(ConstraintsCommon.CONSTRAINTS_TABLE,
"org.apache.accumulo.examples.constraints.MaxMutationSize");
log.info("Attempting to write a lot of mutations to testConstraints");
try (BatchWriter bw = client.createBatchWriter(ConstraintsCommon.CONSTRAINTS_TABLE)) {
Mutation m = new Mutation("r1");
for (int i = 0; i < 1_000_000; i++)
m.put("cf" + i % 5000, "cq" + i, new Value(("value" + i).getBytes()));
bw.addMutation(m);
} catch (MutationsRejectedException e) {
e.getConstraintViolationSummaries()
.forEach(m -> log.error(ConstraintsCommon.CONSTRAINT_VIOLATED_MSG, m.constrainClass));
}
client.tableOperations().removeConstraint(ConstraintsCommon.CONSTRAINTS_TABLE, num);
}
}
}
| 3,272 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/util/FormatUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.util;
import java.util.Map;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.hadoop.io.Text;
public final class FormatUtil {

  /**
   * Format and return the specified table entry as a human-readable String suitable for logging.
   * <br/>
   * If {@code includeTimestamp} is true, the entry will be formatted as: <br/>
   * {@literal <row> <columnFamily>:<columnQualifier> <columnVisibility> <timestamp>\t<value>} <br/>
   * If false, the entry will be formatted as: <br/>
   * {@literal <row> <columnFamily>:<columnQualifier> <columnVisibility>\t<value>} <br/>
   * Examples: <br/>
   * {@literal a ~chunk:\x00\x00\x00d\x00\x00\x00\x00 [A&B] 9223372036854775807 asdfjkl;}
   * {@literal a ~chunk:\x00\x00\x00d\x00\x00\x00\x00 [A&B] asdfjkl;}
   *
   * @param entry
   *          the table entry to format
   * @param includeTimestamp
   *          if true, include the timestamp in the returned result
   * @return the specified entry as a formatted String, or null if the entry is null
   */
  public static String formatTableEntry(final Map.Entry<Key,Value> entry,
      final boolean includeTimestamp) {
    if (entry == null) {
      return null;
    }
    Key key = entry.getKey();
    StringBuilder sb = new StringBuilder();
    Text buffer = new Text();

    // Append row.
    // Bug fix: Text.getBytes() returns the backing array, which may be longer than the valid
    // data, so iteration must always be bounded by Text.getLength() rather than the array length.
    Text row = key.getRow(buffer);
    appendBytes(sb, row.getBytes(), row.getLength()).append(" ");

    // Append column family.
    Text family = key.getColumnFamily();
    appendBytes(sb, family.getBytes(), family.getLength()).append(":");

    // Append column qualifier.
    Text qualifier = key.getColumnQualifier();
    appendBytes(sb, qualifier.getBytes(), qualifier.getLength()).append(" ");

    // Append visibility and timestamp.
    sb.append(new ColumnVisibility(key.getColumnVisibility(buffer)));
    if (includeTimestamp) {
      sb.append(" ").append(entry.getKey().getTimestamp());
    }

    // Append value, if present and non-empty.
    Value value = entry.getValue();
    if (value != null && value.getSize() > 0) {
      sb.append("\t");
      appendBytes(sb, value.get(), value.getSize());
    }
    return sb.toString();
  }

  /**
   * Append the first {@code len} bytes of {@code ba} to {@code sb}, escaping backslashes and
   * rendering bytes outside the printable ASCII range as {@code \xHH}.
   */
  private static StringBuilder appendBytes(final StringBuilder sb, final byte[] ba,
      final int len) {
    for (int i = 0; i < len; i++) {
      int c = 0xff & ba[i];
      if (c == '\\') {
        sb.append("\\\\");
      } else if (c >= 32 && c <= 126) {
        sb.append((char) c);
      } else {
        sb.append("\\x").append(String.format("%02X", c));
      }
    }
    return sb;
  }

  private FormatUtil() {
    throw new UnsupportedOperationException();
  }
}
| 3,273 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/reservations/ARS.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.reservations;
import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.ConditionalWriter;
import org.apache.accumulo.core.client.ConditionalWriter.Status;
import org.apache.accumulo.core.client.ConditionalWriterConfig;
import org.apache.accumulo.core.client.IsolatedScanner;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Condition;
import org.apache.accumulo.core.data.ConditionalMutation;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Accumulo Reservation System : An example reservation system using Accumulo. Supports atomic
 * reservations of a resource at a date. Wait lists are also supported. In order to keep the example
 * simple, no checking is done of the date. Also the code is inefficient; if interested in improving
 * it, take a look at the EXERCISE comments.
 */
// EXERCISE create a test that verifies correctness under concurrency. For example, have M threads
// making reservations against N resources. Each thread could randomly reserve and cancel resources
// for a single user. When each thread finishes, it knows what the state of its single user should
// be. When all threads finish, collect their expected state and verify the status of all users and
// resources. For extra credit run the test on a IAAS provider using 10 nodes and 10 threads per
// node.
public class ARS {

  private static final Logger log = LoggerFactory.getLogger(ARS.class);

  // Connection and name of the reservation table this instance operates against.
  private final AccumuloClient client;
  private final String rTable;

  /** Outcome of a {@link #reserve(String, String, String)} attempt. */
  public enum ReservationResult {
    RESERVED, WAIT_LISTED
  }

  public ARS(AccumuloClient client, String rTable) {
    this.client = client;
    this.rTable = rTable;
  }

  /**
   * Atomically set the capacity of resource {@code what} at date {@code when}, returning any users
   * moved to the wait list. Not implemented.
   *
   * @throws UnsupportedOperationException
   *           always
   */
  public List<String> setCapacity(String what, String when, int count) {
    // EXERCISE implement this method which atomically sets a capacity and returns anyone who was
    // moved to the wait list if the capacity was decreased
    throw new UnsupportedOperationException();
  }

  /**
   * Attempt to reserve resource {@code what} at date {@code when} for user {@code who}. Atomicity
   * is achieved with conditional mutations on a per-row sequence number (column tx:seq): any
   * concurrent writer invalidates the condition and forces a re-read and retry.
   *
   * @return {@code RESERVED} if {@code who} holds (or already held) the first slot,
   *         {@code WAIT_LISTED} otherwise
   */
  public ReservationResult reserve(String what, String when, String who) throws Exception {

    String row = what + ":" + when;

    // EXERCISE This code assumes there is no reservation and tries to create one. If a reservation
    // exists then the update will fail. This is a good strategy when it is expected there are
    // usually no reservations. Could modify the code to scan first.

    // The following mutation requires that the column tx:seq does not exist and will fail if it
    // does.
    ConditionalMutation update = new ConditionalMutation(row, new Condition("tx", "seq"));
    update.put("tx", "seq", "0");
    update.put("res", String.format("%04d", 0), who);

    ReservationResult result = ReservationResult.RESERVED;

    // it is important to use an isolated scanner so that only whole mutations are seen
    try (
        ConditionalWriter cwriter = client.createConditionalWriter(rTable,
            new ConditionalWriterConfig());
        Scanner scanner = client.createScanner(rTable, Authorizations.EMPTY);
        Scanner isolatedScanner = new IsolatedScanner(scanner)) {
      while (true) {
        Status status = cwriter.write(update).getStatus();
        switch (status) {
          case ACCEPTED:
            return result;
          case REJECTED:
          case UNKNOWN:
            // read the row and decide what to do
            break;
          default:
            throw new RuntimeException("Unexpected status " + status);
        }

        // EXERCISE in the case of many threads trying to reserve a slot, this approach of
        // immediately retrying is inefficient. Exponential back-off is a good general solution to
        // solve contention problems like this. However in this particular case, exponential
        // back-off could penalize the earliest threads that attempted to make a reservation by
        // putting them later in the list. A more complex solution could involve having independent
        // sub-queues within the row that approximately maintain arrival order and use exponential
        // back-off to fairly merge the sub-queues into the main queue.

        isolatedScanner.setRange(new Range(row));

        // seq is the current value of tx:seq (-1 if absent); maxReservation is the highest
        // occupied slot number seen in the res column family (-1 if none).
        int seq = -1;
        int maxReservation = -1;

        for (Entry<Key,Value> entry : isolatedScanner) {
          String cf = entry.getKey().getColumnFamilyData().toString();
          String cq = entry.getKey().getColumnQualifierData().toString();
          String val = entry.getValue().toString();

          if (cf.equals("tx") && cq.equals("seq")) {
            seq = Integer.parseInt(val);
          } else if (cf.equals("res")) {
            // EXERCISE scanning the entire list to find if reserver is already in the list is
            // inefficient. One possible way to solve this would be to sort the data differently in
            // Accumulo so that finding the reserver could be done quickly.
            if (val.equals(who))
              if (maxReservation == -1)
                return ReservationResult.RESERVED; // already have the first reservation
              else
                return ReservationResult.WAIT_LISTED; // already on wait list

            // EXERCISE the way this code finds the max reservation is very inefficient.... it
            // would be better if it did not have to scan the entire row. One possibility is to just
            // use the sequence number. Could also consider sorting the data in another way and/or
            // using an iterator.
            maxReservation = Integer.parseInt(cq);
          }
        }

        Condition condition = new Condition("tx", "seq");
        if (seq >= 0)
          condition.setValue(seq + ""); // only expect a seq # if one was seen

        update = new ConditionalMutation(row, condition);
        update.put("tx", "seq", (seq + 1) + "");
        update.put("res", String.format("%04d", maxReservation + 1), who);

        // EXERCISE if set capacity is implemented, then result should take capacity into account
        if (maxReservation == -1)
          result = ReservationResult.RESERVED; // if successful, will be first reservation
        else
          result = ReservationResult.WAIT_LISTED;
      }
    }
  }

  /**
   * Cancel {@code who}'s reservation (or wait-list entry) for {@code what} at {@code when}. Does
   * nothing if {@code who} has no entry for that resource and date.
   */
  public void cancel(String what, String when, String who) throws Exception {

    String row = what + ":" + when;

    // Even though this method is only deleting a column, it's important to use a conditional
    // writer. By updating the seq # when deleting a reservation, it
    // will cause any concurrent reservations to retry. If this delete were done using a batch
    // writer, then a concurrent reservation could report WAIT_LISTED
    // when it actually got the reservation.

    // it's important to use an isolated scanner so that only whole mutations are seen
    try (
        ConditionalWriter cwriter = client.createConditionalWriter(rTable,
            new ConditionalWriterConfig());
        Scanner scanner = client.createScanner(rTable, Authorizations.EMPTY);
        Scanner isolatedScanner = new IsolatedScanner(scanner)) {
      while (true) {
        isolatedScanner.setRange(new Range(row));

        int seq = -1;
        String reservation = null;

        for (Entry<Key,Value> entry : isolatedScanner) {
          String cf = entry.getKey().getColumnFamilyData().toString();
          String cq = entry.getKey().getColumnQualifierData().toString();
          String val = entry.getValue().toString();

          // EXERCISE avoid linear scan
          if (cf.equals("tx") && cq.equals("seq")) {
            seq = Integer.parseInt(val);
          } else if (cf.equals("res") && val.equals(who)) {
            reservation = cq;
          }
        }

        if (reservation != null) {
          // Delete the slot and bump the sequence number in one conditional update so any
          // concurrent reservation attempt is forced to re-read the row.
          ConditionalMutation update = new ConditionalMutation(row,
              new Condition("tx", "seq").setValue(seq + ""));
          update.putDelete("res", reservation);
          update.put("tx", "seq", (seq + 1) + "");

          Status status = cwriter.write(update).getStatus();
          switch (status) {
            case ACCEPTED:
              // successfully canceled reservation
              return;
            case REJECTED:
            case UNKNOWN:
              // retry
              // EXERCISE exponential back-off could be used here
              break;
            default:
              throw new RuntimeException("Unexpected status " + status);
          }
        } else {
          // not reserved, nothing to do
          break;
        }
      }
    }
  }

  /**
   * List all users holding a slot for {@code what} at {@code when}. The first element (if any) is
   * the reservation holder; the remainder are the wait list in slot order.
   */
  public List<String> list(String what, String when) throws Exception {
    String row = what + ":" + when;

    // it's important to use an isolated scanner so that only whole mutations are seen
    try (Scanner scanner = client.createScanner(rTable, Authorizations.EMPTY);
        Scanner isolatedScanner = new IsolatedScanner(scanner)) {
      isolatedScanner.setRange(new Range(row));
      isolatedScanner.fetchColumnFamily("res");

      List<String> reservations = new ArrayList<>();

      for (Entry<Key,Value> entry : isolatedScanner) {
        String val = entry.getValue().toString();
        reservations.add(val);
      }

      return reservations;
    }
  }

  /**
   * Interactive console driver. Commands: {@code connect}, {@code reserve}, {@code cancel},
   * {@code list}, {@code quit}.
   * <p>
   * NOTE(review): {@code System.console()} can return null (e.g. when stdin is redirected), which
   * would NPE on the first readLine — confirm intended usage is interactive only.
   */
  public static void main(String[] args) throws Exception {
    var console = System.console();
    var out = System.out;

    ARS ars = null;

    while (true) {
      String line = console.readLine(">");
      if (line == null)
        break;

      final String[] tokens = line.split("\\s+");

      if (tokens[0].equals("reserve") && tokens.length >= 4 && ars != null) {
        // start up multiple threads all trying to reserve the same resource, no more than one
        // should succeed
        final ARS fars = ars;
        ArrayList<Thread> threads = new ArrayList<>();
        for (int i = 3; i < tokens.length; i++) {
          final int whoIndex = i;
          Runnable reservationTask = () -> {
            try {
              out.println("  " + String.format("%20s", tokens[whoIndex]) + " : "
                  + fars.reserve(tokens[1], tokens[2], tokens[whoIndex]));
            } catch (Exception e) {
              log.warn("Could not write to the ConsoleReader.", e);
            }
          };
          threads.add(new Thread(reservationTask));
        }

        for (Thread thread : threads)
          thread.start();

        for (Thread thread : threads)
          thread.join();
      } else if (tokens[0].equals("cancel") && tokens.length == 4 && ars != null) {
        ars.cancel(tokens[1], tokens[2], tokens[3]);
      } else if (tokens[0].equals("list") && tokens.length == 3 && ars != null) {
        List<String> reservations = ars.list(tokens[1], tokens[2]);
        if (reservations.size() > 0) {
          out.println("  Reservation holder : " + reservations.get(0));
          if (reservations.size() > 1)
            out.println("  Wait list : " + reservations.subList(1, reservations.size()));
        }
      } else if (tokens[0].equals("quit") && tokens.length == 1) {
        break;
      } else if (tokens[0].equals("connect") && tokens.length == 6 && ars == null) {
        // the client can't be closed here, because it is passed to the new ARS object
        AccumuloClient client = Accumulo.newClient().to(tokens[1], tokens[2])
            .as(tokens[3], tokens[4]).build();
        if (client.tableOperations().exists(tokens[5])) {
          ars = new ARS(client, tokens[5]);
          out.println("  connected");
        } else {
          out.println("  No Such Table");
        }
      } else {
        System.out.println("  Commands : ");
        if (ars == null) {
          out.println("    connect <instance> <zookeepers> <user> <pass> <table>");
        } else {
          out.println("    reserve <what> <when> <who> {who}");
          out.println("    cancel <what> <when> <who>");
          out.println("    list <what> <when>");
        }
      }
    }
  }
}
| 3,274 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/combiner/StatsCombiner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.combiner;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.Combiner;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
/**
 * This combiner calculates the max, min, sum, and count of long integers represented as strings in
 * values. It stores the result in a comma-separated value of the form min,max,sum,count. If such a
 * value is encountered while combining, its information is incorporated into the running
 * calculations of min, max, sum, and count. See {@link Combiner} for more information on which
 * values are combined together.
 */
public class StatsCombiner extends Combiner {

  public static final String RADIX_OPTION = "radix";

  // Base used to parse and render the numbers; configurable via the "radix" option.
  private int radix = 10;

  @Override
  public Value reduce(Key key, Iterator<Value> iter) {
    long lo = Long.MAX_VALUE;
    long hi = Long.MIN_VALUE;
    long total = 0;
    long n = 0;

    while (iter.hasNext()) {
      String[] parts = iter.next().toString().split(",");
      if (parts.length == 1) {
        // A raw number: fold it into the running stats directly.
        long v = Long.parseLong(parts[0], radix);
        lo = Math.min(v, lo);
        hi = Math.max(v, hi);
        total += v;
        n += 1;
      } else {
        // An already-combined min,max,sum,count tuple: merge component-wise.
        lo = Math.min(Long.parseLong(parts[0], radix), lo);
        hi = Math.max(Long.parseLong(parts[1], radix), hi);
        total += Long.parseLong(parts[2], radix);
        n += Long.parseLong(parts[3], radix);
      }
    }

    String combined = String.join(",", Long.toString(lo, radix), Long.toString(hi, radix),
        Long.toString(total, radix), Long.toString(n, radix));
    return new Value(combined.getBytes());
  }

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) throws IOException {
    super.init(source, options, env);
    // Fall back to base 10 when no radix option was configured.
    radix = options.containsKey(RADIX_OPTION) ? Integer.parseInt(options.get(RADIX_OPTION)) : 10;
  }

  @Override
  public IteratorOptions describeOptions() {
    IteratorOptions io = super.describeOptions();
    io.setName("statsCombiner");
    io.setDescription("Combiner that keeps track of min, max, sum, and count");
    io.addNamedOption(RADIX_OPTION, "radix/base of the numbers");
    return io;
  }

  @Override
  public boolean validateOptions(Map<String,String> options) {
    if (!super.validateOptions(options)) {
      return false;
    }
    // A radix, when supplied, must be a plain non-negative integer.
    String radixValue = options.get(RADIX_OPTION);
    if (radixValue != null && !radixValue.matches("\\d+")) {
      throw new IllegalArgumentException("invalid option " + RADIX_OPTION + ":" + radixValue);
    }
    return true;
  }

  /**
   * A convenience method for setting the expected base/radix of the numbers
   *
   * @param iterConfig
   *          Iterator settings to configure
   * @param base
   *          The expected base/radix of the numbers.
   */
  public static void setRadix(IteratorSetting iterConfig, int base) {
    iterConfig.addOption(RADIX_OPTION, base + "");
  }
}
| 3,275 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/bloom/BloomFiltersNotFound.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.bloom;
import static org.apache.accumulo.examples.bloom.BloomFilters.writeData;
import java.util.Map;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.admin.NewTableConfiguration;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class BloomFiltersNotFound {

  private static final Logger log = LoggerFactory.getLogger(BloomFiltersNotFound.class);

  /**
   * Creates one table without bloom filters (bloom_test3) and one with them enabled (bloom_test4),
   * writes identical data (seed 7) to both, then scans each with a different seed (8) so every
   * lookup misses — demonstrating how bloom filters speed up negative lookups.
   */
  public static void main(String[] args)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(BloomFiltersNotFound.class.getName(), args);

    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
      Map<String,String> props = Map.of(BloomCommon.BLOOM_ENABLED_PROPERTY, "true");
      var newTableConfig = new NewTableConfiguration().setProperties(props);
      // bloom_test3 is created WITHOUT bloom filters; bloom_test4 WITH them.
      Common.createTableWithNamespace(client, BloomCommon.BLOOM_TEST3_TABLE);
      Common.createTableWithNamespace(client, BloomCommon.BLOOM_TEST4_TABLE, newTableConfig);
      writeAndFlush(BloomCommon.BLOOM_TEST3_TABLE, client, false);
      writeAndFlush(BloomCommon.BLOOM_TEST4_TABLE, client, true);

      BloomBatchScanner.scan(client, BloomCommon.BLOOM_TEST3_TABLE, 8);
      BloomBatchScanner.scan(client, BloomCommon.BLOOM_TEST4_TABLE, 8);
    }
  }

  /**
   * Writes a million random rows (seed 7) to {@code tableName} and flushes them to disk.
   *
   * @param bloomEnabled
   *          whether the table was created with bloom filters enabled; used only for logging.
   *          (Bug fix: the previous message claimed "bloom filters enabled" for both tables,
   *          but bloom_test3 is created without them.)
   */
  private static void writeAndFlush(String tableName, AccumuloClient client, boolean bloomEnabled)
      throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
    log.info("Writing data to {} (bloom filters {})", tableName,
        bloomEnabled ? "enabled" : "disabled");
    writeData(client, tableName, 7);
    client.tableOperations().flush(tableName, null, null, true);
  }
}
| 3,276 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/bloom/BloomCommon.java | package org.apache.accumulo.examples.bloom;
import org.apache.accumulo.examples.Common;
/**
 * Table-name and property-key constants shared by the bloom filter examples.
 * <p>
 * Declared as a value-less enum so the type carries only constants and can never be instantiated.
 */
enum BloomCommon {
  ;

  // Tables written by BloomFilters (test1: bloom disabled, test2: bloom enabled).
  public static final String BLOOM_TEST1_TABLE = Common.NAMESPACE + ".bloom_test1";
  public static final String BLOOM_TEST2_TABLE = Common.NAMESPACE + ".bloom_test2";
  // Tables written by BloomFiltersNotFound (test3: bloom disabled, test4: bloom enabled).
  public static final String BLOOM_TEST3_TABLE = Common.NAMESPACE + ".bloom_test3";
  public static final String BLOOM_TEST4_TABLE = Common.NAMESPACE + ".bloom_test4";
  // Accumulo per-table property that enables bloom filters.
  public static final String BLOOM_ENABLED_PROPERTY = "table.bloom.enabled";
}
| 3,277 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/bloom/BloomBatchScanner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.bloom;
import static org.apache.accumulo.examples.client.RandomBatchWriter.abs;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map.Entry;
import java.util.Random;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Simple example for reading random batches of data from Accumulo.
 */
public final class BloomBatchScanner {

  private static final Logger log = LoggerFactory.getLogger(BloomBatchScanner.class);

  private BloomBatchScanner() {}

  public static void main(String[] args) throws TableNotFoundException {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(BloomBatchScanner.class.getName(), args);

    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
      scan(client, BloomCommon.BLOOM_TEST1_TABLE, 7);
      scan(client, BloomCommon.BLOOM_TEST2_TABLE, 7);
    }
  }

  /**
   * Looks up 500 random rows (derived deterministically from {@code seed}) in {@code tableName}
   * and logs lookup throughput plus how many of the expected rows were actually found.
   */
  static void scan(AccumuloClient client, String tableName, int seed)
      throws TableNotFoundException {
    Random r = new Random(seed);
    HashSet<Range> ranges = new HashSet<>();
    HashMap<String,Boolean> expectedRows = new HashMap<>();
    // Build 500 distinct single-row ranges; the map tracks which rows the scan returned.
    while (ranges.size() < 500) {
      long rowId = abs(r.nextLong()) % 1_000_000_000;
      String row = String.format("row_%010d", rowId);
      ranges.add(new Range(row));
      expectedRows.put(row, false);
    }

    long t1 = System.currentTimeMillis();
    long results = 0;
    long lookups = ranges.size();

    log.info("Scanning {} with seed {}", tableName, seed);
    try (BatchScanner scan = client.createBatchScanner(tableName, Authorizations.EMPTY, 20)) {
      scan.setRanges(ranges);
      for (Entry<Key,Value> entry : scan) {
        Key key = entry.getKey();
        String row = key.getRow().toString();
        if (expectedRows.containsKey(row)) {
          expectedRows.put(row, true);
        } else {
          log.info("Encountered unexpected key: {}", key);
        }
        results++;
      }
    }

    long t2 = System.currentTimeMillis();
    log.info(String.format("Scan finished! %6.2f lookups/sec, %.2f secs, %d results",
        lookups / ((t2 - t1) / 1000.0), ((t2 - t1) / 1000.0), results));

    // Count expected rows the scan never returned.
    long count = expectedRows.values().stream().filter(found -> !found).count();
    if (count > 0)
      // Fix: use parameterized logging, consistent with the rest of this class.
      log.info("Did not find {}", count);
    else
      log.info("All expected rows were scanned");
  }
}
| 3,278 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.bloom;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.admin.NewTableConfiguration;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.apache.accumulo.examples.client.RandomBatchWriter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public final class BloomFilters {

  private static final Logger log = LoggerFactory.getLogger(BloomFilters.class);

  private BloomFilters() {}

  /**
   * Creates two identically-loaded tables — bloom_test1 without bloom filters and bloom_test2
   * with them enabled — so the scan examples can compare lookup performance.
   */
  public static void main(String[] args)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(BloomFilters.class.getName(), args);

    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
      // Both tables share the compaction ratio; only table 2 turns bloom filters on.
      Map<String,String> table1props = Map.of("table.compaction.major.ratio", "7");
      Map<String,String> table2props = new HashMap<>(table1props);
      table2props.put(BloomCommon.BLOOM_ENABLED_PROPERTY, "true");
      Common.createTableWithNamespace(client, BloomCommon.BLOOM_TEST1_TABLE,
          new NewTableConfiguration().setProperties(table1props));
      Common.createTableWithNamespace(client, BloomCommon.BLOOM_TEST2_TABLE,
          new NewTableConfiguration().setProperties(table2props));
      writeAndFlushData(BloomCommon.BLOOM_TEST1_TABLE, client);
      writeAndFlushData(BloomCommon.BLOOM_TEST2_TABLE, client);
    }
  }

  // Write a million rows 3 times (seeds 7, 8, 9), flushing files to disk after each pass.
  private static void writeAndFlushData(final String tableName, final AccumuloClient client)
      throws TableNotFoundException, AccumuloSecurityException, AccumuloException {
    log.info("Writing data to {}", tableName);
    for (int seed = 7; seed <= 9; seed++) {
      writeData(client, tableName, seed);
      client.tableOperations().flush(tableName, null, null, true);
    }
  }

  // Write a million random rows derived deterministically from the given seed.
  static void writeData(AccumuloClient client, String tableName, int seed)
      throws TableNotFoundException, MutationsRejectedException {
    Random rand = new Random(seed);
    try (BatchWriter writer = client.createBatchWriter(tableName)) {
      for (int i = 0; i < 1_000_000; i++) {
        long rowId = RandomBatchWriter.abs(rand.nextLong()) % 1_000_000_000;
        writer.addMutation(RandomBatchWriter.createMutation(rowId, 50, new ColumnVisibility()));
      }
    }
  }
}
| 3,279 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/cli/ClientOnRequiredTable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.cli;
import com.beust.jcommander.Parameter;
/**
 * Command-line options for examples that require a table name, supplied via
 * {@code -t}/{@code --table}.
 */
public class ClientOnRequiredTable extends ClientOpts {

  @Parameter(names = {"-t", "--table"}, required = true, description = "table to use")
  private String table;

  /** @return the table name given on the command line */
  public String getTableName() {
    return table;
  }
}
| 3,280 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/cli/ClientOnDefaultTable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.cli;
import com.beust.jcommander.Parameter;
/**
 * Command-line options for examples that have a default table name, optionally overridden
 * with {@code --table}.
 */
public class ClientOnDefaultTable extends ClientOpts {

  @Parameter(names = "--table", description = "table to use")
  private String table;

  public ClientOnDefaultTable(String table) {
    this.table = table;
  }

  /** @return the current table name (the default unless overridden) */
  public String getTableName() {
    return table;
  }

  public void setTableName(String tableName) {
    this.table = tableName;
  }
}
| 3,281 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/cli/Help.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.cli;
import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;
/**
 * Base class for example CLIs: registers a help flag and drives JCommander argument
 * parsing, printing usage and exiting on parse errors or an explicit help request.
 */
public class Help {

  @Parameter(names = {"-h", "-?", "--help", "-help"}, help = true)
  public boolean help = false;

  /**
   * Parses {@code args} into this object and any extra option objects. On a parse error,
   * prints usage and exits with status 1; if help was requested, prints usage and exits 0.
   */
  public void parseArgs(String programName, String[] args, Object... others) {
    JCommander commander = new JCommander();
    commander.setProgramName(programName);
    commander.addObject(this);
    for (Object other : others) {
      commander.addObject(other);
    }
    try {
      commander.parse(args);
    } catch (ParameterException ex) {
      commander.usage();
      exitWithError(ex.getMessage(), 1);
    }
    if (help) {
      commander.usage();
      exit(0);
    }
  }

  /** Terminates the JVM; kept as a method so tests can override it. */
  public void exit(int status) {
    System.exit(status);
  }

  /** Prints {@code message} to stderr, then exits with {@code status}. */
  public void exitWithError(String message, int status) {
    System.err.println(message);
    exit(status);
  }
}
| 3,282 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/cli/BatchWriterOpts.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.cli;
import java.time.Duration;
import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.client.BatchWriterConfig;
import com.beust.jcommander.IStringConverter;
import com.beust.jcommander.Parameter;
/**
 * Common command-line options that configure an Accumulo {@link BatchWriterConfig}:
 * thread count, max latency, buffered memory, and write timeout.
 */
public class BatchWriterOpts {
  private static final BatchWriterConfig BWDEFAULTS = new BatchWriterConfig();

  /**
   * Converts a duration string to milliseconds. A bare number is interpreted as seconds;
   * anything else must parse as an ISO-8601 duration (e.g. {@code PT30S}).
   */
  public static class TimeConverter implements IStringConverter<Long> {
    @Override
    public Long convert(String value) {
      if (value.matches("[0-9]+"))
        value = "PT" + value + "S"; // if only numbers then assume seconds
      return Duration.parse(value).toMillis();
    }
  }

  /**
   * Converts a memory-size string such as {@code 512K} or {@code 2G} to bytes. A bare
   * number, or one with a trailing {@code B}, is taken as bytes.
   */
  public static class MemoryConverter implements IStringConverter<Long> {
    @Override
    public Long convert(String str) {
      try {
        char lastChar = str.charAt(str.length() - 1);
        int multiplier = 0;
        // Intentional fall-through: each case below accumulates another factor of
        // 1024 (a shift of 10 bits), so 'G' ends up as 2^30, 'M' as 2^20, 'K' as 2^10.
        switch (Character.toUpperCase(lastChar)) {
          case 'G':
            multiplier += 10;
          case 'M':
            multiplier += 10;
          case 'K':
            multiplier += 10;
          case 'B':
            break;
          default:
            // no recognized suffix: the whole string must be a plain byte count
            return Long.parseLong(str);
        }
        return Long.parseLong(str.substring(0, str.length() - 1)) << multiplier;
      } catch (Exception ex) {
        // fixed grammar/typo in the message ("would a number possibily" -> "would be a
        // number possibly") and preserve the cause for diagnosis
        throw new IllegalArgumentException(
            "The value '" + str + "' is not a valid memory setting. A valid value would be a "
                + "number possibly followed by an optional 'G', 'M', 'K', or 'B'.",
            ex);
      }
    }
  }

  @Parameter(names = "--batchThreads",
      description = "Number of threads to use when writing large batches")
  public Integer batchThreads = BWDEFAULTS.getMaxWriteThreads();

  @Parameter(names = "--batchLatency", converter = TimeConverter.class,
      description = "The maximum time to wait before flushing data to servers when writing")
  public Long batchLatency = BWDEFAULTS.getMaxLatency(TimeUnit.MILLISECONDS);

  @Parameter(names = "--batchMemory", converter = MemoryConverter.class,
      description = "memory used to batch data when writing")
  public Long batchMemory = BWDEFAULTS.getMaxMemory();

  @Parameter(names = "--batchTimeout", converter = TimeConverter.class,
      description = "timeout used to fail a batch write")
  public Long batchTimeout = BWDEFAULTS.getTimeout(TimeUnit.MILLISECONDS);

  /** @return a {@link BatchWriterConfig} populated from these options */
  public BatchWriterConfig getBatchWriterConfig() {
    BatchWriterConfig config = new BatchWriterConfig();
    config.setMaxWriteThreads(this.batchThreads);
    config.setMaxLatency(this.batchLatency, TimeUnit.MILLISECONDS);
    config.setMaxMemory(this.batchMemory);
    config.setTimeout(this.batchTimeout, TimeUnit.MILLISECONDS);
    return config;
  }
}
| 3,283 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.cli;
import java.nio.file.Paths;
import java.util.Properties;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.hadoop.conf.Configuration;
import com.beust.jcommander.IStringConverter;
import com.beust.jcommander.Parameter;
/**
 * Base command-line options for connecting to Accumulo: resolves the path to
 * accumulo-client.properties (from {@code -c}/{@code --conf} or the
 * {@code ACCUMULO_CLIENT_PROPS} environment variable) and holds scan authorizations.
 */
public class ClientOpts extends Help {

  /** Parses a comma-separated list into an {@link Authorizations} instance. */
  public static class AuthConverter implements IStringConverter<Authorizations> {
    @Override
    public Authorizations convert(String value) {
      return new Authorizations(value.split(","));
    }
  }

  /** Parses a visibility expression string into a {@link ColumnVisibility}. */
  public static class VisibilityConverter implements IStringConverter<ColumnVisibility> {
    @Override
    public ColumnVisibility convert(String value) {
      return new ColumnVisibility(value);
    }
  }

  @Parameter(names = {"-c", "--conf"}, description = "Path to accumulo-client.properties."
      + "If not set, defaults to path set by env variable ACCUMULO_CLIENT_PROPS.")
  private String propsPath = null;

  @Parameter(names = {"-auths", "--auths"}, converter = AuthConverter.class,
      description = "the authorizations to use when reading or writing")
  public Authorizations auths = Authorizations.EMPTY;

  private Properties cachedProps = null;

  /** Builds a new {@link AccumuloClient} from the resolved client properties file. */
  public AccumuloClient createAccumuloClient() {
    return Accumulo.newClient().from(getClientPropsPath()).build();
  }

  /**
   * Resolves and caches the client properties path. A path given on the command line is
   * returned as-is; otherwise it is taken from ACCUMULO_CLIENT_PROPS and verified to exist.
   */
  public String getClientPropsPath() {
    if (propsPath != null) {
      return propsPath;
    }
    String envPath = System.getenv("ACCUMULO_CLIENT_PROPS");
    if (envPath == null) {
      throw new IllegalArgumentException("accumulo-client.properties must be set!");
    }
    if (!Paths.get(envPath).toFile().exists()) {
      throw new IllegalArgumentException(envPath + " does not exist!");
    }
    propsPath = envPath;
    return propsPath;
  }

  /** Lazily loads and caches the client {@link Properties}. */
  public Properties getClientProperties() {
    if (cachedProps == null) {
      cachedProps = Accumulo.newClientProperties().from(getClientPropsPath()).build();
    }
    return cachedProps;
  }

  /** @return a Hadoop configuration with the MapReduce job classloader enabled */
  public Configuration getHadoopConfig() {
    Configuration config = new Configuration();
    config.set("mapreduce.job.classloader", "true");
    return config;
  }
}
| 3,284 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/cli/ScannerOpts.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.cli;
import com.beust.jcommander.Parameter;
/**
 * Command-line option shared by example scanners: how many key/value pairs to fetch per
 * batch while scanning.
 */
public class ScannerOpts {
  @Parameter(names = "--scanBatchSize",
      description = "the number of key-values to pull during a scan")
  public int scanBatchSize = 1000;
}
| 3,285 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/isolation/InterferenceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.isolation;
import java.util.HashSet;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.IsolatedScanner;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.BatchWriterOpts;
import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.beust.jcommander.Parameter;
/**
 * This example shows how a concurrent reader and writer can interfere with each other. It creates
 * two threads that run forever reading and writing to the same table.
 *
 * When the example is run with isolation enabled, no interference will be observed.
 *
 * When the example is run without isolation, the reader will see partial mutations of a row.
 */
public final class InterferenceTest {

  private static final int NUM_ROWS = 500;
  // scanner batches 1000 key/values by default; 113 does not divide 1000 evenly, so
  // batch boundaries fall mid-row, which is what exposes non-isolated partial reads
  private static final int NUM_COLUMNS = 113;
  private static final String ERROR_MISSING_COLS = "ERROR Did not see {} columns in row {}";
  private static final String ERROR_MULTIPLE_VALS = "ERROR Columns in row {} had multiple values "
      + "{}";
  private static final Logger log = LoggerFactory.getLogger(InterferenceTest.class);

  // utility/demo class; not meant to be instantiated
  private InterferenceTest() {}

  /**
   * Writer thread: repeatedly rewrites rows 0..NUM_ROWS-1, putting one (incrementing) value
   * into every column of a row, so a consistent read should see a single value per row.
   */
  static class Writer implements Runnable {

    private final BatchWriter bw;
    private final long iterations;

    Writer(BatchWriter bw, long iterations) {
      this.bw = bw;
      this.iterations = iterations;
    }

    @Override
    public void run() {
      int row = 0;
      int value = 0;
      for (long i = 0; i < iterations; i++) {
        Mutation m = new Mutation(String.format("%03d", row));
        row = (row + 1) % NUM_ROWS;
        // all columns of the row carry the same value; a reader observing two distinct
        // values within one row has seen a partially applied mutation
        for (int cq = 0; cq < NUM_COLUMNS; cq++)
          m.put("000", String.format("%04d", cq), new Value(("" + value).getBytes()));
        value++;
        try {
          bw.addMutation(m);
        } catch (MutationsRejectedException e) {
          log.error("Mutation was rejected.", e);
          System.exit(-1);
        }
      }
      try {
        bw.close();
      } catch (MutationsRejectedException e) {
        log.error("Mutation was rejected on BatchWriter close.", e);
      }
    }
  }

  /**
   * Reader thread: continuously scans the table, verifying that each row has exactly
   * NUM_COLUMNS entries all holding a single value; violations are logged.
   */
  static class Reader implements Runnable {

    private final Scanner scanner;
    // volatile so the flag set by stopNow() is visible to the scanning thread
    volatile boolean stop = false;

    Reader(Scanner scanner) {
      this.scanner = scanner;
    }

    @Override
    public void run() {
      // the stop flag is only checked between full-table scans
      while (!stop) {
        ByteSequence row = null;
        int count = 0;
        // all columns in a row should have the same value,
        // use this hash set to track that
        HashSet<String> values = new HashSet<>();
        for (Entry<Key,Value> entry : scanner) {
          if (row == null)
            row = entry.getKey().getRowData();
          if (!row.equals(entry.getKey().getRowData())) {
            // row boundary: validate the row just completed, then reset the trackers
            if (count != NUM_COLUMNS)
              log.error(ERROR_MISSING_COLS, NUM_COLUMNS, row);
            if (values.size() > 1)
              log.error(ERROR_MULTIPLE_VALS, row, values);
            row = entry.getKey().getRowData();
            count = 0;
            values.clear();
          }
          count++;
          values.add(entry.getValue().toString());
        }
        // validate the final row of the scan (not followed by a row boundary)
        if (count > 0 && count != NUM_COLUMNS)
          log.error(ERROR_MISSING_COLS, NUM_COLUMNS, row);
        if (values.size() > 1)
          log.error(ERROR_MULTIPLE_VALS, row, values);
      }
    }

    public void stopNow() {
      stop = true;
    }
  }

  static class Opts extends ClientOnRequiredTable {
    @Parameter(names = "--iterations", description = "number of times to run", required = true)
    long iterations = 0;
    @Parameter(names = "--isolated", description = "use isolated scans")
    boolean isolated = false;
  }

  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(InterferenceTest.class.getName(), args, bwOpts);
    // iterations < 1 means "run effectively forever"
    if (opts.iterations < 1)
      opts.iterations = Long.MAX_VALUE;
    try (AccumuloClient client = opts.createAccumuloClient()) {
      Common.createTableWithNamespace(client, opts.getTableName());
      Thread writer = new Thread(
          new Writer(client.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig()),
              opts.iterations));
      writer.start();
      Reader r;
      // with --isolated, wrap the scanner so the reader never observes a row mid-mutation
      // (per the class doc above, no interference should then be logged)
      if (opts.isolated)
        r = new Reader(new IsolatedScanner(client.createScanner(opts.getTableName(), opts.auths)));
      else
        r = new Reader(client.createScanner(opts.getTableName(), opts.auths));
      Thread reader;
      reader = new Thread(r);
      reader.start();
      writer.join();
      r.stopNow();
      reader.join();
      log.info("finished");
    }
  }
}
| 3,286 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/sample/SampleExample.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.sample;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.SampleNotPresentException;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.admin.CompactionConfig;
import org.apache.accumulo.core.client.admin.PluginConfig;
import org.apache.accumulo.core.client.sample.RowSampler;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.BatchWriterOpts;
import org.apache.accumulo.examples.cli.ClientOnDefaultTable;
import org.apache.accumulo.examples.client.RandomBatchWriter;
import org.apache.accumulo.examples.shard.CutoffIntersectingIterator;
import com.google.common.collect.ImmutableMap;
/**
 * A simple example of using Accumulo's sampling feature. This example does something similar to
 * what README.sample shows using the shell. Also, see {@link CutoffIntersectingIterator} and
 * README.sample for an example of how to use sample data from within an iterator.
 */
public class SampleExample {

  // a compaction strategy that only selects files for compaction that have no sample data or sample
  // data created in a different way than the tables
  static final PluginConfig selectorCfg = new PluginConfig(
      "org.apache.accumulo.tserver.compaction.strategies.ConfigurableCompactionStrategy",
      Map.of("SF_NO_SAMPLE", ""));
  static final CompactionConfig NO_SAMPLE_STRATEGY = new CompactionConfig()
      .setSelector(selectorCfg);

  static class Opts extends ClientOnDefaultTable {
    public Opts() {
      super("examples.sampex");
    }
  }

  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    // NOTE(review): the program name passed here is RandomBatchWriter's — looks like a
    // copy/paste; usage output will show the wrong name. Consider SampleExample.class.
    opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
    try (AccumuloClient client = opts.createAccumuloClient()) {
      Common.createTableWithNamespace(client, opts.getTableName());
      // write some data
      try (
          BatchWriter bw = client.createBatchWriter(opts.getTableName(),
              bwOpts.getBatchWriterConfig());
          Scanner scanner = client.createScanner(opts.getTableName(), Authorizations.EMPTY)) {
        bw.addMutation(createMutation("9225", "abcde", "file://foo.txt"));
        bw.addMutation(createMutation("8934", "accumulo scales", "file://accumulo_notes.txt"));
        bw.addMutation(createMutation("2317", "milk, eggs, bread, parmigiano-reggiano",
            "file://groceries/9/txt"));
        bw.addMutation(createMutation("3900", "EC2 ate my homework", "file://final_project.txt"));
        bw.flush();
        // configure a row sampler using the murmur3_32 hasher with modulus 3
        // (presumably keeping ~1 in 3 rows — see RowSampler docs to confirm)
        SamplerConfiguration sc1 = new SamplerConfiguration(RowSampler.class.getName());
        sc1.setOptions(ImmutableMap.of("hasher", "murmur3_32", "modulus", "3"));
        client.tableOperations().setSamplerConfiguration(opts.getTableName(), sc1);
        System.out.println("Scanning all data :");
        print(scanner);
        System.out.println();
        System.out.println(
            "Scanning with sampler configuration. Data was written before sampler was set on table, scan should fail.");
        scanner.setSamplerConfiguration(sc1);
        try {
          print(scanner);
        } catch (SampleNotPresentException e) {
          System.out.println(" Saw sample not present exception as expected.");
        }
        System.out.println();
        // compact table to recreate sample data
        client.tableOperations().compact(opts.getTableName(), NO_SAMPLE_STRATEGY);
        System.out
            .println("Scanning after compaction (compaction should have created sample data) : ");
        print(scanner);
        System.out.println();
        // update a document in the sample data
        bw.addMutation(createMutation("2317", "milk, eggs, bread, parmigiano-reggiano, butter",
            "file://groceries/9/txt"));
        bw.flush();
        System.out.println(
            "Scanning sample after updating content for docId 2317 (should see content change in sample data) : ");
        print(scanner);
        System.out.println();
        // change tables sampling configuration (modulus 3 -> 2), invalidating old sample data
        SamplerConfiguration sc2 = new SamplerConfiguration(RowSampler.class.getName());
        sc2.setOptions(ImmutableMap.of("hasher", "murmur3_32", "modulus", "2"));
        client.tableOperations().setSamplerConfiguration(opts.getTableName(), sc2);
        // compact table to recreate sample data using new configuration
        client.tableOperations().compact(opts.getTableName(), NO_SAMPLE_STRATEGY);
        System.out.println(
            "Scanning with old sampler configuration. Sample data was created using new configuration with a compaction. Scan should fail.");
        try {
          // try scanning with old sampler configuration
          print(scanner);
        } catch (SampleNotPresentException e) {
          System.out.println(" Saw sample not present exception as expected ");
        }
        System.out.println();
        // update expected sampler configuration on scanner
        scanner.setSamplerConfiguration(sc2);
        System.out.println("Scanning with new sampler configuration : ");
        print(scanner);
        System.out.println();
      }
    }
  }

  /** Prints every key/value the scanner returns, one per line. */
  private static void print(Scanner scanner) {
    for (Entry<Key,Value> entry : scanner) {
      System.out.println(" " + entry.getKey() + " " + entry.getValue());
    }
  }

  /** Builds a two-column mutation for a document row: doc:context and doc:url. */
  // NOTE(review): the qualifier "context" may be a typo for "content" — verify consumers
  // before renaming, since existing data uses "context".
  private static Mutation createMutation(String docId, String content, String url) {
    Mutation m = new Mutation(docId);
    m.put("doc", "context", content);
    m.put("doc", "url", url);
    return m;
  }
}
| 3,287 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.dirlist;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.user.RegExFilter;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
import org.apache.hadoop.io.Text;
import com.beust.jcommander.Parameter;
/**
* Provides utility methods for getting the info for a file, listing the contents of a directory,
* and performing single wild card searches on file or directory names.
*/
public class QueryUtil {
private final AccumuloClient client;
private final String tableName;
private final Authorizations auths;
public static final Text DIR_COLF = new Text("dir");
public static final Text FORWARD_PREFIX = new Text("f");
public static final Text REVERSE_PREFIX = new Text("r");
public static final Text INDEX_COLF = new Text("i");
public static final Text COUNTS_COLQ = new Text("counts");
public QueryUtil(AccumuloClient client, Opts opts) {
this.client = client;
this.tableName = opts.getTableName();
this.auths = opts.auths;
}
/**
* Calculates the depth of a path, i.e. the number of forward slashes in the path name.
*
* @param path
* the full path of a file or directory
* @return the depth of the path
*/
public static int getDepth(String path) {
int numSlashes = 0;
int index = -1;
while ((index = path.indexOf("/", index + 1)) >= 0)
numSlashes++;
return numSlashes;
}
/**
* Given a path, construct an accumulo row prepended with the path's depth for the directory
* table.
*
* @param path
* the full path of a file or directory
* @return the accumulo row associated with this path
*/
public static Text getRow(String path) {
Text row = new Text(String.format("%03d", getDepth(path)));
row.append(path.getBytes(), 0, path.length());
return row;
}
/**
* Given a path, construct an accumulo row prepended with the {@link #FORWARD_PREFIX} for the
* index table.
*
* @param path
* the full path of a file or directory
* @return the accumulo row associated with this path
*/
public static Text getForwardIndex(String path) {
String part = path.substring(path.lastIndexOf("/") + 1);
if (part.length() == 0)
return null;
Text row = new Text(FORWARD_PREFIX);
row.append(part.getBytes(), 0, part.length());
return row;
}
/**
* Given a path, construct an accumulo row prepended with the {@link #REVERSE_PREFIX} with the
* path reversed for the index table.
*
* @param path
* the full path of a file or directory
* @return the accumulo row associated with this path
*/
public static Text getReverseIndex(String path) {
String part = path.substring(path.lastIndexOf("/") + 1);
if (part.length() == 0)
return null;
byte[] rev = new byte[part.length()];
int i = part.length() - 1;
for (byte b : part.getBytes())
rev[i--] = b;
Text row = new Text(REVERSE_PREFIX);
row.append(rev, 0, rev.length);
return row;
}
/**
* Returns either the {@link #DIR_COLF} or a decoded string version of the colf.
*
* @param colf
* the column family
*/
public static String getType(Text colf) {
if (colf.equals(DIR_COLF))
return colf + ":";
return Ingest.encoder.decode(colf.getBytes()) + ":";
}
/**
* Scans over the directory table and pulls out stat information about a path.
*
* @param path
* the full path of a file or directory
*/
public Map<String,String> getData(String path) throws TableNotFoundException {
if (path.endsWith("/"))
path = path.substring(0, path.length() - 1);
Map<String,String> data = new TreeMap<>();
try (Scanner scanner = client.createScanner(tableName, auths)) {
scanner.setRange(new Range(getRow(path)));
for (Entry<Key,Value> e : scanner) {
String type = getType(e.getKey().getColumnFamily());
data.put("fullname", e.getKey().getRow().toString().substring(3));
data.put(type + e.getKey().getColumnQualifier().toString() + ":"
+ e.getKey().getColumnVisibility().toString(), new String(e.getValue().get()));
}
}
return data;
}
/**
* Uses the directory table to list the contents of a directory.
*
* @param path
* the full path of a directory
*/
public Map<String,Map<String,String>> getDirList(String path) throws TableNotFoundException {
if (!path.endsWith("/"))
path = path + "/";
Map<String,Map<String,String>> fim = new TreeMap<>();
try (Scanner scanner = client.createScanner(tableName, auths)) {
scanner.setRange(Range.prefix(getRow(path)));
for (Entry<Key,Value> e : scanner) {
String name = e.getKey().getRow().toString();
name = name.substring(name.lastIndexOf("/") + 1);
String type = getType(e.getKey().getColumnFamily());
if (!fim.containsKey(name)) {
fim.put(name, new TreeMap<>());
fim.get(name).put("fullname", e.getKey().getRow().toString().substring(3));
}
fim.get(name).put(type + e.getKey().getColumnQualifier().toString() + ":"
+ e.getKey().getColumnVisibility().toString(), new String(e.getValue().get()));
}
}
return fim;
}
/**
* Scans over the index table for files or directories with a given name.
*
* @param term
* the name a file or directory to search for
*/
public Iterable<Entry<Key,Value>> exactTermSearch(String term) throws Exception {
System.out.println("executing exactTermSearch for " + term);
Scanner scanner = client.createScanner(tableName, auths);
scanner.setRange(new Range(getForwardIndex(term)));
return scanner;
}
/**
* Scans over the index table for files or directories with a given name, prefix, or suffix
* (indicated by a wildcard '*' at the beginning or end of the term.
*
* @param exp
* the name a file or directory to search for with an optional wildcard '*' at the
* beginning or end
*/
public Iterable<Entry<Key,Value>> singleRestrictedWildCardSearch(String exp) throws Exception {
if (exp.contains("/"))
throw new Exception("this method only works with unqualified names");
Scanner scanner = client.createScanner(tableName, auths);
if (exp.startsWith("*")) {
System.out.println("executing beginning wildcard search for " + exp);
exp = exp.substring(1);
scanner.setRange(Range.prefix(getReverseIndex(exp)));
} else if (exp.endsWith("*")) {
System.out.println("executing ending wildcard search for " + exp);
exp = exp.substring(0, exp.length() - 1);
scanner.setRange(Range.prefix(getForwardIndex(exp)));
} else if (exp.contains("*")) {
throw new Exception("this method only works for beginning or ending wild cards");
} else {
return exactTermSearch(exp);
}
return scanner;
}
/**
* Scans over the index table for files or directories with a given name that can contain a single
* wildcard '*' anywhere in the term.
*
* @param exp
* the name a file or directory to search for with one optional wildcard '*'
*/
public Iterable<Entry<Key,Value>> singleWildCardSearch(String exp) throws Exception {
int starIndex = exp.indexOf("*");
if (exp.indexOf("*", starIndex + 1) >= 0)
throw new Exception("only one wild card for search");
if (starIndex < 0) {
return exactTermSearch(exp);
} else if (starIndex == 0 || starIndex == exp.length() - 1) {
return singleRestrictedWildCardSearch(exp);
}
String firstPart = exp.substring(0, starIndex);
String lastPart = exp.substring(starIndex + 1);
String regexString = ".*/" + exp.replace("*", "[^/]*");
Scanner scanner = client.createScanner(tableName, auths);
if (firstPart.length() >= lastPart.length()) {
System.out.println("executing middle wildcard search for " + regexString
+ " from entries starting with " + firstPart);
scanner.setRange(Range.prefix(getForwardIndex(firstPart)));
} else {
System.out.println("executing middle wildcard search for " + regexString
+ " from entries ending with " + lastPart);
scanner.setRange(Range.prefix(getReverseIndex(lastPart)));
}
IteratorSetting regex = new IteratorSetting(50, "regex", RegExFilter.class);
RegExFilter.setRegexs(regex, null, null, regexString, null, false);
scanner.addScanIterator(regex);
return scanner;
}
/**
 * Command-line options for the query utility: either list a directory's contents
 * or (with --search) look up file/directory names, optionally with a wildcard.
 */
public static class Opts extends ClientOnRequiredTable {
  // Directory to list; reused as the search term when --search is given.
  @Parameter(names = "--path", description = "the directory to list")
  String path = "/";
  // When true, --path is interpreted as a name to search for instead of a directory.
  @Parameter(names = "--search", description = "find a file or directory with the given name")
  boolean search = false;
}
/**
 * Lists the contents of a directory using the directory table, or searches for file or directory
 * names (if the -search flag is included).
 */
public static void main(String[] args) throws Exception {
  Opts options = new Opts();
  options.parseArgs(QueryUtil.class.getName(), args);
  try (AccumuloClient accumulo = options.createAccumuloClient()) {
    QueryUtil query = new QueryUtil(accumulo, options);
    if (!options.search) {
      // Default mode: print each child entry of the requested directory.
      for (Entry<String,Map<String,String>> listing : query.getDirList(options.path).entrySet()) {
        System.out.println(listing);
      }
    } else {
      // Search mode: print the matching paths found in the index table.
      for (Entry<Key,Value> hit : query.singleWildCardSearch(options.path)) {
        System.out.println(hit.getKey().getColumnQualifier());
      }
    }
  }
}
}
| 3,288 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.dirlist;
import java.util.Iterator;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.examples.cli.BatchWriterOpts;
import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
import org.apache.accumulo.examples.cli.ScannerOpts;
import org.apache.hadoop.io.Text;
import com.beust.jcommander.Parameter;
/**
* Computes recursive counts over file system information and stores them back into the same
* Accumulo table.
*/
public class FileCount {
private int entriesScanned;
private int inserts;
private final ScannerOpts scanOpts;
private final BatchWriterOpts bwOpts;
private final AccumuloClient client;
private final String tableName;
private final Authorizations auths;
private final ColumnVisibility visibility;
private static class CountValue {
int dirCount = 0;
int fileCount = 0;
int recursiveDirCount = 0;
int recusiveFileCount = 0;
void set(Value val) {
String[] sa = val.toString().split(",");
dirCount = Integer.parseInt(sa[0]);
fileCount = Integer.parseInt(sa[1]);
recursiveDirCount = Integer.parseInt(sa[2]);
recusiveFileCount = Integer.parseInt(sa[3]);
}
Value toValue() {
return new Value(
(dirCount + "," + fileCount + "," + recursiveDirCount + "," + recusiveFileCount)
.getBytes());
}
void incrementFiles() {
fileCount++;
recusiveFileCount++;
}
void incrementDirs() {
dirCount++;
recursiveDirCount++;
}
public void clear() {
dirCount = 0;
fileCount = 0;
recursiveDirCount = 0;
recusiveFileCount = 0;
}
public void incrementRecursive(CountValue other) {
recursiveDirCount += other.recursiveDirCount;
recusiveFileCount += other.recusiveFileCount;
}
}
private int findMaxDepth(Scanner scanner, int min, int max) {
int mid = min + (max - min) / 2;
return findMaxDepth(scanner, min, mid, max);
}
private int findMaxDepth(Scanner scanner, int min, int mid, int max) {
// check to see if the mid-point exist
if (max < min)
return -1;
scanner.setRange(
new Range(String.format("%03d", mid), true, String.format("%03d", mid + 1), false));
if (scanner.iterator().hasNext()) {
// this depth exist, check to see if a larger depth exist
int ret = findMaxDepth(scanner, mid + 1, max);
if (ret == -1)
return mid; // this must the max
else
return ret;
} else {
// this depth does not exist, look lower
return findMaxDepth(scanner, min, mid - 1);
}
}
private int findMaxDepth(Scanner scanner) {
// do binary search to find max depth
int origBatchSize = scanner.getBatchSize();
scanner.setBatchSize(100);
int depth = findMaxDepth(scanner, 0, 64, 999);
scanner.setBatchSize(origBatchSize);
return depth;
}
// find the count column and consume a row
private Entry<Key,Value> findCount(Entry<Key,Value> entry, Iterator<Entry<Key,Value>> iterator,
CountValue cv) {
Key key = entry.getKey();
Text currentRow = key.getRow();
if (key.compareColumnQualifier(QueryUtil.COUNTS_COLQ) == 0)
cv.set(entry.getValue());
while (iterator.hasNext()) {
entry = iterator.next();
entriesScanned++;
key = entry.getKey();
if (key.compareRow(currentRow) != 0)
return entry;
if (key.compareColumnFamily(QueryUtil.DIR_COLF) == 0
&& key.compareColumnQualifier(QueryUtil.COUNTS_COLQ) == 0) {
cv.set(entry.getValue());
}
}
return null;
}
private Entry<Key,Value> consumeRow(Entry<Key,Value> entry, Iterator<Entry<Key,Value>> iterator) {
Key key = entry.getKey();
Text currentRow = key.getRow();
while (iterator.hasNext()) {
entry = iterator.next();
entriesScanned++;
key = entry.getKey();
if (key.compareRow(currentRow) != 0)
return entry;
}
return null;
}
private String extractDir(Key key) {
String row = key.getRowData().toString();
return row.substring(3, row.lastIndexOf('/'));
}
private Mutation createMutation(int depth, String dir, CountValue countVal) {
Mutation m = new Mutation(String.format("%03d%s", depth, dir));
m.put(QueryUtil.DIR_COLF, QueryUtil.COUNTS_COLQ, visibility, countVal.toValue());
return m;
}
private void calculateCounts(Scanner scanner, int depth, BatchWriter batchWriter)
throws Exception {
scanner.setRange(
new Range(String.format("%03d", depth), true, String.format("%03d", depth + 1), false));
CountValue countVal = new CountValue();
Iterator<Entry<Key,Value>> iterator = scanner.iterator();
String currentDir = null;
Entry<Key,Value> entry = null;
if (iterator.hasNext()) {
entry = iterator.next();
entriesScanned++;
}
while (entry != null) {
Key key = entry.getKey();
String dir = extractDir(key);
if (currentDir == null) {
currentDir = dir;
} else if (!currentDir.equals(dir)) {
batchWriter.addMutation(createMutation(depth - 1, currentDir, countVal));
inserts++;
currentDir = dir;
countVal.clear();
}
// process a whole row
if (key.compareColumnFamily(QueryUtil.DIR_COLF) == 0) {
CountValue tmpCount = new CountValue();
entry = findCount(entry, iterator, tmpCount);
if (tmpCount.dirCount == 0 && tmpCount.fileCount == 0) {
// in this case the higher depth will not insert anything if the
// dir has no children, so insert something here
Mutation m = new Mutation(key.getRow());
m.put(QueryUtil.DIR_COLF, QueryUtil.COUNTS_COLQ, visibility, tmpCount.toValue());
batchWriter.addMutation(m);
inserts++;
}
countVal.incrementRecursive(tmpCount);
countVal.incrementDirs();
} else {
entry = consumeRow(entry, iterator);
countVal.incrementFiles();
}
}
if (currentDir != null) {
batchWriter.addMutation(createMutation(depth - 1, currentDir, countVal));
inserts++;
}
}
public FileCount(AccumuloClient client, String tableName, Authorizations auths,
ColumnVisibility cv, ScannerOpts scanOpts, BatchWriterOpts bwOpts) {
this.client = client;
this.tableName = tableName;
this.auths = auths;
this.visibility = cv;
this.scanOpts = scanOpts;
this.bwOpts = bwOpts;
}
public void run() throws Exception {
entriesScanned = 0;
inserts = 0;
try (Scanner scanner = client.createScanner(tableName, auths);
BatchWriter bw = client.createBatchWriter(tableName, bwOpts.getBatchWriterConfig())) {
scanner.setBatchSize(scanOpts.scanBatchSize);
long t1 = System.currentTimeMillis();
int depth = findMaxDepth(scanner);
long t2 = System.currentTimeMillis();
for (int d = depth; d > 0; d--) {
calculateCounts(scanner, d, bw);
// must flush so next depth can read what prev depth wrote
bw.flush();
}
long t3 = System.currentTimeMillis();
System.out.printf("Max depth : %d%n", depth);
System.out.printf("Time to find max depth : %,d ms%n", (t2 - t1));
System.out.printf("Time to compute counts : %,d ms%n", (t3 - t2));
System.out.printf("Entries scanned : %,d %n", entriesScanned);
System.out.printf("Counts inserted : %,d %n", inserts);
}
}
public static class Opts extends ClientOnRequiredTable {
@Parameter(names = "--vis", description = "use a given visibility for the new counts",
converter = VisibilityConverter.class)
ColumnVisibility visibility = new ColumnVisibility();
}
public static void main(String[] args) throws Exception {
Opts opts = new Opts();
ScannerOpts scanOpts = new ScannerOpts();
BatchWriterOpts bwOpts = new BatchWriterOpts();
String programName = FileCount.class.getName();
opts.parseArgs(programName, args, scanOpts, bwOpts);
try (AccumuloClient client = opts.createAccumuloClient()) {
FileCount fileCount = new FileCount(client, opts.getTableName(), opts.auths, opts.visibility,
scanOpts, bwOpts);
fileCount.run();
}
}
}
| 3,289 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.dirlist;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.admin.NewTableConfiguration;
import org.apache.accumulo.core.client.lexicoder.Encoder;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.LongCombiner;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.BatchWriterOpts;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.apache.accumulo.examples.filedata.ChunkCombiner;
import org.apache.accumulo.examples.filedata.FileDataIngest;
import org.apache.hadoop.io.Text;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.beust.jcommander.Parameter;
/**
* Recursively lists the files and directories under a given path, ingests their names and file info
* into one Accumulo table, indexes the file names in a separate table, and the file data into a
* third table.
*/
public final class Ingest {
  private static final Logger log = LoggerFactory.getLogger(Ingest.class);
  // Default table names, all under the shared examples namespace.
  static final String DIR_TABLE = Common.NAMESPACE + ".dirTable";
  static final String INDEX_TABLE = Common.NAMESPACE + ".indexTable";
  static final String DATA_TABLE = Common.NAMESPACE + ".dataTable";
  // Empty value for index entries — only the key carries information there.
  static final Value nullValue = new Value(new byte[0]);
  // Column qualifiers for the per-entry metadata columns in the directory table.
  public static final String LENGTH_CQ = "length";
  public static final String HIDDEN_CQ = "hidden";
  public static final String EXEC_CQ = "exec";
  public static final String LASTMOD_CQ = "lastmod";
  public static final String HASH_CQ = "md5";
  // Fixed-length long encoder used to derive a sortable column family from lastmod.
  public static final Encoder<Long> encoder = LongCombiner.FIXED_LEN_ENCODER;
  // Static utility class; not instantiable.
  private Ingest() {}
  /**
   * Builds the directory-table mutation describing one file or directory.
   * Directories use the fixed DIR column family; files use a column family
   * derived from (Long.MAX_VALUE - lastmod) so newer versions sort first.
   *
   * @param cv visibility applied to every column
   * @param path path of the entry ("/" is stored as the empty path)
   * @param isDir whether the entry is a directory
   * @param isHidden whether the entry is hidden
   * @param canExec whether the entry is executable
   * @param length file length in bytes
   * @param lastmod last-modified timestamp in milliseconds
   * @param hash md5 hash of the file contents; null or empty omits the hash column
   * @return the populated mutation
   */
  public static Mutation buildMutation(ColumnVisibility cv, String path, boolean isDir,
      boolean isHidden, boolean canExec, long length, long lastmod, String hash) {
    if (path.equals("/"))
      path = "";
    Mutation m = new Mutation(QueryUtil.getRow(path));
    String colf;
    if (isDir)
      colf = QueryUtil.DIR_COLF.toString();
    else
      colf = new String(encoder.encode(Long.MAX_VALUE - lastmod), UTF_8);
    // NOTE(review): these getBytes() calls use the platform default charset;
    // content is ASCII so the stored bytes are stable in practice.
    m.put(colf, LENGTH_CQ, cv, new Value(Long.toString(length).getBytes()));
    m.put(colf, HIDDEN_CQ, cv, new Value(Boolean.toString(isHidden).getBytes()));
    m.put(colf, EXEC_CQ, cv, new Value(Boolean.toString(canExec).getBytes()));
    m.put(colf, LASTMOD_CQ, cv, new Value(Long.toString(lastmod).getBytes()));
    if (hash != null && hash.length() > 0)
      m.put(colf, HASH_CQ, cv, new Value(hash.getBytes()));
    return m;
  }
  /**
   * Ingests a single file or directory: stores its chunked data (files only),
   * its metadata row in the directory table, and forward/reverse index entries.
   * Files whose data cannot be read are silently skipped.
   */
  private static void ingest(File src, ColumnVisibility cv, BatchWriter dirBW, BatchWriter indexBW,
      FileDataIngest fdi, BatchWriter data) throws Exception {
    // build main table entry
    String path;
    try {
      path = src.getCanonicalPath();
    } catch (IOException e) {
      // Fall back to the absolute path when canonicalization fails.
      path = src.getAbsolutePath();
    }
    log.info(path);
    String hash = null;
    if (!src.isDirectory()) {
      try {
        hash = fdi.insertFileData(path, data);
      } catch (Exception e) {
        // if something goes wrong, just skip this one
        return;
      }
    }
    dirBW.addMutation(buildMutation(cv, path, src.isDirectory(), src.isHidden(), src.canExecute(),
        src.length(), src.lastModified(), hash));
    // build index table entries
    Text row = QueryUtil.getForwardIndex(path);
    if (row != null) {
      Text p = new Text(QueryUtil.getRow(path));
      Mutation m = new Mutation(row);
      m.put(QueryUtil.INDEX_COLF, p, cv, nullValue);
      indexBW.addMutation(m);
      row = QueryUtil.getReverseIndex(path);
      m = new Mutation(row);
      m.put(QueryUtil.INDEX_COLF, p, cv, nullValue);
      indexBW.addMutation(m);
    }
  }
  // Depth-first ingest of a file tree rooted at src.
  private static void recurse(File src, ColumnVisibility cv, BatchWriter dirBW, BatchWriter indexBW,
      FileDataIngest fdi, BatchWriter data) throws Exception {
    // ingest this File
    ingest(src, cv, dirBW, indexBW, fdi, data);
    // recurse into subdirectories
    if (src.isDirectory()) {
      File[] files = src.listFiles();
      if (files == null)
        return;
      for (File child : files) {
        recurse(child, cv, dirBW, indexBW, fdi, data);
      }
    }
  }
  /** Command-line options for ingest: target tables, visibility, chunking, and source dirs. */
  static class Opts extends ClientOpts {
    @Parameter(names = "--dirTable", description = "a table to hold the directory information")
    String dirTable = DIR_TABLE;
    @Parameter(names = "--indexTable", description = "an index over the ingested data")
    String indexTable = INDEX_TABLE;
    @Parameter(names = "--dataTable", description = "the file data, chunked into parts")
    String dataTable = DATA_TABLE;
    @Parameter(names = "--vis", description = "the visibility to mark the data",
        converter = VisibilityConverter.class)
    ColumnVisibility visibility = new ColumnVisibility();
    @Parameter(names = "--chunkSize", description = "the size of chunks when breaking down files")
    int chunkSize = 100000;
    @Parameter(description = "<dir> { <dir> ... }")
    List<String> directories = new ArrayList<>();
  }
  /**
   * CLI entry point: creates the three tables (attaching ChunkCombiner to the
   * data table), ingests each requested directory tree, then fills in metadata
   * for each ancestor directory up to the root.
   */
  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(Ingest.class.getName(), args, bwOpts);
    try (AccumuloClient client = opts.createAccumuloClient()) {
      var newTableConfig = new NewTableConfiguration()
          .attachIterator(new IteratorSetting(1, ChunkCombiner.class));
      Common.createTableWithNamespace(client, opts.dirTable);
      Common.createTableWithNamespace(client, opts.indexTable);
      Common.createTableWithNamespace(client, opts.dataTable, newTableConfig);
      try (
          BatchWriter dirBW = client.createBatchWriter(opts.dirTable,
              bwOpts.getBatchWriterConfig());
          BatchWriter indexBW = client.createBatchWriter(opts.indexTable,
              bwOpts.getBatchWriterConfig());
          BatchWriter dataBW = client.createBatchWriter(opts.dataTable,
              bwOpts.getBatchWriterConfig())) {
        FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
        for (String dir : opts.directories) {
          recurse(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
          // fill in parent directory info
          int slashIndex;
          while ((slashIndex = dir.lastIndexOf('/')) > 0) {
            dir = dir.substring(0, slashIndex);
            ingest(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
          }
        }
        ingest(new File("/"), opts.visibility, dirBW, indexBW, fdi, dataBW);
      }
    }
  }
}
| 3,290 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.dirlist;
import java.awt.BorderLayout;
import java.io.IOException;
import java.util.Enumeration;
import java.util.Map;
import java.util.Map.Entry;
import javax.swing.JFrame;
import javax.swing.JScrollPane;
import javax.swing.JSplitPane;
import javax.swing.JTextArea;
import javax.swing.JTree;
import javax.swing.event.TreeExpansionEvent;
import javax.swing.event.TreeExpansionListener;
import javax.swing.event.TreeSelectionEvent;
import javax.swing.event.TreeSelectionListener;
import javax.swing.tree.DefaultMutableTreeNode;
import javax.swing.tree.DefaultTreeModel;
import javax.swing.tree.TreeNode;
import javax.swing.tree.TreePath;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.examples.filedata.FileDataQuery;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.beust.jcommander.Parameter;
/**
* Provides a GUI for browsing the file system information stored in Accumulo.
*/
@SuppressWarnings("serial")
public class Viewer extends JFrame implements TreeSelectionListener, TreeExpansionListener {
  private static final Logger log = LoggerFactory.getLogger(Viewer.class);
  // Swing components: the directory tree and the two info panes.
  JTree tree;
  DefaultTreeModel treeModel;
  final QueryUtil q;
  FileDataQuery fdq;
  // Root path shown at the top of the tree.
  String topPath;
  // NOTE(review): never assigned or read in this class; appears unused.
  Map<String,DefaultMutableTreeNode> nodeNameMap;
  JTextArea text;
  JTextArea data;
  JScrollPane dataPane;
  /** Tree node payload: an entry's display name plus its metadata columns. */
  public static class NodeInfo {
    private final String name;
    private final Map<String,String> data;
    public NodeInfo(String name, Map<String,String> data) {
      this.name = name;
      this.data = data;
    }
    public String getName() {
      return name;
    }
    // Full path if present in the metadata, otherwise just the display name.
    public String getFullName() {
      String fn = data.get("fullname");
      if (fn == null)
        return name;
      return fn;
    }
    public Map<String,String> getData() {
      return data;
    }
    @Override
    public String toString() {
      return getName();
    }
    // Finds the md5 hash among metadata keys of the form "<colf>:md5".
    public String getHash() {
      for (String k : data.keySet()) {
        String[] parts = k.split(":");
        if (parts.length >= 2 && parts[1].equals("md5")) {
          return data.get(k);
        }
      }
      return null;
    }
  }
  // Sets up the frame and the query helpers; UI is built later in init().
  public Viewer(AccumuloClient client, Opts opts) throws Exception {
    super("File Viewer");
    setSize(1000, 800);
    setDefaultCloseOperation(EXIT_ON_CLOSE);
    q = new QueryUtil(client, opts);
    fdq = new FileDataQuery(client, opts.dataTable, opts.auths);
    this.topPath = opts.path;
  }
  // Adds one tree child per directory-table entry under the node's path.
  public void populate(DefaultMutableTreeNode node) throws TableNotFoundException {
    String path = ((NodeInfo) node.getUserObject()).getFullName();
    log.debug("listing " + path);
    for (Entry<String,Map<String,String>> e : q.getDirList(path).entrySet()) {
      log.debug("got child for " + node.getUserObject() + ": " + e.getKey());
      node.add(new DefaultMutableTreeNode(new NodeInfo(e.getKey(), e.getValue())));
    }
  }
  // Populates each immediate child so the tree shows expandable handles.
  public void populateChildren(DefaultMutableTreeNode node) throws TableNotFoundException {
    Enumeration<TreeNode> children = node.children();
    while (children.hasMoreElements()) {
      populate((DefaultMutableTreeNode) children.nextElement());
    }
  }
  /** Builds the tree model rooted at topPath and lays out the split-pane UI. */
  public void init() throws TableNotFoundException {
    DefaultMutableTreeNode root = new DefaultMutableTreeNode(
        new NodeInfo(topPath, q.getData(topPath)));
    populate(root);
    populateChildren(root);
    treeModel = new DefaultTreeModel(root);
    tree = new JTree(treeModel);
    tree.addTreeExpansionListener(this);
    tree.addTreeSelectionListener(this);
    text = new JTextArea(getText(q.getData(topPath)));
    data = new JTextArea("");
    JScrollPane treePane = new JScrollPane(tree);
    JScrollPane textPane = new JScrollPane(text);
    dataPane = new JScrollPane(data);
    JSplitPane infoSplitPane = new JSplitPane(JSplitPane.VERTICAL_SPLIT, textPane, dataPane);
    JSplitPane mainSplitPane = new JSplitPane(JSplitPane.HORIZONTAL_SPLIT, treePane, infoSplitPane);
    mainSplitPane.setDividerLocation(300);
    infoSplitPane.setDividerLocation(150);
    getContentPane().add(mainSplitPane, BorderLayout.CENTER);
  }
  // Renders a node's metadata for the info pane.
  public static String getText(DefaultMutableTreeNode node) {
    return getText(((NodeInfo) node.getUserObject()).getData());
  }
  // Renders a metadata map as "key : value" lines.
  public static String getText(Map<String,String> data) {
    StringBuilder sb = new StringBuilder();
    for (String name : data.keySet()) {
      sb.append(name);
      sb.append(" : ");
      sb.append(data.get(name));
      sb.append('\n');
    }
    return sb.toString();
  }
  // Lazily loads grandchildren when a node is expanded.
  @Override
  public void treeExpanded(TreeExpansionEvent event) {
    try {
      populateChildren((DefaultMutableTreeNode) event.getPath().getLastPathComponent());
    } catch (TableNotFoundException e) {
      log.error("Could not find table.", e);
    }
  }
  // Frees grandchildren when a node collapses; they are re-fetched on expand.
  @Override
  public void treeCollapsed(TreeExpansionEvent event) {
    DefaultMutableTreeNode node = (DefaultMutableTreeNode) event.getPath().getLastPathComponent();
    Enumeration<TreeNode> children = node.children();
    while (children.hasMoreElements()) {
      DefaultMutableTreeNode child = (DefaultMutableTreeNode) children.nextElement();
      log.debug("removing children of " + ((NodeInfo) child.getUserObject()).getFullName());
      child.removeAllChildren();
    }
  }
  // On selection: show metadata, and if the entry has a hash, fetch a data preview.
  @Override
  public void valueChanged(TreeSelectionEvent e) {
    TreePath selected = e.getNewLeadSelectionPath();
    if (selected == null)
      return;
    DefaultMutableTreeNode node = (DefaultMutableTreeNode) selected.getLastPathComponent();
    text.setText(getText(node));
    try {
      String hash = ((NodeInfo) node.getUserObject()).getHash();
      if (hash != null) {
        data.setText(fdq.getSomeData(hash, 10000));
      } else {
        data.setText("");
      }
    } catch (IOException e1) {
      log.error("Could not get data from FileDataQuery.", e1);
    }
  }
  /** Adds the data-table option to the base query options. */
  static class Opts extends QueryUtil.Opts {
    @Parameter(names = "--dataTable")
    String dataTable = "dataTable";
  }
  // CLI entry point: connect, build the UI, and show the frame.
  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs(Viewer.class.getName(), args);
    try (AccumuloClient client = opts.createAccumuloClient()) {
      Viewer v = new Viewer(client, opts);
      v.init();
      v.setVisible(true);
    }
  }
}
| 3,291 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/filedata/VisibilityCombiner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.filedata;
import java.util.TreeSet;
import org.apache.accumulo.core.data.ByteSequence;
/**
* A utility for merging visibilities into the form {@code (VIS1)|(VIS2)|...|(VISN)}. Used by the
* {@link ChunkCombiner}.
*/
public class VisibilityCombiner {
  // Distinct top-level visibility terms collected so far, each already
  // parenthesized; TreeSet keeps them sorted and de-duplicated.
  private final TreeSet<String> visibilities = new TreeSet<>();
  /**
   * Merges a visibility expression into the set. The expression is split on '|'
   * characters at parenthesis depth zero; each resulting term is normalized and
   * stored via insert(). Empty expressions are ignored.
   *
   * @throws IllegalArgumentException if the parentheses are unbalanced
   */
  void add(ByteSequence cv) {
    if (cv.length() == 0)
      return;
    int depth = 0;
    int offset = 0;
    for (int i = 0; i < cv.length(); i++) {
      switch (cv.byteAt(i)) {
        case '(':
          depth++;
          break;
        case ')':
          depth--;
          if (depth < 0)
            throw new IllegalArgumentException("Invalid vis " + cv);
          break;
        case '|':
          // Only split on ORs that are not nested inside parentheses.
          if (depth == 0) {
            insert(cv.subSequence(offset, i));
            offset = i + 1;
          }
          break;
      }
    }
    insert(cv.subSequence(offset, cv.length()));
    if (depth != 0)
      throw new IllegalArgumentException("Invalid vis " + cv);
  }
  // Normalizes one term so it can safely be ORed with others: wraps it in
  // parentheses unless it is already a single fully-parenthesized group.
  private void insert(ByteSequence cv) {
    String cvs = cv.toString();
    if (cvs.charAt(0) != '(')
      cvs = "(" + cvs + ")";
    else {
      int depth = 0;
      int depthZeroCloses = 0;
      for (int i = 0; i < cv.length(); i++) {
        switch (cv.byteAt(i)) {
          case '(':
            depth++;
            break;
          case ')':
            depth--;
            if (depth == 0)
              depthZeroCloses++;
            break;
        }
      }
      // More than one top-level group (e.g. "(A)&(B)") still needs wrapping.
      if (depthZeroCloses > 1)
        cvs = "(" + cvs + ")";
    }
    visibilities.add(cvs);
  }
  // Returns the merged expression "(V1)|(V2)|...|(VN)" as bytes.
  // NOTE(review): String.getBytes() uses the platform default charset here.
  byte[] get() {
    StringBuilder sb = new StringBuilder();
    String sep = "";
    for (String cvs : visibilities) {
      sb.append(sep);
      sep = "|";
      sb.append(cvs);
    }
    return sb.toString().getBytes();
  }
}
| 3,292 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/filedata/ChunkInputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.filedata;
import java.io.IOException;
import java.io.InputStream;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeSet;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.PeekingIterator;
/**
* An input stream that reads file data stored in one or more Accumulo values. Used by
* {@link ChunkInputFormat} to present input streams to a mapper.
*/
public class ChunkInputStream extends InputStream {
private static final Logger log = LoggerFactory.getLogger(ChunkInputStream.class);
protected PeekingIterator<Entry<Key,Value>> source;
protected Key currentKey;
protected Set<Text> currentVis;
protected int currentChunk;
protected int currentChunkSize;
protected boolean gotEndMarker;
protected byte[] buf;
protected int count;
protected int pos;
public ChunkInputStream() {
source = null;
}
public ChunkInputStream(PeekingIterator<Entry<Key,Value>> in) throws IOException {
setSource(in);
}
public void setSource(PeekingIterator<Entry<Key,Value>> in) throws IOException {
if (source != null)
throw new IOException("setting new source without closing old one");
this.source = in;
currentVis = new TreeSet<>();
count = pos = 0;
if (!source.hasNext()) {
log.debug("source has no next");
gotEndMarker = true;
return;
}
// read forward until we reach a chunk
Entry<Key,Value> entry = source.next();
currentKey = entry.getKey();
buf = entry.getValue().get();
while (!currentKey.getColumnFamily().equals(FileDataIngest.CHUNK_CF)) {
log.debug("skipping key: " + currentKey.toString());
if (!source.hasNext())
return;
entry = source.next();
currentKey = entry.getKey();
buf = entry.getValue().get();
}
log.debug("starting chunk: " + currentKey.toString());
count = buf.length;
currentVis.add(currentKey.getColumnVisibility());
currentChunk = FileDataIngest.bytesToInt(currentKey.getColumnQualifier().getBytes(), 4);
currentChunkSize = FileDataIngest.bytesToInt(currentKey.getColumnQualifier().getBytes(), 0);
gotEndMarker = buf.length == 0;
if (currentChunk != 0) {
source = null;
throw new IOException("starting chunk number isn't 0 for " + currentKey.getRow());
}
}
private int fill() throws IOException {
if (source == null || !source.hasNext()) {
if (gotEndMarker)
return count = pos = 0;
else
throw new IOException("no end chunk marker but source has no data");
}
Entry<Key,Value> entry = source.peek();
Key thisKey = entry.getKey();
log.debug("evaluating key: " + thisKey.toString());
// check that we're still on the same row
if (!thisKey.equals(currentKey, PartialKey.ROW)) {
if (gotEndMarker)
return -1;
else {
String currentRow = currentKey.getRow().toString();
clear();
throw new IOException("got to the end of the row without end chunk marker " + currentRow);
}
}
log.debug("matches current key");
// ok to advance the iterator
source.next();
// check that this is part of a chunk
if (!thisKey.getColumnFamily().equals(FileDataIngest.CHUNK_CF)) {
log.debug("skipping non-chunk key");
return fill();
}
log.debug("is a chunk");
// check that the chunk size is the same as the one being read
if (currentChunkSize != FileDataIngest.bytesToInt(thisKey.getColumnQualifier().getBytes(), 0)) {
log.debug("skipping chunk of different size");
return fill();
}
// add the visibility to the list if it's not there
currentVis.add(thisKey.getColumnVisibility());
// check to see if it is an identical chunk with a different visibility
if (thisKey.getColumnQualifier().equals(currentKey.getColumnQualifier())) {
log.debug("skipping identical chunk with different visibility");
return fill();
}
if (gotEndMarker) {
log.debug("got another chunk after end marker: " + currentKey.toString() + " " + thisKey);
clear();
throw new IOException("found extra chunk after end marker");
}
// got new chunk of the same file, check that it's the next chunk
int thisChunk = FileDataIngest.bytesToInt(thisKey.getColumnQualifier().getBytes(), 4);
if (thisChunk != currentChunk + 1) {
log.debug(
"new chunk same file, unexpected chunkID: " + currentKey.toString() + " " + thisKey);
clear();
throw new IOException("missing chunks between " + currentChunk + " and " + thisChunk);
}
currentKey = thisKey;
currentChunk = thisChunk;
buf = entry.getValue().get();
pos = 0;
// check to see if it's the last chunk
if (buf.length == 0) {
gotEndMarker = true;
return fill();
}
return count = buf.length;
}
public Set<Text> getVisibilities() {
if (source != null)
throw new IllegalStateException(
"don't get visibilities before chunks have been completely read");
return currentVis;
}
@Override
public int read() throws IOException {
if (source == null)
return -1;
log.debug("pos: " + pos + " count: " + count);
if (pos >= count) {
if (fill() <= 0) {
log.debug("done reading input stream at key: "
+ (currentKey == null ? "null" : currentKey.toString()));
if (source != null && source.hasNext())
log.debug("next key: " + source.peek().getKey());
clear();
return -1;
}
}
return buf[pos++] & 0xff;
}
/**
 * Reads up to {@code len} bytes into {@code b}, pulling additional chunks from the
 * underlying entry iterator whenever the local buffer empties.
 *
 * @param b destination buffer
 * @param off starting offset into {@code b}
 * @param len maximum number of bytes to copy
 * @return the number of bytes read, or -1 if the stream was already at its end
 * @throws IOException if a chunk-sequencing error is detected while refilling
 */
@Override
public int read(byte[] b, int off, int len) throws IOException {
  // Standard InputStream argument-validation contract.
  if (b == null) {
    throw new NullPointerException();
  } else if ((off < 0) || (off > b.length) || (len < 0) || ((off + len) > b.length)
      || ((off + len) < 0)) {
    throw new IndexOutOfBoundsException();
  } else if (len == 0) {
    return 0;
  }
  log.debug("filling buffer " + off + " " + len);
  int total = 0;
  while (total < len) {
    // Bytes still unconsumed in the current chunk buffer.
    int avail = count - pos;
    log.debug(avail + " available in current local buffer");
    if (avail <= 0) {
      // Current chunk consumed; fetch the next one from the iterator.
      if (fill() <= 0) {
        log.debug("done reading input stream at key: "
            + (currentKey == null ? "null" : currentKey.toString()));
        if (source != null && source.hasNext())
          log.debug("next key: " + source.peek().getKey());
        clear();
        log.debug("filled " + total + " bytes");
        // Return -1 only when nothing at all was copied during this call.
        return total == 0 ? -1 : total;
      }
      avail = count - pos;
    }
    // Copy as much as fits: the smaller of what is buffered and what is still wanted.
    int cnt = Math.min(avail, len - total);
    log.debug("copying from local buffer: local pos " + pos + " into pos " + off + " len " + cnt);
    System.arraycopy(buf, pos, b, off, cnt);
    pos += cnt;
    off += cnt;
    total += cnt;
  }
  log.debug("filled " + total + " bytes");
  return total;
}
/** Drops all stream state so this instance can be re-armed with a new source. */
public void clear() {
  source = null;
  buf = null;
  currentKey = null;
  currentChunk = 0;
  pos = 0;
  count = 0;
}
/**
 * Closes the stream, draining any remaining chunks first so sequencing errors
 * (missing or extra chunks) are still reported even if the caller stopped early.
 */
@Override
public void close() throws IOException {
  try {
    for (int n = fill(); n > 0; n = fill()) {
      // discard remaining chunk data
    }
  } catch (IOException e) {
    clear();
    throw new IOException(e);
  }
  clear();
}
}
| 3,293 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.filedata;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.List;

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.admin.NewTableConfiguration;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.BatchWriterOpts;
import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
import org.apache.hadoop.io.Text;

import com.beust.jcommander.Parameter;
/**
* Takes a list of files and archives them into Accumulo keyed on hashes of the files.
*/
/**
 * Takes a list of files and archives them into Accumulo keyed on hashes of the files.
 * <p>
 * Each file is stored as a series of {@code ~chunk} entries under a row that is the hex MD5 of
 * the file contents, plus {@code refs} entries recording the original file name and extension.
 * An empty chunk is written last as an end-of-file marker.
 */
public class FileDataIngest {
  public static final Text CHUNK_CF = new Text("~chunk");
  public static final Text REFS_CF = new Text("refs");
  public static final String REFS_ORIG_FILE = "name";
  public static final String REFS_FILE_EXT = "filext";
  public static final ByteSequence CHUNK_CF_BS = new ArrayByteSequence(CHUNK_CF.getBytes(), 0,
      CHUNK_CF.getLength());
  public static final ByteSequence REFS_CF_BS = new ArrayByteSequence(REFS_CF.getBytes(), 0,
      REFS_CF.getLength());
  public static final String TABLE_EXISTS_MSG = "Table already exists. User may wish to delete existing "
      + "table and re-run example. Table name: ";

  final int chunkSize;
  // big-endian encoding of chunkSize, prepended to every chunk column qualifier
  final byte[] chunkSizeBytes;
  // reusable read buffer holding exactly one chunk
  final byte[] buf;
  MessageDigest md5digest;
  ColumnVisibility cv;

  /**
   * @param chunkSize number of bytes stored per chunk entry; 0 disables ingest
   * @param colvis column visibility applied to every entry written
   */
  public FileDataIngest(int chunkSize, ColumnVisibility colvis) {
    this.chunkSize = chunkSize;
    chunkSizeBytes = intToBytes(chunkSize);
    buf = new byte[chunkSize];
    try {
      md5digest = MessageDigest.getInstance("MD5");
    } catch (NoSuchAlgorithmException e) {
      // MD5 is a mandatory JDK algorithm; this cannot happen on a conforming JVM.
      throw new RuntimeException(e);
    }
    cv = colvis;
  }

  /**
   * Reads the given file twice (once to hash, once to chunk) and writes it into Accumulo.
   *
   * @param filename path of the file to ingest
   * @param bw batch writer for the target table
   * @return the row (hex MD5 of the file contents) under which the file was stored
   * @throws MutationsRejectedException if a mutation is rejected by the batch writer
   * @throws IOException if the file cannot be read
   */
  public String insertFileData(String filename, BatchWriter bw)
      throws MutationsRejectedException, IOException {
    if (chunkSize == 0)
      return "";
    md5digest.reset();
    // Pin the charset so uid/row keys are identical across platforms; the previous
    // platform-default String.getBytes() made the stored keys environment-dependent.
    String uid = hexString(md5digest.digest(filename.getBytes(StandardCharsets.UTF_8)));

    // read through file once, calculating hashes
    md5digest.reset();
    int numRead = 0;
    try (InputStream fis = new FileInputStream(filename)) {
      numRead = fis.read(buf);
      while (numRead >= 0) {
        if (numRead > 0) {
          md5digest.update(buf, 0, numRead);
        }
        numRead = fis.read(buf);
      }
    }

    String row = hexString(md5digest.digest());

    // write info to accumulo
    Mutation m = new Mutation(row);
    m.put(REFS_CF, KeyUtil.buildNullSepText(uid, REFS_ORIG_FILE), cv,
        new Value(filename.getBytes(StandardCharsets.UTF_8)));
    String fext = getExt(filename);
    if (fext != null)
      m.put(REFS_CF, KeyUtil.buildNullSepText(uid, REFS_FILE_EXT), cv,
          new Value(fext.getBytes(StandardCharsets.UTF_8)));
    bw.addMutation(m);

    // read through file again, writing chunks to accumulo
    int chunkCount = 0;
    try (InputStream fis = new FileInputStream(filename)) {
      numRead = fis.read(buf);
      while (numRead >= 0) {
        // Top up the buffer so every chunk except the last is exactly chunkSize bytes.
        while (numRead < buf.length) {
          int moreRead = fis.read(buf, numRead, buf.length - numRead);
          if (moreRead > 0)
            numRead += moreRead;
          else if (moreRead < 0)
            break;
        }
        m = new Mutation(row);
        Text chunkCQ = new Text(chunkSizeBytes);
        chunkCQ.append(intToBytes(chunkCount), 0, 4);
        m.put(CHUNK_CF, chunkCQ, cv, new Value(buf, 0, numRead));
        bw.addMutation(m);
        if (chunkCount == Integer.MAX_VALUE)
          throw new RuntimeException(
              "too many chunks for file " + filename + ", try raising chunk size");
        chunkCount++;
        numRead = fis.read(buf);
      }
    }

    // Write an empty chunk as the end-of-file marker.
    m = new Mutation(row);
    Text chunkCQ = new Text(chunkSizeBytes);
    chunkCQ.append(intToBytes(chunkCount), 0, 4);
    m.put(new Text(CHUNK_CF), chunkCQ, cv, new Value(new byte[0]));
    bw.addMutation(m);

    return row;
  }

  /**
   * Decodes a 4-byte big-endian integer starting at {@code offset}.
   *
   * @throws NumberFormatException if fewer than 4 bytes are available at the offset
   */
  public static int bytesToInt(byte[] b, int offset) {
    if (b.length <= offset + 3)
      throw new NumberFormatException("couldn't pull integer from bytes at offset " + offset);
    return (((b[offset] & 255) << 24) + ((b[offset + 1] & 255) << 16) + ((b[offset + 2] & 255) << 8)
        + ((b[offset + 3] & 255)));
  }

  /** Encodes an integer as 4 big-endian bytes (inverse of {@link #bytesToInt}). */
  public static byte[] intToBytes(int l) {
    byte[] b = new byte[4];
    b[0] = (byte) (l >>> 24);
    b[1] = (byte) (l >>> 16);
    b[2] = (byte) (l >>> 8);
    b[3] = (byte) (l);
    return b;
  }

  /** Returns the file extension (text after the last dot), or null if there is none. */
  private static String getExt(String filename) {
    if (!filename.contains("."))
      return null;
    return filename.substring(filename.lastIndexOf(".") + 1);
  }

  /** Renders bytes as a lowercase hex string, two characters per byte. */
  public String hexString(byte[] bytes) {
    // Presize: output is always exactly two chars per input byte.
    StringBuilder sb = new StringBuilder(2 * bytes.length);
    for (byte b : bytes) {
      sb.append(String.format("%02x", b));
    }
    return sb.toString();
  }

  public static class Opts extends ClientOnRequiredTable {
    @Parameter(names = "--vis", description = "use a given visibility for the new counts",
        converter = VisibilityConverter.class)
    ColumnVisibility visibility = new ColumnVisibility();

    @Parameter(names = "--chunk", description = "size of the chunks used to store partial files")
    int chunkSize = 64 * 1024;

    @Parameter(description = "<file> { <file> ... }")
    List<String> files = new ArrayList<>();
  }

  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(FileDataIngest.class.getName(), args, bwOpts);
    try (AccumuloClient client = opts.createAccumuloClient()) {
      Common.createTableWithNamespace(client, opts.getTableName(),
          new NewTableConfiguration().attachIterator(new IteratorSetting(1, ChunkCombiner.class)));
      try (BatchWriter bw = client.createBatchWriter(opts.getTableName(),
          bwOpts.getBatchWriterConfig())) {
        FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
        for (String filename : opts.files) {
          fdi.insertFileData(filename, bw);
        }
      }
    }
  }
}
| 3,294 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/filedata/CharacterHistogram.java | /// *
// * Licensed to the Apache Software Foundation (ASF) under one or more
// * contributor license agreements. See the NOTICE file distributed with
// * this work for additional information regarding copyright ownership.
// * The ASF licenses this file to You under the Apache License, Version 2.0
// * (the "License"); you may not use this file except in compliance with
// * the License. You may obtain a copy of the License at
// *
// * http://www.apache.org/licenses/LICENSE-2.0
// *
// * Unless required by applicable law or agreed to in writing, software
// * distributed under the License is distributed on an "AS IS" BASIS,
// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// * See the License for the specific language governing permissions and
// * limitations under the License.
// */
// package org.apache.accumulo.examples.filedata;
//
// import java.io.IOException;
// import java.io.InputStream;
// import java.util.Arrays;
// import java.util.List;
// import java.util.Map.Entry;
// import java.util.Properties;
//
// import org.apache.accumulo.core.client.security.tokens.PasswordToken;
// import org.apache.accumulo.core.data.Key;
// import org.apache.accumulo.core.data.Mutation;
// import org.apache.accumulo.core.data.Value;
// import org.apache.accumulo.core.iterators.user.SummingArrayCombiner;
// import org.apache.accumulo.core.security.ColumnVisibility;
// import org.apache.accumulo.examples.cli.ClientOpts;
// import org.apache.accumulo.hadoop.mapreduce.AccumuloOutputFormat;
// import org.apache.hadoop.io.Text;
// import org.apache.hadoop.mapreduce.Job;
// import org.apache.hadoop.mapreduce.Mapper;
//
// import com.beust.jcommander.Parameter;
//
/// **
// * A MapReduce that computes a histogram of byte frequency for each file and stores the histogram
// * alongside the file data. The {@link ChunkInputFormat} is used to read the file data from
// * Accumulo.
// */
// public class CharacterHistogram {
//
// private static final String VIS = "vis";
//
// public static class HistMapper extends Mapper<List<Entry<Key,Value>>,InputStream,Text,Mutation> {
// private ColumnVisibility cv;
//
// @Override
// public void map(List<Entry<Key,Value>> k, InputStream v, Context context)
// throws IOException, InterruptedException {
// Long[] hist = new Long[256];
// Arrays.fill(hist, 0L);
// int b = v.read();
// while (b >= 0) {
// hist[b] += 1L;
// b = v.read();
// }
// v.close();
// Mutation m = new Mutation(k.get(0).getKey().getRow());
// m.put("info", "hist", cv,
// new Value(SummingArrayCombiner.STRING_ARRAY_ENCODER.encode(Arrays.asList(hist))));
// context.write(new Text(), m);
// }
//
// @Override
// protected void setup(Context context) {
// cv = new ColumnVisibility(context.getConfiguration().get(VIS, ""));
// }
// }
//
// static class Opts extends ClientOpts {
// @Parameter(names = {"-t", "--table"}, required = true, description = "table to use")
// String tableName;
// @Parameter(names = "--vis")
// String visibilities = "";
// }
//
// @SuppressWarnings("deprecation")
// public static void main(String[] args) throws Exception {
// Opts opts = new Opts();
// opts.parseArgs(CharacterHistogram.class.getName(), args);
//
// Job job = Job.getInstance(opts.getHadoopConfig());
// job.setJobName(CharacterHistogram.class.getSimpleName());
// job.setJarByClass(CharacterHistogram.class);
// job.setInputFormatClass(ChunkInputFormat.class);
// job.getConfiguration().set(VIS, opts.visibilities);
// job.setMapperClass(HistMapper.class);
// job.setMapOutputKeyClass(Text.class);
// job.setMapOutputValueClass(Mutation.class);
//
// job.setNumReduceTasks(0);
//
// Properties props = opts.getClientProperties();
// ChunkInputFormat.setZooKeeperInstance(job, props.getProperty("instance.name"),
// props.getProperty("instance.zookeepers"));
// PasswordToken token = new PasswordToken(props.getProperty("auth.token"));
// ChunkInputFormat.setConnectorInfo(job, props.getProperty("auth.principal"), token);
// ChunkInputFormat.setInputTableName(job, opts.tableName);
// ChunkInputFormat.setScanAuthorizations(job, opts.auths);
//
// job.setOutputFormatClass(AccumuloOutputFormat.class);
// AccumuloOutputFormat.configure().clientProperties(opts.getClientProperties())
// .defaultTable(opts.tableName).createTables(true).store(job);
//
// System.exit(job.waitForCompletion(true) ? 0 : 1);
// }
// }
| 3,295 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/filedata/ChunkCombiner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.filedata;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.hadoop.io.Text;
/**
* This iterator dedupes chunks and sets their visibilities to the combined visibility of the refs
* columns. For example, it would combine
*
* <pre>
* row1 refs uid1\0a A&B V0
* row1 refs uid2\0b C&D V0
* row1 ~chunk 0 A&B V1
* row1 ~chunk 0 C&D V1
* row1 ~chunk 0 E&F V1
* row1 ~chunk 0 G&H V1
* </pre>
*
* into the following
*
* <pre>
* row1 refs uid1\0a A&B V0
* row1 refs uid2\0b C&D V0
* row1 ~chunk 0 (A&B)|(C&D) V1
* </pre>
*
 * {@link VisibilityCombiner} is used to combine the visibilities.
*/
public class ChunkCombiner implements SortedKeyValueIterator<Key,Value> {

  // Main iterator walking chunk (and other) entries.
  private SortedKeyValueIterator<Key,Value> source;
  // Independent deep copy of source, used to scan the refs column family of the current row.
  private SortedKeyValueIterator<Key,Value> refsSource;
  private static final Collection<ByteSequence> refsColf = Collections
      .singleton(FileDataIngest.REFS_CF_BS);
  // Single-entry cache: combined refs visibility for the last row looked up
  // (value may be null, meaning the row had no refs entries).
  private Map<Text,byte[]> lastRowVC = Collections.emptyMap();

  private Key topKey = null;
  private Value topValue = null;

  public ChunkCombiner() {}

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) throws IOException {
    this.source = source;
    this.refsSource = source.deepCopy(env);
  }

  @Override
  public boolean hasTop() {
    return topKey != null;
  }

  @Override
  public void next() throws IOException {
    findTop();
  }

  @Override
  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
      throws IOException {
    source.seek(range, columnFamilies, inclusive);
    findTop();
  }

  // Advances until _findTop() produces an entry with a usable visibility, or the
  // source is exhausted (leaving topKey/topValue null so hasTop() is false).
  private void findTop() throws IOException {
    do {
      topKey = null;
      topValue = null;
    } while (source.hasTop() && _findTop() == null);
  }

  /**
   * Consumes one entry (or one run of identical chunk entries) from the source.
   * Non-chunk entries pass through unchanged. For a chunk entry, all duplicates
   * sharing the same row/colfam/colqual are collapsed into one entry whose
   * timestamp is the maximum seen and whose visibility is the combined
   * visibility of the row's refs entries.
   *
   * @return the visibility bytes used for the emitted entry, or null when a
   *         chunk row has no refs to derive a visibility from (entry suppressed)
   * @throws IOException if the source fails, or if duplicate chunks carry
   *         different values (data corruption)
   */
  private byte[] _findTop() throws IOException {
    long maxTS;

    topKey = new Key(source.getTopKey());
    topValue = new Value(source.getTopValue());
    source.next();

    // Non-chunk entries (e.g. refs) are passed through with their own visibility.
    if (!topKey.getColumnFamilyData().equals(FileDataIngest.CHUNK_CF_BS))
      return topKey.getColumnVisibility().getBytes();

    maxTS = topKey.getTimestamp();

    // Collapse the run of entries identical in row/colfam/colqual, keeping the
    // newest timestamp and insisting that every duplicate carries the same value.
    while (source.hasTop() && source.getTopKey().equals(topKey, PartialKey.ROW_COLFAM_COLQUAL)) {
      if (source.getTopKey().getTimestamp() > maxTS)
        maxTS = source.getTopKey().getTimestamp();

      if (!topValue.equals(source.getTopValue()))
        throw new RuntimeException("values not equals " + topKey + " " + source.getTopKey() + " : "
            + diffInfo(topValue, source.getTopValue()));

      source.next();
    }

    byte[] vis = getVisFromRefs();
    if (vis != null) {
      // Re-key the collapsed entry with the combined refs visibility and max timestamp.
      topKey = new Key(topKey.getRowData().toArray(), topKey.getColumnFamilyData().toArray(),
          topKey.getColumnQualifierData().toArray(), vis, maxTS);
    }
    return vis;
  }

  /**
   * Returns the combined visibility of all refs entries in the current row,
   * or null if the row has none. Results are cached per row in lastRowVC.
   */
  private byte[] getVisFromRefs() throws IOException {
    Text row = topKey.getRow();
    // Cache hit covers the null ("no refs") case as well, via containsKey.
    if (lastRowVC.containsKey(row))
      return lastRowVC.get(row);
    Range range = new Range(row);
    refsSource.seek(range, refsColf, true);
    VisibilityCombiner vc = null;
    while (refsSource.hasTop()) {
      if (vc == null)
        vc = new VisibilityCombiner();
      vc.add(refsSource.getTopKey().getColumnVisibilityData());
      refsSource.next();
    }
    if (vc == null) {
      lastRowVC = Collections.singletonMap(row, null);
      return null;
    }
    lastRowVC = Collections.singletonMap(row, vc.get());
    return vc.get();
  }

  // Builds a human-readable description of where two values first differ,
  // for the corruption error message in _findTop().
  private String diffInfo(Value v1, Value v2) {
    if (v1.getSize() != v2.getSize()) {
      return "val len not equal " + v1.getSize() + "!=" + v2.getSize();
    }

    byte[] vb1 = v1.get();
    byte[] vb2 = v2.get();

    for (int i = 0; i < vb1.length; i++) {
      if (vb1[i] != vb2[i]) {
        return String.format("first diff at offset %,d 0x%02x != 0x%02x", i, 0xff & vb1[i],
            0xff & vb2[i]);
      }
    }

    return null;
  }

  @Override
  public Key getTopKey() {
    return topKey;
  }

  @Override
  public Value getTopValue() {
    return topValue;
  }

  @Override
  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    ChunkCombiner cc = new ChunkCombiner();
    try {
      cc.init(source.deepCopy(env), null, env);
    } catch (IOException e) {
      throw new IllegalArgumentException(e);
    }
    return cc;
  }
}
| 3,296 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/filedata/KeyUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.filedata;
import java.util.ArrayList;
import org.apache.hadoop.io.Text;
/**
* A utility for creating and parsing null-byte separated strings into/from Text objects.
*/
/**
 * A utility for creating and parsing null-byte separated strings into/from Text objects.
 */
public class KeyUtil {
  public static final byte[] nullbyte = new byte[] {0};

  /**
   * Join some number of strings using a null byte separator into a text object.
   *
   * @param s
   *          strings
   * @return a text object containing the strings separated by null bytes
   */
  public static Text buildNullSepText(String... s) {
    Text t = new Text(s[0]);
    for (int i = 1; i < s.length; i++) {
      t.append(nullbyte, 0, 1);
      // Encode explicitly as UTF-8 and append the full byte length. The previous
      // code appended s[i].length() bytes of the platform-default encoding, which
      // truncated/corrupted strings whose UTF-8 byte length differs from their
      // char count (any non-ASCII input).
      byte[] bytes = s[i].getBytes(java.nio.charset.StandardCharsets.UTF_8);
      t.append(bytes, 0, bytes.length);
    }
    return t;
  }

  /**
   * Split a text object using a null byte separator into an array of strings.
   *
   * @param t
   *          null-byte separated text object
   * @return an array of strings
   */
  public static String[] splitNullSepText(Text t) {
    ArrayList<String> s = new ArrayList<>();
    byte[] b = t.getBytes();
    int lastindex = 0;
    for (int i = 0; i < t.getLength(); i++) {
      if (b[i] == (byte) 0) {
        // Decode with UTF-8 to match the encoding used by buildNullSepText.
        s.add(new String(b, lastindex, i - lastindex, java.nio.charset.StandardCharsets.UTF_8));
        lastindex = i + 1;
      }
    }
    s.add(new String(b, lastindex, t.getLength() - lastindex,
        java.nio.charset.StandardCharsets.UTF_8));
    return s.toArray(new String[0]);
  }
}
| 3,297 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/filedata/FileDataQuery.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.filedata;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
/**
* Retrieves file data based on the hash of the file. Used by the
* {@link org.apache.accumulo.examples.dirlist.Viewer}. See README.dirlist for instructions.
*/
/**
 * Retrieves file data based on the hash of the file. Used by the
 * {@link org.apache.accumulo.examples.dirlist.Viewer}. See README.dirlist for instructions.
 */
public class FileDataQuery {
  // refs entries encountered before the chunk data of the most recent getData() call
  final List<Entry<Key,Value>> lastRefs;
  // reusable stream, re-pointed at each row's chunk entries by getData()
  private final ChunkInputStream cis;
  Scanner scanner;

  public FileDataQuery(AccumuloClient client, String tableName, Authorizations auths)
      throws TableNotFoundException {
    lastRefs = new ArrayList<>();
    cis = new ChunkInputStream();
    scanner = client.createScanner(tableName, auths);
  }

  /** Returns the refs entries seen during the most recent {@link #getData(String)} call. */
  public List<Entry<Key,Value>> getLastRefs() {
    return lastRefs;
  }

  /**
   * Positions the shared ChunkInputStream over the chunk data stored under the given hash.
   * Any refs entries encountered first are collected into {@link #getLastRefs()}.
   *
   * @param hash row key (hex hash of the file contents)
   * @return an input stream over the file's chunk data; only valid until the next call
   */
  public ChunkInputStream getData(String hash) throws IOException {
    scanner.setRange(new Range(hash));
    scanner.setBatchSize(1);
    lastRefs.clear();
    PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(scanner.iterator());
    // Collect leading refs entries. Guard every peek with hasNext: the old loop
    // threw NoSuchElementException for rows that have refs but no chunk entries.
    while (pi.hasNext()
        && !pi.peek().getKey().getColumnFamily().equals(FileDataIngest.CHUNK_CF)) {
      lastRefs.add(pi.peek());
      pi.next();
    }
    cis.clear();
    cis.setSource(pi);
    return cis;
  }

  /**
   * Returns up to {@code numBytes} bytes of the file's content as a String, or "" when
   * the file has no data.
   */
  public String getSomeData(String hash, int numBytes) throws IOException {
    ChunkInputStream is = getData(hash);
    byte[] buf = new byte[numBytes];
    int numRead = is.read(buf);
    if (numRead >= 0) {
      // Convert only the bytes actually read; the previous code converted the whole
      // buffer, appending NUL padding whenever the file was shorter than numBytes.
      return new String(buf, 0, numRead);
    }
    return "";
  }
}
| 3,298 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/filedata/ChunkInputFormat.java | /// *
// * Licensed to the Apache Software Foundation (ASF) under one or more
// * contributor license agreements. See the NOTICE file distributed with
// * this work for additional information regarding copyright ownership.
// * The ASF licenses this file to You under the Apache License, Version 2.0
// * (the "License"); you may not use this file except in compliance with
// * the License. You may obtain a copy of the License at
// *
// * http://www.apache.org/licenses/LICENSE-2.0
// *
// * Unless required by applicable law or agreed to in writing, software
// * distributed under the License is distributed on an "AS IS" BASIS,
// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// * See the License for the specific language governing permissions and
// * limitations under the License.
// */
// package org.apache.accumulo.examples.filedata;
//
// import java.io.IOException;
// import java.io.InputStream;
// import java.util.ArrayList;
// import java.util.List;
// import java.util.Map.Entry;
//
// import org.apache.accumulo.core.data.Key;
// import org.apache.accumulo.core.data.Value;
// import org.apache.accumulo.examples.util.FormatUtil;
// import org.apache.hadoop.mapreduce.InputSplit;
// import org.apache.hadoop.mapreduce.RecordReader;
// import org.apache.hadoop.mapreduce.TaskAttemptContext;
//
// import com.google.common.collect.Iterators;
// import com.google.common.collect.PeekingIterator;
//
/// **
// * An InputFormat that turns the file data ingested with {@link FileDataIngest} into an
/// InputStream
// * using {@link ChunkInputStream}. Mappers used with this InputFormat must close the InputStream.
// */
// @SuppressWarnings("deprecation")
// public class ChunkInputFormat extends
// org.apache.accumulo.core.client.mapreduce.InputFormatBase<List<Entry<Key,Value>>,InputStream> {
// @Override
// public RecordReader<List<Entry<Key,Value>>,InputStream> createRecordReader(InputSplit split,
// TaskAttemptContext context) {
// return new RecordReaderBase<>() {
// private PeekingIterator<Entry<Key,Value>> peekingScannerIterator;
//
// @Override
// public void initialize(InputSplit inSplit, TaskAttemptContext attempt) throws IOException {
// super.initialize(inSplit, attempt);
// peekingScannerIterator = Iterators.peekingIterator(scannerIterator);
// currentK = new ArrayList<>();
// currentV = new ChunkInputStream();
// }
//
// @Override
// public boolean nextKeyValue() throws IOException {
// log.debug("nextKeyValue called");
//
// currentK.clear();
// if (peekingScannerIterator.hasNext()) {
// ++numKeysRead;
// Entry<Key,Value> entry = peekingScannerIterator.peek();
// while (!entry.getKey().getColumnFamily().equals(FileDataIngest.CHUNK_CF)) {
// currentK.add(entry);
// peekingScannerIterator.next();
// if (!peekingScannerIterator.hasNext()) {
// return true;
// }
// entry = peekingScannerIterator.peek();
// }
// currentKey = entry.getKey();
// ((ChunkInputStream) currentV).setSource(peekingScannerIterator);
// if (log.isTraceEnabled()) {
// log.trace("Processing key/value pair: " + FormatUtil.formatTableEntry(entry, true));
// }
//
// return true;
// }
// return false;
// }
// };
// }
// }
| 3,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.