index
int64
0
0
repo_id
stringlengths
26
205
file_path
stringlengths
51
246
content
stringlengths
8
433k
__index_level_0__
int64
0
10k
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/worker/JobWorker.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster.job.worker;

import static io.mantisrx.master.events.LifecycleEventsProto.StatusEvent;
import static io.mantisrx.master.events.LifecycleEventsProto.WorkerStatusEvent;
import static java.util.Optional.ofNullable;

import java.io.IOException;
import java.util.List;
import java.util.Objects;
import java.util.Optional;

import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.jobcluster.job.IMantisWorkerEventProcessor;
import io.mantisrx.master.jobcluster.job.JobActor;
import io.mantisrx.master.scheduler.WorkerStateAdapter;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.StatusPayloads;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.domain.WorkerRequest;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.persistence.exceptions.InvalidWorkerStateChangeException;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import io.mantisrx.server.master.scheduler.WorkerLaunchFailed;
import io.mantisrx.server.master.scheduler.WorkerLaunched;
import io.mantisrx.server.master.scheduler.WorkerOnDisabledVM;
import io.mantisrx.server.master.scheduler.WorkerResourceStatus;
import io.mantisrx.server.master.scheduler.WorkerUnscheduleable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * This class encapsulates information about a worker of a job. It processes {@link WorkerEvent}s
 * against the worker's {@link IMantisWorkerMetadata}, publishing lifecycle status events and
 * reporting whether the resulting state change needs to be durably persisted.
 */
public class JobWorker implements IMantisWorkerEventProcessor {

    private static final Logger LOGGER = LoggerFactory.getLogger(JobWorker.class);

    private final IMantisWorkerMetadata metadata;
    private final LifecycleEventPublisher eventPublisher;

    private final Metrics metrics;
    private final MetricGroupId metricsGroupId;
    private final Counter numWorkerLaunched;
    private final Counter numWorkerTerminated;
    private final Counter numWorkerLaunchFailed;
    private final Counter numWorkerUnschedulable;
    private final Counter numWorkersDisabledVM;
    private final Counter numHeartBeatsReceived;
    private final Gauge lastWorkerLaunchToStartMillis;

    /**
     * Creates an instance of JobWorker.
     *
     * @param metadata The {@link IMantisWorkerMetadata} for this worker. Must not be null.
     * @param eventPublisher A {@link LifecycleEventPublisher} where lifecycle events are to be sent.
     */
    public JobWorker(final IMantisWorkerMetadata metadata, final LifecycleEventPublisher eventPublisher) {
        Preconditions.checkNotNull(metadata, "metadata");
        this.metadata = metadata;
        this.eventPublisher = eventPublisher;

        // Metrics are tagged by jobId so per-job worker activity can be distinguished.
        this.metricsGroupId = new MetricGroupId("JobWorker", new BasicTag("jobId", this.metadata.getJobId()));
        Metrics m = new Metrics.Builder()
                .id(metricsGroupId)
                .addCounter("numWorkerLaunched")
                .addCounter("numWorkerTerminated")
                .addCounter("numWorkerLaunchFailed")
                .addCounter("numWorkerUnschedulable")
                .addCounter("numWorkersDisabledVM")
                .addCounter("numHeartBeatsReceived")
                .addGauge("lastWorkerLaunchToStartMillis")
                .build();
        this.metrics = MetricsRegistry.getInstance().registerAndGet(m);
        this.numWorkerLaunched = metrics.getCounter("numWorkerLaunched");
        this.numWorkerTerminated = metrics.getCounter("numWorkerTerminated");
        this.numWorkerLaunchFailed = metrics.getCounter("numWorkerLaunchFailed");
        this.numWorkerUnschedulable = metrics.getCounter("numWorkerUnschedulable");
        this.numWorkersDisabledVM = metrics.getCounter("numWorkersDisabledVM");
        this.numHeartBeatsReceived = metrics.getCounter("numHeartBeatsReceived");
        this.lastWorkerLaunchToStartMillis = metrics.getGauge("lastWorkerLaunchToStartMillis");
    }

    public IMantisWorkerMetadata getMetadata() {
        return metadata;
    }

    // Setters on mutable metadata.
    // All mutations funnel through mutableMetadata(), which requires the concrete impl type.
    private MantisWorkerMetadataImpl mutableMetadata() {
        if (metadata instanceof MantisWorkerMetadataImpl) {
            return (MantisWorkerMetadataImpl) metadata;
        } else {
            throw new IllegalStateException();
        }
    }

    private void setState(WorkerState newState, long when, JobCompletedReason reason)
            throws InvalidWorkerStateChangeException {
        mutableMetadata().setState(newState, when, reason);
    }

    private void setLastHeartbeatAt(long lastHeartbeatAt) {
        mutableMetadata().setLastHeartbeatAt(lastHeartbeatAt);
    }

    private void setSlave(String slave) {
        mutableMetadata().setSlave(slave);
    }

    private void setSlaveID(String slaveID) {
        mutableMetadata().setSlaveID(slaveID);
    }

    private void setCluster(Optional<String> cluster) {
        mutableMetadata().setCluster(cluster);
    }

    /**
     * Marks the worker as being subscribed.
     * @param isSub whether a downstream listener is consuming this worker's output.
     */
    void setIsSubscribed(boolean isSub) {
        mutableMetadata().setIsSubscribed(isSub);
    }

    /**
     * Adds the associated ports data.
     * @param ports the {@link WorkerPorts} assigned to this worker.
     */
    void addPorts(final WorkerPorts ports) {
        mutableMetadata().addPorts(ports);
    }

    // Worker Event handlers

    /**
     * All events associated to this worker are processed in this method.
     *
     * @param workerEvent The {@link WorkerEvent} associated with this worker.
     *
     * @return boolean indicating whether a change worth persisting occurred.
     *
     * @throws InvalidWorkerStateChangeException thrown if the worker event led to an invalid state transition.
     */
    public boolean processEvent(WorkerEvent workerEvent) throws InvalidWorkerStateChangeException {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Processing event {} for worker {}", workerEvent, metadata.getWorkerId());
        }
        boolean persistStateRequired = false;
        if (workerEvent instanceof WorkerLaunched) {
            persistStateRequired = onWorkerLaunched((WorkerLaunched) workerEvent);
        } else if (workerEvent instanceof WorkerLaunchFailed) {
            persistStateRequired = onWorkerLaunchFailed((WorkerLaunchFailed) workerEvent);
        } else if (workerEvent instanceof WorkerUnscheduleable) {
            persistStateRequired = onWorkerUnscheduleable((WorkerUnscheduleable) workerEvent);
        } else if (workerEvent instanceof WorkerResourceStatus) {
            persistStateRequired = onWorkerResourceStatus((WorkerResourceStatus) workerEvent);
        } else if (workerEvent instanceof WorkerHeartbeat) {
            persistStateRequired = onHeartBeat((WorkerHeartbeat) workerEvent);
        } else if (workerEvent instanceof WorkerTerminate) {
            persistStateRequired = onTerminate((WorkerTerminate) workerEvent);
        } else if (workerEvent instanceof WorkerOnDisabledVM) {
            persistStateRequired = onDisabledVM((WorkerOnDisabledVM) workerEvent);
        } else if (workerEvent instanceof WorkerStatus) {
            persistStateRequired = onWorkerStatus((WorkerStatus) workerEvent);
        }
        return persistStateRequired;
    }

    private boolean onWorkerStatus(WorkerStatus workerEvent) throws InvalidWorkerStateChangeException {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("on WorkerStatus for {}", workerEvent);
        }
        switch (workerEvent.getState()) {
        case StartInitiated:
        case Started:
        case Completed:
        case Failed:
            setState(workerEvent.getState(), workerEvent.getEventTimeMs(), workerEvent.getStatus().getReason());
            eventPublisher.publishStatusEvent(new WorkerStatusEvent(
                    StatusEvent.StatusEventType.INFO,
                    "worker status update",
                    metadata.getStageNum(),
                    workerEvent.getWorkerId(),
                    workerEvent.getState()));
            return true;
        case Launched:
        case Accepted:
        case Noop:
        case Unknown:
        default:
            // Launched/Accepted transitions arrive via dedicated events, not WorkerStatus.
            LOGGER.warn("unexpected worker state {} in WorkerStatus update", workerEvent.getState().name());
            break;
        }
        return false;
    }

    private boolean onDisabledVM(WorkerOnDisabledVM workerEvent) {
        numWorkersDisabledVM.increment();
        LOGGER.info("on WorkerDisabledVM for {}", workerEvent);
        return false;
    }

    private boolean onTerminate(WorkerTerminate workerEvent) throws InvalidWorkerStateChangeException {
        numWorkerTerminated.increment();
        setState(workerEvent.getFinalState(), workerEvent.getEventTimeMs(), workerEvent.getReason());
        // NOTE(review): the published event always reports WorkerState.Failed even though the
        // metadata is set to workerEvent.getFinalState() — confirm this asymmetry is intended.
        eventPublisher.publishStatusEvent(new WorkerStatusEvent(
                StatusEvent.StatusEventType.INFO,
                "worker terminated",
                -1,
                workerEvent.getWorkerId(),
                WorkerState.Failed,
                ofNullable(metadata.getSlave())));
        return true;
    }

    /**
     * Updates this {@link JobWorker}'s metadata from a {@link WorkerLaunched} event received by Mesos.
     * This method will update metadata followed by updating the worker's state via
     * {@link JobWorker#setState(WorkerState, long, JobCompletedReason)}. If any of the metadata
     * fails to save, an {@link InvalidWorkerStateChangeException} is thrown and eventually bubbled up to the
     * corresponding {@link JobActor} to handle.
     *
     * @param workerEvent an event received by Mesos with worker metadata after it was launched.
     *
     * @return {@code true} if all saving state succeeds
     *         {@code false} otherwise (and don't durably persist; expect worker to be relaunched because
     *         our state doesn't match Mesos)
     */
    private boolean onWorkerLaunched(WorkerLaunched workerEvent) throws InvalidWorkerStateChangeException {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Processing for worker {} with id {}", workerEvent, metadata.getWorkerId());
        }
        setSlave(workerEvent.getHostname());
        addPorts(workerEvent.getPorts());
        setSlaveID(workerEvent.getVmId());
        setCluster(workerEvent.getClusterName());
        setState(WorkerState.Launched, workerEvent.getEventTimeMs(), JobCompletedReason.Normal);
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Worker {} state changed to Launched", workerEvent.getWorkerId());
        }
        numWorkerLaunched.increment();
        try {
            eventPublisher.publishStatusEvent(new WorkerStatusEvent(
                    StatusEvent.StatusEventType.INFO,
                    "scheduled on " + workerEvent.getHostname()
                            + " with ports " + Jackson.toJson(workerEvent.getPorts()),
                    workerEvent.getStageNum(),
                    workerEvent.getWorkerId(),
                    WorkerState.Launched));
        } catch (IOException e) {
            // Failure to serialize ports for the status message must not fail the launch itself.
            LOGGER.warn("Error publishing status event for worker {} launch", workerEvent.getWorkerId(), e);
        }
        return true;
    }

    // handle worker status update from Mesos
    private boolean onWorkerResourceStatus(final WorkerResourceStatus workerEvent)
            throws InvalidWorkerStateChangeException {
        WorkerState workerStateFromEvent = WorkerStateAdapter.from(workerEvent.getState());
        // if worker current state is terminated, but we get a resource update from Mesos
        // saying worker is still running, terminate the task
        if (WorkerState.isRunningState(workerStateFromEvent)) {
            if (WorkerState.isTerminalState(metadata.getState())) {
                numWorkerTerminated.increment();
                // kill worker
            }
        }
        // Resource status is terminal but our metadata shows worker as running => update our worker state
        // based on event and
        if (WorkerState.isTerminalState(workerStateFromEvent)) {
            if (!WorkerState.isTerminalState(metadata.getState())) {
                LOGGER.info("Worker {} state changed to {}", this, workerEvent.getState());
                setState(workerStateFromEvent, workerEvent.getEventTimeMs(), JobCompletedReason.Normal);
                eventPublisher.publishStatusEvent(new WorkerStatusEvent(
                        StatusEvent.StatusEventType.INFO,
                        "worker resource state " + workerEvent.getMessage(),
                        -1,
                        workerEvent.getWorkerId(),
                        workerStateFromEvent,
                        ofNullable(metadata.getSlave())));
                return true;
            }
        }
        return false;
    }

    /**
     * Handles a {@link WorkerHeartbeat} event.
     *
     * Assumptions:
     *
     * 1. Heartbeats from workers of terminated jobs are ignored at a higher level.
     *
     * @param workerEvent a {@link WorkerHeartbeat} event.
     *
     * @throws InvalidWorkerStateChangeException if it fails to persist worker state
     *         via {@link JobWorker#setState(WorkerState, long, JobCompletedReason)}.
     */
    private boolean onHeartBeat(WorkerHeartbeat workerEvent) throws InvalidWorkerStateChangeException {
        numHeartBeatsReceived.increment();
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("Job {} Processing onHeartBeat for {}", this.metadata.getJobId(), metadata.getWorkerId());
        }
        WorkerState workerState = metadata.getState();
        setLastHeartbeatAt(workerEvent.getEventTimeMs());
        boolean persistStateRequired = false;
        // The first heartbeat implicitly moves the worker to Started.
        if (workerState != WorkerState.Started) {
            setState(WorkerState.Started, workerEvent.getEventTimeMs(), JobCompletedReason.Normal);
            persistStateRequired = true;
            final long startLatency = workerEvent.getEventTimeMs() - metadata.getLaunchedAt();
            if (startLatency > 0) {
                lastWorkerLaunchToStartMillis.set(startLatency);
            } else {
                // Negative latency implies clock skew or a missing/late launchedAt timestamp.
                LOGGER.info("Unexpected error when computing startlatency for {} start time {} launch time {}",
                        workerEvent.getWorkerId().getId(), workerEvent.getEventTimeMs(), metadata.getLaunchedAt());
            }
            LOGGER.info("Job {} Worker {} started ", metadata.getJobId(), metadata.getWorkerId());
            eventPublisher.publishStatusEvent(new WorkerStatusEvent(
                    StatusEvent.StatusEventType.INFO,
                    "setting worker Started on heartbeat",
                    workerEvent.getStatus().getStageNum(),
                    workerEvent.getWorkerId(),
                    WorkerState.Started,
                    ofNullable(metadata.getSlave())));
        }
        // Heartbeats piggy-back subscription-state payloads; track changes so they get persisted.
        List<Status.Payload> payloads = workerEvent.getStatus().getPayloads();
        for (Status.Payload payload : payloads) {
            if (payload.getType().equals(StatusPayloads.Type.SubscriptionState.toString())) {
                String data = payload.getData();
                try {
                    boolean subscriptionStatus = Boolean.parseBoolean(data);
                    if (getMetadata().getIsSubscribed() != subscriptionStatus) {
                        setIsSubscribed(subscriptionStatus);
                        persistStateRequired = true;
                    }
                } catch (Exception e) {
                    // could not parse subscriptionstatus
                    LOGGER.warn("Exception parsing subscription payload", e);
                }
            }
        }
        return persistStateRequired;
    }

    private boolean onWorkerLaunchFailed(WorkerLaunchFailed workerEvent) throws InvalidWorkerStateChangeException {
        numWorkerLaunchFailed.increment();
        setState(WorkerState.Failed, workerEvent.getEventTimeMs(), JobCompletedReason.Error);
        eventPublisher.publishStatusEvent(new WorkerStatusEvent(
                StatusEvent.StatusEventType.ERROR,
                "worker launch failed, reason: " + workerEvent.getErrorMessage(),
                workerEvent.getStageNum(),
                workerEvent.getWorkerId(),
                WorkerState.Failed));
        return true;
    }

    private boolean onWorkerUnscheduleable(WorkerUnscheduleable workerEvent) {
        // we shouldn't reach here for Worker Unscheduleable events, as Job Actor would update the readyAt time
        // in the JobActor on receiving this event
        numWorkerUnschedulable.increment();
        return true;
    }

    /**
     * Processes a {@link WorkerEvent} and if successful, saves/update state in the {@link MantisJobStore}.
     *
     * @param event a worker event which can be one of many event types such as launched, heartbeat, etc.
     * @param jobStore a place to persist metadata.
     *
     * @throws InvalidWorkerStateChangeException if a worker failed to persist its state.
     * @throws IOException if the job store failed to update the worker metadata.
     */
    @Override
    public void processEvent(final WorkerEvent event, final MantisJobStore jobStore)
            throws InvalidWorkerStateChangeException, IOException {
        if (event.getWorkerId().equals(this.metadata.getWorkerId())) {
            boolean persistStateRequired = processEvent(event);
            if (persistStateRequired) {
                jobStore.updateWorker(this.metadata);
            }
        } else {
            LOGGER.warn("Current workerId is " + this.metadata.getWorkerId() + " event received from workerId "
                    + event.getWorkerId() + " ignoring");
            // pbly event from an old worker number
        }
    }

    /**
     * Builder to enable fluid creation of a {@link JobWorker}.
     */
    public static class Builder {
        private static final int INVALID_VALUE = -1;
        private int workerIndex = INVALID_VALUE;
        private int workerNumber = INVALID_VALUE;
        private String jobId = null;
        private int stageNum = INVALID_VALUE;
        private int numberOfPorts = INVALID_VALUE;
        private WorkerPorts workerPorts = null;
        private WorkerState state = WorkerState.Accepted;
        private String slave = null;
        private String slaveID = null;
        private long acceptedAt = System.currentTimeMillis();
        private long launchedAt = -1;
        private long startingAt = -1;
        private long startedAt = -1;
        private long completedAt = -1;
        private JobCompletedReason reason = JobCompletedReason.Normal;
        private int resubmitOf = 0;
        private int totalResubmitCount = 0;
        private Optional<String> preferredCluster = Optional.empty();
        private IMantisWorkerMetadata metadata;
        private LifecycleEventPublisher eventPublisher;

        /**
         * Default constructor.
         */
        public Builder() {
        }

        /**
         * Required. WorkerIndex of this worker.
         * @param ind worker index within the stage.
         * @return this builder.
         */
        public JobWorker.Builder withWorkerIndex(int ind) {
            this.workerIndex = ind;
            return this;
        }

        /**
         * Required. Worker number associated with this worker.
         * @param num globally unique (per job) worker number.
         * @return this builder.
         */
        public JobWorker.Builder withWorkerNumber(int num) {
            this.workerNumber = num;
            return this;
        }

        /**
         * Optional. Resubmit count associated with this workerIndex.
         * @param c total resubmit count.
         * @return this builder.
         */
        public JobWorker.Builder withResubmitCount(int c) {
            this.totalResubmitCount = c;
            return this;
        }

        /**
         * Optional. If this is a resubmit of an old worker then the Worker Number of the old worker.
         * @param r worker number of the worker being resubmitted.
         * @return this builder.
         */
        public JobWorker.Builder withResubmitOf(int r) {
            this.resubmitOf = r;
            return this;
        }

        /**
         * Required. Job id for this worker.
         * @param jid job id string.
         * @return this builder.
         */
        public JobWorker.Builder withJobId(String jid) {
            this.jobId = jid;
            return this;
        }

        /**
         * Required (if String version not used). {@link JobId} of the job of this worker.
         * @param jid job id object.
         * @return this builder.
         */
        public JobWorker.Builder withJobId(JobId jid) {
            this.jobId = jid.getId();
            return this;
        }

        /**
         * Required. Stage number for this worker.
         * @param num stage number.
         * @return this builder.
         */
        public JobWorker.Builder withStageNum(int num) {
            this.stageNum = num;
            return this;
        }

        /**
         * Required. Number of ports to be assigned to this worker.
         * @param portNums number of ports.
         * @return this builder.
         */
        public JobWorker.Builder withNumberOfPorts(int portNums) {
            this.numberOfPorts = portNums;
            return this;
        }

        /**
         * Required. Details of the ports assigned to this worker.
         * @param workerP assigned ports.
         * @return this builder.
         */
        public JobWorker.Builder withWorkerPorts(WorkerPorts workerP) {
            this.workerPorts = workerP;
            return this;
        }

        /**
         * Optional. The {@link WorkerState} associated with this worker.
         * @param state initial worker state (defaults to Accepted).
         * @return this builder.
         */
        public JobWorker.Builder withState(WorkerState state) {
            this.state = state;
            return this;
        }

        /**
         * (Optional) Mesos Slave on which this worker is executing.
         * @param slave slave hostname.
         * @return this builder.
         */
        public JobWorker.Builder withSlave(String slave) {
            this.slave = slave;
            return this;
        }

        /**
         * (Optional) Mesos slave Id on which this worker is executing.
         * @param slaveid slave id.
         * @return this builder.
         */
        public JobWorker.Builder withSlaveID(String slaveid) {
            this.slaveID = slaveid;
            return this;
        }

        /**
         * (Optional) The timestamp this worker went into accepted state.
         * @param acc accepted-at millis.
         * @return this builder.
         */
        public JobWorker.Builder withAcceptedAt(long acc) {
            this.acceptedAt = acc;
            return this;
        }

        /**
         * (Optional) The timestamp this worker went into launched state.
         * @param la launched-at millis.
         * @return this builder.
         */
        public JobWorker.Builder withLaunchedAt(long la) {
            this.launchedAt = la;
            return this;
        }

        /**
         * (Optional) The timestamp this worker went into starting state.
         * @param sa starting-at millis.
         * @return this builder.
         */
        public JobWorker.Builder withStartingAt(long sa) {
            this.startingAt = sa;
            return this;
        }

        /**
         * (Optional) The timestamp this worker went into started state.
         * @param sa started-at millis.
         * @return this builder.
         */
        public JobWorker.Builder withStartedAt(long sa) {
            this.startedAt = sa;
            return this;
        }

        /**
         * (Optional) The timestamp this worker went into terminal state.
         * @param ca completed-at millis.
         * @return this builder.
         */
        public JobWorker.Builder withCompletedAt(long ca) {
            this.completedAt = ca;
            return this;
        }

        /**
         * (Optional) The preferred cluster where this worker should be scheduled.
         * @param preferredCluster optional cluster name.
         * @return this builder.
         */
        public JobWorker.Builder withPreferredCluster(Optional<String> preferredCluster) {
            this.preferredCluster = preferredCluster;
            return this;
        }

        /**
         * (Optional) The reason for worker termination.
         * @param reason completion reason.
         * @return this builder.
         */
        public JobWorker.Builder withJobCompletedReason(JobCompletedReason reason) {
            this.reason = reason;
            return this;
        }

        /**
         * (Required) The listener where worker lifecycle events are published.
         * @param publisher lifecycle event publisher.
         * @return this builder.
         */
        public JobWorker.Builder withLifecycleEventsPublisher(LifecycleEventPublisher publisher) {
            this.eventPublisher = publisher;
            return this;
        }

        /**
         * Helper builder which clones from an instance of {@link IMantisWorkerMetadata} object.
         * @param cloneFrom metadata to copy all fields from.
         * @return this builder.
         */
        public JobWorker.Builder from(IMantisWorkerMetadata cloneFrom) {
            workerIndex = cloneFrom.getWorkerIndex();
            workerNumber = cloneFrom.getWorkerNumber();
            jobId = cloneFrom.getJobId();
            stageNum = cloneFrom.getStageNum();
            numberOfPorts = cloneFrom.getNumberOfPorts();
            if (cloneFrom.getPorts().isPresent()) {
                workerPorts = cloneFrom.getPorts().get();
            }
            state = cloneFrom.getState();
            slave = cloneFrom.getSlave();
            slaveID = cloneFrom.getSlaveID();
            acceptedAt = cloneFrom.getAcceptedAt();
            launchedAt = cloneFrom.getLaunchedAt();
            startingAt = cloneFrom.getStartingAt();
            startedAt = cloneFrom.getStartedAt();
            completedAt = cloneFrom.getCompletedAt();
            reason = cloneFrom.getReason();
            resubmitOf = cloneFrom.getResubmitOf();
            totalResubmitCount = cloneFrom.getTotalResubmitCount();
            preferredCluster = cloneFrom.getPreferredClusterOptional();
            return this;
        }

        /**
         * Helper builder that clones from given {@link WorkerRequest}.
         * @param workerRequest request to copy scheduling fields from.
         * @return this builder.
         */
        public JobWorker.Builder from(WorkerRequest workerRequest) {
            this.workerIndex = workerRequest.getWorkerIndex();
            this.workerNumber = workerRequest.getWorkerNumber();
            this.jobId = workerRequest.getJobId();
            this.stageNum = workerRequest.getWorkerStage();
            this.numberOfPorts = workerRequest.getNumPortsPerInstance();
            this.preferredCluster = workerRequest.getPreferredCluster();
            return this;
        }

        /**
         * Creates and returns an instance of {@link JobWorker}.
         *
         * @return the built {@link JobWorker}.
         * @throws IllegalArgumentException if any required field is missing or invalid.
         */
        public JobWorker build() {
            Objects.requireNonNull(jobId, "Job Id cannot be null");
            // Fix: these messages previously used SLF4J-style "{}" placeholders inside
            // String.format, so the offending value never appeared in the exception message.
            if (workerIndex <= INVALID_VALUE) {
                IllegalArgumentException ex = new IllegalArgumentException(
                        String.format("Invalid workerIndex %d specified", workerIndex));
                LOGGER.error("Invalid worker index specified {}", workerIndex, ex);
                throw ex;
            }
            if (workerNumber <= INVALID_VALUE) {
                LOGGER.error("Invalid worker number specified {}", workerNumber);
                throw new IllegalArgumentException(String.format("Invalid workerNumber %d specified", workerNumber));
            }
            if (stageNum <= INVALID_VALUE) {
                LOGGER.error("Invalid stage num specified {}", stageNum);
                throw new IllegalArgumentException(String.format("Invalid stageNum %d specified", stageNum));
            }
            if (numberOfPorts <= INVALID_VALUE) {
                LOGGER.error("Invalid num ports specified {}", numberOfPorts);
                throw new IllegalArgumentException(String.format("Invalid no of Ports %d specified", numberOfPorts));
            }
            if (totalResubmitCount < 0) {
                LOGGER.error("Invalid resubmit count specified {}", totalResubmitCount);
                throw new IllegalArgumentException(
                        String.format("Invalid resubmit Count %d specified", totalResubmitCount));
            }
            if (eventPublisher == null) {
                IllegalArgumentException ex = new IllegalArgumentException(
                        "lifecycle event publisher cannot be null");
                LOGGER.error("lifecycle event publisher is null", ex);
                throw ex;
            }
            this.metadata = new MantisWorkerMetadataImpl(workerIndex, workerNumber, jobId, stageNum,
                    numberOfPorts, workerPorts, state, slave, slaveID, acceptedAt, launchedAt, startingAt,
                    startedAt, completedAt, reason, resubmitOf, totalResubmitCount, preferredCluster);
            return new JobWorker(this.metadata, this.eventPublisher);
        }
    }

    @Override
    public boolean equals(final Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        final JobWorker jobWorker = (JobWorker) o;
        return Objects.equals(metadata, jobWorker.metadata)
                && Objects.equals(eventPublisher, jobWorker.eventPublisher);
    }

    @Override
    public int hashCode() {
        return Objects.hash(metadata, eventPublisher);
    }

    @Override
    public String toString() {
        return "JobWorker{"
                + "metadata=" + metadata
                + '}';
    }
}
4,400
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/worker/WorkerHeartbeat.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster.job.worker;

import java.time.Instant;

import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.scheduler.WorkerEvent;

/**
 * A WorkerHeartbeat object encapsulates the heart beat data sent by the worker to the master.
 * Instances are immutable once constructed.
 */
public class WorkerHeartbeat implements WorkerEvent {

    // All fields are derived from the Status in the constructor and never change afterwards.
    private final WorkerId workerId;
    private final Status heartBeat;
    private final WorkerState workerState;
    private final long time;

    /**
     * Creates an instance of this class from a {@link Status} object, using the status's own
     * timestamp as the event time.
     * @param hb the heartbeat status sent by the worker.
     */
    public WorkerHeartbeat(Status hb) {
        this(hb, Instant.ofEpochMilli(hb.getTimestamp()));
    }

    /**
     * For testing only.
     *
     * @param hb the heartbeat status sent by the worker.
     * @param time artificially injected event time.
     */
    public WorkerHeartbeat(Status hb, Instant time) {
        this.heartBeat = hb;
        String jobId = heartBeat.getJobId();
        int index = heartBeat.getWorkerIndex();
        int number = heartBeat.getWorkerNumber();
        this.time = time.toEpochMilli();
        this.workerId = new WorkerId(jobId, index, number);
        this.workerState = toWorkerState(heartBeat.getState());
    }

    /** Maps a {@link MantisJobState} to the equivalent {@link WorkerState}; Unknown if unmapped. */
    private static WorkerState toWorkerState(MantisJobState state) {
        switch (state) {
        case Launched:
            return WorkerState.Launched;
        case Started:
            return WorkerState.Started;
        case StartInitiated:
            return WorkerState.StartInitiated;
        case Completed:
            return WorkerState.Completed;
        case Failed:
            return WorkerState.Failed;
        case Noop:
            return WorkerState.Noop;
        default:
            return WorkerState.Unknown;
        }
    }

    @Override
    public WorkerId getWorkerId() {
        return this.workerId;
    }

    public Status getStatus() {
        return this.heartBeat;
    }

    public WorkerState getState() {
        return workerState;
    }

    @Override
    public long getEventTimeMs() {
        return this.time;
    }

    @Override
    public String toString() {
        return "WorkerHeartbeat [workerId=" + workerId + ", heartBeat=" + heartBeat
                + ", workerState=" + workerState + ", time=" + time + "]";
    }
}
4,401
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/worker/WorkerStatus.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster.job.worker;

import java.time.Instant;

import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.scheduler.WorkerEvent;

/**
 * Encapsulates the status data sent by the worker to the master.
 * Instances are immutable once constructed.
 */
public class WorkerStatus implements WorkerEvent {

    // All fields are derived from the Status in the constructor and never change afterwards.
    private final WorkerId workerId;
    private final Status heartBeat;
    private final WorkerState workerState;
    private final long time;

    /**
     * Creates an instance using the given {@link Status}, using the status's own timestamp
     * as the event time.
     * @param hb the status sent by the worker.
     */
    public WorkerStatus(Status hb) {
        this(hb, Instant.ofEpochMilli(hb.getTimestamp()));
    }

    /**
     * Used for testing.
     *
     * @param hb the status sent by the worker.
     * @param time artificially inject time.
     */
    public WorkerStatus(Status hb, Instant time) {
        this.heartBeat = hb;
        String jobId = heartBeat.getJobId();
        int index = heartBeat.getWorkerIndex();
        int number = heartBeat.getWorkerNumber();
        this.time = time.toEpochMilli();
        this.workerId = new WorkerId(jobId, index, number);
        this.workerState = toWorkerState(heartBeat.getState());
    }

    /** Maps a {@link MantisJobState} to the equivalent {@link WorkerState}; Unknown if unmapped. */
    private static WorkerState toWorkerState(MantisJobState state) {
        switch (state) {
        case Launched:
            return WorkerState.Launched;
        case Started:
            return WorkerState.Started;
        case StartInitiated:
            return WorkerState.StartInitiated;
        case Completed:
            return WorkerState.Completed;
        case Failed:
            return WorkerState.Failed;
        case Noop:
            return WorkerState.Noop;
        default:
            return WorkerState.Unknown;
        }
    }

    @Override
    public WorkerId getWorkerId() {
        return this.workerId;
    }

    public Status getStatus() {
        return this.heartBeat;
    }

    public WorkerState getState() {
        return workerState;
    }

    @Override
    public long getEventTimeMs() {
        return this.time;
    }

    @Override
    public String toString() {
        // Fix: previously reported "WorkerHeartbeat [...]" — a copy-paste from WorkerHeartbeat.
        return "WorkerStatus [workerId=" + workerId + ", heartBeat=" + heartBeat
                + ", workerState=" + workerState + ", time=" + time + "]";
    }
}
4,402
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/worker/IMantisWorkerMetadata.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster.job.worker;

import java.time.Instant;
import java.util.Optional;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonSubTypes;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonTypeInfo;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.domain.JobId;

/**
 * Metadata object for a Mantis worker. Modification operations do not perform locking. Instead, a lock can be
 * obtained via the <code>obtainLock()</code> method which is an instance of {@link java.lang.AutoCloseable}.
 */
// Jackson polymorphic-type config: serialized form carries a "type" property resolving to the impl class.
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "type")
@JsonSubTypes({
        @JsonSubTypes.Type(value = MantisWorkerMetadataImpl.class)
})
public interface IMantisWorkerMetadata {

    /**
     * Index assigned to this worker.
     * @return the worker index within its stage.
     */
    int getWorkerIndex();

    /**
     * Number assigned to this worker.
     * @return the worker number.
     */
    int getWorkerNumber();

    /**
     * JobId of the Job this worker belongs to.
     * @return the job id as a string.
     */
    String getJobId();

    /**
     * Returns the {@link JobId} for this worker.
     * @return the job id object (not serialized to JSON).
     */
    @JsonIgnore
    JobId getJobIdObject();

    /**
     * Returns the {@link WorkerId} associated with this worker.
     * @return the worker id.
     */
    WorkerId getWorkerId();

    /**
     * The stage number this worker belongs to.
     * @return the stage number.
     */
    int getStageNum();

    /**
     * @return the {@link WorkerPorts} for this worker.
     */
    WorkerPorts getWorkerPorts();

    /**
     * The port on which Metrics stream is served.
     * @return the metrics port.
     */
    int getMetricsPort();

    /**
     * The port which can be used to connect jconsole to.
     * @return the debug port.
     */
    int getDebugPort();

    /**
     * A custom port associated with Netflix Admin console if enabled.
     * @return the console port.
     */
    int getConsolePort();

    /**
     * A free form port to be used by the job for any purpose.
     * @return the custom port.
     */
    int getCustomPort();

    /**
     * The port which can be used to connect to other workers.
     *
     * @return the sink port.
     */
    int getSinkPort();

    /**
     * The AWS cluster on which the worker was launched.
     * Used to maintain affinity during deploys.
     * @return the cluster name, if known.
     */
    Optional<String> getCluster();

    /**
     * Get number of ports for this worker, including the metrics port.
     * @return The number of ports
     */
    int getNumberOfPorts();

    /**
     * Returns an optional of {@link WorkerPorts} associated with this worker.
     * @return the assigned ports, empty if none assigned yet.
     */
    Optional<WorkerPorts> getPorts();

    /**
     * A count of the number of times this worker has been resubmitted.
     * @return the total resubmit count.
     */
    int getTotalResubmitCount();

    /**
     * Get the worker number (not index) of which this is a resubmission of.
     * @return the predecessor worker number.
     */
    int getResubmitOf();

    /**
     * Returns the current {@link WorkerState} of this worker.
     * @return the current state.
     */
    WorkerState getState();

    /**
     * Returns the mesos slave on which this worker is executing.
     * @return the slave hostname.
     */
    String getSlave();

    /**
     * Returns the mesos slaveId on which this worker is executing.
     * @return the slave id.
     */
    String getSlaveID();

    /**
     * Returns whether a listener exists that is streaming the results computed by this worker.
     * @return true if a downstream listener is subscribed.
     */
    boolean getIsSubscribed();

    /**
     * The timestamp at which this worker went into Accepted state.
     * @return accepted-at epoch millis.
     */
    long getAcceptedAt();

    /**
     * The timestamp at which this worker landed on a mesos slave.
     * @return launched-at epoch millis.
     */
    long getLaunchedAt();

    /**
     * The timestamp at which this worker started initialization.
     * @return starting-at epoch millis.
     */
    long getStartingAt();

    /**
     * The timestamp the worker reported as running.
     * @return started-at epoch millis.
     */
    long getStartedAt();

    /**
     * The timestamp the worker was marked for termination.
     * @return completed-at epoch millis.
     */
    long getCompletedAt();

    /**
     * If in terminal state returns the reason for completion.
     * @return the completion reason.
     */
    JobCompletedReason getReason();

    /**
     * The preferred AWS cluster on which to schedule this worker.
     * @return the preferred cluster, if any.
     */
    Optional<String> getPreferredClusterOptional();

    /**
     * The last time a heartbeat was received from this worker.
     * @return the last heartbeat instant, empty if none received.
     */
    Optional<Instant> getLastHeartbeatAt();
}
4,403
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/worker/MantisWorkerMetadataImpl.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master.jobcluster.job.worker;

import java.time.Instant;
import java.util.Objects;
import java.util.Optional;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonFilter;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.exceptions.InvalidWorkerStateChangeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Holds metadata related to a Mantis Worker: identity (job id, index, number), allocated ports,
 * lifecycle state with transition timestamps, resubmission counters, and placement information.
 * State mutations are funneled through {@link #setState(WorkerState, long, JobCompletedReason)},
 * which validates the transition against {@link WorkerState#isValidStateChgTo}.
 */
@JsonFilter("topLevelFilter")
public class MantisWorkerMetadataImpl implements IMantisWorkerMetadata {

    private static final Logger LOGGER = LoggerFactory.getLogger(MantisWorkerMetadataImpl.class);

    /**
     * metrics, debug, console and custom port.
     */
    @JsonIgnore
    public static final int MANTIS_SYSTEM_ALLOCATED_NUM_PORTS = 4;

    private final int workerIndex;
    private int workerNumber;
    private String jobId;
    @JsonIgnore
    private JobId jobIdObj;
    private final int stageNum;
    private final int numberOfPorts;
    @JsonIgnore
    private final WorkerId workerId;
    private WorkerPorts workerPorts;

    // Mutable lifecycle fields are volatile: read by other threads while the worker runs.
    private volatile WorkerState state;
    private volatile String slave;
    private volatile String slaveID;
    private volatile long acceptedAt = 0;
    private volatile long launchedAt = 0;
    private volatile long startingAt = 0;
    private volatile long startedAt = 0;
    private volatile long completedAt = 0;
    private volatile JobCompletedReason reason;
    private volatile int resubmitOf = -1;
    private volatile int totalResubmitCount = 0;
    @JsonIgnore
    private volatile Optional<Instant> lastHeartbeatAt = Optional.empty();
    private volatile boolean subscribed;
    private volatile Optional<String> preferredCluster;

    /**
     * Creates an instance of this class.
     *
     * @param workerIndex        zero-based index of this worker within its stage
     * @param workerNumber       globally unique (per job) worker number
     * @param jobId              id of the owning job; must parse via {@link JobId#fromId}
     * @param stageNum           stage this worker belongs to
     * @param numberOfPorts      total number of ports allocated (incl. system ports)
     * @param workerPorts        concrete port assignments; may be null until scheduled
     * @param state              initial {@link WorkerState}
     * @param slave              hostname of the mesos slave, if already placed
     * @param slaveID            id of the mesos slave, if already placed
     * @param acceptedAt         epoch millis the worker was accepted
     * @param launchedAt         epoch millis the worker was launched
     * @param startingAt         epoch millis the worker began initialization
     * @param startedAt          epoch millis the worker reported running
     * @param completedAt        epoch millis the worker terminated
     * @param reason             completion reason if terminal
     * @param resubmitOf         worker number this is a resubmission of, or -1
     * @param totalResubmitCount number of times this worker has been resubmitted
     * @param preferredCluster   preferred cluster for placement affinity
     * @throws IllegalArgumentException if jobId does not parse
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public MantisWorkerMetadataImpl(@JsonProperty("workerIndex") int workerIndex,
                                    @JsonProperty("workerNumber") int workerNumber,
                                    @JsonProperty("jobId") String jobId,
                                    @JsonProperty("stageNum") int stageNum,
                                    @JsonProperty("numberOfPorts") int numberOfPorts,
                                    @JsonProperty("workerPorts") WorkerPorts workerPorts,
                                    @JsonProperty("state") WorkerState state,
                                    @JsonProperty("slave") String slave,
                                    @JsonProperty("slaveID") String slaveID,
                                    @JsonProperty("acceptedAt") long acceptedAt,
                                    @JsonProperty("launchedAt") long launchedAt,
                                    @JsonProperty("startingAt") long startingAt,
                                    @JsonProperty("startedAt") long startedAt,
                                    @JsonProperty("completedAt") long completedAt,
                                    @JsonProperty("reason") JobCompletedReason reason,
                                    @JsonProperty("resubmitOf") int resubmitOf,
                                    @JsonProperty("totalResubmitCount") int totalResubmitCount,
                                    @JsonProperty("preferredCluster") Optional<String> preferredCluster
    ) {
        this.workerIndex = workerIndex;
        this.workerNumber = workerNumber;
        this.jobId = jobId;
        this.jobIdObj = JobId.fromId(jobId).orElseThrow(() -> new IllegalArgumentException(
                "jobId format is invalid: " + jobId));
        this.workerId = new WorkerId(jobId, workerIndex, workerNumber);
        this.stageNum = stageNum;
        this.numberOfPorts = numberOfPorts;
        this.workerPorts = workerPorts;
        this.state = state;
        this.slave = slave;
        this.slaveID = slaveID;
        this.acceptedAt = acceptedAt;
        this.launchedAt = launchedAt;
        this.completedAt = completedAt;
        this.startedAt = startedAt;
        this.startingAt = startingAt;
        this.reason = reason;
        this.resubmitOf = resubmitOf;
        this.totalResubmitCount = totalResubmitCount;
        this.preferredCluster = preferredCluster;
    }

    public int getWorkerIndex() {
        return workerIndex;
    }

    public int getWorkerNumber() {
        return workerNumber;
    }

    public String getJobId() {
        return jobId;
    }

    public JobId getJobIdObject() {
        return jobIdObj;
    }

    public WorkerId getWorkerId() {
        return workerId;
    }

    public int getStageNum() {
        return stageNum;
    }

    public int getNumberOfPorts() {
        return numberOfPorts;
    }

    public Optional<WorkerPorts> getPorts() {
        return Optional.ofNullable(workerPorts);
    }

    public WorkerPorts getWorkerPorts() {
        return this.workerPorts;
    }

    /** Assigns the concrete ports once the worker has been scheduled onto a host. */
    void addPorts(final WorkerPorts ports) {
        this.workerPorts = ports;
    }

    public int getTotalResubmitCount() {
        return totalResubmitCount;
    }

    // Port getters return -1 when no ports have been assigned yet (worker not scheduled).
    public int getMetricsPort() {
        return workerPorts == null ? -1 : workerPorts.getMetricsPort();
    }

    public int getDebugPort() {
        return workerPorts == null ? -1 : workerPorts.getDebugPort();
    }

    public int getConsolePort() {
        return workerPorts == null ? -1 : workerPorts.getConsolePort();
    }

    public int getCustomPort() {
        return workerPorts == null ? -1 : workerPorts.getCustomPort();
    }

    public int getSinkPort() {
        return workerPorts == null ? -1 : workerPorts.getSinkPort();
    }

    public int getResubmitOf() {
        return resubmitOf;
    }

    @JsonIgnore
    private void setResubmitInfo(int resubmitOf, int totalCount) {
        this.resubmitOf = resubmitOf;
        this.totalResubmitCount = totalCount;
    }

    @JsonIgnore
    public Optional<Instant> getLastHeartbeatAt() {
        return lastHeartbeatAt;
    }

    @JsonIgnore
    void setLastHeartbeatAt(long lastHeartbeatAt) {
        this.lastHeartbeatAt = Optional.of(Instant.ofEpochMilli(lastHeartbeatAt));
    }

    private void validateStateChange(WorkerState newState) throws InvalidWorkerStateChangeException {
        if (!WorkerState.isValidStateChgTo(state, newState)) {
            throw new InvalidWorkerStateChangeException(jobId, workerId, state, newState);
        }
    }

    /**
     * Update the state of the worker, recording the timestamp of the transition.
     *
     * @param newState target state; must be a legal transition from the current state
     * @param when     epoch millis at which the transition occurred
     * @param reason   completion reason, used only for terminal states; defaults to
     *                 {@code Lost} for Failed and {@code Normal} for Completed when null
     * @throws InvalidWorkerStateChangeException if the transition is not allowed
     */
    void setState(WorkerState newState, long when, JobCompletedReason reason)
            throws InvalidWorkerStateChangeException {
        WorkerState previousState = this.state;
        validateStateChange(newState);
        this.state = newState;
        LOGGER.info("Worker {} State changed from {} to {}", this.workerId, previousState, state);
        switch (state) {
            case Accepted:
                this.acceptedAt = when;
                break;
            case Launched:
                this.launchedAt = when;
                break;
            case StartInitiated:
                this.startingAt = when;
                break;
            case Started:
                this.startedAt = when;
                break;
            case Failed:
                this.completedAt = when;
                LOGGER.info("Worker {} failedAt {}", this.workerId, when);
                this.reason = reason == null ? JobCompletedReason.Lost : reason;
                break;
            case Completed:
                this.completedAt = when;
                LOGGER.info("Worker {} completedAt {}", this.workerId, when);
                this.reason = reason == null ? JobCompletedReason.Normal : reason;
                break;
            default:
                assert false : "Unexpected job state to set";
        }
    }

    public WorkerState getState() {
        return state;
    }

    void setSlave(String slave) {
        this.slave = slave;
    }

    public String getSlave() {
        return slave;
    }

    void setSlaveID(String slaveID) {
        this.slaveID = slaveID;
    }

    void setCluster(Optional<String> cluster) {
        this.preferredCluster = cluster;
    }

    public String getSlaveID() {
        return slaveID;
    }

    public long getAcceptedAt() {
        return acceptedAt;
    }

    public long getLaunchedAt() {
        return launchedAt;
    }

    public long getStartingAt() {
        return startingAt;
    }

    public long getStartedAt() {
        return startedAt;
    }

    public long getCompletedAt() {
        return completedAt;
    }

    void setIsSubscribed(boolean isSub) {
        this.subscribed = isSub;
    }

    public boolean getIsSubscribed() {
        return this.subscribed;
    }

    public JobCompletedReason getReason() {
        return reason;
    }

    @Override
    public Optional<String> getPreferredClusterOptional() {
        return this.preferredCluster;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        MantisWorkerMetadataImpl that = (MantisWorkerMetadataImpl) o;
        return workerIndex == that.workerIndex
                && workerNumber == that.workerNumber
                && stageNum == that.stageNum
                && numberOfPorts == that.numberOfPorts
                && acceptedAt == that.acceptedAt
                && launchedAt == that.launchedAt
                && startingAt == that.startingAt
                && startedAt == that.startedAt
                && completedAt == that.completedAt
                && resubmitOf == that.resubmitOf
                && totalResubmitCount == that.totalResubmitCount
                && Objects.equals(jobId, that.jobId)
                && Objects.equals(workerId, that.workerId)
                && Objects.equals(workerPorts, that.workerPorts)
                && state == that.state
                && Objects.equals(slave, that.slave)
                && Objects.equals(slaveID, that.slaveID)
                && reason == that.reason
                && Objects.equals(preferredCluster, that.preferredCluster);
    }

    @Override
    public int hashCode() {
        return Objects.hash(workerIndex, workerNumber, jobId, stageNum, numberOfPorts, workerId,
                workerPorts, state, slave, slaveID, acceptedAt, launchedAt, startingAt, startedAt,
                completedAt, reason, resubmitOf, totalResubmitCount, preferredCluster);
    }

    @Override
    public String toString() {
        return "MantisWorkerMetadataImpl{"
                + "workerIndex=" + workerIndex
                + ", workerNumber=" + workerNumber
                + ", jobId=" + jobId
                + ", stageNum=" + stageNum
                + ", numberOfPorts=" + numberOfPorts
                + ", workerId=" + workerId
                + ", workerPorts=" + workerPorts
                + ", state=" + state
                + ", slave='" + slave + '\''
                + ", slaveID='" + slaveID + '\''
                + ", acceptedAt=" + acceptedAt
                + ", launchedAt=" + launchedAt
                + ", startingAt=" + startingAt
                + ", startedAt=" + startedAt
                + ", completedAt=" + completedAt
                + ", reason=" + reason
                + ", resubmitOf=" + resubmitOf
                + ", totalResubmitCount=" + totalResubmitCount
                + ", lastHeartbeatAt=" + lastHeartbeatAt
                + ", subscribed=" + subscribed
                + ", preferredCluster=" + preferredCluster
                + '}';
    }

    @Override
    public Optional<String> getCluster() {
        return this.preferredCluster;
    }
}
4,404
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/worker/WorkerState.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master.jobcluster.job.worker;

import java.util.EnumMap;
import java.util.Map;

/**
 * Enumeration of all the states a Worker can be in.
 * Worker State Machine:
 * (Resource assignment)   (Worker startup)     (Worker running)
 * [Accepted] --> [Launched] --> [StartInitiated] --> [Started]
 *     |              |                 |            /    |
 * *
 * [----------------------------------------Failed----------------------------------------] [Completed]
 */
public enum WorkerState {
    /**
     * Indicates a worker submission has been received by the Master.
     */
    Accepted,
    /**
     * Indicates the worker has been scheduled onto a Mesos slave.
     */
    Launched,
    /**
     * Indicates the worker is in the process of starting up.
     */
    StartInitiated,
    /**
     * Indicates the worker is running.
     */
    Started,
    /**
     * Indicates the worker has encountered a fatal error.
     */
    Failed,
    /**
     * Indicates the worker has completed execution.
     */
    Completed,
    /**
     * A place holder state.
     */
    Noop,
    /**
     * Indicates that the actual state of the worker is unknown.
     */
    Unknown;

    /**
     * A rollup worker state indicating whether worker is running or terminal.
     */
    public enum MetaState {
        /**
         * Indicates the worker is not in a final state.
         */
        Active,
        /**
         * Indicates worker is dead.
         */
        Terminal
    }

    // Maps each state to the set of states it may legally transition to.
    // Noop and Unknown intentionally have no entry: they have no valid outgoing transitions.
    private static final Map<WorkerState, WorkerState[]> STATE_TRANSITION_MAP;
    // Maps each concrete lifecycle state to its Active/Terminal rollup.
    private static final Map<WorkerState, MetaState> META_STATES;

    static {
        // EnumMap: constant-time, allocation-free lookups for enum keys.
        STATE_TRANSITION_MAP = new EnumMap<>(WorkerState.class);
        STATE_TRANSITION_MAP.put(WorkerState.Accepted,
                new WorkerState[] {WorkerState.Launched, WorkerState.Failed, WorkerState.Completed});
        STATE_TRANSITION_MAP.put(WorkerState.Launched,
                new WorkerState[] {WorkerState.StartInitiated, WorkerState.Started,
                        WorkerState.Failed, WorkerState.Completed});
        STATE_TRANSITION_MAP.put(WorkerState.StartInitiated,
                new WorkerState[] {WorkerState.StartInitiated, WorkerState.Started,
                        WorkerState.Failed, WorkerState.Completed});
        STATE_TRANSITION_MAP.put(WorkerState.Started,
                new WorkerState[] {WorkerState.Started, WorkerState.Failed, WorkerState.Completed});
        STATE_TRANSITION_MAP.put(WorkerState.Failed, new WorkerState[] {WorkerState.Failed});
        STATE_TRANSITION_MAP.put(WorkerState.Completed, new WorkerState[] {});

        META_STATES = new EnumMap<>(WorkerState.class);
        META_STATES.put(WorkerState.Accepted, MetaState.Active);
        META_STATES.put(WorkerState.Launched, MetaState.Active);
        META_STATES.put(WorkerState.StartInitiated, MetaState.Active);
        META_STATES.put(WorkerState.Started, MetaState.Active);
        META_STATES.put(WorkerState.Failed, MetaState.Terminal);
        META_STATES.put(WorkerState.Completed, MetaState.Terminal);
    }

    /**
     * Returns true if the worker is in a state that indicates it is on Mesos slave.
     * @param state
     * @return
     */
    public static boolean isWorkerOnSlave(WorkerState state) {
        switch (state) {
            case StartInitiated:
            case Started:
                return true;
            default:
                return false;
        }
    }

    /**
     * Returns true if the worker is any valid non terminal state.
     * @param state
     * @return
     */
    public static boolean isRunningState(WorkerState state) {
        switch (state) {
            case Launched:
            case StartInitiated:
            case Started:
                return true;
            default:
                return false;
        }
    }

    /**
     * Returns true if the old state -> new state transition is valid.
     *
     * @param currentState state the worker is currently in
     * @param newState     proposed next state
     * @return true only if the transition map lists newState as reachable from currentState
     */
    public static boolean isValidStateChgTo(WorkerState currentState, WorkerState newState) {
        // Fix: Noop and Unknown have no transition-map entry; previously this method threw
        // a NullPointerException for them. Treat a missing entry as "no valid transitions".
        WorkerState[] validTargets = STATE_TRANSITION_MAP.get(currentState);
        if (validTargets == null) {
            return false;
        }
        for (WorkerState validState : validTargets) {
            if (validState == newState) {
                return true;
            }
        }
        return false;
    }

    /**
     * Returns true if the worker is in a terminal state.
     * @param state
     * @return
     */
    public static boolean isTerminalState(WorkerState state) {
        switch (state) {
            case Completed:
            case Failed:
                return true;
            default:
                return false;
        }
    }

    /**
     * Returns true if the worker is in error state.
     * @param state
     * @return
     */
    public static boolean isErrorState(WorkerState state) {
        switch (state) {
            case Failed:
                return true;
            default:
                return false;
        }
    }

    /**
     * Translates the given {@link WorkerState} to a MetaState.
     * Note: returns null for Noop and Unknown, which have no Active/Terminal rollup.
     * @param state
     * @return
     */
    public static MetaState toMetaState(WorkerState state) {
        return META_STATES.get(state);
    }
}
4,405
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/worker/WorkerTerminate.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster.job.worker; import io.mantisrx.server.core.JobCompletedReason; import io.mantisrx.server.core.domain.WorkerId; import io.mantisrx.server.master.scheduler.WorkerEvent; /** * Encapsulates a worker terminated event. */ public class WorkerTerminate implements WorkerEvent { private final JobCompletedReason reason; private final WorkerId workerId; private final long eventTime; private final WorkerState finalState; /** * Creates an instance of this class. * @param workerId * @param state * @param reason * @param time */ public WorkerTerminate(WorkerId workerId, WorkerState state, JobCompletedReason reason, long time) { this.workerId = workerId; this.reason = reason; this.finalState = state; this.eventTime = time; } /** * Creates an instance of this class. auto-populates the current time. 
* @param workerId * @param state * @param reason */ public WorkerTerminate(WorkerId workerId, WorkerState state, JobCompletedReason reason) { this(workerId, state, reason, System.currentTimeMillis()); } @Override public WorkerId getWorkerId() { return workerId; } @Override public long getEventTimeMs() { return this.eventTime; } public JobCompletedReason getReason() { return reason; } public WorkerState getFinalState() { return finalState; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + (int) (eventTime ^ (eventTime >>> 32)); result = prime * result + ((finalState == null) ? 0 : finalState.hashCode()); result = prime * result + ((reason == null) ? 0 : reason.hashCode()); result = prime * result + ((workerId == null) ? 0 : workerId.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; WorkerTerminate other = (WorkerTerminate) obj; if (eventTime != other.eventTime) return false; if (finalState != other.finalState) return false; if (reason != other.reason) return false; if (workerId == null) { if (other.workerId != null) return false; } else if (!workerId.equals(other.workerId)) return false; return true; } @Override public String toString() { return "WorkerTerminate [reason=" + reason + ", workerId=" + workerId + ", eventTime=" + eventTime + ", finalState=" + finalState + "]"; } }
4,406
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/vm/AgentClusterOperationsImpl.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master.vm;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.fenzo.AutoScaleRule;
import com.netflix.fenzo.TaskRequest;
import com.netflix.fenzo.VirtualMachineCurrentState;
import com.netflix.fenzo.VirtualMachineLease;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.util.DateTimeExt;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventsProto;
import io.mantisrx.server.core.BaseService;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.AgentClustersAutoScaler;
import io.mantisrx.server.master.persistence.IMantisStorageProvider;
import io.mantisrx.server.master.scheduler.JobMessageRouter;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.WorkerOnDisabledVM;
import org.apache.mesos.Protos;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.schedulers.Schedulers;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;

/**
 * Implements {@link AgentClusterOperations}: tracks which agent-cluster attribute values are
 * "active", reports jobs-per-VM and per-agent resource info, and periodically evicts workers
 * running on VMs that are no longer in an active cluster.
 */
public class AgentClusterOperationsImpl extends BaseService implements AgentClusterOperations {

    /** Immutable holder for the list of active-VM attribute values, persisted via storage. */
    static class ActiveVmAttributeValues {
        private final List<String> values;

        @JsonCreator
        ActiveVmAttributeValues(@JsonProperty("values") List<String> values) {
            this.values = values;
        }

        List<String> getValues() {
            return values;
        }

        boolean isEmpty() {
            return values == null || values.isEmpty();
        }
    }

    private static final Logger logger = LoggerFactory.getLogger(AgentClusterOperationsImpl.class);
    private final IMantisStorageProvider storageProvider;
    private final JobMessageRouter jobMessageRouter;
    private final MantisScheduler scheduler;
    private final LifecycleEventPublisher lifecycleEventPublisher;
    // volatile: written on config updates, read from the periodic checker thread.
    private volatile ActiveVmAttributeValues activeVmAttributeValues = null;
    // Single-key map ("0") used as a thread-safe holder for the latest VM state snapshot.
    private final ConcurrentMap<String, List<VirtualMachineCurrentState>> vmStatesMap;
    private final AgentClustersAutoScaler agentClustersAutoScaler;
    // Name of the mesos agent attribute whose value identifies the agent cluster.
    private final String attrName;
    private final Counter listJobsOnVMsCount;

    public AgentClusterOperationsImpl(final IMantisStorageProvider storageProvider,
                                      final JobMessageRouter jobMessageRouter,
                                      final MantisScheduler scheduler,
                                      final LifecycleEventPublisher lifecycleEventPublisher,
                                      final String activeSlaveAttributeName) {
        super(true);
        Preconditions.checkNotNull(storageProvider, "storageProvider");
        Preconditions.checkNotNull(jobMessageRouter, "jobMessageRouter");
        Preconditions.checkNotNull(scheduler, "scheduler");
        Preconditions.checkNotNull(lifecycleEventPublisher, "lifecycleEventPublisher");
        Preconditions.checkNotNull(activeSlaveAttributeName, "activeSlaveAttributeName");
        this.storageProvider = storageProvider;
        this.jobMessageRouter = jobMessageRouter;
        this.scheduler = scheduler;
        this.lifecycleEventPublisher = lifecycleEventPublisher;
        this.vmStatesMap = new ConcurrentHashMap<>();
        this.agentClustersAutoScaler = AgentClustersAutoScaler.get();
        this.attrName = activeSlaveAttributeName;
        Metrics metrics = new Metrics.Builder()
                .id("AgentClusterOperations")
                .addCounter("listJobsOnVMsCount")
                .build();
        this.listJobsOnVMsCount = metrics.getCounter("listJobsOnVMsCount");
    }

    @Override
    public void start() {
        super.awaitActiveModeAndStart(() -> {
            try {
                // Periodically (every 30s, after 1s initial delay) check for workers on
                // VMs that have fallen out of the active cluster set.
                Schedulers.computation().createWorker().schedulePeriodically(
                        () -> checkInactiveVMs(scheduler.getCurrentVMState()),
                        1, 30, TimeUnit.SECONDS);
                List<String> activeVmGroups = storageProvider.initActiveVmAttributeValuesList();
                activeVmAttributeValues = new ActiveVmAttributeValues(activeVmGroups);
                scheduler.setActiveVmGroups(activeVmAttributeValues.getValues());
                // Fix: removed dead null-check on activeVmAttributeValues (assigned just above).
                logger.info("Initialized activeVmAttributeValues={}", activeVmAttributeValues.getValues());
            } catch (IOException e) {
                logger.error("Can't initialize activeVM attribute values list: " + e.getMessage());
            }
        });
    }

    @Override
    public void setActiveVMsAttributeValues(List<String> values) throws IOException {
        logger.info("setting active VMs to {}", values);
        // Persist first so a crash between persist and apply favors the stored value.
        storageProvider.setActiveVmAttributeValuesList(values);
        activeVmAttributeValues = new ActiveVmAttributeValues(values);
        List<String> activeVMGroups = activeVmAttributeValues.getValues();
        scheduler.setActiveVmGroups(activeVMGroups);
        lifecycleEventPublisher.publishAuditEvent(
                new LifecycleEventsProto.AuditEvent(
                        LifecycleEventsProto.AuditEvent.AuditEventType.CLUSTER_ACTIVE_VMS,
                        "ActiveVMs", String.join(", ", values))
        );
    }

    @Override
    public List<String> getActiveVMsAttributeValues() {
        // Returns null when never initialized; callers treat null/empty as "all active".
        return activeVmAttributeValues == null ? null : activeVmAttributeValues.getValues();
    }

    private List<JobsOnVMStatus> getJobsOnVMStatus() {
        List<AgentClusterOperations.JobsOnVMStatus> result = new ArrayList<>();
        final List<VirtualMachineCurrentState> vmCurrentStates = scheduler.getCurrentVMState();
        if (vmCurrentStates != null && !vmCurrentStates.isEmpty()) {
            for (VirtualMachineCurrentState currentState : vmCurrentStates) {
                final VirtualMachineLease currAvailableResources = currentState.getCurrAvailableResources();
                if (currAvailableResources != null) {
                    final Protos.Attribute attribute = currAvailableResources.getAttributeMap().get(attrName);
                    if (attribute != null) {
                        AgentClusterOperations.JobsOnVMStatus s =
                                new AgentClusterOperations.JobsOnVMStatus(
                                        currAvailableResources.hostname(),
                                        attribute.getText().getValue());
                        for (TaskRequest r : currentState.getRunningTasks()) {
                            final Optional<WorkerId> workerId = WorkerId.fromId(r.getId());
                            // Stage is not encoded in the task id, hence -1.
                            s.addJob(new AgentClusterOperations.JobOnVMInfo(
                                    workerId.map(WorkerId::getJobId).orElse("InvalidJobId"),
                                    -1,
                                    workerId.map(WorkerId::getWorkerIndex).orElse(-1),
                                    workerId.map(WorkerId::getWorkerNum).orElse(-1)));
                        }
                        result.add(s);
                    }
                }
            }
        }
        return result;
    }

    @Override
    public Map<String, List<JobsOnVMStatus>> getJobsOnVMs() {
        listJobsOnVMsCount.increment();
        // Group per-VM statuses by the cluster attribute value.
        Map<String, List<JobsOnVMStatus>> result = new HashMap<>();
        final List<JobsOnVMStatus> statusList = getJobsOnVMStatus();
        if (statusList != null && !statusList.isEmpty()) {
            for (JobsOnVMStatus status : statusList) {
                result.computeIfAbsent(status.getAttributeValue(), k -> new ArrayList<>())
                        .add(status);
            }
        }
        return result;
    }

    private boolean isIn(String name, List<String> activeVMs) {
        return activeVMs.contains(name);
    }

    @Override
    public boolean isActive(String name) {
        // No configured active list means every cluster is considered active.
        return activeVmAttributeValues == null
                || activeVmAttributeValues.isEmpty()
                || isIn(name, activeVmAttributeValues.getValues());
    }

    @Override
    public void setAgentInfos(List<VirtualMachineCurrentState> vmStates) {
        vmStatesMap.put("0", vmStates);
    }

    @Override
    public List<AgentInfo> getAgentInfos() {
        List<VirtualMachineCurrentState> vmStates = vmStatesMap.get("0");
        List<AgentInfo> agentInfos = new ArrayList<>();
        if (vmStates != null && !vmStates.isEmpty()) {
            for (VirtualMachineCurrentState s : vmStates) {
                List<VirtualMachineLease.Range> ranges = s.getCurrAvailableResources().portRanges();
                int ports = 0;
                if (ranges != null && !ranges.isEmpty()) {
                    for (VirtualMachineLease.Range r : ranges) {
                        ports += r.getEnd() - r.getBeg();
                    }
                }
                Map<String, Protos.Attribute> attributeMap = s.getCurrAvailableResources().getAttributeMap();
                Map<String, String> attributes = new HashMap<>();
                if (attributeMap != null && !attributeMap.isEmpty()) {
                    for (Map.Entry<String, Protos.Attribute> entry : attributeMap.entrySet()) {
                        attributes.put(entry.getKey(), entry.getValue().getText().getValue());
                    }
                }
                agentInfos.add(new AgentInfo(
                        s.getHostname(),
                        s.getCurrAvailableResources().cpuCores(),
                        s.getCurrAvailableResources().memoryMB(),
                        s.getCurrAvailableResources().diskMB(),
                        ports,
                        s.getCurrAvailableResources().getScalarValues(),
                        attributes,
                        s.getResourceSets().keySet(),
                        getTimeString(s.getDisabledUntil())
                ));
            }
        }
        return agentInfos;
    }

    @Override
    public Map<String, AgentClusterAutoScaleRule> getAgentClusterAutoScaleRules() {
        final Set<AutoScaleRule> agentAutoscaleRules = agentClustersAutoScaler.getRules();
        final Map<String, AgentClusterAutoScaleRule> result = new HashMap<>();
        if (agentAutoscaleRules != null && !agentAutoscaleRules.isEmpty()) {
            for (AutoScaleRule r : agentAutoscaleRules) {
                result.put(r.getRuleName(), new AgentClusterOperations.AgentClusterAutoScaleRule(
                        r.getRuleName(),
                        r.getCoolDownSecs(),
                        r.getMinIdleHostsToKeep(),
                        r.getMaxIdleHostsToKeep(),
                        r.getMinSize(),
                        r.getMaxSize()));
            }
        }
        return result;
    }

    /** Returns null when the disable window has already passed, else a UTC timestamp string. */
    private String getTimeString(long disabledUntil) {
        if (System.currentTimeMillis() > disabledUntil) {
            return null;
        }
        return DateTimeExt.toUtcDateTimeString(disabledUntil);
    }

    /**
     * Scans current VM states for hosts whose cluster attribute is not in the active set.
     * For each such host, routes a {@link WorkerOnDisabledVM} event for every running worker
     * and collects the hostname.
     *
     * @param currentStates snapshot of VM states from the scheduler; may be null/empty
     * @return hostnames of VMs that are no longer active (possibly empty, never null)
     */
    List<String> manageActiveVMs(final List<VirtualMachineCurrentState> currentStates) {
        List<String> inactiveVMs = new ArrayList<>();
        if (currentStates != null && !currentStates.isEmpty()) {
            final List<String> values = getActiveVMsAttributeValues();
            if (values == null || values.isEmpty()) {
                // Fix: typed Collections.emptyList() instead of raw Collections.EMPTY_LIST.
                return Collections.emptyList(); // treat no valid active VMs attribute value as all are active
            }
            for (VirtualMachineCurrentState currentState : currentStates) {
                final VirtualMachineLease lease = currentState.getCurrAvailableResources();
                if (lease != null) {
                    final Collection<TaskRequest> runningTasks = currentState.getRunningTasks();
                    if (runningTasks != null && !runningTasks.isEmpty()) {
                        final Map<String, Protos.Attribute> attributeMap = lease.getAttributeMap();
                        if (attributeMap != null && !attributeMap.isEmpty()) {
                            final Protos.Attribute attribute = attributeMap.get(attrName);
                            if (attribute != null && attribute.hasText()) {
                                if (!isIn(attribute.getText().getValue(), values)) {
                                    inactiveVMs.add(lease.hostname());
                                    for (TaskRequest t : runningTasks) {
                                        Optional<WorkerId> workerIdO = WorkerId.fromId(t.getId());
                                        workerIdO.ifPresent(workerId ->
                                                jobMessageRouter.routeWorkerEvent(new WorkerOnDisabledVM(workerId)));
                                    }
                                }
                            } else {
                                logger.warn("No attribute value for " + attrName + " found on VM " + lease.hostname()
                                        + " that has " + runningTasks.size() + " tasks on it");
                            }
                        } else {
                            logger.warn("No attributes found on VM " + lease.hostname()
                                    + " that has " + runningTasks.size() + " tasks on it");
                        }
                    }
                }
            }
        }
        return inactiveVMs;
    }

    private void checkInactiveVMs(List<VirtualMachineCurrentState> vmCurrentStates) {
        logger.debug("Checking on any workers on VMs that are not active anymore");
        final List<String> inactiveVMs = manageActiveVMs(vmCurrentStates);
        if (inactiveVMs != null && !inactiveVMs.isEmpty()) {
            for (String vm : inactiveVMs) {
                logger.info("expiring all leases of inactive vm " + vm);
                scheduler.rescindOffers(vm);
            }
        }
    }
}
4,407
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/vm/AgentClusterOperations.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master.vm;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.fenzo.VirtualMachineCurrentState;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * Operations on agent (VM) clusters: managing which clusters are active, reporting
 * per-agent resource information, mapping running jobs to VMs, and exposing agent
 * cluster auto-scale rules. The nested classes are Jackson-serialized data carriers.
 */
public interface AgentClusterOperations {

    /**
     * Sets (and persists) the attribute values identifying the active VM clusters.
     *
     * @param values the attribute values to mark active
     * @throws IOException if persisting the list fails
     */
    void setActiveVMsAttributeValues(List<String> values) throws IOException;

    /**
     * Returns the currently active VM attribute values.
     * NOTE(review): the implementation may return null when never initialized — callers
     * should treat null/empty as "all clusters active"; verify against implementation.
     */
    List<String> getActiveVMsAttributeValues();

    /**
     * Returns true if the given attribute value identifies an active cluster.
     *
     * @param name the cluster attribute value to check
     */
    boolean isActive(String name);

    /** Stores the latest snapshot of per-VM state, later surfaced via {@link #getAgentInfos()}. */
    void setAgentInfos(List<VirtualMachineCurrentState> agentInfos);

    /** Returns resource/attribute info for each known agent, derived from the last snapshot. */
    List<AgentInfo> getAgentInfos();

    /**
     * Get all current jobs assigned to VMs. This produces a map with key as the value for VM attribute used to
     * set active VMs. The values of the map are the list of jobs on VM status objects.
     * @return current jobs assigned to VMs.
     */
    Map<String, List<JobsOnVMStatus>> getJobsOnVMs();

    /** Returns the agent cluster auto-scale rules, keyed by rule name. */
    Map<String, AgentClusterAutoScaleRule> getAgentClusterAutoScaleRules();

    /**
     * Identifies a single worker (job id, stage, index, number) running on a VM.
     * Serialized via Jackson; property names are part of the API contract.
     */
    class JobOnVMInfo {
        private final String jobId;
        // NOTE(review): implementations may not know the stage and pass -1 — confirm with callers.
        private final int stage;
        private final int workerIndex;
        private final int workerNumber;

        @JsonCreator
        public JobOnVMInfo(@JsonProperty("jobId") String jobId,
                           @JsonProperty("stage") int stage,
                           @JsonProperty("workerIndex") int workerIndex,
                           @JsonProperty("workerNumber") int workerNumber) {
            this.jobId = jobId;
            this.stage = stage;
            this.workerIndex = workerIndex;
            this.workerNumber = workerNumber;
        }

        public String getJobId() {
            return jobId;
        }

        public int getStage() {
            return stage;
        }

        public int getWorkerIndex() {
            return workerIndex;
        }

        public int getWorkerNumber() {
            return workerNumber;
        }
    }

    /**
     * The set of workers running on one VM, identified by hostname and the VM's
     * cluster attribute value. Jobs are appended via the package-private {@link #addJob}.
     */
    class JobsOnVMStatus {
        private final String hostname;
        private final String attributeValue;
        private final List<JobOnVMInfo> jobs;

        @JsonCreator
        public JobsOnVMStatus(@JsonProperty("hostname") String hostname,
                              @JsonProperty("attributeValue") String attributeValue) {
            this.hostname = hostname;
            this.attributeValue = attributeValue;
            // Starts empty; populated incrementally by the producer.
            this.jobs = new ArrayList<>();
        }

        @JsonIgnore
        void addJob(JobOnVMInfo job) {
            jobs.add(job);
        }

        public String getHostname() {
            return hostname;
        }

        public String getAttributeValue() {
            return attributeValue;
        }

        public List<JobOnVMInfo> getJobs() {
            return jobs;
        }
    }

    /**
     * Snapshot of one agent's available resources (cpu/memory/disk/ports), scalar values,
     * mesos attributes, named resource sets, and — if currently disabled — the time until
     * which it is disabled (null when not disabled).
     */
    class AgentInfo {
        private final String name;
        private final double availableCpus;
        private final double availableMemory;
        private final double availableDisk;
        private final int availableNumPorts;
        private final Map<String, Double> scalars;
        private final Map<String, String> attributes;
        private final Set<String> resourceSets;
        private final String disabledUntil;

        @JsonCreator
        public AgentInfo(@JsonProperty("name") String name,
                         @JsonProperty("availableCpus") double availableCpus,
                         @JsonProperty("availableMemory") double availableMemory,
                         @JsonProperty("availableDisk") double availableDisk,
                         @JsonProperty("availableNumPorts") int availableNumPorts,
                         @JsonProperty("scalars") Map<String, Double> scalars,
                         @JsonProperty("attributes") Map<String, String> attributes,
                         @JsonProperty("resourceSets") Set<String> resourceSets,
                         @JsonProperty("disabledUntil") String disabledUntil) {
            this.name = name;
            this.availableCpus = availableCpus;
            this.availableMemory = availableMemory;
            this.availableDisk = availableDisk;
            this.availableNumPorts = availableNumPorts;
            this.scalars = scalars;
            this.attributes = attributes;
            this.resourceSets = resourceSets;
            this.disabledUntil = disabledUntil;
        }

        public String getName() {
            return name;
        }

        public double getAvailableCpus() {
            return availableCpus;
        }

        public double getAvailableMemory() {
            return availableMemory;
        }

        public double getAvailableDisk() {
            return availableDisk;
        }

        public int getAvailableNumPorts() {
            return availableNumPorts;
        }

        public Map<String, Double> getScalars() {
            return scalars;
        }

        public Map<String, String> getAttributes() {
            return attributes;
        }

        public Set<String> getResourceSets() {
            return resourceSets;
        }

        public String getDisabledUntil() {
            return disabledUntil;
        }
    }

    /**
     * Auto-scale rule for an agent cluster: cooldown between scaling actions and the
     * idle-host and total-size bounds the autoscaler maintains.
     */
    class AgentClusterAutoScaleRule {
        private final String name;
        private final long cooldownSecs;
        private final int minIdle;
        private final int maxIdle;
        private final int minSize;
        private final int maxSize;

        @JsonCreator
        public AgentClusterAutoScaleRule(@JsonProperty("name") final String name,
                                         @JsonProperty("cooldownSecs") final long cooldownSecs,
                                         @JsonProperty("minIdle") final int minIdle,
                                         @JsonProperty("maxIdle") final int maxIdle,
                                         @JsonProperty("minSize") final int minSize,
                                         @JsonProperty("maxSize") final int maxSize) {
            this.name = name;
            this.cooldownSecs = cooldownSecs;
            this.minIdle = minIdle;
            this.maxIdle = maxIdle;
            this.minSize = minSize;
            this.maxSize = maxSize;
        }

        public String getName() {
            return name;
        }

        public long getCooldownSecs() {
            return cooldownSecs;
        }

        public int getMinIdle() {
            return minIdle;
        }

        public int getMaxIdle() {
            return maxIdle;
        }

        public int getMinSize() {
            return minSize;
        }

        public int getMaxSize() {
            return maxSize;
        }
    }
}
4,408
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/akka/ActorSystemMetrics.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.akka;

import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;

/**
 * A holder class for metrics associated with an Actor.
 *
 * <p>Eagerly-initialized singleton: the counters are registered exactly once with the
 * shared {@link MetricsRegistry} when this class is loaded.
 */
public final class ActorSystemMetrics {
    private final Counter actorKilledCount;
    private final Counter actorInitExceptionCount;
    private final Counter actorDeathPactExcCount;
    private final Counter actorResumeCount;

    private static final ActorSystemMetrics INSTANCE = new ActorSystemMetrics();

    private ActorSystemMetrics() {
        Metrics m = new Metrics.Builder()
            .id("ActorSystemMetrics")
            .addCounter("actorKilledCount")
            .addCounter("actorInitExceptionCount")
            .addCounter("actorDeathPactExcCount")
            .addCounter("actorResumeCount")
            .build();
        // registerAndGet may return an already-registered instance; always use its counters.
        Metrics metrics = MetricsRegistry.getInstance().registerAndGet(m);
        this.actorKilledCount = metrics.getCounter("actorKilledCount");
        this.actorInitExceptionCount = metrics.getCounter("actorInitExceptionCount");
        this.actorDeathPactExcCount = metrics.getCounter("actorDeathPactExcCount");
        this.actorResumeCount = metrics.getCounter("actorResumeCount");
    }

    /** @return the process-wide singleton instance. */
    public static ActorSystemMetrics getInstance() {
        return INSTANCE;
    }

    /**
     * Increments Actor kill count.
     */
    public void incrementActorKilledCount() {
        actorKilledCount.increment();
    }

    /**
     * Tracks how many times an actor failed to initialize.
     */
    public void incrementActorInitExceptionCount() {
        actorInitExceptionCount.increment();
    }

    /**
     * Tracks how many times an actor was killed due to a death pact.
     */
    public void incrementActorDeathPactExcCount() {
        actorDeathPactExcCount.increment();
    }

    /**
     * Tracks how many times an actor has been resumed.
     */
    public void incrementActorResumeCount() {
        actorResumeCount.increment();
    }
}
4,409
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/akka/MantisActorSupervisorStrategy.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.akka;

import akka.actor.ActorInitializationException;
import akka.actor.ActorKilledException;
import akka.actor.DeathPactException;
import akka.actor.OneForOneStrategy;
import akka.actor.SupervisorStrategy;
import akka.actor.SupervisorStrategyConfigurator;
import akka.japi.pf.DeciderBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * The standard Mantis Actor supervisor strategy.
 *
 * <p>Stops the failing child for initialization failures, kills, and death pacts (counting each
 * in {@link ActorSystemMetrics}); resumes the child for any other exception instead of Akka's
 * default restart, so the actor keeps its state.
 */
public class MantisActorSupervisorStrategy implements SupervisorStrategyConfigurator {
    private static final Logger LOGGER = LoggerFactory.getLogger(MantisActorSupervisorStrategy.class);

    private static final MantisActorSupervisorStrategy INSTANCE = new MantisActorSupervisorStrategy();

    /** @return the shared singleton instance. */
    public static MantisActorSupervisorStrategy getInstance() {
        return INSTANCE;
    }

    @Override
    public SupervisorStrategy create() {
        // custom supervisor strategy to resume the child actors on Exception instead of the default restart behavior
        return new OneForOneStrategy(DeciderBuilder
            .match(ActorInitializationException.class, e -> {
                // Actor could not be created/pre-started: nothing to resume, stop it.
                ActorSystemMetrics.getInstance().incrementActorInitExceptionCount();
                return SupervisorStrategy.stop();
            })
            .match(ActorKilledException.class, e -> {
                // Explicit Kill message: honor it by stopping.
                ActorSystemMetrics.getInstance().incrementActorKilledCount();
                return SupervisorStrategy.stop();
            })
            .match(DeathPactException.class, e -> {
                // Watched actor terminated without the child handling Terminated: stop.
                ActorSystemMetrics.getInstance().incrementActorDeathPactExcCount();
                return SupervisorStrategy.stop();
            })
            .match(Exception.class, e -> {
                // Any other exception: resume the child, preserving its state.
                LOGGER.info("resuming actor on exception {}", e.getMessage(), e);
                ActorSystemMetrics.getInstance().incrementActorResumeCount();
                return SupervisorStrategy.resume();
            })
            .build());
    }
}
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/akka/MeteredMessageQueue.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.akka;

import akka.actor.ActorRef;
import akka.dispatch.Envelope;
import akka.dispatch.MessageQueue;
import akka.dispatch.UnboundedMessageQueueSemantics;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Timer;
import com.netflix.spectator.api.patterns.PolledMeter;
import io.mantisrx.common.metrics.spectator.SpectatorRegistryFactory;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.TimeUnit;

/**
 * A custom implementation of a message queue used by a few key Actors. This implementation
 * keeps track of enqueue and wait rates to the Actor queue.
 */
public class MeteredMessageQueue implements MessageQueue, UnboundedMessageQueueSemantics {
    private final String path;
    private final Counter insertCounter;
    private final Timer waitTimer;
    private final ConcurrentLinkedQueue<Entry> queue = new ConcurrentLinkedQueue<>();

    /**
     * Creates an instance.
     * @param path The actor path, used as the "path" tag on all emitted metrics.
     */
    public MeteredMessageQueue(final String path) {
        Registry registry = SpectatorRegistryFactory.getRegistry();
        this.path = path;
        this.insertCounter = registry.counter("akka.queue.insert", "path", path);
        this.waitTimer = registry.timer("akka.queue.wait", "path", path);
        // NOTE(review): ConcurrentLinkedQueue.size() is O(n); the polled gauge walks the
        // queue on every poll interval.
        PolledMeter
            .using(registry)
            .withName("akka.queue.size")
            .withTag("path", path)
            .monitorSize(queue);
    }

    /**
     * A wrapper class that adds the time of creation of a message.
     */
    static final class Entry {
        /**
         * The {@link Envelope} used by Akka around each enqueued message.
         */
        private final Envelope v;
        /**
         * Nano time of when the message was enqueued.
         */
        private final long t;

        /**
         * Creates an instance of this class, stamping the current nano time.
         * @param v the envelope to wrap.
         */
        Entry(final Envelope v) {
            this.v = v;
            this.t = System.nanoTime();
        }
    }

    /**
     * Invoked every time a message is enqueued for an Actor; counts the insert.
     * @param receiver the destination actor (unused; queue is per-actor already).
     * @param handle the message envelope to enqueue.
     */
    public void enqueue(ActorRef receiver, Envelope handle) {
        insertCounter.increment();
        queue.offer(new Entry(handle));
    }

    /**
     * Invoked every time a message is dequeued from an Actor's queue; records how long
     * the message waited in the queue.
     * @return the next envelope, or null if the queue is empty.
     */
    public Envelope dequeue() {
        Entry tmp = queue.poll();
        if (tmp == null) {
            return null;
        } else {
            long dur = System.nanoTime() - tmp.t;
            waitTimer.record(dur, TimeUnit.NANOSECONDS);
            return tmp.v;
        }
    }

    /**
     * Returns current queue size. NOTE(review): O(n) on ConcurrentLinkedQueue.
     * @return queue size
     */
    public int numberOfMessages() {
        return queue.size();
    }

    /**
     * Returns true if there is at least a single message in the queue.
     * @return boolean whether queue is not empty.
     */
    public boolean hasMessages() {
        return !queue.isEmpty();
    }

    /**
     * Clears the Actor queue. Pending messages are discarded, not forwarded to deadLetters.
     * @param owner the owning actor (unused).
     * @param deadLetters the dead-letter queue (unused).
     */
    public void cleanUp(ActorRef owner, MessageQueue deadLetters) {
        queue.clear();
    }
}
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/akka/UnboundedMeteredMailbox.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.akka;

import akka.actor.ActorPath;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.dispatch.MailboxType;
import akka.dispatch.MessageQueue;
import akka.dispatch.ProducesMessageQueue;
import com.typesafe.config.Config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Option;

/**
 * A simple unbounded metered mail box. Produces a {@link MeteredMessageQueue} per actor,
 * tagged with the owning actor's name. Instantiated reflectively by Akka via configuration.
 */
public class UnboundedMeteredMailbox implements MailboxType, ProducesMessageQueue<MeteredMessageQueue> {
    // Required by Akka's MailboxType constructor contract; retained but not otherwise used here.
    private final ActorSystem.Settings settings;
    private final Config config;

    private static final Logger LOGGER = LoggerFactory.getLogger(UnboundedMeteredMailbox.class);

    /**
     * Creates an instance of this class.
     * @param settings actor system settings supplied by Akka.
     * @param config mailbox configuration supplied by Akka.
     */
    public UnboundedMeteredMailbox(final ActorSystem.Settings settings, final Config config) {
        this.settings = settings;
        this.config = config;
    }

    /**
     * Creates an instance of a {@link MessageQueue} for the given owner.
     * @param owner the actor owning the queue; "unknown" is used as the tag when absent.
     * @param system the actor system (unused).
     * @return a new metered message queue.
     */
    public MessageQueue create(final Option<ActorRef> owner, final Option<ActorSystem> system) {
        String path = owner.fold(() -> "unknown", r -> tagValue(r.path()));
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("created message queue for {}", path);
        }
        return new MeteredMessageQueue(path);
    }

    /** Summarizes a path for use in a metric tag (last path element only, to bound cardinality). */
    private String tagValue(ActorPath path) {
        return path.name();
    }
}
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/MasterApiAkkaService.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka;

import akka.NotUsed;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.http.javadsl.settings.ServerSettings;
import akka.http.javadsl.settings.WebSocketSettings;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Flow;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler;
import io.mantisrx.master.api.akka.route.v0.AgentClusterRoute;
import io.mantisrx.master.api.akka.route.v0.JobClusterRoute;
import io.mantisrx.master.api.akka.route.v0.JobDiscoveryRoute;
import io.mantisrx.master.api.akka.route.v0.JobStatusRoute;
import io.mantisrx.master.api.akka.route.v0.JobRoute;
import io.mantisrx.master.api.akka.route.v1.AdminMasterRoute;
import io.mantisrx.master.api.akka.route.v1.AgentClustersRoute;
import io.mantisrx.master.api.akka.route.v1.JobClustersRoute;
import io.mantisrx.master.api.akka.route.MantisMasterRoute;
import io.mantisrx.master.api.akka.route.v0.MasterDescriptionRoute;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.v1.JobDiscoveryStreamRoute;
import io.mantisrx.master.api.akka.route.v1.JobStatusStreamRoute;
import io.mantisrx.master.api.akka.route.v1.JobsRoute;
import io.mantisrx.master.api.akka.route.v1.LastSubmittedJobIdStreamRoute;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.vm.AgentClusterOperations;
import io.mantisrx.server.core.BaseService;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.core.master.MasterMonitor;
import io.mantisrx.server.master.ILeadershipManager;
import io.mantisrx.server.master.LeaderRedirectionFilter;
import io.mantisrx.server.master.persistence.IMantisStorageProvider;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.duration.Duration;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

/**
 * Hosts the Mantis Master HTTP API (v0 and v1 routes) on a dedicated Akka HTTP server.
 *
 * <p>Lifecycle: the constructor wires all route handlers and immediately launches the HTTP
 * server on a single daemon thread; that thread blocks on {@code serviceLatch} until
 * {@link #shutdown()} releases it, at which point the server unbinds and the process exits.
 * {@link #start()} only marks the leader as ready once active mode is reached.
 */
public class MasterApiAkkaService extends BaseService {
    private static final Logger logger = LoggerFactory.getLogger(MasterApiAkkaService.class);

    private final MasterMonitor masterMonitor;
    private final MasterDescription masterDescription;
    private final ActorRef jobClustersManagerActor;
    private final ActorRef statusEventBrokerActor;
    private final int port;
    private final IMantisStorageProvider storageProvider;
    private final MantisScheduler scheduler;
    private final LifecycleEventPublisher lifecycleEventPublisher;
    private final MantisMasterRoute mantisMasterRoute;
    private final ILeadershipManager leadershipManager;
    private final ActorSystem system;
    private final ActorMaterializer materializer;
    private final ExecutorService executorService;
    // Held open until shutdown(); the API server thread blocks on this latch.
    private final CountDownLatch serviceLatch = new CountDownLatch(1);

    /**
     * Creates the service, wires all API routes, and starts the HTTP server thread.
     *
     * @param masterMonitor monitor of the current master leader.
     * @param masterDescription description of this master instance.
     * @param jobClustersManagerActor actor managing job clusters.
     * @param statusEventBrokerActor actor brokering job/worker status events.
     * @param serverPort port to bind the HTTP API on.
     * @param mantisStorageProvider persistence provider.
     * @param scheduler task scheduler.
     * @param lifecycleEventPublisher lifecycle event publisher.
     * @param leadershipManager leadership manager, marked ready on start().
     * @param agentClusterOperations agent-cluster operations backing the VM routes.
     */
    public MasterApiAkkaService(final MasterMonitor masterMonitor,
                                final MasterDescription masterDescription,
                                final ActorRef jobClustersManagerActor,
                                final ActorRef statusEventBrokerActor,
                                final int serverPort,
                                final IMantisStorageProvider mantisStorageProvider,
                                final MantisScheduler scheduler,
                                final LifecycleEventPublisher lifecycleEventPublisher,
                                final ILeadershipManager leadershipManager,
                                final AgentClusterOperations agentClusterOperations) {
        super(true);
        Preconditions.checkNotNull(masterMonitor, "MasterMonitor");
        Preconditions.checkNotNull(masterDescription, "masterDescription");
        Preconditions.checkNotNull(jobClustersManagerActor, "jobClustersManagerActor");
        Preconditions.checkNotNull(statusEventBrokerActor, "statusEventBrokerActor");
        Preconditions.checkNotNull(mantisStorageProvider, "mantisStorageProvider");
        Preconditions.checkNotNull(scheduler, "scheduler");
        Preconditions.checkNotNull(lifecycleEventPublisher, "lifecycleEventPublisher");
        Preconditions.checkNotNull(leadershipManager, "leadershipManager");
        Preconditions.checkNotNull(agentClusterOperations, "agentClusterOperations");
        this.masterMonitor = masterMonitor;
        this.masterDescription = masterDescription;
        this.jobClustersManagerActor = jobClustersManagerActor;
        this.statusEventBrokerActor = statusEventBrokerActor;
        this.port = serverPort;
        this.storageProvider = mantisStorageProvider;
        this.scheduler = scheduler;
        this.lifecycleEventPublisher = lifecycleEventPublisher;
        this.leadershipManager = leadershipManager;
        // Dedicated actor system for the API server, separate from the master's main system.
        this.system = ActorSystem.create("MasterApiActorSystem");
        this.materializer = ActorMaterializer.create(system);
        this.mantisMasterRoute = configureApiRoutes(this.system, agentClusterOperations);
        // Single daemon thread runs (and blocks inside) startAPIServer until shutdown.
        this.executorService = Executors.newSingleThreadExecutor(r -> {
            Thread t = new Thread(r, "MasterApiAkkaServiceThread");
            t.setDaemon(true);
            return t;
        });
        executorService.execute(() -> {
            try {
                startAPIServer();
            } catch (Exception e) {
                logger.warn("caught exception starting API server", e);
            }
        });
    }

    /**
     * Builds the composite master route from all v0 and v1 route handlers.
     * All routes are wrapped with the leader-redirection filter at createRoute() time.
     */
    private MantisMasterRoute configureApiRoutes(final ActorSystem actorSystem,
                                                 final AgentClusterOperations agentClusterOperations) {
        // Setup API routes
        final JobClusterRouteHandler jobClusterRouteHandler = new JobClusterRouteHandlerAkkaImpl(jobClustersManagerActor);
        final JobRouteHandler jobRouteHandler = new JobRouteHandlerAkkaImpl(jobClustersManagerActor);
        final MasterDescriptionRoute masterDescriptionRoute = new MasterDescriptionRoute(masterDescription);
        final JobRoute v0JobRoute = new JobRoute(jobRouteHandler, actorSystem);
        java.time.Duration idleTimeout = actorSystem.settings().config().getDuration("akka.http.server.idle-timeout");
        logger.info("idle timeout {} sec ", idleTimeout.getSeconds());
        final JobStatusRouteHandler jobStatusRouteHandler = new JobStatusRouteHandlerAkkaImpl(actorSystem, statusEventBrokerActor);
        final JobDiscoveryRouteHandler jobDiscoveryRouteHandler = new JobDiscoveryRouteHandlerAkkaImpl(jobClustersManagerActor, idleTimeout);
        final JobDiscoveryRoute v0JobDiscoveryRoute = new JobDiscoveryRoute(jobDiscoveryRouteHandler);
        final JobClusterRoute v0JobClusterRoute = new JobClusterRoute(jobClusterRouteHandler, jobRouteHandler, actorSystem);
        final AgentClusterRoute v0AgentClusterRoute = new AgentClusterRoute(agentClusterOperations, actorSystem);
        final JobStatusRoute v0JobStatusRoute = new JobStatusRoute(jobStatusRouteHandler);
        final JobClustersRoute v1JobClusterRoute = new JobClustersRoute(jobClusterRouteHandler, actorSystem);
        final JobsRoute v1JobsRoute = new JobsRoute(jobClusterRouteHandler, jobRouteHandler, actorSystem);
        final AdminMasterRoute v1AdminMasterRoute = new AdminMasterRoute(masterDescription);
        final AgentClustersRoute v1AgentClustersRoute = new AgentClustersRoute(agentClusterOperations);
        final JobDiscoveryStreamRoute v1JobDiscoveryStreamRoute = new JobDiscoveryStreamRoute(jobDiscoveryRouteHandler);
        final LastSubmittedJobIdStreamRoute v1LastSubmittedJobIdStreamRoute = new LastSubmittedJobIdStreamRoute(jobDiscoveryRouteHandler);
        final JobStatusStreamRoute v1JobStatusStreamRoute = new JobStatusStreamRoute(jobStatusRouteHandler);
        final LeaderRedirectionFilter leaderRedirectionFilter = new LeaderRedirectionFilter(masterMonitor, leadershipManager);
        return new MantisMasterRoute(leaderRedirectionFilter,
            masterDescriptionRoute,
            v0JobClusterRoute,
            v0JobRoute,
            v0JobDiscoveryRoute,
            v0JobStatusRoute,
            v0AgentClusterRoute,
            v1JobClusterRoute,
            v1JobsRoute,
            v1AdminMasterRoute,
            v1AgentClustersRoute,
            v1JobDiscoveryStreamRoute,
            v1LastSubmittedJobIdStreamRoute,
            v1JobStatusStreamRoute);
    }

    /**
     * Binds the HTTP server, then blocks the calling thread on serviceLatch until shutdown.
     * Any bind failure, interruption, or unbind terminates the actor system and exits the
     * process with status 2 — this service is considered essential to the master.
     */
    private void startAPIServer() {
        final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow =
            this.mantisMasterRoute.createRoute().flow(system, materializer);
        final Http http = Http.get(system);
        ServerSettings defaultSettings = ServerSettings.create(system);
        java.time.Duration idleTimeout = system.settings().config().getDuration("akka.http.server.idle-timeout");
        logger.info("idle timeout {} sec ", idleTimeout.getSeconds());
        // Keep websocket connections alive just under the idle timeout so they are not reaped.
        WebSocketSettings customWebsocketSettings = defaultSettings.getWebsocketSettings()
            .withPeriodicKeepAliveMaxIdle(Duration.create(idleTimeout.getSeconds() - 1, TimeUnit.SECONDS))
            .withPeriodicKeepAliveMode("pong");
        ServerSettings customServerSettings = defaultSettings.withWebsocketSettings(customWebsocketSettings);
        final CompletionStage<ServerBinding> binding = http.bindAndHandle(routeFlow,
            ConnectHttp.toHost("0.0.0.0", port), customServerSettings, system.log(), materializer);
        binding.exceptionally(failure -> {
            System.err.println("API service exited, committing suicide !" + failure.getMessage());
            logger.info("Master API service exited in error, committing suicide !");
            system.terminate();
            System.exit(2);
            return null;
        });
        logger.info("Starting Mantis Master API on port {}", port);
        try {
            // Block here until shutdown() counts the latch down.
            serviceLatch.await();
        } catch (InterruptedException e) {
            logger.error("Master API thread interrupted, committing suicide", e);
            System.exit(2);
        }
        binding
            .thenCompose(ServerBinding::unbind) // trigger unbinding from the port
            .thenAccept(unbound -> {
                logger.error("Master API service unbind, committing suicide");
                system.terminate();
                System.exit(2);
            }); // and shutdown when done
    }

    @Override
    public void start() {
        // The HTTP server was already launched in the constructor; here we only mark this
        // instance as ready to lead once active mode is reached.
        super.awaitActiveModeAndStart(() -> {
            logger.info("marking leader READY");
            leadershipManager.setLeaderReady();
        });
    }

    @Override
    public void shutdown() {
        super.shutdown();
        logger.info("Shutting down Mantis Master API");
        // Release the server thread blocked in startAPIServer(); it then unbinds and exits.
        serviceLatch.countDown();
        executorService.shutdownNow();
        system.terminate();
    }
}
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/MantisMasterRoute.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route;

import akka.http.javadsl.server.AllDirectives;
import akka.http.javadsl.server.Route;
import io.mantisrx.master.api.akka.route.v0.AgentClusterRoute;
import io.mantisrx.master.api.akka.route.v0.JobClusterRoute;
import io.mantisrx.master.api.akka.route.v0.JobDiscoveryRoute;
import io.mantisrx.master.api.akka.route.v0.JobStatusRoute;
import io.mantisrx.master.api.akka.route.v0.JobRoute;
import io.mantisrx.master.api.akka.route.v0.MasterDescriptionRoute;
import io.mantisrx.master.api.akka.route.v1.AdminMasterRoute;
import io.mantisrx.master.api.akka.route.v1.AgentClustersRoute;
import io.mantisrx.master.api.akka.route.v1.JobClustersRoute;
import io.mantisrx.master.api.akka.route.v1.JobDiscoveryStreamRoute;
import io.mantisrx.master.api.akka.route.v1.JobStatusStreamRoute;
import io.mantisrx.master.api.akka.route.v1.JobsRoute;
import io.mantisrx.master.api.akka.route.v1.LastSubmittedJobIdStreamRoute;
import io.mantisrx.server.master.LeaderRedirectionFilter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Composes all v0 and v1 master API routes into a single Akka HTTP {@link Route}, wrapping
 * every sub-route with the leader-redirection filter so non-leader instances redirect
 * requests to the current leader.
 */
public class MantisMasterRoute extends AllDirectives {
    private static final Logger logger = LoggerFactory.getLogger(MantisMasterRoute.class);

    private final LeaderRedirectionFilter leaderRedirectionFilter;

    // v0 (legacy) routes.
    private final JobClusterRoute v0JobClusterRoute;
    private final JobRoute v0JobRoute;
    private final JobDiscoveryRoute v0JobDiscoveryRoute;
    private final JobStatusRoute v0JobStatusRoute;
    private final AgentClusterRoute v0AgentClusterRoute;
    private final MasterDescriptionRoute v0MasterDescriptionRoute;

    // v1 routes.
    private final JobClustersRoute v1JobClusterRoute;
    private final JobsRoute v1JobsRoute;
    private final AdminMasterRoute v1MasterRoute;
    private final AgentClustersRoute v1AgentClustersRoute;
    private final JobDiscoveryStreamRoute v1JobDiscoveryStreamRoute;
    private final LastSubmittedJobIdStreamRoute v1LastSubmittedJobIdStreamRoute;
    private final JobStatusStreamRoute v1JobStatusStreamRoute;

    public MantisMasterRoute(
        final LeaderRedirectionFilter leaderRedirectionFilter,
        final MasterDescriptionRoute v0MasterDescriptionRoute,
        final JobClusterRoute v0JobClusterRoute,
        final JobRoute v0JobRoute,
        final JobDiscoveryRoute v0JobDiscoveryRoute,
        final JobStatusRoute v0JobStatusRoute,
        final AgentClusterRoute v0AgentClusterRoute,
        final JobClustersRoute v1JobClusterRoute,
        final JobsRoute v1JobsRoute,
        final AdminMasterRoute v1MasterRoute,
        final AgentClustersRoute v1AgentClustersRoute,
        final JobDiscoveryStreamRoute v1JobDiscoveryStreamRoute,
        final LastSubmittedJobIdStreamRoute v1LastSubmittedJobIdStreamRoute,
        final JobStatusStreamRoute v1JobStatusStreamRoute) {
        this.leaderRedirectionFilter = leaderRedirectionFilter;
        this.v0MasterDescriptionRoute = v0MasterDescriptionRoute;
        this.v0JobClusterRoute = v0JobClusterRoute;
        this.v0JobRoute = v0JobRoute;
        this.v0JobDiscoveryRoute = v0JobDiscoveryRoute;
        this.v0JobStatusRoute = v0JobStatusRoute;
        this.v0AgentClusterRoute = v0AgentClusterRoute;
        this.v1JobClusterRoute = v1JobClusterRoute;
        this.v1JobsRoute = v1JobsRoute;
        this.v1MasterRoute = v1MasterRoute;
        this.v1AgentClustersRoute = v1AgentClustersRoute;
        this.v1JobDiscoveryStreamRoute = v1JobDiscoveryStreamRoute;
        this.v1LastSubmittedJobIdStreamRoute = v1LastSubmittedJobIdStreamRoute;
        this.v1JobStatusStreamRoute = v1JobStatusStreamRoute;
    }

    /**
     * Builds the composite route. Order matters: akka-http tries routes in concat order,
     * so v0 routes are matched before v1 routes.
     * @return the composed route, each sub-route guarded by the leader-redirection filter.
     */
    public Route createRoute() {
        return concat(
            v0MasterDescriptionRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v0JobStatusRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v0JobRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v0JobClusterRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v0JobDiscoveryRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v0AgentClusterRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v1JobClusterRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v1JobsRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v1MasterRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v1AgentClustersRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v1JobDiscoveryStreamRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v1LastSubmittedJobIdStreamRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader),
            v1JobStatusStreamRoute.createRoute(leaderRedirectionFilter::redirectIfNotLeader)
        );
    }
}
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/Jackson.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route;

import java.io.IOException;
import akka.http.javadsl.model.HttpEntity;
import akka.http.javadsl.model.MediaTypes;
import akka.http.javadsl.model.RequestEntity;
import akka.http.javadsl.marshalling.Marshaller;
import akka.http.javadsl.unmarshalling.Unmarshaller;
import io.mantisrx.master.api.akka.route.v0.JobRoute;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.FilterProvider;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.PropertyFilter;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
import io.mantisrx.shaded.com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import io.mantisrx.shaded.com.google.common.base.Strings;
import com.netflix.spectator.impl.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Jackson-backed akka-http JSON (un)marshalling helpers for the master API routes.
 *
 * <p>The shared default mapper ignores unknown properties and supports JDK8 Optional types.
 * A default no-op filter provider is applied on serialization so beans annotated with
 * {@code @JsonFilter} serialize without requiring callers to supply a filter.
 */
public class Jackson {
    private static final Logger logger = LoggerFactory.getLogger(Jackson.class);

    // Shared, thread-safe mapper used by all default (un)marshallers.
    private static final ObjectMapper defaultObjectMapper = new ObjectMapper()
        .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
        .registerModule(new Jdk8Module());

    // Filter provider that tolerates unknown filter ids (so @JsonFilter beans don't fail).
    public static final SimpleFilterProvider DEFAULT_FILTER_PROVIDER;

    static {
        DEFAULT_FILTER_PROVIDER = new SimpleFilterProvider();
        DEFAULT_FILTER_PROVIDER.setFailOnUnknownId(false);
    }

    /** @return a JSON marshaller using the default mapper and default filter provider. */
    public static <T> Marshaller<T, RequestEntity> marshaller() {
        return marshaller(defaultObjectMapper, null);
    }

    /** @return a JSON marshaller using the default mapper with the given filter provider. */
    public static <T> Marshaller<T, RequestEntity> marshaller(FilterProvider filterProvider) {
        return marshaller(defaultObjectMapper, filterProvider);
    }

    /**
     * @return a JSON marshaller using the given mapper. Marshalling failures are logged
     * (message truncated to 100 chars) and rethrown as IllegalArgumentException.
     */
    public static <T> Marshaller<T, RequestEntity> marshaller(ObjectMapper mapper) {
        return Marshaller.wrapEntity(
            u -> {
                try {
                    return toJSON(mapper, null, u);
                } catch (JsonProcessingException e) {
                    String objStr = u.toString();
                    String errMsg = "cannot marshal to Json " + objStr.substring(0, Math.min(objStr.length(), 100));
                    logger.warn(errMsg, e);
                    throw new IllegalArgumentException(errMsg);
                }
            },
            Marshaller.stringToEntity(),
            MediaTypes.APPLICATION_JSON
        );
    }

    /** @return a JSON marshaller using the given mapper and filter provider. */
    public static <T> Marshaller<T, RequestEntity> marshaller(
        ObjectMapper mapper, FilterProvider filterProvider) {
        return Marshaller.wrapEntity(
            u -> {
                try {
                    return toJSON(mapper, filterProvider, u);
                } catch (JsonProcessingException e) {
                    String objStr = u.toString();
                    String errMsg = "cannot marshal to Json " + objStr.substring(0, Math.min(objStr.length(), 100));
                    logger.warn(errMsg, e);
                    throw new IllegalArgumentException(errMsg);
                }
            },
            Marshaller.stringToEntity(),
            MediaTypes.APPLICATION_JSON
        );
    }

    /** @return an unmarshaller for the given class using the default mapper. */
    public static <T> Unmarshaller<HttpEntity, T> unmarshaller(Class<T> expectedType) {
        return unmarshaller(defaultObjectMapper, expectedType);
    }

    /** @return an unmarshaller for the given generic type using the default mapper. */
    public static <T> Unmarshaller<HttpEntity, T> unmarshaller(TypeReference<T> expectedType) {
        return unmarshaller(defaultObjectMapper, expectedType);
    }

    /**
     * @return an unmarshaller for the given class; parse failures are logged and rethrown
     * as IllegalArgumentException.
     */
    public static <T> Unmarshaller<HttpEntity, T> unmarshaller(
        ObjectMapper mapper, Class<T> expectedType) {
        return Unmarshaller.forMediaType(MediaTypes.APPLICATION_JSON, Unmarshaller.entityToString())
            .thenApply(s -> {
                try {
                    return fromJSON(mapper, s, expectedType);
                } catch (IOException e) {
                    logger.warn("cannot unmarshal json", e);
                    throw new IllegalArgumentException("cannot unmarshall Json as " + expectedType.getSimpleName());
                }
            });
    }

    /**
     * @return an unmarshaller for the given generic type; parse failures are logged and
     * rethrown as IllegalArgumentException.
     */
    public static <T> Unmarshaller<HttpEntity, T> unmarshaller(
        ObjectMapper mapper, TypeReference<T> expectedType) {
        return Unmarshaller.forMediaType(MediaTypes.APPLICATION_JSON, Unmarshaller.entityToString())
            .thenApply(s -> {
                try {
                    return fromJSON(mapper, s, expectedType);
                } catch (IOException e) {
                    logger.warn("cannot unmarshal json", e);
                    throw new IllegalArgumentException("cannot unmarshall Json as " + expectedType.getType()
                        .getTypeName());
                }
            });
    }

    /** Serializes with the given filters; null filters fall back to DEFAULT_FILTER_PROVIDER. */
    private static String toJSON(
        ObjectMapper mapper,
        FilterProvider filters,
        Object object) throws JsonProcessingException {
        if (filters == null) {
            filters = DEFAULT_FILTER_PROVIDER;
        }
        return mapper.writer(filters).writeValueAsString(object);
    }

    public static <T> T fromJSON(
        ObjectMapper mapper,
        String json,
        TypeReference<T> expectedType) throws IOException {
        return mapper.readerFor(expectedType).readValue(json);
    }

    public static <T> T fromJSON(
        ObjectMapper mapper,
        String json,
        Class<T> expectedType) throws IOException {
        return mapper.readerFor(expectedType).readValue(json);
    }

    public static <T> T fromJSON(String json, Class<T> expectedType) throws IOException {
        return defaultObjectMapper.readerFor(expectedType).readValue(json);
    }

    public static <T> T fromJSON(String json, TypeReference<T> expectedType) throws IOException {
        return defaultObjectMapper.readerFor(expectedType).readValue(json);
    }

    /** Serializes with the default mapper and no filters (unlike the marshallers above). */
    public static String toJson(Object object) throws IOException {
        return defaultObjectMapper.writeValueAsString(object);
    }
}
4,415
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/MasterApiMetrics.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route; import io.mantisrx.common.metrics.Counter; import io.mantisrx.common.metrics.Metrics; import io.mantisrx.common.metrics.MetricsRegistry; public class MasterApiMetrics { private final Counter resp2xx; private final Counter resp4xx; private final Counter resp5xx; private final Counter askTimeOutCount; private static final MasterApiMetrics INSTANCE = new MasterApiMetrics(); private MasterApiMetrics() { Metrics m = new Metrics.Builder() .id("MasterApiMetrics") .addCounter("resp2xx") .addCounter("resp4xx") .addCounter("resp5xx") .addCounter("askTimeOutCount") .build(); Metrics metrics = MetricsRegistry.getInstance().registerAndGet(m); this.askTimeOutCount = metrics.getCounter("askTimeOutCount"); this.resp2xx = metrics.getCounter("resp2xx"); this.resp4xx = metrics.getCounter("resp4xx"); this.resp5xx = metrics.getCounter("resp5xx"); } public static final MasterApiMetrics getInstance() { return INSTANCE; } public void incrementResp2xx() { resp2xx.increment(); } public void incrementResp4xx() { resp4xx.increment(); } public void incrementResp5xx() { resp5xx.increment(); } public void incrementAskTimeOutCount() { askTimeOutCount.increment(); } }
4,416
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v1/HttpRequestMetrics.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v1; import io.mantisrx.shaded.com.google.common.base.Preconditions; import io.mantisrx.shaded.com.google.common.collect.Sets; import com.netflix.spectator.api.Registry; import com.netflix.spectator.api.Tag; import io.mantisrx.common.metrics.spectator.MetricId; import io.mantisrx.common.metrics.spectator.SpectatorRegistryFactory; import java.util.Set; public class HttpRequestMetrics { public enum HttpVerb { GET, POST, PUT, DELETE } public static class Endpoints { public static final String JOB_CLUSTERS = "api.v1.jobClusters"; public static final String JOB_CLUSTER_INSTANCE = "api.v1.jobClusters.instance"; public static final String JOB_CLUSTER_INSTANCE_LATEST_JOB_DISCOVERY_INFO = "api.v1.jobClusters.instance.latestJobDiscoveryInfo"; public static final String JOB_CLUSTER_INSTANCE_ACTION_UPDATE_ARTIFACT = "api.v1.jobClusters.instance.actions.updateArtifact"; public static final String JOB_CLUSTER_INSTANCE_ACTION_UPDATE_SLA = "api.v1.jobClusters.instance.actions.updateSla"; public static final String JOB_CLUSTER_INSTANCE_ACTION_UPDATE_MIGRATION_STRATEGY = "api.v1.jobClusters.instance.actions.updateMigrationStrategy"; public static final String JOB_CLUSTER_INSTANCE_ACTION_UPDATE_LABEL = "api.v1.jobClusters.instance.actions.updateLabel"; public static final String JOB_CLUSTER_INSTANCE_ACTION_ENABLE_CLUSTER = "api.v1.jobClusters.instance.actions.enableCluster"; 
public static final String JOB_CLUSTER_INSTANCE_ACTION_DISABLE_CLUSTER = "api.v1.jobClusters.instance.actions.disableCluster"; public static final String JOBS = "api.v1.jobs"; public static final String JOB_CLUSTER_INSTANCE_JOBS = "api.v1.jobClusters.instance.jobs"; public static final String JOB_INSTANCE = "api.v1.jobs.instance"; public static final String JOB_INSTANCE_ARCHIVED_WORKERS = "api.v1.jobs.instance.archivedWorkers"; public static final String JOB_CLUSTER_INSTANCE_JOB_INSTANCE = "api.v1.jobClusters.instance.jobs.instance"; public static final String JOB_CLUSTER_INSTANCE_JOB_INSTANCE_ARCHIVED = "api.v1.jobClusters.instance.jobs.instance.archived"; public static final String JOBS_ACTION_QUICKSUBMIT = "api.v1.jobs.actions.quickSubmit"; public static final String JOBS_ACTION_POST_JOB_STATUS = "api.v1.jobs.actions.postJobStatus"; public static final String JOB_INSTANCE_ACTION_SCALE_STAGE = "api.v1.jobs.instance.actions.scaleStage"; public static final String JOB_INSTANCE_ACTION_RESUBMIT_WORKER = "api.v1.jobs.instance.actions.resubmitWorker"; public static final String MASTER_INFO = "api.v1.masterInfo"; public static final String MASTER_CONFIGS = "api.v1.masterConfigs"; public static final String AGENT_CLUSTERS = "api.v1.agentClusters"; public static final String AGENT_CLUSTERS_JOBS = "api.v1.agentClusters.jobs"; public static final String AGENT_CLUSTERS_AUTO_SCALE_POLICY = "api.v1.agentClusters.autoScalePolicy"; public static final String JOB_STATUS_STREAM = "api.v1.jobStatusStream.instance"; public static final String JOB_DISCOVERY_STREAM = "api.v1.jobDiscoveryStream.instance"; public static final String LAST_SUBMITTED_JOB_ID_STREAM = "api.v1.lastSubmittedJobIdStream.instance"; private static String[] endpoints = new String[]{ JOB_CLUSTERS, JOB_CLUSTER_INSTANCE, JOB_CLUSTER_INSTANCE_LATEST_JOB_DISCOVERY_INFO, JOB_CLUSTER_INSTANCE_ACTION_UPDATE_ARTIFACT, JOB_CLUSTER_INSTANCE_ACTION_UPDATE_SLA, JOB_CLUSTER_INSTANCE_ACTION_UPDATE_MIGRATION_STRATEGY, 
JOB_CLUSTER_INSTANCE_ACTION_UPDATE_LABEL, JOB_CLUSTER_INSTANCE_ACTION_ENABLE_CLUSTER, JOB_CLUSTER_INSTANCE_ACTION_DISABLE_CLUSTER, JOBS, JOB_CLUSTER_INSTANCE_JOBS, JOB_INSTANCE, JOB_INSTANCE_ARCHIVED_WORKERS, JOB_CLUSTER_INSTANCE_JOB_INSTANCE, JOB_CLUSTER_INSTANCE_JOB_INSTANCE_ARCHIVED, JOBS_ACTION_QUICKSUBMIT, JOBS_ACTION_POST_JOB_STATUS, JOB_INSTANCE_ACTION_SCALE_STAGE, JOB_INSTANCE_ACTION_RESUBMIT_WORKER, MASTER_INFO, MASTER_CONFIGS, AGENT_CLUSTERS, AGENT_CLUSTERS_JOBS, AGENT_CLUSTERS_AUTO_SCALE_POLICY, JOB_STATUS_STREAM, JOB_DISCOVERY_STREAM, LAST_SUBMITTED_JOB_ID_STREAM }; private static Set<String> endpointSet = Sets.newHashSet(endpoints); } private final Registry registry; private static String METRIC_GROUP_ID = "apiv1"; private static HttpRequestMetrics instance; private HttpRequestMetrics() { this.registry = SpectatorRegistryFactory.getRegistry(); } public static HttpRequestMetrics getInstance() { if (instance == null) { instance = new HttpRequestMetrics(); } return instance; } public void incrementEndpointMetrics( String endpoint, final Tag... tags) { Preconditions.checkArgument( Endpoints.endpointSet.contains(endpoint), String.format("endpoint %s is not valid", endpoint)); MetricId id = new MetricId(METRIC_GROUP_ID, endpoint, tags); registry.counter(id.getSpectatorId(registry)).increment(); } }
4,417
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v1/AdminMasterRoute.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.v1;

import akka.http.javadsl.server.PathMatcher0;
import akka.http.javadsl.server.Route;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.spectator.api.BasicTag;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.MasterConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.function.Function;

import static akka.http.javadsl.server.PathMatchers.segment;

/***
 * Master description route
 * Defines the following end points:
 *    /api/v1/masterInfo   (GET)
 *    /api/v1/masterConfigs (GET)
 */
public class AdminMasterRoute extends BaseRoute {
    private static final Logger logger = LoggerFactory.getLogger(AdminMasterRoute.class);

    // Common prefix for both endpoints: /api/v1
    private static final PathMatcher0 MASTER_API_PREFIX = segment("api").slash("v1");

    // Mapper used only to serialize the config snapshots below; lenient on unknown fields.
    private static final ObjectMapper mapper = new ObjectMapper().configure(
            DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    // Sanitized master description returned by GET /api/v1/masterInfo (see ctor).
    private final MasterDescription masterDesc;
    // Name/value config snapshots returned by GET /api/v1/masterConfigs, built once in the ctor.
    private final List<Configlet> configs = new ArrayList<>();

    /**
     * A single named configuration entry; {@code value} is a JSON string produced
     * by serializing the underlying config object/enum values.
     */
    public static class Configlet {
        private final String name;
        private final String value;

        @JsonCreator
        // NOTE(review): @JsonIgnoreProperties is typically a class-level annotation;
        // its effect when placed on a creator should be confirmed.
        @JsonIgnoreProperties(ignoreUnknown = true)
        public Configlet(@JsonProperty("name") String name, @JsonProperty("value") String value) {
            this.name = name;
            this.value = value;
        }

        public String getName() {
            return name;
        }

        public String getValue() {
            return value;
        }

        @Override
        public boolean equals(final Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            final Configlet configlet = (Configlet) o;
            return Objects.equals(name, configlet.name) &&
                    Objects.equals(value, configlet.value);
        }

        @Override
        public int hashCode() {
            return Objects.hash(name, value);
        }

        @Override
        public String toString() {
            return "Configlet{" +
                    "name='" + name + '\'' +
                    ", value='" + value + '\'' +
                    '}';
        }
    }

    /**
     * Per-worker resource ceilings (cpu cores, memory MB, network Mbps) sourced
     * from {@link MasterConfiguration} and exposed as a Configlet.
     */
    static class WorkerResourceLimits {
        private final int maxCpuCores;
        private final int maxMemoryMB;
        private final int maxNetworkMbps;

        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public WorkerResourceLimits(
                @JsonProperty("maxCpuCores") final int maxCpuCores,
                @JsonProperty("maxMemoryMB") final int maxMemoryMB,
                @JsonProperty("maxNetworkMbps") final int maxNetworkMbps) {
            this.maxCpuCores = maxCpuCores;
            this.maxMemoryMB = maxMemoryMB;
            this.maxNetworkMbps = maxNetworkMbps;
        }

        public int getMaxCpuCores() {
            return maxCpuCores;
        }

        public int getMaxMemoryMB() {
            return maxMemoryMB;
        }

        public int getMaxNetworkMbps() {
            return maxNetworkMbps;
        }
    }

    /**
     * Builds the route, deriving a v1-specific {@link MasterDescription} from the
     * given one and pre-serializing the config snapshots served by /masterConfigs.
     *
     * @param masterDescription the live master description to sanitize for v1
     */
    public AdminMasterRoute(final MasterDescription masterDescription) {
        //TODO: hardcode some V1 admin master info, this should be cleaned up once v0 apis
        // are deprecated
        // The two -1 values replace v0-era port fields that have no v1 equivalent
        // (presumably "not applicable" sentinels — confirm against MasterDescription).
        this.masterDesc = new MasterDescription(masterDescription.getHostname(),
                masterDescription.getHostIP(),
                masterDescription.getApiPort(),
                masterDescription.getSchedInfoPort(),
                -1,
                "api/v1/jobs/actions/postJobStatus",
                -1,
                masterDescription.getCreateTime());

        try {
            // Snapshot the enum value sets clients may need for request validation.
            configs.add(new Configlet(
                    JobConstraints.class.getSimpleName(),
                    mapper.writeValueAsString(JobConstraints.values())));
            configs.add(new Configlet(
                    StageScalingPolicy.ScalingReason.class.getSimpleName(),
                    mapper.writeValueAsString(StageScalingPolicy.ScalingReason.values())));
            configs.add(new Configlet(
                    WorkerMigrationConfig.MigrationStrategyEnum.class.getSimpleName(),
                    mapper.writeValueAsString(WorkerMigrationConfig.MigrationStrategyEnum.values())));

            // Expose the configured per-worker machine-definition ceilings.
            MasterConfiguration config = ConfigurationProvider.getConfig();
            int maxCpuCores = config.getWorkerMachineDefinitionMaxCpuCores();
            int maxMemoryMB = config.getWorkerMachineDefinitionMaxMemoryMB();
            int maxNetworkMbps = config.getWorkerMachineDefinitionMaxNetworkMbps();
            configs.add(new Configlet(
                    WorkerResourceLimits.class.getSimpleName(),
                    mapper.writeValueAsString(new WorkerResourceLimits(
                            maxCpuCores, maxMemoryMB, maxNetworkMbps))));
        } catch (JsonProcessingException e) {
            // Serialization failure leaves configs partially populated; only logged.
            logger.error(e.getMessage(), e);
        }
    }

    /** Returns the pre-built config snapshots (visible for tests). */
    public List<Configlet> getConfigs() {
        return configs;
    }

    @Override
    protected Route constructRoutes() {
        return pathPrefix(
                MASTER_API_PREFIX,
                () -> concat(
                        // GET api/v1/masterInfo
                        path(segment("masterInfo"),
                                () -> pathEndOrSingleSlash(() -> concat(
                                        get(this::getMasterInfo)))),

                        // GET api/v1/masterConfigs
                        path(segment("masterConfigs"),
                                () -> pathEndOrSingleSlash(() -> concat(
                                        get(this::getMasterConfigs))))
                ));
    }

    @Override
    public Route createRoute(Function<Route, Route> routeFilter) {
        logger.info("creating /api/v1/masterInfo routes");
        logger.info("creating /api/v1/masterConfigs routes");
        return super.createRoute(routeFilter);
    }

    /** GET /api/v1/masterInfo — returns the sanitized master description as JSON. */
    private Route getMasterInfo() {
        logger.info("GET /api/v1/masterInfo called");
        HttpRequestMetrics.getInstance().incrementEndpointMetrics(
                HttpRequestMetrics.Endpoints.MASTER_INFO,
                new BasicTag("verb", HttpRequestMetrics.HttpVerb.GET.toString()),
                new BasicTag("responseCode", "200"));
        return completeOK(masterDesc, Jackson.marshaller());
    }

    /** GET /api/v1/masterConfigs — returns the pre-built Configlet list as JSON. */
    private Route getMasterConfigs() {
        logger.info("GET /api/v1/masterConfigs called");
        HttpRequestMetrics.getInstance().incrementEndpointMetrics(
                HttpRequestMetrics.Endpoints.MASTER_CONFIGS,
                new BasicTag("verb", HttpRequestMetrics.HttpVerb.GET.toString()),
                new BasicTag("responseCode", "200"));
        return completeOK(configs, Jackson.marshaller());
    }
}
4,418
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v1/JobStatusStreamRoute.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v1; import akka.NotUsed; import akka.http.javadsl.model.ws.Message; import akka.http.javadsl.server.PathMatcher0; import akka.http.javadsl.server.PathMatchers; import akka.http.javadsl.server.Route; import akka.stream.javadsl.Flow; import com.netflix.spectator.api.BasicTag; import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.function.Function; import static akka.http.javadsl.server.PathMatchers.segment; /*** * JobStatusStreamRoute * Defines the following end points: * /api/v1/jobStatusStream/{jobId} (websocket) */ public class JobStatusStreamRoute extends BaseRoute { private static final Logger logger = LoggerFactory.getLogger(JobStatusStreamRoute.class); private final JobStatusRouteHandler jobStatusRouteHandler; private static final PathMatcher0 JOBSTATUS_API_PREFIX = segment("api").slash("v1"); public JobStatusStreamRoute(final JobStatusRouteHandler jobStatusRouteHandler) { this.jobStatusRouteHandler = jobStatusRouteHandler; } @Override protected Route constructRoutes() { return pathPrefix( JOBSTATUS_API_PREFIX, () -> concat( path(segment("jobStatusStream").slash(PathMatchers.segment()), (jobId) -> get(() -> getJobStatusStreamRoute(jobId)) ) ) ); } @Override public Route createRoute(Function<Route, Route> routeFilter) { logger.info("creating 
/api/v1/jobStatusStream routes"); return super.createRoute(routeFilter); } private Route getJobStatusStreamRoute(String jobId) { logger.info("/api/v1/jobStatusStream/{} called", jobId); HttpRequestMetrics.getInstance().incrementEndpointMetrics( HttpRequestMetrics.Endpoints.JOB_STATUS_STREAM); Flow<Message, Message, NotUsed> webSocketFlow = jobStatusRouteHandler.jobStatus(jobId); return handleWebSocketMessages(webSocketFlow); } }
4,419
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v1/JobsRoute.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v1; import akka.actor.ActorSystem; import akka.http.caching.javadsl.Cache; import akka.http.javadsl.model.StatusCodes; import akka.http.javadsl.model.Uri; import akka.http.javadsl.server.*; import akka.http.javadsl.unmarshalling.StringUnmarshallers; import akka.japi.Pair; import io.mantisrx.shaded.com.google.common.base.Strings; import io.mantisrx.master.api.akka.route.Jackson; import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler; import io.mantisrx.master.api.akka.route.handlers.JobRouteHandler; import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter; import io.mantisrx.master.jobcluster.job.MantisJobMetadataView; import io.mantisrx.master.jobcluster.proto.BaseResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import io.mantisrx.runtime.MantisJobDefinition; import io.mantisrx.runtime.descriptor.SchedulingInfo; import io.mantisrx.runtime.descriptor.StageScalingPolicy; import io.mantisrx.runtime.descriptor.StageSchedulingInfo; import io.mantisrx.server.core.PostJobStatusRequest; import io.mantisrx.server.master.config.ConfigurationProvider; import io.mantisrx.server.master.config.MasterConfiguration; import io.mantisrx.server.master.domain.DataFormatAdapter; import io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.http.api.CompactJobInfo; import 
io.mantisrx.server.master.store.MantisWorkerMetadataWritable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collections; import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; import java.util.function.Function; import static akka.http.javadsl.server.PathMatchers.segment; import static io.mantisrx.master.api.akka.route.utils.JobRouteUtils.createListJobsRequest; import static io.mantisrx.master.api.akka.route.utils.JobRouteUtils.createWorkerStatusRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersRequest.DEFAULT_LIST_ARCHIVED_WORKERS_LIMIT; import static akka.http.javadsl.server.directives.CachingDirectives.cache; import static akka.http.javadsl.server.directives.CachingDirectives.alwaysCache; /*** * JobsRoute * Defines the following end points: * api/v1/jobs (GET, POST) * api/v1/jobClusters/{}/jobs (GET, POST) * api/v1/jobs/{} (GET, DELETE) * api/v1/jobClusters/{}/jobs/{} (GET) * api/v1/jobs/{}/archivedWorkers (GET) * api/v1/jobs/actions/quickSubmit (POST) * api/v1/jobs/actions/postJobStatus (POST) * api/v1/jobs/{}/actions/scaleStage (POST) * api/v1/jobs/{}/actions/resubmitWorker (POST) */ public class JobsRoute extends BaseRoute { private static final Logger logger = LoggerFactory.getLogger(JobsRoute.class); private static final PathMatcher0 JOBS_API_PREFIX = segment("api").slash("v1").slash("jobs"); private static final PathMatcher1<String> CLUSTER_JOBS_API_PREFIX = segment("api").slash("v1") .slash("jobClusters") .slash(PathMatchers.segment()) .slash("jobs"); private final JobRouteHandler jobRouteHandler; private final JobClusterRouteHandler clusterRouteHandler; private final MasterConfiguration config; private final Cache<Uri, RouteResult> routeResultCache; public JobsRoute( final JobClusterRouteHandler clusterRouteHandler, final JobRouteHandler jobRouteHandler, final ActorSystem actorSystem) { 
this.jobRouteHandler = jobRouteHandler; this.clusterRouteHandler = clusterRouteHandler; this.config = ConfigurationProvider.getConfig(); this.routeResultCache = createCache(actorSystem, config.getApiCacheMinSize(), config.getApiCacheMaxSize(), config.getApiCacheTtlMilliseconds()); } public Route constructRoutes() { return concat( pathPrefix(JOBS_API_PREFIX, () -> concat( // api/v1/jobs pathEndOrSingleSlash(() -> concat( // GET - list jobs get(this::getJobsRoute), // POST - submit a job post(this::postJobsRoute) )), // api/v1/jobs/{jobId} path( PathMatchers.segment(), (jobId) -> pathEndOrSingleSlash(() -> concat( // GET - retrieve job detail by job ID get(() -> getJobInstanceRoute(jobId)), // DELETE - permanently kill a job. delete(() -> deleteJobInstanceRoute(jobId)), // reject post post(() -> complete(StatusCodes.METHOD_NOT_ALLOWED)) )) ), path(PathMatchers.segment().slash("archivedWorkers"), (jobId) -> pathEndOrSingleSlash(() -> concat( get(()-> getArchivedWorkers(jobId)) )) ), // api/v1/jobs/actions/quickSubmit path( PathMatchers.segment("actions").slash("quickSubmit"), () -> pathEndOrSingleSlash( () -> // POST - quick submit a job post(this::postJobInstanceQuickSubmitRoute) ) ), // api/v1/jobs/actions/postJobStatus path( PathMatchers.segment("actions").slash("postJobStatus"), () -> pathEndOrSingleSlash( () -> // POST Job Status post(this::postJobStatusRoute) ) ), // api/v1/jobs/{jobId}/actions/scaleStage path( PathMatchers.segment().slash("actions").slash("scaleStage"), (jobId) -> pathEndOrSingleSlash( // POST - scale stage () -> post(() -> postJobInstanceScaleStageRoute(jobId)) ) ), // api/v1/jobs/{jobId}/actions/resubmitWorker path( PathMatchers.segment().slash("actions").slash("resubmitWorker"), (jobId) -> pathEndOrSingleSlash( () -> // POST - resubmit worker post(() -> postJobInstanceResubmitWorkerRoute(jobId)) ) )) ), pathPrefix(CLUSTER_JOBS_API_PREFIX, (cluster) -> concat( // api/v1/jobClusters/{clusterName}/jobs pathEndOrSingleSlash(() -> concat( // GET 
- list jobs get(() -> getJobsRoute(Optional.of(cluster))), // POST - submit a job post(() -> postJobsRoute(Optional.of(cluster)))) ), // api/v1/jobClusters/{clusterName}/jobs/{jobId} path( PathMatchers.segment(), (jobId) -> pathEndOrSingleSlash(() -> concat( // GET - retrieve job detail by cluster & job ID get(() -> getJobInstanceRoute(Optional.of(cluster), jobId)), // reject post post(() -> complete(StatusCodes.METHOD_NOT_ALLOWED)) ))) ) ) ); } @Override public Route createRoute(Function<Route, Route> routeFilter) { logger.info("creating /api/v1/jobs routes"); return super.createRoute(routeFilter); } private Route getJobsRoute() { return getJobsRoute(Optional.empty()); } private Route getJobsRoute(Optional<String> clusterName) { return parameterOptional(StringUnmarshallers.INTEGER, ParamName.PAGINATION_LIMIT, (pageSize) -> parameterOptional(StringUnmarshallers.INTEGER, ParamName.PAGINATION_OFFSET, (offset) -> parameterOptional(StringUnmarshallers.BOOLEAN, ParamName.SORT_ASCENDING, (ascending) -> parameterOptional(StringUnmarshallers.STRING, ParamName.SORT_BY, (sortField) -> parameterOptional(StringUnmarshallers.STRING, ParamName.PROJECTION_FIELDS, (fields) -> parameterOptional(StringUnmarshallers.STRING, ParamName.PROJECTION_TARGET, (target) -> parameterOptional(StringUnmarshallers.BOOLEAN, ParamName.JOB_COMPACT, (isCompact) -> parameterOptional(StringUnmarshallers.STRING, ParamName.JOB_FILTER_MATCH, (matching) -> parameterMultiMap(params -> alwaysCache(routeResultCache, getRequestUriKeyer , () -> extractUri(uri -> { String endpoint; if (clusterName.isPresent()) { logger.debug("GET /api/v1/jobClusters/{}/jobs called", clusterName); endpoint = HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_JOBS; } else { logger.debug("GET /api/v1/jobs called"); endpoint = HttpRequestMetrics.Endpoints.JOBS; } JobClusterManagerProto.ListJobsRequest listJobsRequest = createListJobsRequest( params, clusterName.map(s -> Optional.of("^" + s + "$")).orElse(matching), true); return 
completeAsync( jobRouteHandler.listJobs(listJobsRequest), resp -> completeOK( (isCompact.isPresent() && isCompact.get()) ? resp.getJobList( JobClusterProtoAdapter::toCompactJobInfo, CompactJobInfo.class, pageSize.orElse(null), offset.orElse(null), sortField.orElse(null), ascending.orElse(null), uri) : resp.getJobList(pageSize.orElse(null), offset.orElse(null), sortField.orElse(null), ascending.orElse(null), uri), Jackson.marshaller( super.parseFilter(fields.orElse(null), target.orElse(null))) ), endpoint, HttpRequestMetrics.HttpVerb.GET ); }))))))))))); } private Route postJobsRoute() { return postJobsRoute(Optional.empty()); } private Route postJobsRoute(Optional<String> clusterName) { return decodeRequest(() -> entity( Jackson.unmarshaller(MantisJobDefinition.class), submitJobRequest -> { String endpoint; if (clusterName.isPresent()) { logger.info( "POST /api/v1/jobClusters/{}/jobs called {}", clusterName); endpoint = HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_JOBS; } else { logger.info( "POST /api/v1/jobs called {}", submitJobRequest); endpoint = HttpRequestMetrics.Endpoints.JOBS; } CompletionStage<JobClusterManagerProto.SubmitJobResponse> response = null; try { // validate request submitJobRequest.validate(true); Pair<Boolean, String> validationResult = validateSubmitJobRequest( submitJobRequest, clusterName); if (!validationResult.first()) { CompletableFuture<JobClusterManagerProto.SubmitJobResponse> resp = new CompletableFuture<>(); resp.complete( new JobClusterManagerProto.SubmitJobResponse( -1, BaseResponse.ResponseCode.CLIENT_ERROR, validationResult.second(), Optional.empty())); response = resp; } else { response = clusterRouteHandler.submit( JobClusterProtoAdapter.toSubmitJobClusterRequest( submitJobRequest)); } } catch (Exception e) { logger.warn("exception in submit job request {}", submitJobRequest, e); CompletableFuture<JobClusterManagerProto.SubmitJobResponse> resp = new CompletableFuture<>(); resp.complete( new 
JobClusterManagerProto.SubmitJobResponse( -1, BaseResponse.ResponseCode.SERVER_ERROR, e.getMessage(), Optional.empty())); response = resp; } CompletionStage<JobClusterManagerProto.GetJobDetailsResponse> r = response.thenCompose( t -> { if (t.responseCode.getValue() >= 200 && t.responseCode.getValue() < 300) { final JobClusterManagerProto.GetJobDetailsRequest request = new JobClusterManagerProto.GetJobDetailsRequest( submitJobRequest.getUser(), t.getJobId().get()); return jobRouteHandler.getJobDetails(request); } else { CompletableFuture<JobClusterManagerProto.GetJobDetailsResponse> responseCompletableFuture = new CompletableFuture<>(); responseCompletableFuture.complete( new JobClusterManagerProto.GetJobDetailsResponse( t.requestId, t.responseCode, t.message, Optional.empty())); return responseCompletableFuture; } }); return completeAsync( r, resp -> complete( StatusCodes.CREATED, resp.getJobMetadata().map(metaData -> new MantisJobMetadataView(metaData, Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), false)), Jackson.marshaller()), endpoint, HttpRequestMetrics.HttpVerb.POST); }) ); } private Route getJobInstanceRoute(String jobId) { return getJobInstanceRoute(Optional.empty(), jobId); } private Route getJobInstanceRoute(Optional<String> clusterName, String jobId) { String endpoint; if (clusterName.isPresent()) { logger.info("GET /api/v1/jobClusters/{}/jobs/{} called", clusterName.get(), jobId); endpoint = HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_JOBS; } else { logger.info("GET /api/v1/jobs/{} called", jobId); endpoint = HttpRequestMetrics.Endpoints.JOBS; } return parameterOptional(StringUnmarshallers.STRING, ParamName.PROJECTION_FIELDS, (fields) -> parameterOptional(StringUnmarshallers.STRING, ParamName.PROJECTION_TARGET, (target) -> completeAsync( jobRouteHandler.getJobDetails( new JobClusterManagerProto.GetJobDetailsRequest("masterAPI", jobId)) .thenCompose(r -> { 
CompletableFuture<JobClusterManagerProto.GetJobDetailsResponse> resp = new CompletableFuture<>(); if (r.responseCode.getValue() >= 200 && r.responseCode.getValue() < 300 && clusterName.isPresent() && r.getJobMetadata().isPresent()) { if (!clusterName.get().equals( r.getJobMetadata().get().getClusterName())) { String msg = String.format( "JobId [%s] exists but does not belong to specified cluster [%s]", jobId, clusterName.get()); resp.complete( new JobClusterManagerProto.GetJobDetailsResponse( r.requestId, BaseResponse.ResponseCode.CLIENT_ERROR_NOT_FOUND, msg, Optional.empty())); } else { resp.complete(r); } } else { resp.complete(r); } return resp; }), resp -> complete( StatusCodes.OK, resp.getJobMetadata().map(metaData -> new MantisJobMetadataView(metaData, Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), false)), Jackson.marshaller(super.parseFilter(fields.orElse(null), target.orElse(null)))), endpoint, HttpRequestMetrics.HttpVerb.GET) )); } private Route getArchivedWorkers(String jobId) { logger.info("GET /api/v1/jobs/{}/archivedWorkers called", jobId); Optional<JobId> parsedJobId = JobId.fromId(jobId); if (!parsedJobId.isPresent()){ return complete(StatusCodes.BAD_REQUEST, super.generateFailureResponsePayload("Invalid jobId in URI", -1)); } else { return parameterOptional(StringUnmarshallers.INTEGER, ParamName.PAGINATION_LIMIT, (pageSize) -> parameterOptional(StringUnmarshallers.INTEGER, ParamName.PAGINATION_OFFSET, (offset) -> parameterOptional(StringUnmarshallers.BOOLEAN, ParamName.SORT_ASCENDING, (ascending) -> parameterOptional(StringUnmarshallers.STRING, ParamName.SORT_BY, (sortField) -> parameterOptional(StringUnmarshallers.STRING, ParamName.PROJECTION_FIELDS, (fields) -> parameterOptional(StringUnmarshallers.STRING, ParamName.PROJECTION_TARGET, (target) -> parameterOptional(StringUnmarshallers.INTEGER, ParamName.SERVER_FILTER_LIMIT, (limit) -> parameterMultiMap(params -> extractUri(uri -> { 
JobClusterManagerProto.ListArchivedWorkersRequest req = new JobClusterManagerProto.ListArchivedWorkersRequest( parsedJobId.get(), limit.orElse(DEFAULT_LIST_ARCHIVED_WORKERS_LIMIT)); return completeAsync( jobRouteHandler.listArchivedWorkers(req), resp -> completeOK( resp.getWorkerMetadata(DataFormatAdapter::convertMantisWorkerMetadataToMantisWorkerMetadataWritable, MantisWorkerMetadataWritable.class, pageSize.orElse(null), offset.orElse(null), sortField.orElse(null), ascending.orElse(null), uri), Jackson.marshaller(super.parseFilter(fields.orElse(null), target.orElse(null)))), HttpRequestMetrics.Endpoints.JOB_INSTANCE_ARCHIVED_WORKERS, HttpRequestMetrics.HttpVerb.GET); }))))))))); } } private Route deleteJobInstanceRoute(String jobId) { logger.info("DELETE /api/v1/jobs/{} called", jobId); return parameterOptional(StringUnmarshallers.STRING, ParamName.USER, (user) -> parameterOptional(StringUnmarshallers.STRING, ParamName.REASON, (reason) -> { String userStr = user.orElse(null); String reasonStr = reason.orElse(null); if (Strings.isNullOrEmpty(userStr)) { return complete(StatusCodes.BAD_REQUEST, "Missing required parameter 'user'"); } else if (Strings.isNullOrEmpty(reasonStr)) { return complete(StatusCodes.BAD_REQUEST, "Missing required parameter 'reason'"); } else { return completeAsync( jobRouteHandler.kill(new JobClusterManagerProto.KillJobRequest( jobId, reasonStr, userStr)), resp -> complete( StatusCodes.ACCEPTED, ""), HttpRequestMetrics.Endpoints.JOB_INSTANCE, HttpRequestMetrics.HttpVerb.DELETE); } } ) ); } private Route postJobInstanceQuickSubmitRoute() { return entity( Jackson.unmarshaller(JobClusterManagerProto.SubmitJobRequest.class), request -> { logger.info("POST /api/v1/jobs/actions/quickSubmit called"); final CompletionStage<JobClusterManagerProto.GetJobDetailsResponse> response = clusterRouteHandler.submit(request) .thenCompose(t -> { if (t.responseCode.getValue() >= 200 && t.responseCode.getValue() < 300) { return jobRouteHandler.getJobDetails(new 
JobClusterManagerProto.GetJobDetailsRequest( request.getSubmitter(), t.getJobId().get())); } else { CompletableFuture<JobClusterManagerProto.GetJobDetailsResponse> responseCompletableFuture = new CompletableFuture<>(); responseCompletableFuture.complete( new JobClusterManagerProto.GetJobDetailsResponse( t.requestId, t.responseCode, t.message, Optional.empty())); return responseCompletableFuture; } }); return completeAsync( response, resp -> complete( StatusCodes.CREATED, resp.getJobMetadata().map(metaData -> new MantisJobMetadataView(metaData, Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), false)), Jackson.marshaller() ), HttpRequestMetrics.Endpoints.JOBS_ACTION_QUICKSUBMIT, HttpRequestMetrics.HttpVerb.POST ); }); } private Route postJobStatusRoute() { return entity( Jackson.unmarshaller(PostJobStatusRequest.class), request -> { logger.info("POST /api/v1/jobs/actions/postJobStatus called"); return completeAsync( jobRouteHandler.workerStatus(createWorkerStatusRequest(request)), resp -> complete( StatusCodes.NO_CONTENT, ""), HttpRequestMetrics.Endpoints.JOBS_ACTION_POST_JOB_STATUS, HttpRequestMetrics.HttpVerb.POST ); }); } private Route postJobInstanceScaleStageRoute(String jobId) { return entity( Jackson.unmarshaller(JobClusterManagerProto.ScaleStageRequest.class), request -> { logger.info("POST /api/v1/jobs/{}/actions/scaleStage called", jobId); CompletionStage<JobClusterManagerProto.ScaleStageResponse> response = null; int numWorkers = request.getNumWorkers(); int maxWorkersPerStage = ConfigurationProvider.getConfig().getMaxWorkersPerStage(); if (numWorkers > maxWorkersPerStage) { CompletableFuture<JobClusterManagerProto.ScaleStageResponse> responseCompletableFuture = new CompletableFuture<>(); responseCompletableFuture.complete( new JobClusterManagerProto.ScaleStageResponse( request.requestId, BaseResponse.ResponseCode.CLIENT_ERROR, "num workers must be less than " + maxWorkersPerStage, -1)); response = 
responseCompletableFuture; } else if (jobId.equals(request.getJobId().getId())) { response = jobRouteHandler.scaleStage(request); } else { CompletableFuture<JobClusterManagerProto.ScaleStageResponse> responseCompletableFuture = new CompletableFuture<>(); responseCompletableFuture.complete( new JobClusterManagerProto.ScaleStageResponse( request.requestId, BaseResponse.ResponseCode.CLIENT_ERROR, String.format("JobId specified in request payload [%s] does not match with resource uri [%s]", request.getJobId().getId(), jobId), -1)); response = responseCompletableFuture; } return completeAsync( response, resp -> complete( StatusCodes.NO_CONTENT, ""), HttpRequestMetrics.Endpoints.JOB_INSTANCE_ACTION_SCALE_STAGE, HttpRequestMetrics.HttpVerb.POST ); }); } private Route postJobInstanceResubmitWorkerRoute(String jobId) { return entity( Jackson.unmarshaller(JobClusterManagerProto.V1ResubmitWorkerRequest.class), request -> { logger.info("POST /api/v1/jobs/{}/actions/resubmitWorker called", jobId); CompletionStage<JobClusterManagerProto.ResubmitWorkerResponse> response; response = jobRouteHandler.resubmitWorker( new JobClusterManagerProto.ResubmitWorkerRequest(jobId, request.getWorkerNum(), request.getUser(), request.getReason())); return completeAsync( response, resp -> complete( StatusCodes.NO_CONTENT, ""), HttpRequestMetrics.Endpoints.JOB_INSTANCE_ACTION_RESUBMIT_WORKER, HttpRequestMetrics.HttpVerb.POST ); }); } /** * @return true to indicate valid, false otherwise. 
The String holds the error message when the request is invalid */ private Pair<Boolean, String> validateSubmitJobRequest( MantisJobDefinition mjd, Optional<String> clusterNameInResource) { if (null == mjd) { logger.error("rejecting job submit request, job definition is malformed {}", mjd); return Pair.apply(false, "Malformed job definition."); } // must include job cluster name if (mjd.getName() == null || mjd.getName().length() == 0) { logger.info("rejecting job submit request, must include name {}", mjd); return Pair.apply(false, "Job definition must include name"); } // validate specified job cluster name matches with what specified in REST resource endpoint if (clusterNameInResource.isPresent()) { if (!clusterNameInResource.get().equals(mjd.getName())) { String msg = String.format("Cluster name specified in request payload [%s] " + "does not match with what specified in resource endpoint [%s]", mjd.getName(), clusterNameInResource.get()); logger.info("rejecting job submit request, {} {}", msg, mjd); return Pair.apply(false, msg); } } // validate scheduling info SchedulingInfo schedulingInfo = mjd.getSchedulingInfo(); if (schedulingInfo != null) { Map<Integer, StageSchedulingInfo> stages = schedulingInfo.getStages(); if (stages != null) { for (StageSchedulingInfo stageSchedInfo : stages.values()) { double cpuCores = stageSchedInfo.getMachineDefinition().getCpuCores(); int maxCpuCores = ConfigurationProvider.getConfig() .getWorkerMachineDefinitionMaxCpuCores(); if (cpuCores > maxCpuCores) { logger.info( "rejecting job submit request, requested CPU {} > max for {} (user: {}) (stage: {})", cpuCores, mjd.getName(), mjd.getUser(), stages); return Pair.apply( false, "requested CPU cannot be more than max CPU per worker " + maxCpuCores); } double memoryMB = stageSchedInfo.getMachineDefinition().getMemoryMB(); int maxMemoryMB = ConfigurationProvider.getConfig() .getWorkerMachineDefinitionMaxMemoryMB(); if (memoryMB > maxMemoryMB) { logger.info( "rejecting job submit 
request, requested memory {} > max for {} (user: {}) (stage: {})", memoryMB, mjd.getName(), mjd.getUser(), stages); return Pair.apply( false, "requested memory cannot be more than max memoryMB per worker " + maxMemoryMB); } double networkMbps = stageSchedInfo.getMachineDefinition().getNetworkMbps(); int maxNetworkMbps = ConfigurationProvider.getConfig() .getWorkerMachineDefinitionMaxNetworkMbps(); if (networkMbps > maxNetworkMbps) { logger.info( "rejecting job submit request, requested network {} > max for {} (user: {}) (stage: {})", networkMbps, mjd.getName(), mjd.getUser(), stages); return Pair.apply( false, "requested network cannot be more than max networkMbps per worker " + maxNetworkMbps); } int numberOfInstances = stageSchedInfo.getNumberOfInstances(); int maxWorkersPerStage = ConfigurationProvider.getConfig() .getMaxWorkersPerStage(); if (numberOfInstances > maxWorkersPerStage) { logger.info( "rejecting job submit request, requested num instances {} > max for {} (user: {}) (stage: {})", numberOfInstances, mjd.getName(), mjd.getUser(), stages); return Pair.apply( false, "requested number of instances per stage cannot be more than " + maxWorkersPerStage); } StageScalingPolicy scalingPolicy = stageSchedInfo.getScalingPolicy(); if (scalingPolicy != null) { if (scalingPolicy.getMax() > maxWorkersPerStage) { logger.info( "rejecting job submit request, requested num instances in scaling policy {} > max for {} (user: {}) (stage: {})", numberOfInstances, mjd.getName(), mjd.getUser(), stages); return Pair.apply( false, "requested number of instances per stage in scaling policy cannot be more than " + maxWorkersPerStage); } } } } } return Pair.apply(true, ""); } }
4,420
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v1/JobDiscoveryStreamRoute.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v1; import akka.NotUsed; import akka.http.javadsl.marshalling.sse.EventStreamMarshalling; import akka.http.javadsl.model.StatusCodes; import akka.http.javadsl.model.sse.ServerSentEvent; import akka.http.javadsl.server.PathMatcher0; import akka.http.javadsl.server.PathMatchers; import akka.http.javadsl.server.Route; import akka.http.javadsl.unmarshalling.StringUnmarshallers; import akka.stream.javadsl.Source; import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler; import io.mantisrx.master.api.akka.route.proto.JobDiscoveryRouteProto; import io.mantisrx.master.api.akka.route.utils.StreamingUtils; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import io.mantisrx.server.core.JobSchedulingInfo; import io.mantisrx.server.master.domain.JobId; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.RxReactiveStreams; import java.util.Objects; import java.util.Optional; import java.util.concurrent.CompletionStage; import java.util.function.Function; import static akka.http.javadsl.server.PathMatchers.segment; /*** * JobDiscoveryStreamRoute - returns scheduling info stream for a given job. 
* Defines the following end points: * /api/v1/jobDiscoveryStream/{jobId} (GET) */public class JobDiscoveryStreamRoute extends BaseRoute { private static final Logger logger = LoggerFactory.getLogger(JobDiscoveryStreamRoute.class); private final JobDiscoveryRouteHandler jobDiscoveryRouteHandler; private static final PathMatcher0 JOBDISCOVERY_API_PREFIX = segment("api").slash("v1"); public JobDiscoveryStreamRoute(final JobDiscoveryRouteHandler jobDiscoveryRouteHandler) { this.jobDiscoveryRouteHandler = jobDiscoveryRouteHandler; } @Override protected Route constructRoutes() { return pathPrefix( JOBDISCOVERY_API_PREFIX, () -> concat( path( segment("jobDiscoveryStream").slash(PathMatchers.segment()), (jobId) -> pathEndOrSingleSlash( () -> get(() -> getJobDiscoveryStreamRoute( jobId))) ) ) ); } @Override public Route createRoute(Function<Route, Route> routeFilter) { logger.info("creating /api/v1/jobDiscoveryStream routes"); return super.createRoute(routeFilter); } private Route getJobDiscoveryStreamRoute(String jobId) { return parameterOptional( StringUnmarshallers.BOOLEAN, ParamName.SEND_HEARTBEAT, (sendHeartbeats) -> { logger.info("GET /api/v1/jobStatusStream/{} called", jobId); CompletionStage<JobDiscoveryRouteProto.SchedInfoResponse> schedulingInfoRespCS = jobDiscoveryRouteHandler.schedulingInfoStream( new JobClusterManagerProto .GetJobSchedInfoRequest(JobId.fromId(jobId).get()), sendHeartbeats.orElse(false)); return completeAsync( schedulingInfoRespCS, resp -> { Optional<Observable<JobSchedulingInfo>> siStream = resp.getSchedInfoStream(); if (siStream.isPresent()) { Observable<JobSchedulingInfo> schedulingInfoObs = siStream.get(); Source<ServerSentEvent, NotUsed> schedInfoSource = Source.fromPublisher(RxReactiveStreams.toPublisher( schedulingInfoObs)) .map(j -> StreamingUtils.from(j).orElse(null)) .filter(Objects::nonNull); return completeOK( schedInfoSource, EventStreamMarshalling.toEventStream()); } else { logger.warn( "Failed to get sched info stream for job {}", 
jobId); return complete( StatusCodes.INTERNAL_SERVER_ERROR, "Failed to get sched info stream for job " + jobId); } }, HttpRequestMetrics.Endpoints.JOB_STATUS_STREAM, HttpRequestMetrics.HttpVerb.GET ); }); } }
4,421
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v1/BaseRoute.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v1; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.Arrays; import java.util.Set; import java.util.StringTokenizer; import java.util.concurrent.CompletionStage; import java.util.concurrent.TimeUnit; import java.util.function.Function; import akka.actor.ActorSystem; import akka.http.caching.LfuCache; import akka.http.caching.javadsl.Cache; import akka.http.caching.javadsl.CachingSettings; import akka.http.caching.javadsl.LfuCacheSettings; import akka.http.javadsl.model.ContentTypes; import akka.http.javadsl.model.HttpEntities; import akka.http.javadsl.model.HttpHeader; import akka.http.javadsl.model.HttpMethods; import akka.http.javadsl.model.HttpRequest; import akka.http.javadsl.model.HttpResponse; import akka.http.javadsl.model.StatusCodes; import akka.http.javadsl.model.Uri; import akka.http.javadsl.server.AllDirectives; import akka.http.javadsl.server.ExceptionHandler; import akka.http.javadsl.server.RequestContext; import akka.http.javadsl.server.Route; import akka.http.javadsl.server.RouteResult; import akka.http.javadsl.server.directives.RouteAdapter; import akka.japi.JavaPartialFunction; import akka.japi.pf.PFBuilder; import akka.pattern.AskTimeoutException; import io.mantisrx.shaded.com.fasterxml.jackson.databind.node.JsonNodeFactory; import io.mantisrx.shaded.com.fasterxml.jackson.databind.node.ObjectNode; 
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.FilterProvider; import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; import io.mantisrx.shaded.com.google.common.base.Strings; import io.mantisrx.shaded.com.google.common.collect.Sets; import com.netflix.spectator.api.BasicTag; import io.mantisrx.master.api.akka.route.MasterApiMetrics; import io.mantisrx.master.jobcluster.proto.BaseResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import scala.concurrent.duration.Duration; abstract class BaseRoute extends AllDirectives { private static final Logger logger = LoggerFactory.getLogger(BaseRoute.class); public static final String TOPLEVEL_FILTER = "topLevelFilter"; public static final String JOBMETADATA_FILTER = "jobMetadata"; public static final String STAGEMETADATA_FILTER = "stageMetadataList"; public static final String WORKERMETADATA_FILTER = "workerMetadataList"; private static final HttpHeader ACCESS_CONTROL_ALLOW_ORIGIN_HEADER = HttpHeader.parse("Access-Control-Allow-Origin", "*"); private static final Iterable<HttpHeader> DEFAULT_RESPONSE_HEADERS = Arrays.asList(ACCESS_CONTROL_ALLOW_ORIGIN_HEADER); protected final JavaPartialFunction<RequestContext, Uri> getRequestUriKeyer = new JavaPartialFunction<RequestContext, Uri>() { public Uri apply(RequestContext in, boolean isCheck) { final HttpRequest request = in.getRequest(); final boolean isGet = request.method() == HttpMethods.GET; if (isGet) { return request.getUri(); } else { throw noMatch(); } } }; private String hostName; BaseRoute() { try { this.hostName = InetAddress.getLocalHost().getHostName(); } catch (UnknownHostException ex) { this.hostName = "unknown"; } } protected Cache<Uri, RouteResult> createCache(ActorSystem actorSystem, int initialCapacity, int maxCapacity, int ttlMillis) { final CachingSettings defaultCachingSettings = 
CachingSettings.create(actorSystem); final LfuCacheSettings lfuCacheSettings = defaultCachingSettings.lfuCacheSettings() .withInitialCapacity(initialCapacity) .withMaxCapacity(maxCapacity) .withTimeToLive(Duration.create(ttlMillis, TimeUnit.MILLISECONDS)); final CachingSettings cachingSettings = defaultCachingSettings.withLfuCacheSettings(lfuCacheSettings); return LfuCache.create(cachingSettings); } protected abstract Route constructRoutes(); protected Route createRoute(Function<Route, Route> routeFilter) { final ExceptionHandler jsonExceptionHandler = ExceptionHandler .newBuilder() .match( Exception.class, x -> { logger.error("got exception", x); return complete( StatusCodes.INTERNAL_SERVER_ERROR, generateFailureResponsePayload( "caught exception: " + x.toString(), -1) ); }) .build(); return respondWithHeaders( DEFAULT_RESPONSE_HEADERS, () -> handleExceptions( jsonExceptionHandler, () -> routeFilter.apply(this.constructRoutes()))); } HttpResponse toDefaultHttpResponse(final BaseResponse r) { switch (r.responseCode) { case SUCCESS: return HttpResponse.create() .withEntity(ContentTypes.APPLICATION_JSON, r.message) .withStatus(StatusCodes.OK); case SUCCESS_CREATED: return HttpResponse.create() .withEntity(ContentTypes.APPLICATION_JSON, r.message) .withStatus(StatusCodes.CREATED); case CLIENT_ERROR: return HttpResponse.create() .withEntity( ContentTypes.APPLICATION_JSON, generateFailureResponsePayload(r.message, r.requestId)) .withStatus(StatusCodes.BAD_REQUEST); case CLIENT_ERROR_NOT_FOUND: return HttpResponse.create() .withEntity( ContentTypes.APPLICATION_JSON, generateFailureResponsePayload(r.message, r.requestId)) .withStatus(StatusCodes.NOT_FOUND); case CLIENT_ERROR_CONFLICT: return HttpResponse.create() .withEntity( ContentTypes.APPLICATION_JSON, generateFailureResponsePayload(r.message, r.requestId)) .withStatus(StatusCodes.CONFLICT); case OPERATION_NOT_ALLOWED: return HttpResponse.create() .withEntity( ContentTypes.APPLICATION_JSON, 
generateFailureResponsePayload(r.message, r.requestId)) .withStatus(StatusCodes.METHOD_NOT_ALLOWED); case SERVER_ERROR: default: return HttpResponse.create() .withEntity( ContentTypes.APPLICATION_JSON, generateFailureResponsePayload(r.message, r.requestId)) .withStatus(StatusCodes.INTERNAL_SERVER_ERROR); } } <T extends BaseResponse> RouteAdapter completeAsync( final CompletionStage<T> stage, final Function<T, RouteAdapter> successTransform, String endpointName, HttpRequestMetrics.HttpVerb verb) { return completeAsync( stage, successTransform, r -> { HttpResponse response = toDefaultHttpResponse(r); return complete( response.status(), HttpEntities.create( ContentTypes.APPLICATION_JSON, generateFailureResponsePayload( r.message, r.requestId)) ); }, endpointName, verb); } <T extends BaseResponse> RouteAdapter completeAsync( final CompletionStage<T> stage, final Function<T, RouteAdapter> successTransform, final Function<T, RouteAdapter> clientFailureTransform, String endpointName, HttpRequestMetrics.HttpVerb verb) { return onComplete( stage, resp -> resp .map(r -> { HttpRequestMetrics.getInstance() .incrementEndpointMetrics( endpointName, new BasicTag("verb", verb.toString()), new BasicTag( "responseCode", String.valueOf(r.responseCode.getValue()))); switch (r.responseCode) { case SUCCESS: case SUCCESS_CREATED: MasterApiMetrics.getInstance().incrementResp2xx(); return successTransform.apply(r); case CLIENT_ERROR: case CLIENT_ERROR_CONFLICT: case CLIENT_ERROR_NOT_FOUND: case OPERATION_NOT_ALLOWED: MasterApiMetrics.getInstance().incrementResp4xx(); return clientFailureTransform.apply(r); case SERVER_ERROR: default: MasterApiMetrics.getInstance().incrementResp5xx(); return complete(StatusCodes.INTERNAL_SERVER_ERROR, r.message); } }) .recover( new PFBuilder<Throwable, Route>() .match(AskTimeoutException.class, te -> { MasterApiMetrics.getInstance() .incrementAskTimeOutCount(); MasterApiMetrics.getInstance().incrementResp5xx(); return complete( 
StatusCodes.INTERNAL_SERVER_ERROR, generateFailureResponsePayload( te.toString(), -1)); }) .matchAny(ex -> { MasterApiMetrics.getInstance().incrementResp5xx(); return complete( StatusCodes.INTERNAL_SERVER_ERROR, generateFailureResponsePayload( ex.toString(), -1)); }) .build()).get()); } protected String generateFailureResponsePayload(String errorMsg, long requestId) { ObjectNode node = JsonNodeFactory.instance.objectNode(); node.put("time", System.currentTimeMillis()); node.put("host", this.hostName); node.put("error", errorMsg); node.put("requestId", requestId); return node.toString(); } FilterProvider parseFilter(String fields, String target) { if (Strings.isNullOrEmpty(fields)) { return null; } if (Strings.isNullOrEmpty(target)) { target = TOPLEVEL_FILTER; } Set<String> filtersSet = Sets.newHashSet(); StringTokenizer st = new StringTokenizer(fields, ","); while (st.hasMoreTokens()) { filtersSet.add(st.nextToken().trim()); } return new SimpleFilterProvider() .addFilter(TOPLEVEL_FILTER, TOPLEVEL_FILTER.equalsIgnoreCase(target) ? SimpleBeanPropertyFilter.filterOutAllExcept(filtersSet) : SimpleBeanPropertyFilter.filterOutAllExcept(target)) .addFilter(JOBMETADATA_FILTER, JOBMETADATA_FILTER.equalsIgnoreCase(target) ? SimpleBeanPropertyFilter.filterOutAllExcept(filtersSet) : SimpleBeanPropertyFilter.serializeAll()) .addFilter(STAGEMETADATA_FILTER, STAGEMETADATA_FILTER.equalsIgnoreCase(target) ? SimpleBeanPropertyFilter.filterOutAllExcept(filtersSet) : SimpleBeanPropertyFilter.serializeAll()) .addFilter(WORKERMETADATA_FILTER, WORKERMETADATA_FILTER.equalsIgnoreCase(target) ? SimpleBeanPropertyFilter.filterOutAllExcept(filtersSet) : SimpleBeanPropertyFilter.serializeAll()); } Integer parseInteger(String val) { if (Strings.isNullOrEmpty(val)) { return null; } else { return Integer.valueOf(val); } } Boolean parseBoolean(String val) { if (Strings.isNullOrEmpty(val)) { return null; } else { return Boolean.valueOf(val); } } }
4,422
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v1/LastSubmittedJobIdStreamRoute.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v1; import akka.NotUsed; import akka.http.javadsl.marshalling.sse.EventStreamMarshalling; import akka.http.javadsl.model.StatusCodes; import akka.http.javadsl.model.sse.ServerSentEvent; import akka.http.javadsl.server.PathMatcher0; import akka.http.javadsl.server.PathMatchers; import akka.http.javadsl.server.Route; import akka.http.javadsl.unmarshalling.StringUnmarshallers; import akka.stream.javadsl.Source; import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler; import io.mantisrx.master.api.akka.route.proto.JobClusterInfo; import io.mantisrx.master.api.akka.route.proto.JobDiscoveryRouteProto; import io.mantisrx.master.api.akka.route.utils.StreamingUtils; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.RxReactiveStreams; import java.util.Objects; import java.util.Optional; import java.util.concurrent.CompletionStage; import java.util.function.Function; import static akka.http.javadsl.server.PathMatchers.segment; /*** * LastSubmittedJobIdStreamRoute * Defines the following end points: * /api/v1/lastSubmittedJobIdStream/{clusterName} (GET) */ public class LastSubmittedJobIdStreamRoute extends BaseRoute { private static final Logger logger = LoggerFactory.getLogger(LastSubmittedJobIdStreamRoute.class); private final 
JobDiscoveryRouteHandler jobDiscoveryRouteHandler;

    private static final PathMatcher0 JOBDISCOVERY_API_PREFIX = segment("api").slash("v1");

    public LastSubmittedJobIdStreamRoute(final JobDiscoveryRouteHandler jobDiscoveryRouteHandler) {
        this.jobDiscoveryRouteHandler = jobDiscoveryRouteHandler;
    }

    @Override
    protected Route constructRoutes() {
        return pathPrefix(
            JOBDISCOVERY_API_PREFIX,
            () -> concat(
                path(
                    segment("lastSubmittedJobIdStream").slash(PathMatchers.segment()),
                    (clusterName) -> pathEndOrSingleSlash(
                        () -> get(() -> getLastSubmittedJobIdStreamRoute(clusterName)))
                )
            )
        );
    }

    @Override
    public Route createRoute(Function<Route, Route> routeFilter) {
        // FIX: the message previously said "/api/v1/jobDiscoveryStream" — a
        // copy-paste from JobDiscoveryStreamRoute; this class serves
        // /api/v1/lastSubmittedJobIdStream.
        logger.info("creating /api/v1/lastSubmittedJobIdStream routes");
        return super.createRoute(routeFilter);
    }

    /**
     * Serves the last-submitted-jobId stream of one cluster as server-sent
     * events, optionally including heartbeat events. A missing stream yields 500.
     *
     * @param clusterName the job cluster name from the URI
     * @return SSE route for the cluster's last-submitted jobId updates
     */
    private Route getLastSubmittedJobIdStreamRoute(String clusterName) {
        return parameterOptional(StringUnmarshallers.BOOLEAN, ParamName.SEND_HEARTBEAT,
            (sendHeartbeats) -> {
                logger.info("GET /api/v1/lastSubmittedJobIdStream/{} called", clusterName);
                CompletionStage<JobDiscoveryRouteProto.JobClusterInfoResponse> jobClusterInfoRespCS =
                    jobDiscoveryRouteHandler.lastSubmittedJobIdStream(
                        new JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest(clusterName),
                        sendHeartbeats.orElse(false));

                return completeAsync(
                    jobClusterInfoRespCS,
                    resp -> {
                        Optional<Observable<JobClusterInfo>> jobClusterInfoO =
                            resp.getJobClusterInfoObs();
                        if (jobClusterInfoO.isPresent()) {
                            Observable<JobClusterInfo> jciStream = jobClusterInfoO.get();
                            // Drop events that cannot be rendered as SSE payloads.
                            Source<ServerSentEvent, NotUsed> source = Source
                                .fromPublisher(RxReactiveStreams.toPublisher(jciStream))
                                .map(j -> StreamingUtils.from(j).orElse(null))
                                .filter(Objects::nonNull);
                            return completeOK(
                                source,
                                EventStreamMarshalling.toEventStream());
                        } else {
                            logger.warn(
                                "Failed to get last submitted jobId stream for {}",
                                clusterName);
                            return complete(
                                StatusCodes.INTERNAL_SERVER_ERROR,
                                "Failed to get last submitted jobId stream for " + clusterName);
                        }
                    },
                    HttpRequestMetrics.Endpoints.LAST_SUBMITTED_JOB_ID_STREAM,
                    HttpRequestMetrics.HttpVerb.GET);
            });
    }
}
4,423
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v1/JobClustersRoute.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v1; import akka.actor.ActorSystem; import akka.http.caching.javadsl.Cache; import akka.http.javadsl.model.HttpResponse; import akka.http.javadsl.model.StatusCodes; import akka.http.javadsl.model.Uri; import akka.http.javadsl.server.PathMatcher0; import akka.http.javadsl.server.PathMatchers; import akka.http.javadsl.server.Route; import akka.http.javadsl.server.RouteResult; import akka.http.javadsl.unmarshalling.StringUnmarshallers; import io.mantisrx.shaded.com.google.common.base.Strings; import io.mantisrx.master.api.akka.route.Jackson; import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler; import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter; import io.mantisrx.master.jobcluster.proto.BaseResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import io.mantisrx.runtime.NamedJobDefinition; import io.mantisrx.server.master.config.ConfigurationProvider; import io.mantisrx.server.master.config.MasterConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; import java.util.function.Function; import static akka.http.javadsl.server.PathMatchers.segment; import static akka.http.javadsl.server.directives.CachingDirectives.cache; import static 
akka.http.javadsl.server.directives.CachingDirectives.alwaysCache; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.*; /*** * JobClustersRoute * Defines the following end points: * api/v1/jobsClusters (GET, POST) * api/v1/jobClusters/{}/latestJobDiscoveryInfo (GET) * api/v1/jobClusters/{} (GET, POST, PUT, DELETE) * api/v1/jobClusters/{}/actions/updateArtifact (POST) * api/v1/jobClusters/{}/actions/updateSla (POST) * api/v1/jobClusters/{}/actions/updateMigrationStrategy (POST) * api/v1/jobClusters/{}/actions/updateLabel (POST) * api/v1/jobClusters/{}/actions/enableCluster (POST) * api/v1/jobClusters/{}/actions/disableCluster (POST) */ public class JobClustersRoute extends BaseRoute { private static final Logger logger = LoggerFactory.getLogger(JobClustersRoute.class); private static final PathMatcher0 JOBCLUSTERS_API_PREFIX = segment("api").slash("v1").slash("jobClusters"); private final JobClusterRouteHandler jobClusterRouteHandler; private final Cache<Uri, RouteResult> routeResultCache; public JobClustersRoute(final JobClusterRouteHandler jobClusterRouteHandler, final ActorSystem actorSystem) { this.jobClusterRouteHandler = jobClusterRouteHandler; MasterConfiguration config = ConfigurationProvider.getConfig(); this.routeResultCache = createCache(actorSystem, config.getApiCacheMinSize(), config.getApiCacheMaxSize(), config.getApiCacheTtlMilliseconds()); } public Route constructRoutes() { return pathPrefix( JOBCLUSTERS_API_PREFIX, () -> concat( // api/v1/jobClusters pathEndOrSingleSlash(() -> concat( // GET get(this::getJobClustersRoute), // POST post(this::postJobClustersRoute)) ), // api/v1/jobClusters/{} path( PathMatchers.segment(), (clusterName) -> pathEndOrSingleSlash(() -> concat( // GET get(() -> getJobClusterInstanceRoute(clusterName)), // PUT put(() -> putJobClusterInstanceRoute(clusterName)), // DELETE delete(() -> deleteJobClusterInstanceRoute(clusterName))) ) ), // api/v1/jobClusters/{}/latestJobDiscoveryInfo path( 
                // NOTE(review): this span is the tail of constructRoutes(); the method's opening
                // (and the earlier route entries of the enclosing concat) lies before this chunk.
                // api/v1/jobClusters/{}/latestJobDiscoveryInfo
                PathMatchers.segment().slash("latestJobDiscoveryInfo"),
                (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // GET
                        get(() -> getLatestJobDiscoveryInfo(clusterName))
                ))
            ),
            // api/v1/jobClusters/{}/actions/updateArtifact
            path(
                PathMatchers.segment().slash("actions").slash("updateArtifact"),
                (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // POST
                        post(() -> updateClusterArtifactRoute(clusterName))
                ))
            ),
            // api/v1/jobClusters/{}/actions/updateSla
            pathPrefix(
                PathMatchers.segment().slash("actions").slash("updateSla"),
                (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // POST
                        post(() -> updateClusterSlaRoute(clusterName))
                ))
            ),
            // api/v1/jobClusters/{}/actions/updateMigrationStrategy
            pathPrefix(
                PathMatchers.segment()
                            .slash("actions")
                            .slash("updateMigrationStrategy"),
                (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // POST
                        post(() -> updateMigrationStrategyRoute(clusterName))
                ))
            ),
            // api/v1/jobClusters/{}/actions/updateLabel
            pathPrefix(
                PathMatchers.segment().slash("actions").slash("updateLabel"),
                (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // POST
                        post(() -> updateJobClusterLabelRoute(clusterName))
                ))
            ),
            // api/v1/jobClusters/{}/actions/enableCluster
            pathPrefix(
                PathMatchers.segment().slash("actions").slash("enableCluster"),
                (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // POST
                        post(() -> updateJobClusterStateEnableRoute(clusterName))
                ))
            ),
            // api/v1/jobClusters/{}/actions/disableCluster
            pathPrefix(
                PathMatchers.segment().slash("actions").slash("disableCluster"),
                (clusterName) -> pathEndOrSingleSlash(() -> concat(
                        // POST
                        post(() -> updateJobClusterStateDisableRoute(clusterName))
                ))
            )
            )
        );
    }

    @Override
    public Route createRoute(Function<Route, Route> routeFilter) {
        logger.info("creating /api/v1/jobClusters routes");
        return super.createRoute(routeFilter);
    }

    /**
     * Handles GET /api/v1/jobClusters: lists all job clusters, applying the optional
     * filter/pagination/sort/projection query parameters. Results are served through
     * the route-result cache keyed by request URI.
     */
    private Route getJobClustersRoute() {
        logger.trace("GET /api/v1/jobClusters called");
        return parameterMap(param ->
            alwaysCache(routeResultCache, getRequestUriKeyer, () ->
                extractUri(uri -> {
                    logger.debug("GET all job clusters");
                    return completeAsync(
                        jobClusterRouteHandler.getAllJobClusters(
                            new ListJobClustersRequest()),
                        resp -> completeOK(
                            resp.getJobClusters(
                                param.getOrDefault(
                                    ParamName.JOBCLUSTER_FILTER_MATCH, null),
                                this.parseInteger(param.getOrDefault(
                                    ParamName.PAGINATION_LIMIT, null)),
                                this.parseInteger(param.getOrDefault(
                                    ParamName.PAGINATION_OFFSET, null)),
                                param.getOrDefault(ParamName.SORT_BY, null),
                                this.parseBoolean(param.getOrDefault(
                                    ParamName.SORT_ASCENDING, null)),
                                uri),
                            Jackson.marshaller(super.parseFilter(
                                param.getOrDefault(ParamName.PROJECTION_FIELDS, null),
                                null))),
                        HttpRequestMetrics.Endpoints.JOB_CLUSTERS,
                        HttpRequestMetrics.HttpVerb.GET);
                })));
    }

    /**
     * Handles POST /api/v1/jobClusters: creates a job cluster from the submitted
     * {@link NamedJobDefinition}, then (on 2xx) re-reads the cluster details so the
     * response body reflects the stored state. Non-2xx create results are propagated
     * as an empty GetJobClusterResponse with the original code/message.
     */
    private Route postJobClustersRoute() {
        return entity(Jackson.unmarshaller(NamedJobDefinition.class), jobClusterDefn -> {
            logger.info("POST /api/v1/jobClusters called {}", jobClusterDefn);
            final CreateJobClusterRequest createJobClusterRequest =
                JobClusterProtoAdapter.toCreateJobClusterRequest(jobClusterDefn);
            // sequentially chaining the createJobClusterRequest and getJobClusterRequest
            // when previous is successful
            final CompletionStage<GetJobClusterResponse> response = jobClusterRouteHandler
                .create(createJobClusterRequest)
                .thenCompose(t -> {
                    if (t.responseCode.getValue() >= 200 && t.responseCode.getValue() < 300) {
                        final GetJobClusterRequest request = new GetJobClusterRequest(
                            t.getJobClusterName());
                        return jobClusterRouteHandler.getJobClusterDetails(request);
                    } else {
                        CompletableFuture<GetJobClusterResponse> responseCompletableFuture =
                            new CompletableFuture<>();
                        responseCompletableFuture.complete(
                            new JobClusterManagerProto.GetJobClusterResponse(
                                t.requestId,
                                t.responseCode,
                                t.message,
                                Optional.empty()));
                        return responseCompletableFuture;
                    }
                });
            return completeAsync(
                response,
                resp -> complete(
                    StatusCodes.CREATED,
                    resp.getJobCluster(),
                    Jackson.marshaller()),
                HttpRequestMetrics.Endpoints.JOB_CLUSTERS,
                HttpRequestMetrics.HttpVerb.POST
            );
        });
    }

    /**
     * Handles GET /api/v1/jobClusters/{clusterName}/latestJobDiscoveryInfo, served
     * through the route-result cache. Returns the discovery info (or null body when
     * absent), projected by the optional "fields" parameter.
     */
    private Route getLatestJobDiscoveryInfo(String clusterName) {
        logger.trace("GET /api/v1/jobClusters/{}/latestJobDiscoveryInfo called", clusterName);
        return parameterOptional(
            StringUnmarshallers.STRING,
            ParamName.PROJECTION_FIELDS,
            (fields) ->
                cache(routeResultCache, getRequestUriKeyer, () -> extractUri(uri -> {
                    logger.debug("GET latest job discovery info for {}", clusterName);
                    return completeAsync(
                        jobClusterRouteHandler.getLatestJobDiscoveryInfo(
                            new GetLatestJobDiscoveryInfoRequest(clusterName)),
                        resp -> {
                            HttpResponse httpResponse = this.toDefaultHttpResponse(resp);
                            return complete(
                                httpResponse.status(),
                                resp.getDiscoveryInfo().orElse(null),
                                Jackson.marshaller(super.parseFilter(fields.orElse(null), null)));
                        },
                        HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_LATEST_JOB_DISCOVERY_INFO,
                        HttpRequestMetrics.HttpVerb.GET);
                })));
    }

    /**
     * Handles GET /api/v1/jobClusters/{clusterName}: returns the cluster details,
     * projected by the optional "fields" parameter.
     */
    private Route getJobClusterInstanceRoute(String clusterName) {
        logger.info("GET /api/v1/jobClusters/{} called", clusterName);
        return parameterOptional(
            StringUnmarshallers.STRING,
            ParamName.PROJECTION_FIELDS,
            (fields) -> completeAsync(
                jobClusterRouteHandler.getJobClusterDetails(new GetJobClusterRequest(
                    clusterName)),
                resp -> {
                    HttpResponse httpResponse = this.toDefaultHttpResponse(resp);
                    return complete(
                        httpResponse.status(),
                        resp.getJobCluster(),
                        Jackson.marshaller(super.parseFilter(fields.orElse(null), null)));
                },
                HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE,
                HttpRequestMetrics.HttpVerb.GET));
    }

    /**
     * Handles PUT /api/v1/jobClusters/{clusterName}: updates a cluster definition.
     * Rejects (CLIENT_ERROR) a null job definition or a payload whose cluster name
     * does not match the path segment; on a 2xx update, re-reads and returns the
     * stored cluster details.
     */
    private Route putJobClusterInstanceRoute(String clusterName) {
        return entity(Jackson.unmarshaller(NamedJobDefinition.class), jobClusterDefn -> {
            logger.info("PUT /api/v1/jobClusters/{} called {}", clusterName, jobClusterDefn);
            final UpdateJobClusterRequest request = JobClusterProtoAdapter
                .toUpdateJobClusterRequest(jobClusterDefn);
            CompletionStage<UpdateJobClusterResponse> updateResponse;
            if (jobClusterDefn.getJobDefinition() == null) {
                // if request payload is invalid
                CompletableFuture<UpdateJobClusterResponse> resp = new CompletableFuture<>();
                resp.complete(
                    new UpdateJobClusterResponse(
                        request.requestId,
                        BaseResponse.ResponseCode.CLIENT_ERROR,
                        "Invalid request payload."));
                updateResponse = resp;
            } else if (!clusterName.equals(jobClusterDefn.getJobDefinition().getName())) {
                // if cluster name specified in request payload does not match with what specified in
                // the endpoint path segment
                CompletableFuture<UpdateJobClusterResponse> resp = new CompletableFuture<>();
                resp.complete(
                    new UpdateJobClusterResponse(
                        request.requestId,
                        BaseResponse.ResponseCode.CLIENT_ERROR,
                        String.format(
                            "Cluster name specified in request payload %s " +
                            "does not match with what specified in resource path %s",
                            jobClusterDefn.getJobDefinition().getName(),
                            clusterName)));
                updateResponse = resp;
            } else {
                // everything look ok so far, process the request!
                updateResponse = jobClusterRouteHandler.update(
                    JobClusterProtoAdapter.toUpdateJobClusterRequest(jobClusterDefn));
            }
            CompletionStage<GetJobClusterResponse> response = updateResponse
                .thenCompose(t -> {
                    if (t.responseCode.getValue() >= 200 && t.responseCode.getValue() < 300) {
                        return jobClusterRouteHandler.getJobClusterDetails(
                            new GetJobClusterRequest(clusterName));
                    } else {
                        CompletableFuture<GetJobClusterResponse> responseCompletableFuture =
                            new CompletableFuture<>();
                        responseCompletableFuture.complete(
                            new JobClusterManagerProto.GetJobClusterResponse(
                                t.requestId,
                                t.responseCode,
                                t.message,
                                Optional.empty()));
                        return responseCompletableFuture;
                    }
                });
            return completeAsync(
                response,
                resp -> {
                    HttpResponse httpResponse = this.toDefaultHttpResponse(resp);
                    return complete(
                        httpResponse.status(),
                        resp.getJobCluster(),
                        Jackson.marshaller());
                },
                HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE,
                HttpRequestMetrics.HttpVerb.PUT);
        });
    }

    /**
     * Handles DELETE /api/v1/jobClusters/{clusterName}. Requires the "user" query
     * parameter; responds 202 ACCEPTED once the delete request is submitted.
     */
    private Route deleteJobClusterInstanceRoute(String clusterName) {
        return parameterOptional("user", user -> {
            logger.info("DELETE /api/v1/jobClusters/{} called", clusterName);
            String userStr = user.orElse(null);
            if (Strings.isNullOrEmpty(userStr)) {
                return complete(StatusCodes.BAD_REQUEST, "Missing required parameter 'user'");
            } else {
                return completeAsync(
                    jobClusterRouteHandler.delete(new DeleteJobClusterRequest(userStr, clusterName)),
                    resp -> complete(StatusCodes.ACCEPTED, ""),
                    HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE,
                    HttpRequestMetrics.HttpVerb.DELETE
                );
            }
        });
    }

    /**
     * Handles POST /api/v1/jobClusters/{clusterName}/actions/updateArtifact.
     * Rejects (CLIENT_ERROR) a payload whose cluster name does not match the path
     * segment; success responds 204 NO_CONTENT.
     */
    private Route updateClusterArtifactRoute(String clusterName) {
        return entity(Jackson.unmarshaller(UpdateJobClusterArtifactRequest.class), request -> {
            logger.info(
                "POST /api/v1/jobClusters/{}/actions/updateArtifact called {}",
                clusterName,
                request);
            CompletionStage<UpdateJobClusterArtifactResponse> updateResponse;
            if (!clusterName.equals(request.getClusterName())) {
                // if cluster name specified in request payload does not match with what specified in
                // the endpoint path segment
                CompletableFuture<UpdateJobClusterArtifactResponse> resp =
                    new CompletableFuture<>();
                resp.complete(
                    new UpdateJobClusterArtifactResponse(
                        request.requestId,
                        BaseResponse.ResponseCode.CLIENT_ERROR,
                        String.format(
                            "Cluster name specified in request payload %s " +
                            "does not match with what specified in resource path %s",
                            request.getClusterName(),
                            clusterName)));
                updateResponse = resp;
            } else {
                // everything look ok so far, process the request!
                updateResponse = jobClusterRouteHandler.updateArtifact(request);
            }
            return completeAsync(
                updateResponse,
                resp -> complete(StatusCodes.NO_CONTENT, ""),
                HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_ACTION_UPDATE_ARTIFACT,
                HttpRequestMetrics.HttpVerb.POST
            );
        });
    }

    /**
     * Handles POST /api/v1/jobClusters/{clusterName}/actions/updateSla.
     * Same name-match guard as updateArtifact; success responds 204 NO_CONTENT.
     */
    private Route updateClusterSlaRoute(String clusterName) {
        return entity(Jackson.unmarshaller(UpdateJobClusterSLARequest.class), request -> {
            logger.info(
                "POST /api/v1/jobClusters/{}/actions/updateSla called {}",
                clusterName,
                request);
            CompletionStage<UpdateJobClusterSLAResponse> updateResponse;
            if (!clusterName.equals(request.getClusterName())) {
                // if cluster name specified in request payload does not match with what specified in
                // the endpoint path segment
                CompletableFuture<UpdateJobClusterSLAResponse> resp = new CompletableFuture<>();
                resp.complete(
                    new UpdateJobClusterSLAResponse(
                        request.requestId,
                        BaseResponse.ResponseCode.CLIENT_ERROR,
                        String.format(
                            "Cluster name specified in request payload %s " +
                            "does not match with what specified in resource path %s",
                            request.getClusterName(),
                            clusterName)));
                updateResponse = resp;
            } else {
                // everything look ok so far, process the request!
                updateResponse = jobClusterRouteHandler.updateSLA(request);
            }
            return completeAsync(
                updateResponse,
                resp -> complete(StatusCodes.NO_CONTENT, ""),
                HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_ACTION_UPDATE_SLA,
                HttpRequestMetrics.HttpVerb.POST
            );
        });
    }

    /**
     * Handles POST /api/v1/jobClusters/{clusterName}/actions/updateMigrationStrategy.
     * Same name-match guard; success responds 204 NO_CONTENT.
     */
    private Route updateMigrationStrategyRoute(String clusterName) {
        return entity(
            Jackson.unmarshaller(UpdateJobClusterWorkerMigrationStrategyRequest.class),
            request -> {
                logger.info(
                    "POST /api/v1/jobClusters/{}/actions/updateMigrationStrategy called {}",
                    clusterName,
                    request);
                CompletionStage<UpdateJobClusterWorkerMigrationStrategyResponse> updateResponse;
                if (!clusterName.equals(request.getClusterName())) {
                    // if cluster name specified in request payload does not match with what specified in
                    // the endpoint path segment
                    CompletableFuture<UpdateJobClusterWorkerMigrationStrategyResponse> resp =
                        new CompletableFuture<>();
                    resp.complete(
                        new UpdateJobClusterWorkerMigrationStrategyResponse(
                            request.requestId,
                            BaseResponse.ResponseCode.CLIENT_ERROR,
                            String.format(
                                "Cluster name specified in request payload %s " +
                                "does not match with what specified in resource path %s",
                                request.getClusterName(),
                                clusterName)));
                    updateResponse = resp;
                } else {
                    // everything look ok so far, process the request!
                    updateResponse = jobClusterRouteHandler.updateWorkerMigrateStrategy(request);
                }
                return completeAsync(
                    updateResponse,
                    resp -> complete(StatusCodes.NO_CONTENT, ""),
                    HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_ACTION_UPDATE_MIGRATION_STRATEGY,
                    HttpRequestMetrics.HttpVerb.POST
                );
            });
    }

    /**
     * Handles POST /api/v1/jobClusters/{clusterName}/actions/updateLabel.
     * Same name-match guard; success responds 204 NO_CONTENT.
     */
    private Route updateJobClusterLabelRoute(String clusterName) {
        return entity(Jackson.unmarshaller(UpdateJobClusterLabelsRequest.class), request -> {
            logger.info(
                "POST /api/v1/jobClusters/{}/actions/updateLabel called {}",
                clusterName,
                request);
            CompletionStage<UpdateJobClusterLabelsResponse> updateResponse;
            if (!clusterName.equals(request.getClusterName())) {
                // if cluster name specified in request payload does not match with what specified in
                // the endpoint path segment
                CompletableFuture<UpdateJobClusterLabelsResponse> resp = new CompletableFuture<>();
                resp.complete(
                    new UpdateJobClusterLabelsResponse(
                        request.requestId,
                        BaseResponse.ResponseCode.CLIENT_ERROR,
                        String.format(
                            "Cluster name specified in request payload %s " +
                            "does not match with what specified in resource path %s",
                            request.getClusterName(),
                            clusterName)));
                updateResponse = resp;
            } else {
                // everything look ok so far, process the request!
                updateResponse = jobClusterRouteHandler.updateLabels(request);
            }
            return completeAsync(
                updateResponse,
                resp -> complete(StatusCodes.NO_CONTENT, ""),
                HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_ACTION_UPDATE_LABEL,
                HttpRequestMetrics.HttpVerb.POST
            );
        });
    }

    /**
     * Handles POST /api/v1/jobClusters/{clusterName}/actions/enableCluster.
     * Same name-match guard; success responds 204 NO_CONTENT.
     */
    private Route updateJobClusterStateEnableRoute(String clusterName) {
        return entity(Jackson.unmarshaller(EnableJobClusterRequest.class), request -> {
            logger.info(
                "POST /api/v1/jobClusters/{}/actions/enableCluster called {}",
                clusterName,
                request);
            CompletionStage<EnableJobClusterResponse> updateResponse;
            if (!clusterName.equals(request.getClusterName())) {
                // if cluster name specified in request payload does not match with what specified in
                // the endpoint path segment
                CompletableFuture<EnableJobClusterResponse> resp = new CompletableFuture<>();
                resp.complete(
                    new EnableJobClusterResponse(
                        request.requestId,
                        BaseResponse.ResponseCode.CLIENT_ERROR,
                        String.format(
                            "Cluster name specified in request payload %s " +
                            "does not match with what specified in resource path %s",
                            request.getClusterName(),
                            clusterName)));
                updateResponse = resp;
            } else {
                // everything look ok so far, process the request!
                updateResponse = jobClusterRouteHandler.enable(request);
            }
            return completeAsync(
                updateResponse,
                resp -> complete(StatusCodes.NO_CONTENT, ""),
                HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_ACTION_ENABLE_CLUSTER,
                HttpRequestMetrics.HttpVerb.POST
            );
        });
    }

    /**
     * Handles POST /api/v1/jobClusters/{clusterName}/actions/disableCluster.
     * Same name-match guard; success responds 204 NO_CONTENT.
     */
    private Route updateJobClusterStateDisableRoute(String clusterName) {
        return entity(Jackson.unmarshaller(DisableJobClusterRequest.class), request -> {
            logger.info(
                "POST /api/v1/jobClusters/{}/actions/disableCluster called {}",
                clusterName,
                request);
            CompletionStage<DisableJobClusterResponse> updateResponse;
            if (!clusterName.equals(request.getClusterName())) {
                // if cluster name specified in request payload does not match with what specified in
                // the endpoint path segment
                CompletableFuture<DisableJobClusterResponse> resp = new CompletableFuture<>();
                resp.complete(
                    new DisableJobClusterResponse(
                        request.requestId,
                        BaseResponse.ResponseCode.CLIENT_ERROR,
                        String.format(
                            "Cluster name specified in request payload %s " +
                            "does not match with what specified in resource path %s",
                            request.getClusterName(),
                            clusterName)));
                updateResponse = resp;
            } else {
                // everything look ok so far, process the request!
                updateResponse = jobClusterRouteHandler.disable(request);
            }
            return completeAsync(
                updateResponse,
                resp -> complete(StatusCodes.NO_CONTENT, ""),
                HttpRequestMetrics.Endpoints.JOB_CLUSTER_INSTANCE_ACTION_DISABLE_CLUSTER,
                HttpRequestMetrics.HttpVerb.POST
            );
        });
    }
}
4,424
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v1/ParamName.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.v1;

/**
 * Query-parameter names shared by the v1 REST routes.
 *
 * <p>Pure constant holder: the fields were previously mutable
 * ({@code public static String}) and the class was instantiable; both are
 * tightened here ({@code final} fields, private constructor) without changing
 * any parameter name or the way callers read these constants.
 */
public final class ParamName {

    private ParamName() {
        // constant holder - never instantiated
    }

    /** Comma-separated list of fields to project into the response. */
    public static final String PROJECTION_FIELDS = "fields";
    /** Source object hint for projection. */
    public static final String PROJECTION_TARGET = "fromObj";
    /** Field name to sort the result list by. */
    public static final String SORT_BY = "sortBy";
    /** "true"/"false": sort ascending when true. */
    public static final String SORT_ASCENDING = "ascending";
    /** Maximum number of items per page. */
    public static final String PAGINATION_LIMIT = "pageSize";
    /** Zero-based offset into the full result list. */
    public static final String PAGINATION_OFFSET = "offset";
    /** Request a compact job representation. */
    public static final String JOB_COMPACT = "compact";
    /** Regex/prefix filter for job listings. */
    public static final String JOB_FILTER_MATCH = "matching";
    /** Regex/prefix filter for job-cluster listings (same literal as JOB_FILTER_MATCH). */
    public static final String JOBCLUSTER_FILTER_MATCH = "matching";
    /** Free-text reason accompanying state-changing actions. */
    public static final String REASON = "reason";
    /** Identity of the caller performing the action. */
    public static final String USER = "user";
    /** Whether to send heartbeats on streaming endpoints. */
    public static final String SEND_HEARTBEAT = "sendHB";
    /** Include archived entities when "true". */
    public static final String ARCHIVED = "archived";
    /** Server-side cap on the number of results. */
    public static final String SERVER_FILTER_LIMIT = "limit";
}
4,425
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v1/AgentClustersRoute.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.v1;

import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.server.PathMatcher0;
import akka.http.javadsl.server.Route;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import com.netflix.spectator.api.BasicTag;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.vm.AgentClusterOperations;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.List;
import java.util.function.Function;

import static akka.http.javadsl.server.PathMatchers.segment;

/***
 * Agent clusters route
 * Defines the following end points:
 *    /api/v1/agentClusters              (GET, POST)
 *    /api/v1/agentClusters/jobs         (GET)
 *    /api/v1/agentClusters/autoScalePolicy (GET)
 */
public class AgentClustersRoute extends BaseRoute {
    private static final Logger logger = LoggerFactory.getLogger(AgentClustersRoute.class);
    private final AgentClusterOperations agentClusterOps;

    public AgentClustersRoute(final AgentClusterOperations agentClusterOperations) {
        this.agentClusterOps = agentClusterOperations;
    }

    private static final PathMatcher0 API_V1_AGENT_CLUSTER =
        segment("api").slash("v1").slash("agentClusters");

    @Override
    public Route createRoute(Function<Route, Route> routeFilter) {
        logger.info("creating /api/v1/agentClusters");
        return super.createRoute(routeFilter);
    }

    public Route constructRoutes() {
        return concat(
            pathPrefix(API_V1_AGENT_CLUSTER, () -> concat(
                // api/v1/agentClusters
                pathEndOrSingleSlash(() -> concat(
                    // GET - list all active agent clusters
                    get(this::getAgentClustersRoute),
                    // POST - activate/deactivate agent clusters
                    post(this::postAgentClustersRoute)
                )),
                // api/v1/agentClusters/jobs
                path(
                    "jobs",
                    () -> pathEndOrSingleSlash(
                        // GET - retrieve job detail by job ID
                        () -> get(this::getAgentClustersJobsRoute)
                    )
                ),
                // api/v1/agentClusters/autoScalePolicy
                path(
                    "autoScalePolicy",
                    () -> pathEndOrSingleSlash(
                        // GET - retrieve job detail by job ID
                        () -> get(this::getAgentClustersAutoScalePolicyRoute)
                    ))
                )
            )
        );
    }

    /** Records one endpoint hit with the given verb and HTTP status code tags. */
    private void recordMetric(
            HttpRequestMetrics.Endpoints endpoint,
            HttpRequestMetrics.HttpVerb verb,
            int responseCode) {
        HttpRequestMetrics.getInstance().incrementEndpointMetrics(
            endpoint,
            new BasicTag("verb", verb.toString()),
            new BasicTag("responseCode", String.valueOf(responseCode)));
    }

    /** GET /api/v1/agentClusters - lists the currently active agent clusters. */
    private Route getAgentClustersRoute() {
        logger.info("GET /api/v1/agentClusters called");
        recordMetric(
            HttpRequestMetrics.Endpoints.AGENT_CLUSTERS,
            HttpRequestMetrics.HttpVerb.GET,
            StatusCodes.OK.intValue());
        return complete(
            StatusCodes.OK,
            agentClusterOps.getActiveVMsAttributeValues(),
            Jackson.marshaller());
    }

    /**
     * POST /api/v1/agentClusters - replaces the set of active agent clusters with
     * the JSON list in the request body. Responds 500 if persisting the new set fails.
     *
     * <p>Fix: the metrics for this handler were previously tagged with verb GET
     * (copy-paste from the GET route); they are now tagged POST.
     */
    private Route postAgentClustersRoute() {
        logger.info("POST /api/v1/agentClusters called");
        return entity(
            Jackson.unmarshaller(new TypeReference<List<String>>() {
            }),
            activeClustersList -> {
                logger.info("POST {} called {}", API_V1_AGENT_CLUSTER, activeClustersList);
                try {
                    agentClusterOps.setActiveVMsAttributeValues(activeClustersList);
                } catch (IOException e) {
                    recordMetric(
                        HttpRequestMetrics.Endpoints.AGENT_CLUSTERS,
                        HttpRequestMetrics.HttpVerb.POST,
                        StatusCodes.INTERNAL_SERVER_ERROR.intValue());
                    return complete(
                        StatusCodes.INTERNAL_SERVER_ERROR,
                        "Failed to set active clusters to " + activeClustersList.toString());
                }
                recordMetric(
                    HttpRequestMetrics.Endpoints.AGENT_CLUSTERS,
                    HttpRequestMetrics.HttpVerb.POST,
                    StatusCodes.OK.intValue());
                return complete(StatusCodes.OK, "");
            });
    }

    /** GET /api/v1/agentClusters/jobs - lists jobs placed on agent VMs. */
    private Route getAgentClustersJobsRoute() {
        logger.info("GET /api/v1/agentClusters/jobs called");
        recordMetric(
            HttpRequestMetrics.Endpoints.AGENT_CLUSTERS_JOBS,
            HttpRequestMetrics.HttpVerb.GET,
            StatusCodes.OK.intValue());
        return complete(
            StatusCodes.OK,
            agentClusterOps.getJobsOnVMs(),
            Jackson.marshaller());
    }

    /** GET /api/v1/agentClusters/autoScalePolicy - returns agent-cluster autoscale rules. */
    private Route getAgentClustersAutoScalePolicyRoute() {
        logger.info("GET /api/v1/agentClusters/autoScalePolicy called");
        recordMetric(
            HttpRequestMetrics.Endpoints.AGENT_CLUSTERS_AUTO_SCALE_POLICY,
            HttpRequestMetrics.HttpVerb.GET,
            StatusCodes.OK.intValue());
        return complete(
            StatusCodes.OK,
            agentClusterOps.getAgentClusterAutoScaleRules(),
            Jackson.marshaller());
    }
}
4,426
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/pagination/ListObject.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.pagination;

import akka.http.javadsl.model.Uri;
import io.mantisrx.shaded.com.google.common.base.Strings;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import io.mantisrx.shaded.com.google.common.collect.Maps;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.master.api.akka.route.v1.ParamName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.beans.BeanInfo;
import java.beans.IntrospectionException;
import java.beans.Introspector;
import java.beans.MethodDescriptor;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.util.Comparator;
import java.util.List;
import java.util.Map;

/***
 * Generic ListObject to support pagination and sorting of API result lists.
 *
 * <p>Note: the supplied list is sorted IN PLACE when a sorter is configured, so
 * callers must pass a mutable list.
 *
 * @param <T> element type of the paginated list
 */
public class ListObject<T> {
    private static final Logger logger = LoggerFactory.getLogger(ListObject.class);
    public List<T> list;
    // Links to the previous/next page (same URI with an adjusted offset), or null
    // when there is no such page or no explicit page size was requested.
    public String prev;
    public String next;
    // Total number of elements before pagination was applied.
    public int total;

    /**
     * Builds one page of {@code objects}.
     *
     * @param objects full result list (may be null => empty page); sorted in place
     * @param limit   page size; Integer.MAX_VALUE means "no limit"
     * @param offset  zero-based start index of the page
     * @param sorter  optional comparator applied before slicing
     * @param uri     request URI used to derive prev/next links (may be null)
     */
    ListObject(List<T> objects, int limit, int offset, Comparator<T> sorter, Uri uri) {
        if (objects == null) {
            list = Lists.newArrayList();
            total = 0;
        } else {
            total = objects.size();
            if (sorter != null) {
                objects.sort(sorter);
            }
            // Fix: compute the slice end in long arithmetic. The previous
            // "(offset + limit) > (objects.size() - 1)" check overflowed when
            // limit == Integer.MAX_VALUE (the Builder default) and offset > 0,
            // which made toIndex negative and returned an empty page instead of
            // the tail of the list.
            int toIndex = (int) Math.min((long) offset + (long) limit, (long) objects.size());
            if (offset > toIndex) {
                // offset beyond the end of the data: empty page
                this.list = Lists.newArrayList();
            } else {
                this.list = objects.subList(offset, toIndex);
            }
            // Only emit prev/next links when an explicit page size was requested
            // and we know the original request URI.
            if (limit < Integer.MAX_VALUE && uri != null) {
                if (offset == 0) {
                    prev = null;
                } else {
                    int prevOffset = offset - limit >= 0 ? offset - limit : 0;
                    prev = generateNewUri(uri, prevOffset);
                }
                // long arithmetic for the same overflow reason as above
                if ((long) offset + (long) limit >= objects.size()) {
                    next = null;
                } else {
                    int nextOffset = offset + limit;
                    next = generateNewUri(uri, nextOffset);
                }
            }
        }
    }

    public List<T> getList() {
        return list;
    }

    public void setList(List<T> list) {
        this.list = list;
    }

    public String getNext() {
        return next;
    }

    public void setNext(String next) {
        this.next = next;
    }

    public String getPrev() {
        return prev;
    }

    public void setPrev(String prev) {
        this.prev = prev;
    }

    /** Rebuilds {@code originalUri} with its offset query parameter replaced. */
    private String generateNewUri(Uri originalUri, int offset) {
        Map<String, String> queryMap = Maps.newLinkedHashMap(originalUri.query().toMap());
        queryMap.put(ParamName.PAGINATION_OFFSET, String.valueOf(offset));
        StringBuilder stringBuilder = new StringBuilder(originalUri.path());
        String dividerChar = "?";
        for (Map.Entry<String, String> entry : queryMap.entrySet()) {
            stringBuilder.append(dividerChar);
            stringBuilder.append(entry.getKey());
            stringBuilder.append("=");
            stringBuilder.append(entry.getValue());
            dividerChar = "&";
        }
        return stringBuilder.toString();
    }

    /** Fluent builder for {@link ListObject}. */
    public static class Builder<T> {
        private List<T> objects = null;
        private Class<T> targetType;
        private int limit = Integer.MAX_VALUE;
        private int offset = 0;
        private String sortField = null;
        private boolean sortAscending = true;
        private Uri uri = null;

        public Builder() {
        }

        public ListObject.Builder<T> withObjects(List<T> objects, Class<T> targetType) {
            this.objects = objects;
            this.targetType = targetType;
            return this;
        }

        public ListObject.Builder<T> withLimit(int limit) {
            this.limit = limit;
            return this;
        }

        public ListObject.Builder<T> withOffset(int offset) {
            this.offset = offset;
            return this;
        }

        public ListObject.Builder<T> withSortField(String sortField) {
            this.sortField = sortField;
            return this;
        }

        public ListObject.Builder<T> withSortAscending(boolean isAscending) {
            this.sortAscending = isAscending;
            return this;
        }

        public ListObject.Builder<T> withUri(Uri uri) {
            this.uri = uri;
            return this;
        }

        public ListObject<T> build() {
            Preconditions.checkNotNull(this.objects, "Objects cannot be null");
            Preconditions.checkNotNull(this.targetType, "Target type cannot be null");
            Preconditions.checkState(this.limit > 0, "limit needs to be greater than 0");
            Preconditions.checkState(offset >= 0, "offset has to be equal or greater than 0.");
            return new ListObject<>(
                this.objects,
                this.limit,
                this.offset,
                getSorter(),
                this.uri);
        }

        /**
         * Returns a comparator over {@code sortField}, or null when no sort field
         * was configured. Validates the field name eagerly so an invalid field
         * fails at build() time rather than mid-sort.
         */
        private Comparator<T> getSorter() {
            if (Strings.isNullOrEmpty(sortField)) {
                return null;
            }
            // make sure specified field is valid for the given type
            try {
                Field field = targetType.getDeclaredField(sortField);
                if (field == null) {
                    throw new RuntimeException(
                        String.format("Specified sort field is invalid. [%s]", sortField));
                }
            } catch (NoSuchFieldException ex) {
                throw new RuntimeException(
                    String.format("Specified sort field is invalid. [%s]", sortField), ex);
            }

            return (T t1, T t2) -> {
                int result;
                if (t1 == null && t2 == null) {
                    result = 0;
                } else if (t1 == null) {
                    result = -1;
                } else if (t2 == null) {
                    result = 1;
                } else {
                    Comparable f1 = getComparableFromFieldName(sortField, t1, targetType);
                    Comparable f2 = getComparableFromFieldName(sortField, t2, targetType);
                    // Fix: nulls-first ordering. The previous fallback called
                    // f2.compareTo(f1) with f1 == null, which would NPE (or at
                    // best invert the sign) if it were ever reached.
                    if (f1 == null && f2 == null) {
                        result = 0;
                    } else if (f1 == null) {
                        result = -1;
                    } else if (f2 == null) {
                        result = 1;
                    } else {
                        result = f1.compareTo(f2);
                    }
                }
                return sortAscending ? result : -result;
            };
        }

        /**
         * Resolves the value of {@code fieldName} on {@code val} as a Comparable,
         * first via direct field access and, if the field is inaccessible, via the
         * bean getter. Throws RuntimeException when the value cannot be resolved
         * or is not Comparable.
         */
        private static <T> Comparable getComparableFromFieldName(
                String fieldName,
                T val,
                Class<T> targetType) {
            try {
                Field field = targetType.getDeclaredField(fieldName);
                Object fieldValue = null;
                try {
                    fieldValue = field.get(val);
                } catch (IllegalAccessException ex) {
                    logger.warn(
                        "Unable to access field {}, trying Bean getter method instead...",
                        fieldName);
                }

                // field is private, try pojo/bean get method instead
                if (fieldValue == null) {
                    BeanInfo info = Introspector.getBeanInfo(targetType);
                    MethodDescriptor[] methods = info.getMethodDescriptors();
                    if (methods == null) {
                        throw new RuntimeException("Cannot access sort field. " + fieldName);
                    }
                    for (MethodDescriptor methodDescriptor : methods) {
                        if (methodDescriptor.getName().equalsIgnoreCase("get" + fieldName)) {
                            fieldValue = methodDescriptor.getMethod().invoke(val);
                            break;
                        }
                    }
                }

                if (fieldValue == null) {
                    throw new RuntimeException("Cannot access sort field. " + fieldName);
                }

                if (!(fieldValue instanceof Comparable)) {
                    throw new RuntimeException(
                        String.format("Specified sort field is invalid. [%s]", fieldName));
                }
                return (Comparable) fieldValue;
            } catch (NoSuchFieldException
                    | IllegalAccessException
                    | IntrospectionException
                    | InvocationTargetException ex) {
                throw new RuntimeException(
                    String.format("Specified sort field is invalid. [%s]", fieldName), ex);
            }
        }
    }
}
4,427
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v0/AgentClusterRoute.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.v0;

import akka.actor.ActorSystem;
import akka.http.caching.javadsl.Cache;
import akka.http.caching.javadsl.CachingSettings;
import akka.http.javadsl.model.HttpHeader;
import akka.http.javadsl.model.HttpMethods;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.Uri;
import akka.http.javadsl.server.ExceptionHandler;
import akka.http.javadsl.server.PathMatcher0;
import akka.http.javadsl.server.RequestContext;
import akka.http.javadsl.server.Route;
import akka.http.javadsl.server.RouteResult;
import akka.http.javadsl.unmarshalling.Unmarshaller;
import akka.japi.JavaPartialFunction;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import io.mantisrx.shaded.com.google.common.annotations.VisibleForTesting;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.vm.AgentClusterOperations;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.MasterConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;

import static akka.http.javadsl.server.PathMatchers.segment;
import static akka.http.javadsl.server.directives.CachingDirectives.alwaysCache;
import static akka.http.javadsl.server.directives.CachingDirectives.routeCache;

/**
 * Legacy (v0) agent-cluster endpoints under /api/vm/activevms/*:
 * listactive, listjobsonvms, listagentclusters (GET) and setactive (POST).
 * GET responses for listjobsonvms are served through a URI-keyed route cache.
 */
public class AgentClusterRoute extends BaseRoute {
    private static final Logger logger = LoggerFactory.getLogger(AgentClusterRoute.class);
    private final AgentClusterOperations agentClusterOps;
    // Route-result cache; sized/expired from master configuration in the constructor.
    private final Cache<Uri, RouteResult> cache;
    // Cache keyer: only GET requests produce a cache key (their URI); any other
    // method throws noMatch() so the caching directive bypasses the cache.
    private final JavaPartialFunction<RequestContext, Uri> requestUriKeyer =
        new JavaPartialFunction<RequestContext, Uri>() {
            public Uri apply(RequestContext in, boolean isCheck) {
                final HttpRequest request = in.getRequest();
                final boolean isGet = request.method() == HttpMethods.GET;
                if (isGet) {
                    return request.getUri();
                } else {
                    throw noMatch();
                }
            }
        };
    // Per-endpoint invocation counters.
    private final Counter setActiveCount;
    private final Counter listActiveCount;
    private final Counter listJobsOnVMsCount;
    private final Counter listAgentClustersCount;

    public AgentClusterRoute(final AgentClusterOperations agentClusterOperations,
                             final ActorSystem actorSystem) {
        Preconditions.checkNotNull(agentClusterOperations, "agentClusterOperations");
        this.agentClusterOps = agentClusterOperations;
        MasterConfiguration config = ConfigurationProvider.getConfig();
        this.cache = createCache(actorSystem,
            config.getApiCacheMinSize(),
            config.getApiCacheMaxSize(),
            config.getApiCacheTtlMilliseconds());

        Metrics m = new Metrics.Builder()
            .id("V0AgentClusterRoute")
            .addCounter("setActive")
            .addCounter("listActive")
            .addCounter("listJobsOnVMs")
            .addCounter("listAgentClusters")
            .build();
        this.setActiveCount = m.getCounter("setActive");
        this.listActiveCount = m.getCounter("listActive");
        this.listJobsOnVMsCount = m.getCounter("listJobsOnVMs");
        this.listAgentClustersCount = m.getCounter("listAgentClusters");
    }

    private static final PathMatcher0 API_VM_ACTIVEVMS =
        segment("api").slash("vm").slash("activevms");

    // Path-segment constants; exposed for tests.
    @VisibleForTesting
    public static final String LISTACTIVE = "listactive";
    @VisibleForTesting
    public static final String SETACTIVE = "setactive";
    @VisibleForTesting
    public static final String LISTJOBSONVMS = "listjobsonvms";
    @VisibleForTesting
    public static final String LISTAGENTCLUSTERS = "listagentclusters";

    // CORS: every response carries Access-Control-Allow-Origin: *.
    private static final HttpHeader ACCESS_CONTROL_ALLOW_ORIGIN_HEADER =
        HttpHeader.parse("Access-Control-Allow-Origin", "*");
    private static final Iterable<HttpHeader> DEFAULT_RESPONSE_HEADERS = Arrays.asList(
        ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);

    private Route agentClusterRoutes() {
        return route(
            get(() -> route(
                path(API_VM_ACTIVEVMS.slash(LISTACTIVE), () -> {
                    logger.debug("/api/vm/activems/{} called", LISTACTIVE);
                    listActiveCount.increment();
                    return complete(StatusCodes.OK,
                        agentClusterOps.getActiveVMsAttributeValues(),
                        Jackson.marshaller());
                }),
                path(API_VM_ACTIVEVMS.slash(LISTJOBSONVMS), () -> {
                    logger.debug("/api/vm/activems/{} called", LISTJOBSONVMS);
                    listJobsOnVMsCount.increment();
                    // cached per request URI (GET only, see requestUriKeyer)
                    return alwaysCache(cache, requestUriKeyer, () ->
                        extractUri(uri -> complete(StatusCodes.OK,
                            agentClusterOps.getJobsOnVMs(),
                            Jackson.marshaller())));
                }),
                path(API_VM_ACTIVEVMS.slash(LISTAGENTCLUSTERS), () -> {
                    logger.debug("/api/vm/activems/{} called", LISTAGENTCLUSTERS);
                    listAgentClustersCount.increment();
                    return complete(StatusCodes.OK,
                        agentClusterOps.getAgentClusterAutoScaleRules(),
                        Jackson.marshaller());
                })
            )),
            post(() -> route(
                // POST body: JSON array of cluster names; echoes the raw body back on success.
                path(API_VM_ACTIVEVMS.slash(SETACTIVE), () ->
                    decodeRequest(() -> entity(Unmarshaller.entityToString(), req -> {
                        try {
                            setActiveCount.increment();
                            List<String> activeClustersList =
                                Jackson.fromJSON(req, new TypeReference<List<String>>() {});
                            logger.info("POST /api/vm/activems/{} called {}",
                                SETACTIVE, activeClustersList);
                            agentClusterOps.setActiveVMsAttributeValues(activeClustersList);
                        } catch (IOException e) {
                            return complete(StatusCodes.INTERNAL_SERVER_ERROR,
                                "Failed to set active clusters to " + req);
                        }
                        return complete(StatusCodes.OK, req);
                    }))
                )
            ))
        );
    }

    /**
     * Wraps the routes with CORS headers and a JSON-formatting exception handler
     * (any uncaught exception becomes a 500 with a {"error": "..."} body).
     */
    public Route createRoute(Function<Route, Route> routeFilter) {
        logger.info("creating routes");
        final ExceptionHandler jsonExceptionHandler = ExceptionHandler.newBuilder()
            .match(Exception.class, x -> {
                logger.error("got exception", x);
                return complete(StatusCodes.INTERNAL_SERVER_ERROR,
                    "{\"error\": \"" + x.getMessage() + "\"}");
            })
            .build();

        return respondWithHeaders(DEFAULT_RESPONSE_HEADERS,
            () -> handleExceptions(jsonExceptionHandler,
                () -> routeFilter.apply(agentClusterRoutes())));
    }
}
4,428
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v0/JobDiscoveryRoute.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v0; import akka.NotUsed; import akka.http.javadsl.marshalling.sse.EventStreamMarshalling; import akka.http.javadsl.model.HttpHeader; import akka.http.javadsl.model.StatusCodes; import akka.http.javadsl.model.sse.ServerSentEvent; import akka.http.javadsl.server.ExceptionHandler; import akka.http.javadsl.server.PathMatchers; import akka.http.javadsl.server.Route; import akka.http.javadsl.unmarshalling.StringUnmarshallers; import akka.stream.javadsl.Source; import io.mantisrx.common.metrics.Counter; import io.mantisrx.common.metrics.Metrics; import io.mantisrx.common.metrics.MetricsRegistry; import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler; import io.mantisrx.master.api.akka.route.proto.JobClusterInfo; import io.mantisrx.master.api.akka.route.proto.JobDiscoveryRouteProto; import io.mantisrx.master.api.akka.route.utils.StreamingUtils; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import io.mantisrx.server.core.JobSchedulingInfo; import io.mantisrx.server.master.domain.JobId; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.RxReactiveStreams; import java.util.Arrays; import java.util.Optional; import java.util.concurrent.CompletionStage; import java.util.function.Function; import static akka.http.javadsl.server.PathMatchers.segment; public class JobDiscoveryRoute extends 
BaseRoute { private static final Logger logger = LoggerFactory.getLogger(JobDiscoveryRoute.class); private final JobDiscoveryRouteHandler jobDiscoveryRouteHandler; private final Metrics metrics; private final Counter schedulingInfoStreamGET; private final Counter jobClusterInfoStreamGET; public JobDiscoveryRoute(final JobDiscoveryRouteHandler jobDiscoveryRouteHandler) { this.jobDiscoveryRouteHandler = jobDiscoveryRouteHandler; Metrics m = new Metrics.Builder() .id("JobDiscoveryRoute") .addCounter("schedulingInfoStreamGET") .addCounter("jobClusterInfoStreamGET") .build(); this.metrics = MetricsRegistry.getInstance().registerAndGet(m); this.schedulingInfoStreamGET = metrics.getCounter("schedulingInfoStreamGET"); this.jobClusterInfoStreamGET = metrics.getCounter("jobClusterInfoStreamGET"); } private static final HttpHeader ACCESS_CONTROL_ALLOW_ORIGIN_HEADER = HttpHeader.parse("Access-Control-Allow-Origin", "*"); private static final Iterable<HttpHeader> DEFAULT_RESPONSE_HEADERS = Arrays.asList( ACCESS_CONTROL_ALLOW_ORIGIN_HEADER); private Route getJobDiscoveryRoutes() { return route( get(() -> route( path(segment("assignmentresults").slash(PathMatchers.segment()), (jobId) -> parameterOptional( StringUnmarshallers.BOOLEAN, "sendHB", (sendHeartbeats) -> { logger.debug( "/assignmentresults/{} called", jobId); schedulingInfoStreamGET.increment(); JobClusterManagerProto.GetJobSchedInfoRequest req = new JobClusterManagerProto.GetJobSchedInfoRequest( JobId.fromId(jobId).get()); CompletionStage<JobDiscoveryRouteProto.SchedInfoResponse> schedulingInfoRespCS = jobDiscoveryRouteHandler.schedulingInfoStream( req, sendHeartbeats.orElse(false)); return completeAsync( schedulingInfoRespCS, r -> { Optional<Observable<JobSchedulingInfo>> schedInfoStreamO = r .getSchedInfoStream(); if (schedInfoStreamO.isPresent()) { Observable<JobSchedulingInfo> schedulingInfoObs = schedInfoStreamO .get(); Source<ServerSentEvent, NotUsed> schedInfoSource = Source.fromPublisher( 
RxReactiveStreams.toPublisher( schedulingInfoObs)) .map(j -> StreamingUtils.from( j) .orElse(null)) .filter(sse -> sse != null); return completeOK( schedInfoSource, EventStreamMarshalling.toEventStream()); } else { logger.warn( "Failed to get sched info stream for job {}", jobId); return complete( StatusCodes.INTERNAL_SERVER_ERROR, "Failed to get sched info stream for job " + jobId); } }); }) ), path(segment("namedjobs").slash(PathMatchers.segment()), (jobCluster) -> parameterOptional( StringUnmarshallers.BOOLEAN, "sendHB", (sendHeartbeats) -> { logger.debug( "/namedjobs/{} called", jobCluster); jobClusterInfoStreamGET.increment(); JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest req = new JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest( jobCluster); CompletionStage<JobDiscoveryRouteProto.JobClusterInfoResponse> jobClusterInfoRespCS = jobDiscoveryRouteHandler.lastSubmittedJobIdStream( req, sendHeartbeats.orElse(false)); return completeAsync( jobClusterInfoRespCS, r -> { Optional<Observable<JobClusterInfo>> jobClusterInfoO = r .getJobClusterInfoObs(); if (jobClusterInfoO.isPresent()) { Observable<JobClusterInfo> jobClusterInfoObs = jobClusterInfoO .get(); Source<ServerSentEvent, NotUsed> source = Source .fromPublisher(RxReactiveStreams .toPublisher( jobClusterInfoObs)) .map(j -> StreamingUtils.from(j) .orElse(null)) .filter(sse -> sse != null); return completeOK( source, EventStreamMarshalling.toEventStream()); } else { logger.warn( "Failed to get last submitted jobId stream for {}", jobCluster); return complete( StatusCodes.INTERNAL_SERVER_ERROR, "Failed to get last submitted jobId stream for " + jobCluster); } }); }) ) )) ); } public Route createRoute(Function<Route, Route> routeFilter) { logger.info("creating routes"); final ExceptionHandler jsonExceptionHandler = ExceptionHandler.newBuilder() .match(Exception.class, x -> { logger.error("got exception", x); return complete( StatusCodes.INTERNAL_SERVER_ERROR, "{\"error\": \"" + x.getMessage() + 
"\"}"); }) .build(); return respondWithHeaders( DEFAULT_RESPONSE_HEADERS, () -> handleExceptions( jsonExceptionHandler, () -> routeFilter.apply(getJobDiscoveryRoutes()))); } }
4,429
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v0/JobRoute.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v0; import akka.actor.ActorSystem; import akka.http.caching.javadsl.Cache; import akka.http.caching.javadsl.CachingSettings; import akka.http.javadsl.model.HttpHeader; import akka.http.javadsl.model.HttpMethods; import akka.http.javadsl.model.HttpRequest; import akka.http.javadsl.model.StatusCodes; import akka.http.javadsl.model.Uri; import akka.http.javadsl.server.ExceptionHandler; import akka.http.javadsl.server.PathMatcher0; import akka.http.javadsl.server.PathMatchers; import akka.http.javadsl.server.RequestContext; import akka.http.javadsl.server.Route; import akka.http.javadsl.server.RouteResult; import akka.http.javadsl.unmarshalling.StringUnmarshallers; import akka.http.javadsl.unmarshalling.Unmarshaller; import akka.japi.JavaPartialFunction; import io.mantisrx.common.metrics.Counter; import io.mantisrx.common.metrics.Metrics; import io.mantisrx.common.metrics.MetricsRegistry; import io.mantisrx.master.api.akka.route.Jackson; import io.mantisrx.master.api.akka.route.handlers.JobRouteHandler; import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter; import io.mantisrx.master.jobcluster.job.MantisJobMetadataView; import io.mantisrx.master.jobcluster.job.worker.WorkerHeartbeat; import io.mantisrx.master.jobcluster.proto.BaseResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import 
io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.KillJobRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageRequest; import io.mantisrx.server.core.PostJobStatusRequest; import io.mantisrx.server.master.config.ConfigurationProvider; import io.mantisrx.server.master.config.MasterConfiguration; import io.mantisrx.server.master.domain.DataFormatAdapter; import io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.scheduler.WorkerEvent; import io.mantisrx.server.master.store.MantisWorkerMetadataWritable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.function.Function; import java.util.stream.Collectors; import static akka.http.javadsl.server.PathMatchers.segment; import static akka.http.javadsl.server.directives.CachingDirectives.alwaysCache; import static akka.http.javadsl.server.directives.CachingDirectives.routeCache; import static io.mantisrx.master.api.akka.route.utils.JobRouteUtils.createListJobIdsRequest; import static io.mantisrx.master.api.akka.route.utils.JobRouteUtils.createListJobsRequest; import static io.mantisrx.master.api.akka.route.utils.JobRouteUtils.createWorkerStatusRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersRequest.DEFAULT_LIST_ARCHIVED_WORKERS_LIMIT;
/**
 * v0 Akka HTTP routes for job operations: worker status POSTs under /api/postjobstatus,
 * POST kill / resubmitWorker / scaleStage under /api/jobs, and GET list / archived
 * endpoints under /api/jobs. GET list responses are cached per request URI in an LFU cache.
 */
public class JobRoute extends BaseRoute { private static final Logger logger = LoggerFactory.getLogger(JobRoute.class); private final JobRouteHandler jobRouteHandler; private final Metrics metrics; private final Counter jobListGET; private final Counter jobListJobIdGET; private final Counter jobListRegexGET; private final Counter 
jobListLabelMatchGET; private final Counter jobArchivedWorkersGET; private final Counter jobArchivedWorkersGETInvalid; private final Counter workerHeartbeatStatusPOST; private final Counter workerHeartbeatSkipped; private final Cache<Uri, RouteResult> cache;
// Cache keyer: GET requests are cached by their full URI; any other method bypasses the cache.
private final JavaPartialFunction<RequestContext, Uri> requestUriKeyer = new JavaPartialFunction<RequestContext, Uri>() { public Uri apply(RequestContext in, boolean isCheck) { final HttpRequest request = in.getRequest(); final boolean isGet = request.method() == HttpMethods.GET; if (isGet) { return request.getUri(); } else { throw noMatch(); } } };
// Builds the per-URI LFU response cache from master configuration and registers the
// per-endpoint counters with the central metrics registry.
public JobRoute(final JobRouteHandler jobRouteHandler, final ActorSystem actorSystem) { this.jobRouteHandler = jobRouteHandler; MasterConfiguration config = ConfigurationProvider.getConfig(); this.cache = createCache(actorSystem, config.getApiCacheMinSize(), config.getApiCacheMaxSize(), config.getApiCacheTtlMilliseconds()); Metrics m = new Metrics.Builder() .id("V0JobRoute") .addCounter("jobListGET") .addCounter("jobListJobIdGET") .addCounter("jobListRegexGET") .addCounter("jobListLabelMatchGET") .addCounter("jobArchivedWorkersGET") .addCounter("jobArchivedWorkersGETInvalid") .addCounter("workerHeartbeatStatusPOST") .addCounter("workerHeartbeatSkipped") .build(); this.metrics = MetricsRegistry.getInstance().registerAndGet(m); this.jobListGET = metrics.getCounter("jobListGET"); this.jobListJobIdGET = metrics.getCounter("jobListJobIdGET"); this.jobListRegexGET = metrics.getCounter("jobListRegexGET"); this.jobListLabelMatchGET = metrics.getCounter("jobListLabelMatchGET"); this.jobArchivedWorkersGET = metrics.getCounter("jobArchivedWorkersGET"); this.jobArchivedWorkersGETInvalid = metrics.getCounter("jobArchivedWorkersGETInvalid"); this.workerHeartbeatStatusPOST = metrics.getCounter("workerHeartbeatStatusPOST"); this.workerHeartbeatSkipped = metrics.getCounter("workerHeartbeatSkipped"); } private static final PathMatcher0 API_JOBS = segment("api").slash("jobs"); private 
static final HttpHeader ACCESS_CONTROL_ALLOW_ORIGIN_HEADER = HttpHeader.parse("Access-Control-Allow-Origin", "*"); private static final Iterable<HttpHeader> DEFAULT_RESPONSE_HEADERS = Arrays.asList( ACCESS_CONTROL_ALLOW_ORIGIN_HEADER); public static final String KILL_ENDPOINT = "kill"; public static final String RESUBMIT_WORKER_ENDPOINT = "resubmitWorker"; public static final String SCALE_STAGE_ENDPOINT = "scaleStage"; public static final PathMatcher0 STATUS_ENDPOINT = segment("api").slash("postjobstatus"); /** * Route that returns * - a list of Job Ids only if 'jobIdsOnly' query param is set * - a list of compact Job Infos if 'compact' query param is set * - a list of Job metadatas otherwise * The above lists are filtered and returned based on other criteria specified in the List request * like stageNumber, workerIndex, workerNumber, matchingLabels, regex, activeOnly, jobState, workerState, limit * * @param regex the regex to match against Job IDs to return in response * @return Route job list route */ private Route jobListRoute(final Optional<String> regex) { return parameterOptional(StringUnmarshallers.BOOLEAN, "jobIdsOnly", (jobIdsOnly) -> parameterOptional(StringUnmarshallers.BOOLEAN, "compact", (isCompact) -> parameterMultiMap(params -> { if (jobIdsOnly.isPresent() && jobIdsOnly.get()) { logger.debug("/api/jobs/list jobIdsOnly called"); return alwaysCache(cache, requestUriKeyer, () -> extractUri(uri -> completeAsync( jobRouteHandler.listJobIds(createListJobIdsRequest(params, regex, true)), resp -> completeOK( resp.getJobIds().stream() .map(jobId -> jobId.getJobId()) .collect(Collectors.toList()), Jackson.marshaller())))); } if (isCompact.isPresent() && isCompact.get()) { logger.debug("/api/jobs/list compact called"); return alwaysCache(cache, requestUriKeyer, () -> extractUri(uri -> completeAsync( jobRouteHandler.listJobs(createListJobsRequest(params, regex, true)), resp -> completeOK( resp.getJobList() .stream() .map(jobMetadataView -> 
JobClusterProtoAdapter.toCompactJobInfo(jobMetadataView)) .collect(Collectors.toList()), Jackson.marshaller())))); } else { logger.debug("/api/jobs/list called"); return alwaysCache(cache, requestUriKeyer, () -> extractUri(uri -> completeAsync( jobRouteHandler.listJobs(createListJobsRequest(params, regex, true)), resp -> completeOK( resp.getJobList(), Jackson.marshaller())))); } }) ) ); }
// Assembles the full v0 job route tree: /api/postjobstatus (worker status/heartbeats,
// optionally skipped when heartbeat processing is disabled), POST kill/resubmitWorker/scaleStage
// (kill rewrites CLIENT_ERROR responses to SUCCESS for backwards compatibility with the old
// master), and the GET list/archived endpoints documented inline below.
private Route getJobRoutes() { return route( path(STATUS_ENDPOINT, () -> post(() -> decodeRequest(() -> entity(Unmarshaller.entityToString(), req -> { if (logger.isDebugEnabled()) { logger.debug("/api/postjobstatus called {}", req); } try { workerHeartbeatStatusPOST.increment(); PostJobStatusRequest postJobStatusRequest = Jackson.fromJSON(req, PostJobStatusRequest.class); WorkerEvent workerStatusRequest = createWorkerStatusRequest(postJobStatusRequest); if (workerStatusRequest instanceof WorkerHeartbeat) { if (!ConfigurationProvider.getConfig().isHeartbeatProcessingEnabled()) { // skip heartbeat processing if (logger.isTraceEnabled()) { logger.trace("skipped heartbeat event {}", workerStatusRequest); } workerHeartbeatSkipped.increment(); return complete(StatusCodes.OK); } } return completeWithFuture( jobRouteHandler.workerStatus(workerStatusRequest) .thenApply(this::toHttpResponse)); } catch (IOException e) { logger.warn("Error handling job status {}", req, e); return complete(StatusCodes.BAD_REQUEST, "{\"error\": \"invalid JSON payload to post job status\"}"); } }) ))), pathPrefix(API_JOBS, () -> route( post(() -> route( path(KILL_ENDPOINT, () -> decodeRequest(() -> entity(Unmarshaller.entityToString(), req -> { logger.debug("/api/jobs/kill called {}", req); try { final KillJobRequest killJobRequest = Jackson.fromJSON(req, KillJobRequest.class); return completeWithFuture( jobRouteHandler.kill(killJobRequest) .thenApply(resp -> { if (resp.responseCode == BaseResponse.ResponseCode.SUCCESS) { return new JobClusterManagerProto.KillJobResponse(resp.requestId, resp.responseCode, 
resp.getState(), "[\""+ resp.getJobId().getId() +" Killed\"]", resp.getJobId(), resp.getUser()); } else if (resp.responseCode == BaseResponse.ResponseCode.CLIENT_ERROR) { // for backwards compatibility with old master return new JobClusterManagerProto.KillJobResponse(resp.requestId, BaseResponse.ResponseCode.SUCCESS, resp.getState(), "[\""+ resp.message +" \"]", resp.getJobId(), resp.getUser()); } return resp; }) .thenApply(this::toHttpResponse)); } catch (IOException e) { logger.warn("Error on job kill {}", req, e); return complete(StatusCodes.BAD_REQUEST, "{\"error\": \"invalid json payload to kill job\"}"); } }) )), path(RESUBMIT_WORKER_ENDPOINT, () -> decodeRequest(() -> entity(Unmarshaller.entityToString(), req -> { logger.debug("/api/jobs/resubmitWorker called {}", req); try { final ResubmitWorkerRequest resubmitWorkerRequest = Jackson.fromJSON(req, ResubmitWorkerRequest.class); return completeWithFuture( jobRouteHandler.resubmitWorker(resubmitWorkerRequest) .thenApply(this::toHttpResponse)); } catch (IOException e) { logger.warn("Error on worker resubmit {}", req, e); return complete(StatusCodes.BAD_REQUEST, "{\"error\": \"invalid json payload to resubmit worker\"}"); } }) )), path(SCALE_STAGE_ENDPOINT, () -> decodeRequest(() -> entity(Unmarshaller.entityToString(), req -> { logger.debug("/api/jobs/scaleStage called {}", req); try { ScaleStageRequest scaleStageRequest = Jackson.fromJSON(req, ScaleStageRequest.class); int numWorkers = scaleStageRequest.getNumWorkers(); int maxWorkersPerStage = ConfigurationProvider.getConfig().getMaxWorkersPerStage(); if (numWorkers > maxWorkersPerStage) { logger.warn("rejecting ScaleStageRequest {} with invalid num workers", scaleStageRequest); return complete(StatusCodes.BAD_REQUEST, "{\"error\": \"num workers must be less than " + maxWorkersPerStage + "\"}"); } return completeWithFuture( jobRouteHandler.scaleStage(scaleStageRequest) .thenApply(this::toHttpResponse)); } catch (IOException e) { logger.warn("Error scaling 
stage {}", req, e); return complete(StatusCodes.BAD_REQUEST, "{\"error\": \"invalid json payload to scale stage " + e.getMessage() +"\"}"); } }) )) // TODO path("updateScalingPolicy", () -> // entity(Jackson.unmarshaller(UpdateJobClusterRequest.class), req -> { // logger.info("/api/jobs/kill called {}", req); // return completeWithFuture( // jobRouteHandler.kill(req) // .thenApply(this::toHttpResponse)); // }) // ) )), get(() -> route( // Context from old mantis master: // list all jobs activeOnly = true // optional boolean 'compact' query param to return compact job infos if set // For compact, // - optional 'limit' query param // - optional 'jobState' query param // For non compact, // - optional boolean 'jobIdsOnly' query param to return only the job Ids if set // - optional int 'stageNumber' query param to filter for stage number // - optional int 'workerIndex' query param to filter for worker index // - optional int 'workerNumber' query param to filter for worker number // - optional int 'workerState' query param to filter for worker state // list/all - list all jobs activeOnly=false with above query parameters // list/matching/<regex> - if optional regex param specified, propagate regex // else list all jobs activeOnly=false with above query parameters // list/matchinglabels // - optional labels query param // - optional labels.op query param - default value is 'or' if not specified (other possible value is 'and' path(segment("list"), () -> { jobListGET.increment(); return jobListRoute(Optional.empty()); }), path(segment("list").slash("matchinglabels"), () -> { jobListLabelMatchGET.increment(); return jobListRoute(Optional.empty()); }), path(segment("list").slash(PathMatchers.segment()), (jobId) -> { logger.debug("/api/jobs/list/{} called", jobId); jobListJobIdGET.increment(); return completeAsync( jobRouteHandler.getJobDetails(new JobClusterManagerProto.GetJobDetailsRequest("masterAPI", jobId)), resp -> { Optional<MantisJobMetadataView> mantisJobMetadataView 
= resp.getJobMetadata() .map(metaData -> new MantisJobMetadataView(metaData, Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), false)); return completeOK(mantisJobMetadataView, Jackson.marshaller()); }); }), path(segment("list").slash("matching").slash(PathMatchers.segment()), (regex) -> { jobListRegexGET.increment(); return jobListRoute(Optional.ofNullable(regex) .filter(r -> !r.isEmpty())); }), path(segment("archived").slash(PathMatchers.segment()), (jobId) -> parameterOptional(StringUnmarshallers.INTEGER, "limit", (limit) -> { jobArchivedWorkersGET.increment(); Optional<JobId> jobIdO = JobId.fromId(jobId); if (jobIdO.isPresent()) { ListArchivedWorkersRequest req = new ListArchivedWorkersRequest(jobIdO.get(), limit.orElse(DEFAULT_LIST_ARCHIVED_WORKERS_LIMIT)); return alwaysCache(cache, requestUriKeyer, () -> extractUri(uri -> completeAsync( jobRouteHandler.listArchivedWorkers(req), resp -> { List<MantisWorkerMetadataWritable> workers = resp.getWorkerMetadata().stream() .map(wm -> DataFormatAdapter.convertMantisWorkerMetadataToMantisWorkerMetadataWritable(wm)) .collect(Collectors.toList()); return completeOK(workers, Jackson.marshaller()); }))); } else { return complete(StatusCodes.BAD_REQUEST, "error: 'archived/<jobId>' request must include a valid jobId"); } }) ), path(segment("archived"), () -> { jobArchivedWorkersGETInvalid.increment(); return complete(StatusCodes.BAD_REQUEST, "error: 'archived' Request must include jobId"); }) ))) )); }
// Wraps the job routes with CORS headers, a generic JSON exception handler, and the
// supplied route filter.
public Route createRoute(Function<Route, Route> routeFilter) { logger.info("creating routes"); final ExceptionHandler genericExceptionHandler = ExceptionHandler.newBuilder() .match(Exception.class, x -> { logger.error("got exception", x); return complete(StatusCodes.INTERNAL_SERVER_ERROR, "{\"error\": \"" + x.getMessage() + "\"}"); }) .build(); return respondWithHeaders(DEFAULT_RESPONSE_HEADERS, () -> handleExceptions(genericExceptionHandler, () -> 
routeFilter.apply(getJobRoutes()))); } }
4,430
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v0/MasterDescriptionRoute.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v0; import akka.http.javadsl.model.HttpHeader; import akka.http.javadsl.model.StatusCodes; import akka.http.javadsl.server.ExceptionHandler; import akka.http.javadsl.server.Route; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty; import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException; import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature; import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper; import io.mantisrx.master.api.akka.route.Jackson; import io.mantisrx.runtime.JobConstraints; import io.mantisrx.runtime.WorkerMigrationConfig; import io.mantisrx.runtime.descriptor.StageScalingPolicy; import io.mantisrx.server.core.master.MasterDescription; import io.mantisrx.server.master.config.ConfigurationProvider; import io.mantisrx.server.master.config.MasterConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Objects; import java.util.function.Function; import static akka.http.javadsl.server.PathMatchers.segment;
/**
 * v0 routes exposing master metadata: /api/masterinfo (JSON), /api/masterinfostr
 * (pre-serialized string), and /api/masterconfig (enum values and worker resource
 * limits serialized at construction time).
 */
public class MasterDescriptionRoute extends BaseRoute { private static final 
Logger logger = LoggerFactory.getLogger(MasterDescriptionRoute.class); private static final ObjectMapper mapper = new ObjectMapper() .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); private final MasterDescription masterDesc; private String masterDescStr; private final List<Configlet> configs = new ArrayList<>();
// Simple name/value pair used to expose a named configuration blob (value is a
// pre-serialized JSON string) via /api/masterconfig.
public static class Configlet { private final String name; private final String value; @JsonCreator @JsonIgnoreProperties(ignoreUnknown=true) public Configlet(@JsonProperty("name") String name, @JsonProperty("value") String value) { this.name = name; this.value = value; } public String getName() { return name; } public String getValue() { return value; } @Override public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; final Configlet configlet = (Configlet) o; return Objects.equals(name, configlet.name) && Objects.equals(value, configlet.value); } @Override public int hashCode() { return Objects.hash(name, value); } @Override public String toString() { return "Configlet{" + "name='" + name + '\'' + ", value='" + value + '\'' + '}'; } }
// Per-worker machine-definition caps (CPU cores, memory MB, network Mbps) read from
// master configuration and published as one of the Configlets.
static class WorkerResourceLimits { private final int maxCpuCores; private final int maxMemoryMB; private final int maxNetworkMbps; @JsonCreator @JsonIgnoreProperties(ignoreUnknown = true) public WorkerResourceLimits(@JsonProperty("maxCpuCores") final int maxCpuCores, @JsonProperty("maxMemoryMB") final int maxMemoryMB, @JsonProperty("maxNetworkMbps") final int maxNetworkMbps) { this.maxCpuCores = maxCpuCores; this.maxMemoryMB = maxMemoryMB; this.maxNetworkMbps = maxNetworkMbps; } public int getMaxCpuCores() { return maxCpuCores; } public int getMaxMemoryMB() { return maxMemoryMB; } public int getMaxNetworkMbps() { return maxNetworkMbps; } }
// Eagerly serializes the master description and the config enums once at construction;
// serialization failures are logged and fall back to toString() / a partial config list.
public MasterDescriptionRoute(final MasterDescription masterDescription) { this.masterDesc = masterDescription; try { this.masterDescStr = mapper.writeValueAsString(masterDesc); } catch 
(JsonProcessingException e) { logger.error("failed to create json for master desc {}", masterDesc); this.masterDescStr = masterDesc.toString(); } try { configs.add(new Configlet(JobConstraints.class.getSimpleName(), mapper.writeValueAsString(JobConstraints.values()))); configs.add(new Configlet(StageScalingPolicy.ScalingReason.class.getSimpleName(), mapper.writeValueAsString(StageScalingPolicy.ScalingReason.values()))); configs.add(new Configlet(WorkerMigrationConfig.MigrationStrategyEnum.class.getSimpleName(), mapper.writeValueAsString(WorkerMigrationConfig.MigrationStrategyEnum.values()))); MasterConfiguration config = ConfigurationProvider.getConfig(); int maxCpuCores = config.getWorkerMachineDefinitionMaxCpuCores(); int maxMemoryMB = config.getWorkerMachineDefinitionMaxMemoryMB(); int maxNetworkMbps = config.getWorkerMachineDefinitionMaxNetworkMbps(); configs.add(new Configlet(WorkerResourceLimits.class.getSimpleName(), mapper.writeValueAsString(new WorkerResourceLimits(maxCpuCores, maxMemoryMB, maxNetworkMbps)))); } catch (JsonProcessingException e) { logger.error(e.getMessage(), e); } } private static final HttpHeader ACCESS_CONTROL_ALLOW_ORIGIN_HEADER = HttpHeader.parse("Access-Control-Allow-Origin", "*"); private static final Iterable<HttpHeader> DEFAULT_RESPONSE_HEADERS = Arrays.asList( ACCESS_CONTROL_ALLOW_ORIGIN_HEADER); public List<Configlet> getConfigs() { return configs; }
// GET-only route tree serving the cached master description and config snapshots.
private Route getMasterDescRoute() { return route( get(() -> route( path(segment("api").slash("masterinfo"), () -> completeOK(masterDesc, Jackson.marshaller())), path(segment("api").slash("masterinfostr"), () -> complete(StatusCodes.OK, masterDescStr)), path(segment("api").slash("masterconfig"), () -> completeOK(configs, Jackson.marshaller())) )) ); }
// Wraps the routes with CORS headers, an IOException handler (returns 400, unlike the
// other v0 routes which map Exception to 500), and the supplied route filter.
public Route createRoute(Function<Route, Route> routeFilter) { logger.info("creating routes"); final ExceptionHandler jsonExceptionHandler = ExceptionHandler.newBuilder() .match(IOException.class, x -> { logger.error("got exception", 
x); return complete(StatusCodes.BAD_REQUEST, "caught exception " + x.getMessage()); }) .build(); return respondWithHeaders(DEFAULT_RESPONSE_HEADERS, () -> handleExceptions(jsonExceptionHandler, () -> routeFilter.apply(getMasterDescRoute()))); } }
4,431
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v0/BaseRoute.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.v0;

import akka.actor.ActorSystem;
import akka.http.caching.LfuCache;
import akka.http.caching.javadsl.Cache;
import akka.http.caching.javadsl.CachingSettings;
import akka.http.caching.javadsl.LfuCacheSettings;
import akka.http.javadsl.model.ContentTypes;
import akka.http.javadsl.model.HttpResponse;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.Uri;
import akka.http.javadsl.server.AllDirectives;
import akka.http.javadsl.server.Route;
import akka.http.javadsl.server.RouteResult;
import akka.http.javadsl.server.directives.RouteAdapter;
import akka.japi.pf.PFBuilder;
import akka.pattern.AskTimeoutException;
import io.mantisrx.master.api.akka.route.MasterApiMetrics;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import scala.concurrent.duration.Duration;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;

/**
 * Shared plumbing for the v0 REST routes: maps {@link BaseResponse} protocol replies
 * onto HTTP responses (with per-status-class metrics), completes asynchronous handler
 * calls, and builds LFU response caches for GET endpoints.
 */
abstract class BaseRoute extends AllDirectives {

    /** Builds the {@code {"error": "..."}} JSON envelope used by every v0 error response. */
    private static String errorJson(final String message) {
        return "{\"error\": \"" + message + "\"}";
    }

    /**
     * Translates a protocol-level {@link BaseResponse} into an HTTP response and bumps
     * the corresponding 2xx/4xx/5xx metric.
     * NOTE(review): SUCCESS_CREATED also maps to 200 OK (not 201), and all client errors
     * collapse to 400 (NOT_FOUND is not surfaced as 404) — kept as-is for compatibility.
     */
    protected HttpResponse toHttpResponse(final BaseResponse r) {
        switch (r.responseCode) {
            case SUCCESS:
            case SUCCESS_CREATED:
                MasterApiMetrics.getInstance().incrementResp2xx();
                return HttpResponse.create()
                        .withEntity(ContentTypes.APPLICATION_JSON, r.message)
                        .withStatus(StatusCodes.OK);
            case CLIENT_ERROR:
            case CLIENT_ERROR_NOT_FOUND:
            case CLIENT_ERROR_CONFLICT:
                MasterApiMetrics.getInstance().incrementResp4xx();
                return HttpResponse.create()
                        .withEntity(ContentTypes.APPLICATION_JSON, errorJson(r.message))
                        .withStatus(StatusCodes.BAD_REQUEST);
            case OPERATION_NOT_ALLOWED:
                MasterApiMetrics.getInstance().incrementResp4xx();
                return HttpResponse.create()
                        .withEntity(ContentTypes.APPLICATION_JSON, errorJson(r.message))
                        .withStatus(StatusCodes.METHOD_NOT_ALLOWED);
            case SERVER_ERROR:
            default:
                MasterApiMetrics.getInstance().incrementResp5xx();
                return HttpResponse.create()
                        .withEntity(ContentTypes.APPLICATION_JSON, errorJson(r.message))
                        .withStatus(StatusCodes.INTERNAL_SERVER_ERROR);
        }
    }

    /**
     * Completes {@code stage}, applying {@code successTransform} on success and answering
     * client errors with a 400 + JSON error envelope.
     */
    protected <T extends BaseResponse> RouteAdapter completeAsync(
            final CompletionStage<T> stage,
            final Function<T, RouteAdapter> successTransform) {
        return completeAsync(
                stage,
                successTransform,
                r -> complete(StatusCodes.BAD_REQUEST, errorJson(r.message)));
    }

    /**
     * Completes {@code stage}, dispatching on the protocol response code:
     * success → {@code successTransform}, client error → {@code clientFailureTransform},
     * everything else (including OPERATION_NOT_ALLOWED here, unlike
     * {@link #toHttpResponse}) → 500. Ask timeouts and any other throwable also map to 500.
     */
    protected <T extends BaseResponse> RouteAdapter completeAsync(
            final CompletionStage<T> stage,
            final Function<T, RouteAdapter> successTransform,
            final Function<T, RouteAdapter> clientFailureTransform) {
        return onComplete(stage, attempt -> attempt
                .map(r -> {
                    switch (r.responseCode) {
                        case SUCCESS:
                        case SUCCESS_CREATED:
                            MasterApiMetrics.getInstance().incrementResp2xx();
                            return successTransform.apply(r);
                        case CLIENT_ERROR:
                        case CLIENT_ERROR_NOT_FOUND:
                        case CLIENT_ERROR_CONFLICT:
                            // No 4xx metric here: some callers answer client errors with 200.
                            return clientFailureTransform.apply(r);
                        case SERVER_ERROR:
                        case OPERATION_NOT_ALLOWED:
                        default:
                            MasterApiMetrics.getInstance().incrementResp5xx();
                            return complete(StatusCodes.INTERNAL_SERVER_ERROR, r.message);
                    }
                })
                .recover(new PFBuilder<Throwable, Route>()
                        .match(AskTimeoutException.class, timeout -> {
                            MasterApiMetrics.getInstance().incrementAskTimeOutCount();
                            MasterApiMetrics.getInstance().incrementResp5xx();
                            return complete(StatusCodes.INTERNAL_SERVER_ERROR,
                                    errorJson(timeout.getMessage()));
                        })
                        .matchAny(failure -> {
                            MasterApiMetrics.getInstance().incrementResp5xx();
                            return complete(StatusCodes.INTERNAL_SERVER_ERROR,
                                    errorJson(failure.getMessage()));
                        })
                        .build())
                .get());
    }

    /**
     * Builds an LFU response cache keyed by request URI.
     *
     * @param actorSystem     actor system the cache settings are read from
     * @param initialCapacity initial number of cache slots
     * @param maxCapacity     maximum number of cached entries
     * @param ttlMillis       entry time-to-live in milliseconds
     */
    protected Cache<Uri, RouteResult> createCache(ActorSystem actorSystem, int initialCapacity,
                                                  int maxCapacity, int ttlMillis) {
        final CachingSettings defaults = CachingSettings.create(actorSystem);
        final LfuCacheSettings lfuSettings = defaults.lfuCacheSettings()
                .withInitialCapacity(initialCapacity)
                .withMaxCapacity(maxCapacity)
                .withTimeToLive(Duration.create(ttlMillis, TimeUnit.MILLISECONDS));
        return LfuCache.create(defaults.withLfuCacheSettings(lfuSettings));
    }
}
4,432
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v0/JobClusterRoute.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.v0;

import akka.actor.ActorSystem;
import akka.http.caching.LfuCache;
import akka.http.caching.javadsl.Cache;
import akka.http.caching.javadsl.CachingSettings;
import akka.http.caching.javadsl.LfuCacheSettings;
import akka.http.javadsl.model.HttpHeader;
import akka.http.javadsl.model.HttpMethods;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.Uri;
import akka.http.javadsl.server.ExceptionHandler;
import akka.http.javadsl.server.PathMatcher0;
import akka.http.javadsl.server.PathMatchers;
import akka.http.javadsl.server.RequestContext;
import akka.http.javadsl.server.Route;
import akka.http.javadsl.server.RouteResult;
import akka.http.javadsl.unmarshalling.StringUnmarshallers;
import akka.http.javadsl.unmarshalling.Unmarshaller;
import akka.japi.JavaPartialFunction;
import akka.japi.Pair;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.impl.StringArraySerializer;
import io.mantisrx.shaded.com.google.common.base.Strings;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandler;
import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.runtime.MantisJobDefinition;
import io.mantisrx.runtime.NamedJobDefinition;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.MasterConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.duration.Duration;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.stream.Collectors;

import static akka.http.javadsl.server.PathMatchers.segment;
import static akka.http.javadsl.server.directives.CachingDirectives.alwaysCache;
import static io.mantisrx.master.api.akka.route.utils.JobRouteUtils.createListJobIdsRequest;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR_CONFLICT;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SERVER_ERROR;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.CreateJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DeleteJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DeleteJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobClustersRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLARequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest;

/**
 * v0 REST routes for job-cluster management ({@code /api/submit} and
 * {@code /api/namedjob/*}): create/update/delete/enable/disable clusters,
 * quick-update artifacts, update labels/SLA/migration strategy, submit jobs,
 * and list clusters / job ids. GET list responses are served through an LFU
 * cache keyed by request URI.
 */
public class JobClusterRoute extends BaseRoute {
    private static final Logger logger = LoggerFactory.getLogger(JobClusterRoute.class);
    private final JobClusterRouteHandler jobClusterRouteHandler;
    private final JobRouteHandler jobRouteHandler;
    // LFU cache for GET list endpoints; sized/ttl'd from master configuration.
    private final Cache<Uri, RouteResult> cache;
    // Cache key function: only GET requests are cacheable; everything else falls through.
    private final JavaPartialFunction<RequestContext, Uri> requestUriKeyer =
        new JavaPartialFunction<RequestContext, Uri>() {
            public Uri apply(RequestContext in, boolean isCheck) {
                final HttpRequest request = in.getRequest();
                final boolean isGet = request.method() == HttpMethods.GET;
                if (isGet) {
                    return request.getUri();
                } else {
                    throw noMatch();
                }
            }
        };

    private final Metrics metrics;
    private final Counter jobClusterSubmit;
    private final Counter jobClusterSubmitError;
    private final Counter jobClusterCreate;
    private final Counter jobClusterCreateError;
    private final Counter jobClusterCreateUpdate;
    private final Counter jobClusterCreateUpdateError;
    private final Counter jobClusterDelete;
    private final Counter jobClusterDeleteError;
    private final Counter jobClusterDisable;
    private final Counter jobClusterDisableError;
    private final Counter jobClusterEnable;
    private final Counter jobClusterEnableError;
    private final Counter jobClusterQuickupdate;
    private final Counter jobClusterQuickupdateError;
    private final Counter jobClusterUpdateLabel;
    private final Counter jobClusterUpdateSla;
    private final Counter jobClusterUpdateSlaError;
    private final Counter jobClusterUpdateLabelError;
    private final Counter jobClusterListGET;
    private final Counter jobClusterListJobIdGET;
    private final Counter jobClusterListClusterGET;

    public JobClusterRoute(final JobClusterRouteHandler jobClusterRouteHandler,
                           final JobRouteHandler jobRouteHandler,
                           final ActorSystem actorSystem) {
        this.jobClusterRouteHandler = jobClusterRouteHandler;
        this.jobRouteHandler = jobRouteHandler;
        MasterConfiguration config = ConfigurationProvider.getConfig();
        // Uses the configurable cache factory from BaseRoute. (A dead private
        // createCache(ActorSystem) overload with hard-coded 5/50/1s settings was removed.)
        this.cache = createCache(actorSystem, config.getApiCacheMinSize(),
                config.getApiCacheMaxSize(), config.getApiCacheTtlMilliseconds());
        Metrics m = new Metrics.Builder()
            .id("V0JobClusterRoute")
            .addCounter("jobClusterSubmit")
            .addCounter("jobClusterSubmitError")
            .addCounter("jobClusterCreate")
            .addCounter("jobClusterCreateError")
            .addCounter("jobClusterCreateUpdate")
            .addCounter("jobClusterCreateUpdateError")
            .addCounter("jobClusterDelete")
            .addCounter("jobClusterDeleteError")
            .addCounter("jobClusterDisable")
            .addCounter("jobClusterDisableError")
            .addCounter("jobClusterEnable")
            .addCounter("jobClusterEnableError")
            .addCounter("jobClusterQuickupdate")
            .addCounter("jobClusterQuickupdateError")
            .addCounter("jobClusterUpdateLabel")
            .addCounter("jobClusterUpdateLabelError")
            .addCounter("jobClusterListGET")
            .addCounter("jobClusterListJobIdGET")
            .addCounter("jobClusterListClusterGET")
            .addCounter("jobClusterUpdateSla")
            .addCounter("jobClusterUpdateSlaError")
            .build();
        this.metrics = MetricsRegistry.getInstance().registerAndGet(m);
        this.jobClusterSubmit = metrics.getCounter("jobClusterSubmit");
        this.jobClusterSubmitError = metrics.getCounter("jobClusterSubmitError");
        this.jobClusterCreate = metrics.getCounter("jobClusterCreate");
        this.jobClusterCreateError = metrics.getCounter("jobClusterCreateError");
        this.jobClusterCreateUpdate = metrics.getCounter("jobClusterCreateUpdate");
        this.jobClusterCreateUpdateError = metrics.getCounter("jobClusterCreateUpdateError");
        this.jobClusterDelete = metrics.getCounter("jobClusterDelete");
        this.jobClusterDeleteError = metrics.getCounter("jobClusterDeleteError");
        this.jobClusterDisable = metrics.getCounter("jobClusterDisable");
        this.jobClusterDisableError = metrics.getCounter("jobClusterDisableError");
        this.jobClusterEnable = metrics.getCounter("jobClusterEnable");
        this.jobClusterEnableError = metrics.getCounter("jobClusterEnableError");
        this.jobClusterQuickupdate = metrics.getCounter("jobClusterQuickupdate");
        this.jobClusterQuickupdateError = metrics.getCounter("jobClusterQuickupdateError");
        this.jobClusterUpdateLabel = metrics.getCounter("jobClusterUpdateLabel");
        this.jobClusterUpdateLabelError = metrics.getCounter("jobClusterUpdateLabelError");
        this.jobClusterListGET = metrics.getCounter("jobClusterListGET");
        this.jobClusterListJobIdGET = metrics.getCounter("jobClusterListJobIdGET");
        this.jobClusterListClusterGET = metrics.getCounter("jobClusterListClusterGET");
        this.jobClusterUpdateSla = metrics.getCounter("jobClusterUpdateSla");
        this.jobClusterUpdateSlaError = metrics.getCounter("jobClusterUpdateSlaError");
    }

    private static final PathMatcher0 API_V0_JOBCLUSTER = segment("api").slash("namedjob");
    // CORS: v0 routes are open to any origin.
    private static final HttpHeader ACCESS_CONTROL_ALLOW_ORIGIN_HEADER =
        HttpHeader.parse("Access-Control-Allow-Origin", "*");
    private static final Iterable<HttpHeader> DEFAULT_RESPONSE_HEADERS = Arrays.asList(
        ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);

    /**
     * Route that returns
     * - a list of Job Ids only if 'jobIdsOnly' query param is set
     * - a list of JobIdInfo objects otherwise
     * The above lists are filtered and returned based on other criteria specified in the List
     * request like stageNumber, workerIndex, workerNumber, matchingLabels, regex, activeOnly,
     * jobState, workerState, limit.
     *
     * @param jobCluster the cluster name; anchored as ^name$ regex to match against Job IDs
     * @return Route job list route
     */
    private Route jobClusterListRoute(final String jobCluster) {
        return parameterOptional(StringUnmarshallers.BOOLEAN, "jobIdsOnly", (jobIdsOnly) ->
            parameterMultiMap(params -> {
                if (jobIdsOnly.isPresent() && jobIdsOnly.get()) {
                    logger.debug("/api/namedjob/listJobIds jobIdsOnly called");
                    return alwaysCache(cache, requestUriKeyer, () ->
                        extractUri(uri -> completeAsync(
                            jobRouteHandler.listJobIds(createListJobIdsRequest(params,
                                (Strings.isNullOrEmpty(jobCluster))
                                    ? Optional.empty()
                                    : Optional.of("^" + jobCluster + "$"),
                                true)),
                            resp -> completeOK(
                                resp.getJobIds().stream()
                                    .map(jobId -> jobId.getJobId())
                                    .collect(Collectors.toList()),
                                Jackson.marshaller())
                        ))
                    );
                }
                logger.debug("/api/namedjob/listJobIds/{} called", jobCluster);
                return alwaysCache(cache, requestUriKeyer, () ->
                    extractUri(uri -> {
                        // Client errors answer with an empty list rather than an error body.
                        return completeAsync(
                            jobRouteHandler.listJobIds(createListJobIdsRequest(params,
                                (Strings.isNullOrEmpty(jobCluster))
                                    ? Optional.empty()
                                    : Optional.of("^" + jobCluster + "$"),
                                false)),
                            resp -> completeOK(resp.getJobIds(), Jackson.marshaller()),
                            resp -> completeOK(Collections.emptyList(), Jackson.marshaller())
                        );
                    })
                );
            })
        );
    }

    /**
     * Validates a submit request against the configured per-worker resource caps.
     *
     * @return Pair of (true, "") when valid; (false, error message) otherwise.
     */
    private Pair<Boolean, String> validateSubmitJobRequest(MantisJobDefinition mjd) {
        if (mjd.getName() == null || mjd.getName().length() == 0) {
            logger.info("rejecting job submit request, must include name {}", mjd);
            return Pair.apply(false, "Job definition must include name");
        }
        SchedulingInfo schedulingInfo = mjd.getSchedulingInfo();
        if (schedulingInfo != null) {
            Map<Integer, StageSchedulingInfo> stages = schedulingInfo.getStages();
            if (stages == null) {
                // No stage info to validate against the caps.
                return Pair.apply(true, "");
            }
            for (StageSchedulingInfo stageSchedInfo : stages.values()) {
                double cpuCores = stageSchedInfo.getMachineDefinition().getCpuCores();
                int maxCpuCores = ConfigurationProvider.getConfig()
                    .getWorkerMachineDefinitionMaxCpuCores();
                if (cpuCores > maxCpuCores) {
                    logger.info("rejecting job submit request, requested CPU {} > max for {} (user: {}) (stage: {})",
                        cpuCores, mjd.getName(), mjd.getUser(), stages);
                    return Pair.apply(false,
                        "requested CPU cannot be more than max CPU per worker "+maxCpuCores);
                }
                double memoryMB = stageSchedInfo.getMachineDefinition().getMemoryMB();
                int maxMemoryMB = ConfigurationProvider.getConfig()
                    .getWorkerMachineDefinitionMaxMemoryMB();
                if (memoryMB > maxMemoryMB) {
                    logger.info("rejecting job submit request, requested memory {} > max for {} (user: {}) (stage: {})",
                        memoryMB, mjd.getName(), mjd.getUser(), stages);
                    return Pair.apply(false,
                        "requested memory cannot be more than max memoryMB per worker "+maxMemoryMB);
                }
                double networkMbps = stageSchedInfo.getMachineDefinition().getNetworkMbps();
                int maxNetworkMbps = ConfigurationProvider.getConfig()
                    .getWorkerMachineDefinitionMaxNetworkMbps();
                if (networkMbps > maxNetworkMbps) {
                    logger.info("rejecting job submit request, requested network {} > max for {} (user: {}) (stage: {})",
                        networkMbps, mjd.getName(), mjd.getUser(), stages);
                    return Pair.apply(false,
                        "requested network cannot be more than max networkMbps per worker "+maxNetworkMbps);
                }
                int numberOfInstances = stageSchedInfo.getNumberOfInstances();
                int maxWorkersPerStage = ConfigurationProvider.getConfig().getMaxWorkersPerStage();
                if (numberOfInstances > maxWorkersPerStage) {
                    logger.info("rejecting job submit request, requested num instances {} > max for {} (user: {}) (stage: {})",
                        numberOfInstances, mjd.getName(), mjd.getUser(), stages);
                    return Pair.apply(false,
                        "requested number of instances per stage cannot be more than " + maxWorkersPerStage);
                }
                StageScalingPolicy scalingPolicy = stageSchedInfo.getScalingPolicy();
                if (scalingPolicy != null) {
                    if (scalingPolicy.getMax() > maxWorkersPerStage) {
                        // fix: log the offending scaling-policy max (was logging numberOfInstances)
                        logger.info("rejecting job submit request, requested num instances in scaling policy {} > max for {} (user: {}) (stage: {})",
                            scalingPolicy.getMax(), mjd.getName(), mjd.getUser(), stages);
                        return Pair.apply(false,
                            "requested number of instances per stage in scaling policy cannot be more than " + maxWorkersPerStage);
                    }
                }
            }
        }
        return Pair.apply(true, "");
    }

    /** Assembles all POST and GET routes under /api/submit and /api/namedjob. */
    private Route getJobClusterRoutes() {
        return route(
            path(segment("api").slash("submit"), () ->
                decodeRequest(() ->
                    entity(Unmarshaller.entityToString(), request -> {
                        logger.debug("/api/submit called {}", request);
                        try {
                            MantisJobDefinition mjd = Jackson.fromJSON(request, MantisJobDefinition.class);
                            logger.debug("job submit request {}", mjd);
                            mjd.validate(true);
                            Pair<Boolean, String> validationResult = validateSubmitJobRequest(mjd);
                            if (!validationResult.first()) {
                                jobClusterSubmitError.increment();
                                return complete(StatusCodes.BAD_REQUEST,
                                    "{\"error\": \"" + validationResult.second() + "\"}");
                            }
                            jobClusterSubmit.increment();
                            return completeWithFuture(
                                jobClusterRouteHandler.submit(JobClusterProtoAdapter.toSubmitJobClusterRequest(mjd))
                                    .thenApply(this::toHttpResponse));
                        } catch (Exception e) {
                            logger.warn("exception in submit job request {}", request, e);
                            jobClusterSubmitError.increment();
                            return complete(StatusCodes.INTERNAL_SERVER_ERROR,
                                "{\"error\": \""+e.getMessage()+ "\"}");
                        }
                    })
                )
            ),
            pathPrefix(API_V0_JOBCLUSTER, () -> route(
                post(() -> route(
                    path("create", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), jobClusterDefn -> {
                                logger.debug("/api/namedjob/create called {}", jobClusterDefn);
                                try {
                                    final NamedJobDefinition namedJobDefinition =
                                        Jackson.fromJSON(jobClusterDefn, NamedJobDefinition.class);
                                    if (namedJobDefinition == null
                                        || namedJobDefinition.getJobDefinition() == null
                                        || namedJobDefinition.getJobDefinition().getJobJarFileLocation() == null
                                        || namedJobDefinition.getJobDefinition().getName() == null
                                        || namedJobDefinition.getJobDefinition().getName().isEmpty()) {
                                        // NOTE(review): this rejection path does not bump
                                        // jobClusterCreateError, unlike the update route.
                                        logger.warn("JobCluster create request must include name and URL {}", jobClusterDefn);
                                        return complete(StatusCodes.BAD_REQUEST,
                                            "{\"error\": \"Job definition must include name and URL\"}");
                                    }
                                    final CompletionStage<CreateJobClusterResponse> response =
                                        jobClusterRouteHandler.create(
                                            JobClusterProtoAdapter.toCreateJobClusterRequest(namedJobDefinition));
                                    jobClusterCreate.increment();
                                    return completeWithFuture(response
                                        .thenApply(r -> {
                                            // "already exists" conflicts are deliberately surfaced
                                            // as server errors (HTTP 500) in the v0 API.
                                            if ((r.responseCode == CLIENT_ERROR
                                                    || r.responseCode == CLIENT_ERROR_CONFLICT)
                                                && r.message.contains("already exists")) {
                                                return new CreateJobClusterResponse(r.requestId,
                                                    SERVER_ERROR, r.message, r.getJobClusterName());
                                            }
                                            return r;
                                        })
                                        .thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error creating JobCluster {}", jobClusterDefn, e);
                                    jobClusterCreateError.increment();
                                    return complete(StatusCodes.BAD_REQUEST,
                                        "Can't read valid json in request: "+e.getMessage());
                                } catch (Exception e) {
                                    logger.warn("Error creating JobCluster {}", jobClusterDefn, e);
                                    jobClusterCreateError.increment();
                                    // fix: quote the message so the error body is valid JSON
                                    return complete(StatusCodes.INTERNAL_SERVER_ERROR,
                                        "{\"error\": \""+e.getMessage()+"\"}");
                                }
                            })
                        )
                    ),
                    path("update", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), jobClusterDefn -> {
                                logger.debug("/api/namedjob/update called {}", jobClusterDefn);
                                try {
                                    final NamedJobDefinition namedJobDefinition =
                                        Jackson.fromJSON(jobClusterDefn, NamedJobDefinition.class);
                                    if (namedJobDefinition == null
                                        || namedJobDefinition.getJobDefinition() == null
                                        || namedJobDefinition.getJobDefinition().getJobJarFileLocation() == null
                                        || namedJobDefinition.getJobDefinition().getName() == null
                                        || namedJobDefinition.getJobDefinition().getName().isEmpty()) {
                                        logger.warn("JobCluster update request must include name and URL {}", jobClusterDefn);
                                        jobClusterCreateUpdateError.increment();
                                        return complete(StatusCodes.BAD_REQUEST,
                                            "{\"error\": \"Job definition must include name and URL\"}");
                                    }
                                    final CompletionStage<UpdateJobClusterResponse> response =
                                        jobClusterRouteHandler.update(
                                            JobClusterProtoAdapter.toUpdateJobClusterRequest(namedJobDefinition));
                                    jobClusterCreateUpdate.increment();
                                    return completeWithFuture(response.thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error updating JobCluster {}", jobClusterDefn, e);
                                    jobClusterCreateUpdateError.increment();
                                    return complete(StatusCodes.BAD_REQUEST,
                                        "Can't read valid json in request: "+e.getMessage());
                                } catch (Exception e) {
                                    logger.warn("Error updating JobCluster {}", jobClusterDefn, e);
                                    jobClusterCreateUpdateError.increment();
                                    // fix: quote the message so the error body is valid JSON
                                    return complete(StatusCodes.INTERNAL_SERVER_ERROR,
                                        "{\"error\": \""+e.getMessage()+"\"}");
                                }
                            })
                        )
                    ),
                    path("delete", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), deleteReq -> {
                                logger.debug("/api/namedjob/delete called {}", deleteReq);
                                try {
                                    final DeleteJobClusterRequest deleteJobClusterRequest =
                                        Jackson.fromJSON(deleteReq, DeleteJobClusterRequest.class);
                                    final CompletionStage<DeleteJobClusterResponse> response =
                                        jobClusterRouteHandler.delete(deleteJobClusterRequest);
                                    jobClusterDelete.increment();
                                    return completeWithFuture(response.thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error deleting JobCluster {}", deleteReq, e);
                                    jobClusterDeleteError.increment();
                                    return complete(StatusCodes.BAD_REQUEST,
                                        "Can't find valid json in request: " + e.getMessage());
                                }
                            })
                        )
                    ),
                    path("disable", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), request -> {
                                logger.debug("/api/namedjob/disable called {}", request);
                                try {
                                    final DisableJobClusterRequest disableJobClusterRequest =
                                        Jackson.fromJSON(request, DisableJobClusterRequest.class);
                                    final CompletionStage<DisableJobClusterResponse> response =
                                        jobClusterRouteHandler.disable(disableJobClusterRequest);
                                    jobClusterDisable.increment();
                                    return completeWithFuture(response.thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error disabling JobCluster {}", request, e);
                                    jobClusterDisableError.increment();
                                    return complete(StatusCodes.BAD_REQUEST,
                                        "Can't find valid json in request: " + e.getMessage());
                                }
                            })
                        )
                    ),
                    path("enable", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), request -> {
                                logger.debug("/api/namedjob/enable called {}", request);
                                try {
                                    final EnableJobClusterRequest enableJobClusterRequest =
                                        Jackson.fromJSON(request, EnableJobClusterRequest.class);
                                    final CompletionStage<EnableJobClusterResponse> response =
                                        jobClusterRouteHandler.enable(enableJobClusterRequest);
                                    jobClusterEnable.increment();
                                    return completeWithFuture(response.thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error enabling JobCluster {}", request, e);
                                    jobClusterEnableError.increment();
                                    return complete(StatusCodes.BAD_REQUEST,
                                        "Can't find valid json in request: " + e.getMessage());
                                }
                            })
                        )
                    ),
                    path("quickupdate", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), request -> {
                                logger.debug("/api/namedjob/quickupdate called {}", request);
                                try {
                                    final UpdateJobClusterArtifactRequest updateJobClusterArtifactRequest =
                                        Jackson.fromJSON(request, UpdateJobClusterArtifactRequest.class);
                                    final CompletionStage<UpdateJobClusterArtifactResponse> response =
                                        jobClusterRouteHandler.updateArtifact(updateJobClusterArtifactRequest);
                                    jobClusterQuickupdate.increment();
                                    return completeWithFuture(response.thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error on quickupdate for JobCluster {}", request, e);
                                    jobClusterQuickupdateError.increment();
                                    return complete(StatusCodes.BAD_REQUEST,
                                        "Can't find valid json in request: " + e.getMessage());
                                }
                            })
                        )
                    ),
                    path("updatelabels", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), request -> {
                                logger.debug("/api/namedjob/updatelabels called {}", request);
                                try {
                                    final UpdateJobClusterLabelsRequest updateJobClusterLabelsRequest =
                                        Jackson.fromJSON(request, UpdateJobClusterLabelsRequest.class);
                                    jobClusterUpdateLabel.increment();
                                    return completeWithFuture(
                                        jobClusterRouteHandler.updateLabels(updateJobClusterLabelsRequest)
                                            .thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error updating labels for JobCluster {}", request, e);
                                    jobClusterUpdateLabelError.increment();
                                    return complete(StatusCodes.BAD_REQUEST,
                                        "Can't find valid json in request: " + e.getMessage());
                                }
                            })
                        )
                    ),
                    path("updatesla", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), request -> {
                                logger.debug("/api/namedjob/updatesla called {}", request);
                                jobClusterUpdateSla.increment();
                                try {
                                    final UpdateJobClusterSLARequest updateJobClusterSLARequest =
                                        Jackson.fromJSON(request, UpdateJobClusterSLARequest.class);
                                    return completeWithFuture(
                                        jobClusterRouteHandler.updateSLA(updateJobClusterSLARequest)
                                            .thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error updating SLA for JobCluster {}", request, e);
                                    jobClusterUpdateSlaError.increment();
                                    return complete(StatusCodes.BAD_REQUEST,
                                        "Can't find valid json in request: " + e.getMessage());
                                }
                            })
                        )
                    ),
                    path("migratestrategy", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), request -> {
                                logger.debug("/api/namedjob/migratestrategy called {}", request);
                                try {
                                    final UpdateJobClusterWorkerMigrationStrategyRequest updateMigrateStrategyReq =
                                        Jackson.fromJSON(request,
                                            UpdateJobClusterWorkerMigrationStrategyRequest.class);
                                    return completeWithFuture(
                                        jobClusterRouteHandler.updateWorkerMigrateStrategy(updateMigrateStrategyReq)
                                            .thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error updating migrate strategy for JobCluster {}", request, e);
                                    return complete(StatusCodes.BAD_REQUEST,
                                        "Can't find valid json in request: " + e.getMessage());
                                }
                            })
                        )
                    ),
                    path("quicksubmit", () ->
                        decodeRequest(() ->
                            entity(Unmarshaller.entityToString(), request -> {
                                logger.debug("/api/namedjob/quicksubmit called {}", request);
                                try {
                                    final JobClusterManagerProto.SubmitJobRequest submitJobRequest =
                                        Jackson.fromJSON(request, JobClusterManagerProto.SubmitJobRequest.class);
                                    return completeWithFuture(
                                        jobClusterRouteHandler.submit(submitJobRequest)
                                            .thenApply(this::toHttpResponse));
                                } catch (IOException e) {
                                    logger.warn("Error on quick submit for JobCluster {}", request, e);
                                    return complete(StatusCodes.BAD_REQUEST,
                                        "Can't find valid json in request: " + e.getMessage());
                                }
                            })
                        )
                    )
                )),
                get(() -> route(
                    pathPrefix("list", () -> route(
                        pathEndOrSingleSlash(() -> {
                            logger.debug("/api/namedjob/list called");
                            jobClusterListGET.increment();
                            return alwaysCache(cache, requestUriKeyer, () ->
                                extractUri(uri -> completeAsync(
                                    jobClusterRouteHandler.getAllJobClusters(new ListJobClustersRequest()),
                                    resp -> completeOK(
                                        resp.getJobClusters()
                                            .stream()
                                            .map(jobClusterMetadataView ->
                                                JobClusterProtoAdapter.toJobClusterInfo(jobClusterMetadataView))
                                            .collect(Collectors.toList()),
                                        Jackson.marshaller()),
                                    resp -> completeOK(Collections.emptyList(), Jackson.marshaller()))));
                        }),
                        path(PathMatchers.segment(), (jobCluster) -> {
                            if (logger.isDebugEnabled()) {
                                logger.debug("/api/namedjob/list/{} called", jobCluster);
                            }
                            jobClusterListClusterGET.increment();
                            return completeAsync(
                                jobClusterRouteHandler.getJobClusterDetails(
                                    new JobClusterManagerProto.GetJobClusterRequest(jobCluster)),
                                resp -> completeOK(
                                    resp.getJobCluster().map(jc -> Arrays.asList(jc)).orElse(Collections.emptyList()),
                                    Jackson.marshaller()),
                                resp -> completeOK(Collections.emptyList(), Jackson.marshaller())
                            );
                        })
                    )),
                    path(segment("listJobIds").slash(PathMatchers.segment()), (jobCluster) -> {
                        logger.debug("/api/namedjob/listJobIds/{} called", jobCluster);
                        jobClusterListJobIdGET.increment();
                        return jobClusterListRoute(jobCluster);
                    }),
                    path("listJobIds", () -> {
                        logger.debug("/api/namedjob/listJobIds called");
                        return complete(StatusCodes.BAD_REQUEST,
                            "Specify the Job cluster name '/api/namedjob/listJobIds/<JobClusterName>' to list the job Ids");
                    })
                ))
            ))
        );
    }

    /**
     * Wraps the cluster routes with CORS headers, a catch-all exception handler,
     * and the caller-supplied filter.
     */
    public Route createRoute(Function<Route, Route> routeFilter) {
        logger.info("creating routes");
        final ExceptionHandler genericExceptionHandler = ExceptionHandler.newBuilder()
            .match(Exception.class, e -> {
                logger.error("got exception", e);
                return complete(StatusCodes.INTERNAL_SERVER_ERROR,
                    "{\"error\": \"" + e.getMessage() + "\"}");
            })
            .build();
        return respondWithHeaders(DEFAULT_RESPONSE_HEADERS,
            () -> handleExceptions(genericExceptionHandler,
                () -> routeFilter.apply(getJobClusterRoutes())));
    }
}
4,433
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/v0/JobStatusRoute.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.v0;

import akka.NotUsed;
import akka.http.javadsl.model.HttpHeader;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.ws.Message;
import akka.http.javadsl.server.ExceptionHandler;
import akka.http.javadsl.server.PathMatchers;
import akka.http.javadsl.server.Route;
import akka.stream.javadsl.Flow;
import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Arrays;
import java.util.function.Function;
import static akka.http.javadsl.server.PathMatchers.segment;

/**
 * v0 route exposing a per-job status stream over WebSocket at
 * {@code GET /job/status/<jobId>}. The message flow is delegated to
 * {@link JobStatusRouteHandler}.
 */
public class JobStatusRoute extends BaseRoute {
    private static final Logger logger = LoggerFactory.getLogger(JobStatusRoute.class);
    private final JobStatusRouteHandler jobStatusRouteHandler;

    public JobStatusRoute(final JobStatusRouteHandler jobStatusRouteHandler) {
        this.jobStatusRouteHandler = jobStatusRouteHandler;
    }

    // CORS: v0 routes are open to any origin.
    private static final HttpHeader ACCESS_CONTROL_ALLOW_ORIGIN_HEADER =
        HttpHeader.parse("Access-Control-Allow-Origin", "*");
    private static final Iterable<HttpHeader> DEFAULT_RESPONSE_HEADERS = Arrays.asList(
        ACCESS_CONTROL_ALLOW_ORIGIN_HEADER);

    private Route getJobStatusRoutes() {
        return route(
            get(() -> route(
                path(segment("job").slash("status").slash(PathMatchers.segment()), (jobId) -> {
                    // debug (was info): per-request logging, consistent with the other v0 routes
                    logger.debug("/job/status/{} called", jobId);
                    Flow<Message, Message, NotUsed> webSocketFlow = jobStatusRouteHandler.jobStatus(jobId);
                    return handleWebSocketMessages(webSocketFlow);
                })
            ))
        );
    }

    /**
     * Wraps the status route with CORS headers, an IOException handler, and the
     * caller-supplied filter.
     */
    public Route createRoute(Function<Route, Route> routeFilter) {
        logger.info("creating routes");
        final ExceptionHandler jsonExceptionHandler = ExceptionHandler.newBuilder()
            .match(IOException.class, x -> {
                logger.error("got exception", x);
                return complete(StatusCodes.BAD_REQUEST, "caught exception " + x.getMessage());
            })
            .build();
        return respondWithHeaders(DEFAULT_RESPONSE_HEADERS,
            () -> handleExceptions(jsonExceptionHandler,
                () -> routeFilter.apply(getJobStatusRoutes())));
    }
}
4,434
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/proto/JobClusterInfo.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.proto; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty; public class JobClusterInfo { private final String name; private final String jobId; @JsonCreator @JsonIgnoreProperties(ignoreUnknown=true) public JobClusterInfo(@JsonProperty("name") String name, @JsonProperty("jobId") String jobId) { this.name = name; this.jobId = jobId; } public String getName() { return name; } public String getJobId() { return jobId; } }
4,435
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/proto/JobClusterProtoAdapter.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.proto;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.master.jobcluster.MantisJobClusterMetadataView;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataView;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.CreateJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterRequest;
import io.mantisrx.runtime.MantisJobDefinition;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.NamedJobDefinition;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.server.master.domain.*;
import io.mantisrx.server.master.http.api.CompactJobInfo;
import io.mantisrx.server.master.http.api.JobClusterInfo;
import io.mantisrx.server.master.store.MantisJobMetadata;
import io.mantisrx.server.master.store.MantisStageMetadata;
import io.mantisrx.server.master.store.MantisWorkerMetadata;

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;

/**
 * Static adapters translating between the public API/runtime protocol types
 * (NamedJobDefinition, MantisJobDefinition, metadata views) and the internal
 * JobClusterManagerProto request types and HTTP API DTOs.
 */
public class JobClusterProtoAdapter {

    // explicit private constructor to prohibit instantiation
    private JobClusterProtoAdapter() {}

    /**
     * Builds the internal JobClusterDefinitionImpl shared by both the create and
     * update request adapters. The single JobClusterConfig entry is stamped with
     * the current wall-clock time as its upload timestamp.
     */
    private static JobClusterDefinitionImpl toJobClusterDefinition(final NamedJobDefinition njd) {
        final MantisJobDefinition jd = njd.getJobDefinition();
        return new JobClusterDefinitionImpl(
            jd.getName(),
            Arrays.asList(new JobClusterConfig(jd.getJobJarFileLocation().toString(),
                System.currentTimeMillis(),
                jd.getVersion(),
                jd.getSchedulingInfo())),
            njd.getOwner(),
            jd.getUser(),
            new SLA(jd.getSlaMin(), jd.getSlaMax(), jd.getCronSpec(),
                // only KEEP_EXISTING maps through; anything else (incl. null) becomes KEEP_NEW
                jd.getCronPolicy() == NamedJobDefinition.CronPolicy.KEEP_EXISTING
                    ? IJobClusterDefinition.CronPolicy.KEEP_EXISTING
                    : IJobClusterDefinition.CronPolicy.KEEP_NEW),
            jd.getMigrationConfig(),
            jd.getIsReadyForJobMaster(),
            jd.getParameters(),
            jd.getLabels());
    }

    /**
     * Adapts a NamedJobDefinition to a CreateJobClusterRequest.
     * NOTE(review): the submitting user is hard-coded to "user" here — confirm intended.
     */
    public static final CreateJobClusterRequest toCreateJobClusterRequest(final NamedJobDefinition njd) {
        return new CreateJobClusterRequest(toJobClusterDefinition(njd), "user");
    }

    /**
     * Adapts a NamedJobDefinition to an UpdateJobClusterRequest.
     * NOTE(review): the submitting user is hard-coded to "user" here — confirm intended.
     */
    public static final UpdateJobClusterRequest toUpdateJobClusterRequest(final NamedJobDefinition njd) {
        return new UpdateJobClusterRequest(toJobClusterDefinition(njd), "user");
    }

    /**
     * Adapts a MantisJobDefinition to a SubmitJobRequest carrying an internal JobDefinition.
     *
     * @throws InvalidJobException if the internal JobDefinition rejects the inputs
     */
    public static final JobClusterManagerProto.SubmitJobRequest toSubmitJobClusterRequest(
            final MantisJobDefinition jd) throws InvalidJobException {
        return new JobClusterManagerProto.SubmitJobRequest(
            jd.getName(),
            jd.getUser(),
            Optional.of(
                new JobDefinition(
                    jd.getName(),
                    jd.getUser(),
                    // artifact name extracted from the jar URL; empty string when absent
                    (DataFormatAdapter.extractArtifactName(jd.getJobJarFileLocation())).orElse(""),
                    jd.getVersion(),
                    jd.getParameters(),
                    jd.getJobSla(),
                    jd.getSubscriptionTimeoutSecs(),
                    jd.getSchedulingInfo(),
                    // -1 signals "unknown stage count" when no scheduling info is present
                    jd.getSchedulingInfo() == null ? -1 : jd.getSchedulingInfo().getStages().size(),
                    jd.getLabels())
            ));
    }

    /** Converts a job cluster metadata view into the HTTP API JobClusterInfo DTO. */
    public static JobClusterInfo toJobClusterInfo(MantisJobClusterMetadataView jobClusterMetadataView) {
        List<JobClusterInfo.JarInfo> jarInfoList =
            DataFormatAdapter.convertNamedJobJarListToJarInfoList(jobClusterMetadataView.getJars());
        return new JobClusterInfo(jobClusterMetadataView.getName(),
            jobClusterMetadataView.getSla(),
            jobClusterMetadataView.getOwner(),
            jobClusterMetadataView.isDisabled(),
            jobClusterMetadataView.isCronActive(),
            jarInfoList,
            jobClusterMetadataView.getParameters(),
            jobClusterMetadataView.getLabels());
    }

    /**
     * Compact JSON view of a job id: id, version, state and submit/terminate timestamps
     * (timestamps serialized as strings; empty string when unknown).
     */
    public static class JobIdInfo {
        private final String jobId;
        private final String version;
        private final MantisJobState state;
        private final String submittedAt;
        private final String terminatedAt;
        private final String user;

        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public JobIdInfo(@JsonProperty("jobId") String jobId,
                         @JsonProperty("version") String version,
                         @JsonProperty("state") MantisJobState state,
                         @JsonProperty("submittedAt") String submittedAt,
                         @JsonProperty("terminatedAt") String terminatedAt,
                         @JsonProperty("user") String user) {
            this.jobId = jobId;
            this.version = version;
            this.state = state;
            this.submittedAt = submittedAt;
            this.terminatedAt = terminatedAt;
            this.user = user;
        }

        public String getJobId() {
            return jobId;
        }

        public String getVersion() {
            return version;
        }

        public MantisJobState getState() {
            return state;
        }

        public String getSubmittedAt() {
            return submittedAt;
        }

        public String getTerminatedAt() {
            return terminatedAt;
        }

        public String getUser() {
            return user;
        }

        @Override
        public String toString() {
            return "JobIdInfo{" +
                "jobId='" + jobId + '\'' +
                ", version='" + version + '\'' +
                ", state=" + state +
                ", submittedAt='" + submittedAt + '\'' +
                ", terminatedAt='" + terminatedAt + '\'' +
                ", user='" + user + '\'' +
                '}';
        }

        /** Fluent builder; timestamps default to empty strings until set. */
        public static class Builder {
            private String jobId;
            private String version;
            private MantisJobState state;
            private String submittedAt = "";
            private String terminatedAt = "";
            private String user = "";

            public Builder() {
            }

            public Builder withJobIdStr(String jobId) {
                this.jobId = jobId;
                return this;
            }

            public Builder withJobId(JobId jId) {
                jobId = jId.getId();
                return this;
            }

            public Builder withJobState(JobState st) {
                state = toJobState(st);
                return this;
            }

            public Builder withVersion(String version) {
                this.version = version;
                return this;
            }

            public Builder withSubmittedAt(long time) {
                submittedAt = Long.toString(time);
                return this;
            }

            public Builder withTerminatedAt(long time) {
                // -1 denotes "not terminated"; keep the default empty string in that case
                if (time != -1) {
                    terminatedAt = Long.toString(time);
                }
                return this;
            }

            public Builder withUser(String user) {
                this.user = user;
                return this;
            }

            public JobIdInfo build() {
                return new JobIdInfo(jobId, version, state, submittedAt, terminatedAt, user);
            }
        }
    }

    /**
     * Maps the internal JobState to the runtime MantisJobState. Terminating states
     * collapse into their terminal counterparts (normal -> Completed, abnormal -> Failed).
     */
    private static MantisJobState toJobState(final JobState state) {
        switch (state) {
        case Accepted:
            return MantisJobState.Accepted;
        case Launched:
            return MantisJobState.Launched;
        case Terminating_normal:
        case Completed:
            return MantisJobState.Completed;
        case Terminating_abnormal:
        case Failed:
            return MantisJobState.Failed;
        case Noop:
            return MantisJobState.Noop;
        default:
            throw new IllegalArgumentException("cannot translate JobState to MantisJobState " + state);
        }
    }

    /** Converts a job metadata view into the compact JobIdInfo DTO. */
    public static final JobIdInfo toJobIdInfo(final MantisJobMetadataView view) {
        MantisJobMetadata jm = view.getJobMetadata();
        return new JobIdInfo(
            jm.getJobId(),
            view.getVersion(),
            jm.getState(),
            String.valueOf(jm.getSubmittedAt()),
            view.getTerminatedAt(),
            jm.getUser()
        );
    }

    /**
     * Converts a job metadata view into a CompactJobInfo summary: total worker count,
     * aggregate CPU/memory across stages, and a worker-state -> count histogram.
     */
    public static final CompactJobInfo toCompactJobInfo(final MantisJobMetadataView view) {
        MantisJobMetadata jm = view.getJobMetadata();
        int workers = 0;
        double totCPUs = 0.0;
        double totMem = 0.0;
        Map<String, Integer> stSmry = new HashMap<>();
        for (MantisStageMetadata s : view.getStageMetadataList()) {
            workers += s.getNumWorkers();
            totCPUs += s.getNumWorkers() * s.getMachineDefinition().getCpuCores();
            totMem += s.getNumWorkers() * s.getMachineDefinition().getMemoryMB();
        }
        for (MantisWorkerMetadata w : view.getWorkerMetadataList()) {
            // count workers per state (keyed by the state's string form)
            stSmry.merge(w.getState() + "", 1, Integer::sum);
        }
        return new CompactJobInfo(
            jm.getJobId(),
            (jm.getJarUrl() != null) ? jm.getJarUrl().toString() : "",
            jm.getSubmittedAt(),
            jm.getUser(),
            jm.getState(),
            jm.getSla() != null ? jm.getSla().getDurationType() : MantisJobDurationType.Transient,
            jm.getNumStages(),
            workers,
            totCPUs,
            totMem,
            stSmry,
            jm.getLabels()
        );
    }
}
4,436
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/proto/JobStatus.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.proto; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty; import io.mantisrx.server.core.Status; import java.util.Objects; public class JobStatus { private final Status status; @JsonCreator @JsonIgnoreProperties(ignoreUnknown = true) public JobStatus(@JsonProperty("status") final Status status) { this.status = status; } public Status getStatus() { return status; } @Override public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; final JobStatus jobStatus = (JobStatus) o; return Objects.equals(status, jobStatus.status); } @Override public int hashCode() { return Objects.hash(status); } @Override public String toString() { return "JobStatus{" + "status=" + status + '}'; } }
4,437
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/proto/JobDiscoveryRouteProto.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.proto;

import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.server.core.JobSchedulingInfo;
import rx.Observable;

import java.util.Optional;

/**
 * Response protocol messages for the job discovery routes. Each response carries the
 * standard BaseResponse fields plus an optional Observable stream payload; the stream
 * is absent on error responses.
 */
public class JobDiscoveryRouteProto {

    /** Response carrying an optional stream of JobSchedulingInfo updates for a job. */
    public static class SchedInfoResponse extends BaseResponse {
        // Empty when the request failed and no stream is available.
        private final Optional<Observable<JobSchedulingInfo>> schedInfoStream;

        /**
         * @param stream scheduling-info stream; may be null, in which case the
         *               payload is recorded as absent
         */
        public SchedInfoResponse(final long requestId,
                                 final ResponseCode responseCode,
                                 final String message,
                                 final Observable<JobSchedulingInfo> stream) {
            super(requestId, responseCode, message);
            this.schedInfoStream = Optional.ofNullable(stream);
        }

        /** Error/empty variant: no scheduling-info stream attached. */
        public SchedInfoResponse(final long requestId,
                                 final ResponseCode responseCode,
                                 final String message) {
            this(requestId, responseCode, message, null);
        }

        public Optional<Observable<JobSchedulingInfo>> getSchedInfoStream() {
            return schedInfoStream;
        }
    }

    /** Response carrying an optional stream of JobClusterInfo updates. */
    public static class JobClusterInfoResponse extends BaseResponse {
        // Empty when the request failed and no stream is available.
        private final Optional<Observable<JobClusterInfo>> jobClusterInfoObs;

        /**
         * @param stream job-cluster-info stream; may be null, in which case the
         *               payload is recorded as absent
         */
        public JobClusterInfoResponse(final long requestId,
                                      final ResponseCode responseCode,
                                      final String message,
                                      final Observable<JobClusterInfo> stream) {
            super(requestId, responseCode, message);
            this.jobClusterInfoObs = Optional.ofNullable(stream);
        }

        /** Error/empty variant: no job-cluster-info stream attached. */
        public JobClusterInfoResponse(final long requestId,
                                      final ResponseCode responseCode,
                                      final String message) {
            this(requestId, responseCode, message, null);
        }

        public Optional<Observable<JobClusterInfo>> getJobClusterInfoObs() {
            return jobClusterInfoObs;
        }
    }
}
4,438
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/utils/JobRouteUtils.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.utils;

import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.worker.WorkerHeartbeat;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.master.jobcluster.job.worker.WorkerStatus;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.server.core.PostJobStatusRequest;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.List;
import java.util.Map;
import java.util.Optional;

import static io.mantisrx.master.api.akka.route.utils.QueryParamUtils.paramValue;
import static io.mantisrx.master.api.akka.route.utils.QueryParamUtils.paramValueAsBool;
import static io.mantisrx.master.api.akka.route.utils.QueryParamUtils.paramValueAsInt;
import static io.mantisrx.master.api.akka.route.utils.QueryParamUtils.paramValuesAsInt;
import static io.mantisrx.master.api.akka.route.utils.QueryParamUtils.paramValuesAsMetaState;

/**
 * Helpers for translating HTTP query parameters and posted job status payloads into
 * the internal request/event types used by the job routes.
 */
public class JobRouteUtils {
    private static final Logger logger = LoggerFactory.getLogger(JobRouteUtils.class);

    public static final String QUERY_PARAM_LIMIT = "limit";
    public static final String QUERY_PARAM_JOB_STATE = "jobState";
    public static final String QUERY_PARAM_STAGE_NUM = "stageNumber";
    public static final String QUERY_PARAM_WORKER_INDEX = "workerIndex";
    public static final String QUERY_PARAM_WORKER_NUM = "workerNumber";
    public static final String QUERY_PARAM_WORKER_STATE = "workerState";
    public static final String QUERY_PARAM_ACTIVE_ONLY = "activeOnly";
    public static final String QUERY_PARAM_LABELS_QUERY = "labels";
    public static final String QUERY_PARAM_LABELS_OPERAND = "labels.op";

    // utility class; not meant to be instantiated
    private JobRouteUtils() {}

    /**
     * Converts a posted job status into the corresponding worker event:
     * heartbeats become WorkerHeartbeat, everything else becomes WorkerStatus.
     */
    public static WorkerEvent createWorkerStatusRequest(final PostJobStatusRequest req) {
        final Status status = req.getStatus();
        if (status.getType() != Status.TYPE.HEARTBEAT) {
            final WorkerId workerId = new WorkerId(req.getJobId(), status.getWorkerIndex(), status.getWorkerNumber());
            if (logger.isTraceEnabled()) {
                logger.trace("forwarding worker status type {} from worker {}", status.getType().name(), workerId);
            }
            return new WorkerStatus(status);
        } else {
            return new WorkerHeartbeat(status);
        }
    }

    /**
     * Builds a ListJobsRequest from the query parameters.
     *
     * @param params            raw query parameters; may be null (then only regex is applied)
     * @param regex             optional job-id regex filter
     * @param activeOnlyDefault value for activeOnly when the query param is absent
     */
    public static JobClusterManagerProto.ListJobsRequest createListJobsRequest(final Map<String, List<String>> params,
                                                                               final Optional<String> regex,
                                                                               final boolean activeOnlyDefault) {
        if (params == null) {
            if (regex.isPresent()) {
                return new JobClusterManagerProto.ListJobsRequest(regex.get());
            } else {
                return new JobClusterManagerProto.ListJobsRequest();
            }
        }

        final Optional<Integer> limit = paramValueAsInt(params, QUERY_PARAM_LIMIT);
        final Optional<JobState.MetaState> jobState =
            paramValue(params, QUERY_PARAM_JOB_STATE).map(p -> JobState.MetaState.valueOf(p));
        final List<Integer> stageNumber = paramValuesAsInt(params, QUERY_PARAM_STAGE_NUM);
        final List<Integer> workerIndex = paramValuesAsInt(params, QUERY_PARAM_WORKER_INDEX);
        final List<Integer> workerNumber = paramValuesAsInt(params, QUERY_PARAM_WORKER_NUM);
        final List<WorkerState.MetaState> workerState = paramValuesAsMetaState(params, QUERY_PARAM_WORKER_STATE);
        final Optional<Boolean> activeOnly =
            Optional.of(paramValueAsBool(params, QUERY_PARAM_ACTIVE_ONLY).orElse(activeOnlyDefault));
        final Optional<String> labelsQuery = paramValue(params, QUERY_PARAM_LABELS_QUERY);
        final Optional<String> labelsOperand = paramValue(params, QUERY_PARAM_LABELS_OPERAND);

        return new JobClusterManagerProto.ListJobsRequest(new JobClusterManagerProto.ListJobCriteria(
            limit, jobState, stageNumber, workerIndex, workerNumber, workerState,
            activeOnly, regex, labelsQuery, labelsOperand));
    }

    /**
     * Builds a ListJobIdsRequest from the query parameters.
     * NOTE(review): unlike createListJobsRequest, the null-params branch drops the
     * regex filter entirely — confirm whether that asymmetry is intended.
     *
     * @param params            raw query parameters; may be null
     * @param regex             optional job-id regex filter
     * @param activeOnlyDefault value for activeOnly when the query param is absent
     */
    public static JobClusterManagerProto.ListJobIdsRequest createListJobIdsRequest(final Map<String, List<String>> params,
                                                                                   final Optional<String> regex,
                                                                                   final boolean activeOnlyDefault) {
        if (params == null) {
            return new JobClusterManagerProto.ListJobIdsRequest();
        }

        final Optional<Integer> limit = paramValueAsInt(params, QUERY_PARAM_LIMIT);
        final Optional<JobState.MetaState> jobState =
            paramValue(params, QUERY_PARAM_JOB_STATE).map(p -> JobState.MetaState.valueOf(p));
        // list job ids is used on job cluster detail page, the UI does not set this flag
        // explicitly but expects to see completed jobs as well
        final Optional<Boolean> activeOnly =
            Optional.of(paramValueAsBool(params, QUERY_PARAM_ACTIVE_ONLY).orElse(activeOnlyDefault));
        final Optional<String> labelsQuery = paramValue(params, QUERY_PARAM_LABELS_QUERY);
        final Optional<String> labelsOperand = paramValue(params, QUERY_PARAM_LABELS_OPERAND);

        return new JobClusterManagerProto.ListJobIdsRequest(limit, jobState, activeOnly,
            regex, labelsQuery, labelsOperand);
    }
}
4,439
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/utils/JobDiscoveryHeartbeats.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.utils; import io.mantisrx.master.api.akka.route.proto.JobClusterInfo; import io.mantisrx.server.core.JobSchedulingInfo; public class JobDiscoveryHeartbeats { public static final JobClusterInfo JOB_CLUSTER_INFO_HB_INSTANCE = new JobClusterInfo(JobSchedulingInfo.HB_JobId, null); public static final JobSchedulingInfo SCHED_INFO_HB_INSTANCE = new JobSchedulingInfo(JobSchedulingInfo.HB_JobId, null); }
4,440
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/utils/StreamingUtils.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.utils;

import akka.http.javadsl.model.sse.ServerSentEvent;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.master.api.akka.route.proto.JobClusterInfo;
import io.mantisrx.server.core.JobSchedulingInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Optional;

import static io.mantisrx.master.api.akka.route.utils.JobDiscoveryHeartbeats.JOB_CLUSTER_INFO_HB_INSTANCE;
import static io.mantisrx.master.api.akka.route.utils.JobDiscoveryHeartbeats.SCHED_INFO_HB_INSTANCE;

/**
 * Serializes discovery payloads to Server-Sent Events. The two heartbeat sentinels are
 * serialized once at class load and cached, since they are emitted on every keep-alive.
 */
public class StreamingUtils {
    private static final Logger logger = LoggerFactory.getLogger(StreamingUtils.class);
    // mapper cached as static final; ignores unknown properties on reads
    private static final ObjectMapper mapper = new ObjectMapper().configure(
        DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    // Pre-serialized heartbeat events; remain empty only if serialization failed at init.
    private static volatile Optional<ServerSentEvent> cachedSchedInfoHbEvent = Optional.empty();
    private static volatile Optional<ServerSentEvent> cachedJobClusterInfoHbEvent = Optional.empty();

    static {
        try {
            // ServerSentEvent.create never returns null for a non-null string, so Optional.of is safe
            cachedJobClusterInfoHbEvent = Optional.of(ServerSentEvent.create(mapper.writeValueAsString(
                JOB_CLUSTER_INFO_HB_INSTANCE)));
            cachedSchedInfoHbEvent = Optional.of(ServerSentEvent.create(mapper.writeValueAsString(
                SCHED_INFO_HB_INSTANCE)));
        } catch (JsonProcessingException e) {
            logger.error("Failed to cache serialized Heartbeat event", e);
        }
    }

    // utility class; not meant to be instantiated
    private StreamingUtils() {}

    /**
     * Serializes a JobSchedulingInfo to an SSE, returning the cached event for
     * heartbeat instances. Empty on serialization failure.
     */
    public static Optional<ServerSentEvent> from(final JobSchedulingInfo jsi) {
        try {
            if (jsi.getJobId().equals(JobSchedulingInfo.HB_JobId) && cachedSchedInfoHbEvent.isPresent()) {
                return cachedSchedInfoHbEvent;
            }
            return Optional.of(ServerSentEvent.create(mapper.writeValueAsString(jsi)));
        } catch (JsonProcessingException e) {
            logger.warn("failed to serialize Job Scheduling Info {}", jsi);
        }
        return Optional.empty();
    }

    /**
     * Serializes a JobClusterInfo to an SSE, returning the cached event for
     * heartbeat instances (recognized by the sentinel name). Empty on serialization failure.
     */
    public static Optional<ServerSentEvent> from(final JobClusterInfo jci) {
        try {
            if (jci.getName().equals(JobSchedulingInfo.HB_JobId) && cachedJobClusterInfoHbEvent.isPresent()) {
                return cachedJobClusterInfoHbEvent;
            }
            return Optional.of(ServerSentEvent.create(mapper.writeValueAsString(jci)));
        } catch (JsonProcessingException e) {
            logger.warn("failed to serialize Job Cluster Info {}", jci);
        }
        return Optional.empty();
    }
}
4,441
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/utils/QueryParamUtils.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.utils;

import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;

/**
 * Helpers for extracting typed values from raw HTTP query-parameter multimaps
 * (param name -> list of values). Single-value helpers use the first value only.
 */
public class QueryParamUtils {
    private static final Logger logger = LoggerFactory.getLogger(QueryParamUtils.class);

    // utility class; not meant to be instantiated
    private QueryParamUtils() {}

    /** Returns the first value for {@code key}, or empty if absent or the value list is empty. */
    public static Optional<String> paramValue(final Map<String, List<String>> params, final String key) {
        List<String> values = params.get(key);
        return Optional.ofNullable(values)
            .filter(vs -> vs.size() > 0)
            .map(x -> x.get(0));
    }

    /** Returns the first value for {@code key} parsed as an int; empty if absent or unparseable. */
    public static Optional<Integer> paramValueAsInt(final Map<String, List<String>> params, final String key) {
        List<String> values = params.get(key);
        return Optional.ofNullable(values)
            .filter(vs -> vs.size() > 0)
            .map(x -> {
                try {
                    return Integer.parseInt(x.get(0));
                } catch (NumberFormatException e) {
                    // unparseable value maps to empty rather than propagating
                    return null;
                }
            });
    }

    /**
     * Returns the first value for {@code key} parsed as a boolean; empty if absent.
     * Boolean.valueOf never throws, so any non-"true" string simply yields false.
     */
    public static Optional<Boolean> paramValueAsBool(final Map<String, List<String>> params, final String key) {
        List<String> values = params.get(key);
        return Optional.ofNullable(values)
            .filter(vs -> vs.size() > 0)
            .map(x -> Boolean.valueOf(x.get(0)));
    }

    /**
     * Parses every value for {@code key} as an int.
     * NOTE(review): unparseable entries currently yield null elements in the returned
     * list (pre-existing behavior, preserved) — confirm callers tolerate nulls.
     */
    public static List<Integer> paramValuesAsInt(final Map<String, List<String>> params, final String key) {
        List<String> values = params.get(key);
        if (values == null) {
            return Collections.emptyList();
        } else {
            return values.stream().map(s -> {
                try {
                    return Integer.parseInt(s);
                } catch (NumberFormatException e) {
                    return null;
                }
            }).collect(Collectors.toList());
        }
    }

    /**
     * Parses every value for {@code key} as a WorkerState.MetaState, de-duplicated.
     * Order of the returned list is not guaranteed (values pass through a Set).
     */
    public static List<WorkerState.MetaState> paramValuesAsMetaState(final Map<String, List<String>> params, final String key) {
        List<String> values = params.get(key);
        if (values != null) {
            return new ArrayList<>(values.stream()
                .map(s -> WorkerState.MetaState.valueOf(s))
                .collect(Collectors.toSet()));
        } else {
            return Collections.emptyList();
        }
    }
}
4,442
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobClusterRouteHandler.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.handlers;

import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;

import java.util.concurrent.CompletionStage;

/**
 * Asynchronous handler for job-cluster lifecycle and query operations exposed
 * by the master's API routes. Each method takes a JobClusterManagerProto request
 * and completes with the corresponding response.
 */
public interface JobClusterRouteHandler {
    /** Creates a new job cluster. */
    CompletionStage<JobClusterManagerProto.CreateJobClusterResponse> create(final JobClusterManagerProto.CreateJobClusterRequest request);

    /** Updates an existing job cluster's definition. */
    CompletionStage<JobClusterManagerProto.UpdateJobClusterResponse> update(final JobClusterManagerProto.UpdateJobClusterRequest request);

    /** Deletes a job cluster. */
    CompletionStage<JobClusterManagerProto.DeleteJobClusterResponse> delete(final JobClusterManagerProto.DeleteJobClusterRequest request);

    /** Disables a job cluster. */
    CompletionStage<JobClusterManagerProto.DisableJobClusterResponse> disable(final JobClusterManagerProto.DisableJobClusterRequest request);

    /** Enables a previously disabled job cluster. */
    CompletionStage<JobClusterManagerProto.EnableJobClusterResponse> enable(final JobClusterManagerProto.EnableJobClusterRequest request);

    /** Updates the artifact associated with a job cluster. */
    CompletionStage<JobClusterManagerProto.UpdateJobClusterArtifactResponse> updateArtifact(final JobClusterManagerProto.UpdateJobClusterArtifactRequest request);

    /** Updates a job cluster's SLA settings. */
    CompletionStage<JobClusterManagerProto.UpdateJobClusterSLAResponse> updateSLA(final JobClusterManagerProto.UpdateJobClusterSLARequest request);

    /** Updates a job cluster's worker-migration strategy. */
    CompletionStage<JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse> updateWorkerMigrateStrategy(final JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest request);

    /** Updates a job cluster's labels. */
    CompletionStage<JobClusterManagerProto.UpdateJobClusterLabelsResponse> updateLabels(final JobClusterManagerProto.UpdateJobClusterLabelsRequest request);

    /** Submits a new job to a job cluster. */
    CompletionStage<JobClusterManagerProto.SubmitJobResponse> submit(final JobClusterManagerProto.SubmitJobRequest request);

    /** Fetches details for a single job cluster. */
    CompletionStage<JobClusterManagerProto.GetJobClusterResponse> getJobClusterDetails(final JobClusterManagerProto.GetJobClusterRequest request);

    /** Fetches the latest job discovery info for a cluster. */
    CompletionStage<JobClusterManagerProto.GetLatestJobDiscoveryInfoResponse> getLatestJobDiscoveryInfo(final JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest request);

    /** Lists all known job clusters. */
    CompletionStage<JobClusterManagerProto.ListJobClustersResponse> getAllJobClusters(final JobClusterManagerProto.ListJobClustersRequest request);
}
4,443
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobClusterRouteHandlerAkkaImpl.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.handlers;

import akka.actor.ActorRef;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.server.master.config.ConfigurationProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.util.Optional;
import java.util.concurrent.CompletionStage;

import static akka.pattern.PatternsCS.ask;

/**
 * {@link JobClusterRouteHandler} implementation that forwards every request to
 * the JobClustersManager actor via the Akka ask pattern and casts the reply to
 * the expected response type. Each call times out after the configured master
 * API ask timeout (default 1000 ms).
 */
public class JobClusterRouteHandlerAkkaImpl implements JobClusterRouteHandler {
    private static final Logger logger = LoggerFactory.getLogger(JobClusterRouteHandlerAkkaImpl.class);
    private final ActorRef jobClustersManagerActor;
    private final Counter allJobClustersGET;
    private final Duration timeout;

    public JobClusterRouteHandlerAkkaImpl(ActorRef jobClusterManagerActor) {
        this.jobClustersManagerActor = jobClusterManagerActor;
        // Fall back to 1000 ms when no ask timeout is configured.
        long askTimeoutMs = Optional.ofNullable(ConfigurationProvider.getConfig().getMasterApiAskTimeoutMs()).orElse(1000L);
        this.timeout = Duration.ofMillis(askTimeoutMs);
        Metrics registered = MetricsRegistry.getInstance().registerAndGet(
            new Metrics.Builder()
                .id("JobClusterRouteHandler")
                .addCounter("allJobClustersGET")
                .build());
        this.allJobClustersGET = registered.getCounter("allJobClustersGET");
    }

    @Override
    public CompletionStage<JobClusterManagerProto.CreateJobClusterResponse> create(final JobClusterManagerProto.CreateJobClusterRequest request) {
        return ask(jobClustersManagerActor, request, timeout)
            .thenApply(JobClusterManagerProto.CreateJobClusterResponse.class::cast);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.UpdateJobClusterResponse> update(JobClusterManagerProto.UpdateJobClusterRequest request) {
        return ask(jobClustersManagerActor, request, timeout)
            .thenApply(JobClusterManagerProto.UpdateJobClusterResponse.class::cast);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.DeleteJobClusterResponse> delete(JobClusterManagerProto.DeleteJobClusterRequest request) {
        return ask(jobClustersManagerActor, request, timeout)
            .thenApply(JobClusterManagerProto.DeleteJobClusterResponse.class::cast);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.DisableJobClusterResponse> disable(JobClusterManagerProto.DisableJobClusterRequest request) {
        return ask(jobClustersManagerActor, request, timeout)
            .thenApply(JobClusterManagerProto.DisableJobClusterResponse.class::cast);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.EnableJobClusterResponse> enable(JobClusterManagerProto.EnableJobClusterRequest request) {
        return ask(jobClustersManagerActor, request, timeout)
            .thenApply(JobClusterManagerProto.EnableJobClusterResponse.class::cast);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.UpdateJobClusterArtifactResponse> updateArtifact(JobClusterManagerProto.UpdateJobClusterArtifactRequest request) {
        return ask(jobClustersManagerActor, request, timeout)
            .thenApply(JobClusterManagerProto.UpdateJobClusterArtifactResponse.class::cast);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.UpdateJobClusterSLAResponse> updateSLA(JobClusterManagerProto.UpdateJobClusterSLARequest request) {
        return ask(jobClustersManagerActor, request, timeout)
            .thenApply(JobClusterManagerProto.UpdateJobClusterSLAResponse.class::cast);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse> updateWorkerMigrateStrategy(JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest request) {
        return ask(jobClustersManagerActor, request, timeout)
            .thenApply(JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse.class::cast);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.UpdateJobClusterLabelsResponse> updateLabels(JobClusterManagerProto.UpdateJobClusterLabelsRequest request) {
        return ask(jobClustersManagerActor, request, timeout)
            .thenApply(JobClusterManagerProto.UpdateJobClusterLabelsResponse.class::cast);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.SubmitJobResponse> submit(JobClusterManagerProto.SubmitJobRequest request) {
        return ask(jobClustersManagerActor, request, timeout)
            .thenApply(JobClusterManagerProto.SubmitJobResponse.class::cast);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.GetJobClusterResponse> getJobClusterDetails(JobClusterManagerProto.GetJobClusterRequest request) {
        return ask(jobClustersManagerActor, request, timeout)
            .thenApply(JobClusterManagerProto.GetJobClusterResponse.class::cast);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.ListJobClustersResponse> getAllJobClusters(JobClusterManagerProto.ListJobClustersRequest request) {
        // Counted separately because listing all clusters is the heaviest read path.
        allJobClustersGET.increment();
        return ask(jobClustersManagerActor, request, timeout)
            .thenApply(JobClusterManagerProto.ListJobClustersResponse.class::cast);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.GetLatestJobDiscoveryInfoResponse> getLatestJobDiscoveryInfo(JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest request) {
        return ask(jobClustersManagerActor, request, timeout)
            .thenApply(JobClusterManagerProto.GetLatestJobDiscoveryInfoResponse.class::cast);
    }
}
4,444
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobStatusRouteHandlerAkkaImpl.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.handlers;

import akka.NotUsed;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.http.javadsl.model.ws.Message;
import akka.http.scaladsl.model.ws.TextMessage;
import akka.stream.OverflowStrategy;
import akka.stream.javadsl.Flow;
import akka.stream.javadsl.Sink;
import akka.stream.javadsl.Source;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.api.akka.route.proto.JobStatus;
import io.mantisrx.master.events.JobStatusConnectedWSActor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.atomic.AtomicLong;

/**
 * {@link JobStatusRouteHandler} implementation that bridges job status events
 * to a WebSocket connection via a per-connection {@link JobStatusConnectedWSActor}.
 */
public class JobStatusRouteHandlerAkkaImpl implements JobStatusRouteHandler {
    private static final Logger logger = LoggerFactory.getLogger(JobStatusRouteHandlerAkkaImpl.class);
    // Monotonic suffix to guarantee unique actor names. A timestamp alone is not
    // unique: two WS connections for the same jobId in the same millisecond would
    // collide and actorOf would throw InvalidActorNameException.
    private static final AtomicLong CONNECTION_COUNTER = new AtomicLong();
    private final ActorRef statusEventBrokerActor;
    private final ActorSystem actorSystem;

    public JobStatusRouteHandlerAkkaImpl(final ActorSystem actorSystem,
                                         final ActorRef statusEventBrokerActor) {
        this.actorSystem = actorSystem;
        this.statusEventBrokerActor = statusEventBrokerActor;
    }

    /**
     * Based on https://markatta.com/codemonkey/blog/2016/04/18/chat-with-akka-http-websockets/
     * @param jobId job for which job status is requested
     * @return a flow that ignores the incoming messages from the WS client, and
     * creates a akka Source to emit a stream of JobStatus messages to the WS client
     */
    @Override
    public Flow<Message, Message, NotUsed> jobStatus(final String jobId) {
        // NOTE(review): assumes jobId only contains characters legal in actor names — confirm upstream validation.
        ActorRef jobStatusConnectedWSActor =
            actorSystem.actorOf(JobStatusConnectedWSActor.props(jobId, statusEventBrokerActor),
                "JobStatusConnectedWSActor-" + jobId + "-" + System.currentTimeMillis()
                    + "-" + CONNECTION_COUNTER.incrementAndGet());

        // Messages from the WS client carry no meaning for this stream; drop them.
        Sink<Message, NotUsed> incomingMessagesIgnored = Flow.<Message>create().to(Sink.ignore());

        // Buffer up to 100 JobStatus events, dropping the oldest on overflow, and
        // hand the materialized actor to the connection actor so it can push events.
        Source<Message, NotUsed> backToWebSocket =
            Source.<JobStatus>actorRef(100, OverflowStrategy.dropHead())
                .mapMaterializedValue((ActorRef outgoingActor) -> {
                    jobStatusConnectedWSActor.tell(
                        new JobStatusConnectedWSActor.Connected(outgoingActor),
                        ActorRef.noSender()
                    );
                    return NotUsed.getInstance();
                })
                .map(js -> new TextMessage.Strict(Jackson.toJson(js)));

        return Flow.fromSinkAndSource(incomingMessagesIgnored, backToWebSocket);
    }
}
4,445
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobDiscoveryRouteHandler.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.handlers;

import io.mantisrx.master.api.akka.route.proto.JobDiscoveryRouteProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;

import java.util.concurrent.CompletionStage;

import static io.mantisrx.master.api.akka.route.proto.JobDiscoveryRouteProto.SchedInfoResponse;

/**
 * Asynchronous handler for job-discovery streaming routes: per-job scheduling
 * info streams and per-cluster last-submitted-jobId streams.
 */
public interface JobDiscoveryRouteHandler {
    /**
     * Returns a response wrapping a stream of scheduling info updates for the requested job.
     *
     * @param request        identifies the job whose scheduling info stream is wanted
     * @param sendHeartbeats when true, periodic heartbeat elements are merged into the
     *                       stream to keep idle connections alive
     */
    CompletionStage<SchedInfoResponse> schedulingInfoStream(final JobClusterManagerProto.GetJobSchedInfoRequest request,
                                                            final boolean sendHeartbeats);

    /**
     * Returns a response wrapping a stream of last-submitted jobId updates for the requested cluster.
     *
     * @param request        identifies the job cluster
     * @param sendHeartbeats when true, periodic heartbeat elements are merged into the
     *                       stream to keep idle connections alive
     */
    CompletionStage<JobDiscoveryRouteProto.JobClusterInfoResponse> lastSubmittedJobIdStream(final JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest request,
                                                                                           final boolean sendHeartbeats);
}
4,446
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobDiscoveryRouteHandlerAkkaImpl.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.handlers;

import akka.actor.ActorRef;
import com.github.benmanes.caffeine.cache.AsyncLoadingCache;
import com.github.benmanes.caffeine.cache.Caffeine;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.master.api.akka.route.proto.JobClusterInfo;
import io.mantisrx.master.api.akka.route.proto.JobDiscoveryRouteProto;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobSchedInfoRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobSchedInfoResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamResponse;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.domain.JobId;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.subjects.BehaviorSubject;

import java.time.Duration;
import java.util.HashMap;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

import static akka.pattern.PatternsCS.ask;
import static io.mantisrx.master.api.akka.route.utils.JobDiscoveryHeartbeats.JOB_CLUSTER_INFO_HB_INSTANCE;
import static io.mantisrx.master.api.akka.route.utils.JobDiscoveryHeartbeats.SCHED_INFO_HB_INSTANCE;

/**
 * {@link JobDiscoveryRouteHandler} implementation that asks the JobClustersManager
 * actor for scheduling-info and last-submitted-jobId streams, caching responses
 * briefly (5s) to shield the actor from bursts of identical requests, and merging
 * heartbeat elements into the returned streams so idle SSE connections stay open.
 */
public class JobDiscoveryRouteHandlerAkkaImpl implements JobDiscoveryRouteHandler {
    private static final Logger logger = LoggerFactory.getLogger(JobDiscoveryRouteHandlerAkkaImpl.class);
    private final ActorRef jobClustersManagerActor;
    private final Duration askTimeout;
    // We want to heartbeat at least once before the idle conn timeout to keep the SSE stream conn alive
    private final Duration serverIdleConnectionTimeout;
    private final Counter schedInfoStreamErrors;
    private final Counter lastSubmittedJobIdStreamErrors;
    private final AsyncLoadingCache<GetJobSchedInfoRequest, GetJobSchedInfoResponse> schedInfoCache;
    private final AsyncLoadingCache<GetLastSubmittedJobIdStreamRequest, GetLastSubmittedJobIdStreamResponse> lastSubmittedJobIdStreamRespCache;

    public JobDiscoveryRouteHandlerAkkaImpl(ActorRef jobClustersManagerActor, Duration serverIdleTimeout) {
        this.jobClustersManagerActor = jobClustersManagerActor;
        // Fall back to 1000 ms when no ask timeout is configured.
        long timeoutMs = Optional.ofNullable(ConfigurationProvider.getConfig().getMasterApiAskTimeoutMs()).orElse(1000L);
        this.askTimeout = Duration.ofMillis(timeoutMs);
        this.serverIdleConnectionTimeout = serverIdleTimeout;
        // Short-lived caches so repeated stream requests for the same job/cluster
        // don't each round-trip to the actor.
        schedInfoCache = Caffeine.newBuilder()
            .expireAfterWrite(5, TimeUnit.SECONDS)
            .maximumSize(500)
            .buildAsync(this::jobSchedInfo);
        lastSubmittedJobIdStreamRespCache = Caffeine.newBuilder()
            .expireAfterWrite(5, TimeUnit.SECONDS)
            .maximumSize(500)
            .buildAsync(this::lastSubmittedJobId);
        Metrics m = new Metrics.Builder()
            .id("JobDiscoveryRouteHandlerAkkaImpl")
            .addCounter("schedInfoStreamErrors")
            .addCounter("lastSubmittedJobIdStreamErrors")
            .build();
        // FIX: register with the central registry so these counters are published;
        // previously the Metrics object was built but never registered, unlike the
        // other route handlers (e.g. JobClusterRouteHandlerAkkaImpl).
        Metrics metrics = MetricsRegistry.getInstance().registerAndGet(m);
        this.schedInfoStreamErrors = metrics.getCounter("schedInfoStreamErrors");
        this.lastSubmittedJobIdStreamErrors = metrics.getCounter("lastSubmittedJobIdStreamErrors");
    }

    /** Cache loader: ask the JobClustersManager actor for a job's scheduling-info subject. */
    private CompletableFuture<GetJobSchedInfoResponse> jobSchedInfo(final GetJobSchedInfoRequest request, Executor executor) {
        return ask(jobClustersManagerActor, request, askTimeout)
            .thenApply(GetJobSchedInfoResponse.class::cast)
            .toCompletableFuture();
    }

    @Override
    public CompletionStage<JobDiscoveryRouteProto.SchedInfoResponse> schedulingInfoStream(final GetJobSchedInfoRequest request,
                                                                                          final boolean sendHeartbeats) {
        CompletionStage<GetJobSchedInfoResponse> response = schedInfoCache.get(request);
        try {
            AtomicBoolean isJobCompleted = new AtomicBoolean(false);
            final String jobId = request.getJobId().getId();
            // Sentinel emitted as a heartbeat after job completion: an empty worker
            // assignment map signals "no workers to connect to".
            final JobSchedulingInfo completedJobSchedulingInfo = new JobSchedulingInfo(jobId, new HashMap<>());
            CompletionStage<JobDiscoveryRouteProto.SchedInfoResponse> jobSchedInfoObsCS = response
                .thenApply(getJobSchedInfoResp -> {
                    Optional<BehaviorSubject<JobSchedulingInfo>> jobStatusSubjectO = getJobSchedInfoResp.getJobSchedInfoSubject();
                    if (getJobSchedInfoResp.responseCode.equals(BaseResponse.ResponseCode.SUCCESS) && jobStatusSubjectO.isPresent()) {
                        BehaviorSubject<JobSchedulingInfo> jobSchedulingInfoObs = jobStatusSubjectO.get();
                        // Heartbeat more often than the idle connection timeout so the
                        // connection is never closed for inactivity.
                        Observable<JobSchedulingInfo> heartbeats =
                            Observable.interval(5, serverIdleConnectionTimeout.getSeconds() - 1, TimeUnit.SECONDS)
                                .map(x -> {
                                    if (!isJobCompleted.get()) {
                                        return SCHED_INFO_HB_INSTANCE;
                                    } else {
                                        return completedJobSchedulingInfo;
                                    }
                                })
                                .takeWhile(x -> sendHeartbeats);
                        // Job SchedulingInfo obs completes on job shutdown. Use the do On completed as a signal to inform the user that there are no workers to connect to.
                        // TODO For future a more explicit key in the payload saying the job is completed.
                        Observable<JobSchedulingInfo> jobSchedulingInfoWithHBObs =
                            Observable.merge(jobSchedulingInfoObs.doOnCompleted(() -> isJobCompleted.set(true)), heartbeats);
                        return new JobDiscoveryRouteProto.SchedInfoResponse(
                            getJobSchedInfoResp.requestId,
                            getJobSchedInfoResp.responseCode,
                            getJobSchedInfoResp.message,
                            jobSchedulingInfoWithHBObs
                        );
                    } else {
                        logger.info("Failed to get Sched info stream for {}", request.getJobId().getId());
                        schedInfoStreamErrors.increment();
                        return new JobDiscoveryRouteProto.SchedInfoResponse(
                            getJobSchedInfoResp.requestId,
                            getJobSchedInfoResp.responseCode,
                            getJobSchedInfoResp.message
                        );
                    }
                });
            return jobSchedInfoObsCS;
        } catch (Exception e) {
            logger.error("caught exception fetching sched info stream for {}", request.getJobId().getId(), e);
            schedInfoStreamErrors.increment();
            return CompletableFuture.completedFuture(new JobDiscoveryRouteProto.SchedInfoResponse(
                0,
                BaseResponse.ResponseCode.SERVER_ERROR,
                "Failed to get SchedulingInfo stream for jobId " + request.getJobId().getId() + " error: " + e.getMessage()
            ));
        }
    }

    /** Cache loader: ask the JobClustersManager actor for a cluster's last-submitted-jobId subject. */
    private CompletableFuture<GetLastSubmittedJobIdStreamResponse> lastSubmittedJobId(final GetLastSubmittedJobIdStreamRequest request, Executor executor) {
        return ask(jobClustersManagerActor, request, askTimeout)
            .thenApply(GetLastSubmittedJobIdStreamResponse.class::cast)
            .toCompletableFuture();
    }

    @Override
    public CompletionStage<JobDiscoveryRouteProto.JobClusterInfoResponse> lastSubmittedJobIdStream(final GetLastSubmittedJobIdStreamRequest request,
                                                                                                   final boolean sendHeartbeats) {
        CompletionStage<GetLastSubmittedJobIdStreamResponse> response = lastSubmittedJobIdStreamRespCache.get(request);
        try {
            return response
                .thenApply(lastSubmittedJobIdResp -> {
                    Optional<BehaviorSubject<JobId>> jobIdSubjectO = lastSubmittedJobIdResp.getjobIdBehaviorSubject();
                    if (lastSubmittedJobIdResp.responseCode.equals(BaseResponse.ResponseCode.SUCCESS) && jobIdSubjectO.isPresent()) {
                        Observable<JobClusterInfo> jobClusterInfoObs =
                            jobIdSubjectO.get().map(jobId -> new JobClusterInfo(jobId.getCluster(), jobId.getId()));
                        Observable<JobClusterInfo> heartbeats =
                            Observable.interval(5, serverIdleConnectionTimeout.getSeconds() - 1, TimeUnit.SECONDS)
                                .map(x -> JOB_CLUSTER_INFO_HB_INSTANCE)
                                .takeWhile(x -> sendHeartbeats);
                        Observable<JobClusterInfo> jobClusterInfoWithHB = Observable.merge(jobClusterInfoObs, heartbeats);
                        return new JobDiscoveryRouteProto.JobClusterInfoResponse(
                            lastSubmittedJobIdResp.requestId,
                            lastSubmittedJobIdResp.responseCode,
                            lastSubmittedJobIdResp.message,
                            jobClusterInfoWithHB
                        );
                    } else {
                        logger.info("Failed to get lastSubmittedJobId stream for job cluster {}", request.getClusterName());
                        lastSubmittedJobIdStreamErrors.increment();
                        return new JobDiscoveryRouteProto.JobClusterInfoResponse(
                            lastSubmittedJobIdResp.requestId,
                            lastSubmittedJobIdResp.responseCode,
                            lastSubmittedJobIdResp.message
                        );
                    }
                });
        } catch (Exception e) {
            logger.error("caught exception fetching lastSubmittedJobId stream for {}", request.getClusterName(), e);
            lastSubmittedJobIdStreamErrors.increment();
            return CompletableFuture.completedFuture(new JobDiscoveryRouteProto.JobClusterInfoResponse(
                0,
                BaseResponse.ResponseCode.SERVER_ERROR,
                "Failed to get last submitted jobId stream for " + request.getClusterName() + " error: " + e.getMessage()
            ));
        }
    }
}
4,447
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobRouteHandler.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.handlers;

import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.server.master.scheduler.WorkerEvent;

import java.util.concurrent.CompletionStage;

/**
 * Asynchronous handler for job-level operations exposed by the master's API
 * routes: killing jobs, resubmitting workers, scaling stages, and job/worker queries.
 */
public interface JobRouteHandler {
    /** Kills the specified job. */
    CompletionStage<JobClusterManagerProto.KillJobResponse> kill(final JobClusterManagerProto.KillJobRequest request);

    /** Resubmits a single worker of a running job. */
    CompletionStage<JobClusterManagerProto.ResubmitWorkerResponse> resubmitWorker(final JobClusterManagerProto.ResubmitWorkerRequest request);

    /** Scales a job stage to a new worker count. */
    CompletionStage<JobClusterManagerProto.ScaleStageResponse> scaleStage(final JobClusterManagerProto.ScaleStageRequest request);

    /** Forwards a worker status event for processing. */
    CompletionStage<BaseResponse> workerStatus(final WorkerEvent event);
    //TODO CompletionStage<JobClusterManagerProto.ScaleStageResponse> updateScalingPolicy(final JobClusterManagerProto.Update request);

    /** Fetches details for a single job. */
    CompletionStage<JobClusterManagerProto.GetJobDetailsResponse> getJobDetails(final JobClusterManagerProto.GetJobDetailsRequest request);

    /** Lists jobs matching the request's filters. */
    CompletionStage<JobClusterManagerProto.ListJobsResponse> listJobs(final JobClusterManagerProto.ListJobsRequest request);

    /** Lists job IDs matching the request's filters. */
    CompletionStage<JobClusterManagerProto.ListJobIdsResponse> listJobIds(final JobClusterManagerProto.ListJobIdsRequest request);

    /** Lists archived (no longer running) workers of a job. */
    CompletionStage<JobClusterManagerProto.ListArchivedWorkersResponse> listArchivedWorkers(final JobClusterManagerProto.ListArchivedWorkersRequest request);
}
4,448
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobStatusRouteHandler.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.handlers;

import akka.NotUsed;
import akka.http.javadsl.model.ws.Message;
import akka.stream.javadsl.Flow;

/**
 * Handler for the job status WebSocket route.
 */
public interface JobStatusRouteHandler {
    /**
     * Builds the WebSocket message flow for streaming status updates of a job.
     *
     * @param jobId job whose status updates should be streamed
     * @return a flow whose outgoing side emits job status messages to the WS client
     */
    Flow<Message, Message, NotUsed> jobStatus(final String jobId);
}
4,449
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/api/akka/route/handlers/JobRouteHandlerAkkaImpl.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.handlers;

import akka.actor.ActorRef;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;

import static akka.pattern.PatternsCS.ask;

/**
 * {@link JobRouteHandler} implementation that forwards each request to the
 * JobClustersManager actor via the Akka ask pattern and casts the reply to the
 * expected response type. Worker status events are fire-and-forget (tell).
 */
public class JobRouteHandlerAkkaImpl implements JobRouteHandler {
    private static final Logger logger = LoggerFactory.getLogger(JobRouteHandlerAkkaImpl.class);
    private final ActorRef jobClustersManagerActor;
    private final Counter listAllJobs;
    private final Counter listJobIds;
    private final Counter listArchivedWorkers;
    private final Duration timeout;

    public JobRouteHandlerAkkaImpl(ActorRef jobClusterManagerActor) {
        this.jobClustersManagerActor = jobClusterManagerActor;
        // Fall back to 1000 ms when no ask timeout is configured.
        long askTimeoutMs = Optional.ofNullable(ConfigurationProvider.getConfig().getMasterApiAskTimeoutMs()).orElse(1000L);
        this.timeout = Duration.ofMillis(askTimeoutMs);
        Metrics metrics = MetricsRegistry.getInstance().registerAndGet(
            new Metrics.Builder()
                .id("JobRouteHandler")
                .addCounter("listAllJobs")
                .addCounter("listJobIds")
                .addCounter("listArchivedWorkers")
                .build());
        this.listAllJobs = metrics.getCounter("listAllJobs");
        this.listJobIds = metrics.getCounter("listJobIds");
        this.listArchivedWorkers = metrics.getCounter("listArchivedWorkers");
    }

    /** Asks the JobClustersManager actor and casts the reply to the given response type. */
    private <T> CompletionStage<T> askManager(Object request, Class<T> responseType) {
        return ask(jobClustersManagerActor, request, timeout).thenApply(responseType::cast);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.KillJobResponse> kill(JobClusterManagerProto.KillJobRequest request) {
        return askManager(request, JobClusterManagerProto.KillJobResponse.class);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.ResubmitWorkerResponse> resubmitWorker(JobClusterManagerProto.ResubmitWorkerRequest request) {
        return askManager(request, JobClusterManagerProto.ResubmitWorkerResponse.class);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.ScaleStageResponse> scaleStage(JobClusterManagerProto.ScaleStageRequest request) {
        return askManager(request, JobClusterManagerProto.ScaleStageResponse.class);
    }

    @Override
    public CompletionStage<BaseResponse> workerStatus(final WorkerEvent request) {
        // Fire-and-forget: no reply is awaited from the actor for worker events.
        jobClustersManagerActor.tell(request, ActorRef.noSender());
        return CompletableFuture.completedFuture(
            new BaseResponse(0L, BaseResponse.ResponseCode.SUCCESS, "forwarded worker status"));
    }

    @Override
    public CompletionStage<JobClusterManagerProto.GetJobDetailsResponse> getJobDetails(final JobClusterManagerProto.GetJobDetailsRequest request) {
        return askManager(request, JobClusterManagerProto.GetJobDetailsResponse.class);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.ListJobsResponse> listJobs(JobClusterManagerProto.ListJobsRequest request) {
        logger.debug("request {}", request);
        listAllJobs.increment();
        return askManager(request, JobClusterManagerProto.ListJobsResponse.class);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.ListJobIdsResponse> listJobIds(JobClusterManagerProto.ListJobIdsRequest request) {
        logger.debug("request {}", request);
        listJobIds.increment();
        return askManager(request, JobClusterManagerProto.ListJobIdsResponse.class);
    }

    @Override
    public CompletionStage<JobClusterManagerProto.ListArchivedWorkersResponse> listArchivedWorkers(JobClusterManagerProto.ListArchivedWorkersRequest request) {
        listArchivedWorkers.increment();
        return askManager(request, JobClusterManagerProto.ListArchivedWorkersResponse.class);
    }
}
4,450
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/WorkerRegistryV2.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.events; import akka.actor.Props; import io.mantisrx.master.jobcluster.job.JobState; import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata; import io.mantisrx.master.jobcluster.job.worker.WorkerState; import io.mantisrx.server.core.domain.WorkerId; import io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.scheduler.WorkerRegistry; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.*; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.stream.Collectors; import static java.util.stream.Collectors.toMap; /** * This Actor holds a registry of all running workers for all jobs in the system. * The Job Actor sends a message with a complete snapshot of running workers to the LifeCycleEventPublisher * The LifeCycleEventPublisher then forwards them to this Actor. 
*/ public class WorkerRegistryV2 implements WorkerRegistry, WorkerEventSubscriber { private final Logger logger = LoggerFactory.getLogger(WorkerRegistryV2.class); private final ConcurrentMap<JobId, List<IMantisWorkerMetadata>> jobToWorkerInfoMap = new ConcurrentHashMap<>(); public static final WorkerRegistryV2 INSTANCE = new WorkerRegistryV2(); public static Props props() { return Props.create(WorkerRegistryV2.class); } WorkerRegistryV2() { logger.info("WorkerRegistryV2 created"); } /** * For Testing */ public void clearState() { jobToWorkerInfoMap.clear(); } /** * Iterate through all jobs and addup the worker list size for each * @return */ @Override public int getNumRunningWorkers() { if(logger.isDebugEnabled()) { logger.debug("In getNumRunningWorkers"); } int cnt = jobToWorkerInfoMap.values().stream() .map(workerList -> workerList.stream() .filter(wm -> WorkerState.isRunningState(wm.getState())) .collect(Collectors.toList()) .size() ) .reduce(0,(a, b) -> a + b); if(logger.isDebugEnabled()) { logger.debug("Returning {} from getNumRunningWorkers", cnt); } return cnt; } /** * Return a Set of all running workers in the system * @return */ @Override public Set<WorkerId> getAllRunningWorkers() { return jobToWorkerInfoMap.values().stream() .flatMap(workerList -> workerList.stream() .filter(wm -> WorkerState.isRunningState(wm.getState())) .map(workerMeta -> workerMeta.getWorkerId())) .collect(Collectors.toSet()); } /** * Return a mapping of workerId to slaveID for all running workers in the system * @return */ @Override public Map<WorkerId, String> getAllRunningWorkerSlaveIdMappings() { return jobToWorkerInfoMap.values().stream() .flatMap(workerList -> workerList.stream() .filter(wm -> WorkerState.isRunningState(wm.getState()))) .collect(toMap( IMantisWorkerMetadata::getWorkerId, IMantisWorkerMetadata::getSlaveID, (s1, s2) -> (s1 != null) ? 
s1 : s2)); } /** * Check whether a workerId is valid * @param workerId * @return */ @Override public boolean isWorkerValid(WorkerId workerId) { if(logger.isDebugEnabled()) { logger.debug("In isWorkerValid event {}", workerId); } Optional<JobId> jIdOp = JobId.fromId(workerId.getJobId()); if(!jIdOp.isPresent()) { logger.warn("Invalid job Id {}", workerId.getJobId()); return false; } List<IMantisWorkerMetadata> mantisWorkerMetadataList = jobToWorkerInfoMap.get(jIdOp.get()); // logger.info("Current Map {}", jobToWorkerInfoMap); boolean isValid = false; if(mantisWorkerMetadataList != null) { isValid = mantisWorkerMetadataList.stream().anyMatch((mData) -> mData.getWorkerId().equals(workerId)); } else { logger.warn("No such job {} found in job To worker map ", jIdOp.get()); } return isValid; } /** * Return the accepted At time for the given worker * @param workerId * @return */ @Override public Optional<Long> getAcceptedAt(WorkerId workerId) { if(logger.isDebugEnabled()) { logger.debug("In getAcceptedAt for worker {}", workerId); } Optional<JobId> jId = JobId.fromId(workerId.getJobId()); if(!jId.isPresent()) { return Optional.empty(); } List<IMantisWorkerMetadata> mantisWorkerMetadataList = jobToWorkerInfoMap.get(jId.get()); if(mantisWorkerMetadataList != null) { Optional<IMantisWorkerMetadata> mantisWorkerMetadata = mantisWorkerMetadataList.stream().filter(mData -> mData.getWorkerId().equals(workerId)).findAny(); if (mantisWorkerMetadata.isPresent()) { logger.info("Found worker {} return acceptedAt {}", workerId, mantisWorkerMetadata.get().getAcceptedAt()); return Optional.of(mantisWorkerMetadata.get().getAcceptedAt()); } } return Optional.empty(); } /** * When the worker info subject completes this method is invoked to clean up state. 
* @param jobId * @return */ private boolean deregisterJob(JobId jobId) { logger.info("De-registering {}", jobId); return jobToWorkerInfoMap.remove(jobId) != null; } @Override public void process(LifecycleEventsProto.WorkerListChangedEvent event) { if(logger.isDebugEnabled()) { logger.debug("on WorkerListChangedEvent for job {} with workers {}", event.getWorkerInfoListHolder().getJobId(), event.getWorkerInfoListHolder().getWorkerMetadataList().size()); } JobId jId = event.getWorkerInfoListHolder().getJobId(); jobToWorkerInfoMap.put(jId, event.getWorkerInfoListHolder().getWorkerMetadataList()); } @Override public void process(LifecycleEventsProto.JobStatusEvent statusEvent) { if(logger.isDebugEnabled()) { logger.debug("In JobStatusEvent {}", statusEvent); } JobState jobState = statusEvent.getJobState(); if(JobState.isTerminalState(jobState)) { String jobId = statusEvent.getJobId(); Optional<JobId> optionalJobId = JobId.fromId(jobId); if(optionalJobId.isPresent()) { deregisterJob(optionalJobId.get()); } else { logger.warn("Invalid Job id {} Ignoring terminate event", jobId); } } } }
4,451
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/LifecycleEventsProto.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.events;

import io.mantisrx.master.jobcluster.WorkerInfoListHolder;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.domain.DataFormatAdapter;
import io.mantisrx.server.master.domain.JobId;

import java.util.Optional;

import static java.util.Optional.empty;
import static java.util.Optional.ofNullable;

/**
 * Container for the lifecycle event message types exchanged between the master's
 * actors and the lifecycle event subscribers (audit, status, worker-list events).
 */
public class LifecycleEventsProto {

    /**
     * Immutable audit record describing a significant cluster/job/worker action.
     */
    public static final class AuditEvent {
        public enum AuditEventType {
            // job cluster events
            JOB_CLUSTER_CREATE,
            JOB_CLUSTER_EXISTS,
            JOB_CLUSTER_FAILURE,
            JOB_CLUSTER_UPDATE,
            JOB_CLUSTER_DELETE,
            JOB_CLUSTER_DISABLED,
            JOB_CLUSTER_ENABLED,

            // job events
            JOB_SUBMIT,
            JOB_START,
            JOB_TERMINATE,
            JOB_SHUTDOWN,
            JOB_DELETE,
            JOB_SCALE_UP,
            JOB_SCALE_DOWN,
            JOB_SCALE_UPDATE,
            JOB_FAILURE,

            // worker events
            WORKER_START,
            WORKER_TERMINATE,
            WORKER_RESUBMIT,
            WORKER_RESUBMITS_LIMIT,
            WORKER_STATUS_HB,

            // agent cluster events
            CLUSTER_SCALE_UP,
            CLUSTER_SCALE_DOWN,
            CLUSTER_ACTIVE_VMS,

            // actor events
            JOB_CLUSTER_ACTOR_CREATE,
            JOB_CLUSTER_ACTOR_TERMINATE,
        }

        private final AuditEventType auditEventType;
        // Entity the event applies to (e.g. a job id or cluster name).
        private final String operand;
        // Free-form detail payload.
        private final String data;

        public AuditEvent(AuditEventType auditEventType, String operand, String data) {
            this.auditEventType = auditEventType;
            this.operand = operand;
            this.data = data;
        }

        public AuditEventType getAuditEventType() {
            return auditEventType;
        }

        public String getOperand() {
            return operand;
        }

        public String getData() {
            return data;
        }

        @Override
        public String toString() {
            return "AuditEvent{" +
                    "auditEventType=" + auditEventType +
                    ", operand='" + operand + '\'' +
                    ", data='" + data + '\'' +
                    '}';
        }
    }

    /**
     * Base status event carrying a severity, a message, and a timestamp.
     * Fields are protected so subclasses and {@link #from(StatusEvent)} can read them directly.
     */
    public static class StatusEvent {
        public enum StatusEventType {
            ERROR,
            WARN,
            INFO,
            DEBUG,
            HEARTBEAT
        }

        protected final StatusEventType statusEventType;
        protected final String message;
        protected final long timestamp;

        public StatusEvent(StatusEventType type, String message) {
            // Default the timestamp to event-creation time.
            this(type, message, System.currentTimeMillis());
        }

        public StatusEvent(StatusEventType type, String message, long ts) {
            this.statusEventType = type;
            this.message = message;
            timestamp = ts;
        }

        public StatusEventType getStatusEventType() {
            return statusEventType;
        }

        public String getMessage() {
            return message;
        }

        public long getTimestamp() {
            return timestamp;
        }
    }

    /**
     * Status event scoped to a single worker of a job stage.
     */
    public static final class WorkerStatusEvent extends StatusEvent {
        private final int stageNum;
        private final WorkerId workerId;
        private final WorkerState workerState;
        // Host the worker runs on, when known.
        private final Optional<String> hostName;

        public WorkerStatusEvent(final StatusEventType type,
                                 final String message,
                                 final int stageNum,
                                 final WorkerId workerId,
                                 final WorkerState workerState) {
            super(type, message);
            this.stageNum = stageNum;
            this.workerId = workerId;
            this.workerState = workerState;
            this.hostName = empty();
        }

        public WorkerStatusEvent(final StatusEventType type,
                                 final String message,
                                 final int stageNum,
                                 final WorkerId workerId,
                                 final WorkerState workerState,
                                 final long ts) {
            super(type, message, ts);
            this.stageNum = stageNum;
            this.workerId = workerId;
            this.workerState = workerState;
            this.hostName = empty();
        }

        public WorkerStatusEvent(final StatusEventType type,
                                 final String message,
                                 final int stageNum,
                                 final WorkerId workerId,
                                 final WorkerState workerState,
                                 final String hostName,
                                 final long ts) {
            super(type, message, ts);
            this.stageNum = stageNum;
            this.workerId = workerId;
            this.workerState = workerState;
            this.hostName = ofNullable(hostName);
        }

        public WorkerStatusEvent(final StatusEventType type,
                                 final String message,
                                 final int stageNum,
                                 final WorkerId workerId,
                                 final WorkerState workerState,
                                 final Optional<String> hostName) {
            super(type, message);
            this.stageNum = stageNum;
            this.workerId = workerId;
            this.workerState = workerState;
            this.hostName = hostName;
        }

        public int getStageNum() {
            return stageNum;
        }

        public WorkerId getWorkerId() {
            return workerId;
        }

        public WorkerState getWorkerState() {
            return workerState;
        }

        public Optional<String> getHostName() {
            return this.hostName;
        }

        @Override
        public String toString() {
            return "WorkerStatusEvent{" +
                    "stageNum=" + stageNum +
                    ", workerId=" + workerId +
                    ", workerState=" + workerState +
                    ", hostName=" + hostName.orElse("") +
                    '}';
        }
    }

    /**
     * Status event scoped to a job as a whole.
     */
    public static final class JobStatusEvent extends StatusEvent {
        private final JobId jobId;
        private final JobState jobState;

        public JobStatusEvent(final StatusEventType type,
                              final String message,
                              final JobId jobId,
                              final JobState jobState) {
            super(type, message);
            this.jobId = jobId;
            this.jobState = jobState;
        }

        public String getJobId() {
            return jobId.getId();
        }

        public JobState getJobState() {
            return jobState;
        }

        @Override
        public String toString() {
            return "JobStatusEvent{" +
                    "statusEventType=" + statusEventType +
                    ", message='" + message + '\'' +
                    ", timestamp=" + timestamp +
                    ", jobId='" + jobId + '\'' +
                    ", jobState=" + jobState +
                    '}';
        }
    }

    /**
     * Status event scoped to a job cluster.
     */
    public static final class JobClusterStatusEvent extends StatusEvent {
        private final String jobCluster;

        public JobClusterStatusEvent(final StatusEventType type,
                                     final String message,
                                     final String jobCluster) {
            super(type, message);
            this.jobCluster = jobCluster;
        }

        public String getJobCluster() {
            return jobCluster;
        }

        @Override
        public String toString() {
            return "JobClusterStatusEvent{" +
                    "statusEventType=" + statusEventType +
                    ", message='" + message + '\'' +
                    ", timestamp=" + timestamp +
                    ", jobCluster='" + jobCluster + '\'' +
                    '}';
        }
    }

    /**
     * Translate a lifecycle {@link StatusEvent} into the legacy {@link Status} representation.
     *
     * @param ev the status event (job, job-cluster, worker, or plain)
     * @return a Status with fields filled from the event's subtype; a placeholder
     *         "Invalid" Status if the event carries no job/cluster/worker context
     * @throws IllegalArgumentException if the event type has no Status.TYPE mapping
     */
    public static Status from(final StatusEvent ev) {
        Status.TYPE type;
        switch (ev.statusEventType) {
            case INFO:
                type = Status.TYPE.INFO;
                break;
            case WARN:
                type = Status.TYPE.WARN;
                break;
            case DEBUG:
                type = Status.TYPE.DEBUG;
                break;
            case ERROR:
                type = Status.TYPE.ERROR;
                break;
            case HEARTBEAT:
                type = Status.TYPE.HEARTBEAT;
                break;
            default:
                throw new IllegalArgumentException("status event type cannot be translated to Status Type " + ev.statusEventType.name());
        }
        // Return directly per subtype instead of pre-allocating a dummy Status and overwriting it.
        if (ev instanceof JobStatusEvent) {
            JobStatusEvent jse = (JobStatusEvent) ev;
            return new Status(jse.jobId.getId(), -1, -1, -1, type,
                    jse.getJobId() + " " + jse.message,
                    DataFormatAdapter.convertToMantisJobState(jse.jobState));
        }
        if (ev instanceof JobClusterStatusEvent) {
            JobClusterStatusEvent jcse = (JobClusterStatusEvent) ev;
            return new Status(jcse.jobCluster, -1, -1, -1, type,
                    jcse.getJobCluster() + " " + jcse.message,
                    MantisJobState.Noop);
        }
        if (ev instanceof WorkerStatusEvent) {
            WorkerStatusEvent wse = (WorkerStatusEvent) ev;
            return new Status(wse.workerId.getJobId(), wse.stageNum,
                    wse.workerId.getWorkerIndex(), wse.workerId.getWorkerNum(), type,
                    wse.getWorkerId().getId() + " " + wse.message,
                    DataFormatAdapter.convertWorkerStateToMantisJobState(wse.workerState));
        }
        // Fallback, preserved from the original behavior: a plain StatusEvent yields a
        // DEBUG/"Invalid" placeholder regardless of the computed type.
        return new Status("None", -1, -1, -1, Status.TYPE.DEBUG, "Invalid", MantisJobState.Noop);
    }

    /**
     * Event announcing a full replacement of a job's worker list.
     */
    public static class WorkerListChangedEvent {
        private final WorkerInfoListHolder workerInfoListHolder;

        public WorkerListChangedEvent(WorkerInfoListHolder workerInfoListHolder) {
            this.workerInfoListHolder = workerInfoListHolder;
        }

        public WorkerInfoListHolder getWorkerInfoListHolder() {
            return workerInfoListHolder;
        }

        @Override
        public String toString() {
            return "WorkerListChangedEvent{" +
                    "workerInfoListHolder=" + workerInfoListHolder +
                    '}';
        }
    }
}
4,452
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/LifecycleEventPublisherImpl.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.events; public class LifecycleEventPublisherImpl implements LifecycleEventPublisher { private final AuditEventSubscriber auditEventSubscriber; private final StatusEventSubscriber statusEventSubscriber; private final WorkerEventSubscriber workerEventSubscriber; public LifecycleEventPublisherImpl(final AuditEventSubscriber auditEventSubscriber, final StatusEventSubscriber statusEventSubscriber, final WorkerEventSubscriber workerEventSubscriber) { this.auditEventSubscriber = auditEventSubscriber; this.statusEventSubscriber = statusEventSubscriber; this.workerEventSubscriber = workerEventSubscriber; } @Override public void publishAuditEvent(final LifecycleEventsProto.AuditEvent auditEvent) { auditEventSubscriber.process(auditEvent); } @Override public void publishStatusEvent(final LifecycleEventsProto.StatusEvent statusEvent) { statusEventSubscriber.process(statusEvent); if (statusEvent instanceof LifecycleEventsProto.JobStatusEvent) { LifecycleEventsProto.JobStatusEvent jobStatusEvent = (LifecycleEventsProto.JobStatusEvent) statusEvent; workerEventSubscriber.process(jobStatusEvent); } } @Override public void publishWorkerListChangedEvent(LifecycleEventsProto.WorkerListChangedEvent workerListChangedEvent) { workerEventSubscriber.process(workerListChangedEvent); } }
4,453
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/StatusEventBrokerActor.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.events;

import akka.actor.AbstractActor;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.actor.Terminated;
import akka.dispatch.BoundedMessageQueueSemantics;
import akka.dispatch.RequiresMessageQueue;
import io.mantisrx.shaded.com.google.common.collect.EvictingQueue;
import io.mantisrx.master.api.akka.route.proto.JobStatus;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.server.core.Status;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Set;

/**
 * JobStatus Broker that receives StatusEvents from all actors and demultiplexes to client connections interested in
 * events for a specific jobId
 */
public class StatusEventBrokerActor extends AbstractActor implements RequiresMessageQueue<BoundedMessageQueueSemantics> {
    private final Logger logger = LoggerFactory.getLogger(StatusEventBrokerActor.class);

    // jobId -> set of connection actors subscribed to that job's status stream.
    private final Map<String, Set<ActorRef>> jobIdToActorMap = new HashMap<>();
    // Reverse index, used to clean up subscriptions when a connection actor terminates.
    private final Map<ActorRef, String> actorToJobIdMap = new HashMap<>();
    private final ActorRef agentsErrorMonitorActorRef;
    // Bounded per-job status history, replayed to each new subscriber.
    private final Map<String, EvictingQueue<Status>> jobIdToStatusEventsBuf = new HashMap<>();
    public static final int MAX_STATUS_HISTORY_PER_JOB = 100;

    public static Props props(ActorRef agentsErrorMonitorActorRef) {
        return Props.create(StatusEventBrokerActor.class, agentsErrorMonitorActorRef);
    }

    public StatusEventBrokerActor(ActorRef agentsErrorMonitorActorRef) {
        this.agentsErrorMonitorActorRef = agentsErrorMonitorActorRef;
    }

    /**
     * Request message asking this broker to stream status events for the given jobId to the sender.
     */
    public static class JobStatusRequest {
        private final String jobId;

        public JobStatusRequest(final String jobId) {
            this.jobId = jobId;
        }

        public String getJobId() {
            return jobId;
        }

        @Override
        public boolean equals(final Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            final JobStatusRequest that = (JobStatusRequest) o;
            return Objects.equals(jobId, that.jobId);
        }

        @Override
        public int hashCode() {
            return Objects.hash(jobId);
        }

        @Override
        public String toString() {
            return "JobStatusRequest{" +
                    "jobId='" + jobId + '\'' +
                    '}';
        }
    }

    // Registers the sender as a subscriber for the requested job and replays buffered history.
    private void onJobStatusRequest(final JobStatusRequest jsr) {
        logger.debug("got request {}", jsr);
        ActorRef sender = sender();
        // computeIfAbsent returns the live set, so register in a single step
        // instead of a separate computeIfAbsent + get.
        jobIdToActorMap.computeIfAbsent(jsr.jobId, (jobId) -> new HashSet<>()).add(sender);
        actorToJobIdMap.put(sender, jsr.jobId);
        getContext().watch(sender);

        // replay buffered status events on new connection
        EvictingQueue<Status> statusEventsBuf = jobIdToStatusEventsBuf.get(jsr.jobId);
        if (statusEventsBuf != null) {
            statusEventsBuf.forEach(se -> sender.tell(new JobStatus(se), ActorRef.noSender()));
        }
    }

    // Drops the buffered status history once a job reaches a terminal state.
    private void cleanupIfTerminalState(final LifecycleEventsProto.StatusEvent se) {
        if (se instanceof LifecycleEventsProto.JobStatusEvent) {
            LifecycleEventsProto.JobStatusEvent jse = (LifecycleEventsProto.JobStatusEvent) se;
            if (JobState.isTerminalState(jse.getJobState())) {
                jobIdToStatusEventsBuf.remove(jse.getJobId());
            }
        }
    }

    // sends JobStatus messages to active connections by jobId
    private void onStatusEvent(final LifecycleEventsProto.StatusEvent se) {
        Status status = LifecycleEventsProto.from(se);
        String jobId = status.getJobId();

        // add Status to job event history
        jobIdToStatusEventsBuf
                .computeIfAbsent(jobId, (j) -> EvictingQueue.create(MAX_STATUS_HISTORY_PER_JOB))
                .add(status);
        cleanupIfTerminalState(se);

        Set<ActorRef> jobStatusActiveConnections = jobIdToActorMap.get(jobId);
        if (jobStatusActiveConnections != null && !jobStatusActiveConnections.isEmpty()) {
            logger.debug("Sending job status {}", se);
            jobStatusActiveConnections.forEach(connActor -> connActor.tell(new JobStatus(status), self()));
        } else {
            logger.debug("Job status dropped, no active subscribers for {}", jobId);
        }
        // worker-level events are additionally forwarded to the agents error monitor
        if (se instanceof LifecycleEventsProto.WorkerStatusEvent) {
            this.agentsErrorMonitorActorRef.tell(se, getSelf());
        }
    }

    // Removes a terminated connection actor from both subscription indexes.
    private void onTerminated(final Terminated t) {
        logger.info("actor terminated {}", t);
        ActorRef terminatedActor = t.actor();
        String jobId = actorToJobIdMap.get(terminatedActor);
        if (jobId != null) {
            jobIdToActorMap.get(jobId).remove(terminatedActor);
        }
        actorToJobIdMap.remove(terminatedActor);
    }

    @Override
    public Receive createReceive() {
        return receiveBuilder()
                .match(JobStatusRequest.class, this::onJobStatusRequest)
                .match(LifecycleEventsProto.StatusEvent.class, this::onStatusEvent)
                .match(Terminated.class, this::onTerminated)
                .build();
    }
}
4,454
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/LifecycleEventPublisher.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.events;

/**
 * Entry point for publishing lifecycle events (audit, status, worker-list changes)
 * to the registered subscribers.
 */
public interface LifecycleEventPublisher {
    /** Publishes an audit event describing a cluster/job/worker action. */
    void publishAuditEvent(LifecycleEventsProto.AuditEvent auditEvent);

    /** Publishes a status event (job, job-cluster, worker, or plain). */
    void publishStatusEvent(LifecycleEventsProto.StatusEvent statusEvent);

    /** Publishes a snapshot of a job's replaced worker list. */
    void publishWorkerListChangedEvent(LifecycleEventsProto.WorkerListChangedEvent workerListChangedEvent);
}
4,455
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/WorkerEventSubscriber.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.events;

/**
 * Subscriber for worker-related lifecycle events: worker-list snapshots and the
 * job status events needed to detect job termination.
 */
public interface WorkerEventSubscriber {
    /** Handles a full replacement of a job's worker list. */
    void process(final LifecycleEventsProto.WorkerListChangedEvent event);

    /** Handles a job-level status event (e.g. to clean up on terminal job states). */
    void process(LifecycleEventsProto.JobStatusEvent statusEvent);
}
4,456
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/JobRegistryImpl.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.events;

import io.mantisrx.master.jobcluster.IJobClusterMetadata;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobId;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

/**
 * In-memory {@link JobRegistry} backed by concurrent maps.
 * NOTE(review): several mutators below are empty no-ops and nothing is ever removed from
 * {@code clusterToJobsMap} — presumably this implementation is partial; confirm with callers.
 */
public class JobRegistryImpl implements JobRegistry {

    // cluster name -> cluster metadata
    ConcurrentMap<String, IJobClusterMetadata> jobClusterMap = new ConcurrentHashMap<>();
    // job id -> job metadata
    ConcurrentMap<JobId, IMantisJobMetadata> jobMap = new ConcurrentHashMap<>();
    // cluster name -> set of jobs belonging to that cluster
    ConcurrentMap<String, Set<IMantisJobMetadata>> clusterToJobsMap = new ConcurrentHashMap<>();

    /**
     * Registers (or replaces) each cluster under its definition name.
     */
    @Override
    public void addClusters(List<IJobClusterMetadata> jobClusters) {
        jobClusters.forEach((jc) -> {
            jobClusterMap.put(jc.getJobClusterDefinition().getName(), jc);
        });
    }

    /**
     * Replaces the stored metadata for the cluster named in the definition.
     */
    @Override
    public void updateCluster(IJobClusterMetadata clusterMetadata) {
        jobClusterMap.put(clusterMetadata.getJobClusterDefinition().getName(), clusterMetadata);
    }

    /**
     * Removes the cluster entry; associated jobs in clusterToJobsMap are NOT removed here.
     */
    @Override
    public void deleteJobCluster(String clusterName) {
        jobClusterMap.remove(clusterName);
    }

    /**
     * Indexes each job by its id and appends all of them to the cluster's job set.
     */
    @Override
    public void addJobs(String clusterName, List<IMantisJobMetadata> jobList) {
        jobList.forEach((jb) -> {
            jobMap.put(jb.getJobId(), jb);
        });
        clusterToJobsMap.computeIfAbsent(clusterName, (x -> new HashSet<>())).addAll(jobList);
    }

    @Override
    public void addCompletedJobs(List<JobClusterDefinitionImpl.CompletedJob> completedJobList) {
        // TODO: not implemented — completed jobs are currently dropped.
    }

    @Override
    public void updateJob(IMantisJobMetadata jobMetadata) {
        // TODO: not implemented — job updates are currently ignored.
    }

    @Override
    public void removeJob(JobId jobId) {
        // TODO: not implemented — jobs are never removed from the registry.
    }
}
4,457
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/WorkerEventSubscriberLoggingImpl.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.events;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * {@link WorkerEventSubscriber} that simply logs each received event at INFO level.
 */
public class WorkerEventSubscriberLoggingImpl implements WorkerEventSubscriber {
    private static final Logger logger = LoggerFactory.getLogger(WorkerEventSubscriberLoggingImpl.class);

    @Override
    public void process(LifecycleEventsProto.WorkerListChangedEvent event) {
        logger.info("Received worker list changed event {}", event);
    }

    @Override
    public void process(LifecycleEventsProto.JobStatusEvent statusEvent) {
        logger.info("Received status event {}", statusEvent);
    }
}
4,458
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/StatusEventSubscriber.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.events;

/**
 * Subscriber for lifecycle status events published via {@link LifecycleEventPublisher}.
 */
public interface StatusEventSubscriber {
    /** Handles a single status event (job, job-cluster, worker, or plain). */
    void process(final LifecycleEventsProto.StatusEvent statusEvent);
}
4,459
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/AuditEventSubscriber.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.events;

/**
 * Subscriber for audit events published via {@link LifecycleEventPublisher}.
 */
public interface AuditEventSubscriber {
    /** Handles a single audit event. */
    void process(final LifecycleEventsProto.AuditEvent event);
}
4,460
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/AuditEventSubscriberLoggingImpl.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.events;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * {@link AuditEventSubscriber} that logs each audit event at INFO level with an [AUDIT] tag.
 */
public class AuditEventSubscriberLoggingImpl implements AuditEventSubscriber {
    private static final Logger logger = LoggerFactory.getLogger(AuditEventSubscriberLoggingImpl.class);

    @Override
    public void process(final LifecycleEventsProto.AuditEvent event) {
        logger.info("[AUDIT] {}", event);
    }
}
4,461
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/StatusEventSubscriberAkkaImpl.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.events;

import akka.actor.ActorRef;
import io.mantisrx.master.api.akka.route.proto.JobStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * {@link StatusEventSubscriber} that forwards every status event to the
 * StatusEventBrokerActor for fan-out to interested client connections.
 */
public class StatusEventSubscriberAkkaImpl implements StatusEventSubscriber {
    private static final Logger logger = LoggerFactory.getLogger(StatusEventSubscriberAkkaImpl.class);

    // Broker actor that demultiplexes status events to per-job subscribers.
    private final ActorRef statusEventBrokerActor;

    public StatusEventSubscriberAkkaImpl(final ActorRef statusEventBrokerActor) {
        this.statusEventBrokerActor = statusEventBrokerActor;
    }

    @Override
    public void process(final LifecycleEventsProto.StatusEvent statusEvent) {
        logger.debug("[STATUS] {}", statusEvent);
        // Fire-and-forget: no sender so the broker cannot reply.
        statusEventBrokerActor.tell(statusEvent, ActorRef.noSender());
    }
}
4,462
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/AuditEventBrokerActor.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.events; import akka.actor.AbstractActor; import akka.actor.Props; import akka.dispatch.BoundedMessageQueueSemantics; import akka.dispatch.RequiresMessageQueue; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class AuditEventBrokerActor extends AbstractActor implements RequiresMessageQueue<BoundedMessageQueueSemantics> { private static final Logger logger = LoggerFactory.getLogger(AuditEventBrokerActor.class); private final AuditEventSubscriber auditEventSubscriber; public static Props props(AuditEventSubscriber auditEventSubscriber) { return Props.create(AuditEventBrokerActor.class, auditEventSubscriber); } public AuditEventBrokerActor(AuditEventSubscriber auditEventSubscriber) { this.auditEventSubscriber = auditEventSubscriber; } private void onAuditEvent(final LifecycleEventsProto.AuditEvent auditEvent) { this.auditEventSubscriber.process(auditEvent); } @Override public Receive createReceive() { return receiveBuilder() .match(LifecycleEventsProto.AuditEvent.class, auditEvent -> onAuditEvent(auditEvent)) .build(); } }
4,463
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/AuditEventSubscriberAkkaImpl.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.events; import akka.actor.ActorRef; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class AuditEventSubscriberAkkaImpl implements AuditEventSubscriber { private static final Logger logger = LoggerFactory.getLogger(AuditEventSubscriberAkkaImpl.class); private final ActorRef auditEventBrokerActor; public AuditEventSubscriberAkkaImpl(final ActorRef auditEventBrokerActor) { this.auditEventBrokerActor = auditEventBrokerActor; } @Override public void process(final LifecycleEventsProto.AuditEvent auditEvent) { logger.debug("[AUDIT] {}", auditEvent); auditEventBrokerActor.tell(auditEvent, ActorRef.noSender()); } }
4,464
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/JobRegistry.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.events;

import io.mantisrx.master.jobcluster.IJobClusterMetadata;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobId;

import java.util.List;

/**
 * Registry of job-cluster and job metadata kept by the events subsystem.
 *
 * <p>Interface methods are implicitly {@code public}; the previously mixed redundant
 * modifiers have been removed for consistency. Thread-safety is implementation-defined.
 */
public interface JobRegistry {

    /**
     * Registers the given job clusters.
     *
     * @param jobClusters cluster metadata entries to add
     */
    void addClusters(List<IJobClusterMetadata> jobClusters);

    /**
     * Replaces the stored metadata for a job cluster.
     *
     * @param clusterMetadata updated cluster metadata
     */
    void updateCluster(IJobClusterMetadata clusterMetadata);

    /**
     * Removes a job cluster by name.
     *
     * @param clusterName name of the cluster to remove
     */
    void deleteJobCluster(String clusterName);

    /**
     * Registers jobs belonging to the named cluster.
     *
     * @param clusterName cluster the jobs belong to
     * @param jobList job metadata entries to add
     */
    void addJobs(String clusterName, List<IMantisJobMetadata> jobList);

    /**
     * Registers jobs that have already completed.
     *
     * @param completedJobList completed-job records to add
     */
    void addCompletedJobs(List<JobClusterDefinitionImpl.CompletedJob> completedJobList);

    /**
     * Replaces the stored metadata for a job.
     *
     * @param jobMetadata updated job metadata
     */
    void updateJob(IMantisJobMetadata jobMetadata);

    /**
     * Removes a job by its id.
     *
     * @param jobId id of the job to remove
     */
    void removeJob(JobId jobId);
}
4,465
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/JobStatusConnectedWSActor.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.events; import akka.actor.AbstractActor; import akka.actor.ActorRef; import akka.actor.PoisonPill; import akka.actor.Props; import akka.actor.Terminated; import io.mantisrx.master.api.akka.route.proto.JobStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Proxy actor that receives the StatusEvent messages from the StatusEventBrokerActor and forwards * it onto the Websocket connection from the client */ public class JobStatusConnectedWSActor extends AbstractActor { private final Logger logger = LoggerFactory.getLogger(JobStatusConnectedWSActor.class); public static Props props(final String jobId, final ActorRef statusEventBrokerActor) { return Props.create(JobStatusConnectedWSActor.class, jobId, statusEventBrokerActor); } private final String jobId; private final ActorRef statusEventBrokerActor; public JobStatusConnectedWSActor(final String jobId, final ActorRef statusEventBrokerActor) { this.jobId = jobId; this.statusEventBrokerActor = statusEventBrokerActor; } // Proto public static class Connected { private final ActorRef wsActor; public Connected(final ActorRef wsActor) { this.wsActor = wsActor; } public ActorRef getWsActor() { return wsActor; } @Override public String toString() { return "Connected{" + "wsActor=" + wsActor + '}'; } } // Behavior private final Receive waitingBehavior() { return receiveBuilder() .match(Connected.class, 
this::onConnected) .build(); } private void onConnected(final Connected connectedMsg) { logger.info("connected {}", connectedMsg); statusEventBrokerActor.tell(new StatusEventBrokerActor.JobStatusRequest(jobId), self()); getContext().watch(connectedMsg.wsActor); Receive connected = connectedBehavior(connectedMsg.wsActor); getContext().become(connected); } private void onTerminated(final Terminated t) { logger.info("actor terminated {}", t); getSelf().tell(PoisonPill.getInstance(), ActorRef.noSender()); } private Receive connectedBehavior(final ActorRef wsActor) { return receiveBuilder() .match(JobStatus.class, js -> { logger.debug("writing to WS {}", js); wsActor.tell(js, self()); }) .match(Terminated.class, t -> onTerminated(t)) .build(); } @Override public Receive createReceive() { return waitingBehavior(); } }
4,466
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/events/StatusEventSubscriberLoggingImpl.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.events; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class StatusEventSubscriberLoggingImpl implements StatusEventSubscriber { private static final Logger logger = LoggerFactory.getLogger(StatusEventSubscriberLoggingImpl.class); @Override public void process(final LifecycleEventsProto.StatusEvent statusEvent) { String message = " " + statusEvent.statusEventType + " " + statusEvent.message + " "; if (statusEvent instanceof LifecycleEventsProto.WorkerStatusEvent) { LifecycleEventsProto.WorkerStatusEvent wse = (LifecycleEventsProto.WorkerStatusEvent) statusEvent; message = wse.getWorkerId().getId() + message + wse.getWorkerState(); } else if (statusEvent instanceof LifecycleEventsProto.JobStatusEvent) { LifecycleEventsProto.JobStatusEvent jse = (LifecycleEventsProto.JobStatusEvent) statusEvent; message = jse.getJobId() + message + jse.getJobState(); } else if (statusEvent instanceof LifecycleEventsProto.JobClusterStatusEvent) { LifecycleEventsProto.JobClusterStatusEvent jcse = (LifecycleEventsProto.JobClusterStatusEvent) statusEvent; message = jcse.getJobCluster() + message; } logger.info("[STATUS] {}", message); } }
4,467
0
Create_ds/mantis-control-plane/client/src/test/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/client/src/test/java/io/mantisrx/server/master/client/MantisMasterClientApiTest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.client;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.core.master.MasterMonitor;
import io.netty.channel.ChannelOption;
import io.netty.channel.WriteBufferWaterMark;
import mantis.io.reactivex.netty.RxNetty;
import mantis.io.reactivex.netty.pipeline.PipelineConfigurators;
import mantis.io.reactivex.netty.protocol.http.server.HttpServer;
import mantis.io.reactivex.netty.protocol.http.server.HttpServerRequest;
import mantis.io.reactivex.netty.protocol.http.server.HttpServerResponse;
import mantis.io.reactivex.netty.protocol.http.server.RequestHandler;
import org.junit.AfterClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.functions.Func1;
import rx.functions.Func2;
import rx.schedulers.Schedulers;
import rx.subjects.BehaviorSubject;

/**
 * Tests for {@link MantisMasterClientApi} retry behavior: a request issued while the announced
 * master is unreachable keeps retrying and completes once a server starts listening — either on
 * the original master's port or on the port of a newly announced master.
 */
public class MantisMasterClientApiTest {

    private static final Logger logger = LoggerFactory.getLogger(MantisMasterClientApiTest.class);

    // Each test takes a fresh port so servers started by different tests don't collide.
    private static AtomicInteger port = new AtomicInteger(8950);
    // Servers started mid-test; shut down once after all tests have run.
    private static List<HttpServer<String, String>> startedServers = new ArrayList<>();

    @AfterClass
    public static void cleanup() throws InterruptedException {
        for (HttpServer<String, String> startedServer : startedServers) {
            logger.info("shutting down server on port {}", startedServer.getServerPort());
            startedServer.shutdown();
        }
    }

    /**
     * Builds (does not start) an HTTP server on the given port that answers every request with
     * "200 OK". The caller starts it and is responsible for its shutdown via {@link #cleanup()}.
     */
    public HttpServer<String, String> createHttpServer(int port) {
        final HttpServer<String, String> server = RxNetty.newHttpServerBuilder(
                port,
                new RequestHandler<String, String>() {
                    @Override
                    public Observable<Void> handle(HttpServerRequest<String> req,
                                                   HttpServerResponse<String> resp) {
                        resp.writeAndFlush("200 OK");
                        return Observable.empty();
                    }
                })
                .pipelineConfigurator(PipelineConfigurators.httpServerConfigurator())
                .channelOption(ChannelOption.WRITE_BUFFER_WATER_MARK, WriteBufferWaterMark.DEFAULT)
                .build();
        return server;
    }

    /**
     * Scaling a stage while nothing listens on the announced master port keeps retrying; the
     * "master" is brought up at the second retry, after which the request must complete.
     */
    @Test
    public void testScaleStageRequestRetries() throws InterruptedException {
        MasterMonitor mockMasterMonitor = mock(MasterMonitor.class);
        final BehaviorSubject<MasterDescription> mdSubject = BehaviorSubject.create();
        when(mockMasterMonitor.getMasterObservable()).thenReturn(mdSubject);
        MantisMasterClientApi mantisMasterClientApi = new MantisMasterClientApi(mockMasterMonitor);

        final int serverPort = port.incrementAndGet();
        final String jobId = "test-job-id";
        final int stageNum = 1;
        final int numWorkers = 2;
        final String reason = "test reason";

        // Announce a master on a port nothing is listening on yet.
        mdSubject.onNext(new MasterDescription("localhost", "127.0.0.1", serverPort,
                7090, 7091, "status", 8900, System.currentTimeMillis()));

        final CountDownLatch retryLatch = new CountDownLatch(2);
        final Func1<Observable<? extends Throwable>, Observable<?>> retryLogic = attempts ->
                attempts
                        .zipWith(Observable.range(1, 5),
                                (Throwable t1, Integer retryCount) -> retryCount)
                        .flatMap(retryCount -> {
                            logger.info("{} retrying conx after sleeping for 250ms", retryCount);
                            if (retryCount == 2) {
                                // Bring up the "master" so a subsequent retry can succeed.
                                Schedulers.newThread().createWorker().schedule(() -> {
                                    final HttpServer<String, String> httpServer =
                                            createHttpServer(serverPort);
                                    startedServers.add(httpServer);
                                    httpServer.start();
                                });
                            }
                            retryLatch.countDown();
                            return Observable.timer(250, TimeUnit.MILLISECONDS);
                        });

        final Observable<Boolean> resultObs =
                mantisMasterClientApi.scaleJobStage(jobId, stageNum, numWorkers, reason)
                        .retryWhen(retryLogic);

        final CountDownLatch completedLatch = new CountDownLatch(1);
        resultObs
                .doOnError(throwable -> fail("got unexpected error" + throwable.getMessage()))
                .doOnCompleted(completedLatch::countDown)
                .subscribe();

        assertTrue(retryLatch.await(5, TimeUnit.SECONDS));
        assertTrue(completedLatch.await(5, TimeUnit.SECONDS));
    }

    /**
     * Same as {@link #testScaleStageRequestRetries()}, but the request only succeeds after a
     * fail-over: a server comes up on a *different* port at the second retry, and the new
     * {@link MasterDescription} pointing at it is published at the third retry.
     */
    @Test
    public void testScaleStageRequestRetriesNewMaster() throws InterruptedException {
        MasterMonitor mockMasterMonitor = mock(MasterMonitor.class);
        final BehaviorSubject<MasterDescription> mdSubject = BehaviorSubject.create();
        when(mockMasterMonitor.getMasterObservable()).thenReturn(mdSubject);
        MantisMasterClientApi mantisMasterClientApi = new MantisMasterClientApi(mockMasterMonitor);

        final int oldMasterPort = port.incrementAndGet();
        final int newMasterPort = port.incrementAndGet();
        final String jobId = "test-job-id";
        final int stageNum = 1;
        final int numWorkers = 2;
        final String reason = "test reason";

        // Announce the old master; nothing ever listens on its port.
        mdSubject.onNext(new MasterDescription("localhost", "127.0.0.1", oldMasterPort,
                7090, 7091, "status", 8900, System.currentTimeMillis()));

        final CountDownLatch retryLatch = new CountDownLatch(3);
        final Func1<Observable<? extends Throwable>, Observable<?>> retryLogic = attempts ->
                attempts
                        .zipWith(Observable.range(1, 5),
                                (Throwable t1, Integer retryCount) -> retryCount)
                        .flatMap(retryCount -> {
                            logger.info("{} retrying conx after sleeping for 250ms", retryCount);
                            if (retryCount == 2) {
                                // Start the new master's server before announcing it.
                                Schedulers.newThread().createWorker().schedule(() -> {
                                    final HttpServer<String, String> httpServer =
                                            createHttpServer(newMasterPort);
                                    startedServers.add(httpServer);
                                    httpServer.start();
                                });
                            }
                            if (retryCount == 3) {
                                // Fail over: announce the new master so the client re-resolves.
                                mdSubject.onNext(new MasterDescription("localhost", "127.0.0.1",
                                        newMasterPort, 7090, 7091, "status", 8900,
                                        System.currentTimeMillis()));
                            }
                            retryLatch.countDown();
                            return Observable.timer(250, TimeUnit.MILLISECONDS);
                        });

        final Observable<Boolean> resultObs =
                mantisMasterClientApi.scaleJobStage(jobId, stageNum, numWorkers, reason)
                        .retryWhen(retryLogic);

        final CountDownLatch completedLatch = new CountDownLatch(1);
        resultObs
                .doOnError(throwable -> fail("got unexpected error" + throwable.getMessage()))
                .doOnCompleted(completedLatch::countDown)
                .subscribe();

        assertTrue(retryLatch.await(5, TimeUnit.SECONDS));
        assertTrue(completedLatch.await(5, TimeUnit.SECONDS));
    }
}
4,468
0
Create_ds/mantis-control-plane/client/src/test/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/client/src/test/java/io/mantisrx/server/master/client/MasterClientWrapperTest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.client;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import io.mantisrx.server.core.WorkerAssignments;
import io.mantisrx.server.core.WorkerHost;
import io.reactivex.mantis.remote.observable.EndpointChange;
import org.junit.Test;
import rx.Observable;
import rx.functions.Func1;
import rx.schedulers.Schedulers;

/**
 * Manual/integration tests for {@link MasterClientWrapper}. Most tests here are disabled
 * (their {@code @Test}/{@code @Before} annotations are commented out) because they resolve a
 * live Mantis master through the hardcoded Zookeeper quorum below and act on real jobs.
 * NOTE(review): the Zookeeper connect string lists internal IP addresses — these tests
 * presumably only run inside that network; verify before re-enabling any of them.
 */
public class MasterClientWrapperTest {

    // Stage number whose sink workers getSchedulingInfoTest() inspects and resubmits.
    private static final int sinkStageNumber = 3;

    // Zookeeper discovery properties used to locate the Mantis master.
    static Properties zkProps = new Properties();

    static {
        zkProps.put("mantis.zookeeper.connectString", "100.67.80.172:2181,100.67.71.221:2181,100.67.89.26:2181,100.67.71.34:2181,100.67.80.18:2181");
        zkProps.put("mantis.zookeeper.leader.announcement.path", "/leader");
        zkProps.put("mantis.zookeeper.root", "/mantis/master");
    }

    // Shared wrapper; stays null unless init() runs (its @Before annotation is commented out).
    MasterClientWrapper clientWrapper = null;

    //@Before
    public void init() {
        clientWrapper = new MasterClientWrapper(zkProps);
    }

    // Disabled: resolves a job-cluster name to job ids against a live master.
    // NOTE(review): the await(...) result is ignored, so a timeout would not fail this test.
    // @Test
    public void getNamedJobIdsTest() {
        String jobname = "APIRequestSource";
        CountDownLatch cdLatch = new CountDownLatch(1);
        clientWrapper
                .getNamedJobsIds(jobname)
                .subscribe((jId) -> {
                    cdLatch.countDown();
                    System.out.println("job id " + jId);
                    assertTrue(jId.startsWith(jobname));
                });
        try {
            cdLatch.await(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            fail();
        }
    }

    // Disabled: looks up sink (stage 1) worker endpoints for a live job.
    // @Test
    public void getSinkLocationsTest() {
        String jobname = "APIRequestSource";
        CountDownLatch cdLatch = new CountDownLatch(1);
        clientWrapper
                .getNamedJobsIds(jobname)
                .flatMap((jName) -> {
                    return clientWrapper.getSinkLocations(jName, 1, 0, 0);
                })
                .subscribe((ep) -> {
                    System.out.println("Got EP " + ep.getEndpoint() + " type " + ep.getType());
                    cdLatch.countDown();
                });
        try {
            cdLatch.await(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            fail();
        }
    }

    // Disabled: watches sink endpoint changes for a live job, picks one sink worker from the
    // scheduling info, resubmits it, and expects both the endpoint churn and the resubmit ack.
    //@Test
    public void getSchedulingInfoTest() {
        String jobname = "GroupByIP";
        CountDownLatch cdLatch = new CountDownLatch(3);
        // Cached single-element sources shared by both pipelines below.
        Observable<String> jobidO = clientWrapper.getNamedJobsIds(jobname).take(1).cache().subscribeOn(Schedulers.io());
        Observable<MantisMasterClientApi> mmciO = clientWrapper.getMasterClientApi().take(1).cache().subscribeOn(Schedulers.io());
        // Pipeline 1: observe the first three sink endpoint changes.
        Observable<EndpointChange> epO = jobidO.map((jId) -> clientWrapper.getSinkLocations(jId, sinkStageNumber, 0, 0))
                .flatMap(e -> e)
                .take(3)
                .doOnNext((ep) -> System.out.println("Ep change: " + ep))
                .doOnNext((ep) -> cdLatch.countDown());
        // Pipeline 2: pick the first sink worker from the scheduling info and resubmit it.
        Observable<Boolean> deleteWorkerO = jobidO.zipWith(mmciO, (String jId, MantisMasterClientApi mmci) -> {
            System.out.println("Job id is " + jId);
            return mmci.schedulingChanges(jId)
                    .map(jsi -> {
                        Map<Integer, WorkerAssignments> workerAssignments = jsi.getWorkerAssignments();
                        System.out.println("WorkerAssignments -> " + workerAssignments);
                        WorkerAssignments workerAssignmentsForSink = workerAssignments.get(sinkStageNumber);
                        System.out.println("WorkerAssignmentsForSink -> " + workerAssignmentsForSink);
                        Map<Integer, WorkerHost> hostsForSink = workerAssignmentsForSink.getHosts();
                        System.out.println("Host map -> " + hostsForSink);
                        assertTrue(!hostsForSink.isEmpty());
                        // Returns the worker number of the first host encountered.
                        Iterator<Entry<Integer, WorkerHost>> it = hostsForSink.entrySet().iterator();
                        while (it.hasNext()) {
                            Entry<Integer, WorkerHost> e = it.next();
                            return e.getValue().getWorkerNumber();
                        }
                        return -1;
                    })
                    .take(1)
                    .map((Integer workerNo) -> {
                        System.out.println("Worker no is -> " + workerNo);
                        return mmci.resubmitJobWorker(jId, "tester", workerNo, "testing");
                    }).flatMap(b -> b);
        })
                .flatMap(b -> b)
                .doOnNext((result) -> {
                    assertTrue(result);
                    cdLatch.countDown();
                });
        epO.subscribeOn(Schedulers.io()).subscribe((ep) -> System.out.println(ep),
                (t) -> t.printStackTrace(),
                () -> System.out.println("ep change completed"));
        deleteWorkerO.toBlocking().subscribe((n) -> System.out.println(n),
                (t) -> t.printStackTrace(),
                () -> System.out.println("worker deletion completed"));
        try {
            cdLatch.await(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            fail();
        }
    }

    // Disabled: streams two status updates for a hardcoded live job id.
    // @Test
    public void testJobStatusEndpoint() {
        MasterClientWrapper clientWrapper = new MasterClientWrapper(zkProps);
        String jobId = "PriamRequestSource-45";
        clientWrapper.getMasterClientApi()
                .flatMap(new Func1<MantisMasterClientApi, Observable<String>>() {
                    @Override
                    public Observable<String> call(MantisMasterClientApi mantisMasterClientApi) {
                        // NOTE(review): sinkStage is never read; left in place as this is a
                        // documentation-only pass.
                        Integer sinkStage = null;
                        return mantisMasterClientApi.getJobStatusObservable(jobId)
                                .map((status) -> {
                                    return status;
                                })
                                ;
                    }
                }).take(2).toBlocking().subscribe((ep) -> {
                    System.out.println("Endpoint Change -> " + ep);
                });
    }

    // Enabled, but still depends on the live Zookeeper quorum above.
    // NOTE(review): the await(...) result is ignored, so a timeout would not fail this test.
    @Test
    public void testNamedJobExists() {
        MasterClientWrapper clientWrapper = new MasterClientWrapper(zkProps);
        CountDownLatch cdLatch = new CountDownLatch(1);
        clientWrapper.namedJobExists("APIRequestSource")
                .subscribe((exists) -> {
                    assertTrue(exists);
                    cdLatch.countDown();
                });
        try {
            cdLatch.await(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            fail();
        }
    }
}
4,469
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/MasterClientWrapper.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.client; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import io.mantisrx.common.metrics.Counter; import io.mantisrx.common.metrics.Metrics; import io.mantisrx.common.metrics.MetricsRegistry; import io.mantisrx.common.network.Endpoint; import io.mantisrx.common.network.WorkerEndpoint; import io.mantisrx.runtime.MantisJobState; import io.mantisrx.server.core.CoreConfiguration; import io.mantisrx.server.core.JobSchedulingInfo; import io.mantisrx.server.core.NamedJobInfo; import io.mantisrx.server.core.WorkerAssignments; import io.mantisrx.server.core.WorkerHost; import io.mantisrx.server.core.master.MasterDescription; import io.mantisrx.server.core.master.MasterMonitor; import io.mantisrx.server.core.zookeeper.CuratorService; import io.mantisrx.server.master.client.config.ConfigurationFactory; import io.mantisrx.server.master.client.config.StaticPropertiesConfigurationFactory; import io.reactivex.mantis.remote.observable.EndpointChange; import io.reactivex.mantis.remote.observable.ToDeltaEndpointInjector; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.Observer; import rx.functions.Action0; import rx.functions.Action1; import rx.functions.Func1; import 
rx.subjects.BehaviorSubject; import rx.subjects.PublishSubject; public class MasterClientWrapper { public static final String InvalidNamedJob = "No_such_named_job"; private static final Logger logger = LoggerFactory.getLogger(MasterClientWrapper.class); final CountDownLatch latch = new CountDownLatch(1); final BehaviorSubject<Boolean> initialMaster = BehaviorSubject.create(); private final MasterMonitor masterMonitor; private final Counter masterConnectRetryCounter; ConfigurationFactory configurationFactory; private MantisMasterClientApi masterClientApi; private PublishSubject<JobSinkNumWorkers> numSinkWorkersSubject = PublishSubject.create(); private PublishSubject<JobNumWorkers> numWorkersSubject = PublishSubject.create(); public MasterClientWrapper(Properties properties) { this(new StaticPropertiesConfigurationFactory(properties)); } // blocks until getting master info from zookeeper public MasterClientWrapper(ConfigurationFactory configurationFactory) { this.configurationFactory = configurationFactory; masterMonitor = initializeMasterMonitor(); Metrics m = new Metrics.Builder() .name(MasterClientWrapper.class.getCanonicalName()) .addCounter("MasterConnectRetryCount") .build(); m = MetricsRegistry.getInstance().registerAndGet(m); masterConnectRetryCounter = m.getCounter("MasterConnectRetryCount"); } public static String getWrappedHost(String host, int workerNumber) { return host + "-" + workerNumber; } public static String getUnwrappedHost(String wrappedHost) { final int i = wrappedHost.lastIndexOf('-'); if (i < 0) return wrappedHost; return wrappedHost.substring(0, i); } public static void main(String[] args) throws InterruptedException { Properties zkProps = new Properties(); zkProps.put("mantis.zookeeper.connectString", "ec2-50-19-255-1.compute-1.amazonaws.com:2181,ec2-54-235-159-245.compute-1.amazonaws.com:2181,ec2-50-19-255-97.compute-1.amazonaws.com:2181,ec2-184-73-152-248.compute-1.amazonaws.com:2181,ec2-50-17-247-179.compute-1.amazonaws.com:2181"); 
zkProps.put("mantis.zookeeper.leader.announcement.path", "/leader"); zkProps.put("mantis.zookeeper.root", "/mantis/master"); String jobId = "GroupByIPNJ-12"; MasterClientWrapper clientWrapper = new MasterClientWrapper(zkProps); clientWrapper.getMasterClientApi() .flatMap(new Func1<MantisMasterClientApi, Observable<EndpointChange>>() { @Override public Observable<EndpointChange> call(MantisMasterClientApi mantisMasterClientApi) { Integer sinkStage = null; return mantisMasterClientApi.getSinkStageNum(jobId) .take(1) // only need to figure out sink stage number once .flatMap(new Func1<Integer, Observable<EndpointChange>>() { @Override public Observable<EndpointChange> call(Integer integer) { logger.info("Getting sink locations for " + jobId); return clientWrapper.getSinkLocations(jobId, integer, 0, 0); } }); } }).toBlocking().subscribe((ep) -> { System.out.println("Endpoint Change -> " + ep); }); Thread.sleep(50000); } public MasterMonitor getMasterMonitor() { return masterMonitor; } public void addNumSinkWorkersObserver(Observer<JobSinkNumWorkers> numSinkWorkersObserver) { numSinkWorkersSubject.subscribe(numSinkWorkersObserver); } public void addNumWorkersObserver(Observer<JobNumWorkers> numWorkersObserver) { numWorkersSubject.subscribe(numWorkersObserver); } /** * Returns an Observable that emits only once, after the MasterClientApi has been initialized */ public Observable<MantisMasterClientApi> getMasterClientApi() { return initialMaster .onErrorResumeNext((Throwable throwable) -> { logger.warn("Error getting initial master from zookeeper: " + throwable.getMessage()); return Observable.empty(); }) .take(1) .map((Boolean aBoolean) -> { return masterClientApi; }); } private void startInitialMasterDescriptionGetter(CuratorService curatorService, final MasterMonitor masterMonitor) { final AtomicBoolean initialMasterGotten = new AtomicBoolean(false); masterMonitor.getMasterObservable() .takeWhile((MasterDescription masterDescription) -> { return 
!initialMasterGotten.get(); }) .subscribe((MasterDescription masterDescription) -> { if (masterDescription == null) { return; } logger.info("Initialized master description=" + masterDescription); initialMasterGotten.set(true); masterClientApi = new MantisMasterClientApi(masterMonitor); initialMaster.onNext(true); }); curatorService.start(); } private MasterMonitor initializeMasterMonitor() { CoreConfiguration config = configurationFactory.getConfig(); CuratorService curatorService = new CuratorService(config, null); MasterMonitor masterMonitor = curatorService.getMasterMonitor(); startInitialMasterDescriptionGetter(curatorService, masterMonitor); return masterMonitor; } private List<Endpoint> getAllNonJobMasterEndpoints(final String jobId, final Map<Integer, WorkerAssignments> workerAssignments) { List<Endpoint> endpoints = new ArrayList<>(); int totalWorkers = 0; for (Map.Entry<Integer, WorkerAssignments> workerAssignment : workerAssignments.entrySet()) { final Integer stageNum = workerAssignment.getKey(); // skip workers for stage 0 if (stageNum == 0) { continue; } final WorkerAssignments assignments = workerAssignment.getValue(); logger.info("job {} Creating endpoints conx from {} worker assignments for stage {}", jobId, assignments.getHosts().size(), stageNum); logger.info("stage {} hosts: {}", stageNum, assignments.getHosts()); totalWorkers += assignments.getNumWorkers(); for (WorkerHost host : assignments.getHosts().values()) { final int workerIndex = host.getWorkerIndex(); if (host.getState() == MantisJobState.Started) { logger.info("job " + jobId + ": creating new endpoint for worker number=" + host.getWorkerNumber() + ", index=" + host.getWorkerIndex() + ", host:port=" + host.getHost() + ":" + host.getPort().get(0)); Endpoint ep = new WorkerEndpoint(getWrappedHost(host.getHost(), host.getWorkerNumber()), host.getPort().get(0), stageNum, host.getMetricsPort(), host.getWorkerIndex(), host.getWorkerNumber(), // completed callback new Action0() { @Override 
public void call() { logger.info("job " + jobId + " WorkerIndex " + workerIndex + " completed"); } }, // error callback new Action1<Throwable>() { @Override public void call(Throwable t1) { logger.info("job " + jobId + " WorkerIndex " + workerIndex + " failed"); } } ); endpoints.add(ep); } } } numWorkersSubject.onNext(new JobNumWorkers(jobId, totalWorkers)); return endpoints; } public Observable<EndpointChange> getAllWorkerMetricLocations(final String jobId) { final ConditionalRetry schedInfoRetry = new ConditionalRetry(masterConnectRetryCounter, "AllSchedInfoRetry", 10); Observable<List<Endpoint>> schedulingUpdates = getMasterClientApi() .take(1) .flatMap(new Func1<MantisMasterClientApi, Observable<? extends List<Endpoint>>>() { @Override public Observable<? extends List<Endpoint>> call(MantisMasterClientApi mantisMasterClientApi) { return mantisMasterClientApi .schedulingChanges(jobId) .doOnError(new Action1<Throwable>() { @Override public void call(Throwable throwable) { logger.warn("Error on scheduling changes observable: " + throwable); } }) .retryWhen(schedInfoRetry.getRetryLogic()) .map(new Func1<JobSchedulingInfo, Map<Integer, WorkerAssignments>>() { @Override public Map<Integer, WorkerAssignments> call(JobSchedulingInfo jobSchedulingInfo) { logger.info("Got scheduling info for " + jobId); return jobSchedulingInfo.getWorkerAssignments(); } }) .filter(new Func1<Map<Integer, WorkerAssignments>, Boolean>() { @Override public Boolean call(Map<Integer, WorkerAssignments> workerAssignments) { return workerAssignments != null; } }) .map(new Func1<Map<Integer, WorkerAssignments>, List<Endpoint>>() { @Override public List<Endpoint> call(Map<Integer, WorkerAssignments> workerAssignments) { return getAllNonJobMasterEndpoints(jobId, workerAssignments); } }) .doOnError(new Action1<Throwable>() { @Override public void call(Throwable throwable) { logger.error(throwable.getMessage(), throwable); } }); } }); return (new ToDeltaEndpointInjector(schedulingUpdates)).deltas(); 
} public Observable<EndpointChange> getSinkLocations(final String jobId, final int sinkStage, final int forPartition, final int totalPartitions) { final ConditionalRetry schedInfoRetry = new ConditionalRetry(masterConnectRetryCounter, "SchedInfoRetry", 10); Observable<List<Endpoint>> schedulingUpdates = getMasterClientApi() .take(1) .flatMap((MantisMasterClientApi mantisMasterClientApi) -> { return mantisMasterClientApi .schedulingChanges(jobId) .doOnError((Throwable throwable) -> { logger.warn(throwable.getMessage()); }) .retryWhen(schedInfoRetry.getRetryLogic()) .map((JobSchedulingInfo jobSchedulingInfo) -> { logger.info("Got scheduling info for " + jobId); logger.info("Worker Assignments " + jobSchedulingInfo.getWorkerAssignments().get(sinkStage)); return jobSchedulingInfo.getWorkerAssignments().get(sinkStage); }) // Worker assignments can be empty if the job has completed so do not filter these events out .map((WorkerAssignments workerAssignments) -> { List<Endpoint> endpoints = new ArrayList<>(); if (workerAssignments != null) { logger.info("job " + jobId + " Creating endpoints conx from " + workerAssignments.getHosts().size() + " worker assignments"); for (WorkerHost host : workerAssignments.getHosts().values()) { final int workerIndex = host.getWorkerIndex(); final int totalFromPartitions = workerAssignments.getNumWorkers(); numSinkWorkersSubject.onNext(new JobSinkNumWorkers(jobId, totalFromPartitions)); if (usePartition(workerIndex, totalFromPartitions, forPartition, totalPartitions)) { //logger.info("Using partition " + workerIndex); if (host.getState() == MantisJobState.Started) { Endpoint ep = new Endpoint(getWrappedHost(host.getHost(), host.getWorkerNumber()), host.getPort().get(0), // completed callback () -> logger.info("job " + jobId + " WorkerIndex " + workerIndex + " completed"), // error callback t1 -> logger.info("job " + jobId + " WorkerIndex " + workerIndex + " failed") ); endpoints.add(ep); } } } } else { logger.info("job " + jobId + " Has no 
active workers!"); } return endpoints; }) .doOnError((Throwable throwable) -> { logger.error(throwable.getMessage(), throwable); }); }); return (new ToDeltaEndpointInjector(schedulingUpdates)).deltas(); } private boolean usePartition(int fromPartition, int fromTotalPartitions, int toPartition, int toTotalPartitions) { if (toPartition < 0 || toTotalPartitions == 0) return true; // not partitioning long n = Math.round((double) fromTotalPartitions / (double) toTotalPartitions); long beg = toPartition * n; long end = toPartition == toTotalPartitions - 1 ? fromTotalPartitions : (toPartition + 1) * n; return beg < fromTotalPartitions && fromPartition >= beg && fromPartition < end; } public Observable<Boolean> namedJobExists(final String jobName) { final ConditionalRetry namedJobRetry = new ConditionalRetry(masterConnectRetryCounter, "NamedJobExists", Integer.MAX_VALUE); return getMasterClientApi() .flatMap((final MantisMasterClientApi mantisMasterClientApi) -> { logger.info("verifying if job name exists: " + jobName); return mantisMasterClientApi.namedJobExists(jobName); }) .retryWhen(namedJobRetry.getRetryLogic()); } public Observable<String> getNamedJobsIds(final String jobName) { final ConditionalRetry namedJobsIdsRetry = new ConditionalRetry(masterConnectRetryCounter, "NamedJobsIds", Integer.MAX_VALUE); return getMasterClientApi() .flatMap((final MantisMasterClientApi mantisMasterClientApi) -> { logger.info("verifying if job name exists: " + jobName); return mantisMasterClientApi.namedJobExists(jobName) .map((Boolean aBoolean) -> { return aBoolean ? 
mantisMasterClientApi : null; }); }) .onErrorResumeNext((Throwable throwable) -> { logger.error(throwable.getMessage()); return Observable.empty(); }) .take(1) .map((MantisMasterClientApi mantisMasterClientApi) -> { if (mantisMasterClientApi == null) { final Exception exception = new Exception("No such Job Cluster " + jobName); namedJobsIdsRetry.setErrorRef(exception); return Observable.just(new NamedJobInfo(jobName, InvalidNamedJob)); } logger.info("Getting Job cluster info for " + jobName); return mantisMasterClientApi.namedJobInfo(jobName); }) .doOnError((Throwable throwable) -> { logger.error(throwable.getMessage(), throwable); }) .retryWhen(namedJobsIdsRetry.getRetryLogic()) .flatMap((Observable<NamedJobInfo> namedJobInfo) -> { return namedJobInfo.map((NamedJobInfo nji) -> { return nji.getJobId(); }); }); } public static class JobSinkNumWorkers { protected final int numSinkWorkers; private final String jobId; public JobSinkNumWorkers(String jobId, int numSinkWorkers) { this.jobId = jobId; this.numSinkWorkers = numSinkWorkers; } public String getJobId() { return jobId; } public int getNumSinkWorkers() { return numSinkWorkers; } } public static class JobNumWorkers { protected final int numWorkers; private final String jobId; public JobNumWorkers(String jobId, int numWorkers) { this.jobId = jobId; this.numWorkers = numWorkers; } public String getJobId() { return jobId; } public int getNumWorkers() { return numWorkers; } } }
4,470
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/MantisProtoAdapter.java
//package io.mantisrx.server.master.client; // //import io.mantisrx.common.Label; //import io.mantisrx.master.api.proto.CreateJobClusterRequest; //import io.mantisrx.master.api.proto.UpdateJobClusterRequest; //import io.mantisrx.master.core.proto.JobDefinition; //import io.mantisrx.runtime.JobConstraints; //import io.mantisrx.runtime.JobOwner; //import io.mantisrx.runtime.JobSla; //import io.mantisrx.runtime.MachineDefinition; //import io.mantisrx.runtime.MantisJobDefinition; //import io.mantisrx.runtime.MantisJobDurationType; //import io.mantisrx.runtime.NamedJobDefinition; //import io.mantisrx.runtime.WorkerMigrationConfig; //import io.mantisrx.runtime.descriptor.SchedulingInfo; //import io.mantisrx.runtime.descriptor.StageScalingPolicy; //import io.mantisrx.runtime.descriptor.StageSchedulingInfo; //import io.mantisrx.runtime.parameter.Parameter; // //import java.net.MalformedURLException; //import java.net.URL; //import java.util.stream.Collectors; // //public class MantisProtoAdapter { // public static final StageScalingPolicy.Strategy toStageScalingStrategy(final io.mantisrx.master.core.proto.StageScalingPolicy.Strategy s) { // return new StageScalingPolicy.Strategy( // StageScalingPolicy.ScalingReason.valueOf(s.getReason().name()), // s.getScaleDownBelowPct(), // s.getScaleUpAbovePct(), // s.hasRollingCount() ? 
// new StageScalingPolicy.RollingCount( // s.getRollingCount().getCount(), // s.getRollingCount().getOf()) : // null // ); // } // public static final StageScalingPolicy toStageScalingPolicy(final io.mantisrx.master.core.proto.StageScalingPolicy p) { // return new StageScalingPolicy( // p.getStage(), // p.getMin(), // p.getMax(), // p.getIncrement(), // p.getDecrement(), // p.getCoolDownSecs(), // p.getStrategiesMap().entrySet().stream().collect( // Collectors.toMap( // e -> StageScalingPolicy.ScalingReason.valueOf(e.getKey()), // e -> toStageScalingStrategy(e.getValue()) // ) // ) // ); // } // // public static final MachineDefinition toMachineDefinition(final io.mantisrx.master.core.proto.MachineDefinition md) { // return new MachineDefinition(md.getCpuCores(), // md.getMemoryMB(), md.getNetworkMbps(), md.getDiskMB(), md.getNumPorts()); // } // // private static final StageSchedulingInfo toStageSchedulingInfo(final io.mantisrx.master.core.proto.SchedulingInfo.StageSchedulingInfo s) { // return new StageSchedulingInfo( // s.getNumberOfInstances(), // toMachineDefinition(s.getMachineDefinition()), // s.getHardConstraintsList().stream().map(c -> JobConstraints.valueOf(c.name())).collect(Collectors.toList()), // s.getSoftConstraintsList().stream().map(c -> JobConstraints.valueOf(c.name())).collect(Collectors.toList()), // s.hasScalingPolicy() ? 
toStageScalingPolicy(s.getScalingPolicy()) : null, // s.getScalable() // ); // } // private static final SchedulingInfo toSchedulingInfo(final io.mantisrx.master.core.proto.SchedulingInfo s) { // // return new SchedulingInfo( // s.getStagesMap().entrySet().stream() // .collect(Collectors.toMap(e -> e.getKey(), // e -> toStageSchedulingInfo(e.getValue()))) // ); // } // // public static final JobSla toJobSla(final io.mantisrx.master.core.proto.JobSla protoSla) { // return new JobSla(protoSla.getRuntimeLimitSecs(), // protoSla.getMinRuntimeSecs(), // JobSla.StreamSLAType.valueOf(protoSla.getSlaType().name()), // MantisJobDurationType.valueOf(protoSla.getDurationType().name()), // protoSla.getUserProvidedType()); // } // // private static final WorkerMigrationConfig toMigrationConfig(final io.mantisrx.master.core.proto.WorkerMigrationConfig cfg) { // return new WorkerMigrationConfig( // WorkerMigrationConfig.MigrationStrategyEnum.valueOf(cfg.getStrategy().name()), // cfg.getConfigString() // ); // } // // private static final JobOwner toJobOwner(final io.mantisrx.master.core.proto.JobOwner owner) { // return new JobOwner( // owner.getName(), // owner.getTeamName(), // owner.getDescription(), // owner.getContactEmail(), // owner.getRepo() // ); // } // // public static NamedJobDefinition toNamedJobDefinition(final CreateJobClusterRequest request) throws MalformedURLException { // JobDefinition jd = request.getJobDefinition(); // io.mantisrx.master.core.proto.JobOwner owner = request.getOwner(); // MantisJobDefinition jobDefinition = new MantisJobDefinition( // jd.getName(), // jd.getUser(), // jd.getUrl() == null ? null : new URL(jd.getUrl()), // jd.getVersion(), // jd.getParametersList().stream().map(p -> new Parameter(p.getName(), p.getValue())).collect(Collectors.toList()), // jd.hasJobSla() ? toJobSla(jd.getJobSla()) : null, // jd.getSubscriptionTimeoutSecs(), // jd.hasSchedulingInfo() ? 
toSchedulingInfo(jd.getSchedulingInfo()) : null, // jd.getSlaMin(), // jd.getSlaMax(), // jd.getCronSpec(), // NamedJobDefinition.CronPolicy.valueOf(jd.getCronPolicy().name()), // jd.getIsReadyForJobMaster(), // jd.hasMigrationConfig() ? toMigrationConfig(jd.getMigrationConfig()) : WorkerMigrationConfig.DEFAULT, // jd.getLabelsList().stream().map(l -> new Label(l.getName(), l.getValue())).collect(Collectors.toList())); // return new NamedJobDefinition( // jobDefinition, // request.hasOwner() ? toJobOwner(owner) : null // ); // } // // public static NamedJobDefinition toNamedJobDefinition(final UpdateJobClusterRequest request) throws MalformedURLException { // JobDefinition jd = request.getJobDefinition(); // io.mantisrx.master.core.proto.JobOwner owner = request.getOwner(); // MantisJobDefinition jobDefinition = new MantisJobDefinition( // jd.getName(), // jd.getUser(), // jd.getUrl() == null ? null : new URL(jd.getUrl()), // jd.getVersion(), // jd.getParametersList().stream().map(p -> new Parameter(p.getName(), p.getValue())).collect(Collectors.toList()), // jd.hasJobSla() ? toJobSla(jd.getJobSla()) : null, // jd.getSubscriptionTimeoutSecs(), // jd.hasSchedulingInfo() ? toSchedulingInfo(jd.getSchedulingInfo()) : null, // jd.getSlaMin(), // jd.getSlaMax(), // jd.getCronSpec(), // NamedJobDefinition.CronPolicy.valueOf(jd.getCronPolicy().name()), // jd.getIsReadyForJobMaster(), // jd.hasMigrationConfig() ? toMigrationConfig(jd.getMigrationConfig()) : WorkerMigrationConfig.DEFAULT, // jd.getLabelsList().stream().map(l -> new Label(l.getName(), l.getValue())).collect(Collectors.toList())); // return new NamedJobDefinition( // jobDefinition, // request.hasOwner() ? toJobOwner(owner) : null // ); // } // //}
4,471
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/SimpleSchedulerObserver.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.client; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.util.Properties; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException; import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature; import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper; import io.mantisrx.shaded.com.fasterxml.jackson.datatype.jdk8.Jdk8Module; import com.sampullara.cli.Args; import com.sampullara.cli.Argument; import io.mantisrx.server.core.JobAssignmentResult; import rx.Observable; import rx.functions.Action0; import rx.functions.Action1; import rx.functions.Func1; public class SimpleSchedulerObserver { private static final ObjectMapper objectMapper = new ObjectMapper(); @Argument(alias = "p", description = "Specify a configuration file", required = true) private static String propFile = ""; @Argument(alias = "j", description = "Specify a jobId", required = false) private static String jobId = ""; private final MasterClientWrapper clientWrapper; SimpleSchedulerObserver(Properties properties) { clientWrapper = new MasterClientWrapper(properties); } public static void main(String[] args) { objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); 
objectMapper.registerModule(new Jdk8Module()); try { Args.parse(SimpleSchedulerObserver.class, args); } catch (IllegalArgumentException e) { Args.usage(SimpleSchedulerObserver.class); System.exit(1); } Properties properties = new Properties(); try (InputStream inputStream = new FileInputStream(propFile)) { properties.load(inputStream); } catch (IOException e) { e.printStackTrace(); } System.out.println("Listening to scheduling assignments with jobId=" + jobId); final CountDownLatch latch = new CountDownLatch(1); SimpleSchedulerObserver schedulerObserver = new SimpleSchedulerObserver(properties); final AtomicReference<JobAssignmentResult> ref = new AtomicReference<>(null); schedulerObserver.getObservable(jobId) .filter(new Func1<JobAssignmentResult, Boolean>() { @Override public Boolean call(JobAssignmentResult jobAssignmentResult) { if (jobAssignmentResult == null) return false; if (jobAssignmentResult.isIdentical(ref.get())) return false; ref.set(jobAssignmentResult); return true; } }) .doOnNext(new Action1<JobAssignmentResult>() { @Override public void call(JobAssignmentResult jobAssignmentResult) { System.out.println("Failures for job " + jobAssignmentResult.getJobId() + ":"); for (JobAssignmentResult.Failure failure : jobAssignmentResult.getFailures()) try { System.out.println(" " + objectMapper.writeValueAsString(failure)); } catch (JsonProcessingException e) { e.printStackTrace(); } } }) .doOnCompleted(new Action0() { @Override public void call() { latch.countDown(); } }) .doOnError(new Action1<Throwable>() { @Override public void call(Throwable throwable) { throwable.printStackTrace(); latch.countDown(); } }) .subscribe(); System.out.println("Subscribed."); try { latch.await(); } catch (InterruptedException e) { e.printStackTrace(); } } Observable<JobAssignmentResult> getObservable(final String jobId) { return clientWrapper .getMasterClientApi() .flatMap(new Func1<MantisMasterClientApi, Observable<? 
extends JobAssignmentResult>>() { @Override public Observable<? extends JobAssignmentResult> call(MantisMasterClientApi mantisMasterClientApi) { return mantisMasterClientApi.assignmentResults(jobId); } }); } }
4,472
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/ConditionalRetry.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.client; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import io.mantisrx.common.metrics.Counter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.functions.Func1; import rx.functions.Func2; public class ConditionalRetry { private static final Logger logger = LoggerFactory.getLogger(ConditionalRetry.class); private final Counter counter; private final String name; private final AtomicReference<Throwable> errorRef = new AtomicReference<>(null); private final Func1<Observable<? extends Throwable>, Observable<?>> retryLogic; public ConditionalRetry(Counter counter, String name) { this(counter, name, Integer.MAX_VALUE); } public ConditionalRetry(Counter counter, String name, final int max) { this.counter = counter; this.name = name; this.retryLogic = new Func1<Observable<? extends Throwable>, Observable<?>>() { @Override public Observable<?> call(Observable<? 
extends Throwable> attempts) { return attempts .zipWith(Observable.range(1, max), new Func2<Throwable, Integer, Integer>() { @Override public Integer call(Throwable t1, Integer integer) { return integer; } }) .flatMap(new Func1<Integer, Observable<?>>() { @Override public Observable<?> call(Integer integer) { if (errorRef.get() != null) return Observable.error(errorRef.get()); if (ConditionalRetry.this.counter != null) ConditionalRetry.this.counter.increment(); long delay = 2 * (integer > 10 ? 10 : integer); logger.info(": retrying " + ConditionalRetry.this.name + " after sleeping for " + delay + " secs"); return Observable.timer(delay, TimeUnit.SECONDS); } }); } }; } public void setErrorRef(Throwable error) { errorRef.set(error); } public Counter getCounter() { return counter; } public Func1<Observable<? extends Throwable>, Observable<?>> getRetryLogic() { return retryLogic; } }
4,473
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/MasterClientException.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.client; public class MasterClientException extends Exception { public MasterClientException(String s) { super(s); } public MasterClientException(String s, Throwable t) { super(s, t); } public MasterClientException(Exception e) { super(e); } }
4,474
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/StageScaleRequest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.client;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;

/**
 * Request payload asking the master to scale a job stage to a given number of workers.
 *
 * Fix: a multi-argument {@code @JsonCreator} constructor requires {@code @JsonProperty}
 * on each parameter for Jackson to bind JSON properties to creator arguments; field-level
 * annotations alone do not name the constructor parameters. {@code @JsonIgnoreProperties}
 * is also moved to the class, its conventional target.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
public class StageScaleRequest {

    @JsonProperty("JobId")
    private final String jobId;
    @JsonProperty("StageNumber")
    private final int stageNumber;
    @JsonProperty("NumWorkers")
    private final int numWorkers;
    @JsonProperty("Reason")
    private final String reason;

    /**
     * @param jobId       id of the job whose stage is being scaled
     * @param stageNumber stage to scale
     * @param numWorkers  desired worker count for the stage
     * @param reason      free-text reason recorded with the scale request
     */
    @JsonCreator
    public StageScaleRequest(@JsonProperty("JobId") final String jobId,
                             @JsonProperty("StageNumber") final int stageNumber,
                             @JsonProperty("NumWorkers") final int numWorkers,
                             @JsonProperty("Reason") final String reason) {
        this.jobId = jobId;
        this.stageNumber = stageNumber;
        this.numWorkers = numWorkers;
        this.reason = reason;
    }

    public String getJobId() {
        return jobId;
    }

    public int getStageNumber() {
        return stageNumber;
    }

    public int getNumWorkers() {
        return numWorkers;
    }

    public String getReason() {
        return reason;
    }

    @Override
    public String toString() {
        return "StageScaleRequest{" +
                "jobId='" + jobId + '\'' +
                ", stageNumber=" + stageNumber +
                ", numWorkers=" + numWorkers +
                ", reason='" + reason + '\'' +
                '}';
    }
}
4,475
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/HttpUtility.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.client;

import java.nio.charset.Charset;
import java.util.NoSuchElementException;
import java.util.concurrent.TimeUnit;

import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpResponse;
import mantis.io.reactivex.netty.client.RxClient;
import mantis.io.reactivex.netty.pipeline.PipelineConfigurator;
import mantis.io.reactivex.netty.protocol.http.client.CompositeHttpClientBuilder;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientRequest;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Action1;
import rx.functions.Func1;

/**
 * Package-private helper for issuing one-off HTTP GET requests via RxNetty, used by the
 * master client to talk to a master host. Responses are aggregated in memory and
 * returned as decoded strings.
 */
/* package */ class HttpUtility {

    private static final Logger logger = LoggerFactory.getLogger(HttpUtility.class);
    // Overall timeout applied to the whole GET (connect + response) in seconds.
    private static final long GET_TIMEOUT_SECS = 30;
    // Redirect-follow limit passed to the RxNetty client config.
    private static final int MAX_REDIRECTS = 10;

    /**
     * Issues a GET to http://host:port/uri and emits the response body as a string.
     * Follows up to {@link #MAX_REDIRECTS} redirects and times out after
     * {@link #GET_TIMEOUT_SECS} seconds. Errors are logged and propagated to the
     * subscriber.
     *
     * @param host target host
     * @param port target port
     * @param uri  request URI (path + query)
     * @return observable emitting each content chunk decoded with the platform
     *         default charset
     */
    static Observable<String> getGetResponse(String host, int port, String uri) {
        return new CompositeHttpClientBuilder<ByteBuf, ByteBuf>()
                .appendPipelineConfigurator(
                        new PipelineConfigurator<HttpClientResponse<ByteBuf>, HttpClientRequest<ByteBuf>>() {
                            @Override
                            public void configureNewPipeline(ChannelPipeline pipeline) {
                                // Purely diagnostic duplex handler: logs the request URI on write
                                // and correlates it with the response when it is read back.
                                pipeline.addLast("introspecting-handler", new ChannelDuplexHandler() {
                                    // Captured from the outbound request so the inbound log line
                                    // can name which URI this channel's response belongs to.
                                    private String uri = "<undefined>";

                                    @Override
                                    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
                                        if (msg instanceof HttpRequest) {
                                            HttpRequest request = (HttpRequest) msg;
                                            uri = request.getUri();
                                            logger.info("Sending request on channel id: " + ctx.channel().toString() + ", request URI: " + uri);
                                        }
                                        super.write(ctx, msg, promise);
                                    }

                                    @Override
                                    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                                        if (msg instanceof HttpResponse) {
                                            logger.info("Received response on channel id: " + ctx.channel().toString() + ", request URI: " + uri);
                                        }
                                        super.channelRead(ctx, msg);
                                    }
                                });
                                // Replace the default aggregator with one allowing larger bodies;
                                // failures here are logged but deliberately non-fatal so the
                                // pipeline still works with whatever aggregator is present.
                                try {
                                    int maxContentLength = 10 * 1024 * 1024; // Ten megabytes
                                    pipeline.replace(HttpObjectAggregator.class, "http-object-aggregator",
                                            new HttpObjectAggregator(maxContentLength));
                                } catch (NoSuchElementException ex) {
                                    logger.error("HttpObjectAggregator did not exist in this pipeline. Error: {}", ex.getMessage(), ex);
                                } catch (IllegalArgumentException ex) {
                                    logger.error("ChannelHandler named http-object-aggregator already existed in this" +
                                            " pipeline. Error: {}", ex.getMessage(), ex);
                                } catch (Throwable t) {
                                    logger.error("Unknown error adding HttpObjectAggregator to Master Client " +
                                            "Pipeline. Error: {}", t.getMessage(), t);
                                }
                            }
                        })
                .build()
                .submit(new RxClient.ServerInfo(host, port),
                        HttpClientRequest.createGet(uri),
                        new HttpClient.HttpClientConfig.Builder().setFollowRedirect(true).followRedirect(MAX_REDIRECTS).build())
                // Unwrap the response into its content stream.
                .flatMap(new Func1<HttpClientResponse<ByteBuf>, Observable<ByteBuf>>() {
                    @Override
                    public Observable<ByteBuf> call(HttpClientResponse<ByteBuf> response) {
                        return response.getContent();
                    }
                })
                // NOTE(review): decodes with the platform default charset, not UTF-8 —
                // presumably acceptable for master responses; confirm if responses may be UTF-8 only.
                .map(new Func1<ByteBuf, String>() {
                    @Override
                    public String call(ByteBuf o) {
                        return o.toString(Charset.defaultCharset());
                    }
                })
                .doOnError(new Action1<Throwable>() {
                    @Override
                    public void call(Throwable throwable) {
                        logger.warn("Error: " + throwable.getMessage(), throwable);
                    }
                })
                .timeout(GET_TIMEOUT_SECS, TimeUnit.SECONDS);
    }
}
4,476
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/NoSuchJobException.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.client; public class NoSuchJobException extends Exception { public NoSuchJobException(String jobId) { super(jobId + " doesn't exist"); } public NoSuchJobException(String jobId, Throwable t) { super(jobId + " doesn't exist", t); } }
4,477
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/ResubmitJobWorkerRequest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.client;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;

/**
 * Request payload asking the master to resubmit a single worker of a job.
 *
 * Fix: a multi-argument {@code @JsonCreator} constructor requires {@code @JsonProperty}
 * on each parameter for Jackson to bind JSON properties to creator arguments; field-level
 * annotations alone do not name the constructor parameters. {@code @JsonIgnoreProperties}
 * is also moved to the class, its conventional target. (Property names — "JobId" vs
 * lowercase "user"/"workerNumber"/"reason" — are preserved exactly as the wire format
 * depends on them.)
 */
@JsonIgnoreProperties(ignoreUnknown = true)
public class ResubmitJobWorkerRequest {

    @JsonProperty("JobId")
    private final String jobId;
    @JsonProperty("user")
    private final String user;
    @JsonProperty("workerNumber")
    private final int workerNumber;
    @JsonProperty("reason")
    private final String reason;

    /**
     * @param jobId        id of the job owning the worker
     * @param user         user requesting the resubmit
     * @param workerNumber unique worker number to resubmit
     * @param reason       free-text reason recorded with the request
     */
    @JsonCreator
    public ResubmitJobWorkerRequest(@JsonProperty("JobId") final String jobId,
                                    @JsonProperty("user") final String user,
                                    @JsonProperty("workerNumber") final int workerNumber,
                                    @JsonProperty("reason") final String reason) {
        this.jobId = jobId;
        this.user = user;
        this.workerNumber = workerNumber;
        this.reason = reason;
    }

    public String getJobId() {
        return jobId;
    }

    public String getUser() {
        return user;
    }

    public int getWorkerNumber() {
        return workerNumber;
    }

    public String getReason() {
        return reason;
    }

    @Override
    public String toString() {
        return "ResubmitJobWorkerRequest{" +
                "jobId='" + jobId + '\'' +
                ", user='" + user + '\'' +
                ", workerNumber=" + workerNumber +
                ", reason='" + reason + '\'' +
                '}';
    }
}
4,478
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/JobSubmitResponse.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.client; public class JobSubmitResponse { private final String jobId; private final boolean failed; private final String errorMessage; public JobSubmitResponse(String jobId, boolean failed, String errorMessage) { this.jobId = jobId; this.failed = failed; this.errorMessage = errorMessage; } public String getJobId() { return jobId; } public boolean isFailed() { return failed; } public String getErrorMessage() { return errorMessage; } }
4,479
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/TestGetMasterMonitor.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.client; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.util.Properties; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; import com.sampullara.cli.Args; import com.sampullara.cli.Argument; import io.mantisrx.server.core.CoreConfiguration; import io.mantisrx.server.core.master.MasterDescription; import io.mantisrx.server.core.master.MasterMonitor; import io.mantisrx.server.core.zookeeper.CuratorService; import io.mantisrx.server.master.client.config.StaticPropertiesConfigurationFactory; import rx.functions.Action1; import rx.functions.Func1; public class TestGetMasterMonitor { @Argument(alias = "p", description = "Specify a configuration file", required = true) private static String propFile = ""; public static void main(String[] args) { try { Args.parse(TestGetMasterMonitor.class, args); } catch (IllegalArgumentException e) { Args.usage(TestGetMasterMonitor.class); System.exit(1); } Properties properties = new Properties(); System.out.println("propfile=" + propFile); try (InputStream inputStream = new FileInputStream(propFile)) { properties.load(inputStream); } catch (IOException e) { e.printStackTrace(); } final AtomicInteger counter = new AtomicInteger(); final CountDownLatch latch = new CountDownLatch(5); StaticPropertiesConfigurationFactory configurationFactory = 
new StaticPropertiesConfigurationFactory(properties); CoreConfiguration config = configurationFactory.getConfig(); final CuratorService curatorService = new CuratorService(config, null); MasterMonitor masterMonitor = curatorService.getMasterMonitor(); masterMonitor.getMasterObservable() .filter(new Func1<MasterDescription, Boolean>() { @Override public Boolean call(MasterDescription masterDescription) { return masterDescription != null; } }) .doOnNext(new Action1<MasterDescription>() { @Override public void call(MasterDescription masterDescription) { System.out.println(counter.incrementAndGet() + ": Got new master: " + masterDescription.toString()); latch.countDown(); } }) .subscribe(); curatorService.start(); try { latch.await(); } catch (InterruptedException e) { e.printStackTrace(); } } }
4,480
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/MantisClientException.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.client; public class MantisClientException extends Exception { public MantisClientException(String message) { super(message); } public MantisClientException(String message, Throwable cause) { super(message, cause); } public MantisClientException(Throwable cause) { super(cause); } }
4,481
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/MantisMasterClientApi.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.client; import java.io.IOException; import java.net.MalformedURLException; import java.net.URL; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException; import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference; import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature; import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper; import io.mantisrx.shaded.com.fasterxml.jackson.datatype.jdk8.Jdk8Module; import io.mantisrx.common.Label; import io.mantisrx.common.network.Endpoint; import io.mantisrx.runtime.JobSla; import io.mantisrx.runtime.MantisJobDefinition; import io.mantisrx.runtime.MantisJobState; import io.mantisrx.runtime.WorkerMigrationConfig; import io.mantisrx.runtime.codec.JsonCodec; import io.mantisrx.runtime.descriptor.SchedulingInfo; import io.mantisrx.runtime.parameter.Parameter; import io.mantisrx.server.core.JobAssignmentResult; import io.mantisrx.server.core.JobSchedulingInfo; import io.mantisrx.server.core.NamedJobInfo; import io.mantisrx.server.core.master.LocalMasterMonitor; import io.mantisrx.server.core.master.MasterDescription; import io.mantisrx.server.core.master.MasterMonitor; 
import io.netty.buffer.ByteBuf; import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.handler.codec.http.HttpStatusClass; import io.netty.handler.codec.http.websocketx.TextWebSocketFrame; import io.reactivex.mantis.remote.observable.ConnectToObservable; import io.reactivex.mantis.remote.observable.DynamicConnectionSet; import io.reactivex.mantis.remote.observable.ToDeltaEndpointInjector; import io.reactivex.mantis.remote.observable.reconciliator.Reconciliator; import mantis.io.reactivex.netty.RxNetty; import mantis.io.reactivex.netty.channel.ObservableConnection; import mantis.io.reactivex.netty.pipeline.PipelineConfigurators; import mantis.io.reactivex.netty.protocol.http.client.HttpClient; import mantis.io.reactivex.netty.protocol.http.client.HttpClientRequest; import mantis.io.reactivex.netty.protocol.http.client.HttpClientResponse; import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent; import mantis.io.reactivex.netty.protocol.http.websocket.WebSocketClient; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.functions.Func1; import rx.functions.Func2; /** * */ public class MantisMasterClientApi { static final String ConnectTimeoutSecsPropertyName = "MantisClientConnectTimeoutSecs"; private static final ObjectMapper objectMapper; private static final Logger logger = LoggerFactory.getLogger(MantisMasterClientApi.class); private static final String JOB_METADATA_FIELD = "jobMetadata"; private static final String STAGE_MEDATA_LIST_FIELD = "stageMetadataList"; private static final String STAGE_NUM_FIELD = "stageNum"; private static final String NUM_STAGES_FIELD = "numStages"; private static final int MAX_REDIRECTS = 10; private static final String API_JOBS_LIST_PATH = "/api/jobs/list"; private static final String API_JOBS_LIST_MATCHING_PATH = "/api/jobs/list/matching"; private static final String API_JOB_SUBMIT_PATH 
= "/api/submit"; private static final String API_JOB_NAME_CREATE = "/api/namedjob/create"; private static final String API_JOB_NAME_UPDATE = "/api/namedjob/update"; private static final String API_JOB_NAME_LIST = "/api/namedjob/list"; private static final String API_JOB_KILL = "/api/jobs/kill"; private static final String API_JOB_STAGE_SCALE = "/api/jobs/scaleStage"; private static final String API_JOB_RESUBMIT_WORKER = "/api/jobs/resubmitWorker"; // Retry attempts before giving up in connection to master // each attempt waits attempt amount of time, 10=55 seconds private static final int SUBSCRIBE_ATTEMPTS_TO_MASTER = 100; private static final int MAX_RANDOM_WAIT_RETRY_SEC = 10; // The following timeout should be what's in master configuration's mantis.scheduling.info.observable.heartbeat.interval.secs private static final long MASTER_SCHED_INFO_HEARTBEAT_INTERVAL_SECS = 120; static { objectMapper = new ObjectMapper(); objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); objectMapper.registerModule(new Jdk8Module()); } final String DEFAULT_RESPONSE = "NO_RESPONSE_FROM_MASTER"; private final long GET_TIMEOUT_SECS = 30; private final Observable<Endpoint> masterEndpoint; private final int subscribeAttemptsToMaster; private final Func1<Observable<? extends Throwable>, Observable<?>> retryLogic = attempts -> attempts .zipWith(Observable.range(1, Integer.MAX_VALUE), (Func2<Throwable, Integer, Integer>) (t1, integer) -> integer) .flatMap((Func1<Integer, Observable<?>>) integer -> { long delay = 2 * (integer > 10 ? 10 : integer); logger.info(": retrying conx after sleeping for " + delay + " secs"); return Observable.timer(delay, TimeUnit.SECONDS); }); private final Func1<Observable<? extends Void>, Observable<?>> repeatLogic = attempts -> attempts .zipWith(Observable.range(1, Integer.MAX_VALUE), (Func2<Void, Integer, Integer>) (t1, integer) -> integer) .flatMap((Func1<Integer, Observable<?>>) integer -> { long delay = 2 * (integer > 10 ? 
10 : integer); logger.warn("On Complete received! : repeating conx after sleeping for " + delay + " secs"); return Observable.timer(delay, TimeUnit.SECONDS); }); private MasterMonitor masterMonitor; /** * * @param masterMonitor */ public MantisMasterClientApi(MasterMonitor masterMonitor) { this.masterMonitor = masterMonitor; masterEndpoint = masterMonitor.getMasterObservable() .filter(masterDescription -> masterDescription != null) .map(description -> { logger.info("New Mantis Master notification, host: " + description.getHostname() + "," + " swapping out client API connection to new master."); return new Endpoint(description.getHostname(), description.getApiPortV2()); }); int a = SUBSCRIBE_ATTEMPTS_TO_MASTER; final String p = System.getProperty(ConnectTimeoutSecsPropertyName); if (p != null) { try { long t = Long.parseLong(p); a = Math.max(1, (int) Math.sqrt(2.0 * t)); // timeout = SUM(1 + 2 + ... + N) =~ (N^2)/2 } catch (NumberFormatException e) { logger.warn("Invalid number for connectTimeoutSecs: " + p); } } subscribeAttemptsToMaster = Integer.MAX_VALUE; } private String toUri(MasterDescription md, String path) { return "http://" + md.getHostname() + ":" + md.getApiPort() + path; } /** * * @param name * @param version * @param parameters * @param jobSla * @param schedulingInfo * @return */ public Observable<JobSubmitResponse> submitJob(final String name, final String version, final List<Parameter> parameters, final JobSla jobSla, final SchedulingInfo schedulingInfo) { return submitJob(name, version, parameters, jobSla, 0L, schedulingInfo, WorkerMigrationConfig.DEFAULT); } /** * * @param name * @param version * @param parameters * @param jobSla * @param subscriptionTimeoutSecs * @param schedulingInfo * @param migrationConfig * @return */ public Observable<JobSubmitResponse> submitJob(final String name, final String version, final List<Parameter> parameters, final JobSla jobSla, final long subscriptionTimeoutSecs, final SchedulingInfo schedulingInfo, final 
WorkerMigrationConfig migrationConfig) { return submitJob(name, version, parameters, jobSla, subscriptionTimeoutSecs, schedulingInfo, false, migrationConfig); } /** * * @param name * @param version * @param parameters * @param jobSla * @param subscriptionTimeoutSecs * @param schedulingInfo * @return */ public Observable<JobSubmitResponse> submitJob(final String name, final String version, final List<Parameter> parameters, final JobSla jobSla, final long subscriptionTimeoutSecs, final SchedulingInfo schedulingInfo) { return submitJob(name, version, parameters, jobSla, subscriptionTimeoutSecs, schedulingInfo, false, WorkerMigrationConfig.DEFAULT); } /** * * @param name * @param version * @param parameters * @param jobSla * @param subscriptionTimeoutSecs * @param schedulingInfo * @param readyForJobMaster * @return */ public Observable<JobSubmitResponse> submitJob(final String name, final String version, final List<Parameter> parameters, final JobSla jobSla, final long subscriptionTimeoutSecs, final SchedulingInfo schedulingInfo, final boolean readyForJobMaster) { return submitJob(name, version, parameters, jobSla, subscriptionTimeoutSecs, schedulingInfo, readyForJobMaster, WorkerMigrationConfig.DEFAULT); } /** * * @param name * @param version * @param parameters * @param jobSla * @param subscriptionTimeoutSecs * @param schedulingInfo * @param readyForJobMaster * @param migrationConfig * @return */ public Observable<JobSubmitResponse> submitJob(final String name, final String version, final List<Parameter> parameters, final JobSla jobSla, final long subscriptionTimeoutSecs, final SchedulingInfo schedulingInfo, final boolean readyForJobMaster, final WorkerMigrationConfig migrationConfig) { return submitJob(name, version, parameters, jobSla, subscriptionTimeoutSecs, schedulingInfo, readyForJobMaster, migrationConfig, new LinkedList<>()); } /** * * @param name * @param version * @param parameters * @param jobSla * @param subscriptionTimeoutSecs * @param schedulingInfo * 
@param readyForJobMaster * @param migrationConfig * @param labels * @return */ public Observable<JobSubmitResponse> submitJob(final String name, final String version, final List<Parameter> parameters, final JobSla jobSla, final long subscriptionTimeoutSecs, final SchedulingInfo schedulingInfo, final boolean readyForJobMaster, final WorkerMigrationConfig migrationConfig, final List<Label> labels) { try { String jobDef = getJobDefinitionString(name, null, version, parameters, jobSla, subscriptionTimeoutSecs, schedulingInfo, readyForJobMaster, migrationConfig, labels); return submitJob(jobDef); } catch (MalformedURLException | JsonProcessingException e) { return Observable.error(e); } } /** * * @param submitJobRequestJson * @return */ public Observable<JobSubmitResponse> submitJob(final String submitJobRequestJson) { return masterMonitor.getMasterObservable() .filter(masterDescription -> masterDescription != null) .switchMap((Func1<MasterDescription, Observable<JobSubmitResponse>>) masterDescription -> { String uri = "http://" + masterDescription.getHostname() + ":" + masterDescription.getApiPort() + API_JOB_SUBMIT_PATH; logger.info("Doing POST on " + uri); try { return getPostResponse(uri, submitJobRequestJson) .onErrorResumeNext(throwable -> { logger.warn("Can't connect to master: {}", throwable.getMessage(), throwable); return Observable.empty(); }) .map(s -> new JobSubmitResponse(s, false, null)); } catch (Exception e) { return Observable.error(e); } }); } private String getJobDefinitionString(String name, String jobUrl, String version, List<Parameter> parameters, JobSla jobSla, long subscriptionTimeoutSecs, SchedulingInfo schedulingInfo, boolean readyForJobMaster, final WorkerMigrationConfig migrationConfig, final List<Label> labels) throws JsonProcessingException, MalformedURLException { MantisJobDefinition jobDefinition = new MantisJobDefinition(name, System.getProperty("user.name"), jobUrl == null ? 
null : new URL(jobUrl), version, parameters, jobSla, subscriptionTimeoutSecs, schedulingInfo, 0, 0, null, null, readyForJobMaster, migrationConfig, labels); return objectMapper.writeValueAsString(jobDefinition); } public Observable<Void> killJob(final String jobId) { return killJob(jobId, "Unknown", "User requested"); } /** * * @param jobId * @param user * @param reason * @return */ public Observable<Void> killJob(final String jobId, final String user, final String reason) { return masterMonitor.getMasterObservable() .filter(md -> md != null) .switchMap((Func1<MasterDescription, Observable<Void>>) md -> { Map<String, String> content = new HashMap<>(); content.put("JobId", jobId); content.put("user", user); content.put("reason", reason); try { return getPostResponse(toUri(md, API_JOB_KILL), objectMapper.writeValueAsString(content)) .onErrorResumeNext(throwable -> { logger.warn("Can't connect to master: {}", throwable.getMessage(), throwable); return Observable.empty(); }) .map(s -> { logger.info(s); return null; }); } catch (JsonProcessingException e) { return Observable.error(e); } }); } /** * * @param jobId * @param stageNum * @param numWorkers * @param reason * @return */ public Observable<Boolean> scaleJobStage(final String jobId, final int stageNum, final int numWorkers, final String reason) { return masterMonitor .getMasterObservable() .filter(md -> md != null) .take(1) .flatMap((Func1<MasterDescription, Observable<Boolean>>) md -> { final StageScaleRequest stageScaleRequest = new StageScaleRequest(jobId, stageNum, numWorkers, reason); try { return submitPostRequest(toUri(md, API_JOB_STAGE_SCALE), objectMapper.writeValueAsString(stageScaleRequest)) .map(s -> { logger.info("POST to scale stage returned status: {}", s); return s.codeClass().equals(HttpStatusClass.SUCCESS); }); } catch (JsonProcessingException e) { logger.error("failed to serialize stage scale request {} to json", stageScaleRequest); return Observable.error(e); } }); } /** * * @param jobId * 
@param user * @param workerNum * @param reason * @return */ public Observable<Boolean> resubmitJobWorker(final String jobId, final String user, final int workerNum, final String reason) { return masterMonitor.getMasterObservable() .filter(md -> md != null) .take(1) .flatMap((Func1<MasterDescription, Observable<Boolean>>) md -> { final ResubmitJobWorkerRequest resubmitJobWorkerRequest = new ResubmitJobWorkerRequest(jobId, user, workerNum, reason); logger.info("sending request to resubmit worker {} for jobId {}", workerNum, jobId); try { return submitPostRequest(toUri(md, API_JOB_RESUBMIT_WORKER), objectMapper.writeValueAsString(resubmitJobWorkerRequest)) .map(s -> { logger.info("POST to resubmit worker {} returned status: {}", workerNum, s); return s.codeClass().equals(HttpStatusClass.SUCCESS); }); } catch (JsonProcessingException e) { logger.error("failed to serialize resubmit job worker request {} to json", resubmitJobWorkerRequest); return Observable.error(e); } }); } private Observable<HttpResponseStatus> submitPostRequest(String uri, String postContent) { logger.info("sending POST request to {} content {}", uri, postContent); return RxNetty .createHttpRequest( HttpClientRequest.createPost(uri) .withContent(postContent), new HttpClient.HttpClientConfig.Builder() .build()) .map(b -> b.getStatus()); } private Observable<String> getPostResponse(String uri, String postContent) { logger.info("sending POST request to {} content {}", uri, postContent); return RxNetty .createHttpRequest( HttpClientRequest.createPost(uri) .withContent(postContent), new HttpClient.HttpClientConfig.Builder() .build()) .flatMap((Func1<HttpClientResponse<ByteBuf>, Observable<ByteBuf>>) b -> b.getContent()) .map(o -> o.toString(Charset.defaultCharset())); } /** * * @param jobName * @return */ public Observable<Boolean> namedJobExists(final String jobName) { return masterMonitor.getMasterObservable() .filter(md -> md != null) .switchMap((Func1<MasterDescription, Observable<Boolean>>) 
masterDescription -> { String uri = API_JOB_NAME_LIST + "/" + jobName; logger.info("Calling GET on " + uri); return HttpUtility.getGetResponse(masterDescription.getHostname(), masterDescription.getApiPort(), uri) .onErrorResumeNext(throwable -> { logger.warn("Can't connect to master: {}", throwable.getMessage(), throwable); return Observable.error(throwable); }) .map(response -> { logger.debug("Job cluster response: " + response); JSONArray jsonArray = new JSONArray(response); return jsonArray.length() > 0; }) .retryWhen(retryLogic) ; }) .retryWhen(retryLogic) ; } /** * * @param jobId * @return */ public Observable<Integer> getSinkStageNum(final String jobId) { return masterMonitor.getMasterObservable() .filter(masterDescription -> masterDescription != null) .switchMap(masterDescription -> { String uri = API_JOBS_LIST_PATH + "/" + jobId; logger.info("Calling GET on " + uri); return HttpUtility.getGetResponse(masterDescription.getHostname(), masterDescription.getApiPort(), uri) .onErrorResumeNext(throwable -> { logger.warn("Can't connect to master: {}", throwable.getMessage(), throwable); return Observable.error(throwable); }) .flatMap(response -> { try { logger.info("Got response for job info on " + jobId); Integer sinkStage = getSinkStageNumFromJsonResponse(jobId, response); if (sinkStage < 0) { logger.warn("Job " + jobId + " not found"); return Observable.empty(); } return Observable.just(sinkStage); } catch (MasterClientException e) { logger.warn("Can't get sink stage info for " + jobId + ": " + e.getMessage()); return Observable.empty(); } }) .retryWhen(retryLogic) ; }); } /** * * @param jobName * @param state * @return */ // returns json array of job metadata public Observable<String> getJobsOfNamedJob(final String jobName, final MantisJobState.MetaState state) { return masterMonitor.getMasterObservable() .filter(masterDescription -> masterDescription != null) .switchMap(masterDescription -> { String uri = API_JOBS_LIST_MATCHING_PATH + "/" + jobName; if (state 
!= null) uri = uri + "?jobState=" + state; logger.info("Calling GET on " + uri); return HttpUtility.getGetResponse(masterDescription.getHostname(), masterDescription.getApiPort(), uri) .onErrorResumeNext(throwable -> { logger.warn("Can't connect to master: {}", throwable.getMessage(), throwable); return Observable.empty(); }); }) .retryWhen(retryLogic) ; } /** * Checks the existence of a jobId by calling GET on the Master * for /api/jobs/list/_jobId_ and ensuring the response is not an error. * * @param jobId The id of the Mantis job. * @return A boolean indicating whether the job id exists or not. */ public Observable<Boolean> jobIdExists(final String jobId) { return masterMonitor.getMasterObservable() .filter(masterDescription -> masterDescription != null) .switchMap(masterDescription -> { String uri = API_JOBS_LIST_PATH + "/" + jobId; logger.info("Calling GET on " + uri); return HttpUtility.getGetResponse(masterDescription.getHostname(), masterDescription.getApiPort(), uri) .onErrorResumeNext(throwable -> { logger.warn("Can't connect to master: {}", throwable.getMessage(), throwable); return Observable.empty(); }); }) .retryWhen(retryLogic) .map(payload -> !payloadIsError(payload)); } /** * Checks wether a master response is of the form <code>{"error": "message"}</code> * @param payload A string representation of the payload returned by Master GET /api/jobs/list/_jobId_ * @return A boolean indicating true if this payload represents an error. 
*/ private boolean payloadIsError(String payload) { try { Map<String, String> decoded = objectMapper.readValue(payload, new TypeReference<Map<String, String>>() {}); return decoded.get("error") != null; } catch(Exception ex) { // No op } return false; } private Integer getSinkStageNumFromJsonResponse(String jobId, String response) throws MasterClientException { final String throwMessage = "Can't parse json response for job " + jobId; if (response == null) { logger.warn("Null info response from master for job " + jobId); throw new MasterClientException(throwMessage); } try { JSONObject jsonObject = new JSONObject(response); JSONObject jobMetadata = jsonObject.optJSONObject(JOB_METADATA_FIELD); if (jobMetadata == null) { logger.warn("Didn't find meta data for job " + jobId + " in json (" + response + ")"); return -1; } String state = jobMetadata.optString("state"); if (state == null) { throw new MasterClientException("Can't read job state in response (" + response + ")"); } if (MantisJobState.isTerminalState(MantisJobState.valueOf(state))) { logger.info("Can't get sink stage of job in state " + MantisJobState.valueOf(state)); return -1; } int lastStage = 0; JSONArray stages = jsonObject.optJSONArray(STAGE_MEDATA_LIST_FIELD); if (stages == null) { logger.warn("Didn't find stages metadata for job " + jobId + " in json: " + response); throw new MasterClientException(throwMessage); } for (int i = 0; i < stages.length(); i++) { final JSONObject s = stages.getJSONObject(i); final int n = s.optInt(STAGE_NUM_FIELD, 0); lastStage = Math.max(lastStage, n); } if (lastStage == 0) { logger.warn("Didn't find " + STAGE_NUM_FIELD + " field in stage metadata json (" + response + ")"); throw new MasterClientException(throwMessage); } logger.info("Got sink stage number for job " + jobId + ": " + lastStage); return lastStage; } catch (JSONException e) { logger.error("Error parsing info for job " + jobId + " from json data (" + response + "): " + e.getMessage()); throw new 
MasterClientException(throwMessage); } } private HttpClient<ByteBuf, ServerSentEvent> getRxnettySseClient(String hostname, int port) { return RxNetty.<ByteBuf, ServerSentEvent>newHttpClientBuilder(hostname, port) .pipelineConfigurator(PipelineConfigurators.clientSseConfigurator()) //.enableWireLogging(LogLevel.ERROR) .withNoConnectionPooling().build(); } private WebSocketClient<TextWebSocketFrame, TextWebSocketFrame> getRxnettyWebSocketClient(String host, int port, String uri) { return RxNetty.<TextWebSocketFrame, TextWebSocketFrame>newWebSocketClientBuilder(host, port) .withWebSocketURI(uri) // .withWebSocketVersion(WebSocketVersion.V13) .build(); } /** * * @param jobId * @return */ public Observable<String> getJobStatusObservable(final String jobId) { return masterMonitor.getMasterObservable() .filter((md) -> md != null) .retryWhen(retryLogic) .switchMap((md) -> getRxnettyWebSocketClient(md.getHostname(), md.getConsolePort(), "ws://" + md.getHostname() + ":" + md.getApiPort() + "/job/status/" + jobId) .connect() .flatMap((ObservableConnection<TextWebSocketFrame, TextWebSocketFrame> connection) -> connection.getInput() .map((TextWebSocketFrame webSocketFrame) -> webSocketFrame.text()))) .onErrorResumeNext(Observable.empty()); } /** * * @param jobId * @return */ public Observable<JobSchedulingInfo> schedulingChanges(final String jobId) { return masterMonitor.getMasterObservable() .filter(masterDescription -> masterDescription != null) .retryWhen(retryLogic) .switchMap((Func1<MasterDescription, Observable<JobSchedulingInfo>>) masterDescription -> getRxnettySseClient( masterDescription.getHostname(), masterDescription.getSchedInfoPort()) .submit(HttpClientRequest.createGet("/assignmentresults/" + jobId + "?sendHB=true")) .flatMap((Func1<HttpClientResponse<ServerSentEvent>, Observable<JobSchedulingInfo>>) response -> { if (!HttpResponseStatus.OK.equals(response.getStatus())) { return Observable.error(new Exception(response.getStatus().reasonPhrase())); } return 
response.getContent() .map(event -> { try { return objectMapper.readValue(event.contentAsString(), JobSchedulingInfo.class); } catch (IOException e) { throw new RuntimeException("Invalid schedInfo json: " + e.getMessage(), e); } }) .timeout(3 * MASTER_SCHED_INFO_HEARTBEAT_INTERVAL_SECS, TimeUnit.SECONDS) .filter(schedulingInfo -> schedulingInfo != null && !JobSchedulingInfo.HB_JobId.equals(schedulingInfo.getJobId())) ; })) .repeatWhen(repeatLogic) .retryWhen(retryLogic) ; } /** * * @param jobName * @return */ public Observable<NamedJobInfo> namedJobInfo(final String jobName) { return masterMonitor.getMasterObservable() .filter(masterDescription -> masterDescription != null) .retryWhen(retryLogic) .switchMap((Func1<MasterDescription, Observable<NamedJobInfo>>) masterDescription -> getRxnettySseClient(masterDescription.getHostname(), masterDescription.getSchedInfoPort()) .submit(HttpClientRequest.createGet("/namedjobs/" + jobName + "?sendHB=true")) .flatMap((Func1<HttpClientResponse<ServerSentEvent>, Observable<NamedJobInfo>>) response -> { if (!HttpResponseStatus.OK.equals(response.getStatus())) return Observable.error(new Exception(response.getStatus().reasonPhrase())); return response.getContent() .map(event -> { try { return objectMapper.readValue(event.contentAsString(), NamedJobInfo.class); } catch (IOException e) { throw new RuntimeException("Invalid namedJobInfo json: " + e.getMessage(), e); } }) .timeout(3 * MASTER_SCHED_INFO_HEARTBEAT_INTERVAL_SECS, TimeUnit.SECONDS) .filter(namedJobInfo -> namedJobInfo != null && !JobSchedulingInfo.HB_JobId.equals(namedJobInfo.getName())) ; })) .repeatWhen(repeatLogic) .retryWhen(retryLogic) ; } /** * * @param jobId * @return */ public Observable<JobAssignmentResult> assignmentResults(String jobId) { ConnectToObservable.Builder<JobAssignmentResult> connectionBuilder = new ConnectToObservable.Builder<JobAssignmentResult>() .subscribeAttempts(subscribeAttemptsToMaster) .name("/v1/api/master/assignmentresults") .decoder(new 
JsonCodec<JobAssignmentResult>(JobAssignmentResult.class)); if (jobId != null && !jobId.isEmpty()) { Map<String, String> subscriptionParams = new HashMap<>(); subscriptionParams.put("jobId", jobId); connectionBuilder = connectionBuilder.subscribeParameters(subscriptionParams); } Observable<List<Endpoint>> changes = masterEndpoint .map(t1 -> { List<Endpoint> list = new ArrayList<>(1); list.add(t1); return list; }); Reconciliator<JobAssignmentResult> reconciliator = new Reconciliator.Builder<JobAssignmentResult>() .name("master-jobAssignmentResults") .connectionSet(DynamicConnectionSet.create(connectionBuilder, MAX_RANDOM_WAIT_RETRY_SEC)) .injector(new ToDeltaEndpointInjector(changes)) .build(); return Observable.merge(reconciliator.observables()); } }
4,482
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/config/ConfigurationFactory.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.client.config;

import io.mantisrx.server.core.CoreConfiguration;

/**
 * Abstraction over how the master-client's {@link CoreConfiguration} is produced,
 * decoupling consumers from the configuration source (e.g. static properties).
 */
public interface ConfigurationFactory {

    /** Returns the {@link CoreConfiguration} instance this factory provides. */
    CoreConfiguration getConfig();
}
4,483
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/config/StaticPropertiesConfigurationFactory.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.client.config; import io.mantisrx.server.core.CoreConfiguration; import io.mantisrx.server.core.MetricsCoercer; import org.skife.config.ConfigurationObjectFactory; import java.util.Properties; public class StaticPropertiesConfigurationFactory implements ConfigurationFactory { private final ConfigurationObjectFactory delegate; private final CoreConfiguration config; public StaticPropertiesConfigurationFactory(Properties props) { delegate = new ConfigurationObjectFactory(props); delegate.addCoercible(new MetricsCoercer(props)); // delegate.addCoercible(new MantisPropertiesCoercer(props)); config = delegate.build(CoreConfiguration.class); } @Override public CoreConfiguration getConfig() { return config; } @Override public String toString() { return "StaticPropertiesConfigurationFactory{" + "delegate=" + delegate + ", config=" + config + '}'; } }
4,484
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/diagnostic
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/diagnostic/plugin/DiagnosticMessageType.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.client.diagnostic.plugin; /** an enumeration of different types of diagnostic messages */ public enum DiagnosticMessageType { }
4,485
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/diagnostic
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/diagnostic/plugin/DiagnosticMessage.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.client.diagnostic.plugin;

import java.util.Map;

/**
 * Immutable value object carrying a single diagnostic event: a mandatory message type
 * plus optional error, descriptive text, and tags. Instances are created via the
 * {@link #builder(DiagnosticMessageType)} entry point.
 */
public class DiagnosticMessage {

    private final DiagnosticMessageType messageType;
    private final Throwable error;
    private final Map<String, String> tags;
    private final String description;

    private DiagnosticMessage(final Builder builder) {
        this.messageType = builder.messageType;
        this.error = builder.error;
        this.description = builder.description;
        this.tags = builder.tags;
    }

    /** used to construct a diagnostic message with the standard Java builder pattern */
    public static Builder builder(final DiagnosticMessageType messageType) {
        return new Builder(messageType);
    }

    /** the message type of the diagnostic message */
    public DiagnosticMessageType getMessageType() {
        return messageType;
    }

    /** if an exception is part of the diagnostic message, the exception. Otherwise null */
    public Throwable getError() {
        return error;
    }

    /** if the message has descriptive tags, the tags. Otherwise null */
    public Map<String, String> getTags() {
        return tags;
    }

    /** if there is descriptive text, like a log message associated with the log message, the text. Otherwise null */
    public String getDescription() {
        return description;
    }

    @Override
    public String toString() {
        return "DiagnosticMessage [messageType=" + messageType
                + ", error=" + error
                + ", tags=" + tags
                + ", description=" + description + "]";
    }

    /**
     * Fluent builder for {@link DiagnosticMessage}. The message type is fixed at creation;
     * error, description, and tags are optional and default to null.
     */
    public static class Builder {
        private final DiagnosticMessageType messageType;
        private Throwable error;
        private String description;
        private Map<String, String> tags;

        private Builder(final DiagnosticMessageType messageType) {
            this.messageType = messageType;
        }

        /** attaches an exception to the message being built */
        public Builder withError(final Throwable error) {
            this.error = error;
            return this;
        }

        /** attaches free-form descriptive text to the message being built */
        public Builder withDescription(final String description) {
            this.description = description;
            return this;
        }

        /** attaches key/value tags to the message being built */
        public Builder withTags(final Map<String, String> tags) {
            this.tags = tags;
            return this;
        }

        /** finalizes construction of the immutable message */
        public DiagnosticMessage build() {
            return new DiagnosticMessage(this);
        }
    }
}
4,486
0
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/diagnostic
Create_ds/mantis-control-plane/client/src/main/java/io/mantisrx/server/master/client/diagnostic/plugin/DiagnosticPlugin.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.client.diagnostic.plugin; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.observers.SerializedObserver; import rx.subjects.PublishSubject; /** mechanism for listening for diagnostic messages. You can record a message, and callers can subscribe to the observable to take action on the messages. * This is a simple singleton pattern class */ public class DiagnosticPlugin { private static final Logger logger = LoggerFactory.getLogger(DiagnosticPlugin.class); private final PublishSubject<DiagnosticMessage> messagePublisher = PublishSubject.create(); private final SerializedObserver<DiagnosticMessage> messagePublisherSerialized = new SerializedObserver<>(messagePublisher); private DiagnosticPlugin() { } /** * Record a message for diagnostic purposes. Serialized order is enforced. * @param message the message to record */ public void record(final DiagnosticMessage message) { if (message != null) { messagePublisherSerialized.onNext(message); } else { logger.error("RECORDING_NULL_MESSAGE_PROHBIITED"); } } /** * Return an observable that calling program can process using standard rx idioms. * @param maxBackPressureBuffer - maximum number of messages to permit before back pressure exceeeded. 
* @return an observable for processing */ public Observable<DiagnosticMessage> getDiagnosticObservable(final int maxBackPressureBuffer) { return messagePublisher.onBackpressureBuffer(maxBackPressureBuffer); } /** the singleton instance of the diagnostic plugin */ public static final DiagnosticPlugin INSTANCE = new DiagnosticPlugin(); }
4,487
0
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb/jdbc/DocumentDbFlapDoodleExtensionTest.java
/*
 * Copyright <2021> Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 *
 */
package software.amazon.documentdb.jdbc;

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import software.amazon.documentdb.jdbc.common.test.DocumentDbFlapDoodleExtension;

import java.io.IOException;

// Exercises the start/stop lifecycle helpers inherited from DocumentDbFlapDoodleExtension.
// Each test verifies the process state before and after every transition, so the
// assertion order inside each test is significant and must not be rearranged.
public class DocumentDbFlapDoodleExtensionTest extends DocumentDbFlapDoodleExtension {
    /**
     * Ensures any started instance is stopped.
     */
    @AfterAll
    protected static void cleanup() {
        stopMongoDbInstance();
    }

    /**
     * Tests that mongod can be started and stop using default parameters.
     * @throws IOException if unable to start the process.
     */
    @Test
    protected void testStartMongoDbDefault() throws IOException {
        // Full cycle: not running -> started -> running -> stopped -> not running.
        Assertions.assertFalse(isMongoDbProcessRunning());
        Assertions.assertTrue(startMongoDbInstance());
        Assertions.assertTrue(isMongoDbProcessRunning());
        Assertions.assertTrue(stopMongoDbInstance());
        Assertions.assertFalse(isMongoDbProcessRunning());
    }

    /**
     * Tests that mongod can be started on a non-default port.
     * @throws IOException if unable to start the process.
     */
    @Test
    protected void testStartMongoDbCustomPort() throws IOException {
        Assertions.assertFalse(isMongoDbProcessRunning());
        // 27018 is a non-default port (presumably chosen to avoid the stock mongod
        // port 27017 — confirm against DocumentDbFlapDoodleExtension's default).
        Assertions.assertTrue(startMongoDbInstance(27018));
        Assertions.assertTrue(isMongoDbProcessRunning());
        Assertions.assertEquals(27018, getMongoPort());
        Assertions.assertTrue(stopMongoDbInstance());
        Assertions.assertFalse(isMongoDbProcessRunning());
    }

    /**
     * Tests that mongod can be started with -auth flag.
     * @throws IOException if unable to start the process.
     */
    @Test
    protected void testStartMongoDbCustomArgs() throws IOException {
        Assertions.assertFalse(isMongoDbProcessRunning());
        // The boolean overload enables authentication per this test's Javadoc.
        Assertions.assertTrue(startMongoDbInstance(true));
        Assertions.assertTrue(isMongoDbProcessRunning());
        Assertions.assertTrue(stopMongoDbInstance());
        Assertions.assertFalse(isMongoDbProcessRunning());
    }
}
4,488
0
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb/jdbc/DocumentDbFlapDoodleTestTest.java
/*
 * Copyright <2021> Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 *
 */
package software.amazon.documentdb.jdbc;

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoCursor;
import com.mongodb.client.MongoDatabase;
import org.bson.BsonDocument;
import org.bson.Document;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import software.amazon.documentdb.jdbc.common.test.DocumentDbFlapDoodleExtension;
import software.amazon.documentdb.jdbc.common.test.DocumentDbFlapDoodleTest;

/**
 * Test that the base class can prepare test data.
 */
@ExtendWith(DocumentDbFlapDoodleExtension.class)
public class DocumentDbFlapDoodleTestTest extends DocumentDbFlapDoodleTest {
    /**
     * Tests that we can create a user and authenticate with it.
     */
    @Test
    protected void testAuthenticatedUser() {
        final String databaseName = "testAuthenticatedDatabase";
        final String authDatabase = "admin";
        final String username = "username";
        final String password = "password";
        createUser(databaseName, username, password);
        try (MongoClient client = createMongoClient(authDatabase, username, password)) {
            final MongoDatabase database = client.getDatabase(databaseName);
            // "ping" succeeds only if the client authenticated successfully.
            database.runCommand(new Document("ping", 1));
        }
    }

    /**
     * Tests that we can prepare data and retrieve it back again.
     */
    @Test
    protected void testPrepareData() {
        final String database = "testPrepareDataDatabase";
        final String collection = "testPrepareDataCollection";
        final int expectedRecordCount = 10;
        // Seed the collection via the base-class helper, then read it back directly.
        prepareSimpleConsistentData(database, collection, expectedRecordCount,
                ADMIN_USERNAME, ADMIN_PASSWORD);
        try (MongoClient client = createMongoClient(ADMIN_DATABASE, ADMIN_USERNAME, ADMIN_PASSWORD)) {
            final MongoDatabase mongoDatabase = client.getDatabase(database);
            final MongoCollection<BsonDocument> mongoCollection = mongoDatabase
                    .getCollection(collection, BsonDocument.class);
            final MongoCursor<BsonDocument> cursor = mongoCollection.find().cursor();
            int actualRecordCount = 0;
            while (cursor.hasNext()) {
                final BsonDocument document = cursor.next();
                actualRecordCount++;
                // prepareSimpleConsistentData is expected to write Integer.MAX_VALUE
                // into "fieldInt" for every record (presumed from this assertion;
                // confirm against the base class if the helper changes).
                Assertions.assertEquals(Integer.MAX_VALUE, document.getInt32("fieldInt").getValue());
            }
            Assertions.assertEquals(expectedRecordCount, actualRecordCount);
        }
    }
}
4,489
0
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb/jdbc/DocumentDbStatementBasicTest.java
/* * Copyright <2021> Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://www.apache.org/licenses/LICENSE-2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. * */ package software.amazon.documentdb.jdbc; import org.bson.BsonDateTime; import org.bson.BsonDocument; import org.bson.BsonTimestamp; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; import software.amazon.documentdb.jdbc.common.test.DocumentDbTestEnvironment; import java.io.IOException; import java.math.BigDecimal; import java.sql.Blob; import java.sql.Connection; import java.sql.Date; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; import java.text.ParseException; import java.text.SimpleDateFormat; import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; public class DocumentDbStatementBasicTest extends DocumentDbStatementTest { /** * Tests querying for all data types with all scan methods. * * @throws SQLException occurs if executing the statement or retrieving a value fails. * @throws IOException occurs if reading an input stream fails. 
*/ @DisplayName("Tests that all supported data types can be scanned and retrieved.") @ParameterizedTest(name = "testQueryWithAllDataTypes - [{index}] - {arguments}") @MethodSource({"getTestEnvironmentsForScanMethods"}) protected void testQueryWithAllDataTypes(final DocumentDbTestEnvironment testEnvironment, final DocumentDbMetadataScanMethod scanMethod) throws SQLException, IOException { setTestEnvironment(testEnvironment); final String collectionName = "testDocumentDbDriverTest-" + scanMethod.getName(); final int recordCount = 10; prepareSimpleConsistentData(collectionName, recordCount); try (Connection connection = getConnection(scanMethod)) { final DocumentDbStatement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery(String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), collectionName)); Assertions.assertNotNull(resultSet); int count = 0; while (resultSet.next()) { Assertions.assertTrue( Pattern.matches("^\\w+$", resultSet.getString(collectionName + "__id"))); Assertions.assertEquals(Double.MAX_VALUE, resultSet.getDouble("fieldDouble")); Assertions.assertEquals("新年快乐", resultSet.getString("fieldString")); Assertions.assertTrue( Pattern.matches("^\\w+$", resultSet.getString("fieldObjectId"))); Assertions.assertTrue(resultSet.getBoolean("fieldBoolean")); Assertions.assertEquals( Instant.parse("2020-01-01T00:00:00.00Z"), resultSet.getTimestamp("fieldDate").toInstant()); Assertions.assertEquals(Integer.MAX_VALUE, resultSet.getInt("fieldInt")); Assertions.assertEquals(Long.MAX_VALUE, resultSet.getLong("fieldLong")); Assertions.assertEquals("MaxKey", resultSet.getString("fieldMaxKey")); Assertions.assertEquals("MinKey", resultSet.getString("fieldMinKey")); Assertions.assertNull(resultSet.getString("fieldNull")); // Test for binary/blob types. 
final Blob blob = resultSet.getBlob("fieldBinary"); final byte[] expectedBytes = new byte[]{0, 1, 2}; // Note: pos is 1-indexed Assertions.assertArrayEquals( expectedBytes, blob.getBytes(1, (int) blob.length())); final byte[] actualBytes = new byte[(int) blob.length()]; Assertions.assertEquals(3, resultSet.getBinaryStream("fieldBinary").read(actualBytes)); Assertions.assertArrayEquals(expectedBytes, actualBytes); Assertions.assertEquals(Double.MAX_VALUE, resultSet.getBigDecimal("fieldDecimal128").doubleValue()); count++; } Assertions.assertEquals(recordCount, count); } } /** * Test querying when there is a conflict between an array and a scalar. The column * should become VARCHAR. * * @throws SQLException occurs if executing the statement or retrieving a value fails. */ @DisplayName("Tests querying when there is conflict between an array and a scalar.") @ParameterizedTest(name = "testArrayScalarConflict - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testArrayScalarConflict(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final List<BsonDocument> documents = new ArrayList<>(); BsonDocument document = BsonDocument.parse( "{ \"_id\" : \"key0\", \n" + " \"array\" : [ {\n" + " \"field1\" : 1, \n" + " \"field2\" : 2 \n" + " } ] \n" + "}" ); documents.add(document); document = BsonDocument.parse( "{ \"_id\" : \"key1\", \n" + " \"array\" : [ 1, 2, 3 ] \n" + "}" ); documents.add(document); insertBsonDocuments("testArrayScalarConflict", documents.toArray(new BsonDocument[]{})); try (Connection connection = getConnection()) { final DocumentDbStatement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery(String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testArrayScalarConflict_array")); for (int i = 0; i < 4; i++) { Assertions.assertTrue(resultSet.next()); final String arrayValue = resultSet.getString("value"); if (i == 0) { 
Assertions.assertEquals("{\"field1\": 1, \"field2\": 2}", arrayValue); } else { Assertions.assertEquals(String.valueOf(i), arrayValue); } } Assertions.assertFalse(resultSet.next()); } } /** * Test querying when there is a conflict between a document field and a scalar. The column * should become VARCHAR. * * @throws SQLException occurs if executing the statement or retrieving a value fails. */ @DisplayName("Test querying when there is a conflict between a document field and a scalar.") @ParameterizedTest(name = "testDocumentScalarConflict - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testDocumentScalarConflict(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final List<BsonDocument> documents = new ArrayList<>(); BsonDocument document = BsonDocument.parse( "{ \"_id\" : \"key0\", \n" + " \"doc\" : {\n" + " \"field1\" : 1, \n" + " \"field2\" : 2 \n" + " } \n" + "}" ); documents.add(document); document = BsonDocument.parse( "{ \"_id\" : \"key1\", \n" + " \"doc\" : 1 \n" + "}" ); documents.add(document); insertBsonDocuments("testDocumentScalarConflict", documents.toArray(new BsonDocument[]{})); try (Connection connection = getConnection()) { final DocumentDbStatement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery(String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testDocumentScalarConflict")); for (int i = 0; i < 2; i++) { Assertions.assertTrue(resultSet.next()); final String arrayValue = resultSet.getString("doc"); if (i == 0) { Assertions.assertEquals("{\"field1\": 1, \"field2\": 2}", arrayValue); } else { Assertions.assertEquals(String.valueOf(i), arrayValue); } } Assertions.assertFalse(resultSet.next()); } } /** * Test querying when there is a conflict between a document field and an array. The column * should become VARCHAR. * * @throws SQLException occurs if executing the statement or retrieving a value fails. 
*/ @DisplayName("Test querying when there is a conflict between a document field and an array.") @ParameterizedTest(name = "testArrayDocumentConflict - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testArrayDocumentConflict(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final List<BsonDocument> documents = new ArrayList<>(); BsonDocument document = BsonDocument.parse( "{ \"_id\" : \"key0\", \n" + " \"field\" : {\n" + " \"field1\" : 1, \n" + " \"field2\" : 2 \n" + " } \n" + "}" ); documents.add(document); document = BsonDocument.parse( "{ \"_id\" : \"key1\", \n" + " \"field\" : [1, 2, 3, 4] \n" + "}" ); documents.add(document); insertBsonDocuments("testArrayDocumentConflict", documents.toArray(new BsonDocument[]{})); try (Connection connection = getConnection()) { final DocumentDbStatement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery(String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testArrayDocumentConflict")); for (int i = 0; i < 2; i++) { Assertions.assertTrue(resultSet.next()); final String arrayValue = resultSet.getString("field"); if (i == 0) { Assertions.assertEquals("{\"field1\": 1, \"field2\": 2}", arrayValue); } else { Assertions.assertEquals("[1, 2, 3, 4]", arrayValue); } } Assertions.assertFalse(resultSet.next()); } } /** * Test querying when there is a conflict between a document field and an array of mixed type. The column * should become VARCHAR. * * @throws SQLException occurs if executing the statement or retrieving a value fails. 
*/ @DisplayName("Test querying when there is a conflict between a document field and an array of mixed type.") @ParameterizedTest(name = "testDocumentAndArrayOfMixedTypesConflict - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testDocumentAndArrayOfMixedTypesConflict(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final List<BsonDocument> documents = new ArrayList<>(); BsonDocument document = BsonDocument.parse( "{ \"_id\" : \"key0\", \n" + " \"field\" : {\n" + " \"field1\" : 1, \n" + " \"field2\" : 2 \n" + " } \n" + "}" ); documents.add(document); document = BsonDocument.parse( "{ \"_id\" : \"key1\", \n" + " \"field\" : [\n " + " {\n" + " \"field1\" : 1,\n" + " \"field2\" : 2 \n" + " },\n " + " 1 ] \n " + "}" ); documents.add(document); insertBsonDocuments("testDocumentAndArrayOfMixedTypesConflict", documents.toArray(new BsonDocument[]{})); try (Connection connection = getConnection()) { final DocumentDbStatement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery(String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testDocumentAndArrayOfMixedTypesConflict")); for (int i = 0; i < 2; i++) { Assertions.assertTrue(resultSet.next()); final String arrayValue = resultSet.getString("field"); if (i == 0) { Assertions.assertEquals("{\"field1\": 1, \"field2\": 2}", arrayValue); } else { Assertions.assertEquals("[{\"field1\": 1, \"field2\": 2}, 1]", arrayValue); } } Assertions.assertFalse(resultSet.next()); } } /** * Tests that documents missing a sub-document do not create null rows. 
*/ @DisplayName("Test that documents not containing a sub-document do not add null rows.") @ParameterizedTest(name = "testDocumentWithMissingSubDocument - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testDocumentWithMissingSubDocument(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String collection = "testMissingSubDocumentNotNull"; final List<BsonDocument> documents = new ArrayList<>(); BsonDocument document = BsonDocument.parse( "{ \"_id\" : \"key0\", \n" + " \"name\" : \"withDocument\", \n" + " \"subDocument\" : {\n" + " \"field1\" : 1, \n" + " \"field2\" : 2 \n" + " } \n" + "}" ); documents.add(document); document = BsonDocument.parse( "{ \"_id\" : \"key1\", \n" + " \"name\" : \"withoutDocument\" \n" + "}" ); documents.add(document); insertBsonDocuments(collection,documents.toArray(new BsonDocument[]{})); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); statement.execute(String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), collection + "_subDocument")); final ResultSet results = statement.getResultSet(); Assertions.assertTrue(results.next()); Assertions.assertEquals("key0", results.getString(1)); Assertions.assertEquals(1, results.getInt(2)); Assertions.assertEquals(2, results.getInt(3)); Assertions.assertFalse(results.next(), "Contained unexpected extra row."); } } /** * Tests that documents missing a nested sub-document do not create null rows. 
*/ @DisplayName("Test that documents not containing a sub-document do not add null rows.") @ParameterizedTest(name = "testDocumentWithMissingNestedSubDocument - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testDocumentWithMissingNestedSubDocument(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String collection = "testMissingNestedSubDocumentNotNull"; final List<BsonDocument> documents = new ArrayList<>(); BsonDocument document = BsonDocument.parse( "{ \"_id\" : \"key0\", \n" + " \"name\" : \"withDocument\", \n" + " \"subDocument\" : {\n" + " \"field1\" : 1, \n" + " \"nestedSubDoc\" : {\n" + " \"nestedField\": 7 \n" + " } \n" + " } \n" + "}" ); documents.add(document); document = BsonDocument.parse( "{ \"_id\" : \"key1\", \n" + " \"name\" : \"withoutDocument\" \n" + "}" ); documents.add(document); insertBsonDocuments(collection, documents.toArray(new BsonDocument[]{})); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); statement.execute(String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), collection + "_subDocument_nestedSubDoc")); final ResultSet results = statement.getResultSet(); Assertions.assertTrue(results.next()); Assertions.assertEquals("key0", results.getString(1)); Assertions.assertEquals(7, results.getInt(2)); Assertions.assertFalse(results.next(), "Contained unexpected extra row."); } } /** * Tests COUNT(columnName) doesn't count null or missing values. * * @throws SQLException occurs if query fails. 
*/ @DisplayName("Tests count('column') works ensuring null/undefined values are not counted") @ParameterizedTest(name = "testCountColumnName - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testCountColumnName(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testCountColumn"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": \"abc\"}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": null}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103\n}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format("SELECT COUNT(\"field\") from \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals(1, resultSet.getInt(1)); Assertions.assertFalse(resultSet.next()); } } /** * Tests that SUM(1) works, equivalent to COUNT(*). * * @throws SQLException occurs if query fails. 
*/ @DisplayName("Tests query with SUM(1).") @ParameterizedTest(name = "testQuerySumOne - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testQuerySumOne(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testQuerySumOne"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": 4}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format("SELECT SUM(1) from \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals(3, resultSet.getInt(1)); Assertions.assertFalse(resultSet.next()); } } /** * Tests that queries containing ORDER BY and OFFSET work. * @throws SQLException occurs if query or connection fails. 
*/ @DisplayName("Tests queries with OFFSET") @ParameterizedTest(name = "testQueryOffset - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testQueryOffset(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testQueryOffset"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"fieldA\": 1,\n" + "\"fieldB\": 2}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"fieldA\": 3,\n" + "\"fieldB\": 4}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"fieldA\": 5,\n" + "\"fieldB\": 6}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" LIMIT 2 OFFSET 1", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("102", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("103", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } /** * Tests that queries containing IN (c1, c2...) work. * @throws SQLException occurs if query or connection fails. 
     */
    @DisplayName("Tests queries with WHERE [field] IN (...)")
    @ParameterizedTest(name = "testQueryWhereIN - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQueryWhereIN(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQueryWhereIN";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" +
                "\"fieldA\": 1,\n" +
                "\"fieldB\": \"abc\"}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" +
                "\"fieldA\": 3,\n" +
                "\"fieldB\": \"def\"}");
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" +
                "\"fieldA\": 5,\n" +
                "\"fieldB\": \"ghi\"}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // Numeric IN list: expect documents 101 and 103 (fieldA values 1 and 5).
            ResultSet resultSet = statement.executeQuery(
                    String.format(
                            "SELECT * FROM \"%s\".\"%s\" WHERE \"fieldA\" IN (1, 5)",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("101", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("103", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
            // String IN list: expect documents 101 and 103 (fieldB values 'abc' and 'ghi').
            resultSet = statement.executeQuery(
                    String.format(
                            "SELECT * FROM \"%s\".\"%s\" WHERE \"fieldB\" IN ('abc', 'ghi')",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("101", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("103", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests that queries containing NOT IN (c1, c2...) work.
     * @throws SQLException occurs if query or connection fails.
     */
    @DisplayName("Tests queries with WHERE [field] NOT IN (...)")
    @ParameterizedTest(name = "testQueryWhereNotIN - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQueryWhereNotIN(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQueryWhereNOTIN";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" +
                "\"fieldA\": 1,\n" +
                "\"fieldB\": \"abc\"}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" +
                "\"fieldA\": 3,\n" +
                "\"fieldB\": \"def\"}");
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" +
                "\"fieldA\": 5, \n" +
                "\"fieldB\": \"ghi\"}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // NOT IN over numerics: only document 102 (fieldA = 3) survives the filter.
            ResultSet resultSet = statement.executeQuery(
                    String.format(
                            "SELECT * FROM \"%s\".\"%s\" WHERE \"fieldA\" NOT IN (1, 5)",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("102", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
            // NOT IN over strings: only document 102 (fieldB = 'def') survives the filter.
            resultSet = statement.executeQuery(
                    String.format(
                            "SELECT * FROM \"%s\".\"%s\" WHERE \"fieldB\" NOT IN ('abc', 'ghi')",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("102", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests that queries containing casts from numbers work.
     * @throws SQLException occurs if query or connection fails.
     */
    @DisplayName("Tests queries with cast from number.")
    @ParameterizedTest(name = "testQueryCastNum - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQueryCastNum(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQueryCastNum";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" +
                "\"fieldA\": 1,\n" +
                "\"fieldB\": 1.2," +
                "\"fieldC\": 10000000000}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format(
                            "SELECT CAST(\"fieldA\" AS INTEGER), " +
                                    "CAST(\"fieldA\" AS BIGINT), " +
                                    "CAST(\"fieldA\" AS DOUBLE), " +
                                    "CAST(\"fieldB\" AS INTEGER)," +
                                    "CAST(\"fieldB\" AS BIGINT), " +
                                    "CAST(\"fieldB\" AS DOUBLE), " +
                                    "CAST(\"fieldC\" AS BIGINT), " +
                                    "CAST(\"fieldC\" AS DOUBLE) " +
                                    " FROM \"%s\".\"%s\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals(1, resultSet.getInt(1));
            Assertions.assertEquals(1L, resultSet.getLong(2));
            Assertions.assertEquals(1D, resultSet.getDouble(3));
            // 1.2 cast to the integral types truncates to 1.
            Assertions.assertEquals(1, resultSet.getInt(4));
            Assertions.assertEquals(1L, resultSet.getLong(5));
            Assertions.assertEquals(1.2D, resultSet.getDouble(6));
            Assertions.assertEquals(10000000000L, resultSet.getLong(7));
            Assertions.assertEquals(10000000000D, resultSet.getDouble(8));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests that queries containing casts from strings work.
     * @throws SQLException occurs if query or connection fails.
     */
    @Disabled("Cast to date not working.")
    @DisplayName("Tests queries with cast from string.")
    @ParameterizedTest(name = "testQueryCastString - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQueryCastString(final DocumentDbTestEnvironment testEnvironment) throws SQLException, ParseException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQueryCastString";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" +
                "\"fieldA\": \"2020-03-11\", \n" +
                "\"fieldNum\": \"100.5\"}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format(
                            "SELECT CAST(\"fieldA\" AS DATE), " +
                                    "CAST(\"fieldA\" AS TIMESTAMP), " +
                                    "CAST(\"fieldNum\" AS DOUBLE), " +
                                    "CAST(\"fieldNum\" AS INTEGER)," +
                                    "CAST(\"fieldNum\" AS BIGINT) " +
                                    " FROM \"%s\".\"%s\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("2020-03-11", resultSet.getString(1));
            Assertions.assertEquals(
                    new SimpleDateFormat("yyyy/MM/dd").parse("2020/03/11").getTime(),
                    resultSet.getTimestamp(2).getTime());
            Assertions.assertEquals(100.5D, resultSet.getDouble(3));
            // "100.5" cast to the integral types truncates to 100.
            Assertions.assertEquals(100, resultSet.getInt(4));
            Assertions.assertEquals(100, resultSet.getLong(5));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests that queries containing casts from dates work.
     * @throws SQLException occurs if query or connection fails.
     */
    @DisplayName("Tests queries with cast from date.")
    @ParameterizedTest(name = "testQueryCastDate - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQueryCastDate(final DocumentDbTestEnvironment testEnvironment) throws SQLException, ParseException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQueryCastDate";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
        // BsonTimestamp takes whole seconds plus an increment; convert millis to seconds.
        doc1.append("date", new BsonTimestamp((int) TimeUnit.MILLISECONDS.toSeconds(
                new SimpleDateFormat("yyyy/MM/dd").parse("2020/03/11").getTime()), 1));
        insertBsonDocuments(tableName, new BsonDocument[]{doc1});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format(
                            "SELECT CAST(\"date\" AS DATE), " +
                                    "CAST(\"date\" AS TIMESTAMP), " +
                                    "CAST(\"date\" AS TIME)," +
                                    "CAST(\"date\" AS VARCHAR) " +
                                    " FROM \"%s\".\"%s\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("2020-03-11", resultSet.getDate(1).toString());
            Assertions.assertEquals(
                    new SimpleDateFormat("yyyy/MM/dd").parse("2020/03/11").getTime(),
                    resultSet.getTimestamp(2).getTime());
            Assertions.assertEquals(
                    new SimpleDateFormat("yyyy/MM/dd").parse("2020/03/11").getTime(),
                    resultSet.getTime(3).getTime());
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests that queries containing casts from boolean work.
     * @throws SQLException occurs if query or connection fails.
     */
    @DisplayName("Tests queries with cast from boolean.")
    @ParameterizedTest(name = "testQueryCastBoolean - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQueryCastBoolean(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQueryCastBoolean";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" +
                "\"fieldA\": false}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format(
                            "SELECT CAST(\"fieldA\" AS VARCHAR)" +
                                    " FROM \"%s\".\"%s\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("false", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests that queries containing various nested casts work.
     * @throws SQLException occurs if query or connection fails.
*/ @Disabled("Casts are not functioning from string to date.") @DisplayName("Tests queries with nested CAST.") @ParameterizedTest(name = "testQueryNestedCast - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testQueryNestedCast(final DocumentDbTestEnvironment testEnvironment) throws SQLException, ParseException { setTestEnvironment(testEnvironment); final String tableName = "testQueryNestedCast"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"dateString\": \"2020-03-11\"," + "\"fieldNum\": 7," + "\"fieldString\": \"5\"}"); doc1.append("fieldTimestamp", new BsonTimestamp( new SimpleDateFormat("yyyy/MM/dd").parse("2020/03/11").getTime())); insertBsonDocuments(tableName, new BsonDocument[]{doc1}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format( "SELECT CAST(CAST(\"dateString\" AS DATE) AS VARCHAR), " + "CAST(CAST(\"fieldTimestamp\" AS DATE) AS VARCHAR), " + "CAST(CAST(\"fieldNum\" AS DOUBLE) AS INTEGER)" + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("2020-03-11", resultSet.getString(1)); Assertions.assertEquals("2020-03-11", resultSet.getString(2)); Assertions.assertEquals(5, resultSet.getInt(3)); Assertions.assertFalse(resultSet.next()); } } /** * Tests that the result set of a query does not close prematurely when results are retrieved in multiple batches. * Uses max fetch size of 10 to ensure multiple batches are retrieved. * @throws SQLException if connection or query fails. 
     */
    @DisplayName("Tests that the result set does not close prematurely when results are retrieved in multiple batches.")
    @ParameterizedTest(name = "testResultSetDoesNotClose - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testResultSetDoesNotClose(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testResultsetClose";
        final int numDocs = 100;
        final BsonDocument[] docs = new BsonDocument[numDocs];
        for (int i = 0; i < numDocs; i++) {
            final BsonDocument doc = BsonDocument.parse("{\"_id\": " + i + ", \n" +
                    "\"field\":\"abc\"}");
            docs[i] = doc;
        }
        insertBsonDocuments(tableName, docs);
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // Fetch size of 1 forces the 100 rows to arrive over many batches.
            statement.setFetchSize(1);
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT * from \"%s\".\"%s\"", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            for (int i = 0; i < numDocs; i++) {
                Assertions.assertTrue(resultSet.next());
                Assertions.assertEquals(String.valueOf(i), resultSet.getString(1));
            }
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests that queries with a batch size of zero work.
     * @throws SQLException if connection or query fails.
     */
    @DisplayName("Tests that a batch size of zero works.")
    @ParameterizedTest(name = "testBatchSizeZero - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testBatchSizeZero(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testBatchSizeZero";
        final int numDocs = 10;
        final BsonDocument[] docs = new BsonDocument[numDocs];
        for (int i = 0; i < numDocs; i++) {
            final BsonDocument doc = BsonDocument.parse("{\"_id\": " + i + ", \n" +
                    "\"field\":\"abc\"}");
            docs[i] = doc;
        }
        insertBsonDocuments(tableName, docs);
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // Fetch size 0 lets the driver choose its own batch size.
            statement.setFetchSize(0);
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT * from \"%s\".\"%s\"", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            for (int i = 0; i < numDocs; i++) {
                Assertions.assertTrue(resultSet.next());
                Assertions.assertEquals(String.valueOf(i), resultSet.getString(1));
            }
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests for queries containing IS NULL and IS NOT NULL in the select clause.
     *
     * @throws SQLException occurs if query fails.
     */
    @DisplayName("Tests queries with IS [NOT] NULL in the select clause.")
    @ParameterizedTest(name = "testQuerySelectIsNull - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQuerySelectIsNull(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQuerySelectIsNull";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" +
                "\"field\": \"abc\"}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" +
                "\"field\": null}");
        // Document with "field" entirely absent — should behave like explicit null.
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format(
                            "SELECT \"field\" IS NULL, \"field\" IS NOT NULL FROM \"%s\".\"%s\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertFalse(resultSet.getBoolean(1));
            Assertions.assertTrue(resultSet.getBoolean(2));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertTrue(resultSet.getBoolean(1));
            Assertions.assertFalse(resultSet.getBoolean(2));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertTrue(resultSet.getBoolean(1));
            Assertions.assertFalse(resultSet.getBoolean(2));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Test that queries selecting a boolean expression with NOT work.
     * @throws SQLException occurs if query fails.
     */
    @DisplayName("Test that queries selecting boolean expressions with NOT are correct.")
    @ParameterizedTest(name = "testQueryWithNot - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQueryWithNot(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQueryWithNot";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" +
                "\"field1\": true, \n" +
                "\"field2\": false, \n" +
                "\"field3\": 1}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" +
                "\"field1\": true, \n" +
                "\"field2\": true, \n" +
                "\"field3\": 5}");
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" +
                "\"field1\": false, \n" +
                "\"field2\": false, \n" +
                "\"field3\": 5}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT NOT (\"field1\"), " +
                            "NOT (\"field1\" AND \"field2\"), " +
                            "NOT (\"field1\" OR \"field2\"), " +
                            "NOT (\"field1\" AND \"field3\" > 2) FROM \"%s\".\"%s\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertFalse(resultSet.getBoolean(1));
            Assertions.assertTrue(resultSet.getBoolean(2));
            Assertions.assertFalse(resultSet.getBoolean(3));
            Assertions.assertTrue(resultSet.getBoolean(4));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertFalse(resultSet.getBoolean(1));
            Assertions.assertFalse(resultSet.getBoolean(2));
            Assertions.assertFalse(resultSet.getBoolean(3));
            Assertions.assertFalse(resultSet.getBoolean(4));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertTrue(resultSet.getBoolean(1));
            Assertions.assertTrue(resultSet.getBoolean(2));
            Assertions.assertTrue(resultSet.getBoolean(3));
            Assertions.assertTrue(resultSet.getBoolean(4));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Test that queries selecting a boolean expression with NOT from nulls.
     * @throws SQLException occurs if query fails.
     */
    @DisplayName("Test that queries selecting a boolean expression with NOT from nulls are correct.")
    @ParameterizedTest(name = "testQueryWithAndOrNotNulls - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQueryWithAndOrNotNulls(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQueryWithAndOrNotNulls";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101, \n" +
                "\"field1\": true, \n" + // Added this document only for metadata
                "\"field2\": true, \n" +
                "\"field3\": 1}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102, \n" +
                "\"field1\": null, \n" +
                "\"field2\": null, \n" +
                "\"field3\": null}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT NOT (\"field1\" AND \"field2\"), " +
                            "NOT (\"field1\" OR \"field2\"), " +
                            "NOT (\"field1\" AND \"field3\" > 2) FROM \"%s\".\"%s\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            // Skip the metadata row; only the all-null row's values are checked.
            Assertions.assertTrue(resultSet.next());
            Assertions.assertTrue(resultSet.next());
            Assertions.assertNull(resultSet.getString(1));
            Assertions.assertNull(resultSet.getString(2));
            Assertions.assertNull(resultSet.getString(3));
            Assertions.assertFalse(resultSet.next());
        }
    }

    // Verifies COALESCE returns the first non-null argument per row.
    @DisplayName("Test that queries using COALESCE() are correct.")
    @ParameterizedTest(name = "testQueryCoalesce - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQueryCoalesce(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQueryCoalesce";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101, \n" +
                "\"field1\": null, \n" + // Added this document only for metadata
                "\"field2\": 1, \n" +
                "\"field3\": 2}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102, \n" +
                "\"field1\": null, \n" +
                "\"field2\": null, \n" +
                "\"field3\": 2}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT COALESCE(\"%s\", \"%s\", \"%s\" ) FROM \"%s\".\"%s\"",
                            "field1", "field2", "field3", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals(resultSet.getInt(1), 1);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals(resultSet.getInt(1), 2);
            Assertions.assertFalse(resultSet.next());
        }
    }

    @DisplayName("Tests closing a Statement will not cause exception for cancelQuery.")
    @ParameterizedTest(name = "testCloseStatement - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testCloseStatement(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            Assertions.assertDoesNotThrow(statement::close);
        }
    }

    // Selecting a bare boolean expression: null operands yield SQL null (getBoolean -> false).
    @ParameterizedTest(name = "testQueryWithSelectBoolean - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQuerySelectBoolean(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQuerySelectBooleanExpr";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101, \n" +
                "\"field\": 1, \n " +
                "\"field2\": 2}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102, \n" +
                "\"field\": 1, \n " +
                "\"field2\": 3}");
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103, \n" +
                "\"field\": 1, \n " +
                "\"field2\": null}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT \"field2\" <> 2 FROM \"%s\".\"%s\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertFalse(resultSet.getBoolean(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertTrue(resultSet.getBoolean(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertFalse(resultSet.getBoolean(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    // NOT over a nullable boolean: NOT(null) stays null in three-valued logic.
    @ParameterizedTest(name = "testQueryWithNotNull - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQuerySelectNotWithNull(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQuerySelectNotNulls";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101, \n" +
                "\"field\": true}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102, \n" +
                "\"field\": false}");
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103, \n" +
                "\"field\": null}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT NOT(\"field\") FROM \"%s\".\"%s\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertFalse(resultSet.getBoolean(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertTrue(resultSet.getBoolean(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertNull(resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    // Two-operand AND/OR over nullable booleans, covering the full truth table with nulls.
    @ParameterizedTest(name = "testQuerySelectLogicNulls - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQuerySelectLogicNulls(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQuerySelectLogicNulls";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101, \n" +
                "\"field\": true, \n" +
                "\"field1\": null}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102, \n" +
                "\"field\": false, \n" +
                "\"field1\": null}");
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103, \n" +
                "\"field\": true, \n" +
                "\"field1\": true}");
        final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104, \n" +
                "\"field\": null, \n" +
                "\"field1\": null}");
        final BsonDocument doc5 = BsonDocument.parse("{\"_id\": 105, \n" +
                "\"field\": true, \n" +
                "\"field1\": false}");
        final BsonDocument doc6 = BsonDocument.parse("{\"_id\": 106, \n" +
                "\"field\": false, \n" +
                "\"field1\": true}");
        final BsonDocument doc7 = BsonDocument.parse("{\"_id\": 107, \n" +
                "\"field\": false, \n" +
                "\"field1\": false}");
        insertBsonDocuments(tableName,
                new BsonDocument[]{doc1, doc2, doc3, doc4, doc5, doc6, doc7});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT " +
                            "(\"field\" AND \"field1\")," +
                            "(\"field\" OR \"field1\")" +
                            "FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertNull(resultSet.getString(1));
            Assertions.assertTrue(resultSet.getBoolean(2));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertFalse(resultSet.getBoolean(1));
            Assertions.assertNull(resultSet.getString(2));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertTrue(resultSet.getBoolean(1));
            Assertions.assertTrue(resultSet.getBoolean(2));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertNull(resultSet.getString(1));
            Assertions.assertNull(resultSet.getString(2));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertFalse(resultSet.getBoolean(1));
            Assertions.assertTrue(resultSet.getBoolean(2));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertFalse(resultSet.getBoolean(1));
            Assertions.assertTrue(resultSet.getBoolean(2));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertFalse(resultSet.getBoolean(1));
            Assertions.assertFalse(resultSet.getBoolean(2));
            Assertions.assertFalse(resultSet.next());
        }
    }

    // Ten-operand AND/OR chains mixing true/false/null operands.
    @ParameterizedTest(name = "testQuerySelectLogicManyNulls - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQuerySelectLogicManyNulls(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQuerySelectLogicManyNulls";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101, \n" +
                "\"field\": true, \n" +
                "\"field1\": true, \n" +
                "\"field2\": null, \n" +
                "\"field3\": null, \n" +
                "\"field4\": true, \n" +
                "\"field5\": true, \n" +
                "\"field6\": false, \n" +
                "\"field7\": false, \n" +
                "\"field8\": true, \n" +
                "\"field9\": true}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102, \n" +
                "\"field\": true, \n" +
                "\"field1\": true, \n" +
                "\"field2\": true, \n" +
                "\"field3\": true, \n" +
                "\"field4\": true, \n" +
                "\"field5\": null, \n" +
                "\"field6\": null, \n" +
                "\"field7\": true, \n" +
                "\"field8\": true, \n" +
                "\"field9\": null}");
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103, \n" +
                "\"field\": true, \n" +
                "\"field1\": true, \n" +
                "\"field2\": true, \n" +
                "\"field3\": true, \n" +
                "\"field4\": true, \n" +
                "\"field5\": true, \n" +
                "\"field6\": true, \n" +
                "\"field7\": true, \n" +
                "\"field8\": true, \n" +
                "\"field9\": true}");
        final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104, \n" +
                "\"field\": null, \n" +
                "\"field1\": null, \n" +
                "\"field2\": null, \n" +
                "\"field3\": null, \n" +
                "\"field4\": null, \n" +
                "\"field5\": null, \n" +
                "\"field6\": null, \n" +
                "\"field7\": null, \n" +
                "\"field8\": null, \n" +
                "\"field9\": null}");
        final BsonDocument doc5 = BsonDocument.parse("{\"_id\": 105, \n" +
                "\"field\": null, \n" +
                "\"field1\": null, \n" +
                "\"field2\": null, \n" +
                "\"field3\": null, \n" +
                "\"field4\": null, \n" +
                "\"field5\": null, \n" +
                "\"field6\": false, \n" +
                "\"field7\": null, \n" +
                "\"field8\": null, \n" +
                "\"field9\": null}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, doc4, doc5});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT " +
                            "(\"field\" AND \"field1\" AND \"field2\" AND \"field3\" AND \"field4\" AND \"field5\" AND \"field6\" AND \"field7\" AND \"field8\" AND \"field9\")," +
                            "(\"field\" OR \"field1\" OR \"field2\" OR \"field3\" OR \"field4\" OR \"field5\" OR \"field6\" OR \"field7\" OR \"field8\" OR \"field9\")" +
                            "FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertFalse(resultSet.getBoolean(1));
            Assertions.assertTrue(resultSet.getBoolean(2));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertNull(resultSet.getString(1));
            Assertions.assertTrue(resultSet.getBoolean(2));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertTrue(resultSet.getBoolean(1));
            Assertions.assertTrue(resultSet.getBoolean(2));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertNull(resultSet.getString(1));
            Assertions.assertNull(resultSet.getString(2));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertFalse(resultSet.getBoolean(1));
            Assertions.assertNull(resultSet.getString(2));
            Assertions.assertFalse(resultSet.next());
        }
    }

    // Boolean logic combined with string, numeric and date comparisons over nullable fields.
    @ParameterizedTest(name = "testQuerySelectLogicNullAndTypes - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQuerySelectLogicNullAndTypes(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQuerySelectLogicNullAndTypes";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101, \n" +
                "\"field\": \"abc\", \n" +
                "\"field1\": true, \n" +
                "\"field2\": 3}");
        doc1.append("field3",
                new BsonDateTime(Instant.parse("2020-01-03T00:00:00.00Z").toEpochMilli()));
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102, \n" +
                "\"field\": \"def\", \n" +
                "\"field1\": null, \n" +
                "\"field2\": 1}");
        doc2.append("field3",
                new BsonDateTime(Instant.parse("2020-01-01T00:00:00.00Z").toEpochMilli()));
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103, \n" +
                "\"field\": null, \n" +
                "\"field1\": false, \n" +
                "\"field2\": null}");
        // NOTE(review): this second append targets doc2 (overwriting its field3 set
        // above) and leaves doc3 without field3 — likely intended to be doc3.append;
        // the assertions below still pass either way since getBoolean(null) is false.
        doc2.append("field3",
                new BsonDateTime(Instant.parse("2020-01-03T00:00:00.00Z").toEpochMilli()));
        final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104, \n" +
                "\"field\": \"abc\", \n" +
                "\"field1\": null, \n" +
                "\"field2\": 4}");
        doc4.append("field3",
                new BsonDateTime(Instant.parse("2020-01-03T00:00:00.00Z").toEpochMilli()));
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, doc4});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT " +
                            "(\"field\" = 'abc' AND \"field1\")," +
                            "(\"field2\" > 2 OR \"field1\")," +
                            "(\"field3\" > '2020-01-02' AND \"field1\")" +
                            "FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertTrue(resultSet.getBoolean(1));
            Assertions.assertTrue(resultSet.getBoolean(2));
            Assertions.assertTrue(resultSet.getBoolean(3));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertFalse(resultSet.getBoolean(1));
            Assertions.assertNull(resultSet.getString(2));
            Assertions.assertFalse(resultSet.getBoolean(3));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertFalse(resultSet.getBoolean(1));
            Assertions.assertNull(resultSet.getString(2));
            Assertions.assertFalse(resultSet.getBoolean(3));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertNull(resultSet.getString(1));
            Assertions.assertTrue(resultSet.getBoolean(2));
            Assertions.assertNull(resultSet.getString(3));
            Assertions.assertFalse(resultSet.next());
        }
    }

    @DisplayName("Tests querying with inconsistent data types using aggregate operator syntax.")
    @ParameterizedTest(name = "testTypeComparisonsWithAggregateOperators - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testTypeComparisonsWithAggregateOperators(final DocumentDbTestEnvironment testEnvironment)
            throws SQLException {
        setTestEnvironment(testEnvironment);
        final String collection = "testTypeComparisonsWithAggregateOperators";
        final BsonDocument document1 =
                BsonDocument.parse("{ \"_id\" : \"key0\", \"array\": [1, 2, \"3\", \"4\", true] }");
        insertBsonDocuments(collection, new BsonDocument[]{document1});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // Aggregate comparison operator is used.
            // All string and boolean values will return true since these are greater than numeric.
            statement.execute(String.format(
                    "SELECT \"%3$s\" > 3 FROM \"%1$s\".\"%2$s\"",
                    getDatabaseName(), collection + "_array", "value"));
            final ResultSet resultSet1 = statement.getResultSet();
            Assertions.assertNotNull(resultSet1);
            Assertions.assertTrue(resultSet1.next());
            Assertions.assertEquals(false, resultSet1.getBoolean(1));
            Assertions.assertTrue(resultSet1.next());
            Assertions.assertEquals(false, resultSet1.getBoolean(1));
            Assertions.assertTrue(resultSet1.next());
            Assertions.assertEquals(true, resultSet1.getBoolean(1));
            Assertions.assertTrue(resultSet1.next());
            Assertions.assertEquals(true, resultSet1.getBoolean(1));
            Assertions.assertTrue(resultSet1.next());
            Assertions.assertEquals(true, resultSet1.getBoolean(1));
            Assertions.assertFalse(resultSet1.next());
            // Aggregate comparison operator is used.
            // All numeric values returned since these are less than any string.
            // Boolean is greater than string.
            statement.execute(String.format(
                    "SELECT \"%3$s\" FROM \"%1$s\".\"%2$s\" WHERE \"%3$s\" < \"%4$s\"",
                    getDatabaseName(), collection + "_array", "value", collection + "__id"));
            final ResultSet resultSet2 = statement.getResultSet();
            Assertions.assertNotNull(resultSet2);
            Assertions.assertTrue(resultSet2.next());
            Assertions.assertEquals("1", resultSet2.getString("value"));
            Assertions.assertTrue(resultSet2.next());
            Assertions.assertEquals("2", resultSet2.getString("value"));
            Assertions.assertTrue(resultSet2.next());
            Assertions.assertEquals("3", resultSet2.getString("value"));
            Assertions.assertTrue(resultSet2.next());
            Assertions.assertEquals("4", resultSet2.getString("value"));
            Assertions.assertFalse(resultSet2.next());
        }
    }

    @DisplayName("Tests querying for inconsistent data types using query operator syntax.")
    @ParameterizedTest(name = "testTypeComparisonsWithAggregateOperators - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testTypeComparisonsWithQueryOperators(final DocumentDbTestEnvironment testEnvironment)
            throws SQLException {
        setTestEnvironment(testEnvironment);
        final String collection = "testTypeComparisonsWithQueryOperators";
        final BsonDocument document1 =
                BsonDocument.parse("{ \"_id\" : \"key0\", \"array\": [1, 2, \"3\", \"4\", true] }");
        insertBsonDocuments(collection, new BsonDocument[]{document1});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // Query comparison operator is used.
            // No values returned since comparisons must match type.
            statement.execute(String.format(
                    "SELECT \"%3$s\" FROM \"%1$s\".\"%2$s\"" +
                            "WHERE \"%3$s\" < '3'",
                    getDatabaseName(), collection + "_array", "value"));
            final ResultSet resultSet1 = statement.getResultSet();
            Assertions.assertNotNull(resultSet1);
            Assertions.assertFalse(resultSet1.next());
            // Query comparison operator is used to check BOTH string and numeric.
            // Numeric and string values are compared. Boolean value is rejected.
            statement.execute(
                    String.format(
                            "SELECT \"%3$s\" FROM \"%1$s\".\"%2$s\" WHERE \"%3$s\" < 4 OR \"%3$s\" < '4'",
                            getDatabaseName(), collection + "_array", "value"));
            final ResultSet resultSet2 = statement.getResultSet();
            Assertions.assertNotNull(resultSet2);
            Assertions.assertTrue(resultSet2.next());
            Assertions.assertEquals("1", resultSet2.getString("value"));
            Assertions.assertTrue(resultSet2.next());
            Assertions.assertEquals("2", resultSet2.getString("value"));
            Assertions.assertTrue(resultSet2.next());
            Assertions.assertEquals("3", resultSet2.getString("value"));
            Assertions.assertFalse(resultSet2.next());
        }
    }

    @DisplayName("Tests that calculating a distinct aggregate after " +
            "grouping by a single column returns correct result. ")
    @ParameterizedTest(name = "testSingleColumnGroupByWithDistinctAggregate - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testSingleColumnGroupByWithDistinctAggregate(final DocumentDbTestEnvironment testEnvironment)
            throws SQLException {
        setTestEnvironment(testEnvironment);
        // NOTE(review): table name omits "Distinct" and so differs from the method
        // name — harmless, but confirm whether it should match.
        final String tableName = "testSingleColumnGroupByWithAggregate";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" +
                "\"field1\": 1, \"field2\": 2}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102, \"field1\": null, \"field2\": 2}");
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103, \"field2\": 2}");
        insertBsonDocuments(tableName, new BsonDocument[] {doc1, doc2, doc3});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet1 = statement.executeQuery(
                    String.format(
                            "SELECT SUM(DISTINCT\"field1\") FROM \"%s\".\"%s\" GROUP BY \"field2\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet1);
            Assertions.assertTrue(resultSet1.next());
            Assertions.assertEquals(
                    1,
                    resultSet1.getObject(1),
                    "Correct sum should be returned after grouping by single column.");
            Assertions.assertFalse(resultSet1.next());
        }
    }

    @DisplayName("Tests that query with SUM() where all values are null returns null" +
            "and where some values are null returns the sum.")
    @ParameterizedTest(name = "testQuerySumNulls - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQuerySumNulls(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQuerySumNulls";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" +
                "\"field1\": 1, \"field2\": 2}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102, \"field1\": null, \"field2\": 3}");
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103, \"field2\": 3}");
        insertBsonDocuments(tableName, new BsonDocument[] {doc1, doc2, doc3});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet1 = statement.executeQuery(
                    String.format(
                            "SELECT SUM(DISTINCT\"field1\") FROM \"%s\".\"%s\" WHERE \"field2\" <> 2",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet1);
            Assertions.assertTrue(resultSet1.next());
            Assertions.assertNull(
                    resultSet1.getObject(1),
                    "SUM(DISTINCT value) where all fields are null/undefined should be null.");
            Assertions.assertFalse(resultSet1.next());
            final ResultSet resultSet2 = statement.executeQuery(
                    String.format(
                            "SELECT SUM(\"field1\") FROM \"%s\".\"%s\" WHERE \"field2\" <> 2",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet2);
            Assertions.assertTrue(resultSet2.next());
            Assertions.assertNull(
                    resultSet2.getObject(1),
                    "SUM(value) where all fields are null/undefined should be null.");
            Assertions.assertFalse(resultSet2.next());
            final ResultSet resultSet3 = statement.executeQuery(
                    String.format(
                            "SELECT SUM(\"field1\") FROM \"%s\".\"%s\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet3);
            Assertions.assertTrue(resultSet3.next());
            Assertions.assertEquals(
                    1,
                    resultSet3.getObject(1),
                    "SUM(value) where only some fields are null/undefined should return the sum.");
            Assertions.assertFalse(resultSet3.next());
        }
    }

    @DisplayName("Tests that query where aggregate is renamed to existing field returns correct result.
" + "Addresses [AD-454].") @ParameterizedTest(name = "testAggregateWithNameConflict - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testAggregateWithNameConflict(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testAggregateWithNameConflict"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"document\": {\"rating\": 1}}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"document\": {\"rating\": 2}}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"document\": {\"rating\": 3}}"); insertBsonDocuments(tableName, new BsonDocument[] {doc1, doc2, doc3}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet1 = statement.executeQuery( String.format( "SELECT MIN(\"rating\") AS \"rating\" FROM \"%s\".\"%s\"", getDatabaseName(), tableName + "_document")); Assertions.assertNotNull(resultSet1); Assertions.assertTrue(resultSet1.next()); Assertions.assertEquals(1, resultSet1.getInt(1)); Assertions.assertFalse(resultSet1.next()); } } @DisplayName("Tests that all supported literal types can be retrieved.") @ParameterizedTest(name = "testBooleanLiteralTypes - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testBooleanLiteralTypes(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testBooleanLiteralTypes"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}"); insertBsonDocuments(tableName, new BsonDocument[] {doc1}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format( "SELECT TRUE AS \"literalTrue\", " + "FALSE AS \"literalFalse\", " + "UNKNOWN AS \"literalUnknown\" " + "FROM \"%s\".\"%s\"", 
getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals(Types.BOOLEAN, resultSet.getMetaData().getColumnType(1)); Assertions.assertEquals(Types.BOOLEAN, resultSet.getMetaData().getColumnType(2)); Assertions.assertEquals(Types.BOOLEAN, resultSet.getMetaData().getColumnType(3)); Assertions.assertEquals(true, resultSet.getBoolean(1)); Assertions.assertEquals(false, resultSet.getBoolean(2)); Assertions.assertEquals(null, resultSet.getObject(3)); Assertions.assertFalse(resultSet.next()); } } @DisplayName("Tests that all supported numeric literal types can be retrieved.") @ParameterizedTest(name = "testNumericLiteralTypes - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testNumericLiteralTypes(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testNumericLiteralTypes"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}"); insertBsonDocuments(tableName, new BsonDocument[] {doc1}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); // Values wrapped in CAST to ensure they aren't interpreted as a wider type. 
final ResultSet resultSet = statement.executeQuery( String.format( "SELECT CAST(-128 AS TINYINT) AS \"literalTinyInt\", " + "CAST(-32768 AS SMALLINT) AS \"literalSmallInt\", " + "CAST(-2147483648 AS INT) AS \"literalInt\", " + "CAST(-9223372036854775808 AS BIGINT) AS \"literalBigInt\", " + "CAST('123456789012345678901234567890.45' AS DECIMAL(5, 2)) AS \"literalDecimal\", " + "CAST('987654321098765432109876543210.45' AS NUMERIC(5, 2)) AS \"literalNumeric\", " + "CAST(1234.56 AS FLOAT) AS \"literalFloat\", " + "CAST(12345.678 AS REAL) AS \"literalReal\", " + "CAST(12345.6789999999999 AS DOUBLE) AS \"literalDouble\"" + "FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals(Types.TINYINT, resultSet.getMetaData().getColumnType(1)); Assertions.assertEquals(Types.SMALLINT, resultSet.getMetaData().getColumnType(2)); Assertions.assertEquals(Types.INTEGER, resultSet.getMetaData().getColumnType(3)); Assertions.assertEquals(Types.BIGINT, resultSet.getMetaData().getColumnType(4)); Assertions.assertEquals(Types.DECIMAL, resultSet.getMetaData().getColumnType(5)); Assertions.assertEquals(Types.DECIMAL, resultSet.getMetaData().getColumnType(6)); Assertions.assertEquals(Types.FLOAT, resultSet.getMetaData().getColumnType(7)); Assertions.assertEquals(Types.REAL, resultSet.getMetaData().getColumnType(8));; Assertions.assertEquals(Types.DOUBLE, resultSet.getMetaData().getColumnType(9));; Assertions.assertEquals(-128, resultSet.getInt(1)); Assertions.assertEquals(-32768, resultSet.getInt(2)); Assertions.assertEquals(-2147483648, resultSet.getInt(3)); Assertions.assertEquals(-9223372036854775808L, resultSet.getLong(4)); Assertions.assertEquals(new BigDecimal("123456789012345678901234567890.45"), resultSet.getBigDecimal(5)); Assertions.assertEquals(new BigDecimal("987654321098765432109876543210.45"), resultSet.getBigDecimal(6)); Assertions.assertEquals(1234.56, resultSet.getDouble(7)); 
Assertions.assertEquals(12345.678, resultSet.getDouble(8)); Assertions.assertEquals(12345.6789999999999, resultSet.getDouble(9)); Assertions.assertFalse(resultSet.next()); } } @DisplayName("Tests that all supported string literal types can be retrieved.") @ParameterizedTest(name = "testStringLiteralTypes - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testStringLiteralTypes(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testStringLiteralTypes"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}"); insertBsonDocuments(tableName, new BsonDocument[] {doc1}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); // Values wrapped in CAST to ensure they aren't interpreted as a wider type. final ResultSet resultSet = statement.executeQuery( String.format( "SELECT CAST('Hello' AS CHAR(5)) AS \"literalChar\", " + "CAST('' AS CHAR(5)) AS \"literalCharEmpty\", " + "CAST('Hello' AS VARCHAR) AS \"literalVarchar\", " + "CAST('' AS VARCHAR) AS \"literalVarcharEmpty\" " + "FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals(Types.CHAR, resultSet.getMetaData().getColumnType(1)); Assertions.assertEquals(Types.CHAR, resultSet.getMetaData().getColumnType(2)); Assertions.assertEquals(Types.VARCHAR, resultSet.getMetaData().getColumnType(3)); Assertions.assertEquals(Types.VARCHAR, resultSet.getMetaData().getColumnType(4)); Assertions.assertEquals("Hello", resultSet.getString(1)); Assertions.assertEquals(" ", resultSet.getString(2)); Assertions.assertEquals("Hello", resultSet.getString(3)); Assertions.assertEquals("", resultSet.getString(4)); Assertions.assertFalse(resultSet.next()); } } @DisplayName("Tests that all supported binary literal types can be retrieved.") @ParameterizedTest(name = 
"testBinaryLiteralTypes - [{index}] - {arguments}")
@MethodSource({"getTestEnvironments"})
void testBinaryLiteralTypes(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
    setTestEnvironment(testEnvironment);
    final String tableName = "testBinaryLiteralTypes";
    // Bytes of the hex literal x'45F0AB' (0x45, 0xF0, 0xAB as signed bytes).
    final byte[] expected = {69, -16, -85};
    final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
    insertBsonDocuments(tableName, new BsonDocument[] {doc1});
    try (Connection connection = getConnection()) {
        final Statement statement = getDocumentDbStatement(connection);
        // Values wrapped in CAST to ensure they aren't interpreted as a wider type.
        final ResultSet resultSet = statement.executeQuery(
                String.format(
                        "SELECT CAST(x'45F0AB' AS BINARY(3)) AS \"literalBinary\", "
                                + "CAST(x'' AS BINARY(3)) AS \"literalBinaryEmpty\", "
                                + "CAST(x'45F0AB' AS VARBINARY) AS \"literalVarbinary\", "
                                + "CAST(x'' AS VARBINARY) AS \"literalVarbinaryEmpty\" "
                                + "FROM \"%s\".\"%s\"",
                        getDatabaseName(), tableName));
        Assertions.assertNotNull(resultSet);
        Assertions.assertTrue(resultSet.next());
        Assertions.assertEquals(Types.BINARY, resultSet.getMetaData().getColumnType(1));
        Assertions.assertEquals(Types.BINARY, resultSet.getMetaData().getColumnType(2));
        Assertions.assertEquals(Types.VARBINARY, resultSet.getMetaData().getColumnType(3));
        Assertions.assertEquals(Types.VARBINARY, resultSet.getMetaData().getColumnType(4));
        Assertions.assertArrayEquals(expected, resultSet.getBytes(1));
        // Empty BINARY(3) is zero-padded to its declared length; empty VARBINARY stays empty.
        Assertions.assertArrayEquals(new byte[] {0, 0, 0}, resultSet.getBytes(2));
        Assertions.assertArrayEquals(expected, resultSet.getBytes(3));
        Assertions.assertArrayEquals(new byte[]{}, resultSet.getBytes(4));
        Assertions.assertFalse(resultSet.next());
    }
}

@DisplayName("Tests that all supported date time literal types can be retrieved.")
@ParameterizedTest(name = "testDateTimeLiteralTypes - [{index}] - {arguments}")
@MethodSource({"getTestEnvironments"})
void testDateTimeLiteralTypes(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
    setTestEnvironment(testEnvironment);
    final String tableName = "testDateTimeLiteralTypes";
    final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
    insertBsonDocuments(tableName, new BsonDocument[] {doc1});
    try (Connection connection = getConnection()) {
        final Statement statement = getDocumentDbStatement(connection);
        final ResultSet resultSet = statement.executeQuery(
                String.format(
                        "SELECT TIME '20:17:40' AS \"literalTime\", "
                                + "DATE '2017-09-20' AS \"literalDate\", "
                                + "TIMESTAMP '2017-09-20 20:17:40' AS \"literalTimestamp\""
                                + "FROM \"%s\".\"%s\"",
                        getDatabaseName(), tableName));
        Assertions.assertNotNull(resultSet);
        Assertions.assertTrue(resultSet.next());
        Assertions.assertEquals(Types.TIME, resultSet.getMetaData().getColumnType(1));
        Assertions.assertEquals(Types.DATE, resultSet.getMetaData().getColumnType(2));
        Assertions.assertEquals(Types.TIMESTAMP, resultSet.getMetaData().getColumnType(3));
        // Expected values are epoch-based milliseconds for the literals above
        // (73060000 ms = 20:17:40; the date/timestamp are 2017-09-20 UTC).
        Assertions.assertEquals(new Time(73060000), resultSet.getTime(1));
        Assertions.assertEquals(new Date(1505865600000L), resultSet.getDate(2));
        Assertions.assertEquals(new Timestamp(1505938660000L), resultSet.getTimestamp(3));
        Assertions.assertFalse(resultSet.next());
    }
}

@DisplayName("Tests that all supported interval literal types can be retrieved.")
@ParameterizedTest(name = "testIntervalLiteralTypes - [{index}] - {arguments}")
@MethodSource({"getTestEnvironments"})
void testIntervalLiteralTypes(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
    setTestEnvironment(testEnvironment);
    final String tableName = "testIntervalLiteralTypes";
    final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
    insertBsonDocuments(tableName, new BsonDocument[] {doc1});
    try (Connection connection = getConnection()) {
        final Statement statement = getDocumentDbStatement(connection);
        final ResultSet resultSet = statement.executeQuery(
                String.format(
                        "SELECT INTERVAL '123-2' YEAR(3) TO MONTH AS \"literalYearToMonth\", "
                                + "INTERVAL '123' YEAR(3) AS \"literalYear\", "
                                + "INTERVAL 300 MONTH(3) AS \"literalMonth\", "
                                + "INTERVAL '400' DAY(3) AS \"literalDay\", "
                                + "INTERVAL '400 5' DAY(3) TO HOUR AS \"literalDayToHour\", "
                                + "INTERVAL '4 5:12' DAY TO MINUTE AS \"literalDayToMinute\", "
                                + "INTERVAL '4 5:12:10.789' DAY TO SECOND AS \"literalDayToSecond\", "
                                + "INTERVAL '10' HOUR AS \"literalHour\", "
                                + "INTERVAL '11:20' HOUR TO MINUTE AS \"literalHourToMinute\", "
                                + "INTERVAL '11:20:10' HOUR TO SECOND AS \"literalHourToSecond\", "
                                + "INTERVAL '10' MINUTE AS \"literalMinute\", "
                                + "INTERVAL '10:22' MINUTE TO SECOND AS \"literalMinuteToSecond\", "
                                + "INTERVAL '30' SECOND AS \"literalSecond\""
                                + "FROM \"%s\".\"%s\"",
                        getDatabaseName(), tableName));
        Assertions.assertNotNull(resultSet);
        // Interval SQL type is not a JDBC type but can be retrieved as a Long.
        Assertions.assertTrue(resultSet.next());
        for (int i = 1; i <= resultSet.getMetaData().getColumnCount(); i++ ) {
            Assertions.assertEquals(Types.OTHER, resultSet.getMetaData().getColumnType(i));
        }
        // YEAR TO MONTH intervals are represented as months.
        // 1478 = 123 * 12 + 2; 1476 = 123 * 12.
        Assertions.assertEquals(1478, resultSet.getLong(1));
        Assertions.assertEquals(1476, resultSet.getLong(2));
        Assertions.assertEquals(300, resultSet.getLong(3));
        // DAY TO SECOND intervals are represented as milliseconds.
        // e.g. 34560000000 = 400 days; 34578000000 = 400 days 5 hours;
        // 364330789 = 4 days 5:12:10.789.
        Assertions.assertEquals(34560000000L, resultSet.getLong(4));
        Assertions.assertEquals(34578000000L, resultSet.getLong(5));
        Assertions.assertEquals(364320000, resultSet.getLong(6));
        Assertions.assertEquals(364330789, resultSet.getLong(7));
        Assertions.assertEquals(36000000, resultSet.getLong(8));
        Assertions.assertEquals(40800000, resultSet.getLong(9));
        Assertions.assertEquals(40810000, resultSet.getLong(10));
        Assertions.assertEquals(600000, resultSet.getLong(11));
        Assertions.assertEquals(622000, resultSet.getLong(12));
        Assertions.assertEquals(30000, resultSet.getLong(13));
        Assertions.assertFalse(resultSet.next());
    }
}

@DisplayName("Tests that supported interval literals can be used to calculate a new interval literal.")
@ParameterizedTest(name = "testIntervalLiteralOperations - [{index}] - {arguments}")
@MethodSource({"getTestEnvironments"})
void testIntervalLiteralOperations(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
    setTestEnvironment(testEnvironment);
    final String tableName = "testIntervalLiteralOperations";
    final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
    insertBsonDocuments(tableName, new BsonDocument[] {doc1});
    try (Connection connection = getConnection()) {
        final Statement statement = getDocumentDbStatement(connection);
        final ResultSet resultSet = statement.executeQuery(
                String.format(
                        "SELECT INTERVAL '5-3' YEAR TO MONTH + INTERVAL '20' MONTH, "
                                + "INTERVAL '20' DAY - INTERVAL '240' HOUR(3) "
                                + "FROM \"%s\".\"%s\"",
                        getDatabaseName(), tableName));
        Assertions.assertNotNull(resultSet);
        Assertions.assertTrue(resultSet.next());
        // YEAR TO MONTH intervals are represented as months (6 years, 11 months)
        Assertions.assertEquals(83, resultSet.getLong(1));
        // DAY TO SECOND intervals are represented as milliseconds (10 days)
        Assertions.assertEquals(864000000, resultSet.getLong(2));
        Assertions.assertFalse(resultSet.next());
    }
}

@DisplayName("Tests query with MOD() for real and integer numbers.")
@ParameterizedTest(name = "testQueryMod - [{index}] - {arguments}")
@MethodSource({"getTestEnvironments"})
void testQueryMod(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
    setTestEnvironment(testEnvironment);
    final String tableName = "testQueryMod";
    final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": 4}");
    final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102, \"field\": 5}");
    insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2});
    try (Connection connection = getConnection()) {
        final Statement statement = getDocumentDbStatement(connection);
        // MOD(field * 0.5, 1): 4 * 0.5 = 2.0 -> 0; 5 * 0.5 = 2.5 -> 0.5.
        final ResultSet resultSet = statement.executeQuery(
                String.format("SELECT MOD(\"field\" * 0.5, 1) from \"%s\".\"%s\"",
                        getDatabaseName(), tableName));
        Assertions.assertNotNull(resultSet);
        Assertions.assertTrue(resultSet.next());
        Assertions.assertEquals(0, resultSet.getFloat(1));
        Assertions.assertTrue(resultSet.next());
        Assertions.assertEquals(.5, resultSet.getFloat(1));
        Assertions.assertFalse(resultSet.next());
    }
}
}
4,490
0
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb/jdbc/DocumentDbPreparedStatementTest.java
/*
 * Copyright <2021> Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 *
 */
package software.amazon.documentdb.jdbc;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import software.amazon.documentdb.jdbc.common.test.DocumentDbFlapDoodleExtension;
import software.amazon.documentdb.jdbc.common.test.DocumentDbFlapDoodleTest;
import software.amazon.documentdb.jdbc.metadata.DocumentDbSchema;
import software.amazon.documentdb.jdbc.persist.DocumentDbSchemaWriter;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Types;
import java.time.Instant;
import java.util.Properties;
import java.util.regex.Pattern;

/**
 * Tests {@link DocumentDbPreparedStatement} against a local flapdoodle-embedded
 * MongoDB instance seeded with {@value RECORD_COUNT} consistent documents.
 */
@ExtendWith(DocumentDbFlapDoodleExtension.class)
class DocumentDbPreparedStatementTest extends DocumentDbFlapDoodleTest {
    private static final String DATABASE_NAME = "database";
    private static final String USER = "user";
    private static final String PASSWORD = "password";
    // %s placeholders: user, password, port, database, scan method.
    private static final String CONNECTION_STRING_TEMPLATE = "jdbc:documentdb://%s:%s@localhost:%s/%s?tls=false&scanLimit=1000&scanMethod=%s";
    private static final String COLLECTION_NAME = "testCollection";
    private static final String QUERY =
            String.format("SELECT * FROM \"%s\".\"%s\"", DATABASE_NAME, COLLECTION_NAME);
    private static final int RECORD_COUNT = 10;

    @BeforeAll
    static void initialize() {
        // Add a valid user to the local MongoDB instance and seed the collection.
        createUser(DATABASE_NAME, USER, PASSWORD);
        prepareSimpleConsistentData(DATABASE_NAME, COLLECTION_NAME,
                RECORD_COUNT, USER, PASSWORD);
    }

    // Drop the cached schema after each test so the next test rescans cleanly.
    @AfterEach
    void afterEach() throws Exception {
        final DocumentDbConnectionProperties properties = DocumentDbConnectionProperties
                .getPropertiesFromConnectionString(
                        new Properties(), getJdbcConnectionString(), "jdbc:documentdb:");
        try (DocumentDbSchemaWriter schemaWriter = new DocumentDbSchemaWriter(properties, null)) {
            schemaWriter.remove(DocumentDbSchema.DEFAULT_SCHEMA_NAME);
        }
    }

    /**
     * Tests that queries can be executed with PreparedStatement
     *
     * @throws SQLException if connection or query fails.
     */
    @Test
    @DisplayName("Tests that queries can be executed using PreparedStatement.")
    void testExecuteQuery() throws SQLException {
        final Connection connection = DriverManager.getConnection(getJdbcConnectionString());
        final PreparedStatement preparedStatement =
                new DocumentDbPreparedStatement(connection, QUERY);
        try (ResultSet resultSet = preparedStatement.executeQuery()) {
            int count = 0;
            // Every seeded row carries the same fixed field values.
            while (resultSet.next()) {
                Assertions.assertTrue(
                        Pattern.matches("^\\w+$", resultSet.getString(COLLECTION_NAME + "__id")));
                Assertions.assertEquals(Double.MAX_VALUE, resultSet.getDouble("fieldDouble"));
                Assertions.assertEquals("新年快乐", resultSet.getString("fieldString"));
                Assertions.assertTrue(
                        Pattern.matches("^\\w+$", resultSet.getString("fieldObjectId")));
                Assertions.assertTrue(resultSet.getBoolean("fieldBoolean"));
                Assertions.assertEquals(
                        Instant.parse("2020-01-01T00:00:00.00Z"),
                        resultSet.getTimestamp("fieldDate").toInstant());
                Assertions.assertEquals(Integer.MAX_VALUE, resultSet.getInt("fieldInt"));
                Assertions.assertEquals(Long.MAX_VALUE, resultSet.getLong("fieldLong"));
                Assertions.assertEquals("MaxKey", resultSet.getString("fieldMaxKey"));
                Assertions.assertEquals("MinKey", resultSet.getString("fieldMinKey"));
                Assertions.assertNull(resultSet.getString("fieldNull"));
                count++;
            }
            Assertions.assertEquals(RECORD_COUNT, count);
        }
    }

    /**
     * Tests that metadata can be retrieved before the query is executed and
     * that it matches after execution.
     *
     * @throws SQLException if connection or query fails.
     */
    @Test
    @DisplayName("Tests getMetadata without querying and after querying.")
    void testGetMetadataQueryBeforeExecute() throws SQLException {
        final Connection connection = DriverManager.getConnection(getJdbcConnectionString());
        final PreparedStatement preparedStatement =
                new DocumentDbPreparedStatement(connection, QUERY);
        // Check the metadata.
        checkMetadata(preparedStatement.getMetaData());
        // Execute the statement.
        preparedStatement.execute();
        // Check the metadata again.
        checkMetadata(preparedStatement.getMetaData());
    }

    // Asserts the expected 13-column schema produced for the seeded collection;
    // column 1 gets an exhaustive check, the rest are checked by name and type.
    private static void checkMetadata(final ResultSetMetaData resultSetMetaData)
            throws SQLException {
        Assertions.assertNotNull(resultSetMetaData);
        Assertions.assertEquals(13, resultSetMetaData.getColumnCount());
        Assertions.assertEquals(COLLECTION_NAME, resultSetMetaData.getTableName(1));
        Assertions.assertNull(resultSetMetaData.getCatalogName(1));
        Assertions.assertEquals(DATABASE_NAME, resultSetMetaData.getSchemaName(1));
        Assertions.assertEquals(COLLECTION_NAME + "__id", resultSetMetaData.getColumnName(1));
        Assertions.assertEquals(COLLECTION_NAME + "__id", resultSetMetaData.getColumnLabel(1));
        Assertions.assertEquals("VARCHAR", resultSetMetaData.getColumnTypeName(1));
        Assertions.assertEquals("java.lang.String", resultSetMetaData.getColumnClassName(1));
        Assertions.assertEquals(Types.VARCHAR, resultSetMetaData.getColumnType(1));
        Assertions.assertEquals(0, resultSetMetaData.isNullable(1));
        Assertions.assertEquals(65536, resultSetMetaData.getPrecision(1));
        Assertions.assertEquals(65536, resultSetMetaData.getColumnDisplaySize(1));
        Assertions.assertTrue(resultSetMetaData.isReadOnly(1));
        Assertions.assertTrue(resultSetMetaData.isSigned(1));
        Assertions.assertTrue(resultSetMetaData.isCaseSensitive(1));
        Assertions.assertFalse(resultSetMetaData.isWritable(1));
        Assertions.assertFalse(resultSetMetaData.isAutoIncrement(1));
        Assertions.assertFalse(resultSetMetaData.isCurrency(1));
        Assertions.assertEquals("fieldDouble", resultSetMetaData.getColumnName(2));
        Assertions.assertEquals("DOUBLE", resultSetMetaData.getColumnTypeName(2));
        Assertions.assertEquals(1, resultSetMetaData.isNullable(2));
        Assertions.assertEquals(0, resultSetMetaData.getScale(2));
        Assertions.assertEquals("fieldString", resultSetMetaData.getColumnName(3));
        Assertions.assertEquals("VARCHAR", resultSetMetaData.getColumnTypeName(3));
        Assertions.assertEquals("fieldObjectId", resultSetMetaData.getColumnName(4));
        Assertions.assertEquals("VARCHAR", resultSetMetaData.getColumnTypeName(4));
        Assertions.assertEquals("fieldBoolean", resultSetMetaData.getColumnName(5));
        Assertions.assertEquals("BOOLEAN", resultSetMetaData.getColumnTypeName(5));
        Assertions.assertEquals("fieldDate", resultSetMetaData.getColumnName(6));
        Assertions.assertEquals("TIMESTAMP", resultSetMetaData.getColumnTypeName(6));
        Assertions.assertEquals("fieldInt", resultSetMetaData.getColumnName(7));
        Assertions.assertEquals("INTEGER", resultSetMetaData.getColumnTypeName(7));
        Assertions.assertEquals("fieldLong", resultSetMetaData.getColumnName(8));
        Assertions.assertEquals("BIGINT", resultSetMetaData.getColumnTypeName(8));
        Assertions.assertEquals("fieldMaxKey", resultSetMetaData.getColumnName(9));
        Assertions.assertEquals("VARCHAR", resultSetMetaData.getColumnTypeName(9));
        Assertions.assertEquals("fieldMinKey", resultSetMetaData.getColumnName(10));
        Assertions.assertEquals("VARCHAR", resultSetMetaData.getColumnTypeName(10));
        Assertions.assertEquals("fieldNull", resultSetMetaData.getColumnName(11));
        Assertions.assertEquals("VARCHAR", resultSetMetaData.getColumnTypeName(11));
        Assertions.assertEquals("fieldBinary", resultSetMetaData.getColumnName(12));
        Assertions.assertEquals("VARBINARY", resultSetMetaData.getColumnTypeName(12));
        Assertions.assertEquals("fieldDecimal128", resultSetMetaData.getColumnName(13));
        Assertions.assertEquals("DECIMAL", resultSetMetaData.getColumnTypeName(13));
    }

    @Test
    @DisplayName("Tests getting and setting query timeout")
    void testGetSetQueryTimeout() throws SQLException {
        final Connection connection = DriverManager.getConnection(getJdbcConnectionString());
        final PreparedStatement preparedStatement =
                new DocumentDbPreparedStatement(connection, QUERY);
        Assertions.assertDoesNotThrow(() -> preparedStatement.setQueryTimeout(30));
        Assertions.assertEquals(30, preparedStatement.getQueryTimeout());
    }

    // Builds the JDBC URL against the embedded instance's dynamic port.
    private static String getJdbcConnectionString() {
        return String.format(
                CONNECTION_STRING_TEMPLATE,
                USER, PASSWORD, getMongoPort(), DATABASE_NAME,
                DocumentDbMetadataScanMethod.RANDOM.getName());
    }
}
4,491
0
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb/jdbc/DocumentDbIntegrationTest.java
/*
 * Copyright <2021> Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 *
 */
package software.amazon.documentdb.jdbc;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;

import java.util.regex.Pattern;

/**
 * DocumentDB integration tests.
 *
 * <p>Sanity-checks the {@code CONFIGURED_ENVIRONMENTS} environment variable to
 * confirm the test run is wired to the expected environment: the SSH-tunnel
 * environment marker must be absent locally and present remotely.
 */
public class DocumentDbIntegrationTest {
    // FIX: regex compiled once instead of re-compiling via Pattern.matches in
    // each test; also removed the no-op assertEquals(true, true) assertions.
    private static final Pattern SSH_TUNNEL_ENVIRONMENT =
            Pattern.compile(".*DOCUMENTDB40_SSH_TUNNEL.*");

    /**
     * Ensure local environment is correctly set up (no SSH-tunnel environment configured).
     */
    @Test
    @Tag("local-integration")
    public void runLocalTest() {
        final String configuredEnvironments = System.getenv("CONFIGURED_ENVIRONMENTS");
        Assertions.assertNotNull(configuredEnvironments);
        Assertions.assertFalse(
                SSH_TUNNEL_ENVIRONMENT.matcher(configuredEnvironments).matches());
    }

    /**
     * Ensure remote environment is correctly set up (SSH-tunnel environment configured).
     */
    @Test
    @Tag("remote-integration")
    public void runRemoteTest() {
        final String configuredEnvironments = System.getenv("CONFIGURED_ENVIRONMENTS");
        Assertions.assertNotNull(configuredEnvironments);
        Assertions.assertTrue(
                SSH_TUNNEL_ENVIRONMENT.matcher(configuredEnvironments).matches());
    }
}
4,492
0
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb/jdbc/DocumentDbStatementFilterTest.java
/* * Copyright <2021> Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://www.apache.org/licenses/LICENSE-2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. * */ package software.amazon.documentdb.jdbc; import org.bson.BsonBoolean; import org.bson.BsonDateTime; import org.bson.BsonDocument; import org.bson.BsonDouble; import org.bson.BsonInt64; import org.bson.BsonMinKey; import org.bson.BsonNull; import org.bson.BsonObjectId; import org.bson.BsonString; import org.bson.types.ObjectId; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; import software.amazon.documentdb.jdbc.common.test.DocumentDbTestEnvironment; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.sql.Timestamp; import java.time.Instant; import java.time.temporal.ChronoUnit; public class DocumentDbStatementFilterTest extends DocumentDbStatementTest { /** * Tests that a statement with project, where, group by, having, order, and limit works for a single table. * * @throws SQLException occurs if executing the statement or retrieving a value fails. 
*/ @DisplayName("Tests that a statement with project, where, group by, having, order, and limit works for a single table.") @ParameterizedTest(name = "testComplexQuery - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testComplexQuery(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String collection = "testComplexQuery"; final BsonDocument document1 = BsonDocument.parse("{ \"_id\" : \"key0\", \"array\": [1, 2, 3, 4, 5] }"); final BsonDocument document2 = BsonDocument.parse("{ \"_id\" : \"key1\", \"array\": [1, 2, 3] }"); final BsonDocument document3 = BsonDocument.parse("{ \"_id\" : \"key2\", \"array\": [1, 2] }"); final BsonDocument document4 = BsonDocument.parse("{ \"_id\" : \"key3\", \"array\": [1, 2, 3, 4, 5] }"); insertBsonDocuments(collection, new BsonDocument[]{document1, document2, document3, document4}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); // Verify that result set has correct values. statement.execute(String.format( "SELECT \"%s\", COUNT(*) AS \"Count\" FROM \"%s\".\"%s\"" + "WHERE \"%s\" <> 'key3' " + "GROUP BY \"%s\" HAVING COUNT(*) > 1" + "ORDER BY \"Count\" DESC LIMIT 1", collection + "__id", getDatabaseName(), collection + "_array", collection + "__id", collection + "__id")); final ResultSet resultSet1 = statement.getResultSet(); Assertions.assertNotNull(resultSet1); Assertions.assertTrue(resultSet1.next()); Assertions.assertEquals("key0", resultSet1.getString(collection + "__id")); Assertions.assertEquals(5, resultSet1.getInt("Count")); Assertions.assertFalse(resultSet1.next()); } } /** * Tests that queries with not-equals do not return null or undefined values. * * @throws SQLException occurs if query fails. 
*/
    @DisplayName("Tests that comparisons to null do not return a value.")
    @ParameterizedTest(name = "testComparisonToNull - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testComparisonToNull(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testComparisonsToNull";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n"
                + "\"field\": 4}");
        // FIX: the original concatenation produced the malformed JSON
        // {"_id": 102, \n}"field": null  (closing brace before the field),
        // which would make BsonDocument.parse throw before the query ever ran.
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102, \n"
                + "\"field\": null}");
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // Only doc1 (field = 4) satisfies "field <> 5"; null and missing values are excluded.
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT * from \"%s\".\"%s\" WHERE \"field\" <> 5",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests query with two literals in a where clause such that the comparison is true.
     *
     * @throws SQLException occurs if query fails.
*/
    @DisplayName("Tests query WHERE clause containing two literals such that the comparison is true.")
    @ParameterizedTest(name = "testQueryWhereTwoLiteralsTrue - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testQueryWhereTwoLiteralsTrue(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQueryWhereTwoLiteralsTrue";
        // One row with "field" populated, one row without it.
        final BsonDocument rowWithField = BsonDocument.parse("{\"_id\": 101,\n\"field\": 4}");
        final BsonDocument rowWithoutField = BsonDocument.parse("{\"_id\": 102}");
        insertBsonDocuments(tableName, new BsonDocument[]{rowWithField, rowWithoutField});
        try (Connection connection = getConnection()) {
            final Statement stmt = getDocumentDbStatement(connection);
            // A tautological predicate (2 > 1) must return every row.
            final ResultSet rows = stmt.executeQuery(
                    String.format("SELECT * from \"%s\".\"%s\" WHERE 2 > 1",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(rows);
            Assertions.assertTrue(rows.next());
            Assertions.assertEquals(4, rows.getInt(2));
            Assertions.assertTrue(rows.next());
            // The second row never had "field", so the column is null.
            Assertions.assertNull(rows.getString(2));
            Assertions.assertFalse(rows.next());
        }
    }

    /** Tests that queries with multiple not-equals clauses are correct.
     * @throws SQLException occurs if query fails.
*/
    @DisplayName("Tests that multiple != conditions can be used.")
    @ParameterizedTest(name = "testMultipleNotEquals - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testMultipleNotEquals(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testMultipleNotEquals";
        // Rows with field = 4, 3, 2 and null; only field = 2 survives both exclusions.
        final BsonDocument[] rows = {
                BsonDocument.parse("{\"_id\": 101,\n\"field\": 4}"),
                BsonDocument.parse("{\"_id\": 102, \n\"field\": 3}"),
                BsonDocument.parse("{\"_id\": 103, \n\"field\": 2}"),
                BsonDocument.parse("{\"_id\": 104, \n\"field\": null}"),
        };
        insertBsonDocuments(tableName, rows);
        try (Connection connection = getConnection()) {
            final Statement stmt = getDocumentDbStatement(connection);
            final ResultSet resultSet = stmt.executeQuery(
                    String.format(
                            "SELECT * from \"%s\".\"%s\" WHERE \"field\" <> 4 AND \"field\" <> 3",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals(2, resultSet.getInt(2));
            // Null values must not match a not-equals predicate.
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests query with boolean literal values.
     *
     * @throws SQLException occurs if query fails.
*/ @DisplayName("Tests query WHERE clause with boolean literal.") @ParameterizedTest(name = "testQueryWhereLiteralBoolean - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testQueryWhereLiteralBoolean(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testQueryWhereLiteralBoolean"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"fieldA\": true, \n " + "\"fieldB\": false}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"fieldA\": false, \n " + "\"fieldB\": true}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet1 = statement.executeQuery( String.format( "SELECT * from \"%s\".\"%s\" WHERE \"fieldA\" = TRUE AND \"fieldB\" = FALSE", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet1); Assertions.assertTrue(resultSet1.next()); Assertions.assertTrue(resultSet1.getBoolean(2)); Assertions.assertFalse(resultSet1.next()); // Test same query but using IS TRUE / IS FALSE syntax. final ResultSet resultSet2 = statement.executeQuery( String.format( "SELECT * from \"%s\".\"%s\" WHERE \"fieldA\" IS TRUE AND \"fieldB\" IS FALSE", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet2); Assertions.assertTrue(resultSet2.next()); Assertions.assertTrue(resultSet2.getBoolean(2)); Assertions.assertFalse(resultSet2.next()); } } /** * Tests that queries with CASE are correct, particularly where null or undefined values are involved. * @throws SQLException occurs if query fails. 
*/
    @DisplayName("Tests queries with searched CASE format and null values are correct.")
    @ParameterizedTest(name = "testSearchedCase - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testSearchedCase(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testSearchedCase";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": 1}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": 2}");
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": 5}");
        final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104,\n" + "\"field\": 4}");
        final BsonDocument doc5 = BsonDocument.parse("{\"_id\": 105,\n" + "\"field\": 3}");
        final BsonDocument doc6 = BsonDocument.parse("{\"_id\": 106,\n" + "\"field2\": 9}");
        final BsonDocument doc7 = BsonDocument.parse("{\"_id\": 107,\n" + "\"field\": null}");
        final BsonDocument doc8 = BsonDocument.parse("{\"_id\": 108}");
        final BsonDocument doc9 = BsonDocument.parse("{\"_id\": 109}");
        // MinKey exercises a non-null, non-numeric BSON value in the comparisons.
        doc9.append("field", new BsonMinKey());
        insertBsonDocuments(tableName,
                new BsonDocument[]{doc1, doc2, doc3, doc4, doc5, doc6, doc7, doc8, doc9});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format(
                            "SELECT CASE "
                                    + "WHEN \"field\" < 2 THEN 'A' "
                                    + "WHEN \"field\" <= 2 THEN 'B' "
                                    + "WHEN \"field\" > 4 THEN 'C' "
                                    + "WHEN \"field\" >= 4 THEN 'D' "
                                    + "WHEN \"field\" <> 7 THEN 'E' "
                                    + "WHEN \"field2\" IN (9, 10) THEN 'F' "
                                    + "ELSE 'G' END FROM \"%s\".\"%s\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            // field = 1, 2, 5, 4, 3 -> A, B, C, D, E; field2-only row -> F;
            // null / missing / MinKey -> G.
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("A", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("B", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("C", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("D", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("E", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("F", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("G", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("G", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("G", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests that queries with CASE are correct, particularly where null or undefined values are involved.
     * @throws SQLException occurs if query fails.
     */
    @DisplayName("Tests queries with simple CASE format and null values are correct.")
    @ParameterizedTest(name = "testSimpleCase - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testSimpleCase(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testSimpleCase";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": 1}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": 2}");
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": 5}");
        final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104,\n" + "\"field\": 4}");
        final BsonDocument doc5 = BsonDocument.parse("{\"_id\": 105,\n" + "\"field\": 3}");
        final BsonDocument doc6 = BsonDocument.parse("{\"_id\": 106,\n" + "\"field2\": 9}");
        final BsonDocument doc7 = BsonDocument.parse("{\"_id\": 107,\n" + "\"field\": null}");
        final BsonDocument doc8 = BsonDocument.parse("{\"_id\": 108}");
        final BsonDocument doc9 = BsonDocument.parse("{\"_id\": 109}");
        doc9.append("field", new BsonMinKey());
        insertBsonDocuments(tableName,
                new BsonDocument[]{doc1, doc2, doc3, doc4, doc5, doc6, doc7, doc8, doc9});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // FIX: the original concatenation produced "SELECT CASE \"field\"WHEN 1 ..."
            // (no space between the CASE operand and WHEN), which is invalid SQL.
            final ResultSet resultSet = statement.executeQuery(
                    String.format(
                            "SELECT CASE \"field\" "
                                    + "WHEN 1 THEN 'A' "
                                    + "WHEN 2 THEN 'B' "
                                    + "WHEN 5, 6 THEN 'C' "
                                    + "WHEN 4, 5, 6 THEN 'D' "
                                    + "WHEN 1, 2, 3, 4, 5, 6, 8, 9 THEN 'E' "
                                    + "ELSE 'F' END FROM \"%s\".\"%s\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            // field = 1, 2, 5, 4, 3 -> A, B, C, D, E; all other rows fall to ELSE -> F.
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("A", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("B", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("C", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("D", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("E", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("F", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("F", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("F", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("F", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests query with a where clause comparing two fields.
     *
     * @throws SQLException occurs if query fails.
*/
    @DisplayName("Tests query with WHERE clause comparing two fields.")
    @ParameterizedTest(name = "testQueryWhereTwoColumns - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testQueryWhereTwoColumns(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQueryWhereTwoFields";
        // Only row 101 has fieldA < fieldB.
        final BsonDocument ascendingRow =
                BsonDocument.parse("{\"_id\": 101,\n\"fieldA\": 4, \n \"fieldB\": 5}");
        final BsonDocument descendingRow =
                BsonDocument.parse("{\"_id\": 102,\n\"fieldA\": 5, \n \"fieldB\": 4}");
        insertBsonDocuments(tableName, new BsonDocument[]{ascendingRow, descendingRow});
        try (Connection connection = getConnection()) {
            final Statement stmt = getDocumentDbStatement(connection);
            final ResultSet rows = stmt.executeQuery(
                    String.format("SELECT * from \"%s\".\"%s\" WHERE \"fieldA\" < \"fieldB\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(rows);
            Assertions.assertTrue(rows.next());
            Assertions.assertEquals(4, rows.getInt(2));
            Assertions.assertFalse(rows.next());
        }
    }

    /**
     * Tests that queries with CASE are correct with two different fields involved.
     * @throws SQLException occurs if query fails.
*/
    @DisplayName("Tests queries with two field CASE.")
    @ParameterizedTest(name = "testCaseTwoFields - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testCaseTwoFields(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testCASETwoFields";
        // Row 101: A < B; row 102: A > B; row 103: B missing -> ELSE branch.
        final BsonDocument[] rows = {
                BsonDocument.parse("{\"_id\": 101,\n\"fieldA\": 1,\n\"fieldB\": 2}"),
                BsonDocument.parse("{\"_id\": 102,\n\"fieldA\": 2,\n\"fieldB\": 1}"),
                BsonDocument.parse("{\"_id\": 103,\n\"fieldA\": 1}"),
        };
        insertBsonDocuments(tableName, rows);
        try (Connection connection = getConnection()) {
            final Statement stmt = getDocumentDbStatement(connection);
            final ResultSet resultSet = stmt.executeQuery(
                    String.format(
                            "SELECT CASE "
                                    + "WHEN \"fieldA\" < \"fieldB\" THEN 'A' "
                                    + "WHEN \"fieldA\" > \"fieldB\" THEN 'B' "
                                    + "ELSE 'C' END FROM \"%s\".\"%s\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("A", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("B", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("C", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests that queries can contain nested OR conditions in WHERE clause.
     * @throws SQLException occurs if query or connection fails.
*/
    @DisplayName("Tests queries with nested OR.")
    @ParameterizedTest(name = "testWhereNestedOR - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testWhereNestedOR(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testNestedOR";
        // Rows 104 and 105 are the only ones satisfying both parenthesized OR groups.
        final BsonDocument[] rows = {
                BsonDocument.parse("{\"_id\": 101,\n\"fieldA\": 1,\n\"fieldB\": 2}"),
                BsonDocument.parse("{\"_id\": 102,\n\"fieldA\": 2,\n\"fieldB\": 1}"),
                BsonDocument.parse("{\"_id\": 103,\n\"fieldA\": 1}"),
                BsonDocument.parse("{\"_id\": 104,\n\"fieldA\": 13, \n\"fieldB\": 1}"),
                BsonDocument.parse("{\"_id\": 105,\n\"fieldA\": 1, \n\"fieldB\": 10}"),
        };
        insertBsonDocuments(tableName, rows);
        try (Connection connection = getConnection()) {
            final Statement stmt = getDocumentDbStatement(connection);
            final ResultSet resultSet = stmt.executeQuery(
                    String.format("SELECT * from \"%s\".\"%s\" "
                                    + "WHERE (\"fieldA\" < 3 OR \"fieldB\" < 2) "
                                    + "AND (\"fieldA\" > 12 OR \"fieldB\" > 8)",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals(104, resultSet.getInt(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals(105, resultSet.getInt(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests that string, integer, boolean, date-time, long and double values can all
     * be compared in a single WHERE clause.
     * @throws SQLException occurs if query fails.
     */
    @DisplayName("Tests queries with various types in WHERE clause")
    @ParameterizedTest(name = "testQueryWhereTypes - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testQueryWhereTypes(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQueryWhereTypes";
        final BsonDateTime date = new BsonDateTime(Instant.now().toEpochMilli());
        final long bigInt = 100000000000L;
        final double doubleValue = 1.2345;
        // Row 101 carries one value of each tested type; row 102 must be filtered out.
        final BsonDocument doc1 =
                BsonDocument.parse("{\"_id\": 101,\n\"fieldA\": \"abc\", \n \"fieldB\": 5}");
        doc1.append("fieldC", BsonBoolean.TRUE);
        doc1.append("fieldD", date);
        doc1.append("fieldE", new BsonInt64(bigInt));
        doc1.append("fieldF", new BsonDouble(doubleValue));
        final BsonDocument doc2 =
                BsonDocument.parse("{\"_id\": 102,\n\"fieldA\": \"def\", \n \"fieldB\": 4}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2});
        try (Connection connection = getConnection()) {
            final Statement stmt = getDocumentDbStatement(connection);
            final ResultSet resultSet = stmt.executeQuery(
                    String.format("SELECT * from \"%s\".\"%s\" WHERE \"fieldA\" = 'abc' AND "
                                    + "\"fieldB\" = 5 AND \"fieldC\" = TRUE AND \"fieldD\" > '2020-03-11' AND \"fieldE\" = %d AND \"fieldF\" = %f",
                            getDatabaseName(), tableName, bigInt, doubleValue));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("abc", resultSet.getString(2));
            Assertions.assertEquals(5, resultSet.getInt(3));
            Assertions.assertTrue(resultSet.getBoolean(4));
            Assertions.assertEquals(date.getValue(), resultSet.getTimestamp(5).getTime());
            Assertions.assertEquals(bigInt, resultSet.getLong(6));
            Assertions.assertEquals(doubleValue, resultSet.getDouble(7));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests that date literals can be used in WHERE comparisons.
     * @throws SQLException occurs if query fails.
*/
    @DisplayName("Tests that date literals can be used in WHERE comparisons")
    @ParameterizedTest(name = "testQueryWhereDateLiteral - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testQueryWhereDateLiteral(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testDateLiteral";
        final long dateTime = Instant.parse("2020-01-01T00:00:00.00Z").toEpochMilli();
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
        doc1.append("field", new BsonDateTime(dateTime));
        insertBsonDocuments(tableName, new BsonDocument[]{doc1});
        try (Connection connection = getConnection()) {
            final Statement stmt = getDocumentDbStatement(connection);
            // field (2020-01-01) < DATE '2020-01-02' -> row returned.
            final ResultSet lessThan = stmt.executeQuery(
                    String.format("SELECT * FROM \"%s\".\"%s\" WHERE \"field\" < DATE '2020-01-02'",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(lessThan);
            Assertions.assertTrue(lessThan.next());
            Assertions.assertEquals(new Timestamp(dateTime), lessThan.getTimestamp(2));
            Assertions.assertFalse(lessThan.next());
            // field = DATE '2020-01-01' -> row returned.
            final ResultSet equalTo = stmt.executeQuery(
                    String.format("SELECT * FROM \"%s\".\"%s\" WHERE \"field\" = DATE '2020-01-01'",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(equalTo);
            Assertions.assertTrue(equalTo.next());
            Assertions.assertEquals(new Timestamp(dateTime), equalTo.getTimestamp(2));
            Assertions.assertFalse(equalTo.next());
            // field <> DATE '2020-01-01' -> no rows.
            final ResultSet notEqualTo = stmt.executeQuery(
                    String.format(
                            "SELECT * FROM \"%s\".\"%s\" WHERE \"field\" <> DATE '2020-01-01'",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(notEqualTo);
            Assertions.assertFalse(notEqualTo.next());
        }
    }

    /**
     * Tests that timestamp literals can be used in WHERE comparisons.
     * @throws SQLException occurs if query fails.
*/ @DisplayName("Tests that timestamp literals can be used in WHERE comparisons") @ParameterizedTest(name = "testQueryWhereTimestampLiteral - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testQueryWhereTimestampLiteral(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testTimestampLiteral"; final long dateTime = Instant.parse("2020-01-01T00:00:00.00Z").toEpochMilli(); final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}"); doc1.append("field", new BsonDateTime(dateTime)); insertBsonDocuments(tableName, new BsonDocument[]{doc1}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet1 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" WHERE \"field\" < TIMESTAMP '2020-01-02 00:00:00'", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet1); Assertions.assertTrue(resultSet1.next()); Assertions.assertEquals(new Timestamp(dateTime), resultSet1.getTimestamp(2)); Assertions.assertFalse(resultSet1.next()); final ResultSet resultSet2 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" WHERE \"field\" = TIMESTAMP '2020-01-01 00:00:00'", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet2); Assertions.assertTrue(resultSet2.next()); Assertions.assertEquals(new Timestamp(dateTime), resultSet2.getTimestamp(2)); Assertions.assertFalse(resultSet2.next()); final ResultSet resultSet3 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" WHERE \"field\" <> TIMESTAMP '2020-01-01 00:00:00'", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet3); Assertions.assertFalse(resultSet3.next()); } } /** * Tests that calls to timestampAdd can be used in WHERE comparisons. * @throws SQLException occurs if query fails. 
*/
    @DisplayName("Tests that timestampadd can be used in WHERE comparisons")
    @ParameterizedTest(name = "testQueryWhereTimestampAdd - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testQueryWhereTimestampAdd(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testWhereTimestampAdd";
        final long dateTime = Instant.parse("2020-01-01T00:00:00.00Z").toEpochMilli();
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
        doc1.append("field", new BsonDateTime(dateTime));
        insertBsonDocuments(tableName, new BsonDocument[]{doc1});
        try (Connection connection = getConnection()) {
            final Statement stmt = getDocumentDbStatement(connection);
            // field + 1 day (2020-01-02) must equal the timestamp literal.
            final ResultSet rows = stmt.executeQuery(
                    String.format(
                            "SELECT * FROM \"%s\".\"%s\" WHERE TIMESTAMPADD(DAY, 1, \"field\") = TIMESTAMP '2020-01-02 00:00:00'",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(rows);
            Assertions.assertTrue(rows.next());
            Assertions.assertEquals(new Timestamp(dateTime), rows.getTimestamp(2));
            Assertions.assertFalse(rows.next());
        }
    }

    /**
     * Tests where condition of field compared to CURRENT_DATE.
     *
     * @throws SQLException occurs if query fails.
*/ @DisplayName("Tests where condition of field compared to CURRENT_DATE.") @ParameterizedTest(name = "testWhereFieldComparedToCurrentDate - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testWhereFieldComparedToCurrentDate(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testWhereFieldComparedToCurrentDate"; final long dateTimePast = Instant.now().minus(2, ChronoUnit.DAYS).toEpochMilli(); final long dateTimeFuture = Instant.now().plus(2, ChronoUnit.DAYS).toEpochMilli(); final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}"); doc1.append("field", new BsonDateTime(dateTimePast)); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102}"); doc2.append("field", new BsonDateTime(dateTimeFuture)); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103}"); doc3.append("field", new BsonNull()); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); for (final String currentFunc : new String[]{"CURRENT_DATE", "CURRENT_TIMESTAMP"}) { // Find condition that does exist. final ResultSet resultSet1 = statement.executeQuery( String.format( "SELECT \"field\"%n" + " FROM \"%s\".\"%s\"%n" + " WHERE \"field\" < TIMESTAMPADD(DAY, 1, %s)", getDatabaseName(), tableName, currentFunc)); Assertions.assertNotNull(resultSet1); Assertions.assertTrue(resultSet1.next()); Assertions.assertFalse(resultSet1.next()); // Find condition that does exist. final ResultSet resultSet2 = statement.executeQuery( String.format( "SELECT \"field\"%n" + " FROM \"%s\".\"%s\"%n" + " WHERE \"field\" > TIMESTAMPADD(DAY, 1, %s)", getDatabaseName(), tableName, currentFunc)); Assertions.assertNotNull(resultSet2); Assertions.assertTrue(resultSet2.next()); Assertions.assertFalse(resultSet2.next()); // Find condition that does NOT exist. 
final ResultSet resultSet3 = statement.executeQuery( String.format( "SELECT \"field\"%n" + " FROM \"%s\".\"%s\"%n" + " WHERE \"field\" > TIMESTAMPADD(DAY, 10, %s)", getDatabaseName(), tableName, currentFunc)); Assertions.assertNotNull(resultSet3); Assertions.assertFalse(resultSet3.next()); // Find condition that does NOT exist. final ResultSet resultSet4 = statement.executeQuery( String.format( "SELECT \"field\"%n" + " FROM \"%s\".\"%s\"%n" + " WHERE \"field\" < TIMESTAMPADD(DAY, -10, %s)", getDatabaseName(), tableName, currentFunc)); Assertions.assertNotNull(resultSet4); Assertions.assertFalse(resultSet4.next()); } } } /** * Tests for queries filtering by IS NULL. * * @throws SQLException occurs if query fails. */ @DisplayName("Tests for IS NULL") @ParameterizedTest(name = "testQueryWithIsNull - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testQueryWithIsNull(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testWhereQueryIsNull"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": \"abc\"}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": null}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format("SELECT * FROM \"%s\".\"%s\" WHERE \"field\" IS NULL", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("102", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("103", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } /** * Tests for queries filtering by IS NOT NULL. 
*
     * @throws SQLException occurs if query fails.
     */
    @DisplayName("Tests for IS NOT NULL")
    @ParameterizedTest(name = "testQueryWithIsNotNull - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testQueryWithIsNotNull(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testWhereQueryIsNotNull";
        // Only row 101 has a concrete value; null (102) and missing (103) are excluded.
        final BsonDocument[] rows = {
                BsonDocument.parse("{\"_id\": 101,\n\"field\": \"abc\"}"),
                BsonDocument.parse("{\"_id\": 102,\n\"field\": null}"),
                BsonDocument.parse("{\"_id\": 103}"),
        };
        insertBsonDocuments(tableName, rows);
        try (Connection connection = getConnection()) {
            final Statement stmt = getDocumentDbStatement(connection);
            final ResultSet resultSet = stmt.executeQuery(
                    String.format("SELECT * FROM \"%s\".\"%s\" WHERE \"field\" IS NOT NULL",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("101", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests for CASE statements containing IS NULL and IS NOT NULL.
     *
     * @throws SQLException occurs if query fails.
*/
    @DisplayName("Tests for CASE statements with IS [NOT] NULL")
    @ParameterizedTest(name = "testQueryWithIsNotNullCase - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testQueryWithIsNotNullCase(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testWhereQueryIsNotNullCase";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n"
                + "\"field\": \"abc\"}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n"
                + "\"field\": null}");
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // FIX: the original concatenation produced "...THEN 1WHEN..." and
            // "...THEN 2ELSE..." (no separating spaces), which is invalid SQL.
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT CASE "
                                    + "WHEN \"field\" IS NULL THEN 1 "
                                    + "WHEN \"field\" IS NOT NULL THEN 2 "
                                    + "ELSE 3 END "
                                    + "FROM \"%s\".\"%s\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            // "abc" -> 2; explicit null -> 1; missing field -> 1.
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals(2, resultSet.getInt(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals(1, resultSet.getInt(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals(1, resultSet.getInt(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests a query with CASE in the WHERE clause.
     * @throws SQLException occurs if query fails.
*/ @DisplayName("Tests query with WHERE and CASE.") @ParameterizedTest(name = "testWhereWithCase - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testWhereWithCase(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testWhereCASE"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": 1}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": 2}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": 5}"); final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104,\n" + "\"field\": 4}"); final BsonDocument doc5 = BsonDocument.parse("{\"_id\": 105,\n" + "\"field\": 3}"); final BsonDocument doc6 = BsonDocument.parse("{\"_id\": 106,\n" + "\"field\": null}"); final BsonDocument doc7 = BsonDocument.parse("{\"_id\": 107}"); final BsonDocument doc8 = BsonDocument.parse("{\"_id\": 108}"); doc8.append("field", new BsonMinKey()); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, doc4, doc5, doc6, doc7, doc8}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" " + "WHERE (CASE " + "WHEN \"field\" < 2 THEN 'A' " + "WHEN \"field\" <= 2 THEN 'B' " + "WHEN \"field\" > 4 THEN 'C' " + "WHEN \"field\" >= 4 THEN 'D' " + "WHEN \"field\" <> 7 THEN 'E' " + "ELSE 'F' END) = 'A'", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals(101, resultSet.getInt(1)); Assertions.assertEquals(1, resultSet.getInt(2)); Assertions.assertFalse(resultSet.next()); } } /** * Tests queries with WHERE using string literals with '$'. * @throws SQLException occurs if query fails. 
*/
    @DisplayName("Tests queries with WHERE using string literals with '$'.")
    @ParameterizedTest(name = "testWhereWithConflictingStringLiterals - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testWhereWithConflictingStringLiterals(final DocumentDbTestEnvironment testEnvironment)
            throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testWhereWithConflictingStringLiterals";
        // '$'-prefixed values must be treated as literals, not aggregation field references.
        final BsonDocument[] rows = {
                BsonDocument.parse("{\"_id\": 101,\n\"price\": \"$1\"}"),
                BsonDocument.parse("{\"_id\": 102,\n\"price\": \"$2.25\"}"),
                BsonDocument.parse("{\"_id\": 103,\n\"price\": \"1\"}"),
        };
        insertBsonDocuments(tableName, rows);
        try (Connection connection = getConnection()) {
            final Statement stmt = getDocumentDbStatement(connection);
            final ResultSet resultSet = stmt.executeQuery(
                    String.format(
                            "SELECT * FROM \"%s\".\"%s\" "
                                    + "WHERE \"price\" = '$1'",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals(101, resultSet.getInt(1));
            Assertions.assertEquals("$1", resultSet.getString(2));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests a query with nested CASE.
*/ @DisplayName("Tests a query with nested CASE.") @ParameterizedTest(name = "testNestedCase - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testNestedCase(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testNestedCASE"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": 1}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": 2}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": 3}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format( "SELECT CASE " + "WHEN \"field\" < 3 THEN " + "( CASE WHEN \"field\" < 2 THEN 'A' " + "ELSE 'B' END )" + "ELSE 'C' END FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("A", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("B", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("C", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } /** * Tests queries with CASE where a string literal contains '$'. 
*/ @DisplayName("Tests queries with CASE where a string literal contains '$'.") @ParameterizedTest(name = "testCaseWithConflictingStringLiterals - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testCaseWithConflictingStringLiterals(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testCaseWithConflictingStringLiterals"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"price\": \"$1\"}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"price\": \"$2.25\"}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"price\": \"1\"}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet1 = statement.executeQuery( String.format( "SELECT CASE " + "WHEN \"price\" = '$1' THEN 'A' " + "ELSE 'B' END FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet1); Assertions.assertTrue(resultSet1.next()); Assertions.assertEquals("A", resultSet1.getString(1)); Assertions.assertTrue(resultSet1.next()); Assertions.assertEquals("B", resultSet1.getString(1)); Assertions.assertTrue(resultSet1.next()); Assertions.assertEquals("B", resultSet1.getString(1)); Assertions.assertFalse(resultSet1.next()); final ResultSet resultSet2 = statement.executeQuery( String.format( "SELECT CASE " + "WHEN \"price\" = '1' THEN 'YES' " + "ELSE '$price' END FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet2); Assertions.assertTrue(resultSet2.next()); Assertions.assertEquals("$price", resultSet2.getString(1)); Assertions.assertTrue(resultSet2.next()); Assertions.assertEquals("$price", resultSet2.getString(1)); Assertions.assertTrue(resultSet2.next()); Assertions.assertEquals("YES", resultSet2.getString(1)); 
Assertions.assertFalse(resultSet2.next()); } } @DisplayName("Tests queries with CASE with boolean columns.") @ParameterizedTest(name = "testCaseWithBooleanColumns - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testCaseWithBooleanColumns(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testCaseWithBooleanColumns"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": true }"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": false }"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103 }"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format( "SELECT CASE " + "WHEN \"field\" THEN 'Yes' " + "WHEN NOT \"field\" THEN 'No' " + "ELSE 'Unknown' END FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("Yes", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("No", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("Unknown", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } /** * Tests that queries with substring work. * @throws SQLException occurs if query fails. 
*/ @DisplayName("Test that queries filtering with substring work.") @ParameterizedTest(name = "testQuerySubstring - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testQuerySubstring(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testWhereQuerySubstring"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": \"abcdefg\"}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": \"uvwxyz\"}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": \"\"}"); final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104, \n" + "\"field\": null}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, doc4}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" WHERE SUBSTRING(\"field\", 2, 3) = 'bcd'", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("101", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } /** * Tests that queries with substring without a length input work. * @throws SQLException occurs if query fails. 
*/ @DisplayName("Test that queries filtering with substring without a length input work.") @ParameterizedTest(name = "testQuerySubstringNoLength - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testQuerySubstringNoLength(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testWhereQuerySubstringNoLength"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": \"abcdefg\"}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": \"abcdefgh\"}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": \"\"}"); final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104, \n" + "\"field\": null}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, doc4}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" WHERE SUBSTRING(\"field\", 2) = 'bcdefg'", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("101", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } /** * Tests that queries with case containing substring. * @throws SQLException occurs if query fails. 
*/ @DisplayName("Test that queries with case containing substring work.") @ParameterizedTest(name = "testQueryCaseSubstring - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testQueryCaseSubstring(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testCaseQuerySubstring"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": \"abcdefg\"}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": \"abcmno\"}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": \"\"}"); final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104, \n" + "\"field\": null}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, doc4}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format("SELECT CASE " + "WHEN SUBSTRING(\"field\", 1, 4) = 'abcd' THEN 'A'" + "WHEN SUBSTRING(\"field\", 1, 3) = 'abc' THEN 'B'" + "ELSE 'C' END FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("A", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("B", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("C", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("C", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } /** * Tests that substring works with a literal. * @throws SQLException occurs if query fails. 
*/ @DisplayName("Tests substring with a literal.") @ParameterizedTest(name = "testSubstringLiteral - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testSubstringLiteral(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testSubstringLiteral"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": \"abc\", \n" + "\"field2\": 3}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": \"abcmno\"}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": \"\"}"); final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104, \n" + "\"field\": null}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, doc4}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" WHERE \"field\" = SUBSTRING('abcdef', 1, 3)", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("101", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } @DisplayName("Tests substring with expressions for index and length.") @ParameterizedTest(name = "testComplexQuery - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testSubstringExpressions(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testSubstringExpressions"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": \"abcdef\", \n" + "\"field2\": 3, \n" + "\"field3\": 1}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": \"abcdef\", \n" + "\"field2\": 2 \n" + "\"field3\": 1}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": \"\", 
\n" + "\"field2\": 3, \n" + "\"field3\": 1}"); final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104, \n" + "\"field\": null, \n" + "\"field2\": 3, \n" + "\"field3\": 1}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, doc4}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format( "SELECT SUBSTRING(\"field\", \"field3\", \"field2\" - \"field3\") " + "FROM \"%s\".\"%s\" " + "WHERE SUBSTRING(\"field\", \"field3\", \"field2\" + \"field3\") = 'abcd'", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("ab", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } @DisplayName("Tests substring where a conflict with a field exists") @ParameterizedTest(name = "testSubstringFieldConflict - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testSubstringFieldConflict(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testSubstringLiteralConflict"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": \"$100\"}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": \"abc\"}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": \"\"}"); final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104, \n" + "\"field\": null}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, doc4}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" WHERE \"field\" = SUBSTRING('$1000', 1, 4)", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); 
Assertions.assertEquals("101", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } @DisplayName("Tests substring where a conflict with an operator exists") @ParameterizedTest(name = "testSubstringOperatorConflict - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testSubstringOperatorConflict(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testSubstringOperatorConflict"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": \"$o\"}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": \"abc\"}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": \"\"}"); final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104, \n" + "\"field\": null}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, doc4}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" WHERE \"field\" = SUBSTRING('$or', 1, 2)", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("101", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } @DisplayName("Tests FLOOR(... TO ...) 
in WHERE clause.") @ParameterizedTest(name = "testFloorForDateInWhere - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testFloorForDateInWhere(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testFloorForDateInWhere"; final Instant dateTime = Instant.parse("2020-02-03T12:34:56.78Z"); final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}"); doc1.append("field", new BsonDateTime(dateTime.toEpochMilli())); insertBsonDocuments(tableName, new BsonDocument[]{doc1}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\"" + " WHERE FLOOR(\"field\" TO SECOND) >= \"field\"", getDatabaseName(), tableName)); Assertions.assertFalse(resultSet.next()); final ResultSet resultSet2 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\"" + " WHERE FLOOR(\"field\" TO SECOND) < \"field\"", getDatabaseName(), tableName)); Assertions.assertTrue(resultSet2.next()); Assertions.assertEquals("101", resultSet2.getString(1)); Assertions.assertFalse(resultSet2.next()); } } @DisplayName("Tests arithmetic functions in WHERE clause.") @ParameterizedTest(name = "testArithmeticWhere - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testArithmeticWhere(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testWhereArithmetic"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": 4, \n" + "\"field2\": 3, \n" + "\"field3\": 2}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": 2, \n" + "\"field2\": 2 \n" + "\"field3\": 1}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": 2, \n" + "\"field2\": 3, \n" + "\"field3\": 1}"); final BsonDocument doc4 
= BsonDocument.parse("{\"_id\": 104, \n" + "\"field\": null, \n" + "\"field2\": 3, \n" + "\"field3\": 1}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, doc4}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format("SELECT * FROM \"%s\".\"%s\" " + "WHERE \"field\" * \"field2\" / \"field3\" + \"field2\" - \"field3\" = 7", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("101", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } @DisplayName("Tests that queries filtering by modulo work.") @ParameterizedTest(name = "testModuloWhere - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testModuloWhere(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testWhereModulo"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": 5}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": 6}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": 1}"); final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104, \n" + "\"field\": null}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, doc4}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format("SELECT * FROM \"%s\".\"%s\" " + "WHERE MOD(\"field\", 3) = 2" + "OR MOD(8, \"field\") = 2" + "OR MOD(3, 2) = \"field\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("101", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("102", 
resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("103", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } @DisplayName("Tests queries containing nested OR.") @ParameterizedTest(name = "testNestedOR - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testNestedOR(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "TestWhereOr"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": true, \n" + "\"field2\": false, \n" + "\"field3\": 5}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": false, \n" + "\"field2\": false \n" + "\"field3\": 7}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": false, \n" + "\"field2\": true, \n" + "\"field3\": 1}"); final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104, \n" + "\"field\": false, \n" + "\"field2\": false, \n" + "\"field3\": 1}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, doc4}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format("SELECT * FROM \"%s\".\"%s\" " + "WHERE \"field\" OR (\"field2\" OR \"field3\" > 6)", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("101", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("102", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("103", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } @DisplayName("Tests queries with nested AND.") @ParameterizedTest(name = "testNestedAND - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testNestedAND(final DocumentDbTestEnvironment testEnvironment) 
throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testWhereAnd"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": true, \n" + "\"field2\": true, \n" + "\"field3\": 7}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": true, \n" + "\"field2\": false \n" + "\"field3\": 7}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": false, \n" + "\"field2\": true, \n" + "\"field3\": 8}"); final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104, \n" + "\"field\": false, \n" + "\"field2\": false, \n" + "\"field3\": 1}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, doc4}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format("SELECT * FROM \"%s\".\"%s\" " + "WHERE \"field\" AND (\"field2\" AND \"field3\" > 6)", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("101", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } @DisplayName("Tests queries with nested combined OR and AND.") @ParameterizedTest(name = "testNestedOR - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testNestedAndOr(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testWhereAndOrCombined"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": true, \n" + "\"field2\": true, \n" + "\"field3\": 7, \n" + "\"field4\": false}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": true, \n" + "\"field2\": false \n" + "\"field3\": 7, \n" + "\"field4\": false}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": false, \n" + "\"field2\": true, \n" + "\"field3\": 8, \n" + 
"\"field4\": true}"); final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104, \n" + "\"field\": false, \n" + "\"field2\": false, \n" + "\"field3\": 1}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, doc4}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format("SELECT * FROM \"%s\".\"%s\" " + "WHERE ((\"field\" AND \"field3\" < 10) AND (\"field2\" OR \"field3\" > 6)) OR \"field4\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("101", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("102", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("103", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } @DisplayName("Tests queries with NOT combined with OR and AND.") @ParameterizedTest(name = "testNotCombinedWithAndOr - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testNotCombinedWithAndOr(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testWhereNotAndOr"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" + "\"field\": true, \n" + "\"field2\": true, \n" + "\"field3\": 7, \n" + "\"field4\": false}"); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" + "\"field\": false, \n" + "\"field2\": false \n" + "\"field3\": 7, \n" + "\"field4\": false}"); final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103,\n" + "\"field\": false, \n" + "\"field2\": true, \n" + "\"field3\": 8, \n" + "\"field4\": true}"); final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104, \n" + "\"field\": false, \n" + "\"field2\": false, \n" + "\"field3\": 1}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, 
                doc4});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // 102 via NOT field and NOT field2, 103 via field4, 104 via NOT field/NOT field2;
            // 101 is excluded because NOT "field" is false.
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT * FROM \"%s\".\"%s\" "
                                    + "WHERE ((NOT \"field\" AND \"field3\" < 10) AND (NOT \"field2\" OR \"field3\" > 6)) OR \"field4\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("102", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("103", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("104", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests filtering rows by comparing a date column against CURRENT_DATE.
     * Uses a fixed past date so CURRENT_DATE is always greater.
     */
    @DisplayName("Test queries filtering by CURRENT_DATE.")
    @ParameterizedTest(name = "testWhereCurrentDate - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testWhereCurrentDate(final DocumentDbTestEnvironment testEnvironment)
            throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testWhereCurrentDate";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
        doc1.append("date", new BsonDateTime(Instant.parse("2020-01-01T00:00:00.00Z").toEpochMilli()));
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102}");
        doc2.append("date", new BsonDateTime(Instant.parse("2020-01-01T00:00:00.00Z").toEpochMilli()));
        // Null date: comparison is unknown, so the row is filtered out.
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 104, \n"
                + "\"date\": null}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT * FROM \"%s\".\"%s\" "
                                    + "WHERE CURRENT_DATE > \"date\"",
                            getDatabaseName(), tableName));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("101", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("102", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests filtering rows by comparing CURRENT_TIME against a date column cast to TIME.
     * NOTE(review): assumes CURRENT_TIME never equals the stored midnight time exactly
     * at test run time — could flake if executed precisely at 00:00:00; verify intent.
     */
    @DisplayName("Tests queries filtering by CURRENT_TIME.")
    @ParameterizedTest(name = "testWhereCurrentTime - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testWhereCurrentTime(final DocumentDbTestEnvironment testEnvironment)
            throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testWhereCurrentTime";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
        doc1.append("date", new BsonDateTime(Instant.parse("2020-01-01T00:00:00.00Z").toEpochMilli()));
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102}");
        doc2.append("date", new BsonDateTime(Instant.parse("2020-01-01T00:00:00.00Z").toEpochMilli()));
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 104, \n"
                + "\"date\": null}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT * FROM \"%s\".\"%s\" "
                                    + "WHERE CURRENT_TIME <> CAST(\"date\" AS TIME)",
                            getDatabaseName(), tableName));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("101", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("102", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests filtering rows by comparing a timestamp column against CURRENT_TIMESTAMP.
     */
    @DisplayName("Tests queries filtering by CURRENT_TIMESTAMP.")
    @ParameterizedTest(name = "testWhereCurrentTimestamp - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testWhereCurrentTimestamp(final DocumentDbTestEnvironment testEnvironment)
            throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testWhereCurrentTimestamp";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
        doc1.append("date", new BsonDateTime(Instant.parse("2020-01-01T00:00:00.00Z").toEpochMilli()));
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102}");
        doc2.append("date", new BsonDateTime(Instant.parse("2020-01-01T00:00:00.00Z").toEpochMilli()));
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103, \n"
                + "\"date\": null}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT * FROM \"%s\".\"%s\" "
                                    + "WHERE CURRENT_TIMESTAMP <> \"date\"",
                            getDatabaseName(), tableName));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("101", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("102", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests filtering with EXTRACT of each date/time unit; each document below
     * deviates from the 2020-01-01T00:00:00Z baseline in exactly one unit.
     */
    @DisplayName("Tests queries filtering by date extract.")
    @ParameterizedTest(name = "testWhereExtract - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testWhereExtract(final DocumentDbTestEnvironment testEnvironment)
            throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testWhereSqlExtract";
        // 101: year 2021.
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
        doc1.append("date", new BsonDateTime(Instant.parse("2021-01-01T00:00:00.00Z").toEpochMilli()));
        // 102: month 2.
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102}");
        doc2.append("date", new BsonDateTime(Instant.parse("2020-02-01T00:00:00.00Z").toEpochMilli()));
        // 103: day 2.
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 103}");
        doc3.append("date", new BsonDateTime(Instant.parse("2020-01-02T00:00:00.00Z").toEpochMilli()));
        // 104: hour 1.
        final BsonDocument doc4 = BsonDocument.parse("{\"_id\": 104}");
        doc4.append("date", new BsonDateTime(Instant.parse("2020-01-01T01:00:00.00Z").toEpochMilli()));
        // 105: minute 1.
        final BsonDocument doc5 = BsonDocument.parse("{\"_id\": 105}");
        doc5.append("date", new BsonDateTime(Instant.parse("2020-01-01T00:01:00.00Z").toEpochMilli()));
        final BsonDocument doc6 =
BsonDocument.parse("{\"_id\": 106}"); doc6.append("date", new BsonDateTime(Instant.parse("2020-01-01T00:00:01.00Z").toEpochMilli())); final BsonDocument doc7 = BsonDocument.parse("{\"_id\": 107}"); doc7.append("date", new BsonDateTime(Instant.parse("2020-01-01T00:00:00.00Z").toEpochMilli())); final BsonDocument doc8 = BsonDocument.parse("{\"_id\": 108, \n" + "\"date\": null}"); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3, doc4, doc5, doc6, doc7, doc8}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final ResultSet resultSet = statement.executeQuery( String.format("SELECT * FROM \"%s\".\"%s\" " + "WHERE EXTRACT(YEAR FROM \"date\") = 2021" + "OR EXTRACT(MONTH FROM \"date\") = 2" + "OR EXTRACT(DAY FROM \"date\") = 2" + "OR EXTRACT(HOUR FROM \"date\") = 1" + "OR EXTRACT(MINUTE FROM \"date\") = 1" + "OR EXTRACT(SECOND FROM \"date\") = 1", getDatabaseName(), tableName)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("101", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("102", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("103", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("104", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("105", resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals("106", resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } } @DisplayName("Tests queries filtering by DAYNAME") @ParameterizedTest(name = "testWhereDayName - [{index}] - {arguments}") @MethodSource("getTestEnvironments") void testWhereDayName(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testWhereDAYNAME"; final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}"); 
doc1.append("date", new BsonDateTime(Instant.parse("2020-01-07T00:00:00.00Z").toEpochMilli()));
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102}");
        doc2.append("date", new BsonDateTime(Instant.parse("2020-01-01T00:00:00.00Z").toEpochMilli()));
        // Null-date document verifies NULL rows are excluded by DAYNAME().
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 104, \n" +
                "\"date\": null}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // 2020-01-07 was a Tuesday; 2020-01-01 was a Wednesday, so only doc 101 matches.
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT * FROM \"%s\".\"%s\" " +
                            "WHERE DAYNAME(\"date\") = 'Tuesday'", getDatabaseName(), tableName));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("101", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    @DisplayName("Tests queries filtering by MONTHNAME")
    @ParameterizedTest(name = "testWhereMonthName - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testWhereMonthName(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testWhereMonthName";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
        doc1.append("date", new BsonDateTime(Instant.parse("2020-01-01T00:00:00.00Z").toEpochMilli()));
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102}");
        doc2.append("date", new BsonDateTime(Instant.parse("2020-02-01T00:00:00.00Z").toEpochMilli()));
        // Null-date document verifies NULL rows are excluded by MONTHNAME().
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 104, \n" +
                "\"date\": null}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // Only doc 102 (February) should match.
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT * FROM \"%s\".\"%s\" " +
                            "WHERE MONTHNAME(\"date\") = 'February'", getDatabaseName(), tableName));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("102", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    @DisplayName("Tests queries filtering with date diff.")
    @ParameterizedTest(name = "testDateMinus - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testDateMinus(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testWhereDateMinus";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
        doc1.append("date", new BsonDateTime(Instant.parse("2020-01-01T00:00:00.00Z").toEpochMilli()));
        doc1.append("date2", new BsonDateTime(Instant.parse("2020-01-03T00:00:00.00Z").toEpochMilli()));
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102}");
        doc2.append("date", new BsonDateTime(Instant.parse("2020-01-01T00:00:00.00Z").toEpochMilli()));
        doc2.append("date2", new BsonDateTime(Instant.parse("2020-01-02T00:00:00.00Z").toEpochMilli()));
        final BsonDocument doc3 = BsonDocument.parse("{\"_id\": 104, \n" +
                "\"date\": null}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2, doc3});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // Only doc 101 has a two-day difference between date and date2.
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT * FROM \"%s\".\"%s\" " +
                            "WHERE TIMESTAMPDIFF(DAY, \"date\", \"date2\") = 2", getDatabaseName(), tableName));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("101", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    @DisplayName("Tests that setMaxRows limits the number of rows returned in result set.")
    @ParameterizedTest(name = "testSetMaxRows - [{index}] - {arguments}")
    @MethodSource("getTestEnvironments")
    void testSetMaxRows(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String collection = "testSetMaxRows";
        final BsonDocument[] documents = new BsonDocument[10];
        final int
totalNumberDocuments = 10;
        final int maxRows = 5;
        for (int i = 0; i < totalNumberDocuments; i++) {
            documents[i] = new BsonDocument("field", new BsonString("value"));
        }
        insertBsonDocuments(collection, documents);
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // Don't set max rows: the default of 0 means unlimited, so all rows come back.
            ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), collection));
            int actualRowCount = 0;
            while (resultSet.next()) {
                actualRowCount++;
            }
            Assertions.assertEquals(0, statement.getMaxRows());
            Assertions.assertEquals(totalNumberDocuments, actualRowCount);
            // Set max rows < actual: the result is truncated to maxRows.
            statement.setMaxRows(maxRows);
            resultSet = statement.executeQuery(
                    String.format("SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), collection));
            actualRowCount = 0;
            while (resultSet.next()) {
                actualRowCount++;
            }
            Assertions.assertEquals(maxRows, statement.getMaxRows());
            Assertions.assertEquals(maxRows, actualRowCount);
            // Set unlimited
            statement.setMaxRows(0);
            resultSet = statement.executeQuery(
                    String.format("SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), collection));
            actualRowCount = 0;
            while (resultSet.next()) {
                actualRowCount++;
            }
            Assertions.assertEquals(0, statement.getMaxRows());
            Assertions.assertEquals(totalNumberDocuments, actualRowCount);
            // Set max rows > SQL LIMIT: the smaller SQL LIMIT wins.
            int limit = maxRows - 1;
            statement.setMaxRows(maxRows);
            resultSet = statement.executeQuery(
                    String.format("SELECT * FROM \"%s\".\"%s\" LIMIT %d", getDatabaseName(), collection, limit));
            actualRowCount = 0;
            while (resultSet.next()) {
                actualRowCount++;
            }
            Assertions.assertEquals(maxRows, statement.getMaxRows());
            Assertions.assertEquals(limit, actualRowCount);
            // Set max rows < SQL LIMIT: the smaller maxRows wins.
            limit = maxRows + 1;
            statement.setMaxRows(maxRows);
            resultSet = statement.executeQuery(
                    String.format("SELECT * FROM \"%s\".\"%s\" LIMIT %d", getDatabaseName(), collection, limit));
            actualRowCount = 0;
            while (resultSet.next()) {
                actualRowCount++;
            }
            Assertions.assertEquals(maxRows, statement.getMaxRows());
            Assertions.assertEquals(maxRows, actualRowCount);
        }
    }

    @DisplayName("Test that queries using COALESCE() are correct.")
    @ParameterizedTest(name = "testQueryWhereCoalesce - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQueryWhereCoalesce(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQueryWhereCoalesce";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101, \n" +
                "\"field1\": null, \n" + // Added this document only for metadata
                "\"field2\": 1, \n" +
                "\"field3\": 2}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102, \n" +
                "\"field1\": null, \n" +
                "\"field2\": null, \n" +
                "\"field3\": 2}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // Doc 101 coalesces to field2 (= 1); only doc 102 coalesces to field3 (= 2).
            final ResultSet resultSet = statement.executeQuery(String.format(
                    "SELECT * FROM \"%s\".\"%s\" WHERE COALESCE(\"%s\", \"%s\", \"%s\") = 2 ",
                    getDatabaseName(), tableName, "field1", "field2", "field3"));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals(resultSet.getInt(1), 102);
            Assertions.assertFalse(resultSet.next());
        }
    }

    @DisplayName("Test that queries using [NOT] BETWEEN are correct.")
    @ParameterizedTest(name = "testQueryBetween - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQueryBetween(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQueryBetween";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101, \n" +
                "\"field1\": 1, \n" + // Added this document only for metadata
                "\"field2\": 4, \n" +
                "\"field3\": 3}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102, \n" +
                "\"field1\": 1, \n" +
                "\"field2\": 2, \n" +
"\"field3\": 3}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // field2 = 2 lies between field1 = 1 and field3 = 3 only for doc 102.
            final ResultSet resultSet1 = statement.executeQuery(String.format(
                    "SELECT * FROM \"%s\".\"%s\" WHERE \"%s\" BETWEEN \"%s\" AND \"%s\"",
                    getDatabaseName(), tableName, "field2", "field1", "field3"));
            Assertions.assertNotNull(resultSet1);
            Assertions.assertTrue(resultSet1.next());
            Assertions.assertEquals(resultSet1.getInt(1), 102);
            Assertions.assertFalse(resultSet1.next());
            // NOT BETWEEN returns the complementary row (doc 101, field2 = 4).
            final ResultSet resultSet2 = statement.executeQuery(String.format(
                    "SELECT * FROM \"%s\".\"%s\" WHERE \"%s\" NOT BETWEEN \"%s\" AND \"%s\"",
                    getDatabaseName(), tableName, "field2", "field1", "field3"));
            Assertions.assertNotNull(resultSet2);
            Assertions.assertTrue(resultSet2.next());
            Assertions.assertEquals(resultSet2.getInt(1), 101);
            Assertions.assertFalse(resultSet2.next());
        }
    }

    /**
     * Tests comparing the synthetic "__id" column against string, binary (hex),
     * DATE and numeric literals when the underlying _id values mix ObjectId and
     * String BSON types.
     */
    @ParameterizedTest(name = "testQueryObjectId - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQueryObjectId(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String collection = "testQueryObjectId";
        final BsonDocument document1 = BsonDocument.parse("{ \"field\": 1 }")
                .append("_id", new BsonObjectId(new ObjectId("111111111111111111111111")));
        final BsonDocument document2 = BsonDocument.parse("{ \"field\": 2 }")
                .append("_id", new BsonObjectId(new ObjectId("222222222222222222222222")));
        final BsonDocument document3 = BsonDocument.parse("{ \"field\": 3 }")
                .append("_id", new BsonString("3"));
        final BsonDocument document4 = BsonDocument.parse("{ \"field\": 4 }")
                .append("_id", new BsonObjectId(new ObjectId("444444444444444444444444")));
        final BsonDocument document5 = BsonDocument.parse("{ \"field\": 5 }")
                .append("_id", new BsonString("333333333333333333333333"));
        insertBsonDocuments(collection, new BsonDocument[]{document1, document2, document3,
                document4, document5});
        // NOTE: Using ID_FORWARD to process the documents so the last record has _id with an ObjectId
        // data type. This will allow us to search by ObjectId, as well as string.
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // Verify that result set has correct values.
            statement.execute(String.format(
                    "SELECT * FROM \"%1$s\".\"%2$s\" ORDER BY \"%2$s__id\" DESC LIMIT 1",
                    getDatabaseName(), collection));
            final ResultSet resultSet1 = statement.getResultSet();
            Assertions.assertNotNull(resultSet1);
            Assertions.assertTrue(resultSet1.next());
            final String id = resultSet1.getString(collection + "__id");
            Assertions.assertNotNull(id);
            Assertions.assertEquals("444444444444444444444444", id);
            Assertions.assertFalse(resultSet1.next());
            // Verify that result set has correct values.
            statement.execute(String.format(
                    "SELECT * FROM \"%s\".\"%s\"" +
                            " WHERE \"%s\" = '444444444444444444444444'",
                    getDatabaseName(), collection, collection + "__id"));
            final ResultSet resultSet2 = statement.getResultSet();
            Assertions.assertNotNull(resultSet2);
            Assertions.assertTrue(resultSet2.next());
            Assertions.assertEquals(id, resultSet2.getString(collection + "__id"));
            Assertions.assertFalse(resultSet2.next());
            // Verify that result set has correct values.
            statement.execute(String.format(
                    "SELECT * FROM \"%s\".\"%s\"" +
                            " WHERE \"%s\" = x'444444444444444444444444'",
                    getDatabaseName(), collection, collection + "__id"));
            final ResultSet resultSet3 = statement.getResultSet();
            Assertions.assertNotNull(resultSet3);
            Assertions.assertTrue(resultSet3.next());
            Assertions.assertEquals(id, resultSet3.getString(collection + "__id"));
            Assertions.assertFalse(resultSet3.next());
            // Verify that result set has correct values.
            statement.execute(String.format(
                    "SELECT * FROM \"%s\".\"%s\"" +
                            " WHERE \"%s\" = '3'",
                    getDatabaseName(), collection, collection + "__id"));
            final ResultSet resultSet4 = statement.getResultSet();
            Assertions.assertNotNull(resultSet4);
            Assertions.assertTrue(resultSet4.next());
            Assertions.assertEquals("3", resultSet4.getString(collection + "__id"));
            Assertions.assertFalse(resultSet4.next());
            // Verify that result set has correct values.
            statement.execute(String.format(
                    "SELECT * FROM \"%s\".\"%s\"" +
                            " WHERE \"%s\" = DATE '2020-01-01'",
                    getDatabaseName(), collection, collection + "__id"));
            final ResultSet resultSet5 = statement.getResultSet();
            Assertions.assertNotNull(resultSet5);
            Assertions.assertFalse(resultSet5.next());
            // Verify that result set has correct values.
            statement.execute(String.format(
                    "SELECT * FROM \"%s\".\"%s\"" +
                            " WHERE \"%s\" = 12345",
                    getDatabaseName(), collection, collection + "__id"));
            final ResultSet resultSet6 = statement.getResultSet();
            Assertions.assertNotNull(resultSet6);
            Assertions.assertFalse(resultSet6.next());
            // Query syntax compare operators will return false if type is not matched.
            // No matches returned for this case.
            statement.execute(String.format(
                    "SELECT * FROM \"%s\".\"%s\"" +
                            " WHERE \"%s\" < x'111111111111111111111111'",
                    getDatabaseName(), collection, collection + "__id"));
            final ResultSet resultSet7 = statement.getResultSet();
            Assertions.assertNotNull(resultSet7);
            Assertions.assertFalse(resultSet7.next());
            // Only string entries will be returned. Others don't match type.
            statement.execute(String.format(
                    "SELECT * FROM \"%1$s\".\"%2$s\"" +
                            " WHERE \"%3$s\" > '3' ORDER BY %3$s",
                    getDatabaseName(), collection, collection + "__id"));
            final ResultSet resultSet8 = statement.getResultSet();
            Assertions.assertNotNull(resultSet8);
            Assertions.assertTrue(resultSet8.next());
            // String data type
            Assertions.assertEquals("333333333333333333333333",
                    resultSet8.getString(collection + "__id"));
            Assertions.assertFalse(resultSet8.next());
            // String data type.
            statement.execute(String.format(
                    "SELECT * FROM \"%1$s\".\"%2$s\"" +
                            " WHERE \"%3$s\" = '333333333333333333333333'",
                    getDatabaseName(), collection, collection + "__id"));
            final ResultSet resultSet9 = statement.getResultSet();
            Assertions.assertNotNull(resultSet9);
            Assertions.assertTrue(resultSet9.next());
            Assertions.assertEquals("333333333333333333333333",
                    resultSet9.getString(collection + "__id"));
            Assertions.assertFalse(resultSet9.next());
        }
    }
}
4,493
0
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb/jdbc/DocumentDbConnectionPropertiesTest.java
/* * Copyright <2021> Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://www.apache.org/licenses/LICENSE-2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. * */ package software.amazon.documentdb.jdbc; import com.google.common.base.Strings; import com.mongodb.MongoClientSettings; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import org.checkerframework.checker.nullness.qual.NonNull; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.function.ThrowingSupplier; import software.amazon.documentdb.jdbc.common.test.DocumentDbTestEnvironment; import java.io.IOException; import java.lang.reflect.Field; import java.security.cert.Certificate; import java.sql.SQLException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import static software.amazon.documentdb.jdbc.DocumentDbConnectionProperties.DOCUMENTDB_CUSTOM_OPTIONS; import static software.amazon.documentdb.jdbc.DocumentDbConnectionProperties.DOCUMENT_DB_SCHEME; import static software.amazon.documentdb.jdbc.DocumentDbConnectionProperties.ValidationType.SSH_TUNNEL; public class DocumentDbConnectionPropertiesTest { /** * Tests building the client settings and sanitized connection string from valid properties. 
*/
    @Test
    @DisplayName("Tests building the client settings and sanitized connection string from valid properties.")
    @SuppressFBWarnings(value = "HARD_CODE_PASSWORD", justification = "Hardcoded for test purposes only.")
    public void testValidProperties() {
        // Set properties.
        final DocumentDbConnectionProperties properties = new DocumentDbConnectionProperties();
        properties.setUser("USER");
        properties.setPassword("PASSWORD");
        properties.setDatabase("DATABASE");
        properties.setApplicationName("APPNAME");
        properties.setHostname("HOSTNAME");
        properties.setReplicaSet("rs0");
        properties.setLoginTimeout("100");
        properties.setMetadataScanLimit("100");
        properties.setTlsAllowInvalidHostnames("true");
        properties.setTlsEnabled("true");
        properties.setRetryReadsEnabled("true");
        properties.setTlsCAFilePath("src/main/resources/rds-ca-2019-root.pem");
        properties.setSshUser("SSHUSER");
        properties.setSshHostname("SSHHOST");
        properties.setSshPrivateKeyFile("~/.ssh/test-file-name.pem");
        properties.setSshPrivateKeyPassphrase("PASSPHRASE");
        properties.setSshStrictHostKeyChecking("false");
        properties.setSshKnownHostsFile("~/.ssh/unknown_hosts");
        properties.setDefaultFetchSize("1000");
        properties.setRefreshSchema("true");
        properties.setDefaultAuthenticationDatabase("test");
        properties.setAllowDiskUseOption("disable");
        // Get properties.
        Assertions.assertEquals("USER", properties.getUser());
        Assertions.assertEquals("PASSWORD", properties.getPassword());
        Assertions.assertEquals("DATABASE", properties.getDatabase());
        Assertions.assertEquals("APPNAME", properties.getApplicationName());
        Assertions.assertEquals("HOSTNAME", properties.getHostname());
        Assertions.assertEquals("rs0", properties.getReplicaSet());
        Assertions.assertEquals(100, properties.getLoginTimeout());
        Assertions.assertEquals(100, properties.getMetadataScanLimit());
        Assertions.assertTrue(properties.getTlsEnabled());
        Assertions.assertTrue(properties.getTlsAllowInvalidHostnames());
        Assertions.assertTrue(properties.getRetryReadsEnabled());
        Assertions.assertEquals("src/main/resources/rds-ca-2019-root.pem", properties.getTlsCAFilePath());
        Assertions.assertEquals("SSHUSER", properties.getSshUser());
        Assertions.assertEquals("SSHHOST", properties.getSshHostname());
        Assertions.assertEquals("~/.ssh/test-file-name.pem", properties.getSshPrivateKeyFile());
        Assertions.assertEquals("PASSPHRASE", properties.getSshPrivateKeyPassphrase());
        Assertions.assertFalse(properties.getSshStrictHostKeyChecking());
        Assertions.assertEquals("~/.ssh/unknown_hosts", properties.getSshKnownHostsFile());
        Assertions.assertEquals(1000, properties.getDefaultFetchSize());
        Assertions.assertTrue(properties.getRefreshSchema());
        Assertions.assertEquals("test", properties.getDefaultAuthenticationDatabase());
        Assertions.assertEquals(DocumentDbAllowDiskUseOption.DISABLE, properties.getAllowDiskUseOption());
        // Build sanitized connection string.
        // NOTE: password and SSH passphrase are intentionally absent from the sanitized string.
        Assertions.assertEquals(
                "//USER@HOSTNAME/DATABASE?appName=APPNAME" +
                        "&loginTimeoutSec=100" +
                        "&scanLimit=100" +
                        "&replicaSet=rs0" +
                        "&tlsAllowInvalidHostnames=true" +
                        "&tlsCAFile=src%2Fmain%2Fresources%2Frds-ca-2019-root.pem" +
                        "&sshUser=SSHUSER" +
                        "&sshHost=SSHHOST" +
                        "&sshPrivateKeyFile=%7E%2F.ssh%2Ftest-file-name.pem" +
                        "&sshStrictHostKeyChecking=false" +
                        "&sshKnownHostsFile=%7E%2F.ssh%2Funknown_hosts" +
                        "&defaultFetchSize=1000" +
                        "&refreshSchema=true" +
                        "&defaultAuthDb=test" +
                        "&allowDiskUse=disable",
                properties.buildSanitizedConnectionString());
        // Build client settings.
        final MongoClientSettings settings = properties.buildMongoClientSettings();
        Assertions.assertNotNull(settings);
        Assertions.assertEquals("USER", settings.getCredential().getUserName());
        Assertions.assertEquals("PASSWORD", String.valueOf(settings.getCredential().getPassword()));
        Assertions.assertEquals("test", settings.getCredential().getSource());
        // Host was set as "HOSTNAME" but is asserted lower-case here — presumably the
        // driver normalizes host names; TODO confirm against ServerAddress behavior.
        Assertions.assertEquals("hostname", settings.getClusterSettings().getHosts().get(0).getHost());
        Assertions.assertEquals("APPNAME", settings.getApplicationName());
        Assertions.assertEquals("rs0", settings.getClusterSettings().getRequiredReplicaSetName());
        Assertions.assertEquals(100, settings.getSocketSettings().getConnectTimeout(TimeUnit.SECONDS));
        Assertions.assertTrue(settings.getRetryReads());
        Assertions.assertTrue(settings.getSslSettings().isEnabled());
        Assertions.assertTrue(settings.getSslSettings().isInvalidHostNameAllowed());
        Assertions.assertNotNull(settings.getSslSettings().getContext().getClientSessionContext());
    }

    /**
     * Tests setting the scan method with the DocumentDbScanMethod enum.
*/
    @Test
    @DisplayName("Tests setting the scan method with the DocumentDbScanMethod enum.")
    public void testMetadataScanMethods() {
        final DocumentDbConnectionProperties properties = new DocumentDbConnectionProperties();
        properties.setMetadataScanMethod("random");
        Assertions.assertEquals(DocumentDbMetadataScanMethod.RANDOM, properties.getMetadataScanMethod());
        properties.setMetadataScanMethod("all");
        Assertions.assertEquals(DocumentDbMetadataScanMethod.ALL, properties.getMetadataScanMethod());
        properties.setMetadataScanMethod("idForward");
        Assertions.assertEquals(DocumentDbMetadataScanMethod.ID_FORWARD, properties.getMetadataScanMethod());
        properties.setMetadataScanMethod("idReverse");
        Assertions.assertEquals(DocumentDbMetadataScanMethod.ID_REVERSE, properties.getMetadataScanMethod());
        // An unrecognized value maps to null rather than throwing.
        properties.setMetadataScanMethod("garbage");
        Assertions.assertNull(properties.getMetadataScanMethod());
    }

    /**
     * Tests setting the allow disk use option with the DocumentDbAllowDiskUseOption enum.
     */
    @Test
    @DisplayName("Tests setting the allow disk use option with the DocumentDbAllowDiskUseOption enum.")
    public void testAllowDiskUseOptions() {
        final DocumentDbConnectionProperties properties = new DocumentDbConnectionProperties();
        properties.setAllowDiskUseOption(DocumentDbAllowDiskUseOption.DEFAULT.getName());
        Assertions.assertEquals(DocumentDbAllowDiskUseOption.DEFAULT, properties.getAllowDiskUseOption());
        properties.setAllowDiskUseOption(DocumentDbAllowDiskUseOption.DISABLE.getName());
        Assertions.assertEquals(DocumentDbAllowDiskUseOption.DISABLE, properties.getAllowDiskUseOption());
        properties.setAllowDiskUseOption(DocumentDbAllowDiskUseOption.ENABLE.getName());
        Assertions.assertEquals(DocumentDbAllowDiskUseOption.ENABLE, properties.getAllowDiskUseOption());
        // An unrecognized value maps to null rather than throwing.
        properties.setAllowDiskUseOption("garbage");
        Assertions.assertNull(properties.getAllowDiskUseOption());
    }

    /**
     * Tests setting the read preference with the DocumentDbReadPreference enum.
     */
    @Test
    @DisplayName("Tests setting the read preference with the DocumentDbReadPreference enum.")
    public void testReadPreferences() {
        final DocumentDbConnectionProperties properties = new DocumentDbConnectionProperties();
        properties.setReadPreference("primary");
        Assertions.assertEquals(DocumentDbReadPreference.PRIMARY, properties.getReadPreference());
        properties.setReadPreference("primaryPreferred");
        Assertions.assertEquals(DocumentDbReadPreference.PRIMARY_PREFERRED, properties.getReadPreference());
        properties.setReadPreference("secondary");
        Assertions.assertEquals(DocumentDbReadPreference.SECONDARY, properties.getReadPreference());
        properties.setReadPreference("secondaryPreferred");
        Assertions.assertEquals(DocumentDbReadPreference.SECONDARY_PREFERRED, properties.getReadPreference());
        properties.setReadPreference("nearest");
        Assertions.assertEquals(DocumentDbReadPreference.NEAREST, properties.getReadPreference());
        // An unrecognized value maps to null rather than throwing.
        properties.setReadPreference("garbage");
        Assertions.assertNull(properties.getReadPreference());
    }

    /**
     * Tests the properties builder function.
     */
    @Test
    @DisplayName("Tests the properties builder function on various connection strings.")
    public void testSetPropertiesFromConnectionString() throws SQLException {
        final Properties info = new Properties();
        String connectionString = "jdbc:documentdb://username:password@localhost/database";
        DocumentDbConnectionProperties properties = DocumentDbConnectionProperties
                .getPropertiesFromConnectionString(info, connectionString, DOCUMENT_DB_SCHEME);
        Assertions.assertEquals(4, properties.size());
        Assertions.assertEquals("localhost", properties.getProperty("host"));
        Assertions.assertEquals("database", properties.getProperty("database"));
        Assertions.assertEquals("username", properties.getProperty("user"));
        Assertions.assertEquals("password", properties.getProperty("password"));
        // Connection string does not override existing properties.
connectionString = "jdbc:documentdb://username:password@127.0.0.1/newdatabase";
        properties = DocumentDbConnectionProperties
                .getPropertiesFromConnectionString(info, connectionString, DOCUMENT_DB_SCHEME);
        Assertions.assertEquals(4, properties.size());
        Assertions.assertEquals("127.0.0.1", properties.getProperty("host"));
        Assertions.assertEquals("newdatabase", properties.getProperty("database"));
        Assertions.assertEquals("username", properties.getProperty("user"));
        Assertions.assertEquals("password", properties.getProperty("password"));
        // Get user (unencoded) name and password.
        info.clear();
        connectionString = "jdbc:documentdb://user%20name:pass%20word@127.0.0.1/newdatabase";
        properties = DocumentDbConnectionProperties
                .getPropertiesFromConnectionString(info, connectionString, DOCUMENT_DB_SCHEME);
        Assertions.assertEquals(4, properties.size());
        Assertions.assertEquals("127.0.0.1", properties.getProperty("host"));
        Assertions.assertEquals("newdatabase", properties.getProperty("database"));
        Assertions.assertEquals("user name", properties.getProperty("user"));
        Assertions.assertEquals("pass word", properties.getProperty("password"));
        // Check that all properties can be added.
        info.clear();
        connectionString = "jdbc:documentdb://user%20name:pass%20word@127.0.0.1/newdatabase"
                + "?" + DocumentDbConnectionProperty.READ_PREFERENCE.getName() + "=" + "secondaryPreferred"
                + "&" + DocumentDbConnectionProperty.APPLICATION_NAME.getName() + "=" + "application"
                + "&" + DocumentDbConnectionProperty.REPLICA_SET.getName() + "=" + "rs0"
                + "&" + DocumentDbConnectionProperty.TLS_ENABLED.getName() + "=" + "true"
                + "&" + DocumentDbConnectionProperty.TLS_ALLOW_INVALID_HOSTNAMES.getName() + "=" + "true"
                + "&" + DocumentDbConnectionProperty.TLS_CA_FILE.getName() + "=" + "~/rds-ca-2019-root.pem"
                + "&" + DocumentDbConnectionProperty.LOGIN_TIMEOUT_SEC.getName() + "=" + "4"
                + "&" + DocumentDbConnectionProperty.RETRY_READS_ENABLED.getName() + "=" + "true"
                + "&" + DocumentDbConnectionProperty.METADATA_SCAN_METHOD.getName() + "=" + "random"
                + "&" + DocumentDbConnectionProperty.METADATA_SCAN_LIMIT.getName() + "=" + "1"
                + "&" + DocumentDbConnectionProperty.SCHEMA_NAME.getName() + "=" + "notDefault"
                + "&" + DocumentDbConnectionProperty.SSH_USER.getName() + "=" + "sshUser"
                + "&" + DocumentDbConnectionProperty.SSH_HOSTNAME.getName() + "=" + "sshHost"
                + "&" + DocumentDbConnectionProperty.SSH_PRIVATE_KEY_FILE.getName() + "=" + "~/.ssh/key.pem"
                + "&" + DocumentDbConnectionProperty.SSH_PRIVATE_KEY_PASSPHRASE.getName() + "=" + "passphrase"
                + "&" + DocumentDbConnectionProperty.SSH_STRICT_HOST_KEY_CHECKING.getName() + "=" + "false"
                + "&" + DocumentDbConnectionProperty.SSH_KNOWN_HOSTS_FILE.getName() + "=" + "~/.ssh/known_hosts"
                + "&" + DocumentDbConnectionProperty.DEFAULT_FETCH_SIZE.getName() + "=" + "1000"
                + "&" + DocumentDbConnectionProperty.REFRESH_SCHEMA.getName() + "=" + "true"
                + "&" + DocumentDbConnectionProperty.DEFAULT_AUTH_DB.getName() + "=" + "test"
                + "&" + DocumentDbConnectionProperty.ALLOW_DISK_USE.getName() + "=" + "disable";
        properties = DocumentDbConnectionProperties
                .getPropertiesFromConnectionString(info, connectionString, DOCUMENT_DB_SCHEME);
        // Every declared connection property should now be populated.
        Assertions.assertEquals(DocumentDbConnectionProperty.values().length, properties.size());
        // Check that unsupported properties are ignored.
        connectionString = "jdbc:documentdb://user%20name:pass%20word@127.0.0.1/newdatabase"
                + "?" + "maxStalenessSeconds" + "=" + "value";
        properties = DocumentDbConnectionProperties
                .getPropertiesFromConnectionString(info, connectionString, DOCUMENT_DB_SCHEME);
        Assertions.assertEquals(4, properties.size());
        Assertions.assertNull(properties.getProperty("maxStalenessSeconds"));
    }

    @DisplayName("Test that custom options are added.")
    @Test
    void testCustomOptions() throws Exception {
        final Properties info = new Properties();
        // Copy the current environment and inject the custom-options variable.
        final Map<String, String> environment = new HashMap<>();
        environment.putAll(System.getenv());
        environment.put(DOCUMENTDB_CUSTOM_OPTIONS, "allowDiskUse=enable;unknownOption=true");
        try {
            setEnv(environment);
            final String connectionString = "jdbc:documentdb://username:password@127.0.0.1/database";
            final DocumentDbConnectionProperties properties = DocumentDbConnectionProperties
                    .getPropertiesFromConnectionString(info, connectionString, DOCUMENT_DB_SCHEME);
            // 4 URI-derived properties plus the 2 injected custom options.
            Assertions.assertEquals(6, properties.size());
            Assertions.assertEquals("127.0.0.1", properties.getProperty("host"));
            Assertions.assertEquals("database", properties.getProperty("database"));
            Assertions.assertEquals("username", properties.getProperty("user"));
            Assertions.assertEquals("password", properties.getProperty("password"));
            Assertions.assertEquals("enable", properties.getProperty("allowDiskUse"));
            Assertions.assertEquals("true", properties.getProperty("unknownOption"));
        } finally {
            // Restore the environment, so it doesn't affect other tests.
            environment.clear();
            environment.putAll(System.getenv());
            environment.remove(DOCUMENTDB_CUSTOM_OPTIONS);
            Assertions.assertEquals(null, environment.get(DOCUMENTDB_CUSTOM_OPTIONS));
            setEnv(environment);
            Assertions.assertEquals(null, System.getenv(DOCUMENTDB_CUSTOM_OPTIONS));
        }
    }

    /**
     * Resets the System's environment maps.
*
     * https://stackoverflow.com/questions/318239/how-do-i-set-environment-variables-from-java
     *
     * @param newEnv the new map to replace the existing environment map.
     * @throws Exception if environment private fields are not found or are not as
     *                   previously implemented.
     */
    @SuppressWarnings("unchecked")
    protected static void setEnv(final Map<String, String> newEnv) throws Exception {
        try {
            // Replace the mutable maps backing java.lang.ProcessEnvironment via reflection.
            // NOTE(review): this relies on JDK internals; on Java 17+ it presumably needs
            // --add-opens for java.base — confirm against the build configuration.
            final Class<?> processEnvironmentClass = Class.forName("java.lang.ProcessEnvironment");
            final Field theEnvironmentField = processEnvironmentClass.getDeclaredField("theEnvironment");
            theEnvironmentField.setAccessible(true);
            final Map<String, String> env = (Map<String, String>) theEnvironmentField.get(null);
            env.clear();
            env.putAll(newEnv);
            final Field theCaseInsensitiveEnvironmentField =
                    processEnvironmentClass.getDeclaredField("theCaseInsensitiveEnvironment");
            theCaseInsensitiveEnvironmentField.setAccessible(true);
            final Map<String, String> ciEnv =
                    (Map<String, String>) theCaseInsensitiveEnvironmentField.get(null);
            ciEnv.clear();
            ciEnv.putAll(newEnv);
        } catch (NoSuchFieldException e) {
            // Fallback path: mutate the map wrapped by the unmodifiable view that
            // System.getenv() returns (fields differ across JDK implementations).
            final Class[] classes = Collections.class.getDeclaredClasses();
            final Map<String, String> env = System.getenv();
            for (final Class cl : classes) {
                if ("java.util.Collections$UnmodifiableMap".equals(cl.getName())) {
                    final Field field = cl.getDeclaredField("m");
                    field.setAccessible(true);
                    final Object obj = field.get(env);
                    final Map<String, String> map = (Map<String, String>) obj;
                    map.clear();
                    map.putAll(newEnv);
                }
            }
        }
    }

    @DisplayName("Test that non-existent tlsCAfile is handled correctly.")
    @SuppressFBWarnings(value = "HARD_CODE_PASSWORD", justification = "Hardcoded for test purposes only.")
    @Test
    void testInvalidTlsCAFilePath() {
        final DocumentDbConnectionProperties properties1 = new DocumentDbConnectionProperties();
        properties1.setUser("USER");
        properties1.setPassword("PASSWORD");
        properties1.setDatabase("DATABASE");
        properties1.setHostname("HOSTNAME");
        properties1.setTlsEnabled("true");
        properties1.setTlsCAFilePath("~/invalid-filename.pem");
        // The middle of the message contains the expanded home directory, hence the regex.
        final String pattern = Matcher.quoteReplacement("TLS Certificate Authority file '")
                + ".*" + Matcher.quoteReplacement("invalid-filename.pem' not found.");
        Assertions.assertTrue(
                Assertions.assertThrows(SQLException.class, properties1::buildMongoClientSettings)
                        .getMessage().matches(pattern));
        final DocumentDbConnectionProperties properties2 = new DocumentDbConnectionProperties();
        properties2.setUser("USER");
        properties2.setPassword("PASSWORD");
        properties2.setDatabase("DATABASE");
        properties2.setHostname("HOSTNAME");
        // tlsCAFile option is ignored if tls is false.
        properties2.setTlsEnabled("false");
        properties2.setTlsCAFilePath("~/invalid-filename.pem");
        Assertions.assertDoesNotThrow(
                (ThrowingSupplier<MongoClientSettings>) properties2::buildMongoClientSettings);
    }

    /**
     * Tests getting and setting the application name.
     */
    @Test
    @DisplayName("Tests retrieving default and overridden application name and that the name is used in client settings.")
    public void testApplicationName() throws SQLException {
        // Get default app name.
        final Properties info = new Properties();
        final String connectionString = "jdbc:documentdb://username:password@localhost/database";
        final DocumentDbConnectionProperties properties = DocumentDbConnectionProperties
                .getPropertiesFromConnectionString(info, connectionString, DOCUMENT_DB_SCHEME);
        Assertions.assertFalse(Strings.isNullOrEmpty(properties.getApplicationName()));
        Assertions.assertEquals(DocumentDbConnectionProperties.DEFAULT_APPLICATION_NAME,
                properties.getApplicationName());
        // Override app name.
        properties.setApplicationName("APPNAME");
        Assertions.assertEquals("APPNAME", properties.getApplicationName());
        // Build client settings and ensure app name is passed.
        final MongoClientSettings settings = properties.buildMongoClientSettings();
        Assertions.assertEquals("APPNAME", settings.getApplicationName());
    }

    /**
     * Tests getting and setting the default authentication database.
*/ @Test @DisplayName("Tests retrieving default and overridden authentication database and that the database is used in client settings.") public void testDefaultAuthenticationDatabase() throws SQLException { // Get default authentication database. final Properties info = new Properties(); final String connectionString = "jdbc:documentdb://username:password@localhost/database"; final DocumentDbConnectionProperties properties = DocumentDbConnectionProperties .getPropertiesFromConnectionString(info, connectionString, DOCUMENT_DB_SCHEME); Assertions.assertEquals(DocumentDbConnectionProperty.DEFAULT_AUTH_DB.getDefaultValue(), properties.getDefaultAuthenticationDatabase()); // Override test database. properties.setDefaultAuthenticationDatabase("test"); Assertions.assertEquals("test", properties.getDefaultAuthenticationDatabase()); // Build client settings and ensure authentication database is passed. final MongoClientSettings settings = properties.buildMongoClientSettings(); Assertions.assertEquals("test", settings.getCredential().getSource()); } @Test @DisplayName("Tests the appendEmbeddedAndOptionalCaCertificates method") void testAppendEmbeddedAndOptionalCaCertificates() throws SQLException, IOException { final Properties info = new Properties(); final String connectionString = "jdbc:documentdb://username:password@localhost/database"; final DocumentDbConnectionProperties properties = DocumentDbConnectionProperties .getPropertiesFromConnectionString(info, connectionString, DOCUMENT_DB_SCHEME); final List<Certificate> caCertificates = new ArrayList<>(); properties.appendEmbeddedAndOptionalCaCertificates(caCertificates); Assertions.assertEquals(2, caCertificates.size()); caCertificates.clear(); properties.setTlsCAFilePath("src/main/resources/rds-ca-2019-root.pem"); properties.appendEmbeddedAndOptionalCaCertificates(caCertificates); Assertions.assertEquals(3, caCertificates.size()); caCertificates.clear(); properties.setTlsCAFilePath("invalid-path.pem"); 
Assertions.assertThrows(SQLException.class, () -> properties.appendEmbeddedAndOptionalCaCertificates(caCertificates)); Assertions.assertEquals(0, caCertificates.size()); } @SuppressFBWarnings("HARD_CODE_PASSWORD") @Test @DisplayName("Tests that it can build just the SSH tunnel connection string.") void testBuildSshConnectionProperties() throws SQLException { final DocumentDbConnectionProperties properties = new DocumentDbConnectionProperties(); properties.setUser("USER"); properties.setPassword("PASSWORD"); properties.setDatabase("DATABASE"); properties.setApplicationName("APPNAME"); properties.setHostname("HOSTNAME"); properties.setReplicaSet("rs0"); properties.setLoginTimeout("100"); properties.setMetadataScanLimit("100"); properties.setTlsAllowInvalidHostnames("true"); properties.setTlsEnabled("true"); properties.setRetryReadsEnabled("true"); properties.setTlsCAFilePath("src/main/resources/rds-ca-2019-root.pem"); properties.setSshUser("SSHUSER"); properties.setSshHostname("SSHHOST"); properties.setSshPrivateKeyFile("~/.ssh/test-file-name.pem"); properties.setSshPrivateKeyPassphrase("PASSPHRASE"); properties.setSshStrictHostKeyChecking("false"); properties.setSshKnownHostsFile("~/.ssh/unknown_hosts"); properties.setDefaultFetchSize("1000"); properties.setRefreshSchema("true"); properties.setDefaultAuthenticationDatabase("test"); properties.setAllowDiskUseOption("disable"); Assertions.assertEquals("//HOSTNAME/" + "?sshUser=SSHUSER" + "&sshHost=SSHHOST" + "&sshPrivateKeyFile=%7E%2F.ssh%2Ftest-file-name.pem" + "&sshPrivateKeyPassphrase=PASSPHRASE" + "&sshStrictHostKeyChecking=false" + "&sshKnownHostsFile=%7E%2F.ssh%2Funknown_hosts", properties.buildSshConnectionString()); final DocumentDbConnectionProperties parsedProperties = DocumentDbConnectionProperties.getPropertiesFromConnectionString( DOCUMENT_DB_SCHEME + properties.buildSshConnectionString(), SSH_TUNNEL); Assertions.assertEquals(properties.buildSshConnectionString(), 
parsedProperties.buildSshConnectionString()); } static @NonNull String buildInternalSshTunnelConnectionString( final @NonNull DocumentDbTestEnvironment environment) throws SQLException { final DocumentDbConnectionProperties properties = DocumentDbConnectionTest .getInternalSSHTunnelProperties(environment); final String loginInfo = DocumentDbConnectionProperties.buildLoginInfo( properties.getUser(), properties.getPassword()); final String hostInfo = DocumentDbConnectionProperties.buildHostInfo(properties.getHostname()); final String databaseInfo = DocumentDbConnectionProperties.buildDatabaseInfo(properties.getDatabase()); final StringBuilder optionalInfo = new StringBuilder(); DocumentDbConnectionProperties.buildSanitizedOptionalInfo(optionalInfo, properties); DocumentDbConnectionProperties.maybeAppendOptionalValue( optionalInfo, DocumentDbConnectionProperty.SSH_PRIVATE_KEY_PASSPHRASE, properties.getSshPrivateKeyPassphrase(), null); return DOCUMENT_DB_SCHEME + DocumentDbConnectionProperties.buildConnectionString( loginInfo, hostInfo, databaseInfo, optionalInfo.toString()); } }
4,494
0
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb/jdbc/DocumentDbDataSourceTest.java
/* * Copyright <2021> Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://www.apache.org/licenses/LICENSE-2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. * */ package software.amazon.documentdb.jdbc; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import software.amazon.documentdb.jdbc.common.test.DocumentDbFlapDoodleExtension; import software.amazon.documentdb.jdbc.common.test.DocumentDbFlapDoodleTest; import java.sql.SQLException; /** * Tests for the DocumentDbDataSource */ @ExtendWith(DocumentDbFlapDoodleExtension.class) public class DocumentDbDataSourceTest extends DocumentDbFlapDoodleTest { private static final String HOSTNAME = "localhost"; private static final String USERNAME = "user"; private static final String PASSWORD = "password"; private static final String DATABASE = "testDb"; private DocumentDbDataSource dataSource; @BeforeAll void setup() { // Add 1 valid user so we can successfully authenticate. createUser(DATABASE, USERNAME, PASSWORD); } /** * Instantiates data source object for testing. */ @BeforeEach public void initialize() { dataSource = new DocumentDbDataSource(); } /** * Tests and validates with valid properties. * * @throws SQLException on invalid validation of properties. 
*/ @Test public void testValidProperties() throws SQLException { dataSource.setUser("username"); dataSource.setPassword("password"); dataSource.setDatabase("testDb"); dataSource.setHostname("host"); dataSource.setReplicaSet("rs0"); dataSource.setReadPreference(DocumentDbReadPreference.PRIMARY); dataSource.setApplicationName("appName"); dataSource.setTlsEnabled(false); dataSource.setTlsAllowInvalidHostnames(false); dataSource.setLoginTimeout(5); dataSource.setRetryReadsEnabled(false); dataSource.validateRequiredProperties(); // Will throw SQL exception if invalid Assertions.assertEquals("username", dataSource.getUser()); Assertions.assertEquals("password", dataSource.getPassword()); Assertions.assertEquals("testDb", dataSource.getDatabase()); Assertions.assertEquals("host", dataSource.getHostname()); Assertions.assertEquals("rs0", dataSource.getReplicaSet()); Assertions.assertEquals(DocumentDbReadPreference.PRIMARY, dataSource.getReadPreference()); Assertions.assertEquals("appName", dataSource.getApplicationName()); Assertions.assertFalse(dataSource.getTlsEnabled()); Assertions.assertFalse(dataSource.getTlsAllowInvalidHosts()); Assertions.assertEquals(5L, dataSource.getLoginTimeout()); Assertions.assertFalse(dataSource.getRetryReadsEnabled()); } /** * Tests invalid property settings. */ @Test public void testInvalidPropertySettings() { Assertions.assertDoesNotThrow( () -> dataSource.setReplicaSet("rs2")); Assertions.assertThrows(SQLException.class, () -> dataSource.setLoginTimeout(-1)); } /** * Tests required properties validation with invalid inputs. 
*/ @Test public void testMissingPropertiesValidation() { Assertions.assertThrows(SQLException.class, () -> dataSource.validateRequiredProperties()); dataSource.setUser(""); dataSource.setPassword("password"); dataSource.setDatabase("db"); dataSource.setHostname("validHost"); Assertions.assertThrows(SQLException.class, () -> dataSource.validateRequiredProperties()); dataSource.setUser("user"); dataSource.setPassword(" "); Assertions.assertThrows(SQLException.class, () -> dataSource.validateRequiredProperties()); dataSource.setPassword("password"); dataSource.setDatabase(" "); Assertions.assertThrows(SQLException.class, () -> dataSource.validateRequiredProperties()); dataSource.setDatabase("database"); dataSource.setHostname(""); Assertions.assertThrows(SQLException.class, () -> dataSource.validateRequiredProperties()); } @Test void getConnection() throws SQLException { dataSource.setUser(USERNAME); dataSource.setPassword(PASSWORD); dataSource.setDatabase(DATABASE); dataSource.setHostname(HOSTNAME + ":" + getMongoPort()); dataSource.setTlsEnabled(false); Assertions.assertNotNull(dataSource.getConnection()); } @Test void getConnectionWithUsernamePassword() throws SQLException { dataSource.setDatabase(DATABASE); dataSource.setHostname(HOSTNAME + ":" + getMongoPort()); dataSource.setTlsEnabled(false); Assertions.assertNotNull(dataSource.getConnection(USERNAME, PASSWORD)); } @Test void getPooledConnection() throws SQLException { dataSource.setUser(USERNAME); dataSource.setPassword(PASSWORD); dataSource.setDatabase(DATABASE); dataSource.setHostname(HOSTNAME + ":" + getMongoPort()); dataSource.setTlsEnabled(false); Assertions.assertNotNull(dataSource.getPooledConnection()); } @Test void getPooledConnectionWithUsernamePassword() throws SQLException { dataSource.setDatabase(DATABASE); dataSource.setHostname(HOSTNAME + ":" + getMongoPort()); dataSource.setTlsEnabled(false); Assertions.assertNotNull(dataSource.getPooledConnection(USERNAME, PASSWORD)); } }
4,495
0
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb/jdbc/DocumentDbStatementJoinTest.java
/* * Copyright <2021> Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://www.apache.org/licenses/LICENSE-2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. * */ package software.amazon.documentdb.jdbc; import com.google.common.collect.Lists; import org.apache.log4j.Level; import org.apache.log4j.LogManager; import org.bson.BsonDocument; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; import software.amazon.documentdb.jdbc.common.test.DocumentDbTestEnvironment; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.List; public class DocumentDbStatementJoinTest extends DocumentDbStatementTest { private static final String MULTI_NESTED_ARRAY_AND_DOCUMENT_JSON = "{ \"_id\" : \"key1\", " + "\"document\": {\"field\": 1}," + "\"array\" : [" + " {\"field\": 1, \"document\": {\"field\": 1} , \"array\": [1, 2, 3, null], \"otherArray\": [4, 5, 6]}, " + " {\"field\": 2, \"document\": {\"field\": 2} , \"array\": [4, 5, 6], \"otherArray\": [7, 8, 9, null]}, " + " null ], " + "\"otherArray\" : [" + " {\"field\": 1, \"document\": {\"field\": 1} , \"array\": [1, 2, 3, null], \"otherArray\": [4, 5, 6]}, " + " {\"field\": 2, \"document\": {\"field\": 2} , \"array\": [4, 5, 6], \"otherArray\": [7, 8, 9, null]}," + " null ] }"; /** * Test querying for a virtual table from a nested document. 
* * @throws SQLException occurs if executing the statement or retrieving a value fails. */ @DisplayName("Test querying for a virtual table from a nested document.") @ParameterizedTest(name = "testQueryWithTwoLevelDocument - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testQueryWithTwoLevelDocument(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final BsonDocument document = BsonDocument.parse("{ \"_id\" : \"key\", \"doc\" : { \"field\" : 1 } }"); insertBsonDocuments( "testComplexDocument", new BsonDocument[]{document}); try (Connection connection = getConnection()) { final DocumentDbStatement statement = getDocumentDbStatement(connection); // Verify the base table. final ResultSet resultSet1 = statement.executeQuery(String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testComplexDocument")); Assertions.assertNotNull(resultSet1); // Verify the nested table from the field doc. final ResultSet resultSet2 = statement.executeQuery(String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testComplexDocument_doc")); Assertions.assertNotNull(resultSet2); Assertions.assertTrue(resultSet2.next()); Assertions.assertEquals("key", resultSet2.getString("testComplexDocument__id")); Assertions.assertEquals(1, resultSet2.getInt("field")); // Verify PROJECT. final ResultSet resultSet3 = statement.executeQuery(String.format( "SELECT \"%s\", \"%s\" FROM \"%s\".\"%s\"", "field", "testComplexDocument__id", getDatabaseName(), "testComplexDocument_doc")); Assertions.assertNotNull(resultSet3); Assertions.assertTrue(resultSet3.next()); Assertions.assertEquals("key", resultSet3.getString("testComplexDocument__id")); Assertions.assertEquals(1, resultSet3.getInt("field")); // Verify JOIN on the base table and nested table to produce 3 columns and 1 row. 
final ResultSet resultSet4 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" " + "INNER JOIN \"%s\".\"%s\" " + "ON %s = %s", getDatabaseName(), "testComplexDocument", getDatabaseName(), "testComplexDocument_doc", "\"testComplexDocument\".\"testComplexDocument__id\"", "\"testComplexDocument_doc\".\"testComplexDocument__id\"")); Assertions.assertNotNull(resultSet4); Assertions.assertEquals(3, resultSet4.getMetaData().getColumnCount()); int rowCount = 0; while (resultSet4.next()) { rowCount++; } Assertions.assertEquals(1, rowCount); } } /** * Test querying for a virtual table from a doubly nested document. * * @throws SQLException occurs if executing the statement or retrieving a value fails. */ @DisplayName("Test querying for a virtual table from a doubly nested document.") @ParameterizedTest(name = "testQueryWithThreeLevelDocument - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testQueryWithThreeLevelDocument(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final BsonDocument document = BsonDocument.parse( "{ \"_id\" : \"key\", \"doc\" : { \"field\" : 1, \"doc2\" : { \"field2\" : \"value\" } } }"); insertBsonDocuments( "testComplex3LevelDocument", new BsonDocument[]{document}); try (Connection connection = getConnection()) { final DocumentDbStatement statement = getDocumentDbStatement(connection); // Verify the base table. final ResultSet resultSet1 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testComplex3LevelDocument")); Assertions.assertNotNull(resultSet1); // Verify the nested table from the field doc. final ResultSet resultSet2 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testComplex3LevelDocument_doc")); Assertions.assertNotNull(resultSet2); // Verify the nested table from the field doc2 from the field doc. 
final ResultSet resultSet3 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testComplex3LevelDocument_doc_doc2")); Assertions.assertNotNull(resultSet3); Assertions.assertTrue(resultSet3.next()); Assertions.assertEquals("key", resultSet3.getString("testComplex3LevelDocument__id")); Assertions.assertEquals("value", resultSet3.getString("field2")); // Verify JOIN on the 3 tables to produce 5 columns and 1 row. final ResultSet resultSet4 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" " + "INNER JOIN \"%s\".\"%s\" " + "ON %s = %s " + "INNER JOIN \"%s\".\"%s\" " + "ON %s = %s", getDatabaseName(), "testComplex3LevelDocument", getDatabaseName(), "testComplex3LevelDocument_doc", "\"testComplex3LevelDocument\".\"testComplex3LevelDocument__id\"", "\"testComplex3LevelDocument_doc\".\"testComplex3LevelDocument__id\"", getDatabaseName(), "testComplex3LevelDocument_doc_doc2", "\"testComplex3LevelDocument_doc\".\"testComplex3LevelDocument__id\"", "\"testComplex3LevelDocument_doc_doc2\".\"testComplex3LevelDocument__id\"")); Assertions.assertNotNull(resultSet4); Assertions.assertEquals(5, resultSet4.getMetaData().getColumnCount()); int rowCount = 0; while (resultSet4.next()) { rowCount++; } Assertions.assertEquals(1, rowCount); } } /** * Test querying for a virtual table from a nested scalar array. * * @throws SQLException occurs if executing the statement or retrieving a value fails. 
*/ @DisplayName("Test querying for a virtual table from a nested scalar array.") @ParameterizedTest(name = "testQueryWithScalarArray - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testQueryWithScalarArray(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final BsonDocument document = BsonDocument.parse("{ \"_id\" : \"key\", \"array\" : [ 1, 2, 3 ] }"); insertBsonDocuments( "testScalarArray", new BsonDocument[]{document}); try (Connection connection = getConnection()) { final DocumentDbStatement statement = getDocumentDbStatement(connection); // Verify the base table. final ResultSet resultSet1 = statement.executeQuery( String.format("SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testScalarArray")); Assertions.assertNotNull(resultSet1); // Verify the nested table with 3 rows from the field array. final ResultSet resultSet2 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testScalarArray_array")); Assertions.assertNotNull(resultSet2); for (int i = 0; i < 3; i++) { Assertions.assertTrue(resultSet2.next()); Assertions.assertEquals("key", resultSet2.getString("testScalarArray__id")); Assertions.assertEquals(i, resultSet2.getLong("array_index_lvl_0")); Assertions.assertEquals(i + 1, resultSet2.getInt("value")); } // Verify JOIN on the base table and nested table to produce 4 columns and 3 rows. 
final ResultSet resultSet3 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" " + "INNER JOIN \"%s\".\"%s\" " + "ON %s = %s", getDatabaseName(), "testScalarArray", getDatabaseName(), "testScalarArray_array", "\"testScalarArray\".\"testScalarArray__id\"", "\"testScalarArray_array\".\"testScalarArray__id\"")); Assertions.assertNotNull(resultSet3); Assertions.assertEquals(4, resultSet3.getMetaData().getColumnCount()); int rowCount = 0; while (resultSet3.next()) { rowCount++; } Assertions.assertEquals(3, rowCount); } } /** * Test querying for a virtual table from a nested array of documents. * * @throws SQLException occurs if executing the statement or retrieving a value fails. */ @DisplayName("Tests querying for a virtual table from a nested array of documents.") @ParameterizedTest(name = "testQueryWithArrayOfDocuments - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testQueryWithArrayOfDocuments(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final BsonDocument document = BsonDocument.parse( "{ \"_id\" : \"key\", \"array\" : [ { \"field\" : 1, \"field1\": \"value\" }, { \"field\" : 2, \"field2\" : \"value\" } ]}"); insertBsonDocuments( "testDocumentArray", new BsonDocument[]{document}); try (Connection connection = getConnection()) { final DocumentDbStatement statement = getDocumentDbStatement(connection); // Verify the base table. final ResultSet resultSet1 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testDocumentArray")); Assertions.assertNotNull(resultSet1); // Verify the nested table with 2 rows from the field array. 
final ResultSet resultSet2 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testDocumentArray_array")); Assertions.assertNotNull(resultSet2); for (int i = 0; i < 2; i++) { Assertions.assertTrue(resultSet2.next()); Assertions.assertEquals("key", resultSet2.getString("testDocumentArray__id")); Assertions.assertEquals(i, resultSet2.getLong("array_index_lvl_0")); Assertions.assertEquals(i + 1, resultSet2.getInt("field")); Assertions.assertEquals("value", resultSet2.getString(i == 0 ? "field1" : "field2")); Assertions.assertNull(resultSet2.getString(i == 0 ? "field2" : "field1")); } // Verify WHERE on the nested table to produce only rows where field = 2. final ResultSet resultSet3 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" WHERE \"field\" = 2", getDatabaseName(), "testDocumentArray_array")); Assertions.assertNotNull(resultSet3); int rowCount = 0; while (resultSet3.next()) { Assertions.assertEquals(2, resultSet3.getInt("field")); rowCount++; } Assertions.assertEquals(1, rowCount); // Verify JOIN on the base table and nested table to produce 6 columns and 2 rows. final ResultSet resultSet4 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" " + "INNER JOIN \"%s\".\"%s\" " + "ON %s = %s", getDatabaseName(), "testDocumentArray", getDatabaseName(), "testDocumentArray_array", "\"testDocumentArray\".\"testDocumentArray__id\"", "\"testDocumentArray_array\".\"testDocumentArray__id\"")); Assertions.assertNotNull(resultSet4); Assertions.assertEquals(6, resultSet4.getMetaData().getColumnCount()); rowCount = 0; while (resultSet4.next()) { rowCount++; } Assertions.assertEquals(2, rowCount); } } /** * Test querying for a virtual table from a 2 level array. * * @throws SQLException occurs if executing the statement or retrieving a value fails. 
*/ @DisplayName("Test querying for a virtual table from a 2 level array.") @ParameterizedTest(name = "testQueryWithTwoLevelArray - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testQueryWithTwoLevelArray(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final BsonDocument document = BsonDocument.parse("{ \"_id\" : \"key\", \"array\" : [ [1, 2, 3 ], [ 4, 5, 6 ] ]}"); insertBsonDocuments( "test2LevelArray", new BsonDocument[]{document}); try (Connection connection = getConnection()) { final DocumentDbStatement statement = getDocumentDbStatement(connection); // Verify the base table. final ResultSet resultSet1 = statement.executeQuery( String.format("SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "test2LevelArray")); Assertions.assertNotNull(resultSet1); // Verify the nested table with 6 rows from the field array. final ResultSet resultSet2 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "test2LevelArray_array")); Assertions.assertNotNull(resultSet2); int expectedValue = 1; for (int i = 0; i < 2; i++) { for (int j = 0; j < 3; j++) { Assertions.assertTrue(resultSet2.next()); Assertions.assertEquals("key", resultSet2.getString("test2LevelArray__id")); Assertions.assertEquals(i, resultSet2.getLong("array_index_lvl_0")); Assertions.assertEquals(j, resultSet2.getLong("array_index_lvl_1")); Assertions.assertEquals(expectedValue, resultSet2.getInt("value")); expectedValue++; } } // Verify WHERE on the nested table to produce only rows where array_index_lvl_0 is 0. 
final ResultSet resultSet3 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" WHERE \"array_index_lvl_0\" = 0", getDatabaseName(), "test2LevelArray_array")); Assertions.assertNotNull(resultSet3); int rowCount = 0; while (resultSet3.next()) { Assertions.assertEquals(0, resultSet3.getLong("array_index_lvl_0")); rowCount++; } Assertions.assertEquals(3, rowCount); // Verify JOIN on the base table and nested table to produce 5 columns and 6. final ResultSet resultSet4 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" " + "INNER JOIN \"%s\".\"%s\" " + "ON %s = %s", getDatabaseName(), "test2LevelArray", getDatabaseName(), "test2LevelArray_array", "\"test2LevelArray\".\"test2LevelArray__id\"", "\"test2LevelArray_array\".\"test2LevelArray__id\"")); Assertions.assertNotNull(resultSet4); Assertions.assertEquals(5, resultSet4.getMetaData().getColumnCount()); rowCount = 0; while (resultSet4.next()) { rowCount++; } Assertions.assertEquals(6, rowCount); } } /** * Test querying for a virtual table from a nested array in a nested document. * * @throws SQLException occurs if executing the statement or retrieving a value fails. */ @DisplayName("Test querying for a virtual table from a nested array in a nested document.") @ParameterizedTest(name = "testQueryWithTwoLevelDocumentWithArray - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testQueryWithTwoLevelDocumentWithArray(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final BsonDocument document = BsonDocument.parse("{ \"_id\" : \"key\", \"doc\" : { \"field\" : 1, \"array\" : [1, 2, 3 ] } }"); insertBsonDocuments("testComplexDocumentWithArray", new BsonDocument[]{document}); try (Connection connection = getConnection()) { final DocumentDbStatement statement = getDocumentDbStatement(connection); // Verify the base table. 
final ResultSet resultSet1 = statement.executeQuery( String.format("SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testComplexDocumentWithArray")); Assertions.assertNotNull(resultSet1); // Verify the nested table from the field doc. final ResultSet resultSet2 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testComplexDocumentWithArray_doc")); Assertions.assertNotNull(resultSet2); // Verify the nested table with 3 rows from the field array in the field doc. final ResultSet resultSet3 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testComplexDocumentWithArray_doc_array")); Assertions.assertNotNull(resultSet3); for (int i = 0; i < 3; i++) { Assertions.assertTrue(resultSet3.next()); Assertions.assertEquals("key", resultSet3.getString("testComplexDocumentWithArray__id")); Assertions.assertEquals(i, resultSet3.getLong("doc_array_index_lvl_0")); Assertions.assertEquals(i + 1, resultSet3.getInt("value")); } // Verify WHERE on the nested table to produce only rows where value is 1. final ResultSet resultSet4 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" WHERE \"value\" = 1", getDatabaseName(), "testComplexDocumentWithArray_doc_array")); Assertions.assertNotNull(resultSet4); int rowCount = 0; while (resultSet4.next()) { Assertions.assertEquals(1, resultSet4.getInt("value")); rowCount++; } Assertions.assertEquals(1, rowCount); // Verify JOIN on the 3 tables to get 6 columns and 3 rows. 
final ResultSet resultSet5 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" " + "INNER JOIN \"%s\".\"%s\" " + "ON %s = %s " + "INNER JOIN \"%s\".\"%s\" " + "ON %s = %s", getDatabaseName(), "testComplexDocumentWithArray", getDatabaseName(), "testComplexDocumentWithArray_doc", "\"testComplexDocumentWithArray\".\"testComplexDocumentWithArray__id\"", "\"testComplexDocumentWithArray_doc\".\"testComplexDocumentWithArray__id\"", getDatabaseName(), "testComplexDocumentWithArray_doc_array", "\"testComplexDocumentWithArray_doc\".\"testComplexDocumentWithArray__id\"", "\"testComplexDocumentWithArray_doc_array\".\"testComplexDocumentWithArray__id\"")); Assertions.assertNotNull(resultSet5); Assertions.assertEquals(6, resultSet5.getMetaData().getColumnCount()); rowCount = 0; while (resultSet5.next()) { rowCount++; } Assertions.assertEquals(3, rowCount); } } /** * Test querying for a virtual table from a nested array in a document in a nested array. * * @throws SQLException occurs if executing the statement or retrieving a value fails. */ @DisplayName("Test querying for a virtual table from a nested array in a document in a nested array.") @ParameterizedTest(name = "testQueryWithArrayOfDocumentsWithArrays - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testQueryWithArrayOfDocumentsWithArrays(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final BsonDocument document = BsonDocument.parse( "{ \"_id\" : \"key\", \"array\" : [ { \"array2\" : [ 1, 2, 3 ] }, { \"array2\" : [ 4, 5, 6 ] } ]}"); insertBsonDocuments("testArrayOfDocumentsWithArray", new BsonDocument[]{document}); try (Connection connection = getConnection()) { final DocumentDbStatement statement = getDocumentDbStatement(connection); // Verify the base table. 
final ResultSet resultSet1 = statement.executeQuery( String.format("SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testArrayOfDocumentsWithArray")); Assertions.assertNotNull(resultSet1); // Verify the nested table with 2 rows from the field array. final ResultSet resultSet2 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testArrayOfDocumentsWithArray_array")); Assertions.assertNotNull(resultSet2); // Verify the nested table with 6 rows from the field array2 in the documents of array. final ResultSet resultSet3 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\"", getDatabaseName(), "testArrayOfDocumentsWithArray_array_array2")); Assertions.assertNotNull(resultSet3); int expectedValue = 1; for (int i = 0; i < 2; i++) { for (int j = 0; j < 3; j++) { Assertions.assertTrue(resultSet3.next()); Assertions.assertEquals("key", resultSet3.getString("testArrayOfDocumentsWithArray__id")); Assertions.assertEquals(i, resultSet3.getLong("array_index_lvl_0")); Assertions.assertEquals(j, resultSet3.getLong("array_array2_index_lvl_0")); Assertions.assertEquals(expectedValue, resultSet3.getInt("value")); expectedValue++; } } // Verify WHERE on the array2 nested table to produce only rows where array_index_lvl_0 is 0. final ResultSet resultSet4 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" WHERE \"array_index_lvl_0\" = 0", getDatabaseName(), "testArrayOfDocumentsWithArray_array_array2")); Assertions.assertNotNull(resultSet4); int rowCount = 0; while (resultSet4.next()) { Assertions.assertEquals(0, resultSet4.getLong("array_index_lvl_0")); rowCount++; } Assertions.assertEquals(3, rowCount); Assertions.assertFalse(resultSet4.next()); // Verify JOIN on the 3 tables to get 7 columns and 6 rows. 
final ResultSet resultSet5 = statement.executeQuery( String.format( "SELECT * FROM \"%s\".\"%s\" " + "INNER JOIN \"%s\".\"%s\" " + "ON %s = %s " + "INNER JOIN \"%s\".\"%s\" " + "ON %s = %s " + "AND %s = %s", getDatabaseName(), "testArrayOfDocumentsWithArray", getDatabaseName(), "testArrayOfDocumentsWithArray_array", "\"testArrayOfDocumentsWithArray\".\"testArrayOfDocumentsWithArray__id\"", "\"testArrayOfDocumentsWithArray_array\".\"testArrayOfDocumentsWithArray__id\"", getDatabaseName(), "testArrayOfDocumentsWithArray_array_array2", "\"testArrayOfDocumentsWithArray_array\".\"testArrayOfDocumentsWithArray__id\"", "\"testArrayOfDocumentsWithArray_array_array2\".\"testArrayOfDocumentsWithArray__id\"", "\"testArrayOfDocumentsWithArray_array\".\"array_index_lvl_0\"", "\"testArrayOfDocumentsWithArray_array_array2\".\"array_index_lvl_0\"")); Assertions.assertNotNull(resultSet5); Assertions.assertEquals(7, resultSet5.getMetaData().getColumnCount()); rowCount = 0; while (resultSet5.next()) { rowCount++; } Assertions.assertEquals(6, rowCount); } } /** * Tests that a statement with project, where, group by, having, order, and limit works with same collection joins. * * @throws SQLException occurs if executing the statement or retrieving a value fails. 
     */
    @DisplayName("Tests that a statement with project, where, group by, having, order, and limit works with same collection joins.")
    @ParameterizedTest(name = "testComplexQueryWithSameCollectionJoin - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testComplexQueryWithSameCollectionJoin(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String collection = "testComplexQueryJoin";
        // Three documents share field = 0; "key3" has field = 1 and is removed by the WHERE clause.
        final BsonDocument document1 =
                BsonDocument.parse("{ \"_id\" : \"key0\", \"field\": 0, \"array\": [1, 2, 3, 4, 5] }");
        final BsonDocument document2 =
                BsonDocument.parse("{ \"_id\" : \"key1\", \"field\": 0, \"array\": [1, 2, 3] }");
        final BsonDocument document3 =
                BsonDocument.parse("{ \"_id\" : \"key2\", \"field\": 0, \"array\": [1, 2] }");
        final BsonDocument document4 =
                BsonDocument.parse("{ \"_id\" : \"key3\", \"field\": 1, \"array\": [1, 2, 3, 4, 5] }");
        insertBsonDocuments(collection, new BsonDocument[]{document1, document2, document3, document4});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);

            // Verify that result set has correct values.
            statement.execute(String.format(
                    "SELECT SUM(\"%s\") as \"Sum\", COUNT(*) AS \"Count\" FROM \"%s\".\"%s\""
                            + "INNER JOIN \"%s\".\"%s\" ON \"%s\".\"%s\" = \"%s\".\"%s\""
                            + "WHERE \"%s\" <> 1 "
                            + "GROUP BY \"%s\".\"%s\" HAVING COUNT(*) > 1"
                            + "ORDER BY \"Count\" DESC LIMIT 1",
                    "field", getDatabaseName(), collection,
                    getDatabaseName(), collection + "_array",
                    collection, collection + "__id",
                    collection + "_array", collection + "__id",
                    "field",
                    collection, collection + "__id"));
            final ResultSet resultSet1 = statement.getResultSet();
            Assertions.assertNotNull(resultSet1);
            Assertions.assertTrue(resultSet1.next());
            // "key0" wins the ORDER BY on Count (5 array rows); its SUM of field is 0.
            Assertions.assertEquals(0, resultSet1.getInt("Sum"));
            Assertions.assertEquals(5, resultSet1.getInt("Count"));
            Assertions.assertFalse(resultSet1.next());
        }
    }

    /**
     * Tests that different join types produce the correct result for tables from same collection.
     *
     * @throws SQLException occurs if executing the statement or retrieving a value fails.
     */
    @DisplayName("Tests that different join types produce the correct result for tables from same collection.")
    @ParameterizedTest(name = "testJoinTypesForTablesFromSameCollection - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testJoinTypesForTablesFromSameCollection(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String collection = "testSameCollectionJoin";
        // The two documents populate disjoint virtual tables (doc1 vs. doc2), so an
        // inner join of those tables on _id can never match.
        final BsonDocument document1 = BsonDocument.parse("{ \"_id\" : \"key0\", \"doc1\": { \"field\" : 1 } }");
        final BsonDocument document2 = BsonDocument.parse("{ \"_id\" : \"key1\", \"doc2\": { \"field\": 2 } }");
        insertBsonDocuments(collection, new BsonDocument[]{document1, document2});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);

            // Verify that an inner join will return an empty result set.
            statement.execute(String.format(
                    "SELECT * FROM \"%s\".\"%s\" INNER JOIN \"%s\".\"%s\" ON \"%s\".\"%s\" = \"%s\".\"%s\"",
                    getDatabaseName(), collection + "_doc1",
                    getDatabaseName(), collection + "_doc2",
                    collection + "_doc1", collection + "__id",
                    collection + "_doc2", collection + "__id"));
            final ResultSet resultSet1 = statement.getResultSet();
            Assertions.assertNotNull(resultSet1);
            Assertions.assertFalse(resultSet1.next());

            // Verify that a left outer join will return 1 row.
            statement.execute(String.format(
                    "SELECT * FROM \"%s\".\"%s\" LEFT JOIN \"%s\".\"%s\" ON \"%s\".\"%s\" = \"%s\".\"%s\"",
                    getDatabaseName(), collection + "_doc1",
                    getDatabaseName(), collection + "_doc2",
                    collection + "_doc1", collection + "__id",
                    collection + "_doc2", collection + "__id"));
            final ResultSet resultSet2 = statement.getResultSet();
            Assertions.assertNotNull(resultSet2);
            Assertions.assertTrue(resultSet2.next());
            Assertions.assertEquals("key0", resultSet2.getString(collection + "__id"));
            Assertions.assertEquals(1, resultSet2.getInt("field"));
            // Right side has no match: its _id reads as null and its int field as 0.
            Assertions.assertNull(resultSet2.getString(collection + "__id0"));
            Assertions.assertEquals(0, resultSet2.getInt("field0"));
            Assertions.assertFalse(resultSet2.next());
        }
    }

    @Disabled("Incorrect behaviour for right or full joins involving more than 2 virtual tables.")
    @DisplayName("Tests behaviour of right join for tables from the same collection.")
    @ParameterizedTest(name = "testRightJoinForTablesFromSameCollection - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testRightJoinForTablesFromSameCollection(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String collection = "testSameCollectionRightJoin";
        // Only "key0" has doc1; both documents have doc2.
        final BsonDocument document1 = BsonDocument.parse(
                "{ \"_id\" : \"key0\", \"doc1\": { \"field\" : 1 }, \"doc2\": { \"field\": 2 }}");
        final BsonDocument document2 = BsonDocument.parse("{ \"_id\" : \"key1\", \"doc2\": { \"field\": 2 } }");
        insertBsonDocuments(collection, new BsonDocument[]{document1, document2});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);

            // Verify that a right outer join will return 1 row.
            statement.execute(String.format(
                    "SELECT * FROM \"%s\".\"%s\" RIGHT JOIN \"%s\".\"%s\" ON \"%s\".\"%s\" = \"%s\".\"%s\"",
                    getDatabaseName(), collection,
                    getDatabaseName(), collection + "_doc1",
                    collection, collection + "__id",
                    collection + "_doc1", collection + "__id"));
            final ResultSet resultSet = statement.getResultSet();
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertFalse(resultSet.next());

            // Verify that an inner join combined with a right outer join will return 2 rows.
            statement.execute(
                    String.format(
                            "SELECT * FROM \"%s\".\"%s\" "
                                    + "INNER JOIN \"%s\".\"%s\" ON \"%s\".\"%s\" = \"%s\".\"%s\""
                                    + "RIGHT JOIN \"%s\".\"%s\" ON \"%s\".\"%s\" = \"%s\".\"%s\"",
                            getDatabaseName(), collection,
                            getDatabaseName(), collection + "_doc1",
                            collection, collection + "__id",
                            collection + "_doc1", collection + "__id",
                            getDatabaseName(), collection + "_doc2",
                            collection + "_doc1", collection + "__id",
                            collection + "_doc2", collection + "__id"));
            final ResultSet resultSet2 = statement.getResultSet();
            Assertions.assertNotNull(resultSet2);
            Assertions.assertTrue(resultSet2.next());
            Assertions.assertTrue(resultSet2.next());
            Assertions.assertFalse(resultSet2.next());
        }
    }

    /**
     * Tests that a statement with project, where, group by, having, order, and limit works with a different collection join.
     *
     * @throws SQLException occurs if executing the statement or retrieving a value fails.
     */
    @Disabled("Relies on $lookup with pipeline.")
    @DisplayName("Tests that a statement with project, where, group by, having, order, and limit works with a different collection join.")
    @ParameterizedTest(name = "testComplexQueryWithDifferentCollectionJoin - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testComplexQueryWithDifferentCollectionJoin(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String collection1 = "testComplexQueryDifferentCollectionJoin1";
        final String collection2 = "testComplexQueryDifferentCollectionJoin2";
        // collection1 provides the array rows to count; collection2 provides the
        // filter (field2) and grouping (_id) columns.
        final BsonDocument document1 = BsonDocument.parse("{ \"_id\" : \"key0\", \"array\": [1, 2, 3, 4, 5] }");
        final BsonDocument document2 = BsonDocument.parse("{ \"_id\" : \"key1\", \"array\": [1, 2, 3, 4] }");
        final BsonDocument document3 = BsonDocument.parse("{ \"_id\" : \"key2\", \"array\": [1, 2, 3] }");
        final BsonDocument document4 = BsonDocument.parse("{ \"_id\" : \"key3\", \"array\": [1, 2, 3, 4 ] }");
        final BsonDocument document5 = BsonDocument.parse("{ \"_id\" : \"key0\", \"field\": 1, \"field2\" : 0 }");
        final BsonDocument document6 = BsonDocument.parse("{ \"_id\" : \"key1\", \"field\": 0, \"field2\" : 1 }");
        final BsonDocument document7 = BsonDocument.parse("{ \"_id\" : \"key2\", \"field\": 0, \"field2\": 0 }");
        insertBsonDocuments(
                collection1, new BsonDocument[]{document1, document2, document3, document4});
        insertBsonDocuments(
                collection2, new BsonDocument[]{document5, document6, document7});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);

            // Verify that result set has correct values. Expecting query to single out document3.
            statement.execute(String.format(
                    "SELECT \"%s\", SUM(\"%s\") as \"Sum\", COUNT(*) AS \"Count\" FROM \"%s\".\"%s\""
                            + "INNER JOIN \"%s\".\"%s\" ON \"%s\".\"%s\" = \"%s\".\"%s\""
                            + "WHERE \"%s\" <> 1 "
                            + "GROUP BY \"%s\".\"%s\" HAVING COUNT(*) < 5"
                            + "ORDER BY \"Count\" DESC LIMIT 1",
                    collection2 + "__id", "field",
                    getDatabaseName(), collection1 + "_array",
                    getDatabaseName(), collection2,
                    collection1 + "_array", collection1 + "__id",
                    collection2, collection2 + "__id",
                    "field2",
                    collection2, collection2 + "__id"));
            final ResultSet resultSet1 = statement.getResultSet();
            Assertions.assertNotNull(resultSet1);
            Assertions.assertTrue(resultSet1.next());
            // "key2" is the only key passing both the WHERE (field2 <> 1) and HAVING (< 5 rows) filters
            // with the highest remaining count (3 array rows).
            Assertions.assertEquals("key2", resultSet1.getString(collection2 + "__id"));
            Assertions.assertEquals(0, resultSet1.getInt("Sum"));
            Assertions.assertEquals(3, resultSet1.getInt("Count"));
            Assertions.assertFalse(resultSet1.next());
        }
    }

    /**
     * Tests that different join types produce the correct result for tables from different collections.
     *
     * @throws SQLException occurs if executing the statement or retrieving a value fails.
*/ @Disabled("Relies on $lookup with pipeline.") @DisplayName("Tests that different join types produce the correct result for tables from different collections.") @ParameterizedTest(name = "testJoinTypesForTablesFromDifferentCollection - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testJoinTypesForTablesFromDifferentCollection() throws SQLException { final String collection1 = "testDifferentCollectionJoin1"; final String collection2 = "testDifferentCollectionJoin2"; final BsonDocument document1 = BsonDocument.parse( "{ \"_id\" : \"key0\", \"array\": [ {\"field\": 1, \"field2\": \"value\"}, {\"field\": 2, \"field2\": \"value2\"}] }"); final BsonDocument document2 = BsonDocument.parse( "{ \"_id\" : \"key1\", \"doc\": { \"field\": 1, field3: \"value3\"} }"); insertBsonDocuments( collection1, new BsonDocument[]{document1}); insertBsonDocuments( collection2, new BsonDocument[]{document2}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); // Verify that an inner join will return 1 row where field0 = field. statement.execute(String.format( "SELECT * FROM \"%s\".\"%s\" INNER JOIN \"%s\".\"%s\" ON \"%s\".\"%s\" = \"%s\".\"%s\"", getDatabaseName(), collection1 + "_array", getDatabaseName(), collection2 + "_doc", collection1 + "_array", "field", collection2 + "_doc", "field")); final ResultSet resultSet1 = statement.getResultSet(); Assertions.assertNotNull(resultSet1); Assertions.assertTrue(resultSet1.next()); Assertions.assertEquals(resultSet1.getInt("field"), resultSet1.getInt("field0")); Assertions.assertEquals("key0", resultSet1.getString(collection1 + "__id")); Assertions.assertEquals("key1", resultSet1.getString(collection2 + "__id")); Assertions.assertFalse(resultSet1.next()); // Verify that a left outer join will return 2 rows but only 1 match from the right. 
statement.execute(String.format( "SELECT * FROM \"%s\".\"%s\" LEFT JOIN \"%s\".\"%s\" ON \"%s\".\"%s\" = \"%s\".\"%s\"", getDatabaseName(), collection1 + "_array", getDatabaseName(), collection2 + "_doc", collection1 + "_array", "field", collection2 + "_doc", "field")); final ResultSet resultSet2 = statement.getResultSet(); Assertions.assertNotNull(resultSet2); Assertions.assertTrue(resultSet2.next()); Assertions.assertEquals(resultSet2.getInt("field"), resultSet2.getInt("field0")); Assertions.assertEquals("key0", resultSet2.getString(collection1 + "__id")); Assertions.assertEquals("key1", resultSet2.getString(collection2 + "__id")); Assertions.assertTrue(resultSet2.next()); Assertions.assertEquals("key0", resultSet2.getString(collection1 + "__id")); Assertions.assertNull(resultSet2.getString(collection2 + "__id")); Assertions.assertEquals(2, resultSet2.getInt("field")); Assertions.assertEquals(0, resultSet2.getInt("field0")); Assertions.assertFalse(resultSet2.next()); } } /** * Test querying using projection a three-level document. * * @throws SQLException occurs if executing the statement or retrieving a value fails. */ @DisplayName("Tests querying with projects on a three-level document. 
Addresses AD-115.") @ParameterizedTest(name = "testProjectionQueryWithThreeLevelDocument - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testProjectionQueryWithThreeLevelDocument(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testProjectionQueryWithThreeLevelDocument"; final String keyColumnName = tableName + "__id"; final BsonDocument document = BsonDocument.parse( "{ \"_id\" : \"key\", \"doc\" : { \"field\" : 1, \"doc2\" : { \"field2\" : \"value\" } } }"); insertBsonDocuments(tableName, new BsonDocument[]{document}); try (Connection connection = getConnection()) { final DocumentDbStatement statement = getDocumentDbStatement(connection); // Verify the nested table from the field doc2 from the field doc. final ResultSet resultSet2 = statement.executeQuery( String.format( "SELECT \"%s__id\" FROM \"%s\".\"%s\"", tableName, getDatabaseName(), tableName + "_doc")); Assertions.assertNotNull(resultSet2); Assertions.assertTrue(resultSet2.next()); Assertions.assertEquals("key", resultSet2.getString(keyColumnName)); // Verify the nested table from the field doc2 from the field doc. final ResultSet resultSet3 = statement.executeQuery( String.format( "SELECT \"%s__id\" FROM \"%s\".\"%s\"", tableName, getDatabaseName(), tableName + "_doc_doc2")); Assertions.assertNotNull(resultSet3); Assertions.assertTrue(resultSet3.next()); Assertions.assertEquals("key", resultSet3.getString(keyColumnName)); // Verify JOIN on the 3 tables to produce 2 columns and 1 row. 
            final ResultSet resultSet4 = statement.executeQuery(
                    String.format(
                            "SELECT \"%s\".\"%s__id\", \"field2\" FROM \"%s\".\"%s\" "
                                    + "INNER JOIN \"%s\".\"%s\" "
                                    + "ON \"%s\".\"%s\" = \"%s\".\"%s\" "
                                    + "INNER JOIN \"%s\".\"%s\" "
                                    + "ON \"%s\".\"%s\" = \"%s\".\"%s\"",
                            tableName, tableName,
                            getDatabaseName(), tableName,
                            getDatabaseName(), tableName + "_doc",
                            tableName, keyColumnName,
                            tableName + "_doc", keyColumnName,
                            getDatabaseName(), tableName + "_doc_doc2",
                            tableName + "_doc", keyColumnName,
                            tableName + "_doc_doc2", keyColumnName));
            Assertions.assertNotNull(resultSet4);
            // Only the two projected columns survive; single source document -> one row.
            Assertions.assertEquals(2, resultSet4.getMetaData().getColumnCount());
            int rowCount = 0;
            while (resultSet4.next()) {
                rowCount++;
            }
            Assertions.assertEquals(1, rowCount);
        }
    }

    /**
     * Tests queries with natural joins where there are no matching fields other than ID.
     * @throws SQLException occurs if query or connection fails.
     */
    @DisplayName("Tests queries with natural joins.")
    @ParameterizedTest(name = "testNaturalJoin - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testNaturalJoin(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testNaturalJoin";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" +
                "\"fieldA\": 10, " +
                "\"sub\": {" +
                " \"subField\": 15}}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // NATURAL JOIN implicitly matches the shared testNaturalJoin__id column.
            final ResultSet resultSet = statement.executeQuery(
                    String.format(
                            "SELECT * from \"%s\".\"%s\" NATURAL JOIN \"%s\".\"%s\"",
                            getDatabaseName(), tableName,
                            getDatabaseName(), tableName + "_sub"));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("101", resultSet.getString(1));
            Assertions.assertEquals(10, resultSet.getInt(2));
            Assertions.assertEquals(15, resultSet.getInt(3));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests that natural joins where there is an additional matching column works.
     * @throws SQLException occurs if query or connection fails.
     */
    @Disabled("Only joins on foreign keys are supported currently.")
    @DisplayName("Tests queries with natural join where an additional column matches the sub-table.")
    @ParameterizedTest(name = "testNaturalJoinWithExtraColumn - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testNaturalJoinWithExtraColumn(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testNaturalJoinWithExtraColumn";
        // fieldA appears in both the base document and the sub-document, so the natural
        // join matches on both the key column and fieldA.
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" +
                "\"fieldA\": 10, " +
                "\"sub\": {" +
                " \"subField\": 15," +
                " \"fieldA\": 10}}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet = statement.executeQuery(
                    String.format(
                            "SELECT * from \"%s\".\"%s\" NATURAL JOIN \"%s\".\"%s\"",
                            getDatabaseName(), tableName,
                            getDatabaseName(), tableName + "_sub"));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("101", resultSet.getString(1));
            Assertions.assertEquals(10, resultSet.getInt(2));
            Assertions.assertEquals(15, resultSet.getInt(3));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests that a cross join with a WHERE clause matching IDs works.
     * @throws SQLException occurs if query or connection fails.
     */
    @DisplayName("Tests basic cross-join with WHERE condition.")
    @ParameterizedTest(name = "testCrossJoinBasic - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testCrossJoinBasic(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testCrossJoinBasic";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" +
                "\"fieldA\": 10, " +
                "\"sub\": {" +
                " \"subField\": 15," +
                " \"fieldA\": 10}}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // The WHERE clause reduces the cross product to the matching-ID pairs only.
            final ResultSet resultSet = statement.executeQuery(
                    String.format(
                            "SELECT * from \"%s\".\"%s\" CROSS JOIN \"%s\".\"%s\" WHERE " +
                                    "\"testCrossJoinBasic\".\"testCrossJoinBasic__id\" = \"testCrossJoinBasic_sub\".\"testCrossJoinBasic__id\"",
                            getDatabaseName(), tableName,
                            getDatabaseName(), tableName + "_sub"));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("101", resultSet.getString(1));
            Assertions.assertEquals(10, resultSet.getInt(2));
            Assertions.assertEquals("101", resultSet.getString(3));
            Assertions.assertEquals(15, resultSet.getInt(4));
            Assertions.assertFalse(resultSet.next());
        }
    }

    /**
     * Tests that a cross join works.
     * @throws SQLException occurs if query or connection fails.
     */
    @Disabled("Only joins on foreign keys are supported currently.")
    @DisplayName("Tests cross-join without WHERE condition.")
    @ParameterizedTest(name = "testCrossJoin - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testCrossJoin(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testCrossJoin";
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101,\n" +
                "\"fieldA\": 10, " +
                "\"sub\": {" +
                " \"subField\": 15," +
                " \"fieldA\": 10}}");
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102,\n" +
                "\"fieldA\": 10, " +
                "\"sub\": {" +
                " \"subField\": 15," +
                " \"fieldA\": 10}}");
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // Unfiltered cross product: 2 base rows x 2 sub rows = 4 rows.
            final ResultSet resultSet = statement.executeQuery(
                    String.format(
                            "SELECT * from \"%s\".\"%s\" CROSS JOIN \"%s\".\"%s\"",
                            getDatabaseName(), tableName,
                            getDatabaseName(), tableName + "_sub"));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("101", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("101", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("102", resultSet.getString(1));
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals("102", resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());
        }
    }

    @DisplayName("Tests joins with array of two level documents.")
    @ParameterizedTest(name = "testQueryWithArrayOfTwoLevelDocuments - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQueryWithArrayOfTwoLevelDocuments(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQueryWithArrayOfTwoLevelDocuments";
        final BsonDocument document = BsonDocument.parse(
                "{ \"_id\" : \"key\", \"array\" : [ { \"field\" : 1, \"field1\": { \"field2\" : 2, \"field3\" : \"value\" } }, { \"field\" : 1 } ]}");
        insertBsonDocuments(
                tableName, new BsonDocument[]{document});
        try (Connection connection = getConnection()) {
            final DocumentDbStatement statement = getDocumentDbStatement(connection);

            // Verify LEFT OUTER JOIN on the nested table and 2nd nested table to produce 2 rows.
            final ResultSet resultSet1 = statement.executeQuery(
                    String.format(
                            "SELECT * FROM \"%1$s\".\"%2$s\" "
                                    + "LEFT OUTER JOIN \"%1$s\".\"%3$s\" "
                                    + "ON \"%2$s\".\"%4$s\" = \"%3$s\".\"%4$s\" "
                                    + "AND \"%2$s\".\"%5$s\" = \"%3$s\".\"%5$s\"",
                            getDatabaseName(),
                            tableName + "_array",
                            tableName + "_array_field1",
                            tableName + "__id",
                            "array_index_lvl_0"));
            Assertions.assertNotNull(resultSet1);
            int rowCount = 0;
            while (resultSet1.next()) {
                rowCount++;
            }
            Assertions.assertEquals(2, rowCount);

            // Verify INNER JOIN on the nested table and 2nd nested table to produce 1 row.
            final ResultSet resultSet2 = statement.executeQuery(
                    String.format(
                            "SELECT * FROM \"%1$s\".\"%2$s\" "
                                    + "INNER JOIN \"%1$s\".\"%3$s\" "
                                    + "ON \"%2$s\".\"%4$s\" = \"%3$s\".\"%4$s\" "
                                    + "AND \"%2$s\".\"%5$s\" = \"%3$s\".\"%5$s\"",
                            getDatabaseName(),
                            tableName + "_array",
                            tableName + "_array_field1",
                            tableName + "__id",
                            "array_index_lvl_0"));
            Assertions.assertNotNull(resultSet2);
            rowCount = 0;
            while (resultSet2.next()) {
                // Only the first array entry has a field1 sub-document (index 0).
                Assertions.assertEquals("key", resultSet2.getString(tableName + "__id0"));
                Assertions.assertEquals(0, resultSet2.getInt("array_index_lvl_00"));
                rowCount++;
            }
            Assertions.assertEquals(1, rowCount);

            // Verify LEFT OUTER JOIN on the nested table and 2nd nested table with filter to produce 1 row.
            final ResultSet resultSet3 = statement.executeQuery(
                    String.format(
                            "SELECT * FROM \"%1$s\".\"%2$s\" "
                                    + "LEFT OUTER JOIN \"%1$s\".\"%3$s\" "
                                    + "ON \"%2$s\".\"%4$s\" = \"%3$s\".\"%4$s\" "
                                    + "AND \"%2$s\".\"%5$s\" = \"%3$s\".\"%5$s\""
                                    + "WHERE \"%3$s\".\"%4$s\" IS NULL "
                                    + "AND \"%3$s\".\"%5$s\" IS NULL ",
                            getDatabaseName(),
                            tableName + "_array",
                            tableName + "_array_field1",
                            tableName + "__id",
                            "array_index_lvl_0"));
            Assertions.assertNotNull(resultSet3);
            rowCount = 0;
            while (resultSet3.next()) {
                Assertions.assertNull(resultSet3.getString(tableName + "__id0"));
                rowCount++;
            }
            Assertions.assertEquals(1, rowCount);
        }
    }

    @DisplayName("Test adding filter after doing multiple joins.")
    @ParameterizedTest(name = "testFilterWithMultipleJoins - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testFilterWithMultipleJoins(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testFilterWithMultipleJoins";
        final BsonDocument document1 = BsonDocument.parse(
                "{ \"_id\" : \"key1\", \"document\": { \"field\": 1, \"array\" : [ 1, 2, 3 ] } }");
        final BsonDocument document2 = BsonDocument.parse(
                "{ \"_id\" : \"key2\", \"document\": { \"field\": 2, \"array\" : [ 4, 5, 6 ] } }");
        insertBsonDocuments(tableName, new BsonDocument[]{document1, document2});
        try (Connection connection = getConnection()) {
            final DocumentDbStatement statement = getDocumentDbStatement(connection);
            // Filter on the array value (1) should keep only the "key1" row after both joins.
            final ResultSet resultSet = statement.executeQuery(
                    String.format(
                            "SELECT \"%1$s\".\"%1$s__id\", \"%2$s\".\"field\" "
                                    + "FROM \"%3$s\".\"%1$s\" "
                                    + "LEFT OUTER JOIN \"%3$s\".\"%4$s\" "
                                    + "ON \"%1$s\".\"%1$s__id\" = \"%4$s\".\"%1$s__id\""
                                    + "LEFT OUTER JOIN \"%3$s\".\"%2$s\" "
                                    + "ON \"%1$s\".\"%1$s__id\" = \"%2$s\".\"%1$s__id\" "
                                    + "WHERE \"%4$s\".\"value\" = 1",
                            tableName,
                            tableName + "_document",
                            getDatabaseName(),
                            tableName + "_document_array"));
            Assertions.assertNotNull(resultSet);
            Assertions.assertEquals(2,
                    resultSet.getMetaData().getColumnCount());
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals(1, resultSet.getInt(2));
            Assertions.assertFalse(resultSet.next());
        }
    }

    @DisplayName("Tests validation for all combination of join between two tables.")
    @ParameterizedTest(name = "testJoinConditionsTwoTables - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testJoinConditionsTwoTables(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testJoinConditionsTwoTables";
        final BsonDocument document = BsonDocument.parse(MULTI_NESTED_ARRAY_AND_DOCUMENT_JSON);
        insertBsonDocuments(tableName, new BsonDocument[]{document});
        // List of all tables
        final List<String> tables = Lists.newArrayList(
                "testJoinConditionsTwoTables",
                "testJoinConditionsTwoTables_array",
                "testJoinConditionsTwoTables_array_array",
                "testJoinConditionsTwoTables_array_document",
                "testJoinConditionsTwoTables_array_otherArray",
                "testJoinConditionsTwoTables_document",
                "testJoinConditionsTwoTables_otherArray",
                "testJoinConditionsTwoTables_otherArray_array",
                "testJoinConditionsTwoTables_otherArray_document",
                "testJoinConditionsTwoTables_otherArray_otherArray");
        // List of all tables with "array" as parent
        final List<String> arrayTables = Lists.newArrayList(
                "testJoinConditionsTwoTables_array",
                "testJoinConditionsTwoTables_array_array",
                "testJoinConditionsTwoTables_array_document",
                "testJoinConditionsTwoTables_array_otherArray");
        // List of all tables with "otherArray" as parent
        final List<String> otherArrayTables = Lists.newArrayList(
                "testJoinConditionsTwoTables_otherArray",
                "testJoinConditionsTwoTables_otherArray_array",
                "testJoinConditionsTwoTables_otherArray_document",
                "testJoinConditionsTwoTables_otherArray_otherArray");
        // Temporarily raise the log level to FATAL: the invalid-join probes below
        // intentionally trigger (and assert on) query-validation errors.
        final Level level = LogManager.getRootLogger().getLevel();
        LogManager.getRootLogger().setLevel(Level.FATAL);
        // Test all combination of tables - only using base table "_id" in join condition
        try (Connection connection = getConnection()) {
            final DocumentDbStatement statement = getDocumentDbStatement(connection);
            for (String firstTable : tables) {
                for (String secondTable : tables) {
                    final String query = String.format(
                            "SELECT * "
                                    + "FROM \"%2$s\".\"%3$s\" "
                                    + "INNER JOIN \"%2$s\".\"%4$s\" "
                                    + "ON \"%3$s\".\"%1$s__id\" = \"%4$s\".\"%1$s__id\" ",
                            tableName, getDatabaseName(), firstTable, secondTable);
                    if (firstTable.equals(secondTable)) {
                        // Disallow join with the same table.
                        Assertions.assertThrows(
                                SQLException.class, () -> statement.executeQuery(query));
                    } else if ((arrayTables.contains(firstTable) && arrayTables.contains(secondTable))
                            || (otherArrayTables.contains(firstTable) && otherArrayTables.contains(secondTable))) {
                        // Requires common array index in join condition
                        Assertions.assertThrows(
                                SQLException.class, () -> statement.executeQuery(query));
                    } else {
                        // Does not require common array index in join condition
                        Assertions.assertDoesNotThrow(() -> statement.executeQuery(query));
                    }
                }
            }
            // Test combination of tables with "array" as parent
            for (String firstTable : arrayTables) {
                for (String secondTable : arrayTables) {
                    final String query = String.format(
                            "SELECT * "
                                    + "FROM \"%2$s\".\"%3$s\" "
                                    + "INNER JOIN \"%2$s\".\"%4$s\" "
                                    + "ON \"%3$s\".\"%1$s__id\" = \"%4$s\".\"%1$s__id\" "
                                    + " AND \"%3$s\".\"array_index_lvl_0\" = \"%4$s\".\"array_index_lvl_0\" ",
                            tableName, getDatabaseName(), firstTable, secondTable);
                    if (firstTable.equals(secondTable)) {
                        // Disallow join with the same table.
                        Assertions.assertThrows(
                                SQLException.class, () -> statement.executeQuery(query));
                    } else {
                        // Required common array index in join condition provided
                        Assertions.assertDoesNotThrow(() -> statement.executeQuery(query));
                    }
                }
            }
            // Test combination of tables with "otherArray" as parent
            for (String firstTable : otherArrayTables) {
                for (String secondTable : otherArrayTables) {
                    final String query = String.format(
                            "SELECT * "
                                    + "FROM \"%2$s\".\"%3$s\" "
                                    + "INNER JOIN \"%2$s\".\"%4$s\" "
                                    + "ON \"%3$s\".\"%1$s__id\" = \"%4$s\".\"%1$s__id\" "
                                    + " AND \"%3$s\".\"otherArray_index_lvl_0\" = \"%4$s\".\"otherArray_index_lvl_0\" ",
                            tableName, getDatabaseName(), firstTable, secondTable);
                    if (firstTable.equals(secondTable)) {
                        // Disallow join with the same table.
                        Assertions.assertThrows(
                                SQLException.class, () -> statement.executeQuery(query));
                    } else {
                        // Required common array index in join condition provided
                        Assertions.assertDoesNotThrow(() -> statement.executeQuery(query));
                    }
                }
            }
        } finally {
            // Restore the original log level even if an assertion failed.
            LogManager.getRootLogger().setLevel(level);
        }
    }

    @DisplayName("Tests validation for all combination of join between three tables.")
    @ParameterizedTest(name = "testJoinConditionsThreeTables - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testJoinConditionsThreeTables(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testJoinConditionsThreeTables";
        final BsonDocument document = BsonDocument.parse(MULTI_NESTED_ARRAY_AND_DOCUMENT_JSON);
        insertBsonDocuments(tableName, new BsonDocument[]{document});
        // List of all tables
        final List<String> tables = Lists.newArrayList(
                "testJoinConditionsThreeTables",
                "testJoinConditionsThreeTables_array",
                "testJoinConditionsThreeTables_array_array",
                "testJoinConditionsThreeTables_array_document",
                "testJoinConditionsThreeTables_array_otherArray",
                "testJoinConditionsThreeTables_document",
                "testJoinConditionsThreeTables_otherArray",
                "testJoinConditionsThreeTables_otherArray_array",
                "testJoinConditionsThreeTables_otherArray_document",
                "testJoinConditionsThreeTables_otherArray_otherArray");
        // List of all tables with "array" as parent
        final List<String> arrayTables = Lists.newArrayList(
                "testJoinConditionsThreeTables_array",
                "testJoinConditionsThreeTables_array_array",
                "testJoinConditionsThreeTables_array_document",
                "testJoinConditionsThreeTables_array_otherArray");
        // List of all tables with "otherArray" as parent
        final List<String> otherArrayTables = Lists.newArrayList(
                "testJoinConditionsThreeTables_otherArray",
                "testJoinConditionsThreeTables_otherArray_array",
                "testJoinConditionsThreeTables_otherArray_document",
                "testJoinConditionsThreeTables_otherArray_otherArray");
        // Temporarily raise the log level to FATAL: the invalid-join probes below
        // intentionally trigger (and assert on) query-validation errors.
        final Level level = LogManager.getRootLogger().getLevel();
        LogManager.getRootLogger().setLevel(Level.FATAL);
        try (Connection connection = getConnection()) {
            final DocumentDbStatement statement = getDocumentDbStatement(connection);
            // Test all combination of tables - only using base table "_id" in join condition
            for (String firstTable : tables) {
                for (String secondTable : tables) {
                    for (String thirdTable : tables) {
                        if (firstTable.equals(secondTable)
                                || firstTable.equals(thirdTable)
                                || secondTable.equals(thirdTable)) {
                            // Does not allow duplicate table names in JOIN conditions
                            continue;
                        }
                        final String query = String.format(
                                "SELECT *%n"
                                        + " FROM \"%2$s\".\"%3$s\"%n"
                                        + " INNER JOIN \"%2$s\".\"%4$s\"%n"
                                        + " ON \"%3$s\".\"%1$s__id\" = \"%4$s\".\"%1$s__id\"%n"
                                        + " INNER JOIN \"%2$s\".\"%5$s\"%n"
                                        + " ON \"%3$s\".\"%1$s__id\" = \"%5$s\".\"%1$s__id\"",
                                tableName, getDatabaseName(), firstTable, secondTable, thirdTable);
                        if (((arrayTables.contains(firstTable) && arrayTables.contains(secondTable))
                                || (arrayTables.contains(firstTable) && arrayTables.contains(thirdTable))
                                || (arrayTables.contains(secondTable) && arrayTables.contains(thirdTable)))
                                || ((otherArrayTables.contains(firstTable) && otherArrayTables.contains(secondTable)
                                || (otherArrayTables.contains(firstTable) && otherArrayTables.contains(thirdTable))
                                || (otherArrayTables.contains(secondTable) && otherArrayTables.contains(thirdTable))))
                        ) {
                            // Requires common array index in join condition
                            Assertions.assertThrows(
                                    SQLException.class, () -> statement.executeQuery(query));
                        } else {
                            // Does not require common array index in join condition
                            Assertions.assertDoesNotThrow(() -> statement.executeQuery(query));
                        }
                    }
                }
            }
            // Test combination of tables with "array" as parent
            for (String firstTable : arrayTables) {
                for (String secondTable : arrayTables) {
                    for (String thirdTable : tables) {
                        if (firstTable.equals(secondTable)
                                || firstTable.equals(thirdTable)
                                || secondTable.equals(thirdTable)) {
                            // Disallow join with the same table.
                            continue;
                        }
                        final String query = String.format(
                                "SELECT * %n"
                                        + " FROM \"%2$s\".\"%3$s\" %n"
                                        + " INNER JOIN \"%2$s\".\"%4$s\" %n"
                                        + " ON \"%3$s\".\"%1$s__id\" = \"%4$s\".\"%1$s__id\" %n"
                                        + " AND \"%3$s\".\"array_index_lvl_0\" = \"%4$s\".\"array_index_lvl_0\" %n"
                                        + " INNER JOIN \"%2$s\".\"%5$s\" %n"
                                        + " ON \"%3$s\".\"%1$s__id\" = \"%5$s\".\"%1$s__id\" ",
                                tableName, getDatabaseName(), firstTable, secondTable, thirdTable);
                        if (arrayTables.contains(thirdTable)) {
                            // Requires common array index in join condition
                            Assertions.assertThrows(
                                    SQLException.class, () -> statement.executeQuery(query));
                        } else {
                            // Required common array index in join condition provided
                            Assertions.assertDoesNotThrow(() -> statement.executeQuery(query));
                        }
                    }
                }
            }
            // Test combination of tables with "array" as parent
            for (String firstTable : arrayTables) {
                for (String secondTable : arrayTables) {
                    for (String thirdTable : arrayTables) {
                        if (firstTable.equals(secondTable)
                                || firstTable.equals(thirdTable)
                                || secondTable.equals(thirdTable)) {
                            // Disallow join with the same table.
                            continue;
                        }
                        final String query = String.format(
                                "SELECT * %n"
                                        + " FROM \"%2$s\".\"%3$s\" %n"
                                        + " INNER JOIN \"%2$s\".\"%4$s\" %n"
                                        + " ON \"%3$s\".\"%1$s__id\" = \"%4$s\".\"%1$s__id\" %n"
                                        + " AND \"%3$s\".\"array_index_lvl_0\" = \"%4$s\".\"array_index_lvl_0\" %n"
                                        + " INNER JOIN \"%2$s\".\"%5$s\" %n"
                                        + " ON \"%3$s\".\"%1$s__id\" = \"%5$s\".\"%1$s__id\" "
                                        + " AND \"%3$s\".\"array_index_lvl_0\" = \"%5$s\".\"array_index_lvl_0\" %n",
                                tableName, getDatabaseName(), firstTable, secondTable, thirdTable);
                        // Required common array index in join condition provided
                        Assertions.assertDoesNotThrow(() -> statement.executeQuery(query));
                    }
                }
            }
            // Test combination of tables with "array" as parent
            for (String firstTable : arrayTables) {
                for (String secondTable : tables) {
                    for (String thirdTable : arrayTables) {
                        if (firstTable.equals(secondTable)
                                || firstTable.equals(thirdTable)
                                || secondTable.equals(thirdTable)) {
                            // Disallow join with the same table.
                            continue;
                        }
                        final String query = String.format(
                                "SELECT * %n"
                                        + " FROM \"%2$s\".\"%3$s\" %n"
                                        + " INNER JOIN \"%2$s\".\"%4$s\" %n"
                                        + " ON \"%3$s\".\"%1$s__id\" = \"%4$s\".\"%1$s__id\" %n"
                                        + " INNER JOIN \"%2$s\".\"%5$s\" %n"
                                        + " ON \"%3$s\".\"%1$s__id\" = \"%5$s\".\"%1$s__id\" "
                                        + " AND \"%3$s\".\"array_index_lvl_0\" = \"%5$s\".\"array_index_lvl_0\" %n",
                                tableName, getDatabaseName(), firstTable, secondTable, thirdTable);
                        if (arrayTables.contains(secondTable)) {
                            // Requires common array index in join condition
                            Assertions.assertThrows(
                                    SQLException.class, () -> statement.executeQuery(query));
                        } else {
                            // Required common array index in join condition provided
                            Assertions.assertDoesNotThrow(() -> statement.executeQuery(query));
                        }
                    }
                }
            }
        } finally {
            // Restore the original log level even if an assertion failed.
            LogManager.getRootLogger().setLevel(level);
        }
    }

    /**
     * Tests WITH tableName AS (subQuery).
     *
     * @throws SQLException occurs if executing the statement or retrieving a value fails.
*/ @DisplayName("Tests WITH tableName AS (<subQuery>).") @ParameterizedTest(name = "testWithSubQuery - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testWithSubQuery( final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String collectionName = "testWithSubQuery"; final int recordCount = 10; prepareSimpleConsistentData(collectionName, recordCount); try (Connection connection = getConnection()) { final DocumentDbStatement statement = getDocumentDbStatement(connection); final ResultSet resultSet1 = statement.executeQuery(String.format( "WITH mySubQuery1 AS (SELECT * FROM \"%1$s\".\"%2$s\"), %n" + "mySubQuery2 AS (SELECT * FROM \"%1$s\".\"%2$s\") %n" + "SELECT * FROM mySubQuery1, mySubQuery2 %n" + " WHERE mySubQuery1.%2$s__id = mySubQuery2.%2$s__id", getDatabaseName(), collectionName)); Assertions.assertNotNull(resultSet1); int count = 0; while (resultSet1.next()) { count++; } Assertions.assertEquals(recordCount, count); final ResultSet resultSet2 = statement.executeQuery(String.format( "SELECT * FROM %n" + " (SELECT * FROM \"%1$s\".\"%2$s\") as mySubQuery1, %n" + " (SELECT * FROM \"%1$s\".\"%2$s\") as mySubQuery2 %n" + " WHERE mySubQuery1.%2$s__id = mySubQuery2.%2$s__id", getDatabaseName(), collectionName)); Assertions.assertNotNull(resultSet2); count = 0; while (resultSet2.next()) { count++; } Assertions.assertEquals(recordCount, count); } } }
4,496
0
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb/jdbc/DocumentDbListResultSetTest.java
/*
 * Copyright <2021> Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 *
 */
package software.amazon.documentdb.jdbc;

import com.google.common.collect.ImmutableList;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import software.amazon.documentdb.jdbc.common.utilities.JdbcColumnMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;

/**
 * Unit tests for cursor navigation and result-set characteristics of
 * {@link DocumentDbListResultSet}, backed by a mocked three-row, three-column list.
 */
public class DocumentDbListResultSetTest {
    // Row count reported by the mocked backing list (also the number of mock columns).
    private static final int TEST_METADATA_SIZE = 3;

    @Mock
    private List<List<Object>> mockList;
    @Mock
    private JdbcColumnMetaData mockMetadataColumnA;
    @Mock
    private JdbcColumnMetaData mockMetadataColumnB;
    @Mock
    private JdbcColumnMetaData mockMetadataColumnC;
    @Mock
    private Statement mockStatement;

    private DocumentDbListResultSet resultSet;

    @BeforeEach
    void init() {
        MockitoAnnotations.openMocks(this);

        // Stub only what the result set under test touches: column labels,
        // column ordinals, and the size of the backing row list.
        Mockito.when(mockMetadataColumnA.getColumnLabel()).thenReturn("A");
        Mockito.when(mockMetadataColumnB.getColumnLabel()).thenReturn("B");
        Mockito.when(mockMetadataColumnC.getColumnLabel()).thenReturn("C");
        Mockito.when(mockMetadataColumnA.getOrdinal()).thenReturn(0);
        Mockito.when(mockMetadataColumnB.getOrdinal()).thenReturn(1);
        Mockito.when(mockMetadataColumnC.getOrdinal()).thenReturn(2);
        Mockito.when(mockList.size()).thenReturn(TEST_METADATA_SIZE);

        final ImmutableList<JdbcColumnMetaData> columns = ImmutableList.of(
                mockMetadataColumnA, mockMetadataColumnB, mockMetadataColumnC);
        resultSet = new DocumentDbListResultSet(mockStatement, columns, mockList);
    }

    /**
     * Asserts the four cursor boundary flags, probed in a fixed order:
     * isBeforeFirst, isFirst, isLast, isAfterLast.
     */
    private void assertCursorFlags(
            final boolean beforeFirst,
            final boolean first,
            final boolean last,
            final boolean afterLast) throws SQLException {
        Assertions.assertEquals(beforeFirst, resultSet.isBeforeFirst());
        Assertions.assertEquals(first, resultSet.isFirst());
        Assertions.assertEquals(last, resultSet.isLast());
        Assertions.assertEquals(afterLast, resultSet.isAfterLast());
    }

    /** Asserts the internal zero-based row index and the one-based JDBC row number. */
    private void assertCursorPosition(final int rowIndex, final int row) throws SQLException {
        Assertions.assertEquals(rowIndex, resultSet.getRowIndex());
        Assertions.assertEquals(row, resultSet.getRow());
    }

    @Test
    @DisplayName("Test that next() moves cursor to correct row and handles invalid inputs.")
    void testNext() throws SQLException {
        // Initially the cursor sits before the first row: index -1, JDBC row 0.
        assertCursorFlags(true, false, false, false);
        assertCursorPosition(-1, 0);

        // Advance onto the first row.
        Assertions.assertTrue(resultSet.next());
        assertCursorFlags(false, true, false, false);
        assertCursorPosition(0, 1);

        // Advance onto the middle row.
        Assertions.assertTrue(resultSet.next());
        assertCursorFlags(false, false, false, false);
        assertCursorPosition(1, 2);

        // Advance onto the last row.
        Assertions.assertTrue(resultSet.next());
        assertCursorFlags(false, false, true, false);
        assertCursorPosition(2, 3);

        // Advancing past the end fails; getRow() then reports 0.
        Assertions.assertFalse(resultSet.next());
        assertCursorFlags(false, false, false, true);
        assertCursorPosition(3, 0);
    }

    @Test
    @DisplayName("Test that absolute() moves cursor to correct row and handles invalid inputs.")
    void testAbsolute() throws SQLException {
        // Positive targets are counted from the start.
        Assertions.assertTrue(resultSet.absolute(2));
        assertCursorPosition(1, 2);

        // Negative targets are counted from the end.
        Assertions.assertTrue(resultSet.absolute(-2));
        assertCursorPosition(1, 2);

        // Out-of-range targets fail and leave the cursor before the first row.
        Assertions.assertFalse(resultSet.absolute(4));
        assertCursorPosition(-1, 0);
        Assertions.assertFalse(resultSet.absolute(-4));
        assertCursorPosition(-1, 0);
    }

    @Test
    @DisplayName("Test that relative() moves cursor to correct row and handles invalid inputs.")
    void testRelative() throws SQLException {
        // Move forward by two: 0 -> 2.
        Assertions.assertTrue(resultSet.relative(2));
        assertCursorPosition(1, 2);

        // Move back by one: 2 -> 1.
        Assertions.assertTrue(resultSet.relative(-1));
        assertCursorPosition(0, 1);

        // A zero offset keeps the cursor in place: 1 -> 1.
        Assertions.assertTrue(resultSet.relative(0));
        assertCursorPosition(0, 1);

        // Overshooting the end fails: 1 -> 4 (after last).
        Assertions.assertFalse(resultSet.relative(3));
        assertCursorPosition(3, 0);

        // Undershooting the start fails: 4 -> -1 (before first).
        Assertions.assertFalse(resultSet.relative(-5));
        assertCursorPosition(-1, 0);
    }

    @Test
    @DisplayName("Test that previous() moves cursor to correct row and handles invalid inputs.")
    void testPrevious() throws SQLException {
        // Position the cursor on the middle row first: 0 -> 2.
        Assertions.assertTrue(resultSet.relative(2));
        assertCursorPosition(1, 2);

        // Step back onto the first row: 2 -> 1.
        Assertions.assertTrue(resultSet.previous());
        assertCursorPosition(0, 1);

        // Stepping back off the front fails: 1 -> 0 (before first).
        Assertions.assertFalse(resultSet.previous());
        assertCursorPosition(-1, 0);
    }

    @Test
    @DisplayName("Test that first() moves cursor to correct row.")
    void testFirst() throws SQLException {
        Assertions.assertTrue(resultSet.first());
        assertCursorPosition(0, 1);
    }

    @Test
    @DisplayName("Test that last() moves cursor to correct row.")
    void testLast() throws SQLException {
        Assertions.assertTrue(resultSet.last());
        assertCursorPosition(TEST_METADATA_SIZE - 1, TEST_METADATA_SIZE);
    }

    @Test
    @DisplayName("Test that beforeFirst() moves cursor to correct row.")
    void testBeforeFirst() throws SQLException {
        // Move away from the initial position first: 0 -> 2.
        Assertions.assertTrue(resultSet.relative(2));
        assertCursorPosition(1, 2);

        resultSet.beforeFirst();
        assertCursorPosition(-1, 0);
    }

    @Test
    @DisplayName("Test that afterLast() moves cursor to correct row.")
    void testAfterLast() throws SQLException {
        resultSet.afterLast();
        assertCursorPosition(TEST_METADATA_SIZE, 0);
    }

    @Test
    @DisplayName("Test that getType() returns scrollable and insensitive.")
    void testGetType() {
        Assertions.assertEquals(ResultSet.TYPE_SCROLL_INSENSITIVE, resultSet.getType());
    }

    @Test
    @DisplayName("Test that getConcurrency() returns read-only.")
    void testGetConcurrency() {
        Assertions.assertEquals(ResultSet.CONCUR_READ_ONLY, resultSet.getConcurrency());
    }
}
4,497
0
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb/jdbc/DocumentDbDatabaseMetaDataTest.java
/* * Copyright <2021> Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://www.apache.org/licenses/LICENSE-2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. * */ package software.amazon.documentdb.jdbc; import com.mongodb.client.MongoClient; import org.bson.Document; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import software.amazon.documentdb.jdbc.common.test.DocumentDbFlapDoodleExtension; import software.amazon.documentdb.jdbc.common.test.DocumentDbFlapDoodleTest; import software.amazon.documentdb.jdbc.common.utilities.JdbcType; import software.amazon.documentdb.jdbc.metadata.DocumentDbSchema; import software.amazon.documentdb.jdbc.persist.DocumentDbSchemaWriter; import java.io.IOException; import java.io.InputStream; import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Types; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Properties; import java.util.stream.Collectors; @ExtendWith(DocumentDbFlapDoodleExtension.class) public class DocumentDbDatabaseMetaDataTest extends DocumentDbFlapDoodleTest { private static final String USERNAME = "user"; private static final String PASSWORD = "password"; private static final String DATABASE = "testDb"; private static final 
String COLLECTION_BASIC = "COLLECTION"; private static final String COLLECTION_SUB = "collectionSubDocument"; private static final String COLLECTION_ARRAY = "collectionWithArray"; private static final String HOSTNAME = "localhost"; private static final String DRIVER_MAJOR_VERSION_KEY = "driver.major.version"; private static final String DRIVER_MINOR_VERSION_KEY = "driver.minor.version"; private static final String DRIVER_FULL_VERSION_KEY = "driver.full.version"; private static final String PROPERTIES_FILE_PATH = "/project.properties"; private static Connection connection; private static DatabaseMetaData metadata; /** Initializes the test class. */ @BeforeAll public static void initialize() throws SQLException { createUser(DATABASE, USERNAME, PASSWORD); prepareSimpleConsistentData(DATABASE, COLLECTION_BASIC, 5, USERNAME, PASSWORD); final MongoClient client = createMongoClient(ADMIN_DATABASE, USERNAME, PASSWORD); final Document nestedDocument = Document.parse( "{ \"_id\" : \"key\", " + "\"doc\" : { \"field\" : 1 } }"); client.getDatabase(DATABASE).getCollection(COLLECTION_SUB).insertOne(nestedDocument); final Document arrayDocument = Document.parse( "{\n" + " \"_id\":3,\n" + " \"field\":\"string\",\n" + " \"array\": [\n" + " 1, 2, 3\n" + " ]\n" + "}" ); client.getDatabase(DATABASE).getCollection(COLLECTION_ARRAY).insertOne(arrayDocument); final String connectionString = String.format( "jdbc:documentdb://%s:%s@%s:%s/%s?tls=false", USERNAME, PASSWORD, HOSTNAME, getMongoPort(), DATABASE); connection = DriverManager.getConnection(connectionString); metadata = connection.getMetaData(); } @AfterAll static void afterAll() throws Exception { final Properties info = connection.getClientInfo(); final DocumentDbConnectionProperties properties = DocumentDbConnectionProperties .getPropertiesFromConnectionString(info, "jdbc:documentdb:", "jdbc:documentdb:"); try (DocumentDbSchemaWriter schemaWriter = new DocumentDbSchemaWriter(properties, null)) { 
schemaWriter.remove(DocumentDbSchema.DEFAULT_SCHEMA_NAME); } connection.close(); } /** * Tests for basic metadata fields. */ @Test @DisplayName("Tests basic common properties of a database.") void testBasicMetadata() throws SQLException, IOException { // Retrieve the version metadata from properties file. final int majorVersion; final int minorVersion; final String fullVersion; try (InputStream is = DocumentDbDatabaseMetaData.class.getResourceAsStream(PROPERTIES_FILE_PATH)) { final Properties p = new Properties(); p.load(is); majorVersion = Integer.parseInt(p.getProperty(DRIVER_MAJOR_VERSION_KEY)); minorVersion = Integer.parseInt(p.getProperty(DRIVER_MINOR_VERSION_KEY)); fullVersion = p.getProperty(DRIVER_FULL_VERSION_KEY); } Assertions.assertEquals("DocumentDB", metadata.getDatabaseProductName()); Assertions.assertEquals("4.0", metadata.getDatabaseProductVersion()); Assertions.assertEquals("DocumentDB JDBC Driver", metadata.getDriverName()); Assertions.assertNotNull(metadata.getSQLKeywords()); Assertions.assertNotNull(metadata.getNumericFunctions()); Assertions.assertNotNull(metadata.getStringFunctions()); Assertions.assertNotNull(metadata.getTimeDateFunctions()); Assertions.assertEquals("\\", metadata.getSearchStringEscape()); Assertions.assertEquals("",metadata.getExtraNameCharacters()); Assertions.assertEquals("catalog", metadata.getCatalogTerm()); Assertions.assertEquals(".", metadata.getCatalogSeparator()); Assertions.assertEquals(0, metadata.getMaxRowSize()); Assertions.assertEquals(4, metadata.getDatabaseMajorVersion()); Assertions.assertEquals(0, metadata.getDatabaseMinorVersion()); Assertions.assertEquals(4, metadata.getJDBCMajorVersion()); Assertions.assertEquals(2, metadata.getJDBCMinorVersion()); Assertions.assertEquals(majorVersion, metadata.getDriverMajorVersion()); Assertions.assertEquals(minorVersion, metadata.getDriverMinorVersion()); Assertions.assertEquals(fullVersion, metadata.getDriverVersion()); } /** * Tests columns of getProcedures(). 
*/ @Test @DisplayName("Tests the correct columns of getProcedures.") void testGetProcedures() throws SQLException { final ResultSet procedures = metadata.getProcedures(null, null, null); final ResultSetMetaData proceduresMetadata = procedures.getMetaData(); Assertions.assertEquals("PROCEDURE_CAT", proceduresMetadata.getColumnName(1)); Assertions.assertEquals("PROCEDURE_SCHEM", proceduresMetadata.getColumnName(2)); Assertions.assertEquals("PROCEDURE_NAME", proceduresMetadata.getColumnName(3)); Assertions.assertEquals("REMARKS", proceduresMetadata.getColumnName(7)); Assertions.assertEquals("PROCEDURE_TYPE", proceduresMetadata.getColumnName(8)); Assertions.assertEquals("SPECIFIC_NAME", proceduresMetadata.getColumnName(9)); } /** * Tests columns of getTables */ @Test @DisplayName("Tests the correct columns of getTables.") void testGetTables() throws SQLException { // Catalog pattern, schema pattern, table name pattern, table types final String[][] tests = new String [][]{ {null, null, null, null}, {" ", " ", " ", " "}, {null, null, null, "TABLE"}, {null, null, COLLECTION_BASIC, "TABLE"}, {"", null, COLLECTION_BASIC, "TABLE"}, {" ", null, COLLECTION_BASIC, " "}, {null, DATABASE, COLLECTION_BASIC, "TABLE"}, {" ", DATABASE, COLLECTION_BASIC, "TABLE"}, {"non-existing catalog", DATABASE, COLLECTION_BASIC, "non-existing table"}, {null, null, null}, {" ", " ", " "} }; for (String[] test : tests) { final String[] tableTypes = test.length == 4 ? 
new String[]{test[3]} : new String[]{}; final ResultSet tables = metadata.getTables(test[0], test[1], test[2], tableTypes); final ResultSetMetaData tablesMetadata = tables.getMetaData(); Assertions.assertEquals("TABLE_CAT", tablesMetadata.getColumnName(1)); Assertions.assertEquals("TABLE_SCHEM", tablesMetadata.getColumnName(2)); Assertions.assertEquals("TABLE_NAME", tablesMetadata.getColumnName(3)); Assertions.assertEquals("TABLE_TYPE", tablesMetadata.getColumnName(4)); Assertions.assertEquals("REMARKS", tablesMetadata.getColumnName(5)); Assertions.assertEquals("TYPE_CAT", tablesMetadata.getColumnName(6)); Assertions.assertEquals("TYPE_SCHEM", tablesMetadata.getColumnName(7)); Assertions.assertEquals("TYPE_NAME", tablesMetadata.getColumnName(8)); Assertions.assertEquals("SELF_REFERENCING_COL_NAME", tablesMetadata.getColumnName(9)); Assertions.assertEquals("REF_GENERATION", tablesMetadata.getColumnName(10)); } } /** * Test getTables returns empty ResultSet. */ @Test @DisplayName("Test getTables returns empty ResultSet.") void testGetMetadataTablesEmpty() throws SQLException { // Catalog pattern, schema pattern, table name pattern, table types final String[][] tests = new String [][]{ {null, "", COLLECTION_BASIC, "TABLE"}, {" ", "", COLLECTION_BASIC, "TABLE"}, {"non-existing catalog", DATABASE, COLLECTION_BASIC, "non-existing table"}, {" ", "", COLLECTION_BASIC}, {null, "", COLLECTION_BASIC}, }; for (String[] test : tests) { final String[] tableTypes = test.length == 4 ? new String[]{test[3]} : null; final ResultSet tables = metadata.getTables(test[0], test[1], test[2], tableTypes); Assertions.assertFalse(tables.next()); } } /** * Tests columns of getSchemas. 
*/ @Test @DisplayName("Tests the correct columns of getSchemas.") void testGetSchemas() throws SQLException { final ResultSet schemas = metadata.getSchemas(); final ResultSetMetaData schemasMetadata = schemas.getMetaData(); Assertions.assertEquals("TABLE_SCHEM", schemasMetadata.getColumnName(1)); Assertions.assertEquals("TABLE_CATALOG", schemasMetadata.getColumnName(2)); } /** * Tests columns of getCatalogs() */ @Test @DisplayName("Tests the correct columns of getCatalogs.") void testGetCatalogs() throws SQLException { final ResultSet catalogs = metadata.getCatalogs(); final ResultSetMetaData catalogMetadata = catalogs.getMetaData(); Assertions.assertEquals("TABLE_CAT", catalogMetadata.getColumnName(1)); } /** * Tests columns of getColumns result set. */ @Test @DisplayName("Tests the correct columns of getColumns.") void testGetColumns() throws SQLException { // Catalog pattern, schema pattern, table pattern, column pattern final String[][] tests = new String [][]{ {null, null, null, null}, {" ", " ", " ", " "}, {null, null, null, "%__id"}, {null, "%", null, "%__id"}, {null, "%", "%", "%__id"}, {"", null, COLLECTION_BASIC, "%__id"}, {null, DATABASE, COLLECTION_BASIC, "%__id"}, {"", null, COLLECTION_BASIC, "%\\_\\_id"}, }; for (String[] test : tests) { final ResultSet columns = metadata.getColumns(test[0], test[1], test[2], test[3]); final ResultSetMetaData columnsMetadata = columns.getMetaData(); Assertions.assertEquals("TABLE_CAT", columnsMetadata.getColumnName(1)); Assertions.assertEquals("TABLE_SCHEM", columnsMetadata.getColumnName(2)); Assertions.assertEquals("TABLE_NAME", columnsMetadata.getColumnName(3)); Assertions.assertEquals("COLUMN_NAME", columnsMetadata.getColumnName(4)); Assertions.assertEquals("DATA_TYPE", columnsMetadata.getColumnName(5)); Assertions.assertEquals("TYPE_NAME", columnsMetadata.getColumnName(6)); Assertions.assertEquals("COLUMN_SIZE", columnsMetadata.getColumnName(7)); Assertions.assertEquals("BUFFER_LENGTH", 
columnsMetadata.getColumnName(8)); Assertions.assertEquals("DECIMAL_DIGITS", columnsMetadata.getColumnName(9)); Assertions.assertEquals("NUM_PREC_RADIX", columnsMetadata.getColumnName(10)); Assertions.assertEquals("NULLABLE", columnsMetadata.getColumnName(11)); Assertions.assertEquals("REMARKS", columnsMetadata.getColumnName(12)); Assertions.assertEquals("COLUMN_DEF", columnsMetadata.getColumnName(13)); Assertions.assertEquals("SQL_DATA_TYPE", columnsMetadata.getColumnName(14)); Assertions.assertEquals("SQL_DATETIME_SUB", columnsMetadata.getColumnName(15)); Assertions.assertEquals("CHAR_OCTET_LENGTH", columnsMetadata.getColumnName(16)); Assertions.assertEquals("ORDINAL_POSITION", columnsMetadata.getColumnName(17)); Assertions.assertEquals("IS_NULLABLE", columnsMetadata.getColumnName(18)); Assertions.assertEquals("SCOPE_CATALOG", columnsMetadata.getColumnName(19)); Assertions.assertEquals("SCOPE_SCHEMA", columnsMetadata.getColumnName(20)); Assertions.assertEquals("SCOPE_TABLE", columnsMetadata.getColumnName(21)); Assertions.assertEquals("SOURCE_DATA_TYPE", columnsMetadata.getColumnName(22)); Assertions.assertEquals("IS_AUTOINCREMENT", columnsMetadata.getColumnName(23)); Assertions.assertEquals("IS_GENERATEDCOLUMN", columnsMetadata.getColumnName(24)); } } /** * Test getColumns returns empty ResultSet. */ @Test @DisplayName("Test getColumns returns empty ResultSet.") void testGetMetadataColumnsEmpty() throws SQLException { // Catalog pattern, schema pattern, table pattern, column pattern final String[][] tests = new String [][]{ {null, "", COLLECTION_BASIC, "%__id"}, {" ", "", COLLECTION_BASIC, "%__id"}, {"non-existent catalog", DATABASE, COLLECTION_BASIC, "%\\_\\_id"}, {"non-existent catalog", null, COLLECTION_BASIC, "non-existent column"} }; for (String[] test : tests) { final ResultSet columns = metadata.getColumns(test[0], test[1], test[2], test[3]); Assertions.assertFalse(columns.next()); } } /** * Tests that convertPatternToRegex works as expected. 
*/ @Test @DisplayName("Tests that convertPatternToRegex works as expected.") void testConvertPattern() { // Test input, expected result final String[][] tests = new String [][]{ {null, ""}, {"", ""}, {" ", ""}, {"_", "."}, {"_b", ".\\Qb\\E"}, {"a_b", "\\Qa\\E.\\Qb\\E"}, {"a_", "\\Qa\\E."}, {"%", ".*"}, {"a%b", "\\Qa\\E.*\\Qb\\E"}, {"%b", ".*\\Qb\\E"}, {"a%", "\\Qa\\E.*"}, {"\\_", "[_]"}, {"\\%", "[%]"}, {"\\\\", "[\\]"}, {"\\_\\%\\\\", "[_][%][\\]"}, {"\\_\\%", "[_][%]"}, {"a\\_b\\%c", "\\Qa\\E[_]\\Qb\\E[%]\\Qc\\E"}, {"a_b%c", "\\Qa\\E.\\Qb\\E.*\\Qc\\E"}, {"\\_\\\\%", "[_][\\].*"}, {"\\", "\\Q\\\\E"}, // mis-balanced escape {"\\\\\\\\\\", "[\\][\\]\\Q\\\\E"}, // mis-balanced escape {"\\_\\%\\", "[_][%]\\Q\\\\E"}, // mis-balanced escape }; for (String[] test : tests) { Assertions.assertEquals(test[1], DocumentDbDatabaseMetaData.convertPatternToRegex(test[0])); } } /** * Tests columns of getColumnPrivileges. */ @Test @DisplayName("Tests the correct columns of getColumnPrivileges.") void testGetColumnPrivileges() throws SQLException { // Catalog pattern, schema pattern, table pattern, column pattern final String[][] tests = new String [][]{ {null, null, null, null}, {" ", " ", " ", " "}, {null, "%", null, null}, {null, null, null, "%__id"}, {null, null, COLLECTION_BASIC, "%__id"}, {null, DATABASE, COLLECTION_BASIC, "%__id"} }; for (String[] test : tests) { final ResultSet columnPrivileges = metadata.getColumnPrivileges(test[0], test[1], test[2], test[3]); final ResultSetMetaData columnPrivilegesMetadata = columnPrivileges.getMetaData(); Assertions.assertEquals("TABLE_CAT", columnPrivilegesMetadata.getColumnName(1)); Assertions.assertEquals("TABLE_SCHEM", columnPrivilegesMetadata.getColumnName(2)); Assertions.assertEquals("TABLE_NAME", columnPrivilegesMetadata.getColumnName(3)); Assertions.assertEquals("COLUMN_NAME", columnPrivilegesMetadata.getColumnName(4)); Assertions.assertEquals("GRANTOR", columnPrivilegesMetadata.getColumnName(5)); 
Assertions.assertEquals("GRANTEE", columnPrivilegesMetadata.getColumnName(6)); Assertions.assertEquals("PRIVILEGE", columnPrivilegesMetadata.getColumnName(7)); Assertions.assertEquals("IS_GRANTABLE", columnPrivilegesMetadata.getColumnName(8)); } } @Test @DisplayName("Tests single table type TABLE is returned from getTableTypes.") void testGetTableTypes() throws SQLException { final ResultSet tableTypes = metadata.getTableTypes(); final ResultSetMetaData tableTypesMetadata = tableTypes.getMetaData(); Assertions.assertEquals(1, tableTypesMetadata.getColumnCount()); Assertions.assertEquals("TABLE_TYPE", tableTypesMetadata.getColumnName(1)); Assertions.assertEquals("TABLE_TYPE", tableTypesMetadata.getColumnLabel(1)); Assertions.assertEquals(DATABASE, tableTypesMetadata.getSchemaName(1)); Assertions.assertEquals(0,tableTypesMetadata.isNullable(1)); Assertions.assertEquals(0,tableTypesMetadata.getPrecision(1)); Assertions.assertEquals(0,tableTypesMetadata.getScale(1)); Assertions.assertEquals(64,tableTypesMetadata.getColumnDisplaySize(1)); Assertions.assertEquals(Types.VARCHAR, tableTypesMetadata.getColumnType(1)); Assertions.assertEquals(JdbcType.VARCHAR.name(), tableTypesMetadata.getColumnTypeName(1)); Assertions.assertTrue(tableTypes.next()); Assertions.assertEquals("TABLE", tableTypes.getString(1)); Assertions.assertFalse(tableTypes.next()); } /** * Tests columns of getPrimaryKeys(). 
*/
@Test
@DisplayName("Tests the correct columns of getPrimaryKeys.")
void testGetPrimaryKeys() throws SQLException {
    final ResultSet primaryKeys = metadata.getPrimaryKeys(null, null, COLLECTION_BASIC);
    final ResultSetMetaData primaryKeysMetadata = primaryKeys.getMetaData();
    // JDBC-mandated column order for DatabaseMetaData.getPrimaryKeys().
    Assertions.assertEquals("TABLE_CAT", primaryKeysMetadata.getColumnName(1));
    Assertions.assertEquals("TABLE_SCHEM", primaryKeysMetadata.getColumnName(2));
    Assertions.assertEquals("TABLE_NAME", primaryKeysMetadata.getColumnName(3));
    Assertions.assertEquals("COLUMN_NAME", primaryKeysMetadata.getColumnName(4));
    Assertions.assertEquals("KEY_SEQ", primaryKeysMetadata.getColumnName(5));
    Assertions.assertEquals("PK_NAME", primaryKeysMetadata.getColumnName(6));
}

/**
 * Tests columns of getImportedKeys().
 */
@Test
@DisplayName("Tests the correct columns of foreign keys.")
void testGetImportedKeys() throws SQLException {
    final ResultSet importedKeys = metadata.getImportedKeys(null, null, COLLECTION_BASIC);
    final ResultSetMetaData foreignKeysMetadata = importedKeys.getMetaData();
    // JDBC-mandated column order for DatabaseMetaData.getImportedKeys().
    Assertions.assertEquals("PKTABLE_CAT", foreignKeysMetadata.getColumnName(1));
    Assertions.assertEquals("PKTABLE_SCHEM", foreignKeysMetadata.getColumnName(2));
    Assertions.assertEquals("PKTABLE_NAME", foreignKeysMetadata.getColumnName(3));
    Assertions.assertEquals("PKCOLUMN_NAME", foreignKeysMetadata.getColumnName(4));
    Assertions.assertEquals("FKTABLE_CAT", foreignKeysMetadata.getColumnName(5));
    Assertions.assertEquals("FKTABLE_SCHEM", foreignKeysMetadata.getColumnName(6));
    Assertions.assertEquals("FKTABLE_NAME", foreignKeysMetadata.getColumnName(7));
    Assertions.assertEquals("FKCOLUMN_NAME", foreignKeysMetadata.getColumnName(8));
    Assertions.assertEquals("KEY_SEQ", foreignKeysMetadata.getColumnName(9));
    Assertions.assertEquals("UPDATE_RULE", foreignKeysMetadata.getColumnName(10));
    Assertions.assertEquals("DELETE_RULE", foreignKeysMetadata.getColumnName(11));
    Assertions.assertEquals("FK_NAME", foreignKeysMetadata.getColumnName(12));
    Assertions.assertEquals("PK_NAME", foreignKeysMetadata.getColumnName(13));
    Assertions.assertEquals("DEFERRABILITY", foreignKeysMetadata.getColumnName(14));
}

/**
 * Tests columns of getAttributes().
 */
@Test
@DisplayName("Tests the correct columns of getAttributes.")
void testGetAttributes() throws SQLException {
    final ResultSet attributes = metadata.getAttributes(null, null, null, null);
    final ResultSetMetaData attributesMetadata = attributes.getMetaData();
    // JDBC-mandated column order for DatabaseMetaData.getAttributes().
    Assertions.assertEquals("TYPE_CAT", attributesMetadata.getColumnName(1));
    Assertions.assertEquals("TYPE_SCHEM", attributesMetadata.getColumnName(2));
    Assertions.assertEquals("TYPE_NAME", attributesMetadata.getColumnName(3));
    Assertions.assertEquals("ATTR_NAME", attributesMetadata.getColumnName(4));
    Assertions.assertEquals("DATA_TYPE", attributesMetadata.getColumnName(5));
    Assertions.assertEquals("ATTR_TYPE_NAME", attributesMetadata.getColumnName(6));
    Assertions.assertEquals("ATTR_SIZE", attributesMetadata.getColumnName(7));
    Assertions.assertEquals("DECIMAL_DIGITS", attributesMetadata.getColumnName(8));
    Assertions.assertEquals("NUM_PREC_RADIX", attributesMetadata.getColumnName(9));
    Assertions.assertEquals("NULLABLE", attributesMetadata.getColumnName(10));
    Assertions.assertEquals("REMARKS", attributesMetadata.getColumnName(11));
    Assertions.assertEquals("ATTR_DEF", attributesMetadata.getColumnName(12));
    Assertions.assertEquals("SQL_DATA_TYPE", attributesMetadata.getColumnName(13));
    Assertions.assertEquals("SQL_DATETIME_SUB", attributesMetadata.getColumnName(14));
    Assertions.assertEquals("CHAR_OCTET_LENGTH", attributesMetadata.getColumnName(15));
    Assertions.assertEquals("ORDINAL_POSITION", attributesMetadata.getColumnName(16));
    Assertions.assertEquals("IS_NULLABLE", attributesMetadata.getColumnName(17));
    Assertions.assertEquals("SCOPE_CATALOG", attributesMetadata.getColumnName(18));
    Assertions.assertEquals("SCOPE_SCHEMA", attributesMetadata.getColumnName(19));
    Assertions.assertEquals("SCOPE_TABLE", attributesMetadata.getColumnName(20));
    Assertions.assertEquals("SOURCE_DATA_TYPE", attributesMetadata.getColumnName(21));
}

/**
 * Verifies the single primary-key row of a basic collection: no catalog, schema is the
 * database name, and the PK column is the synthetic "&lt;collection&gt;__id" column at KEY_SEQ 1.
 */
@Test
@DisplayName("Tests basic primary key metadata.")
void testGetPrimaryKeyBasic() throws SQLException {
    final ResultSet primaryKeys = metadata.getPrimaryKeys(null, DATABASE, COLLECTION_BASIC);
    Assertions.assertNotNull(primaryKeys);
    Assertions.assertTrue(primaryKeys.next());
    // TABLE_CAT is null — the driver does not expose catalogs.
    Assertions.assertNull(primaryKeys.getString(1));
    Assertions.assertEquals(DATABASE, primaryKeys.getString(2));
    Assertions.assertEquals(COLLECTION_BASIC, primaryKeys.getString(3));
    Assertions.assertEquals(COLLECTION_BASIC + "__id", primaryKeys.getString(4));
    Assertions.assertEquals(1, primaryKeys.getShort(5));
    // PK_NAME is not populated.
    Assertions.assertNull(primaryKeys.getString(6));
    Assertions.assertFalse(primaryKeys.next());
}

/**
 * Verifies that a sub-document virtual table inherits the parent collection's
 * "&lt;collection&gt;__id" column as its primary key.
 */
@Test
@DisplayName("Tests primary keys of sub-document virtual table.")
void testGetPrimaryKeySubdocument() throws SQLException {
    final ResultSet primaryKeys = metadata.getPrimaryKeys(null, DATABASE, COLLECTION_SUB + "_doc");
    Assertions.assertNotNull(primaryKeys);
    Assertions.assertTrue(primaryKeys.next());
    Assertions.assertNull(primaryKeys.getString(1));
    Assertions.assertEquals(DATABASE, primaryKeys.getString(2));
    Assertions.assertEquals(COLLECTION_SUB + "_doc", primaryKeys.getString(3));
    // PK column keeps the parent collection's name, not the virtual table's.
    Assertions.assertEquals(COLLECTION_SUB + "__id", primaryKeys.getString(4));
    Assertions.assertEquals(1, primaryKeys.getShort(5));
    Assertions.assertNull(primaryKeys.getString(6));
    Assertions.assertFalse(primaryKeys.next());
}

@Test
@DisplayName("Tests that filtering by schema and table works on getPrimaryKeys.")
void testGetPrimaryKeysFilters() throws SQLException {
    // Non-matching schema filter yields no rows.
    final ResultSet emptyResultSetSchema = metadata.getPrimaryKeys(null, "invalidDb", null);
    Assertions.assertFalse(emptyResultSetSchema.next());
    // Non-matching table filter yields no rows.
    final ResultSet emptyResultSetTable = metadata.getPrimaryKeys(null, null, "invalidCollection");
    Assertions.assertFalse(emptyResultSetTable.next());
    // No filter at all returns at least one row.
    final ResultSet noFilterPrimaryKeys = metadata.getPrimaryKeys(null, null, null);
    Assertions.assertTrue(noFilterPrimaryKeys.next());
}

@Test
@DisplayName("Tests that filtering by schema and table works on getImportedKeys.")
void testGetImportedKeysFilters() throws SQLException {
    // Each {catalog, schema, table} triple below should match nothing.
    // NOTE(review): {null, null, "invalidCollection"} appears twice — likely a
    // copy/paste duplicate; consider removing one occurrence.
    final String[][] tests = new String [][]{
            {null, "invalidDb", null},
            {null, null, "invalidCollection"},
            {null, DATABASE, "invalidCollection"},
            {"invalidCatalog", null, COLLECTION_SUB + "_doc"},
            {null, null, "invalidCollection"},
            {"invalidCatalog", "invalidDb", COLLECTION_SUB + "_doc"},
    };
    for (String[] test : tests) {
        final ResultSet emptyResultSetSchema = metadata.getImportedKeys(test[0], test[1], test[2]);
        Assertions.assertFalse(emptyResultSetSchema.next());
    }
    // No filter at all returns at least one row.
    final ResultSet noFilterImportedKeys = metadata.getImportedKeys(null, null, null);
    Assertions.assertTrue(noFilterImportedKeys.next());
}

/**
 * Verifies the two-part primary key of an array virtual table:
 * the parent's "__id" column (KEY_SEQ 1) plus the array index column (KEY_SEQ 2).
 */
@Test
@DisplayName("Tests primary keys of array virtual tables.")
void testGetPrimaryKeysArray() throws SQLException {
    final ResultSet arrayPrimaryKeys = metadata.getPrimaryKeys(null, null, COLLECTION_ARRAY + "_array");
    Assertions.assertTrue(arrayPrimaryKeys.next());
    Assertions.assertNull(arrayPrimaryKeys.getString(1));
    Assertions.assertEquals(DATABASE, arrayPrimaryKeys.getString(2));
    Assertions.assertEquals(COLLECTION_ARRAY + "_array", arrayPrimaryKeys.getString(3));
    Assertions.assertEquals(COLLECTION_ARRAY + "__id", arrayPrimaryKeys.getString(4));
    Assertions.assertEquals(1, arrayPrimaryKeys.getShort(5));
    Assertions.assertNull(arrayPrimaryKeys.getString(6));
    Assertions.assertTrue(arrayPrimaryKeys.next());
    Assertions.assertNull(arrayPrimaryKeys.getString(1));
    Assertions.assertEquals(DATABASE, arrayPrimaryKeys.getString(2));
    Assertions.assertEquals(COLLECTION_ARRAY + "_array", arrayPrimaryKeys.getString(3));
    Assertions.assertEquals("array_index_lvl_0", arrayPrimaryKeys.getString(4));
    Assertions.assertEquals(2, arrayPrimaryKeys.getShort(5)); // Indicates second column of PK
    Assertions.assertNull(arrayPrimaryKeys.getString(6));
    Assertions.assertFalse(arrayPrimaryKeys.next());
}

@Test
@DisplayName("Tests primary keys of array virtual tables with empty string parameters. An empty ResultSet should be returned.")
void testGetPrimaryKeysArrayWithWhiteSpace() throws SQLException {
    // Empty-string catalog/schema filters match nothing (unlike null, which means "any").
    final ResultSet arrayPrimaryKeys = metadata.getPrimaryKeys("", "", COLLECTION_ARRAY + "_array");
    Assertions.assertFalse(arrayPrimaryKeys.next());
}

/**
 * Verifies the single foreign-key row linking a sub-document virtual table back to
 * its parent collection via the shared "__id" column.
 */
@Test
@DisplayName("Tests foreign keys of sub-document virtual tables.")
void testGetImportedKeysDocument() throws SQLException {
    final ResultSet subdocImportedKeys = metadata.getImportedKeys(null, null, COLLECTION_SUB + "_doc");
    Assertions.assertTrue(subdocImportedKeys.next());
    // PK side: parent collection.
    Assertions.assertNull(subdocImportedKeys.getString(1));
    Assertions.assertEquals(DATABASE, subdocImportedKeys.getString(2));
    Assertions.assertEquals(COLLECTION_SUB, subdocImportedKeys.getString(3));
    Assertions.assertEquals(COLLECTION_SUB + "__id", subdocImportedKeys.getString(4));
    // FK side: the virtual table.
    Assertions.assertNull(subdocImportedKeys.getString(5));
    Assertions.assertEquals(DATABASE, subdocImportedKeys.getString(6));
    Assertions.assertEquals(COLLECTION_SUB + "_doc", subdocImportedKeys.getString(7));
    Assertions.assertEquals(COLLECTION_SUB + "__id", subdocImportedKeys.getString(8));
    Assertions.assertEquals(1, subdocImportedKeys.getShort(9));
    Assertions.assertFalse(subdocImportedKeys.next());
}

/**
 * Verifies the single foreign-key row linking an array virtual table back to
 * its parent collection via the shared "__id" column.
 */
@Test
@DisplayName("Tests foreign keys of array virtual tables.")
void testGetImportedKeysArray() throws SQLException {
    final ResultSet arrayImportedKeys = metadata.getImportedKeys(null, null, COLLECTION_ARRAY + "_array");
    Assertions.assertTrue(arrayImportedKeys.next());
    // PK side: parent collection.
    Assertions.assertNull(arrayImportedKeys.getString(1));
    Assertions.assertEquals(DATABASE, arrayImportedKeys.getString(2));
    Assertions.assertEquals(COLLECTION_ARRAY, arrayImportedKeys.getString(3));
    Assertions.assertEquals(COLLECTION_ARRAY + "__id", arrayImportedKeys.getString(4));
    // FK side: the virtual table.
    Assertions.assertNull(arrayImportedKeys.getString(5));
    Assertions.assertEquals(DATABASE, arrayImportedKeys.getString(6));
    Assertions.assertEquals(COLLECTION_ARRAY + "_array", arrayImportedKeys.getString(7));
    Assertions.assertEquals(COLLECTION_ARRAY + "__id", arrayImportedKeys.getString(8));
    Assertions.assertEquals(1, arrayImportedKeys.getShort(9));
    Assertions.assertFalse(arrayImportedKeys.next());
}

@Test
@DisplayName("Tests foreign keys of array virtual tables with whitespace parameters. An empty ResultSet should be returned")
void testGetImportedKeysArrayWithWhiteSpaces() throws SQLException {
    // Empty-string catalog/schema filters match nothing (unlike null, which means "any").
    final ResultSet arrayImportedKeys = metadata.getImportedKeys("", "", COLLECTION_ARRAY + "_array");
    Assertions.assertFalse(arrayImportedKeys.next());
}

/**
 * Verifies that getTypeInfo() returns exactly the JDBC types the driver supports,
 * that each row's TYPE_NAME matches the JdbcType enum name, and nothing else appears.
 */
@Test
@DisplayName("Tests getting type information.")
void testGetTypeInfo() throws SQLException {
    final List<Integer> expectedDataTypes = Arrays.asList(
            Types.BINARY,
            Types.BIGINT,
            Types.BOOLEAN,
            Types.CHAR,
            Types.DATE,
            Types.DECIMAL,
            Types.DOUBLE,
            Types.FLOAT,
            Types.INTEGER,
            Types.NCHAR,
            Types.NULL,
            Types.NVARCHAR,
            Types.REAL,
            Types.SMALLINT,
            Types.TIME,
            Types.TIMESTAMP,
            Types.TINYINT,
            Types.VARBINARY,
            Types.VARCHAR
    ).stream().sorted().collect(Collectors.toList());
    final List<Integer> observedDataTypes = new ArrayList<>();
    final ResultSet resultSet = metadata.getTypeInfo();
    while (resultSet.next()) {
        // Column 2 is DATA_TYPE; column 1 is TYPE_NAME.
        final int jdbcDataType = resultSet.getInt(2);
        observedDataTypes.add(jdbcDataType);
        if (expectedDataTypes.contains(jdbcDataType)) {
            Assertions.assertEquals(JdbcType.fromType(jdbcDataType).name(), resultSet.getString(1));
        } else {
            Assertions.fail(String.format(
                    "Unexpected type returned %d-'%s'",
                    jdbcDataType,
                    JdbcType.fromType(jdbcDataType).name()));
        }
    }
    // Sorted comparison: getTypeInfo() row order is not asserted, only the set of types.
    final List<Integer> sortedDataTypes = observedDataTypes.stream()
            .sorted()
            .collect(Collectors.toList());
    Assertions.assertArrayEquals(expectedDataTypes.toArray(), sortedDataTypes.toArray());
}
}
4,498
0
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb
Create_ds/amazon-documentdb-jdbc-driver/src/test/java/software/amazon/documentdb/jdbc/DocumentDbStatementDateTimeTest.java
/* * Copyright <2021> Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://www.apache.org/licenses/LICENSE-2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. * */ package software.amazon.documentdb.jdbc; import org.bson.BsonDateTime; import org.bson.BsonDocument; import org.bson.BsonNull; import org.bson.BsonTimestamp; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; import software.amazon.documentdb.jdbc.common.test.DocumentDbTestEnvironment; import java.sql.Connection; import java.sql.Date; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; import java.time.DayOfWeek; import java.time.Instant; import java.time.Month; import java.time.OffsetDateTime; import java.time.ZoneOffset; import java.time.format.TextStyle; import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.function.Function; public class DocumentDbStatementDateTimeTest extends DocumentDbStatementTest { /** * Tests TIMESTAMPADD() and TIMESTAMPDIFF() for intervals that can be converted to ms. * @throws SQLException occurs if query fails. 
*/ @DisplayName("Tests TIMESTAMPADD() and TIMESTAMPDIFF() with different intervals.") @ParameterizedTest(name = "testQueryTimestampAddDiff - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testQueryTimestampAddDiff(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testTimestampAddDiff"; final long dateTime = Instant.parse("2020-02-22T00:00:00.00Z").toEpochMilli(); final long weekAfterDateTime = Instant.parse("2020-02-29T00:00:00.00Z").toEpochMilli(); final long dayAfterDateTime = Instant.parse("2020-02-23T00:00:00.00Z").toEpochMilli(); final long hourAfterDateTime = Instant.parse("2020-02-22T01:00:00.00Z").toEpochMilli(); final long minuteAfterDateTime = Instant.parse("2020-02-22T00:01:00.00Z").toEpochMilli(); final long secondAfterDateTime = Instant.parse("2020-02-22T00:00:01.00Z").toEpochMilli(); final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}"); doc1.append("field", new BsonDateTime(dateTime)); doc1.append("fieldWeekAfter", new BsonDateTime(weekAfterDateTime)); doc1.append("fieldDayAfter", new BsonDateTime(dayAfterDateTime)); doc1.append("fieldHourAfter", new BsonDateTime(hourAfterDateTime)); doc1.append("fieldMinuteAfter", new BsonDateTime(minuteAfterDateTime)); doc1.append("fieldSecondAfter", new BsonDateTime(secondAfterDateTime)); insertBsonDocuments(tableName, new BsonDocument[]{doc1}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); // Add 1 day to a date column. final ResultSet resultSet = statement.executeQuery( String.format("SELECT TIMESTAMPADD(WEEK" + ", 1, \"field\") FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals(new Timestamp(weekAfterDateTime), resultSet.getTimestamp(1)); Assertions.assertFalse(resultSet.next()); // Add 1 day to a date column. 
final ResultSet resultSet1 = statement.executeQuery( String.format("SELECT TIMESTAMPADD(DAY" + ", 1, \"field\") FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet1); Assertions.assertTrue(resultSet1.next()); Assertions.assertEquals(new Timestamp(dayAfterDateTime), resultSet1.getTimestamp(1)); Assertions.assertFalse(resultSet1.next()); // Add 1 hour to a date column. final ResultSet resultSet2 = statement.executeQuery( String.format("SELECT TIMESTAMPADD(HOUR" + ", 1, \"field\") FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet2); Assertions.assertTrue(resultSet2.next()); Assertions.assertEquals(new Timestamp(hourAfterDateTime), resultSet2.getTimestamp(1)); Assertions.assertFalse(resultSet2.next()); // Add 1 minute to a date column. final ResultSet resultSet3 = statement.executeQuery( String.format("SELECT TIMESTAMPADD(MINUTE" + ", 1, \"field\") FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet3); Assertions.assertTrue(resultSet3.next()); Assertions.assertEquals(new Timestamp(minuteAfterDateTime), resultSet3.getTimestamp(1)); Assertions.assertFalse(resultSet3.next()); // Add 1 second to a date column. final ResultSet resultSet4 = statement.executeQuery( String.format("SELECT TIMESTAMPADD(SECOND" + ", 1, \"field\") FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet4); Assertions.assertTrue(resultSet4.next()); Assertions.assertEquals(new Timestamp(secondAfterDateTime), resultSet4.getTimestamp(1)); Assertions.assertFalse(resultSet4.next()); // Add 1 day to a date literal. 
final ResultSet resultSet5 = statement.executeQuery( String.format("SELECT TIMESTAMPADD(DAY" + ", 1, TIMESTAMP '2020-02-22 00:00:00' ) FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet5); Assertions.assertTrue(resultSet5.next()); Assertions.assertEquals(new Timestamp(dayAfterDateTime), resultSet5.getTimestamp(1)); Assertions.assertFalse(resultSet5.next()); // Add 1 day to the date and extract the day of the month from result. final ResultSet resultSet6 = statement.executeQuery( String.format("SELECT DAYOFMONTH(TIMESTAMPADD(DAY" + ", 1, \"field\")) FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet6); Assertions.assertTrue(resultSet6.next()); Assertions.assertEquals(23, resultSet6.getInt(1)); Assertions.assertFalse(resultSet6.next()); // Difference of DAY final ResultSet resultSet7 = statement.executeQuery( String.format("SELECT TIMESTAMPDIFF(DAY, \"field\", \"fieldDayAfter\")" + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet7); Assertions.assertTrue(resultSet7.next()); Assertions.assertEquals(1, resultSet7.getLong(1)); Assertions.assertFalse(resultSet7.next()); // Difference of WEEK final ResultSet resultSet8 = statement.executeQuery( String.format("SELECT TIMESTAMPDIFF(WEEK, \"field\", \"fieldWeekAfter\")" + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet8); Assertions.assertTrue(resultSet8.next()); Assertions.assertEquals(1, resultSet8.getLong(1)); Assertions.assertFalse(resultSet8.next()); // Difference of HOUR final ResultSet resultSet9 = statement.executeQuery( String.format("SELECT TIMESTAMPDIFF(HOUR, \"field\", \"fieldHourAfter\")" + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet9); Assertions.assertTrue(resultSet9.next()); Assertions.assertEquals(1, resultSet9.getLong(1)); Assertions.assertFalse(resultSet9.next()); // Difference of MINUTE final ResultSet 
resultSet10 = statement.executeQuery( String.format("SELECT TIMESTAMPDIFF(MINUTE, \"field\", \"fieldMinuteAfter\")" + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet10); Assertions.assertTrue(resultSet10.next()); Assertions.assertEquals(1, resultSet10.getLong(1)); Assertions.assertFalse(resultSet10.next()); // Difference of SECOND final ResultSet resultSet11 = statement.executeQuery( String.format("SELECT TIMESTAMPDIFF(SECOND, \"field\", \"fieldSecondAfter\")" + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet11); Assertions.assertTrue(resultSet11.next()); Assertions.assertEquals(1, resultSet11.getLong(1)); Assertions.assertFalse(resultSet11.next()); // Difference of MINUTE in SECOND final ResultSet resultSet12 = statement.executeQuery( String.format("SELECT TIMESTAMPDIFF(SECOND, \"field\", \"fieldMinuteAfter\")" + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet12); Assertions.assertTrue(resultSet12.next()); Assertions.assertEquals(60, resultSet12.getLong(1)); Assertions.assertFalse(resultSet12.next()); // Difference of HOUR in SECOND final ResultSet resultSet13 = statement.executeQuery( String.format("SELECT TIMESTAMPDIFF(SECOND, \"field\", \"fieldHourAfter\")" + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet13); Assertions.assertTrue(resultSet13.next()); Assertions.assertEquals(3600, resultSet13.getLong(1)); Assertions.assertFalse(resultSet13.next()); // Difference of DAY in SECOND final ResultSet resultSet14 = statement.executeQuery( String.format("SELECT TIMESTAMPDIFF(SECOND, \"field\", \"fieldDayAfter\")" + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet14); Assertions.assertTrue(resultSet14.next()); Assertions.assertEquals(86400, resultSet14.getLong(1)); Assertions.assertFalse(resultSet14.next()); // Difference of WEEK in SECOND final ResultSet 
resultSet15 = statement.executeQuery( String.format("SELECT TIMESTAMPDIFF(SECOND, \"field\", \"fieldWeekAfter\")" + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet15); Assertions.assertTrue(resultSet15.next()); Assertions.assertEquals(604800, resultSet15.getLong(1)); Assertions.assertFalse(resultSet15.next()); // Difference of SECOND in MICROSECOND final ResultSet resultSet16 = statement.executeQuery( String.format( "SELECT TIMESTAMPDIFF(MICROSECOND, \"field\", \"fieldSecondAfter\")" + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet16); Assertions.assertTrue(resultSet16.next()); Assertions.assertEquals(1000000, resultSet16.getLong(1)); Assertions.assertFalse(resultSet16.next()); } } /** * Tests TIMESTAMPDIFF() for YEAR. * @throws SQLException occurs if query fails. */ @DisplayName("Tests TIMESTAMPDIFF() for YEAR.") @ParameterizedTest(name = "testQueryTimestampDiffYear - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testQueryTimestampDiffYear(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testTimestampDiffYear"; final long dateTime = Instant.parse("2020-02-22T00:00:00.00Z").toEpochMilli(); final long yearAfterDateTime = Instant.parse("2021-02-22T00:00:00.00Z").toEpochMilli(); final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}"); doc1.append("field", new BsonDateTime(dateTime)); doc1.append("fieldYearAfter", new BsonDateTime(yearAfterDateTime)); insertBsonDocuments(tableName, new BsonDocument[]{doc1}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); // Difference of 12 months in YEAR final ResultSet resultSet = statement.executeQuery( String.format("SELECT TIMESTAMPDIFF(YEAR, \"field\", \"fieldYearAfter\")" + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); 
Assertions.assertTrue(resultSet.next()); Assertions.assertEquals(1, resultSet.getLong(1)); Assertions.assertFalse(resultSet.next()); } } /** * Tests TIMESTAMPADD() for QUARTER. * @throws SQLException occurs if query fails. */ @DisplayName("Tests TIMESTAMPDIFF() for QUARTER.") @ParameterizedTest(name = "testQueryTimestampDiffQuarter - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testQueryTimestampDiffQuarter(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testTimestampDiffQuarter"; final long dateTime = Instant.parse("2020-02-22T00:00:00.00Z").toEpochMilli(); final long yearAfterDateTime = Instant.parse("2021-02-22T00:00:00.00Z").toEpochMilli(); final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}"); doc1.append("field", new BsonDateTime(dateTime)); doc1.append("fieldYearAfter", new BsonDateTime(yearAfterDateTime)); insertBsonDocuments(tableName, new BsonDocument[]{doc1}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); // Difference of 12 months in QUARTER final ResultSet resultSet = statement.executeQuery( String.format("SELECT TIMESTAMPDIFF(QUARTER, \"field\", \"fieldYearAfter\")" + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals(4, resultSet.getLong(1)); Assertions.assertFalse(resultSet.next()); } } /** * Tests TIMESTAMPADD() for MONTH. * @throws SQLException occurs if query fails. 
*/ @DisplayName("Tests TIMESTAMPDIFF() for MONTH.") @ParameterizedTest(name = "testQueryTimestampDiffMonth - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testQueryTimestampDiffMonth(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testTimestampDiffMonth"; final long dateTime = Instant.parse("2020-02-22T00:00:00.00Z").toEpochMilli(); final long yearAfterDateTime = Instant.parse("2021-02-22T00:00:00.00Z").toEpochMilli(); final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}"); doc1.append("field", new BsonDateTime(dateTime)); doc1.append("fieldYearAfter", new BsonDateTime(yearAfterDateTime)); insertBsonDocuments(tableName, new BsonDocument[]{doc1}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); // Difference of 12 months in MONTH final ResultSet resultSet = statement.executeQuery( String.format("SELECT TIMESTAMPDIFF(MONTH, \"field\", \"fieldYearAfter\")" + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals(12, resultSet.getLong(1)); Assertions.assertFalse(resultSet.next()); } } /** * Tests that EXTRACT works for different time units. * @throws SQLException occurs if query fails. 
*/ @DisplayName("Tests EXTRACT() for different time units.") @ParameterizedTest(name = "testQueryExtract - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testQueryExtract(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testExtract"; final long dateTime = Instant.parse("2020-02-03T04:05:06.00Z").toEpochMilli(); final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}"); doc1.append("field", new BsonDateTime(dateTime)); final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102}"); doc2.append("field", new BsonDateTime(dateTime)); insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2}); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); // Get date parts and use group by to remove any duplicate rows. final Locale originalLocale = Locale.getDefault(); try { Locale.setDefault(Locale.SIMPLIFIED_CHINESE); // Get date parts. final ResultSet resultSet = statement.executeQuery(String.format( "SELECT %n" + "YEAR(\"field\"),%n" + "MONTH(\"field\"),%n" + "WEEK(\"field\"),%n" + "DAYOFMONTH(\"field\"),%n" + "DAYOFWEEK(\"field\"),%n" + "DAYOFYEAR(\"field\"),%n" + "HOUR(\"field\"),%n" + "MINUTE(\"field\"),%n" + "SECOND(\"field\"),%n" + "QUARTER(\"field\"),%n" + "DAYNAME(\"field\"),%n" + "MONTHNAME(\"field\")%n" + "FROM \"%s\".\"%s\" %n" + "GROUP BY %n" + "YEAR(\"field\"),%n" + "MONTH(\"field\"),%n" + "WEEK(\"field\"),%n" + "DAYOFMONTH(\"field\"),%n" + "DAYOFWEEK(\"field\"),%n" + "DAYOFYEAR(\"field\"),%n" + "HOUR(\"field\"),%n" + "MINUTE(\"field\"),%n" + "SECOND(\"field\"),%n" + "QUARTER(\"field\"),%n" + "DAYNAME(\"field\"),%n" + "MONTHNAME(\"field\")", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); // Year is 2020. Assertions.assertEquals(2020, resultSet.getInt(1)); // Month is 2 (Feb). 
Assertions.assertEquals(2, resultSet.getInt(2)); // Week in year is 5. Assertions.assertEquals(5, resultSet.getInt(3)); // Day of month is 3. Assertions.assertEquals(3, resultSet.getInt(4)); // Day of week is 2 (Monday). Assertions.assertEquals(2, resultSet.getInt(5)); // Day of year is 34. Assertions.assertEquals(34, resultSet.getInt(6)); // Hour is 4. Assertions.assertEquals(4, resultSet.getInt(7)); // Minute is 5. Assertions.assertEquals(5, resultSet.getInt(8)); // Seconds is 6. Assertions.assertEquals(6, resultSet.getInt(9)); // Quarter is 1. Assertions.assertEquals(1, resultSet.getInt(10)); // Day name is Monday Assertions.assertEquals("星期一", resultSet.getString(11)); // Month name is February Assertions.assertEquals("二月", resultSet.getString(12)); Assertions.assertFalse(resultSet.next()); // Use extract in CASE. final ResultSet resultSet2 = statement.executeQuery( String.format("SELECT " + "CASE WHEN DAYOFMONTH(\"field\") < 5 " + "THEN 'A' " + "ELSE 'B' " + "END " + "FROM \"%s\".\"%s\" ", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet2); Assertions.assertTrue(resultSet2.next()); Assertions.assertEquals("A", resultSet2.getString(1)); Assertions.assertTrue(resultSet2.next()); Assertions.assertEquals("A", resultSet2.getString(1)); Assertions.assertFalse(resultSet2.next()); } finally { Locale.setDefault(originalLocale); } } } @DisplayName("Tests DAYNAME") @ParameterizedTest(name = "testDayName - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testDayName(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testDayName"; final List<BsonDocument> docs = new ArrayList<>(); final Instant startingDateTime = Instant.parse("2020-02-02T04:05:06.00Z"); for (int i = 0; i < 8; i++) { docs.add(new BsonDocument( "field", new BsonDateTime(startingDateTime.plus(i, ChronoUnit.DAYS).toEpochMilli()))); } insertBsonDocuments(tableName, docs.toArray(new 
BsonDocument[0])); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final Locale originalLocale = Locale.getDefault(); try { Locale.setDefault(Locale.SIMPLIFIED_CHINESE); // Get date parts. final ResultSet resultSet = statement.executeQuery( String.format("SELECT " + "DAYNAME(\"field\"), \"field\"" + "FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals( new Timestamp(startingDateTime.plus(0, ChronoUnit.DAYS).toEpochMilli()), resultSet.getTimestamp(2)); Assertions.assertEquals( DayOfWeek.SUNDAY.getDisplayName(TextStyle.FULL, Locale.getDefault()), resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals( DayOfWeek.MONDAY.getDisplayName(TextStyle.FULL, Locale.getDefault()), resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals( DayOfWeek.TUESDAY.getDisplayName(TextStyle.FULL, Locale.getDefault()), resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals( DayOfWeek.WEDNESDAY.getDisplayName(TextStyle.FULL, Locale.getDefault()), resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals( DayOfWeek.THURSDAY.getDisplayName(TextStyle.FULL, Locale.getDefault()), resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals( DayOfWeek.FRIDAY.getDisplayName(TextStyle.FULL, Locale.getDefault()), resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals( DayOfWeek.SATURDAY.getDisplayName(TextStyle.FULL, Locale.getDefault()), resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals( DayOfWeek.SUNDAY.getDisplayName(TextStyle.FULL, Locale.getDefault()), resultSet.getString(1)); Assertions.assertFalse(resultSet.next()); } finally { Locale.setDefault(originalLocale); } } } @DisplayName("Tests 
MONTHNAME") @ParameterizedTest(name = "testMonthName - [{index}] - {arguments}") @MethodSource({"getTestEnvironments"}) void testMonthName(final DocumentDbTestEnvironment testEnvironment) throws SQLException { setTestEnvironment(testEnvironment); final String tableName = "testMonthName"; final List<BsonDocument> docs = new ArrayList<>(); final OffsetDateTime startingDateTime = Instant.parse("2020-01-02T04:05:06.00Z").atOffset(ZoneOffset.UTC); for (int i = 0; i < 13; i++) { docs.add(new BsonDocument( "field", new BsonDateTime(startingDateTime.plusMonths(i).toInstant().toEpochMilli()))); } docs.add(new BsonDocument( "field", new BsonNull())); insertBsonDocuments(tableName, docs.toArray(new BsonDocument[0])); try (Connection connection = getConnection()) { final Statement statement = getDocumentDbStatement(connection); final Locale originalLocale = Locale.getDefault(); try { Locale.setDefault(Locale.SIMPLIFIED_CHINESE); // Get date parts. final ResultSet resultSet = statement.executeQuery( String.format("SELECT " + "MONTHNAME(\"field\"), \"field\"" + "FROM \"%s\".\"%s\"", getDatabaseName(), tableName)); Assertions.assertNotNull(resultSet); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals( new Timestamp(startingDateTime.toInstant().toEpochMilli()), resultSet.getTimestamp(2)); Assertions.assertEquals( Month.JANUARY.getDisplayName(TextStyle.FULL, Locale.getDefault()), resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals( Month.FEBRUARY.getDisplayName(TextStyle.FULL, Locale.getDefault()), resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals( Month.MARCH.getDisplayName(TextStyle.FULL, Locale.getDefault()), resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals( Month.APRIL.getDisplayName(TextStyle.FULL, Locale.getDefault()), resultSet.getString(1)); Assertions.assertTrue(resultSet.next()); Assertions.assertEquals( 
                        Month.MAY.getDisplayName(TextStyle.FULL, Locale.getDefault()),
                        resultSet.getString(1));
                Assertions.assertTrue(resultSet.next());
                Assertions.assertEquals(
                        Month.JUNE.getDisplayName(TextStyle.FULL, Locale.getDefault()),
                        resultSet.getString(1));
                Assertions.assertTrue(resultSet.next());
                Assertions.assertEquals(
                        Month.JULY.getDisplayName(TextStyle.FULL, Locale.getDefault()),
                        resultSet.getString(1));
                Assertions.assertTrue(resultSet.next());
                Assertions.assertEquals(
                        Month.AUGUST.getDisplayName(TextStyle.FULL, Locale.getDefault()),
                        resultSet.getString(1));
                Assertions.assertTrue(resultSet.next());
                Assertions.assertEquals(
                        Month.SEPTEMBER.getDisplayName(TextStyle.FULL, Locale.getDefault()),
                        resultSet.getString(1));
                Assertions.assertTrue(resultSet.next());
                Assertions.assertEquals(
                        Month.OCTOBER.getDisplayName(TextStyle.FULL, Locale.getDefault()),
                        resultSet.getString(1));
                Assertions.assertTrue(resultSet.next());
                Assertions.assertEquals(
                        Month.NOVEMBER.getDisplayName(TextStyle.FULL, Locale.getDefault()),
                        resultSet.getString(1));
                Assertions.assertTrue(resultSet.next());
                Assertions.assertEquals(
                        Month.DECEMBER.getDisplayName(TextStyle.FULL, Locale.getDefault()),
                        resultSet.getString(1));
                Assertions.assertTrue(resultSet.next());
                Assertions.assertEquals(
                        Month.JANUARY.getDisplayName(TextStyle.FULL, Locale.getDefault()),
                        resultSet.getString(1));
                // The final document carries a BsonNull "field": MONTHNAME yields SQL NULL.
                Assertions.assertTrue(resultSet.next());
                Assertions.assertNull(resultSet.getString(1));
                Assertions.assertFalse(resultSet.next());
            } finally {
                // Restore the JVM-wide default locale changed to SIMPLIFIED_CHINESE above.
                Locale.setDefault(originalLocale);
            }
        }
    }

    /**
     * Tests that MONTHNAME returns NULL for NULL arguments (typed NULL, untyped NULL,
     * and a column whose value is BSON null).
     *
     * @param testEnvironment the test environment to run against.
     * @throws SQLException occurs if query fails.
     */
    @DisplayName("Tests MONTHNAME for NULL")
    @ParameterizedTest(name = "testMonthNameForNull - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testMonthNameForNull(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testMonthNameForNull";
        final List<BsonDocument> docs = new ArrayList<>();
        docs.add(new BsonDocument(
                "field",
                new BsonNull()));
        insertBsonDocuments(tableName, docs.toArray(new BsonDocument[0]));
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);

            // MONTHNAME of a NULL cast to TIMESTAMP.
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT %n"
                            + " MONTHNAME(CAST(NULL AS TIMESTAMP))%n"
                            + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertNull(resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());

            // MONTHNAME of an untyped NULL literal.
            final ResultSet resultSet2 = statement.executeQuery(
                    String.format("SELECT %n"
                            + " MONTHNAME(NULL)%n"
                            + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet2);
            Assertions.assertTrue(resultSet2.next());
            Assertions.assertNull(resultSet2.getString(1));
            Assertions.assertFalse(resultSet2.next());

            // MONTHNAME of a column holding BSON null, cast to TIMESTAMP.
            final ResultSet resultSet3 = statement.executeQuery(
                    String.format("SELECT %n"
                            + " MONTHNAME(CAST(\"field\" AS TIMESTAMP))%n"
                            + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet3);
            Assertions.assertTrue(resultSet3.next());
            Assertions.assertNull(resultSet3.getTimestamp(1));
            Assertions.assertFalse(resultSet3.next());
        }
    }

    /**
     * Tests that DAYNAME returns NULL for NULL arguments (typed NULL, untyped NULL,
     * and a column whose value is BSON null).
     *
     * @param testEnvironment the test environment to run against.
     * @throws SQLException occurs if query fails.
     */
    @DisplayName("Tests DAYNAME for NULL")
    @ParameterizedTest(name = "testDayNameForNull - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testDayNameForNull(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testDayNameForNull";
        final List<BsonDocument> docs = new ArrayList<>();
        docs.add(new BsonDocument(
                "field",
                new BsonNull()));
        insertBsonDocuments(tableName, docs.toArray(new BsonDocument[0]));
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // DAYNAME of a NULL cast to TIMESTAMP.
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT %n"
                            + " DAYNAME(CAST(NULL AS TIMESTAMP))%n"
                            + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertNull(resultSet.getString(1));
            Assertions.assertFalse(resultSet.next());

            // DAYNAME of an untyped NULL literal.
            final ResultSet resultSet2 = statement.executeQuery(
                    String.format("SELECT %n"
                            + " DAYNAME(NULL)%n"
                            + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet2);
            Assertions.assertTrue(resultSet2.next());
            Assertions.assertNull(resultSet2.getString(1));
            Assertions.assertFalse(resultSet2.next());

            // DAYNAME of a column holding BSON null, cast to TIMESTAMP.
            final ResultSet resultSet3 = statement.executeQuery(
                    String.format("SELECT %n"
                            + " DAYNAME(CAST(\"field\" AS TIMESTAMP))%n"
                            + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet3);
            Assertions.assertTrue(resultSet3.next());
            Assertions.assertNull(resultSet3.getTimestamp(1));
            Assertions.assertFalse(resultSet3.next());
        }
    }

    /**
     * Tests CURRENT_TIMESTAMP.
     *
     * @param testEnvironment the test environment to run against.
     * @throws SQLException occurs if query fails.
     */
    @DisplayName("Tests CURRENT_TIMESTAMP.")
    @ParameterizedTest(name = "testCurrentTimestamp - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testCurrentTimestamp(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testCurrentTimestamp";
        final long dateTime = Instant.parse("2020-02-03T04:05:06.00Z").toEpochMilli();
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
        doc1.append("field", new BsonDateTime(dateTime));
        insertBsonDocuments(tableName, new BsonDocument[]{doc1});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);

            // Get current timestamp.
            final ResultSet resultSet1 = statement.executeQuery(
                    String.format("SELECT CURRENT_TIMESTAMP"
                            + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            // Shared validator: the returned value must be a TIMESTAMP no more than
            // one second behind the current wall clock. Returns the SQLException
            // (instead of throwing) because checked exceptions cannot escape a Function.
            final Function<ResultSet, SQLException> validateResultSet = (testResultSet) -> {
                try {
                    Assertions.assertNotNull(testResultSet);
                    Assertions.assertTrue(testResultSet.next());
                    Assertions.assertEquals(Types.TIMESTAMP,
                            testResultSet.getMetaData().getColumnType(1));
                    final Timestamp timestamp = testResultSet.getTimestamp(1);
                    Assertions.assertNotNull(timestamp);
                    final OffsetDateTime actualDateTime = timestamp.toInstant()
                            .atOffset(ZoneOffset.UTC);
                    final OffsetDateTime currentDateTime = Instant.now().atOffset(ZoneOffset.UTC);
                    final long diffInMilliSeconds = actualDateTime
                            .until(currentDateTime, ChronoUnit.MILLIS);
                    Assertions.assertTrue(diffInMilliSeconds >= 0);
                    Assertions.assertTrue(diffInMilliSeconds < 1000);
                    Assertions.assertFalse(testResultSet.next());
                    return null;
                } catch (SQLException e) {
                    return e;
                }
            };
            SQLException e = validateResultSet.apply(resultSet1);
            if (e != null) {
                throw e;
            }

            // Get current timestamp as alias column name.
            final ResultSet resultSet2 = statement.executeQuery(
                    String.format("SELECT CURRENT_TIMESTAMP AS \"cts\""
                            + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            e = validateResultSet.apply(resultSet2);
            if (e != null) {
                throw e;
            }

            // Where clause use: the stored 2020 value is in the past, so "<" matches
            // and ">" matches nothing.
            final ResultSet resultSet3 = statement.executeQuery(
                    String.format("SELECT \"field\" "
                            + " FROM \"%s\".\"%s\""
                            + " WHERE \"field\" < CURRENT_TIMESTAMP", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet3);
            Assertions.assertTrue(resultSet3.next());
            Assertions.assertEquals(dateTime, resultSet3.getTimestamp(1).getTime());
            Assertions.assertFalse(resultSet3.next());
            final ResultSet resultSet4 = statement.executeQuery(
                    String.format("SELECT \"field\" "
                            + " FROM \"%s\".\"%s\""
                            + " WHERE \"field\" > CURRENT_TIMESTAMP", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet4);
            Assertions.assertFalse(resultSet4.next());
        }
    }

    /**
     * Tests CURRENT_DATE.
     *
     * @param testEnvironment the test environment to run against.
     * @throws SQLException occurs if query fails.
     */
    @DisplayName("Tests CURRENT_DATE.")
    @ParameterizedTest(name = "testCurrentDate - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testCurrentDate(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testCurrentDate";
        final long dateTime = Instant.parse("2020-02-03T04:05:06.00Z").toEpochMilli();
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
        doc1.append("field", new BsonDateTime(dateTime));
        insertBsonDocuments(tableName, new BsonDocument[]{doc1});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            // Get current date.
            final ResultSet resultSet1 = statement.executeQuery(
                    String.format("SELECT CURRENT_DATE"
                            + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            // Shared validator: column must be DATE and render the same calendar day
            // as "now". Returns the SQLException (instead of throwing) because checked
            // exceptions cannot escape a Function.
            // NOTE(review): comparing day strings can flake if the query and the check
            // straddle midnight — confirm acceptable for CI.
            final Function<ResultSet, SQLException> validateResultSet = (testResultSet) -> {
                try {
                    Assertions.assertNotNull(testResultSet);
                    Assertions.assertTrue(testResultSet.next());
                    Assertions.assertEquals(Types.DATE,
                            testResultSet.getMetaData().getColumnType(1));
                    final Date actualDate = testResultSet.getDate(1);
                    Assertions.assertNotNull(actualDate);
                    final Date currentDate = new Date(Instant.now().toEpochMilli());
                    Assertions.assertEquals(currentDate.toString(), actualDate.toString());
                    Assertions.assertFalse(testResultSet.next());
                    return null;
                } catch (SQLException e) {
                    return e;
                }
            };
            SQLException e = validateResultSet.apply(resultSet1);
            if (e != null) {
                throw e;
            }

            // Get current date as alias column name.
            final ResultSet resultSet2 = statement.executeQuery(
                    String.format("SELECT CURRENT_DATE AS \"cts\""
                            + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            e = validateResultSet.apply(resultSet2);
            if (e != null) {
                throw e;
            }

            // Where clause use: the stored 2020 value is in the past, so "<" matches
            // and ">" matches nothing.
            final ResultSet resultSet3 = statement.executeQuery(
                    String.format("SELECT \"field\" "
                            + " FROM \"%s\".\"%s\""
                            + " WHERE \"field\" < CURRENT_DATE", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet3);
            Assertions.assertTrue(resultSet3.next());
            Assertions.assertEquals(dateTime, resultSet3.getDate(1).getTime());
            Assertions.assertFalse(resultSet3.next());
            final ResultSet resultSet4 = statement.executeQuery(
                    String.format("SELECT \"field\" "
                            + " FROM \"%s\".\"%s\""
                            + " WHERE \"field\" > CURRENT_DATE", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet4);
            Assertions.assertFalse(resultSet4.next());
        }
    }

    /**
     * Tests CURRENT_TIME.
     *
     * @param testEnvironment the test environment to run against.
     * @throws SQLException occurs if query fails.
     */
    @DisplayName("Tests CURRENT_TIME.")
    @ParameterizedTest(name = "testCurrentTime - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testCurrentTime(final DocumentDbTestEnvironment testEnvironment) throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testCurrentTime";
        final long dateTime = Instant.parse("2020-02-03T04:05:06.00Z").toEpochMilli();
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
        doc1.append("field", new BsonDateTime(dateTime));
        insertBsonDocuments(tableName, new BsonDocument[]{doc1});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);

            // Get current time.
            final ResultSet resultSet1 = statement.executeQuery(
                    String.format("SELECT CURRENT_TIME"
                            + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            // Shared validator: column must be TIME and within 2 seconds of "now".
            // Returns the SQLException (instead of throwing) because checked
            // exceptions cannot escape a Function.
            // NOTE(review): timeDiff is not checked for being non-negative and
            // LocalTime comparison wraps at midnight — confirm this tolerance is intended.
            final Function<ResultSet, SQLException> validateResultSet = (testResultSet) -> {
                try {
                    Assertions.assertNotNull(testResultSet);
                    Assertions.assertTrue(testResultSet.next());
                    Assertions.assertEquals(Types.TIME,
                            testResultSet.getMetaData().getColumnType(1));
                    final Time actualDate = testResultSet.getTime(1);
                    Assertions.assertNotNull(actualDate);
                    final Time currentTime = new Time(Instant.now().toEpochMilli());
                    final long timeDiff = actualDate.toLocalTime()
                            .until(currentTime.toLocalTime(), ChronoUnit.MILLIS);
                    Assertions.assertTrue(timeDiff < 2000);
                    Assertions.assertFalse(testResultSet.next());
                    return null;
                } catch (SQLException e) {
                    return e;
                }
            };
            SQLException e = validateResultSet.apply(resultSet1);
            if (e != null) {
                throw e;
            }

            // Get current time as alias column name.
            final ResultSet resultSet2 = statement.executeQuery(
                    String.format("SELECT CURRENT_TIME AS \"cts\""
                            + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            e = validateResultSet.apply(resultSet2);
            if (e != null) {
                throw e;
            }

            // Where clause use - must cast to TIME to make it comparable.
            // The stored 04:05:06 time-of-day is expected to be before "now";
            // "<" matches the single row and ">" matches nothing.
            final ResultSet resultSet3 = statement.executeQuery(
                    String.format("SELECT \"field\""
                            + " FROM \"%s\".\"%s\""
                            + " WHERE CAST(\"field\" AS TIME) < CURRENT_TIME",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet3);
            Assertions.assertTrue(resultSet3.next());
            Assertions.assertEquals(dateTime, resultSet3.getDate(1).getTime());
            Assertions.assertFalse(resultSet3.next());
            final ResultSet resultSet4 = statement.executeQuery(
                    String.format("SELECT \"field\""
                            + " FROM \"%s\".\"%s\""
                            + " WHERE CAST(\"field\" AS TIME) > CURRENT_TIME",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet4);
            Assertions.assertFalse(resultSet4.next());
        }
    }

    /**
     * Tests for queries FLOOR(... TO ...) in select clause.
     *
     * @param testEnvironment the test environment to run against.
     * @throws SQLException occurs if query fails.
     */
    @DisplayName("Tests for queries FLOOR(... TO ...) in select clause.")
    @ParameterizedTest(name = "testQuerySelectFloorForDate - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testQuerySelectFloorForDate(final DocumentDbTestEnvironment testEnvironment)
            throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testQuerySelectFloorForDate";
        // Two fixtures: an arbitrary 2020 instant and the epoch boundary.
        final Instant dateTime = Instant.parse("2020-02-03T12:34:56.78Z");
        final OffsetDateTime offsetDateTime = dateTime.atOffset(ZoneOffset.UTC);
        final Instant epochDateTime = Instant.EPOCH;
        final OffsetDateTime offsetEpochDateTime = epochDateTime.atOffset(ZoneOffset.UTC);
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
        doc1.append("field", new BsonDateTime(dateTime.toEpochMilli()));
        doc1.append("fieldEpoch", new BsonDateTime(epochDateTime.toEpochMilli()));
        insertBsonDocuments(tableName, new BsonDocument[]{doc1});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);

            // FLOOR to each supported unit for both fixtures, plus a NULL argument.
            final ResultSet resultSet = statement.executeQuery(
                    String.format("SELECT"
                            + " FLOOR(\"field\" TO YEAR),"
                            + " FLOOR(\"field\" TO MONTH),"
                            + " FLOOR(\"field\" TO QUARTER),"
                            + " FLOOR(\"field\" TO DAY),"
                            + " FLOOR(\"field\" TO HOUR),"
                            + " FLOOR(\"field\" TO MINUTE),"
                            + " FLOOR(\"field\" TO SECOND),"
                            + " FLOOR(\"field\" TO MILLISECOND),"
                            + " FLOOR(\"fieldEpoch\" TO YEAR),"
                            + " FLOOR(\"fieldEpoch\" TO MONTH),"
                            + " FLOOR(\"fieldEpoch\" TO QUARTER),"
                            + " FLOOR(\"fieldEpoch\" TO DAY),"
                            + " FLOOR(\"fieldEpoch\" TO HOUR),"
                            + " FLOOR(\"fieldEpoch\" TO MINUTE),"
                            + " FLOOR(\"fieldEpoch\" TO SECOND),"
                            + " FLOOR(\"fieldEpoch\" TO MILLISECOND),"
                            + " FLOOR(NULL TO MILLISECOND)"
                            + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet);
            Assertions.assertTrue(resultSet.next());
            Assertions.assertEquals(
                    getTruncatedTimestamp(offsetDateTime, 1),
                    resultSet.getTimestamp(1).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(offsetDateTime, offsetDateTime.getMonthValue()),
                    resultSet.getTimestamp(2).getTime());
            // Feb. 03 is in Q1, so QUARTER floors to Jan. 01.
            Assertions.assertEquals(
                    getTruncatedTimestamp(offsetDateTime, 1),
                    resultSet.getTimestamp(3).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(dateTime, ChronoUnit.DAYS),
                    resultSet.getTimestamp(4).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(dateTime, ChronoUnit.HOURS),
                    resultSet.getTimestamp(5).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(dateTime, ChronoUnit.MINUTES),
                    resultSet.getTimestamp(6).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(dateTime, ChronoUnit.SECONDS),
                    resultSet.getTimestamp(7).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(dateTime, ChronoUnit.MILLIS),
                    resultSet.getTimestamp(8).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(offsetEpochDateTime, 1),
                    resultSet.getTimestamp(9).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(offsetEpochDateTime, offsetEpochDateTime.getMonthValue()),
                    resultSet.getTimestamp(10).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(offsetEpochDateTime, 1),
                    resultSet.getTimestamp(11).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(epochDateTime, ChronoUnit.DAYS),
                    resultSet.getTimestamp(12).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(epochDateTime, ChronoUnit.HOURS),
                    resultSet.getTimestamp(13).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(epochDateTime, ChronoUnit.MINUTES),
                    resultSet.getTimestamp(14).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(epochDateTime, ChronoUnit.SECONDS),
                    resultSet.getTimestamp(15).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(epochDateTime, ChronoUnit.MILLIS),
                    resultSet.getTimestamp(16).getTime());
            Assertions.assertNull(resultSet.getTimestamp(17));
            Assertions.assertFalse(resultSet.next());

            // Test WEEK (to Monday) truncation. 2020-02-03 is a Monday, so days 0-6
            // all floor to the same Monday and day 7 floors to the next week.
            final ResultSet resultSet1 = statement.executeQuery(String.format(
                    "SELECT"
                            + " FLOOR(\"field\" TO WEEK)," // Monday
                            + " FLOOR(TIMESTAMPADD(DAY, 1, \"field\") TO WEEK)," // Tuesday
                            + " FLOOR(TIMESTAMPADD(DAY, 2, \"field\") TO WEEK)," // Wednesday
                            + " FLOOR(TIMESTAMPADD(DAY, 3, \"field\") TO WEEK)," // Thursday
                            + " FLOOR(TIMESTAMPADD(DAY, 4, \"field\") TO WEEK)," // Friday
                            + " FLOOR(TIMESTAMPADD(DAY, 5, \"field\") TO WEEK)," // Saturday
                            + " FLOOR(TIMESTAMPADD(DAY, 6, \"field\") TO WEEK)," // Sunday
                            + " FLOOR(TIMESTAMPADD(DAY, 7, \"field\") TO WEEK)," // Next week
                            + " FLOOR(NULL TO WEEK)" // NULL
                            + " FROM \"%s\".\"%s\"",
                    getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet1);
            Assertions.assertTrue(resultSet1.next());
            Assertions.assertEquals(
                    getTruncatedTimestamp(dateTime, ChronoUnit.DAYS),
                    resultSet1.getTimestamp(1).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(dateTime, ChronoUnit.DAYS),
                    resultSet1.getTimestamp(2).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(dateTime, ChronoUnit.DAYS),
                    resultSet1.getTimestamp(3).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(dateTime, ChronoUnit.DAYS),
                    resultSet1.getTimestamp(4).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(dateTime, ChronoUnit.DAYS),
                    resultSet1.getTimestamp(5).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(dateTime, ChronoUnit.DAYS),
                    resultSet1.getTimestamp(6).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(dateTime, ChronoUnit.DAYS),
                    resultSet1.getTimestamp(7).getTime());
            // Next week
            Assertions.assertEquals(
                    getTruncatedTimestamp(
                            dateTime
                                    .atOffset(ZoneOffset.UTC)
                                    .plus(1, ChronoUnit.WEEKS)
                                    .toInstant(),
                            ChronoUnit.DAYS),
                    resultSet1.getTimestamp(8).getTime());
            Assertions.assertNull(resultSet1.getTimestamp(9));
            Assertions.assertFalse(resultSet1.next());

            // Test QUARTER truncation across all four quarters (offsets of ~100 days).
            final ResultSet resultSet2 = statement.executeQuery(String.format(
                    "SELECT %n"
                            + " FLOOR(\"field\" TO QUARTER), %n"
                            + " FLOOR(TIMESTAMPADD(DAY, 100, \"field\") TO QUARTER), %n"
                            + " FLOOR(TIMESTAMPADD(DAY, 200, \"field\") TO QUARTER), %n"
                            + " FLOOR(TIMESTAMPADD(DAY, 300, \"field\") TO QUARTER), %n"
                            + " FLOOR(NULL TO QUARTER) %n"
                            + " FROM \"%s\".\"%s\"",
                    getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet2);
            Assertions.assertTrue(resultSet2.next());
            Assertions.assertEquals(
                    getTruncatedTimestamp(offsetDateTime, 1), // Jan. 01
                    resultSet2.getTimestamp(1).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(offsetDateTime, 4), // Apr. 01
                    resultSet2.getTimestamp(2).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(offsetDateTime, 7), // Jul. 01
                    resultSet2.getTimestamp(3).getTime());
            Assertions.assertEquals(
                    getTruncatedTimestamp(offsetDateTime, 10), // Oct. 01
                    resultSet2.getTimestamp(4).getTime());
            Assertions.assertNull(resultSet2.getTimestamp(5)); // NULL
            Assertions.assertFalse(resultSet2.next());

            // Don't support FLOOR for numeric, yet.
            // FLOOR of a numeric literal is expected to fail translation.
            final String errorMessage = Assertions.assertThrows(
                    SQLException.class,
                    () -> statement.executeQuery(
                            String.format("SELECT FLOOR(12.34) FROM \"%s\".\"%s\"",
                                    getDatabaseName(), tableName))).getMessage();
            Assertions.assertTrue(
                    errorMessage.startsWith(String.format(
                            "Unable to parse SQL 'SELECT FLOOR(12.34) FROM \"%s\".\"testQuerySelectFloorForDate\"'.",
                            getDatabaseName()))
                            && errorMessage.endsWith(
                            "Additional info: 'Translation of FLOOR(12.34:DECIMAL(4, 2)) is not supported by DocumentDbRules''"));
        }
    }

    /**
     * Tests CURRENT_TIMESTAMP, CURRENT_TIME and CURRENT_DATE for multiple instances are the same value.
     *
     * @param testEnvironment the test environment to run against.
     * @throws SQLException occurs if query fails.
     */
    @DisplayName("Tests CURRENT_TIMESTAMP, CURRENT_TIME and CURRENT_DATE for multiple instances are the same value.")
    @ParameterizedTest(name = "testCurrentTimestampMultipleInstances - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testCurrentTimestampMultipleInstances(final DocumentDbTestEnvironment testEnvironment)
            throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testCurrentTimestampMultipleInstances";
        final long dateTime = Instant.parse("2020-02-03T04:05:06.00Z").toEpochMilli();
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
        doc1.append("field", new BsonDateTime(dateTime));
        insertBsonDocuments(tableName, new BsonDocument[]{doc1});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);

            // All three "current" functions in one query must report the same instant
            // (read via getTimestamp for a direct comparison).
            final ResultSet resultSet1 = statement.executeQuery(
                    String.format(
                            "SELECT CURRENT_TIMESTAMP AS ts, CURRENT_TIME AS t, CURRENT_DATE AS d"
                                    + " FROM \"%s\".\"%s\"", getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet1);
            Assertions.assertTrue(resultSet1.next());
            Assertions.assertEquals(Types.TIMESTAMP, resultSet1.getMetaData().getColumnType(1));
            Assertions.assertEquals(Types.TIME, resultSet1.getMetaData().getColumnType(2));
            Assertions.assertEquals(Types.DATE, resultSet1.getMetaData().getColumnType(3));
            final Timestamp timestamp = resultSet1.getTimestamp(1);
            final Timestamp time = resultSet1.getTimestamp(2);
            final Timestamp date = resultSet1.getTimestamp(3);
            Assertions.assertNotNull(timestamp);
            Assertions.assertNotNull(time);
            Assertions.assertNotNull(date);
            Assertions.assertEquals(timestamp, time);
            Assertions.assertEquals(timestamp, date);
            Assertions.assertFalse(resultSet1.next());
        }
    }

    /**
     * Tests select dates with AND condition.
     *
     * @param testEnvironment the test environment to run against.
     * @throws SQLException occurs if query fails.
     */
    @DisplayName("Tests select dates with AND condition.")
    @ParameterizedTest(name = "testSelectDateWithAnd - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testSelectDateWithAnd(final DocumentDbTestEnvironment testEnvironment)
            throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testSelectDateWithAnd";
        final long dateTime = Instant.parse("2020-02-03T04:05:06.00Z").toEpochMilli();
        // One row with a real date and one with BSON null, to exercise three-valued logic.
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
        doc1.append("field", new BsonDateTime(dateTime));
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102}");
        doc2.append("field", new BsonNull());
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);

            // Find condition that doesn't exist.
            final ResultSet resultSet1 = statement.executeQuery(
                    String.format(
                            "SELECT \"field\" > '2021-01-01' AND \"field\" < '2020-02-01' FROM \"%s\".\"%s\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet1);
            Assertions.assertTrue(resultSet1.next());
            // Non-NULL result: the contradictory range evaluates to FALSE.
            Assertions.assertEquals(Types.BOOLEAN, resultSet1.getMetaData().getColumnType(1));
            final Boolean aBoolean = resultSet1.getBoolean(1);
            Assertions.assertFalse(resultSet1.wasNull());
            Assertions.assertNotNull(aBoolean);
            Assertions.assertFalse(aBoolean);
            // NULL result: comparisons against the BSON-null row yield SQL NULL;
            // getBoolean then returns the default and wasNull() reports true.
            Assertions.assertTrue(resultSet1.next());
            Assertions.assertNull(resultSet1.getObject(1));
            resultSet1.getBoolean(1);
            Assertions.assertTrue(resultSet1.wasNull());
            Assertions.assertFalse(resultSet1.next());
        }
    }

    /**
     * Tests selecting a BSON timestamp field and reading it via the
     * getTimestamp/getDate/getTime/getString accessors.
     *
     * NOTE(review): the @DisplayName and @ParameterizedTest name below look
     * copy-pasted from testSelectDateWithAnd — confirm intended before renaming.
     *
     * @param testEnvironment the test environment to run against.
     * @throws SQLException occurs if query fails.
     */
    @DisplayName("Tests select dates with AND condition.")
    @ParameterizedTest(name = "testSelectDateWithAnd - [{index}] - {arguments}")
    @MethodSource({"getTestEnvironments"})
    void testSelectTimestamp(final DocumentDbTestEnvironment testEnvironment)
            throws SQLException {
        setTestEnvironment(testEnvironment);
        final String tableName = "testSelectTimestamp";
        final Instant dateTime = Instant.parse("2020-02-03T04:05:06.00Z");
        // BsonTimestamp has whole-second precision (plus an ordinal), unlike BsonDateTime.
        final BsonDocument doc1 = BsonDocument.parse("{\"_id\": 101}");
        doc1.append("field", new BsonTimestamp((int) dateTime.getEpochSecond(), 1));
        final BsonDocument doc2 = BsonDocument.parse("{\"_id\": 102}");
        doc2.append("field", new BsonNull());
        insertBsonDocuments(tableName, new BsonDocument[]{doc1, doc2});
        try (Connection connection = getConnection()) {
            final Statement statement = getDocumentDbStatement(connection);
            final ResultSet resultSet1 = statement.executeQuery(
                    String.format(
                            "SELECT \"field\" FROM \"%s\".\"%s\"",
                            getDatabaseName(), tableName));
            Assertions.assertNotNull(resultSet1);
            Assertions.assertTrue(resultSet1.next());
            // Non-NULL result: same instant through every temporal accessor.
            Assertions.assertEquals(Types.TIMESTAMP,
                    resultSet1.getMetaData().getColumnType(1));
            final Timestamp aTimestamp = resultSet1.getTimestamp(1);
            Assertions.assertFalse(resultSet1.wasNull());
            Assertions.assertNotNull(aTimestamp);
            Assertions.assertEquals(dateTime.toEpochMilli(), aTimestamp.getTime());
            final Date aDate = resultSet1.getDate(1);
            Assertions.assertNotNull(aDate);
            Assertions.assertEquals(dateTime.toEpochMilli(), aDate.getTime());
            final Time aTime = resultSet1.getTime(1);
            Assertions.assertNotNull(aTime);
            Assertions.assertEquals(dateTime.toEpochMilli(), aTime.getTime());
            final String aString = resultSet1.getString(1);
            Assertions.assertNotNull(aString);
            Assertions.assertEquals(new Timestamp(dateTime.toEpochMilli()).toString(), aString);
            // NULL result: getTimestamp on the BSON-null row sets wasNull().
            Assertions.assertTrue(resultSet1.next());
            Assertions.assertNull(resultSet1.getObject(1));
            resultSet1.getTimestamp(1);
            Assertions.assertTrue(resultSet1.wasNull());
            Assertions.assertFalse(resultSet1.next());
        }
    }

    /**
     * Returns the epoch milliseconds of midnight UTC on the first day of the given
     * month in the given timestamp's year (used as the expected value for
     * FLOOR ... TO YEAR/MONTH/QUARTER).
     */
    private long getTruncatedTimestamp(final OffsetDateTime offsetDateTime, final int monthValue) {
        return OffsetDateTime.of(
                offsetDateTime.getYear(), monthValue, 1, 0, 0, 0, 0, ZoneOffset.UTC)
                .toInstant()
                .toEpochMilli();
    }

    /**
     * Returns the epoch milliseconds of the given instant truncated (in UTC) to the
     * given unit (used as the expected value for FLOOR ... TO DAY and finer units).
     */
    private long getTruncatedTimestamp(final Instant dateTime, final ChronoUnit chronoUnit) {
        return dateTime
                .atOffset(ZoneOffset.UTC)
                .truncatedTo(chronoUnit)
                .toInstant()
                .toEpochMilli();
    }
}
4,499