gt
stringclasses
1 value
context
stringlengths
2.05k
161k
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.prestosql.server.protocol;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.ImmutableList;
import io.prestosql.Session;
import io.prestosql.client.ClientCapabilities;
import io.prestosql.client.Column;
import io.prestosql.spi.Page;
import io.prestosql.spi.PrestoException;
import io.prestosql.spi.block.Block;
import io.prestosql.spi.block.BlockBuilder;
import io.prestosql.spi.connector.ConnectorSession;
import io.prestosql.spi.type.ArrayType;
import io.prestosql.spi.type.MapType;
import io.prestosql.spi.type.RowType;
import io.prestosql.spi.type.SqlTimestamp;
import io.prestosql.spi.type.SqlTimestampWithTimeZone;
import io.prestosql.spi.type.TimestampType;
import io.prestosql.spi.type.TimestampWithTimeZoneType;
import io.prestosql.spi.type.Type;

import javax.annotation.Nullable;

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Consumer;

import static com.google.common.base.MoreObjects.toStringHelper;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Verify.verify;
import static com.google.common.base.Verify.verifyNotNull;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.ImmutableMap.toImmutableMap;
import static io.prestosql.spi.StandardErrorCode.SERIALIZATION_ERROR;
import static io.prestosql.spi.type.BigintType.BIGINT;
import static io.prestosql.spi.type.BooleanType.BOOLEAN;
import static java.lang.String.format;
import static java.util.Collections.unmodifiableList;
import static java.util.Objects.requireNonNull;

/**
 * Lazily converts a sequence of {@link Page}s into client-facing rows of
 * {@code List<Object>} values, one list per row.
 *
 * <p>Rows that contain a value which cannot be serialized are skipped; the
 * failure is forwarded to the optional exception consumer instead of
 * propagating to the caller. For clients that do not advertise the
 * {@link ClientCapabilities#PARAMETRIC_DATETIME} capability, timestamp values
 * (including those nested inside arrays, maps and rows) are rounded to the
 * legacy default precision of 3.
 *
 * <p>Not thread-safe: this object is a single-use iterator.
 */
public class QueryResultRows
        extends AbstractIterator<List<Object>>
        implements Iterable<List<Object>>
{
    private final ConnectorSession session;
    private final Optional<List<ColumnAndType>> columns;
    private final Deque<Page> pages;
    private final Optional<Consumer<Throwable>> exceptionConsumer;
    private final long totalRows;
    private final boolean supportsParametricDateTime;

    // Page currently being iterated; null once all pages are exhausted.
    private Page currentPage;
    // Global 0-based row counter across all pages (used only for error reporting).
    private int rowPosition = -1;
    // 0-based position within currentPage.
    private int inPageIndex = -1;

    private QueryResultRows(Session session, Optional<List<ColumnAndType>> columns, List<Page> pages, Consumer<Throwable> exceptionConsumer)
    {
        this.session = requireNonNull(session, "session is null").toConnectorSession();
        this.columns = requireNonNull(columns, "columns is null");
        this.pages = new ArrayDeque<>(requireNonNull(pages, "pages is null"));
        this.exceptionConsumer = Optional.ofNullable(exceptionConsumer);
        this.totalRows = countRows(pages);
        this.currentPage = this.pages.pollFirst();
        this.supportsParametricDateTime = session.getClientCapabilities().contains(ClientCapabilities.PARAMETRIC_DATETIME.toString());

        verify(totalRows == 0 || (totalRows > 0 && columns.isPresent()), "data present without columns and types");
    }

    public boolean isEmpty()
    {
        return totalRows == 0;
    }

    public Optional<List<Column>> getColumns()
    {
        return columns.map(columns -> columns.stream()
                .map(ColumnAndType::getColumn)
                .collect(toImmutableList()));
    }

    /**
     * Returns expected row count (we don't know yet if every row is serializable).
     */
    @VisibleForTesting
    public long getTotalRowsCount()
    {
        return totalRows;
    }

    /**
     * Returns the update count if (and only if) the result is exactly one row
     * with a single non-null BIGINT column.
     */
    public Optional<Long> getUpdateCount()
    {
        // We should have exactly single bigint value as an update count.
        if (totalRows != 1 || columns.isEmpty()) {
            return Optional.empty();
        }

        List<ColumnAndType> columns = this.columns.get();
        if (columns.size() != 1 || !columns.get(0).getType().equals(BIGINT)) {
            return Optional.empty();
        }

        verifyNotNull(currentPage, "currentPage is null");
        Number value = (Number) columns.get(0).getType().getObjectValue(session, currentPage.getBlock(0), 0);

        return Optional.ofNullable(value).map(Number::longValue);
    }

    @Override
    protected List<Object> computeNext()
    {
        while (true) {
            if (currentPage == null) {
                return endOfData();
            }

            inPageIndex++;
            if (inPageIndex >= currentPage.getPositionCount()) {
                currentPage = pages.pollFirst();
                if (currentPage == null) {
                    return endOfData();
                }
                inPageIndex = 0;
            }

            rowPosition++;

            Optional<List<Object>> row = getRowValues();
            if (row.isEmpty()) {
                // row contained a non-serializable value; exception was reported, keep going
                continue;
            }
            return row.get();
        }
    }

    private Optional<List<Object>> getRowValues()
    {
        // types are present if data is present
        List<ColumnAndType> columns = this.columns.orElseThrow();
        List<Object> row = new ArrayList<>(columns.size());

        for (int channel = 0; channel < currentPage.getChannelCount(); channel++) {
            ColumnAndType column = columns.get(channel);
            Type type = column.getType();
            Block block = currentPage.getBlock(channel);

            try {
                if (supportsParametricDateTime) {
                    row.add(channel, type.getObjectValue(session, block, inPageIndex));
                }
                else {
                    row.add(channel, getLegacyValue(type.getObjectValue(session, block, inPageIndex), type));
                }
            }
            catch (Throwable throwable) {
                propagateException(rowPosition, column, throwable);
                // skip row as it contains non-serializable value
                return Optional.empty();
            }
        }

        return Optional.of(unmodifiableList(row));
    }

    /**
     * Recursively rounds timestamp / timestamp-with-timezone values to the
     * legacy precision (3), descending into arrays, maps and rows.
     */
    @SuppressWarnings("unchecked")
    private Object getLegacyValue(Object value, Type type)
    {
        if (value == null) {
            return null;
        }

        if (!supportsParametricDateTime) {
            // for legacy clients we need to round timestamp and timestamp with timezone to default precision (3)
            if (type instanceof TimestampType) {
                return ((SqlTimestamp) value).roundTo(3);
            }

            if (type instanceof TimestampWithTimeZoneType) {
                return ((SqlTimestampWithTimeZone) value).roundTo(3);
            }
        }

        if (type instanceof ArrayType) {
            Type elementType = ((ArrayType) type).getElementType();

            if (!(elementType instanceof TimestampType || elementType instanceof TimestampWithTimeZoneType)) {
                return value;
            }

            // BUG FIX: the original mapped over the *container* ("value") instead of
            // each element, so array elements were never converted.
            return ((List<Object>) value).stream()
                    .map(element -> getLegacyValue(element, elementType))
                    .collect(toImmutableList());
        }

        if (type instanceof MapType) {
            Type keyType = ((MapType) type).getKeyType();
            Type valueType = ((MapType) type).getValueType();

            return ((Map<Object, Object>) value).entrySet().stream()
                    .collect(toImmutableMap(entry -> getLegacyValue(entry.getKey(), keyType), entry -> getLegacyValue(entry.getValue(), valueType)));
        }

        if (type instanceof RowType) {
            List<RowType.Field> fields = ((RowType) type).getFields();

            if (value instanceof Map) {
                Map<String, Object> values = (Map<String, Object>) value;
                // BUG FIX: the original looked up values.get(field.getName()) with an
                // Optional<String> key against a Map<String, Object>, which always
                // returned null (and toImmutableMap rejects null values).
                return fields.stream()
                        .collect(toImmutableMap(
                                field -> field.getName().orElseThrow(),
                                field -> getLegacyValue(values.get(field.getName().orElseThrow()), field.getType())));
            }

            if (value instanceof List) {
                List<Object> values = (List<Object>) value;
                List<Type> types = fields.stream()
                        .map(RowType.Field::getType)
                        .collect(toImmutableList());

                ImmutableList.Builder<Object> result = ImmutableList.builder();
                for (int i = 0; i < values.size(); i++) {
                    result.add(getLegacyValue(values.get(i), types.get(i)));
                }
                return result.build();
            }
        }

        return value;
    }

    private void propagateException(int row, ColumnAndType column, Throwable cause)
    {
        // columns and rows are 0-indexed internally; report them 1-indexed
        String message = format("Could not serialize column '%s' of type '%s' at position %d:%d",
                column.getColumn().getName(),
                column.getType(),
                row + 1,
                column.getPosition() + 1);

        exceptionConsumer.ifPresent(consumer -> consumer.accept(new PrestoException(SERIALIZATION_ERROR, message, cause)));
    }

    @Override
    public Iterator<List<Object>> iterator()
    {
        return this;
    }

    private static long countRows(List<Page> pages)
    {
        return pages.stream()
                .map(Page::getPositionCount)
                .map(Integer::longValue)
                .reduce(Long::sum)
                .orElse(0L);
    }

    @Override
    public String toString()
    {
        return toStringHelper(this)
                .add("columns", columns)
                .add("totalRowsCount", getTotalRowsCount())
                .add("pagesCount", this.pages.size())
                .toString();
    }

    public static QueryResultRows empty(Session session)
    {
        return new QueryResultRows(session, Optional.empty(), ImmutableList.of(), null);
    }

    public static Builder queryResultRowsBuilder(Session session)
    {
        return new Builder(session);
    }

    public static class Builder
    {
        private final Session session;
        private ImmutableList.Builder<Page> pages = ImmutableList.builder();
        private Optional<List<ColumnAndType>> columns = Optional.empty();
        private Consumer<Throwable> exceptionConsumer;

        public Builder(Session session)
        {
            this.session = requireNonNull(session, "session is null");
        }

        public Builder addPage(Page page)
        {
            pages.add(page);
            return this;
        }

        public Builder addPages(List<Page> page)
        {
            pages.addAll(page);
            return this;
        }

        public Builder withColumnsAndTypes(@Nullable List<Column> columns, @Nullable List<Type> types)
        {
            if (columns != null || types != null) {
                this.columns = Optional.of(combine(columns, types));
            }
            return this;
        }

        public Builder withSingleBooleanValue(Column column, boolean value)
        {
            BlockBuilder blockBuilder = BOOLEAN.createBlockBuilder(null, 1);
            BOOLEAN.writeBoolean(blockBuilder, value);
            // replaces (not appends to) any previously added pages
            pages = ImmutableList.<Page>builder().add(new Page(blockBuilder.build()));
            columns = Optional.of(combine(ImmutableList.of(column), ImmutableList.of(BOOLEAN)));
            return this;
        }

        public Builder withExceptionConsumer(Consumer<Throwable> exceptionConsumer)
        {
            this.exceptionConsumer = exceptionConsumer;
            return this;
        }

        public QueryResultRows build()
        {
            return new QueryResultRows(
                    session,
                    columns,
                    pages.build(),
                    exceptionConsumer);
        }

        private static List<ColumnAndType> combine(@Nullable List<Column> columns, @Nullable List<Type> types)
        {
            checkArgument(columns != null && types != null, "columns and types must be present at the same time");
            checkArgument(columns.size() == types.size(), "columns and types size mismatch");

            ImmutableList.Builder<ColumnAndType> builder = ImmutableList.builder();
            for (int i = 0; i < columns.size(); i++) {
                builder.add(new ColumnAndType(i, columns.get(i), types.get(i)));
            }
            return builder.build();
        }
    }

    private static class ColumnAndType
    {
        private final int position;
        private final Column column;
        private final Type type;

        private ColumnAndType(int position, Column column, Type type)
        {
            this.position = position;
            this.column = column;
            this.type = type;
        }

        public Column getColumn()
        {
            return column;
        }

        public Type getType()
        {
            return type;
        }

        public int getPosition()
        {
            return position;
        }

        @Override
        public String toString()
        {
            return toStringHelper(this)
                    .add("column", column)
                    .add("type", type)
                    .add("position", position)
                    .toString();
        }
    }
}
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapreduce.task.reduce;

import java.io.IOException;
import java.net.InetAddress;
import java.net.URI;
import java.net.UnknownHostException;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.task.reduce.MapHost.State;
import org.apache.hadoop.util.Progress;
import org.apache.hadoop.util.Time;

/**
 * Tracks which map outputs still need to be fetched for a reduce task,
 * hands out hosts to fetcher threads, penalizes failing hosts with an
 * exponentially growing back-off, and aborts the reducer when fetch
 * failures indicate it cannot make progress.
 *
 * <p>All mutable state is guarded by {@code synchronized (this)};
 * fetcher threads block in {@link #getHost()} until work is available.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class ShuffleSchedulerImpl<K,V> implements ShuffleScheduler<K,V> {
  // Per-fetcher-thread timestamp of when the current host was assigned.
  private static final ThreadLocal<Long> SHUFFLE_START =
      new ThreadLocal<Long>() {
        @Override
        protected Long initialValue() {
          return 0L;
        }
      };

  private static final Log LOG = LogFactory.getLog(ShuffleSchedulerImpl.class);
  private static final int MAX_MAPS_AT_ONCE = 20;
  private static final long INITIAL_PENALTY = 10000;
  private static final float PENALTY_GROWTH_RATE = 1.3f;
  private final static int REPORT_FAILURE_LIMIT = 10;
  private static final float BYTES_PER_MILLIS_TO_MBS = 1000f / 1024 / 1024;

  private final boolean[] finishedMaps;

  private final int totalMaps;
  private int remainingMaps;
  private Map<String, MapHost> mapLocations = new HashMap<String, MapHost>();
  private Set<MapHost> pendingHosts = new HashSet<MapHost>();
  private Set<TaskAttemptID> obsoleteMaps = new HashSet<TaskAttemptID>();

  private final TaskAttemptID reduceId;
  private final Random random = new Random();
  private final DelayQueue<Penalty> penalties = new DelayQueue<Penalty>();
  private final Referee referee = new Referee();
  private final Map<TaskAttemptID,IntWritable> failureCounts =
      new HashMap<TaskAttemptID,IntWritable>();
  private final Map<String,IntWritable> hostFailures =
      new HashMap<String,IntWritable>();
  private final TaskStatus status;
  private final ExceptionReporter reporter;
  private final int abortFailureLimit;
  private final Progress progress;
  private final Counters.Counter shuffledMapsCounter;
  private final Counters.Counter reduceShuffleBytes;
  private final Counters.Counter failedShuffleCounter;

  private final long startTime;
  private long lastProgressTime;
  private final CopyTimeTracker copyTimeTracker;

  private volatile int maxMapRuntime = 0;
  private final int maxFailedUniqueFetches;
  private final int maxFetchFailuresBeforeReporting;

  private long totalBytesShuffledTillNow = 0;
  private final DecimalFormat mbpsFormat = new DecimalFormat("0.00");

  private final boolean reportReadErrorImmediately;
  private long maxDelay = MRJobConfig.DEFAULT_MAX_SHUFFLE_FETCH_RETRY_DELAY;
  private int maxHostFailures;

  public ShuffleSchedulerImpl(JobConf job, TaskStatus status,
                          TaskAttemptID reduceId,
                          ExceptionReporter reporter,
                          Progress progress,
                          Counters.Counter shuffledMapsCounter,
                          Counters.Counter reduceShuffleBytes,
                          Counters.Counter failedShuffleCounter) {
    totalMaps = job.getNumMapTasks();
    abortFailureLimit = Math.max(30, totalMaps / 10);
    copyTimeTracker = new CopyTimeTracker();
    remainingMaps = totalMaps;
    finishedMaps = new boolean[remainingMaps];
    this.reporter = reporter;
    this.status = status;
    this.reduceId = reduceId;
    this.progress = progress;
    this.shuffledMapsCounter = shuffledMapsCounter;
    this.reduceShuffleBytes = reduceShuffleBytes;
    this.failedShuffleCounter = failedShuffleCounter;
    this.startTime = Time.monotonicNow();
    lastProgressTime = startTime;
    referee.start();
    this.maxFailedUniqueFetches = Math.min(totalMaps, 5);
    this.maxFetchFailuresBeforeReporting = job.getInt(
        MRJobConfig.SHUFFLE_FETCH_FAILURES, REPORT_FAILURE_LIMIT);
    this.reportReadErrorImmediately = job.getBoolean(
        MRJobConfig.SHUFFLE_NOTIFY_READERROR, true);

    this.maxDelay = job.getLong(MRJobConfig.MAX_SHUFFLE_FETCH_RETRY_DELAY,
        MRJobConfig.DEFAULT_MAX_SHUFFLE_FETCH_RETRY_DELAY);
    this.maxHostFailures = job.getInt(
        MRJobConfig.MAX_SHUFFLE_FETCH_HOST_FAILURES,
        MRJobConfig.DEFAULT_MAX_SHUFFLE_FETCH_HOST_FAILURES);
  }

  /**
   * Processes a task-completion event: successful maps register their
   * output location; failed/killed/obsolete maps are marked obsolete.
   */
  @Override
  public void resolve(TaskCompletionEvent event) {
    switch (event.getTaskStatus()) {
    case SUCCEEDED:
      URI u = getBaseURI(reduceId, event.getTaskTrackerHttp());
      addKnownMapOutput(u.getHost() + ":" + u.getPort(),
          u.toString(),
          event.getTaskAttemptId());
      maxMapRuntime = Math.max(maxMapRuntime, event.getTaskRunTime());
      break;
    case FAILED:
    case KILLED:
    case OBSOLETE:
      obsoleteMapOutput(event.getTaskAttemptId());
      LOG.info("Ignoring obsolete output of " + event.getTaskStatus() +
          " map-task: '" + event.getTaskAttemptId() + "'");
      break;
    case TIPFAILED:
      tipFailed(event.getTaskAttemptId().getTaskID());
      LOG.info("Ignoring output of failed map TIP: '" +
          event.getTaskAttemptId() + "'");
      break;
    }
  }

  /** Builds the shuffle-handler base URL ("...mapOutput?job=...&reduce=...&map="). */
  static URI getBaseURI(TaskAttemptID reduceId, String url) {
    StringBuffer baseUrl = new StringBuffer(url);
    if (!url.endsWith("/")) {
      baseUrl.append("/");
    }
    baseUrl.append("mapOutput?job=");
    baseUrl.append(reduceId.getJobID());
    baseUrl.append("&reduce=");
    baseUrl.append(reduceId.getTaskID().getId());
    baseUrl.append("&map=");
    URI u = URI.create(baseUrl.toString());
    return u;
  }

  /**
   * Records a successful copy: commits the output, clears failure counts
   * for the map and host, and updates counters/progress. Wakes up waiters
   * when the last map output arrives.
   */
  public synchronized void copySucceeded(TaskAttemptID mapId,
                                         MapHost host,
                                         long bytes,
                                         long startMillis,
                                         long endMillis,
                                         MapOutput<K,V> output
                                         ) throws IOException {
    failureCounts.remove(mapId);
    hostFailures.remove(host.getHostName());
    int mapIndex = mapId.getTaskID().getId();

    if (!finishedMaps[mapIndex]) {
      output.commit();
      finishedMaps[mapIndex] = true;
      shuffledMapsCounter.increment(1);
      if (--remainingMaps == 0) {
        notifyAll();
      }

      // update single copy task status
      long copyMillis = (endMillis - startMillis);
      if (copyMillis == 0) {
        copyMillis = 1;
      }
      float bytesPerMillis = (float) bytes / copyMillis;
      float transferRate = bytesPerMillis * BYTES_PER_MILLIS_TO_MBS;
      String individualProgress = "copy task(" + mapId + " succeeded"
          + " at " + mbpsFormat.format(transferRate) + " MB/s)";
      // update the aggregated status
      copyTimeTracker.add(startMillis, endMillis);

      totalBytesShuffledTillNow += bytes;
      updateStatus(individualProgress);
      reduceShuffleBytes.increment(bytes);
      lastProgressTime = Time.monotonicNow();
      LOG.debug("map " + mapId + " done " + status.getStateString());
    }
  }

  private synchronized void updateStatus(String individualProgress) {
    int mapsDone = totalMaps - remainingMaps;
    long totalCopyMillis = copyTimeTracker.getCopyMillis();
    if (totalCopyMillis == 0) {
      totalCopyMillis = 1;
    }
    float bytesPerMillis = (float) totalBytesShuffledTillNow / totalCopyMillis;
    float transferRate = bytesPerMillis * BYTES_PER_MILLIS_TO_MBS;
    progress.set((float) mapsDone / totalMaps);
    String statusString = mapsDone + " / " + totalMaps + " copied.";
    status.setStateString(statusString);

    if (individualProgress != null) {
      progress.setStatus(individualProgress + " Aggregated copy rate(" +
          mapsDone + " of " + totalMaps + " at " +
          mbpsFormat.format(transferRate) + " MB/s)");
    } else {
      progress.setStatus("copy(" + mapsDone + " of " + totalMaps + " at "
          + mbpsFormat.format(transferRate) + " MB/s)");
    }
  }

  private void updateStatus() {
    updateStatus(null);
  }

  /** Increments the fetch-failure count for the given host. */
  public synchronized void hostFailed(String hostname) {
    if (hostFailures.containsKey(hostname)) {
      IntWritable x = hostFailures.get(hostname);
      x.set(x.get() + 1);
    } else {
      hostFailures.put(hostname, new IntWritable(1));
    }
  }

  /**
   * Records a failed copy of {@code mapId} from {@code host}: penalizes the
   * host, possibly reports the failure to the AM, and aborts the reducer if
   * the per-map failure count reaches the abort limit.
   */
  public synchronized void copyFailed(TaskAttemptID mapId, MapHost host,
      boolean readError, boolean connectExcpt) {
    host.penalize();
    int failures = 1;
    if (failureCounts.containsKey(mapId)) {
      IntWritable x = failureCounts.get(mapId);
      x.set(x.get() + 1);
      failures = x.get();
    } else {
      failureCounts.put(mapId, new IntWritable(1));
    }
    String hostname = host.getHostName();
    // BUG FIX (MAPREDUCE-6361): copySucceeded() removes the host from
    // hostFailures, so the entry may be absent here; the original
    // unconditional get(hostname).get() could throw NullPointerException.
    IntWritable hostFailedNum = hostFailures.get(hostname);
    // report failure if already retried maxHostFailures times
    boolean hostFail = hostFailedNum != null
        && hostFailedNum.get() > getMaxHostFailures();

    if (failures >= abortFailureLimit) {
      // throw/catch locally so the reported exception carries a stack trace
      try {
        throw new IOException(failures + " failures downloading " + mapId);
      } catch (IOException ie) {
        reporter.reportException(ie);
      }
    }

    checkAndInformJobTracker(failures, mapId, readError, connectExcpt,
        hostFail);

    checkReducerHealth();

    long delay = (long) (INITIAL_PENALTY *
        Math.pow(PENALTY_GROWTH_RATE, failures));
    if (delay > maxDelay) {
      delay = maxDelay;
    }

    penalties.add(new Penalty(host, delay));

    failedShuffleCounter.increment(1);
  }

  /** Reports a fatal local (this-node) shuffle error to the task. */
  public void reportLocalError(IOException ioe) {
    try {
      LOG.error("Shuffle failed : local error on this node: "
          + InetAddress.getLocalHost());
    } catch (UnknownHostException e) {
      LOG.error("Shuffle failed : local error on this node");
    }
    reporter.reportException(ioe);
  }

  // Notify the JobTracker
  // after every read error, if 'reportReadErrorImmediately' is true or
  // after every 'maxFetchFailuresBeforeReporting' failures
  private void checkAndInformJobTracker(
      int failures, TaskAttemptID mapId, boolean readError,
      boolean connectExcpt, boolean hostFailed) {
    if (connectExcpt || (reportReadErrorImmediately && readError)
        || ((failures % maxFetchFailuresBeforeReporting) == 0) || hostFailed) {
      LOG.info("Reporting fetch failure for " + mapId + " to jobtracker.");
      status.addFetchFailedMap((org.apache.hadoop.mapred.TaskAttemptID) mapId);
    }
  }

  /**
   * Kills the reducer (via the exception reporter) when too many unique
   * fetches failed AND the reducer is unhealthy AND it has either made
   * insufficient progress or is stalled.
   */
  private void checkReducerHealth() {
    final float MAX_ALLOWED_FAILED_FETCH_ATTEMPT_PERCENT = 0.5f;
    final float MIN_REQUIRED_PROGRESS_PERCENT = 0.5f;
    final float MAX_ALLOWED_STALL_TIME_PERCENT = 0.5f;

    long totalFailures = failedShuffleCounter.getValue();
    int doneMaps = totalMaps - remainingMaps;

    boolean reducerHealthy =
        (((float)totalFailures / (totalFailures + doneMaps))
            < MAX_ALLOWED_FAILED_FETCH_ATTEMPT_PERCENT);

    // check if the reducer has progressed enough
    boolean reducerProgressedEnough =
        (((float)doneMaps / totalMaps)
            >= MIN_REQUIRED_PROGRESS_PERCENT);

    // check if the reducer is stalled for a long time
    // duration for which the reducer is stalled
    int stallDuration =
        (int)(Time.monotonicNow() - lastProgressTime);

    // duration for which the reducer ran with progress
    int shuffleProgressDuration =
        (int)(lastProgressTime - startTime);

    // min time the reducer should run without getting killed
    int minShuffleRunDuration =
        Math.max(shuffleProgressDuration, maxMapRuntime);

    boolean reducerStalled =
        (((float)stallDuration / minShuffleRunDuration)
            >= MAX_ALLOWED_STALL_TIME_PERCENT);

    // kill if not healthy and has insufficient progress
    if ((failureCounts.size() >= maxFailedUniqueFetches ||
        failureCounts.size() == (totalMaps - doneMaps))
        && !reducerHealthy
        && (!reducerProgressedEnough || reducerStalled)) {
      LOG.fatal("Shuffle failed with too many fetch failures " +
          "and insufficient progress!");
      String errorMsg = "Exceeded MAX_FAILED_UNIQUE_FETCHES; bailing-out.";
      reporter.reportException(new IOException(errorMsg));
    }
  }

  /** Marks all attempts of a failed TIP as done so shuffle can finish. */
  public synchronized void tipFailed(TaskID taskId) {
    if (!finishedMaps[taskId.getId()]) {
      finishedMaps[taskId.getId()] = true;
      if (--remainingMaps == 0) {
        notifyAll();
      }
      updateStatus();
    }
  }

  /** Registers a completed map output location and wakes a waiting fetcher. */
  public synchronized void addKnownMapOutput(String hostName,
                                             String hostUrl,
                                             TaskAttemptID mapId) {
    MapHost host = mapLocations.get(hostName);
    if (host == null) {
      host = new MapHost(hostName, hostUrl);
      mapLocations.put(hostName, host);
    }
    host.addKnownMap(mapId);

    // Mark the host as pending
    if (host.getState() == State.PENDING) {
      pendingHosts.add(host);
      notifyAll();
    }
  }

  public synchronized void obsoleteMapOutput(TaskAttemptID mapId) {
    obsoleteMaps.add(mapId);
  }

  public synchronized void putBackKnownMapOutput(MapHost host,
                                                 TaskAttemptID mapId) {
    host.addKnownMap(mapId);
  }

  /**
   * Blocks until a pending host is available, then returns a randomly
   * chosen one (random choice spreads fetchers across hosts).
   */
  public synchronized MapHost getHost() throws InterruptedException {
    while (pendingHosts.isEmpty()) {
      wait();
    }

    MapHost host = null;
    Iterator<MapHost> iter = pendingHosts.iterator();
    int numToPick = random.nextInt(pendingHosts.size());
    for (int i = 0; i <= numToPick; ++i) {
      host = iter.next();
    }

    pendingHosts.remove(host);
    host.markBusy();

    LOG.info("Assigning " + host + " with " + host.getNumKnownMapOutputs()
        + " to " + Thread.currentThread().getName());
    SHUFFLE_START.set(Time.monotonicNow());

    return host;
  }

  /**
   * Returns up to {@code MAX_MAPS_AT_ONCE} still-needed map outputs for the
   * host; any remaining needed maps are put back on the host for later.
   */
  public synchronized List<TaskAttemptID> getMapsForHost(MapHost host) {
    List<TaskAttemptID> list = host.getAndClearKnownMaps();
    Iterator<TaskAttemptID> itr = list.iterator();
    List<TaskAttemptID> result = new ArrayList<TaskAttemptID>();
    int includedMaps = 0;
    int totalSize = list.size();
    // find the maps that we still need, up to the limit
    while (itr.hasNext()) {
      TaskAttemptID id = itr.next();
      if (!obsoleteMaps.contains(id) && !finishedMaps[id.getTaskID().getId()]) {
        result.add(id);
        if (++includedMaps >= MAX_MAPS_AT_ONCE) {
          break;
        }
      }
    }
    // put back the maps left after the limit
    while (itr.hasNext()) {
      TaskAttemptID id = itr.next();
      if (!obsoleteMaps.contains(id) && !finishedMaps[id.getTaskID().getId()]) {
        host.addKnownMap(id);
      }
    }
    LOG.info("assigned " + includedMaps + " of " + totalSize + " to " +
        host + " to " + Thread.currentThread().getName());
    return result;
  }

  /** Returns a host to the pending pool (unless it is serving a penalty). */
  public synchronized void freeHost(MapHost host) {
    if (host.getState() != State.PENALIZED) {
      if (host.markAvailable() == State.PENDING) {
        pendingHosts.add(host);
        notifyAll();
      }
    }
    LOG.info(host + " freed by " + Thread.currentThread().getName() + " in " +
        (Time.monotonicNow() - SHUFFLE_START.get()) + "ms");
  }

  public synchronized void resetKnownMaps() {
    mapLocations.clear();
    obsoleteMaps.clear();
    pendingHosts.clear();
  }

  /**
   * Wait until the shuffle finishes or until the timeout.
   * @param millis maximum wait time
   * @return true if the shuffle is done
   * @throws InterruptedException
   */
  @Override
  public synchronized boolean waitUntilDone(int millis
                                            ) throws InterruptedException {
    if (remainingMaps > 0) {
      wait(millis);
      return remainingMaps == 0;
    }
    return true;
  }

  /**
   * A structure that records the penalty for a host.
   */
  private static class Penalty implements Delayed {
    MapHost host;
    private long endTime;

    Penalty(MapHost host, long delay) {
      this.host = host;
      this.endTime = Time.monotonicNow() + delay;
    }

    @Override
    public long getDelay(TimeUnit unit) {
      long remainingTime = endTime - Time.monotonicNow();
      return unit.convert(remainingTime, TimeUnit.MILLISECONDS);
    }

    @Override
    public int compareTo(Delayed o) {
      long other = ((Penalty) o).endTime;
      return endTime == other ? 0 : (endTime < other ? -1 : 1);
    }
  }

  /**
   * A thread that takes hosts off of the penalty list when the timer expires.
   */
  private class Referee extends Thread {
    public Referee() {
      setName("ShufflePenaltyReferee");
      setDaemon(true);
    }

    @Override
    public void run() {
      try {
        while (true) {
          // take the first host that has an expired penalty
          MapHost host = penalties.take().host;
          synchronized (ShuffleSchedulerImpl.this) {
            if (host.markAvailable() == MapHost.State.PENDING) {
              pendingHosts.add(host);
              ShuffleSchedulerImpl.this.notifyAll();
            }
          }
        }
      } catch (InterruptedException ie) {
        return;
      } catch (Throwable t) {
        reporter.reportException(t);
      }
    }
  }

  @Override
  public void close() throws InterruptedException {
    referee.interrupt();
    referee.join();
  }

  public int getMaxHostFailures() {
    return maxHostFailures;
  }

  /**
   * Tracks the union of copy time intervals so overlapping concurrent
   * copies are not double-counted in the aggregate transfer rate.
   */
  private static class CopyTimeTracker {
    List<Interval> intervals;
    long copyMillis;

    public CopyTimeTracker() {
      intervals = Collections.emptyList();
      copyMillis = 0;
    }

    public void add(long s, long e) {
      Interval interval = new Interval(s, e);
      copyMillis = getTotalCopyMillis(interval);
    }

    public long getCopyMillis() {
      return copyMillis;
    }

    // This method captures the time during which any copy was in progress
    // each copy time period is record in the Interval list
    private long getTotalCopyMillis(Interval newInterval) {
      if (newInterval == null) {
        return copyMillis;
      }
      List<Interval> result = new ArrayList<Interval>(intervals.size() + 1);
      for (Interval interval: intervals) {
        if (interval.end < newInterval.start) {
          result.add(interval);
        } else if (interval.start > newInterval.end) {
          result.add(newInterval);
          newInterval = interval;
        } else {
          // overlapping intervals: merge into one
          newInterval = new Interval(
              Math.min(interval.start, newInterval.start),
              Math.max(newInterval.end, interval.end));
        }
      }
      result.add(newInterval);
      intervals = result;

      // compute total millis
      long length = 0;
      for (Interval interval : intervals) {
        length += interval.getIntervalLength();
      }
      return length;
    }

    private static class Interval {
      final long start;
      final long end;

      public Interval(long s, long e) {
        start = s;
        end = e;
      }

      public long getIntervalLength() {
        return end - start;
      }
    }
  }
}
/*
 * Copyright (c) 2018, Bart Hanssens <bart.hanssens@bosa.fgov.be>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * * Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
package be.belgif.vocab.dao;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;

import org.eclipse.rdf4j.model.IRI;
import org.eclipse.rdf4j.model.Model;
import org.eclipse.rdf4j.model.Resource;
import org.eclipse.rdf4j.model.Value;
import org.eclipse.rdf4j.model.vocabulary.RDF;
import org.eclipse.rdf4j.model.vocabulary.RDFS;

/**
 * DAO helper class for ontology overview.
 * Only used for HTML view
 *
 * @author Bart Hanssens
 */
public class OwlDAO extends RdfDAO {
    private final List<OwlThingDAO> classes = new ArrayList<>();
    private final List<OwlThingDAO> properties = new ArrayList<>();

    /**
     * Helper class for OWL classes and properties
     */
    public class OwlThingDAO extends RdfDAO {
        /**
         * Get domains
         *
         * @return set of IRIs or empty set
         */
        public Set<Value> getDomains() {
            return objs(RDFS.DOMAIN);
        }

        /**
         * Get ranges
         *
         * @return set of IRIs or empty set
         */
        public Set<Value> getRanges() {
            return objs(RDFS.RANGE);
        }

        /**
         * Get subclasses
         *
         * @return set of IRIs or empty set
         */
        public Set<Value> getSubClasses() {
            return objs(RDFS.SUBCLASSOF);
        }

        /**
         * Get subproperties
         *
         * @return set of IRIs or empty set
         */
        public Set<Value> getSubProperties() {
            return objs(RDFS.SUBPROPERTYOF);
        }

        /**
         * Constructor
         *
         * @param m model
         * @param id subject IRI
         */
        public OwlThingDAO(Model m, Resource id) {
            super(m, id);
        }
    }

    /**
     * Sort a list of properties or classes and group by starting letter.
     *
     * @param lst list of properties or classes
     * @return sorted nested map (first letter to sorted set of local names)
     */
    private SortedMap<String, SortedSet<String>> getLetter(List<OwlThingDAO> lst) {
        TreeMap<String, SortedSet<String>> map = new TreeMap<>();

        for (RdfDAO rdf : lst) {
            String name = ((IRI) rdf.getId()).getLocalName();
            // NOTE(review): assumes the local name is never empty — substring(0, 1)
            // would throw on an empty string; verify against the loaded ontologies
            String letter = name.substring(0, 1);
            // raw-type SortedSet replaced with a typed computeIfAbsent lookup
            map.computeIfAbsent(letter, l -> new TreeSet<>()).add(name);
        }
        return map;
    }

    /**
     * Get RDFS/OWL classes
     *
     * @return list of classes
     */
    public List<OwlThingDAO> getClasses() {
        return this.classes;
    }

    /**
     * Get ordered list of class names, grouped by starting letter.
     *
     * @return nested map
     */
    public SortedMap<String, SortedSet<String>> getClassesLetter() {
        return getLetter(getClasses());
    }

    /**
     * Get properties
     *
     * @return list of properties
     */
    public List<OwlThingDAO> getProperties() {
        return this.properties;
    }

    /**
     * Get ordered list of property names, grouped by starting letter.
     *
     * @return nested map
     */
    public SortedMap<String, SortedSet<String>> getPropertiesLetter() {
        return getLetter(getProperties());
    }

    /**
     * Initialize RDF/OWL classes
     *
     * @param m model
     */
    private void initClasses(Model m) {
        // Get all classes and subclasses, without duplicates
        Set<Resource> subjs = new HashSet<>();
        subjs.addAll(m.filter(null, RDF.TYPE, RDFS.CLASS).subjects());
        subjs.addAll(m.filter(null, RDFS.SUBCLASSOF, null).subjects());

        // NOTE(review): the (IRI) cast assumes no blank-node subjects — confirm
        for (Resource subj : subjs) {
            classes.add(new OwlThingDAO(getModel(), (IRI) subj));
        }
    }

    /**
     * Initialize RDF/OWL properties
     *
     * @param m model
     */
    private void initProperties(Model m) {
        // Get all properties and subproperties, without duplicates
        Set<Resource> subjs = new HashSet<>();
        subjs.addAll(m.filter(null, RDF.TYPE, RDF.PROPERTY).subjects());
        subjs.addAll(m.filter(null, RDFS.SUBPROPERTYOF, null).subjects());

        // NOTE(review): the (IRI) cast assumes no blank-node subjects — confirm
        for (Resource subj : subjs) {
            properties.add(new OwlThingDAO(getModel(), (IRI) subj));
        }
    }

    /**
     * Constructor
     *
     * @param m triples
     * @param id subject ID
     */
    public OwlDAO(Model m, Resource id) {
        super(m, id);
        initClasses(m);
        initProperties(m);
    }
}
/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 2002-2010 Oracle. All rights reserved.
 *
 * $Id: DbBackupTest.java,v 1.22 2010/01/04 15:51:08 cwl Exp $
 */

package com.sleepycat.je.util;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;

import junit.framework.TestCase;

import com.sleepycat.bind.tuple.IntegerBinding;
import com.sleepycat.je.CheckpointConfig;
import com.sleepycat.je.Cursor;
import com.sleepycat.je.Database;
import com.sleepycat.je.DatabaseConfig;
import com.sleepycat.je.DatabaseEntry;
import com.sleepycat.je.DatabaseException;
import com.sleepycat.je.DbInternal;
import com.sleepycat.je.Environment;
import com.sleepycat.je.EnvironmentConfig;
import com.sleepycat.je.EnvironmentStats;
import com.sleepycat.je.LockMode;
import com.sleepycat.je.OperationStatus;
import com.sleepycat.je.StatsConfig;
import com.sleepycat.je.config.EnvironmentParams;
import com.sleepycat.je.dbi.EnvironmentImpl;
import com.sleepycat.je.log.FileManager;
import com.sleepycat.je.utilint.DbLsn;

/**
 * Tests for DbBackup: hot-backup correctness versus concurrent log cleaning,
 * incremental backups, API misuse, and (optionally) read-only environments.
 */
public class DbBackupTest extends TestCase {

    /* StatsConfig that clears counters on every getStats() call. */
    private static StatsConfig CLEAR_CONFIG = new StatsConfig();
    static {
        CLEAR_CONFIG.setClear(true);
    }

    private static CheckpointConfig FORCE_CONFIG = new CheckpointConfig();
    static {
        FORCE_CONFIG.setForce(true);
    }

    private static final String SAVE1 = "save1";
    private static final String SAVE2 = "save2";
    private static final String SAVE3 = "save3";
    private static final int NUM_RECS = 60;

    private final File envHome;
    private Environment env;
    private FileManager fileManager;

    public DbBackupTest() {
        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
    }

    @Override
    public void setUp() {
        TestUtils.removeLogFiles("Setup", envHome, false);
        deleteSaveDir(SAVE1);
        deleteSaveDir(SAVE2);
        deleteSaveDir(SAVE3);
    }

    @Override
    public void tearDown() {
        TestUtils.removeLogFiles("TearDown", envHome, false);
        deleteSaveDir(SAVE1);
        deleteSaveDir(SAVE2);
        deleteSaveDir(SAVE3);
    }

    /**
     * Test basic backup, make sure log cleaning isn't running.
     */
    public void testBackupVsCleaning()
        throws Throwable {

        env = createEnv(false, envHome); /* read-write env */
        EnvironmentImpl envImpl = DbInternal.getEnvironmentImpl(env);
        fileManager = envImpl.getFileManager();

        boolean success = false;
        try {
            /*
             * Grow files, creating obsolete entries to create cleaner
             * opportunity.
             */
            growFiles("db1", env, 8);

            /* Start backup. */
            DbBackup backupHelper = new DbBackup(env);
            backupHelper.startBackup();

            long lastFileNum = backupHelper.getLastFileInBackupSet();
            long checkLastFileNum = lastFileNum;

            /* Copy the backup set. */
            saveFiles(backupHelper, -1, lastFileNum, SAVE1);

            /*
             * Try to clean and checkpoint. Check that the logs grew as
             * a result.
             */
            batchClean(0);
            long newLastFileNum = (fileManager.getLastFileNum()).longValue();
            assertTrue(checkLastFileNum < newLastFileNum);
            checkLastFileNum = newLastFileNum;

            /* Copy the backup set after attempting cleaning */
            saveFiles(backupHelper, -1, lastFileNum, SAVE2);

            /* Insert more data. */
            growFiles("db2", env, 8);

            /*
             * Try to clean and checkpoint. Check that the logs grew as
             * a result.
             */
            batchClean(0);
            newLastFileNum = fileManager.getLastFileNum().longValue();
            assertTrue(checkLastFileNum < newLastFileNum);
            checkLastFileNum = newLastFileNum;

            /* Copy the backup set after inserting more data */
            saveFiles(backupHelper, -1, lastFileNum, SAVE3);

            /* Check the membership of the saved set. */
            long lastFile = backupHelper.getLastFileInBackupSet();
            String[] backupSet = backupHelper.getLogFilesInBackupSet();
            assertEquals((lastFile + 1), backupSet.length);

            /* End backup. */
            backupHelper.endBackup();

            /*
             * Run cleaning, and verify that quite a few files are deleted.
             */
            long numCleaned = batchClean(100);
            assertTrue(numCleaned > 5);

            env.close();
            env = null;

            /* Verify backups. */
            TestUtils.removeLogFiles("Verify", envHome, false);
            verifyDb1(SAVE1, true);
            TestUtils.removeLogFiles("Verify", envHome, false);
            verifyDb1(SAVE2, true);
            TestUtils.removeLogFiles("Verify", envHome, false);
            verifyDb1(SAVE3, true);
            success = true;
        } finally {
            if (env != null) {
                try {
                    env.close();
                } catch (Exception e) {
                    /*
                     * Don't bother with this exception if there is another
                     * earlier problem.
                     */
                    if (success) {
                        throw e;
                    }
                }
            }
        }
    }

    /**
     * Test multiple backup passes
     */
    public void testIncrementalBackup()
        throws Throwable {

        env = createEnv(false, envHome); /* read-write env */
        EnvironmentImpl envImpl = DbInternal.getEnvironmentImpl(env);
        fileManager = envImpl.getFileManager();

        try {
            /*
             * Grow files, creating obsolete entries to create cleaner
             * opportunity.
             */
            growFiles("db1", env, 8);

            /* Backup1. */
            DbBackup backupHelper1 = new DbBackup(env);
            backupHelper1.startBackup();
            long b1LastFile = backupHelper1.getLastFileInBackupSet();
            saveFiles(backupHelper1, -1, b1LastFile, SAVE1);
            String lastName = fileManager.getFullFileName(b1LastFile,
                                                          FileManager.JE_SUFFIX);
            File f = new File(lastName);
            long savedLength = f.length();
            backupHelper1.endBackup();

            /*
             * Add more data. Check that the file did flip, and is not modified
             * by the additional data.
             */
            growFiles("db2", env, 8);
            checkFileLen(b1LastFile, savedLength);

            /* Backup2. */
            DbBackup backupHelper2 = new DbBackup(env, b1LastFile);
            backupHelper2.startBackup();
            long b2LastFile = backupHelper2.getLastFileInBackupSet();
            saveFiles(backupHelper2, b1LastFile, b2LastFile, SAVE2);
            backupHelper2.endBackup();

            /* Test deprecated getLogFilesInBackupSet(long) method. */
            DbBackup backupHelper3 = new DbBackup(env);
            backupHelper3.startBackup();
            String[] fileList3 =
                backupHelper3.getLogFilesInBackupSet(b1LastFile);
            assertEquals(b1LastFile + 1,
                         fileManager.getNumFromName(fileList3[0]).longValue());
            backupHelper3.endBackup();

            env.close();
            env = null;

            /* Verify backups. */
            TestUtils.removeLogFiles("Verify", envHome, false);
            verifyDb1(SAVE1, false);
            TestUtils.removeLogFiles("Verify", envHome, false);
            verifyBothDbs(SAVE1, SAVE2);
        } finally {
            if (env != null) {
                env.close();
            }
        }
    }

    /**
     * Exercise illegal call orderings; each must raise IllegalStateException.
     */
    public void testBadUsage()
        throws Exception {

        Environment env = createEnv(false, envHome); /* read-write env */

        try {
            DbBackup backup = new DbBackup(env);

            /* end can only be called after start. */
            try {
                backup.endBackup();
                fail("should fail");
            } catch (IllegalStateException expected) {
            }

            /* start can't be called twice. */
            backup.startBackup();
            try {
                backup.startBackup();
                fail("should fail");
            } catch (IllegalStateException expected) {
            }

            /*
             * You can only get the backup set when you're in between start
             * and end.
             */
            backup.endBackup();

            try {
                backup.getLastFileInBackupSet();
                fail("should fail");
            } catch (IllegalStateException expected) {
            }

            try {
                backup.getLogFilesInBackupSet();
                fail("should fail");
            } catch (IllegalStateException expected) {
            }

            try {
                backup.getLogFilesInBackupSet(0);
                fail("should fail");
            } catch (IllegalStateException expected) {
            }
        } finally {
            env.close();
        }
    }

    /*
     * This test can't be run by default, because it makes a directory
     * read/only, and Java doesn't support a way to make it writable again
     * except in Mustang. There's no way to clean up a read-only directory.
     */
    public void xtestReadOnly()
        throws Exception {

        /* Make a read-only handle on a read-write environment directory.*/
        Environment env = createEnv(true, envHome);

        try {
            @SuppressWarnings("unused")
            DbBackup backup = new DbBackup(env);
            fail("Should fail because env is read/only.");
        } catch (DatabaseException expected) {
        }

        env.close();

        /*
         * Make a read-only handle on a read-only environment directory. Use a
         * new environment directory because we're going to set it read0nly and
         * there doesn't seem to be a way of undoing that.
         */
        File tempEnvDir = new File(envHome, SAVE1);
        assertTrue(tempEnvDir.mkdirs());
        env = createEnv(false, tempEnvDir);
        growFiles("db1", env, 8);
        env.close();
        //assertTrue(tempEnvDir.setReadOnly());

        env = createEnv(true, tempEnvDir);

        DbBackup backupHelper = new DbBackup(env);
        backupHelper.startBackup();

        FileManager fileManager =
            DbInternal.getEnvironmentImpl(env).getFileManager();
        long lastFile = fileManager.getLastFileNum().longValue();
        assertEquals(lastFile, backupHelper.getLastFileInBackupSet());

        backupHelper.endBackup();
        env.close();
        assertTrue(tempEnvDir.delete());
    }

    /**
     * Create an environment with a small log-file size and the cleaner
     * disabled, so tests control cleaning explicitly.
     */
    private Environment createEnv(boolean readOnly, File envDir)
        throws DatabaseException {

        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
        DbInternal.disableParameterValidation(envConfig);
        envConfig.setAllowCreate(true);
        envConfig.setReadOnly(readOnly);
        envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
                                 "400");
        envConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(),
                                 "false");

        Environment env = new Environment(envDir, envConfig);
        return env;
    }

    /**
     * Insert then update NUM_RECS records, forcing the log to grow by at
     * least minNumFiles files; returns the new last file number.
     */
    private long growFiles(String dbName,
                           Environment env,
                           int minNumFiles)
        throws DatabaseException {

        DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setAllowCreate(true);
        Database db = env.openDatabase(null, dbName, dbConfig);
        FileManager fileManager =
            DbInternal.getEnvironmentImpl(env).getFileManager();
        long startLastFileNum =
            DbLsn.getFileNumber(fileManager.getLastUsedLsn());

        DatabaseEntry key = new DatabaseEntry();
        DatabaseEntry data = new DatabaseEntry();
        /* Update twice, in order to create plenty of cleaning opportunity. */
        for (int i = 0; i < NUM_RECS; i++) {
            IntegerBinding.intToEntry(i, key);
            IntegerBinding.intToEntry(i, data);
            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
        }

        for (int i = 0; i < NUM_RECS; i++) {
            IntegerBinding.intToEntry(i, key);
            IntegerBinding.intToEntry(i+5, data);
            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
        }

        db.close();

        long endLastFileNum =
            DbLsn.getFileNumber(fileManager.getLastUsedLsn());
        assertTrue((endLastFileNum - startLastFileNum) >= minNumFiles);
        return endLastFileNum;
    }

    /**
     * Clean the log until no further progress, checkpoint, and return the
     * number of files the cleaner deleted during this pass.
     */
    private long batchClean(int expectedDeletions)
        throws DatabaseException {

        /*
         * CLEAR_CONFIG resets counters: this first getStats() call is made
         * only for its clearing side effect, so the counts read below cover
         * just this cleaning pass. (The original assigned the result to a
         * variable that was immediately overwritten.)
         */
        env.getStats(CLEAR_CONFIG);
        while (env.cleanLog() > 0) {
        }
        env.checkpoint(FORCE_CONFIG);
        EnvironmentStats stats = env.getStats(CLEAR_CONFIG);
        assertTrue(stats.getNCleanerDeletions() <= expectedDeletions);

        return stats.getNCleanerDeletions();
    }

    /**
     * Check the backup set membership and copy it into the save directory.
     */
    private void saveFiles(DbBackup backupHelper,
                           long lastFileFromPrevBackup,
                           long lastFileNum,
                           String saveDirName)
        throws DatabaseException {

        /* Check that the backup set contains only the files it should have. */
        String[] fileList = backupHelper.getLogFilesInBackupSet();
        assertEquals(lastFileFromPrevBackup + 1,
                     fileManager.getNumFromName(fileList[0]).
                     longValue());
        assertEquals(lastFileNum,
                     fileManager.getNumFromName(fileList[fileList.length-1]).
                     longValue());

        /* Make a new save directory. */
        File saveDir = new File(envHome, saveDirName);
        assertTrue(saveDir.mkdir());
        copyFiles(envHome, saveDir, fileList);
    }

    /**
     * Copy the named files from sourceDir to destDir. Channels are closed in
     * finally blocks so a failed transfer no longer leaks file descriptors
     * (the original closed them only on the success path).
     */
    private void copyFiles(File sourceDir, File destDir, String[] fileList)
        throws DatabaseException {

        try {
            for (int i = 0; i < fileList.length; i++) {
                File source = new File(sourceDir, fileList[i]);
                File save = new File(destDir, fileList[i]);
                FileChannel sourceChannel =
                    new FileInputStream(source).getChannel();
                try {
                    FileChannel saveChannel =
                        new FileOutputStream(save).getChannel();
                    try {
                        saveChannel.transferFrom(sourceChannel, 0,
                                                 sourceChannel.size());
                    } finally {
                        saveChannel.close();
                    }
                } finally {
                    sourceChannel.close();
                }
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Delete all the contents and the directory itself.
     */
    private void deleteSaveDir(String saveDirName) {
        File saveDir = new File(envHome, saveDirName);
        if (saveDir.exists()) {
            String[] savedFiles = saveDir.list();
            if (savedFiles != null) {
                for (int i = 0; i < savedFiles.length; i++) {
                    File f = new File(saveDir, savedFiles[i]);
                    assertTrue(f.delete());
                }
                assertTrue(saveDir.delete());
            }
        }
    }

    /**
     * Copy the saved files in, check values.
     */
    private void verifyDb1(String saveDirName, boolean rename)
        throws DatabaseException {

        File saveDir = new File(envHome, saveDirName);
        String[] savedFiles = saveDir.list();
        if (rename){
            for (int i = 0; i < savedFiles.length; i++) {
                File saved = new File(saveDir, savedFiles[i]);
                File dest = new File(envHome, savedFiles[i]);
                assertTrue(saved.renameTo(dest));
            }
        } else {
            /* copy. */
            copyFiles(saveDir, envHome, savedFiles);
        }
        env = createEnv(false, envHome);
        try {
            checkDb("db1");
            /* Db 2 should not exist. */
            DatabaseConfig dbConfig = new DatabaseConfig();
            try {
                @SuppressWarnings("unused")
                Database db = env.openDatabase(null, "db2", dbConfig);
                fail("db2 should not exist");
            } catch (DatabaseException expected) {
            }
        } finally {
            env.close();
            env = null;
        }
    }

    /**
     * Copy the saved files in, check values.
     */
    private void verifyBothDbs(String saveDirName1, String saveDirName2)
        throws DatabaseException {

        File saveDir = new File(envHome, saveDirName1);
        String[] savedFiles = saveDir.list();
        for (int i = 0; i < savedFiles.length; i++) {
            File saved = new File(saveDir, savedFiles[i]);
            File dest = new File(envHome, savedFiles[i]);
            assertTrue(saved.renameTo(dest));
        }

        saveDir = new File(envHome, saveDirName2);
        savedFiles = saveDir.list();
        for (int i = 0; i < savedFiles.length; i++) {
            File saved = new File(saveDir, savedFiles[i]);
            File dest = new File(envHome, savedFiles[i]);
            assertTrue(saved.renameTo(dest));
        }

        env = createEnv(false, envHome);
        try {
            checkDb("db1");
            checkDb("db2");
        } finally {
            env.close();
            env = null;
        }
    }

    /**
     * Cursor-scan dbName and verify the post-update values (data = key + 5).
     */
    private void checkDb(String dbName)
        throws DatabaseException {

        DatabaseConfig dbConfig = new DatabaseConfig();
        Database db = env.openDatabase(null, dbName, dbConfig);
        Cursor c = null;
        try {
            DatabaseEntry key = new DatabaseEntry();
            DatabaseEntry data = new DatabaseEntry();
            c = db.openCursor(null, null);

            for (int i = 0; i < NUM_RECS; i++) {
                assertEquals(OperationStatus.SUCCESS,
                             c.getNext(key, data, LockMode.DEFAULT));
                assertEquals(i, IntegerBinding.entryToInt(key));
                assertEquals(i + 5, IntegerBinding.entryToInt(data));
            }
            assertEquals(OperationStatus.NOTFOUND,
                         c.getNext(key, data, LockMode.DEFAULT));
        } finally {
            if (c != null)
                c.close();
            db.close();
        }
    }

    /**
     * Assert that log file fileNum has the expected on-disk length.
     */
    private void checkFileLen(long fileNum, long length) {
        String fileName = fileManager.getFullFileName(fileNum,
                                                      FileManager.JE_SUFFIX);
        File f = new File(fileName);
        assertEquals(length, f.length());
    }
}
/*
 * Copyright 2000-2014 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jetbrains.plugins.gradle.model.data;

import java.io.Serializable;
import java.util.List;

/**
 * Serializable snapshot of Gradle's Scala compile options, transferred from
 * the Gradle tooling API to the IDE. Plain mutable bean: every property has
 * a getter/setter pair and no validation.
 *
 * @author Vladislav.Soroka
 * @since 1/31/14
 */
public class ScalaCompileOptionsData implements Serializable {
  private static final long serialVersionUID = 1L;

  private boolean useCompileDaemon;
  private String daemonServer;
  private boolean failOnError;
  private boolean deprecation;
  private boolean unchecked;
  private String debugLevel;
  private boolean optimize;
  private String encoding;
  private String force;
  private String targetCompatibility;
  private List<String> additionalParameters;
  private boolean listFiles;
  private String loggingLevel;
  private List<String> loggingPhases;
  private boolean fork;
  private ScalaForkOptions forkOptions;
  private boolean useAnt;

  /**
   * @deprecated see https://docs.gradle.org/3.0/release-notes#ant-based-scala-compiler-has-been-removed
   */
  // @Deprecated annotation added to match the @deprecated Javadoc, so the
  // compiler can warn callers (the Javadoc tag alone does not).
  @Deprecated
  public boolean isUseCompileDaemon() {
    return useCompileDaemon;
  }

  /**
   * @deprecated see https://docs.gradle.org/3.0/release-notes#ant-based-scala-compiler-has-been-removed
   */
  @Deprecated
  public void setUseCompileDaemon(boolean useCompileDaemon) {
    this.useCompileDaemon = useCompileDaemon;
  }

  /**
   * @deprecated see https://docs.gradle.org/3.0/release-notes#ant-based-scala-compiler-has-been-removed
   */
  @Deprecated
  public String getDaemonServer() {
    return daemonServer;
  }

  /**
   * @deprecated see https://docs.gradle.org/3.0/release-notes#ant-based-scala-compiler-has-been-removed
   */
  @Deprecated
  public void setDaemonServer(String daemonServer) {
    this.daemonServer = daemonServer;
  }

  public boolean isFailOnError() {
    return failOnError;
  }

  public void setFailOnError(boolean failOnError) {
    this.failOnError = failOnError;
  }

  public boolean isDeprecation() {
    return deprecation;
  }

  public void setDeprecation(boolean deprecation) {
    this.deprecation = deprecation;
  }

  public boolean isUnchecked() {
    return unchecked;
  }

  public void setUnchecked(boolean unchecked) {
    this.unchecked = unchecked;
  }

  public String getDebugLevel() {
    return debugLevel;
  }

  public void setDebugLevel(String debugLevel) {
    this.debugLevel = debugLevel;
  }

  public boolean isOptimize() {
    return optimize;
  }

  public void setOptimize(boolean optimize) {
    this.optimize = optimize;
  }

  public String getEncoding() {
    return encoding;
  }

  public void setEncoding(String encoding) {
    this.encoding = encoding;
  }

  public String getForce() {
    return force;
  }

  public void setForce(String force) {
    this.force = force;
  }

  public String getTargetCompatibility() {
    return targetCompatibility;
  }

  public void setTargetCompatibility(String targetCompatibility) {
    this.targetCompatibility = targetCompatibility;
  }

  public List<String> getAdditionalParameters() {
    return additionalParameters;
  }

  public void setAdditionalParameters(List<String> additionalParameters) {
    this.additionalParameters = additionalParameters;
  }

  public boolean isListFiles() {
    return listFiles;
  }

  public void setListFiles(boolean listFiles) {
    this.listFiles = listFiles;
  }

  public String getLoggingLevel() {
    return loggingLevel;
  }

  public void setLoggingLevel(String loggingLevel) {
    this.loggingLevel = loggingLevel;
  }

  public List<String> getLoggingPhases() {
    return loggingPhases;
  }

  public void setLoggingPhases(List<String> loggingPhases) {
    this.loggingPhases = loggingPhases;
  }

  /**
   * @deprecated see https://docs.gradle.org/3.0/release-notes#ant-based-scala-compiler-has-been-removed
   */
  @Deprecated
  public boolean isFork() {
    return fork;
  }

  /**
   * @deprecated see https://docs.gradle.org/3.0/release-notes#ant-based-scala-compiler-has-been-removed
   */
  @Deprecated
  public void setFork(boolean fork) {
    this.fork = fork;
  }

  /**
   * @deprecated see https://docs.gradle.org/3.0/release-notes#ant-based-scala-compiler-has-been-removed
   */
  @Deprecated
  public boolean isUseAnt() {
    return useAnt;
  }

  /**
   * @deprecated see https://docs.gradle.org/3.0/release-notes#ant-based-scala-compiler-has-been-removed
   */
  @Deprecated
  public void setUseAnt(boolean useAnt) {
    this.useAnt = useAnt;
  }

  public ScalaForkOptions getForkOptions() {
    return forkOptions;
  }

  public void setForkOptions(ScalaForkOptions forkOptions) {
    this.forkOptions = forkOptions;
  }

  /**
   * JVM fork settings for the (deprecated) Ant-based Scala compiler path.
   */
  public static class ScalaForkOptions implements Serializable {
    private static final long serialVersionUID = 1L;

    private String memoryInitialSize;
    private String memoryMaximumSize;
    private List<String> jvmArgs;

    public String getMemoryInitialSize() {
      return memoryInitialSize;
    }

    public void setMemoryInitialSize(String memoryInitialSize) {
      this.memoryInitialSize = memoryInitialSize;
    }

    public String getMemoryMaximumSize() {
      return memoryMaximumSize;
    }

    public void setMemoryMaximumSize(String memoryMaximumSize) {
      this.memoryMaximumSize = memoryMaximumSize;
    }

    public List<String> getJvmArgs() {
      return jvmArgs;
    }

    public void setJvmArgs(List<String> jvmArgs) {
      this.jvmArgs = jvmArgs;
    }
  }
}
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package org.chromium.chrome.browser.payments;

import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.drawable.BitmapDrawable;
import android.text.TextUtils;

import androidx.annotation.Nullable;
import androidx.annotation.VisibleForTesting;

import org.chromium.base.Log;
import org.chromium.base.ThreadUtils;
import org.chromium.base.annotations.CalledByNative;
import org.chromium.base.annotations.NativeMethods;
import org.chromium.chrome.browser.ChromeActivity;
import org.chromium.chrome.browser.flags.ChromeFeatureList;
import org.chromium.components.payments.MethodStrings;
import org.chromium.components.url_formatter.UrlFormatter;
import org.chromium.content_public.browser.RenderFrameHost;
import org.chromium.content_public.browser.WebContents;
import org.chromium.payments.mojom.PaymentDetailsModifier;
import org.chromium.payments.mojom.PaymentMethodData;
import org.chromium.url.GURL;

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

/**
 * Native bridge for finding payment apps.
 */
public class PaymentAppServiceBridge implements PaymentAppFactoryInterface {
    private static final String TAG = "cr_PaymentAppService";
    // When true, canMakePayment responses are short-circuited to "true" (tests only).
    private static boolean sCanMakePaymentForTesting;
    // Method names for which the "can make payment" event is skipped (treated as
    // always payable); populated in the constructor with W3C-standardized methods.
    private final Set<String> mStandardizedPaymentMethods = new HashSet<>();

    /* package */ PaymentAppServiceBridge() {
        mStandardizedPaymentMethods.add(MethodStrings.BASIC_CARD);
        mStandardizedPaymentMethods.add(MethodStrings.INTERLEDGER);
        mStandardizedPaymentMethods.add(MethodStrings.PAYEE_CREDIT_TRANSFER);
        mStandardizedPaymentMethods.add(MethodStrings.PAYER_CREDIT_TRANSFER);
        mStandardizedPaymentMethods.add(MethodStrings.TOKENIZED_CARD);
    }

    /**
     * Make canMakePayment() return true always for testing purpose.
     *
     * @param canMakePayment Indicates whether a SW payment app can make payment.
     */
    @VisibleForTesting
    public static void setCanMakePaymentForTesting(boolean canMakePayment) {
        sCanMakePaymentForTesting = canMakePayment;
    }

    // PaymentAppFactoryInterface implementation.
    // Serializes the requested payment-method data and forwards the query to the
    // native PaymentAppService; results come back through PaymentAppServiceCallback.
    @Override
    public void create(PaymentAppFactoryDelegate delegate) {
        // Sanity check: the delegate's request origin must match the frame's
        // last committed URL (security-display formatted).
        assert delegate.getParams().getPaymentRequestOrigin().equals(
                UrlFormatter.formatUrlForSecurityDisplay(
                        delegate.getParams().getRenderFrameHost().getLastCommittedURL()));

        PaymentAppServiceCallback callback = new PaymentAppServiceCallback(delegate);

        ByteBuffer[] serializedMethodData =
                new ByteBuffer[delegate.getParams().getMethodData().values().size()];
        int i = 0;
        for (PaymentMethodData methodData : delegate.getParams().getMethodData().values()) {
            serializedMethodData[i++] = methodData.serialize();
        }
        PaymentAppServiceBridgeJni.get().create(delegate.getParams().getRenderFrameHost(),
                delegate.getParams().getTopLevelOrigin(), serializedMethodData,
                delegate.getParams().getMayCrawl(), callback);
    }

    /** Handles callbacks from native PaymentAppService and creates PaymentApps. */
    public class PaymentAppServiceCallback {
        private final PaymentAppFactoryDelegate mDelegate;
        // True once at least one service-worker app matched a requested method.
        private boolean mPaymentHandlerWithMatchingMethodFound;
        // Outstanding async "can make payment" events; notifyFinished() may only
        // run once this reaches zero AND native signaled onDoneCreatingPaymentApps().
        private int mNumberOfPendingCanMakePaymentEvents;
        private boolean mDoneCreatingPaymentApps;

        private PaymentAppServiceCallback(PaymentAppFactoryDelegate delegate) {
            mDelegate = delegate;
        }

        /**
         * Called when an installed payment handler is found.
         */
        @CalledByNative("PaymentAppServiceCallback")
        private void onInstalledPaymentHandlerFound(long registrationId, GURL scope,
                @Nullable String name, @Nullable String userHint, @Nullable Bitmap icon,
                String[] methodNameArray, boolean explicitlyVerified, Object[] capabilities,
                String[] preferredRelatedApplications, Object supportedDelegations) {
            ThreadUtils.assertOnUiThread();

            WebContents webContents = mDelegate.getParams().getWebContents();
            ChromeActivity activity = ChromeActivity.fromWebContents(webContents);
            ServiceWorkerPaymentApp app = createInstalledServiceWorkerPaymentApp(webContents,
                    registrationId, scope, name, userHint, icon, methodNameArray,
                    explicitlyVerified, (ServiceWorkerPaymentApp.Capabilities[]) capabilities,
                    preferredRelatedApplications, (SupportedDelegations) supportedDelegations);
            // createInstalledServiceWorkerPaymentApp() returns null when the
            // activity is null or the scope URL is invalid, so after this point
            // 'activity' is known to be non-null.
            if (app == null) return;

            mPaymentHandlerWithMatchingMethodFound = true;

            mNumberOfPendingCanMakePaymentEvents++;
            ServiceWorkerPaymentAppBridge.CanMakePaymentEventCallback canMakePaymentEventCallback =
                    new ServiceWorkerPaymentAppBridge.CanMakePaymentEventCallback() {
                        @Override
                        public void onCanMakePaymentEventResponse(String errorMessage,
                                boolean canMakePayment, boolean readyForMinimalUI,
                                @Nullable String accountBalance) {
                            // The app is surfaced only if it reported it can pay.
                            if (canMakePayment) mDelegate.onPaymentAppCreated(app);
                            if (!TextUtils.isEmpty(errorMessage)) {
                                mDelegate.onPaymentAppCreationError(errorMessage);
                            }
                            app.setIsReadyForMinimalUI(readyForMinimalUI);
                            app.setAccountBalance(accountBalance);
                            // Last pending response after native finished => wrap up.
                            if (--mNumberOfPendingCanMakePaymentEvents == 0
                                    && mDoneCreatingPaymentApps) {
                                notifyFinished();
                            }
                        }
                    };

            // Skip firing the real event and assume "can pay" when: forced by
            // test hook, in incognito (avoid leaking the event to the service
            // worker), all requested methods are standardized, or the handler
            // was not explicitly verified.
            if (sCanMakePaymentForTesting || activity.getCurrentTabModel().isIncognito()
                    || mStandardizedPaymentMethods.containsAll(Arrays.asList(methodNameArray))
                    || !explicitlyVerified) {
                canMakePaymentEventCallback.onCanMakePaymentEventResponse(/*errorMessage=*/null,
                        /*canMakePayment=*/true, /*readyForMinimalUI=*/false,
                        /*accountBalance=*/null);
                return;
            }

            // Only forward method data / modifiers the app actually supports.
            Set<PaymentMethodData> supportedRequestedMethodData = new HashSet<>();
            for (String methodName : methodNameArray) {
                if (mDelegate.getParams().getMethodData().containsKey(methodName)) {
                    supportedRequestedMethodData.add(
                            mDelegate.getParams().getMethodData().get(methodName));
                }
            }

            Set<PaymentDetailsModifier> supportedRequestedModifiers = new HashSet<>();
            for (String methodName : methodNameArray) {
                if (mDelegate.getParams().getModifiers().containsKey(methodName)) {
                    supportedRequestedModifiers.add(
                            mDelegate.getParams().getModifiers().get(methodName));
                }
            }

            ServiceWorkerPaymentAppBridge.fireCanMakePaymentEvent(webContents, registrationId,
                    scope, mDelegate.getParams().getId(),
                    mDelegate.getParams().getTopLevelOrigin(),
                    mDelegate.getParams().getPaymentRequestOrigin(),
                    supportedRequestedMethodData.toArray(new PaymentMethodData[0]),
                    supportedRequestedModifiers.toArray(new PaymentDetailsModifier[0]),
                    // Total currency is only passed when the minimal-UI feature is on.
                    ChromeFeatureList.isEnabled(ChromeFeatureList.WEB_PAYMENTS_MINIMAL_UI)
                            ? mDelegate.getParams().getTotalAmountCurrency()
                            : null,
                    canMakePaymentEventCallback);
        }

        /** Called when an installable payment handler is found. */
        @CalledByNative("PaymentAppServiceCallback")
        private void onInstallablePaymentHandlerFound(@Nullable String name, GURL swUrl,
                GURL scope, boolean useCache, @Nullable Bitmap icon, String methodName,
                String[] preferredRelatedApplications, Object supportedDelegations) {
            ThreadUtils.assertOnUiThread();

            ServiceWorkerPaymentApp installableApp = createInstallableServiceWorkerPaymentApp(
                    mDelegate.getParams().getWebContents(), name, swUrl, scope, useCache, icon,
                    methodName, preferredRelatedApplications,
                    (SupportedDelegations) supportedDelegations);
            if (installableApp == null) return;

            // Installable apps skip the canMakePayment event entirely and are
            // surfaced immediately.
            mDelegate.onPaymentAppCreated(installableApp);
            mPaymentHandlerWithMatchingMethodFound = true;
            if (mNumberOfPendingCanMakePaymentEvents == 0 && mDoneCreatingPaymentApps) {
                notifyFinished();
            }
        }

        /**
         * Called when an error has occurred.
         * @param errorMessage Developer facing error message.
         */
        @CalledByNative("PaymentAppServiceCallback")
        private void onPaymentAppCreationError(String errorMessage) {
            ThreadUtils.assertOnUiThread();
            mDelegate.onPaymentAppCreationError(errorMessage);
        }

        /**
         * Called when the factory is finished creating payment apps. Expects to be called exactly
         * once and after all onPaymentAppCreated() calls.
         */
        @CalledByNative("PaymentAppServiceCallback")
        private void onDoneCreatingPaymentApps() {
            ThreadUtils.assertOnUiThread();
            mDoneCreatingPaymentApps = true;
            // If canMakePayment events are still in flight, the last response
            // (see canMakePaymentEventCallback above) triggers notifyFinished().
            if (mNumberOfPendingCanMakePaymentEvents == 0) {
                notifyFinished();
            }
        }

        /**
         * Signal completion of payment app lookup.
         */
        private void notifyFinished() {
            assert mDoneCreatingPaymentApps;
            assert mNumberOfPendingCanMakePaymentEvents == 0;

            mDelegate.onCanMakePaymentCalculated(mPaymentHandlerWithMatchingMethodFound);
            mDelegate.onDoneCreatingPaymentApps(PaymentAppServiceBridge.this);
        }
    }

    // Helper for native: allocates a typed Capabilities array behind an Object[].
    @CalledByNative
    private static Object[] createCapabilities(int count) {
        return new ServiceWorkerPaymentApp.Capabilities[count];
    }

    // Helper for native: fills one slot of the array created above.
    @CalledByNative
    private static void addCapabilities(
            Object[] capabilities, int index, int[] supportedCardNetworks) {
        assert index < capabilities.length;
        capabilities[index] = new ServiceWorkerPaymentApp.Capabilities(supportedCardNetworks);
    }

    // Helper for native: bundles the four delegation flags into one object.
    @CalledByNative
    private static Object createSupportedDelegations(
            boolean shippingAddress, boolean payerName, boolean payerPhone, boolean payerEmail) {
        return new SupportedDelegations(shippingAddress, payerName, payerPhone, payerEmail);
    }

    /**
     * Builds a ServiceWorkerPaymentApp for an already-installed handler.
     * Returns null when no ChromeActivity is attached to the WebContents or
     * the service-worker scope is not a valid URL.
     */
    private static @Nullable ServiceWorkerPaymentApp createInstalledServiceWorkerPaymentApp(
            WebContents webContents, long registrationId, GURL scope, @Nullable String name,
            @Nullable String userHint, @Nullable Bitmap icon, String[] methodNameArray,
            boolean explicitlyVerified, ServiceWorkerPaymentApp.Capabilities[] capabilities,
            String[] preferredRelatedApplications, SupportedDelegations supportedDelegations) {
        ChromeActivity activity = ChromeActivity.fromWebContents(webContents);
        if (activity == null) return null;

        if (!UrlUtils.isURLValid(scope)) {
            Log.e(TAG, "service worker scope is not a valid URL");
            return null;
        }

        return new ServiceWorkerPaymentApp(webContents, registrationId, scope, name, userHint,
                icon == null ? null : new BitmapDrawable(activity.getResources(), icon),
                methodNameArray, capabilities, preferredRelatedApplications, supportedDelegations);
    }

    /**
     * Builds a ServiceWorkerPaymentApp for a just-in-time installable handler.
     * Returns null when no ChromeActivity is attached or either the install
     * URL or the scope is not a valid URL.
     */
    private static @Nullable ServiceWorkerPaymentApp createInstallableServiceWorkerPaymentApp(
            WebContents webContents, @Nullable String name, GURL swUrl, GURL scope,
            boolean useCache, @Nullable Bitmap icon, String methodName,
            String[] preferredRelatedApplications, SupportedDelegations supportedDelegations) {
        Context context = ChromeActivity.fromWebContents(webContents);
        if (context == null) return null;

        if (!UrlUtils.isURLValid(swUrl)) {
            Log.e(TAG, "service worker installation url is not a valid URL");
            return null;
        }
        if (!UrlUtils.isURLValid(scope)) {
            Log.e(TAG, "service worker scope is not a valid URL");
            return null;
        }

        return new ServiceWorkerPaymentApp(webContents, name, swUrl, scope, useCache,
                icon == null ? null : new BitmapDrawable(context.getResources(), icon), methodName,
                preferredRelatedApplications, supportedDelegations);
    }

    @NativeMethods
    /* package */ interface Natives {
        void create(RenderFrameHost initiatorRenderFrameHost, String topOrigin,
                ByteBuffer[] methodData, boolean mayCrawlForInstallablePaymentApps,
                PaymentAppServiceCallback callback);
    }
}
package org.apache.tomcat.session.redis;

import java.io.IOException;
import java.security.Principal;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

import org.apache.catalina.Context;
import org.apache.catalina.ha.session.SerializablePrincipal;
import org.apache.catalina.realm.GenericPrincipal;
import org.apache.catalina.session.StandardSession;
import org.apache.juli.logging.Log;
import org.apache.juli.logging.LogFactory;

/**
 * {@link StandardSession} subclass that mirrors every session mutation into a
 * Redis hash (one hash per session, keyed by {@link #getSessionKey()}) so the
 * session state is shared across a Tomcat cluster. Redis failures are logged
 * and swallowed: the in-memory session stays authoritative.
 *
 * TODO: notes are not backed up
 *
 * @author xfacq
 */
public class RedisClusterSession extends StandardSession {

    private static final long serialVersionUID = -2518607181636076487L;

    private static final Log log = LogFactory.getLog(RedisClusterSession.class);

    public RedisClusterSession(RedisClusterSessionManager manager) {
        super(manager);
    }

    /** Serializer used to convert field/attribute values to and from Redis strings. */
    private RedisClusterSessionSerializer getSerializer() {
        return getManager().getSerializer();
    }

    @Override
    public RedisClusterSessionManager getManager() {
        return (RedisClusterSessionManager) manager;
    }

    @Override
    public void setId(String id, boolean notify) {
        super.setId(id, notify);
    }

    /**
     * Updates the creation time locally and pushes the three timestamp fields to
     * Redis in a single hmset. No-op when the time is unchanged or id is unset.
     */
    @Override
    public void setCreationTime(long time) {
        if (this.creationTime == time) return;
        super.setCreationTime(time);
        if (this.id != null) {
            try {
                Map<String, String> newMap = new HashMap<String, String>(3);
                newMap.put("session:creationTime", getSerializer().serialize(creationTime));
                newMap.put("session:lastAccessedTime", getSerializer().serialize(lastAccessedTime));
                newMap.put("session:thisAccessedTime", getSerializer().serialize(thisAccessedTime));
                getManager().getSessionOperator().hmset(getSessionKey(), newMap);
                if (getExpire() > 0) {
                    getManager().getSessionOperator().expire(getSessionKey(), getExpire());
                }
            } catch (Exception exception) {
                log.error("Cannot set Creation Time", exception);
            }
        }
    }

    /** Records an access locally, then refreshes the access timestamp and TTL in Redis. */
    @Override
    public void access() {
        super.access();
        if (this.id != null) {
            try {
                getManager().getSessionOperator().hset(getSessionKey(),
                        "session:thisAccessedTime", getSerializer().serialize(thisAccessedTime));
                if (getExpire() > 0) {
                    getManager().getSessionOperator().expire(getSessionKey(), getExpire());
                }
            } catch (Exception exception) {
                log.error("Cannot update access", exception);
            }
        }
    }

    @Override
    public void setMaxInactiveInterval(int interval) {
        if (this.maxInactiveInterval == interval) return;
        super.setMaxInactiveInterval(interval);
        if (this.id != null) {
            try {
                getManager().getSessionOperator().hset(getSessionKey(),
                        "session:maxInactiveInterval", getSerializer().serialize(maxInactiveInterval));
            } catch (Exception exception) {
                log.error("Cannot set Max Inactive Interval", exception);
            }
        }
    }

    @Override
    public void setValid(boolean isValid) {
        if (this.isValid == isValid) return;
        super.setValid(isValid);
        if (this.id != null) {
            try {
                getManager().getSessionOperator().hset(getSessionKey(),
                        "session:isValid", getSerializer().serialize(isValid));
            } catch (Exception exception) {
                log.error("Cannot set is valid", exception);
            }
        }
    }

    @Override
    public void setNew(boolean isNew) {
        if (this.isNew == isNew) return;
        super.setNew(isNew);
        if (this.id != null) {
            try {
                getManager().getSessionOperator().hset(getSessionKey(),
                        "session:isNew", getSerializer().serialize(isNew));
            } catch (Exception exception) {
                log.error("Cannot set is new", exception);
            }
        }
    }

    /** Ends the access locally, then pushes both access timestamps and refreshes the TTL. */
    @Override
    public void endAccess() {
        super.endAccess();
        if (this.id != null) {
            try {
                Map<String, String> newMap = new HashMap<String, String>(2);
                newMap.put("session:lastAccessedTime", getSerializer().serialize(lastAccessedTime));
                newMap.put("session:thisAccessedTime", getSerializer().serialize(thisAccessedTime));
                getManager().getSessionOperator().hmset(getSessionKey(), newMap);
                if (getExpire() > 0) {
                    getManager().getSessionOperator().expire(getSessionKey(), getExpire());
                }
            } catch (Exception exception) {
                log.error("Cannot set end access", exception);
            }
        }
    }

    /**
     * Reads an attribute directly from Redis (not from the local attribute map).
     * Returns null when the session has no id, the name is null, or on error.
     */
    @Override
    public Object getAttribute(String name) {
        if (this.id != null && name != null) {
            try {
                String value = getManager().getSessionOperator().hget(getSessionKey(), name);
                return getSerializer().deserialize(value);
            } catch (ClassNotFoundException | IOException exception) {
                log.error("Cannot get attribute", exception);
            }
        }
        return null;
    }

    /**
     * Enumerates the hash keys stored in Redis. NOTE(review): this includes the
     * internal "session:*" bookkeeping fields alongside user attributes — confirm
     * whether callers expect them filtered out.
     */
    @Override
    public Enumeration<String> getAttributeNames() {
        if (this.id != null) {
            try {
                return Collections.enumeration(getManager().getSessionOperator().hkeys(getSessionKey()));
            } catch (Exception exception) {
                log.error("Cannot get attribute names", exception);
            }
        }
        return Collections.emptyEnumeration();
    }

    /**
     * @return the Redis hash keys as an array, or null when the session has no id
     *         or the lookup fails (kept for backward compatibility with existing callers).
     */
    @Override
    public String[] getValueNames() {
        if (this.id != null) {
            try {
                Set<String> keys = getManager().getSessionOperator().hkeys(getSessionKey());
                return keys.toArray(new String[keys.size()]);
            } catch (Exception exception) {
                log.error("Cannot get value names", exception);
            }
        }
        return null;
    }

    /**
     * Sets the attribute locally, then writes it to Redis only when its serialized
     * form actually changed (avoids redundant hset round trips).
     */
    @Override
    public void setAttribute(String name, Object value, boolean notify) {
        // NOTE: Null value is the same as removeAttribute() - checked & called by super.setAttribute()
        // retrieve current value of this attribute before the local update overwrites it
        Object o = super.getAttribute(name);
        super.setAttribute(name, value, notify);
        if (this.id != null && name != null && value != null) {
            try {
                String outboundValue = getSerializer().serialize(o);
                String inboundValue = getSerializer().serialize(value);
                // Compare from the new value's side: inboundValue serializes a non-null
                // object, so this is NPE-safe even if serializing the (possibly null)
                // previous value produced null.
                if (!inboundValue.equals(outboundValue)) {
                    getManager().getSessionOperator().hset(getSessionKey(), name, inboundValue);
                }
            } catch (Exception exception) {
                log.error("Cannot set attribute", exception);
            }
        }
    }

    /** Removes the attribute locally, then deletes the corresponding Redis hash field. */
    @Override
    protected void removeAttributeInternal(String name, boolean notify) {
        super.removeAttributeInternal(name, notify);
        if (this.id != null && name != null) {
            try {
                getManager().getSessionOperator().hdel(getSessionKey(), name);
            } catch (Exception exception) {
                log.error("Cannot remove attribute", exception);
            }
        }
    }

    /**
     * Writes the whole session (bookkeeping fields + every local attribute) to
     * Redis in one hmset and refreshes the TTL.
     */
    protected void save() {
        if (this.id == null) return;
        try {
            Map<String, String> newMap = new HashMap<String, String>();
            newMap.put("session:creationTime", getSerializer().serialize(creationTime));
            newMap.put("session:lastAccessedTime", getSerializer().serialize(lastAccessedTime));
            newMap.put("session:thisAccessedTime", getSerializer().serialize(thisAccessedTime));
            newMap.put("session:maxInactiveInterval", getSerializer().serialize(maxInactiveInterval));
            newMap.put("session:isValid", getSerializer().serialize(isValid));
            newMap.put("session:isNew", getSerializer().serialize(isNew));
            newMap.put("session:authType", getSerializer().serialize(authType));
            // Principal is not serializable directly; wrap it the same way Tomcat's
            // clustering code does.
            newMap.put("session:principal", getSerializer().serialize(principal == null
                    ? null : SerializablePrincipal.createPrincipal((GenericPrincipal) principal)));
            for (Enumeration<String> e = super.getAttributeNames(); e.hasMoreElements();) {
                String key = e.nextElement();
                if (key == null) continue;
                Object o = super.getAttribute(key);
                newMap.put(key, getSerializer().serialize(o));
            }
            getManager().getSessionOperator().hmset(getSessionKey(), newMap);
            if (getExpire() > 0) {
                getManager().getSessionOperator().expire(getSessionKey(), getExpire());
            }
        } catch (Exception exception) {
            log.error("Cannot save", exception);
        }
    }

    /** Deletes the entire session hash from Redis. */
    protected void delete() {
        if (this.id == null) return;
        try {
            getManager().getSessionOperator().del(getSessionKey());
        } catch (Exception exception) {
            // Fixed: previously logged the copy-pasted message "Cannot set authType".
            log.error("Cannot delete session", exception);
        }
    }

    /**
     * Populates this session from a Redis snapshot. The "session:*" bookkeeping
     * entries are removed from the map and applied to the matching fields; every
     * remaining entry is installed as a regular attribute (without notification).
     */
    protected void load(Map<String, Object> attrs) {
        Long creationTime = (Long) attrs.remove("session:creationTime");
        if (creationTime != null) {
            this.creationTime = creationTime;
        }
        Long lastAccessedTime = (Long) attrs.remove("session:lastAccessedTime");
        if (lastAccessedTime != null) {
            this.lastAccessedTime = lastAccessedTime;
        }
        Long thisAccessedTime = (Long) attrs.remove("session:thisAccessedTime");
        if (thisAccessedTime != null) {
            this.thisAccessedTime = thisAccessedTime;
        }
        Integer maxInactiveInterval = (Integer) attrs.remove("session:maxInactiveInterval");
        if (maxInactiveInterval != null) {
            this.maxInactiveInterval = maxInactiveInterval;
        }
        Boolean isValid = (Boolean) attrs.remove("session:isValid");
        if (isValid != null) {
            this.isValid = isValid;
        }
        Boolean isNew = (Boolean) attrs.remove("session:isNew");
        if (isNew != null) {
            this.isNew = isNew;
        }
        String authType = (String) attrs.remove("session:authType");
        this.authType = authType;
        SerializablePrincipal principal = (SerializablePrincipal) attrs.remove("session:principal");
        this.principal = principal == null ? null : principal.getPrincipal();
        for (Entry<String, Object> entry : attrs.entrySet()) {
            // NOTE(review): setAttribute() writes back to Redis when the serialized
            // value differs — presumably a harmless no-op during load; confirm.
            setAttribute(entry.getKey(), entry.getValue(), false);
        }
    }

    /** @return the session TTL in seconds derived from the context timeout, never negative. */
    private int getExpire() {
        int expire = ((Context) manager.getContainer()).getSessionTimeout() * 60;
        return expire > 0 ? expire : 0;
    }

    /** @return the Redis key under which this session's hash is stored. */
    protected String getSessionKey() {
        return RedisClusterSessionManager.buildSessionKey(this);
    }

    @Override
    public void setAuthType(String authType) {
        if (this.authType != null && this.authType.equals(authType)) return;
        super.setAuthType(authType);
        if (this.id != null) {
            try {
                getManager().getSessionOperator().hset(getSessionKey(),
                        "session:authType", getSerializer().serialize(authType));
            } catch (Exception exception) {
                log.error("Cannot set authType", exception);
            }
        }
    }

    @Override
    public void setPrincipal(Principal principal) {
        if (this.principal != null && this.principal.equals(principal)) return;
        super.setPrincipal(principal);
        if (this.id != null) {
            try {
                getManager().getSessionOperator().hset(getSessionKey(), "session:principal",
                        getSerializer().serialize(principal == null
                                ? null
                                : SerializablePrincipal.createPrincipal((GenericPrincipal) principal)));
            } catch (Exception exception) {
                log.error("Cannot set principal", exception);
            }
        }
    }
}
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.gofannon.recalboxpatcher.patcher.patch.image;

import io.gofannon.recalboxpatcher.patcher.model.image.FileImageRepository;
import io.gofannon.recalboxpatcher.patcher.model.image.NamedImage;
import javafx.scene.image.Image;
import org.junit.*;
import org.junit.rules.TemporaryFolder;

import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;

import static org.assertj.core.api.Assertions.*;
import static io.gofannon.recalboxpatcher.patcher.TestResourceHelper.*;

/**
 * Unit tests for {@link FileImageRepository}: directory validation,
 * image enumeration, and the addImages()/addAllImages() write paths.
 */
public class FileImageRepositoryTest {

    FileImageRepository repository;

    @Rule
    public TemporaryFolder rootFolder = new TemporaryFolder();

    @Before
    public void setUp() throws Exception {
        repository = new FileImageRepository();
    }

    @After
    public void tearDown() throws Exception {
    }

    @Test(expected = IllegalArgumentException.class)
    public void setDirectory_shallNotAcceptFileArgument() throws Exception {
        File file = rootFolder.newFile();
        repository.setDirectory(file);
    }

    @Test(expected = IllegalArgumentException.class)
    public void setDirectory_shallNotAcceptNotExistingDirectory() throws Exception {
        File notExistingDirectory = new File(rootFolder.getRoot(), "/sample/");
        repository.setDirectory(notExistingDirectory);
    }

    @Test(expected = NullPointerException.class)
    public void setDirectory_shallNotAcceptNullArgument() throws Exception {
        repository.setDirectory(null);
    }

    // Fixed: method name had a trailing typo ("...Directoryl").
    @Test
    public void setDirectory_shallAcceptEmptyDirectory() throws Exception {
        File existingDirectory = rootFolder.newFolder();
        repository.setDirectory(existingDirectory);
    }

    @Test
    public void setDirectory_shallAcceptDirectoryWithImageFiles() throws Exception {
        File directory = createDirectoryWithFiles("/image1.png", "/image2.png");
        repository.setDirectory(directory);
    }

    /** Creates a fresh temp directory containing copies of the given classpath resources. */
    private File createDirectoryWithFiles(String... resourceNames) throws IOException {
        File directory = rootFolder.newFolder();
        for (String resourceName : resourceNames) {
            addResourceToDirectory(resourceName, directory);
        }
        return directory;
    }

    @Test
    public void setDirectory_shallAcceptDirectoryWithNoImageFiles() throws Exception {
        File directory = createDirectoryWithFiles("/style.css", "/text-file.txt");
        repository.setDirectory(directory);
    }

    @Test
    public void setDirectory_shallAcceptDirectoryWithMixImageAndNoImageFiles() throws Exception {
        File directory = createDirectoryWithFiles("/style.css", "/text-file.txt", "/image1.png", "/image2.png");
        repository.setDirectory(directory);
    }

    @Test
    public void getAllImages_shallReturnEmptyListWhenNoFiles() throws Exception {
        File existingDirectory = rootFolder.newFolder();
        repository.setDirectory(existingDirectory);

        assertThat(repository.getAllImages()).isEmpty();
    }

    @Test
    public void getAllImages_shallReturnEmptyListWhenNoImageFiles() throws Exception {
        File directory = createDirectoryWithFiles("/style.css", "/text-file.txt");
        repository.setDirectory(directory);

        assertThat(repository.getAllImages()).isEmpty();
    }

    @Test
    public void getAllImages_shallReturnAllImagesInDirectoryWhenOnlyImageFiles() throws Exception {
        File directory = createDirectoryWithFiles("/image1.png", "/image2.png");
        repository.setDirectory(directory);

        assertThat(repository.getAllImages()).hasSize(2);
        assertThat(new File(directory, "image1.png"))
                .exists()
                .isFile();
        assertThat(new File(directory, "image2.png"))
                .exists()
                .isFile();
    }

    @Test
    public void getAllImages_shallReturnAllImagesInDirectoryWhenMixOfImageAndNoImageFiles() throws Exception {
        File directory = createDirectoryWithFiles("/style.css", "/text-file.txt", "/image1.png", "/image2.png");
        repository.setDirectory(directory);

        assertThat(repository.getAllImages()).hasSize(2);
        assertThat(new File(directory, "image1.png"))
                .exists()
                .isFile();
        assertThat(new File(directory, "image2.png"))
                .exists()
                .isFile();
    }

    @Test
    public void addImages_shallSupportEmptyImageList() throws Exception {
        File directory = createDirectoryWithFiles("/style.css", "/image1.png");
        repository.setDirectory(directory);
        assertThat(repository.getAllImages()).hasSize(1);

        repository.addImages();

        assertThat(repository.getAllImages()).hasSize(1);
    }

    @Test
    public void addImages_shallAcceptSingleImage() throws Exception {
        File directory = createDirectoryWithFiles("/style.css", "/image1.png");
        repository.setDirectory(directory);
        assertThat(repository.getAllImages()).hasSize(1);

        NamedImage namedImage2 = createNamedImage("/image2.png");
        repository.addImages(namedImage2);

        assertThat(repository.getAllImages()).hasSize(2);
        assertThat(new File(directory, "image1.png"))
                .exists()
                .isFile();
        assertThat(new File(directory, "image2.png"))
                .exists()
                .isFile();
    }

    /** Loads a classpath image resource into a NamedImage keyed by its file name. */
    private NamedImage createNamedImage(String resourcePath) throws Exception {
        String name = extractResourceFilename(resourcePath);
        Image image = getImage(resourcePath);
        return new NamedImage(name, image);
    }

    @Test
    public void addImages_shallAcceptSeveralImages() throws Exception {
        File directory = createDirectoryWithFiles("/style.css");
        repository.setDirectory(directory);
        assertThat(repository.getAllImages()).hasSize(0);

        NamedImage namedImage1 = createNamedImage("/image1.png");
        NamedImage namedImage2 = createNamedImage("/image2.png");
        repository.addImages(namedImage1, namedImage2);

        assertThat(repository.getAllImages()).hasSize(2);
        assertThat(new File(directory, "image1.png"))
                .exists()
                .isFile();
        assertThat(new File(directory, "image2.png"))
                .exists()
                .isFile();
    }

    @Test(expected = NullPointerException.class)
    public void addAllImages_shallNotAcceptNullCollection() throws Exception {
        File directory = rootFolder.newFolder();
        repository.setDirectory(directory);

        repository.addAllImages(null);
    }

    @Test
    public void addAllImages_shallAcceptEmptyCollection() throws Exception {
        File directory = rootFolder.newFolder();
        repository.setDirectory(directory);
        assertThat(repository.getAllImages()).hasSize(0);

        repository.addAllImages(Collections.emptyList());

        assertThat(repository.getAllImages()).hasSize(0);
    }

    @Test
    public void addAllImages_shallAcceptSingleton() throws Exception {
        File directory = rootFolder.newFolder();
        repository.setDirectory(directory);
        assertThat(repository.getAllImages()).hasSize(0);

        NamedImage namedImage1 = createNamedImage("/image1.png");
        // Fixed: previously called addImages(), leaving addAllImages() untested.
        repository.addAllImages(Collections.singletonList(namedImage1));

        assertThat(repository.getAllImages()).hasSize(1);
        assertThat(new File(directory, "image1.png"))
                .exists()
                .isFile();
    }

    @Test
    public void addAllImages_shallAcceptSeveralImages() throws Exception {
        File directory = rootFolder.newFolder();
        repository.setDirectory(directory);
        assertThat(repository.getAllImages()).hasSize(0);

        NamedImage namedImage1 = createNamedImage("/image1.png");
        NamedImage namedImage2 = createNamedImage("/image2.png");
        // Fixed: previously called addImages(), leaving addAllImages() untested.
        repository.addAllImages(Arrays.asList(namedImage1, namedImage2));

        assertThat(repository.getAllImages()).hasSize(2);
        assertThat(new File(directory, "image1.png"))
                .exists()
                .isFile();
        assertThat(new File(directory, "image2.png"))
                .exists()
                .isFile();
    }
}
/* ======================================================================== * PlantUML : a free UML diagram generator * ======================================================================== * * (C) Copyright 2009-2020, Arnaud Roques * * Project Info: https://plantuml.com * * If you like this project or if you find it useful, you can support us at: * * https://plantuml.com/patreon (only 1$ per month!) * https://plantuml.com/paypal * * This file is part of PlantUML. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
 *
 *
 * Original Author:  Arnaud Roques
 */
package net.sourceforge.plantuml.ftp;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import net.sourceforge.plantuml.BlockUml;
import net.sourceforge.plantuml.FileFormat;
import net.sourceforge.plantuml.FileFormatOption;
import net.sourceforge.plantuml.SourceStringReader;
import net.sourceforge.plantuml.core.DiagramDescription;

/**
 * Per-user state of one FTP connection to the PlantUML FTP server.
 * Tracks uploaded diagram sources ({@code incoming}), rendered results
 * ({@code outgoing}) and names of results announced but not yet rendered
 * ({@code futureOutgoing}). File names are relative: a leading "/" is rejected.
 *
 * NOTE(review): most methods synchronize on {@code this}, but {@code fileFormat}
 * is read/written by unsynchronized methods — confirm this is acceptable here.
 */
public class FtpConnexion {

	// FTP login this connection belongs to (kept for identification; not read here).
	private final String user;
	// fileName -> uploaded diagram source text
	private final Map<String, String> incoming = new HashMap<String, String>();
	// fileName -> rendered diagram bytes
	private final Map<String, byte[]> outgoing = new HashMap<String, byte[]>();
	// names of output files promised but not produced yet
	private final Set<String> futureOutgoing = new HashSet<>();
	// output format used for rendering; mutable via setFileFormat()
	private FileFormat fileFormat;

	public FtpConnexion(String user, FileFormat defaultfileFormat) {
		this.user = user;
		this.fileFormat = defaultfileFormat;
	}

	/** Registers an uploaded source file. Rejects absolute names. */
	public synchronized void addIncoming(String fileName, String data) {
		if (fileName.startsWith("/")) {
			throw new IllegalArgumentException();
		}
		incoming.put(fileName, data);
	}

	/** Marks fileName as "will be produced": drops any stale output for it. */
	public synchronized void futureOutgoing(String fileName) {
		outgoing.remove(fileName);
		futureOutgoing.add(fileName);
	}

	/** @return all known file names (incoming, outgoing and promised), unmodifiable. */
	public synchronized Collection<String> getFiles() {
		final List<String> result = new ArrayList<>(incoming.keySet());
		result.addAll(outgoing.keySet());
		result.addAll(futureOutgoing);
		return Collections.unmodifiableCollection(result);
	}

	/** @return true if the file exists now or has been promised via futureOutgoing(). */
	public synchronized boolean willExist(String fileName) {
		if (incoming.containsKey(fileName)) {
			return true;
		}
		if (outgoing.containsKey(fileName)) {
			return true;
		}
		if (futureOutgoing.contains(fileName)) {
			return true;
		}
		return false;
	}

	/** @return true if the file's content is available right now. */
	public synchronized boolean doesExist(String fileName) {
		if (incoming.containsKey(fileName)) {
			return true;
		}
		if (outgoing.containsKey(fileName)) {
			return true;
		}
		return false;
	}

	/**
	 * Returns the bytes of an incoming source or a rendered output.
	 * When the output is not ready yet, returns a 1-byte placeholder instead of
	 * blocking (the former polling loop is kept below, commented out).
	 */
	public synchronized byte[] getData(String fileName) throws InterruptedException {
		if (fileName.startsWith("/")) {
			throw new IllegalArgumentException();
		}
		final String data = incoming.get(fileName);
		if (data != null) {
			// NOTE(review): uses the platform default charset — confirm intended.
			return data.getBytes();
		}
		// do {
		// if (willExist(fileName) == false) {
		// return null;
		// }
		final byte data2[] = outgoing.get(fileName);
		if (data2 == null) {
			return new byte[1];
		}
		// if (data2 != null) {
		return data2;
		// }
		// Thread.sleep(200L);
		// } while (true);
	}

	/** @return the size in characters/bytes of the named file, or 0 if unknown. */
	public synchronized int getSize(String fileName) {
		if (fileName.startsWith("/")) {
			throw new IllegalArgumentException();
		}
		final String data = incoming.get(fileName);
		if (data != null) {
			// NOTE(review): character count, not encoded byte count — may differ for non-ASCII.
			return data.length();
		}
		final byte data2[] = outgoing.get(fileName);
		if (data2 != null) {
			return data2.length;
		}
		return 0;
	}

	/**
	 * Renders the uploaded source into the current file format and publishes the
	 * result under the derived output name. On rendering error, an additional
	 * ".err" ATXT file is published; if anything fails, an empty output file is
	 * stored so clients never wait forever.
	 */
	public void processImage(String fileName) throws IOException {
		if (fileName.startsWith("/")) {
			throw new IllegalArgumentException();
		}
		final String pngFileName = getFutureFileName(fileName);
		boolean done = false;
		try {
			final SourceStringReader sourceStringReader = new SourceStringReader(incoming.get(fileName));
			final ByteArrayOutputStream baos = new ByteArrayOutputStream();
			final FileFormat format = getFileFormat();
			final DiagramDescription desc = sourceStringReader.generateDiagramDescription(new FileFormatOption(format));
			final List<BlockUml> blocks = sourceStringReader.getBlocks();
			if (blocks.size() > 0) {
				// Only the first diagram block of the source is exported.
				blocks.get(0).getDiagram().exportDiagram(baos, 0, new FileFormatOption(format));
			}
			// NOTE(review): assumes the output name ends in a 4-char extension (e.g. ".png").
			final String errorFileName = pngFileName.substring(0, pngFileName.length() - 4) + ".err";
			synchronized (this) {
				outgoing.remove(pngFileName);
				futureOutgoing.remove(pngFileName);
				outgoing.remove(errorFileName);
				if (desc != null && desc.getDescription() != null) {
					outgoing.put(pngFileName, baos.toByteArray());
					done = true;
					if (desc.getDescription().startsWith("(Error)")) {
						final ByteArrayOutputStream errBaos = new ByteArrayOutputStream();
						sourceStringReader.outputImage(errBaos, new FileFormatOption(FileFormat.ATXT));
						errBaos.close();
						outgoing.put(errorFileName, errBaos.toByteArray());
					}
				}
			}
		} finally {
			if (done == false) {
				// Publish an empty file so getData() callers get a result, not a hang.
				outgoing.put(pngFileName, new byte[0]);
			}
		}
	}

	/** @return the output name derived from fileName for the current format. */
	public String getFutureFileName(String fileName) {
		return getFileFormat().changeName(fileName, 0);
	}

	private FileFormat getFileFormat() {
		return fileFormat;
	}

	/**
	 * Deletes one file ("*" wipes everything). NOTE(review): single-file deletion
	 * also adds the name to futureOutgoing — presumably to keep it listed as
	 * pending for polling clients; confirm this is intentional.
	 */
	public synchronized void delete(String fileName) {
		if (fileName.contains("*")) {
			incoming.clear();
			outgoing.clear();
			futureOutgoing.clear();
		} else {
			incoming.remove(fileName);
			outgoing.remove(fileName);
			futureOutgoing.add(fileName);
		}
	}

	/** Switches the rendering format for subsequent processImage() calls. */
	public void setFileFormat(FileFormat fileFormat) {
		this.fileFormat = fileFormat;
	}

}
/**
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for
 * license information.
 *
 * Code generated by Microsoft (R) AutoRest Code Generator.
 */

package com.microsoft.azure.management.network.v2019_09_01;

import com.microsoft.azure.arm.model.HasInner;
import com.microsoft.azure.management.network.v2019_09_01.implementation.ExpressRouteCrossConnectionPeeringInner;
import com.microsoft.azure.arm.model.Indexable;
import com.microsoft.azure.arm.model.Refreshable;
import com.microsoft.azure.arm.model.Updatable;
import com.microsoft.azure.arm.model.Appliable;
import com.microsoft.azure.arm.model.Creatable;
import com.microsoft.azure.arm.resources.models.HasManager;
import com.microsoft.azure.management.network.v2019_09_01.implementation.NetworkManager;

/**
 * Type representing ExpressRouteCrossConnectionPeering.
 *
 * NOTE: this file is generated by AutoRest (see header) — regenerate rather
 * than hand-edit. Getters returning wrapper types (Integer/Long) may yield
 * null when the service omits the field — presumably; confirm against the
 * inner model.
 */
public interface ExpressRouteCrossConnectionPeering extends HasInner<ExpressRouteCrossConnectionPeeringInner>, Indexable, Refreshable<ExpressRouteCrossConnectionPeering>, Updatable<ExpressRouteCrossConnectionPeering.Update>, HasManager<NetworkManager> {
    /**
     * @return the azureASN value.
     */
    Integer azureASN();

    /**
     * @return the etag value.
     */
    String etag();

    /**
     * @return the gatewayManagerEtag value.
     */
    String gatewayManagerEtag();

    /**
     * @return the id value.
     */
    String id();

    /**
     * @return the ipv6PeeringConfig value.
     */
    Ipv6ExpressRouteCircuitPeeringConfig ipv6PeeringConfig();

    /**
     * @return the lastModifiedBy value.
     */
    String lastModifiedBy();

    /**
     * @return the microsoftPeeringConfig value.
     */
    ExpressRouteCircuitPeeringConfig microsoftPeeringConfig();

    /**
     * @return the name value.
     */
    String name();

    /**
     * @return the peerASN value.
     */
    Long peerASN();

    /**
     * @return the peeringType value.
     */
    ExpressRoutePeeringType peeringType();

    /**
     * @return the primaryAzurePort value.
     */
    String primaryAzurePort();

    /**
     * @return the primaryPeerAddressPrefix value.
     */
    String primaryPeerAddressPrefix();

    /**
     * @return the provisioningState value.
     */
    ProvisioningState provisioningState();

    /**
     * @return the secondaryAzurePort value.
     */
    String secondaryAzurePort();

    /**
     * @return the secondaryPeerAddressPrefix value.
     */
    String secondaryPeerAddressPrefix();

    /**
     * @return the sharedKey value.
     */
    String sharedKey();

    /**
     * @return the state value.
     */
    ExpressRoutePeeringState state();

    /**
     * @return the vlanId value.
     */
    Integer vlanId();

    /**
     * The entirety of the ExpressRouteCrossConnectionPeering definition.
     */
    interface Definition extends DefinitionStages.Blank, DefinitionStages.WithExpressRouteCrossConnection, DefinitionStages.WithCreate {
    }

    /**
     * Grouping of ExpressRouteCrossConnectionPeering definition stages.
     */
    interface DefinitionStages {
        /**
         * The first stage of a ExpressRouteCrossConnectionPeering definition.
         */
        interface Blank extends WithExpressRouteCrossConnection {
        }

        /**
         * The stage of the expressroutecrossconnectionpeering definition allowing to specify ExpressRouteCrossConnection.
         */
        interface WithExpressRouteCrossConnection {
           /**
            * Specifies resourceGroupName, crossConnectionName.
            * @param resourceGroupName The name of the resource group
            * @param crossConnectionName The name of the ExpressRouteCrossConnection
            * @return the next definition stage
            */
            WithCreate withExistingExpressRouteCrossConnection(String resourceGroupName, String crossConnectionName);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering definition allowing to specify GatewayManagerEtag.
         */
        interface WithGatewayManagerEtag {
            /**
             * Specifies gatewayManagerEtag.
             * @param gatewayManagerEtag The GatewayManager Etag
             * @return the next definition stage
             */
            WithCreate withGatewayManagerEtag(String gatewayManagerEtag);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering definition allowing to specify Id.
         */
        interface WithId {
            /**
             * Specifies id.
             * @param id Resource ID
             * @return the next definition stage
             */
            WithCreate withId(String id);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering definition allowing to specify Ipv6PeeringConfig.
         */
        interface WithIpv6PeeringConfig {
            /**
             * Specifies ipv6PeeringConfig.
             * @param ipv6PeeringConfig The IPv6 peering configuration
             * @return the next definition stage
             */
            WithCreate withIpv6PeeringConfig(Ipv6ExpressRouteCircuitPeeringConfig ipv6PeeringConfig);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering definition allowing to specify MicrosoftPeeringConfig.
         */
        interface WithMicrosoftPeeringConfig {
            /**
             * Specifies microsoftPeeringConfig.
             * @param microsoftPeeringConfig The Microsoft peering configuration
             * @return the next definition stage
             */
            WithCreate withMicrosoftPeeringConfig(ExpressRouteCircuitPeeringConfig microsoftPeeringConfig);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering definition allowing to specify Name.
         */
        interface WithName {
            /**
             * Specifies name.
             * @param name The name of the resource that is unique within a resource group. This name can be used to access the resource
             * @return the next definition stage
             */
            WithCreate withName(String name);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering definition allowing to specify PeerASN.
         */
        interface WithPeerASN {
            /**
             * Specifies peerASN.
             * @param peerASN The peer ASN
             * @return the next definition stage
             */
            WithCreate withPeerASN(Long peerASN);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering definition allowing to specify PeeringType.
         */
        interface WithPeeringType {
            /**
             * Specifies peeringType.
             * @param peeringType The peering type. Possible values include: 'AzurePublicPeering', 'AzurePrivatePeering', 'MicrosoftPeering'
             * @return the next definition stage
             */
            WithCreate withPeeringType(ExpressRoutePeeringType peeringType);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering definition allowing to specify PrimaryPeerAddressPrefix.
         */
        interface WithPrimaryPeerAddressPrefix {
            /**
             * Specifies primaryPeerAddressPrefix.
             * @param primaryPeerAddressPrefix The primary address prefix
             * @return the next definition stage
             */
            WithCreate withPrimaryPeerAddressPrefix(String primaryPeerAddressPrefix);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering definition allowing to specify SecondaryPeerAddressPrefix.
         */
        interface WithSecondaryPeerAddressPrefix {
            /**
             * Specifies secondaryPeerAddressPrefix.
             * @param secondaryPeerAddressPrefix The secondary address prefix
             * @return the next definition stage
             */
            WithCreate withSecondaryPeerAddressPrefix(String secondaryPeerAddressPrefix);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering definition allowing to specify SharedKey.
         */
        interface WithSharedKey {
            /**
             * Specifies sharedKey.
             * @param sharedKey The shared key
             * @return the next definition stage
             */
            WithCreate withSharedKey(String sharedKey);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering definition allowing to specify State.
         */
        interface WithState {
            /**
             * Specifies state.
             * @param state The peering state. Possible values include: 'Disabled', 'Enabled'
             * @return the next definition stage
             */
            WithCreate withState(ExpressRoutePeeringState state);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering definition allowing to specify VlanId.
         */
        interface WithVlanId {
            /**
             * Specifies vlanId.
             * @param vlanId The VLAN ID
             * @return the next definition stage
             */
            WithCreate withVlanId(Integer vlanId);
        }

        /**
         * The stage of the definition which contains all the minimum required inputs for
         * the resource to be created (via {@link WithCreate#create()}), but also allows
         * for any other optional settings to be specified.
         */
        interface WithCreate extends Creatable<ExpressRouteCrossConnectionPeering>, DefinitionStages.WithGatewayManagerEtag, DefinitionStages.WithId, DefinitionStages.WithIpv6PeeringConfig, DefinitionStages.WithMicrosoftPeeringConfig, DefinitionStages.WithName, DefinitionStages.WithPeerASN, DefinitionStages.WithPeeringType, DefinitionStages.WithPrimaryPeerAddressPrefix, DefinitionStages.WithSecondaryPeerAddressPrefix, DefinitionStages.WithSharedKey, DefinitionStages.WithState, DefinitionStages.WithVlanId {
        }
    }

    /**
     * The template for a ExpressRouteCrossConnectionPeering update operation, containing all the settings that can be modified.
     */
    interface Update extends Appliable<ExpressRouteCrossConnectionPeering>, UpdateStages.WithGatewayManagerEtag, UpdateStages.WithId, UpdateStages.WithIpv6PeeringConfig, UpdateStages.WithMicrosoftPeeringConfig, UpdateStages.WithName, UpdateStages.WithPeerASN, UpdateStages.WithPeeringType, UpdateStages.WithPrimaryPeerAddressPrefix, UpdateStages.WithSecondaryPeerAddressPrefix, UpdateStages.WithSharedKey, UpdateStages.WithState, UpdateStages.WithVlanId {
    }

    /**
     * Grouping of ExpressRouteCrossConnectionPeering update stages.
     */
    interface UpdateStages {
        /**
         * The stage of the expressroutecrossconnectionpeering update allowing to specify GatewayManagerEtag.
         */
        interface WithGatewayManagerEtag {
            /**
             * Specifies gatewayManagerEtag.
             * @param gatewayManagerEtag The GatewayManager Etag
             * @return the next update stage
             */
            Update withGatewayManagerEtag(String gatewayManagerEtag);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering update allowing to specify Id.
         */
        interface WithId {
            /**
             * Specifies id.
             * @param id Resource ID
             * @return the next update stage
             */
            Update withId(String id);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering update allowing to specify Ipv6PeeringConfig.
         */
        interface WithIpv6PeeringConfig {
            /**
             * Specifies ipv6PeeringConfig.
             * @param ipv6PeeringConfig The IPv6 peering configuration
             * @return the next update stage
             */
            Update withIpv6PeeringConfig(Ipv6ExpressRouteCircuitPeeringConfig ipv6PeeringConfig);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering update allowing to specify MicrosoftPeeringConfig.
         */
        interface WithMicrosoftPeeringConfig {
            /**
             * Specifies microsoftPeeringConfig.
             * @param microsoftPeeringConfig The Microsoft peering configuration
             * @return the next update stage
             */
            Update withMicrosoftPeeringConfig(ExpressRouteCircuitPeeringConfig microsoftPeeringConfig);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering update allowing to specify Name.
         */
        interface WithName {
            /**
             * Specifies name.
             * @param name The name of the resource that is unique within a resource group. This name can be used to access the resource
             * @return the next update stage
             */
            Update withName(String name);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering update allowing to specify PeerASN.
         */
        interface WithPeerASN {
            /**
             * Specifies peerASN.
             * @param peerASN The peer ASN
             * @return the next update stage
             */
            Update withPeerASN(Long peerASN);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering update allowing to specify PeeringType.
         */
        interface WithPeeringType {
            /**
             * Specifies peeringType.
             * @param peeringType The peering type. Possible values include: 'AzurePublicPeering', 'AzurePrivatePeering', 'MicrosoftPeering'
             * @return the next update stage
             */
            Update withPeeringType(ExpressRoutePeeringType peeringType);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering update allowing to specify PrimaryPeerAddressPrefix.
         */
        interface WithPrimaryPeerAddressPrefix {
            /**
             * Specifies primaryPeerAddressPrefix.
             * @param primaryPeerAddressPrefix The primary address prefix
             * @return the next update stage
             */
            Update withPrimaryPeerAddressPrefix(String primaryPeerAddressPrefix);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering update allowing to specify SecondaryPeerAddressPrefix.
         */
        interface WithSecondaryPeerAddressPrefix {
            /**
             * Specifies secondaryPeerAddressPrefix.
             * @param secondaryPeerAddressPrefix The secondary address prefix
             * @return the next update stage
             */
            Update withSecondaryPeerAddressPrefix(String secondaryPeerAddressPrefix);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering update allowing to specify SharedKey.
         */
        interface WithSharedKey {
            /**
             * Specifies sharedKey.
             * @param sharedKey The shared key
             * @return the next update stage
             */
            Update withSharedKey(String sharedKey);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering update allowing to specify State.
         */
        interface WithState {
            /**
             * Specifies state.
             * @param state The peering state. Possible values include: 'Disabled', 'Enabled'
             * @return the next update stage
             */
            Update withState(ExpressRoutePeeringState state);
        }

        /**
         * The stage of the expressroutecrossconnectionpeering update allowing to specify VlanId.
         */
        interface WithVlanId {
            /**
             * Specifies vlanId.
             * @param vlanId The VLAN ID
             * @return the next update stage
             */
            Update withVlanId(Integer vlanId);
        }
    }
}
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.percolator;

import org.apache.lucene.index.PrefixCodedTerms;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.BlendedTermQuery;
import org.apache.lucene.queries.CommonTermsQuery;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanFirstQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanNotQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.lucene.search.MatchNoDocsQuery;
import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;

/**
 * Extracts the terms a percolator query needs in order to pre-filter candidate
 * percolator queries against a document's terms.
 */
public final class QueryAnalyzer {

    /**
     * Dispatch table: maps a concrete {@link Query} class to the function that
     * extracts terms from instances of that class. Populated once at class-load
     * time and then immutable.
     */
    private static final Map<Class<? extends Query>, Function<Query, Result>> queryProcessors;

    static {
        Map<Class<? extends Query>, Function<Query, Result>> map = new HashMap<>();
        map.put(MatchNoDocsQuery.class, matchNoDocsQuery());
        map.put(ConstantScoreQuery.class, constantScoreQuery());
        map.put(BoostQuery.class, boostQuery());
        map.put(TermQuery.class, termQuery());
        map.put(TermsQuery.class, termsQuery());
        map.put(CommonTermsQuery.class, commonTermsQuery());
        map.put(BlendedTermQuery.class, blendedTermQuery());
        map.put(PhraseQuery.class, phraseQuery());
        map.put(SpanTermQuery.class, spanTermQuery());
        map.put(SpanNearQuery.class, spanNearQuery());
        map.put(SpanOrQuery.class, spanOrQuery());
        map.put(SpanFirstQuery.class, spanFirstQuery());
        map.put(SpanNotQuery.class, spanNotQuery());
        map.put(BooleanQuery.class, booleanQuery());
        map.put(DisjunctionMaxQuery.class, disjunctionMaxQuery());
        map.put(SynonymQuery.class, synonymQuery());
        map.put(FunctionScoreQuery.class, functionScoreQuery());
        queryProcessors = Collections.unmodifiableMap(map);
    }

    // Utility class: no instances.
    private QueryAnalyzer() {
    }

    /**
     * Extracts terms from the provided query. These terms are stored with the percolator query and
     * used by the percolate query's candidate query as fields to be query by. The candidate query
     * holds the terms from the document to be percolated and allows to the percolate query to ignore
     * percolator queries that we know would otherwise never match.
     *
     * <p>
     * When extracting the terms for the specified query, we can also determine if the percolator query is
     * always going to match. For example if a percolator query just contains a term query or a disjunction
     * query then when the candidate query matches with that, we know the entire percolator query always
     * matches. This allows the percolate query to skip the expensive memory index verification step that
     * it would otherwise have to execute (for example when a percolator query contains a phrase query or a
     * conjunction query).
     *
     * <p>
     * The query analyzer doesn't always extract all terms from the specified query. For example from a
     * boolean query with no should clauses or phrase queries only the longest term are selected,
     * since that those terms are likely to be the rarest. Boolean query's must_not clauses are always ignored.
     *
     * <p>
     * Sometimes the query analyzer can't always extract terms from a sub query, if that happens then
     * query analysis is stopped and an UnsupportedQueryException is thrown. So that the caller can mark
     * this query in such a way that the PercolatorQuery always verifies if this query with the MemoryIndex.
     *
     * @param query the query to analyze
     * @return the extracted terms and whether a candidate match is verified
     * @throws UnsupportedQueryException if no processor exists for the query's class
     */
    public static Result analyze(Query query) {
        // FIX: use Class<?> rather than the raw type Class.
        Class<?> queryClass = query.getClass();
        if (queryClass.isAnonymousClass()) {
            // Sometimes queries have anonymous classes in that case we need the direct super class.
            // (for example blended term query)
            queryClass = queryClass.getSuperclass();
        }
        Function<Query, Result> queryProcessor = queryProcessors.get(queryClass);
        if (queryProcessor != null) {
            return queryProcessor.apply(query);
        } else {
            throw new UnsupportedQueryException(query);
        }
    }

    /** A query matching no documents trivially "matches" with no terms and is verified. */
    static Function<Query, Result> matchNoDocsQuery() {
        return (query -> new Result(true, Collections.emptySet()));
    }

    /** Scoring wrappers don't affect matching: analyze the wrapped query. */
    static Function<Query, Result> constantScoreQuery() {
        return query -> {
            Query wrappedQuery = ((ConstantScoreQuery) query).getQuery();
            return analyze(wrappedQuery);
        };
    }

    /** Boost wrappers don't affect matching: analyze the wrapped query. */
    static Function<Query, Result> boostQuery() {
        return query -> {
            Query wrappedQuery = ((BoostQuery) query).getQuery();
            return analyze(wrappedQuery);
        };
    }

    /** A single term query is fully verified by its one term. */
    static Function<Query, Result> termQuery() {
        return (query -> {
            TermQuery termQuery = (TermQuery) query;
            return new Result(true, Collections.singleton(termQuery.getTerm()));
        });
    }

    /** A terms query is a verified disjunction over all of its terms. */
    static Function<Query, Result> termsQuery() {
        return query -> {
            TermsQuery termsQuery = (TermsQuery) query;
            Set<Term> terms = new HashSet<>();
            PrefixCodedTerms.TermIterator iterator = termsQuery.getTermData().iterator();
            for (BytesRef term = iterator.next(); term != null; term = iterator.next()) {
                terms.add(new Term(iterator.field(), term));
            }
            return new Result(true, terms);
        };
    }

    /** A synonym query is a verified disjunction over its terms. */
    static Function<Query, Result> synonymQuery() {
        return query -> {
            Set<Term> terms = new HashSet<>(((SynonymQuery) query).getTerms());
            return new Result(true, terms);
        };
    }

    /** Common-terms matching is frequency dependent, so a candidate match is not verified. */
    static Function<Query, Result> commonTermsQuery() {
        return query -> {
            List<Term> terms = ((CommonTermsQuery) query).getTerms();
            return new Result(false, new HashSet<>(terms));
        };
    }

    /** Blended term queries behave as verified disjunctions over their terms. */
    static Function<Query, Result> blendedTermQuery() {
        return query -> {
            List<Term> terms = ((BlendedTermQuery) query).getTerms();
            return new Result(true, new HashSet<>(terms));
        };
    }

    /**
     * For a phrase query only a single representative term is kept, so a memory-index
     * verification is still required (not verified).
     */
    static Function<Query, Result> phraseQuery() {
        return query -> {
            Term[] terms = ((PhraseQuery) query).getTerms();
            if (terms.length == 0) {
                return new Result(true, Collections.emptySet());
            }
            // the longest term is likely to be the rarest,
            // so from a performance perspective it makes sense to extract that
            Term longestTerm = terms[0];
            for (Term term : terms) {
                if (longestTerm.bytes().length < term.bytes().length) {
                    longestTerm = term;
                }
            }
            return new Result(false, Collections.singleton(longestTerm));
        };
    }

    /** A span term query is fully verified by its one term. */
    static Function<Query, Result> spanTermQuery() {
        return query -> {
            Term term = ((SpanTermQuery) query).getTerm();
            return new Result(true, Collections.singleton(term));
        };
    }

    /** Span-near is a conjunction: keep only the best clause's terms; never verified. */
    static Function<Query, Result> spanNearQuery() {
        return query -> {
            Set<Term> bestClauses = null;
            SpanNearQuery spanNearQuery = (SpanNearQuery) query;
            for (SpanQuery clause : spanNearQuery.getClauses()) {
                Result temp = analyze(clause);
                bestClauses = selectTermListWithTheLongestShortestTerm(temp.terms, bestClauses);
            }
            return new Result(false, bestClauses);
        };
    }

    /** Span-or is a disjunction: union the terms of all clauses; never verified. */
    static Function<Query, Result> spanOrQuery() {
        return query -> {
            Set<Term> terms = new HashSet<>();
            SpanOrQuery spanOrQuery = (SpanOrQuery) query;
            for (SpanQuery clause : spanOrQuery.getClauses()) {
                terms.addAll(analyze(clause).terms);
            }
            return new Result(false, terms);
        };
    }

    /** Only the include part of a span-not can produce matches; the exclude part is ignored. */
    static Function<Query, Result> spanNotQuery() {
        return query -> {
            Result result = analyze(((SpanNotQuery) query).getInclude());
            return new Result(false, result.terms);
        };
    }

    /** Span-first restricts positions, which terms alone can't verify. */
    static Function<Query, Result> spanFirstQuery() {
        return query -> {
            Result result = analyze(((SpanFirstQuery) query).getMatch());
            return new Result(false, result.terms);
        };
    }

    /**
     * Boolean query: with required clauses keep only the best required clause's terms;
     * otherwise treat the should clauses as a disjunction.
     */
    static Function<Query, Result> booleanQuery() {
        return query -> {
            BooleanQuery bq = (BooleanQuery) query;
            List<BooleanClause> clauses = bq.clauses();
            int minimumShouldMatch = bq.getMinimumNumberShouldMatch();
            int numRequiredClauses = 0;
            int numOptionalClauses = 0;
            int numProhibitedClauses = 0;
            for (BooleanClause clause : clauses) {
                if (clause.isRequired()) {
                    numRequiredClauses++;
                }
                if (clause.isProhibited()) {
                    numProhibitedClauses++;
                }
                if (clause.getOccur() == BooleanClause.Occur.SHOULD) {
                    numOptionalClauses++;
                }
            }
            if (numRequiredClauses > 0) {
                Set<Term> bestClause = null;
                UnsupportedQueryException uqe = null;
                for (BooleanClause clause : clauses) {
                    if (clause.isRequired() == false) {
                        // skip must_not clauses, we don't need to remember the things that do *not* match...
                        // skip should clauses, this bq has must clauses, so we don't need to remember should clauses,
                        // since they are completely optional.
                        continue;
                    }

                    Result temp;
                    try {
                        temp = analyze(clause.getQuery());
                    } catch (UnsupportedQueryException e) {
                        uqe = e;
                        continue;
                    }
                    bestClause = selectTermListWithTheLongestShortestTerm(temp.terms, bestClause);
                }
                if (bestClause != null) {
                    return new Result(false, bestClause);
                } else {
                    if (uqe != null) {
                        // we're unable to select the best clause and an exception occurred, so we bail
                        throw uqe;
                    } else {
                        // We didn't find a clause and no exception occurred, so this bq only contained MatchNoDocsQueries,
                        return new Result(true, Collections.emptySet());
                    }
                }
            } else {
                List<Query> disjunctions = new ArrayList<>(numOptionalClauses);
                for (BooleanClause clause : clauses) {
                    if (clause.getOccur() == BooleanClause.Occur.SHOULD) {
                        disjunctions.add(clause.getQuery());
                    }
                }
                return handleDisjunction(disjunctions, minimumShouldMatch, numProhibitedClauses > 0);
            }
        };
    }

    /** Dis-max behaves like a plain disjunction with minimum-should-match of 1. */
    static Function<Query, Result> disjunctionMaxQuery() {
        return query -> {
            List<Query> disjuncts = ((DisjunctionMaxQuery) query).getDisjuncts();
            return handleDisjunction(disjuncts, 1, false);
        };
    }

    static Function<Query, Result> functionScoreQuery() {
        return query -> {
            FunctionScoreQuery functionScoreQuery = (FunctionScoreQuery) query;
            Result result = analyze(functionScoreQuery.getSubQuery());
            // If min_score is specified we can't guarantee upfront that this percolator query matches,
            // so in that case we set verified to false.
            // (if it matches with the percolator document matches with the extracted terms.
            // Min score filters out docs, which is different than the functions, which just influences the score.)
            boolean verified = functionScoreQuery.getMinScore() == null;
            return new Result(verified, result.terms);
        };
    }

    /**
     * Unions the terms of all disjuncts. The result is only verified when every disjunct
     * is verified, at most one clause is required, and there are no other clauses.
     */
    static Result handleDisjunction(List<Query> disjunctions, int minimumShouldMatch, boolean otherClauses) {
        boolean verified = minimumShouldMatch <= 1 && otherClauses == false;
        Set<Term> terms = new HashSet<>();
        for (Query disjunct : disjunctions) {
            Result subResult = analyze(disjunct);
            if (subResult.verified == false) {
                verified = false;
            }
            terms.addAll(subResult.terms);
        }
        return new Result(verified, terms);
    }

    /**
     * Picks the term set whose shortest term is longest; a null argument yields the other set.
     */
    static Set<Term> selectTermListWithTheLongestShortestTerm(Set<Term> terms1, Set<Term> terms2) {
        if (terms1 == null) {
            return terms2;
        } else if (terms2 == null) {
            return terms1;
        } else {
            int terms1ShortestTerm = minTermLength(terms1);
            int terms2ShortestTerm = minTermLength(terms2);
            // keep the clause with longest terms, this likely to be rarest.
            if (terms1ShortestTerm >= terms2ShortestTerm) {
                return terms1;
            } else {
                return terms2;
            }
        }
    }

    /** Returns the byte length of the shortest term in the set. */
    static int minTermLength(Set<Term> terms) {
        int min = Integer.MAX_VALUE;
        for (Term term : terms) {
            min = Math.min(min, term.bytes().length);
        }
        return min;
    }

    /**
     * Outcome of query analysis: the extracted terms plus whether a candidate
     * match is fully verified (no memory-index check needed).
     */
    static class Result {

        final Set<Term> terms;
        final boolean verified;

        Result(boolean verified, Set<Term> terms) {
            this.terms = terms;
            this.verified = verified;
        }

    }

    /**
     * Exception indicating that none or some query terms couldn't extracted from a percolator query.
     */
    static class UnsupportedQueryException extends RuntimeException {

        private final Query unsupportedQuery;

        public UnsupportedQueryException(Query unsupportedQuery) {
            super(LoggerMessageFormat.format("no query terms can be extracted from query [{}]", unsupportedQuery));
            this.unsupportedQuery = unsupportedQuery;
        }

        /**
         * The actual Lucene query that was unsupported and caused this exception to be thrown.
         */
        public Query getUnsupportedQuery() {
            return unsupportedQuery;
        }
    }

}
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.common.geo;

import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
import org.apache.lucene.util.SloppyMath;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.index.mapper.GeoPointFieldMapper;

import java.io.IOException;

/**
 * Geographic utility methods: coordinate validation and normalization, geohash/quadtree
 * cell geometry, distance calculations, and {@link GeoPoint} parsing.
 */
public class GeoUtils {

    /** Maximum valid latitude in degrees. */
    public static final double MAX_LAT = 90.0;
    /** Minimum valid latitude in degrees. */
    public static final double MIN_LAT = -90.0;
    /** Maximum valid longitude in degrees. */
    public static final double MAX_LON = 180.0;
    /** Minimum valid longitude in degrees. */
    public static final double MIN_LON = -180.0;

    public static final String LATITUDE = GeoPointFieldMapper.Names.LAT;
    public static final String LONGITUDE = GeoPointFieldMapper.Names.LON;
    public static final String GEOHASH = GeoPointFieldMapper.Names.GEOHASH;

    /** Earth ellipsoid major axis defined by WGS 84 in meters */
    public static final double EARTH_SEMI_MAJOR_AXIS = 6378137.0;      // meters (WGS 84)

    /** Earth ellipsoid minor axis defined by WGS 84 in meters */
    public static final double EARTH_SEMI_MINOR_AXIS = 6356752.314245; // meters (WGS 84)

    /** Earth mean radius defined by WGS 84 in meters */
    public static final double EARTH_MEAN_RADIUS = 6371008.7714D;      // meters (WGS 84)

    /** Earth axis ratio defined by WGS 84 (0.996647189335) */
    public static final double EARTH_AXIS_RATIO = EARTH_SEMI_MINOR_AXIS / EARTH_SEMI_MAJOR_AXIS;

    /** Earth ellipsoid equator length in meters */
    public static final double EARTH_EQUATOR = 2*Math.PI * EARTH_SEMI_MAJOR_AXIS;

    /** Earth ellipsoid polar distance in meters */
    public static final double EARTH_POLAR_DISTANCE = Math.PI * EARTH_SEMI_MINOR_AXIS;

    /** rounding error for quantized latitude and longitude values */
    public static final double TOLERANCE = 1E-6;

    /** Returns the minimum between the provided distance 'initialRadius' and the
     * maximum distance/radius from the point 'center' before overlapping
     **/
    public static double maxRadialDistance(GeoPoint center, double initialRadius) {
        final double maxRadius = maxRadialDistanceMeters(center.lat(), center.lon());
        return Math.min(initialRadius, maxRadius);
    }

    /** Returns true if latitude is actually a valid latitude value.*/
    public static boolean isValidLatitude(double latitude) {
        if (Double.isNaN(latitude) || Double.isInfinite(latitude) || latitude < GeoUtils.MIN_LAT || latitude > GeoUtils.MAX_LAT) {
            return false;
        }
        return true;
    }

    /** Returns true if longitude is actually a valid longitude value. */
    public static boolean isValidLongitude(double longitude) {
        // FIX: the second condition previously repeated Double.isNaN(longitude); by parallel
        // with isValidLatitude it must reject infinite values as well.
        if (Double.isNaN(longitude) || Double.isInfinite(longitude) || longitude < GeoUtils.MIN_LON || longitude > GeoUtils.MAX_LON) {
            return false;
        }
        return true;
    }

    /**
     * Calculate the width (in meters) of geohash cells at a specific level
     * @param level geohash level must be greater or equal to zero
     * @return the width of cells at level in meters
     */
    public static double geoHashCellWidth(int level) {
        assert level>=0;
        // Geohash cells are split into 32 cells at each level. the grid
        // alternates at each level between a 8x4 and a 4x8 grid
        return EARTH_EQUATOR / (1L<<((((level+1)/2)*3) + ((level/2)*2)));
    }

    /**
     * Calculate the width (in meters) of quadtree cells at a specific level
     * @param level quadtree level must be greater or equal to zero
     * @return the width of cells at level in meters
     */
    public static double quadTreeCellWidth(int level) {
        assert level >=0;
        return EARTH_EQUATOR / (1L<<level);
    }

    /**
     * Calculate the height (in meters) of geohash cells at a specific level
     * @param level geohash level must be greater or equal to zero
     * @return the height of cells at level in meters
     */
    public static double geoHashCellHeight(int level) {
        assert level>=0;
        // Geohash cells are split into 32 cells at each level. the grid
        // alternates at each level between a 8x4 and a 4x8 grid
        return EARTH_POLAR_DISTANCE / (1L<<((((level+1)/2)*2) + ((level/2)*3)));
    }

    /**
     * Calculate the height (in meters) of quadtree cells at a specific level
     * @param level quadtree level must be greater or equal to zero
     * @return the height of cells at level in meters
     */
    public static double quadTreeCellHeight(int level) {
        assert level>=0;
        return EARTH_POLAR_DISTANCE / (1L<<level);
    }

    /**
     * Calculate the size (in meters) of geohash cells at a specific level
     * @param level geohash level must be greater or equal to zero
     * @return the size of cells at level in meters
     */
    public static double geoHashCellSize(int level) {
        assert level>=0;
        final double w = geoHashCellWidth(level);
        final double h = geoHashCellHeight(level);
        return Math.sqrt(w*w + h*h);
    }

    /**
     * Calculate the size (in meters) of quadtree cells at a specific level
     * @param level quadtree level must be greater or equal to zero
     * @return the size of cells at level in meters
     */
    public static double quadTreeCellSize(int level) {
        assert level>=0;
        return Math.sqrt(EARTH_POLAR_DISTANCE*EARTH_POLAR_DISTANCE + EARTH_EQUATOR*EARTH_EQUATOR) / (1L<<level);
    }

    /**
     * Calculate the number of levels needed for a specific precision. Quadtree
     * cells will not exceed the specified size (diagonal) of the precision.
     * @param meters Maximum size of cells in meters (must greater than zero)
     * @return levels need to achieve precision
     */
    public static int quadTreeLevelsForPrecision(double meters) {
        assert meters >= 0;
        if(meters == 0) {
            return QuadPrefixTree.MAX_LEVELS_POSSIBLE;
        } else {
            final double ratio = 1+(EARTH_POLAR_DISTANCE / EARTH_EQUATOR); // cell ratio
            final double width = Math.sqrt((meters*meters)/(ratio*ratio)); // convert to cell width
            final long part = Math.round(Math.ceil(EARTH_EQUATOR / width));
            final int level = Long.SIZE - Long.numberOfLeadingZeros(part)-1; // (log_2)
            return (part<=(1L<<level)) ?level :(level+1); // adjust level
        }
    }

    /**
     * Calculate the number of levels needed for a specific precision. QuadTree
     * cells will not exceed the specified size (diagonal) of the precision.
     * @param distance Maximum size of cells as unit string (must greater or equal to zero)
     * @return levels need to achieve precision
     */
    public static int quadTreeLevelsForPrecision(String distance) {
        return quadTreeLevelsForPrecision(DistanceUnit.METERS.parse(distance, DistanceUnit.DEFAULT));
    }

    /**
     * Calculate the number of levels needed for a specific precision. GeoHash
     * cells will not exceed the specified size (diagonal) of the precision.
     * @param meters Maximum size of cells in meters (must greater or equal to zero)
     * @return levels need to achieve precision
     */
    public static int geoHashLevelsForPrecision(double meters) {
        assert meters >= 0;
        if(meters == 0) {
            return GeohashPrefixTree.getMaxLevelsPossible();
        } else {
            final double ratio = 1+(EARTH_POLAR_DISTANCE / EARTH_EQUATOR); // cell ratio
            final double width = Math.sqrt((meters*meters)/(ratio*ratio)); // convert to cell width
            final double part = Math.ceil(EARTH_EQUATOR / width);
            if(part == 1)
                return 1;
            final int bits = (int)Math.round(Math.ceil(Math.log(part) / Math.log(2)));
            final int full = bits / 5;                 // number of 5 bit subdivisions
            final int left = bits - full*5;            // bit representing the last level
            final int even = full + (left>0?1:0);      // number of even levels
            final int odd = full + (left>3?1:0);       // number of odd levels
            return even+odd;
        }
    }

    /**
     * Calculate the number of levels needed for a specific precision. GeoHash
     * cells will not exceed the specified size (diagonal) of the precision.
     * @param distance Maximum size of cells as unit string (must greater or equal to zero)
     * @return levels need to achieve precision
     */
    public static int geoHashLevelsForPrecision(String distance) {
        return geoHashLevelsForPrecision(DistanceUnit.METERS.parse(distance, DistanceUnit.DEFAULT));
    }

    /**
     * Normalize longitude to lie within the -180 (exclusive) to 180 (inclusive) range.
     *
     * @param lon Longitude to normalize
     * @return The normalized longitude.
     */
    public static double normalizeLon(double lon) {
        return centeredModulus(lon, 360);
    }

    /**
     * Normalize latitude to lie within the -90 to 90 (both inclusive) range.
     * <p>
     * Note: You should not normalize longitude and latitude separately,
     * because when normalizing latitude it may be necessary to
     * add a shift of 180&deg; in the longitude.
     * For this purpose, you should call the
     * {@link #normalizePoint(GeoPoint)} function.
     *
     * @param lat Latitude to normalize
     * @return The normalized latitude.
     * @see #normalizePoint(GeoPoint)
     */
    public static double normalizeLat(double lat) {
        lat = centeredModulus(lat, 360);
        if (lat < -90) {
            lat = -180 - lat;
        } else if (lat > 90) {
            lat = 180 - lat;
        }
        return lat;
    }

    /**
     * Normalize the geo {@code Point} for its coordinates to lie within their
     * respective normalized ranges.
     * <p>
     * Note: A shift of 180&deg; is applied in the longitude if necessary,
     * in order to normalize properly the latitude.
     *
     * @param point The point to normalize in-place.
     */
    public static void normalizePoint(GeoPoint point) {
        normalizePoint(point, true, true);
    }

    /**
     * Normalize the geo {@code Point} for the given coordinates to lie within
     * their respective normalized ranges.
     * <p>
     * You can control which coordinate gets normalized with the two flags.
     * <p>
     * Note: A shift of 180&deg; is applied in the longitude if necessary,
     * in order to normalize properly the latitude.
     * If normalizing latitude but not longitude, it is assumed that
     * the longitude is in the form x+k*360, with x in ]-180;180],
     * and k is meaningful to the application.
     * Therefore x will be adjusted while keeping k preserved.
     *
     * @param point   The point to normalize in-place.
     * @param normLat Whether to normalize latitude or leave it as is.
     * @param normLon Whether to normalize longitude.
     */
    public static void normalizePoint(GeoPoint point, boolean normLat, boolean normLon) {
        double[] pt = {point.lon(), point.lat()};
        normalizePoint(pt, normLon, normLat);
        point.reset(pt[1], pt[0]);
    }

    public static void normalizePoint(double[] lonLat) {
        normalizePoint(lonLat, true, true);
    }

    public static void normalizePoint(double[] lonLat, boolean normLon, boolean normLat) {
        assert lonLat != null && lonLat.length == 2;

        normLat = normLat && (lonLat[1] > 90 || lonLat[1] < -90);
        normLon = normLon && (lonLat[0] > 180 || lonLat[0] < -180);

        if (normLat) {
            lonLat[1] = centeredModulus(lonLat[1], 360);
            boolean shift = true;
            if (lonLat[1] < -90) {
                lonLat[1] = -180 - lonLat[1];
            } else if (lonLat[1] > 90) {
                lonLat[1] = 180 - lonLat[1];
            } else {
                // No need to shift the longitude, and the latitude is normalized
                shift = false;
            }
            if (shift) {
                if (normLon) {
                    lonLat[0] += 180;
                } else {
                    // Longitude won't be normalized,
                    // keep it in the form x+k*360 (with x in ]-180;180])
                    // by only changing x, assuming k is meaningful for the user application.
                    lonLat[0] += normalizeLon(lonLat[0]) > 0 ? -180 : 180;
                }
            }
        }
        if (normLon) {
            lonLat[0] = centeredModulus(lonLat[0], 360);
        }
    }

    // Maps dividend into the half-open interval ]-divisor/2; divisor/2].
    private static double centeredModulus(double dividend, double divisor) {
        double rtn = dividend % divisor;
        if (rtn <= 0) {
            rtn += divisor;
        }
        if (rtn > divisor / 2) {
            rtn -= divisor;
        }
        return rtn;
    }

    /**
     * Parse a {@link GeoPoint} with a {@link XContentParser}:
     *
     * @param parser {@link XContentParser} to parse the value from
     * @return new {@link GeoPoint} parsed from the parse
     */
    public static GeoPoint parseGeoPoint(XContentParser parser) throws IOException, ElasticsearchParseException {
        return parseGeoPoint(parser, new GeoPoint());
    }

    /**
     * Parse a {@link GeoPoint} with a {@link XContentParser}. A geopoint has one of the following forms:
     *
     * <ul>
     *     <li>Object: <pre>{&quot;lat&quot;: <i>&lt;latitude&gt;</i>, &quot;lon&quot;: <i>&lt;longitude&gt;</i>}</pre></li>
     *     <li>String: <pre>&quot;<i>&lt;latitude&gt;</i>,<i>&lt;longitude&gt;</i>&quot;</pre></li>
     *     <li>Geohash: <pre>&quot;<i>&lt;geohash&gt;</i>&quot;</pre></li>
     *     <li>Array: <pre>[<i>&lt;longitude&gt;</i>,<i>&lt;latitude&gt;</i>]</pre></li>
     * </ul>
     *
     * @param parser {@link XContentParser} to parse the value from
     * @param point A {@link GeoPoint} that will be reset by the values parsed
     * @return new {@link GeoPoint} parsed from the parse
     */
    public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point) throws IOException, ElasticsearchParseException {
        double lat = Double.NaN;
        double lon = Double.NaN;
        String geohash = null;
        NumberFormatException numberFormatException = null;

        if(parser.currentToken() == Token.START_OBJECT) {
            while(parser.nextToken() != Token.END_OBJECT) {
                if(parser.currentToken() == Token.FIELD_NAME) {
                    String field = parser.currentName();
                    if(LATITUDE.equals(field)) {
                        parser.nextToken();
                        switch (parser.currentToken()) {
                            case VALUE_NUMBER:
                            case VALUE_STRING:
                                try {
                                    lat = parser.doubleValue(true);
                                } catch (NumberFormatException e) {
                                    numberFormatException = e;
                                }
                                break;
                            default:
                                throw new ElasticsearchParseException("latitude must be a number");
                        }
                    } else if (LONGITUDE.equals(field)) {
                        parser.nextToken();
                        switch (parser.currentToken()) {
                            case VALUE_NUMBER:
                            case VALUE_STRING:
                                try {
                                    lon = parser.doubleValue(true);
                                } catch (NumberFormatException e) {
                                    numberFormatException = e;
                                }
                                break;
                            default:
                                throw new ElasticsearchParseException("longitude must be a number");
                        }
                    } else if (GEOHASH.equals(field)) {
                        if(parser.nextToken() == Token.VALUE_STRING) {
                            geohash = parser.text();
                        } else {
                            throw new ElasticsearchParseException("geohash must be a string");
                        }
                    } else {
                        throw new ElasticsearchParseException("field must be either [{}], [{}] or [{}]", LATITUDE, LONGITUDE, GEOHASH);
                    }
                } else {
                    throw new ElasticsearchParseException("token [{}] not allowed", parser.currentToken());
                }
            }

            if (geohash != null) {
                if(!Double.isNaN(lat) || !Double.isNaN(lon)) {
                    throw new ElasticsearchParseException("field must be either lat/lon or geohash");
                } else {
                    return point.resetFromGeoHash(geohash);
                }
            } else if (numberFormatException != null) {
                throw new ElasticsearchParseException("[{}] and [{}] must be valid double values", numberFormatException, LATITUDE,
                    LONGITUDE);
            } else if (Double.isNaN(lat)) {
                throw new ElasticsearchParseException("field [{}] missing", LATITUDE);
            } else if (Double.isNaN(lon)) {
                throw new ElasticsearchParseException("field [{}] missing", LONGITUDE);
            } else {
                return point.reset(lat, lon);
            }

        } else if(parser.currentToken() == Token.START_ARRAY) {
            int element = 0;
            while(parser.nextToken() != Token.END_ARRAY) {
                if(parser.currentToken() == Token.VALUE_NUMBER) {
                    element++;
                    if(element == 1) {
                        lon = parser.doubleValue();
                    } else if(element == 2) {
                        lat = parser.doubleValue();
                    } else {
                        throw new ElasticsearchParseException("only two values allowed");
                    }
                } else {
                    throw new ElasticsearchParseException("numeric value expected");
                }
            }
            return point.reset(lat, lon);
        } else if(parser.currentToken() == Token.VALUE_STRING) {
            String data = parser.text();
            return parseGeoPoint(data, point);
        } else {
            throw new ElasticsearchParseException("geo_point expected");
        }
    }

    /** parse a {@link GeoPoint} from a String */
    public static GeoPoint parseGeoPoint(String data, GeoPoint point) {
        int comma = data.indexOf(',');
        if(comma > 0) {
            double lat = Double.parseDouble(data.substring(0, comma).trim());
            double lon = Double.parseDouble(data.substring(comma + 1).trim());
            return point.reset(lat, lon);
        } else {
            return point.resetFromGeoHash(data);
        }
    }

    /** Returns the maximum distance/radius (in meters) from the point 'center' before overlapping */
    public static double maxRadialDistanceMeters(final double centerLat, final double centerLon) {
        if (Math.abs(centerLat) == MAX_LAT) {
            return SloppyMath.haversinMeters(centerLat, centerLon, 0, centerLon);
        }
        return SloppyMath.haversinMeters(centerLat, centerLon, centerLat, (MAX_LON + centerLon) % 360);
    }

    /** Return the distance (in meters) between 2 lat,lon geo points using the haversine method implemented by lucene */
    public static double arcDistance(double lat1, double lon1, double lat2, double lon2) {
        return SloppyMath.haversinMeters(lat1, lon1, lat2, lon2);
    }

    /**
     * Return the distance (in meters) between 2 lat,lon geo points using a simple tangential plane
     * this provides a faster alternative to {@link GeoUtils#arcDistance} when points are within 5 km
     */
    public static double planeDistance(double lat1, double lon1, double lat2, double lon2) {
        double x = (lon2 - lon1) * SloppyMath.TO_RADIANS * Math.cos((lat2 + lat1) / 2.0 * SloppyMath.TO_RADIANS);
        double y = (lat2 - lat1) * SloppyMath.TO_RADIANS;
        return Math.sqrt(x * x + y * y) * EARTH_MEAN_RADIUS;
    }

    // Utility class: no instances.
    private GeoUtils() {
    }
}
package com.backendless.ucrspoon.login;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.backendless.Backendless;
import com.backendless.BackendlessCollection;
import com.backendless.BackendlessUser;
import com.backendless.async.callback.AsyncCallback;
import com.backendless.exceptions.BackendlessFault;
import com.backendless.persistence.BackendlessDataQuery;
import com.backendless.ucrspoon.data.Dish;
import com.backendless.ucrspoon.data.Restaurant;
import com.backendless.ucrspoon.data.Orders;

import android.app.Activity;
import android.content.Intent;
import android.os.Bundle;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.Button;
import android.widget.ListView;
import android.widget.TextView;
import android.widget.Toast;

/**
 * Ordering screen: lets the user browse a restaurant's menu by category,
 * add dishes to a cart, review the cart, and submit the order to Backendless.
 * Expects "R_id", "time", "partySize" and "tableLocation" extras in the launching Intent.
 */
public class Ordering2 extends Activity {

    String[] categories;
    String[] items;
    // NOTE(review): this field obscures the imported data class com.backendless.ucrspoon.data.Orders;
    // kept as-is because it has default (package) visibility, but consider renaming.
    HashMap<String, Integer> Orders; // dish label -> quantity in the cart
    HashMap<String, Double> Prices;  // dish label -> unit price
    String R_id;          // restaurant object id from the launching Intent
    String partySize;
    String tableLocation;
    String time;
    String selectedItem;  // label of the last dish tapped; null until one is chosen
    String name;          // display name of the logged-in user
    double totalPrice = 0;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_ordering2);

        Backendless.setUrl( Defaults.SERVER_URL ); // in case you didn't already do the init
        //Backendless.initApp( this, Defaults.APPLICATION_ID, Defaults.SECRET_KEY, Defaults.VERSION );

        // Retrieve the reservation details handed over by the previous screen.
        Bundle extras = getIntent().getExtras();
        if(extras != null) {
            R_id = extras.getString("R_id");
            time = extras.getString("time");
            partySize = extras.getString("partySize");
            tableLocation = extras.getString("tableLocation");
            populateCategory();
        }

        TextView selectedCategory = (TextView)findViewById(R.id.itemPrompt);
        selectedCategory.setText("----");

        Prices = new HashMap<String, Double>();
        Orders = new HashMap<String, Integer>();

        Button orderItem = (Button)findViewById(R.id.button_orderItem);
        orderItem.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                // FIX: clicking "order" before a dish is selected used to NPE below
                // (selectedItem is null, so Prices.get(selectedItem) unboxes null).
                if (selectedItem == null || !Prices.containsKey(selectedItem)) {
                    Toast.makeText(Ordering2.this, "Please select an item first", Toast.LENGTH_SHORT).show();
                    return;
                }
                Toast ordered = Toast.makeText(Ordering2.this, "You ordered " + selectedItem + "!", Toast.LENGTH_SHORT);
                ordered.show();
                // Increment the quantity for this dish, starting at 1 on first add.
                if(Orders.containsKey(selectedItem)) {
                    Orders.put(selectedItem, Orders.get(selectedItem)+1);
                } else {
                    Orders.put(selectedItem, 1);
                }
                totalPrice += Prices.get(selectedItem);
            }
        });

        Button viewItems = (Button)findViewById(R.id.button_viewItems);
        viewItems.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                // Show the current cart contents and running total in a toast.
                String cart = "";
                for(Map.Entry<String, Integer> entry: Orders.entrySet()) {
                    cart += entry.getKey() + " x" + entry.getValue() + "\n";
                }
                cart += "\nTotal = $" + totalPrice;
                Toast ordered = Toast.makeText(Ordering2.this, cart, Toast.LENGTH_SHORT);
                ordered.show();
            }
        });

        Button done = (Button)findViewById(R.id.button_toConfirmPage);
        done.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                if (Orders.isEmpty()) {
                    Toast invalid = Toast.makeText(Ordering2.this, "There is nothing to order", Toast.LENGTH_SHORT);
                    invalid.show();
                } else {
                    Orders order = new Orders();
                    order.setR_id(R_id);

                    //Make sure that a user is logged in to be able to get name
                    BackendlessUser user = Backendless.UserService.CurrentUser();
                    if(user != null) {
                        name = (String)user.getProperty("name");
                    } else {
                        name = "ERROR: User is not logged in";
                    }

                    //Set the name, time, party size, and tableLocations
                    // The whole order is encoded as a single ';'-separated string.
                    String orderTmp = name + ";" + time + ";" + partySize + ";" + tableLocation;

                    //Set the dishes
                    for(Map.Entry<String, Integer> entry: Orders.entrySet()) {
                        orderTmp += ";" + entry.getKey() + " x" + entry.getValue();
                    }

                    //Set the total Price
                    orderTmp += ";" + String.valueOf(totalPrice);

                    order.setOrder(orderTmp);
                    order.saveAsync(new AsyncCallback<Orders>() {
                        @Override
                        public void handleFault(BackendlessFault fault) {
                            Toast fail= Toast.makeText(Ordering2.this,"Ordering Failed. Please Try Again.", Toast.LENGTH_SHORT);
                            fail.show();
                        }

                        @Override
                        public void handleResponse(Orders response) {
                            // Reset the cart after a successful order.
                            Orders.clear();
                            Orders = new HashMap<String, Integer>();
                            totalPrice = 0;
                            Toast success= Toast.makeText(Ordering2.this,"Order success", Toast.LENGTH_SHORT);
                            success.show();
                        }
                    });
                }
            }
        });
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        // Inflate the menu; this adds items to the action bar if it is present.
        getMenuInflater().inflate(R.menu.ordering2, menu);
        return true;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        // Handle action bar item clicks here. The action bar will
        // automatically handle clicks on the Home/Up button, so long
        // as you specify a parent activity in AndroidManifest.xml.
        int id = item.getItemId();
        if (id == R.id.action_settings) {
            return true;
        }
        return super.onOptionsItemSelected(item);
    }

    /** Loads this restaurant's menu categories and shows them in the category list. */
    private void populateCategory(){
        String whereClause = "R_id = '" + R_id + "'";
        BackendlessDataQuery dataQuery = new BackendlessDataQuery();
        dataQuery.setWhereClause( whereClause );
        Restaurant.findAsync( dataQuery, new AsyncCallback<BackendlessCollection<Restaurant>>(){
            @Override
            public void handleResponse(BackendlessCollection<Restaurant> response ) {
                List<Restaurant> lr = response.getData();
                if(lr.size() < 1){
                    return;
                }
                // Categories are stored as a single ';'-separated string on the restaurant.
                categories = lr.get(0).getMenuCategories().split(";");
                ArrayAdapter<String> adapter;
                adapter = new ArrayAdapter<String>(
                        Ordering2.this,          // context for this activity
                        R.layout.restaurat_list, // layout to use
                        categories);             // items to be displayed
                ListView list = (ListView)findViewById(R.id.list_menuCategories);
                list.setAdapter(adapter);
                registerClickCallback();
            }

            @Override
            public void handleFault(BackendlessFault fault) {
                // Best effort: on failure the category list simply stays empty.
                return;
            }
        });
    }

    /** Reacts to a category tap by loading that category's dishes into the item list. */
    private void registerClickCallback() {
        final ListView list = (ListView)findViewById(R.id.list_menuCategories);
        list.setOnItemClickListener(new AdapterView.OnItemClickListener() {
            @Override
            public void onItemClick(AdapterView<?> parent, View viewClicked, int position, long id) {
                TextView selected = (TextView)viewClicked;
                TextView selectedCategory = (TextView)findViewById(R.id.itemPrompt);
                selectedCategory.setText("Select an item from " + selected.getText());

                //Get Items corresponding to the selected Category
                String whereClause = "R_id = '" +R_id + "' AND Category = '" + selected.getText().toString() + "'";
                BackendlessDataQuery dataQuery = new BackendlessDataQuery();
                dataQuery.setWhereClause( whereClause );
                Dish.findAsync( dataQuery, new AsyncCallback<BackendlessCollection<Dish>>(){
                    @Override
                    public void handleResponse(BackendlessCollection<Dish> response ) {
                        List<Dish> lr = response.getData();
                        if(lr.size() < 1){
                            return;
                        }
                        items = new String[lr.size()];
                        for(int i = 0, size = lr.size(); i < size; i++) {
                            // The label shown to the user doubles as the key into Prices/Orders.
                            items[i] = new String(lr.get(i).getName() + " ($"+lr.get(i).getPrice()+" )");
                            Prices.put(items[i], lr.get(i).getPrice());
                        }
                        ArrayAdapter<String> adapter;
                        adapter = new ArrayAdapter<String>(
                                Ordering2.this,          // context for this activity
                                R.layout.restaurat_list, // layout to use
                                items);                  // items to be displayed
                        ListView list = (ListView)findViewById(R.id.list_menuItems);
                        list.setAdapter(adapter);
                        registerClickCallback2();
                    }

                    @Override
                    public void handleFault(BackendlessFault fault) {
                        // Best effort: on failure the item list simply stays empty.
                        return;
                    }
                });
            }
        });
    }

    /** Remembers which dish label was tapped so the "order" button can add it to the cart. */
    private void registerClickCallback2() {
        ListView list = (ListView)findViewById(R.id.list_menuItems);
        list.setOnItemClickListener(new AdapterView.OnItemClickListener() {
            @Override
            public void onItemClick(AdapterView<?> parent, View viewClicked, int position, long id) {
                TextView selected = (TextView)viewClicked;
                selectedItem = selected.getText().toString();
            }
        });
    }
}
package com.joy.tweetitdeluxe.adapter;

import android.content.Context;
import android.content.Intent;
import android.support.v7.widget.CardView;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.CheckBox;
import android.widget.ImageView;
import android.widget.TextView;

import com.bumptech.glide.Glide;
import com.bumptech.glide.load.engine.DiskCacheStrategy;
import com.joy.tweetitdeluxe.R;
import com.joy.tweetitdeluxe.activity.HomeTimelineActivity;
import com.joy.tweetitdeluxe.activity.ProfileActivity;
import com.joy.tweetitdeluxe.model.Tweet;
import com.raizlabs.android.dbflow.sql.language.SQLite;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * Created by joy0520 on 2017/3/3.
 *
 * RecyclerView adapter for the tweet timeline. Keeps an in-memory list of
 * tweets mirrored into the DBFlow-backed local store and forwards card taps
 * to an optional {@link Callback}.
 */
public class TweetsAdapter extends RecyclerView.Adapter {

    /** Holds the view references for a single tweet card. */
    static class TweetHolder extends RecyclerView.ViewHolder {
        private ImageView image;
        private TextView name, userName, text, timeStamp;
        private CardView cardView;

        public TweetHolder(View itemView) {
            super(itemView);
            image = (ImageView) itemView.findViewById(R.id.photo);
            name = (TextView) itemView.findViewById(R.id.name);
            userName = (TextView) itemView.findViewById(R.id.user_name);
            text = (TextView) itemView.findViewById(R.id.text);
            timeStamp = (TextView) itemView.findViewById(R.id.time_stamp);
            cardView = (CardView) itemView.findViewById(R.id.card);
        }
    }

    /** Scroll listener that fires {@link #onLoadMore()} once when the list reaches its end. */
    public abstract static class EndlessScrollListener extends RecyclerView.OnScrollListener {
        // True if we are still waiting for the last set of data to load.
        private boolean loading = false;
        int lastVisibleItem, visibleItemCount, totalItemCount;
        private LinearLayoutManager mManager;

        public EndlessScrollListener(LinearLayoutManager manager) {
            mManager = manager;
        }

        /** Triggered at most once per load; call {@link #finishLoading()} when the data arrives. */
        public abstract void onLoadMore();

        public void finishLoading() {
            this.loading = false;
        }

        @Override
        public void onScrolled(RecyclerView recyclerView, int dx, int dy) {
            //check for scroll down
            if (dy > 0) {
                visibleItemCount = mManager.getChildCount();
                totalItemCount = mManager.getItemCount();
                lastVisibleItem = mManager.findLastVisibleItemPosition();
                if (HomeTimelineActivity.DEBUG) {
                    Log.i("onScrolled()", "visibleItemCount=" + visibleItemCount
                            + ", totalItemCount=" + totalItemCount
                            + ", lastVisibleItem=" + lastVisibleItem);
                }
                if (!loading) {
                    if ((visibleItemCount + lastVisibleItem) >= totalItemCount) {
                        loading = true;
                        //Do pagination.. i.e. fetch new data
                        onLoadMore();
                    }
                }
            }
        }
    }

    /** Notified when the user taps a tweet card. */
    public interface Callback {
        void onItemClicked(Tweet tweet);
    }

    private List<Tweet> mTweets;
    private Context mContext;
    private Callback mCallback;

    public TweetsAdapter(Context context) {
        super();
        mContext = context;
        mTweets = new ArrayList<>();
    }

    /** Inserts a freshly posted tweet at the top of the list and persists it. */
    public void postANewTweet(Tweet tweet) {
        mTweets.add(0, tweet);
        saveTweets(mTweets);
        notifyItemInserted(0);
    }

    /**
     * Updates the favorited flag of every tweet with the given id and persists the change.
     * The adapter is notified about the last matching position, if any.
     */
    public void updateTweetFavorited(long id, boolean favorited) {
        int position = -1;
        // Indexed loop: the previous version did an extra indexOf() scan per match.
        for (int i = 0; i < mTweets.size(); i++) {
            Tweet tweet = mTweets.get(i);
            if (tweet.getId() == id) {
                tweet.setFavorited(favorited);
                tweet.save();
                position = i;
            }
        }
        if (position != -1) notifyItemChanged(position);
    }

    /** Appends tweets to the end of the list and persists them. */
    public void addTweets(List<Tweet> tweets) {
        mTweets.addAll(tweets);
        saveTweets(mTweets);
        notifyDataSetChanged();
    }

    /** Clears the list and deletes the locally saved copies. */
    public void clearAll() {
        mTweets.clear();
        deleteSavedTweets(mTweets);
        notifyDataSetChanged();
    }

    @Override
    public RecyclerView.ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
        LayoutInflater inflater = LayoutInflater.from(mContext);
        View view = inflater.inflate(R.layout.item_tweet, parent, false);
        return new TweetHolder(view);
    }

    @Override
    public void onBindViewHolder(RecyclerView.ViewHolder holder, int position) {
        final Tweet tweet = mTweets.get(position);
        if (holder instanceof TweetHolder) {
            TweetHolder tweetHolder = (TweetHolder) holder;
            // Setup view content
            Glide.with(mContext)
                    .load(tweet.getProfileImageUrl())
                    .fitCenter()
                    .diskCacheStrategy(DiskCacheStrategy.ALL)
                    .into(tweetHolder.image);
            tweetHolder.name.setText(tweet.getName());
            tweetHolder.userName.setText(String.format("@%s", tweet.getScreenName()));
            tweetHolder.text.setText(tweet.getText());
            tweetHolder.timeStamp.setText(tweet.getCreatedAtFormatString());
            // Setup click event
            tweetHolder.cardView.setOnClickListener(new View.OnClickListener() {
                @Override
                public void onClick(View v) {
                    if (mCallback != null) {
                        mCallback.onItemClicked(tweet);
                    }
                }
            });
            tweetHolder.image.setOnClickListener(new View.OnClickListener() {
                @Override
                public void onClick(View v) {
                    // Tapping the avatar opens the author's profile.
                    Intent intent = new Intent(mContext, ProfileActivity.class);
                    intent.putExtra("screen_name", tweet.getScreenName());
                    mContext.startActivity(intent);
                }
            });
        }
    }

    @Override
    public int getItemCount() {
        return mTweets.size();
    }

    // TODO do this on another thread
    private void saveTweets(final List<Tweet> tweets) {
        for (Tweet tweet : tweets) {
            tweet.save();
        }
    }

    private void deleteSavedTweets(final List<Tweet> tweets) {
        for (Tweet tweet : tweets) {
            tweet.delete();
        }
    }

    /** Reads all locally persisted tweets back from the database. */
    public List<Tweet> loadTweetsFromDB() {
        return SQLite.select().from(Tweet.class).queryList();
    }

    /** Replaces the in-memory list with the locally persisted tweets, sorted. */
    public void applyLocalTweets() {
        mTweets.clear();
        mTweets.addAll(loadTweetsFromDB());
        Collections.sort(mTweets);
        notifyDataSetChanged();
    }

    public void setCallback(Callback callback) {
        mCallback = callback;
    }
}
/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 */
package org.wso2.andes.test.framework.distributedcircuit;

import org.apache.log4j.Logger;

import org.wso2.andes.test.framework.*;
import org.wso2.andes.test.utils.ConversationFactory;

import org.wso2.andes.junit.extensions.TimingController;
import org.wso2.andes.junit.extensions.TimingControllerAware;
import org.wso2.andes.junit.extensions.util.ParsedProperties;

import javax.jms.Destination;
import javax.jms.JMSException;
import javax.jms.Message;
import javax.jms.Session;

import java.util.LinkedList;
import java.util.List;

/**
 * DistributedCircuitImpl is a distributed implementation of the test {@link Circuit}. Many publishers and receivers
 * across multiple machines may be combined to form a single test circuit. The test circuit extracts reports from
 * all of its publishers and receivers, and applies its assertions to these reports.
 *
 * <p/><table id="crc"><caption>CRC Card</caption>
 * <tr><th> Responsibilities <th> Collaborations
 * <tr><td> Supply the publishing and receiving ends of a test messaging circuit.
 * <tr><td> Start the circuit running.
 * <tr><td> Close the circuit down.
 * <tr><td> Take a reading of the circuits state.
 * <tr><td> Apply assertions against the circuits state.
 * <tr><td> Send test messages over the circuit.
 * <tr><td> Perform the default test procedure on the circuit.
 * </table>
 *
 * @todo There is a short pause after receiving sender reports before asking for receiver reports, because receivers may
 *       not have finished receiving all their test messages before the report request arrives. This is going to be a
 *       problem for taking test timings and needs to be eliminated. Suggested solution: have receiver send back reports
 *       asynchronously, on test batch size boundaries, and do so automatically rather than having to have the report
 *       request sent to them. Number each test run, or otherwise uniquely identify it, when a receiver does not get
 *       any more messages on a test run for more than a timeout, it can assume the test is complete and send a final
 *       report. On the coordinator end a future will need to be created to wait for all final reports to come in, and
 *       to register results and timings for the test. This must work in such a way that a new test cycle can be started
 *       without waiting for the results of the old one to come in.
 *
 * @todo Add in setting of timing controller, from timing aware test cases.
 */
public class DistributedCircuitImpl implements Circuit, TimingControllerAware
{
    /** Used for debugging purposes. */
    private static final Logger log = Logger.getLogger(DistributedCircuitImpl.class);

    /** Holds the conversation factory over which to coordinate the test. */
    protected ConversationFactory conversationFactory;

    /** Holds the controlSession over which to hold the control conversation. */
    protected Session controlSession;

    /** Holds the sender nodes in the test circuit. */
    protected List<TestClientDetails> senders;

    /** Holds the receiver nodes in the test circuit. */
    protected List<TestClientDetails> receivers;

    /** Holds the sender control conversations. */
    protected ConversationFactory.Conversation[] senderConversation;

    /** Holds the receiver control conversations. */
    protected ConversationFactory.Conversation[] receiverConversation;

    /** Holds the control topics for the senders in the test circuit. */
    protected Destination[] senderControlTopic;

    /** Holds the control topics for the receivers in the test circuit. */
    protected Destination[] receiverControlTopic;

    /** Holds the number of messages to send per test run. */
    protected int numMessages;

    /**
     * Holds the timing controller for the circuit. This is used to log test times asynchronously, when receiver nodes
     * return their reports after senders have completed a test case.
     */
    TimingController timingController;

    /**
     * Creates a distributed test circuit on the specified senders and receivers.
     *
     * @param session              The controlSession for all control conversations.
     * @param senders              The senders.
     * @param receivers            The receivers.
     * @param senderConversation   A control conversation with the senders.
     * @param receiverConversation A control conversation with the receivers.
     * @param senderControlTopic   The senders control topic.
     * @param receiverControlTopic The receivers control topic.
     */
    protected DistributedCircuitImpl(Session session, List<TestClientDetails> senders, List<TestClientDetails> receivers,
        ConversationFactory.Conversation[] senderConversation, ConversationFactory.Conversation[] receiverConversation,
        Destination[] senderControlTopic, Destination[] receiverControlTopic)
    {
        this.controlSession = session;
        this.senders = senders;
        this.receivers = receivers;
        this.senderConversation = senderConversation;
        this.receiverConversation = receiverConversation;
        this.senderControlTopic = senderControlTopic;
        this.receiverControlTopic = receiverControlTopic;
    }

    /**
     * Creates a distributed test circuit from the specified test parameters, on the senders and receivers
     * given.
     *
     * @param testProps           The test parameters.
     * @param senders             The sender ends in the test circuit.
     * @param receivers           The receiver ends in the test circuit.
     * @param conversationFactory A conversation factory for creating the control conversations with senders and receivers.
     *
     * @return A connected and ready to start, test circuit.
     */
    public static Circuit createCircuit(ParsedProperties testProps, List<TestClientDetails> senders,
        List<TestClientDetails> receivers, ConversationFactory conversationFactory)
    {
        log.debug("public static Circuit createCircuit(ParsedProperties testProps, List<TestClientDetails> senders, "
            + " List<TestClientDetails> receivers, ConversationFactory conversationFactory)");

        try
        {
            Session session = conversationFactory.getSession();

            // Create control conversations with each of the senders.
            ConversationFactory.Conversation[] senderConversation = new ConversationFactory.Conversation[senders.size()];
            Destination[] senderControlTopic = new Destination[senders.size()];

            for (int i = 0; i < senders.size(); i++)
            {
                TestClientDetails sender = senders.get(i);

                senderControlTopic[i] = session.createTopic(sender.privateControlKey);
                senderConversation[i] = conversationFactory.startConversation();
            }

            log.debug("Sender conversations created.");

            // Create control conversations with each of the receivers.
            ConversationFactory.Conversation[] receiverConversation = new ConversationFactory.Conversation[receivers.size()];
            Destination[] receiverControlTopic = new Destination[receivers.size()];

            for (int i = 0; i < receivers.size(); i++)
            {
                TestClientDetails receiver = receivers.get(i);

                receiverControlTopic[i] = session.createTopic(receiver.privateControlKey);
                receiverConversation[i] = conversationFactory.startConversation();
            }

            log.debug("Receiver conversations created.");

            // Assign the sender role to each of the sending test clients.
            for (int i = 0; i < senders.size(); i++)
            {
                TestClientDetails sender = senders.get(i);

                Message assignSender = conversationFactory.getSession().createMessage();
                TestUtils.setPropertiesOnMessage(assignSender, testProps);
                assignSender.setStringProperty("CONTROL_TYPE", "ASSIGN_ROLE");
                assignSender.setStringProperty("ROLE", "SENDER");

                senderConversation[i].send(senderControlTopic[i], assignSender);
            }

            log.debug("Sender role assignments sent.");

            // Assign the receivers role to each of the receiving test clients.
            for (int i = 0; i < receivers.size(); i++)
            {
                TestClientDetails receiver = receivers.get(i);

                Message assignReceiver = session.createMessage();
                TestUtils.setPropertiesOnMessage(assignReceiver, testProps);
                assignReceiver.setStringProperty("CONTROL_TYPE", "ASSIGN_ROLE");
                assignReceiver.setStringProperty("ROLE", "RECEIVER");

                receiverConversation[i].send(receiverControlTopic[i], assignReceiver);
            }

            log.debug("Receiver role assignments sent.");

            // Wait for the senders and receivers to confirm their roles.
            for (int i = 0; i < senders.size(); i++)
            {
                senderConversation[i].receive();
            }

            log.debug("Got all sender role confirmations");

            for (int i = 0; i < receivers.size(); i++)
            {
                receiverConversation[i].receive();
            }

            log.debug("Got all receiver role confirmations");

            // Package everything up as a circuit.
            return new DistributedCircuitImpl(session, senders, receivers, senderConversation, receiverConversation,
                senderControlTopic, receiverControlTopic);
        }
        catch (JMSException e)
        {
            // FIX: preserve the original exception as the cause instead of discarding it.
            throw new RuntimeException("JMSException not handled.", e);
        }
    }

    /**
     * Used by tests cases that can supply a {@link org.wso2.andes.junit.extensions.TimingController} to set the
     * controller on an aware test.
     *
     * @param controller The timing controller.
     */
    public void setTimingController(TimingController controller)
    {
        this.timingController = controller;
    }

    /**
     * Gets the interface on the publishing end of the circuit.
     *
     * @return The publishing end of the circuit.
     */
    public Publisher getPublisher()
    {
        throw new RuntimeException("Not Implemented.");
    }

    /**
     * Gets the interface on the receiving end of the circuit.
     *
     * @return The receiving end of the circuit.
     */
    public Receiver getReceiver()
    {
        throw new RuntimeException("Not Implemented.");
    }

    /**
     * Connects and starts the circuit. After this method is called the circuit is ready to send messages.
     */
    public void start()
    {
        log.debug("public void start(): called");

        try
        {
            // Start the test on each of the senders.
            Message start = controlSession.createMessage();
            start.setStringProperty("CONTROL_TYPE", "START");
            start.setIntProperty("MESSAGE_COUNT", numMessages);

            for (int i = 0; i < senders.size(); i++)
            {
                senderConversation[i].send(senderControlTopic[i], start);
            }

            log.debug("All senders told to start their tests.");
        }
        catch (JMSException e)
        {
            throw new RuntimeException("Unhandled JMSException.", e);
        }
    }

    /**
     * Checks the test circuit. The effect of this is to gather the circuits state, for both ends of the circuit,
     * into a report, against which assertions may be checked.
     *
     * @todo Replace the asynch receiver report thread with a choice of direct or asynch executor, so that asynch
     *       or synch logging of test timings is optional. Also need to provide an onMessage method that is capable
     *       of receiving timing reports that receivers will generate during an ongoing test, on the test sample
     *       size boundaries. The message timing logging code should be factored out as a common method that can
     *       be called in response to the final report responses, or the onMessage method. Another alternative is
     *       to abandon the final report request altogether and just use the onMessage method? I think the two
     *       differ though, as the final report is used to apply assertions, and the ongoing report is just for
     *       periodic timing results... In which case, maybe there needs to be a way for the onMessage method
     *       to process just some of the incoming messages, and forward the rest on to the conversion helper, as
     *       a sort of pre-conversation helper filter? Make conversation expose its onMessage method (it should
     *       already) and allow another delivery thread to filter the incoming messages to the conversation.
     */
    public void check()
    {
        log.debug("public void check(): called");

        try
        {
            // Wait for all the test senders to return their reports.
            for (int i = 0; i < senders.size(); i++)
            {
                Message senderReport = senderConversation[i].receive();

                log.debug("Sender " + senderReport.getStringProperty("CLIENT_NAME") + " reports message count: "
                    + senderReport.getIntProperty("MESSAGE_COUNT"));
                log.debug("Sender " + senderReport.getStringProperty("CLIENT_NAME") + " reports message time: "
                    + senderReport.getLongProperty("TEST_TIME"));
            }

            log.debug("Got all sender test reports.");

            // Apply sender assertions to pass/fail the tests.

            // Inject a short pause to give the receivers time to finish receiving their test messages.
            TestUtils.pause(500);

            // Ask the receivers for their reports.
            Message statusRequest = controlSession.createMessage();
            statusRequest.setStringProperty("CONTROL_TYPE", "STATUS_REQUEST");

            for (int i = 0; i < receivers.size(); i++)
            {
                receiverConversation[i].send(receiverControlTopic[i], statusRequest);
            }

            log.debug("All receiver test reports requested.");

            // Wait for all receiver reports to come in, but do so asynchronously.
            Runnable gatherAllReceiverReports =
                new Runnable()
                {
                    public void run()
                    {
                        try
                        {
                            // Wait for all the receivers to send their reports.
                            for (int i = 0; i < receivers.size(); i++)
                            {
                                Message receiverReport = receiverConversation[i].receive();

                                String clientName = receiverReport.getStringProperty("CLIENT_NAME");
                                int messageCount = receiverReport.getIntProperty("MESSAGE_COUNT");
                                long testTime = receiverReport.getLongProperty("TEST_TIME");

                                log.debug("Receiver " + clientName + " reports message count: " + messageCount);
                                log.debug("Receiver " + receiverReport.getStringProperty("CLIENT_NAME")
                                    + " reports message time: " + testTime);

                                // Apply receiver assertions to pass/fail the tests.

                                // Log the test timings on the asynchronous test timing controller.
                                /*try
                                {
                                    timingController.completeTest(true, messageCount, testTime);
                                }
                                // The timing control can throw InterruptedException is the current test is to be
                                // interrupted.
                                catch (InterruptedException e)
                                {
                                    e.printStackTrace();
                                }*/
                            }

                            log.debug("All receiver test reports received.");
                        }
                        catch (JMSException e)
                        {
                            throw new RuntimeException(e);
                        }
                    }
                };

            Thread receiverReportsThread = new Thread(gatherAllReceiverReports);
            receiverReportsThread.start();

            // return new Message[] { senderReport, receiverReport };
        }
        catch (JMSException e)
        {
            throw new RuntimeException("Unhandled JMSException.", e);
        }
    }

    /**
     * Closes the circuit. All associated resources are closed.
     */
    public void close()
    {
        log.debug("public void close(): called");

        // End the current test on all senders and receivers.
    }

    /**
     * Applies a list of assertions against the test circuit. The {@link #check()} method should be called before doing
     * this, to ensure that the circuit has gathered its state into a report to assert against.
     *
     * @param assertions The list of assertions to apply.
     *
     * @return Any assertions that failed.
     */
    public List<Assertion> applyAssertions(List<Assertion> assertions)
    {
        log.debug("public List<Assertion> applyAssertions(List<Assertion> assertions = " + assertions + "): called");

        List<Assertion> failures = new LinkedList<Assertion>();

        for (Assertion assertion : assertions)
        {
            if (!assertion.apply())
            {
                failures.add(assertion);
            }
        }

        return failures;
    }

    /**
     * Runs the default test procedure against the circuit, and checks that all of the specified assertions hold.
     *
     * @param numMessages The number of messages to send using the default test procedure.
     * @param assertions  The list of assertions to apply.
     *
     * @return Any assertions that failed.
     *
     * @todo From check onwards needs to be handled as a future. The future must call back onto the test case to
     *       report results asynchronously.
     */
    public List<Assertion> test(int numMessages, List<Assertion> assertions)
    {
        log.debug("public List<Assertion> test(int numMessages = " + numMessages + ", List<Assertion> assertions = "
            + assertions + "): called");

        // Keep the number of messages to send per test run, where the send method can reference it.
        this.numMessages = numMessages;

        // Start the test running on all sender circuit ends.
        start();

        // Request status reports to be handed in.
        check();

        // Assert conditions on the publishing end of the circuit.
        // Assert conditions on the receiving end of the circuit.
        List<Assertion> failures = applyAssertions(assertions);

        // Close the circuit ending the current test case.
        close();

        // Pass with no failed assertions or fail with a list of failed assertions.
        return failures;
    }
}
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.hadoop.hive;

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.serde2.AbstractSerDe;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.SerDeStats;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.elasticsearch.hadoop.cfg.HadoopSettingsManager;
import org.elasticsearch.hadoop.cfg.Settings;
import org.elasticsearch.hadoop.rest.InitializationUtils;
import org.elasticsearch.hadoop.serialization.bulk.BulkCommand;
import org.elasticsearch.hadoop.serialization.bulk.BulkCommands;
import org.elasticsearch.hadoop.util.BytesArray;
import org.elasticsearch.hadoop.util.FieldAlias;
import org.elasticsearch.hadoop.util.SettingsUtils;
import org.elasticsearch.hadoop.util.StringUtils;

/**
 * Hive SerDe that bridges Hive rows and Elasticsearch documents: {@link #serialize(Object, ObjectInspector)}
 * renders a Hive row directly into the bulk-command byte representation, while {@link #deserialize(Writable)}
 * converts the {@link Writable} tree read from Elasticsearch back into Hive's standard object representation.
 */
public class EsSerDe extends AbstractSerDe {

    // final: the logger is a per-class constant and must not be reassigned
    private static final Log log = LogFactory.getLog(EsSerDe.class);

    private Properties tableProperties;
    private Configuration cfg;
    private Settings settings;
    private StructObjectInspector inspector;

    // serialization artifacts, reused across rows to avoid per-row allocation
    private final BytesArray scratchPad = new BytesArray(512);
    private final HiveType hiveType = new HiveType(null, null);
    private final HiveBytesArrayWritable result = new HiveBytesArrayWritable();
    private StructTypeInfo structTypeInfo;
    private FieldAlias alias;
    private BulkCommand command;

    // lazy one-time initialization flags for the write and read paths respectively
    private boolean writeInitialized = false;
    private boolean readInitialized = false;
    // whether the target cluster is ES 2.0+; resolved lazily on the first deserialize call
    private boolean isEs20 = true;
    private boolean trace = false;

    // introduced in Hive 0.14
    // implemented to actually get access to the raw properties
    @Override
    public void initialize(Configuration conf, Properties tbl, Properties partitionProperties) throws SerDeException {
        inspector = HiveUtils.structObjectInspector(tbl);
        structTypeInfo = HiveUtils.typeInfo(inspector);
        cfg = conf;
        // table properties override/extend the Hadoop configuration when one is available
        settings = (cfg != null ? HadoopSettingsManager.loadFrom(cfg).merge(tbl) : HadoopSettingsManager.loadFrom(tbl));
        alias = HiveUtils.alias(settings);
        HiveUtils.fixHive13InvalidComments(settings, tbl);
        this.tableProperties = tbl;
        trace = log.isTraceEnabled();
    }

    @Override
    public void initialize(Configuration conf, Properties tbl) throws SerDeException {
        initialize(conf, tbl, new Properties());
    }

    /**
     * Converts a {@link Writable} read from Elasticsearch into Hive's in-memory representation
     * as described by the table's struct type info. Returns {@code null} for null/absent blobs.
     */
    @Override
    public Object deserialize(Writable blob) throws SerDeException {
        if (!readInitialized) {
            readInitialized = true;
            isEs20 = SettingsUtils.isEs20(settings);
        }

        if (blob == null || blob instanceof NullWritable) {
            return null;
        }
        Object des = hiveFromWritable(structTypeInfo, blob, alias, isEs20);

        if (trace) {
            log.trace(String.format("Deserialized [%s] to [%s]", blob, des));
        }
        return des;
    }

    @Override
    public ObjectInspector getObjectInspector() throws SerDeException {
        return inspector;
    }

    @Override
    public SerDeStats getSerDeStats() {
        // TODO: can compute serialize stats but not deserialized ones
        return null;
    }

    @Override
    public Class<? extends Writable> getSerializedClass() {
        return HiveBytesArrayWritable.class;
    }

    /**
     * Serializes a Hive row straight into the bulk-command bytes, reusing the shared scratch buffer.
     * NOTE: the returned {@link Writable} is a reused instance; callers must consume it before the next call.
     */
    @Override
    public Writable serialize(Object data, ObjectInspector objInspector) throws SerDeException {
        lazyInitializeWrite();

        // serialize the type directly to json (to avoid converting to Writable and then serializing)
        scratchPad.reset();
        hiveType.setObjectInspector(objInspector);
        hiveType.setObject(data);
        command.write(hiveType).copyTo(scratchPad);
        result.setContent(scratchPad);
        return result;
    }

    // One-time setup of the write pipeline (value writer, field extractor, bytes converter, bulk command).
    private void lazyInitializeWrite() {
        if (writeInitialized) {
            return;
        }
        writeInitialized = true;

        Settings settings = HadoopSettingsManager.loadFrom(tableProperties);

        InitializationUtils.setValueWriterIfNotSet(settings, HiveValueWriter.class, log);
        InitializationUtils.setFieldExtractorIfNotSet(settings, HiveFieldExtractor.class, log);
        InitializationUtils.setBytesConverterIfNeeded(settings, HiveBytesConverter.class, log);
        this.command = BulkCommands.create(settings, null);
    }

    /**
     * Recursively converts a {@link Writable} tree into Hive's standard object representation
     * (lists for LIST, LinkedHashMap for MAP, value-lists for STRUCT, raw Writable for primitives).
     * STRUCT fields are resolved through {@code alias}, supporting multi-level ("a.b.c") ES field names.
     *
     * @throws UnsupportedOperationException for UNION types, which are not supported
     */
    @SuppressWarnings("unchecked")
    static Object hiveFromWritable(TypeInfo type, Writable data, FieldAlias alias, boolean isEs20) {
        if (data == null || data instanceof NullWritable) {
            return null;
        }

        switch (type.getCategory()) {
        case LIST: {// or ARRAY
            ListTypeInfo listType = (ListTypeInfo) type;
            TypeInfo listElementType = listType.getListElementTypeInfo();

            ArrayWritable aw = (ArrayWritable) data;

            List<Object> list = new ArrayList<Object>();
            for (Writable writable : aw.get()) {
                list.add(hiveFromWritable(listElementType, writable, alias, isEs20));
            }

            return list;
        }

        case MAP: {
            MapTypeInfo mapType = (MapTypeInfo) type;
            Map<Writable, Writable> mw = (Map<Writable, Writable>) data;

            Map<Object, Object> map = new LinkedHashMap<Object, Object>();

            for (Entry<Writable, Writable> entry : mw.entrySet()) {
                map.put(hiveFromWritable(mapType.getMapKeyTypeInfo(), entry.getKey(), alias, isEs20),
                        hiveFromWritable(mapType.getMapValueTypeInfo(), entry.getValue(), alias, isEs20));
            }

            return map;
        }
        case STRUCT: {
            StructTypeInfo structType = (StructTypeInfo) type;
            List<String> names = structType.getAllStructFieldNames();
            List<TypeInfo> info = structType.getAllStructFieldTypeInfos();

            // return just the values
            List<Object> struct = new ArrayList<Object>();

            MapWritable map = (MapWritable) data;
            Text reuse = new Text();
            for (int index = 0; index < names.size(); index++) {
                String esAlias = alias.toES(names.get(index));
                // check for multi-level alias, walking one nested MapWritable per dot-separated level
                Writable current = map;
                for (String level : StringUtils.tokenize(esAlias, ".")) {
                    reuse.set(level);
                    current = ((MapWritable) current).get(reuse);
                    if (current == null) {
                        break;
                    }
                }
                struct.add(hiveFromWritable(info.get(index), current, alias, isEs20));
            }
            return struct;
        }

        case UNION: {
            throw new UnsupportedOperationException("union not yet supported");//break;
        }

        case PRIMITIVE:
        default:
            // return as is
            return data;
        }
    }
}
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
package android.support.v17.leanback.widget;

import android.support.v7.widget.RecyclerView;
import android.support.v17.leanback.R;
import android.util.Log;
import android.view.View;
import android.view.ViewGroup;

import java.util.ArrayList;

/**
 * Bridge from Presenter to RecyclerView.Adapter. Public to allow use by third
 * party presenters.
 */
public class ItemBridgeAdapter extends RecyclerView.Adapter {
    private static final String TAG = "ItemBridgeAdapter";
    private static final boolean DEBUG = false;

    /**
     * Interface for listening to view holder operations.
     */
    public static class AdapterListener {
        // Called when a previously unseen Presenter is registered as a new view type.
        public void onAddPresenter(Presenter presenter) {
        }
        // Called right after a ViewHolder has been created, before any binding.
        public void onCreate(ViewHolder viewHolder) {
        }
        // Called after the item has been bound to the holder.
        public void onBind(ViewHolder viewHolder) {
        }
        // Called after the holder has been unbound and its item cleared.
        public void onUnbind(ViewHolder viewHolder) {
        }
        // Called before the presenter is notified of window attachment.
        public void onAttachedToWindow(ViewHolder viewHolder) {
        }
        // Called after the presenter is notified of window detachment.
        public void onDetachedFromWindow(ViewHolder viewHolder) {
        }
    }

    /**
     * Interface for wrapping a view created by presenter into another view.
     * The wrapper must be immediate parent of the wrapped view.
     */
    public static abstract class Wrapper {
        public abstract View createWrapper(View root);
        public abstract void wrap(View wrapper, View wrapped);
    }

    private ObjectAdapter mAdapter;
    private Wrapper mWrapper;
    private PresenterSelector mPresenterSelector;
    private FocusHighlight mFocusHighlight;
    private AdapterListener mAdapterListener;
    // Distinct presenters seen so far; the index within this list is the RecyclerView view type.
    private ArrayList<Presenter> mPresenters = new ArrayList<Presenter>();

    // Forwards focus changes to the focus highlight and to any listener the presenter installed.
    final class OnFocusChangeListener implements View.OnFocusChangeListener {
        View.OnFocusChangeListener mChainedListener;

        @Override
        public void onFocusChange(View view, boolean hasFocus) {
            if (DEBUG) Log.v(TAG, "onFocusChange " + hasFocus + " " + view
                    + " mFocusHighlight" + mFocusHighlight);
            // When a Wrapper is installed, the highlight is applied to the wrapper,
            // which is the immediate parent of the presenter-created view.
            if (mWrapper != null) {
                view = (View) view.getParent();
            }
            if (mFocusHighlight != null) {
                mFocusHighlight.onItemFocused(view, hasFocus);
            }
            // Chain to the listener the presenter may have set before this adapter replaced it.
            if (mChainedListener != null) {
                mChainedListener.onFocusChange(view, hasFocus);
            }
        }
    }

    public class ViewHolder extends RecyclerView.ViewHolder {
        final Presenter mPresenter;
        final Presenter.ViewHolder mHolder;
        final OnFocusChangeListener mFocusChangeListener = new OnFocusChangeListener();
        Object mItem;
        Object mExtraObject;

        /**
         * Get {@link Presenter}.
         */
        public final Presenter getPresenter() {
            return mPresenter;
        }

        /**
         * Get {@link Presenter.ViewHolder}.
         */
        public final Presenter.ViewHolder getViewHolder() {
            return mHolder;
        }

        /**
         * Get currently bound object.
         */
        public final Object getItem() {
            return mItem;
        }

        /**
         * Get extra object associated with the view.  Developer can attach
         * any customized UI object in addition to {@link Presenter.ViewHolder}.
         * A typical use case is attaching an animator object.
         */
        public final Object getExtraObject() {
            return mExtraObject;
        }

        /**
         * Set extra object associated with the view.  Developer can attach
         * any customized UI object in addition to {@link Presenter.ViewHolder}.
         * A typical use case is attaching an animator object.
         */
        public void setExtraObject(Object object) {
            mExtraObject = object;
        }

        ViewHolder(Presenter presenter, View view, Presenter.ViewHolder holder) {
            super(view);
            mPresenter = presenter;
            mHolder = holder;
        }
    }

    // Relays ObjectAdapter change notifications to the corresponding RecyclerView.Adapter notifications.
    private ObjectAdapter.DataObserver mDataObserver = new ObjectAdapter.DataObserver() {
        @Override
        public void onChanged() {
            ItemBridgeAdapter.this.notifyDataSetChanged();
        }
        @Override
        public void onItemRangeChanged(int positionStart, int itemCount) {
            ItemBridgeAdapter.this.notifyItemRangeChanged(positionStart, itemCount);
        }
        @Override
        public void onItemRangeInserted(int positionStart, int itemCount) {
            ItemBridgeAdapter.this.notifyItemRangeInserted(positionStart, itemCount);
        }
        @Override
        public void onItemRangeRemoved(int positionStart, int itemCount) {
            ItemBridgeAdapter.this.notifyItemRangeRemoved(positionStart, itemCount);
        }
    };

    public ItemBridgeAdapter(ObjectAdapter adapter, PresenterSelector presenterSelector) {
        setAdapter(adapter);
        mPresenterSelector = presenterSelector;
    }

    public ItemBridgeAdapter(ObjectAdapter adapter) {
        this(adapter, null);
    }

    public ItemBridgeAdapter() {
    }

    // Swaps the backing ObjectAdapter, moving the data observer registration and
    // propagating the stable-ids setting to this RecyclerView adapter.
    public void setAdapter(ObjectAdapter adapter) {
        if (mAdapter != null) {
            mAdapter.unregisterObserver(mDataObserver);
        }
        mAdapter = adapter;
        if (mAdapter == null) {
            return;
        }

        mAdapter.registerObserver(mDataObserver);
        if (hasStableIds() != mAdapter.hasStableIds()) {
            setHasStableIds(mAdapter.hasStableIds());
        }
    }

    public void setWrapper(Wrapper wrapper) {
        mWrapper = wrapper;
    }

    public Wrapper getWrapper() {
        return mWrapper;
    }

    void setFocusHighlight(FocusHighlight listener) {
        mFocusHighlight = listener;
        if (DEBUG) Log.v(TAG, "setFocusHighlight " + mFocusHighlight);
    }

    // Detaches from the current adapter (unregisters the observer via setAdapter(null)).
    public void clear() {
        setAdapter(null);
    }

    @Override
    public int getItemCount() {
        return mAdapter.size();
    }

    // Lazily registers each distinct Presenter; its index in mPresenters becomes the view type.
    @Override
    public int getItemViewType(int position) {
        // A selector set on this bridge takes precedence over the one on the ObjectAdapter.
        PresenterSelector presenterSelector = mPresenterSelector != null ?
                mPresenterSelector : mAdapter.getPresenterSelector();
        Object item = mAdapter.get(position);
        Presenter presenter = presenterSelector.getPresenter(item);
        int type = mPresenters.indexOf(presenter);
        if (type < 0) {
            mPresenters.add(presenter);
            type = mPresenters.indexOf(presenter);
            if (mAdapterListener != null) {
                mAdapterListener.onAddPresenter(presenter);
            }
        }
        return type;
    }

    /**
     * {@link View.OnFocusChangeListener} that assigned in
     * {@link Presenter#onCreateViewHolder(ViewGroup)} may be chained, user should never change
     * {@link View.OnFocusChangeListener} after that.
     */
    @Override
    public RecyclerView.ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
        if (DEBUG) Log.v(TAG, "onCreateViewHolder viewType " + viewType);
        Presenter presenter = mPresenters.get(viewType);
        Presenter.ViewHolder presenterVh;
        View view;
        // With a Wrapper, the RecyclerView holder owns the wrapper view and the
        // presenter-created view becomes its child; otherwise they are the same view.
        if (mWrapper != null) {
            view = mWrapper.createWrapper(parent);
            presenterVh = presenter.onCreateViewHolder(parent);
            mWrapper.wrap(view, presenterVh.view);
        } else {
            presenterVh = presenter.onCreateViewHolder(parent);
            view = presenterVh.view;
        }
        ViewHolder viewHolder = new ViewHolder(presenter, view, presenterVh);
        if (mAdapterListener != null) {
            mAdapterListener.onCreate(viewHolder);
        }
        // Capture any focus listener the presenter installed so it can be chained
        // behind this adapter's own focus-change handling.
        View presenterView = viewHolder.mHolder.view;
        if (presenterView != null) {
            viewHolder.mFocusChangeListener.mChainedListener =
                    presenterView.getOnFocusChangeListener();
            presenterView.setOnFocusChangeListener(viewHolder.mFocusChangeListener);
        }
        return viewHolder;
    }

    public void setAdapterListener(AdapterListener listener) {
        mAdapterListener = listener;
    }

    @Override
    public void onBindViewHolder(RecyclerView.ViewHolder holder, int position) {
        if (DEBUG) Log.v(TAG, "onBindViewHolder position " + position);
        ViewHolder viewHolder = (ViewHolder) holder;
        viewHolder.mItem = mAdapter.get(position);

        viewHolder.mPresenter.onBindViewHolder(viewHolder.mHolder, viewHolder.mItem);

        if (mAdapterListener != null) {
            mAdapterListener.onBind(viewHolder);
        }
    }

    @Override
    public void onViewRecycled(RecyclerView.ViewHolder holder) {
        ViewHolder viewHolder = (ViewHolder) holder;
        viewHolder.mPresenter.onUnbindViewHolder(viewHolder.mHolder);
        // Release the item reference so recycled holders do not keep data alive.
        viewHolder.mItem = null;

        if (mAdapterListener != null) {
            mAdapterListener.onUnbind(viewHolder);
        }
    }

    @Override
    public void onViewAttachedToWindow(RecyclerView.ViewHolder holder) {
        ViewHolder viewHolder = (ViewHolder) holder;
        // Listener is notified before the presenter on attach...
        if (mAdapterListener != null) {
            mAdapterListener.onAttachedToWindow(viewHolder);
        }
        viewHolder.mPresenter.onViewAttachedToWindow(viewHolder.mHolder);
    }

    @Override
    public void onViewDetachedFromWindow(RecyclerView.ViewHolder holder) {
        ViewHolder viewHolder = (ViewHolder) holder;
        // ...and after the presenter on detach (mirror ordering of attach).
        viewHolder.mPresenter.onViewDetachedFromWindow(viewHolder.mHolder);
        if (mAdapterListener != null) {
            mAdapterListener.onDetachedFromWindow(viewHolder);
        }
    }

    @Override
    public long getItemId(int position) {
        return mAdapter.getId(position);
    }
}
package org.terifan.raccoon.io.managed;

import java.io.IOException;
import org.terifan.raccoon.io.DatabaseIOException;
import org.terifan.raccoon.io.physical.IPhysicalBlockDevice;
import org.terifan.raccoon.io.secure.SecureBlockDevice;
import org.terifan.raccoon.storage.BlockPointer;
import org.terifan.raccoon.util.ByteArrayBuffer;
import org.terifan.security.cryptography.ISAAC;
import org.terifan.security.messagedigest.MurmurHash3;


/**
 * The super block of a managed block device: format version, timestamps, transaction id,
 * the space-map pointer, an application pointer, and two device headers. It occupies one
 * device block laid out as [16-byte checksum][payload][padding][16-byte IV area], where the
 * checksum is a MurmurHash3-256 of the payload+padding region seeded with the block index.
 */
class SuperBlock
{
	private final static byte FORMAT_VERSION = 1;
	private final static int CHECKSUM_SIZE = 16;
	private final static int IV_SIZE = 16;

	public static final int DEVICE_HEADER_LABEL_MAX_LENGTH = 32;

	private int mFormatVersion;
	private long mCreateTime;
	private long mModifiedTime;
	private long mTransactionId;
	private DeviceHeader mTenantHeader;
	private DeviceHeader mApplicationHeader;
	private BlockPointer mSpaceMapPointer;
	private byte[] mApplicationPointer;


	// Creates an empty super block; transaction id -1 marks "no transaction yet".
	public SuperBlock()
	{
		mFormatVersion = FORMAT_VERSION;
		mCreateTime = System.currentTimeMillis();
		mSpaceMapPointer = new BlockPointer();
		mTransactionId = -1L;
		mApplicationHeader = new DeviceHeader();
		mTenantHeader = new DeviceHeader();
		mApplicationPointer = new byte[0];
	}


	// Creates an empty super block starting at the given transaction id.
	public SuperBlock(long aTransactionId)
	{
		this();

		mTransactionId = aTransactionId;
	}


	// Loads and verifies the super block stored at the given block index.
	public SuperBlock(IPhysicalBlockDevice aBlockDevice, long aBlockIndex)
	{
		this();

		read(aBlockDevice, aBlockIndex);
	}


	public BlockPointer getSpaceMapPointer()
	{
		return mSpaceMapPointer;
	}


	public long getTransactionId()
	{
		return mTransactionId;
	}


	public void incrementTransactionId()
	{
		mTransactionId++;
	}


	public int getFormatVersion()
	{
		return mFormatVersion;
	}


	public long getCreateTime()
	{
		return mCreateTime;
	}


	public void setCreateTime(long aCreateTime)
	{
		mCreateTime = aCreateTime;
	}


	public long getModifiedTime()
	{
		return mModifiedTime;
	}


	public void setModifiedTime(long aModifiedTime)
	{
		mModifiedTime = aModifiedTime;
	}


	DeviceHeader getApplicationHeader()
	{
		return mApplicationHeader;
	}


	void setApplicationHeader(DeviceHeader aApplicationHeader)
	{
		mApplicationHeader = aApplicationHeader;
	}


	DeviceHeader getTenantHeader()
	{
		return mTenantHeader;
	}


	void setTenantHeader(DeviceHeader aTenantHeader)
	{
		mTenantHeader = aTenantHeader;
	}


	// NOTE(review): returns the internal array without a defensive copy; callers share state.
	public byte[] getApplicationPointer()
	{
		return mApplicationPointer;
	}


	public void setApplicationPointer(byte[] aApplicationPointer)
	{
		mApplicationPointer = aApplicationPointer;
	}


	/**
	 * Reads one block from the device, verifies its MurmurHash3-256 checksum (seeded with the
	 * block index, so a block copied to another index fails verification) and unmarshals it.
	 *
	 * @throws DatabaseIOException if the stored checksum does not match the computed one
	 */
	public void read(IPhysicalBlockDevice aBlockDevice, long aBlockIndex)
	{
		int blockSize = aBlockDevice.getBlockSize();

		ByteArrayBuffer buffer = ByteArrayBuffer.alloc(blockSize, true);

		if (aBlockDevice instanceof SecureBlockDevice)
		{
			// secure devices manage the trailing IV area themselves
			((SecureBlockDevice)aBlockDevice).readBlockWithIV(aBlockIndex, buffer.array(), 0, blockSize);
		}
		else
		{
			// NOTE(review): the long[2] appears to be a placeholder IV/key argument for plain devices — confirm against IPhysicalBlockDevice
			aBlockDevice.readBlock(aBlockIndex, buffer.array(), 0, buffer.capacity(), new long[2]);
		}

		// hash covers everything between the checksum prefix and the IV suffix
		long[] hash = MurmurHash3.hash256(buffer.array(), CHECKSUM_SIZE, blockSize - CHECKSUM_SIZE - IV_SIZE, aBlockIndex);

		buffer.position(0);

		if (buffer.readInt64() != hash[0] || buffer.readInt64() != hash[1])
		{
			throw new DatabaseIOException("Checksum error at block index " + aBlockIndex);
		}

		unmarshal(buffer);
	}


	/**
	 * Marshals this super block into one device block, pads the unused payload area with random
	 * bytes on secure devices (to avoid leaking structure), prepends the checksum and writes it.
	 *
	 * @throws DatabaseIOException if the index is negative or the marshalled data does not fit
	 */
	public void write(IPhysicalBlockDevice aBlockDevice, long aBlockIndex)
	{
		if (aBlockIndex < 0)
		{
			throw new DatabaseIOException("Block at illegal offset: " + aBlockIndex);
		}

		mModifiedTime = System.currentTimeMillis();

		int blockSize = aBlockDevice.getBlockSize();

		ByteArrayBuffer buffer = ByteArrayBuffer.alloc(blockSize, true);
		buffer.position(CHECKSUM_SIZE); // reserve space for checksum

		marshal(buffer);

		// the last IV_SIZE bytes are reserved for the IV; the payload must not reach into them
		if (buffer.remaining() < IV_SIZE)
		{
			throw new DatabaseIOException("SuperBlock marshalled into a too large buffer");
		}

		if (aBlockDevice instanceof SecureBlockDevice)
		{
			// fill the padding with random bytes so the ciphertext reveals no payload length
			ISAAC.PRNG.nextBytes(buffer.array(), buffer.position(), buffer.remaining() - IV_SIZE);
		}

		long[] hash = MurmurHash3.hash256(buffer.array(), CHECKSUM_SIZE, blockSize - CHECKSUM_SIZE - IV_SIZE, aBlockIndex);

		buffer.position(0);
		buffer.writeInt64(hash[0]);
		buffer.writeInt64(hash[1]);

		if (aBlockDevice instanceof SecureBlockDevice)
		{
			((SecureBlockDevice)aBlockDevice).writeBlockWithIV(aBlockIndex, buffer.array(), 0, blockSize);
		}
		else
		{
			aBlockDevice.writeBlock(aBlockIndex, buffer.array(), 0, blockSize, new long[2]);
		}
	}


	// Serializes all fields after the checksum prefix; field order must match unmarshal().
	private void marshal(ByteArrayBuffer aBuffer)
	{
		if (mApplicationPointer == null)
		{
			throw new IllegalStateException("The application pointer must be specified");
		}
		if (mApplicationPointer.length > IManagedBlockDevice.APPLICATION_POINTER_MAX_SIZE)
		{
			throw new IllegalStateException("The application pointer is too long");
		}

		aBuffer.writeInt8(mFormatVersion);
		aBuffer.writeInt64(mCreateTime);
		aBuffer.writeInt64(mModifiedTime);
		aBuffer.writeInt64(mTransactionId);
		mSpaceMapPointer.marshal(aBuffer);
		// NOTE(review): length stored as a single byte — assumes APPLICATION_POINTER_MAX_SIZE fits; confirm
		aBuffer.writeInt8(mApplicationPointer.length);
		aBuffer.write(mApplicationPointer);
		mApplicationHeader.marshal(aBuffer);
		mTenantHeader.marshal(aBuffer);
	}


	// Deserializes all fields; order mirrors marshal(). Rejects unknown format versions.
	private void unmarshal(ByteArrayBuffer aBuffer)
	{
		mFormatVersion = aBuffer.readInt8();

		if (mFormatVersion != FORMAT_VERSION)
		{
			throw new UnsupportedVersionException("Data format is not supported: was " + mFormatVersion + ", expected " + FORMAT_VERSION);
		}

		mCreateTime = aBuffer.readInt64();
		mModifiedTime = aBuffer.readInt64();
		mTransactionId = aBuffer.readInt64();
		mSpaceMapPointer.unmarshal(aBuffer);
		mApplicationPointer = aBuffer.read(new byte[aBuffer.readInt8()]);
		mApplicationHeader.unmarshal(aBuffer);
		mTenantHeader.unmarshal(aBuffer);
	}
}
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.prestosql.jdbc;

import com.google.common.collect.AbstractIterator;
import com.google.common.collect.Streams;
import io.prestosql.client.Column;
import io.prestosql.client.QueryStatusInfo;
import io.prestosql.client.StatementClient;

import java.sql.SQLException;
import java.time.ZoneId;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Semaphore;
import java.util.function.Consumer;
import java.util.stream.Stream;

import static com.google.common.base.Throwables.throwIfUnchecked;
import static com.google.common.base.Verify.verify;
import static io.airlift.concurrent.Threads.daemonThreadsNamed;
import static java.lang.String.format;
import static java.util.Objects.requireNonNull;
import static java.util.concurrent.Executors.newCachedThreadPool;

/**
 * JDBC ResultSet backed by a {@link StatementClient}. Pages of data are fetched on a
 * background thread ({@link AsyncIterator}) and flattened into rows, with an optional
 * row limit, while progress and warnings are reported through the supplied callbacks.
 */
public class PrestoResultSet
        extends AbstractPrestoResultSet
{
    private final StatementClient client;
    private final String queryId;

    /**
     * Creates a result set, blocking until the query's column metadata is available.
     *
     * @param maxRows maximum number of rows to expose, or 0 for unlimited
     * @throws SQLException if the query fails before producing columns
     */
    static PrestoResultSet create(StatementClient client, long maxRows, Consumer<QueryStats> progressCallback, WarningsManager warningsManager)
            throws SQLException
    {
        requireNonNull(client, "client is null");
        List<Column> columns = getColumns(client, progressCallback);
        return new PrestoResultSet(client, columns, maxRows, progressCallback, warningsManager);
    }

    private PrestoResultSet(StatementClient client, List<Column> columns, long maxRows, Consumer<QueryStats> progressCallback, WarningsManager warningsManager)
            throws SQLException
    {
        super(
                getResultTimeZone(requireNonNull(client, "client is null")),
                columns,
                new AsyncIterator<>(flatten(new ResultsPageIterator(client, progressCallback, warningsManager), maxRows), client));

        this.client = requireNonNull(client, "client is null");
        requireNonNull(progressCallback, "progressCallback is null");

        this.queryId = client.currentStatusInfo().getId();
    }

    // Results are interpreted in the session time zone only when explicitly requested.
    private static ZoneId getResultTimeZone(StatementClient client)
    {
        if (client.useSessionTimeZone()) {
            return client.getTimeZone();
        }
        return ZoneId.systemDefault();
    }

    public String getQueryId()
    {
        return queryId;
    }

    public QueryStats getStats()
    {
        return QueryStats.create(queryId, client.getStats());
    }

    @Override
    public void close()
            throws SQLException
    {
        closed.set(true);
        // stop the background fetcher before closing the client it reads from
        ((AsyncIterator<?>) results).cancel();
        client.close();
    }

    void partialCancel()
    {
        client.cancelLeafStage();
    }

    // Flattens pages of rows into a single row iterator, applying the optional row limit.
    private static <T> Iterator<T> flatten(Iterator<Iterable<T>> iterator, long maxRows)
    {
        Stream<T> stream = Streams.stream(iterator)
                .flatMap(Streams::stream);
        if (maxRows > 0) {
            stream = stream.limit(maxRows);
        }
        return stream.iterator();
    }

    /**
     * Pulls rows from the underlying iterator on a background thread into a bounded queue,
     * so network fetching overlaps with client-side consumption. A semaphore permit is
     * released per queued row, plus one final permit when the producer finishes, which lets
     * {@link #computeNext()} distinguish "row available" from "producer done".
     */
    private static class AsyncIterator<T>
            extends AbstractIterator<T>
    {
        private static final int MAX_QUEUED_ROWS = 50_000;
        private static final ExecutorService executorService = newCachedThreadPool(
                daemonThreadsNamed("Presto JDBC worker-%d"));
        private final StatementClient client;
        private final BlockingQueue<T> rowQueue = new ArrayBlockingQueue<>(MAX_QUEUED_ROWS);
        // Semaphore to indicate that some data is ready.
        // Each permit represents a row of data (or that the underlying iterator is exhausted).
        private final Semaphore semaphore = new Semaphore(0);
        private final CompletableFuture<Void> future;

        public AsyncIterator(Iterator<T> dataIterator, StatementClient client)
        {
            requireNonNull(dataIterator, "dataIterator is null");
            this.client = client;
            this.future = CompletableFuture.runAsync(() -> {
                try {
                    while (dataIterator.hasNext()) {
                        rowQueue.put(dataIterator.next());
                        semaphore.release();
                    }
                }
                catch (InterruptedException e) {
                    interrupt(e);
                }
                finally {
                    // final permit: unblocks the consumer even when no row was queued
                    semaphore.release();
                }
            }, executorService);
        }

        public void cancel()
        {
            future.cancel(true);
        }

        // Re-interrupts the current thread, closes the client and aborts the caller.
        public void interrupt(InterruptedException e)
        {
            client.close();
            Thread.currentThread().interrupt();
            throw new RuntimeException(new SQLException("ResultSet thread was interrupted", e));
        }

        @Override
        protected T computeNext()
        {
            try {
                semaphore.acquire();
            }
            catch (InterruptedException e) {
                interrupt(e);
            }

            if (rowQueue.isEmpty()) {
                // If we got here and the queue is empty the thread fetching from the underlying iterator is done.
                // Wait for Future to marked done and check status.
                try {
                    future.get();
                }
                catch (InterruptedException e) {
                    interrupt(e);
                }
                catch (ExecutionException e) {
                    throwIfUnchecked(e.getCause());
                    throw new RuntimeException(e.getCause());
                }
                return endOfData();
            }
            return rowQueue.poll();
        }
    }

    /**
     * Iterates over pages of data from the statement client, reporting progress and
     * collecting warnings on every advance, and raising the query error (if any) at the end.
     */
    private static class ResultsPageIterator
            extends AbstractIterator<Iterable<List<Object>>>
    {
        private final StatementClient client;
        private final Consumer<QueryStats> progressCallback;
        private final WarningsManager warningsManager;

        private ResultsPageIterator(StatementClient client, Consumer<QueryStats> progressCallback, WarningsManager warningsManager)
        {
            this.client = requireNonNull(client, "client is null");
            this.progressCallback = requireNonNull(progressCallback, "progressCallback is null");
            this.warningsManager = requireNonNull(warningsManager, "warningsManager is null");
        }

        @Override
        protected Iterable<List<Object>> computeNext()
        {
            while (client.isRunning()) {
                QueryStatusInfo results = client.currentStatusInfo();
                progressCallback.accept(QueryStats.create(results.getId(), results.getStats()));
                warningsManager.addWarnings(results.getWarnings());
                Iterable<List<Object>> data = client.currentData().getData();
                // advance before returning the current page; a no-op try/catch that
                // merely rethrew RuntimeException was removed here
                client.advance();
                if (data != null) {
                    return data;
                }
            }

            verify(client.isFinished());
            QueryStatusInfo results = client.finalStatusInfo();
            progressCallback.accept(QueryStats.create(results.getId(), results.getStats()));
            warningsManager.addWarnings(results.getWarnings());
            if (results.getError() != null) {
                throw new RuntimeException(resultsException(results));
            }
            return endOfData();
        }
    }

    // Advances the client until column metadata is available; fails if the query ends without columns.
    private static List<Column> getColumns(StatementClient client, Consumer<QueryStats> progressCallback)
            throws SQLException
    {
        while (client.isRunning()) {
            QueryStatusInfo results = client.currentStatusInfo();
            progressCallback.accept(QueryStats.create(results.getId(), results.getStats()));
            List<Column> columns = results.getColumns();
            if (columns != null) {
                return columns;
            }
            client.advance();
        }

        verify(client.isFinished());
        QueryStatusInfo results = client.finalStatusInfo();
        if (results.getError() == null) {
            throw new SQLException(format("Query has no columns (#%s)", results.getId()));
        }
        throw resultsException(results);
    }
}
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.consumer.CommitFailedException; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetCommitCallback; import org.apache.kafka.clients.consumer.RetriableCommitFailedException; import org.apache.kafka.clients.consumer.internals.PartitionAssignor.Assignment; import org.apache.kafka.clients.consumer.internals.PartitionAssignor.Subscription; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.errors.WakeupException; import org.apache.kafka.common.metrics.Measurable; import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; 
import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.Count; import org.apache.kafka.common.metrics.stats.Max; import org.apache.kafka.common.metrics.stats.Rate; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.JoinGroupRequest.ProtocolMetadata; import org.apache.kafka.common.requests.OffsetCommitRequest; import org.apache.kafka.common.requests.OffsetCommitResponse; import org.apache.kafka.common.requests.OffsetFetchRequest; import org.apache.kafka.common.requests.OffsetFetchResponse; import org.apache.kafka.common.utils.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.atomic.AtomicInteger; /** * This class manages the coordination process with the consumer coordinator. 
 */
public final class ConsumerCoordinator extends AbstractCoordinator {

    private static final Logger log = LoggerFactory.getLogger(ConsumerCoordinator.class);

    // candidate assignment strategies advertised in JoinGroup; the one selected by the
    // coordinator is looked up by name in lookupAssignor()
    private final List<PartitionAssignor> assignors;
    private final Metadata metadata;
    private final ConsumerCoordinatorMetrics sensors;
    private final SubscriptionState subscriptions;
    // used when commitOffsetsAsync() is called with a null callback; logs failures only
    private final OffsetCommitCallback defaultOffsetCommitCallback;
    private final boolean autoCommitEnabled;
    private final int autoCommitIntervalMs;
    // may be null; when present, onCommit() is invoked after each successful commit
    private final ConsumerInterceptors<?, ?> interceptors;
    private final boolean excludeInternalTopics;
    // number of async commits still waiting on coordinator discovery; checked in close()
    // so we do not shut down while lookups that will send commits are in flight
    private final AtomicInteger pendingAsyncCommits;

    // this collection must be thread-safe because it is modified from the response handler
    // of offset commit requests, which may be invoked from the heartbeat thread
    private final ConcurrentLinkedQueue<OffsetCommitCompletion> completedOffsetCommits;

    private boolean isLeader = false;
    // the topics this member reported in its last JoinGroup subscription; compared against
    // the current subscription in needRejoin()
    private Set<String> joinedSubscription;
    // snapshot of partition counts from the most recent metadata update
    private MetadataSnapshot metadataSnapshot;
    // snapshot taken by the leader when it performed the assignment; a mismatch with
    // metadataSnapshot triggers a rebalance (see needRejoin())
    private MetadataSnapshot assignmentSnapshot;
    // absolute time (ms) of the next asynchronous auto-commit, when auto-commit is enabled
    private long nextAutoCommitDeadline;

    /**
     * Initialize the coordination manager.
     */
    public ConsumerCoordinator(ConsumerNetworkClient client,
                               String groupId,
                               int rebalanceTimeoutMs,
                               int sessionTimeoutMs,
                               int heartbeatIntervalMs,
                               List<PartitionAssignor> assignors,
                               Metadata metadata,
                               SubscriptionState subscriptions,
                               Metrics metrics,
                               String metricGrpPrefix,
                               Time time,
                               long retryBackoffMs,
                               boolean autoCommitEnabled,
                               int autoCommitIntervalMs,
                               ConsumerInterceptors<?, ?> interceptors,
                               boolean excludeInternalTopics,
                               final boolean leaveGroupOnClose) {
        super(client,
                groupId,
                rebalanceTimeoutMs,
                sessionTimeoutMs,
                heartbeatIntervalMs,
                metrics,
                metricGrpPrefix,
                time,
                retryBackoffMs,
                leaveGroupOnClose);
        this.metadata = metadata;
        this.metadataSnapshot = new MetadataSnapshot(subscriptions, metadata.fetch());
        this.subscriptions = subscriptions;
        this.defaultOffsetCommitCallback = new DefaultOffsetCommitCallback();
        this.autoCommitEnabled = autoCommitEnabled;
        this.autoCommitIntervalMs = autoCommitIntervalMs;
        this.assignors = assignors;
        this.completedOffsetCommits = new ConcurrentLinkedQueue<>();
        this.sensors = new ConsumerCoordinatorMetrics(metrics, metricGrpPrefix);
        this.interceptors = interceptors;
        this.excludeInternalTopics = excludeInternalTopics;
        this.pendingAsyncCommits = new AtomicInteger();

        if (autoCommitEnabled)
            this.nextAutoCommitDeadline = time.milliseconds() + autoCommitIntervalMs;

        // force an initial metadata fetch and start watching for changes relevant to the group
        this.metadata.requestUpdate();
        addMetadataListener();
    }

    /**
     * The protocol type advertised to the group coordinator (the consumer embedded protocol).
     */
    @Override
    public String protocolType() {
        return ConsumerProtocol.PROTOCOL_TYPE;
    }

    /**
     * Build the JoinGroup protocol metadata: one serialized {@link Subscription} per
     * configured assignor. Also records the subscription used for this join attempt so
     * that {@link #needRejoin()} can detect later subscription changes.
     */
    @Override
    public List<ProtocolMetadata> metadata() {
        this.joinedSubscription = subscriptions.subscription();
        List<ProtocolMetadata> metadataList = new ArrayList<>();
        for (PartitionAssignor assignor : assignors) {
            Subscription subscription = assignor.subscription(joinedSubscription);
            ByteBuffer metadata = ConsumerProtocol.serializeSubscription(subscription);
            metadataList.add(new ProtocolMetadata(assignor.name(), metadata));
        }
        return metadataList;
    }

    /**
     * Re-evaluate a pattern (regex) subscription against the given cluster's topics,
     * optionally excluding internal topics, and update the subscription state and
     * tracked metadata topics accordingly.
     */
    public void updatePatternSubscription(Cluster cluster) {
        final Set<String> topicsToSubscribe = new HashSet<>();

        for (String topic : cluster.topics())
            if (subscriptions.subscribedPattern().matcher(topic).matches() &&
                    !(excludeInternalTopics && cluster.internalTopics().contains(topic)))
                topicsToSubscribe.add(topic);

        subscriptions.subscribeFromPattern(topicsToSubscribe);

        // note we still need to update the topics contained in the metadata. Although we have
        // specified that all topics should be fetched, only those set explicitly will be retained
        metadata.setTopics(subscriptions.groupSubscription());
    }

    /**
     * Install a metadata listener that (1) fails fast on unauthorized topics, (2) keeps
     * pattern subscriptions in sync with the cluster, and (3) refreshes the metadata
     * snapshot used to detect partition-count changes that require a rebalance.
     */
    private void addMetadataListener() {
        this.metadata.addListener(new Metadata.Listener() {
            @Override
            public void onMetadataUpdate(Cluster cluster, Set<String> unavailableTopics) {
                // if we encounter any unauthorized topics, raise an exception to the user
                if (!cluster.unauthorizedTopics().isEmpty())
                    throw new TopicAuthorizationException(new HashSet<>(cluster.unauthorizedTopics()));

                if (subscriptions.hasPatternSubscription())
                    updatePatternSubscription(cluster);

                // check if there are any changes to the metadata which should trigger a rebalance
                if (subscriptions.partitionsAutoAssigned()) {
                    MetadataSnapshot snapshot = new MetadataSnapshot(subscriptions, cluster);
                    if (!snapshot.equals(metadataSnapshot))
                        metadataSnapshot = snapshot;
                }

                // some subscribed topics are currently unavailable, so keep refreshing metadata
                if (!Collections.disjoint(metadata.topics(), unavailableTopics))
                    metadata.requestUpdate();
            }
        });
    }

    // find the configured assignor matching the strategy name chosen by the coordinator,
    // or null if none matches
    private PartitionAssignor lookupAssignor(String name) {
        for (PartitionAssignor assignor : this.assignors) {
            if (assignor.name().equals(name))
                return assignor;
        }
        return null;
    }

    /**
     * Apply the assignment received from the group leader (via SyncGroup): update the
     * subscription state, refresh metadata, notify the assignor, reset the auto-commit
     * deadline, and finally invoke the user's rebalance listener.
     */
    @Override
    protected void onJoinComplete(int generation,
                                  String memberId,
                                  String assignmentStrategy,
                                  ByteBuffer assignmentBuffer) {
        // only the leader is responsible for monitoring for metadata changes (i.e. partition changes)
        if (!isLeader)
            assignmentSnapshot = null;

        PartitionAssignor assignor = lookupAssignor(assignmentStrategy);
        if (assignor == null)
            throw new IllegalStateException("Coordinator selected invalid assignment protocol: " + assignmentStrategy);

        Assignment assignment = ConsumerProtocol.deserializeAssignment(assignmentBuffer);

        // set the flag to refresh last committed offsets
        subscriptions.needRefreshCommits();

        // update partition assignment
        subscriptions.assignFromSubscribed(assignment.partitions());

        // check if the assignment contains some topics that were not in the original
        // subscription, if yes we will obey what leader has decided and add these topics
        // into the subscriptions as long as they still match the subscribed pattern
        //
        // TODO this part of the logic should be removed once we allow regex on leader assign
        Set<String> addedTopics = new HashSet<>();
        for (TopicPartition tp : subscriptions.assignedPartitions()) {
            if (!joinedSubscription.contains(tp.topic()))
                addedTopics.add(tp.topic());
        }

        if (!addedTopics.isEmpty()) {
            Set<String> newSubscription = new HashSet<>(subscriptions.subscription());
            Set<String> newJoinedSubscription = new HashSet<>(joinedSubscription);
            newSubscription.addAll(addedTopics);
            newJoinedSubscription.addAll(addedTopics);

            this.subscriptions.subscribeFromPattern(newSubscription);
            this.joinedSubscription = newJoinedSubscription;
        }

        // update the metadata and enforce a refresh to make sure the fetcher can start
        // fetching data in the next iteration
        this.metadata.setTopics(subscriptions.groupSubscription());
        client.ensureFreshMetadata();

        // give the assignor a chance to update internal state based on the received assignment
        assignor.onAssignment(assignment);

        // reschedule the auto commit starting from now
        this.nextAutoCommitDeadline = time.milliseconds() + autoCommitIntervalMs;

        // execute the user's callback after rebalance
        ConsumerRebalanceListener listener = subscriptions.listener();
        log.info("Setting newly assigned partitions {} for group {}", subscriptions.assignedPartitions(), groupId);
        try {
            Set<TopicPartition> assigned = new HashSet<>(subscriptions.assignedPartitions());
            listener.onPartitionsAssigned(assigned);
        } catch (WakeupException | InterruptException e) {
            // wakeups/interrupts are user-triggered; propagate them rather than swallowing
            throw e;
        } catch (Exception e) {
            // listener failures must not abort the rebalance itself
            log.error("User provided listener {} for group {} failed on partition assignment",
                    listener.getClass().getName(), groupId, e);
        }
    }

    /**
     * Poll for coordinator events. This ensures that the coordinator is known and that the consumer
     * has joined the group (if it is using group management). This also handles periodic offset commits
     * if they are enabled.
     *
     * @param now current time in milliseconds
     */
    public void poll(long now) {
        // run any commit callbacks queued by the heartbeat thread on the caller's thread
        invokeCompletedOffsetCommitCallbacks();

        if (subscriptions.partitionsAutoAssigned() && coordinatorUnknown()) {
            ensureCoordinatorReady();
            now = time.milliseconds();
        }

        if (needRejoin()) {
            // due to a race condition between the initial metadata fetch and the initial rebalance,
            // we need to ensure that the metadata is fresh before joining initially. This ensures
            // that we have matched the pattern against the cluster's topics at least once before joining.
            if (subscriptions.hasPatternSubscription())
                client.ensureFreshMetadata();

            ensureActiveGroup();
            now = time.milliseconds();
        }

        pollHeartbeat(now);
        maybeAutoCommitOffsetsAsync(now);
    }

    /**
     * Return the time to the next needed invocation of {@link #poll(long)}.
     * @param now current time in milliseconds
     * @return the maximum time in milliseconds the caller should wait before the next invocation of poll()
     */
    public long timeToNextPoll(long now) {
        if (!autoCommitEnabled)
            return timeToNextHeartbeat(now);

        if (now > nextAutoCommitDeadline)
            return 0;

        return Math.min(nextAutoCommitDeadline - now, timeToNextHeartbeat(now));
    }

    /**
     * Leader-side assignment: deserialize all members' subscriptions, run the selected
     * assignor over fresh metadata, and serialize the per-member assignments for SyncGroup.
     * Also widens the leader's tracked topics to everything the group (or the assignor)
     * touches, so later metadata changes on those topics are observed.
     */
    @Override
    protected Map<String, ByteBuffer> performAssignment(String leaderId,
                                                        String assignmentStrategy,
                                                        Map<String, ByteBuffer> allSubscriptions) {
        PartitionAssignor assignor = lookupAssignor(assignmentStrategy);
        if (assignor == null)
            throw new IllegalStateException("Coordinator selected invalid assignment protocol: " + assignmentStrategy);

        Set<String> allSubscribedTopics = new HashSet<>();
        Map<String, Subscription> subscriptions = new HashMap<>();
        for (Map.Entry<String, ByteBuffer> subscriptionEntry : allSubscriptions.entrySet()) {
            Subscription subscription = ConsumerProtocol.deserializeSubscription(subscriptionEntry.getValue());
            subscriptions.put(subscriptionEntry.getKey(), subscription);
            allSubscribedTopics.addAll(subscription.topics());
        }

        // the leader will begin watching for changes to any of the topics the group is interested in,
        // which ensures that all metadata changes will eventually be seen
        this.subscriptions.groupSubscribe(allSubscribedTopics);
        metadata.setTopics(this.subscriptions.groupSubscription());

        // update metadata (if needed) and keep track of the metadata used for assignment so that
        // we can check after rebalance completion whether anything has changed
        client.ensureFreshMetadata();

        isLeader = true;

        log.debug("Performing assignment for group {} using strategy {} with subscriptions {}",
                groupId, assignor.name(), subscriptions);

        Map<String, Assignment> assignment = assignor.assign(metadata.fetch(), subscriptions);

        // user-customized assignor may have created some topics that are not in the subscription list
        // and assign their partitions to the members; in this case we would like to update the leader's
        // own metadata with the newly added topics so that it will not trigger a subsequent rebalance
        // when these topics gets updated from metadata refresh.
        //
        // TODO: this is a hack and not something we want to support long-term unless we push regex into the protocol
        //       we may need to modify the PartitionAssignor API to better support this case.
        Set<String> assignedTopics = new HashSet<>();
        for (Assignment assigned : assignment.values()) {
            for (TopicPartition tp : assigned.partitions())
                assignedTopics.add(tp.topic());
        }

        if (!assignedTopics.containsAll(allSubscribedTopics)) {
            Set<String> notAssignedTopics = new HashSet<>(allSubscribedTopics);
            notAssignedTopics.removeAll(assignedTopics);
            log.warn("The following subscribed topics are not assigned to any members in the group {} : {} ", groupId,
                    notAssignedTopics);
        }

        if (!allSubscribedTopics.containsAll(assignedTopics)) {
            Set<String> newlyAddedTopics = new HashSet<>(assignedTopics);
            newlyAddedTopics.removeAll(allSubscribedTopics);
            log.info("The following not-subscribed topics are assigned to group {}, and their metadata will be " +
                    "fetched from the brokers : {}", groupId, newlyAddedTopics);

            allSubscribedTopics.addAll(assignedTopics);
            this.subscriptions.groupSubscribe(allSubscribedTopics);
            metadata.setTopics(this.subscriptions.groupSubscription());
            client.ensureFreshMetadata();
        }

        // remember the metadata used for this assignment; needRejoin() compares it
        // against the live snapshot to detect partition-count changes
        assignmentSnapshot = metadataSnapshot;

        log.debug("Finished assignment for group {}: {}", groupId, assignment);

        Map<String, ByteBuffer> groupAssignment = new HashMap<>();
        for (Map.Entry<String, Assignment> assignmentEntry : assignment.entrySet()) {
            ByteBuffer buffer = ConsumerProtocol.serializeAssignment(assignmentEntry.getValue());
            groupAssignment.put(assignmentEntry.getKey(), buffer);
        }

        return groupAssignment;
    }

    /**
     * Pre-rebalance hook: synchronously auto-commit (if enabled), invoke the user's
     * revocation callback, and reset leader-only state.
     */
    @Override
    protected void onJoinPrepare(int generation, String memberId) {
        // commit offsets prior to rebalance if auto-commit enabled
        maybeAutoCommitOffsetsSync(rebalanceTimeoutMs);

        // execute the user's callback before rebalance
        ConsumerRebalanceListener listener = subscriptions.listener();
        log.info("Revoking previously assigned partitions {} for group {}", subscriptions.assignedPartitions(), groupId);
        try {
            Set<TopicPartition> revoked = new HashSet<>(subscriptions.assignedPartitions());
            listener.onPartitionsRevoked(revoked);
        } catch (WakeupException | InterruptException e) {
            // wakeups/interrupts are user-triggered; propagate them rather than swallowing
            throw e;
        } catch (Exception e) {
            // listener failures must not abort the rebalance itself
            log.error("User provided listener {} for group {} failed on partition revocation",
                    listener.getClass().getName(), groupId, e);
        }

        isLeader = false;
        subscriptions.resetGroupSubscription();
    }

    /**
     * Whether this member must (re)join the group: only relevant with automatic partition
     * assignment, and triggered by metadata changes observed by the leader, a changed
     * subscription since the last join, or the superclass's own rejoin conditions.
     */
    @Override
    public boolean needRejoin() {
        if (!subscriptions.partitionsAutoAssigned())
            return false;

        // we need to rejoin if we performed the assignment and metadata has changed
        if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot))
            return true;

        // we need to join if our subscription has changed since the last join
        if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription()))
            return true;

        return super.needRejoin();
    }

    /**
     * Refresh the committed offsets for provided partitions.
     */
    public void refreshCommittedOffsetsIfNeeded() {
        if (subscriptions.refreshCommitsNeeded()) {
            Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions());
            for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
                TopicPartition tp = entry.getKey();
                // verify assignment is still active
                if (subscriptions.isAssigned(tp))
                    this.subscriptions.committed(tp, entry.getValue());
            }
            this.subscriptions.commitsRefreshed();
        }
    }

    /**
     * Fetch the current committed offsets from the coordinator for a set of partitions.
     * @param partitions The partitions to fetch offsets for
     * @return A map from partition to the committed offset
     */
    public Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions) {
        // retries indefinitely on retriable failures, backing off between attempts;
        // non-retriable failures are rethrown to the caller
        while (true) {
            ensureCoordinatorReady();

            // contact coordinator to fetch committed offsets
            RequestFuture<Map<TopicPartition, OffsetAndMetadata>> future = sendOffsetFetchRequest(partitions);
            client.poll(future);

            if (future.succeeded())
                return future.value();

            if (!future.isRetriable())
                throw future.exception();

            time.sleep(retryBackoffMs);
        }
    }

    /**
     * Shut down the coordinator within the given timeout: perform a final synchronous
     * auto-commit (if enabled), wait for any pending async commits that are still
     * blocked on coordinator discovery, then close the superclass with whatever time
     * remains.
     *
     * @param timeoutMs maximum time in milliseconds to spend closing
     */
    public void close(long timeoutMs) {
        // we do not need to re-enable wakeups since we are closing already
        client.disableWakeups();

        long now = time.milliseconds();
        long endTimeMs = now + timeoutMs;
        try {
            maybeAutoCommitOffsetsSync(timeoutMs);
            now = time.milliseconds();
            if (pendingAsyncCommits.get() > 0 && endTimeMs > now) {
                ensureCoordinatorReady(now, endTimeMs - now);
                now = time.milliseconds();
            }
        } finally {
            super.close(Math.max(0, endTimeMs - now));
        }
    }

    // visible for testing
    // drain the thread-safe completion queue and run each callback on the current
    // (caller's) thread, so user callbacks never run on the heartbeat thread
    void invokeCompletedOffsetCommitCallbacks() {
        while (true) {
            OffsetCommitCompletion completion = completedOffsetCommits.poll();
            if (completion == null)
                break;
            completion.invoke();
        }
    }

    /**
     * Commit offsets asynchronously. The callback (or the default logging callback when
     * null) is queued for invocation from {@link #invokeCompletedOffsetCommitCallbacks()}.
     * If the coordinator is unknown, the commit is chained onto the coordinator lookup
     * rather than retried recursively, preserving commit ordering.
     *
     * @param offsets  the offsets to commit, per partition
     * @param callback user callback to invoke on completion; may be null
     */
    public void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback) {
        invokeCompletedOffsetCommitCallbacks();

        if (!coordinatorUnknown()) {
            doCommitOffsetsAsync(offsets, callback);
        } else {
            // we don't know the current coordinator, so try to find it and then send the commit
            // or fail (we don't want recursive retries which can cause offset commits to arrive
            // out of order). Note that there may be multiple offset commits chained to the same
            // coordinator lookup request. This is fine because the listeners will be invoked in
            // the same order that they were added. Note also that AbstractCoordinator prevents
            // multiple concurrent coordinator lookup requests.
            pendingAsyncCommits.incrementAndGet();
            lookupCoordinator().addListener(new RequestFutureListener<Void>() {
                @Override
                public void onSuccess(Void value) {
                    pendingAsyncCommits.decrementAndGet();
                    doCommitOffsetsAsync(offsets, callback);
                }

                @Override
                public void onFailure(RuntimeException e) {
                    pendingAsyncCommits.decrementAndGet();
                    completedOffsetCommits.add(new OffsetCommitCompletion(callback, offsets,
                            RetriableCommitFailedException.withUnderlyingMessage(e.getMessage())));
                }
            });
        }

        // ensure the commit has a chance to be transmitted (without blocking on its completion).
        // Note that commits are treated as heartbeats by the coordinator, so there is no need to
        // explicitly allow heartbeats through delayed task execution.
        client.pollNoWakeup();
    }

    // send the commit request and enqueue the completion (success or failure) for later
    // callback invocation; retriable errors are wrapped so the user sees a
    // RetriableCommitFailedException
    private void doCommitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback) {
        this.subscriptions.needRefreshCommits();
        RequestFuture<Void> future = sendOffsetCommitRequest(offsets);
        final OffsetCommitCallback cb = callback == null ? defaultOffsetCommitCallback : callback;
        future.addListener(new RequestFutureListener<Void>() {
            @Override
            public void onSuccess(Void value) {
                if (interceptors != null)
                    interceptors.onCommit(offsets);

                completedOffsetCommits.add(new OffsetCommitCompletion(cb, offsets, null));
            }

            @Override
            public void onFailure(RuntimeException e) {
                Exception commitException = e;

                if (e instanceof RetriableException)
                    commitException = RetriableCommitFailedException.withUnderlyingMessage(e.getMessage());

                completedOffsetCommits.add(new OffsetCommitCompletion(cb, offsets, commitException));
            }
        });
    }

    /**
     * Commit offsets synchronously. This method will retry until the commit completes successfully
     * or an unrecoverable error is encountered.
     * @param offsets The offsets to be committed
     * @throws org.apache.kafka.common.errors.AuthorizationException if the consumer is not authorized to the group
     *             or to any of the specified partitions
     * @throws CommitFailedException if an unrecoverable error occurs before the commit can be completed
     * @return If the offset commit was successfully sent and a successful response was received from
     *         the coordinator
     */
    public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) {
        invokeCompletedOffsetCommitCallbacks();

        if (offsets.isEmpty())
            return true;

        long now = time.milliseconds();
        long startMs = now;
        long remainingMs = timeoutMs;
        do {
            if (coordinatorUnknown()) {
                if (!ensureCoordinatorReady(now, remainingMs))
                    return false;

                remainingMs = timeoutMs - (time.milliseconds() - startMs);
            }

            RequestFuture<Void> future = sendOffsetCommitRequest(offsets);
            client.poll(future, remainingMs);

            if (future.succeeded()) {
                if (interceptors != null)
                    interceptors.onCommit(offsets);
                return true;
            }

            if (!future.isRetriable())
                throw future.exception();

            time.sleep(retryBackoffMs);

            now = time.milliseconds();
            remainingMs = timeoutMs - (now - startMs);
        } while (remainingMs > 0);

        return false;
    }

    // fire an async auto-commit when the deadline has passed; if the coordinator is
    // unknown, push the deadline out by the retry backoff instead of committing
    private void maybeAutoCommitOffsetsAsync(long now) {
        if (autoCommitEnabled) {
            if (coordinatorUnknown()) {
                this.nextAutoCommitDeadline = now + retryBackoffMs;
            } else if (now >= nextAutoCommitDeadline) {
                this.nextAutoCommitDeadline = now + autoCommitIntervalMs;
                doAutoCommitOffsetsAsync();
            }
        }
    }

    /**
     * Trigger an immediate asynchronous auto-commit if auto-commit is enabled and the
     * coordinator is known. Does not wait for the commit to complete.
     */
    public void maybeAutoCommitOffsetsNow() {
        if (autoCommitEnabled && !coordinatorUnknown())
            doAutoCommitOffsetsAsync();
    }

    // asynchronously commit all consumed offsets; retriable failures pull the next
    // auto-commit deadline earlier so the commit is retried sooner
    private void doAutoCommitOffsetsAsync() {
        Map<TopicPartition, OffsetAndMetadata> allConsumedOffsets = subscriptions.allConsumed();
        log.debug("Sending asynchronous auto-commit of offsets {} for group {}", allConsumedOffsets, groupId);

        commitOffsetsAsync(allConsumedOffsets, new OffsetCommitCallback() {
            @Override
            public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
                if (exception != null) {
                    log.warn("Auto-commit of offsets {} failed for group {}: {}", offsets, groupId,
                            exception.getMessage());
                    if (exception instanceof RetriableException)
                        nextAutoCommitDeadline = Math.min(time.milliseconds() + retryBackoffMs, nextAutoCommitDeadline);
                } else {
                    log.debug("Completed auto-commit of offsets {} for group {}", offsets, groupId);
                }
            }
        });
    }

    // synchronously commit all consumed offsets within the timeout (used before a
    // rebalance and during close); failures other than wakeup/interrupt are only
    // logged, consistent with async auto-commit behavior
    private void maybeAutoCommitOffsetsSync(long timeoutMs) {
        if (autoCommitEnabled) {
            Map<TopicPartition, OffsetAndMetadata> allConsumedOffsets = subscriptions.allConsumed();
            try {
                log.debug("Sending synchronous auto-commit of offsets {} for group {}", allConsumedOffsets, groupId);
                if (!commitOffsetsSync(allConsumedOffsets, timeoutMs))
                    log.debug("Auto-commit of offsets {} for group {} timed out before completion", allConsumedOffsets, groupId);
            } catch (WakeupException | InterruptException e) {
                log.debug("Auto-commit of offsets {} for group {} was interrupted before completion", allConsumedOffsets, groupId);
                // rethrow wakeups since they are triggered by the user
                throw e;
            } catch (Exception e) {
                // consistent with async auto-commit failures, we do not propagate the exception
                log.warn("Auto-commit of offsets {} failed for group {}: {}", allConsumedOffsets, groupId, e.getMessage());
            }
        }
    }

    // fallback callback for commitOffsetsAsync(offsets, null): logs failures, ignores success
    private class DefaultOffsetCommitCallback implements OffsetCommitCallback {
        @Override
        public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
            if (exception != null)
                log.error("Offset commit with offsets {} failed for group {}", offsets, groupId, exception);
        }
    }

    /**
     * Commit offsets for the specified list of topics and partitions. This is a non-blocking call
     * which returns a request future that can be polled in the case of a synchronous commit or ignored in the
     * asynchronous case.
     *
     * @param offsets The list of offsets per partition that should be committed.
     * @return A request future whose value indicates whether the commit was successful or not
     */
    private RequestFuture<Void> sendOffsetCommitRequest(final Map<TopicPartition, OffsetAndMetadata> offsets) {
        if (offsets.isEmpty())
            return RequestFuture.voidSuccess();

        Node coordinator = coordinator();
        if (coordinator == null)
            return RequestFuture.coordinatorNotAvailable();

        // create the offset commit request
        Map<TopicPartition, OffsetCommitRequest.PartitionData> offsetData = new HashMap<>(offsets.size());
        for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
            OffsetAndMetadata offsetAndMetadata = entry.getValue();
            if (offsetAndMetadata.offset() < 0) {
                return RequestFuture.failure(new IllegalArgumentException("Invalid offset: " + offsetAndMetadata.offset()));
            }
            offsetData.put(entry.getKey(), new OffsetCommitRequest.PartitionData(
                    offsetAndMetadata.offset(), offsetAndMetadata.metadata()));
        }

        // with automatic assignment the commit must carry the current generation so the
        // coordinator can fence zombie members; manual assignment uses NO_GENERATION
        final Generation generation;
        if (subscriptions.partitionsAutoAssigned())
            generation = generation();
        else
            generation = Generation.NO_GENERATION;

        // if the generation is null, we are not part of an active group (and we expect to be).
        // the only thing we can do is fail the commit and let the user rejoin the group in poll()
        if (generation == null)
            return RequestFuture.failure(new CommitFailedException());

        OffsetCommitRequest.Builder builder =
                new OffsetCommitRequest.Builder(this.groupId, offsetData).
                        setGenerationId(generation.generationId).
                        setMemberId(generation.memberId).
                        setRetentionTime(OffsetCommitRequest.DEFAULT_RETENTION_TIME);

        log.trace("Sending OffsetCommit request with {} to coordinator {} for group {}", offsets, coordinator, groupId);

        return client.send(coordinator, builder)
                .compose(new OffsetCommitResponseHandler(offsets));
    }

    // maps each per-partition OffsetCommit error onto the appropriate action: update the
    // local committed cache, retry, rediscover the coordinator, force a rejoin, or fail
    private class OffsetCommitResponseHandler extends CoordinatorResponseHandler<OffsetCommitResponse, Void> {

        private final Map<TopicPartition, OffsetAndMetadata> offsets;

        private OffsetCommitResponseHandler(Map<TopicPartition, OffsetAndMetadata> offsets) {
            this.offsets = offsets;
        }

        @Override
        public void handle(OffsetCommitResponse commitResponse, RequestFuture<Void> future) {
            // NOTE(review): 'response' here is a field inherited from CoordinatorResponseHandler
            // (the raw client response), intentionally distinct from the 'commitResponse' parameter
            sensors.commitLatency.record(response.requestLatencyMs());
            Set<String> unauthorizedTopics = new HashSet<>();

            for (Map.Entry<TopicPartition, Errors> entry : commitResponse.responseData().entrySet()) {
                TopicPartition tp = entry.getKey();
                OffsetAndMetadata offsetAndMetadata = this.offsets.get(tp);
                long offset = offsetAndMetadata.offset();

                Errors error = entry.getValue();
                if (error == Errors.NONE) {
                    log.debug("Group {} committed offset {} for partition {}", groupId, offset, tp);
                    if (subscriptions.isAssigned(tp))
                        // update the local cache only if the partition is still assigned
                        subscriptions.committed(tp, offsetAndMetadata);
                } else if (error == Errors.GROUP_AUTHORIZATION_FAILED) {
                    log.error("Not authorized to commit offsets for group {}", groupId);
                    future.raise(new GroupAuthorizationException(groupId));
                    return;
                } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) {
                    // collect and report all unauthorized topics together after the loop
                    unauthorizedTopics.add(tp.topic());
                } else if (error == Errors.OFFSET_METADATA_TOO_LARGE
                        || error == Errors.INVALID_COMMIT_OFFSET_SIZE) {
                    // raise the error to the user
                    log.debug("Offset commit for group {} failed on partition {}: {}", groupId, tp, error.message());
                    future.raise(error);
                    return;
                } else if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS) {
                    // just retry
                    log.debug("Offset commit for group {} failed: {}", groupId, error.message());
                    future.raise(error);
                    return;
                } else if (error == Errors.COORDINATOR_NOT_AVAILABLE
                        || error == Errors.NOT_COORDINATOR
                        || error == Errors.REQUEST_TIMED_OUT) {
                    // coordinator is gone or unreachable: mark it dead so it is rediscovered
                    log.debug("Offset commit for group {} failed: {}", groupId, error.message());
                    coordinatorDead();
                    future.raise(error);
                    return;
                } else if (error == Errors.UNKNOWN_MEMBER_ID
                        || error == Errors.ILLEGAL_GENERATION
                        || error == Errors.REBALANCE_IN_PROGRESS) {
                    // need to re-join group
                    log.debug("Offset commit for group {} failed: {}", groupId, error.message());
                    resetGeneration();
                    future.raise(new CommitFailedException());
                    return;
                } else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) {
                    log.debug("Offset commit for group {} failed on partition {}: {}", groupId, tp, error.message());
                    future.raise(new KafkaException("Partition " + tp + " may not exist or user may not have Describe access to topic"));
                    return;
                } else {
                    log.error("Group {} failed to commit partition {} at offset {}: {}", groupId, tp, offset, error.message());
                    future.raise(new KafkaException("Unexpected error in commit: " + error.message()));
                    return;
                }
            }

            if (!unauthorizedTopics.isEmpty()) {
                log.error("Not authorized to commit to topics {} for group {}", unauthorizedTopics, groupId);
                future.raise(new TopicAuthorizationException(unauthorizedTopics));
            } else {
                future.complete(null);
            }
        }
    }

    /**
     * Fetch the committed offsets for a set of partitions. This is a non-blocking call. The
     * returned future can be polled to get the actual offsets returned from the broker.
     *
     * @param partitions The set of partitions to get offsets for.
     * @return A request future containing the committed offsets.
     */
    private RequestFuture<Map<TopicPartition, OffsetAndMetadata>> sendOffsetFetchRequest(Set<TopicPartition> partitions) {
        Node coordinator = coordinator();
        if (coordinator == null)
            return RequestFuture.coordinatorNotAvailable();

        log.debug("Group {} fetching committed offsets for partitions: {}", groupId, partitions);
        // construct the request
        OffsetFetchRequest.Builder requestBuilder =
                new OffsetFetchRequest.Builder(this.groupId, new ArrayList<>(partitions));

        // send the request with a callback
        return client.send(coordinator, requestBuilder)
                .compose(new OffsetFetchResponseHandler());
    }

    // translates OffsetFetch responses into a partition -> OffsetAndMetadata map,
    // raising the appropriate exception for group-level and per-partition errors
    private class OffsetFetchResponseHandler extends CoordinatorResponseHandler<OffsetFetchResponse, Map<TopicPartition, OffsetAndMetadata>> {

        @Override
        public void handle(OffsetFetchResponse response, RequestFuture<Map<TopicPartition, OffsetAndMetadata>> future) {
            if (response.hasError()) {
                Errors error = response.error();
                log.debug("Offset fetch for group {} failed: {}", groupId, error.message());

                if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS) {
                    // just retry
                    future.raise(error);
                } else if (error == Errors.NOT_COORDINATOR) {
                    // re-discover the coordinator and retry
                    coordinatorDead();
                    future.raise(error);
                } else if (error == Errors.GROUP_AUTHORIZATION_FAILED) {
                    future.raise(new GroupAuthorizationException(groupId));
                } else {
                    future.raise(new KafkaException("Unexpected error in fetch offset response: " + error.message()));
                }
                return;
            }

            Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>(response.responseData().size());
            for (Map.Entry<TopicPartition, OffsetFetchResponse.PartitionData> entry : response.responseData().entrySet()) {
                TopicPartition tp = entry.getKey();
                OffsetFetchResponse.PartitionData data = entry.getValue();
                if (data.hasError()) {
                    Errors error = data.error;
                    log.debug("Group {} failed to fetch offset for partition {}: {}", groupId, tp, error.message());

                    if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) {
                        future.raise(new KafkaException("Partition " + tp + " may not exist or the user may not have " +
                                "Describe access to the topic"));
                    } else {
                        future.raise(new KafkaException("Unexpected error in fetch offset response: " + error.message()));
                    }
                    return;
                } else if (data.offset >= 0) {
                    // record the position with the offset (-1 indicates no committed offset to fetch)
                    offsets.put(tp, new OffsetAndMetadata(data.offset, data.metadata));
                } else {
                    log.debug("Group {} has no committed offset for partition {}", groupId, tp);
                }
            }

            future.complete(offsets);
        }
    }

    // registers commit-latency/rate sensors and an assigned-partitions gauge under
    // "<prefix>-coordinator-metrics"
    private class ConsumerCoordinatorMetrics {
        private final String metricGrpName;
        private final Sensor commitLatency;

        private ConsumerCoordinatorMetrics(Metrics metrics, String metricGrpPrefix) {
            this.metricGrpName = metricGrpPrefix + "-coordinator-metrics";

            this.commitLatency = metrics.sensor("commit-latency");
            this.commitLatency.add(metrics.metricName("commit-latency-avg",
                    this.metricGrpName,
                    "The average time taken for a commit request"), new Avg());
            this.commitLatency.add(metrics.metricName("commit-latency-max",
                    this.metricGrpName,
                    "The max time taken for a commit request"), new Max());
            this.commitLatency.add(metrics.metricName("commit-rate",
                    this.metricGrpName,
                    "The number of commit calls per second"), new Rate(new Count()));

            Measurable numParts = new Measurable() {
                public double measure(MetricConfig config, long now) {
                    return subscriptions.assignedPartitions().size();
                }
            };
            metrics.addMetric(metrics.metricName("assigned-partitions",
                    this.metricGrpName,
                    "The number of partitions currently assigned to this consumer"), numParts);
        }
    }

    // immutable value object capturing the partition count of every topic in the group
    // subscription; equality of two snapshots means no rebalance-relevant metadata change
    private static class MetadataSnapshot {
        private final Map<String, Integer> partitionsPerTopic;

        private MetadataSnapshot(SubscriptionState subscription, Cluster cluster) {
            Map<String, Integer> partitionsPerTopic = new HashMap<>();
            for (String topic : subscription.groupSubscription())
                partitionsPerTopic.put(topic, cluster.partitionCountForTopic(topic));
            this.partitionsPerTopic = partitionsPerTopic;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            MetadataSnapshot that = (MetadataSnapshot) o;
            return partitionsPerTopic != null ? partitionsPerTopic.equals(that.partitionsPerTopic) : that.partitionsPerTopic == null;
        }

        @Override
        public int hashCode() {
            return partitionsPerTopic != null ? partitionsPerTopic.hashCode() : 0;
        }
    }

    // a finished offset commit (callback + offsets + optional failure) queued on
    // completedOffsetCommits until the user's thread drains it; a null callback is a no-op
    private static class OffsetCommitCompletion {
        private final OffsetCommitCallback callback;
        private final Map<TopicPartition, OffsetAndMetadata> offsets;
        private final Exception exception;

        private OffsetCommitCompletion(OffsetCommitCallback callback, Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
            this.callback = callback;
            this.offsets = offsets;
            this.exception = exception;
        }

        public void invoke() {
            if (callback != null)
                callback.onComplete(offsets, exception);
        }
    }
}
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.internal.processors.cache.metric; import java.lang.reflect.Field; import java.sql.Connection; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Properties; import java.util.Set; import java.util.UUID; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.stream.Collectors; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteJdbcThinDriver; import org.apache.ignite.Ignition; import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cache.affinity.AffinityFunction; import org.apache.ignite.cache.affinity.AffinityFunctionContext; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.cache.query.ContinuousQuery; import org.apache.ignite.cache.query.QueryCursor; import 
org.apache.ignite.cache.query.ScanQuery; import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.client.IgniteClient; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.ClientConfiguration; import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.SqlConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.binary.mutabletest.GridBinaryTestClasses.TestObjectAllTypes; import org.apache.ignite.internal.binary.mutabletest.GridBinaryTestClasses.TestObjectEnum; import org.apache.ignite.internal.metric.AbstractExporterSpiTest; import org.apache.ignite.internal.metric.SystemViewSelfTest.TestPredicate; import org.apache.ignite.internal.metric.SystemViewSelfTest.TestRunnable; import org.apache.ignite.internal.metric.SystemViewSelfTest.TestTransformer; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.metastorage.DistributedMetaStorage; import org.apache.ignite.internal.processors.service.DummyService; import org.apache.ignite.internal.util.StripedExecutor; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.G; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.services.ServiceConfiguration; import org.apache.ignite.spi.systemview.view.MetastorageView; import org.apache.ignite.spi.systemview.view.SqlSchemaView; import 
org.apache.ignite.spi.systemview.view.SystemView; import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.transactions.Transaction; import org.junit.Test; import static java.util.Arrays.asList; import static org.apache.ignite.internal.metric.SystemViewSelfTest.TEST_PREDICATE; import static org.apache.ignite.internal.metric.SystemViewSelfTest.TEST_TRANSFORMER; import static org.apache.ignite.internal.processors.cache.GridCacheUtils.cacheGroupId; import static org.apache.ignite.internal.processors.cache.GridCacheUtils.cacheId; import static org.apache.ignite.internal.processors.cache.index.AbstractSchemaSelfTest.queryProcessor; import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.METASTORE_VIEW; import static org.apache.ignite.internal.processors.metastorage.persistence.DistributedMetaStorageImpl.DISTRIBUTED_METASTORE_VIEW; import static org.apache.ignite.internal.processors.query.QueryUtils.DFLT_SCHEMA; import static org.apache.ignite.internal.processors.query.QueryUtils.SCHEMA_SYS; import static org.apache.ignite.internal.processors.query.h2.SchemaManager.SQL_SCHEMA_VIEW; import static org.apache.ignite.internal.util.IgniteUtils.toStringSafe; import static org.apache.ignite.testframework.GridTestUtils.waitForCondition; import static org.apache.ignite.transactions.TransactionConcurrency.OPTIMISTIC; import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC; import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ; import static org.apache.ignite.transactions.TransactionIsolation.SERIALIZABLE; /** */ public class SqlViewExporterSpiTest extends AbstractExporterSpiTest { /** */ private static IgniteEx ignite0; /** */ private static IgniteEx ignite1; /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); 
cfg.setConsistentId(igniteInstanceName);

// One persistence-enabled default region plus a pure "in-memory" region
// (used below by the page-list tests).
cfg.setDataStorageConfiguration(new DataStorageConfiguration()
    .setDataRegionConfigurations(
        new DataRegionConfiguration().setName("in-memory").setMaxSize(100L * 1024 * 1024))
    .setDefaultDataRegionConfiguration(
        new DataRegionConfiguration()
            .setPersistenceEnabled(true)));

return cfg;
}

/** {@inheritDoc} */
@Override protected void beforeTestsStarted() throws Exception {
    cleanPersistenceDir();

    ignite0 = startGrid(0);
    ignite1 = startGrid(1);

    // Persistence is enabled, so the cluster must be activated explicitly.
    ignite0.cluster().baselineAutoAdjustEnabled(false);
    ignite0.cluster().active(true);
}

/** {@inheritDoc} Destroys every cache so tests stay independent. */
@Override protected void afterTest() throws Exception {
    Collection<String> caches = ignite0.cacheNames();

    for (String cache : caches)
        ignite0.destroyCache(cache);
}

/** {@inheritDoc} */
@Override protected void afterTestsStopped() throws Exception {
    stopAllGrids(true);

    cleanPersistenceDir();
}

/** Checks that SYS.METRICS is queryable and non-empty without any filter. */
@Test public void testEmptyFilter() throws Exception {
    List<List<?>> res = execute(ignite0, "SELECT * FROM SYS.METRICS");

    assertNotNull(res);
    assertFalse(res.isEmpty());
}

/** Verifies default data-region metric values exported through SYS.METRICS. */
@Test public void testDataRegionMetrics() throws Exception {
    List<List<?>> res = execute(ignite0,
        "SELECT REPLACE(name, 'io.dataregion.default.'), value, description FROM SYS.METRICS");

    Set<String> names = new HashSet<>();

    DataRegionConfiguration cfg =
        ignite0.configuration().getDataStorageConfiguration().getDefaultDataRegionConfiguration();

    for (List<?> row : res) {
        String name = (String)row.get(0);
        String val = (String)row.get(1);

        names.add(name);

        // Spot-check two metrics against the configured region values.
        if ("InitialSize".equals(name))
            assertEquals(Long.toString(cfg.getInitialSize()), val);
        else if ("MaxSize".equals(name))
            assertEquals(Long.toString(cfg.getMaxSize()), val);

        assertNotNull("Metric value must be not null [name=" + name + ']', val);
    }

    for (String attr : EXPECTED_ATTRIBUTES)
        assertTrue(attr + " should be exported via SQL view", names.contains(attr));
}

/** Checks SYS.CACHES lists every created cache. */
@Test public void testCachesView() throws Exception {
    Set<String> cacheNames = new
HashSet<>(asList("cache-1", "cache-2")); for (String name : cacheNames) ignite0.createCache(name); List<List<?>> caches = execute(ignite0, "SELECT CACHE_NAME FROM SYS.CACHES"); assertEquals(ignite0.context().cache().cacheDescriptors().size(), caches.size()); for (List<?> row : caches) cacheNames.remove(row.get(0)); assertTrue(cacheNames.toString(), cacheNames.isEmpty()); } /** */ @Test public void testCacheGroupsView() throws Exception { Set<String> grpNames = new HashSet<>(asList("grp-1", "grp-2")); for (String grpName : grpNames) ignite0.createCache(new CacheConfiguration<>("cache-" + grpName).setGroupName(grpName)); List<List<?>> grps = execute(ignite0, "SELECT CACHE_GROUP_NAME FROM SYS.CACHE_GROUPS"); assertEquals(ignite0.context().cache().cacheGroupDescriptors().size(), grps.size()); for (List<?> row : grps) grpNames.remove(row.get(0)); assertTrue(grpNames.toString(), grpNames.isEmpty()); } /** */ @Test public void testComputeBroadcast() throws Exception { CyclicBarrier barrier = new CyclicBarrier(6); for (int i = 0; i < 5; i++) { ignite0.compute().broadcastAsync(() -> { try { barrier.await(); barrier.await(); } catch (InterruptedException | BrokenBarrierException e) { throw new RuntimeException(e); } }); } barrier.await(); List<List<?>> tasks = execute(ignite0, "SELECT " + " INTERNAL, " + " AFFINITY_CACHE_NAME, " + " AFFINITY_PARTITION_ID, " + " TASK_CLASS_NAME, " + " TASK_NAME, " + " TASK_NODE_ID, " + " USER_VERSION " + "FROM SYS.TASKS"); assertEquals(5, tasks.size()); List<?> t = tasks.get(0); assertFalse((Boolean)t.get(0)); assertNull(t.get(1)); assertEquals(-1, t.get(2)); assertTrue(t.get(3).toString().startsWith(getClass().getName())); assertTrue(t.get(4).toString().startsWith(getClass().getName())); assertEquals(ignite0.localNode().id(), t.get(5)); assertEquals("0", t.get(6)); barrier.await(); } /** */ @Test public void testServices() throws Exception { ServiceConfiguration srvcCfg = new ServiceConfiguration(); srvcCfg.setName("service"); 
srvcCfg.setMaxPerNodeCount(1); srvcCfg.setService(new DummyService()); ignite0.services().deploy(srvcCfg); List<List<?>> srvs = execute(ignite0, "SELECT " + " NAME, " + " SERVICE_ID, " + " SERVICE_CLASS, " + " TOTAL_COUNT, " + " MAX_PER_NODE_COUNT, " + " CACHE_NAME, " + " AFFINITY_KEY, " + " NODE_FILTER, " + " STATICALLY_CONFIGURED, " + " ORIGIN_NODE_ID " + "FROM SYS.SERVICES"); assertEquals(ignite0.context().service().serviceDescriptors().size(), srvs.size()); List<?> sysView = srvs.iterator().next(); assertEquals(srvcCfg.getName(), sysView.get(0)); assertEquals(DummyService.class.getName(), sysView.get(2)); assertEquals(srvcCfg.getMaxPerNodeCount(), sysView.get(4)); } /** */ @Test public void testClientsConnections() throws Exception { String host = ignite0.configuration().getClientConnectorConfiguration().getHost(); if (host == null) host = ignite0.configuration().getLocalHost(); int port = ignite0.configuration().getClientConnectorConfiguration().getPort(); try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(host + ":" + port))) { try (Connection conn = new IgniteJdbcThinDriver().connect("jdbc:ignite:thin://" + host, new Properties())) { List<List<?>> conns = execute(ignite0, "SELECT * FROM SYS.CLIENT_CONNECTIONS"); assertEquals(2, conns.size()); } } } /** */ @Test public void testTransactions() throws Exception { IgniteCache<Integer, Integer> cache = ignite0.createCache(new CacheConfiguration<Integer, Integer>("c") .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)); assertTrue(execute(ignite0, "SELECT * FROM SYS.TRANSACTIONS").isEmpty()); CountDownLatch latch1 = new CountDownLatch(10); CountDownLatch latch2 = new CountDownLatch(1); AtomicInteger cntr = new AtomicInteger(); GridTestUtils.runMultiThreadedAsync(() -> { try (Transaction tx = ignite0.transactions().withLabel("test").txStart(PESSIMISTIC, REPEATABLE_READ)) { cache.put(cntr.incrementAndGet(), cntr.incrementAndGet()); cache.put(cntr.incrementAndGet(), 
cntr.incrementAndGet()); latch1.countDown(); latch2.await(); } catch (InterruptedException e) { throw new RuntimeException(e); } }, 5, "xxx"); GridTestUtils.runMultiThreadedAsync(() -> { try (Transaction tx = ignite0.transactions().txStart(OPTIMISTIC, SERIALIZABLE)) { cache.put(cntr.incrementAndGet(), cntr.incrementAndGet()); cache.put(cntr.incrementAndGet(), cntr.incrementAndGet()); latch1.countDown(); latch2.await(); } catch (InterruptedException e) { throw new RuntimeException(e); } }, 5, "yyy"); latch1.await(5, TimeUnit.SECONDS); List<List<?>> txs = execute(ignite0, "SELECT * FROM SYS.TRANSACTIONS"); assertEquals(10, txs.size()); latch2.countDown(); boolean res = waitForCondition(() -> execute(ignite0, "SELECT * FROM SYS.TRANSACTIONS").isEmpty(), 5_000); assertTrue(res); } /** */ @Test public void testSchemas() throws Exception { try (IgniteEx g = startGrid(new IgniteConfiguration().setSqlConfiguration(new SqlConfiguration() .setSqlSchemas("MY_SCHEMA", "ANOTHER_SCHEMA")))) { SystemView<SqlSchemaView> schemasSysView = g.context().systemView().view(SQL_SCHEMA_VIEW); Set<String> schemaFromSysView = new HashSet<>(); schemasSysView.forEach(v -> schemaFromSysView.add(v.schemaName())); HashSet<String> expSchemas = new HashSet<>(asList("MY_SCHEMA", "ANOTHER_SCHEMA", "SYS", "PUBLIC")); assertEquals(schemaFromSysView, expSchemas); List<List<?>> schemas = execute(g, "SELECT * FROM SYS.SCHEMAS"); schemaFromSysView.clear(); schemas.forEach(s -> schemaFromSysView.add(s.get(0).toString())); assertEquals(schemaFromSysView, expSchemas); } } /** */ @Test public void testViews() throws Exception { Set<String> expViews = new HashSet<>(asList( "METRICS", "SERVICES", "CACHE_GROUPS", "CACHES", "TASKS", "JOBS", "SQL_QUERIES_HISTORY", "NODES", "SCHEMAS", "NODE_METRICS", "BASELINE_NODES", "BASELINE_NODE_ATTRIBUTES", "INDEXES", "LOCAL_CACHE_GROUPS_IO", "SQL_QUERIES", "SCAN_QUERIES", "NODE_ATTRIBUTES", "SNAPSHOT", "TABLES", "CLIENT_CONNECTIONS", "VIEWS", "TABLE_COLUMNS", "VIEW_COLUMNS", 
"TRANSACTIONS", "CONTINUOUS_QUERIES", "STRIPED_THREADPOOL_QUEUE", "DATASTREAM_THREADPOOL_QUEUE",
"DATA_REGION_PAGE_LISTS", "CACHE_GROUP_PAGE_LISTS", "PARTITION_STATES", "BINARY_METADATA",
"METASTORAGE", "DISTRIBUTED_METASTORAGE", "STATISTICS_CONFIGURATION", "STATISTICS_PARTITION_DATA",
"STATISTICS_LOCAL_DATA", "DS_ATOMICLONGS", "DS_ATOMICREFERENCES", "DS_ATOMICSTAMPED",
"DS_ATOMICSEQUENCES", "DS_COUNTDOWNLATCHES", "DS_REENTRANTLOCKS", "DS_SETS", "DS_SEMAPHORES",
"DS_QUEUES"
));

Set<String> actViews = new HashSet<>();

List<List<?>> res = execute(ignite0, "SELECT * FROM SYS.VIEWS");

for (List<?> row : res)
    actViews.add(row.get(0).toString());

// Exact match: an unlisted or missing system view fails the test.
assertEquals(expViews, actViews);
}

/** Checks that SYS.TABLES reflects CREATE TABLE / DROP TABLE DDL. */
@Test public void testTable() throws Exception {
    assertTrue(execute(ignite0, "SELECT * FROM SYS.TABLES").isEmpty());

    execute(ignite0, "CREATE TABLE T1(ID LONG PRIMARY KEY, NAME VARCHAR)");

    List<List<?>> res = execute(ignite0, "SELECT * FROM SYS.TABLES");

    assertEquals(1, res.size());

    List<?> tbl = res.get(0);

    int cacheId = cacheId("SQL_PUBLIC_T1");
    String cacheName = "SQL_PUBLIC_T1";

    assertEquals(cacheId, tbl.get(0)); // CACHE_GROUP_ID
    assertEquals(cacheName, tbl.get(1)); // CACHE_GROUP_NAME
    assertEquals(cacheId, tbl.get(2)); // CACHE_ID
    assertEquals(cacheName, tbl.get(3)); // CACHE_NAME
    assertEquals(DFLT_SCHEMA, tbl.get(4)); // SCHEMA_NAME
    assertEquals("T1", tbl.get(5)); // TABLE_NAME
    assertNull(tbl.get(6)); // AFFINITY_KEY_COLUMN
    assertEquals("ID", tbl.get(7)); // KEY_ALIAS
    assertNull(tbl.get(8)); // VALUE_ALIAS
    assertEquals("java.lang.Long", tbl.get(9)); // KEY_TYPE_NAME
    assertNotNull(tbl.get(10)); // VALUE_TYPE_NAME

    execute(ignite0, "CREATE TABLE T2(ID LONG PRIMARY KEY, NAME VARCHAR)");

    assertEquals(2, execute(ignite0, "SELECT * FROM SYS.TABLES").size());

    execute(ignite0, "DROP TABLE T1");
    execute(ignite0, "DROP TABLE T2");

    // Dropping both tables empties the view again.
    assertTrue(execute(ignite0, "SELECT * FROM SYS.TABLES").isEmpty());
}

/** Checks SYS.TABLE_COLUMNS content for created tables (incl. _KEY/_VAL). */
@Test public void testTableColumns() throws Exception {
    assertTrue(execute(ignite0,
"SELECT * FROM SYS.TABLE_COLUMNS").isEmpty()); execute(ignite0, "CREATE TABLE T1(ID LONG PRIMARY KEY, NAME VARCHAR(40))"); Set<?> actCols = execute(ignite0, "SELECT * FROM SYS.TABLE_COLUMNS") .stream() .map(l -> l.get(0)) .collect(Collectors.toSet()); assertEquals(new HashSet<>(asList("ID", "NAME", "_KEY", "_VAL")), actCols); execute(ignite0, "CREATE TABLE T2(ID LONG PRIMARY KEY, NAME VARCHAR(50))"); List<List<?>> expRes = asList( asList("ID", "T1", "PUBLIC", false, false, "null", true, true, -1, -1, Long.class.getName()), asList("NAME", "T1", "PUBLIC", false, false, "null", true, false, 40, -1, String.class.getName()), asList("_KEY", "T1", "PUBLIC", true, false, null, false, true, -1, -1, null), asList("_VAL", "T1", "PUBLIC", false, false, null, true, false, -1, -1, null), asList("ID", "T2", "PUBLIC", false, false, "null", true, true, -1, -1, Long.class.getName()), asList("NAME", "T2", "PUBLIC", false, false, "null", true, false, 50, -1, String.class.getName()), asList("_KEY", "T2", "PUBLIC", true, false, null, false, true, -1, -1, null), asList("_VAL", "T2", "PUBLIC", false, false, null, true, false, -1, -1, null) ); List<List<?>> res = execute(ignite0, "SELECT * FROM SYS.TABLE_COLUMNS ORDER BY TABLE_NAME, COLUMN_NAME"); assertEquals(expRes, res); execute(ignite0, "DROP TABLE T1"); execute(ignite0, "DROP TABLE T2"); assertTrue(execute(ignite0, "SELECT * FROM SYS.TABLE_COLUMNS").isEmpty()); } /** */ @Test public void testViewColumns() throws Exception { execute(ignite0, "SELECT * FROM SYS.VIEW_COLUMNS"); List<List<?>> expRes = asList( asList("CONNECTION_ID", "CLIENT_CONNECTIONS", SCHEMA_SYS, "null", true, 19L, 0, Long.class.getName()), asList("LOCAL_ADDRESS", "CLIENT_CONNECTIONS", SCHEMA_SYS, "null", true, (long)Integer.MAX_VALUE, 0, String.class.getName()), asList("REMOTE_ADDRESS", "CLIENT_CONNECTIONS", SCHEMA_SYS, "null", true, (long)Integer.MAX_VALUE, 0, String.class.getName()), asList("TYPE", "CLIENT_CONNECTIONS", SCHEMA_SYS, "null", true, 
(long)Integer.MAX_VALUE, 0, String.class.getName()), asList("USER", "CLIENT_CONNECTIONS", SCHEMA_SYS, "null", true, (long)Integer.MAX_VALUE, 0, String.class.getName()), asList("VERSION", "CLIENT_CONNECTIONS", SCHEMA_SYS, "null", true, (long)Integer.MAX_VALUE, 0, String.class.getName()) ); List<List<?>> res = execute(ignite0, "SELECT * FROM SYS.VIEW_COLUMNS WHERE VIEW_NAME = 'CLIENT_CONNECTIONS'"); assertEquals(expRes, res); } /** */ @Test public void testContinuousQuery() throws Exception { IgniteCache<Integer, Integer> cache = ignite0.createCache("cache-1"); assertTrue(execute(ignite0, "SELECT * FROM SYS.CONTINUOUS_QUERIES").isEmpty()); assertTrue(execute(ignite1, "SELECT * FROM SYS.CONTINUOUS_QUERIES").isEmpty()); try (QueryCursor qry = cache.query(new ContinuousQuery<>() .setInitialQuery(new ScanQuery<>()) .setPageSize(100) .setTimeInterval(1000) .setLocalListener(evts -> { // No-op. }) .setRemoteFilterFactory(() -> evt -> true) )) { for (int i = 0; i < 100; i++) cache.put(i, i); checkContinuouQueryView(ignite0, true); checkContinuouQueryView(ignite1, false); } assertTrue(execute(ignite0, "SELECT * FROM SYS.CONTINUOUS_QUERIES").isEmpty()); assertTrue(waitForCondition(() -> execute(ignite1, "SELECT * FROM SYS.CONTINUOUS_QUERIES").isEmpty(), getTestTimeout())); } /** */ private void checkContinuouQueryView(IgniteEx g, boolean loc) { List<List<?>> qrys = execute(g, "SELECT " + " CACHE_NAME, " + " BUFFER_SIZE, " + " INTERVAL, " + " NODE_ID, " + " LOCAL_LISTENER, " + " REMOTE_FILTER, " + " LOCAL_TRANSFORMED_LISTENER, " + " REMOTE_TRANSFORMER " + "FROM SYS.CONTINUOUS_QUERIES"); assertEquals(1, qrys.size()); List<?> cq = qrys.iterator().next(); assertEquals("cache-1", cq.get(0)); assertEquals(100, cq.get(1)); assertEquals(1000L, cq.get(2)); assertEquals(ignite0.localNode().id(), cq.get(3)); if (loc) assertTrue(cq.get(4).toString().startsWith(getClass().getName())); else assertNull(cq.get(4)); assertTrue(cq.get(5).toString().startsWith(getClass().getName())); 
assertNull(cq.get(6)); assertNull(cq.get(7)); } /** */ private static final String SCAN_QRY_SELECT = "SELECT " + " ORIGIN_NODE_ID," + " QUERY_ID," + " CACHE_NAME," + " CACHE_ID," + " CACHE_GROUP_ID," + " CACHE_GROUP_NAME," + " START_TIME," + " DURATION," + " CANCELED," + " FILTER," + " LOCAL," + " PARTITION," + " TOPOLOGY," + " TRANSFORMER," + " KEEP_BINARY," + " SUBJECT_ID," + " TASK_NAME, " + " PAGE_SIZE" + " FROM SYS.SCAN_QUERIES"; /** */ @Test public void testLocalScanQuery() throws Exception { IgniteCache<Integer, Integer> cache1 = ignite0.createCache( new CacheConfiguration<Integer, Integer>("cache1") .setGroupName("group1")); int part = ignite0.affinity("cache1").primaryPartitions(ignite0.localNode())[0]; List<Integer> partKeys = partitionKeys(cache1, part, 11, 0); for (Integer key : partKeys) cache1.put(key, key); assertEquals(0, execute(ignite0, SCAN_QRY_SELECT).size()); QueryCursor<Integer> qryRes1 = cache1.query( new ScanQuery<Integer, Integer>() .setFilter(new TestPredicate()) .setLocal(true) .setPartition(part) .setPageSize(10), new TestTransformer()); assertTrue(qryRes1.iterator().hasNext()); boolean res = waitForCondition(() -> !execute(ignite0, SCAN_QRY_SELECT).isEmpty(), 5_000); assertTrue(res); List<?> view = execute(ignite0, SCAN_QRY_SELECT).get(0); assertEquals(ignite0.localNode().id(), view.get(0)); assertEquals(0L, view.get(1)); assertEquals("cache1", view.get(2)); assertEquals(cacheId("cache1"), view.get(3)); assertEquals(cacheGroupId("cache1", "group1"), view.get(4)); assertEquals("group1", view.get(5)); assertTrue((Long)view.get(6) <= System.currentTimeMillis()); assertTrue((Long)view.get(7) >= 0); assertFalse((Boolean)view.get(8)); assertEquals(TEST_PREDICATE, view.get(9)); assertTrue((Boolean)view.get(10)); assertEquals(part, view.get(11)); assertEquals(toStringSafe(ignite0.context().discovery().topologyVersionEx()), view.get(12)); assertEquals(TEST_TRANSFORMER, view.get(13)); assertFalse((Boolean)view.get(14)); assertNull(view.get(15)); 
assertNull(view.get(16)); qryRes1.close(); res = waitForCondition(() -> execute(ignite0, SCAN_QRY_SELECT).isEmpty(), 5_000); assertTrue(res); } /** */ @Test public void testScanQuery() throws Exception { try (IgniteEx client1 = startClientGrid("client-1"); IgniteEx client2 = startClientGrid("client-2")) { IgniteCache<Integer, Integer> cache1 = client1.createCache( new CacheConfiguration<Integer, Integer>("cache1") .setGroupName("group1")); IgniteCache<Integer, Integer> cache2 = client2.createCache("cache2"); for (int i = 0; i < 100; i++) { cache1.put(i, i); cache2.put(i, i); } assertEquals(0, execute(ignite0, SCAN_QRY_SELECT).size()); assertEquals(0, execute(ignite1, SCAN_QRY_SELECT).size()); QueryCursor<Integer> qryRes1 = cache1.query( new ScanQuery<Integer, Integer>() .setFilter(new TestPredicate()) .setPageSize(10), new TestTransformer()); QueryCursor<?> qryRes2 = cache2.withKeepBinary().query(new ScanQuery<>() .setPageSize(20)); assertTrue(qryRes1.iterator().hasNext()); assertTrue(qryRes2.iterator().hasNext()); checkScanQueryView(client1, client2, ignite0); checkScanQueryView(client1, client2, ignite1); qryRes1.close(); qryRes2.close(); boolean res = waitForCondition( () -> execute(ignite0, SCAN_QRY_SELECT).size() + execute(ignite1, SCAN_QRY_SELECT).size() == 0, 5_000); assertTrue(res); } } /** */ private void checkScanQueryView(IgniteEx client1, IgniteEx client2, IgniteEx server) throws Exception { boolean res = waitForCondition(() -> execute(server, SCAN_QRY_SELECT).size() > 1, 5_000); assertTrue(res); Consumer<List<?>> cache1checker = view -> { assertEquals(client1.localNode().id(), view.get(0)); assertTrue((Long)view.get(1) != 0); assertEquals("cache1", view.get(2)); assertEquals(cacheId("cache1"), view.get(3)); assertEquals(cacheGroupId("cache1", "group1"), view.get(4)); assertEquals("group1", view.get(5)); assertTrue((Long)view.get(6) <= System.currentTimeMillis()); assertTrue((Long)view.get(7) >= 0); assertFalse((Boolean)view.get(8)); 
assertEquals(TEST_PREDICATE, view.get(9)); assertFalse((Boolean)view.get(10)); assertEquals(-1, view.get(11)); assertEquals(toStringSafe(client1.context().discovery().topologyVersionEx()), view.get(12)); assertEquals(TEST_TRANSFORMER, view.get(13)); assertFalse((Boolean)view.get(14)); assertNull(view.get(15)); assertNull(view.get(16)); assertEquals(10, view.get(17)); }; Consumer<List<?>> cache2checker = view -> { assertEquals(client2.localNode().id(), view.get(0)); assertTrue((Long)view.get(1) != 0); assertEquals("cache2", view.get(2)); assertEquals(cacheId("cache2"), view.get(3)); assertEquals(cacheGroupId("cache2", null), view.get(4)); assertEquals("cache2", view.get(5)); assertTrue((Long)view.get(6) <= System.currentTimeMillis()); assertTrue((Long)view.get(7) >= 0); assertFalse((Boolean)view.get(8)); assertNull(view.get(9)); assertFalse((Boolean)view.get(10)); assertEquals(-1, view.get(11)); assertEquals(toStringSafe(client2.context().discovery().topologyVersionEx()), view.get(12)); assertNull(view.get(13)); assertTrue((Boolean)view.get(14)); assertNull(view.get(15)); assertNull(view.get(16)); assertEquals(20, view.get(17)); }; boolean found1 = false; boolean found2 = false; for (List<?> view : execute(server, SCAN_QRY_SELECT)) { if ("cache2".equals(view.get(2))) { cache2checker.accept(view); found1 = true; } else { cache1checker.accept(view); found2 = true; } } assertTrue(found1 && found2); } /** */ @Test public void testStripedExecutor() throws Exception { checkStripeExecutorView(ignite0.context().pools().getStripedExecutorService(), "STRIPED_THREADPOOL_QUEUE", "sys"); } /** */ @Test public void testStreamerExecutor() throws Exception { checkStripeExecutorView(ignite0.context().pools().getDataStreamerExecutorService(), "DATASTREAM_THREADPOOL_QUEUE", "data-streamer"); } /** * Checks striped executor system view. * * @param execSvc Striped executor. * @param view System view name. * @param poolName Executor name. 
*/
private void checkStripeExecutorView(StripedExecutor execSvc, String view, String poolName) throws Exception {
    CountDownLatch latch = new CountDownLatch(1);

    // Two tasks per stripe: the first blocks on the latch, the second stays
    // queued and therefore becomes visible through the queue system view.
    execSvc.execute(0, new TestRunnable(latch, 0));
    execSvc.execute(0, new TestRunnable(latch, 1));
    execSvc.execute(1, new TestRunnable(latch, 2));
    execSvc.execute(1, new TestRunnable(latch, 3));

    try {
        boolean res = waitForCondition(() -> execute(ignite0, "SELECT * FROM SYS." + view).size() == 2, 5_000);

        assertTrue(res);

        List<List<?>> stripedQueue = execute(ignite0, "SELECT * FROM SYS." + view);

        List<?> row0 = stripedQueue.get(0);

        assertEquals(0, row0.get(0));
        assertEquals(TestRunnable.class.getSimpleName() + '1', row0.get(1));
        assertEquals(poolName + "-stripe-0", row0.get(2));
        assertEquals(TestRunnable.class.getName(), row0.get(3));

        List<?> row1 = stripedQueue.get(1);

        assertEquals(1, row1.get(0));
        assertEquals(TestRunnable.class.getSimpleName() + '3', row1.get(1));
        assertEquals(poolName + "-stripe-1", row1.get(2));
        assertEquals(TestRunnable.class.getName(), row1.get(3));
    }
    finally {
        // Unblock the stripes regardless of the assertion outcome.
        latch.countDown();
    }
}

/** Checks free-list page-list views for cache groups and data regions. */
@Test public void testPagesList() throws Exception {
    String cacheName = "cacheFL";

    IgniteCache<Integer, byte[]> cache = ignite0.getOrCreateCache(new CacheConfiguration<Integer, byte[]>()
        .setName(cacheName).setAffinity(new RendezvousAffinityFunction().setPartitions(1)));

    GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)ignite0.context().cache().context()
        .database();

    int pageSize = dbMgr.pageSize();

    try {
        // Checkpoints disabled so the free-list state stays stable during the checks.
        dbMgr.enableCheckpoints(false).get();

        int key = 0;

        // Fill up different free-list buckets.
        for (int j = 0; j < pageSize / 2; j++)
            cache.put(key++, new byte[j + 1]);

        // Put some pages to one bucket to overflow pages cache.
        for (int j = 0; j < 1000; j++)
            cache.put(key++, new byte[pageSize / 2]);

        // Test filtering by 3 columns.
        assertFalse(execute(ignite0, "SELECT * FROM SYS.CACHE_GROUP_PAGE_LISTS WHERE BUCKET_NUMBER = 0 " +
            "AND PARTITION_ID = 0 AND CACHE_GROUP_ID = ?", cacheId(cacheName)).isEmpty());

        // Test filtering with invalid cache group id.
        assertTrue(execute(ignite0, "SELECT * FROM SYS.CACHE_GROUP_PAGE_LISTS WHERE CACHE_GROUP_ID = ?", -1)
            .isEmpty());

        // Test filtering with invalid partition id.
        assertTrue(execute(ignite0, "SELECT * FROM SYS.CACHE_GROUP_PAGE_LISTS WHERE PARTITION_ID = ?", -1)
            .isEmpty());

        // Test filtering with invalid bucket number.
        assertTrue(execute(ignite0, "SELECT * FROM SYS.CACHE_GROUP_PAGE_LISTS WHERE BUCKET_NUMBER = -1")
            .isEmpty());

        assertFalse(execute(ignite0, "SELECT * FROM SYS.CACHE_GROUP_PAGE_LISTS WHERE BUCKET_SIZE > 0 " +
            "AND CACHE_GROUP_ID = ?", cacheId(cacheName)).isEmpty());

        assertFalse(execute(ignite0, "SELECT * FROM SYS.CACHE_GROUP_PAGE_LISTS WHERE STRIPES_COUNT > 0 " +
            "AND CACHE_GROUP_ID = ?", cacheId(cacheName)).isEmpty());

        assertFalse(execute(ignite0, "SELECT * FROM SYS.CACHE_GROUP_PAGE_LISTS WHERE CACHED_PAGES_COUNT > 0 " +
            "AND CACHE_GROUP_ID = ?", cacheId(cacheName)).isEmpty());

        assertFalse(execute(ignite0, "SELECT * FROM SYS.DATA_REGION_PAGE_LISTS WHERE NAME LIKE 'in-memory%'")
            .isEmpty());

        // The in-memory region has no data yet, so every bucket must be empty.
        assertEquals(0L, execute(ignite0, "SELECT COUNT(*) FROM SYS.DATA_REGION_PAGE_LISTS " +
            "WHERE NAME LIKE 'in-memory%' AND BUCKET_SIZE > 0").get(0).get(0));
    }
    finally {
        dbMgr.enableCheckpoints(true).get();
    }

    ignite0.cluster().active(false);
    ignite0.cluster().active(true);

    IgniteCache<Integer, Integer> cacheInMemory = ignite0.getOrCreateCache(new CacheConfiguration<Integer, Integer>()
        .setName("cacheFLInMemory").setDataRegionName("in-memory"));

    cacheInMemory.put(0, 0);

    // After activation/deactivation new view for data region pages lists should be created, check that new view
    // correctly reflects changes in free-lists.
assertFalse(execute(ignite0, "SELECT * FROM SYS.DATA_REGION_PAGE_LISTS WHERE NAME LIKE 'in-memory%' AND " + "BUCKET_SIZE > 0").isEmpty()); } /** */ @Test public void testPartitionStates() throws Exception { String nodeName0 = getTestIgniteInstanceName(0); String nodeName1 = getTestIgniteInstanceName(1); String nodeName2 = getTestIgniteInstanceName(2); IgniteCache<Integer, Integer> cache1 = ignite0.createCache(new CacheConfiguration<Integer, Integer>() .setName("cache1") .setCacheMode(CacheMode.PARTITIONED) .setAffinity(new TestAffinityFunction(new String[][] {{nodeName0, nodeName1}, {nodeName1, nodeName2}, {nodeName2, nodeName0}}))); IgniteCache<Integer, Integer> cache2 = ignite0.createCache(new CacheConfiguration<Integer, Integer>() .setName("cache2") .setCacheMode(CacheMode.PARTITIONED) .setAffinity(new TestAffinityFunction(new String[][] {{nodeName0, nodeName1, nodeName2}, {nodeName1}}))); for (int i = 0; i < 100; i++) { cache1.put(i, i); cache2.put(i, i); } try (IgniteEx ignite2 = startGrid(nodeName2)) { ignite2.rebalanceEnabled(false); ignite0.cluster().setBaselineTopology(ignite0.cluster().topologyVersion()); String partStateSql = "SELECT STATE FROM SYS.PARTITION_STATES WHERE CACHE_GROUP_ID = ? AND NODE_ID = ? " + "AND PARTITION_ID = ?"; UUID nodeId0 = ignite0.cluster().localNode().id(); UUID nodeId1 = ignite1.cluster().localNode().id(); UUID nodeId2 = ignite2.cluster().localNode().id(); Integer cacheGrpId1 = ignite0.cachex("cache1").context().groupId(); Integer cacheGrpId2 = ignite0.cachex("cache2").context().groupId(); String owningState = GridDhtPartitionState.OWNING.name(); String movingState = GridDhtPartitionState.MOVING.name(); for (Ignite ignite : Arrays.asList(ignite0, ignite1, ignite2)) { // Check partitions for cache1. 
assertEquals(owningState, execute(ignite, partStateSql, cacheGrpId1, nodeId0, 0).get(0).get(0)); assertEquals(owningState, execute(ignite, partStateSql, cacheGrpId1, nodeId1, 0).get(0).get(0)); assertEquals(owningState, execute(ignite, partStateSql, cacheGrpId1, nodeId1, 1).get(0).get(0)); assertEquals(movingState, execute(ignite, partStateSql, cacheGrpId1, nodeId2, 1).get(0).get(0)); assertEquals(owningState, execute(ignite, partStateSql, cacheGrpId1, nodeId0, 2).get(0).get(0)); assertEquals(movingState, execute(ignite, partStateSql, cacheGrpId1, nodeId2, 2).get(0).get(0)); // Check partitions for cache2. assertEquals(owningState, execute(ignite, partStateSql, cacheGrpId2, nodeId0, 0).get(0).get(0)); assertEquals(owningState, execute(ignite, partStateSql, cacheGrpId2, nodeId1, 0).get(0).get(0)); assertEquals(movingState, execute(ignite, partStateSql, cacheGrpId2, nodeId2, 0).get(0).get(0)); assertEquals(owningState, execute(ignite, partStateSql, cacheGrpId2, nodeId1, 1).get(0).get(0)); } // Check primary flag. String partPrimarySql = "SELECT IS_PRIMARY FROM SYS.PARTITION_STATES WHERE CACHE_GROUP_ID = ? " + "AND NODE_ID = ? AND PARTITION_ID = ?"; for (Ignite ignite : Arrays.asList(ignite0, ignite1, ignite2)) { // Check partitions for cache1. assertEquals(true, execute(ignite, partPrimarySql, cacheGrpId1, nodeId0, 0).get(0).get(0)); assertEquals(false, execute(ignite, partPrimarySql, cacheGrpId1, nodeId1, 0).get(0).get(0)); assertEquals(true, execute(ignite, partPrimarySql, cacheGrpId1, nodeId1, 1).get(0).get(0)); assertEquals(false, execute(ignite, partPrimarySql, cacheGrpId1, nodeId2, 1).get(0).get(0)); assertEquals(true, execute(ignite, partPrimarySql, cacheGrpId1, nodeId0, 2).get(0).get(0)); assertEquals(false, execute(ignite, partPrimarySql, cacheGrpId1, nodeId2, 2).get(0).get(0)); // Check partitions for cache2. 
assertEquals(true, execute(ignite, partPrimarySql, cacheGrpId2, nodeId0, 0).get(0).get(0)); assertEquals(false, execute(ignite, partPrimarySql, cacheGrpId2, nodeId1, 0).get(0).get(0)); assertEquals(false, execute(ignite, partPrimarySql, cacheGrpId2, nodeId2, 0).get(0).get(0)); assertEquals(true, execute(ignite, partPrimarySql, cacheGrpId2, nodeId1, 1).get(0).get(0)); } // Check joins with cache groups and nodes views. assertEquals(owningState, execute(ignite0, "SELECT p.STATE " + "FROM SYS.PARTITION_STATES p " + "JOIN SYS.CACHE_GROUPS g ON p.CACHE_GROUP_ID = g.CACHE_GROUP_ID " + "JOIN SYS.NODES n ON p.NODE_ID = n.NODE_ID " + "WHERE g.CACHE_GROUP_NAME = 'cache2' AND n.CONSISTENT_ID = ? AND p.PARTITION_ID = 1", nodeName1) .get(0).get(0)); // Check malformed or invalid values for indexed columns. assertEquals(0, execute(ignite0, "SELECT * FROM SYS.PARTITION_STATES WHERE PARTITION_ID = ?", Integer.MAX_VALUE).size()); assertEquals(0, execute(ignite0, "SELECT * FROM SYS.PARTITION_STATES WHERE PARTITION_ID = -1") .size()); assertEquals(0, execute(ignite0, "SELECT * FROM SYS.PARTITION_STATES WHERE NODE_ID = '123'") .size()); assertEquals(0, execute(ignite0, "SELECT * FROM SYS.PARTITION_STATES WHERE NODE_ID = ?", UUID.randomUUID()).size()); assertEquals(0, execute(ignite0, "SELECT * FROM SYS.PARTITION_STATES WHERE CACHE_GROUP_ID = 0") .size()); AffinityTopologyVersion topVer = ignite0.context().discovery().topologyVersionEx(); ignite2.rebalanceEnabled(true); // Wait until rebalance complete. assertTrue(GridTestUtils.waitForCondition(() -> ignite0.context().discovery().topologyVersionEx() .compareTo(topVer) > 0, 5_000L)); // Check that all partitions are in OWNING state now. String cntByStateSql = "SELECT COUNT(*) FROM SYS.PARTITION_STATES " + "WHERE CACHE_GROUP_ID IN (?, ?) 
AND STATE = ?"; for (Ignite ignite : Arrays.asList(ignite0, ignite1, ignite2)) { assertEquals(10L, execute(ignite, cntByStateSql, cacheGrpId1, cacheGrpId2, owningState).get(0).get(0)); assertEquals(0L, execute(ignite, cntByStateSql, cacheGrpId1, cacheGrpId2, movingState).get(0).get(0)); } // Check that assignment is now changed to ideal. for (Ignite ignite : Arrays.asList(ignite0, ignite1, ignite2)) { assertEquals(false, execute(ignite, partPrimarySql, cacheGrpId1, nodeId0, 2).get(0).get(0)); assertEquals(true, execute(ignite, partPrimarySql, cacheGrpId1, nodeId2, 2).get(0).get(0)); } } finally { ignite0.cluster().setBaselineTopology(ignite0.cluster().topologyVersion()); } } /** */ @Test public void testBinaryMeta() { IgniteCache<Integer, TestObjectAllTypes> c1 = ignite0.createCache("test-cache"); IgniteCache<Integer, TestObjectEnum> c2 = ignite0.createCache("test-enum-cache"); execute(ignite0, "CREATE TABLE T1(ID LONG PRIMARY KEY, NAME VARCHAR(40), ACCOUNT BIGINT)"); execute(ignite0, "INSERT INTO T1(ID, NAME, ACCOUNT) VALUES(1, 'test', 1)"); c1.put(1, new TestObjectAllTypes()); c2.put(1, TestObjectEnum.A); List<List<?>> view = execute(ignite0, "SELECT TYPE_NAME, FIELDS_COUNT, FIELDS, IS_ENUM FROM SYS.BINARY_METADATA"); assertNotNull(view); assertEquals(3, view.size()); for (List<?> meta : view) { if (TestObjectEnum.class.getName().contains( meta.get(0).toString())) { assertTrue((Boolean)meta.get(3)); assertEquals(0, meta.get(1)); } else if (TestObjectAllTypes.class.getName().contains(meta.get(0).toString())) { assertFalse((Boolean)meta.get(3)); Field[] fields = TestObjectAllTypes.class.getDeclaredFields(); assertEquals(fields.length, meta.get(1)); for (Field field : fields) assertTrue(meta.get(2).toString().contains(field.getName())); } else { assertFalse((Boolean)meta.get(3)); assertEquals(2, meta.get(1)); assertTrue(meta.get(2).toString().contains("NAME")); assertTrue(meta.get(2).toString().contains("ACCOUNT")); } } } /** */ @Test public void testMetastorage() 
throws Exception { IgniteCacheDatabaseSharedManager db = ignite0.context().cache().context().database(); SystemView<MetastorageView> metaStoreView = ignite0.context().systemView().view(METASTORE_VIEW); assertNotNull(metaStoreView); String name = "test-key"; String val = "test-value"; String unmarshalledName = "unmarshalled-key"; String unmarshalledVal = "[Raw data. 0 bytes]"; db.checkpointReadLock(); try { db.metaStorage().write(name, val); db.metaStorage().writeRaw(unmarshalledName, new byte[0]); } finally { db.checkpointReadUnlock(); } assertEquals(1, execute(ignite0, "SELECT * FROM SYS.METASTORAGE WHERE name = ? AND value = ?", name, val).size()); assertEquals(1, execute(ignite0, "SELECT * FROM SYS.METASTORAGE WHERE name = ? AND value = ?", unmarshalledName, unmarshalledVal).size()); } /** */ @Test public void testDistributedMetastorage() throws Exception { DistributedMetaStorage dms = ignite0.context().distributedMetastorage(); SystemView<MetastorageView> distributedMetaStoreView = ignite0.context().systemView().view(DISTRIBUTED_METASTORE_VIEW); assertNotNull(distributedMetaStoreView); String name = "test-distributed-key"; String val = "test-distributed-value"; dms.write(name, val); assertEquals(1, execute(ignite0, "SELECT * FROM SYS.DISTRIBUTED_METASTORAGE WHERE name = ? AND value = ?", name, val).size()); assertTrue(waitForCondition(() -> execute(ignite1, "SELECT * FROM SYS.DISTRIBUTED_METASTORAGE WHERE name = ? 
AND value = ?", name, val).size() == 1, getTestTimeout())); } /** */ @Test public void testSnapshot() throws Exception { String snap0 = "testSnapshot0"; String snap1 = "testSnapshot1"; int nodesCnt = G.allGrids().size(); assertEquals(0, execute(ignite0, "SELECT * FROM SYS.SNAPSHOT").size()); ignite0.snapshot().createSnapshot(snap0).get(); assertEquals(nodesCnt, execute(ignite0, "SELECT * FROM SYS.SNAPSHOT").size()); ignite0.createCache(DEFAULT_CACHE_NAME).put("key", "val"); ignite0.snapshot().createSnapshot(snap1).get(); assertEquals(nodesCnt * 2, execute(ignite0, "SELECT * FROM SYS.SNAPSHOT").size()); assertEquals(nodesCnt, execute(ignite0, "SELECT * FROM SYS.SNAPSHOT where name = ?", snap0).size()); assertEquals(nodesCnt, execute(ignite0, "SELECT * FROM SYS.SNAPSHOT WHERE cache_groups LIKE '%" + DEFAULT_CACHE_NAME + "%'").size()); } /** * Execute query on given node. * * @param node Node. * @param sql Statement. */ private List<List<?>> execute(Ignite node, String sql, Object... args) { SqlFieldsQuery qry = new SqlFieldsQuery(sql) .setArgs(args) .setSchema("PUBLIC"); return queryProcessor(node).querySqlFields(qry, true).getAll(); } /** * Affinity function with fixed partition allocation. */ public static class TestAffinityFunction implements AffinityFunction { /** Partitions to nodes map. */ private final String[][] partMap; /** * @param partMap Parition allocation map, contains nodes consistent ids for each partition. */ public TestAffinityFunction(String[][] partMap) { this.partMap = partMap; } /** {@inheritDoc} */ @Override public void reset() { // No-op. 
} /** {@inheritDoc} */ @Override public int partitions() { return partMap.length; } /** {@inheritDoc} */ @Override public int partition(Object key) { return key.hashCode() % partitions(); } /** {@inheritDoc} */ @Override public List<List<ClusterNode>> assignPartitions(AffinityFunctionContext affCtx) { List<List<ClusterNode>> parts = new ArrayList<>(partMap.length); for (String[] nodes : partMap) { List<ClusterNode> nodesList = new ArrayList<>(); for (String nodeConsistentId: nodes) { ClusterNode affNode = F.find(affCtx.currentTopologySnapshot(), null, (IgnitePredicate<ClusterNode>)node -> node.consistentId().equals(nodeConsistentId)); if (affNode != null) nodesList.add(affNode); } parts.add(nodesList); } return parts; } /** {@inheritDoc} */ @Override public void removeNode(UUID nodeId) { // No-op. } } }
/* * The Alluxio Open Foundation licenses this work under the Apache License, version 2.0 * (the "License"). You may not use this work except in compliance with the License, which is * available at www.apache.org/licenses/LICENSE-2.0 * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied, as more fully set forth in the License. * * See the NOTICE file distributed with this work for information regarding copyright ownership. */ package alluxio.client.fs; import static org.junit.Assert.assertFalse; import alluxio.AlluxioURI; import alluxio.Configuration; import alluxio.Constants; import alluxio.PropertyKey; import alluxio.client.WriteType; import alluxio.client.file.FileOutStream; import alluxio.client.file.FileSystem; import alluxio.client.file.FileSystemTestUtils; import alluxio.client.file.URIStatus; import alluxio.client.file.options.CreateDirectoryOptions; import alluxio.client.file.options.CreateFileOptions; import alluxio.client.file.options.DeleteOptions; import alluxio.exception.AlluxioException; import alluxio.exception.DirectoryNotEmptyException; import alluxio.exception.FileAlreadyExistsException; import alluxio.exception.FileDoesNotExistException; import alluxio.exception.InvalidPathException; import alluxio.master.LocalAlluxioCluster; import alluxio.testutils.BaseIntegrationTest; import alluxio.testutils.LocalAlluxioClusterResource; import alluxio.underfs.UnderFileSystem; import alluxio.util.CommonUtils; import alluxio.util.UnderFileSystemUtils; import alluxio.util.WaitForOptions; import alluxio.util.io.PathUtils; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import java.io.IOException; /** * Integration tests for Alluxio Client (reuse the {@link LocalAlluxioCluster}). 
 */
public final class FileSystemIntegrationTest extends BaseIntegrationTest {
  // Fixed payload used when testing deletion of files with in-progress writes.
  private static final byte[] TEST_BYTES = "TestBytes".getBytes();
  // Per-user buffer size configured on the mini cluster (bytes).
  private static final int USER_QUOTA_UNIT_BYTES = 1000;

  /** Spins up (and tears down) a local Alluxio cluster around every test method. */
  @Rule
  public LocalAlluxioClusterResource mLocalAlluxioClusterResource =
      new LocalAlluxioClusterResource.Builder()
          .setProperty(PropertyKey.USER_FILE_BUFFER_BYTES, USER_QUOTA_UNIT_BYTES)
          .build();

  private FileSystem mFileSystem = null;
  // Create-file options writing CACHE_THROUGH, i.e. both Alluxio and the under FS.
  private CreateFileOptions mWriteBoth;
  // Handle on the root under file system, used to manipulate/mount UFS paths directly.
  private UnderFileSystem mUfs;

  /** Declares the exception a test expects; tests pass when it is thrown afterwards. */
  @Rule
  public ExpectedException mThrown = ExpectedException.none();

  /** Acquires a client and UFS handle from the freshly started cluster. */
  @Before
  public void before() throws Exception {
    mFileSystem = mLocalAlluxioClusterResource.get().getClient();
    mWriteBoth = CreateFileOptions.defaults().setWriteType(WriteType.CACHE_THROUGH);
    mUfs = UnderFileSystem.Factory.createForRoot();
  }

  /** The root directory must exist and carry the reserved file id 0. */
  @Test
  public void getRoot() throws Exception {
    Assert.assertEquals(0, mFileSystem.getStatus(new AlluxioURI("/")).getFileId());
  }

  /** Creates several files under a unique path and checks each becomes visible. */
  @Test
  public void createFile() throws Exception {
    String uniqPath = PathUtils.uniqPath();
    for (int k = 1; k < 5; k++) {
      AlluxioURI uri = new AlluxioURI(uniqPath + k);
      mFileSystem.createFile(uri, mWriteBoth).close();
      Assert.assertNotNull(mFileSystem.getStatus(uri));
    }
  }

  /** Creates files, deletes them, and verifies they are gone. */
  @Test
  public void deleteFile() throws Exception {
    String uniqPath = PathUtils.uniqPath();
    for (int k = 0; k < 5; k++) {
      AlluxioURI fileURI = new AlluxioURI(uniqPath + k);
      FileSystemTestUtils.createByteFile(mFileSystem, fileURI.getPath(), k, mWriteBoth);
      // File should be fully cached in Alluxio right after creation.
      Assert.assertTrue(mFileSystem.getStatus(fileURI).getInAlluxioPercentage() == 100);
      Assert.assertNotNull(mFileSystem.getStatus(fileURI));
    }
    for (int k = 0; k < 5; k++) {
      AlluxioURI fileURI = new AlluxioURI(uniqPath + k);
      mFileSystem.delete(fileURI);
      Assert.assertFalse(mFileSystem.exists(fileURI));
      // NOTE(review): getStatus throws on the first iteration (k=0) and ends the test
      // there, so later iterations never execute this expectation — confirm intended.
      mThrown.expect(FileDoesNotExistException.class);
      mFileSystem.getStatus(fileURI);
    }
  }

  /**
   * Tests if a directory with in-progress writes can be deleted recursively.
   */
  @Test
  public void deleteDirectoryWithPersistedWritesInProgress() throws Exception {
    final AlluxioURI testFolder = new AlluxioURI("/testFolder");
    mFileSystem.createDirectory(testFolder,
        CreateDirectoryOptions.defaults().setWriteType(WriteType.CACHE_THROUGH));
    FileOutStream out = mFileSystem.createFile(new AlluxioURI("/testFolder/testFile"),
        CreateFileOptions.defaults().setWriteType(WriteType.CACHE_THROUGH));
    out.write(TEST_BYTES);
    out.flush();
    // Need to wait for the file to be flushed, see ALLUXIO-2899
    CommonUtils.waitFor("File flush.", () -> {
      try {
        // Flushed once the UFS directory backing the folder shows at least one entry.
        return mUfs.listStatus(mFileSystem.getStatus(testFolder).getUfsPath()).length > 0;
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }, WaitForOptions.defaults().setTimeoutMs(5 * Constants.SECOND_MS));
    mFileSystem.delete(new AlluxioURI("/testFolder"), DeleteOptions.defaults().setRecursive(true));
    Assert.assertFalse(mFileSystem.exists(new AlluxioURI("/testFolder")));
    // Closing the still-open stream after its parent was deleted must fail.
    mThrown.expect(IOException.class);
    out.close();
  }

  /** A written file reports full in-Alluxio residency and the expected path. */
  @Test
  public void getFileStatus() throws Exception {
    String uniqPath = PathUtils.uniqPath();
    int writeBytes = USER_QUOTA_UNIT_BYTES * 2;
    AlluxioURI uri = new AlluxioURI(uniqPath);
    FileSystemTestUtils.createByteFile(mFileSystem, uri.getPath(), writeBytes, mWriteBoth);
    Assert.assertTrue(mFileSystem.getStatus(uri).getInAlluxioPercentage() == 100);
    Assert.assertTrue(mFileSystem.getStatus(uri).getPath().equals(uniqPath));
  }

  /** Renames a file through a chain of new names; the file id must be stable. */
  @Test
  public void renameFileTest1() throws Exception {
    String uniqPath = PathUtils.uniqPath();
    AlluxioURI path1 = new AlluxioURI(uniqPath + 1);
    mFileSystem.createFile(path1, mWriteBoth).close();
    for (int k = 1; k < 10; k++) {
      AlluxioURI fileA = new AlluxioURI(uniqPath + k);
      AlluxioURI fileB = new AlluxioURI(uniqPath + (k + 1));
      URIStatus existingFile = mFileSystem.getStatus(fileA);
      long oldFileId = existingFile.getFileId();
      Assert.assertNotNull(existingFile);
      mFileSystem.rename(fileA, fileB);
      URIStatus renamedFile = mFileSystem.getStatus(fileB);
      Assert.assertNotNull(renamedFile);
      Assert.assertEquals(oldFileId, renamedFile.getFileId());
    }
  }

  /** Renaming a file onto itself keeps it intact with the same file id. */
  @Test
  public void renameFileTest2() throws Exception {
    AlluxioURI uniqUri = new AlluxioURI(PathUtils.uniqPath());
    mFileSystem.createFile(uniqUri, mWriteBoth).close();
    URIStatus f = mFileSystem.getStatus(uniqUri);
    long oldFileId = f.getFileId();
    mFileSystem.rename(uniqUri, uniqUri);
    Assert.assertEquals(oldFileId, mFileSystem.getStatus(uniqUri).getFileId());
  }

  /**
   * Creates another directory on the local filesystem, alongside the existing Ufs, to be used as a
   * second Ufs.
   *
   * @return the path of the alternate Ufs directory
   */
  private String createAlternateUfs() throws Exception {
    AlluxioURI parentURI =
        new AlluxioURI(Configuration.get(PropertyKey.MASTER_MOUNT_TABLE_ROOT_UFS)).getParent();
    String alternateUfsRoot = parentURI.join("alternateUnderFSStorage").toString();
    UnderFileSystemUtils.mkdirIfNotExists(mUfs, alternateUfsRoot);
    return alternateUfsRoot;
  }

  /**
   * Deletes the alternate under file system directory.
   *
   * @param alternateUfsRoot the root of the alternate Ufs
   */
  private void destroyAlternateUfs(String alternateUfsRoot) throws Exception {
    UnderFileSystemUtils.deleteDirIfExists(mUfs, alternateUfsRoot);
  }

  /** Mounts an alternate UFS and verifies its contents are visible through Alluxio. */
  @Test
  public void mountAlternateUfs() throws Exception {
    String alternateUfsRoot = createAlternateUfs();
    try {
      String filePath = PathUtils.concatPath(alternateUfsRoot, "file1");
      UnderFileSystemUtils.touch(mUfs, filePath);
      mFileSystem.mount(new AlluxioURI("/d1"), new AlluxioURI(alternateUfsRoot));
      mFileSystem.loadMetadata(new AlluxioURI("/d1/file1"));
      Assert.assertEquals("file1", mFileSystem.listStatus(new AlluxioURI("/d1")).get(0).getName());
    } finally {
      // Always clean up the extra UFS directory, even on assertion failure.
      destroyAlternateUfs(alternateUfsRoot);
    }
  }

  /** Mounts two sibling subdirectories of the alternate UFS at separate mount points. */
  @Test
  public void mountAlternateUfsSubdirs() throws Exception {
    String alternateUfsRoot = createAlternateUfs();
    try {
      String dirPath1 = PathUtils.concatPath(alternateUfsRoot, "dir1");
      String dirPath2 = PathUtils.concatPath(alternateUfsRoot, "dir2");
      UnderFileSystemUtils.mkdirIfNotExists(mUfs, dirPath1);
      UnderFileSystemUtils.mkdirIfNotExists(mUfs, dirPath2);
      String filePath1 = PathUtils.concatPath(dirPath1, "file1");
      String filePath2 = PathUtils.concatPath(dirPath2, "file2");
      UnderFileSystemUtils.touch(mUfs, filePath1);
      UnderFileSystemUtils.touch(mUfs, filePath2);
      mFileSystem.mount(new AlluxioURI("/d1"), new AlluxioURI(dirPath1));
      mFileSystem.mount(new AlluxioURI("/d2"), new AlluxioURI(dirPath2));
      mFileSystem.loadMetadata(new AlluxioURI("/d1/file1"));
      mFileSystem.loadMetadata(new AlluxioURI("/d2/file2"));
      Assert.assertEquals("file1", mFileSystem.listStatus(new AlluxioURI("/d1")).get(0).getName());
      Assert.assertEquals("file2", mFileSystem.listStatus(new AlluxioURI("/d2")).get(0).getName());
    } finally {
      destroyAlternateUfs(alternateUfsRoot);
    }
  }

  /** Mounting a prefix or suffix of an already-mounted directory must be rejected. */
  @Test
  public void mountPrefixUfs() throws Exception {
    // Primary UFS cannot be re-mounted
    String ufsRoot = Configuration.get(PropertyKey.MASTER_MOUNT_TABLE_ROOT_UFS);
    String ufsSubdir = PathUtils.concatPath(ufsRoot, "dir1");
    UnderFileSystemUtils.mkdirIfNotExists(mUfs, ufsSubdir);
    try {
      mFileSystem.mount(new AlluxioURI("/dir"), new AlluxioURI(ufsSubdir));
      Assert.fail("Cannot remount primary ufs.");
    } catch (AlluxioException e) {
      // Exception expected
    }
    String alternateUfsRoot = createAlternateUfs();
    try {
      String midDirPath = PathUtils.concatPath(alternateUfsRoot, "mid");
      String innerDirPath = PathUtils.concatPath(midDirPath, "inner");
      UnderFileSystemUtils.mkdirIfNotExists(mUfs, innerDirPath);
      mFileSystem.mount(new AlluxioURI("/mid"), new AlluxioURI(midDirPath));
      // Cannot mount suffix of already-mounted directory
      try {
        mFileSystem.mount(new AlluxioURI("/inner"), new AlluxioURI(innerDirPath));
        Assert.fail("Cannot mount suffix of already-mounted directory");
      } catch (AlluxioException e) {
        // Exception expected, continue
      }
      // Cannot mount prefix of already-mounted directory
      try {
        mFileSystem.mount(new AlluxioURI("/root"), new AlluxioURI(alternateUfsRoot));
        Assert.fail("Cannot mount prefix of already-mounted directory");
      } catch (AlluxioException e) {
        // Exception expected, continue
      }
    } finally {
      destroyAlternateUfs(alternateUfsRoot);
    }
  }

  /** Mounting onto a path that shadows existing primary-UFS content must be rejected. */
  @Test
  public void mountShadowUfs() throws Exception {
    String ufsRoot = Configuration.get(PropertyKey.MASTER_MOUNT_TABLE_ROOT_UFS);
    String ufsSubdir = PathUtils.concatPath(ufsRoot, "dir1");
    UnderFileSystemUtils.mkdirIfNotExists(mUfs, ufsSubdir);
    String alternateUfsRoot = createAlternateUfs();
    try {
      String subdirPath = PathUtils.concatPath(alternateUfsRoot, "subdir");
      UnderFileSystemUtils.mkdirIfNotExists(mUfs, subdirPath);
      // Cannot mount to path that shadows a file in the primary UFS
      mFileSystem.mount(new AlluxioURI("/dir1"), new AlluxioURI(subdirPath));
      Assert.fail("Cannot mount to path that shadows a file in the primary UFS");
    } catch (AlluxioException e) {
      // Exception expected, continue
    } finally {
      destroyAlternateUfs(alternateUfsRoot);
    }
  }

  // Test exception cases for all FileSystem RPCs
  /** Creating a directory that already exists must fail. */
  @Test
  public void createExistingDirectory() throws Exception {
    AlluxioURI path = new AlluxioURI("/dir");
    mFileSystem.createDirectory(path);
    mThrown.expect(FileAlreadyExistsException.class);
    mFileSystem.createDirectory(path);
  }

  /** Creating a directory where a file already exists must fail. */
  @Test
  public void createDirectoryOnTopOfFile() throws Exception {
    AlluxioURI path = new AlluxioURI("/dir");
    FileSystemTestUtils.createByteFile(mFileSystem, path, CreateFileOptions.defaults(), 10);
    mThrown.expect(FileAlreadyExistsException.class);
    mFileSystem.createDirectory(path);
  }

  /** Creating a directory with a malformed path must fail. */
  @Test
  public void createDirectoryInvalidPath() throws Exception {
    mThrown.expect(InvalidPathException.class);
    mFileSystem.createDirectory(new AlluxioURI("not a path"));
  }

  /** Creating a file that already exists must fail. */
  @Test
  public void createExistingFile() throws Exception {
    AlluxioURI path = new AlluxioURI("/file");
    mFileSystem.createFile(path).close();
    mThrown.expect(FileAlreadyExistsException.class);
    mFileSystem.createFile(path);
  }

  /** Creating a file with a malformed path must fail. */
  @Test
  public void createFileInvalidPath() throws Exception {
    mThrown.expect(InvalidPathException.class);
    mFileSystem.createFile(new AlluxioURI("not a path"));
  }

  /** Deleting a nonexistent path must fail. */
  @Test
  public void deleteNonexistingPath() throws Exception {
    mThrown.expect(FileDoesNotExistException.class);
    mFileSystem.delete(new AlluxioURI("/dir"));
  }

  /** Deleting a nonexistent nested path must fail. */
  @Test
  public void deleteNonexistingNestedPath() throws Exception {
    mThrown.expect(FileDoesNotExistException.class);
    mFileSystem.delete(new AlluxioURI("/dir/dir"));
  }

  /** Non-recursive delete of a non-empty directory must fail. */
  @Test
  public void deleteNonemptyDirectory() throws Exception {
    AlluxioURI dir = new AlluxioURI("/dir");
    mFileSystem.createDirectory(dir);
    mFileSystem.createFile(new AlluxioURI(PathUtils.concatPath(dir, "file"))).close();
    mThrown.expect(DirectoryNotEmptyException.class);
    mFileSystem.delete(dir, DeleteOptions.defaults().setRecursive(false));
  }

  /** exists() on a nonexistent path returns false (no exception). */
  @Test
  public void existsNonexistingPath() throws Exception {
    AlluxioURI path = new AlluxioURI("/path");
    assertFalse(mFileSystem.exists(path));
  }

  /** exists() on a nonexistent nested path returns false (no exception). */
  @Test
  public void existsNonexistingNestedPath() throws Exception {
    AlluxioURI path = new AlluxioURI("/dir/path");
    assertFalse(mFileSystem.exists(path));
  }

  /** free() on a nonexistent path must fail. */
  @Test
  public void freeNonexistingPath() throws Exception {
    mThrown.expect(FileDoesNotExistException.class);
    mFileSystem.free(new AlluxioURI("/path"));
  }

  /** free() on a nonexistent nested path must fail. */
  @Test
  public void freeNonexistingNestedPath() throws Exception {
    mThrown.expect(FileDoesNotExistException.class);
    mFileSystem.free(new AlluxioURI("/dir/path"));
  }

  /** getStatus() on a nonexistent path must fail. */
  @Test
  public void getStatusNonexistingPath() throws Exception {
    mThrown.expect(FileDoesNotExistException.class);
    mFileSystem.getStatus(new AlluxioURI("/path"));
  }

  /** getStatus() on a nonexistent nested path must fail. */
  @Test
  public void getStatusNonexistingNestedPath() throws Exception {
    mThrown.expect(FileDoesNotExistException.class);
    mFileSystem.getStatus(new AlluxioURI("/dir/path"));
  }

  /** listStatus() on a nonexistent path must fail. */
  @Test
  public void listStatusNonexistingPath() throws Exception {
    mThrown.expect(FileDoesNotExistException.class);
    mFileSystem.listStatus(new AlluxioURI("/path"));
  }

  /** loadMetadata() on a nonexistent path must fail. */
  @Test
  public void loadMetadataNonexistingPath() throws Exception {
    mThrown.expect(FileDoesNotExistException.class);
    mFileSystem.loadMetadata(new AlluxioURI("/path"));
  }

  /** openFile() on a nonexistent path must fail. */
  @Test
  public void openFileNonexistingPath() throws Exception {
    AlluxioURI path = new AlluxioURI("/path");
    mThrown.expect(FileDoesNotExistException.class);
    mFileSystem.openFile(path);
  }

  /** rename() with a nonexistent source must fail. */
  @Test
  public void renameNonexistingPath() throws Exception {
    mThrown.expect(FileDoesNotExistException.class);
    mFileSystem.rename(new AlluxioURI("/path1"), new AlluxioURI("/path1"));
  }

  /** setAttribute() on a nonexistent path must fail. */
  @Test
  public void setAttributeNonexistingPath() throws Exception {
    mThrown.expect(FileDoesNotExistException.class);
    mFileSystem.setAttribute(new AlluxioURI("/path"));
  }
}
package publichealthcomplaint.complaintmgr.impl;

import java.util.Calendar;
import java.util.GregorianCalendar;

import publichealthcomplaint.datatypes.IScheduleDt;
import publichealthcomplaint.exceptionhandling.impl.InvalidDateException;

/**
 * Time-of-day value (hour/minute/second) used by the complaint manager.
 *
 * <p>Supports construction from ints, numeric strings, or the current clock, plus
 * comparison, formatting and parsing in two textual formats.
 */
class Schedule implements IScheduleDt {

    /** Seconds, 0-59. */
    private int segundo;

    /** Hours, 0-23 (24-hour clock). */
    private int hora;

    /** Minutes, 0-59. */
    private int minuto;

    /**
     * Representa o formato hh:mi:ss
     */
    public static final int FORMATO1 = 1;

    /**
     * Representa o formato hhmiss
     */
    public static final int FORMATO2 = 2;

    /** Creates a schedule holding the current wall-clock time. */
    public Schedule() {
        GregorianCalendar calendar = new GregorianCalendar();

        hora = calendar.get(Calendar.HOUR_OF_DAY);
        minuto = calendar.get(Calendar.MINUTE);
        segundo = calendar.get(Calendar.SECOND);
    }

    /**
     * Creates a schedule from numeric components.
     *
     * @param segundo seconds, 0-59
     * @param minuto minutes, 0-59
     * @param hora hours, 0-23
     * @throws InvalidDateException if any component is out of range
     */
    public Schedule(int segundo, int minuto, int hora) throws InvalidDateException {
        // Validate before assigning so a failed construction does not leave
        // partially-initialized state behind (the old code assigned first).
        validaHorario(segundo, minuto, hora);

        this.segundo = segundo;
        this.minuto = minuto;
        this.hora = hora;
    }

    /**
     * Creates a schedule from numeric strings.
     *
     * @param segundoStr seconds as a decimal string
     * @param minutoStr minutes as a decimal string
     * @param horaStr hours as a decimal string
     * @throws InvalidDateException if a string is not numeric or a value is out of range
     */
    public Schedule(String segundoStr, String minutoStr, String horaStr)
            throws InvalidDateException {
        int seg;
        int min;
        int hr;

        try {
            seg = Integer.parseInt(segundoStr);
            min = Integer.parseInt(minutoStr);
            hr = Integer.parseInt(horaStr);
        }
        catch (NumberFormatException ne) {
            // Bug fix: the old code reported the int fields, which were only
            // partially assigned at this point; report the raw input instead.
            throw new InvalidDateException(horaStr + ":" + minutoStr + ":" + segundoStr);
        }

        validaHorario(seg, min, hr);

        this.segundo = seg;
        this.minuto = min;
        this.hora = hr;
    }

    /**
     * Compares this schedule with another.
     *
     * @param horario the schedule to compare against
     * @return 1 if this is later, -1 if earlier, 0 if equal
     */
    public int compara(IScheduleDt horario) {
        int retorno = 0;

        if (hora > horario.getHora()) {
            retorno = 1;
        }
        else if (hora < horario.getHora()) {
            retorno = -1;
        }
        else {
            if (minuto > horario.getMinuto()) {
                retorno = 1;
            }
            else if (minuto < horario.getMinuto()) {
                retorno = -1;
            }
            else {
                if (segundo > horario.getSegundo()) {
                    retorno = 1;
                }
                else if (segundo < horario.getSegundo()) {
                    retorno = -1;
                }
            }
        }

        return retorno;
    }

    /**
     * Retorna a representacao string da hora que foi recebida
     * como parametro no formato representado pela constante formato.
     */
    public String format(int formato) {
        return format(this, formato);
    }

    /**
     * Retorna a representacao string da hora que foi recebida
     * como parametro no formato representado pela constante formato.
     *
     * @param horario the schedule to render
     * @param formato {@link #FORMATO1} ("hh:mi:ss") or {@link #FORMATO2} ("hhmiss")
     * @return the formatted string, or {@code null} for an unknown format
     */
    public String format(IScheduleDt horario, int formato) {
        // Zero-pad each component to two digits. (The old dead
        // catch(NumberFormatException) was removed: String.valueOf(int) cannot throw.)
        String segundoStr = pad2(String.valueOf(horario.getSegundo()));
        String minutoStr = pad2(String.valueOf(horario.getMinuto()));
        String horaStr = pad2(String.valueOf(horario.getHora()));

        String texto = null;

        switch (formato) {
            case (FORMATO1):
                texto = horaStr + ":" + minutoStr + ":" + segundoStr;
                break;
            case (FORMATO2):
                texto = horaStr + minutoStr + segundoStr;
                break;
            default:
                texto = null;
                break;
        }

        return texto;
    }

    /** Left-pads a one-digit component with '0' so it is exactly two characters. */
    private static String pad2(String componente) {
        return (componente.length() < 2) ? "0" + componente : componente;
    }

    /**
     * @return the hour component (0-23)
     */
    public int getHora() {
        return hora;
    }

    /**
     * @return the minute component (0-59)
     */
    public int getMinuto() {
        return minuto;
    }

    /**
     * @return the second component (0-59)
     */
    public int getSegundo() {
        return segundo;
    }

    /**
     * Transforma string em data.
     * recebe como parametro o String e o separador utilizado.
     *
     * @param horarioStr the textual time
     * @param formato {@link #FORMATO1} ("hh:mi:ss") or {@link #FORMATO2} ("hhmiss")
     * @return the parsed schedule
     * @throws InvalidDateException if the text is malformed or the format is unknown
     */
    public IScheduleDt stringToHorario(String horarioStr, int formato)
            throws InvalidDateException {
        String segundoStr;
        String minutoStr;
        String horaStr;

        try {
            switch (formato) {
                case (Schedule.FORMATO1):
                    // "hh:mi:ss"
                    horaStr = horarioStr.substring(0, 2);
                    minutoStr = horarioStr.substring(3, 5);
                    segundoStr = horarioStr.substring(6, 8);
                    break;
                case (Schedule.FORMATO2):
                    // "hhmiss" — hour first, matching format(FORMATO2). Bug fix: the
                    // old code read seconds from positions 0-2 and hours from 4-6,
                    // so a format()/stringToHorario() round trip swapped them.
                    horaStr = horarioStr.substring(0, 2);
                    minutoStr = horarioStr.substring(2, 4);
                    segundoStr = horarioStr.substring(4, 6);
                    break;
                default:
                    // Unknown format: same observable behavior as before
                    // (the old code fell through to a parse of nulls and wrapped
                    // the resulting failure in this exact exception).
                    throw new InvalidDateException(horarioStr);
            }

            return new Schedule(segundoStr, minutoStr, horaStr);
        }
        catch (Exception nb) {
            // Any slicing/parsing failure is reported against the original input.
            throw new InvalidDateException(horarioStr);
        }
    }

    /**
     * Valida uma data (dia, mes e ano), caso algum dos valores seja
     * invalido lanca a excecao DataInvalidaException
     *
     * @throws InvalidDateException if any component is outside its valid range
     */
    private void validaHorario(int segundo, int minuto, int hora)
            throws InvalidDateException {
        if (!((segundo >= 0) && (segundo <= 59)
                && (minuto >= 0) && (minuto <= 59)
                && (hora >= 0) && (hora <= 23))) {
            throw new InvalidDateException(segundo, minuto, hora);
        }
    }
}
/* * Copyright (C) 2015 Square, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package okhttp3; import java.net.URI; import java.net.URL; import java.util.Collections; import java.util.LinkedHashMap; import java.util.Map; import okhttp3.internal.Util; import okio.Buffer; import okio.ByteString; import static org.junit.Assert.fail; /** Tests how each code point is encoded and decoded in the context of each URL component. */ class UrlComponentEncodingTester { private static final int UNICODE_2 = 0x07ff; // Arbitrary code point that's 2 bytes in UTF-8. private static final int UNICODE_3 = 0xffff; // Arbitrary code point that's 3 bytes in UTF-8. private static final int UNICODE_4 = 0x10ffff; // Arbitrary code point that's 4 bytes in UTF-8. /** * The default encode set for the ASCII range. The specific rules vary per-component: for example, * '?' may be identity-encoded in a fragment, but must be percent-encoded in a path. 
* * See https://url.spec.whatwg.org/#percent-encoded-bytes */ private static final Map<Integer, Encoding> defaultEncodings; static { Map<Integer, Encoding> map = new LinkedHashMap<>(); map.put( 0x0, Encoding.PERCENT); // Null character map.put( 0x1, Encoding.PERCENT); // Start of Header map.put( 0x2, Encoding.PERCENT); // Start of Text map.put( 0x3, Encoding.PERCENT); // End of Text map.put( 0x4, Encoding.PERCENT); // End of Transmission map.put( 0x5, Encoding.PERCENT); // Enquiry map.put( 0x6, Encoding.PERCENT); // Acknowledgment map.put( 0x7, Encoding.PERCENT); // Bell map.put((int) '\b', Encoding.PERCENT); // Backspace map.put((int) '\t', Encoding.SKIP); // Horizontal Tab map.put((int) '\n', Encoding.SKIP); // Line feed map.put( 0xb, Encoding.PERCENT); // Vertical Tab map.put((int) '\f', Encoding.SKIP); // Form feed map.put((int) '\r', Encoding.SKIP); // Carriage return map.put( 0xe, Encoding.PERCENT); // Shift Out map.put( 0xf, Encoding.PERCENT); // Shift In map.put( 0x10, Encoding.PERCENT); // Data Link Escape map.put( 0x11, Encoding.PERCENT); // Device Control 1 (oft. XON) map.put( 0x12, Encoding.PERCENT); // Device Control 2 map.put( 0x13, Encoding.PERCENT); // Device Control 3 (oft. 
XOFF) map.put( 0x14, Encoding.PERCENT); // Device Control 4 map.put( 0x15, Encoding.PERCENT); // Negative Acknowledgment map.put( 0x16, Encoding.PERCENT); // Synchronous idle map.put( 0x17, Encoding.PERCENT); // End of Transmission Block map.put( 0x18, Encoding.PERCENT); // Cancel map.put( 0x19, Encoding.PERCENT); // End of Medium map.put( 0x1a, Encoding.PERCENT); // Substitute map.put( 0x1b, Encoding.PERCENT); // Escape map.put( 0x1c, Encoding.PERCENT); // File Separator map.put( 0x1d, Encoding.PERCENT); // Group Separator map.put( 0x1e, Encoding.PERCENT); // Record Separator map.put( 0x1f, Encoding.PERCENT); // Unit Separator map.put((int) ' ', Encoding.PERCENT); map.put((int) '!', Encoding.IDENTITY); map.put((int) '"', Encoding.PERCENT); map.put((int) '#', Encoding.PERCENT); map.put((int) '$', Encoding.IDENTITY); map.put((int) '%', Encoding.IDENTITY); map.put((int) '&', Encoding.IDENTITY); map.put((int) '\'', Encoding.IDENTITY); map.put((int) '(', Encoding.IDENTITY); map.put((int) ')', Encoding.IDENTITY); map.put((int) '*', Encoding.IDENTITY); map.put((int) '+', Encoding.IDENTITY); map.put((int) ',', Encoding.IDENTITY); map.put((int) '-', Encoding.IDENTITY); map.put((int) '.', Encoding.IDENTITY); map.put((int) '/', Encoding.IDENTITY); map.put((int) '0', Encoding.IDENTITY); map.put((int) '1', Encoding.IDENTITY); map.put((int) '2', Encoding.IDENTITY); map.put((int) '3', Encoding.IDENTITY); map.put((int) '4', Encoding.IDENTITY); map.put((int) '5', Encoding.IDENTITY); map.put((int) '6', Encoding.IDENTITY); map.put((int) '7', Encoding.IDENTITY); map.put((int) '8', Encoding.IDENTITY); map.put((int) '9', Encoding.IDENTITY); map.put((int) ':', Encoding.IDENTITY); map.put((int) ';', Encoding.IDENTITY); map.put((int) '<', Encoding.PERCENT); map.put((int) '=', Encoding.IDENTITY); map.put((int) '>', Encoding.PERCENT); map.put((int) '?', Encoding.PERCENT); map.put((int) '@', Encoding.IDENTITY); map.put((int) 'A', Encoding.IDENTITY); map.put((int) 'B', Encoding.IDENTITY); 
map.put((int) 'C', Encoding.IDENTITY); map.put((int) 'D', Encoding.IDENTITY); map.put((int) 'E', Encoding.IDENTITY); map.put((int) 'F', Encoding.IDENTITY); map.put((int) 'G', Encoding.IDENTITY); map.put((int) 'H', Encoding.IDENTITY); map.put((int) 'I', Encoding.IDENTITY); map.put((int) 'J', Encoding.IDENTITY); map.put((int) 'K', Encoding.IDENTITY); map.put((int) 'L', Encoding.IDENTITY); map.put((int) 'M', Encoding.IDENTITY); map.put((int) 'N', Encoding.IDENTITY); map.put((int) 'O', Encoding.IDENTITY); map.put((int) 'P', Encoding.IDENTITY); map.put((int) 'Q', Encoding.IDENTITY); map.put((int) 'R', Encoding.IDENTITY); map.put((int) 'S', Encoding.IDENTITY); map.put((int) 'T', Encoding.IDENTITY); map.put((int) 'U', Encoding.IDENTITY); map.put((int) 'V', Encoding.IDENTITY); map.put((int) 'W', Encoding.IDENTITY); map.put((int) 'X', Encoding.IDENTITY); map.put((int) 'Y', Encoding.IDENTITY); map.put((int) 'Z', Encoding.IDENTITY); map.put((int) '[', Encoding.IDENTITY); map.put((int) '\\', Encoding.IDENTITY); map.put((int) ']', Encoding.IDENTITY); map.put((int) '^', Encoding.IDENTITY); map.put((int) '_', Encoding.IDENTITY); map.put((int) '`', Encoding.PERCENT); map.put((int) 'a', Encoding.IDENTITY); map.put((int) 'b', Encoding.IDENTITY); map.put((int) 'c', Encoding.IDENTITY); map.put((int) 'd', Encoding.IDENTITY); map.put((int) 'e', Encoding.IDENTITY); map.put((int) 'f', Encoding.IDENTITY); map.put((int) 'g', Encoding.IDENTITY); map.put((int) 'h', Encoding.IDENTITY); map.put((int) 'i', Encoding.IDENTITY); map.put((int) 'j', Encoding.IDENTITY); map.put((int) 'k', Encoding.IDENTITY); map.put((int) 'l', Encoding.IDENTITY); map.put((int) 'm', Encoding.IDENTITY); map.put((int) 'n', Encoding.IDENTITY); map.put((int) 'o', Encoding.IDENTITY); map.put((int) 'p', Encoding.IDENTITY); map.put((int) 'q', Encoding.IDENTITY); map.put((int) 'r', Encoding.IDENTITY); map.put((int) 's', Encoding.IDENTITY); map.put((int) 't', Encoding.IDENTITY); map.put((int) 'u', Encoding.IDENTITY); 
map.put((int) 'v', Encoding.IDENTITY); map.put((int) 'w', Encoding.IDENTITY); map.put((int) 'x', Encoding.IDENTITY); map.put((int) 'y', Encoding.IDENTITY); map.put((int) 'z', Encoding.IDENTITY); map.put((int) '{', Encoding.IDENTITY); map.put((int) '|', Encoding.IDENTITY); map.put((int) '}', Encoding.IDENTITY); map.put((int) '~', Encoding.IDENTITY); map.put( 0x7f, Encoding.PERCENT); // Delete map.put( UNICODE_2, Encoding.PERCENT); map.put( UNICODE_3, Encoding.PERCENT); map.put( UNICODE_4, Encoding.PERCENT); defaultEncodings = Collections.unmodifiableMap(map); } private final Map<Integer, Encoding> encodings; private final StringBuilder uriEscapedCodePoints = new StringBuilder(); public UrlComponentEncodingTester() { this.encodings = new LinkedHashMap<>(defaultEncodings); } public UrlComponentEncodingTester override(Encoding encoding, int... codePoints) { for (int codePoint : codePoints) { encodings.put(codePoint, encoding); } return this; } public UrlComponentEncodingTester identityForNonAscii() { encodings.put(UNICODE_2, Encoding.IDENTITY); encodings.put(UNICODE_3, Encoding.IDENTITY); encodings.put(UNICODE_4, Encoding.IDENTITY); return this; } /** * Configure a character to be skipped but only for conversion to and from {@code java.net.URI}. * That class is more strict than the others. */ public UrlComponentEncodingTester skipForUri(int... 
codePoints) { uriEscapedCodePoints.append(new String(codePoints, 0, codePoints.length)); return this; } public UrlComponentEncodingTester test(Component component) { for (Map.Entry<Integer, Encoding> entry : encodings.entrySet()) { Encoding encoding = entry.getValue(); int codePoint = entry.getKey(); testEncodeAndDecode(codePoint, component); if (encoding == Encoding.SKIP) continue; testParseOriginal(codePoint, encoding, component); testParseAlreadyEncoded(codePoint, encoding, component); testToUrl(codePoint, encoding, component); testFromUrl(codePoint, encoding, component); if (codePoint != '%') { boolean uriEscaped = uriEscapedCodePoints.indexOf( Encoding.IDENTITY.encode(codePoint)) != -1; testUri(codePoint, encoding, component, uriEscaped); } } return this; } private void testParseAlreadyEncoded(int codePoint, Encoding encoding, Component component) { String encoded = encoding.encode(codePoint); String urlString = component.urlString(encoded); HttpUrl url = HttpUrl.parse(urlString); if (!component.encodedValue(url).equals(encoded)) { fail(Util.format("Encoding %s %#x using %s", component, codePoint, encoding)); } } private void testEncodeAndDecode(int codePoint, Component component) { String expected = Encoding.IDENTITY.encode(codePoint); HttpUrl.Builder builder = HttpUrl.parse("http://host/").newBuilder(); component.set(builder, expected); HttpUrl url = builder.build(); String actual = component.get(url); if (!expected.equals(actual)) { fail(Util.format("Roundtrip %s %#x %s", component, codePoint, url)); } } private void testParseOriginal(int codePoint, Encoding encoding, Component component) { String encoded = encoding.encode(codePoint); if (encoding != Encoding.PERCENT) return; String identity = Encoding.IDENTITY.encode(codePoint); String urlString = component.urlString(identity); HttpUrl url = HttpUrl.parse(urlString); String s = component.encodedValue(url); if (!s.equals(encoded)) { fail(Util.format("Encoding %s %#02x using %s", component, codePoint, 
encoding)); } } private void testToUrl(int codePoint, Encoding encoding, Component component) { String encoded = encoding.encode(codePoint); HttpUrl httpUrl = HttpUrl.parse(component.urlString(encoded)); URL javaNetUrl = httpUrl.url(); if (!javaNetUrl.toString().equals(javaNetUrl.toString())) { fail(Util.format("Encoding %s %#x using %s", component, codePoint, encoding)); } } private void testFromUrl(int codePoint, Encoding encoding, Component component) { String encoded = encoding.encode(codePoint); HttpUrl httpUrl = HttpUrl.parse(component.urlString(encoded)); HttpUrl toAndFromJavaNetUrl = HttpUrl.get(httpUrl.url()); if (!toAndFromJavaNetUrl.equals(httpUrl)) { fail(Util.format("Encoding %s %#x using %s", component, codePoint, encoding)); } } private void testUri( int codePoint, Encoding encoding, Component component, boolean uriEscaped) { String string = new String(new int[] {codePoint}, 0, 1); String encoded = encoding.encode(codePoint); HttpUrl httpUrl = HttpUrl.parse(component.urlString(encoded)); URI uri = httpUrl.uri(); HttpUrl toAndFromUri = HttpUrl.get(uri); if (uriEscaped) { // The URI has more escaping than the HttpURL. Check that the decoded values still match. if (uri.toString().equals(httpUrl.toString())) { fail(Util.format("Encoding %s %#x using %s", component, codePoint, encoding)); } if (!component.get(toAndFromUri).equals(string)) { fail(Util.format("Encoding %s %#x using %s", component, codePoint, encoding)); } } else { // Check that the URI and HttpURL have the exact same escaping. 
if (!toAndFromUri.equals(httpUrl)) { fail(Util.format("Encoding %s %#x using %s", component, codePoint, encoding)); } if (!uri.toString().equals(httpUrl.toString())) { fail(Util.format("Encoding %s %#x using %s", component, codePoint, encoding)); } } } public enum Encoding { IDENTITY { public String encode(int codePoint) { return new String(new int[] {codePoint}, 0, 1); } }, PERCENT { public String encode(int codePoint) { ByteString utf8 = ByteString.encodeUtf8(IDENTITY.encode(codePoint)); Buffer percentEncoded = new Buffer(); for (int i = 0; i < utf8.size(); i++) { percentEncoded.writeUtf8(Util.format("%%%02X", utf8.getByte(i) & 0xff)); } return percentEncoded.readUtf8(); } }, SKIP; public String encode(int codePoint) { throw new UnsupportedOperationException(); } } public enum Component { USER { @Override public String urlString(String value) { return "http://" + value + "@example.com/"; } @Override public String encodedValue(HttpUrl url) { return url.encodedUsername(); } @Override public void set(HttpUrl.Builder builder, String value) { builder.username(value); } @Override public String get(HttpUrl url) { return url.username(); } }, PASSWORD { @Override public String urlString(String value) { return "http://:" + value + "@example.com/"; } @Override public String encodedValue(HttpUrl url) { return url.encodedPassword(); } @Override public void set(HttpUrl.Builder builder, String value) { builder.password(value); } @Override public String get(HttpUrl url) { return url.password(); } }, PATH { @Override public String urlString(String value) { return "http://example.com/a" + value + "z/"; } @Override public String encodedValue(HttpUrl url) { String path = url.encodedPath(); return path.substring(2, path.length() - 2); } @Override public void set(HttpUrl.Builder builder, String value) { builder.addPathSegment("a" + value + "z"); } @Override public String get(HttpUrl url) { String pathSegment = url.pathSegments().get(0); return pathSegment.substring(1, 
pathSegment.length() - 1); } }, QUERY { @Override public String urlString(String value) { return "http://example.com/?a" + value + "z"; } @Override public String encodedValue(HttpUrl url) { String query = url.encodedQuery(); return query.substring(1, query.length() - 1); } @Override public void set(HttpUrl.Builder builder, String value) { builder.query("a" + value + "z"); } @Override public String get(HttpUrl url) { String query = url.query(); return query.substring(1, query.length() - 1); } }, QUERY_VALUE { @Override public String urlString(String value) { return "http://example.com/?q=a" + value + "z"; } @Override public String encodedValue(HttpUrl url) { String query = url.encodedQuery(); return query.substring(3, query.length() - 1); } @Override public void set(HttpUrl.Builder builder, String value) { builder.addQueryParameter("q", "a" + value + "z"); } @Override public String get(HttpUrl url) { String value = url.queryParameter("q"); return value.substring(1, value.length() - 1); } }, FRAGMENT { @Override public String urlString(String value) { return "http://example.com/#a" + value + "z"; } @Override public String encodedValue(HttpUrl url) { String fragment = url.encodedFragment(); return fragment.substring(1, fragment.length() - 1); } @Override public void set(HttpUrl.Builder builder, String value) { builder.fragment("a" + value + "z"); } @Override public String get(HttpUrl url) { String fragment = url.fragment(); return fragment.substring(1, fragment.length() - 1); } }; public abstract String urlString(String value); public abstract String encodedValue(HttpUrl url); public abstract void set(HttpUrl.Builder builder, String value); public abstract String get(HttpUrl url); } }
/*
 * Copyright 2017-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spring.data.spanner.core.mapping;

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import com.google.cloud.spanner.Key;
import com.google.cloud.spring.data.spanner.core.convert.ConverterAwareMappingSpannerEntityProcessor;
import com.google.cloud.spring.data.spanner.core.convert.SpannerEntityProcessor;
import com.google.spanner.v1.TypeCode;
import java.util.List;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.springframework.context.ApplicationContext;
import org.springframework.data.mapping.PersistentProperty;
import org.springframework.data.mapping.PersistentPropertyAccessor;
import org.springframework.data.mapping.PropertyHandler;
import org.springframework.data.mapping.SimplePropertyHandler;
import org.springframework.data.util.ClassTypeInformation;

/**
 * Tests for the Spanner persistent entity: table-name resolution (including SpEL
 * expressions), primary-key ordering and composite keys, embedded/interleaved
 * properties, and JSON-typed columns.
 */
public class SpannerPersistentEntityImplTests {

  /** Checks the messages and types of expected exceptions. */
  @Rule public ExpectedException thrown = ExpectedException.none();

  private final SpannerMappingContext spannerMappingContext;

  private final SpannerEntityProcessor spannerEntityProcessor;

  public SpannerPersistentEntityImplTests() {
    this.spannerMappingContext = new SpannerMappingContext();
    this.spannerEntityProcessor =
        new ConverterAwareMappingSpannerEntityProcessor(this.spannerMappingContext);
  }

  // A @Table(name = ...) annotation overrides the default table name.
  @Test
  public void testTableName() {
    SpannerPersistentEntityImpl<TestEntity> entity =
        new SpannerPersistentEntityImpl<>(
            ClassTypeInformation.from(TestEntity.class),
            this.spannerMappingContext,
            this.spannerEntityProcessor);
    assertThat(entity.tableName()).isEqualTo("custom_test_table");
  }

  // Without @Table, the table name defaults to the (uncapitalized) class name.
  @Test
  public void testRawTableName() {
    SpannerPersistentEntityImpl<EntityNoCustomName> entity =
        new SpannerPersistentEntityImpl<>(
            ClassTypeInformation.from(EntityNoCustomName.class),
            this.spannerMappingContext,
            this.spannerEntityProcessor);
    assertThat(entity.tableName()).isEqualTo("entityNoCustomName");
  }

  // A bare @Table (empty name) also falls back to the class-derived name.
  @Test
  public void testEmptyCustomTableName() {
    SpannerPersistentEntityImpl<EntityEmptyCustomName> entity =
        new SpannerPersistentEntityImpl<>(
            ClassTypeInformation.from(EntityEmptyCustomName.class),
            this.spannerMappingContext,
            this.spannerEntityProcessor);
    assertThat(entity.tableName()).isEqualTo("entityEmptyCustomName");
  }

  // @NotMapped fields must not appear; @Column(name = ...) renames appear instead.
  @Test
  public void testColumns() {
    assertThat(new SpannerMappingContext().getPersistentEntity(TestEntity.class).columns())
        .containsExactlyInAnyOrder("id", "custom_col");
  }

  // A SpEL table-name expression referencing a bean fails without an ApplicationContext.
  @Test
  public void testExpressionResolutionWithoutApplicationContext() {
    this.thrown.expect(SpannerDataException.class);
    this.thrown.expectMessage(
        "Error getting table name for EntityWithExpression; "
            + "nested exception is org.springframework.expression.spel.SpelEvaluationException: "
            + "EL1007E: Property or field 'tablePostfix' cannot be found on null");
    SpannerPersistentEntityImpl<EntityWithExpression> entity =
        new SpannerPersistentEntityImpl<>(
            ClassTypeInformation.from(EntityWithExpression.class),
            this.spannerMappingContext,
            this.spannerEntityProcessor);
    entity.tableName();
  }

  // With a context providing the 'tablePostfix' bean, the SpEL expression resolves.
  @Test
  public void testExpressionResolutionFromApplicationContext() {
    SpannerPersistentEntityImpl<EntityWithExpression> entity =
        new SpannerPersistentEntityImpl<>(
            ClassTypeInformation.from(EntityWithExpression.class),
            this.spannerMappingContext,
            this.spannerEntityProcessor);
    ApplicationContext applicationContext = mock(ApplicationContext.class);
    when(applicationContext.getBean("tablePostfix")).thenReturn("something");
    when(applicationContext.containsBean("tablePostfix")).thenReturn(true);
    entity.setApplicationContext(applicationContext);
    assertThat(entity.tableName()).isEqualTo("table_something");
  }

  @Test
  public void testDuplicatePrimaryKeyOrder() {
    this.thrown.expect(SpannerDataException.class);
    this.thrown.expectMessage(
        "Two properties were annotated with the same primary key order: "
            + "id2 and id in EntityWithDuplicatePrimaryKeyOrder.");
    new SpannerMappingContext().getPersistentEntity(EntityWithDuplicatePrimaryKeyOrder.class);
  }

  // keyOrder values 1 and 3 with no 2 are rejected as non-consecutive.
  @Test
  public void testInvalidPrimaryKeyOrder() {
    this.thrown.expect(SpannerDataException.class);
    this.thrown.expectMessage(
        "The primary key columns were not given a consecutive order. "
            + "There is no property annotated with order 2 in EntityWithWronglyOrderedKeys.");
    new SpannerMappingContext()
        .getPersistentEntity(EntityWithWronglyOrderedKeys.class)
        .getIdProperty();
  }

  // NOTE(review): asserts a non-null id property even though EntityWithNoId has no
  // @PrimaryKey fields — presumably the mapping context synthesizes one; confirm.
  @Test
  public void testNoIdEntity() {
    assertThat(
            new SpannerMappingContext().getPersistentEntity(EntityWithNoId.class).getIdProperty())
        .isNotNull();
  }

  @Test
  public void testGetIdProperty() {
    assertThat(new SpannerMappingContext().getPersistentEntity(TestEntity.class).getIdProperty())
        .isInstanceOf(SpannerCompositeKeyProperty.class);
  }

  @Test
  public void testHasIdProperty() {
    assertThat(new SpannerMappingContext().getPersistentEntity(TestEntity.class).hasIdProperty())
        .isTrue();
  }

  // Setting the composite id property distributes the Key parts across the key fields.
  @Test
  public void testSetIdProperty() {
    SpannerPersistentEntity entity =
        new SpannerMappingContext().getPersistentEntity(MultiIdsEntity.class);
    PersistentProperty idProperty = entity.getIdProperty();
    MultiIdsEntity t = new MultiIdsEntity();
    entity.getPropertyAccessor(t).setProperty(idProperty, Key.of("blah", 123L, 123.45D));
    assertThat(t.id).isEqualTo("blah");
    assertThat(t.id2).isEqualTo(123L);
    assertThat(t.id3).isEqualTo(123.45D);
  }

  // A Key with more parts than there are primary-key properties is rejected.
  @Test
  public void testSetIdPropertyLongerKey() {
    this.thrown.expect(SpannerDataException.class);
    this.thrown.expectMessage(
        "The number of key parts is not equal to the number of primary key properties");
    SpannerPersistentEntity entity =
        new SpannerMappingContext().getPersistentEntity(MultiIdsEntity.class);
    PersistentProperty idProperty = entity.getIdProperty();
    MultiIdsEntity t = new MultiIdsEntity();
    entity.getPropertyAccessor(t).setProperty(idProperty, Key.of("blah", 123L, 123.45D, "abc"));
  }

  // A null Key is rejected with the same key-parts mismatch message.
  @Test
  public void testSetIdPropertyNullKey() {
    this.thrown.expect(SpannerDataException.class);
    this.thrown.expectMessage(
        "The number of key parts is not equal to the number of primary key properties");
    SpannerPersistentEntity entity =
        new SpannerMappingContext().getPersistentEntity(MultiIdsEntity.class);
    PersistentProperty idProperty = entity.getIdProperty();
    MultiIdsEntity t = new MultiIdsEntity();
    entity.getPropertyAccessor(t).setProperty(idProperty, null);
  }

  // The @NotMapped value "b" must never be visible through the entity's properties.
  @Test
  public void testIgnoredProperty() {
    TestEntity t = new TestEntity();
    t.id = "a";
    t.something = "a";
    t.notMapped = "b";
    SpannerPersistentEntity p = new SpannerMappingContext().getPersistentEntity(TestEntity.class);
    PersistentPropertyAccessor accessor = p.getPropertyAccessor(t);
    p.doWithProperties(
        (SimplePropertyHandler)
            property -> assertThat(accessor.getProperty(property)).isNotEqualTo("b"));
  }

  // Table names are validated against injection-style names at resolution time.
  @Test
  public void testInvalidTableName() {
    this.thrown.expect(SpannerDataException.class);
    this.thrown.expectMessage(
        "Error getting table name for EntityBadName; nested exception is"
            + " com.google.cloud.spring.data.spanner.core.mapping.SpannerDataException: Only"
            + " letters, numbers, and underscores are allowed in table names: ;DROP TABLE"
            + " your_table;");
    SpannerPersistentEntityImpl<EntityBadName> entity =
        new SpannerPersistentEntityImpl<>(
            ClassTypeInformation.from(EntityBadName.class),
            this.spannerMappingContext,
            this.spannerEntityProcessor);
    entity.tableName();
  }

  // The same validation applies to names produced by SpEL expressions.
  @Test
  public void testSpelInvalidName() {
    this.thrown.expect(SpannerDataException.class);
    this.thrown.expectMessage(
        "Error getting table name for EntityWithExpression; nested exception is "
            + "com.google.cloud.spring.data.spanner.core.mapping.SpannerDataException: "
            + "Only letters, numbers, and underscores are allowed in table names: "
            + "table_; DROP TABLE your_table;");
    SpannerPersistentEntityImpl<EntityWithExpression> entity =
        new SpannerPersistentEntityImpl<>(
            ClassTypeInformation.from(EntityWithExpression.class),
            this.spannerMappingContext,
            this.spannerEntityProcessor);
    ApplicationContext applicationContext = mock(ApplicationContext.class);
    when(applicationContext.getBean("tablePostfix")).thenReturn("; DROP TABLE your_table;");
    when(applicationContext.containsBean("tablePostfix")).thenReturn(true);
    entity.setApplicationContext(applicationContext);
    entity.tableName();
  }

  @Test
  public void testDuplicateEmbeddedColumnName() {
    this.thrown.expect(SpannerDataException.class);
    this.thrown.expectMessage(
        "Two properties resolve to the same column name: "
            + "other in EmbeddedParentDuplicateColumn");
    this.spannerMappingContext.getPersistentEntity(EmbeddedParentDuplicateColumn.class);
  }

  // Embedded key components flatten into one composite Key; the int/Long parts appear
  // as strings because their columns are declared @Column(spannerType = TypeCode.STRING).
  @Test
  public void testEmbeddedParentKeys() {
    GrandParentEmbedded grandParentEmbedded = new GrandParentEmbedded();
    grandParentEmbedded.id = "1";
    ParentEmbedded parentEmbedded = new ParentEmbedded();
    parentEmbedded.grandParentEmbedded = grandParentEmbedded;
    parentEmbedded.id2 = 2;
    parentEmbedded.id3 = 3L;
    ChildEmbedded childEmbedded = new ChildEmbedded();
    childEmbedded.parentEmbedded = parentEmbedded;
    childEmbedded.id4 = "4";
    // intentionally null, which is a supported key component type.
    childEmbedded.id5 = null;
    Key key =
        (Key)
            this.spannerMappingContext
                .getPersistentEntity(ChildEmbedded.class)
                .getIdentifierAccessor(childEmbedded)
                .getIdentifier();
    assertThat(key)
        .isEqualTo(
            Key.newBuilder()
                .append("1")
                .append("2")
                .append("3")
                .append("4")
                .appendObject(null)
                .build());
  }

  @Test
  public void testEmbeddedCollection() {
    this.thrown.expect(SpannerDataException.class);
    this.thrown.expectMessage("Embedded properties cannot be collections:");
    this.thrown.expectMessage(
        "com.google.cloud.spring.data.spanner.core.mapping."
            + "SpannerPersistentEntityImplTests$ChildCollectionEmbedded.parentEmbedded");
    this.spannerMappingContext.getPersistentEntity(ChildCollectionEmbedded.class);
  }

  // columns() reports the flattened embedded columns, not the embedded field itself.
  @Test
  public void testExcludeEmbeddedColumnNames() {
    assertThat(this.spannerMappingContext.getPersistentEntity(ChildEmbedded.class).columns())
        .containsExactlyInAnyOrder("id", "id2", "id3", "id4", "id5");
  }

  // doWithInterleavedProperties must visit exactly the two @Interleaved child lists.
  @Test
  public void doWithChildrenCollectionsTest() {
    PropertyHandler<SpannerPersistentProperty> mockHandler = mock(PropertyHandler.class);
    SpannerPersistentEntity spannerPersistentEntity =
        this.spannerMappingContext.getPersistentEntity(ParentInRelationship.class);
    doAnswer(
            invocation -> {
              String colName = ((SpannerPersistentProperty) invocation.getArgument(0)).getName();
              assertThat(colName.equals("childrenA") || colName.equals("childrenB")).isTrue();
              return null;
            })
        .when(mockHandler)
        .doWithPersistentProperty(any());
    spannerPersistentEntity.doWithInterleavedProperties(mockHandler);
    verify(mockHandler, times(2)).doWithPersistentProperty(any());
  }

  // Interleaved children must share their parent's primary-key column names, in order.
  @Test
  public void testParentChildPkNamesMismatch() {
    this.thrown.expect(SpannerDataException.class);
    this.thrown.expectMessage(
        "The child primary key column (ChildBinRelationship.id) at position 1 does not match that "
            + "of its parent (ParentInRelationshipMismatchedKeyName.idNameDifferentThanChildren).");
    this.spannerMappingContext.getPersistentEntity(ParentInRelationshipMismatchedKeyName.class);
  }

  // isJsonProperty is true only for field types mapped with TypeCode.JSON.
  @Test
  public void testGetJsonPropertyName() {
    SpannerPersistentEntityImpl<EntityWithJsonField> entityWithJsonField =
        (SpannerPersistentEntityImpl<EntityWithJsonField>)
            this.spannerMappingContext.getPersistentEntity(EntityWithJsonField.class);
    assertThat(entityWithJsonField.isJsonProperty(JsonEntity.class)).isTrue();
    assertThat(entityWithJsonField.isJsonProperty(String.class)).isFalse();
    SpannerPersistentEntityImpl<TestEntity> entityWithNoJsonField =
        (SpannerPersistentEntityImpl<TestEntity>)
            this.spannerMappingContext.getPersistentEntity(TestEntity.class);
    assertThat(entityWithNoJsonField.isJsonProperty(String.class)).isFalse();
    assertThat(entityWithNoJsonField.isJsonProperty(long.class)).isFalse();
  }

  // ---------------------------------------------------------------------------
  // Test fixture entities.
  // ---------------------------------------------------------------------------

  private static class ParentInRelationship {
    @PrimaryKey String id;

    @Interleaved List<ChildAinRelationship> childrenA;

    @Interleaved List<ChildBinRelationship> childrenB;
  }

  private static class ChildAinRelationship {
    @PrimaryKey String id;

    @PrimaryKey(keyOrder = 2)
    String id2;
  }

  private static class EmbeddedKeyComponents {
    @PrimaryKey String id;

    @PrimaryKey(keyOrder = 2)
    String id2;
  }

  private static class ChildBinRelationship {
    @Embedded @PrimaryKey EmbeddedKeyComponents embeddedKeyComponents;
  }

  // Parent whose key name deliberately differs from its children's first key column.
  private static class ParentInRelationshipMismatchedKeyName {
    @PrimaryKey String idNameDifferentThanChildren;

    @Interleaved List<ChildBinRelationship> childrenA;
  }

  private static class GrandParentEmbedded {
    @PrimaryKey String id;
  }

  private static class ParentEmbedded {
    @PrimaryKey @Embedded GrandParentEmbedded grandParentEmbedded;

    // This property requires conversion to be stored as a STRING column.
    @PrimaryKey(keyOrder = 2)
    @Column(name = "id2", spannerType = TypeCode.STRING)
    int id2;

    // This property will be stored as a STRING column even though Long is a natively supported
    // type.
    @PrimaryKey(keyOrder = 3)
    @Column(name = "id3", spannerType = TypeCode.STRING)
    Long id3;
  }

  private static class ChildEmbedded {
    @PrimaryKey @Embedded ParentEmbedded parentEmbedded;

    @PrimaryKey(keyOrder = 2)
    String id4;

    @PrimaryKey(keyOrder = 3)
    @Column(spannerType = TypeCode.STRING)
    Long id5;
  }

  // Invalid fixture: @Embedded on a collection, used by testEmbeddedCollection.
  private static class ChildCollectionEmbedded {
    @PrimaryKey @Embedded List<ParentEmbedded> parentEmbedded;

    @PrimaryKey(keyOrder = 2)
    String id4;
  }

  // Invalid fixture: 'other' collides with the embedded child's @Column(name = "other").
  private static class EmbeddedParentDuplicateColumn {
    @PrimaryKey String id;

    String other;

    @Embedded EmbeddedChildDuplicateColumn embeddedChildDuplicateColumn;
  }

  private static class EmbeddedChildDuplicateColumn {
    @Column(name = "other")
    String stuff;
  }

  @Table(name = ";DROP TABLE your_table;")
  private static class EntityBadName {
    @PrimaryKey(keyOrder = 1)
    String id;

    String something;
  }

  @Table(name = "custom_test_table")
  private static class TestEntity {
    @PrimaryKey(keyOrder = 1)
    String id;

    @Column(name = "custom_col")
    String something;

    @NotMapped String notMapped;
  }

  private static class EntityNoCustomName {
    @PrimaryKey(keyOrder = 1)
    String id;

    String something;
  }

  @Table
  private static class EntityEmptyCustomName {
    @PrimaryKey(keyOrder = 1)
    String id;

    String something;
  }

  @Table(name = "#{'table_'.concat(tablePostfix)}")
  private static class EntityWithExpression {
    @PrimaryKey(keyOrder = 1)
    String id;

    String something;
  }

  private static class EntityWithDuplicatePrimaryKeyOrder {
    @PrimaryKey(keyOrder = 1)
    String id;

    @PrimaryKey(keyOrder = 1)
    String id2;
  }

  private static class EntityWithWronglyOrderedKeys {
    @PrimaryKey(keyOrder = 1)
    String id;

    @PrimaryKey(keyOrder = 3)
    String id2;
  }

  private static class EntityWithNoId {
    String id;
  }

  private static class MultiIdsEntity {
    @PrimaryKey(keyOrder = 1)
    String id;

    @PrimaryKey(keyOrder = 2)
    Long id2;

    @PrimaryKey(keyOrder = 3)
    Double id3;
  }

  private static class EntityWithJsonField {
    @PrimaryKey String id;

    @Column(spannerType = TypeCode.JSON)
    JsonEntity jsonField;
  }

  private static class JsonEntity {}
}
package com.linkedin.databus.client.request; /* * * Copyright 2013 LinkedIn Corp. All rights reserved * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * */ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; import java.util.Map.Entry; import java.util.concurrent.ExecutorService; import org.apache.log4j.Logger; import com.linkedin.databus.client.DatabusHttpClientImpl; import com.linkedin.databus.client.DatabusSourcesConnection; import com.linkedin.databus.client.DbusPartitionInfoImpl; import com.linkedin.databus.client.monitoring.RegistrationStatsInfo; import com.linkedin.databus.client.pub.DatabusRegistration; import com.linkedin.databus.client.pub.DatabusV3MultiPartitionRegistration; import com.linkedin.databus.client.pub.DatabusV3Registration; import com.linkedin.databus.client.pub.DbusClusterInfo; import com.linkedin.databus.client.pub.DbusPartitionInfo; import com.linkedin.databus.client.pub.RegistrationId; import com.linkedin.databus.client.pub.RegistrationState; import com.linkedin.databus.client.registration.DatabusMultiPartitionRegistration; import com.linkedin.databus.client.registration.DatabusV2ClusterRegistrationImpl; import com.linkedin.databus.core.DatabusComponentStatus; import com.linkedin.databus.core.data_model.DatabusSubscription; import com.linkedin.databus.core.data_model.PhysicalPartition; import 
com.linkedin.databus2.core.container.request.AbstractStatsRequestProcessor; import com.linkedin.databus2.core.container.request.DatabusRequest; import com.linkedin.databus2.core.container.request.InvalidRequestParamValueException; import com.linkedin.databus2.core.container.request.RequestProcessingException; import com.linkedin.databus2.core.filter.DbusKeyCompositeFilterConfig; /** * * Request processor to support REST API for * * (a) Listing the registration ids (both V2/V3 top and partition level registrations). * (b) Inspecting the status of a given registration by id (c) Listing all the client * clusters (both V2 and V3) registered to the client instance. (d) List all the active * partitions for a given V2/V3 client cluster. (e) Pause/Resume a given V2/V3 * registration (both top-level and partition (child) level). (f) Pause/Resume all the V2 * and V3 registrations (both top-level and partition (child) level) (g) List all the * MPRegistrations (V3). * * Please note that Top-level registrations are those that were created as a result of one * of "registerXXX()" calls on databus-client. In the case of multi-partition * registrations (like MPRegistration, V2/V3 CLB), only the parent registration is * considered the top-level registration. Per-partition (child) registrations which were * created as part of partition migration are NOT top-level registrations. 
*/ public class ClientStateRequestProcessor extends AbstractStatsRequestProcessor { public static final String MODULE = ClientStateRequestProcessor.class.getName(); public static final Logger LOG = Logger.getLogger(MODULE); public static final String COMMAND_NAME = "clientState"; private final DatabusHttpClientImpl _client; /** All first-level registrations listed **/ private final static String REGISTRATIONS_KEY = "registrations"; /** First-level registration info listed **/ private final static String REGISTRATION_KEY_PREFIX = "registration/"; /** All Client Clusters supported by this client instance **/ private final static String CLIENT_CLUSTERS_KEY = "clientClusters"; /** Partitions supported by this client cluster with their registrations **/ private final static String CLIENT_CLUSTER_KEY = "clientPartitions/"; /** Registration info supported by this client cluster with their registrations **/ private final static String CLIENT_CLUSTER_PARTITION_REG_KEY = "clientPartition/"; /** Multi Partition Registrations active in this client instance **/ private final static String MP_REGISTRATIONS_KEY = "mpRegistrations"; /** Pause all registrations active in this client instance **/ private final static String PAUSE_ALL_REGISTRATIONS = "registrations/pause"; /** Pause registration identified by the registration id **/ private final static String PAUSE_REGISTRATION = "registration/pause"; /** Resume all registrations paused in this client instance **/ private final static String RESUME_ALL_REGISTRATIONS = "registrations/resume"; /** Resume registration identified by the registration id **/ private final static String RESUME_REGISTRATION = "registration/resume"; public ClientStateRequestProcessor(ExecutorService executorService, DatabusHttpClientImpl client) { super(COMMAND_NAME, executorService); _client = client; } @Override protected boolean doProcess(String category, DatabusRequest request) throws IOException, RequestProcessingException { boolean success = true; if 
(category.equals(REGISTRATIONS_KEY)) { processRegistrations(request); } else if (category.startsWith(PAUSE_REGISTRATION)) { pauseResumeRegistration(request, true); } else if (category.startsWith(RESUME_REGISTRATION)) { pauseResumeRegistration(request, false); } else if (category.startsWith(REGISTRATION_KEY_PREFIX)) { processRegistrationInfo(request); } else if (category.startsWith(CLIENT_CLUSTERS_KEY)) { processClusters(request); } else if (category.startsWith(CLIENT_CLUSTER_KEY)) { processCluster(request); } else if (category.startsWith(CLIENT_CLUSTER_PARTITION_REG_KEY)) { processPartition(request); } else if (category.equals(MP_REGISTRATIONS_KEY)) { processMPRegistrations(request); } else if (category.equals(PAUSE_ALL_REGISTRATIONS)) { pauseAllRegistrations(request); } else if (category.equals(RESUME_ALL_REGISTRATIONS)) { resumeAllRegistrations(request); } else { success = false; } return success; } /** * Exposes the mapping between a mpRegistration -> Set of individual registrations * */ private void processMPRegistrations(DatabusRequest request) throws IOException, RequestProcessingException { Map<RegistrationId, DatabusV3Registration> registrationIdMap = _client.getRegistrationIdMap(); if (null == registrationIdMap) throw new InvalidRequestParamValueException(request.getName(), REGISTRATIONS_KEY, "Present only for Databus V3 clients"); Map<String, List<String>> ridList = new TreeMap<String, List<String>>(); for (Map.Entry<RegistrationId, DatabusV3Registration> entry : registrationIdMap.entrySet()) { DatabusV3Registration reg = entry.getValue(); if (reg instanceof DatabusV3MultiPartitionRegistration) { Collection<DatabusV3Registration> dvrList = ((DatabusV3MultiPartitionRegistration) reg).getPartionRegs().values(); List<String> mpRegList = new ArrayList<String>(); for (DatabusV3Registration dvr : dvrList) { mpRegList.add(dvr.getRegistrationId().getId()); } ridList.put(entry.getKey().getId(), mpRegList); } } writeJsonObjectToResponse(ridList, request); return; } 
/** * Provides an individual registrations details. The individual registration can be * either that of V2/V3 and top-level or child. Top-level registrations are those that * were created as a result of one of "registerXXX()" calls on databus-client. In the * case of multi-partition registrations (like MPRegistration, V2/V3 CLB), only the * parent registration is considered the top-level registration. Per-partition (child) * registrations which were created as part of partition migration are NOT top-level * registrations. The output format can be different depending on whether it is a V2/V3 * as we are dumping the entire Registration in the case of V2. In the case of V3, we * create an intermediate objects. These are legacy formats which when changed could * cause the integ-tests to fail. * * @param request * DatabusRequest corresponding to the REST call. * @throws IOException * if unable to write to output channel. * @throws RequestProcessingException * when registration could not be located. */ private void processRegistrationInfo(DatabusRequest request) throws IOException, RequestProcessingException { boolean found = true; // V2 Registration lookup first RegistrationStatsInfo regStatsInfo = null; try { DatabusRegistration r = findV2Registration(request, REGISTRATION_KEY_PREFIX); writeJsonObjectToResponse(r, request); } catch (RequestProcessingException ex) { found = false; } // V3 Registration lookup if not found if (!found) { DatabusV3Registration reg = findV3Registration(request, REGISTRATION_KEY_PREFIX); // if // reg // is // null, // the // callee // throws // an // exception. DatabusSourcesConnection sourcesConn = _client.getDatabusSourcesConnection(reg.getRegistrationId().getId()); regStatsInfo = new RegistrationStatsInfo(reg, sourcesConn); writeJsonObjectToResponse(regStatsInfo, request); } } /** * Displays all top-level registrations registered to the client (both V2 and V3). 
* Top-level registrations are those that were created as a result of one of * "registerXXX()" calls on databus-client. In the case of multi-partition registrations * (like MPRegistration, V2/V3 CLB), only the parent registration is considered the * top-level registration. Per-partition (child) registrations which were created as * part of partition migration are NOT top-level registrations. * * @param request * DatabusRequest corresponding to the REST API. * @throws IOException * when unable to write to ourput channel */ private void processRegistrations(DatabusRequest request) throws IOException { Map<String, Collection<DatabusSubscription>> regIds = new TreeMap<String, Collection<DatabusSubscription>>(); // V2 Registration Collection<RegInfo> regs = getAllTopLevelV2Registrations(); if (null != regs) { for (RegInfo r : regs) { regIds.put(r.getRegId().getId(), r.getSubs()); } } Map<RegistrationId, DatabusV3Registration> registrationIdMap = _client.getRegistrationIdMap(); // V3 Registration if (null != registrationIdMap) { for (Map.Entry<RegistrationId, DatabusV3Registration> entry : registrationIdMap.entrySet()) { DatabusV3Registration reg = entry.getValue(); List<DatabusSubscription> dsl = reg.getSubscriptions(); regIds.put(entry.getKey().getId(), dsl); } } writeJsonObjectToResponse(regIds, request); } /** * * Proved list of V2 and V3 Client clusters which are used (registered). * * @param request * DatabusRequest corresponding to the REST call. * @throws IOException * when unable to write to output channel. */ private void processClusters(DatabusRequest request) throws IOException { Map<RegistrationId, DbusClusterInfo> clusters = _client.getAllClientClusters(); writeJsonObjectToResponse(clusters.values(), request); } /** * Provide the list of partitions corresponding to the V2/V3 client cluster. * * @param request * DatabusRequest corresponding to the REST call. * @throws IOException * when unable to write to output channel. 
* @throws RequestProcessingException * when cluster not found. */ private void processCluster(DatabusRequest request) throws IOException, RequestProcessingException { String category = request.getParams().getProperty(DatabusRequest.PATH_PARAM_NAME); String clusterName = category.substring(CLIENT_CLUSTER_KEY.length()); List<PartitionInfo> clusters = new ArrayList<PartitionInfo>(); RequestProcessingException rEx = null; Collection<PartitionInfo> v2Clusters = null; // Check as if this is V2 Cluster first boolean found = true; try { v2Clusters = getV2ClusterPartitions(clusterName); clusters.addAll(v2Clusters); } catch (RequestProcessingException ex) { found = false; rEx = ex; } // Try as V3 cluster if it is not V2. if (!found) { Collection<PartitionInfo> v3Clusters = null; try { v3Clusters = getV3ClusterPartitions(clusterName); clusters.addAll(v3Clusters); found = true; } catch (RequestProcessingException ex) { found = false; rEx = ex; } } if (!found) throw rEx; writeJsonObjectToResponse(clusters, request); } /** * Provide a partition information belonging to a V2/V3 client cluster and hosted in * this client instance * * @param request * DatabusRequest corresponding to the REST call. * @throws IOException * when unable to write to output channel. 
* @throws RequestProcessingException * when cluster not found or when partition is not hosted in this instance */ private void processPartition(DatabusRequest request) throws IOException, RequestProcessingException { String category = request.getParams().getProperty(DatabusRequest.PATH_PARAM_NAME); String clusterPartitionName = category.substring(CLIENT_CLUSTER_PARTITION_REG_KEY.length()); /** * API: curl * http://<HOST>:<PORT>/clientState/clientPartition/<CLUSTER_NAME>/<PARTITION> curl * http://<HOST>:<PORT>/clientState/clientPartition/<CLUSTER_NAME>:<PARTITION> */ String[] toks = clusterPartitionName.split("[:/]"); if (toks.length != 2) throw new RequestProcessingException("Cluster and partition info are expected to be in pattern = <cluster>[/:]<partition> but was " + clusterPartitionName); RegInfo reg = null; boolean found = true; // Try as a V2 Partition try { reg = getV2PartitionRegistration(toks[0], new Long(toks[1])); } catch (RequestProcessingException ex) { found = false; } // If not found, try as V3 if (!found) { reg = getV3PartitionRegistration(toks[0], new Long(toks[1])); } writeJsonObjectToResponse(reg, request); } private DatabusV2ClusterRegistrationImpl getV2ClusterRegistration(String clusterName) throws RequestProcessingException { Collection<DatabusMultiPartitionRegistration> regs = _client.getAllClientClusterRegistrations(); for (DatabusMultiPartitionRegistration reg : regs) { if (reg instanceof DatabusV2ClusterRegistrationImpl) { DatabusV2ClusterRegistrationImpl r = (DatabusV2ClusterRegistrationImpl) reg; if (clusterName.equals(r.getClusterInfo().getName())) return r; } } throw new RequestProcessingException("No Registration found for cluster (" + clusterName + ") !!"); } private DatabusV3MultiPartitionRegistration getV3ClusterRegistration(String clusterName) throws RequestProcessingException { // There is a one-to-one mapping between clusterName to // DatabusV3MultiPartitionRegistration Map<RegistrationId, DbusClusterInfo> clusterMap = 
        _client.getAllClientClusters();
    for (Entry<RegistrationId, DbusClusterInfo> e : clusterMap.entrySet()) {
      // Cluster names are matched case-insensitively here (unlike the V2 lookup,
      // which is case-sensitive) — NOTE(review): confirm this asymmetry is intended.
      if (clusterName.equalsIgnoreCase(e.getValue().getName())) {
        DatabusV3Registration reg = _client.getRegistration(e.getKey());
        if (reg instanceof DatabusV3MultiPartitionRegistration) {
          return (DatabusV3MultiPartitionRegistration) reg;
        }
        // Name matched but the registration is not multi-partition: stop searching,
        // since the cluster-name -> registration mapping is one-to-one.
        break;
      }
    }
    throw new RequestProcessingException("No Registration found for cluster (" + clusterName + ") !!");
  }

  /**
   * Pause or resume a V2 or V3 registration. The registration can be a top-level or
   * child-level registration. Top-level registrations are those that were created as a
   * result of one of "registerXXX()" calls on databus-client. In the case of
   * multi-partition registrations (like MPRegistration, V2/V3 CLB), only the parent
   * registration is considered the top-level registration. Per-partition (child)
   * registrations which were created as part of partition migration are NOT top-level
   * registrations.
   *
   * @param request
   *          Databus request corresponding to the REST call.
   * @param doPause
   *          true if wanted to pause, false if to be resumed
   * @throws IOException
   *           if unable to write output to channel
   * @throws RequestProcessingException
   *           when registration could not be found.
*/ private void pauseResumeRegistration(DatabusRequest request, boolean doPause) throws IOException, RequestProcessingException { DatabusRegistration r = null; DatabusV3Registration r2 = null; boolean found = true; boolean isRunning = false; boolean isPaused = false; boolean isSuspended = false; RegistrationId regId = null; RequestProcessingException rEx = null; RegStatePair regStatePair = null; try { r = findV2Registration(request, PAUSE_REGISTRATION); isRunning = r.getState().isRunning(); isPaused = (r.getState() == DatabusRegistration.RegistrationState.PAUSED); isSuspended = (r.getState() == DatabusRegistration.RegistrationState.SUSPENDED_ON_ERROR); regId = r.getRegistrationId(); } catch (RequestProcessingException ex) { found = false; rEx = ex; } if (!found) { try { r2 = findV3Registration(request, PAUSE_REGISTRATION); found = true; isRunning = r2.getState().isRunning(); isPaused = (r2.getState() == RegistrationState.PAUSED); isSuspended = (r2.getState() == RegistrationState.SUSPENDED_ON_ERROR); regId = r.getRegistrationId(); } catch (RequestProcessingException ex) { found = false; rEx = ex; } } if (!found) throw rEx; LOG.info("REST call to pause registration : " + regId); if (isRunning) { if (doPause) { if (!isPaused) { if (null != r) { r.pause(); regStatePair = new RegStatePair(r.getState(), r.getRegistrationId()); } else { r2.pause(); regStatePair = new RegStatePair(r2.getState().name(), r2.getRegistrationId()); } } } else { if (isPaused || isSuspended) { if (null != r) { r.resume(); regStatePair = new RegStatePair(r.getState(), r.getRegistrationId()); } else { r2.resume(); regStatePair = new RegStatePair(r2.getState().name(), r2.getRegistrationId()); } } } } writeJsonObjectToResponse(regStatePair, request); } /** * Pause all registrations (both V2 and V3 in this client instance) which are in running * state. * * @param request * DatabusRequest corresponding to the REST call. * @throws IOException * when unable to write the output. 
   */
  private void pauseAllRegistrations(DatabusRequest request) throws IOException
  {
    LOG.info("REST call to pause all registrations");

    // Pause the top-level V2 registrations; each one propagates the pause to any
    // child registrations it aggregates.
    Collection<DatabusRegistration> regs = _client.getAllRegistrations();
    if (null != regs) {
      for (DatabusRegistration r : regs) {
        if (r.getState().isRunning()) {
          if (r.getState() != DatabusRegistration.RegistrationState.PAUSED)
            r.pause();
        }
      }
    }

    // Pause the top-level V3 registrations the same way.
    Map<RegistrationId, DatabusV3Registration> regMap = _client.getRegistrationIdMap();
    Collection<RegInfo> topLevelRegs = getAllTopLevelV3Registrations();

    /**
     * Important Note: V2 and V3 store registrations differently in the client's global
     * data-structure: V2 keeps only top-level registrations (DatabusHttpClientImpl.regList),
     * while V3 keeps ALL registrations so each can react to relay external-view changes.
     * Hence the V3 path filters down to top-level registrations explicitly
     * (getAllTopLevelV3Registrations) before acting on them.
     */
    if ((null != regMap) && (null != topLevelRegs)) {
      for (RegInfo reg : topLevelRegs) {
        DatabusV3Registration r = regMap.get(reg.getRegId());
        if (r.getState().isRunning()) {
          if (r.getState() != RegistrationState.PAUSED)
            r.pause();
        }
      }
    }
    writeJsonObjectToResponse(getAllTopLevelRegStates(), request);
  }

  /**
   * Resume all registrations paused or suspended (both V2 and V3 in this client instance)
   *
   * @param request
   *          DatabusRequest corresponding to the REST call.
   * @throws IOException
   *           when unable to write the output.
   */
  private void resumeAllRegistrations(DatabusRequest request) throws IOException
  {
    LOG.info("REST call to resume all registrations");

    // Resume the top-level V2 registrations that are paused or suspended-on-error;
    // each one propagates the resume to any child registrations it aggregates.
    Collection<DatabusRegistration> regs = _client.getAllRegistrations();
    if (null != regs) {
      for (DatabusRegistration r : regs) {
        if (r.getState().isRunning()) {
          if ((r.getState() == DatabusRegistration.RegistrationState.PAUSED)
              || (r.getState() == DatabusRegistration.RegistrationState.SUSPENDED_ON_ERROR))
            r.resume();
        }
      }
    }

    // Resume the top-level V3 registrations the same way.
    Map<RegistrationId, DatabusV3Registration> regMap = _client.getRegistrationIdMap();
    Collection<RegInfo> topLevelRegs = getAllTopLevelV3Registrations();

    /**
     * Important Note: as above, the V3 global registration map holds ALL registrations
     * (so each can act on relay external view change), not just top-level ones.
     This can be refactored in the future by moving the relay-external view change
     handling into the registration implementation (reducing the complexity in
     ClientImpl); the V2 implementation preserves the hierarchy instead.
     */
    if ((null != regMap) && (null != topLevelRegs)) {
      for (RegInfo reg : topLevelRegs) {
        DatabusV3Registration r = regMap.get(reg.getRegId());
        if (r.getState().isRunning()) {
          if ((r.getState() == RegistrationState.PAUSED)
              || (r.getState() == RegistrationState.SUSPENDED_ON_ERROR))
            r.resume();
        }
      }
    }
    writeJsonObjectToResponse(getAllTopLevelRegStates(), request);
  }

  /**
   * Generate regStatePair for all the top-level registrations (both V2 and V3).
   *
   * @return state/id pairs for every top-level registration.
   */
  private Collection<RegStatePair> getAllTopLevelRegStates()
  {
    List<RegStatePair> regList = new ArrayList<RegStatePair>();
    Collection<RegInfo> regs = getAllTopLevelRegistrations();
    for (RegInfo reg : regs) {
      regList.add(new RegStatePair(reg.getState(), reg.getRegId()));
    }
    return regList;
  }

  /**
   * Returns all the top-level registrations (both V2 and V3). Top-level registrations are
   * those that were created as a result of one of "registerXXX()" calls on
   * databus-client. In the case of multi-partition registrations (like MPRegistration,
   * V2/V3 CLB), only the parent registration is considered the top-level registration.
   * Per-partition (child) registrations which were created as part of partition migration
   * are NOT top-level registrations.
   *
   * @return collection of top-level registrations (V2/V3)
   */
  private Collection<RegInfo> getAllTopLevelRegistrations()
  {
    List<RegInfo> regList = new ArrayList<RegInfo>();
    regList.addAll(getAllTopLevelV2Registrations());
    regList.addAll(getAllTopLevelV3Registrations());
    return regList;
  }

  /**
   * Returns all the top-level V3 registrations. Top-level registrations are those that
   * were created as a result of one of "registerXXX()" calls on databus-client.
In the * case of multi-partition registrations (like MPRegistration, V3 CLB), only the parent * registration is considered the top-level registration. Per-partition (child) * registrations which were created as part of partition migration are NOT top-level * registrations. * * @return collection of top-level registrations (V3) */ private Collection<RegInfo> getAllTopLevelV3Registrations() { /** * Important Note: There is an important implementation difference on which * registrations are stored in the global registration data-structure maintained by * the client (DatabusHttp[V3]ClientImpls) between V2 and V3. * * 1. In the case of V2, only top-level registrations are stored in the global * data-structure (DatabusHttpClientImpl.regList 2. In the case of V3, all * registrations are stored in the global data-structure. * * In the case of V3, this is needed so that all registrations can act on the relay * external view change. This can be refactored in the future by moving the * relay-external view change to registration impl ( reduce the complexity in * ClientImpl ). The V2 implementation did not have this logic and was following a * more intuitive structure of preserving the hierarchy. */ Map<RegistrationId, RegInfo> regListMap = new HashMap<RegistrationId, RegInfo>(); /** * The _client.getRegistrationIdMap() has all registrations in one place. Top-Level * Registrations = Only those registrations whose getParent() == null. */ Map<RegistrationId, DatabusV3Registration> regMap = _client.getRegistrationIdMap(); for (Entry<RegistrationId, DatabusV3Registration> e : regMap.entrySet()) { RegInfo regInfo = null; DatabusV3Registration r = e.getValue(); // If not top-level, skip if (null != r.getParentRegistration()) { continue; } Map<DbusPartitionInfo, RegInfo> childR = null; if (r instanceof DatabusV3MultiPartitionRegistration) { // ass the children regs to parent. 
Map<PhysicalPartition, DatabusV3Registration> childRegs = ((DatabusV3MultiPartitionRegistration) r).getPartionRegs(); childR = new HashMap<DbusPartitionInfo, RegInfo>(); for (Entry<PhysicalPartition, DatabusV3Registration> e2 : childRegs.entrySet()) { childR.put(new DbusPartitionInfoImpl(e2.getKey().getId()), new RegInfo(e.getValue().getState().name(), e.getValue().getRegistrationId(), e.getValue().getStatus(), null, e.getValue().getSubscriptions())); } } regInfo = new RegInfo(r.getState().name(), r.getRegistrationId(), r.getStatus(), null, r.getSubscriptions(), true, childR); regListMap.put(e.getKey(), regInfo); } return regListMap.values(); } /** * Returns all the top-level V2 registrations. Top-level registrations are those that * were created as a result of one of "registerXXX()" calls on databus-client. In the * case of multi-partition registrations (like V2 CLB), only the parent registration is * considered the top-level registration. Per-partition (child) registrations which were * created as part of partition migration are NOT top-level registrations. 
* * @return collection of top-level registrations (V2) */ private Collection<RegInfo> getAllTopLevelV2Registrations() { List<RegInfo> regList = new ArrayList<RegInfo>(); Collection<DatabusRegistration> regs = _client.getAllRegistrations(); for (DatabusRegistration r : regs) { RegInfo regInfo = null; if (r instanceof DatabusMultiPartitionRegistration) { Map<DbusPartitionInfo, DatabusRegistration> childRegs = ((DatabusMultiPartitionRegistration) r).getPartitionRegs(); Map<DbusPartitionInfo, RegInfo> childR = new HashMap<DbusPartitionInfo, RegInfo>(); for (Entry<DbusPartitionInfo, DatabusRegistration> e : childRegs.entrySet()) { childR.put(e.getKey(), new RegInfo(e.getValue().getState().name(), e.getValue().getRegistrationId(), e.getValue().getStatus(), e.getValue().getFilterConfig(), e.getValue().getSubscriptions())); } regInfo = new RegInfo(r.getState().name(), r.getRegistrationId(), r.getStatus(), r.getFilterConfig(), r.getSubscriptions(), true, childR); } else { regInfo = new RegInfo(r.getState().name(), r.getRegistrationId(), r.getStatus(), r.getFilterConfig(), r.getSubscriptions()); } regList.add(regInfo); } return regList; } /** * Get the list of partitions hosted by this client for the V2 cluster. * * @param cluster * V2 CLuster for which we need to find out the partitions. * @return * @throws RequestProcessingException * when unable to find the cluster. 
*/ private Collection<PartitionInfo> getV2ClusterPartitions(String cluster) throws RequestProcessingException { DatabusV2ClusterRegistrationImpl reg = getV2ClusterRegistration(cluster); List<PartitionInfo> partitions = new ArrayList<PartitionInfo>(); Map<DbusPartitionInfo, DatabusRegistration> regMap = reg.getPartitionRegs(); for (Entry<DbusPartitionInfo, DatabusRegistration> e : regMap.entrySet()) { PartitionInfo p = new PartitionInfo(e.getKey().getPartitionId(), e.getValue().getRegistrationId()); partitions.add(p); } return partitions; } /** * Get the list of partitions hosted by this client for the V3 cluster. * * @param cluster * V3 CLuster for which we need to find out the partitions. * @return * @throws RequestProcessingException * when unable to find the cluster. */ private Collection<PartitionInfo> getV3ClusterPartitions(String cluster) throws RequestProcessingException { DatabusV3MultiPartitionRegistration reg = getV3ClusterRegistration(cluster); List<PartitionInfo> partitions = new ArrayList<PartitionInfo>(); Map<PhysicalPartition, DatabusV3Registration> regMap = reg.getPartionRegs(); for (Entry<PhysicalPartition, DatabusV3Registration> e : regMap.entrySet()) { PartitionInfo p = new PartitionInfo(e.getKey().getId(), e.getValue().getRegistrationId()); partitions.add(p); } return partitions; } /** * Helper method to get partition registration information for a given V2 Cluster * partition * * @param cluster * V2 Cluster * @param partition * Partition in the cluster. * @return * @throws RequestProcessingException * When cluster or partition is not hosted in this instance. 
*/ private RegInfo getV2PartitionRegistration(String cluster, long partition) throws RequestProcessingException { DatabusV2ClusterRegistrationImpl reg = getV2ClusterRegistration(cluster); DbusPartitionInfo p = new DbusPartitionInfoImpl(partition); DatabusRegistration r = reg.getPartitionRegs().get(p); if (null == r) throw new RequestProcessingException("Partition(" + partition + ") for cluster (" + cluster + ") not found !!"); return new RegInfo(r.getState().name(), r.getRegistrationId(), r.getStatus(), r.getFilterConfig(), r.getSubscriptions()); } /** * Helper method to get partition registration information for a given V3 Cluster * partition * * @param cluster * V3 Cluster * @param partition * Partition in the cluster. * @return * @throws RequestProcessingException * When cluster or partition is not hosted in this instance. */ private RegInfo getV3PartitionRegistration(String cluster, long partition) throws RequestProcessingException { DatabusV3MultiPartitionRegistration reg = getV3ClusterRegistration(cluster); for (Entry<PhysicalPartition, DatabusV3Registration> e : reg.getPartionRegs() .entrySet()) { if (partition == e.getKey().getId()) { DatabusV3Registration r = e.getValue(); return new RegInfo(r.getState().name(), r.getRegistrationId(), r.getStatus(), null, r.getSubscriptions()); } } throw new RequestProcessingException("Partition(" + partition + ") for cluster (" + cluster + ") not found !!"); } /** * Helper method to locate a databus V2 registration by its registration id. This method * can locate both top-level (registered by one of _dbusClient.registerXXX()) and * individual-partition (child) registration that are aggregated inside a top-level * MultiPartition registration. * * Please note that this can traverse the registration tree which is 1 level deep. In * other words, it will not work when we have MultiPartition registrations aggregated * inside another MultiPartition registrations. 
   *
   * @param regId
   *          Registration Id to be located
   * @param request
   *          Databus Request corresponding to the REST call.
   * @return the matching registration (top-level or child).
   * @throws RequestProcessingException
   *           when the registration is not found.
   */
  private DatabusRegistration findV2Registration(DatabusRequest request, String prefix)
      throws RequestProcessingException
  {
    // The registration id is the remainder of the request path after the prefix.
    String category = request.getParams().getProperty(DatabusRequest.PATH_PARAM_NAME);
    String registrationIdStr = category.substring(prefix.length());
    RegistrationId regId = new RegistrationId(registrationIdStr);

    Collection<DatabusRegistration> regs = _client.getAllRegistrations();
    if (null != regs) {
      for (DatabusRegistration r : regs) {
        if (regId.equals(r.getRegistrationId())) {
          return r;
        }

        /**
         * Important Note: unlike V3 (whose global map holds ALL registrations), the V2
         * client global list holds only top-level registrations. So to locate a child
         * registration we must descend one level into multi-partition parents here.
         */
        if (r instanceof DatabusMultiPartitionRegistration) {
          Map<DbusPartitionInfo, DatabusRegistration> childRegs =
              ((DatabusMultiPartitionRegistration) r).getPartitionRegs();
          for (Entry<DbusPartitionInfo, DatabusRegistration> e : childRegs.entrySet()) {
            if (regId.equals(e.getValue().getRegistrationId())) {
              return e.getValue();
            }
          }
        }
      }
    }
    throw new RequestProcessingException("Unable to find registration (" + regId + ") ");
  }

  /**
   * Helper method to locate a databus V3 registration by its registration id. This method
   * can locate both top-level (registered by one of _dbusClient.registerXXX()) and
   * individual-partition (child) registration that are aggregated inside a top-level
   * MultiPartition registration.
   *
   * Please note that this can traverse the registration tree which is 1 level deep. In
   * other words, it will not work when we have MultiPartition registrations aggregated
   * inside another MultiPartition registrations.
   *
   * @param regId
   *          Registration Id to be located
   * @param request
   *          Databus Request corresponding to the REST call.
   * @return the matching registration; never null (throws instead).
   * @throws RequestProcessingException
   *           when the registration is not found.
   */
  private DatabusV3Registration findV3Registration(RegistrationId regId, DatabusRequest request)
      throws RequestProcessingException
  {
    Map<RegistrationId, DatabusV3Registration> regIdMap = _client.getRegistrationIdMap();
    if (null == regIdMap) {
      // Only V3 clients maintain the registration-id map.
      throw new InvalidRequestParamValueException(request.getName(),
          REGISTRATION_KEY_PREFIX, "No registrations available !! ");
    }

    /**
     * Important Note: the V3 global map holds ALL registrations (top-level and child),
     * so a flat scan of the values is sufficient — no tree traversal needed here.
     */
    for (DatabusV3Registration r : regIdMap.values()) {
      if (regId.equals(r.getRegistrationId())) {
        return r;
      }
    }
    throw new InvalidRequestParamValueException(request.getName(), REGISTRATION_KEY_PREFIX,
        "Registration with id " + regId + " not present !!");
  }

  /**
   * Convenience overload: extracts the registration id from the request path (the part
   * after {@code prefix}) and delegates to {@link #findV3Registration(RegistrationId, DatabusRequest)}.
   */
  private DatabusV3Registration findV3Registration(DatabusRequest request, String prefix)
      throws RequestProcessingException
  {
    String category = request.getParams().getProperty(DatabusRequest.PATH_PARAM_NAME);
    String regIdStr = category.substring(prefix.length());
    RegistrationId regId = new RegistrationId(regIdStr);
    return findV3Registration(regId, request);
  }

  /** JSON-serializable pair of (partition id, registration id) for cluster listings. */
  private static class PartitionInfo
  {
    private final long partition;
    private final RegistrationId regId;

    public long getPartition()
    {
      return partition;
    }

    public RegistrationId getRegId()
    {
      return regId;
    }

    public PartitionInfo(long partition, RegistrationId regId)
    {
      super();
      this.partition = partition;
      this.regId = regId;
    }
  }

  /** JSON-serializable pair of (registration state name, registration id). */
  private static class RegStatePair
  {
    private final String _state;
    private final RegistrationId _regId;

    public String getState()
    {
      return _state;
    }

    public RegistrationId getRegId()
    {
      return _regId;
    }

    public RegStatePair(DatabusRegistration.RegistrationState state, RegistrationId regId)
    {
      _regId = regId;
      _state = state.name();
    }

    public RegStatePair(String state, RegistrationId regId)
    {
      _regId = regId;
      _state = state;
    }
  }

  /**
   * JSON-serializable snapshot of a registration: state, id, status, filter,
   * subscriptions, and — for multi-partition parents — the child registrations.
   */
  private static class RegInfo
  {
    private final String state;
    private final RegistrationId regId;
    private final String status;
    private final DbusKeyCompositeFilterConfig filter;
    private final Collection<DatabusSubscription> subs;
    private final boolean isMultiPartition;
    private final Map<DbusPartitionInfo, RegInfo> childRegistrations;

    public String getState()
    {
      return state;
    }

    public RegistrationId getRegId()
    {
      return regId;
    }

    public String getStatus()
    {
      return status;
    }

    public DbusKeyCompositeFilterConfig getFilter()
    {
      return filter;
    }

    public Collection<DatabusSubscription> getSubs()
    {
      return subs;
    }

    public boolean isMultiPartition()
    {
      return isMultiPartition;
    }

    public Map<DbusPartitionInfo, RegInfo> getChildRegistrations()
    {
      return childRegistrations;
    }

    public RegInfo(String state, RegistrationId regId, DatabusComponentStatus status,
                   DbusKeyCompositeFilterConfig filter, Collection<DatabusSubscription> subs)
    {
      this(state, regId, status, filter, subs, false, null);
    }

    public RegInfo(String state, RegistrationId regId, DatabusComponentStatus status,
                   DbusKeyCompositeFilterConfig filter, Collection<DatabusSubscription> subs,
                   boolean isMultiPartition, Map<DbusPartitionInfo, RegInfo> childRegistrations)
    {
      super();
      this.state = state;
      this.regId = regId;
      // NOTE(review): NPEs if status is null — callers always pass getStatus(); confirm
      // that registration status can never be null.
      this.status = status.toString();
      this.filter = filter;
      this.subs = subs;
      this.isMultiPartition = isMultiPartition;
      this.childRegistrations = childRegistrations;
    }
  }
}
/* * Copyright (C) 2017 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.android.exoplayer2.source.ads; import android.net.Uri; import android.os.Handler; import android.os.Looper; import androidx.annotation.IntDef; import androidx.annotation.Nullable; import com.google.android.exoplayer2.C; import com.google.android.exoplayer2.Timeline; import com.google.android.exoplayer2.source.CompositeMediaSource; import com.google.android.exoplayer2.source.DeferredMediaPeriod; import com.google.android.exoplayer2.source.MediaPeriod; import com.google.android.exoplayer2.source.MediaSource; import com.google.android.exoplayer2.source.MediaSource.MediaPeriodId; import com.google.android.exoplayer2.source.MediaSourceEventListener; import com.google.android.exoplayer2.source.MediaSourceEventListener.LoadEventInfo; import com.google.android.exoplayer2.source.MediaSourceEventListener.MediaLoadData; import com.google.android.exoplayer2.source.ProgressiveMediaSource; import com.google.android.exoplayer2.upstream.Allocator; import com.google.android.exoplayer2.upstream.DataSource; import com.google.android.exoplayer2.upstream.DataSpec; import com.google.android.exoplayer2.upstream.TransferListener; import com.google.android.exoplayer2.util.Assertions; import java.io.IOException; import java.lang.annotation.Documented; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.util.ArrayList; import 
java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * A {@link MediaSource} that inserts ads linearly with a provided content media source. This source
 * cannot be used as a child source in a composition. It must be the top-level source used to
 * prepare the player.
 */
public final class AdsMediaSource extends CompositeMediaSource<MediaPeriodId> {

  /** Factory for creating {@link MediaSource}s to play ad media. */
  public interface MediaSourceFactory {

    /**
     * Creates a new {@link MediaSource} for loading the ad media with the specified {@code uri}.
     *
     * @param uri The URI of the media or manifest to play.
     * @return The new media source.
     */
    MediaSource createMediaSource(Uri uri);

    /**
     * Returns the content types supported by media sources created by this factory. Each element
     * should be one of {@link C#TYPE_DASH}, {@link C#TYPE_SS}, {@link C#TYPE_HLS} or {@link
     * C#TYPE_OTHER}.
     *
     * @return The content types supported by media sources created by this factory.
     */
    int[] getSupportedTypes();
  }

  /**
   * Wrapper for exceptions that occur while loading ads, which are notified via {@link
   * MediaSourceEventListener#onLoadError(int, MediaPeriodId, LoadEventInfo, MediaLoadData,
   * IOException, boolean)}.
   */
  public static final class AdLoadException extends IOException {

    /**
     * Types of ad load exceptions. One of {@link #TYPE_AD}, {@link #TYPE_AD_GROUP}, {@link
     * #TYPE_ALL_ADS} or {@link #TYPE_UNEXPECTED}.
     */
    @Documented
    @Retention(RetentionPolicy.SOURCE)
    @IntDef({TYPE_AD, TYPE_AD_GROUP, TYPE_ALL_ADS, TYPE_UNEXPECTED})
    public @interface Type {}

    /** Type for when an ad failed to load. The ad will be skipped. */
    public static final int TYPE_AD = 0;
    /** Type for when an ad group failed to load. The ad group will be skipped. */
    public static final int TYPE_AD_GROUP = 1;
    /** Type for when all ad groups failed to load. All ads will be skipped. */
    public static final int TYPE_ALL_ADS = 2;
    /** Type for when an unexpected error occurred while loading ads. All ads will be skipped. */
    public static final int TYPE_UNEXPECTED = 3;

    /** Returns a new ad load exception of {@link #TYPE_AD}. */
    public static AdLoadException createForAd(Exception error) {
      return new AdLoadException(TYPE_AD, error);
    }

    /** Returns a new ad load exception of {@link #TYPE_AD_GROUP}. */
    public static AdLoadException createForAdGroup(Exception error, int adGroupIndex) {
      // The group index is folded into the message so listeners can report which group failed.
      return new AdLoadException(
          TYPE_AD_GROUP, new IOException("Failed to load ad group " + adGroupIndex, error));
    }

    /** Returns a new ad load exception of {@link #TYPE_ALL_ADS}. */
    public static AdLoadException createForAllAds(Exception error) {
      return new AdLoadException(TYPE_ALL_ADS, error);
    }

    /** Returns a new ad load exception of {@link #TYPE_UNEXPECTED}. */
    public static AdLoadException createForUnexpected(RuntimeException error) {
      return new AdLoadException(TYPE_UNEXPECTED, error);
    }

    /** The {@link Type} of the ad load exception. */
    public final @Type int type;

    private AdLoadException(@Type int type, Exception cause) {
      super(cause);
      this.type = type;
    }

    /**
     * Returns the {@link RuntimeException} that caused the exception if its type is {@link
     * #TYPE_UNEXPECTED}.
     */
    public RuntimeException getRuntimeExceptionForUnexpected() {
      // Only TYPE_UNEXPECTED wraps a RuntimeException, so the cast below is safe.
      Assertions.checkState(type == TYPE_UNEXPECTED);
      return (RuntimeException) getCause();
    }
  }

  // Used to identify the content "child" source for CompositeMediaSource.
private static final MediaPeriodId DUMMY_CONTENT_MEDIA_PERIOD_ID = new MediaPeriodId(/* periodUid= */ new Object()); private final MediaSource contentMediaSource; private final MediaSourceFactory adMediaSourceFactory; private final AdsLoader adsLoader; private final AdsLoader.AdViewProvider adViewProvider; private final Handler mainHandler; private final Map<MediaSource, List<DeferredMediaPeriod>> deferredMediaPeriodByAdMediaSource; private final Timeline.Period period; // Accessed on the player thread. private ComponentListener componentListener; private Timeline contentTimeline; private Object contentManifest; private AdPlaybackState adPlaybackState; private MediaSource[][] adGroupMediaSources; private Timeline[][] adGroupTimelines; /** * Constructs a new source that inserts ads linearly with the content specified by {@code * contentMediaSource}. Ad media is loaded using {@link ProgressiveMediaSource}. * * @param contentMediaSource The {@link MediaSource} providing the content to play. * @param dataSourceFactory Factory for data sources used to load ad media. * @param adsLoader The loader for ads. * @param adViewProvider Provider of views for the ad UI. */ public AdsMediaSource( MediaSource contentMediaSource, DataSource.Factory dataSourceFactory, AdsLoader adsLoader, AdsLoader.AdViewProvider adViewProvider) { this( contentMediaSource, new ProgressiveMediaSource.Factory(dataSourceFactory), adsLoader, adViewProvider); } /** * Constructs a new source that inserts ads linearly with the content specified by {@code * contentMediaSource}. * * @param contentMediaSource The {@link MediaSource} providing the content to play. * @param adMediaSourceFactory Factory for media sources used to load ad media. * @param adsLoader The loader for ads. * @param adViewProvider Provider of views for the ad UI. 
*/ public AdsMediaSource( MediaSource contentMediaSource, MediaSourceFactory adMediaSourceFactory, AdsLoader adsLoader, AdsLoader.AdViewProvider adViewProvider) { this.contentMediaSource = contentMediaSource; this.adMediaSourceFactory = adMediaSourceFactory; this.adsLoader = adsLoader; this.adViewProvider = adViewProvider; mainHandler = new Handler(Looper.getMainLooper()); deferredMediaPeriodByAdMediaSource = new HashMap<>(); period = new Timeline.Period(); adGroupMediaSources = new MediaSource[0][]; adGroupTimelines = new Timeline[0][]; adsLoader.setSupportedContentTypes(adMediaSourceFactory.getSupportedTypes()); } @Override @Nullable public Object getTag() { return contentMediaSource.getTag(); } @Override public void prepareSourceInternal(@Nullable TransferListener mediaTransferListener) { super.prepareSourceInternal(mediaTransferListener); ComponentListener componentListener = new ComponentListener(); this.componentListener = componentListener; prepareChildSource(DUMMY_CONTENT_MEDIA_PERIOD_ID, contentMediaSource); mainHandler.post(() -> adsLoader.start(componentListener, adViewProvider)); } @Override public MediaPeriod createPeriod(MediaPeriodId id, Allocator allocator, long startPositionUs) { if (adPlaybackState.adGroupCount > 0 && id.isAd()) { int adGroupIndex = id.adGroupIndex; int adIndexInAdGroup = id.adIndexInAdGroup; Uri adUri = adPlaybackState.adGroups[adGroupIndex].uris[adIndexInAdGroup]; if (adGroupMediaSources[adGroupIndex].length <= adIndexInAdGroup) { MediaSource adMediaSource = adMediaSourceFactory.createMediaSource(adUri); int oldAdCount = adGroupMediaSources[adGroupIndex].length; if (adIndexInAdGroup >= oldAdCount) { int adCount = adIndexInAdGroup + 1; adGroupMediaSources[adGroupIndex] = Arrays.copyOf(adGroupMediaSources[adGroupIndex], adCount); adGroupTimelines[adGroupIndex] = Arrays.copyOf(adGroupTimelines[adGroupIndex], adCount); } adGroupMediaSources[adGroupIndex][adIndexInAdGroup] = adMediaSource; 
deferredMediaPeriodByAdMediaSource.put(adMediaSource, new ArrayList<>()); prepareChildSource(id, adMediaSource); } MediaSource mediaSource = adGroupMediaSources[adGroupIndex][adIndexInAdGroup]; DeferredMediaPeriod deferredMediaPeriod = new DeferredMediaPeriod(mediaSource, id, allocator, startPositionUs); deferredMediaPeriod.setPrepareErrorListener( new AdPrepareErrorListener(adUri, adGroupIndex, adIndexInAdGroup)); List<DeferredMediaPeriod> mediaPeriods = deferredMediaPeriodByAdMediaSource.get(mediaSource); if (mediaPeriods == null) { Object periodUid = adGroupTimelines[adGroupIndex][adIndexInAdGroup].getUidOfPeriod(/* periodIndex= */ 0); MediaPeriodId adSourceMediaPeriodId = new MediaPeriodId(periodUid, id.windowSequenceNumber); deferredMediaPeriod.createPeriod(adSourceMediaPeriodId); } else { // Keep track of the deferred media period so it can be populated with the real media period // when the source's info becomes available. mediaPeriods.add(deferredMediaPeriod); } return deferredMediaPeriod; } else { DeferredMediaPeriod mediaPeriod = new DeferredMediaPeriod(contentMediaSource, id, allocator, startPositionUs); mediaPeriod.createPeriod(id); return mediaPeriod; } } @Override public void releasePeriod(MediaPeriod mediaPeriod) { DeferredMediaPeriod deferredMediaPeriod = (DeferredMediaPeriod) mediaPeriod; List<DeferredMediaPeriod> mediaPeriods = deferredMediaPeriodByAdMediaSource.get(deferredMediaPeriod.mediaSource); if (mediaPeriods != null) { mediaPeriods.remove(deferredMediaPeriod); } deferredMediaPeriod.releasePeriod(); } @Override public void releaseSourceInternal() { super.releaseSourceInternal(); componentListener.release(); componentListener = null; deferredMediaPeriodByAdMediaSource.clear(); contentTimeline = null; contentManifest = null; adPlaybackState = null; adGroupMediaSources = new MediaSource[0][]; adGroupTimelines = new Timeline[0][]; mainHandler.post(adsLoader::stop); } @Override protected void onChildSourceInfoRefreshed( MediaPeriodId 
mediaPeriodId, MediaSource mediaSource, Timeline timeline, @Nullable Object manifest) { if (mediaPeriodId.isAd()) { int adGroupIndex = mediaPeriodId.adGroupIndex; int adIndexInAdGroup = mediaPeriodId.adIndexInAdGroup; onAdSourceInfoRefreshed(mediaSource, adGroupIndex, adIndexInAdGroup, timeline); } else { onContentSourceInfoRefreshed(timeline, manifest); } } @Override protected @Nullable MediaPeriodId getMediaPeriodIdForChildMediaPeriodId( MediaPeriodId childId, MediaPeriodId mediaPeriodId) { // The child id for the content period is just DUMMY_CONTENT_MEDIA_PERIOD_ID. That's why we need // to forward the reported mediaPeriodId in this case. return childId.isAd() ? childId : mediaPeriodId; } // Internal methods. private void onAdPlaybackState(AdPlaybackState adPlaybackState) { if (this.adPlaybackState == null) { adGroupMediaSources = new MediaSource[adPlaybackState.adGroupCount][]; Arrays.fill(adGroupMediaSources, new MediaSource[0]); adGroupTimelines = new Timeline[adPlaybackState.adGroupCount][]; Arrays.fill(adGroupTimelines, new Timeline[0]); } this.adPlaybackState = adPlaybackState; maybeUpdateSourceInfo(); } private void onContentSourceInfoRefreshed(Timeline timeline, Object manifest) { Assertions.checkArgument(timeline.getPeriodCount() == 1); contentTimeline = timeline; contentManifest = manifest; maybeUpdateSourceInfo(); } private void onAdSourceInfoRefreshed(MediaSource mediaSource, int adGroupIndex, int adIndexInAdGroup, Timeline timeline) { Assertions.checkArgument(timeline.getPeriodCount() == 1); adGroupTimelines[adGroupIndex][adIndexInAdGroup] = timeline; List<DeferredMediaPeriod> mediaPeriods = deferredMediaPeriodByAdMediaSource.remove(mediaSource); if (mediaPeriods != null) { Object periodUid = timeline.getUidOfPeriod(/* periodIndex= */ 0); for (int i = 0; i < mediaPeriods.size(); i++) { DeferredMediaPeriod mediaPeriod = mediaPeriods.get(i); MediaPeriodId adSourceMediaPeriodId = new MediaPeriodId(periodUid, mediaPeriod.id.windowSequenceNumber); 
mediaPeriod.createPeriod(adSourceMediaPeriodId); } } maybeUpdateSourceInfo(); } private void maybeUpdateSourceInfo() { if (adPlaybackState != null && contentTimeline != null) { adPlaybackState = adPlaybackState.withAdDurationsUs(getAdDurations(adGroupTimelines, period)); Timeline timeline = adPlaybackState.adGroupCount == 0 ? contentTimeline : new SinglePeriodAdTimeline(contentTimeline, adPlaybackState); refreshSourceInfo(timeline, contentManifest); } } private static long[][] getAdDurations(Timeline[][] adTimelines, Timeline.Period period) { long[][] adDurations = new long[adTimelines.length][]; for (int i = 0; i < adTimelines.length; i++) { adDurations[i] = new long[adTimelines[i].length]; for (int j = 0; j < adTimelines[i].length; j++) { adDurations[i][j] = adTimelines[i][j] == null ? C.TIME_UNSET : adTimelines[i][j].getPeriod(/* periodIndex= */ 0, period).getDurationUs(); } } return adDurations; } /** Listener for component events. All methods are called on the main thread. */ private final class ComponentListener implements AdsLoader.EventListener { private final Handler playerHandler; private volatile boolean released; /** * Creates new listener which forwards ad playback states on the creating thread and all other * events on the external event listener thread. */ public ComponentListener() { playerHandler = new Handler(); } /** Releases the component listener. 
*/ public void release() { released = true; playerHandler.removeCallbacksAndMessages(null); } @Override public void onAdPlaybackState(final AdPlaybackState adPlaybackState) { if (released) { return; } playerHandler.post( () -> { if (released) { return; } AdsMediaSource.this.onAdPlaybackState(adPlaybackState); }); } @Override public void onAdLoadError(final AdLoadException error, DataSpec dataSpec) { if (released) { return; } createEventDispatcher(/* mediaPeriodId= */ null) .loadError( dataSpec, dataSpec.uri, /* responseHeaders= */ Collections.emptyMap(), C.DATA_TYPE_AD, C.TRACK_TYPE_UNKNOWN, /* loadDurationMs= */ 0, /* bytesLoaded= */ 0, error, /* wasCanceled= */ true); } } private final class AdPrepareErrorListener implements DeferredMediaPeriod.PrepareErrorListener { private final Uri adUri; private final int adGroupIndex; private final int adIndexInAdGroup; public AdPrepareErrorListener(Uri adUri, int adGroupIndex, int adIndexInAdGroup) { this.adUri = adUri; this.adGroupIndex = adGroupIndex; this.adIndexInAdGroup = adIndexInAdGroup; } @Override public void onPrepareError(MediaPeriodId mediaPeriodId, final IOException exception) { createEventDispatcher(mediaPeriodId) .loadError( new DataSpec(adUri), adUri, /* responseHeaders= */ Collections.emptyMap(), C.DATA_TYPE_AD, C.TRACK_TYPE_UNKNOWN, /* loadDurationMs= */ 0, /* bytesLoaded= */ 0, AdLoadException.createForAd(exception), /* wasCanceled= */ true); mainHandler.post( () -> adsLoader.handlePrepareError(adGroupIndex, adIndexInAdGroup, exception)); } } }
/* Implements the GroupClient Interface */ import java.util.*; import java.io.*; // include crypto librarys needed import javax.crypto.*; import javax.crypto.spec.IvParameterSpec; import java.security.*; import java.security.spec.InvalidKeySpecException; import java.security.spec.PKCS8EncodedKeySpec; import java.security.spec.X509EncodedKeySpec; import javax.crypto.spec.SecretKeySpec; import java.util.logging.Level; import java.util.logging.Logger; import org.bouncycastle.jce.provider.BouncyCastleProvider; import org.bouncycastle.jcajce.provider.digest.SHA3.DigestSHA3; import org.bouncycastle.util.encoders.Hex; // add pemobject and pemreader for parsing public/private keys import org.bouncycastle.util.io.pem.PemObject; import org.bouncycastle.util.io.pem.PemReader; public class GroupClient extends Client implements GroupClientInterface { DataLogger gLog = new DataLogger("Group"); StringWriter stackTraceString = new StringWriter(); PrintWriter stackTrace = new PrintWriter(stackTraceString); private SecretKey sessionConfidentialityAESKey = null, sessionIntegrityAESKey = null; private SecureRandom sr = null; public Hashtable<String, ArrayList<String>> encKeyChainTable = null; public Hashtable<String, ArrayList<String>> intKeyChainTable = null; int counter; public GroupClient() { // tell Client's connect and disconnect where to log in case of error super.log = new DataLogger("Group"); } public synchronized boolean authenticateUser(String username) { Security.addProvider(new BouncyCastleProvider()); Envelope message = null, response = null; message = new Envelope("AUTH1"); try { message.addObject(username); //Add user name string output.writeObject(message); //Get the response from the server response = (Envelope) input.readObject(); } catch (Exception e) { return false; } // we got stage 1 of the T1 protocol if (response.getMessage().equals("1")) { byte[] randomChallenge = null; randomChallenge = Base64.getDecoder().decode(response.getBytes1()); byte[] aesChalEncrypt = 
null; aesChalEncrypt = response.getBytes2(); byte[] aesInteEncrypt = null; aesInteEncrypt = response.getBytes3(); try { // open pem file for our private key and the group servers public key PemReader userPrivPemReader = new PemReader(new InputStreamReader(new FileInputStream("MyKeys/user-" + username + "-privK.pem"))); PemObject userPrivPemObj = userPrivPemReader.readPemObject(); byte[] privateBytes = userPrivPemObj.getContent(); PKCS8EncodedKeySpec privEncoded = new PKCS8EncodedKeySpec(privateBytes); KeyFactory daFactory = KeyFactory.getInstance("RSA"); PrivateKey privK = daFactory.generatePrivate(privEncoded); userPrivPemReader.close(); // decrypt the aesSessionKey Cipher cipher = Cipher.getInstance("RSA", "BC"); cipher.init(Cipher.DECRYPT_MODE, privK); byte[] decryptedKeyBytes = cipher.doFinal(Base64.getDecoder().decode(aesChalEncrypt)); byte[] decryptedKeyBytes2 = cipher.doFinal(Base64.getDecoder().decode(aesInteEncrypt)); byte[] aesKeyBytes = new byte[32]; byte[] aesIntBytes = new byte[32]; System.arraycopy(decryptedKeyBytes, 0, aesKeyBytes, 0, aesKeyBytes.length); System.arraycopy(decryptedKeyBytes2, 0, aesIntBytes, 0, aesIntBytes.length); sessionConfidentialityAESKey = new SecretKeySpec(aesKeyBytes, 0, aesKeyBytes.length, "AES"); sessionIntegrityAESKey = new SecretKeySpec(aesIntBytes, 0, aesIntBytes.length, "AES"); // create sha-256 hash of key bytes + integrity +challenge bytes byte[] concat = new byte[84]; byte[] encodedSesAes = sessionConfidentialityAESKey.getEncoded(); byte[] encodedIntegrity = sessionIntegrityAESKey.getEncoded(); System.arraycopy(encodedSesAes, 0, concat, 0, encodedSesAes.length);//copying the encoded key, and the challenge into a concat byte array System.arraycopy(encodedIntegrity, 0, concat, encodedSesAes.length, encodedIntegrity.length); System.arraycopy(randomChallenge, 0, concat, (encodedSesAes.length + encodedIntegrity.length), randomChallenge.length); MessageDigest theDigester = MessageDigest.getInstance("SHA-256", "BC"); 
theDigester.update(concat); byte[] hashToSend = theDigester.digest(); // create a second random challenge SecureRandom rand = new SecureRandom(); byte[] randomSent = new byte[20];//160 bit random challenge. rand.nextBytes(randomSent); byte[] encryptedRandom = null; // encrypt it with the group servers public key try { PemReader daPubPemReader = new PemReader(new InputStreamReader(new FileInputStream("MyKeys/gs-pubK.pem"))); PemObject daPubPemObj = daPubPemReader.readPemObject(); byte[] publicBytes = daPubPemObj.getContent(); X509EncodedKeySpec pubEncoded = new X509EncodedKeySpec(publicBytes); KeyFactory daFactory2 = KeyFactory.getInstance("RSA"); PublicKey pubK = daFactory2.generatePublic(pubEncoded); daPubPemReader.close(); cipher = Cipher.getInstance("RSA", "BC"); cipher.init(Cipher.ENCRYPT_MODE, pubK); encryptedRandom = cipher.doFinal(randomSent); } catch (Exception e) { System.out.println("Failed to load the group server's public key from MyKeys directory"); return false; } message = new Envelope("AUTH2"); message.addBytes1(Base64.getEncoder().encode(hashToSend)); message.addBytes2(Base64.getEncoder().encode(encryptedRandom)); output.writeObject(message); //Get the response from the server response = (Envelope) input.readObject(); // if random challenge back is the same, its the server if (response.getMessage().equals("2")) { byte[] randomBack = Base64.getDecoder().decode(response.getBytes1()); if (Arrays.equals(randomSent, randomBack)) // sub-string to cut block padding { System.out.println("Authentication Successful!"); sr = new SecureRandom(); counter = 0; return true; } } } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); System.out.println("You must have your private key and the group servers public key in MyKeys"); } } else if (response.getMessage().equals("NOKEY")) { System.out.println("FAILED! 
Group Server does not have your public key"); } return false; } public synchronized UserToken getToken(String username, String wantedIP, String wantedPort) { try { UserToken token = null; Envelope message = null, response = null; //Tell the server to return a token. message = new Envelope("GET"); message.addObject(username); //Add user name string message.addObject(wantedIP); //Add user name string message.addObject(wantedPort); //Add user name string encrypt(message, output); //Get the response from the server response = (Envelope) decrypt(input); //Successful response if (response.getMessage().equals("OK")) { //If there is a token in the Envelope, return it ArrayList<Object> temp = null; temp = response.getObjContents(); if (temp.size() == 1) { token = (UserToken) temp.get(0); return token; } } return null; } catch (Exception e) { e.printStackTrace(new PrintWriter(stackTrace)); gLog.write("err", e.getMessage(), stackTraceString.toString()); return null; } } // used to get the required keychains a user will need to upload/download files @SuppressWarnings("unchecked") public synchronized void setKeyChainTables(String username, UserToken token) { try { Envelope message = null, response = null; //Tell the server to return a token. 
message = new Envelope("KCHAIN"); message.addObject(username); //Add user name string message.addObject(token); //Add users token encrypt(message, output); //Get the response from the server response = (Envelope) decrypt(input); //Successful response if (response.getMessage().equals("OK")) { // we got a hashmap containing all keychains for all groups we are in // they only have keys for blocks the user is allowed to access encKeyChainTable = (Hashtable<String, ArrayList<String>>) response.getObjContents().get(0); intKeyChainTable = (Hashtable<String, ArrayList<String>>) response.getObjContents().get(0); System.out.println("encr kchain's size is: " + encKeyChainTable.size()); System.out.println("int kchain's size is: " + encKeyChainTable.size()); return; } else if (response.getMessage().equals("MODIFIED")) { System.out.println("INVALID! Token tampering detected! Get a correct token and reconnect."); } } catch (Exception e) { e.printStackTrace(new PrintWriter(stackTrace)); gLog.write("err", e.getMessage(), stackTraceString.toString()); } } public boolean createUser(String username, UserToken token) { try { Envelope message = null, response = null; //Tell the server to create a user message = new Envelope("CUSER"); message.addObject(username); //Add user name string message.addObject(token); //Add the requester's token encrypt(message, output); response = (Envelope) decrypt(input); //If server indicates success, return true if (response.getMessage().equals("OK")) { return true; } else if (response.getMessage().equals("MODIFIED")) { System.out.println("INVALID! Token tampering detected! 
Get a correct token and reconnect."); } return false; } catch (Exception e) { e.printStackTrace(new PrintWriter(stackTrace)); gLog.write("err", e.getMessage(), stackTraceString.toString()); return false; } } public boolean deleteUser(String username, UserToken token) { try { Envelope message = null, response = null; //Tell the server to delete a user message = new Envelope("DUSER"); message.addObject(username); //Add user name message.addObject(token); //Add requester's token encrypt(message, output); response = (Envelope) decrypt(input); //If server indicates success, return true if (response.getMessage().equals("OK")) { return true; } else if (response.getMessage().equals("MODIFIED")) { System.out.println("INVALID! Token tampering detected! Get a correct token and reconnect."); } return false; } catch (Exception e) { e.printStackTrace(new PrintWriter(stackTrace)); gLog.write("err", e.getMessage(), stackTraceString.toString()); return false; } } public boolean createGroup(String groupname, UserToken token) { try { Envelope message = null, response = null; //Tell the server to create a group message = new Envelope("CGROUP"); message.addObject(groupname); //Add the group name string message.addObject(token); //Add the requester's token encrypt(message, output); output.flush(); response = (Envelope) decrypt(input); //If server indicates success, return true if (response.getMessage().equals("OK")) { return true; } else if (response.getMessage().equals("MODIFIED")) { System.out.println("INVALID! Token tampering detected! 
Get a correct token and reconnect."); } return false; } catch (Exception e) { e.printStackTrace(new PrintWriter(stackTrace)); gLog.write("err", e.getMessage(), stackTraceString.toString()); return false; } } public boolean deleteGroup(String groupname, UserToken token) { try { Envelope message = null, response = null; //Tell the server to delete a group message = new Envelope("DGROUP"); message.addObject(groupname); //Add group name string message.addObject(token); //Add requester's token encrypt(message, output); response = (Envelope) decrypt(input); //If server indicates success, return true if (response.getMessage().equals("OK")) { return true; } else if (response.getMessage().equals("MODIFIED")) { System.out.println("INVALID! Token tampering detected! Get a correct token and reconnect."); } return false; } catch (Exception e) { e.printStackTrace(new PrintWriter(stackTrace)); gLog.write("err", e.getMessage(), stackTraceString.toString()); return false; } } @SuppressWarnings("unchecked") public List<String> listMembers(String group, UserToken token) { try { Envelope message = null, response = null; //Tell the server to return the member list message = new Envelope("LMEMBERS"); message.addObject(group); //Add group name string message.addObject(token); //Add requester's token encrypt(message, output); response = (Envelope) decrypt(input); //If server indicates success, return the member list if (response.getMessage().equals("OK")) { return (List<String>) response.getObjContents().get(0); //This cast creates compiler warnings. Sorry. } else if (response.getMessage().equals("MODIFIED")) { System.out.println("INVALID! Token tampering detected! 
Get a correct token and reconnect."); } return null; } catch (Exception e) { e.printStackTrace(new PrintWriter(stackTrace)); gLog.write("err", e.getMessage(), stackTraceString.toString()); return null; } } public boolean addUserToGroup(String username, String groupname, UserToken token) { try { Envelope message = null, response = null; //Tell the server to add a user to the group message = new Envelope("AUSERTOGROUP"); message.addObject(username); //Add user name string message.addObject(groupname); //Add group name string message.addObject(token); //Add requester's token encrypt(message, output); response = (Envelope) decrypt(input); System.out.println("The response message:" + response.getMessage()); //If server indicates success, return true if (response.getMessage().equals("OK")) { return true; } else if (response.getMessage().equals("MODIFIED")) { System.out.println("INVALID! Token tampering detected! Get a correct token and reconnect."); } return false; } catch (Exception e) { e.printStackTrace(new PrintWriter(stackTrace)); gLog.write("err", e.getMessage(), stackTraceString.toString()); return false; } } public boolean deleteUserFromGroup(String username, String groupname, UserToken token) { try { Envelope message = null, response = null; //Tell the server to remove a user from the group message = new Envelope("RUSERFROMGROUP"); message.addObject(username); //Add user name string message.addObject(groupname); //Add group name string message.addObject(token); //Add requester's token encrypt(message, output); response = (Envelope) decrypt(input); //If server indicates success, return true if (response.getMessage().equals("OK")) { return true; } else if (response.getMessage().equals("MODIFIED")) { System.out.println("INVALID! Token tampering detected! 
Get a correct token and reconnect."); } return false; } catch (Exception e) { e.printStackTrace(new PrintWriter(stackTrace)); gLog.write("err", e.getMessage(), stackTraceString.toString()); return false; } } private void encrypt(Envelope Object, ObjectOutputStream output) { try { ByteArrayOutputStream boutput = new ByteArrayOutputStream(); ObjectOutputStream oStream = new ObjectOutputStream(boutput); Object.addCounter(counter); counter++; oStream.writeObject(Object); byte[] byteValue = boutput.toByteArray(); oStream.close(); byte[] IV = new byte[16]; sr.nextBytes(IV); IvParameterSpec spec = new IvParameterSpec(IV); Cipher aes = Cipher.getInstance("AES/CBC/PKCS7PADDING", new BouncyCastleProvider()); aes.init(Cipher.ENCRYPT_MODE, sessionConfidentialityAESKey, spec); Envelope temp = new Envelope("Encrypted"); Mac mac = Mac.getInstance("HmacSHA256", new BouncyCastleProvider()); mac.init(sessionIntegrityAESKey); mac.update(byteValue); temp.addObject(new String(Base64.getEncoder().encode(IV))); temp.addBytes1(Base64.getEncoder().encode(mac.doFinal()));//put the HMAC here. 
temp.addBytes2(Base64.getEncoder().encode(aes.doFinal(byteValue))); output.writeObject(temp); } catch (NoSuchAlgorithmException ex) { Logger.getLogger(FileClient.class.getName()).log(Level.SEVERE, null, ex); } catch (NoSuchPaddingException ex) { Logger.getLogger(FileClient.class.getName()).log(Level.SEVERE, null, ex); } catch (InvalidKeyException ex) { Logger.getLogger(FileClient.class.getName()).log(Level.SEVERE, null, ex); } catch (InvalidAlgorithmParameterException ex) { Logger.getLogger(FileClient.class.getName()).log(Level.SEVERE, null, ex); } catch (IllegalBlockSizeException ex) { Logger.getLogger(FileClient.class.getName()).log(Level.SEVERE, null, ex); } catch (BadPaddingException ex) { Logger.getLogger(FileClient.class.getName()).log(Level.SEVERE, null, ex); } catch (IOException ex) { Logger.getLogger(FileClient.class.getName()).log(Level.SEVERE, null, ex); } } private Object decrypt(ObjectInputStream input) { try { Envelope env = (Envelope) input.readObject(); IvParameterSpec spec = new IvParameterSpec(Base64.getDecoder().decode(((String) env.getObjContents().get(0)).getBytes())); Cipher aes = Cipher.getInstance("AES/CBC/PKCS7PADDING", new BouncyCastleProvider()); aes.init(Cipher.DECRYPT_MODE, sessionConfidentialityAESKey, spec); byte[] temp = aes.doFinal(Base64.getDecoder().decode(env.getBytes2())); byte[] passedHMAC = Base64.getDecoder().decode(env.getBytes1()); ByteArrayInputStream in = new ByteArrayInputStream(temp); ObjectInputStream is = new ObjectInputStream(in); env = (Envelope) is.readObject(); int counter1 = env.getCounter(); if (counter1 != counter) { System.out.println("Error: detected tampering in the message received, counter didn't match."); sock.close(); System.exit(0); } counter++; Mac mac = Mac.getInstance("HmacSHA256", new BouncyCastleProvider()); mac.init(sessionIntegrityAESKey); //calculate the string to be used to calculate this HMAC, then compare. 
mac.update(temp); byte[] calculatedHMAC = mac.doFinal(); if (!Arrays.equals(passedHMAC, calculatedHMAC)) { System.out.println("Error: detected tampering in the message received, HMAC didn't match."); sock.close(); System.exit(0); } return env; } catch (IOException ex) { Logger.getLogger(FileClient.class.getName()).log(Level.SEVERE, null, ex); } catch (ClassNotFoundException ex) { Logger.getLogger(FileClient.class.getName()).log(Level.SEVERE, null, ex); } catch (IllegalBlockSizeException ex) { Logger.getLogger(FileClient.class.getName()).log(Level.SEVERE, null, ex); } catch (BadPaddingException ex) { Logger.getLogger(FileClient.class.getName()).log(Level.SEVERE, null, ex); } catch (NoSuchAlgorithmException ex) { Logger.getLogger(GroupClient.class.getName()).log(Level.SEVERE, null, ex); } catch (NoSuchPaddingException ex) { Logger.getLogger(GroupClient.class.getName()).log(Level.SEVERE, null, ex); } catch (InvalidKeyException ex) { Logger.getLogger(GroupClient.class.getName()).log(Level.SEVERE, null, ex); } catch (InvalidAlgorithmParameterException ex) { Logger.getLogger(GroupClient.class.getName()).log(Level.SEVERE, null, ex); } return null; } }
/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.rest.model;

import java.io.Serializable;
import java.util.LinkedHashMap;
import java.util.Map;

import javax.xml.bind.annotation.XmlAnyAttribute;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.namespace.QName;

import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.codehaus.jackson.annotate.JsonAnyGetter;
import org.codehaus.jackson.annotate.JsonAnySetter;

/**
 * Representation of a column family schema.
 *
 * <pre>
 * &lt;complexType name="ColumnSchema"&gt;
 *   &lt;attribute name="name" type="string"&gt;&lt;/attribute&gt;
 *   &lt;anyAttribute&gt;&lt;/anyAttribute&gt;
 * &lt;/complexType&gt;
 * </pre>
 */
@XmlRootElement(name="ColumnSchema")
@InterfaceAudience.Private
public class ColumnSchemaModel implements Serializable {
  private static final long serialVersionUID = 1L;
  // QName keys for the well-known schema attributes. These are constants and were
  // previously missing the `final` modifier.
  private static final QName BLOCKCACHE = new QName(HColumnDescriptor.BLOCKCACHE);
  private static final QName BLOCKSIZE = new QName(HColumnDescriptor.BLOCKSIZE);
  private static final QName BLOOMFILTER = new QName(HColumnDescriptor.BLOOMFILTER);
  private static final QName COMPRESSION = new QName(HColumnDescriptor.COMPRESSION);
  private static final QName IN_MEMORY = new QName(HConstants.IN_MEMORY);
  private static final QName TTL = new QName(HColumnDescriptor.TTL);
  private static final QName VERSIONS = new QName(HConstants.VERSIONS);

  private String name;
  // Insertion-ordered so serialized output preserves the order attributes were added.
  private Map<QName,Object> attrs = new LinkedHashMap<>();

  /**
   * Default constructor
   */
  public ColumnSchemaModel() {}

  /**
   * Add an attribute to the column family schema
   * @param name the attribute name
   * @param value the attribute value
   */
  @JsonAnySetter
  public void addAttribute(String name, Object value) {
    attrs.put(new QName(name), value);
  }

  /**
   * @param name the attribute name
   * @return the attribute value, or null if the attribute is not present
   */
  public String getAttribute(String name) {
    Object o = attrs.get(new QName(name));
    return o != null ? o.toString(): null;
  }

  /**
   * @return the column name
   */
  @XmlAttribute
  public String getName() {
    return name;
  }

  /**
   * @return the map for holding unspecified (user) attributes
   */
  @XmlAnyAttribute
  @JsonAnyGetter
  public Map<QName,Object> getAny() {
    return attrs;
  }

  /**
   * @param name the table name
   */
  public void setName(String name) {
    this.name = name;
  }

  /* (non-Javadoc)
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("{ NAME => '");
    sb.append(name);
    sb.append('\'');
    for (Map.Entry<QName,Object> e: attrs.entrySet()) {
      sb.append(", ");
      sb.append(e.getKey().getLocalPart());
      sb.append(" => '");
      sb.append(e.getValue().toString());
      sb.append('\'');
    }
    sb.append(" }");
    return sb.toString();
  }

  // getters and setters for common schema attributes

  // cannot be standard bean type getters and setters, otherwise this would
  // confuse JAXB

  /**
   * @return true if the BLOCKCACHE attribute is present and true
   */
  public boolean __getBlockcache() {
    Object o = attrs.get(BLOCKCACHE);
    return o != null ?
      Boolean.parseBoolean(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKCACHE;
  }

  /**
   * @return the value of the BLOCKSIZE attribute or its default if it is unset
   */
  public int __getBlocksize() {
    Object o = attrs.get(BLOCKSIZE);
    return o != null ?
      Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKSIZE;
  }

  /**
   * @return the value of the BLOOMFILTER attribute or its default if unset
   */
  public String __getBloomfilter() {
    Object o = attrs.get(BLOOMFILTER);
    return o != null ? o.toString() : HColumnDescriptor.DEFAULT_BLOOMFILTER;
  }

  /**
   * @return the value of the COMPRESSION attribute or its default if unset
   */
  public String __getCompression() {
    Object o = attrs.get(COMPRESSION);
    return o != null ? o.toString() : HColumnDescriptor.DEFAULT_COMPRESSION;
  }

  /**
   * @return true if the IN_MEMORY attribute is present and true
   */
  public boolean __getInMemory() {
    Object o = attrs.get(IN_MEMORY);
    return o != null ?
      Boolean.parseBoolean(o.toString()) : HColumnDescriptor.DEFAULT_IN_MEMORY;
  }

  /**
   * @return the value of the TTL attribute or its default if it is unset
   */
  public int __getTTL() {
    Object o = attrs.get(TTL);
    return o != null ?
      Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_TTL;
  }

  /**
   * @return the value of the VERSIONS attribute or its default if it is unset
   */
  public int __getVersions() {
    Object o = attrs.get(VERSIONS);
    return o != null ?
      Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_VERSIONS;
  }

  /**
   * @param value the desired value of the BLOCKSIZE attribute
   */
  public void __setBlocksize(int value) {
    attrs.put(BLOCKSIZE, Integer.toString(value));
  }

  /**
   * @param value the desired value of the BLOCKCACHE attribute
   */
  public void __setBlockcache(boolean value) {
    attrs.put(BLOCKCACHE, Boolean.toString(value));
  }

  /**
   * @param value the desired value of the BLOOMFILTER attribute
   */
  public void __setBloomfilter(String value) {
    attrs.put(BLOOMFILTER, value);
  }

  /**
   * @param value the desired value of the COMPRESSION attribute
   */
  public void __setCompression(String value) {
    attrs.put(COMPRESSION, value);
  }

  /**
   * @param value the desired value of the IN_MEMORY attribute
   */
  public void __setInMemory(boolean value) {
    attrs.put(IN_MEMORY, Boolean.toString(value));
  }

  /**
   * @param value the desired value of the TTL attribute
   */
  public void __setTTL(int value) {
    attrs.put(TTL, Integer.toString(value));
  }

  /**
   * @param value the desired value of the VERSIONS attribute
   */
  public void __setVersions(int value) {
    attrs.put(VERSIONS, Integer.toString(value));
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.hyracks.maven.license;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.function.BiConsumer;
import java.util.function.Predicate;
import java.util.function.UnaryOperator;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import java.util.regex.Pattern;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SequenceWriter;

import freemarker.cache.FileTemplateLoader;
import freemarker.template.Configuration;
import freemarker.template.Template;
import freemarker.template.TemplateException;

import org.apache.commons.io.IOUtils;
import org.apache.hyracks.maven.license.freemarker.IndentDirective;
import org.apache.hyracks.maven.license.freemarker.LoadFileDirective;
import org.apache.hyracks.maven.license.project.LicensedProjects;
import org.apache.hyracks.maven.license.project.Project;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugin.MojoFailureException;
import org.apache.maven.plugins.annotations.Mojo;
import org.apache.maven.plugins.annotations.Parameter;
import org.apache.maven.plugins.annotations.ResolutionScope;
import org.apache.maven.project.ProjectBuildingException;

/**
 * Maven mojo that generates LICENSE/NOTICE (and other FreeMarker-templated) files
 * for the build: it collects per-dependency license and notice texts (from the
 * artifact jars and/or configured license files), deduplicates them by
 * whitespace-normalized content, and renders the configured templates.
 */
@Mojo(name = "generate", requiresProject = true, requiresDependencyResolution = ResolutionScope.TEST)
public class GenerateFileMojo extends LicenseMojo {

    // Matches the standard ASF attribution line so it can be stripped from NOTICE
    // files. NOTE: .replace(" ", "\\s+") is applied only to the SECOND string
    // segment (it binds tighter than +), making its spaces match any whitespace run.
    public static final Pattern FOUNDATION_PATTERN = Pattern.compile("^\\s*This product includes software developed "
            + "(at|by) The Apache Software Foundation \\(http://www.apache.org/\\).\\s*$".replace(" ", "\\s+"),
            Pattern.DOTALL | Pattern.MULTILINE);

    // Treats texts that differ only in whitespace as equal (used as TreeMap comparator).
    public static final Comparator<String> WHITESPACE_NORMALIZED_COMPARATOR =
            (o1, o2) -> o1.replaceAll("\\s+", " ").compareTo(o2.replaceAll("\\s+", " "));

    /** Root directory containing the FreeMarker templates. */
    @Parameter(required = true)
    private File templateRootDir;

    /** Directory that generated files are written to. */
    @Parameter(defaultValue = "${project.build.directory}/generated-sources")
    private File outputDir;

    /** Template/output-file pairs to generate. */
    @Parameter
    private List<GeneratedFile> generatedFiles = new ArrayList<>();

    @Parameter(defaultValue = "${project.build.sourceEncoding}")
    private String encoding;

    /** Optional JSON dump of the resolved license map (for downstream builds). */
    @Parameter
    private File licenseMapOutputFile;

    /** Extra, pre-resolved license maps (JSON) to merge in. */
    @Parameter
    private List<ExtraLicenseFile> extraLicenseMaps = new ArrayList<>();

    /** Extra properties made available to the templates. */
    @Parameter
    protected Map<String, String> templateProperties = new HashMap<>();

    @Parameter
    private boolean stripFoundationAssertionFromNotices = true;

    // notice text -> projects sharing that notice; built by buildNoticeProjectMap().
    private SortedMap<String, SortedSet<Project>> noticeMap;

    /**
     * Entry point: resolves license/notice content for all dependencies, then
     * renders each configured template. Order matters: content must be resolved
     * and deduplicated before the maps are handed to the templates.
     */
    @Override
    public void execute() throws MojoExecutionException, MojoFailureException {
        try {
            init();
            readExtraMaps();
            addDependenciesToLicenseMap();
            resolveLicenseContent();
            resolveNoticeFiles();
            resolveLicenseFiles();
            rebuildLicenseContentProjectMap();
            combineCommonGavs();
            SourcePointerResolver.execute(this);
            persistLicenseMap();
            buildNoticeProjectMap();
            generateFiles();
        } catch (IOException | TemplateException | ProjectBuildingException e) {
            throw new MojoExecutionException("Unexpected exception: " + e, e);
        }
    }

    /** Best-effort pass to load content for every known license spec. */
    private void resolveLicenseContent() throws IOException {
        Set<LicenseSpec> licenseSpecs = new HashSet<>();
        for (LicensedProjects licensedProjects : licenseMap.values()) {
            licenseSpecs.add(licensedProjects.getLicense());
        }
        licenseSpecs.addAll(urlToLicenseMap.values());
        for (LicenseSpec license : licenseSpecs) {
            resolveLicenseContent(license, true);
        }
    }

    /**
     * Loads the license text from its content file if not already loaded.
     *
     * @param license    the license to resolve
     * @param bestEffort when true, a missing content file is silently tolerated;
     *                   when false, it is logged and a MISSING placeholder is set
     * @return the (possibly still null) license content
     */
    private String resolveLicenseContent(LicenseSpec license, boolean bestEffort) throws IOException {
        if (license.getContent() == null) {
            getLog().debug("Resolving content for " + license.getUrl() + " (" + license.getContentFile() + ")");
            File cFile = new File(license.getContentFile());
            if (!cFile.isAbsolute()) {
                // relative paths are resolved against the configured license directory
                cFile = new File(licenseDirectory, license.getContentFile());
            }
            if (!cFile.exists()) {
                if (!bestEffort) {
                    getLog().warn("MISSING: license content file (" + cFile + ") for url: " + license.getUrl());
                    license.setContent("MISSING: " + license.getContentFile() + " (" + license.getUrl() + ")");
                }
            } else {
                getLog().info("Reading license content from file: " + cFile);
                StringWriter sw = new StringWriter();
                LicenseUtil.readAndTrim(sw, cFile);
                license.setContent(sw.toString());
            }
        }
        return license.getContent();
    }

    /**
     * Collapses duplicate GAVs within each license bucket, merging their
     * location strings into the first occurrence.
     */
    private void combineCommonGavs() {
        for (LicensedProjects licensedProjects : licenseMap.values()) {
            Map<String, Project> projectMap = new HashMap<>();
            for (Iterator<Project> iter = licensedProjects.getProjects().iterator(); iter.hasNext();) {
                Project project = iter.next();
                if (projectMap.containsKey(project.gav())) {
                    Project first = projectMap.get(project.gav());
                    first.setLocation(first.getLocation() + "," + project.getLocation());
                    iter.remove();
                } else {
                    projectMap.put(project.gav(), project);
                }
            }
        }
    }

    /** Renders each configured template into its output file (UTF-8). */
    private void generateFiles() throws TemplateException, IOException {
        Map<String, Object> props = getProperties();
        Configuration config = new Configuration();
        config.setTemplateLoader(new FileTemplateLoader(templateRootDir));
        // hoisted out of the loop: creating the output dir is loop-invariant
        outputDir.mkdirs();
        for (GeneratedFile generation : generatedFiles) {
            Template template = config.getTemplate(generation.getTemplate(), StandardCharsets.UTF_8.name());
            if (template == null) {
                throw new IOException("Could not load template " + generation.getTemplate());
            }
            final File file = new File(outputDir, generation.getOutputFile());
            getLog().info("Writing " + file + "...");
            try (final FileOutputStream fos = new FileOutputStream(file);
                    final Writer writer = new OutputStreamWriter(fos, StandardCharsets.UTF_8)) {
                template.process(props, writer);
            }
        }
    }

    /** Builds the data model handed to the FreeMarker templates. */
    protected Map<String, Object> getProperties() {
        Map<String, Object> props = new HashMap<>();
        props.put("indent", new IndentDirective());
        props.put("loadfile", new LoadFileDirective());
        props.put("project", project);
        props.put("noticeMap", noticeMap.entrySet());
        props.put("licenseMap", licenseMap.entrySet());
        props.put("licenses", urlToLicenseMap.values());
        props.putAll(templateProperties);
        return props;
    }

    /** Merges externally supplied license maps (JSON) into the in-memory maps. */
    private void readExtraMaps() throws IOException {
        final ObjectMapper objectMapper = new ObjectMapper();
        for (ExtraLicenseFile extraLicenseFile : extraLicenseMaps) {
            for (LicensedProjects projects : objectMapper.readValue(extraLicenseFile.getFile(),
                    LicensedProjects[].class)) {
                LicenseSpec spec = urlToLicenseMap.get(projects.getLicense().getUrl());
                if (spec != null) {
                    // TODO(mblow): probably we should always favor the extra map...
                    // propagate any license content we may have with what already has been loaded
                    if (projects.getLicense().getContent() != null && spec.getContent() == null) {
                        spec.setContent(projects.getLicense().getContent());
                    }
                    // propagate any license displayName we may have with what already has been loaded
                    if (projects.getLicense().getDisplayName() != null && spec.getDisplayName() == null) {
                        spec.setDisplayName(projects.getLicense().getDisplayName());
                    }
                }
                for (Project p : projects.getProjects()) {
                    p.setLocation(extraLicenseFile.getLocation());
                    addProject(p, projects.getLicense(), extraLicenseFile.isAdditive());
                }
            }
        }
    }

    /** Dumps the resolved license map to JSON, if configured. */
    private void persistLicenseMap() throws IOException {
        if (licenseMapOutputFile != null) {
            licenseMapOutputFile.getParentFile().mkdirs();
            // try-with-resources: previously the SequenceWriter leaked (and the
            // output was left unflushed) if any write threw.
            try (SequenceWriter sw = new ObjectMapper().writerWithDefaultPrettyPrinter()
                    .writeValues(licenseMapOutputFile).init(true)) {
                for (LicensedProjects entry : licenseMap.values()) {
                    sw.write(entry);
                }
            }
        }
    }

    /**
     * Re-keys the license map by actual (whitespace-normalized) license text,
     * so that projects shipping byte-different but equivalent texts share one
     * entry, and projects whose embedded text differs from the canonical
     * license get a synthesized spec with a unique URL suffix.
     */
    private void rebuildLicenseContentProjectMap() throws IOException {
        int counter = 0;
        Map<String, LicensedProjects> licenseMap2 = new TreeMap<>(WHITESPACE_NORMALIZED_COMPARATOR);
        for (LicensedProjects lps : licenseMap.values()) {
            for (Project p : lps.getProjects()) {
                String licenseText = p.getLicenseText();
                if (licenseText == null) {
                    getLog().warn("Using license other than from within artifact: " + p.gav());
                    licenseText = resolveLicenseContent(lps.getLicense(), false);
                }
                LicenseSpec spec = lps.getLicense();
                if (spec.getDisplayName() == null) {
                    LicenseSpec canonicalLicense = urlToLicenseMap.get(spec.getUrl());
                    if (canonicalLicense != null) {
                        spec.setDisplayName(canonicalLicense.getDisplayName());
                    }
                }
                if (!licenseMap2.containsKey(licenseText)) {
                    if (!licenseText.equals(lps.getLicense().getContent())) {
                        // text differs from the canonical license: synthesize a
                        // distinct spec, uniquified by appending a counter to the URL
                        spec = new LicenseSpec(new ArrayList<>(), licenseText, null, spec.getDisplayName(),
                                spec.getMetric(), spec.getUrl() + (counter++));
                    }
                    licenseMap2.put(licenseText, new LicensedProjects(spec));
                }
                final LicensedProjects lp2 = licenseMap2.get(licenseText);
                if (lp2.getLicense().getDisplayName() == null) {
                    lp2.getLicense().setDisplayName(lps.getLicense().getDisplayName());
                }
                lp2.addProject(p);
            }
        }
        licenseMap = licenseMap2;
    }

    /** @return all projects across all license buckets */
    private Set<Project> getProjects() {
        Set<Project> projects = new HashSet<>();
        licenseMap.values().forEach(p -> projects.addAll(p.getProjects()));
        return projects;
    }

    /** Groups projects by their (source-pointer-prefixed) NOTICE text. */
    private void buildNoticeProjectMap() {
        noticeMap = new TreeMap<>(WHITESPACE_NORMALIZED_COMPARATOR);
        for (Project p : getProjects()) {
            prependSourcePointerToNotice(p);
            final String noticeText = p.getNoticeText();
            if (noticeText == null) {
                continue;
            }
            if (!noticeMap.containsKey(noticeText)) {
                noticeMap.put(noticeText, new TreeSet<>(Project.PROJECT_COMPARATOR));
            }
            noticeMap.get(noticeText).add(p);
        }
    }

    /** Prefixes a project's NOTICE text with its source pointer, if any. */
    private void prependSourcePointerToNotice(Project project) {
        if (project.getSourcePointer() != null) {
            String notice = project.getSourcePointer().replace("\n", "\n    ");
            if (project.getNoticeText() != null) {
                notice += "\n\n" + project.getNoticeText();
            }
            project.setNoticeText(notice);
        }
    }

    /** Extracts NOTICE texts from artifact jars (optionally stripping the ASF line). */
    private void resolveNoticeFiles() throws MojoExecutionException, IOException {
        resolveArtifactFiles("NOTICE", entry -> entry.getName().matches("(.*/|^)" + "NOTICE" + "(.txt)?"),
                Project::setNoticeText,
                text -> stripFoundationAssertionFromNotices ? FOUNDATION_PATTERN.matcher(text).replaceAll("") : text);
    }

    /** Extracts LICENSE texts from artifact jars, unmodified. */
    private void resolveLicenseFiles() throws MojoExecutionException, IOException {
        resolveArtifactFiles("LICENSE", entry -> entry.getName().matches("(.*/|^)" + "LICENSE" + "(.txt)?"),
                Project::setLicenseText, UnaryOperator.identity());
    }

    /**
     * Scans each project's jar for entries matching {@code filter}; the first
     * match (alphabetically) is transformed and handed to {@code consumer}.
     *
     * @throws MojoExecutionException if an artifact file is missing entirely
     */
    private void resolveArtifactFiles(final String name, Predicate<JarEntry> filter,
            BiConsumer<Project, String> consumer, UnaryOperator<String> contentTransformer)
            throws MojoExecutionException, IOException {
        for (Project p : getProjects()) {
            File artifactFile = new File(p.getArtifactPath());
            if (!artifactFile.exists()) {
                throw new MojoExecutionException("Artifact file " + artifactFile + " does not exist!");
            } else if (!artifactFile.getName().endsWith(".jar")) {
                getLog().info("Skipping unknown artifact file type: " + artifactFile);
                continue;
            }
            try (JarFile jarFile = new JarFile(artifactFile)) {
                SortedMap<String, JarEntry> matches = gatherMatchingEntries(jarFile, filter);
                if (matches.isEmpty()) {
                    getLog().warn("No " + name + " file found for " + p.gav());
                } else {
                    if (matches.size() > 1) {
                        getLog().warn("Multiple " + name + " files found for " + p.gav() + ": " + matches.keySet()
                                + "; taking first");
                    } else {
                        getLog().info(p.gav() + " has " + name + " file: " + matches.keySet());
                    }
                    resolveContent(p, jarFile, matches.values().iterator().next(), contentTransformer, consumer, name);
                }
            }
        }
    }

    /** Reads, transforms, and trims one jar entry; empty results are dropped. */
    private void resolveContent(Project project, JarFile jarFile, JarEntry entry, UnaryOperator<String> transformer,
            BiConsumer<Project, String> contentConsumer, final String name) throws IOException {
        String text = IOUtils.toString(jarFile.getInputStream(entry), StandardCharsets.UTF_8);
        text = transformer.apply(text);
        text = LicenseUtil.trim(text);
        if (text.length() == 0) {
            getLog().warn("Ignoring empty " + name + " file ( " + entry + ") for " + project.gav());
        } else {
            contentConsumer.accept(project, text);
            getLog().debug("Resolved " + name + " text for " + project.gav() + ": \n" + text);
        }
    }

    /** @return jar entries accepted by {@code filter}, sorted by entry name */
    private SortedMap<String, JarEntry> gatherMatchingEntries(JarFile jarFile, Predicate<JarEntry> filter) {
        SortedMap<String, JarEntry> matches = new TreeMap<>();
        Enumeration<JarEntry> entries = jarFile.entries();
        while (entries.hasMoreElements()) {
            JarEntry entry = entries.nextElement();
            if (filter.test(entry)) {
                matches.put(entry.getName(), entry);
            }
        }
        return matches;
    }
}
/*
 * Copyright (c) 2011-2022 VMware, Inc. or its affiliates, All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package reactor.netty.tcp;

import java.io.IOException;
import java.lang.ref.WeakReference;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.nio.charset.Charset;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.function.Supplier;

import io.netty.bootstrap.Bootstrap;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelOption;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.unix.DomainSocketAddress;
import io.netty.handler.codec.LineBasedFrameDecoder;
import io.netty.resolver.AddressResolverGroup;
import io.netty.resolver.DefaultAddressResolverGroup;
import io.netty.util.AttributeKey;
import io.netty.util.NetUtil;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.reactivestreams.Publisher;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
import reactor.netty.Connection;
import reactor.netty.DisposableServer;
import reactor.netty.NettyOutbound;
import reactor.netty.SocketUtils;
import reactor.netty.channel.AbortedException;
import reactor.netty.channel.ChannelOperations;
import reactor.netty.resources.ConnectionProvider;
import reactor.netty.resources.LoopResources;
import reactor.netty.transport.NameResolverProvider;
import reactor.test.StepVerifier;
import reactor.util.Logger;
import reactor.util.Loggers;
import reactor.util.retry.Retry;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
import static org.assertj.core.api.Assumptions.assumeThat;

/**
 * Integration tests for {@link TcpClient}. Each test talks to one of four
 * small blocking NIO servers (echo / abort / timeout / heartbeat) started per
 * test in {@link #setup()} on dynamically allocated ports.
 *
 * @author Stephane Maldini
 * @since 2.5
 */
public class TcpClientTests {

	static final Logger log = Loggers.getLogger(TcpClientTests.class);

	// runs the four test servers; torn down in cleanup()
	private final ExecutorService threadPool = Executors.newCachedThreadPool();
	int echoServerPort;
	EchoServer echoServer;
	Future<?> echoServerFuture;
	int abortServerPort;
	ConnectionAbortServer abortServer;
	Future<?> abortServerFuture;
	int timeoutServerPort;
	ConnectionTimeoutServer timeoutServer;
	Future<?> timeoutServerFuture;
	int heartbeatServerPort;
	HeartbeatServer heartbeatServer;
	Future<?> heartbeatServerFuture;

	/**
	 * Starts all four test servers and waits (up to 10s each) for them to bind.
	 * Each server is a CountDownLatch that counts down once it is listening.
	 */
	@BeforeEach
	void setup() throws Exception {
		echoServerPort = SocketUtils.findAvailableTcpPort();
		echoServer = new EchoServer(echoServerPort);
		echoServerFuture = threadPool.submit(echoServer);
		if (!echoServer.await(10, TimeUnit.SECONDS)) {
			throw new IOException("fail to start test server");
		}

		abortServerPort = SocketUtils.findAvailableTcpPort();
		abortServer = new ConnectionAbortServer(abortServerPort);
		abortServerFuture = threadPool.submit(abortServer);
		if (!abortServer.await(10, TimeUnit.SECONDS)) {
			throw new IOException("fail to start test server");
		}

		timeoutServerPort = SocketUtils.findAvailableTcpPort();
		timeoutServer = new ConnectionTimeoutServer(timeoutServerPort);
		timeoutServerFuture = threadPool.submit(timeoutServer);
		if (!timeoutServer.await(10, TimeUnit.SECONDS)) {
			throw new IOException("fail to start test server");
		}

		heartbeatServerPort = SocketUtils.findAvailableTcpPort();
		heartbeatServer = new HeartbeatServer(heartbeatServerPort);
		heartbeatServerFuture = threadPool.submit(heartbeatServer);
		if (!heartbeatServer.await(10, TimeUnit.SECONDS)) {
			throw new IOException("fail to start test server");
		}
	}

	/** Stops the servers, verifies their runnables completed without error. */
	@AfterEach
	void cleanup() throws Exception {
		echoServer.close();
		abortServer.close();
		timeoutServer.close();
		heartbeatServer.close();
		assertThat(echoServerFuture.get()).isNull();
		assertThat(abortServerFuture.get()).isNull();
		assertThat(timeoutServerFuture.get()).isNull();
		assertThat(heartbeatServerFuture.get()).isNull();
		threadPool.shutdown();
		threadPool.awaitTermination(5, TimeUnit.SECONDS);
		Thread.sleep(500);
	}

	@Test
	void disableSsl() {
		TcpClient secureClient = TcpClient.create()
		                                  .secure();

		assertThat(secureClient.configuration().isSecure()).isTrue();
		// noSSL() must produce a non-secure configuration
		assertThat(secureClient.noSSL().configuration().isSecure()).isFalse();
	}

	@Test
	void testTcpClient() throws InterruptedException {
		final CountDownLatch latch = new CountDownLatch(1);

		Connection client = TcpClient.create()
		                             .host("localhost")
		                             .port(echoServerPort)
		                             .handle((in, out) -> {
		                                 in.receive()
		                                   .log("conn")
		                                   .subscribe(s -> latch.countDown());
		                                 return out.sendString(Flux.just("Hello World!"))
		                                           .neverComplete();
		                             })
		                             .wiretap(true)
		                             .connectNow();

		assertThat(latch.await(30, TimeUnit.SECONDS)).as("latch await").isTrue();

		client.disposeNow();
	}

	@Test
	void testTcpClient1ThreadAcquire() {
		LoopResources resources = LoopResources.create("test", 1, true);

		Connection client = TcpClient.create()
		                             .host("localhost")
		                             .port(echoServerPort)
		                             .runOn(resources)
		                             .wiretap(true)
		                             .connectNow();

		client.disposeNow();
		resources.dispose();

		assertThat(client).as("client was configured").isInstanceOf(ChannelOperations.class);
	}

	@Test
	void testTcpClientWithInetSocketAddress() throws InterruptedException {
		final CountDownLatch latch = new CountDownLatch(1);

		TcpClient client = TcpClient.create().port(echoServerPort);

		Connection s = client.handle((in, out) -> {
		                         in.receive()
		                           .subscribe(d -> latch.countDown());

		                         return out.sendString(Flux.just("Hello"))
		                                   .neverComplete();
		                     })
		                     .wiretap(true)
		                     .connectNow(Duration.ofSeconds(5));

		assertThat(latch.await(5, TimeUnit.SECONDS)).as("latch await").isTrue();

		s.disposeNow();
	}

	@Test
	void tcpClientHandlesLineFeedData() throws InterruptedException {
		final int messages = 100;
		final CountDownLatch latch = new CountDownLatch(messages);
		final List<String> strings = new ArrayList<>();

		Connection client = TcpClient.create()
		                             .host("localhost")
		                             .port(echoServerPort)
		                             // split the echoed byte stream back into lines
		                             .doOnConnected(c -> c.addHandlerLast("codec",
		                                     new LineBasedFrameDecoder(8 * 1024)))
		                             .handle((in, out) ->
		                                 out.sendString(Flux.range(1, messages)
		                                                    .map(i -> "Hello World!" + i + "\n")
		                                                    .subscribeOn(Schedulers.parallel()))
		                                    .then(in.receive()
		                                            .asString()
		                                            .take(100)
		                                            .flatMapIterable(s -> Arrays.asList(s.split("\\n")))
		                                            .doOnNext(s -> {
		                                                strings.add(s);
		                                                latch.countDown();
		                                            })
		                                            .then()))
		                             .wiretap(true)
		                             .connectNow(Duration.ofSeconds(15));

		assertThat(latch.await(15, TimeUnit.SECONDS))
				.as("Expected messages not received. Received " + strings.size() + " messages: " + strings)
				.isTrue();

		assertThat(strings).hasSize(messages);

		client.disposeNow();
	}

	@Test
	void tcpClientHandlesLineFeedDataFixedPool() throws InterruptedException {
		Consumer<? super Connection> channelInit = c -> c.addHandlerLast("codec",
		        new LineBasedFrameDecoder(8 * 1024));

		ConnectionProvider p = ConnectionProvider.newConnection();

		tcpClientHandlesLineFeedData(
				TcpClient.create(p)
				         .host("localhost")
				         .port(echoServerPort)
				         .doOnConnected(channelInit));
	}

	@Test
	void tcpClientHandlesLineFeedDataElasticPool() throws InterruptedException {
		Consumer<? super Connection> channelInit = c -> c.addHandlerLast("codec",
		        new LineBasedFrameDecoder(8 * 1024));

		tcpClientHandlesLineFeedData(
				TcpClient.create(ConnectionProvider.create("tcpClientHandlesLineFeedDataElasticPool", Integer.MAX_VALUE))
				         .host("localhost")
				         .port(echoServerPort)
				         .doOnConnected(channelInit));
	}

	// Shared body for the pooled variants above; client must already have the
	// line-based decoder installed.
	private void tcpClientHandlesLineFeedData(TcpClient client) throws InterruptedException {
		final int messages = 100;
		final CountDownLatch latch = new CountDownLatch(messages);
		final List<String> strings = new ArrayList<>();

		Connection c = client.handle((in, out) ->
		                         out.sendString(Flux.range(1, messages)
		                                            .map(i -> "Hello World!" + i + "\n")
		                                            .subscribeOn(Schedulers.parallel()))
		                            .then(in.receive()
		                                    .asString()
		                                    .take(100)
		                                    .flatMapIterable(s -> Arrays.asList(s.split("\\n")))
		                                    .doOnNext(s -> {
		                                        strings.add(s);
		                                        latch.countDown();
		                                    }).then()))
		                     .wiretap(true)
		                     .connectNow(Duration.ofSeconds(30));

		log.debug("Connected");

		c.onDispose()
		 .log()
		 .block(Duration.ofSeconds(30));

		assertThat(latch.await(15, TimeUnit.SECONDS))
				.as("Expected messages not received. Received " + strings.size() + " messages: " + strings)
				.isTrue();

		assertThat(strings).hasSize(messages);
	}

	@Test
	void closingPromiseIsFulfilled() {
		TcpClient client = TcpClient.newConnection()
		                            .host("localhost")
		                            .port(abortServerPort);

		client.handle((in, out) -> Mono.empty())
		      .wiretap(true)
		      .connectNow()
		      .disposeNow();
	}

	/*Check in details*/
	// Shared body: the client must back off 100ms/500ms/1s across three retries
	// before giving up (latch counts down on the fourth attempt).
	private void connectionWillRetryConnectionAttemptWhenItFails(TcpClient client) throws InterruptedException {
		final CountDownLatch latch = new CountDownLatch(1);
		final AtomicLong totalDelay = new AtomicLong();

		client.handle((in, out) -> Mono.never())
		      .wiretap(true)
		      .connect()
		      .retryWhen(Retry.from(errors -> errors.flatMap(attempt -> {
		          switch ((int) attempt.totalRetries()) {
		              case 0:
		                  totalDelay.addAndGet(100);
		                  return Mono.delay(Duration.ofMillis(100));
		              case 1:
		                  totalDelay.addAndGet(500);
		                  return Mono.delay(Duration.ofMillis(500));
		              case 2:
		                  totalDelay.addAndGet(1000);
		                  return Mono.delay(Duration.ofSeconds(1));
		              default:
		                  latch.countDown();
		                  return Mono.<Long>empty();
		          }
		      })))
		      .subscribe(System.out::println);

		assertThat(latch.await(15, TimeUnit.SECONDS)).as("latch await").isTrue();
		assertThat(totalDelay.get()).as("totalDelay was >1.6s").isGreaterThanOrEqualTo(1600L);
	}

	/*Check in details*/
	@Test
	void connectionWillRetryConnectionAttemptWhenItFailsElastic() throws InterruptedException {
		// abortServerPort + 3 is deliberately an unbound port to force failures
		connectionWillRetryConnectionAttemptWhenItFails(
				TcpClient.create()
				         .host("localhost")
				         .port(abortServerPort + 3)
				         .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 100));
	}

	//see https://github.com/reactor/reactor-netty/issues/289
	@Test
	void connectionWillRetryConnectionAttemptWhenItFailsFixedChannelPool() throws InterruptedException {
		connectionWillRetryConnectionAttemptWhenItFails(
				TcpClient.create(ConnectionProvider.create("connectionWillRetryConnectionAttemptWhenItFailsFixedChannelPool", 1))
				         .host("localhost")
				         .port(abortServerPort + 3)
				         .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 100));
	}

	@Test
	void connectionWillAttemptToReconnectWhenItIsDropped() throws InterruptedException {
		final CountDownLatch connectionLatch = new CountDownLatch(1);
		final CountDownLatch reconnectionLatch = new CountDownLatch(1);

		try {
			TcpClient tcpClient = TcpClient.newConnection()
			                               .host("localhost")
			                               .port(abortServerPort);

			Mono<? extends Connection> handler = tcpClient.handle((in, out) -> {
				log.debug("Start");
				connectionLatch.countDown();
				in.receive().subscribe();
				return Flux.never();
			})
			.wiretap(true)
			.connect();

			// subscribing the same Mono twice yields two connection attempts;
			// the abort server drops each, so the second counts as a reconnect
			Connection c = handler.log()
			                      .then(handler.doOnSuccess(s -> reconnectionLatch.countDown()))
			                      .block(Duration.ofSeconds(30));
			assertThat(c).isNotNull();
			c.onDispose();

			assertThat(connectionLatch.await(5, TimeUnit.SECONDS)).as("Initial connection is made").isTrue();
			assertThat(reconnectionLatch.await(5, TimeUnit.SECONDS)).as("A reconnect attempt was made").isTrue();
		}
		catch (AbortedException e) {
			// ignored
		}
	}

	@Test
	void testCancelSend() throws InterruptedException {
		final CountDownLatch connectionLatch = new CountDownLatch(3);

		TcpClient tcpClient = TcpClient.newConnection()
		                               .host("localhost")
		                               .port(echoServerPort);
		Connection c;

		// each of the three sends is immediately disposed; all three Mono.never
		// sources must observe the cancellation
		c = tcpClient.handle((i, o) -> {
		                 o.sendObject(Mono.never()
		                                  .doOnCancel(connectionLatch::countDown)
		                                  .log("uno"))
		                  .then()
		                  .subscribe()
		                  .dispose();

		                 Schedulers.parallel()
		                           .schedule(() -> o.sendObject(Mono.never()
		                                                            .doOnCancel(connectionLatch::countDown)
		                                                            .log("dos"))
		                                            .then()
		                                            .subscribe()
		                                            .dispose());

		                 o.sendObject(Mono.never()
		                                  .doOnCancel(connectionLatch::countDown)
		                                  .log("tres"))
		                  .then()
		                  .subscribe()
		                  .dispose();

		                 return Mono.never();
		             })
		             .connectNow();

		assertThat(connectionLatch.await(30, TimeUnit.SECONDS)).as("Cancel not propagated").isTrue();
		c.disposeNow();
	}

	@Test
	void consumerSpecAssignsEventHandlers() throws InterruptedException {
		final CountDownLatch latch = new CountDownLatch(2);
		final CountDownLatch close = new CountDownLatch(1);
		final AtomicLong totalDelay = new AtomicLong();
		final long start = System.currentTimeMillis();

		TcpClient client = TcpClient.create()
		                            .host("localhost")
		                            .port(timeoutServerPort);

		Connection s = client.handle((in, out) -> {
		                         in.withConnection(c -> c.onDispose(close::countDown));
		                         out.withConnection(c -> c.onWriteIdle(200, () -> {
		                             totalDelay.addAndGet(System.currentTimeMillis() - start);
		                             latch.countDown();
		                         }));

		                         return Mono.delay(Duration.ofSeconds(1))
		                                    .then()
		                                    .log();
		                     })
		                     .wiretap(true)
		                     .connectNow();

		assertThat(latch.await(5, TimeUnit.SECONDS)).as("latch was counted down").isTrue();
		assertThat(close.await(30, TimeUnit.SECONDS)).as("close was counted down").isTrue();
		assertThat(totalDelay.get()).as("totalDelay was > 200ms").isGreaterThanOrEqualTo(200L);

		s.disposeNow();
	}

	@Test
	void readIdleDoesNotFireWhileDataIsBeingRead() throws InterruptedException, IOException {
		final CountDownLatch latch = new CountDownLatch(1);
		long start = System.currentTimeMillis();

		TcpClient client = TcpClient.create()
		                            .port(heartbeatServerPort);

		Connection s = client.handle((in, out) -> {
		                         in.withConnection(c -> c.onReadIdle(200, latch::countDown));
		                         return Flux.never();
		                     })
		                     .wiretap(true)
		                     .connectNow();

		assertThat(latch.await(5, TimeUnit.SECONDS)).as("latch await").isTrue();

		heartbeatServer.close();

		long duration = System.currentTimeMillis() - start;

		// the heartbeat (every 100ms) keeps the read-idle timer from firing early
		assertThat(duration).isGreaterThanOrEqualTo(200L);

		s.disposeNow();
	}

	@Test
	void writeIdleDoesNotFireWhileDataIsBeingSent() throws InterruptedException {
		final CountDownLatch latch = new CountDownLatch(1);
		long start = System.currentTimeMillis();

		Connection client = TcpClient.create()
		                             .host("localhost")
		                             .port(echoServerPort)
		                             .handle((in, out) -> {
		                                 log.debug("hello");
		                                 out.withConnection(c -> c.onWriteIdle(500, latch::countDown));

		                                 List<Publisher<Void>> allWrites = new ArrayList<>();
		                                 for (int i = 0; i < 5; i++) {
		                                     allWrites.add(out.sendString(Flux.just("a")
		                                                                      .delayElements(Duration.ofMillis(750))));
		                                 }
		                                 return Flux.merge(allWrites);
		                             })
		                             .wiretap(true)
		                             .connectNow();

		log.debug("Started");

		assertThat(latch.await(5, TimeUnit.SECONDS)).as("latch await").isTrue();

		long duration = System.currentTimeMillis() - start;

		assertThat(duration).isGreaterThanOrEqualTo(500L);

		client.disposeNow();
	}

	@Test
	void gettingOptionsDuplicates() {
		TcpClient client1 = TcpClient.create();
		TcpClient client2 = client1.host("example.com").port(123);
		// configuration methods must return a new, distinct instance
		assertThat(client2)
				.isNotSameAs(client1)
				.isNotSameAs(((TcpClientConnect) client2).duplicate());
	}

	/**
	 * Blocking echo server: writes back whatever it reads, one connection at a
	 * time. Counts down once bound; closed by interrupting its thread and
	 * closing the server channel.
	 */
	public static final class EchoServer extends CountDownLatch implements Runnable {

		private final int port;
		private final ServerSocketChannel server;
		private volatile Thread thread;

		public EchoServer(int port) {
			super(1);
			this.port = port;
			try {
				server = ServerSocketChannel.open();
			}
			catch (IOException e) {
				throw new RuntimeException(e);
			}
		}

		@Override
		public void run() {
			try {
				server.configureBlocking(true);
				server.socket()
				      .bind(new InetSocketAddress(port));
				countDown();
				thread = Thread.currentThread();
				while (true) {
					SocketChannel ch = server.accept();

					ByteBuffer buffer = ByteBuffer.allocate(8192);
					while (true) {
						int read = ch.read(buffer);
						if (read > 0) {
							buffer.flip();
						}

						int written = ch.write(buffer);
						if (written < 0) {
							throw new IOException("Cannot write to client");
						}
						buffer.rewind();
					}
				}
			}
			catch (IOException e) {
				// Server closed
			}
		}

		public void close() throws IOException {
			Thread thread = this.thread;
			if (thread != null) {
				thread.interrupt();
			}
			ServerSocketChannel server = this.server;
			if (server != null) {
				server.close();
			}
		}
	}

	/** Accepts connections and immediately closes them (forces client aborts). */
	private static final class ConnectionAbortServer extends CountDownLatch implements Runnable {

		final int port;
		private final ServerSocketChannel server;

		private ConnectionAbortServer(int port) {
			super(1);
			this.port = port;
			try {
				server = ServerSocketChannel.open();
			}
			catch (IOException e) {
				throw new RuntimeException(e);
			}
		}

		@Override
		public void run() {
			try {
				server.configureBlocking(true);
				server.socket()
				      .bind(new InetSocketAddress(port));
				countDown();
				while (true) {
					SocketChannel ch = server.accept();
					log.debug("ABORTING");
					ch.close();
				}
			}
			catch (Exception e) {
				Loggers.getLogger(this.getClass()).debug("", e);
			}
		}

		public void close() throws IOException {
			ServerSocketChannel server = this.server;
			if (server != null) {
				server.close();
			}
		}
	}

	/** Accepts a connection, reads one byte, then never responds (idle writes). */
	private static final class ConnectionTimeoutServer extends CountDownLatch implements Runnable {

		final int port;
		private final ServerSocketChannel server;

		private ConnectionTimeoutServer(int port) {
			super(1);
			this.port = port;
			try {
				server = ServerSocketChannel.open();
			}
			catch (IOException e) {
				throw new RuntimeException(e);
			}
		}

		@Override
		public void run() {
			try {
				server.configureBlocking(true);
				server.socket()
				      .bind(new InetSocketAddress(port));
				countDown();
				while (true) {
					SocketChannel ch = server.accept();
					ByteBuffer buff = ByteBuffer.allocate(1);
					ch.read(buff);
				}
			}
			catch (IOException e) {
				// ignore
			}
		}

		public void close() throws IOException {
			ServerSocketChannel server = this.server;
			if (server != null) {
				server.close();
			}
		}
	}

	/** Sends a newline byte every 100ms to keep the client's read side busy. */
	private static final class HeartbeatServer extends CountDownLatch implements Runnable {

		final int port;
		private final ServerSocketChannel server;

		private HeartbeatServer(int port) {
			super(1);
			this.port = port;
			try {
				server = ServerSocketChannel.open();
			}
			catch (IOException e) {
				throw new RuntimeException(e);
			}
		}

		@Override
		public void run() {
			try {
				server.configureBlocking(true);
				server.socket()
				      .bind(new InetSocketAddress(port));
				countDown();
				while (true) {
					SocketChannel ch = server.accept();
					while (server.isOpen()) {
						ByteBuffer out = ByteBuffer.allocate(1);
						out.put((byte) '\n');
						out.flip();
						ch.write(out);
						Thread.sleep(100);
					}
				}
			}
			catch (IOException e) {
				// Server closed
			}
			catch (InterruptedException ie) {
				// ignore
			}
		}

		public void close() throws IOException {
			ServerSocketChannel server = this.server;
			if (server != null) {
				server.close();
			}
		}
	}

	@Test
	void testIssue600_1() {
		doTestIssue600(true);
	}

	@Test
	void testIssue600_2() {
		doTestIssue600(false);
	}

	// Verifies pooled connections are served by more than one event-loop thread,
	// with and without explicitly configured LoopResources.
	private void doTestIssue600(boolean withLoop) {
		DisposableServer server =
				TcpServer.create()
				         .port(0)
				         .handle((req, res) -> res.send(req.receive()
				                                           .retain()
				                                           .delaySubscription(Duration.ofSeconds(1))))
				         .wiretap(true)
				         .bindNow();

		ConnectionProvider pool = ConnectionProvider.create("doTestIssue600", 10);
		LoopResources loop = LoopResources.create("test", 4, true);
		TcpClient client;
		if (withLoop) {
			client = TcpClient.create(pool)
			                  .remoteAddress(server::address)
			                  .runOn(loop);
		}
		else {
			client = TcpClient.create(pool)
			                  .remoteAddress(server::address);
		}

		Set<String> threadNames = new ConcurrentSkipListSet<>();

		Flux.range(1, 4)
		    .flatMap(i -> client.handle((in, out) -> {
		                            threadNames.add(Thread.currentThread().getName());
		                            return out.send(Flux.empty());
		                        })
		                        .connect())
		    .as(StepVerifier::create)
		    .expectNextCount(4)
		    .expectComplete()
		    .verify(Duration.ofSeconds(30));

		pool.dispose();
		loop.dispose();
		server.disposeNow();

		assertThat(threadNames.size()).isGreaterThan(1);
	}

	@Test
	void testRetryOnDifferentAddress() throws Exception {
		DisposableServer server =
				TcpServer.create()
				         .port(0)
				         .wiretap(true)
				         .handle((req, res) -> res.sendString(Mono.just("test")))
				         .bindNow();

		final CountDownLatch latch = new CountDownLatch(1);

		// first two attempts target wrong ports (port+2, port+1); the third
		// (port+0) succeeds, exercising retry with a changing remote address
		Supplier<SocketAddress> addressSupplier = new Supplier<SocketAddress>() {
			int i = 2;

			@Override
			public SocketAddress get() {
				return new InetSocketAddress("localhost", server.port() + i--);
			}
		};

		Connection conn =
				TcpClient.create()
				         .remoteAddress(addressSupplier)
				         .doOnConnected(connection -> latch.countDown())
				         .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 100)
				         .handle((in, out) -> Mono.never())
				         .wiretap(true)
				         .connect()
				         .retry()
				         .block(Duration.ofSeconds(30));
		assertThat(conn).isNotNull();

		assertThat(latch.await(30, TimeUnit.SECONDS)).as("latch await").isTrue();

		conn.disposeNow();
		server.disposeNow();
	}

	@Test
	void testReconnectWhenDisconnected() throws Exception {
		DisposableServer server =
				TcpServer.create()
				         .port(0)
				         .wiretap(true)
				         .handle((req, res) -> res.sendString(Mono.just("test")))
				         .bindNow();

		final CountDownLatch latch = new CountDownLatch(1);
		TcpClient client =
				TcpClient.create()
				         .port(echoServerPort)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 100) .handle((in, out) -> out.withConnection(Connection::dispose)) .wiretap(true); connect(client, true, latch); assertThat(latch.await(30, TimeUnit.SECONDS)).as("latch await").isTrue(); server.disposeNow(); } private void connect(TcpClient client, boolean reconnect, CountDownLatch latch) { client.connect() .subscribe( conn -> { if (reconnect) { conn.onTerminate() .subscribe(null, null, () -> connect(client, false, latch)); } }, null, latch::countDown); } @Test void testIssue585_1() throws Exception { DisposableServer server = TcpServer.create() .port(0) .handle((req, res) -> res.send(req.receive() .retain())) .wiretap(true) .bindNow(); CountDownLatch latch = new CountDownLatch(1); byte[] bytes = "test".getBytes(Charset.defaultCharset()); ByteBuf b1 = Unpooled.wrappedBuffer(bytes); ByteBuf b2 = Unpooled.wrappedBuffer(bytes); ByteBuf b3 = Unpooled.wrappedBuffer(bytes); WeakReference<ByteBuf> refCheck1 = new WeakReference<>(b1); WeakReference<ByteBuf> refCheck2 = new WeakReference<>(b2); WeakReference<ByteBuf> refCheck3 = new WeakReference<>(b3); Connection conn = TcpClient.create() .remoteAddress(server::address) .wiretap(true) .connectNow(); NettyOutbound out = conn.outbound(); Flux.concatDelayError( out.sendObject(Mono.error(new RuntimeException("test"))) .sendObject(b1) .then(), out.sendObject(Mono.error(new RuntimeException("test"))) .sendObject(b2) .then(), out.sendObject(Mono.error(new RuntimeException("test"))) .sendObject(b3) .then()) .doOnError(t -> latch.countDown()) .subscribe(conn.disposeSubscriber()); assertThat(latch.await(30, TimeUnit.SECONDS)).as("latch await").isTrue(); assertThat(b1.refCnt()).isEqualTo(0); b1 = null; checkReference(refCheck1); assertThat(b2.refCnt()).isEqualTo(0); b2 = null; checkReference(refCheck2); assertThat(b3.refCnt()).isEqualTo(0); b3 = null; checkReference(refCheck3); server.disposeNow(); conn.disposeNow(); } @Test void testIssue585_2() throws Exception { DisposableServer server 
= TcpServer.create() .port(0) .handle((req, res) -> res.send(req.receive() .retain())) .wiretap(true) .bindNow(); byte[] bytes = "test".getBytes(Charset.defaultCharset()); ByteBuf b1 = Unpooled.wrappedBuffer(bytes); ByteBuf b2 = Unpooled.wrappedBuffer(bytes); ByteBuf b3 = Unpooled.wrappedBuffer(bytes); WeakReference<ByteBuf> refCheck1 = new WeakReference<>(b1); WeakReference<ByteBuf> refCheck2 = new WeakReference<>(b2); WeakReference<ByteBuf> refCheck3 = new WeakReference<>(b3); Connection conn = TcpClient.create() .remoteAddress(server::address) .wiretap(true) .connectNow(); NettyOutbound out = conn.outbound(); out.sendObject(b1) .then() .block(Duration.ofSeconds(30)); assertThat(b1.refCnt()).isEqualTo(0); b1 = null; checkReference(refCheck1); out.sendObject(b2) .then() .block(Duration.ofSeconds(30)); assertThat(b2.refCnt()).isEqualTo(0); b2 = null; checkReference(refCheck2); out.sendObject(b3) .then() .block(Duration.ofSeconds(30)); assertThat(b3.refCnt()).isEqualTo(0); b3 = null; checkReference(refCheck3); server.disposeNow(); conn.disposeNow(); } private void checkReference(WeakReference<ByteBuf> ref) throws Exception { for (int i = 0; i < 10; i++) { if (ref.get() == null) { return; } System.gc(); Thread.sleep(100); } assertThat(ref.get()).isNull(); } @Test void testTcpClientWithDomainSocketsNIOTransport() { LoopResources loop = LoopResources.create("testTcpClientWithDomainSocketsNIOTransport"); try { assertThatExceptionOfType(IllegalArgumentException.class) .isThrownBy(() -> TcpClient.create() .runOn(loop, false) .remoteAddress(() -> new DomainSocketAddress("/tmp/test.sock")) .connectNow()); } finally { loop.disposeLater() .block(Duration.ofSeconds(30)); } } @Test void testTcpClientWithDomainSocketsWithHost() { assertThatExceptionOfType(IllegalArgumentException.class) .isThrownBy(() -> TcpClient.create() .remoteAddress(() -> new DomainSocketAddress("/tmp/test.sock")) .host("localhost") .connectNow()); } @Test void testTcpClientWithDomainSocketsWithPort() { 
assertThatExceptionOfType(IllegalArgumentException.class) .isThrownBy(() -> TcpClient.create() .remoteAddress(() -> new DomainSocketAddress("/tmp/test.sock")) .port(1234) .connectNow()); } @Test @SuppressWarnings({"deprecation", "FutureReturnValueIgnored"}) void testBootstrapUnsupported() { assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(b -> { // FutureReturnValueIgnored is deliberate b.bind(); return b; })); assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(b -> { // FutureReturnValueIgnored is deliberate b.bind(NetUtil.LOCALHOST, 8000); return b; })); assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(b -> { // FutureReturnValueIgnored is deliberate b.bind(8000); return b; })); assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(b -> { // FutureReturnValueIgnored is deliberate b.bind(new InetSocketAddress("localhost", 8000)); return b; })); assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(b -> { // FutureReturnValueIgnored is deliberate b.bind("localhost", 8000); return b; })); assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(b -> b.channel(io.netty.channel.socket.SocketChannel.class))); assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(Bootstrap::clone)); assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(b -> { // FutureReturnValueIgnored is deliberate b.connect(); return b; })); assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(b -> { // FutureReturnValueIgnored is deliberate b.connect(NetUtil.LOCALHOST, 8000); return b; })); 
assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(b -> { // FutureReturnValueIgnored is deliberate b.connect(new InetSocketAddress("localhost", 8000)); return b; })); assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(b -> { // FutureReturnValueIgnored is deliberate b.connect(new InetSocketAddress("localhost", 8001), new InetSocketAddress("localhost", 8002)); return b; })); assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(b -> { // FutureReturnValueIgnored is deliberate b.connect("localhost", 8000); return b; })); assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(b -> { b.equals(new Bootstrap()); return b; })); assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(b -> { b.hashCode(); return b; })); assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(b -> { // FutureReturnValueIgnored is deliberate b.register(); return b; })); assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(b -> { b.toString(); return b; })); assertThatExceptionOfType(UnsupportedOperationException.class) .isThrownBy(() -> TcpClient.create().bootstrap(Bootstrap::validate)); } @Test @SuppressWarnings("deprecation") void testBootstrap() { DisposableServer server = TcpServer.create() .port(0) .handle((req, res) -> res.send(req.receive() .retain())) .wiretap(true) .bindNow(); AtomicInteger invoked = new AtomicInteger(); Connection conn = TcpClient.create() .bootstrap(b -> b.attr(AttributeKey.valueOf("testBootstrap"), "testBootstrap") .group(new NioEventLoopGroup()) .option(ChannelOption.valueOf("testBootstrap"), "testBootstrap") .remoteAddress(server.address()) 
.resolver(DefaultAddressResolverGroup.INSTANCE) .handler(new ChannelInboundHandlerAdapter() { @Override public void channelActive(ChannelHandlerContext ctx) throws Exception { invoked.set(1); super.channelActive(ctx); } })) .connectNow(); conn.outbound() .sendString(Mono.just("testBootstrap")) .then() .subscribe(); String result = conn.inbound() .receive() .asString() .blockFirst(); assertThat(result).isEqualTo("testBootstrap"); assertThat(invoked.get()).isEqualTo(1); conn.disposeNow(); server.disposeNow(); } @Test @SuppressWarnings("deprecation") void testAddressSupplier() { DisposableServer server = TcpServer.create() .port(0) .handle((req, res) -> res.send(req.receive() .retain())) .wiretap(true) .bindNow(); Connection conn = TcpClient.create() .addressSupplier(server::address) .connectNow(); conn.outbound() .sendString(Mono.just("testAddressSupplier")) .then() .subscribe(); String result = conn.inbound() .receive() .asString() .blockFirst(); assertThat(result).isEqualTo("testAddressSupplier"); conn.disposeNow(); server.disposeNow(); } @Test void testDefaultResolverWithCustomEventLoop() throws Exception { LoopResources loop1 = LoopResources.create("test", 1, true); NioEventLoopGroup loop2 = new NioEventLoopGroup(1); TcpClient client = TcpClient.create(); TcpClient newClient = null; try { assertThat(client.configuration().resolver()).isNull(); newClient = client.runOn(loop1); assertThat(newClient.configuration().resolver()).isNotNull(); newClient.configuration() .resolver() .getResolver(loop2.next()) .resolve(new InetSocketAddress("example.com", 443)) .addListener(f -> assertThat(Thread.currentThread().getName()).startsWith("test-")); } finally { if (newClient != null && newClient.configuration().resolver() != null) { newClient.configuration() .resolver() .close(); } loop1.disposeLater() .block(Duration.ofSeconds(10)); loop2.shutdownGracefully() .get(10, TimeUnit.SECONDS); } } @Test void testCustomLoopCustomResolver() { LoopResources loop1 = 
LoopResources.create("loop1", 1, true); LoopResources loop2 = LoopResources.create("loop2", 1, true); LoopResources loop3 = LoopResources.create("loop3", 1, true); TcpClient client = TcpClient.create(); try { assertThat(client.configuration().loopResources()).isSameAs(TcpResources.get()); assertThat(client.configuration().resolver()).isNull(); assertThat(client.configuration().getNameResolverProvider()).isNull(); client = client.runOn(loop1); assertThat(client.configuration().loopResources()).isSameAs(loop1); AddressResolverGroup<?> resolver1 = client.configuration().resolver(); NameResolverProvider nameResolverProvider1 = client.configuration().getNameResolverProvider(); assertThat(resolver1).isNotNull(); assertThat(nameResolverProvider1).isNotNull(); resolver1.close(); client = client.runOn(loop2); assertThat(client.configuration().loopResources()).isSameAs(loop2); AddressResolverGroup<?> resolver2 = client.configuration().resolver(); NameResolverProvider nameResolverProvider2 = client.configuration().getNameResolverProvider(); assertThat(resolver2).isNotNull().isNotSameAs(resolver1); assertThat(nameResolverProvider2).isNotNull().isSameAs(nameResolverProvider1); resolver2.close(); client = client.resolver(DefaultAddressResolverGroup.INSTANCE); assertThat(client.configuration().loopResources()).isSameAs(loop2); assertThat(client.configuration().resolver()).isSameAs(DefaultAddressResolverGroup.INSTANCE); assertThat(client.configuration().getNameResolverProvider()).isNull(); client = client.runOn(loop3); assertThat(client.configuration().loopResources()).isSameAs(loop3); assertThat(client.configuration().resolver()).isSameAs(DefaultAddressResolverGroup.INSTANCE); assertThat(client.configuration().getNameResolverProvider()).isNull(); } finally { loop1.disposeLater() .block(Duration.ofSeconds(5)); loop2.disposeLater() .block(Duration.ofSeconds(5)); loop3.disposeLater() .block(Duration.ofSeconds(5)); } } @Test public void 
testSharedNameResolver_SharedClientWithConnectionPool() throws InterruptedException { doTestSharedNameResolver(TcpClient.create(), true); } @Test public void testSharedNameResolver_SharedClientNoConnectionPool() throws InterruptedException { doTestSharedNameResolver(TcpClient.newConnection(), true); } @Test public void testSharedNameResolver_NotSharedClientWithConnectionPool() throws InterruptedException { doTestSharedNameResolver(TcpClient.create(), false); } @Test public void testSharedNameResolver_NotSharedClientNoConnectionPool() throws InterruptedException { doTestSharedNameResolver(TcpClient.newConnection(), false); } private void doTestSharedNameResolver(TcpClient client, boolean sharedClient) throws InterruptedException { DisposableServer disposableServer = TcpServer.create() .port(0) .handle((req, res) -> res.sendString(Mono.just("testNoOpenedFileDescriptors"))) .bindNow(Duration.ofSeconds(30)); LoopResources loop = LoopResources.create("doTestSharedNameResolver", 4, true); AtomicReference<List<AddressResolverGroup<?>>> resolvers = new AtomicReference<>(new ArrayList<>()); try { int count = 8; CountDownLatch latch = new CountDownLatch(count); TcpClient localClient = null; if (sharedClient) { localClient = client.runOn(loop) .port(disposableServer.port()) .doOnConnect(config -> resolvers.get().add(config.resolver())) .doOnConnected(conn -> conn.onDispose(latch::countDown)); } for (int i = 0; i < count; i++) { if (!sharedClient) { localClient = client.runOn(loop) .port(disposableServer.port()) .doOnConnect(config -> resolvers.get().add(config.resolver())) .doOnConnected(conn -> conn.onDispose(latch::countDown)); } localClient.handle((in, out) -> in.receive().then()) .connect() .subscribe(); } assertThat(latch.await(30, TimeUnit.SECONDS)).isTrue(); assertThat(resolvers.get().size()).isEqualTo(count); AddressResolverGroup<?> resolver = resolvers.get().get(0); assertThat(resolvers.get()).allMatch(addressResolverGroup -> addressResolverGroup == resolver); } 
finally { disposableServer.disposeNow(); loop.disposeLater() .block(); } } /* https://github.com/reactor/reactor-netty/issues/1765 */ @Test void noSystemProxySettings() { Properties props = System.getProperties(); assumeThat(!(props.containsKey("http.proxyHost") || props.containsKey("https.proxyHost") || props.containsKey("socksProxyHost"))).isTrue(); DisposableServer disposableServer = TcpServer.create() .port(0) .handle((req, res) -> res.sendString(Mono.just("noSystemProxySettings"))) .bindNow(); AtomicReference<AddressResolverGroup<?>> resolver = new AtomicReference<>(); Connection conn = null; try { conn = TcpClient.create() .host("localhost") .port(disposableServer.port()) .proxyWithSystemProperties() .doOnConnect(conf -> resolver.set(conf.resolver())) .connectNow(); } finally { disposableServer.disposeNow(); if (conn != null) { conn.disposeNow(); } } assertThat(resolver.get()).isNull(); } }
/* * Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.pivotal.gemfirexd.internal.engine.hadoop.mapred; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.File; import java.io.IOException; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.Properties; import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.InputSplit; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RecordReader; import org.apache.hadoop.mapred.lib.CombineFileSplit; import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl; import com.gemstone.gemfire.internal.FileUtil; import com.pivotal.gemfirexd.FabricServiceManager; import com.pivotal.gemfirexd.TestUtil; import com.pivotal.gemfirexd.callbacks.Event.Type; import com.pivotal.gemfirexd.hadoop.mapred.Key; import com.pivotal.gemfirexd.hadoop.mapred.Row; import com.pivotal.gemfirexd.hadoop.mapred.RowInputFormat; import com.pivotal.gemfirexd.jdbc.JdbcTestBase; public class EventInputFormatTest extends JdbcTestBase { String HDFS_DIR = 
"./myhdfs"; public void testEventInputFormat() throws Exception { getConnection(); Connection conn = startNetserverAndGetLocalNetConnection(); Statement st = conn.createStatement(); st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + HDFS_DIR + "' batchtimeinterval 5000 milliseconds"); st.execute("create table app.mytab1 (col1 int primary key, col2 varchar(100)) persistent hdfsstore (myhdfs) BUCKETS 1"); PreparedStatement ps = conn.prepareStatement("insert into mytab1 values (?, ?)"); int NUM_ENTRIES = 20; for(int i = 0; i < NUM_ENTRIES; i++) { ps.setInt(1, i); ps.setString(2, "Value-" + System.nanoTime()); ps.execute(); } //Wait for data to get to HDFS... String qname = HDFSStoreFactoryImpl.getEventQueueName("/APP/MYTAB1"); st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)"); Configuration conf = new Configuration(); FileSystem fs = FileSystem.get(conf); FileStatus[] list = fs.listStatus(new Path(HDFS_DIR + "/APP_MYTAB1/0/")); assertEquals(1, list.length); conf.set(RowInputFormat.INPUT_TABLE, "MYTAB1"); conf.set(RowInputFormat.HOME_DIR, HDFS_DIR); JobConf job = new JobConf(conf); job.setBoolean(RowInputFormat.CHECKPOINT_MODE, false); RowInputFormat ipformat = new RowInputFormat(); InputSplit[] splits = ipformat.getSplits(job, 2); assertEquals(1, splits.length); CombineFileSplit split = (CombineFileSplit) splits[0]; assertEquals(1, split.getPaths().length); assertEquals(list[0].getPath().toString(), split.getPath(0).toString()); assertEquals(0, split.getOffset(0)); assertEquals(list[0].getLen(), split.getLength(0)); RecordReader<Key, Row> rr = ipformat.getRecordReader(split, job, null); Key key = rr.createKey(); Row value = rr.createValue(); int count = 0; while (rr.next(key, value)) { assertEquals(count++, value.getRowAsResultSet().getInt("col1")); } assertEquals(20, count); TestUtil.shutDown(); } public void testNoSecureHdfsCheck() throws Exception { getConnection(); Connection conn = 
startNetserverAndGetLocalNetConnection(); Statement st = conn.createStatement(); st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + HDFS_DIR + "' batchtimeinterval 5000 milliseconds"); st.execute("create table app.mytab1 (col1 int primary key, col2 varchar(100)) persistent hdfsstore (myhdfs) BUCKETS 1"); PreparedStatement ps = conn.prepareStatement("insert into mytab1 values (?, ?)"); int NUM_ENTRIES = 20; for(int i = 0; i < NUM_ENTRIES; i++) { ps.setInt(1, i); ps.setString(2, "Value-" + System.nanoTime()); ps.execute(); } //Wait for data to get to HDFS... String qname = HDFSStoreFactoryImpl.getEventQueueName("/APP/MYTAB1"); st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)"); stopNetServer(); FabricServiceManager.currentFabricServiceInstance().stop(new Properties()); Configuration conf = new Configuration(); FileSystem fs = FileSystem.get(conf); FileStatus[] list = fs.listStatus(new Path(HDFS_DIR + "/APP_MYTAB1/0/")); assertEquals(1, list.length); conf.set(RowInputFormat.INPUT_TABLE, "MYTAB1"); conf.set(RowInputFormat.HOME_DIR, HDFS_DIR); conf.set("hadoop.security.authentication", "kerberos"); JobConf job = new JobConf(conf); job.setBoolean(RowInputFormat.CHECKPOINT_MODE, false); RowInputFormat ipformat = new RowInputFormat(); InputSplit[] splits = ipformat.getSplits(job, 2); assertEquals(1, splits.length); CombineFileSplit split = (CombineFileSplit) splits[0]; assertEquals(1, split.getPaths().length); assertEquals(list[0].getPath().toString(), split.getPath(0).toString()); assertEquals(0, split.getOffset(0)); assertEquals(list[0].getLen(), split.getLength(0)); RecordReader<Key, Row> rr = ipformat.getRecordReader(split, job, null); Key key = rr.createKey(); Row value = rr.createValue(); int count = 0; while (rr.next(key, value)) { assertEquals(count++, value.getRowAsResultSet().getInt("col1")); } assertEquals(20, count); TestUtil.shutDown(); } public void testNBuckets1Split() throws Exception { getConnection(); Connection 
conn = startNetserverAndGetLocalNetConnection(); Statement st = conn.createStatement(); st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + HDFS_DIR + "' batchtimeinterval 5000 milliseconds"); st.execute("create table app.mytab1 (col1 int primary key, col2 varchar(100)) persistent hdfsstore (myhdfs) BUCKETS 5"); PreparedStatement ps = conn.prepareStatement("insert into mytab1 values (?, ?)"); int NUM_ENTRIES = 20; for(int i = 0; i < NUM_ENTRIES; i++) { ps.setInt(1, i); ps.setString(2, "Value-" + System.nanoTime()); ps.execute(); } //Wait for data to get to HDFS... String qname = HDFSStoreFactoryImpl.getEventQueueName("/APP/MYTAB1"); st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)"); Configuration conf = new Configuration(); FileSystem fs = FileSystem.get(conf); FileStatus[] list = fs.listStatus(new Path(HDFS_DIR + "/APP_MYTAB1/")); assertEquals(5, list.length); int hopCount = 0; for (FileStatus bucket : list) { FileStatus[] hops = fs.listStatus(bucket.getPath()); hopCount += hops.length; } assertEquals(5, hopCount); conf.set(RowInputFormat.INPUT_TABLE, "MYTAB1"); conf.set(RowInputFormat.HOME_DIR, HDFS_DIR); JobConf job = new JobConf(conf); job.setBoolean(RowInputFormat.CHECKPOINT_MODE, false); RowInputFormat ipformat = new RowInputFormat(); InputSplit[] splits = ipformat.getSplits(job, 2); assertEquals(1, splits.length); CombineFileSplit split = (CombineFileSplit) splits[0]; assertEquals(hopCount, split.getPaths().length); RecordReader<Key, Row> rr = ipformat.getRecordReader(split, job, null); Key key = rr.createKey(); Row value = rr.createValue(); int[] check = new int[NUM_ENTRIES]; for (int i : check) { check[i] = 0; } while (rr.next(key, value)) { int index = value.getRowAsResultSet().getInt("col1"); check[index]++; } for (int i : check) { assertEquals(check[i], 1); } TestUtil.shutDown(); } public void testTimeFilters() throws Exception { getConnection(); Connection conn = startNetserverAndGetLocalNetConnection(); 
Statement st = conn.createStatement(); st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + HDFS_DIR + "' batchtimeinterval 5000 milliseconds"); st.execute("create table app.mytab1 (col1 int primary key, col2 varchar(100)) persistent hdfsstore (myhdfs) BUCKETS 1"); PreparedStatement ps = conn.prepareStatement("insert into mytab1 values (?, ?)"); int NUM_ENTRIES = 10; for(int i = 0; i < NUM_ENTRIES; i++) { ps.setInt(1, i); ps.setString(2, "Value-" + System.nanoTime()); ps.execute(); TimeUnit.MILLISECONDS.sleep(10); } //Wait for data to get to HDFS... String qname = HDFSStoreFactoryImpl.getEventQueueName("/APP/MYTAB1"); st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)"); Configuration conf = new Configuration(); FileSystem fs = FileSystem.get(conf); FileStatus[] list = fs.listStatus(new Path(HDFS_DIR + "/APP_MYTAB1/0/")); assertEquals(1, list.length); conf.set(RowInputFormat.INPUT_TABLE, "MYTAB1"); conf.set(RowInputFormat.HOME_DIR, HDFS_DIR); JobConf job = new JobConf(conf); job.setBoolean(RowInputFormat.CHECKPOINT_MODE, false); RowInputFormat ipformat = new RowInputFormat(); InputSplit[] splits = ipformat.getSplits(job, 2); assertEquals(1, splits.length); RecordReader<Key, Row> rr = ipformat.getRecordReader(splits[0], job, null); Key key = rr.createKey(); Row value = rr.createValue(); int count = 0; long ts0 = 0; // timestamp of first event long ts3 = 0; long ts5 = 0; long ts9 = 0; // timestamp of last event while (rr.next(key, value)) { switch (count) { case 0: ts0 = value.getTimestamp(); break; case 3: ts3 = value.getTimestamp(); break; case 5: ts5 = value.getTimestamp(); break; case 9: ts9 = value.getTimestamp(); break; } assertEquals(count++, value.getRowAsResultSet().getInt("col1")); } assertEquals(10, count); assertTrue(ts0 > 0); assertTrue(ts3 > 0); assertTrue(ts5 > 0); assertTrue(ts9 > 0); job = new JobConf(conf); job.setBoolean(RowInputFormat.CHECKPOINT_MODE, false); job.setLong(RowInputFormat.START_TIME_MILLIS, ts0 - 
1); rr = ipformat.getRecordReader(splits[0], job, null); key = rr.createKey(); value = rr.createValue(); count = 0; while (rr.next(key, value)) { assertEquals(count++, value.getRowAsResultSet().getInt("col1")); } assertEquals(10, count); job = new JobConf(conf); job.setBoolean(RowInputFormat.CHECKPOINT_MODE, false); job.setLong(RowInputFormat.START_TIME_MILLIS, ts3); rr = ipformat.getRecordReader(splits[0], job, null); key = rr.createKey(); value = rr.createValue(); count = 3; while (rr.next(key, value)) { assertEquals(count++, value.getRowAsResultSet().getInt("col1")); } assertEquals(10, count); job = new JobConf(conf); job.setBoolean(RowInputFormat.CHECKPOINT_MODE, false); job.setLong(RowInputFormat.START_TIME_MILLIS, ts9); rr = ipformat.getRecordReader(splits[0], job, null); key = rr.createKey(); value = rr.createValue(); count = 9; while (rr.next(key, value)) { assertEquals(count++, value.getRowAsResultSet().getInt("col1")); } assertEquals(10, count); job = new JobConf(conf); job.setBoolean(RowInputFormat.CHECKPOINT_MODE, false); job.setLong(RowInputFormat.START_TIME_MILLIS, ts9 + 1); rr = ipformat.getRecordReader(splits[0], job, null); key = rr.createKey(); value = rr.createValue(); while (rr.next(key, value)) { fail(); } job = new JobConf(conf); job.setBoolean(RowInputFormat.CHECKPOINT_MODE, false); job.setLong(RowInputFormat.END_TIME_MILLIS, ts9); rr = ipformat.getRecordReader(splits[0], job, null); key = rr.createKey(); value = rr.createValue(); count = 0; while (rr.next(key, value)) { assertEquals(count++, value.getRowAsResultSet().getInt("col1")); } assertEquals(10, count); job = new JobConf(conf); job.setBoolean(RowInputFormat.CHECKPOINT_MODE, false); job.setLong(RowInputFormat.END_TIME_MILLIS, ts5 - 1); rr = ipformat.getRecordReader(splits[0], job, null); key = rr.createKey(); value = rr.createValue(); count = 0; while (rr.next(key, value)) { assertEquals(count++, value.getRowAsResultSet().getInt("col1")); } assertEquals(5, count); job = new 
JobConf(conf); // completes "job = new" begun on the previous line
// Closed event window [ts3, ts5]: rows 3..5 were inserted inside it,
// so the reader must yield exactly col1 = 3, 4, 5.
job.setBoolean(RowInputFormat.CHECKPOINT_MODE, false);
job.setLong(RowInputFormat.START_TIME_MILLIS, ts3);
job.setLong(RowInputFormat.END_TIME_MILLIS, ts5);
rr = ipformat.getRecordReader(splits[0], job, null);
key = rr.createKey();
value = rr.createValue();
count = 3;
while (rr.next(key, value)) {
    assertEquals(count++, value.getRowAsResultSet().getInt("col1"));
}
assertEquals(6, count);
TestUtil.shutDown();
}

/** Row serialization round-trip with concurrency checks enabled. */
public void testRowSerDe() throws Exception {
    doTestRowSerDe(true);
}

/** Row serialization round-trip with concurrency checks disabled. */
public void testRowSerDeNoConcurrencyChecks() throws Exception {
    doTestRowSerDe(false);
}

/**
 * Inserts one row into an HDFS-backed table, waits for the AsyncEventQueue
 * flush, reads the row back through RowInputFormat, and verifies that a Row
 * survives a DataOutput write / readFields round trip unchanged.
 */
private void doTestRowSerDe(boolean concurrencyChecks) throws Exception {
    getConnection();
    Connection conn = startNetserverAndGetLocalNetConnection();
    final long statTS = System.currentTimeMillis();
    Statement st = conn.createStatement();
    st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + HDFS_DIR + "' batchtimeinterval 5000 milliseconds");
    String concurrency = "persistent ENABLE CONCURRENCY CHECKS";
    st.execute("create table app.mytab1 (col1 int primary key, col2 varchar(100)) partition by primary key buckets 1 hdfsstore (myhdfs) " +(concurrencyChecks ? concurrency : ""));
    PreparedStatement ps = conn.prepareStatement("insert into mytab1 values (?, ?)");
    ps.setInt(1, 1);
    ps.setString(2, "Value-1");
    ps.execute();
    //Wait for data to get to HDFS...
    String qname = HDFSStoreFactoryImpl.getEventQueueName("/APP/MYTAB1");
    st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)");
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    FileStatus[] list = fs.listStatus(new Path(HDFS_DIR + "/APP_MYTAB1/0/"));
    assertEquals(1, list.length);
    conf.set(RowInputFormat.INPUT_TABLE, "MYTAB1");
    conf.set(RowInputFormat.HOME_DIR, HDFS_DIR);
    JobConf job = new JobConf(conf);
    job.setBoolean(RowInputFormat.CHECKPOINT_MODE, false);
    RowInputFormat ipformat = new RowInputFormat();
    InputSplit[] splits = ipformat.getSplits(job, 2);
    assertEquals(1, splits.length);
    RecordReader<Key, Row> rr = ipformat.getRecordReader(splits[0], job, null);
    Key key = rr.createKey();
    Row value = rr.createValue();
    assertTrue(rr.next(key, value));
    assertEquals(1, value.getRowAsResultSet().getInt(1));
    assertEquals("Value-1", value.getRowAsResultSet().getString(2));
    assertTrue(value.getTimestamp() > statTS);
    assertFalse(value.getRowAsResultSet().next());
    // Serialize the Row, then deserialize into a fresh instance and re-check.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);
    value.write(dos);
    dos.close();
    byte[] buf = baos.toByteArray();
    DataInputStream dis = new DataInputStream(new ByteArrayInputStream(buf));
    Row row = new Row();
    row.readFields(dis);
    dis.close();
    assertEquals(1, row.getRowAsResultSet().getInt(1));
    assertEquals("Value-1", row.getRowAsResultSet().getString(2));
    assertFalse(value.getRowAsResultSet().next());
    TestUtil.shutDown();
}

// Delete-event visibility matrix: {read-write, write-only} x {pk, no-pk} x {tx, no-tx}.
public void testDeleteForRW() throws Exception { deleteTest(false, true); }
public void testDeleteForWriteOnly() throws Exception { deleteTest(true, true); }
public void testDeleteForWriteOnly_transactional() throws Exception { deleteTest(true, true, true); }
public void testDeleteForRWNoPk() throws Exception { deleteTest(false, false); }
public void testDeleteForWriteOnlyNoPK() throws Exception { deleteTest(true, false); }
public void testDeleteForWriteOnlyNoPK_transactional() throws Exception { deleteTest(true, false, true); }

/** Non-transactional shortcut for the three-argument deleteTest. */
private void deleteTest( boolean writeOnly, boolean primaryKey) throws Exception {
    deleteTest(writeOnly, primaryKey, false);
}

/** Returns an auto-commit connection with READ_COMMITTED isolation. */
public static Connection getTxConnection() throws SQLException {
    Connection conn = getConnection();
    conn.setAutoCommit(true);
    conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
    return conn;
}

/**
 * Inserts rows 0..2, deletes row 1, flushes to HDFS, then verifies how the
 * delete event is surfaced by RowInputFormat. Per the assertions below: for
 * read-write tables the delete event appears right after row 0 (in key
 * order), for write-only tables it is appended after all inserts.
 */
private void deleteTest( boolean writeOnly, boolean primaryKey, boolean isTransactional) throws Exception {
    //getConnection();
    Connection conn = null;
    if (isTransactional) {
        conn = getTxConnection();//startNetserverAndGetLocalNetConnection();
    } else {
        conn = getConnection();
    }
    Statement st = conn.createStatement();
    st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + HDFS_DIR + "' batchtimeinterval 2000 milliseconds");
    String primaryKeyString = primaryKey ? "primary key" : "";
    st.execute("create table app.mytab1 (col1 int " + primaryKeyString + ", col2 varchar(100)) BUCKETS 1 persistent hdfsstore (myhdfs) " + (writeOnly? " WRITEONLY " : ""));
    PreparedStatement ps = conn.prepareStatement("insert into mytab1 values (?, ?)");
    for(int i = 0; i < 3; i++) {
        ps.setInt(1, i);
        ps.setString(2, "Value-" + System.nanoTime());
        ps.execute();
    }
    st.execute("delete from mytab1 where col1 = 1");
    //Wait for data to get to HDFS...
    String qname = HDFSStoreFactoryImpl.getEventQueueName("/APP/MYTAB1");
    st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)");
    TestUtil.shutDown();
    FileStatus[] list = null;
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Poll up to ~10s (20 x 500ms) for exactly one flushed file to appear.
    for (int i = 0; i < 20; i++) {
        list = fs.listStatus(new Path(HDFS_DIR + "/APP_MYTAB1/0/"));
        if (list.length == 1) {
            break;
        }
        Thread.sleep(500);
    }
    if (list.length != 1) {
        fail("unexpected files: " + java.util.Arrays.toString(list));
    }
    conf.set(RowInputFormat.INPUT_TABLE, "MYTAB1");
    conf.set(RowInputFormat.HOME_DIR, HDFS_DIR);
    JobConf job = new JobConf(conf);
    job.setBoolean(RowInputFormat.CHECKPOINT_MODE, false);
    RowInputFormat ipformat = new RowInputFormat();
    InputSplit[] splits = ipformat.getSplits(job, 2);
    RecordReader<Key, Row> rr = ipformat.getRecordReader(splits[0], job, null);
    Key key = rr.createKey();
    Row value = rr.createValue();
    rr.next(key, value);
    assertEquals(0, value.getRowAsResultSet().getInt("col1"));
    if (!writeOnly) {
        // Read-write table: tombstone for key 1 shows up in key order.
        rr.next(key, value);
        checkForDeletedRow(value, primaryKey);
    }
    rr.next(key, value);
    assertEquals(1, value.getRowAsResultSet().getInt("col1"));
    rr.next(key, value);
    assertEquals(2, value.getRowAsResultSet().getInt("col1"));
    if (writeOnly) {
        // Write-only log: the delete event is appended after the inserts.
        rr.next(key, value);
        checkForDeletedRow(value, primaryKey);
    }
    assertFalse(rr.next(key, value));
    TestUtil.shutDown();
}

/**
 * Asserts that the Row is an AFTER_DELETE event. With a primary key the key
 * column survives in the tombstone; without one all columns read as null.
 * NOTE(review): the "shoud" typo in the message below is preserved verbatim.
 */
private void checkForDeletedRow(Row value, boolean primaryKey) throws IOException, SQLException {
    assertTrue("Operation shoud be destroy but it is " + value.getEventType(), value.getEventType().equals(Type.AFTER_DELETE));
    ResultSet rs = value.getRowAsResultSet();
    if(primaryKey) {
        assertEquals(1, rs.getInt("col1"));
    } else {
        assertEquals(null, rs.getObject("col1"));
    }
    assertEquals(null, rs.getString("col2"));
}

// Remove any leftover HDFS directory so every test starts from scratch.
@Override public void setUp() throws Exception {
    FileUtil.delete(new File(HDFS_DIR));
    super.setUp();
}

@Override public void tearDown() throws Exception {
    super.tearDown();
    FileUtil.delete(new File(HDFS_DIR));
}

public EventInputFormatTest(String name) { super(name); }
}
package com.scrye.badgertunes; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; import java.io.File; import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.io.Reader; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Set; public class RealNode implements Node { private String name; private String filename; private ArrayList<Node> children; private RealNode parent; private HashMap<String,Boolean> tags = new HashMap<String,Boolean>(); private int scroll_y; @Override public String toString() { return getName(); } public String getName() { return name; } @Override public String getFilename() { return filename; } @Override public ArrayList<Node> getChildren() { return children; } @Override public Node getParent() { return parent; } /** @brief Return a tree of FakeNodes which have the given tag. If tag is "None", return this node. 
*/ public Node filter(String tag) { if(tag == "None") { return this; } else { return filterInternal(tag, false); } } private FakeNode filterInternal(String tag, boolean tag_base_value) { Boolean value = tags.get(tag); boolean tag_val = (value != null && value.booleanValue() == true || value == null && tag_base_value == true); if(children == null) { if(tag_val == true) { return new FakeNode(this); } else { return null; } } else { FakeNode new_node = new FakeNode(this); for(Node child: children) { RealNode real_child = (RealNode) child; FakeNode new_child = real_child.filterInternal(tag, tag_val); if(new_child != null) { new_node.addChild(new_child); } } if(new_node.getChildren() != null) { return new_node; } else { return null; } } } private boolean hasTag(String tag) { RealNode node = this; while(node != null) { Boolean value = node.tags.get(tag); if(value != null) { return value.booleanValue(); } node = node.parent; } return false; } /** @brief Recursively scan this tag and all its children, adding positive tags to tag_set. */ public void fillTagSet(Set<String> tag_set) { // Harvest all the positive tag keys in this node. for(Map.Entry<String, Boolean> entry : tags.entrySet()) { String tag = entry.getKey(); if(entry.getValue().booleanValue() == true) { tag_set.add(tag); } } // recursively call this on all children if(getChildren() != null) { for(Node child : getChildren()) { child.fillTagSet(tag_set); } } } /* (non-Javadoc) * @see com.scrye.badgertunes.NodeInterface#writeTags(java.util.HashMap) */ @Override public void writeTags(HashMap<String,Boolean> tag_values) { // Clear all tags and re-write them based on tag_values and parents' tags. tags.clear(); // Loop over all tags in the incoming map for(Map.Entry<String, Boolean> entry : tag_values.entrySet()) { String tag = entry.getKey(); boolean value = entry.getValue().booleanValue(); // For each tag, go up the Node tree to see what the parents think of it. 
RealNode node = this; while(node != null) { Boolean tag_val = node.tags.get(tag); if(tag_val != null) { // Only store this tag locally if the parent's version is different. if(tag_val.booleanValue() != value) { tags.put(tag, value); } // The first parent we find with an opinion is the only one we care about. break; } node = node.parent; } // If we got to the root without seeing this tag and it's positive, add it in. if(node == null && value == true) { tags.put(tag, value); } } writeTagsToFile(); } public static String readFile( String filePath ) throws IOException { Reader reader = new FileReader( filePath ); StringBuilder sb = new StringBuilder(); char buffer[] = new char[16384]; // read 16k blocks int len; // how much content was read? while( ( len = reader.read( buffer ) ) > 0 ){ sb.append( buffer, 0, len ); } reader.close(); return sb.toString(); } // Tag file format: (json) // this_dir: {tag1: true, tag2: false}, // filename.mp3: {tag2: true, tag3: true}, // other_file.mp3: {tag1: false} private void writeTagsToFile() { String tagfile; if(getChildren() == null) { tagfile = getParent().getFilename(); } else { tagfile = getFilename(); } tagfile += "/tags"; JSONObject json = null; try { String tagfile_contents = readFile(tagfile); json = new JSONObject(tagfile_contents); } catch (IOException e) { } catch (JSONException e) { } if(json == null) { json = new JSONObject(); } String key; if(getChildren() == null) { key = getName(); } else { key = "this_dir"; } JSONObject tags_json = new JSONObject(); for(Map.Entry<String, Boolean> entry : tags.entrySet()) { String tag = entry.getKey(); boolean value = entry.getValue().booleanValue(); try { tags_json.put(tag, value); } catch (JSONException e) { // TODO Auto-generated catch block e.printStackTrace(); } } try { json.put(key, tags_json); String json_string_out = json.toString(2); writeFile(json_string_out, tagfile); } catch (JSONException e) { // TODO Auto-generated catch block e.printStackTrace(); } } private void 
writeFile(String data, String file_path) { try { FileWriter writer = new FileWriter(file_path); writer.write(data); writer.close(); } catch (IOException e) { e.printStackTrace(); } } @Override public HashMap<String,Boolean> readTags() { HashMap<String, Boolean> result = new HashMap<String, Boolean>(); RealNode node = this; while(node != null) { for(Map.Entry<String, Boolean> entry : node.tags.entrySet()) { String tag = entry.getKey(); boolean value = entry.getValue().booleanValue(); if(result.get(tag) == null) { result.put(tag, value); } } node = node.parent; } return result; } static public String nameFromFilename(String filename) { String[] path_elements = filename.split("/"); return path_elements[path_elements.length - 1]; } static public RealNode readJson(JSONObject json) { RealNode node = new RealNode(); JSONArray json_children; try { node.filename = json.getString("Name"); } catch( JSONException ex ) { return null; } node.name = nameFromFilename(node.getFilename()); try { json_children = json.getJSONArray("Children"); } catch( JSONException ex ) { json_children = null; } if( json_children != null && json_children.length() > 0 ) { node.children = new ArrayList<Node>(json_children.length()); for( int i = 0; i < json_children.length(); i++ ) { try { JSONObject json_child = json_children.getJSONObject( i ); RealNode child = readJson( json_child ); if( child != null ) { node.getChildren().add( child ); child.parent = node; } } catch( JSONException ex ) {} } node.sortChildren(); } return node; } private void setTagsFromJSON(JSONObject tags_json) { Iterator<?> keys = tags_json.keys(); while(keys.hasNext()) { String tag = (String) keys.next(); boolean value; try { value = tags_json.getBoolean(tag); tags.put(tag, value); } catch (JSONException e) { } } } static public RealNode readLocal(File file) { RealNode node = new RealNode(); node.filename = file.getPath(); node.name = file.getName(); if(node.getName().equals("tags")) { return null; } if(file.isDirectory()) { 
JSONObject tags_json = null; try { String tagfile = node.getFilename() + "/tags"; String tagfile_contents = readFile(tagfile); tags_json = new JSONObject(tagfile_contents); JSONObject this_dir_tags_json = tags_json.getJSONObject("this_dir"); node.setTagsFromJSON(this_dir_tags_json); } catch (IOException e) { } catch (JSONException e) { } File[] files = file.listFiles(); if(files != null && files.length > 0) { node.children = new ArrayList<Node>(files.length); for(int i = 0; i < files.length; i++) { RealNode child = readLocal(files[i]); if(child != null) { node.getChildren().add(child); child.parent = node; // Tags for song files are stored in the tags file in the containing directory. // We've already read that file into tags_json, so look up any tags for this song in it. if(child.getChildren() == null && tags_json != null) { try { JSONObject child_tags_json = tags_json.getJSONObject(child.getName()); child.setTagsFromJSON(child_tags_json); } catch (JSONException e) { } } } } node.sortChildren(); } } return node; } public void sortChildren() { Collections.sort(children, new Comparator<Node>() { @Override public int compare(Node a, Node b) { return a.getName().compareToIgnoreCase(b.getName()); } }); } @Override public int getScrollY() { return scroll_y; } @Override public void setScrollY(int scroll_y) { this.scroll_y = scroll_y; } }
/* * Copyright 2011 - 2013 NTB University of Applied Sciences in Technology * Buchs, Switzerland, http://www.ntb.ch/inf * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ch.ntb.inf.deep.cfg; /** * Node of the CFG-Tree with first and last bytecode address (bca). */ public class CFGNode { static final int nofLinks = 2; // default number of successors and predecessors /** * Used for finding loop headers. */ boolean visited, active; /** * Used to identify nodes belonging to catch clauses. */ public boolean isCatch; /** * Used for calculating dominator tree. */ int ref; boolean root = false; /** * Bytecode address of the first bytecode instruction in this node. */ public int firstBCA; /** * Bytecode address of the last bytecode instruction in this node. */ public int lastBCA; /** * Number of backward branches, used for the detection of loop headers (if > * 0 then loop header). */ int nofBackwardBranches; /** * Points to immediate dominator of this node. */ public CFGNode idom; /** * Points to the next CFGNode in the flat classfile representation (next can * point to a CFGNode which is not a successor or predecessor in the * instruction flow). */ public CFGNode next; /** * successor and predecessor. */ public CFGNode[] successors, predecessors; /** * counter for successors and predecessors. */ public int nofSuccessors, nofPredecessors; /** * Constructor for a new, empty CFG-Node. 
*/ public CFGNode() { this.active = false; this.visited = false; this.nofBackwardBranches = 0; this.successors = new CFGNode[nofLinks]; this.predecessors = new CFGNode[nofLinks]; this.nofSuccessors = 0; this.nofPredecessors = 0; this.idom = null; } /** * Checks, if the current node is a loop header (other nodes have * backward-branches to this node). * * @return <true> if node is a loop header */ public final boolean isLoopHeader() { return nofBackwardBranches > 0; } /** * getter for the predecessor which starts with the bytecode address * firstBCA. * * @param firstBCA * bytecode address of the first instruction of the predecessor * @return the node with starting address firstBCA, null if no such node * exists */ public final CFGNode getPredecessor(final int firstBCA) { int i = 0; while (i < nofPredecessors) { if (predecessors[i].firstBCA == firstBCA) return predecessors[i]; i++; } return null; } /** * getter for the successor which start with the bytecode address firstBCA. * * @param firstBCA * bytecode address of the first instruction of the successor * @return the node with starting address firstBCA, null if no such node * exists */ public final CFGNode getSuccessor(final int firstBCA) { int i = 0; while (i < nofSuccessors) { if (successors[i].firstBCA == firstBCA) return successors[i]; i++; } return null; } /** * Adds a node to the array of successors. * * @param node * node to add. */ public final void addSuccessor(CFGNode node) { int len = successors.length; if (nofSuccessors == len) { CFGNode[] newArray = new CFGNode[2 * len]; for (int k = 0; k < len; k++) newArray[k] = successors[k]; successors = newArray; } successors[nofSuccessors] = node; nofSuccessors++; } /** * Adds a node to the array of predecessors. * * @param node * node to add. 
*/ public final void addPredecessor(CFGNode node) { if (getPredecessor(node.firstBCA) != null) return; // node already in array int len = predecessors.length; if (nofPredecessors == len) { CFGNode[] newArray = new CFGNode[2 * len]; for (int k = 0; k < len; k++) newArray[k] = predecessors[k]; predecessors = newArray; } predecessors[nofPredecessors] = node; nofPredecessors++; } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("[" + firstBCA + ":" + lastBCA + "]"); sb.append(isLoopHeader()? ", is loop header":""); sb.append(nofBackwardBranches > 0? ", bckwd branches=" + nofBackwardBranches:""); sb.append(idom != null? ", idom=[" + idom.firstBCA + ":" + idom.lastBCA + "]":", idom=null"); sb.append(isCatch? ", is first node of catch":""); sb.append(", ref=" + ref); sb.append(", visited:" + visited); sb.append("\n"); for (int n = 0; n < 6; n++) sb.append(" "); sb.append("predecessor: "); for (int k = 0; (k < predecessors.length) && (predecessors[k] != null); k++) { sb.append("[" + predecessors[k].firstBCA + ":" + predecessors[k].lastBCA + "]"); } sb.append("\n"); for (int n = 0; n < 6; n++) sb.append(" "); sb.append("successor: "); for (int k = 0; (k < successors.length) && (successors[k] != null); k++) { sb.append("[" + successors[k].firstBCA + ":" + successors[k].lastBCA + "]"); } sb.append("\n"); return sb.toString(); } public String toString(boolean cfg) { return toString(false); } }
package net.etalia.jalia;

import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.sameInstance;
import static org.junit.Assert.assertTrue;

import java.math.BigDecimal;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;

import net.etalia.jalia.DummyAddress.AddressType;

import org.junit.Assert;
import org.junit.Test;

/**
 * Deserialization tests for {@link ObjectMapper}: plain maps/lists/arrays,
 * typed reads via TypeUtil.Specific, and entity materialization through the
 * Dummy* fixtures. Test JSON uses single quotes for readability and is
 * converted to valid JSON by {@link #replaceQuote(String)}.
 * checkThat(..) is presumably a hamcrest-style helper inherited from
 * TestBase -- TODO confirm.
 */
public class ObjectMapperDeserializeTest extends TestBase {

    /** Converts the single-quoted test JSON into real, double-quoted JSON. */
    private String replaceQuote(String json) {
        return json.replace("'", "\"");
    }

    /** Untyped read of a JSON object yields a Map with native value types. */
    @Test
    public void simpleMap() throws Exception {
        String json = "{ 'testString':'string', 'testInt':1, 'testBoolean':true, 'subMap' : { 'subString':'subString' }, 'testNull':null, 'testLong':-62075462400000}";
        json = replaceQuote(json);
        ObjectMapper mapper = new ObjectMapper();
        mapper.init();
        Object ret = mapper.readValue(json);
        checkThat(ret, notNullValue());
        checkThat(ret, instanceOf(Map.class));
        Map<String,Object> map = (Map<String, Object>) ret;
        checkThat(map, hasEntry("testString", (Object)"string"));
        checkThat(map, hasEntry("testInt", (Object)1));
        checkThat(map, hasEntry("testBoolean", (Object)true));
        checkThat(map, hasEntry("testLong", (Object)(new Long(-62075462400000l))));
        Object subMapObj = map.get("subMap");
        checkThat(subMapObj, notNullValue());
        checkThat(subMapObj, instanceOf(Map.class));
        Map<String,String> subMap = (Map<String, String>) subMapObj;
        checkThat(subMap, hasEntry("subString", "subString"));
    }

    /** Typed read: Map<String,Integer> via a TypeUtil.Specific hint. */
    @Test
    public void intMap() throws Exception {
        String json = "{ 'a1' : 1, 'a2' : 2}";
        json = replaceQuote(json);
        ObjectMapper mapper = new ObjectMapper();
        mapper.init();
        Object ret = mapper.readValue(json, new TypeUtil.Specific<Map<String,Integer>>() {}.type());
        checkThat(ret, notNullValue());
        checkThat(ret, instanceOf(Map.class));
        Map<String,Integer> map = (Map<String, Integer>) ret;
        checkThat(map, hasEntry("a1", 1));
        checkThat(map, hasEntry("a2", 2));
    }

    /** A non-numeric value in a typed Integer map must fail. */
    @Test(expected=JaliaException.class)
    public void intMapError() throws Exception {
        String json = "{ 'a1' : 1, 'a2' : 'ciao'}";
        json = replaceQuote(json);
        ObjectMapper mapper = new ObjectMapper();
        mapper.init();
        mapper.readValue(json, new TypeUtil.Specific<Map<String,Integer>>() {}.type());
    }

    /** Untyped read of a heterogeneous array: note ints become Long. */
    @Test
    public void simpleList() throws Exception {
        String json = "[ 1, 1.0, 'a2', true]";
        json = replaceQuote(json);
        ObjectMapper mapper = new ObjectMapper();
        mapper.init();
        Object ret = mapper.readValue(json);
        checkThat(ret, notNullValue());
        checkThat(ret, instanceOf(List.class));
        List<Object> list = (List<Object>) ret;
        checkThat(list, contains((Object)1l, (Object)1.0d, "a2", true));
    }

    /** Typed read: List<Integer>. */
    @Test
    public void intList() throws Exception {
        String json = "[ 1, 2, 3]";
        json = replaceQuote(json);
        ObjectMapper mapper = new ObjectMapper();
        mapper.init();
        Object ret = mapper.readValue(json, new TypeUtil.Specific<List<Integer>>() {}.type());
        checkThat(ret, notNullValue());
        checkThat(ret, instanceOf(List.class));
        List<Integer> list = (List<Integer>) ret;
        checkThat(list, contains(1,2,3));
    }

    /** A string element in a typed Integer list must fail. */
    @Test(expected=JaliaException.class)
    public void intListError() throws Exception {
        String json = "[ 1, 2, 'ciao']";
        json = replaceQuote(json);
        ObjectMapper mapper = new ObjectMapper();
        mapper.init();
        mapper.readValue(json, new TypeUtil.Specific<List<Integer>>() {}.type());
    }

    /** The concrete collection class in the hint (LinkedList) is honored. */
    @Test
    public void intLinkedList() throws Exception {
        String json = "[ 1, 2, 3]";
        json = replaceQuote(json);
        ObjectMapper mapper = new ObjectMapper();
        mapper.init();
        Object ret = mapper.readValue(json, new TypeUtil.Specific<LinkedList<Integer>>() {}.type());
        checkThat(ret, notNullValue());
        checkThat(ret, instanceOf(List.class));
        checkThat(ret, instanceOf(LinkedList.class));
        List<Integer> list = (List<Integer>) ret;
        checkThat(list, contains(1,2,3));
    }

    /** Typed read into a primitive int[] array. */
    @Test
    public void intArray() throws Exception {
        String json = "[ 1, 2, 3]";
        json = replaceQuote(json);
        ObjectMapper mapper = new ObjectMapper();
        mapper.init();
        Object ret = mapper.readValue(json, new TypeUtil.Specific<int[]>() {}.type());
        checkThat(ret, notNullValue());
        checkThat(ret.getClass().isArray(), equalTo(true));
        int[] list = (int[]) ret;
        checkThat(list[0], equalTo(1));
        checkThat(list[1], equalTo(2));
        checkThat(list[2], equalTo(3));
    }

    /** '@entity':'Person' materializes a full DummyPerson graph. */
    @Test
    public void simpleEntity() throws Exception {
        String json = "{" + "'@entity':'Person'," + "'name':'Mario',"+ "'surname':'Rossi'," + "'age':21," + "'height':5.2," + "'active':true," + "'addresses':[" + "{" + "'type':'EMAIL'," + "'address':'m.rossi@gmail.com'" + "}"+ "]," + "'tags':[" + "'tag1'," + "'tag2'" + "]," + "'birthDay':1000" + "}";
        ObjectMapper om = new ObjectMapper();
        om.setEntityNameProvider(new DummyEntityProvider());
        om.init();
        Object val = om.readValue(json.replace("'", "\""));
        checkThat(val, notNullValue());
        checkThat(val, instanceOf(DummyPerson.class));
        DummyPerson person = (DummyPerson) val;
        checkThat(person.getName(), equalTo("Mario"));
        checkThat(person.getSurname(), equalTo("Rossi"));
        checkThat(person.getAge(), equalTo(21));
        checkThat(person.getHeight(), equalTo(5.2f));
        checkThat(person.getActive(), equalTo(true));
        checkThat(person.getBirthDay(), notNullValue());
        checkThat(person.getBirthDay().getTime(), equalTo(1000l));
        checkThat(person.getAddresses(), hasSize(1));
        checkThat(person.getAddresses().get(0), notNullValue());
        checkThat(person.getAddresses().get(0), instanceOf(DummyAddress.class));
        checkThat(person.getAddresses().get(0).getType(), equalTo(AddressType.EMAIL));
        checkThat(person.getAddresses().get(0).getAddress(), equalTo("m.rossi@gmail.com"));
        checkThat(person.getTags(), hasSize(2));
        checkThat(person.getTags(), containsInAnyOrder("tag1","tag2"));
    }

    /** Numeric/boolean/date properties given as strings are coerced. */
    @Test
    public void simpleEntityWithStrings() throws Exception {
        String json = "{" + "'@entity':'Person'," + "'name':'Mario',"+ "'surname':'Rossi'," + "'age':'21'," + "'active':'true'," + "'birthDay':'1000'" + "}";
        ObjectMapper om = new ObjectMapper();
        om.setEntityNameProvider(new DummyEntityProvider());
        om.init();
        Object val = om.readValue(json.replace("'", "\""));
        checkThat(val, notNullValue());
        checkThat(val, instanceOf(DummyPerson.class));
        DummyPerson person = (DummyPerson) val;
        checkThat(person.getName(), equalTo("Mario"));
        checkThat(person.getSurname(), equalTo("Rossi"));
        checkThat(person.getAge(), equalTo(21));
        checkThat(person.getActive(), equalTo(true));
        checkThat(person.getBirthDay(), notNullValue());
        checkThat(person.getBirthDay().getTime(), equalTo(1000l));
    }

    /** Empty strings for non-string properties deserialize as null. */
    @Test
    public void simpleEntityWithEmptyStrings() throws Exception {
        String json = "{" + "'@entity':'Person'," + "'name':'Mario',"+ "'surname':'Rossi'," + "'age':''," + "'active':''," + "'birthDay':''" + "}";
        ObjectMapper om = new ObjectMapper();
        om.setEntityNameProvider(new DummyEntityProvider());
        om.init();
        Object val = om.readValue(json.replace("'", "\""));
        checkThat(val, notNullValue());
        checkThat(val, instanceOf(DummyPerson.class));
        DummyPerson person = (DummyPerson) val;
        checkThat(person.getName(), equalTo("Mario"));
        checkThat(person.getSurname(), equalTo("Rossi"));
        checkThat(person.getAge(), equalTo(null));
        checkThat(person.getActive(), equalTo(null));
        checkThat(person.getBirthDay(), nullValue());
    }

    /** ISO-8601 date strings are parsed into Date values. */
    @Test
    public void simpleEntityWithISO8601Date() throws Exception {
        String json = "{" + "'@entity':'Person'," + "'birthDay':'1979-03-05T07:31:22Z'" + "}";
        ObjectMapper om = new ObjectMapper();
        om.setEntityNameProvider(new DummyEntityProvider());
        om.init();
        Object val = om.readValue(json.replace("'", "\""));
        checkThat(val, notNullValue());
        checkThat(val, instanceOf(DummyPerson.class));
        DummyPerson person = (DummyPerson) val;
        checkThat(person.getBirthDay().getTime(), equalTo(289467082000l));
    }

    /**
     * An 'id' known to the EntityFactory resolves to the existing instance,
     * which is then updated in place; the ChangeRecorder tracks the old and
     * new values of the modified 'addresses' property.
     */
    @Test
    public void entityFromExisting() throws Exception {
        String json = "{" + "'id':'p1'," + "'@entity':'Person'," + "'addresses':[" + "{" + "'type':'EMAIL'," + "'address':'m.rossi@gmail.com'" + "}"+ "]" + "}";
        ObjectMapper om = new ObjectMapper();
        DummyEntityProvider provider = new DummyEntityProvider();
        provider.addToDb(new DummyPerson("p1", "Simone","Gianni"));
        om.setEntityNameProvider(provider);
        om.setEntityFactory(provider);
        om.setClassDataFactory(provider);
        om.init();
        Object val = om.readValue(json.replace("'", "\""));
        checkThat(val, notNullValue());
        checkThat(val, instanceOf(DummyPerson.class));
        DummyPerson person = (DummyPerson) val;
        checkThat(person.getName(), equalTo("Simone"));
        checkThat(person.getSurname(), equalTo("Gianni"));
        checkThat(person.getAddresses(), hasSize(1));
        checkThat(person.getAddresses().get(0), notNullValue());
        checkThat(person.getAddresses().get(0), instanceOf(DummyAddress.class));
        checkThat(person.getAddresses().get(0).getType(), equalTo(AddressType.EMAIL));
        checkThat(person.getAddresses().get(0).getAddress(), equalTo("m.rossi@gmail.com"));
        ChangeRecorder changeRecorder = om.getChangeRecorder();
        ChangeRecorder.Change<Collection<DummyAddress>> addressesChange = changeRecorder.getChange(val, "addresses");
        checkThat(addressesChange, notNullValue());
        checkThat(addressesChange.getOldValue(), hasSize(0));
        checkThat(addressesChange.getNewValue(), hasSize(1));
    }

    /** A type hint incompatible with the '@entity' payload must fail. */
    @Test(expected=JaliaException.class)
    public void wrongHint() throws Exception {
        String json = "{" + "'@entity':'Person'," + "'name':'Mario',"+ "'surname':'Rossi'," + "'addresses':[" + "{" + "'type':'EMAIL'," + "'address':'m.rossi@gmail.com'" + "}"+ "]" + "}";
        ObjectMapper om = new ObjectMapper();
        om.setEntityNameProvider(new DummyEntityProvider());
        om.init();
        om.readValue(json.replace("'", "\""), new TypeUtil.Specific<Integer>(){}.type());
    }

    /** A Person nested where an Address is expected must fail. */
    @Test(expected=JaliaException.class)
    public void wrongInnerType() throws Exception {
        String json = "{" + "'@entity':'Person'," + "'name':'Mario',"+ "'surname':'Rossi'," + "'addresses':[" + "{" + "'@entity':'Person'," + "'name':'wrong'" + "}"+ "]" + "}";
        ObjectMapper om = new ObjectMapper();
        om.setEntityNameProvider(new DummyEntityProvider());
        om.init();
        om.readValue(json.replace("'", "\""));
    }

    /**
     * Exercises the error path for an invalid enum constant.
     * NOTE(review): this only prints the exception; it never asserts on the
     * message despite the test name, and passes even if Assert.fail fires
     * (the catch swallows the AssertionError).
     */
    @Test
    public void exceptionMessage() throws Exception {
        String json = "{" + "'@entity':'Person'," + "'addresses':[" + "{" + "'type':'INVALID_TYPE'" + "}"+ "]" + "}";
        ObjectMapper om = new ObjectMapper();
        om.setEntityNameProvider(new DummyEntityProvider());
        om.init();
        try {
            om.readValue(json.replace("'", "\""));
            Assert.fail("Should throw exception");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** New and existing entities mix in one list; the list instance is reused. */
    @Test
    public void differentEntitiesInList() throws Exception {
        DummyAddress a1 = new DummyAddress("a1",AddressType.EMAIL, "simoneg@apache.org");
        DummyAddress a2 = new DummyAddress("a2",AddressType.HOME, "Via Prove, 21");
        DummyPerson person = new DummyPerson("p1","Simone","Gianni",a1,a2);
        List<DummyAddress> prelist = person.getAddresses();
        DummyEntityProvider provider = new DummyEntityProvider();
        provider.addToDb(person,a1,a2);
        ObjectMapper om = new ObjectMapper();
        om.setEntityNameProvider(provider);
        om.setEntityFactory(provider);
        om.setClassDataFactory(provider);
        om.init();
        String json = "{" + "'@entity':'Person'," + "'id':'p1'," + "'addresses':[" + "{" + "'@entity':'Address'," + "'id':'a3'," + "'type':'EMAIL'," + "'address':'a@b.com'" + "}"+ "," + "{" + "'@entity':'Address'," + "'id':'a1'" + "}" + "," + "{" + "'@entity':'Address'," + "'id':'a2'" + "}" + "]" + "}";
        Object rpersonObj = om.readValue(json.replace("'", "\""));
        DummyPerson rperson = (DummyPerson) rpersonObj;
        checkThat(rperson, sameInstance(person));
        checkThat(rperson.getAddresses(), sameInstance(prelist));
        checkThat(prelist, hasSize(3));
        checkThat(prelist.get(0).getIdentifier(), equalTo("a3"));
        checkThat(prelist.get(1), sameInstance(a1));
        checkThat(prelist.get(2), sameInstance(a2));
    }

    /** Same as above but for a Set property; the set instance is reused. */
    @Test
    public void differentEntitiesInSet() throws Exception {
        DummyPerson person = new DummyPerson("p1","Simone","Gianni");
        person.initTags("tag1","tag2");
        Set<String> preset = person.getTags();
        DummyEntityProvider provider = new DummyEntityProvider();
        provider.addToDb(person);
        ObjectMapper om = new ObjectMapper();
        om.setEntityNameProvider(provider);
        om.setEntityFactory(provider);
        om.setClassDataFactory(provider);
        om.init();
        String json = "{" + "'@entity':'Person'," + "'id':'p1'," + "'tags':[" + "'tag3'," + "'tag1'," + "'tag2'" + "]" + "}";
        Object rpersonObj = om.readValue(json.replace("'", "\""));
        DummyPerson rperson = (DummyPerson) rpersonObj;
        checkThat(rperson, sameInstance(person));
        checkThat(rperson.getTags(), sameInstance(preset));
        checkThat(preset, hasSize(3));
        checkThat(preset, containsInAnyOrder("tag1","tag2","tag3"));
    }

    /** Elements absent from the incoming JSON list are removed in place. */
    @Test
    public void lessEntitiesInList() throws Exception {
        DummyAddress a1 = new DummyAddress("a1",AddressType.EMAIL, "simoneg@apache.org");
        DummyAddress a2 = new DummyAddress("a2",AddressType.HOME, "Via Prove, 21");
        DummyAddress a3 = new DummyAddress("a3",AddressType.OFFICE, "Via del Lavoro, 21");
        DummyPerson person = new DummyPerson("p1","Simone","Gianni",a1,a2,a3);
        List<DummyAddress> prelist = person.getAddresses();
        DummyEntityProvider provider = new DummyEntityProvider();
        provider.addToDb(person,a1,a2);
        ObjectMapper om = new ObjectMapper();
        om.setEntityNameProvider(provider);
        om.setEntityFactory(provider);
        om.setClassDataFactory(provider);
        om.init();
        String json = "{" + "'@entity':'Person'," + "'id':'p1'," + "'addresses':[" + "{" + "'@entity':'Address'," + "'id':'a4'," + "'type':'EMAIL'," + "'address':'a@b.com'" + "}"+ "," + "{" + "'@entity':'Address'," + "'id':'a1'" + "}" + "]" + "}";
        Object rpersonObj = om.readValue(json.replace("'", "\""));
        DummyPerson rperson = (DummyPerson) rpersonObj;
        checkThat(rperson, sameInstance(person));
        checkThat(rperson.getAddresses(), sameInstance(prelist));
        System.out.println(rperson);
        checkThat(prelist, hasSize(2));
        checkThat(prelist.get(0).getIdentifier(), equalTo("a4"));
        checkThat(prelist.get(1), sameInstance(a1));
    }

    /** Elements absent from the incoming JSON set are removed in place. */
    @Test
    public void lessEntitiesInSet() throws Exception {
        DummyPerson person = new DummyPerson("p1","Simone","Gianni");
        person.initTags("tag1","tag2","tag3");
        Set<String> preset = person.getTags();
        DummyEntityProvider provider = new DummyEntityProvider();
        provider.addToDb(person);
        ObjectMapper om = new ObjectMapper();
        om.setEntityNameProvider(provider);
        om.setEntityFactory(provider);
        om.setClassDataFactory(provider);
        om.init();
        String json = "{" + "'@entity':'Person'," + "'id':'p1'," + "'tags':[" + "'tag1',"+ "'tag4'"+ "]" + "}";
        Object rpersonObj = om.readValue(json.replace("'", "\""));
        DummyPerson rperson = (DummyPerson) rpersonObj;
        checkThat(rperson, sameInstance(person));
        checkThat(rperson.getTags(), sameInstance(preset));
        checkThat(preset, hasSize(2));
        checkThat(preset, containsInAnyOrder("tag1","tag4"));
    }

    /** Plain id strings inside a list resolve to existing entities. */
    @Test
    public void embeddedEntities() throws Exception {
        DummyAddress a1 = new DummyAddress("a1",AddressType.EMAIL, "simoneg@apache.org");
        DummyAddress a2 = new DummyAddress("a2",AddressType.HOME, "Via Prove, 21");
        DummyAddress a3 = new DummyAddress("a3",AddressType.OFFICE, "Via del Lavoro, 21");
        DummyEntityProvider provider = new DummyEntityProvider();
        provider.addToDb(a1,a2,a3);
        ObjectMapper om = new ObjectMapper();
        om.setEntityNameProvider(provider);
        om.setEntityFactory(provider);
        om.setClassDataFactory(provider);
        om.init();
        String json = "{" + "'@entity':'Person'," + "'id':'p1'," + "'addresses':[" + "\"a1\",\"a2\"" + "]" + "}";
        Object rpersonObj = om.readValue(json.replace("'", "\""));
        DummyPerson rperson = (DummyPerson) rpersonObj;
        List<DummyAddress> prelist = rperson.getAddresses();
        checkThat(prelist, hasSize(2));
        checkThat(prelist.get(0).getIdentifier(), equalTo("a1"));
        checkThat(prelist.get(0), sameInstance(a1));
        checkThat(prelist.get(1).getIdentifier(), equalTo("a2"));
        checkThat(prelist.get(1), sameInstance(a2));
    }

    /** Deserialization into unmodifiable-backed collection/map properties. */
    @Test
    public void unmodifiables() throws Exception {
        DummyEntityProvider provider = new DummyEntityProvider();
        ObjectMapper om = new ObjectMapper();
        om.setEntityNameProvider(provider);
        om.setEntityFactory(provider);
        om.setClassDataFactory(provider);
        om.init();
        String json = "{" + "'@entity':'Person'," + "'id':'p1'," + "'secrets':[" + "'s1','s2'" + "]," + "'extraData':{" + "'extra1':'extra'" + "}" + "}";
        Object rpersonObj = om.readValue(json.replace("'", "\""));
        DummyPerson rperson = (DummyPerson) rpersonObj;
        checkThat(rperson.getExtraData(), hasEntry("extra1", (Object)"extra"));
        checkThat(rperson.getSecrets(), contains("s1","s2"));
    }

    /** A nested entity that cannot be resolved must not replace mainAddress. */
    @Test(expected = JaliaException.class)
    public void cannotChangeMainAddressWithUnfoundOne() throws Exception {
        ObjectMapper om = new ObjectMapper();
        String json = "{" + "'@entity':'Person'," + "'mainAddress':null" + "}";
        om.readValue(json.replace("'", "\""), DummyPerson.class);
        String json2 = "{" + "'@entity':'Person'," + "'mainAddress':" + "{" + "'type':'EMAIL'," + "'address':'a@b.com'" + "}"+ "}";
        om.readValue(json2.replace("'", "\""), DummyPerson.class);
    }

    /** An absent bean property is kept; an explicit null clears it. */
    @Test
    public void nullBeans() throws Exception {
        DummyPerson p1 = new DummyPerson();
        p1.setIdentifier("p1");
        DummyPerson bf = new DummyPerson();
        bf.setIdentifier("pbf");
        p1.setBestFriend(bf);
        DummyEntityProvider provider = new DummyEntityProvider();
        ObjectMapper om = new ObjectMapper();
        om.setEntityNameProvider(provider);
        om.setEntityFactory(provider);
        om.setClassDataFactory(provider);
        provider.addToDb(p1);
        om.init();
        {
            String json = "{" + "'@entity':'Person'," + "'id':'p1'" + "}";
            Object rpersonObj = om.readValue(json.replace("'", "\""));
            DummyPerson rperson = (DummyPerson) rpersonObj;
            checkThat(rperson.getBestFriend(), notNullValue());
        }
        {
            String json = "{" + "'@entity':'Person'," + "'id':'p1'," + "'bestFriend':null" + "}";
            Object rpersonObj = om.readValue(json.replace("'", "\""));
            DummyPerson rperson = (DummyPerson) rpersonObj;
            checkThat(rperson.getBestFriend(), nullValue());
        }
    }

    @Test
    public void invalidNativeDeserializations() throws Exception {
        ObjectMapper om =
new ObjectMapper(); checkThat(om.readValue("test of string { with \"stuff\" [] }", String.class), equalTo("test of string { with \"stuff\" [] }")); checkThat(om.readValue("1", Long.class), equalTo(1l)); checkThat(om.readValue("1.0", Double.class), equalTo(1.0d)); checkThat(om.readValue("true", Boolean.class), equalTo(true)); checkThat(om.readValue("null", Boolean.class), nullValue()); checkThat(om.readValue("null", DummyPerson.class), nullValue()); } @Test public void pollutedDeSerCache() throws Exception { DummyEntityProvider ep = new DummyEntityProvider(); ep.addToDb(new DummyAddress("a4", AddressType.EMAIL, "a@b.com")); ObjectMapper om = new ObjectMapper(); om.setEntityNameProvider(null); om.setEntityFactory(ep); String json = "{" + "'@entity':'Person'," + "'mainAddress':null" + "}"; om.readValue(json.replace("'", "\""), DummyPerson.class); String json2 = "{" + "'@entity':'Person'," + "'mainAddress':" + "{" + "'@entity':'Address'," + "'id':'a4'," + "'type':'EMAIL'," + "'address':'a@b.com'" + "}"+ "}"; om.readValue(json2.replace("'", "\""), DummyPerson.class); } @Test public void bigDecimal() throws Exception { ObjectMapper om = new ObjectMapper(); om.setEntityNameProvider(null); String json = "{'@entity':'Person','balance':70000.00}"; DummyPerson person = om.readValue(json.replace("'", "\""), DummyPerson.class); assertTrue(person.getBalance().compareTo(new BigDecimal("70000.00")) == 0); } @Test public void doNotUpdateWhenIgnoreIsSetOnly() { DummyAnnotations bean = new DummyAnnotations(); bean.setSetOnly("thevalue"); ObjectMapper mapper = new ObjectMapper(); mapper.readValue("{'setOnly':'newvalue'}".replace("'", "\""), bean); checkThat(bean.getSetOnly(), equalTo("thevalue")); } @Test public void updateWhenIgnoreIsGetOnly() { DummyAnnotations bean = new DummyAnnotations(); bean.setGetOnly("thevalue"); ObjectMapper mapper = new ObjectMapper(); mapper.readValue("{'getOnly':'newvalue'}".replace("'", "\""), bean); checkThat(bean.getGetOnly(), equalTo("newvalue")); } 
@Test public void updateWhenIgnoreIsSetOnlyAndOverridesIgnores() { DummyAnnotations bean = new DummyAnnotations(); bean.setSetOnly("thevalue"); ObjectMapper mapper = new ObjectMapper(); mapper.setOption(DefaultOptions.OVERRIDE_IGNORES, true); mapper.readValue("{'setOnly':'newvalue'}".replace("'", "\""), bean); checkThat(bean.getSetOnly(), equalTo("newvalue")); } }
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.river.api.security;

import java.security.AccessControlContext;
import java.security.AccessController;
import java.security.DomainCombiner;
import java.security.Guard;
import java.security.Permission;
import java.security.Policy;
import java.security.PrivilegedAction;
import java.security.ProtectionDomain;
import java.security.SecurityPermission;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.NavigableSet;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.FutureTask;
import java.util.concurrent.RunnableFuture;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import net.jini.security.Security;
import net.jini.security.SecurityContext;
import au.net.zeus.collection.RC;
import au.net.zeus.collection.Ref;
import au.net.zeus.collection.Referrer;
import org.cliffc.high_scale_lib.NonBlockingHashMap;

/**
 * CombinerSecurityManager, is intended to be a highly scalable
 * SecurityManager implementation that caches the results of security checks
 * for each context, which may be an instance of SecurityContext or
 * AccessControlContext.  Stale records are pruned from the cache.
 *
 * The cache utilises Cliff Click's NonBlockingHashMap and Doug Lea's
 * ConcurrentSkipListSet.
 *
 * This SecurityManager should be tuned for garbage collection for a large
 * young generation heap, since many young objects are created and discarded.
 *
 * It is recommended that this SecurityManager be installed from the command
 * line in order to load as early as possible.
 *
 * Apart from Permission objects and java.security.Policy.getPolicy() class
 * lock (Bug ID: 7093090, fixed in jdk8(b15)), this SecurityManager is non
 * blocking, including the cache it keeps to prevent repeat security checks.
 *
 * @see Security
 * @see SecurityContext
 * @see AccessControlContext
 *
 *
 */
public class CombinerSecurityManager
extends SecurityManager implements CachingSecurityManager {
    private static final Logger logger = Logger.getLogger(CombinerSecurityManager.class.getName());
    // Combiner that wraps assigned domains in DelegateProtectionDomain.
    private final DomainCombiner dc;
    // Cache of optimised Delegate AccessControlContext's
    private final ConcurrentMap<AccessControlContext, AccessControlContext> contextCache;
    // Per-context cache of Permissions that have already passed a check.
    private final ConcurrentMap<Object, NavigableSet<Permission>> checked;
    // Guard protecting clearCache(): SecurityPermission("getPolicy").
    private final Guard g;
    // PrivilegedAction that captures the optimised AccessControlContext.
    private final Action action;
    // Pool used to run PermissionCheck tasks in parallel.
    private final Executor executor;
    // Comparator enabling SocketPermission et al. to be safely cached.
    private final Comparator<Referrer<Permission>> permCompare;
    // Context captured during construction — recognised to short-circuit checks.
    private final AccessControlContext SMConstructorContext;
    // Context containing only this manager's own ProtectionDomain.
    private final AccessControlContext SMPrivilegedContext;
    // This class's own domain; stripped from combined contexts to avoid recursion.
    private final ProtectionDomain privilegedDomain;
    // Propagates the caller's SecurityContext to PermissionCheck tasks.
    // NOTE(review): set on every check and never removed here; presumably
    // overwritten by the next check on the same thread — confirm no leak matters.
    private final ThreadLocal<SecurityContext> threadContext;
    // Re-entrancy guard: TRUE while executing trusted Security/Policy calls.
    private final ThreadLocal<Boolean> inTrustedCodeRecursiveCall;

    public CombinerSecurityManager(){
        super();
        // Get context before this becomes a SecurityManager.
        // super() checked the permission to create a SecurityManager.
        SMConstructorContext = AccessController.getContext();
        ProtectionDomain [] context = new ProtectionDomain[1];
        privilegedDomain = this.getClass().getProtectionDomain();
        context[0] = privilegedDomain;
        SMPrivilegedContext = new AccessControlContext(context);
        dc = new DelegateDomainCombiner();
        ConcurrentMap<Referrer<AccessControlContext>, Referrer<AccessControlContext>> internal =
                new NonBlockingHashMap<Referrer<AccessControlContext>, Referrer<AccessControlContext>>();
        contextCache = RC.concurrentMap(internal, Ref.TIME, Ref.STRONG, 60000L, 0L);
        ConcurrentMap<Referrer<Object>, Referrer<NavigableSet<Permission>>> refmap =
                new NonBlockingHashMap<Referrer<Object>, Referrer<NavigableSet<Permission>>>();
        checked = RC.concurrentMap(refmap, Ref.TIME, Ref.STRONG, 20000L, 0L);
        g = new SecurityPermission("getPolicy");
        Permission createAccPerm = new SecurityPermission("createAccessControlContext");
        action = new Action();
        // Make this a tunable property.
        double blocking_coefficient = 0.6; // 0 CPU intensive to 0.9 IO intensive
        int numberOfCores = Runtime.getRuntime().availableProcessors();
        int poolSizeLimit = (int) (numberOfCores / ( 1 - blocking_coefficient));
        // The intent here is to parallelise security checks as well as weed
        // out blocking SocketPermission's to execute them in parallel to
        // reduce the wait on network IO.
        // Once the pool size has reached it's maximum, the tasks are handed
        // back to the calling thread to execute, this strategy also eliminates
        // the possiblity of deadlock caused by circular dependencies between
        // permission checks.
        executor = new ThreadPoolExecutor(numberOfCores, poolSizeLimit, 20L,
                TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
                new ThreadPoolExecutor.CallerRunsPolicy());
        permCompare = RC.comparator(new PermissionComparator());
        threadContext = new ThreadLocal<SecurityContext>();
        inTrustedCodeRecursiveCall = new ThreadLocal<Boolean>();
        /* Get the policy & refresh, in case it hasn't been initialized.
         * While there is no SecurityManager,
         * no policy checks are performed, however the policy must be in a
         * constructed working state, before the SecurityManager is set,
         * otherwise the Policy, if it's a non jvm policy, won't have permission
         * to read the properties and files it needs in order to be constructed.
         */
        Policy policy = java.security.Policy.getPolicy();
        // This is to avoid unnecessarily refreshing the policy.
        if (!policy.implies(context[0], createAccPerm)) policy.refresh();
        /* Bug ID: 7093090 Reduce synchronization in java.security.Policy.getPolicyNoCheck
         * This bug may cause contention between ProtectionDomain implies
         * calls, also it could be a point of attack for Denial of service,
         * since the lock used is a static class lock.  This bug has been fixed
         * in jdk8(b15).
         */
    }

    /**
     * Returns the current SecurityContext, obtained via
     * net.jini.security.Security with the recursive-call guard raised so the
     * call itself is not re-checked by this manager.
     */
    @Override
    public Object getSecurityContext() {
        Object context = null;
        inTrustedCodeRecursiveCall.set(Boolean.TRUE);
        try {
            context = Security.getContext();
        }finally {
            inTrustedCodeRecursiveCall.set(Boolean.FALSE); // Must always happen, no matter what.
        }
        return context;
    }

    /**
     * Throws a <code>SecurityException</code> if the requested
     * access, specified by the given permission, is not permitted based
     * on the security policy currently in effect.
     *
     * This method obtains the current SecurityContext and checks
     * the given permission in that context.
     *
     * @see SecurityContext
     * @see Security
     * @param perm the permission to check.
     * @throws SecurityException if the permission is denied.
     */
    @Override
    public void checkPermission(Permission perm) throws SecurityException {
        Boolean call = inTrustedCodeRecursiveCall.get();
        if (call == Boolean.TRUE) return; // In Security and Policy static methods we trust.
        Object context = getSecurityContext();
        checkPermission(perm, context);
    }

    /**
     * Throws a <code>SecurityException</code> if the requested
     * access, specified by the given permission and context, is not permitted based
     * on the security policy currently in effect.
     *
     * It is absolutely essential that the SecurityContext override equals
     * and hashCode.
     *
     * @param perm the permission to check.
     * @param context - AccessControlContext or SecurityContext
     * @throws SecurityException if the permission is denied, or the context
     *         is neither an AccessControlContext nor a SecurityContext.
     */
    @Override
    public void checkPermission(Permission perm, Object context) throws SecurityException {
        if (perm == null ) throw new NullPointerException("Permission Collection null");
        AccessControlContext executionContext = null;
        SecurityContext securityContext = null;
        if (context instanceof AccessControlContext){
            executionContext = (AccessControlContext) context;
        } else if (context instanceof SecurityContext){
            securityContext = (SecurityContext) context;
            executionContext = securityContext.getAccessControlContext();
        } else {
            throw new SecurityException();
        }
        threadContext.set(securityContext); // may be null.
        /* The next line speeds up permission checks related to this SecurityManager. */
        if ( SMPrivilegedContext.equals(executionContext)
                || SMConstructorContext.equals(executionContext)) return; // prevents endless loop in debug.
        // Checks if Permission has already been checked for this context.
        NavigableSet<Permission> checkedPerms = checked.get(context);
        if (checkedPerms == null){
            /* A ConcurrentSkipListSet is used to avoid blocking during
             * removal operations that occur while the garbage collector
             * recovers softly reachable memory.  Since this happens while
             * the jvm's under stress, it's important that permission checks
             * continue to perform well.
             *
             * Although I considered a multi read, single write Set, I wanted
             * to avoid blocking under stress, that would be caused as a result
             * of garbage collection.
             *
             * IMPORTANT:
             * The Set "checkedPerms" must be obtained prior to executing a permission
             * check and the result written to the same Set, interleaved
             * clear operations may remove the Set from the ConcurrentMap "checked",
             * this prevents revoked permissions from entering the "checked"
             * cache after clear is called and allows tasks to run to completion
             * without needing to be concerned about revocation.
             */
            NavigableSet<Referrer<Permission>> internal =
                    new ConcurrentSkipListSet<Referrer<Permission>>(permCompare);
            checkedPerms = RC.navigableSet(internal, Ref.TIME, 5000L);
            inTrustedCodeRecursiveCall.set(Boolean.TRUE);
            try {
                NavigableSet<Permission> existed = checked.putIfAbsent(context, checkedPerms);
                if (existed != null) checkedPerms = existed;
            }finally {
                inTrustedCodeRecursiveCall.set(Boolean.FALSE); // Must always happen, no matter what.
            }
        }
        if (checkedPerms.contains(perm)) return; // don't need to check again.
        // Cache the created AccessControlContext.
        AccessControlContext delegateContext = contextCache.get(executionContext);
        if (delegateContext == null ) {
            final AccessControlContext finalExecutionContext = executionContext;
            // Create a new AccessControlContext with the DelegateDomainCombiner
            inTrustedCodeRecursiveCall.set(Boolean.TRUE);
            try {
                delegateContext = AccessController.doPrivileged(
                    new PrivilegedAction<AccessControlContext>(){
                        public AccessControlContext run() {
                            return new AccessControlContext(finalExecutionContext, dc);
                        }
                    }
                );
            }finally {
                inTrustedCodeRecursiveCall.set(Boolean.FALSE); // Must always happen, no matter what.
            }
            // Optimise the delegateContext, this runs the DelegateDomainCombiner
            // and returns the AccessControlContext.
            // This is a mutator method, the delegateContext returned
            // is actually the same object passed in, after it is
            // mutated, but just in case that changes in future we
            // return it.
            delegateContext = AccessController.doPrivileged(action, delegateContext);
            inTrustedCodeRecursiveCall.set(Boolean.TRUE);
            try {
                contextCache.putIfAbsent(executionContext, delegateContext);
                // Above putIfAbsent: It doesn't matter if it already existed,
                // the context we have is valid to perform a permissionCheck.
            }finally {
                inTrustedCodeRecursiveCall.set(Boolean.FALSE); // Must always happen, no matter what.
            }
        }
        // Normal execution, same as SecurityManager.
        delegateContext.checkPermission(perm); // Throws SecurityException.
        /* It's ok to cache SocketPermission if we use a comparator */
        // If we get to here, no exceptions were thrown, caller has permission.
        checkedPerms.add(perm);
    }

    /**
     * This method is intended to be called only by a Policy.
     *
     * To clear the cache of checked Permissions requires the following Permission:
     * java.security.SecurityPermission("getPolicy");
     *
     * @throws SecurityException if the caller lacks SecurityPermission("getPolicy").
     */
    public void clearCache() throws SecurityException {
        /* Clear the cache, out of date permission check tasks are still
         * writing to old Set's, while new checks will write to new Sets.
         */
        g.checkGuard(this);
        inTrustedCodeRecursiveCall.set(Boolean.TRUE);
        try {
            checked.clear();
        }finally {
            inTrustedCodeRecursiveCall.set(Boolean.FALSE); // Must always happen, no matter what.
        }
    }

    // Action retrieves the optimised AccessControlContext.
    private static class Action implements PrivilegedAction<AccessControlContext> {
        private Action(){}

        public AccessControlContext run(){
            return AccessController.getContext();
        }
    }

    // Replaces the inherited context with a single DelegateProtectionDomain
    // wrapping it, so that implies() can be parallelised and delegated.
    private class DelegateDomainCombiner implements DomainCombiner {

        private DelegateDomainCombiner (){
        }

        public ProtectionDomain[] combine(final ProtectionDomain[] currentDomains,
                final ProtectionDomain[] assignedDomains) {
            /* We're only interested in the assignedDomains, since these
             * are from the Context that the SecurityManager has been asked
             * to check.
             *
             * assignedDomains are inherited domains.
             *
             * This code wraps assignedDomains in a DelegateProtectionDomain
             * to ensure we check for the DelegatePermission or it's candidate
             * Permission.
             *
             * The AccessControlContext instance will be the new instance
             * we just created moments earlier, but with the context returned
             * by this DomainCombiner.
             *
             * The SecurityManager's ProtectionDomain must be removed
             * from the Context, for the following case:
             *
             * If using sun.security.provider.PolicyFile, the policy will
             * cache it's own domain prior to it being instantiated and it
             * may perform a PrivilegedAction when it's
             * getPermissions(ProtectionDomain pd) is later called for
             * ProtectionDomain's not in policy cache.
             * However, CombinerSecurityManager and
             * net.jini.security.Security cannot cache their shared
             * ProtectionDomain, relying on the underlying policy instead.
             *
             * When a standard java permission check
             * is made, the AccessController picks up the domain of
             * CombinerSecurityManager and net.jini.security.Security,
             * as well as that of the policy provider.  Since the policy
             * provider will cache it's own ProtectionDomain, but not that
             * of the SecurityManager and Security, a infinite circular call
             * loop will preceed until a StackOverflowError occurs.
             *
             * This will be caused by PolicyFile, attempting to determine
             * which permissions apply to the ProtectionDomain of
             * CombinerSecurityManager and Security, then asking
             * the SecurityManager if it has a FilePermission.
             *
             * The policy provider org.apache.river.security.ConcurrentPolicyFile
             * has no such issue, unless using CodeSource based PermissionGrant's,
             * which have been deprecated.
             */
            int l = assignedDomains.length;
            List<ProtectionDomain> list = new ArrayList<ProtectionDomain>(l);
            for (int i = 0; i < l ; i++){
                // Identity comparison: drop only this manager's own domain.
                if (assignedDomains[i] != privilegedDomain){
                    list.add(assignedDomains[i]);
                }
            }
            ProtectionDomain [] context = list.toArray(new ProtectionDomain[list.size()]);
            DelegateProtectionDomain[] delegated = new DelegateProtectionDomain[1];
            delegated[0] = new DelegateProtectionDomain(context);
            return delegated;
        }
    }

    /*
     * DelegateProtectionDomain executes checks on ProtectionDomain's in parallel.
     */
    private class DelegateProtectionDomain extends ProtectionDomain {
        // Context from AccessControlContext.
        private final ProtectionDomain[] context;

        DelegateProtectionDomain(ProtectionDomain[] context){
            // Use static domain so we don't strongly reference the ClassLoader
            // which has a strong reference to ProtectionDomain.
            super(null, null);
            this.context = context; // Not mutated so don't need to clone.
        }

        /* An earlier implementation used interruption to cancel running tasks,
         * this interruption only added complexity, in most cases permission
         * checks are expected to pass and failure occurs far less often,
         * for that reason, it is acceptable for all tasks to run to completion.
         * The overall performance cost of using task interruption was likely
         * greater, due to increased access of shared memory for only a small
         * performance benefit for failling permission checks.
         *
         * If the current thread is interrupted, the interrupt status is
         * preserved, this is done in cases where permission is required to perform
         * safe shutdown
         */
        @Override
        public boolean implies(Permission perm) {
            Thread currentThread = Thread.currentThread();
            boolean interrupt = Thread.interrupted(); // Clears the interrupt and stores it.
            int l = context.length;
            /* This is both a performance optimisation and a safety precaution.
             * When there are only a few domains on the stack, they are
             * normally privileged and will return very quickly.
             *
             * Also, permission checks performed inside PrivilegedAction
             * calls by the Policy may come from setting or getting context security
             * sensitive variables when wrappingPrivilegedAction
             * permission checks.
             *
             * The policy may accept Objects from other ProtectionDomain's
             * as part of a PermissionGrant, this domain must be included
             * in the context so it can be checked, but since that would
             * create a recursive call, we avoid recursion
             * by not splitting that permission check among multiple threads.
             */
            if ( l < 4 ){
                // Small stacks: check serially on the calling thread.
                for ( int i = 0; i < l; i++ ){
                    if (! checkPermission(context[i], perm)) {
                        if (interrupt) currentThread.interrupt();
                        return false;
                    }
                }
                if (interrupt) currentThread.interrupt();
                return true;
            }
            // Larger stacks: fan the per-domain checks out to the executor.
            CountDownLatch latch = new CountDownLatch(l);
            List<RunnableFuture<Boolean>> resultList = new ArrayList<RunnableFuture<Boolean>>(l);
            for ( int i = 0; i < l; i++ ){
                resultList.add(new FutureTask<Boolean>(
                        new PermissionCheck(context[i], perm, latch, threadContext.get()) ));
            }
            Iterator<RunnableFuture<Boolean>> it = resultList.iterator();
            while (it.hasNext()){
                executor.execute(it.next());
            }
            try {
                // We can change either call to add a timeout.
                latch.await(); // Throws InterruptedException
                it = resultList.iterator();
                try {
                    while (it.hasNext()){
                        Boolean result = it.next().get(); // Throws InterruptedException
                        if (result.equals(Boolean.FALSE)) {
                            if (interrupt) currentThread.interrupt();
                            return false;
                        }
                    }
                    if (interrupt) currentThread.interrupt();
                    return true;
                } catch (ExecutionException ex) {
                    // This should never happen, unless a runtime exception occurs.
                    if (logger.isLoggable(Level.SEVERE)) logger.log(Level.SEVERE, null, ex);
                    throw new RuntimeException("Unrecoverable: ", ex.getCause()); // Bail out.
                }
            } catch (InterruptedException ex) {
                // REMIND: Java Memory Model and thread interruption.
                // We've been externally interrupted, during execution.
                // Do this the slow way to avoid reinterruption during shutdown cleanup!
                if (logger.isLoggable(Level.FINEST)) logger.log(Level.FINEST, "External Interruption", ex);
                for ( int i = 0; i < l; i++ ){
                    if (!checkPermission(context[i], perm)) {
                        currentThread.interrupt(); // restore external interrupt.
                        return false;
                    }
                }
                currentThread.interrupt(); // restore external interrupt.
                return true;
            }
        }

        @Override
        public String toString(){
            /* Unfortunately we don't know exactly which domain has failed
             * in fact, multiple domains may fail the permission check since
             * they are executed concurrently, for that reason, we'll print
             * all domains on the stack.
             */
            StringBuilder sb = new StringBuilder(800);
            sb.append("DomainCombinerSecurityManager full stack: \n");
            int l = context.length;
            for (int i = 0; i < l; i++ ){
                sb.append(context[i].toString());
            }
            return sb.toString();
        }
    }

    /**
     * Immutable callable task, discarded immediately after use.
     */
    private class PermissionCheck implements Callable<Boolean> {
        private final ProtectionDomain pd;
        private final Permission p;
        private final CountDownLatch latch;
        private final SecurityContext securityContext; // Preserves context accross calls.

        PermissionCheck(ProtectionDomain pd, Permission p, CountDownLatch c, SecurityContext sc){
            if (pd == null || p == null) throw new NullPointerException();
            this.pd = pd;
            this.p = p;
            latch = c;
            securityContext = sc;
        }

        public Boolean call() throws Exception {
            // Required for AggregatePolicyProvider.
            Boolean result = AccessController.doPrivileged(
                securityContext != null ?
                securityContext.wrap(
                    new PrivilegedAction<Boolean>(){
                        public Boolean run() {
                            boolean result = checkPermission(pd, p);
                            return Boolean.valueOf(result);
                        }
                    }
                )
                :new PrivilegedAction<Boolean>(){
                    public Boolean run() {
                        boolean result = checkPermission(pd, p);
                        return Boolean.valueOf(result);
                    }
                }
            );
            // Counted down even if the check returned false; only a thrown
            // exception would skip this, leaving implies() blocked on await().
            latch.countDown();
            return result;
        }
    }

    /**
     * Enables customisation of permission check.
     * @param pd the ProtectionDomain to query.
     * @param p the Permission being checked.
     * @return true if the domain implies the permission.
     */
    protected boolean checkPermission(ProtectionDomain pd, Permission p){
        return pd.implies(p);
    }
}
/**
 *
 * Copyright the original author or authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jivesoftware.smackx.pubsub;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.jivesoftware.smack.SmackException.NoResponseException;
import org.jivesoftware.smack.SmackException.NotConnectedException;
import org.jivesoftware.smack.XMPPConnection;
import org.jivesoftware.smack.XMPPException.XMPPErrorException;
import org.jivesoftware.smack.packet.EmptyResultIQ;
import org.jivesoftware.smack.packet.IQ;
import org.jivesoftware.smack.packet.IQ.Type;
import org.jivesoftware.smack.packet.Stanza;
import org.jivesoftware.smack.packet.PacketExtension;
import org.jivesoftware.smackx.disco.ServiceDiscoveryManager;
import org.jivesoftware.smackx.disco.packet.DiscoverInfo;
import org.jivesoftware.smackx.disco.packet.DiscoverItems;
import org.jivesoftware.smackx.pubsub.packet.PubSub;
import org.jivesoftware.smackx.pubsub.packet.PubSubNamespace;
import org.jivesoftware.smackx.pubsub.util.NodeUtils;
import org.jivesoftware.smackx.xdata.Form;
import org.jivesoftware.smackx.xdata.FormField;
import org.jxmpp.jid.DomainBareJid;
import org.jxmpp.jid.Jid;
import org.jxmpp.jid.impl.JidCreate;
import org.jxmpp.stringprep.XmppStringprepException;

/**
 * This is the starting point for access to the pubsub service.  It
 * will provide access to general information about the service, as
 * well as create or retrieve pubsub {@link LeafNode} instances.  These
 * instances provide the bulk of the functionality as defined in the
 * pubsub specification <a href="http://xmpp.org/extensions/xep-0060.html">XEP-0060</a>.
 *
 * @author Robin Collier
 */
final public class PubSubManager
{
    // Connection all pubsub requests are sent over.
    private XMPPConnection con;
    // Address of the pubsub service the requests are directed to.
    private DomainBareJid to;
    // Cache of Node instances keyed by node id; populated by createNode/getNode.
    private Map<String, Node> nodeMap = new ConcurrentHashMap<String, Node>();

    /**
     * Create a pubsub manager associated to the specified connection.  Defaults the service
     * name to <i>pubsub</i>
     *
     * @param connection The XMPP connection
     * @throws XmppStringprepException if "pubsub." + the service name is not a valid JID
     */
    public PubSubManager(XMPPConnection connection) throws XmppStringprepException
    {
        con = connection;
        to = JidCreate.domainBareFrom("pubsub." + connection.getServiceName());
    }

    /**
     * Create a pubsub manager associated to the specified connection where
     * the pubsub requests require a specific to address for packets.
     *
     * @param connection The XMPP connection
     * @param toAddress The pubsub specific to address (required for some servers)
     */
    public PubSubManager(XMPPConnection connection, DomainBareJid toAddress)
    {
        con = connection;
        to = toAddress;
    }

    /**
     * Creates an instant node, if supported.
     *
     * @return The node that was created
     * @throws XMPPErrorException
     * @throws NoResponseException
     * @throws NotConnectedException
     * @throws InterruptedException
     */
    public LeafNode createNode() throws NoResponseException, XMPPErrorException, NotConnectedException, InterruptedException
    {
        // Instant node: the server picks the node id, returned in the <create/> element.
        PubSub reply = sendPubsubPacket(Type.set, new NodeExtension(PubSubElementType.CREATE), null);
        NodeExtension elem = reply.getExtension("create", PubSubNamespace.BASIC.getXmlns());

        LeafNode newNode = new LeafNode(con, elem.getNode());
        newNode.setTo(to);
        nodeMap.put(newNode.getId(), newNode);

        return newNode;
    }

    /**
     * Creates a node with default configuration.
     *
     * @param id The id of the node, which must be unique within the
     * pubsub service
     * @return The node that was created
     * @throws XMPPErrorException
     * @throws NoResponseException
     * @throws NotConnectedException
     * @throws InterruptedException
     */
    public LeafNode createNode(String id) throws NoResponseException, XMPPErrorException, NotConnectedException, InterruptedException
    {
        // With a null config the created node is always a leaf node.
        return (LeafNode)createNode(id, null);
    }

    /**
     * Creates a node with specified configuration.
     *
     * Note: This is the only way to create a collection node.
     *
     * @param name The name of the node, which must be unique within the
     * pubsub service
     * @param config The configuration for the node
     * @return The node that was created
     * @throws XMPPErrorException
     * @throws NoResponseException
     * @throws NotConnectedException
     * @throws InterruptedException
     */
    public Node createNode(String name, Form config) throws NoResponseException, XMPPErrorException, NotConnectedException, InterruptedException
    {
        PubSub request = PubSub.createPubsubPacket(to, Type.set, new NodeExtension(PubSubElementType.CREATE, name), null);
        boolean isLeafNode = true;

        if (config != null)
        {
            request.addExtension(new FormNode(FormNodeType.CONFIGURE, config));
            // The node_type form field decides whether a leaf or collection node is created.
            FormField nodeTypeField = config.getField(ConfigureNodeFields.node_type.getFieldName());

            if (nodeTypeField != null)
                isLeafNode = nodeTypeField.getValues().get(0).equals(NodeType.leaf.toString());
        }

        // Errors will cause exceptions in getReply, so it only returns
        // on success.
        sendPubsubPacket(con, request);
        Node newNode = isLeafNode ? new LeafNode(con, name) : new CollectionNode(con, name);
        newNode.setTo(to);
        nodeMap.put(newNode.getId(), newNode);

        return newNode;
    }

    /**
     * Retrieves the requested node, if it exists.  It will throw an
     * exception if it does not.
     *
     * @param id - The unique id of the node
     * @return the node
     * @throws XMPPErrorException The node does not exist
     * @throws NoResponseException if there was no response from the server.
     * @throws NotConnectedException
     * @throws InterruptedException
     */
    @SuppressWarnings("unchecked")
    public <T extends Node> T getNode(String id) throws NoResponseException, XMPPErrorException, NotConnectedException, InterruptedException
    {
        Node node = nodeMap.get(id);

        if (node == null)
        {
            // Not cached: discover the node type via disco#info (XEP-60 5.3).
            DiscoverInfo info = new DiscoverInfo();
            info.setTo(to);
            info.setNode(id);

            DiscoverInfo infoReply = (DiscoverInfo) con.createPacketCollectorAndSend(info).nextResultOrThrow();

            if (infoReply.hasIdentity(PubSub.ELEMENT, "leaf")) {
                node = new LeafNode(con, id);
            }
            else if (infoReply.hasIdentity(PubSub.ELEMENT, "collection")) {
                node = new CollectionNode(con, id);
            }
            else {
                // XEP-60 5.3 states that
                // "The 'disco#info' result MUST include an identity with a category of 'pubsub' and a type of either 'leaf' or 'collection'."
                // If this is not the case, then we are dealing with an PubSub implementation that doesn't follow the specification.
                throw new AssertionError(
                                "PubSub service '"
                                                + to
                                                + "' returned disco info result for node '"
                                                + id
                                                + "', but it did not contain an Identity of type 'leaf' or 'collection' (and category 'pubsub'), which is not allowed according to XEP-60 5.3.");
            }
            node.setTo(to);
            nodeMap.put(id, node);
        }
        // NOTE(review): unchecked cast — a ClassCastException surfaces at the
        // caller if T doesn't match the cached/discovered node type.
        return (T) node;
    }

    /**
     * Get all the nodes that currently exist as a child of the specified
     * collection node.  If the service does not support collection nodes
     * then all nodes will be returned.
     *
     * To retrieve contents of the root collection node (if it exists),
     * or there is no root collection node, pass null as the nodeId.
     *
     * @param nodeId - The id of the collection node for which the child
     * nodes will be returned.
     * @return {@link DiscoverItems} representing the existing nodes
     * @throws XMPPErrorException
     * @throws NoResponseException if there was no response from the server.
     * @throws NotConnectedException
     * @throws InterruptedException
     */
    public DiscoverItems discoverNodes(String nodeId) throws NoResponseException, XMPPErrorException, NotConnectedException, InterruptedException
    {
        DiscoverItems items = new DiscoverItems();

        if (nodeId != null)
            items.setNode(nodeId);
        items.setTo(to);
        DiscoverItems nodeItems = (DiscoverItems) con.createPacketCollectorAndSend(items).nextResultOrThrow();
        return nodeItems;
    }

    /**
     * Gets the subscriptions on the root node.
     *
     * @return List of subscriptions on the root node.
     * @throws XMPPErrorException
     * @throws NoResponseException
     * @throws NotConnectedException
     * @throws InterruptedException
     */
    public List<Subscription> getSubscriptions() throws NoResponseException, XMPPErrorException, NotConnectedException, InterruptedException
    {
        Stanza reply = sendPubsubPacket(Type.get, new NodeExtension(PubSubElementType.SUBSCRIPTIONS), null);
        SubscriptionsExtension subElem = reply.getExtension(PubSubElementType.SUBSCRIPTIONS.getElementName(), PubSubElementType.SUBSCRIPTIONS.getNamespace().getXmlns());
        return subElem.getSubscriptions();
    }

    /**
     * Gets the affiliations on the root node.
* * @return List of affiliations * @throws XMPPErrorException * @throws NoResponseException * @throws NotConnectedException * @throws InterruptedException * */ public List<Affiliation> getAffiliations() throws NoResponseException, XMPPErrorException, NotConnectedException, InterruptedException { PubSub reply = sendPubsubPacket(Type.get, new NodeExtension(PubSubElementType.AFFILIATIONS), null); AffiliationsExtension listElem = reply.getExtension(PubSubElementType.AFFILIATIONS); return listElem.getAffiliations(); } /** * Delete the specified node * * @param nodeId * @throws XMPPErrorException * @throws NoResponseException * @throws NotConnectedException * @throws InterruptedException */ public void deleteNode(String nodeId) throws NoResponseException, XMPPErrorException, NotConnectedException, InterruptedException { sendPubsubPacket(Type.set, new NodeExtension(PubSubElementType.DELETE, nodeId), PubSubElementType.DELETE.getNamespace()); nodeMap.remove(nodeId); } /** * Returns the default settings for Node configuration. * * @return configuration form containing the default settings. * @throws XMPPErrorException * @throws NoResponseException * @throws NotConnectedException * @throws InterruptedException */ public ConfigureForm getDefaultConfiguration() throws NoResponseException, XMPPErrorException, NotConnectedException, InterruptedException { // Errors will cause exceptions in getReply, so it only returns // on success. PubSub reply = sendPubsubPacket(Type.get, new NodeExtension(PubSubElementType.DEFAULT), PubSubElementType.DEFAULT.getNamespace()); return NodeUtils.getFormFromPacket(reply, PubSubElementType.DEFAULT); } /** * Gets the supported features of the servers pubsub implementation * as a standard {@link DiscoverInfo} instance. 
* * @return The supported features * @throws XMPPErrorException * @throws NoResponseException * @throws NotConnectedException * @throws InterruptedException */ public DiscoverInfo getSupportedFeatures() throws NoResponseException, XMPPErrorException, NotConnectedException, InterruptedException { ServiceDiscoveryManager mgr = ServiceDiscoveryManager.getInstanceFor(con); return mgr.discoverInfo(to); } private PubSub sendPubsubPacket(Type type, PacketExtension ext, PubSubNamespace ns) throws NoResponseException, XMPPErrorException, NotConnectedException, InterruptedException { return sendPubsubPacket(con, to, type, Collections.singletonList(ext), ns); } static PubSub sendPubsubPacket(XMPPConnection con, Jid to, Type type, List<PacketExtension> extList, PubSubNamespace ns) throws NoResponseException, XMPPErrorException, NotConnectedException, InterruptedException { PubSub pubSub = new PubSub(to, type, ns); for (PacketExtension pe : extList) { pubSub.addExtension(pe); } return sendPubsubPacket(con ,pubSub); } static PubSub sendPubsubPacket(XMPPConnection con, PubSub packet) throws NoResponseException, XMPPErrorException, NotConnectedException, InterruptedException { IQ resultIQ = con.createPacketCollectorAndSend(packet).nextResultOrThrow(); if (resultIQ instanceof EmptyResultIQ) { return null; } return (PubSub) resultIQ; } }
package com.google.api.ads.adwords.jaxws.v201601.cm;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlSeeAlso;
import javax.xml.bind.annotation.XmlType;


/**
 * An abstract Conversion base class.
 *
 * <p>JAXB-generated mapping for the {@code ConversionTracker} complex type of
 * the AdWords API v201601 (namespace
 * {@code https://adwords.google.com/api/adwords/cm/v201601}). All elements are
 * optional ({@code minOccurs="0"}), which is why every field uses a nullable
 * wrapper type. Do not hand-edit behavior here: this class is regenerated from
 * the WSDL.
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "ConversionTracker", propOrder = {
    "id",
    "originalConversionTypeId",
    "name",
    "status",
    "category",
    "conversionTypeOwnerCustomerId",
    "viewthroughLookbackWindow",
    "ctcLookbackWindow",
    "countingType",
    "defaultRevenueValue",
    "defaultRevenueCurrencyCode",
    "alwaysUseDefaultRevenueValue",
    "excludeFromBidding",
    "mostRecentConversionDate",
    "lastReceivedRequestTime",
    "conversionTrackerType"
})
@XmlSeeAlso({
    UploadConversion.class,
    AppConversion.class,
    WebsiteCallMetricsConversion.class,
    AdCallMetricsConversion.class,
    AdWordsConversionTracker.class
})
public abstract class ConversionTracker {

    protected Long id;
    protected Long originalConversionTypeId;
    protected String name;
    @XmlSchemaType(name = "string")
    protected ConversionTrackerStatus status;
    @XmlSchemaType(name = "string")
    protected ConversionTrackerCategory category;
    protected Long conversionTypeOwnerCustomerId;
    protected Integer viewthroughLookbackWindow;
    protected Integer ctcLookbackWindow;
    @XmlSchemaType(name = "string")
    protected ConversionDeduplicationMode countingType;
    protected Double defaultRevenueValue;
    protected String defaultRevenueCurrencyCode;
    protected Boolean alwaysUseDefaultRevenueValue;
    protected Boolean excludeFromBidding;
    protected String mostRecentConversionDate;
    protected String lastReceivedRequestTime;
    // XML element name contains a dot, so it cannot be derived from the field name.
    @XmlElement(name = "ConversionTracker.Type")
    protected String conversionTrackerType;

    /** @return the id property, or {@code null} if unset. */
    public Long getId() {
        return id;
    }

    /** @param value the id property; may be {@code null}. */
    public void setId(Long value) {
        this.id = value;
    }

    /** @return the originalConversionTypeId property, or {@code null} if unset. */
    public Long getOriginalConversionTypeId() {
        return originalConversionTypeId;
    }

    /** @param value the originalConversionTypeId property; may be {@code null}. */
    public void setOriginalConversionTypeId(Long value) {
        this.originalConversionTypeId = value;
    }

    /** @return the name property, or {@code null} if unset. */
    public String getName() {
        return name;
    }

    /** @param value the name property; may be {@code null}. */
    public void setName(String value) {
        this.name = value;
    }

    /** @return the status property, or {@code null} if unset. */
    public ConversionTrackerStatus getStatus() {
        return status;
    }

    /** @param value the status property; may be {@code null}. */
    public void setStatus(ConversionTrackerStatus value) {
        this.status = value;
    }

    /** @return the category property, or {@code null} if unset. */
    public ConversionTrackerCategory getCategory() {
        return category;
    }

    /** @param value the category property; may be {@code null}. */
    public void setCategory(ConversionTrackerCategory value) {
        this.category = value;
    }

    /** @return the conversionTypeOwnerCustomerId property, or {@code null} if unset. */
    public Long getConversionTypeOwnerCustomerId() {
        return conversionTypeOwnerCustomerId;
    }

    /** @param value the conversionTypeOwnerCustomerId property; may be {@code null}. */
    public void setConversionTypeOwnerCustomerId(Long value) {
        this.conversionTypeOwnerCustomerId = value;
    }

    /** @return the viewthroughLookbackWindow property, or {@code null} if unset. */
    public Integer getViewthroughLookbackWindow() {
        return viewthroughLookbackWindow;
    }

    /** @param value the viewthroughLookbackWindow property; may be {@code null}. */
    public void setViewthroughLookbackWindow(Integer value) {
        this.viewthroughLookbackWindow = value;
    }

    /** @return the ctcLookbackWindow property, or {@code null} if unset. */
    public Integer getCtcLookbackWindow() {
        return ctcLookbackWindow;
    }

    /** @param value the ctcLookbackWindow property; may be {@code null}. */
    public void setCtcLookbackWindow(Integer value) {
        this.ctcLookbackWindow = value;
    }

    /** @return the countingType property, or {@code null} if unset. */
    public ConversionDeduplicationMode getCountingType() {
        return countingType;
    }

    /** @param value the countingType property; may be {@code null}. */
    public void setCountingType(ConversionDeduplicationMode value) {
        this.countingType = value;
    }

    /** @return the defaultRevenueValue property, or {@code null} if unset. */
    public Double getDefaultRevenueValue() {
        return defaultRevenueValue;
    }

    /** @param value the defaultRevenueValue property; may be {@code null}. */
    public void setDefaultRevenueValue(Double value) {
        this.defaultRevenueValue = value;
    }

    /** @return the defaultRevenueCurrencyCode property, or {@code null} if unset. */
    public String getDefaultRevenueCurrencyCode() {
        return defaultRevenueCurrencyCode;
    }

    /** @param value the defaultRevenueCurrencyCode property; may be {@code null}. */
    public void setDefaultRevenueCurrencyCode(String value) {
        this.defaultRevenueCurrencyCode = value;
    }

    /** @return the alwaysUseDefaultRevenueValue property, or {@code null} if unset. */
    public Boolean isAlwaysUseDefaultRevenueValue() {
        return alwaysUseDefaultRevenueValue;
    }

    /** @param value the alwaysUseDefaultRevenueValue property; may be {@code null}. */
    public void setAlwaysUseDefaultRevenueValue(Boolean value) {
        this.alwaysUseDefaultRevenueValue = value;
    }

    /** @return the excludeFromBidding property, or {@code null} if unset. */
    public Boolean isExcludeFromBidding() {
        return excludeFromBidding;
    }

    /** @param value the excludeFromBidding property; may be {@code null}. */
    public void setExcludeFromBidding(Boolean value) {
        this.excludeFromBidding = value;
    }

    /** @return the mostRecentConversionDate property, or {@code null} if unset. */
    public String getMostRecentConversionDate() {
        return mostRecentConversionDate;
    }

    /** @param value the mostRecentConversionDate property; may be {@code null}. */
    public void setMostRecentConversionDate(String value) {
        this.mostRecentConversionDate = value;
    }

    /** @return the lastReceivedRequestTime property, or {@code null} if unset. */
    public String getLastReceivedRequestTime() {
        return lastReceivedRequestTime;
    }

    /** @param value the lastReceivedRequestTime property; may be {@code null}. */
    public void setLastReceivedRequestTime(String value) {
        this.lastReceivedRequestTime = value;
    }

    /** @return the conversionTrackerType property, or {@code null} if unset. */
    public String getConversionTrackerType() {
        return conversionTrackerType;
    }

    /** @param value the conversionTrackerType property; may be {@code null}. */
    public void setConversionTrackerType(String value) {
        this.conversionTrackerType = value;
    }

}
/*
 * JBoss, Home of Professional Open Source.
 *
 * Copyright 2014 Red Hat, Inc., and individual contributors
 * as indicated by the @author tags.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.jboss.logmanager.handlers;

import java.io.BufferedWriter;
import java.io.Writer;
import java.io.Closeable;
import java.io.Flushable;

import java.util.logging.ErrorManager;
import java.util.logging.Formatter;

import org.jboss.logmanager.ExtLogRecord;
import org.jboss.logmanager.ExtHandler;

/**
 * A handler which writes to any {@code Writer}.
 */
public class WriterHandler extends ExtHandler {

    // Guards all access to {@link #writer}; also serializes actual writes.
    protected final Object outputLock = new Object();
    // When true, the formatter head/tail are verified to be encodable in the
    // handler's charset before being written.
    private volatile boolean checkHeadEncoding = true;
    private volatile boolean checkTailEncoding = true;
    // The current target; null means logging is disabled. Guarded by outputLock.
    private Writer writer;

    /**
     * Construct a new instance.
     */
    public WriterHandler() {
    }

    /**
     * {@inheritDoc}
     *
     * Formats the record outside the lock, then writes it under {@code outputLock}.
     * Formatting and write failures are routed to {@link #reportError}, never thrown.
     */
    protected void doPublish(final ExtLogRecord record) {
        final String formatted;
        final Formatter formatter = getFormatter();
        try {
            formatted = formatter.format(record);
        } catch (Exception ex) {
            reportError("Formatting error", ex, ErrorManager.FORMAT_FAILURE);
            return;
        }
        if (formatted.length() == 0) {
            // nothing to write; don't bother
            return;
        }
        try {
            synchronized (outputLock) {
                if (writer == null) {
                    return;
                }
                preWrite(record);
                // Re-read the field: preWrite (e.g. file rotation in a subclass)
                // may have replaced or cleared the writer.
                final Writer writer = this.writer;
                if (writer == null) {
                    return;
                }
                writer.write(formatted);
                // only flush if something was written
                super.doPublish(record);
            }
        } catch (Exception ex) {
            reportError("Error writing log message", ex, ErrorManager.WRITE_FAILURE);
            return;
        }
    }

    /**
     * Execute any pre-write policy, such as file rotation.  The write lock is held during this method, so make
     * it quick.  The default implementation does nothing.
     *
     * @param record the record about to be logged
     */
    protected void preWrite(final ExtLogRecord record) {
        // do nothing by default
    }

    /**
     * Set the writer.  The writer will then belong to this handler; when the handler is closed or a new writer is set,
     * this writer will be closed.
     *
     * @param writer the new writer, or {@code null} to disable logging
     */
    public void setWriter(final Writer writer) {
        checkAccess();
        Writer oldWriter = null;
        boolean ok = false;
        try {
            synchronized (outputLock) {
                oldWriter = this.writer;
                if (oldWriter != null) {
                    // Finish the old stream: emit the formatter tail and flush
                    // before it is closed in the finally block below.
                    writeTail(oldWriter);
                    safeFlush(oldWriter);
                }
                if (writer != null) {
                    // Wrap in a BufferedWriter and emit the formatter head.
                    writeHead(this.writer = new BufferedWriter(writer));
                } else {
                    this.writer = null;
                }
                ok = true;
            }
        } finally {
            // Always close the replaced writer; if the swap failed, also close
            // the new one so it doesn't leak.
            safeClose(oldWriter);
            if (! ok) safeClose(writer);
        }
    }

    /**
     * Determine whether head encoding checking is turned on.
     *
     * @return {@code true} to check and report head encoding problems, or {@code false} to ignore them
     */
    public boolean isCheckHeadEncoding() {
        return checkHeadEncoding;
    }

    /**
     * Establish whether head encoding checking is turned on.
     *
     * @param checkHeadEncoding {@code true} to check and report head encoding problems, or {@code false} to ignore them
     * @return this handler
     */
    public WriterHandler setCheckHeadEncoding(boolean checkHeadEncoding) {
        this.checkHeadEncoding = checkHeadEncoding;
        return this;
    }

    /**
     * Determine whether tail encoding checking is turned on.
     *
     * @return {@code true} to check and report tail encoding problems, or {@code false} to ignore them
     */
    public boolean isCheckTailEncoding() {
        return checkTailEncoding;
    }

    /**
     * Establish whether tail encoding checking is turned on.
     *
     * @param checkTailEncoding {@code true} to check and report tail encoding problems, or {@code false} to ignore them
     * @return this handler
     */
    public WriterHandler setCheckTailEncoding(boolean checkTailEncoding) {
        this.checkTailEncoding = checkTailEncoding;
        return this;
    }

    // Writes the formatter's head to the given writer, optionally verifying that
    // it is encodable in this handler's charset first. Failures are reported,
    // never thrown.
    private void writeHead(final Writer writer) {
        try {
            final Formatter formatter = getFormatter();
            if (formatter != null) {
                final String head = formatter.getHead(this);
                if (checkHeadEncoding) {
                    if (!getCharset().newEncoder().canEncode(head)) {
                        reportError("Section header cannot be encoded into charset \"" + getCharset().name() + "\"", null, ErrorManager.GENERIC_FAILURE);
                        return;
                    }
                }
                writer.write(head);
            }
        } catch (Exception e) {
            reportError("Error writing section header", e, ErrorManager.WRITE_FAILURE);
        }
    }

    // Counterpart of writeHead for the formatter's tail; same error policy.
    private void writeTail(final Writer writer) {
        try {
            final Formatter formatter = getFormatter();
            if (formatter != null) {
                final String tail = formatter.getTail(this);
                if (checkTailEncoding) {
                    if (!getCharset().newEncoder().canEncode(tail)) {
                        reportError("Section tail cannot be encoded into charset \"" + getCharset().name() + "\"", null, ErrorManager.GENERIC_FAILURE);
                        return;
                    }
                }
                writer.write(tail);
            }
        } catch (Exception ex) {
            reportError("Error writing section tail", ex, ErrorManager.WRITE_FAILURE);
        }
    }

    /**
     * Flush this logger.
     */
    public void flush() {
        // todo - maybe this synch is not really needed... if there's a perf detriment, drop it
        synchronized (outputLock) {
            safeFlush(writer);
        }
        super.flush();
    }

    /**
     * Close this logger.
     *
     * @throws SecurityException if the caller does not have sufficient permission
     */
    public void close() throws SecurityException {
        checkAccess();
        // setWriter(null) writes the tail, flushes, and closes the current writer.
        setWriter(null);
        super.close();
    }

    /**
     * Safely close the resource, reporting an error if the close fails.
     *
     * @param c the resource
     */
    protected void safeClose(Closeable c) {
        try {
            if (c != null) c.close();
        } catch (Exception e) {
            reportError("Error closing resource", e, ErrorManager.CLOSE_FAILURE);
        } catch (Throwable ignored) {}
    }

    // Flush the resource if non-null, reporting (not throwing) on failure.
    // Even Throwable is swallowed: logging must never take the app down.
    void safeFlush(Flushable f) {
        try {
            if (f != null) f.flush();
        } catch (Exception e) {
            reportError("Error on flush", e, ErrorManager.FLUSH_FAILURE);
        } catch (Throwable ignored) {}
    }
}
package com.swfarm.biz.ebay.dto; import java.io.Serializable; import java.sql.Timestamp; import java.util.Date; import org.apache.commons.lang.StringUtils; import com.swfarm.pub.framework.Env; import com.swfarm.pub.framework.FormNumberCache; import com.swfarm.pub.utils.DateUtils; public class EbayOrderItemDTO implements Serializable { private String id; private Long numIid; private String customerOrderItemNo; private String customerOrderNo; private String accountNumber; private String customerName; private String categoryCode; private String articleNumber; private String saleSuiteArticleNumber; private Double saleSuiteItemQuantity; private Long stockKeepingUnitId; private Double skuPurchasePrice; private String stockLocation; private String currency; private Double price = 0d; private Double quantity = 0d; private Double postageFee = 0d; private Double allocatedQuantity = 0d; private Double stockQuantity = 0d; private Double deliverQuantity = 0d; private Double totalPrice = 0d; private String buyerCheckoutMessage; private EbayOrderDTO customerOrderDTO; private String customerOrderId; private String customerOrderProcessStep; private Double grossProfit; private Double bonusPoint; private String productManagerNo; private String productManagerName; private String vendorCode; private Boolean autoPending; private Timestamp modifiedTime; private String saleChannel; private Boolean inactive = false; /* ebay item attribute */ private String saleRecordNo; private String orderLineItemId; private String itemNumber; private String variationNumber; private String itemSite; private String itemTitle; private String viewItemUrl; private String variationTitle; private String variationViewItemURL; private String warehouseCode; private String customLabel; private Double customLabelPrice; private Double customLabelQuantity; private String transactionId; private String ebayOrderId; private String payPalEmailAddress; private String payPalTransactionId; private String 
refundPayPalTransactionId; private Date saleDate; private Date paidOnDate; private Date shippedDate; private Integer handlingDays; private Double finalValueFee; private String finalValueFeeCurrency; private String paymentMethodUsed; private String paymentHoldStatus; private String unpaidItemCaseStatus; private String unpaidItemCaseType; private String originCustomerOrderItemId; public String getId() { return id; } public void setId(String id) { this.id = id; } public Long getNumIid() { if (numIid == null) { numIid = FormNumberCache.getCustomerOrderItemNumIidSeq(); } return numIid; } public void setNumIid(Long numIid) { this.numIid = numIid; } public String getCustomerOrderItemNo() { return customerOrderItemNo; } public void setCustomerOrderItemNo(String customerOrderItemNo) { this.customerOrderItemNo = customerOrderItemNo; } public String getCustomerOrderNo() { return customerOrderNo; } public String getAccountNumber() { return accountNumber; } public String getCustomerName() { return customerName; } public void setArticleNumber(String articleNumber) { this.articleNumber = articleNumber; } public String getCategoryCode() { return categoryCode; } public void setCategoryCode(String categoryCode) { this.categoryCode = categoryCode; } public String getSaleSuiteArticleNumber() { return saleSuiteArticleNumber; } public void setSaleSuiteArticleNumber(String saleSuiteArticleNumber) { this.saleSuiteArticleNumber = saleSuiteArticleNumber; } public Double getSaleSuiteItemQuantity() { if (saleSuiteItemQuantity == null) { saleSuiteItemQuantity = 0d; } return saleSuiteItemQuantity; } public void setSaleSuiteItemQuantity(Double saleSuiteItemQuantity) { this.saleSuiteItemQuantity = saleSuiteItemQuantity; } public Long getStockKeepingUnitId() { return stockKeepingUnitId; } public void setStockKeepingUnitId(Long stockKeepingUnitId) { this.stockKeepingUnitId = stockKeepingUnitId; } public Double getSkuPurchasePrice() { if (skuPurchasePrice == null) { skuPurchasePrice = 0d; } return 
skuPurchasePrice; } public void setSkuPurchasePrice(Double skuPurchasePrice) { this.skuPurchasePrice = skuPurchasePrice; } public Double getPrice() { if (price == null) { price = 0d; } return price; } public void setPrice(Double price) { this.price = price; } public Double getPostageFee() { if (postageFee == null) { postageFee = 0d; } return postageFee; } public void setPostageFee(Double postageFee) { this.postageFee = postageFee; } public String getCurrency() { if (StringUtils.isEmpty(currency)) { currency = "USD"; } return currency; } public void setCurrency(String currency) { this.currency = currency; } public Double getQuantity() { if (quantity == null) { quantity = 0d; } return quantity; } public void setQuantity(Double quantity) { this.quantity = quantity; } public Double getAllocatedQuantity() { if (allocatedQuantity == null) { allocatedQuantity = 0d; } return allocatedQuantity; } public void setAllocatedQuantity(Double allocatedQuantity) { this.allocatedQuantity = allocatedQuantity; } public Double getStockQuantity() { if (stockQuantity == null) { stockQuantity = 0d; } return stockQuantity; } public void setStockQuantity(Double stockQuantity) { this.stockQuantity = stockQuantity; } public Double getDeliverQuantity() { if (deliverQuantity == null) { deliverQuantity = 0d; } return deliverQuantity; } public void setDeliverQuantity(Double deliverQuantity) { this.deliverQuantity = deliverQuantity; } public Double getToDeliverQuantity() { Double quantity = this.getQuantity(); Double deliverQuantity = this.getDeliverQuantity(); if (quantity > deliverQuantity) { return quantity - deliverQuantity; } else { return 0d; } } public String getStockLocation() { return stockLocation; } public void setStockLocation(String stockLocation) { this.stockLocation = stockLocation; } public void setBuyerCheckoutMessage(String buyerCheckoutMessage) { this.buyerCheckoutMessage = buyerCheckoutMessage; } public String getBuyerCheckoutMessage() { return buyerCheckoutMessage; } public 
EbayOrderDTO getCustomerOrderDTO() {
    return customerOrderDTO;
}

public void setCustomerOrderDTO(EbayOrderDTO customerOrderDTO) {
    this.customerOrderDTO = customerOrderDTO;
}

public String getCustomerOrderId() {
    return customerOrderId;
}

public void setCustomerOrderId(String customerOrderId) {
    this.customerOrderId = customerOrderId;
}

public String getCustomerOrderProcessStep() {
    return customerOrderProcessStep;
}

public void setCustomerOrderProcessStep(String customerOrderProcessStep) {
    this.customerOrderProcessStep = customerOrderProcessStep;
}

/**
 * Line subtotal: unit price times quantity, postage excluded.
 * NOTE(review): throws NPE when getCustomLabelQuantity() resolves to null — same as the
 * original code; callers appear to rely on quantity being resolvable.
 */
public Double getSubtotalPrice() {
    return this.getCustomLabelPrice() * this.getCustomLabelQuantity();
}

/** Computed order total: subtotal plus postage fee. */
public Double getCalTotalPrice() {
    return this.getCustomLabelPrice() * this.getCustomLabelQuantity() + this.getPostageFee();
}

/** Total paid by the buyer; lazily defaults to 0 when never set. */
public Double getTotalPrice() {
    if (this.totalPrice == null) {
        this.totalPrice = 0d;
    }
    return this.totalPrice;
}

public void setTotalPrice(Double totalPrice) {
    this.totalPrice = totalPrice;
}

/**
 * Order total converted to RMB using the exchange rate for {@code currency}
 * (USD rate when the currency is empty or unrecognised).
 */
public Double getRmbTotalPrice() {
    // getTotalPrice() never returns null (defaults to 0), so the old null guard was redundant.
    return getCurrencyToRmbRate() * getTotalPrice();
}

/**
 * Effective per-unit price in RMB: unit price plus the postage share per unit,
 * floored at the average price (total / quantity) when that is higher.
 */
public Double getRmbSkuPrice() {
    Double skuPrice = this.getPrice();
    if (this.getPostageFee() > 0 && this.getQuantity() > 0) {
        skuPrice = skuPrice + this.getPostageFee() / this.getQuantity();
    }
    if (this.getTotalPrice() > 0 && this.getQuantity() > 0) {
        double averagePrice = this.getTotalPrice() / this.getQuantity();
        if (skuPrice < averagePrice) {
            skuPrice = averagePrice;
        }
    }
    return getCurrencyToRmbRate() * skuPrice;
}

/**
 * Exchange rate from {@code currency} to RMB. Empty or unrecognised currencies fall
 * back to the USD rate — exactly the behavior of the branch chains this helper replaces
 * (which were duplicated in getRmbTotalPrice and getRmbSkuPrice).
 */
private double getCurrencyToRmbRate() {
    if (StringUtils.isNotEmpty(currency)) {
        if ("GBP".equals(currency)) {
            return Env.getCurrencyGbpRate();
        } else if ("EUR".equals(currency)) {
            return Env.getCurrencyEurRate();
        } else if ("AUD".equals(currency)) {
            return Env.getCurrencyAudRate();
        }
    }
    // "USD" and any unknown/empty currency both use the USD rate.
    return Env.getCurrencyUsdRate();
}

public Double getGrossProfit() {
    return grossProfit;
}

public void setGrossProfit(Double grossProfit) {
    this.grossProfit = grossProfit;
}

public Double getBonusPoint() {
    return bonusPoint;
}

public void setBonusPoint(Double bonusPoint) {
    this.bonusPoint = bonusPoint;
}

public String getProductManagerNo() {
    return productManagerNo;
}

public void setProductManagerNo(String productManagerNo) {
    this.productManagerNo = productManagerNo;
}

public String getProductManagerName() {
    return productManagerName;
}

public void setProductManagerName(String productManagerName) {
    this.productManagerName = productManagerName;
}

public String getVendorCode() {
    return vendorCode;
}

public void setVendorCode(String vendorCode) {
    this.vendorCode = vendorCode;
}

public Boolean getAutoPending() {
    return autoPending;
}

public void setAutoPending(Boolean autoPending) {
    this.autoPending = autoPending;
}

/** Inactive flag; lazily defaults to {@code false} when never set. */
public Boolean getInactive() {
    if (inactive == null) {
        inactive = false;
    }
    return inactive;
}

public void setInactive(Boolean inactive) {
    this.inactive = inactive;
}

public Timestamp getModifiedTime() {
    return modifiedTime;
}

public void setModifiedTime(Timestamp modifiedTime) {
    this.modifiedTime = modifiedTime;
}

public String getSaleChannel() {
    return saleChannel;
}

public void setSaleChannel(String saleChannel) {
    this.saleChannel = saleChannel;
}

/** True when the order was paid more than ~2 days ago (shipping is considered delayed). */
public boolean getIsDelayThreeDays() {
    // Removed unused local "today"; behavior unchanged.
    Date threshold = DateUtils.getBeforeDate(new Date(), 2);
    return this.getPaidOnDate() != null && threshold.after(this.getPaidOnDate());
}

/** True when the order was paid more than ~9 days ago. */
public boolean getIsDelayTenDays() {
    // Removed unused local "today"; behavior unchanged.
    Date threshold = DateUtils.getBeforeDate(new Date(), 9);
    return this.getPaidOnDate() != null && threshold.after(this.getPaidOnDate());
}

public void setCustomerOrderNo(String customerOrderNo) {
    this.customerOrderNo = customerOrderNo;
}

public void setAccountNumber(String accountNumber) {
    this.accountNumber = accountNumber;
}

public void setCustomerName(String customerName) {
    this.customerName = customerName;
}

public String getItemNumber() {
    return itemNumber;
}

public void setItemNumber(String itemNumber) {
    this.itemNumber = itemNumber;
}

public String getVariationNumber() {
    return variationNumber;
}

public void setVariationNumber(String variationNumber) {
    this.variationNumber = variationNumber;
}

public String getItemTitle() {
    return itemTitle;
}

public void setItemTitle(String itemTitle) {
    this.itemTitle = itemTitle;
}

public String getItemSite() {
    return itemSite;
}

public void setItemSite(String itemSite) {
    this.itemSite = itemSite;
}

public String getVariationTitle() {
    return variationTitle;
}

public void setVariationTitle(String variationTitle) {
    this.variationTitle = variationTitle;
}

public String getVariationViewItemURL() {
    return variationViewItemURL;
}

public void setVariationViewItemURL(String variationViewItemURL) {
    this.variationViewItemURL = variationViewItemURL;
}

public String getRefundPayPalTransactionId() {
    return refundPayPalTransactionId;
}

public void setRefundPayPalTransactionId(String refundPayPalTransactionId) {
    this.refundPayPalTransactionId = refundPayPalTransactionId;
}

/**
 * True when the sold listing is a sale-suite (bundle): a custom label exists and differs
 * from the article number (case-insensitive).
 * NOTE(review): getCustomLabelQuantity() uses a case-SENSITIVE equals for the same pair —
 * confirm whether that inconsistency is intentional.
 */
public Boolean getHasSaleSuiteItem() {
    return StringUtils.isNotEmpty(customLabel) && !customLabel.equalsIgnoreCase(articleNumber);
}

public String getWarehouseCode() {
    return warehouseCode;
}

public void setWarehouseCode(String warehouseCode) {
    this.warehouseCode = warehouseCode;
}

public String getCustomLabel() {
    return customLabel;
}

public void setCustomLabel(String customLabel) {
    this.customLabel = customLabel;
}

/** Unit price of the custom label; lazily falls back to the plain item price. */
public Double getCustomLabelPrice() {
    if (customLabelPrice == null) {
        customLabelPrice = this.getPrice();
    }
    return customLabelPrice;
}

public void setCustomLabelPrice(Double customLabelPrice) {
    this.customLabelPrice = customLabelPrice;
}

/**
 * Quantity in custom-label units. When unset (or 0) it is derived lazily:
 * quantity / saleSuiteItemQuantity for suites with a per-suite count, plain quantity
 * for non-suite items; suites without a per-suite count stay unresolved (may return null).
 */
public Double getCustomLabelQuantity() {
    if (this.customLabelQuantity == null || this.customLabelQuantity == 0d) {
        boolean hasSuiteItem = StringUtils.isNotEmpty(articleNumber)
                && StringUtils.isNotEmpty(customLabel)
                && !articleNumber.equals(customLabel);
        if (this.getQuantity() > 0) {
            if (this.getSaleSuiteItemQuantity() > 0) {
                this.customLabelQuantity = this.quantity / this.saleSuiteItemQuantity;
            } else if (!hasSuiteItem) {
                this.customLabelQuantity = this.quantity;
            }
        }
    }
    return this.customLabelQuantity;
}

public void setCustomLabelQuantity(Double customLabelQuantity) {
    this.customLabelQuantity = customLabelQuantity;
}

public String getTransactionId() {
    return transactionId;
}

public void setTransactionId(String transactionId) {
    this.transactionId = transactionId;
}

public String getEbayOrderId() {
    return ebayOrderId;
}

public void setEbayOrderId(String ebayOrderId) {
    this.ebayOrderId = ebayOrderId;
}

public String getPayPalEmailAddress() {
    return payPalEmailAddress;
}

public void setPayPalEmailAddress(String payPalEmailAddress) {
    this.payPalEmailAddress = payPalEmailAddress;
}

public String getPayPalTransactionId() {
    return payPalTransactionId;
}

public void setPayPalTransactionId(String payPalTransactionId) {
    this.payPalTransactionId = payPalTransactionId;
}

public Date getSaleDate() {
    return saleDate;
}

public void setSaleDate(Date saleDate) {
    this.saleDate = saleDate;
}

public Date getPaidOnDate() {
    return paidOnDate;
}

public void setPaidOnDate(Date paidOnDate) {
    this.paidOnDate = paidOnDate;
}

public Date getShippedDate() {
    return shippedDate;
}

public void setShippedDate(Date shippedDate) {
    this.shippedDate = shippedDate;
}

/**
 * Days between payment and shipment (or "now" when not yet shipped); 0 when unpaid.
 * The "+1 ms, +1 day" adjustment reproduces the original rounding exactly.
 */
public Integer getHandlingDays() {
    if (this.paidOnDate != null) {
        long endMillis = this.shippedDate != null ? this.shippedDate.getTime() : new Date().getTime();
        this.handlingDays = (int) (((endMillis - this.paidOnDate.getTime()) + 1) / DateUtils.DAYINMILISECOND) + 1;
    } else {
        this.handlingDays = 0;
    }
    return handlingDays;
}

/** Marketplace final-value fee; lazily defaults to 0 when unset. */
public Double getFinalValueFee() {
    if (finalValueFee == null) {
        finalValueFee = 0d;
    }
    return finalValueFee;
}

public void setFinalValueFee(Double finalValueFee) {
    this.finalValueFee = finalValueFee;
}

/** Currency of the final-value fee; lazily defaults to "USD" when blank. */
public String getFinalValueFeeCurrency() {
    if (StringUtils.isEmpty(finalValueFeeCurrency)) {
        this.finalValueFeeCurrency = "USD";
    }
    return finalValueFeeCurrency;
}

public void setFinalValueFeeCurrency(String finalValueFeeCurrency) {
    this.finalValueFeeCurrency = finalValueFeeCurrency;
}

public String getPaymentMethodUsed() {
    return paymentMethodUsed;
}

public void setPaymentMethodUsed(String paymentMethodUsed) {
    this.paymentMethodUsed = paymentMethodUsed;
}

public String getPaymentHoldStatus() {
    return paymentHoldStatus;
}

public void setPaymentHoldStatus(String paymentHoldStatus) {
    this.paymentHoldStatus = paymentHoldStatus;
}

public String getViewItemUrl() {
    return this.viewItemUrl;
}

public void setViewItemUrl(String viewItemUrl) {
    this.viewItemUrl = viewItemUrl;
}

public String getSaleRecordNo() {
    return saleRecordNo;
}

public void setSaleRecordNo(String saleRecordNo) {
    this.saleRecordNo = saleRecordNo;
}

public String getOrderLineItemId() {
    return orderLineItemId;
}

public void setOrderLineItemId(String orderLineItemId) {
    this.orderLineItemId = orderLineItemId;
}

public String getUnpaidItemCaseStatus() {
    return unpaidItemCaseStatus;
}

public void setUnpaidItemCaseStatus(String unpaidItemCaseStatus) {
    this.unpaidItemCaseStatus = unpaidItemCaseStatus;
}

public String getUnpaidItemCaseType() {
    return unpaidItemCaseType;
}

public void setUnpaidItemCaseType(String unpaidItemCaseType) {
    this.unpaidItemCaseType = unpaidItemCaseType;
}

public String getOriginCustomerOrderItemId() {
    return originCustomerOrderItemId;
}

public void setOriginCustomerOrderItemId(String originCustomerOrderItemId) {
    this.originCustomerOrderItemId = originCustomerOrderItemId;
}

/** Identity is (accountNumber, customerOrderItemNo, id) — keep equals/hashCode in sync. */
@Override
public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + ((accountNumber == null) ? 0 : accountNumber.hashCode());
    result = prime * result + ((customerOrderItemNo == null) ? 0 : customerOrderItemNo.hashCode());
    result = prime * result + ((id == null) ? 0 : id.hashCode());
    return result;
}

@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
        return false;
    }
    EbayOrderItemDTO other = (EbayOrderItemDTO) obj;
    if (accountNumber == null) {
        if (other.accountNumber != null) {
            return false;
        }
    } else if (!accountNumber.equals(other.accountNumber)) {
        return false;
    }
    if (customerOrderItemNo == null) {
        if (other.customerOrderItemNo != null) {
            return false;
        }
    } else if (!customerOrderItemNo.equals(other.customerOrderItemNo)) {
        return false;
    }
    if (id == null) {
        if (other.id != null) {
            return false;
        }
    } else if (!id.equals(other.id)) {
        return false;
    }
    return true;
}
}
/*******************************************************************************
 *
 * Pentaho Big Data
 *
 * Copyright (C) 2002-2019 by Hitachi Vantara : http://www.pentaho.com
 *
 *******************************************************************************
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 ******************************************************************************/

package org.pentaho.hadoop.mapreduce;

import com.thoughtworks.xstream.XStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapRunnable;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.pentaho.di.core.Const;
import org.pentaho.di.core.exception.KettleException;
import org.pentaho.di.core.logging.KettleLogStore;
import org.pentaho.di.core.logging.KettleLoggingEvent;
import org.pentaho.di.core.logging.LogLevel;
import org.pentaho.di.core.row.RowMeta;
import org.pentaho.di.core.variables.VariableSpace;
import org.pentaho.di.core.variables.Variables;
import org.pentaho.di.trans.RowProducer;
import org.pentaho.di.trans.Trans;
import org.pentaho.di.trans.step.BaseStepMeta;
import org.pentaho.di.trans.step.StepInterface;
import org.pentaho.di.trans.step.StepMeta;
import org.pentaho.di.trans.step.StepMetaInterface;
import org.pentaho.di.trans.steps.missing.MissingTrans;
import org.pentaho.hadoop.mapreduce.converter.TypeConverterFactory;
import org.pentaho.hadoop.mapreduce.converter.spi.ITypeConverter;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

/**
 * Map runner that uses the normal Kettle execution engine to process all input data during one single run.<p> This
 * relies on newly un-@Deprecated interfaces ({@link MapRunnable}, {@link JobConf}) in Hadoop 0.21.0.
 */
public class PentahoMapRunnable<K1, V1, K2, V2> implements MapRunnable<K1, V1, K2, V2> {

  // Kettle variable name that overrides how long we wait for step plugins to become available.
  public static final String KETTLE_PMR_PLUGIN_TIMEOUT = "KETTLE_PMR_PLUGIN_TIMEOUT";
  // Plugin-wait timeout in milliseconds; defaults to 5 minutes in configure().
  private long pluginWaitTimeout;

  protected static enum Counter {
    INPUT_RECORDS, OUTPUT_RECORDS, OUT_RECORD_WITH_NULL_KEY, OUT_RECORD_WITH_NULL_VALUE
  }

  // Serialized transformation XML and step names, all read from the JobConf in configure().
  protected String transMapXml;

  protected String transReduceXml;

  protected String mapInputStepName;

  protected String reduceInputStepName;

  protected String mapOutputStepName;

  protected String reduceOutputStepName;

  protected Class<K2> outClassK;

  protected Class<V2> outClassV;

  protected String id = UUID.randomUUID().toString();

  protected boolean debug = false;

  // the transformation that will be used as a mapper or reducer
  protected Trans trans;

  // Variable space deserialized from the PDI job (or empty Variables when not sent).
  protected VariableSpace variableSpace = null;

  protected LogLevel logLevel;

  // Row listener that forwards output rows to the Hadoop OutputCollector; also carries errors.
  protected OutputCollectorRowListener<K2, V2> rowCollector;

  // Job-conf keys with this prefix become JVM system properties (prefix stripped).
  private final String ENVIRONMENT_VARIABLE_PREFIX = "java.system.";
  // Job-conf keys / PDI variables with this prefix become system properties as-is.
  private final String KETTLE_VARIABLE_PREFIX = "KETTLE_";

  public PentahoMapRunnable() throws KettleException {
  }

  /**
   * Reads all mapper configuration from the Hadoop job, propagates PDI/Kettle variables into
   * system properties, and waits (up to pluginWaitTimeout) until the mapper transformation can
   * be created with no MissingTrans placeholder steps, i.e. all step plugins are loaded.
   */
  public void configure( JobConf job ) {
    pluginWaitTimeout = TimeUnit.MINUTES.toMillis( 5 );

    debug = "true".equalsIgnoreCase( job.get( "debug" ) ); //$NON-NLS-1$

    transMapXml = job.get( "transformation-map-xml" );
    transReduceXml = job.get( "transformation-reduce-xml" );
    mapInputStepName = job.get( "transformation-map-input-stepname" );
    mapOutputStepName = job.get( "transformation-map-output-stepname" );
    reduceInputStepName = job.get( "transformation-reduce-input-stepname" );
    reduceOutputStepName = job.get( "transformation-reduce-output-stepname" );
    String xmlVariableSpace = job.get( "variableSpace" );

    // NOTE(review): unchecked casts — the job's configured output classes are trusted to match K2/V2.
    outClassK = (Class<K2>) job.getMapOutputKeyClass();
    outClassV = (Class<V2>) job.getMapOutputValueClass();

    if ( !Const.isEmpty( xmlVariableSpace ) ) {
      setDebugStatus( "PentahoMapRunnable(): variableSpace was retrieved from the job. The contents: " );
      setDebugStatus( xmlVariableSpace );

      // deserialize from xml to variable space
      XStream xStream = new XStream();

      setDebugStatus( "PentahoMapRunnable(): Setting classes variableSpace property.: " );
      variableSpace = (VariableSpace) xStream.fromXML( xmlVariableSpace );

      for ( String variableName : variableSpace.listVariables() ) {
        if ( variableName.startsWith( KETTLE_VARIABLE_PREFIX ) ) {
          System.setProperty( variableName, variableSpace.getVariable( variableName ) );
        }
        if ( KETTLE_PMR_PLUGIN_TIMEOUT.equals( variableName ) ) {
          try {
            pluginWaitTimeout = Long.parseLong( variableSpace.getVariable( variableName ) );
          } catch ( Exception e ) {
            // Keep the 5-minute default on a malformed value rather than failing the job.
            System.out.println( "Unable to parse plugin wait timeout, defaulting to 5 minutes" );
          }
        }
      }
    } else {
      setDebugStatus( "PentahoMapRunnable(): The PDI Job's variable space was not sent." );
      variableSpace = new Variables();
    }

    // Check for environment variables in the userDefined variables
    Iterator<Entry<String, String>> iter = job.iterator();
    while ( iter.hasNext() ) {
      Entry<String, String> entry = iter.next();
      if ( entry.getKey().startsWith( ENVIRONMENT_VARIABLE_PREFIX ) ) {
        System.setProperty( entry.getKey().substring( ENVIRONMENT_VARIABLE_PREFIX.length() ), entry.getValue() );
      } else if ( entry.getKey().startsWith( KETTLE_VARIABLE_PREFIX ) ) {
        System.setProperty( entry.getKey(), entry.getValue() );
      }
    }

    MRUtil.passInformationToTransformation( variableSpace, job );

    setDebugStatus( "Job configuration" );
    setDebugStatus( "Output key class: " + outClassK.getName() );
    setDebugStatus( "Output value class: " + outClassV.getName() );

    // set the log level to what the level of the job is
    String stringLogLevel = job.get( "logLevel" );
    if ( !Const.isEmpty( stringLogLevel ) ) {
      logLevel = LogLevel.valueOf( stringLogLevel );
      setDebugStatus( "Log level set to " + stringLogLevel );
    } else {
      System.out.println( "Could not retrieve the log level from the job configuration. logLevel will not be set." );
    }

    // Retry trans creation until no MissingTrans placeholder steps remain (plugins may still be
    // registering on this node), or the deadline passes — then fail with the plugin ids.
    long deadline = 0;
    boolean first = true;
    while ( true ) {
      createTrans( job );

      if ( first ) {
        deadline = pluginWaitTimeout + System.currentTimeMillis();
        System.out
          .println( PentahoMapRunnable.class + ": Trans creation checking starting now " + new Date().toString() );
        first = false;
      }

      List<MissingTrans> missingTranses = new ArrayList<MissingTrans>();
      for ( StepMeta stepMeta : trans.getTransMeta().getSteps() ) {
        StepMetaInterface stepMetaInterface = stepMeta.getStepMetaInterface();
        if ( stepMetaInterface instanceof MissingTrans ) {
          MissingTrans missingTrans = (MissingTrans) stepMetaInterface;
          System.out.println(
            MissingTrans.class + "{stepName: " + missingTrans.getStepName() + ", missingPluginId: " + missingTrans
              .getMissingPluginId() + "}" );
          missingTranses.add( missingTrans );
        }
      }

      if ( missingTranses.size() == 0 ) {
        System.out.println( PentahoMapRunnable.class + ": Done waiting on plugins now " + new Date().toString() );
        break;
      } else {
        if ( System.currentTimeMillis() > deadline ) {
          StringBuilder stringBuilder = new StringBuilder( "Failed to initialize plugins: " );
          for ( MissingTrans missingTrans : missingTranses ) {
            stringBuilder.append( missingTrans.getMissingPluginId() );
            stringBuilder.append( " on step " ).append( missingTrans.getStepName() );
            stringBuilder.append( ", " );
          }
          // Trim the trailing ", " separator.
          stringBuilder.setLength( stringBuilder.length() - 2 );
          throw new RuntimeException( stringBuilder.toString() );
        } else {
          try {
            Thread.sleep( Math.min( 100, deadline - System.currentTimeMillis() ) );
          } catch ( InterruptedException e ) {
            throw new RuntimeException( e );
          }
        }
      }
    }
  }

  /**
   * Convenience overload: injects with key at ordinal 0 and value at ordinal 1.
   */
  public void injectValue( Object key, ITypeConverter inConverterK, Object value, ITypeConverter inConverterV,
                           RowMeta injectorRowMeta, RowProducer rowProducer, Reporter reporter ) throws Exception {

    injectValue( key, 0, inConverterK, value, 1, inConverterV, injectorRowMeta, rowProducer, reporter );
  }

  /**
   * Builds one Kettle row from a Hadoop key/value pair (converting each through the supplied
   * converter when non-null) and pushes it into the transformation via the row producer.
   */
  public void injectValue( Object key, int keyOrdinal, ITypeConverter inConverterK, Object value, int valueOrdinal,
                           ITypeConverter inConverterV, RowMeta injectorRowMeta, RowProducer rowProducer,
                           Reporter reporter )
    throws Exception {

    Object[] row = new Object[ injectorRowMeta.size() ];
    row[ keyOrdinal ] =
      inConverterK != null ? inConverterK.convert( injectorRowMeta.getValueMeta( keyOrdinal ), key ) : key;
    row[ valueOrdinal ] =
      inConverterV != null ? inConverterV.convert( injectorRowMeta.getValueMeta( valueOrdinal ), value ) : value;

    if ( debug ) {
      setDebugStatus( reporter, "Injecting input record [" + row[ keyOrdinal ] + "] - [" + row[ valueOrdinal ] + "]" );
    }

    rowProducer.putRow( injectorRowMeta, row );
  }

  /** Instantiates the mapper transformation from its XML; stored in {@link #trans}. */
  protected void createTrans( final Configuration conf ) {
    try {
      setDebugStatus( "Creating a transformation for a map." );
      trans = MRUtil.getTrans( conf, transMapXml, false );
    } catch ( KettleException ke ) {
      throw new RuntimeException( "Error loading transformation", ke ); //$NON-NLS-1$
    }
  }

  public String getTransMapXml() {
    return transMapXml;
  }

  public void setTransMapXml( String transMapXml ) {
    this.transMapXml = transMapXml;
  }

  public String getTransReduceXml() {
    return transReduceXml;
  }

  public void setTransReduceXml( String transReduceXml ) {
    this.transReduceXml = transReduceXml;
  }

  public String getMapInputStepName() {
    return mapInputStepName;
  }

  public void setMapInputStepName( String mapInputStepName ) {
    this.mapInputStepName = mapInputStepName;
  }

  public String getMapOutputStepName() {
    return mapOutputStepName;
  }

  public void setMapOutputStepName( String mapOutputStepName ) {
    this.mapOutputStepName = mapOutputStepName;
  }

  public String getReduceInputStepName() {
    return reduceInputStepName;
  }

  public void setReduceInputStepName( String reduceInputStepName ) {
    this.reduceInputStepName = reduceInputStepName;
  }

  public String getReduceOutputStepName() {
    return reduceOutputStepName;
  }

  public void setReduceOutputStepName( String reduceOutputStepName ) {
    this.reduceOutputStepName = reduceOutputStepName;
  }

  public Class<?> getOutClassK() {
    return outClassK;
  }

  public void setOutClassK( Class<K2> outClassK ) {
    this.outClassK = outClassK;
  }

  public Class<?> getOutClassV() {
    return outClassV;
  }

  public void setOutClassV( Class<V2> outClassV ) {
    this.outClassV = outClassV;
  }

  public Trans getTrans() {
    return trans;
  }

  public void setTrans( Trans trans ) {
    this.trans = trans;
  }

  public String getId() {
    return id;
  }

  public void setId( String id ) {
    this.id = id;
  }

  /** Exception raised by the output row collector while emitting rows, if any. */
  public Exception getException() {
    return rowCollector != null ? rowCollector.getException() : null;
  }

  /** Debug-only status: echoes to stdout and to the Hadoop reporter. */
  public void setDebugStatus( Reporter reporter, String message ) {
    if ( debug ) {
      System.out.println( message );
      reporter.setStatus( message );
    }
  }

  /** Debug-only status: stdout only (no reporter available). */
  private void setDebugStatus( String message ) {
    if ( debug ) {
      System.out.println( message );
    }
  }

  /**
   * Runs the whole map task: recreates the transformation, wires the Hadoop record reader into
   * the input step and the output step into the collector, pumps every input record through,
   * waits for completion, and surfaces any transformation errors as exceptions.
   */
  public void run( RecordReader<K1, V1> input, final OutputCollector<K2, V2> output, final Reporter reporter )
    throws IOException {
    try {
      if ( trans == null ) {
        throw new RuntimeException( "Error initializing transformation.  See error log." ); //$NON-NLS-1$
      } else {
        // Clean up old logging
        KettleLogStore.discardLines( trans.getLogChannelId(), true );
      }

      // Create a copy of trans so we don't continue to add new TransListeners and run into a
      // ConcurrentModificationException
      // when this mapper is reused "quickly"
      trans = MRUtil.recreateTrans( trans );

      String logLinePrefix = getClass().getName() + ".run: ";
      setDebugStatus( logLinePrefix + " The transformation was just recreated." );

      //  share the variables from the PDI job.
      //  we do this here instead of in createTrans() as MRUtil.recreateTrans() wil not
      //  copy "execution" trans information.
      if ( variableSpace != null ) {
        setDebugStatus( "Sharing the VariableSpace from the PDI job." );
        trans.shareVariablesWith( variableSpace );

        if ( debug ) {

          //  list the variables
          List<String> variables = Arrays.asList( trans.listVariables() );
          Collections.sort( variables );

          if ( variables != null ) {
            setDebugStatus( "Variables: " );
            for ( String variable : variables ) {
              setDebugStatus( " " + variable + " = " + trans.getVariable( variable ) );
            }
          }
        }
      } else {
        setDebugStatus( reporter, "variableSpace is null.  We are not going to share it with the trans." );
      }

      //  set the trans' log level if we have our's set
      if ( logLevel != null ) {
        setDebugStatus( "Setting the trans.logLevel to " + logLevel.toString() );
        trans.setLogLevel( logLevel );
      } else {
        setDebugStatus( "logLevel is null.  The trans log level will not be set." );
      }

      // allocate key & value instances that are re-used for all entries
      K1 key = input.createKey();
      V1 value = input.createValue();

      setDebugStatus( reporter, "Preparing transformation for execution" );
      trans.prepareExecution( null );

      try {
        setDebugStatus( reporter, "Locating output step: " + mapOutputStepName );
        StepInterface outputStep = trans.findRunThread( mapOutputStepName );
        if ( outputStep != null ) {
          // NOTE(review): raw-type construction of a generic listener — kept as-is.
          rowCollector = new OutputCollectorRowListener( output, outClassK, outClassV, reporter, debug );
          //  rowCollector = OutputCollectorRowListener.build(output, outputRowMeta, outClassK, outClassV,
          //  reporter, debug);
          outputStep.addRowListener( rowCollector );

          RowMeta injectorRowMeta = new RowMeta();
          RowProducer rowProducer = null;
          TypeConverterFactory typeConverterFactory = new TypeConverterFactory();
          ITypeConverter inConverterK = null;
          ITypeConverter inConverterV = null;

          setDebugStatus( reporter, "Locating input step: " + mapInputStepName );
          if ( mapInputStepName != null ) {
            // Setup row injection
            rowProducer = trans.addRowProducer( mapInputStepName, 0 );
            StepInterface inputStep = rowProducer.getStepInterface();
            StepMetaInterface inputStepMeta = inputStep.getStepMeta().getStepMetaInterface();

            InKeyValueOrdinals inOrdinals = null;
            if ( inputStepMeta instanceof BaseStepMeta ) {
              setDebugStatus( reporter,
                "Generating converters from RowMeta for injection into the mapper transformation" );

              // Use getFields(...) to get the row meta and therefore the expected input types
              inputStepMeta.getFields( injectorRowMeta, null, null, null, null );

              inOrdinals = new InKeyValueOrdinals( injectorRowMeta );

              if ( inOrdinals.getKeyOrdinal() < 0 || inOrdinals.getValueOrdinal() < 0 ) {
                throw new KettleException( "key or value is not defined in transformation injector step" );
              }

              //  Get a converter for the Key if the value meta has a concrete Java class we can use.
              //  If no converter can be found here we wont do any type conversion.
              if ( injectorRowMeta.getValueMeta( inOrdinals.getKeyOrdinal() ) != null ) {
                inConverterK = typeConverterFactory
                  .getConverter( key.getClass(), injectorRowMeta.getValueMeta( inOrdinals.getKeyOrdinal() ) );
              }

              //  Get a converter for the Value if the value meta has a concrete Java class we can use.
              //  If no converter can be found here we wont do any type conversion.
              if ( injectorRowMeta.getValueMeta( inOrdinals.getValueOrdinal() ) != null ) {
                inConverterV = typeConverterFactory
                  .getConverter( value.getClass(), injectorRowMeta.getValueMeta( inOrdinals.getValueOrdinal() ) );
              }
            }

            trans.startThreads();
            if ( rowProducer != null ) {

              // Drain the record reader; re-uses the same key/value instances per Hadoop contract.
              while ( input.next( key, value ) ) {
                if ( inOrdinals != null ) {
                  injectValue( key, inOrdinals.getKeyOrdinal(), inConverterK, value, inOrdinals.getValueOrdinal(),
                    inConverterV, injectorRowMeta, rowProducer, reporter );
                } else {
                  injectValue( key, inConverterK, value, inConverterV, injectorRowMeta, rowProducer, reporter );
                }
              }

              rowProducer.finished();
            }

            trans.waitUntilFinished();
            setDebugStatus( reporter, "Mapper transformation has finished" );
            if ( trans.getErrors() > 0 ) {
              setDebugStatus( "Errors detected for mapper transformation" );
              List<KettleLoggingEvent> logList = KettleLogStore
                .getLogBufferFromTo( trans.getLogChannelId(), false, 0, KettleLogStore.getLastBufferLineNr() );

              StringBuffer buff = new StringBuffer();
              for ( KettleLoggingEvent le : logList ) {
                if ( le.getLevel() == LogLevel.ERROR ) {
                  buff.append( le.getMessage().toString() ).append( "\n" );
                }
              }
              throw new Exception( "Errors were detected for mapper transformation:\n\n" + buff.toString() );
            }

          } else {
            setDebugStatus( reporter, "No input stepname was defined" );
          }

          if ( getException() != null ) {
            setDebugStatus( reporter, "An exception was generated by the mapper transformation" );
            // Bubble the exception from within Kettle to Hadoop
            throw getException();
          }

        } else {
          if ( mapOutputStepName != null ) {
            // NOTE(review): missing space before "could" in this message — kept byte-exact.
            setDebugStatus( reporter, "Output step [" + mapOutputStepName + "]could not be found" );
            throw new KettleException( "Output step not defined in transformation" );
          } else {
            setDebugStatus( reporter, "Output step name not specified" );
          }
        }
      } finally {
        try {
          trans.stopAll();
        } catch ( Exception ex ) {
          ex.printStackTrace();
        }
        try {
          trans.cleanup();
        } catch ( Exception ex ) {
          ex.printStackTrace();
        }
      }
    } catch ( Exception e ) {
      e.printStackTrace( System.err );
      setDebugStatus( reporter, "An exception was generated by the mapper task" );
      throw new IOException( e );
    }
    reporter.setStatus( "Completed processing record" );
  }
}
/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
 *
 * Copyright (c) 2013-2014 sagyf Yang. The Four Group.
 */

package goja.plugins.redis;

import goja.kits.io.SerializableKit;
import com.google.common.collect.Lists;
import goja.Logger;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.Transaction;
import redis.clients.jedis.Tuple;
import redis.clients.util.SafeEncoder;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

/**
 * Static helper facade over a shared {@link JedisPool}. Values are stored either as plain
 * strings or as Java-serialized byte arrays (via SerializableKit); readers fall back to
 * decoding raw bytes as a String when deserialization fails.
 */
public class JedisKit {

    // Shared pool; must be set via init() before any other method is used.
    private static JedisPool pool;

    public static void init(JedisPool pool) {
        JedisKit.pool = pool;
    }

    /**
     * Runs the given actions inside a Redis MULTI/EXEC transaction and returns its results.
     * NOTE(review): unlike call(), this never returns the Jedis instance to the pool and has
     * no try/finally — looks like a connection leak; confirm and align with call().
     */
    public static List<Object> tx(JedisAtom jedisAtom) {
        Jedis jedis = pool.getResource();
        Transaction trans = jedis.multi();
        jedisAtom.action(trans);
        return trans.exec();
    }

    /**
     * Borrows a connection from the pool, runs the action, and always returns the connection.
     * Exceptions are logged and swallowed; the result is then null.
     */
    public static <T> T call(JedisAction<T> jedisAction) {
        T result = null;
        Jedis jedis = pool.getResource();
        try {
            result = jedisAction.action(jedis);
        } catch (Exception e) {
            e.printStackTrace();
            Logger.error(e.getMessage(), e);
        } finally {
            if (null != jedis) pool.returnResource(jedis);
        }
        return result;
    }

    /**
     * query one Object from Redis with key
     *
     * @param key using it get value from key-value database
     * @return the Object which implements Serializable
     */
    public static <T extends Serializable> T get(final String key) {
        return call(new JedisAction<T>() {
            @SuppressWarnings("unchecked")
            @Override
            public T action(Jedis jedis) {
                Object result = null;
                byte[] retVal = jedis.get(SafeEncoder.encode(key));
                if (null != retVal) {
                    try {
                        result = SerializableKit.toObject(retVal);
                    } catch (Exception e) {
                        // Not a serialized object — fall back to treating the bytes as a String.
                        result = SafeEncoder.encode(retVal);
                    }
                }
                return (T) result;
            }
        });
    }

    /**
     * set object to key-value database with the specified key
     *
     * @param key   the unique key to indicate the value Object
     * @param value the value indicated by the key
     * @return return true while the set operation is succeed, false by failed
     */
    public static boolean set(final String key, final Serializable value) {
        return call(new JedisAction<Boolean>() {
            @Override
            public Boolean action(Jedis jedis) {
                String retVal;
                if (value instanceof String) {
                    // Strings are stored verbatim so other clients can read them.
                    retVal = jedis.set(key, (String) value);
                } else {
                    retVal = jedis.set(SafeEncoder.encode(key), SerializableKit.toByteArray(value));
                }
                return "OK".equalsIgnoreCase(retVal);
            }
        });
    }

    /**
     * set object to key-value database with the specified key and EXPIRE time
     *
     * @param key     the unique key to indicate the value Object
     * @param value   the value indicated by the key
     * @param seconds EXPIRE time
     * @return return true while the set operation is succeed, false by failed
     */
    public static boolean set(final String key, final Serializable value, final int seconds) {
        return call(new JedisAction<Boolean>() {
            @Override
            public Boolean action(Jedis jedis) {
                byte[] bytes;
                if (value instanceof String) {
                    bytes = SafeEncoder.encode((String) value);
                } else {
                    bytes = SerializableKit.toByteArray(value);
                }
                String retVal = jedis.setex(SafeEncoder.encode(key), seconds, bytes);
                return "OK".equalsIgnoreCase(retVal);
            }
        });
    }

    /**
     * query multiple Object from Redis with key
     *
     * @param keys using it get value from key-value database
     * @return the Object list which implements Serializable; entries for missing keys stay null
     */
    public static List<Serializable> mquery(final String... keys) {
        return call(new JedisAction<List<Serializable>>() {
            @Override
            public List<Serializable> action(Jedis jedis) {
                // Pre-fill with nulls so positions line up with the requested keys.
                List<Serializable> result = new ArrayList<Serializable>(keys.length);
                for (int index = 0; index < keys.length; index++)
                    result.add(null);
                byte[][] encodeKeys = new byte[keys.length][];
                for (int i = 0; i < keys.length; i++)
                    encodeKeys[i] = SafeEncoder.encode(keys[i]);
                List<byte[]> retVals = jedis.mget(encodeKeys);
                if (null != retVals) {
                    int index = 0;
                    for (byte[] val : retVals) {
                        if (null != val) result.set(index, SerializableKit.toObject(val));
                        index++;
                    }
                }
                return result;
            }
        });
    }

    /** MGET for plain-string values. */
    public static List<String> mqueryStr(final String... keys) {
        return call(new JedisAction<List<String>>() {
            @Override
            public List<String> action(Jedis jedis) {
                return jedis.mget(keys);
            }
        });
    }

    /** MSET of serialized values; keys and serialized values interleaved for the wire call. */
    public static boolean msaveOrUpdate(final Map<String, Serializable> values) {
        return call(new JedisAction<Boolean>() {
            @Override
            public Boolean action(Jedis jedis) {
                byte[][] encodeValues = new byte[values.size() * 2][];
                int index = 0;
                Iterator<Entry<String, Serializable>> iter = values.entrySet().iterator();
                while (iter.hasNext()) {
                    Entry<String, Serializable> entry = iter.next();
                    encodeValues[index++] = entry.getKey().getBytes();
                    encodeValues[index++] = SerializableKit.toByteArray(entry.getValue());
                }
                String retVal = jedis.mset(encodeValues);
                return "OK".equalsIgnoreCase(retVal);
            }
        });
    }

    /** MSET for plain-string values. */
    public static boolean msaveOrUpdateStr(final Map<String, String> values) {
        return call(new JedisAction<Boolean>() {
            @Override
            public Boolean action(Jedis jedis) {
                Iterator<Entry<String, String>> iter = values.entrySet().iterator();
                int index = 0;
                String[] encodeValues = new String[values.size() * 2];
                while (iter.hasNext()) {
                    Entry<String, String> entry = iter.next();
                    encodeValues[index++] = entry.getKey();
                    encodeValues[index++] = entry.getValue();
                }
                return "OK".equalsIgnoreCase(jedis.mset(encodeValues));
            }
        });
    }

    /**
     * query keys set by pattern
     */
    public static Set<String> keys(final String pattern) {
        return call(new JedisAction<Set<String>>() {
            @Override
            public Set<String> action(Jedis jedis) {
                return jedis.keys(pattern);
            }
        });
    }

    /** DEL for one or more keys; returns the number of keys removed. */
    public static long del(final String... keys) {
        return call(new JedisAction<Long>() {
            @Override
            public Long action(Jedis jedis) {
                byte[][] encodeKeys = new byte[keys.length][];
                for (int i = 0; i < keys.length; i++)
                    encodeKeys[i] = SafeEncoder.encode(keys[i]);
                return jedis.del(encodeKeys);
            }
        });
    }

    /** RPUSH a serialized value (append to tail of the list). */
    public static long listAdd(final String key, final Serializable value) {
        return call(new JedisAction<Long>() {
            @Override
            public Long action(Jedis jedis) {
                return jedis.rpush(SafeEncoder.encode(key), SerializableKit.toByteArray(value));
            }
        });
    }

    /** LPUSH a serialized value (prepend to head of the list). */
    public static long listAddFirst(final String key, final Serializable value) {
        return call(new JedisAction<Long>() {
            @Override
            public Long action(Jedis jedis) {
                return jedis.lpush(SafeEncoder.encode(key), SerializableKit.toByteArray(value));
            }
        });
    }

    /** Redis TYPE of the stored value at key. */
    public static String type(final String key) {
        return call(new JedisAction<String>() {
            @Override
            public String action(Jedis jedis) {
                return jedis.type(SafeEncoder.encode(key));
            }
        });
    }

    /** LRANGE with deserialization; non-deserializable entries are silently skipped of cast. */
    @SuppressWarnings("unchecked")
    public static <T> List<T> queryList(final String key, final int start, final int end) {
        return call(new JedisAction<List<T>>() {
            @Override
            public List<T> action(Jedis jedis) {
                List<T> result = Lists.newArrayList();
                List<byte[]> retVals = jedis.lrange(SafeEncoder.encode(key), start, end);
                if (retVals != null) {
                    for (byte[] val : retVals) {
                        if (null != val) result.add((T) SerializableKit.toObject(val));
                    }
                }
                return result;
            }
        });
    }

    /** LLEN of the list at key. */
    public static long listSize(final String key) {
        return call(new JedisAction<Long>() {
            @Override
            public Long action(Jedis jedis) {
                return jedis.llen(SafeEncoder.encode(key));
            }
        });
    }

    /** LTRIM the list at key to [start, end]. */
    public static boolean listTrim(final String key, final int start, final int end) {
        return call(new JedisAction<Boolean>() {
            @Override
            public Boolean action(Jedis jedis) {
                return "OK".equalsIgnoreCase(jedis.ltrim(SafeEncoder.encode(key), start, end));
            }
        });
    }

    /** Atomic INCR. */
    public static long incrementAndGet(final String key) {
        return call(new JedisAction<Long>() {
            @Override
            public Long action(Jedis jedis) {
                return jedis.incr(key);
            }
        });
    }

    /** Atomic DECR. */
    public static long decrementAndGet(final String key) {
        return call(new JedisAction<Long>() {
            @Override
            public Long action(Jedis jedis) {
                return jedis.decr(key);
            }
        });
    }

    /** GET parsed as a long. NOTE(review): NPE/NumberFormatException if the key is absent. */
    public static long queryLong(final String key) {
        return call(new JedisAction<Long>() {
            @Override
            public Long action(Jedis jedis) {
                return Long.valueOf(jedis.get(key));
            }
        });
    }

    /** HMSET all entries of the map into the hash at key. */
    public static boolean hmset(final String key, final Map<String, String> values) {
        return call(new JedisAction<Boolean>() {
            @Override
            public Boolean action(Jedis jedis) {
                return "OK".equals(jedis.hmset(key, values));
            }
        });
    }

    /** HVALS of the hash at key. */
    public static List<String> hvals(final String key) {
        return call(new JedisAction<List<String>>() {
            @Override
            public List<String> action(Jedis jedis) {
                return jedis.hvals(key);
            }
        });
    }

    /** HMGET of the given fields from the hash at key. */
    public static List<String> hmget(final String key, final String... fields) {
        return call(new JedisAction<List<String>>() {
            @Override
            public List<String> action(Jedis jedis) {
                return jedis.hmget(key, fields);
            }
        });
    }

    /** ZINCRBY: add score to member's score in the sorted set at key. */
    public static Double zincrby(final String key, final double score, final String member) {
        return call(new JedisAction<Double>() {
            @Override
            public Double action(Jedis jedis) {
                return jedis.zincrby(key, score, member);
            }
        });
    }

    /** ZSCORE. NOTE(review): the parameter named "score" is actually the member. */
    public static Double zscore(final String key, final String score) {
        return call(new JedisAction<Double>() {
            @Override
            public Double action(Jedis jedis) {
                return jedis.zscore(key, score);
            }
        });
    }

    /** ZADD member with score to the sorted set at key. */
    public static Long zadd(final String key, final double score, final String member) {
        return call(new JedisAction<Long>() {
            @Override
            public Long action(Jedis jedis) {
                return jedis.zadd(key, score, member);
            }
        });
    }

    /** ZRANGE WITHSCORES over [start, end]. */
    public static Set<Tuple> zrangeWithScores(final String key, final long start, final long end) {
        return call(new JedisAction<Set<Tuple>>() {
            @Override
            public Set<Tuple> action(Jedis jedis) {
                return jedis.zrangeWithScores(key, start, end);
            }
        });
    }

    /** WATCH keys for the next transaction on this connection. */
    public static String watch(final String... keys) {
        return call(new JedisAction<String>() {
            @Override
            public String action(Jedis jedis) {
                return jedis.watch(keys);
            }
        });
    }

    /** LPUSH; strings go in verbatim, anything else is Java-serialized. */
    public static Long lpush(final String key, final Serializable value) {
        return call(new JedisAction<Long>() {
            @Override
            public Long action(Jedis jedis) {
                Long retVal;
                if (value instanceof String) {
                    retVal = jedis.lpush(key, (String) value);
                } else {
                    retVal = jedis.lpush(SafeEncoder.encode(key), SerializableKit.toByteArray(value));
                }
                return retVal;
            }
        });
    }

    /** RPOP with deserialization; falls back to decoding the bytes as a String. */
    public static <T extends Serializable> T rpop(final String key) {
        return call(new JedisAction<T>() {
            @SuppressWarnings("unchecked")
            @Override
            public T action(Jedis jedis) {
                Object result = null;
                byte[] retVal = jedis.rpop(SafeEncoder.encode(key));
                if (null != retVal) {
                    try {
                        result = SerializableKit.toObject(retVal);
                    } catch (Exception e) {
                        result = SafeEncoder.encode(retVal);
                    }
                }
                return (T) result;
            }
        });
    }

    /** LRANGE with deserialization; falls back to decoding each entry as a String. */
    public static <T extends Serializable> List<T> lrange(final String key, final long start, final long end) {
        return call(new JedisAction<List<T>>() {
            @SuppressWarnings("unchecked")
            @Override
            public List<T> action(Jedis jedis) {
                List<T> list = Lists.newArrayList();
                List<byte[]> results = jedis.lrange(SafeEncoder.encode(key), start, end);
                for (byte[] result : results) {
                    try {
                        list.add((T) SerializableKit.toObject(result));
                    } catch (Exception e) {
                        list.add((T) SafeEncoder.encode(result));
                    }
                }
                return list;
            }
        });
    }

    /** RPOPLPUSH: atomically move the tail of srckey to the head of dstkey, returning it. */
    public static <T extends Serializable> T rpoplpush(final String srckey, final String dstkey) {
        return call(new JedisAction<T>() {
            @Override
            @SuppressWarnings("unchecked")
            public T action(Jedis jedis) {
                Object result = null;
                byte[] retVal = jedis.rpoplpush(SafeEncoder.encode(srckey), SafeEncoder.encode(dstkey));
                if (null != retVal) {
                    try {
                        result = SerializableKit.toObject(retVal);
                    } catch (Exception e) {
                        result = SafeEncoder.encode(retVal);
                    }
                }
                return (T) result;
            }
        });
    }

    /** LREM of a single occurrence (count = 1). */
    public static Long lrem(String key, Serializable value) {
        return lrem(key, 1, value);
    }

    public static Long
lrem(final String key, final long count, final Serializable value) { return call(new JedisAction<Long>() { @Override public Long action(Jedis jedis) { Long retVal; if (value instanceof String) { retVal = jedis.lrem(key, count, (String) value); } else { retVal = jedis.lrem(SafeEncoder.encode(key), count, SerializableKit.toByteArray(value)); } return retVal; } }); } }
/*
 * Copyright (C) 2017-2019 Dremio Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.dremio.service.reflection;

import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.mockito.Mockito;

import com.dremio.exec.server.SabotContext;
import com.dremio.exec.store.CatalogService;
import com.dremio.service.DirectProvider;
import com.dremio.service.namespace.NamespaceKey;
import com.dremio.service.namespace.NamespaceService;
import com.dremio.service.namespace.dataset.proto.AccelerationSettings;
import com.dremio.service.namespace.dataset.proto.DatasetConfig;
import com.dremio.service.namespace.dataset.proto.DatasetType;
import com.dremio.service.namespace.proto.EntityId;
import com.dremio.service.reflection.MaterializationCache.CacheViewer;
import com.dremio.service.reflection.ReflectionStatus.COMBINED_STATUS;
import com.dremio.service.reflection.proto.DataPartition;
import com.dremio.service.reflection.proto.ExternalReflection;
import com.dremio.service.reflection.proto.Materialization;
import com.dremio.service.reflection.proto.MaterializationId;
import com.dremio.service.reflection.proto.ReflectionEntry;
import com.dremio.service.reflection.proto.ReflectionGoal;
import com.dremio.service.reflection.proto.ReflectionGoalState;
import com.dremio.service.reflection.proto.ReflectionId;
import com.dremio.service.reflection.proto.ReflectionState;
import com.dremio.service.reflection.store.ExternalReflectionStore;
import com.dremio.service.reflection.store.MaterializationStore;
import com.dremio.service.reflection.store.ReflectionEntriesStore;
import com.dremio.service.reflection.store.ReflectionGoalsStore;

/**
 * Tests {@link ReflectionStatusService}
 *
 * <p>Parameterized: each row built by {@code newTestCase(...)} describes one
 * combination of goal state, refresh policy, validation result, reflection
 * entry state and last-materialization state, together with the
 * {@link COMBINED_STATUS} the service is expected to report for it.
 */
@RunWith(Parameterized.class)
public class TestReflectionStatusService {

    // Reflection under test, freshly generated per test-case instance.
    private final ReflectionId reflectionId;
    // Service wired entirely from Mockito mocks in the constructor.
    private final ReflectionStatusService statusService;
    // Combined status the current parameter row expects.
    private final COMBINED_STATUS expected;

    /** CacheViewer stub that reports the same cached/not-cached answer for every id. */
    private static class ConstantCacheViewer implements CacheViewer {
        private final boolean isCached;

        ConstantCacheViewer(boolean isCached) {
            this.isCached = isCached;
        }

        @Override
        public boolean isCached(MaterializationId id) {
            return isCached;
        }
    }

    /**
     * Builds the mocked service for one parameter row.
     *
     * @param name                     display name of the case (used only by the runner)
     * @param expected                 combined status the service should compute
     * @param enabled                  whether the reflection goal is ENABLED
     * @param manualRefresh            whether the dataset is set to never refresh
     * @param isValid                  result the validator reports for the goal
     * @param entry                    reflection entry returned by the entries store;
     *                                 NOTE(review): dereferenced below without a null
     *                                 check, so every current case must supply one
     * @param lastMaterialization      last DONE materialization (may be null)
     * @param isMaterializationCached  answer of the constant cache viewer
     */
    public TestReflectionStatusService(
            String name,
            COMBINED_STATUS expected,
            boolean enabled,
            boolean manualRefresh,
            boolean isValid,
            ReflectionEntry entry,
            Materialization lastMaterialization,
            boolean isMaterializationCached) {
        // All collaborators are mocks; only the stubbings below define behavior.
        final NamespaceService namespaceService = mock(NamespaceService.class);
        final SabotContext sabotContext = mock(SabotContext.class);
        final ReflectionGoalsStore goalsStore = mock(ReflectionGoalsStore.class);
        final ReflectionEntriesStore entriesStore = mock(ReflectionEntriesStore.class);
        final MaterializationStore materializationStore = mock(MaterializationStore.class);
        final ExternalReflectionStore externalReflectionStore = mock(ExternalReflectionStore.class);
        final ReflectionSettings reflectionSettings = mock(ReflectionSettings.class);
        final ReflectionValidator validator = mock(ReflectionValidator.class);
        final CatalogService catalogService = mock(CatalogService.class);

        statusService = new ReflectionStatusServiceImpl(
            sabotContext::getExecutors,
            DirectProvider.wrap(namespaceService),
            DirectProvider.<CacheViewer>wrap(new ConstantCacheViewer(isMaterializationCached)),
            goalsStore,
            entriesStore,
            materializationStore,
            externalReflectionStore,
            reflectionSettings,
            validator,
            DirectProvider.wrap(catalogService)
        );

        reflectionId = new ReflectionId(UUID.randomUUID().toString());

        // Dataset the reflection is defined on.
        final String datasetId = UUID.randomUUID().toString();
        final List<String> dataPath = Arrays.asList("source", "folder", "dataset");
        final DatasetConfig dataset = new DatasetConfig()
            .setId(new EntityId(datasetId))
            .setFullPathList(dataPath);
        when(namespaceService.findDatasetByUUID(datasetId)).thenReturn(dataset);

        // Refresh policy: manualRefresh maps to "never refresh".
        final AccelerationSettings settings = new AccelerationSettings()
            .setRefreshPeriod(1000L)
            .setNeverRefresh(manualRefresh);
        when(reflectionSettings.getReflectionSettings(new NamespaceKey(dataPath))).thenReturn(settings);

        // Goal state derived from the 'enabled' flag.
        final ReflectionGoal goal = new ReflectionGoal()
            .setId(reflectionId)
            .setDatasetId(datasetId)
            .setState(enabled ? ReflectionGoalState.ENABLED : ReflectionGoalState.DISABLED);
        when(goalsStore.get(reflectionId)).thenReturn(goal);

        entry.setId(reflectionId);
        when(entriesStore.get(reflectionId)).thenReturn(entry);

        when(materializationStore.getLastMaterializationDone(reflectionId)).thenReturn(lastMaterialization);
        when(materializationStore.getAllDone(eq(reflectionId), Mockito.anyLong())).thenReturn(Collections.singleton(lastMaterialization));

        when(validator.isValid(goal)).thenReturn(isValid);

        this.expected = expected;
    }

    /** Single assertion shared by all parameter rows. */
    @Test
    public void testStatus() {
        assertEquals(expected, statusService.getReflectionStatus(reflectionId).getCombinedStatus());
    }

    enum MATERIALIZATION_STATE {
        NOT_FOUND, // no last materialization
        INCOMPLETE, // last materialization has missing data partitions
        EXPIRED, // last materialization expired
        NOT_CACHED, // last materialization valid but not cached
        VALID
    }

    /** Convenience overload: defaults the expiration offset to 1 day. */
    private static Object[] newTestCase(
            String name,
            COMBINED_STATUS expected,
            boolean enabled,
            boolean manualRefresh,
            boolean invalid,
            ReflectionState
            entryState,
            int numFailures,
            MATERIALIZATION_STATE materializationState,
            boolean hasCachedMaterialization) {
        return newTestCase(name, expected, enabled, manualRefresh, invalid, entryState, numFailures,
            materializationState, hasCachedMaterialization, 1L);
    }

    /**
     * Builds one parameter row. {@code expiration} is a day offset added to
     * "now" (null means no expiration set); {@code invalid} is negated into
     * the constructor's {@code isValid} argument.
     */
    private static Object[] newTestCase(
            String name,
            COMBINED_STATUS expected,
            boolean enabled,
            boolean manualRefresh,
            boolean invalid,
            ReflectionState entryState,
            int numFailures,
            MATERIALIZATION_STATE materializationState,
            boolean hasCachedMaterialization,
            Long expiration) {
        // expected, enabled, manualRefresh, isValid, entry, lastMaterialization, isMaterializationCached
        ReflectionEntry entry = null;
        if (entryState != null) {
            entry = new ReflectionEntry()
                .setState(entryState)
                .setNumFailures(numFailures);
        }

        Materialization materialization = new Materialization()
            .setLastRefreshFromPds(0L);
        if (expiration == null) {
            materialization.setExpiration(null);
        } else {
            materialization.setExpiration(System.currentTimeMillis() + TimeUnit.DAYS.toMillis(expiration));
        }
        switch (materializationState) {
            case NOT_FOUND:
                materialization = null;
                break;
            case INCOMPLETE:
                // A non-empty partition list marks the materialization incomplete.
                materialization.setPartitionList(Collections.singletonList(new DataPartition("some_address")));
                break;
            case EXPIRED:
                // Expiration in the past (epoch) forces EXPIRED handling.
                materialization.setExpiration(0L);
                break;
            default:
                break;
        }
        //TODO move materializationState handling to the test itself
        return new Object[] { name, expected, enabled, manualRefresh, !invalid, entry, materialization, hasCachedMaterialization };
    }

    /** All combined-status scenarios; the {0} placeholder shows each case name. */
    @Parameterized.Parameters(name = "{index}: {0}")
    public static Iterable<Object[]> data() {
        return Arrays.asList(
            newTestCase("disabled", COMBINED_STATUS.DISABLED, false, false, false,
                ReflectionState.ACTIVE, 0, MATERIALIZATION_STATE.NOT_FOUND, false),
            newTestCase("disabled with manual refresh", COMBINED_STATUS.DISABLED, false, true, false,
                ReflectionState.ACTIVE, 0, MATERIALIZATION_STATE.NOT_FOUND, false),
            newTestCase("invalid", COMBINED_STATUS.INVALID, true, false, true,
                ReflectionState.ACTIVE, 0,
                MATERIALIZATION_STATE.NOT_FOUND, false),
            newTestCase("invalid with manual refresh", COMBINED_STATUS.INVALID, true, true, true,
                ReflectionState.REFRESHING, 2, MATERIALIZATION_STATE.VALID, true),
            newTestCase("invalid with failures", COMBINED_STATUS.INVALID, true, false, true,
                ReflectionState.FAILED, 3, MATERIALIZATION_STATE.EXPIRED, false),
            newTestCase("given up, incomplete, can accelerate", COMBINED_STATUS.FAILED, true, false, false,
                ReflectionState.FAILED, 3, MATERIALIZATION_STATE.INCOMPLETE, true),
            newTestCase("given up, expired, cannot accelerate", COMBINED_STATUS.FAILED, true, false, false,
                ReflectionState.FAILED, 3, MATERIALIZATION_STATE.EXPIRED, false),
            newTestCase("incomplete, no failures, can accelerate", COMBINED_STATUS.INCOMPLETE, true, false, false,
                ReflectionState.ACTIVE, 0, MATERIALIZATION_STATE.INCOMPLETE, true),
            newTestCase("incomplete, some failures, cannot accelerate", COMBINED_STATUS.INCOMPLETE, true, true, false,
                ReflectionState.ACTIVE, 2, MATERIALIZATION_STATE.INCOMPLETE, false),
            newTestCase("expired, no failures, can accelerate", COMBINED_STATUS.EXPIRED, true, false, false,
                ReflectionState.ACTIVE, 0, MATERIALIZATION_STATE.EXPIRED, true),
            newTestCase("expired, some failures, cannot accelerate", COMBINED_STATUS.EXPIRED, true, true, false,
                ReflectionState.ACTIVE, 2, MATERIALIZATION_STATE.EXPIRED, false),
            newTestCase("refreshing, no materialization done", COMBINED_STATUS.REFRESHING, true, false, false,
                ReflectionState.REFRESHING, 0, MATERIALIZATION_STATE.NOT_FOUND, false),
            newTestCase("refreshing", COMBINED_STATUS.REFRESHING, true, false, false,
                ReflectionState.REFRESHING, 0, MATERIALIZATION_STATE.NOT_FOUND, false),
            newTestCase("metadata refreshing", COMBINED_STATUS.REFRESHING, true, false, false,
                ReflectionState.METADATA_REFRESH, 0, MATERIALIZATION_STATE.NOT_FOUND, false),
            newTestCase("can accelerate, no failures", COMBINED_STATUS.CAN_ACCELERATE, true, false, false,
                ReflectionState.ACTIVE, 0, MATERIALIZATION_STATE.VALID, true),
            newTestCase("can accelerate, with failures", COMBINED_STATUS.CAN_ACCELERATE_WITH_FAILURES, true, false, false,
                ReflectionState.ACTIVE, 2, MATERIALIZATION_STATE.VALID, true),
            newTestCase("can accelerate, manual refresh", COMBINED_STATUS.CAN_ACCELERATE, true, true, false,
                ReflectionState.ACTIVE, 0, MATERIALIZATION_STATE.VALID, true),
            newTestCase("cannot accelerate, not cached", COMBINED_STATUS.CANNOT_ACCELERATE_SCHEDULED, true, false, false,
                ReflectionState.ACTIVE, 0, MATERIALIZATION_STATE.VALID, false),
            newTestCase("cannot accelerate", COMBINED_STATUS.CANNOT_ACCELERATE_SCHEDULED, true, false, false,
                ReflectionState.ACTIVE, 0, MATERIALIZATION_STATE.NOT_FOUND, false),
            newTestCase("cannot accelerate manual", COMBINED_STATUS.CANNOT_ACCELERATE_MANUAL, true, true, false,
                ReflectionState.ACTIVE, 0, MATERIALIZATION_STATE.NOT_FOUND, false),
            newTestCase("null materialization expiration", COMBINED_STATUS.EXPIRED, true, false, false,
                ReflectionState.ACTIVE, 0, MATERIALIZATION_STATE.VALID, false, null)
        );
    }

    /**
     * An external reflection whose target dataset hash no longer matches the
     * stored hash must be reported as OUT_OF_SYNC.
     */
    @Test
    public void testGetExternalReflectionStatus() throws Exception {
        final NamespaceService namespaceService = mock(NamespaceService.class);
        final SabotContext sabotContext = mock(SabotContext.class);
        final ReflectionGoalsStore goalsStore = mock(ReflectionGoalsStore.class);
        final ReflectionEntriesStore entriesStore = mock(ReflectionEntriesStore.class);
        final MaterializationStore materializationStore = mock(MaterializationStore.class);
        final ExternalReflectionStore externalReflectionStore = mock(ExternalReflectionStore.class);
        final ReflectionSettings reflectionSettings = mock(ReflectionSettings.class);
        final ReflectionValidator validator = mock(ReflectionValidator.class);
        final CatalogService catalogService = mock(CatalogService.class);

        ReflectionStatusServiceImpl reflectionStatusService = new ReflectionStatusServiceImpl(
            sabotContext::getExecutors,
            DirectProvider.wrap(namespaceService),
            DirectProvider.<CacheViewer>wrap(new ConstantCacheViewer(false)),
            goalsStore,
            entriesStore,
            materializationStore,
            externalReflectionStore,
            reflectionSettings,
            validator,
            DirectProvider.wrap(catalogService)
        );

        // mock query dataset
        DatasetConfig queryDatasetConfig = new DatasetConfig();
        queryDatasetConfig.setType(DatasetType.PHYSICAL_DATASET);
        Integer queryHash = ReflectionUtils.computeDatasetHash(queryDatasetConfig, namespaceService, false);
        String queryDatasetId = UUID.randomUUID().toString();
        when(namespaceService.findDatasetByUUID(queryDatasetId)).thenReturn(queryDatasetConfig);

        // mock target dataset
        DatasetConfig targetDatasetConfig = new DatasetConfig();
        targetDatasetConfig.setType(DatasetType.PHYSICAL_DATASET);
        Integer targetHash = ReflectionUtils.computeDatasetHash(targetDatasetConfig, namespaceService, false);
        String targetDatasetId = UUID.randomUUID().toString();
        when(namespaceService.findDatasetByUUID(targetDatasetId)).thenReturn(targetDatasetConfig);

        // mock external reflection
        ReflectionId reflectionId = new ReflectionId(UUID.randomUUID().toString());
        ExternalReflection externalReflection = new ExternalReflection();
        externalReflection.setId(reflectionId.getId());
        externalReflection.setQueryDatasetId(queryDatasetId);
        externalReflection.setQueryDatasetHash(queryHash);
        externalReflection.setTargetDatasetId(targetDatasetId);
        // make the hashes not match
        externalReflection.setTargetDatasetHash(targetHash + 1);
        when(externalReflectionStore.get(reflectionId.getId())).thenReturn(externalReflection);

        // since the hashes don't match, should return OUT_OF_SYNC
        ExternalReflectionStatus externalReflectionStatus = reflectionStatusService.getExternalReflectionStatus(reflectionId);
        // NOTE(review): arguments are reversed relative to the JUnit
        // assertEquals(expected, actual) convention — harmless for equality
        // but the failure message would be misleading.
        assertEquals(externalReflectionStatus.getConfigStatus(), ExternalReflectionStatus.STATUS.OUT_OF_SYNC);
    }
}
package com.sequenceiq.cloudbreak.core.flow2;

import static org.mockito.BDDMockito.given;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyMap;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.isNull;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.junit.Before;
import org.junit.Test;
import org.mockito.BDDMockito;
import org.mockito.InjectMocks;
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;

import com.sequenceiq.cloudbreak.cloud.event.Payload;
import com.sequenceiq.cloudbreak.core.flow2.chain.FlowChains;
import com.sequenceiq.cloudbreak.core.flow2.config.FlowConfiguration;
import com.sequenceiq.cloudbreak.repository.FlowLogRepository;
import com.sequenceiq.cloudbreak.service.flowlog.FlowLogService;

import reactor.bus.Event;

/**
 * Unit tests for {@code Flow2Handler}: new-flow triggering, resuming an
 * existing flow, flow finalization (with and without a flow chain, with and
 * without failure) and cancellation of running flows. All collaborators are
 * Mockito mocks injected via {@code @InjectMocks}.
 */
public class Flow2HandlerTest {
    public static final String FLOW_ID = "flowId";
    public static final String FLOW_CHAIN_ID = "flowChainId";

    @InjectMocks
    private Flow2Handler underTest;

    @Mock
    private FlowLogService flowLogService;

    @Mock
    private FlowLogRepository flowLogRepository;

    @Mock
    private Map<String, FlowConfiguration<?>> flowConfigurationMap;

    @Mock
    private FlowRegister runningFlows;

    @Mock
    private FlowConfiguration flowConfig;

    @Mock
    private FlowChains flowChains;

    @Mock
    private FlowTriggerCondition flowTriggerCondition;

    @Mock
    private Flow flow;

    // Minimal concrete FlowState used wherever the handler asks the flow
    // for its current state.
    private FlowState flowState;

    // Event pre-populated with a FLOW_ID header, i.e. an "existing flow" event.
    private Event<? extends Payload> dummyEvent;

    // Payload only carries a stack id (1L).
    private Payload payload = () -> 1L;

    @Before
    public void setUp() {
        // Re-create the handler before injecting so each test starts clean;
        // initMocks populates the @Mock fields and wires them into underTest.
        underTest = new Flow2Handler();
        MockitoAnnotations.initMocks(this);
        Map<String, Object> headers = new HashMap<>();
        headers.put(Flow2Handler.FLOW_ID, FLOW_ID);
        dummyEvent = new Event<>(new Event.Headers(headers), payload);
        flowState = new OwnFlowState();
    }

    /** A known event key with no flow id header must create and start a new flow. */
    @Test
    public void testNewFlow() {
        BDDMockito.<FlowConfiguration>given(flowConfigurationMap.get(any())).willReturn(flowConfig);
        given(flowConfig.createFlow(anyString())).willReturn(flow);
        given(flowConfig.getFlowTriggerCondition()).willReturn(flowTriggerCondition);
        given(flowTriggerCondition.isFlowTriggerable(anyLong())).willReturn(true);
        given(flow.getCurrentState()).willReturn(flowState);
        Event<Payload> event = new Event<>(payload);
        event.setKey("KEY");
        underTest.accept(event);
        verify(flowConfigurationMap, times(1)).get(anyString());
        verify(runningFlows, times(1)).put(eq(flow), isNull(String.class));
        verify(flowLogService, times(1)).save(anyString(), anyString(), eq("KEY"), any(Payload.class), anyMap(),
            eq(flowConfig.getClass()), eq(flowState));
        verify(flow, times(1)).sendEvent(anyString(), any());
    }

    /** An unknown event key (no configuration registered) must be ignored. */
    @Test
    public void testNewFlowButNotHandled() {
        Event<Payload> event = new Event<>(payload);
        event.setKey("KEY");
        underTest.accept(event);
        verify(flowConfigurationMap, times(1)).get(anyString());
        verify(runningFlows, times(0)).put(any(Flow.class), isNull(String.class));
        verify(flowLogService, times(0)).save(anyString(), anyString(), anyString(), any(Payload.class), anyMap(),
            Matchers.<Class>any(), any(FlowState.class));
    }

    /** An event carrying a known flow id must be dispatched to the running flow. */
    @Test
    public void testExistingFlow() {
        BDDMockito.<FlowConfiguration>given(flowConfigurationMap.get(any())).willReturn(flowConfig);
        given(runningFlows.get(anyString())).willReturn(flow);
        given(flow.getCurrentState()).willReturn(flowState);
        dummyEvent.setKey("KEY");
        underTest.accept(dummyEvent);
        verify(flowLogService, times(1)).save(eq(FLOW_ID), anyString(), eq("KEY"), any(Payload.class), anyMap(),
            any(Class.class), eq(flowState));
        verify(flow, times(1)).sendEvent(eq("KEY"), any());
    }

    /** An event for a flow id that is not registered must be dropped silently. */
    @Test
    public void testExistingFlowNotFound() {
        BDDMockito.<FlowConfiguration>given(flowConfigurationMap.get(any())).willReturn(flowConfig);
        dummyEvent.setKey("KEY");
        underTest.accept(dummyEvent);
        verify(flowLogService, times(0)).save(anyString(), anyString(), anyString(), any(Payload.class), anyMap(),
            Matchers.<Class>any(), any(FlowState.class));
        verify(flow, times(0)).sendEvent(anyString(), any());
    }

    /** FLOW_FINAL without a chain id closes the log and removes the flow only. */
    @Test
    public void testFlowFinalFlowNotChained() {
        given(runningFlows.remove(FLOW_ID)).willReturn(flow);
        dummyEvent.setKey(Flow2Handler.FLOW_FINAL);
        underTest.accept(dummyEvent);
        verify(flowLogService, times(1)).close(anyLong(), eq(FLOW_ID));
        verify(runningFlows, times(1)).remove(eq(FLOW_ID));
        verify(runningFlows, times(0)).get(eq(FLOW_ID));
        verify(runningFlows, times(0)).put(any(Flow.class), isNull(String.class));
        verify(flowChains, times(0)).removeFlowChain(anyString());
        verify(flowChains, times(0)).triggerNextFlow(anyString());
    }

    /** FLOW_FINAL with a chain id must additionally trigger the next flow in the chain. */
    @Test
    public void testFlowFinalFlowChained() {
        given(runningFlows.remove(FLOW_ID)).willReturn(flow);
        dummyEvent.setKey(Flow2Handler.FLOW_FINAL);
        dummyEvent.getHeaders().set(Flow2Handler.FLOW_CHAIN_ID, FLOW_CHAIN_ID);
        underTest.accept(dummyEvent);
        verify(flowLogService, times(1)).close(anyLong(), eq(FLOW_ID));
        verify(runningFlows, times(1)).remove(eq(FLOW_ID));
        verify(runningFlows, times(0)).get(eq(FLOW_ID));
        verify(runningFlows, times(0)).put(any(Flow.class), isNull(String.class));
        verify(flowChains, times(0)).removeFlowChain(anyString());
        verify(flowChains, times(1)).triggerNextFlow(eq(FLOW_CHAIN_ID));
    }

    /** A failed flow finishing outside a chain must not touch the chain registry. */
    @Test
    public void testFlowFinalFlowFailedNoChain() {
        given(flow.isFlowFailed()).willReturn(Boolean.TRUE);
        given(runningFlows.remove(FLOW_ID)).willReturn(flow);
        dummyEvent.setKey(Flow2Handler.FLOW_FINAL);
        given(runningFlows.remove(anyString())).willReturn(flow);
        underTest.accept(dummyEvent);
        verify(flowLogService, times(1)).close(anyLong(), eq(FLOW_ID));
        verify(runningFlows, times(1)).remove(eq(FLOW_ID));
        verify(runningFlows, times(0)).get(eq(FLOW_ID));
        verify(runningFlows, times(0)).put(any(Flow.class), isNull(String.class));
        verify(flowChains, times(0)).removeFullFlowChain(anyString());
        verify(flowChains, times(0)).triggerNextFlow(anyString());
    }

    /** A failed flow finishing inside a chain must tear down the whole chain. */
    @Test
    public void testFlowFinalFlowFailedWithChain() {
        given(flow.isFlowFailed()).willReturn(Boolean.TRUE);
        given(runningFlows.remove(FLOW_ID)).willReturn(flow);
        dummyEvent.setKey(Flow2Handler.FLOW_FINAL);
        // NOTE(review): uses the literal "FLOW_CHAIN_ID" rather than the
        // FLOW_CHAIN_ID constant ("flowChainId"); the later verification only
        // matches anyString(), so the test still passes — confirm intent.
        dummyEvent.getHeaders().set(Flow2Handler.FLOW_CHAIN_ID, "FLOW_CHAIN_ID");
        given(runningFlows.remove(anyString())).willReturn(flow);
        underTest.accept(dummyEvent);
        verify(flowLogService, times(1)).close(anyLong(), eq(FLOW_ID));
        verify(runningFlows, times(1)).remove(eq(FLOW_ID));
        verify(runningFlows, times(0)).get(eq(FLOW_ID));
        verify(runningFlows, times(0)).put(any(Flow.class), isNull(String.class));
        verify(flowChains, times(1)).removeFullFlowChain(anyString());
        verify(flowChains, times(0)).triggerNextFlow(anyString());
    }

    /** FLOW_CANCEL must cancel every running non-termination flow and drop its chain. */
    @Test
    public void testCancelRunningFlows() {
        given(flowLogRepository.findAllRunningNonTerminationFlowIdsByStackId(anyLong())).willReturn(Collections.singleton(FLOW_ID));
        given(runningFlows.remove(FLOW_ID)).willReturn(flow);
        given(runningFlows.getFlowChainId(eq(FLOW_ID))).willReturn(FLOW_CHAIN_ID);
        dummyEvent.setKey(Flow2Handler.FLOW_CANCEL);
        underTest.accept(dummyEvent);
        verify(flowLogService, times(1)).cancel(anyLong(), eq(FLOW_ID));
        verify(flowChains, times(1)).removeFullFlowChain(eq(FLOW_CHAIN_ID));
    }

    /** No-op FlowState implementation; the handler only needs an instance identity. */
    private static class OwnFlowState implements FlowState {
        @Override
        public Class<? extends AbstractAction> action() {
            return null;
        }

        @Override
        public String name() {
            return null;
        }
    }
}
/*
 * Copyright 2016 IKS Gesellschaft fuer Informations- und Kommunikationssysteme mbH
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.iksgmbh.sql.pojomemodb.dataobjects.persistent;

import com.iksgmbh.sql.pojomemodb.SQLKeyWords;
import com.iksgmbh.sql.pojomemodb.dataobjects.temporal.ApartValue;
import com.iksgmbh.sql.pojomemodb.dataobjects.temporal.ColumnInitData;
import com.iksgmbh.sql.pojomemodb.dataobjects.temporal.OrderCondition;
import com.iksgmbh.sql.pojomemodb.dataobjects.temporal.WhereCondition;

import org.junit.Before;
import org.junit.Test;

import java.math.BigDecimal;
import java.sql.SQLDataException;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Locale;

import static org.junit.Assert.*;

/**
 * Unit tests for {@link Table}: selecting, ordering, default values,
 * primary-key enforcement and defensive cloning of data rows.
 */
public class TableTest {

    private Table sut;

    @Before
    public void setup() throws SQLDataException {
        sut = new Table("Test");
    }

    /** A select with null column list (SELECT *) returns the inserted row. */
    @Test
    public void returnsDataWhenAsterixAndAliasAreUsedInCombination() throws SQLDataException {
        // arrange
        sut.createNewColumn(createColumnInitData("ID", "Number(5)"), null);
        sut.createNewColumn(createColumnInitData("Name", "VARCHAR(10)"), null);
        sut.createNewColumn(createColumnInitData("Date", "Date"), null);
        final List<ApartValue> values = new ArrayList<ApartValue>();
        ApartValue value = new ApartValue("to_date('15.05.16','DD.MM.RR')", "Date");
        values.add(value);
        value = new ApartValue("'Jim'", "Name");
        values.add(value);
        value = new ApartValue("1", "ID");
        values.add(value);
        sut.insertDataRow(values);

        // act
        final List<Object[]> result = sut.select(null, new ArrayList<WhereCondition>(), new ArrayList<OrderCondition>());

        // assert
        assertEquals("number of datasets", 1, result.size());
    }

    /** to_char with an explicit format mask renders a stored date as text. */
    @Test
    public void applies_to_char_function() throws SQLDataException {
        // arrange
        sut.createNewColumn(createColumnInitData("Column1", "Date"), null);
        final List<ApartValue> values = new ArrayList<ApartValue>();
        ApartValue value = new ApartValue("to_date('15.05.16','DD.MM.RR')", "Column1");
        values.add(value);
        sut.insertDataRow(values);
        final List<String> selectedColumns = new ArrayList<String>();
        selectedColumns.add("to_char(Column1, 'dd.mm.yyyy hh24:mi:ss')");
        final List<WhereCondition> whereConditions = new ArrayList<WhereCondition>();

        // act
        final List<Object[]> result = sut.select(selectedColumns, whereConditions, new ArrayList<OrderCondition>());

        // assert
        final String dateAsString = (String) result.get(0)[0];
        assertEquals("date value", "15.05.2016 00:00:00", dateAsString);
    }

    /**
     * Mutating a clone of the data rows must not affect the table content
     * returned by a subsequent clone.
     */
    @Test
    public void buildsCloneOfDataRows() throws SQLDataException {
        // arrange
        sut.createNewColumn(createColumnInitData("Column1", "Date"), null);
        sut.createNewColumn(createColumnInitData("Column2", "varchar(50)"), null);
        sut.createNewColumn(createColumnInitData("Column3", "Number(10,2)"), null);
        final List<ApartValue> values = new ArrayList<ApartValue>();
        values.add(new ApartValue("to_date('15.05.16','DD.MM.RR')", "Column1"));
        values.add(new ApartValue("'Test'", "Column2"));
        values.add(new ApartValue("10.2", "Column3"));
        sut.insertDataRow(values);

        // act 1: mutate the first clone in every column
        final List<Object[]> result1 = sut.createDataRowsClone();
        result1.get(0)[0] = null;
        result1.get(0)[1] = "New";
        result1.get(0)[2] = BigDecimal.ZERO;

        // act 2: a fresh clone must still show the original values
        final List<Object[]> result2 = sut.createDataRowsClone();

        // assert
        assertEquals("number value", "10.2", ((BigDecimal) result2.get(0)[2]).toPlainString());
        assertEquals("text value", "Test", result2.get(0)[1]);
        // Fix: the previous assertion compared Date.toString() against
        // "Sun May 15 00:00:00 CEST 2016", which depends on the JVM's default
        // time zone and locale and fails outside Central Europe. Formatting
        // with an explicit pattern and fixed locale is deterministic: the date
        // is created at midnight in the default zone (see
        // applies_to_char_function) and formatted in that same zone.
        final String dateAsText =
                new SimpleDateFormat("dd.MM.yyyy HH:mm:ss", Locale.ROOT).format((Date) result2.get(0)[0]);
        assertEquals("date value", "15.05.2016 00:00:00", dateAsText);
    }

    /** ORDER BY must place null values consistently for ASC and DESC. */
    @Test
    public void ordersNullValues() throws SQLException {
        // arrange test table
        sut.createNewColumn(createColumnInitData("Column1", "Date"), null);
        sut.createNewColumn(createColumnInitData("Column2", "varchar(50)"), null);
        sut.createNewColumn(createColumnInitData("Column3", "Number(10,2)"), null);
        List<ApartValue> values = new ArrayList<ApartValue>();
        values.add(new ApartValue("to_date('15.05.16','DD.MM.RR')", "Column1"));
        values.add(new ApartValue("'NotNull'", "Column2"));
        values.add(new ApartValue("10.2", "Column3"));
        sut.insertDataRow(values); // column 2 is NOT null
        values = new ArrayList<ApartValue>();
        values.add(new ApartValue("to_date('15.05.16','DD.MM.RR')", "Column1"));
        values.add(new ApartValue("10.2", "Column3"));
        sut.insertDataRow(values); // column 2 is null

        // arrange select statement data
        final List<String> columns = new ArrayList<String>();
        columns.add("Column2");
        final List<OrderCondition> orderConditions = new ArrayList<OrderCondition>();
        orderConditions.add(new OrderCondition("Column2", SQLKeyWords.ASC));

        // act 1: ascending — null sorts first
        final List<Object[]> result1 = sut.select(columns, new ArrayList<WhereCondition>(), orderConditions);

        // act 2: descending — null sorts last
        orderConditions.clear();
        orderConditions.add(new OrderCondition("Column2", SQLKeyWords.DESC));
        final List<Object[]> result2 = sut.select(columns, new ArrayList<WhereCondition>(), orderConditions);

        // assert
        assertNull(result1.get(0)[0]);
        assertEquals("Column2", "NotNull", result1.get(1)[0]);
        assertEquals("Column2", "NotNull", result2.get(0)[0]);
        assertNull(result2.get(1)[0]);
    }

    /** Columns with a default value must be filled when the insert omits them. */
    @Test
    public void usesDefaultValueIfDefined() throws SQLDataException {
        // arrange
        sut.createNewColumn(createColumnInitData("Column_With_Default", "VARCHAR(20)", "'DefaultValue'", null), null);
        sut.createNewColumn(createColumnInitData("Column_No_Default", "Date"), null);
        sut.createNewColumn(createColumnInitData("DateColumn", "Date", "sysdate", null), null);
        sut.createNewColumn(createColumnInitData("ID", "NUMBER"),
                null);
        final List<ApartValue> values = new ArrayList<ApartValue>();
        values.add(new ApartValue("1", "ID"));

        // act
        sut.insertDataRow(values);

        // assert
        final List<Object[]> result = sut.createDataRowsClone();
        assertEquals("value of ColumnWithDefault", "DefaultValue", result.get(0)[0]);
        assertNull(result.get(0)[1]); // no default defined -> stays null
        assertNotNull(result.get(0)[2]); // sysdate default -> filled
    }

    /** Inserting the same primary-key value twice must raise a constraint violation. */
    @Test
    public void throwsExceptionForDuplicatesInPrimaryKeyColumn() throws SQLDataException {
        // arrange
        sut.createNewColumn(createColumnInitData("Column_With_Default", "VARCHAR(20)", "'DefaultValue'", null), null);
        sut.createNewColumn(createColumnInitData("Column_No_Default", "Date"), null);
        sut.createNewColumn(createColumnInitData("DateColumn", "Date", "sysdate", null), null);
        sut.createNewColumn(createColumnInitData("ID", "NUMBER", null, "primaryKeyId"), null);
        final List<ApartValue> values = new ArrayList<ApartValue>();
        values.add(new ApartValue("1", "ID"));
        sut.insertDataRow(values);

        // act
        try {
            sut.insertDataRow(values);
            fail("Expected exception was not thrown!");
        } catch (SQLDataException e) {
            // assert
            assertEquals("Error message", "Primary Key Constraint violated in column 'ID' with value '1'.",
                    e.getMessage().trim());
        }
    }

    /** Builds minimal column metadata with just a name and a type. */
    private ColumnInitData createColumnInitData(String colName, String colType) {
        ColumnInitData toReturn = new ColumnInitData(colName);
        toReturn.columnType = colType;
        return toReturn;
    }

    /** Builds column metadata including an optional default value and primary key id. */
    private ColumnInitData createColumnInitData(String colName, String colType, String defaultValue, String primKey) {
        ColumnInitData toReturn = createColumnInitData(colName, colType);
        toReturn.defaultValue = defaultValue;
        toReturn.primaryKey = primKey;
        return toReturn;
    }
}
// Copyright (C) 2015 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package com.google.gerrit.acceptance.server.change;

import static com.google.common.truth.Truth.assertThat;
import static com.google.gerrit.acceptance.GitUtil.pushHead;
import static com.google.gerrit.extensions.api.changes.SubmittedTogetherOption.NON_VISIBLE_CHANGES;

import com.google.gerrit.acceptance.AbstractDaemonTest;
import com.google.gerrit.acceptance.GitUtil;
import com.google.gerrit.acceptance.TestProjectInput;
import com.google.gerrit.extensions.api.changes.SubmittedTogetherInfo;
import com.google.gerrit.extensions.client.ChangeStatus;
import com.google.gerrit.extensions.client.ListChangesOption;
import com.google.gerrit.extensions.client.SubmitType;
import com.google.gerrit.extensions.common.FileInfo;
import com.google.gerrit.extensions.common.RevisionInfo;
import com.google.gerrit.reviewdb.client.Project;
import com.google.gerrit.testing.ConfigSuite;
import java.util.EnumSet;
import org.eclipse.jgit.junit.TestRepository;
import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.revwalk.RevCommit;
import org.junit.Test;

/**
 * Acceptance tests for the "submitted together" REST endpoint: which changes
 * Gerrit groups for simultaneous submission, via ancestry chains and (when
 * enabled) shared topics.
 *
 * <p>The suite runs twice: once with the default config and once with
 * "submit whole topic" enabled (see {@link #submitWholeTopicEnabled()}), so
 * topic-sensitive tests branch on {@code isSubmitWholeTopicEnabled()}.
 */
public class SubmittedTogetherIT extends AbstractDaemonTest {
  /** Second {@link ConfigSuite} variant: same tests with submit-whole-topic turned on. */
  @ConfigSuite.Config
  public static Config submitWholeTopicEnabled() {
    return submitWholeTopicEnabledConfig();
  }

  /** Without ListChangesOption.CURRENT_FILES, revision info must omit the file list. */
  @Test
  public void doesNotIncludeCurrentFiles() throws Exception {
    RevCommit c1_1 = commitBuilder().add("a.txt", "1").message("subject: 1").create();
    RevCommit c2_1 = commitBuilder().add("b.txt", "2").message("subject: 2").create();
    String id2 = getChangeId(c2_1);
    pushHead(testRepo, "refs/for/master", false);
    SubmittedTogetherInfo info =
        gApi.changes().id(id2).submittedTogether(EnumSet.of(NON_VISIBLE_CHANGES));
    assertThat(info.changes).hasSize(2);
    // Newest change first; each entry reports its current revision.
    assertThat(info.changes.get(0).currentRevision).isEqualTo(c2_1.name());
    assertThat(info.changes.get(1).currentRevision).isEqualTo(c1_1.name());
    RevisionInfo rev = info.changes.get(0).revisions.get(c2_1.name());
    assertThat(rev.files).isNull();
  }

  /** With CURRENT_FILES requested, per-revision FileInfo is populated (status 'A' = added). */
  @Test
  public void returnsCurrentFilesIfOptionRequested() throws Exception {
    RevCommit c1_1 = commitBuilder().add("a.txt", "1").message("subject: 1").create();
    RevCommit c2_1 = commitBuilder().add("b.txt", "2").message("subject: 2").create();
    String id2 = getChangeId(c2_1);
    pushHead(testRepo, "refs/for/master", false);
    SubmittedTogetherInfo info =
        gApi.changes()
            .id(id2)
            .submittedTogether(
                EnumSet.of(ListChangesOption.CURRENT_FILES), EnumSet.of(NON_VISIBLE_CHANGES));
    assertThat(info.changes).hasSize(2);
    assertThat(info.changes.get(0).currentRevision).isEqualTo(c2_1.name());
    assertThat(info.changes.get(1).currentRevision).isEqualTo(c1_1.name());
    RevisionInfo rev = info.changes.get(0).revisions.get(c2_1.name());
    assertThat(rev).isNotNull();
    FileInfo file = rev.files.get("b.txt");
    assertThat(file).isNotNull();
    assertThat(file.status).isEqualTo('A');
  }

  /** A child change is submitted together with its ancestors; the ancestor alone is not. */
  @Test
  public void returnsAncestors() throws Exception {
    // Create two commits and push.
    RevCommit c1_1 = commitBuilder().add("a.txt", "1").message("subject: 1").create();
    String id1 = getChangeId(c1_1);
    RevCommit c2_1 = commitBuilder().add("b.txt", "2").message("subject: 2").create();
    String id2 = getChangeId(c2_1);
    pushHead(testRepo, "refs/for/master", false);

    assertSubmittedTogether(id1);
    assertSubmittedTogether(id2, id2, id1);
  }

  /** Ancestor grouping is visible to anonymous users as well. */
  @Test
  public void anonymousAncestors() throws Exception {
    RevCommit a = commitBuilder().add("a", "1").message("change 1").create();
    RevCommit b = commitBuilder().add("b", "1").message("change 2").create();
    pushHead(testRepo, "refs/for/master", false);

    setApiUserAnonymous();
    assertSubmittedTogether(getChangeId(a));
    assertSubmittedTogether(getChangeId(b), getChangeId(b), getChangeId(a));
  }

  /** Two independent changes sharing a topic are grouped only when whole-topic is enabled. */
  @Test
  public void respectWholeTopic() throws Exception {
    RevCommit initialHead = getRemoteHead();
    // Create two independent commits and push.
    RevCommit c1_1 = commitBuilder().add("a.txt", "1").message("subject: 1").create();
    String id1 = getChangeId(c1_1);
    pushHead(testRepo, "refs/for/master/" + name("connectingTopic"), false);

    testRepo.reset(initialHead);
    RevCommit c2_1 = commitBuilder().add("b.txt", "2").message("subject: 2").create();
    String id2 = getChangeId(c2_1);
    pushHead(testRepo, "refs/for/master/" + name("connectingTopic"), false);

    if (isSubmitWholeTopicEnabled()) {
      assertSubmittedTogether(id1, id2, id1);
      assertSubmittedTogether(id2, id2, id1);
    } else {
      assertSubmittedTogether(id1);
      assertSubmittedTogether(id2);
    }
  }

  /** Topic grouping behaves the same when queried anonymously. */
  @Test
  public void anonymousWholeTopic() throws Exception {
    RevCommit initialHead = getRemoteHead();
    RevCommit a = commitBuilder().add("a", "1").message("change 1").create();
    pushHead(testRepo, "refs/for/master/" + name("topic"), false);
    String id1 = getChangeId(a);

    testRepo.reset(initialHead);
    RevCommit b = commitBuilder().add("b", "1").message("change 2").create();
    pushHead(testRepo, "refs/for/master/" + name("topic"), false);
    String id2 = getChangeId(b);

    setApiUserAnonymous();
    if (isSubmitWholeTopicEnabled()) {
      assertSubmittedTogether(id1, id2, id1);
      assertSubmittedTogether(id2, id2, id1);
    } else {
      assertSubmittedTogether(id1);
      assertSubmittedTogether(id2);
    }
  }

  /** A change on an unrelated topic still pulls in its git ancestors (and their topic group). */
  @Test
  public void topicChaining() throws Exception {
    RevCommit initialHead = getRemoteHead();
    RevCommit c1_1 = commitBuilder().add("a.txt", "1").message("subject: 1").create();
    String id1 = getChangeId(c1_1);
    pushHead(testRepo, "refs/for/master/" + name("connectingTopic"), false);

    testRepo.reset(initialHead);
    RevCommit c2_1 = commitBuilder().add("b.txt", "2").message("subject: 2").create();
    String id2 = getChangeId(c2_1);
    pushHead(testRepo, "refs/for/master/" + name("connectingTopic"), false);

    RevCommit c3_1 = commitBuilder().add("b.txt", "3").message("subject: 3").create();
    String id3 = getChangeId(c3_1);
    pushHead(testRepo, "refs/for/master/" + name("unrelated-topic"), false);

    if (isSubmitWholeTopicEnabled()) {
      assertSubmittedTogether(id1, id2, id1);
      assertSubmittedTogether(id2, id2, id1);
      assertSubmittedTogether(id3, id3, id2, id1);
    } else {
      assertSubmittedTogether(id1);
      assertSubmittedTogether(id2);
      assertSubmittedTogether(id3, id3, id2);
    }
  }

  /**
   * Topics on ancestor changes propagate transitively through the submitted-together closure:
   * pulling in an ancestor also pulls in everything grouped with that ancestor's topic.
   */
  @Test
  public void respectTopicsOnAncestors() throws Exception {
    RevCommit initialHead = getRemoteHead();
    RevCommit c1_1 = commitBuilder().add("a.txt", "1").message("subject: 1").create();
    String id1 = getChangeId(c1_1);
    pushHead(testRepo, "refs/for/master/" + name("connectingTopic"), false);

    testRepo.reset(initialHead);
    RevCommit c2_1 = commitBuilder().add("b.txt", "2").message("subject: 2").create();
    String id2 = getChangeId(c2_1);
    pushHead(testRepo, "refs/for/master/" + name("otherConnectingTopic"), false);

    RevCommit c3_1 = commitBuilder().add("b.txt", "3").message("subject: 3").create();
    String id3 = getChangeId(c3_1);
    pushHead(testRepo, "refs/for/master/" + name("connectingTopic"), false);

    RevCommit c4_1 = commitBuilder().add("b.txt", "4").message("subject: 4").create();
    String id4 = getChangeId(c4_1);
    pushHead(testRepo, "refs/for/master", false);

    testRepo.reset(initialHead);
    RevCommit c5_1 = commitBuilder().add("c.txt", "5").message("subject: 5").create();
    String id5 = getChangeId(c5_1);
    pushHead(testRepo, "refs/for/master", false);

    RevCommit c6_1 = commitBuilder().add("c.txt", "6").message("subject: 6").create();
    String id6 = getChangeId(c6_1);
    pushHead(testRepo, "refs/for/master/" + name("otherConnectingTopic"), false);

    if (isSubmitWholeTopicEnabled()) {
      assertSubmittedTogether(id1, id6, id5, id3, id2, id1);
      assertSubmittedTogether(id2, id6, id5, id2);
      assertSubmittedTogether(id3, id6, id5, id3, id2, id1);
      assertSubmittedTogether(id4, id6, id5, id4, id3, id2, id1);
      assertSubmittedTogether(id5);
      assertSubmittedTogether(id6, id6, id5, id2);
    } else {
      assertSubmittedTogether(id1);
      assertSubmittedTogether(id2);
      assertSubmittedTogether(id3, id3, id2);
      assertSubmittedTogether(id4, id4, id3, id2);
      assertSubmittedTogether(id5);
      assertSubmittedTogether(id6, id6, id5);
    }
  }

  /** Ancestry grouping also works in a freshly created project/branch. */
  @Test
  public void newBranchTwoChangesTogether() throws Exception {
    Project.NameKey p1 = createProject("a-new-project", null, false);
    TestRepository<?> repo1 = cloneProject(p1);

    RevCommit c1 =
        repo1
            .branch("HEAD")
            .commit()
            .insertChangeId()
            .add("a.txt", "1")
            .message("subject: 1")
            .create();
    String id1 = GitUtil.getChangeId(repo1, c1).get();
    pushHead(repo1, "refs/for/master", false);

    RevCommit c2 =
        repo1
            .branch("HEAD")
            .commit()
            .insertChangeId()
            .add("b.txt", "2")
            .message("subject: 2")
            .create();
    String id2 = GitUtil.getChangeId(repo1, c2).get();
    pushHead(repo1, "refs/for/master", false);
    assertSubmittedTogether(id1);
    assertSubmittedTogether(id2, id2, id1);
  }

  /** With CHERRY_PICK submit type, git ancestry does not group changes. */
  @Test
  @TestProjectInput(submitType = SubmitType.CHERRY_PICK)
  public void testCherryPickWithoutAncestors() throws Exception {
    // Create two commits and push.
    RevCommit c1_1 = commitBuilder().add("a.txt", "1").message("subject: 1").create();
    String id1 = getChangeId(c1_1);
    RevCommit c2_1 = commitBuilder().add("b.txt", "2").message("subject: 2").create();
    String id2 = getChangeId(c2_1);
    pushHead(testRepo, "refs/for/master", false);

    assertSubmittedTogether(id1);
    assertSubmittedTogether(id2);
  }

  /** After submission, both changes report the group that was actually merged together. */
  @Test
  public void submissionIdSavedOnMergeInOneProject() throws Exception {
    // Create two commits and push.
    RevCommit c1_1 = commitBuilder().add("a.txt", "1").message("subject: 1").create();
    String id1 = getChangeId(c1_1);
    RevCommit c2_1 = commitBuilder().add("b.txt", "2").message("subject: 2").create();
    String id2 = getChangeId(c2_1);
    pushHead(testRepo, "refs/for/master", false);

    assertSubmittedTogether(id1);
    assertSubmittedTogether(id2, id2, id1);

    approve(id1);
    approve(id2);
    submit(id2);
    assertMerged(id1);
    assertMerged(id2);

    // Prior to submission this was empty, but the post-merge value is what was
    // actually submitted.
    assertSubmittedTogether(id1, id2, id1);
    assertSubmittedTogether(id2, id2, id1);
  }

  /** Extracts the Change-Id footer of {@code c} from the test repo. */
  private String getChangeId(RevCommit c) throws Exception {
    return GitUtil.getChangeId(testRepo, c).get();
  }

  /** Submits the current revision of the change via the REST API. */
  private void submit(String changeId) throws Exception {
    gApi.changes().id(changeId).current().submit();
  }

  /** Asserts the change reached MERGED status. */
  private void assertMerged(String changeId) throws Exception {
    assertThat(gApi.changes().id(changeId).get().status).isEqualTo(ChangeStatus.MERGED);
  }
}
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * Autogenerated by Thrift Compiler (0.9.3)
 *
 * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
 * @generated
 */
package org.apache.storm.generated;

import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;

import org.apache.thrift.scheme.TupleScheme;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.protocol.TProtocolException;
import org.apache.thrift.EncodingUtils;
import org.apache.thrift.TException;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.server.AbstractNonblockingServer.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
import java.util.EnumMap;
import java.util.Set;
import java.util.HashSet;
import java.util.EnumSet;
import java.util.Collections;
import java.util.BitSet;
import java.nio.ByteBuffer;
import java.util.Arrays;
import javax.annotation.Generated;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// NOTE(review): Thrift-generated struct with four required fields
// (topology_id, time_stamp, users, groups). Do not hand-edit behavior —
// regenerate from the .thrift IDL instead; comments below are for readers only.
@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
public class LSTopoHistory implements org.apache.thrift.TBase<LSTopoHistory, LSTopoHistory._Fields>, java.io.Serializable, Cloneable, Comparable<LSTopoHistory> {
  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LSTopoHistory");

  // Wire-level field descriptors; the short ids (1..4) are the Thrift field ids.
  private static final org.apache.thrift.protocol.TField TOPOLOGY_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("topology_id", org.apache.thrift.protocol.TType.STRING, (short)1);
  private static final org.apache.thrift.protocol.TField TIME_STAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("time_stamp", org.apache.thrift.protocol.TType.I64, (short)2);
  private static final org.apache.thrift.protocol.TField USERS_FIELD_DESC = new org.apache.thrift.protocol.TField("users", org.apache.thrift.protocol.TType.LIST, (short)3);
  private static final org.apache.thrift.protocol.TField GROUPS_FIELD_DESC = new org.apache.thrift.protocol.TField("groups", org.apache.thrift.protocol.TType.LIST, (short)4);

  // Maps a scheme class to its factory; selected at read/write time via protocol.getScheme().
  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
  static {
    schemes.put(StandardScheme.class, new LSTopoHistoryStandardSchemeFactory());
    schemes.put(TupleScheme.class, new LSTopoHistoryTupleSchemeFactory());
  }

  private String topology_id; // required
  private long time_stamp; // required
  private List<String> users; // required
  private List<String> groups; // required

  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    TOPOLOGY_ID((short)1, "topology_id"),
    TIME_STAMP((short)2, "time_stamp"),
    USERS((short)3, "users"),
    GROUPS((short)4, "groups");

    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();

    static {
      for (_Fields field : EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    public static _Fields findByThriftId(int fieldId) {
      switch(fieldId) {
        case 1: // TOPOLOGY_ID
          return TOPOLOGY_ID;
        case 2: // TIME_STAMP
          return TIME_STAMP;
        case 3: // USERS
          return USERS;
        case 4: // GROUPS
          return GROUPS;
        default:
          return null;
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }

    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    public static _Fields findByName(String name) {
      return byName.get(name);
    }

    private final short _thriftId;
    private final String _fieldName;

    _Fields(short thriftId, String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }

    public short getThriftFieldId() {
      return _thriftId;
    }

    public String getFieldName() {
      return _fieldName;
    }
  }

  // isset id assignments
  // time_stamp is the only primitive field, so a single bit tracks whether it was set.
  private static final int __TIME_STAMP_ISSET_ID = 0;
  private byte __isset_bitfield = 0;
  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
  static {
    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    tmpMap.put(_Fields.TOPOLOGY_ID, new org.apache.thrift.meta_data.FieldMetaData("topology_id", org.apache.thrift.TFieldRequirementType.REQUIRED, 
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    tmpMap.put(_Fields.TIME_STAMP, new org.apache.thrift.meta_data.FieldMetaData("time_stamp", org.apache.thrift.TFieldRequirementType.REQUIRED, 
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
    tmpMap.put(_Fields.USERS, new org.apache.thrift.meta_data.FieldMetaData("users", org.apache.thrift.TFieldRequirementType.REQUIRED, 
        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
    tmpMap.put(_Fields.GROUPS, new org.apache.thrift.meta_data.FieldMetaData("groups", org.apache.thrift.TFieldRequirementType.REQUIRED, 
        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
    metaDataMap = Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(LSTopoHistory.class, metaDataMap);
  }

  public LSTopoHistory() {
  }

  public LSTopoHistory(
    String topology_id,
    long time_stamp,
    List<String> users,
    List<String> groups)
  {
    this();
    this.topology_id = topology_id;
    this.time_stamp = time_stamp;
    set_time_stamp_isSet(true);
    this.users = users;
    this.groups = groups;
  }

  /**
   * Performs a deep copy on <i>other</i>.
   */
  public LSTopoHistory(LSTopoHistory other) {
    __isset_bitfield = other.__isset_bitfield;
    if (other.is_set_topology_id()) {
      this.topology_id = other.topology_id;
    }
    this.time_stamp = other.time_stamp;
    if (other.is_set_users()) {
      List<String> __this__users = new ArrayList<String>(other.users);
      this.users = __this__users;
    }
    if (other.is_set_groups()) {
      List<String> __this__groups = new ArrayList<String>(other.groups);
      this.groups = __this__groups;
    }
  }

  public LSTopoHistory deepCopy() {
    return new LSTopoHistory(this);
  }

  @Override
  public void clear() {
    this.topology_id = null;
    set_time_stamp_isSet(false);
    this.time_stamp = 0;
    this.users = null;
    this.groups = null;
  }

  public String get_topology_id() {
    return this.topology_id;
  }

  public void set_topology_id(String topology_id) {
    this.topology_id = topology_id;
  }

  public void unset_topology_id() {
    this.topology_id = null;
  }

  /** Returns true if field topology_id is set (has been assigned a value) and false otherwise */
  public boolean is_set_topology_id() {
    return this.topology_id != null;
  }

  public void set_topology_id_isSet(boolean value) {
    if (!value) {
      this.topology_id = null;
    }
  }

  public long get_time_stamp() {
    return this.time_stamp;
  }

  public void set_time_stamp(long time_stamp) {
    this.time_stamp = time_stamp;
    set_time_stamp_isSet(true);
  }

  public void unset_time_stamp() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TIME_STAMP_ISSET_ID);
  }

  /** Returns true if field time_stamp is set (has been assigned a value) and false otherwise */
  public boolean is_set_time_stamp() {
    return EncodingUtils.testBit(__isset_bitfield, __TIME_STAMP_ISSET_ID);
  }

  public void set_time_stamp_isSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TIME_STAMP_ISSET_ID, value);
  }

  public int get_users_size() {
    return (this.users == null) ? 0 : this.users.size();
  }

  public java.util.Iterator<String> get_users_iterator() {
    return (this.users == null) ? null : this.users.iterator();
  }

  public void add_to_users(String elem) {
    if (this.users == null) {
      this.users = new ArrayList<String>();
    }
    this.users.add(elem);
  }

  public List<String> get_users() {
    return this.users;
  }

  public void set_users(List<String> users) {
    this.users = users;
  }

  public void unset_users() {
    this.users = null;
  }

  /** Returns true if field users is set (has been assigned a value) and false otherwise */
  public boolean is_set_users() {
    return this.users != null;
  }

  public void set_users_isSet(boolean value) {
    if (!value) {
      this.users = null;
    }
  }

  public int get_groups_size() {
    return (this.groups == null) ? 0 : this.groups.size();
  }

  public java.util.Iterator<String> get_groups_iterator() {
    return (this.groups == null) ? null : this.groups.iterator();
  }

  public void add_to_groups(String elem) {
    if (this.groups == null) {
      this.groups = new ArrayList<String>();
    }
    this.groups.add(elem);
  }

  public List<String> get_groups() {
    return this.groups;
  }

  public void set_groups(List<String> groups) {
    this.groups = groups;
  }

  public void unset_groups() {
    this.groups = null;
  }

  /** Returns true if field groups is set (has been assigned a value) and false otherwise */
  public boolean is_set_groups() {
    return this.groups != null;
  }

  public void set_groups_isSet(boolean value) {
    if (!value) {
      this.groups = null;
    }
  }

  // Generic setter used by reflective/field-id based access; null clears the field.
  public void setFieldValue(_Fields field, Object value) {
    switch (field) {
    case TOPOLOGY_ID:
      if (value == null) {
        unset_topology_id();
      } else {
        set_topology_id((String)value);
      }
      break;

    case TIME_STAMP:
      if (value == null) {
        unset_time_stamp();
      } else {
        set_time_stamp((Long)value);
      }
      break;

    case USERS:
      if (value == null) {
        unset_users();
      } else {
        set_users((List<String>)value);
      }
      break;

    case GROUPS:
      if (value == null) {
        unset_groups();
      } else {
        set_groups((List<String>)value);
      }
      break;

    }
  }

  public Object getFieldValue(_Fields field) {
    switch (field) {
    case TOPOLOGY_ID:
      return get_topology_id();

    case TIME_STAMP:
      return get_time_stamp();

    case USERS:
      return get_users();

    case GROUPS:
      return get_groups();

    }
    throw new IllegalStateException();
  }

  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new IllegalArgumentException();
    }

    switch (field) {
    case TOPOLOGY_ID:
      return is_set_topology_id();
    case TIME_STAMP:
      return is_set_time_stamp();
    case USERS:
      return is_set_users();
    case GROUPS:
      return is_set_groups();
    }
    throw new IllegalStateException();
  }

  @Override
  public boolean equals(Object that) {
    if (that == null)
      return false;
    if (that instanceof LSTopoHistory)
      return this.equals((LSTopoHistory)that);
    return false;
  }

  // Field-by-field equality; two fields are equal when both unset or both set and equal.
  public boolean equals(LSTopoHistory that) {
    if (that == null)
      return false;

    boolean this_present_topology_id = true && this.is_set_topology_id();
    boolean that_present_topology_id = true && that.is_set_topology_id();
    if (this_present_topology_id || that_present_topology_id) {
      if (!(this_present_topology_id && that_present_topology_id))
        return false;
      if (!this.topology_id.equals(that.topology_id))
        return false;
    }

    boolean this_present_time_stamp = true;
    boolean that_present_time_stamp = true;
    if (this_present_time_stamp || that_present_time_stamp) {
      if (!(this_present_time_stamp && that_present_time_stamp))
        return false;
      if (this.time_stamp != that.time_stamp)
        return false;
    }

    boolean this_present_users = true && this.is_set_users();
    boolean that_present_users = true && that.is_set_users();
    if (this_present_users || that_present_users) {
      if (!(this_present_users && that_present_users))
        return false;
      if (!this.users.equals(that.users))
        return false;
    }

    boolean this_present_groups = true && this.is_set_groups();
    boolean that_present_groups = true && that.is_set_groups();
    if (this_present_groups || that_present_groups) {
      if (!(this_present_groups && that_present_groups))
        return false;
      if (!this.groups.equals(that.groups))
        return false;
    }

    return true;
  }

  @Override
  public int hashCode() {
    List<Object> list = new ArrayList<Object>();

    boolean present_topology_id = true && (is_set_topology_id());
    list.add(present_topology_id);
    if (present_topology_id)
      list.add(topology_id);

    boolean present_time_stamp = true;
    list.add(present_time_stamp);
    if (present_time_stamp)
      list.add(time_stamp);

    boolean present_users = true && (is_set_users());
    list.add(present_users);
    if (present_users)
      list.add(users);

    boolean present_groups = true && (is_set_groups());
    list.add(present_groups);
    if (present_groups)
      list.add(groups);

    return list.hashCode();
  }

  // Orders by field id: unset < set first, then by value.
  @Override
  public int compareTo(LSTopoHistory other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }

    int lastComparison = 0;

    lastComparison = Boolean.valueOf(is_set_topology_id()).compareTo(other.is_set_topology_id());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (is_set_topology_id()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.topology_id, other.topology_id);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(is_set_time_stamp()).compareTo(other.is_set_time_stamp());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (is_set_time_stamp()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.time_stamp, other.time_stamp);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(is_set_users()).compareTo(other.is_set_users());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (is_set_users()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.users, other.users);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(is_set_groups()).compareTo(other.is_set_groups());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (is_set_groups()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.groups, other.groups);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }

  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }

  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
  }

  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("LSTopoHistory(");
    boolean first = true;

    sb.append("topology_id:");
    if (this.topology_id == null) {
      sb.append("null");
    } else {
      sb.append(this.topology_id);
    }
    first = false;
    if (!first) sb.append(", ");
    sb.append("time_stamp:");
    sb.append(this.time_stamp);
    first = false;
    if (!first) sb.append(", ");
    sb.append("users:");
    if (this.users == null) {
      sb.append("null");
    } else {
      sb.append(this.users);
    }
    first = false;
    if (!first) sb.append(", ");
    sb.append("groups:");
    if (this.groups == null) {
      sb.append("null");
    } else {
      sb.append(this.groups);
    }
    first = false;
    sb.append(")");
    return sb.toString();
  }

  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    if (!is_set_topology_id()) {
      throw new org.apache.thrift.protocol.TProtocolException("Required field 'topology_id' is unset! Struct:" + toString());
    }

    if (!is_set_time_stamp()) {
      throw new org.apache.thrift.protocol.TProtocolException("Required field 'time_stamp' is unset! Struct:" + toString());
    }

    if (!is_set_users()) {
      throw new org.apache.thrift.protocol.TProtocolException("Required field 'users' is unset! Struct:" + toString());
    }

    if (!is_set_groups()) {
      throw new org.apache.thrift.protocol.TProtocolException("Required field 'groups' is unset! Struct:" + toString());
    }

    // check for sub-struct validity
  }

  // Java serialization is delegated to the Thrift compact protocol.
  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
    try {
      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
      __isset_bitfield = 0;
      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private static class LSTopoHistoryStandardSchemeFactory implements SchemeFactory {
    public LSTopoHistoryStandardScheme getScheme() {
      return new LSTopoHistoryStandardScheme();
    }
  }

  // Field-tagged (self-describing) encoding: tolerant of unknown/missing fields on read.
  private static class LSTopoHistoryStandardScheme extends StandardScheme<LSTopoHistory> {

    public void read(org.apache.thrift.protocol.TProtocol iprot, LSTopoHistory struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true)
      {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          break;
        }
        switch (schemeField.id) {
          case 1: // TOPOLOGY_ID
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.topology_id = iprot.readString();
              struct.set_topology_id_isSet(true);
            } else { 
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 2: // TIME_STAMP
            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
              struct.time_stamp = iprot.readI64();
              struct.set_time_stamp_isSet(true);
            } else { 
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 3: // USERS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
                org.apache.thrift.protocol.TList _list808 = iprot.readListBegin();
                struct.users = new ArrayList<String>(_list808.size);
                String _elem809;
                for (int _i810 = 0; _i810 < _list808.size; ++_i810)
                {
                  _elem809 = iprot.readString();
                  struct.users.add(_elem809);
                }
                iprot.readListEnd();
              }
              struct.set_users_isSet(true);
            } else { 
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 4: // GROUPS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
                org.apache.thrift.protocol.TList _list811 = iprot.readListBegin();
                struct.groups = new ArrayList<String>(_list811.size);
                String _elem812;
                for (int _i813 = 0; _i813 < _list811.size; ++_i813)
                {
                  _elem812 = iprot.readString();
                  struct.groups.add(_elem812);
                }
                iprot.readListEnd();
              }
              struct.set_groups_isSet(true);
            } else { 
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();
      struct.validate();
    }

    public void write(org.apache.thrift.protocol.TProtocol oprot, LSTopoHistory struct) throws org.apache.thrift.TException {
      struct.validate();

      oprot.writeStructBegin(STRUCT_DESC);
      if (struct.topology_id != null) {
        oprot.writeFieldBegin(TOPOLOGY_ID_FIELD_DESC);
        oprot.writeString(struct.topology_id);
        oprot.writeFieldEnd();
      }
      oprot.writeFieldBegin(TIME_STAMP_FIELD_DESC);
      oprot.writeI64(struct.time_stamp);
      oprot.writeFieldEnd();
      if (struct.users != null) {
        oprot.writeFieldBegin(USERS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.users.size()));
          for (String _iter814 : struct.users)
          {
            oprot.writeString(_iter814);
          }
          oprot.writeListEnd();
        }
        oprot.writeFieldEnd();
      }
      if (struct.groups != null) {
        oprot.writeFieldBegin(GROUPS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.groups.size()));
          for (String _iter815 : struct.groups)
          {
            oprot.writeString(_iter815);
          }
          oprot.writeListEnd();
        }
        oprot.writeFieldEnd();
      }
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }

  }

  private static class LSTopoHistoryTupleSchemeFactory implements SchemeFactory {
    public LSTopoHistoryTupleScheme getScheme() {
      return new LSTopoHistoryTupleScheme();
    }
  }

  // Compact positional encoding: all four fields are REQUIRED, so they are
  // written/read in fixed order with no per-field tags or optional bitset.
  private static class LSTopoHistoryTupleScheme extends TupleScheme<LSTopoHistory> {

    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, LSTopoHistory struct) throws org.apache.thrift.TException {
      TTupleProtocol oprot = (TTupleProtocol) prot;
      oprot.writeString(struct.topology_id);
      oprot.writeI64(struct.time_stamp);
      {
        oprot.writeI32(struct.users.size());
        for (String _iter816 : struct.users)
        {
          oprot.writeString(_iter816);
        }
      }
      {
        oprot.writeI32(struct.groups.size());
        for (String _iter817 : struct.groups)
        {
          oprot.writeString(_iter817);
        }
      }
    }

    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, LSTopoHistory struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      struct.topology_id = iprot.readString();
      struct.set_topology_id_isSet(true);
      struct.time_stamp = iprot.readI64();
      struct.set_time_stamp_isSet(true);
      {
        org.apache.thrift.protocol.TList _list818 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
        struct.users = new ArrayList<String>(_list818.size);
        String _elem819;
        for (int _i820 = 0; _i820 < _list818.size; ++_i820)
        {
          _elem819 = iprot.readString();
          struct.users.add(_elem819);
        }
      }
      struct.set_users_isSet(true);
      {
        org.apache.thrift.protocol.TList _list821 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
        struct.groups = new ArrayList<String>(_list821.size);
        String _elem822;
        for (int _i823 = 0; _i823 < _list821.size; ++_i823)
        {
          _elem822 = iprot.readString();
          struct.groups.add(_elem822);
        }
      }
      struct.set_groups_isSet(true);
    }
  }
}
/*
 * Copyright 2012-2015 Aerospike, Inc.
 *
 * Portions may be licensed to Aerospike, Inc. under one or more contributor
 * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package com.aerospike.client.cluster;

import gnu.crypto.util.Base64;

import java.util.HashMap;
import java.util.concurrent.atomic.AtomicReferenceArray;

import com.aerospike.client.AerospikeException;
import com.aerospike.client.Info;
import com.aerospike.client.Log;
import com.aerospike.client.command.Buffer;

/**
 * Parse node's master (and optionally prole) partitions.
 * <p>
 * Issues a single info request to the node and parses the raw response bytes
 * in one forward pass (tracked by the {@code offset} cursor) for performance.
 * The partition map handed in by the caller is only copied (shallowly) the
 * first time this parser needs to mutate it; {@link #isPartitionMapCopied()}
 * tells the caller whether the returned map is a new instance.
 */
public final class PartitionParser {
    // Info protocol command names understood by the server.
    static final String PartitionGeneration = "partition-generation";
    static final String ReplicasMaster = "replicas-master";
    static final String ReplicasAll = "replicas-all";

    // namespace -> array of per-replica partition tables (index 0 = master).
    // Replaced with a shallow copy on first mutation (see copyPartitionMap()).
    private HashMap<String,AtomicReferenceArray<Node>[]> map;
    // Reusable scratch buffer for byte->String conversions during parsing.
    private final StringBuilder sb;
    // Raw info response bytes being parsed.
    private final byte[] buffer;
    private final int partitionCount;
    // Partition generation reported by the node in this response.
    private final int generation;
    // Number of valid bytes in buffer.
    private int length;
    // Current parse position within buffer; advanced by every parse method.
    private int offset;
    // True once map has been shallow-copied (copy-on-write flag).
    private boolean copied;

    /**
     * Request partition information from the node over the given connection
     * and parse it immediately.
     *
     * @param conn                 open connection to the node
     * @param node                 node whose partition ownership is being parsed
     * @param map                  current namespace -> replica tables map (copied on write)
     * @param partitionCount       number of partitions per namespace
     * @param requestProleReplicas if true, request all replicas ("replicas-all");
     *                             otherwise only master replicas ("replicas-master")
     * @throws AerospikeException.Parse if the response is empty or malformed
     */
    public PartitionParser(Connection conn, Node node, HashMap<String,AtomicReferenceArray<Node>[]> map, int partitionCount, boolean requestProleReplicas) {
        // Send format 1: partition-generation\nreplicas-master\n
        // Send format 2: partition-generation\nreplicas-all\n
        this.partitionCount = partitionCount;
        this.map = map;

        String command = (requestProleReplicas)? ReplicasAll : ReplicasMaster;
        Info info = new Info(conn, PartitionGeneration, command);
        this.length = info.getLength();

        if (length == 0) {
            throw new AerospikeException.Parse("Partition info is empty");
        }
        this.buffer = info.getBuffer();

        // Create reusable StringBuilder for performance.
        this.sb = new StringBuilder(32);  // Max namespace length

        generation = parseGeneration();

        if (requestProleReplicas) {
            parseReplicasAll(node);
        }
        else {
            parseReplicasMaster(node);
        }
    }

    /** Partition generation parsed from this node's response. */
    public int getGeneration() {
        return generation;
    }

    /** True if the partition map was shallow-copied (i.e. differs from the instance passed in). */
    public boolean isPartitionMapCopied() {
        return copied;
    }

    /** The (possibly copied) namespace -> replica tables map after parsing. */
    public HashMap<String,AtomicReferenceArray<Node>[]> getPartitionMap() {
        return map;
    }

    /**
     * Parse the "partition-generation" value: everything between the command
     * name and the next newline, as a decimal integer.
     *
     * @throws AerospikeException.Parse if the terminating newline is never found
     */
    private int parseGeneration() {
        expectName(PartitionGeneration);

        int begin = offset;

        while (offset < length) {
            if (buffer[offset] == '\n') {
                String s = Buffer.utf8ToString(buffer, begin, offset - begin, sb).trim();
                offset++;  // consume the newline
                return Integer.parseInt(s);
            }
            offset++;
        }
        throw new AerospikeException.Parse("Failed to find partition-generation value");
    }

    /**
     * Parse the "replicas-master" response: one base64-encoded ownership bitmap
     * per namespace. Each namespace gets a single-element replica array
     * (master only) in the map.
     */
    @SuppressWarnings("unchecked")
    private void parseReplicasMaster(Node node) {
        // Use low-level info methods and parse byte array directly for maximum performance.
        // Receive format: replicas-master\t<ns1>:<base 64 encoded bitmap1>;<ns2>:<base 64 encoded bitmap2>...\n
        expectName(ReplicasMaster);

        int begin = offset;

        while (offset < length) {
            if (buffer[offset] == ':') {
                // Parse namespace.
                String namespace = Buffer.utf8ToString(buffer, begin, offset - begin, sb).trim();

                if (namespace.length() <= 0 || namespace.length() >= 32) {
                    String response = getTruncatedResponse();
                    throw new AerospikeException.Parse("Invalid partition namespace " + namespace + ". Response=" + response);
                }
                begin = ++offset;

                // Parse partition bitmap.
                while (offset < length) {
                    byte b = buffer[offset];

                    if (b == ';' || b == '\n') {
                        break;
                    }
                    offset++;
                }

                if (offset == begin) {
                    String response = getTruncatedResponse();
                    throw new AerospikeException.Parse("Empty partition id for namespace " + namespace + ". Response=" + response);
                }

                AtomicReferenceArray<Node>[] replicaArray = map.get(namespace);

                if (replicaArray == null) {
                    // First sighting of this namespace: master-only replica array.
                    replicaArray = new AtomicReferenceArray[1];
                    replicaArray[0] = new AtomicReferenceArray<Node>(partitionCount);
                    copyPartitionMap();
                    map.put(namespace, replicaArray);
                }
                // Log.info("Map: " + namespace + "[0] " + node);
                decodeBitmap(node, replicaArray[0], begin);
                begin = ++offset;  // skip the ';' or '\n' terminator
            }
            else {
                offset++;
            }
        }
    }

    /**
     * Parse the "replicas-all" response: a replica count followed by that many
     * base64-encoded ownership bitmaps per namespace. Resizes a namespace's
     * replica array if the replication factor changed since the last parse.
     */
    @SuppressWarnings("unchecked")
    private void parseReplicasAll(Node node) throws AerospikeException {
        // Use low-level info methods and parse byte array directly for maximum performance.
        // Receive format: replicas-all\t
        //                 <ns1>:<count>,<base 64 encoded bitmap1>,<base 64 encoded bitmap2>...;
        //                 <ns2>:<count>,<base 64 encoded bitmap1>,<base 64 encoded bitmap2>...;\n
        expectName(ReplicasAll);

        int begin = offset;

        while (offset < length) {
            if (buffer[offset] == ':') {
                // Parse namespace.
                String namespace = Buffer.utf8ToString(buffer, begin, offset - begin, sb).trim();

                if (namespace.length() <= 0 || namespace.length() >= 32) {
                    String response = getTruncatedResponse();
                    throw new AerospikeException.Parse("Invalid partition namespace " + namespace + ". Response=" + response);
                }
                begin = ++offset;

                // Parse replica count.
                while (offset < length) {
                    byte b = buffer[offset];

                    if (b == ',') {
                        break;
                    }
                    offset++;
                }
                int replicaCount = Integer.parseInt(new String(buffer, begin, offset - begin));

                // Ensure replicaArray is correct size.
                AtomicReferenceArray<Node>[] replicaArray = map.get(namespace);

                if (replicaArray == null) {
                    // Create new replica array.
                    replicaArray = new AtomicReferenceArray[replicaCount];

                    for (int i = 0; i < replicaCount; i++) {
                        replicaArray[i] = new AtomicReferenceArray<Node>(partitionCount);
                    }
                    copyPartitionMap();
                    map.put(namespace, replicaArray);
                }
                else if (replicaArray.length != replicaCount) {
                    if (Log.infoEnabled()) {
                        Log.info("Namespace " + namespace + " replication factor changed from " + replicaArray.length + " to " + replicaCount);
                    }

                    // Resize replica array.
                    AtomicReferenceArray<Node>[] replicaTarget = new AtomicReferenceArray[replicaCount];

                    if (replicaArray.length < replicaCount) {
                        int i = 0;

                        // Copy existing entries.
                        for (; i < replicaArray.length; i++) {
                            replicaTarget[i] = replicaArray[i];
                        }

                        // Create new entries.
                        for (; i < replicaCount; i++) {
                            replicaTarget[i] = new AtomicReferenceArray<Node>(partitionCount);
                        }
                    }
                    else {
                        // Copy existing entries.
                        for (int i = 0; i < replicaCount; i++) {
                            replicaTarget[i] = replicaArray[i];
                        }
                    }
                    copyPartitionMap();
                    replicaArray = replicaTarget;
                    map.put(namespace, replicaArray);
                }

                // Parse partition bitmaps.
                for (int i = 0; i < replicaCount; i++) {
                    begin = ++offset;  // skip the ',' preceding each bitmap

                    // Find bitmap endpoint
                    while (offset < length) {
                        byte b = buffer[offset];

                        if (b == ',' || b == ';') {
                            break;
                        }
                        offset++;
                    }

                    if (offset == begin) {
                        String response = getTruncatedResponse();
                        throw new AerospikeException.Parse("Empty partition id for namespace " + namespace + ". Response=" + response);
                    }
                    // Log.info("Map: " + namespace + '[' + i + "] " + node);
                    decodeBitmap(node, replicaArray[i], begin);
                }
                begin = ++offset;  // skip the ';' terminating this namespace
            }
            else {
                offset++;
            }
        }
    }

    /**
     * Decode one base64 bitmap (buffer[begin..offset)) and update the per-partition
     * node table: bit set (MSB-first within each byte) means this node owns that
     * partition.
     */
    private void decodeBitmap(Node node, AtomicReferenceArray<Node> nodeArray, int begin) {
        byte[] restoreBuffer = Base64.decode(buffer, begin, offset - begin);

        for (int i = 0; i < partitionCount; i++) {
            Node nodeOld = nodeArray.get(i);

            if ((restoreBuffer[i >> 3] & (0x80 >> (i & 7))) != 0) {
                // Node owns this partition.
                // Log.info("Map: " + i);
                if (nodeOld != null && nodeOld != node) {
                    // Force previously mapped node to refresh it's partition map on next cluster tend.
                    nodeOld.partitionGeneration = -1;
                }

                // Use lazy set because there is only one producer thread. In addition,
                // there is a one second delay due to the cluster tend polling interval.
                // An extra millisecond for a node change will not make a difference and
                // overall performance is improved.
                nodeArray.lazySet(i, node);
            }
            else {
                // Node does not own partition.
                if (node == nodeOld) {
                    // Must erase previous map.
                    nodeArray.lazySet(i, null);
                }
            }
        }
    }

    /**
     * Copy-on-write: shallow-copy the map the first time it must be mutated, so
     * the caller's original instance is never modified.
     */
    private void copyPartitionMap() {
        if (! copied) {
            // Make shallow copy of map.
            map = new HashMap<String,AtomicReferenceArray<Node>[]>(map);
            copied = true;
        }
    }

    /**
     * Consume the expected command name up to the next tab; leaves {@code offset}
     * positioned just past the tab.
     *
     * @throws AerospikeException.Parse if the name at the cursor does not match
     */
    private void expectName(String name) throws AerospikeException {
        int begin = offset;

        while (offset < length) {
            if (buffer[offset] == '\t') {
                String s = Buffer.utf8ToString(buffer, begin, offset - begin, sb).trim();

                if (name.equals(s)) {
                    offset++;
                    return;
                }
                break;
            }
            offset++;
        }
        throw new AerospikeException.Parse("Failed to find " + name);
    }

    /** First 200 bytes (at most) of the raw response, for error messages. */
    private String getTruncatedResponse() {
        int max = (length > 200) ? 200 : length;
        return Buffer.utf8ToString(buffer, 0, max);
    }
}
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.component.zookeepermaster.group;

import java.io.File;
import java.net.ServerSocket;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.apache.camel.component.zookeepermaster.group.internal.ChildData;
import org.apache.camel.component.zookeepermaster.group.internal.ZooKeeperGroup;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryNTimes;
import org.apache.zookeeper.server.NIOServerCnxnFactory;
import org.apache.zookeeper.server.ServerConfig;
import org.apache.zookeeper.server.ZooKeeperServer;
import org.apache.zookeeper.server.persistence.FileTxnSnapLog;
import org.junit.Test;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;

/**
 * Integration tests for ZooKeeper-backed group membership / master election.
 * Each test boots an embedded ZooKeeper server on a free port and drives one
 * or more {@link ZooKeeperGroup} instances against it.
 */
public class GroupTest {

    // Shared listener that just logs group events to stderr; reused by every test.
    private GroupListener<NodeState> listener = new GroupListener<NodeState>() {
        @Override
        public void groupEvent(Group<NodeState> group, GroupListener.GroupEvent event) {
            boolean connected = group.isConnected();
            boolean master = group.isMaster();
            if (connected) {
                Collection<NodeState> members = group.members().values();
                System.err.println("GroupEvent: " + event + " (connected=" + connected + ", master=" + master + ", members=" + members + ")");
            } else {
                System.err.println("GroupEvent: " + event + " (connected=" + connected + ", master=false)");
            }
        }
    };

    /**
     * Returns a TCP port that was free at probe time.
     * <p>
     * The probe socket is released before the port is returned, so another
     * process could theoretically grab it in between (inherent race of this
     * technique); acceptable for tests.
     */
    private int findFreePort() throws Exception {
        // try-with-resources guarantees the probe socket is closed even if
        // getLocalPort() were to fail (the original leaked it on exception).
        try (ServerSocket ss = new ServerSocket(0)) {
            return ss.getLocalPort();
        }
    }

    /**
     * Starts an embedded ZooKeeper server on the given port with data under
     * target/zk/data, and returns its connection factory so tests can shut it
     * down (or simulate an outage).
     */
    private NIOServerCnxnFactory startZooKeeper(int port) throws Exception {
        ServerConfig cfg = new ServerConfig();
        cfg.parse(new String[] {Integer.toString(port), "target/zk/data"});

        ZooKeeperServer zkServer = new ZooKeeperServer();
        FileTxnSnapLog ftxn = new FileTxnSnapLog(new File(cfg.getDataLogDir()), new File(cfg.getDataDir()));
        zkServer.setTxnLogFactory(ftxn);
        zkServer.setTickTime(cfg.getTickTime());
        // Short session bounds so disconnect/rejoin tests complete quickly.
        zkServer.setMinSessionTimeout(6000);
        zkServer.setMaxSessionTimeout(9000);
        NIOServerCnxnFactory cnxnFactory = new NIOServerCnxnFactory();
        cnxnFactory.configure(cfg.getClientPortAddress(), cfg.getMaxClientCnxns());
        cnxnFactory.startup(zkServer);
        return cnxnFactory;
    }

    /**
     * Starts four members in order and verifies the first one to register
     * becomes master.
     */
    @Test
    public void testOrder() throws Exception {
        int port = findFreePort();

        CuratorFramework curator = CuratorFrameworkFactory.builder()
            .connectString("localhost:" + port)
            .retryPolicy(new RetryNTimes(10, 100))
            .build();
        curator.start();

        final String path = "/singletons/test/Order" + System.currentTimeMillis();
        List<ZooKeeperGroup<NodeState>> members = new ArrayList<ZooKeeperGroup<NodeState>>();
        for (int i = 0; i < 4; i++) {
            ZooKeeperGroup<NodeState> group = new ZooKeeperGroup<NodeState>(curator, path, NodeState.class);
            group.add(listener);
            members.add(group);
        }

        // Nothing is connected before the server is up.
        for (ZooKeeperGroup<NodeState> group : members) {
            assertFalse(group.isConnected());
            assertFalse(group.isMaster());
        }

        NIOServerCnxnFactory cnxnFactory = startZooKeeper(port);

        curator.getZookeeperClient().blockUntilConnectedOrTimedOut();

        // first to start should be master if members are ordered...
        int i = 0;
        for (ZooKeeperGroup<NodeState> group : members) {
            group.start();
            group.update(new NodeState("foo" + i));
            i++;

            // wait for registration
            while (group.getId() == null) {
                TimeUnit.MILLISECONDS.sleep(100);
            }
        }

        boolean firstStartedIsMaster = members.get(0).isMaster();

        for (ZooKeeperGroup<NodeState> group : members) {
            group.close();
        }
        curator.close();
        cnxnFactory.shutdown();
        cnxnFactory.join();

        assertTrue("first started is master", firstStartedIsMaster);
    }

    /**
     * Joining (update) after the ZooKeeper connection is established should
     * make the sole member master.
     */
    @Test
    public void testJoinAfterConnect() throws Exception {
        int port = findFreePort();

        CuratorFramework curator = CuratorFrameworkFactory.builder()
            .connectString("localhost:" + port)
            .retryPolicy(new RetryNTimes(10, 100))
            .build();
        curator.start();

        final Group<NodeState> group = new ZooKeeperGroup<NodeState>(curator, "/singletons/test" + System.currentTimeMillis(), NodeState.class);
        group.add(listener);
        group.start();

        assertFalse(group.isConnected());
        assertFalse(group.isMaster());

        GroupCondition groupCondition = new GroupCondition();
        group.add(groupCondition);

        NIOServerCnxnFactory cnxnFactory = startZooKeeper(port);
        curator.getZookeeperClient().blockUntilConnectedOrTimedOut();

        assertTrue(groupCondition.waitForConnected(5, TimeUnit.SECONDS));
        // Connected but not yet joined: must not be master.
        assertFalse(group.isMaster());

        group.update(new NodeState("foo"));
        assertTrue(groupCondition.waitForMaster(5, TimeUnit.SECONDS));

        group.close();
        curator.close();
        cnxnFactory.shutdown();
        cnxnFactory.join();
    }

    /**
     * Joining (update) before the ZooKeeper server is even up should still
     * result in mastership once the connection is established.
     */
    @Test
    public void testJoinBeforeConnect() throws Exception {
        int port = findFreePort();

        CuratorFramework curator = CuratorFrameworkFactory.builder()
            .connectString("localhost:" + port)
            .retryPolicy(new RetryNTimes(10, 100))
            .build();
        curator.start();

        Group<NodeState> group = new ZooKeeperGroup<NodeState>(curator, "/singletons/test" + System.currentTimeMillis(), NodeState.class);
        group.add(listener);
        group.start();

        GroupCondition groupCondition = new GroupCondition();
        group.add(groupCondition);

        assertFalse(group.isConnected());
        assertFalse(group.isMaster());
        group.update(new NodeState("foo"));

        NIOServerCnxnFactory cnxnFactory = startZooKeeper(port);
        curator.getZookeeperClient().blockUntilConnectedOrTimedOut();

        assertTrue(groupCondition.waitForConnected(5, TimeUnit.SECONDS));
        assertTrue(groupCondition.waitForMaster(5, TimeUnit.SECONDS));

        group.close();
        curator.close();
        cnxnFactory.shutdown();
        cnxnFactory.join();
    }

    /**
     * A member that loses its ZooKeeper connection must drop mastership and
     * regain it after the server comes back.
     */
    @Test
    public void testRejoinAfterDisconnect() throws Exception {
        int port = findFreePort();

        CuratorFramework curator = CuratorFrameworkFactory.builder()
            .connectString("localhost:" + port)
            .retryPolicy(new RetryNTimes(10, 100))
            .build();
        curator.start();

        NIOServerCnxnFactory cnxnFactory = startZooKeeper(port);
        Group<NodeState> group = new ZooKeeperGroup<NodeState>(curator, "/singletons/test" + System.currentTimeMillis(), NodeState.class);
        group.add(listener);
        group.update(new NodeState("foo"));
        group.start();

        GroupCondition groupCondition = new GroupCondition();
        group.add(groupCondition);

        curator.getZookeeperClient().blockUntilConnectedOrTimedOut();
        assertTrue(groupCondition.waitForConnected(5, TimeUnit.SECONDS));
        assertTrue(groupCondition.waitForMaster(5, TimeUnit.SECONDS));

        // Simulate a ZooKeeper outage.
        cnxnFactory.shutdown();
        cnxnFactory.join();

        // Original ignored this result; the disconnect must actually be observed.
        assertTrue(groupCondition.waitForDisconnected(5, TimeUnit.SECONDS));
        group.remove(groupCondition);

        assertFalse(group.isConnected());
        assertFalse(group.isMaster());

        groupCondition = new GroupCondition();
        group.add(groupCondition);

        // Restart the server; the member should reconnect and become master again.
        cnxnFactory = startZooKeeper(port);
        curator.getZookeeperClient().blockUntilConnectedOrTimedOut();

        assertTrue(groupCondition.waitForConnected(5, TimeUnit.SECONDS));
        assertTrue(groupCondition.waitForMaster(5, TimeUnit.SECONDS));

        group.close();
        curator.close();
        cnxnFactory.shutdown();
        cnxnFactory.join();
    }

    //Tests that if close() is executed right after start(), there are no left over entries.
    //(see https://github.com/jboss-fuse/fuse/issues/133)
    @Test
    public void testGroupClose() throws Exception {
        int port = findFreePort();
        NIOServerCnxnFactory cnxnFactory = startZooKeeper(port);
        CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder()
            .connectString("localhost:" + port)
            .connectionTimeoutMs(6000)
            .sessionTimeoutMs(6000)
            .retryPolicy(new RetryNTimes(10, 100));
        CuratorFramework curator = builder.build();
        curator.start();
        curator.getZookeeperClient().blockUntilConnectedOrTimedOut();
        String groupNode = "/singletons/test" + System.currentTimeMillis();
        curator.create().creatingParentsIfNeeded().forPath(groupNode);

        for (int i = 0; i < 100; i++) {
            ZooKeeperGroup<NodeState> group = new ZooKeeperGroup<NodeState>(curator, groupNode, NodeState.class);
            group.add(listener);
            group.update(new NodeState("foo"));
            group.start();
            group.close();
            List<String> entries = curator.getChildren().forPath(groupNode);
            assertTrue(entries.isEmpty() || group.isUnstable());
            if (group.isUnstable()) {
                // let's wait for session timeout
                curator.close();
                curator = builder.build();
                curator.start();
                curator.getZookeeperClient().blockUntilConnectedOrTimedOut();
            }
        }

        curator.close();
        cnxnFactory.shutdown();
        cnxnFactory.join();
    }

    /**
     * Writing back the member's JSON with an extra unknown field must not
     * break parsing: the group keeps working and picks up the new version.
     */
    @Test
    public void testAddFieldIgnoredOnParse() throws Exception {
        int port = findFreePort();
        NIOServerCnxnFactory cnxnFactory = startZooKeeper(port);
        CuratorFramework curator = CuratorFrameworkFactory.builder()
            .connectString("localhost:" + port)
            .retryPolicy(new RetryNTimes(10, 100))
            .build();
        curator.start();
        curator.getZookeeperClient().blockUntilConnectedOrTimedOut();
        String groupNode = "/singletons/test" + System.currentTimeMillis();
        curator.create().creatingParentsIfNeeded().forPath(groupNode);

        curator.getZookeeperClient().blockUntilConnectedOrTimedOut();
        final ZooKeeperGroup<NodeState> group = new ZooKeeperGroup<NodeState>(curator, groupNode, NodeState.class);
        group.add(listener);
        group.start();
        GroupCondition groupCondition = new GroupCondition();
        group.add(groupCondition);
        group.update(new NodeState("foo"));

        assertTrue(groupCondition.waitForConnected(5, TimeUnit.SECONDS));
        assertTrue(groupCondition.waitForMaster(5, TimeUnit.SECONDS));

        ChildData currentData = group.getCurrentData().get(0);
        final int version = currentData.getStat().getVersion();

        NodeState lastState = group.getLastState();
        String json = lastState.toString();
        System.err.println("JSON:" + json);

        // Splice an unknown field into the JSON and write it back at the same version.
        String newValWithNewField = json.substring(0, json.lastIndexOf('}')) + ",\"Rubbish\":\"Rubbish\"}";
        curator.getZookeeperClient().getZooKeeper().setData(group.getId(), newValWithNewField.getBytes(), version);

        assertTrue(group.isMaster());

        // Poll (up to ~5s) until the cache reflects the new version.
        int attempts = 0;
        while (attempts++ < 5 && version == group.getCurrentData().get(0).getStat().getVersion()) {
            TimeUnit.SECONDS.sleep(1);
        }

        assertNotEquals("We see the updated version", version, group.getCurrentData().get(0).getStat().getVersion());

        System.err.println("CurrentData:" + group.getCurrentData());

        group.close();
        curator.close();
        cnxnFactory.shutdown();
        cnxnFactory.join();
    }

    /**
     * Listener that exposes group lifecycle transitions as awaitable latches,
     * so tests can block on connected / master / disconnected with a timeout.
     */
    private class GroupCondition implements GroupListener<NodeState> {
        private CountDownLatch connected = new CountDownLatch(1);
        private CountDownLatch master = new CountDownLatch(1);
        private CountDownLatch disconnected = new CountDownLatch(1);

        @Override
        public void groupEvent(Group<NodeState> group, GroupEvent event) {
            switch (event) {
            case CONNECTED:
            case CHANGED:
                connected.countDown();
                if (group.isMaster()) {
                    master.countDown();
                }
                break;
            case DISCONNECTED:
                disconnected.countDown();
                break;
            default:
                // noop
            }
        }

        public boolean waitForConnected(long time, TimeUnit timeUnit) throws InterruptedException {
            return connected.await(time, timeUnit);
        }

        public boolean waitForDisconnected(long time, TimeUnit timeUnit) throws InterruptedException {
            return disconnected.await(time, timeUnit);
        }

        public boolean waitForMaster(long time, TimeUnit timeUnit) throws InterruptedException {
            return master.await(time, timeUnit);
        }
    }
}
/* * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
*/ package sun.security.provider.certpath; import java.io.IOException; import java.math.BigInteger; import java.net.URI; import java.net.URISyntaxException; import java.security.AccessController; import java.security.InvalidAlgorithmParameterException; import java.security.NoSuchAlgorithmException; import java.security.PrivilegedAction; import java.security.PublicKey; import java.security.Security; import java.security.cert.CertPathValidatorException.BasicReason; import java.security.cert.Extension; import java.security.cert.*; import java.util.*; import javax.security.auth.x500.X500Principal; import static sun.security.provider.certpath.OCSP.*; import static sun.security.provider.certpath.PKIX.*; import sun.security.x509.*; import static sun.security.x509.PKIXExtensions.*; import sun.security.util.Debug; class RevocationChecker extends PKIXRevocationChecker { private static final Debug debug = Debug.getInstance("certpath"); private TrustAnchor anchor; private ValidatorParams params; private boolean onlyEE; private boolean softFail; private boolean crlDP; private URI responderURI; private X509Certificate responderCert; private List<CertStore> certStores; private Map<X509Certificate, byte[]> ocspResponses; private List<Extension> ocspExtensions; private boolean legacy; private LinkedList<CertPathValidatorException> softFailExceptions = new LinkedList<>(); // state variables private X509Certificate issuerCert; private PublicKey prevPubKey; private boolean crlSignFlag; private int certIndex; private enum Mode { PREFER_OCSP, PREFER_CRLS, ONLY_CRLS, ONLY_OCSP }; private Mode mode = Mode.PREFER_OCSP; private static class RevocationProperties { boolean onlyEE; boolean ocspEnabled; boolean crlDPEnabled; String ocspUrl; String ocspSubject; String ocspIssuer; String ocspSerial; } RevocationChecker() { legacy = false; } RevocationChecker(TrustAnchor anchor, ValidatorParams params) throws CertPathValidatorException { legacy = true; init(anchor, params); } void init(TrustAnchor 
anchor, ValidatorParams params) throws CertPathValidatorException { RevocationProperties rp = getRevocationProperties(); URI uri = getOcspResponder(); responderURI = (uri == null) ? toURI(rp.ocspUrl) : uri; X509Certificate cert = getOcspResponderCert(); responderCert = (cert == null) ? getResponderCert(rp, params.trustAnchors(), params.certStores()) : cert; Set<Option> options = getOptions(); for (Option option : options) { switch (option) { case ONLY_END_ENTITY: case PREFER_CRLS: case SOFT_FAIL: case NO_FALLBACK: break; default: throw new CertPathValidatorException( "Unrecognized revocation parameter option: " + option); } } softFail = options.contains(Option.SOFT_FAIL); // set mode, only end entity flag if (legacy) { mode = (rp.ocspEnabled) ? Mode.PREFER_OCSP : Mode.ONLY_CRLS; onlyEE = rp.onlyEE; } else { if (options.contains(Option.NO_FALLBACK)) { if (options.contains(Option.PREFER_CRLS)) { mode = Mode.ONLY_CRLS; } else { mode = Mode.ONLY_OCSP; } } else if (options.contains(Option.PREFER_CRLS)) { mode = Mode.PREFER_CRLS; } onlyEE = options.contains(Option.ONLY_END_ENTITY); } if (legacy) { crlDP = rp.crlDPEnabled; } else { crlDP = true; } ocspResponses = getOcspResponses(); ocspExtensions = getOcspExtensions(); this.anchor = anchor; this.params = params; this.certStores = new ArrayList<>(params.certStores()); try { this.certStores.add(CertStore.getInstance("Collection", new CollectionCertStoreParameters(params.certificates()))); } catch (InvalidAlgorithmParameterException | NoSuchAlgorithmException e) { // should never occur but not necessarily fatal, so log it, // ignore and continue if (debug != null) { debug.println("RevocationChecker: " + "error creating Collection CertStore: " + e); } } } private static URI toURI(String uriString) throws CertPathValidatorException { try { if (uriString != null) { return new URI(uriString); } return null; } catch (URISyntaxException e) { throw new CertPathValidatorException( "cannot parse ocsp.responderURL property", e); } } 
private static RevocationProperties getRevocationProperties() { return AccessController.doPrivileged( new PrivilegedAction<RevocationProperties>() { public RevocationProperties run() { RevocationProperties rp = new RevocationProperties(); String onlyEE = Security.getProperty( "com.sun.security.onlyCheckRevocationOfEECert"); rp.onlyEE = onlyEE != null && onlyEE.equalsIgnoreCase("true"); String ocspEnabled = Security.getProperty("ocsp.enable"); rp.ocspEnabled = ocspEnabled != null && ocspEnabled.equalsIgnoreCase("true"); rp.ocspUrl = Security.getProperty("ocsp.responderURL"); rp.ocspSubject = Security.getProperty("ocsp.responderCertSubjectName"); rp.ocspIssuer = Security.getProperty("ocsp.responderCertIssuerName"); rp.ocspSerial = Security.getProperty("ocsp.responderCertSerialNumber"); rp.crlDPEnabled = Boolean.getBoolean("com.sun.security.enableCRLDP"); return rp; } } ); } private static X509Certificate getResponderCert(RevocationProperties rp, Set<TrustAnchor> anchors, List<CertStore> stores) throws CertPathValidatorException { if (rp.ocspSubject != null) { return getResponderCert(rp.ocspSubject, anchors, stores); } else if (rp.ocspIssuer != null && rp.ocspSerial != null) { return getResponderCert(rp.ocspIssuer, rp.ocspSerial, anchors, stores); } else if (rp.ocspIssuer != null || rp.ocspSerial != null) { throw new CertPathValidatorException( "Must specify both ocsp.responderCertIssuerName and " + "ocsp.responderCertSerialNumber properties"); } return null; } private static X509Certificate getResponderCert(String subject, Set<TrustAnchor> anchors, List<CertStore> stores) throws CertPathValidatorException { X509CertSelector sel = new X509CertSelector(); try { sel.setSubject(new X500Principal(subject)); } catch (IllegalArgumentException e) { throw new CertPathValidatorException( "cannot parse ocsp.responderCertSubjectName property", e); } return getResponderCert(sel, anchors, stores); } private static X509Certificate getResponderCert(String issuer, String serial, 
Set<TrustAnchor> anchors, List<CertStore> stores) throws CertPathValidatorException { X509CertSelector sel = new X509CertSelector(); try { sel.setIssuer(new X500Principal(issuer)); } catch (IllegalArgumentException e) { throw new CertPathValidatorException( "cannot parse ocsp.responderCertIssuerName property", e); } try { sel.setSerialNumber(new BigInteger(stripOutSeparators(serial), 16)); } catch (NumberFormatException e) { throw new CertPathValidatorException( "cannot parse ocsp.responderCertSerialNumber property", e); } return getResponderCert(sel, anchors, stores); } private static X509Certificate getResponderCert(X509CertSelector sel, Set<TrustAnchor> anchors, List<CertStore> stores) throws CertPathValidatorException { // first check TrustAnchors for (TrustAnchor anchor : anchors) { X509Certificate cert = anchor.getTrustedCert(); if (cert == null) { continue; } if (sel.match(cert)) { return cert; } } // now check CertStores for (CertStore store : stores) { try { Collection<? extends Certificate> certs = store.getCertificates(sel); if (!certs.isEmpty()) { return (X509Certificate)certs.iterator().next(); } } catch (CertStoreException e) { // ignore and try next CertStore if (debug != null) { debug.println("CertStore exception:" + e); } continue; } } throw new CertPathValidatorException( "Cannot find the responder's certificate " + "(set using the OCSP security properties)."); } @Override public void init(boolean forward) throws CertPathValidatorException { if (forward) { throw new CertPathValidatorException("forward checking not supported"); } if (anchor != null) { issuerCert = anchor.getTrustedCert(); prevPubKey = (issuerCert != null) ? 
issuerCert.getPublicKey() : anchor.getCAPublicKey(); } crlSignFlag = true; if (params != null && params.certPath() != null) { certIndex = params.certPath().getCertificates().size() - 1; } else { certIndex = -1; } softFailExceptions.clear(); } @Override public boolean isForwardCheckingSupported() { return false; } @Override public Set<String> getSupportedExtensions() { return null; } @Override public List<CertPathValidatorException> getSoftFailExceptions() { return Collections.unmodifiableList(softFailExceptions); } @Override public void check(Certificate cert, Collection<String> unresolvedCritExts) throws CertPathValidatorException { check((X509Certificate)cert, unresolvedCritExts, prevPubKey, crlSignFlag); } private void check(X509Certificate xcert, Collection<String> unresolvedCritExts, PublicKey pubKey, boolean crlSignFlag) throws CertPathValidatorException { try { if (onlyEE && xcert.getBasicConstraints() != -1) { if (debug != null) { debug.println("Skipping revocation check, not end " + "entity cert"); } return; } switch (mode) { case PREFER_OCSP: case ONLY_OCSP: checkOCSP(xcert, unresolvedCritExts); break; case PREFER_CRLS: case ONLY_CRLS: checkCRLs(xcert, unresolvedCritExts, null, pubKey, crlSignFlag); break; } } catch (CertPathValidatorException e) { if (e.getReason() == BasicReason.REVOKED) { throw e; } boolean eSoftFail = isSoftFailException(e); if (eSoftFail) { if (mode == Mode.ONLY_OCSP || mode == Mode.ONLY_CRLS) { return; } } else { if (mode == Mode.ONLY_OCSP || mode == Mode.ONLY_CRLS) { throw e; } } CertPathValidatorException cause = e; // Otherwise, failover if (debug != null) { debug.println("RevocationChecker.check() " + e.getMessage()); debug.println("RevocationChecker.check() preparing to failover"); } try { switch (mode) { case PREFER_OCSP: checkCRLs(xcert, unresolvedCritExts, null, pubKey, crlSignFlag); break; case PREFER_CRLS: checkOCSP(xcert, unresolvedCritExts); break; } } catch (CertPathValidatorException x) { if (debug != null) { 
debug.println("RevocationChecker.check() failover failed"); debug.println("RevocationChecker.check() " + x.getMessage()); } if (x.getReason() == BasicReason.REVOKED) { throw x; } if (!isSoftFailException(x)) { cause.addSuppressed(x); throw cause; } else { // only pass if both exceptions were soft failures if (!eSoftFail) { throw cause; } } } } finally { updateState(xcert); } } private boolean isSoftFailException(CertPathValidatorException e) { if (softFail && e.getReason() == BasicReason.UNDETERMINED_REVOCATION_STATUS) { // recreate exception with correct index CertPathValidatorException e2 = new CertPathValidatorException( e.getMessage(), e.getCause(), params.certPath(), certIndex, e.getReason()); softFailExceptions.addFirst(e2); return true; } return false; } private void updateState(X509Certificate cert) throws CertPathValidatorException { issuerCert = cert; // Make new public key if parameters are missing PublicKey pubKey = cert.getPublicKey(); if (PKIX.isDSAPublicKeyWithoutParams(pubKey)) { // pubKey needs to inherit DSA parameters from prev key pubKey = BasicChecker.makeInheritedParamsKey(pubKey, prevPubKey); } prevPubKey = pubKey; crlSignFlag = certCanSignCrl(cert); if (certIndex > 0) { certIndex--; } } // Maximum clock skew in milliseconds (15 minutes) allowed when checking // validity of CRLs private static final long MAX_CLOCK_SKEW = 900000; private void checkCRLs(X509Certificate cert, Collection<String> unresolvedCritExts, Set<X509Certificate> stackedCerts, PublicKey pubKey, boolean signFlag) throws CertPathValidatorException { checkCRLs(cert, pubKey, null, signFlag, true, stackedCerts, params.trustAnchors()); } private void checkCRLs(X509Certificate cert, PublicKey prevKey, X509Certificate prevCert, boolean signFlag, boolean allowSeparateKey, Set<X509Certificate> stackedCerts, Set<TrustAnchor> anchors) throws CertPathValidatorException { if (debug != null) { debug.println("RevocationChecker.checkCRLs()" + " ---checking revocation status ..."); } // 
reject circular dependencies - RFC 3280 is not explicit on how // to handle this, so we feel it is safest to reject them until // the issue is resolved in the PKIX WG. if (stackedCerts != null && stackedCerts.contains(cert)) { if (debug != null) { debug.println("RevocationChecker.checkCRLs()" + " circular dependency"); } throw new CertPathValidatorException ("Could not determine revocation status", null, null, -1, BasicReason.UNDETERMINED_REVOCATION_STATUS); } Set<X509CRL> possibleCRLs = new HashSet<>(); Set<X509CRL> approvedCRLs = new HashSet<>(); X509CRLSelector sel = new X509CRLSelector(); sel.setCertificateChecking(cert); CertPathHelper.setDateAndTime(sel, params.date(), MAX_CLOCK_SKEW); // First, check user-specified CertStores CertPathValidatorException networkFailureException = null; for (CertStore store : certStores) { try { for (CRL crl : store.getCRLs(sel)) { possibleCRLs.add((X509CRL)crl); } } catch (CertStoreException e) { if (debug != null) { debug.println("RevocationChecker.checkCRLs() " + "CertStoreException: " + e.getMessage()); } if (networkFailureException == null && CertStoreHelper.isCausedByNetworkIssue(store.getType(),e)) { // save this exception, we may need to throw it later networkFailureException = new CertPathValidatorException( "Unable to determine revocation status due to " + "network error", e, null, -1, BasicReason.UNDETERMINED_REVOCATION_STATUS); } } } if (debug != null) { debug.println("RevocationChecker.checkCRLs() " + "possible crls.size() = " + possibleCRLs.size()); } boolean[] reasonsMask = new boolean[9]; if (!possibleCRLs.isEmpty()) { // Now that we have a list of possible CRLs, see which ones can // be approved approvedCRLs.addAll(verifyPossibleCRLs(possibleCRLs, cert, prevKey, signFlag, reasonsMask, anchors)); } if (debug != null) { debug.println("RevocationChecker.checkCRLs() " + "approved crls.size() = " + approvedCRLs.size()); } // make sure that we have at least one CRL that _could_ cover // the certificate in question 
and all reasons are covered if (!approvedCRLs.isEmpty() && Arrays.equals(reasonsMask, ALL_REASONS)) { checkApprovedCRLs(cert, approvedCRLs); } else { // Check Distribution Points // all CRLs returned by the DP Fetcher have also been verified try { if (crlDP) { approvedCRLs.addAll(DistributionPointFetcher.getCRLs( sel, signFlag, prevKey, prevCert, params.sigProvider(), certStores, reasonsMask, anchors, null)); } } catch (CertStoreException e) { if (e instanceof CertStoreTypeException) { CertStoreTypeException cste = (CertStoreTypeException)e; if (CertStoreHelper.isCausedByNetworkIssue(cste.getType(), e)) { throw new CertPathValidatorException( "Unable to determine revocation status due to " + "network error", e, null, -1, BasicReason.UNDETERMINED_REVOCATION_STATUS); } } throw new CertPathValidatorException(e); } if (!approvedCRLs.isEmpty() && Arrays.equals(reasonsMask, ALL_REASONS)) { checkApprovedCRLs(cert, approvedCRLs); } else { if (allowSeparateKey) { try { verifyWithSeparateSigningKey(cert, prevKey, signFlag, stackedCerts); return; } catch (CertPathValidatorException cpve) { if (networkFailureException != null) { // if a network issue previously prevented us from // retrieving a CRL from one of the user-specified // CertStores, throw it now so it can be handled // appropriately throw networkFailureException; } throw cpve; } } else { if (networkFailureException != null) { // if a network issue previously prevented us from // retrieving a CRL from one of the user-specified // CertStores, throw it now so it can be handled // appropriately throw networkFailureException; } throw new CertPathValidatorException( "Could not determine revocation status", null, null, -1, BasicReason.UNDETERMINED_REVOCATION_STATUS); } } } } private void checkApprovedCRLs(X509Certificate cert, Set<X509CRL> approvedCRLs) throws CertPathValidatorException { // See if the cert is in the set of approved crls. 
if (debug != null) { BigInteger sn = cert.getSerialNumber(); debug.println("RevocationChecker.checkApprovedCRLs() " + "starting the final sweep..."); debug.println("RevocationChecker.checkApprovedCRLs()" + " cert SN: " + sn.toString()); } CRLReason reasonCode = CRLReason.UNSPECIFIED; X509CRLEntryImpl entry = null; for (X509CRL crl : approvedCRLs) { X509CRLEntry e = crl.getRevokedCertificate(cert); if (e != null) { try { entry = X509CRLEntryImpl.toImpl(e); } catch (CRLException ce) { throw new CertPathValidatorException(ce); } if (debug != null) { debug.println("RevocationChecker.checkApprovedCRLs()" + " CRL entry: " + entry.toString()); } /* * Abort CRL validation and throw exception if there are any * unrecognized critical CRL entry extensions (see section * 5.3 of RFC 3280). */ Set<String> unresCritExts = entry.getCriticalExtensionOIDs(); if (unresCritExts != null && !unresCritExts.isEmpty()) { /* remove any that we will process */ unresCritExts.remove(ReasonCode_Id.toString()); unresCritExts.remove(CertificateIssuer_Id.toString()); if (!unresCritExts.isEmpty()) { throw new CertPathValidatorException( "Unrecognized critical extension(s) in revoked " + "CRL entry"); } } reasonCode = entry.getRevocationReason(); if (reasonCode == null) { reasonCode = CRLReason.UNSPECIFIED; } Date revocationDate = entry.getRevocationDate(); if (revocationDate.before(params.date())) { Throwable t = new CertificateRevokedException( revocationDate, reasonCode, crl.getIssuerX500Principal(), entry.getExtensions()); throw new CertPathValidatorException( t.getMessage(), t, null, -1, BasicReason.REVOKED); } } } } private void checkOCSP(X509Certificate cert, Collection<String> unresolvedCritExts) throws CertPathValidatorException { X509CertImpl currCert = null; try { currCert = X509CertImpl.toImpl(cert); } catch (CertificateException ce) { throw new CertPathValidatorException(ce); } // The algorithm constraints of the OCSP trusted responder certificate // does not need to be checked in this 
code. The constraints will be // checked when the responder's certificate is validated. OCSPResponse response = null; CertId certId = null; try { if (issuerCert != null) { certId = new CertId(issuerCert, currCert.getSerialNumberObject()); } else { // must be an anchor name and key certId = new CertId(anchor.getCA(), anchor.getCAPublicKey(), currCert.getSerialNumberObject()); } // check if there is a cached OCSP response available byte[] responseBytes = ocspResponses.get(cert); if (responseBytes != null) { if (debug != null) { debug.println("Found cached OCSP response"); } response = new OCSPResponse(responseBytes); // verify the response byte[] nonce = null; for (Extension ext : ocspExtensions) { if (ext.getId().equals("1.3.6.1.5.5.7.48.1.2")) { nonce = ext.getValue(); } } response.verify(Collections.singletonList(certId), issuerCert, responderCert, params.date(), nonce); } else { URI responderURI = (this.responderURI != null) ? this.responderURI : OCSP.getResponderURI(currCert); if (responderURI == null) { throw new CertPathValidatorException( "Certificate does not specify OCSP responder", null, null, -1); } response = OCSP.check(Collections.singletonList(certId), responderURI, issuerCert, responderCert, null, ocspExtensions); } } catch (IOException e) { throw new CertPathValidatorException( "Unable to determine revocation status due to network error", e, null, -1, BasicReason.UNDETERMINED_REVOCATION_STATUS); } RevocationStatus rs = (RevocationStatus)response.getSingleResponse(certId); RevocationStatus.CertStatus certStatus = rs.getCertStatus(); if (certStatus == RevocationStatus.CertStatus.REVOKED) { Date revocationTime = rs.getRevocationTime(); if (revocationTime.before(params.date())) { Throwable t = new CertificateRevokedException( revocationTime, rs.getRevocationReason(), response.getSignerCertificate().getSubjectX500Principal(), rs.getSingleExtensions()); throw new CertPathValidatorException(t.getMessage(), t, null, -1, BasicReason.REVOKED); } } else if 
(certStatus == RevocationStatus.CertStatus.UNKNOWN) { throw new CertPathValidatorException( "Certificate's revocation status is unknown", null, params.certPath(), -1, BasicReason.UNDETERMINED_REVOCATION_STATUS); } } /* * Removes any non-hexadecimal characters from a string. */ private static final String HEX_DIGITS = "0123456789ABCDEFabcdef"; private static String stripOutSeparators(String value) { char[] chars = value.toCharArray(); StringBuilder hexNumber = new StringBuilder(); for (int i = 0; i < chars.length; i++) { if (HEX_DIGITS.indexOf(chars[i]) != -1) { hexNumber.append(chars[i]); } } return hexNumber.toString(); } /** * Checks that a cert can be used to verify a CRL. * * @param cert an X509Certificate to check * @return a boolean specifying if the cert is allowed to vouch for the * validity of a CRL */ static boolean certCanSignCrl(X509Certificate cert) { // if the cert doesn't include the key usage ext, or // the key usage ext asserts cRLSigning, return true, // otherwise return false. boolean[] keyUsage = cert.getKeyUsage(); if (keyUsage != null) { return keyUsage[6]; } return false; } /** * Internal method that verifies a set of possible_crls, * and sees if each is approved, based on the cert. 
* * @param crls a set of possible CRLs to test for acceptability * @param cert the certificate whose revocation status is being checked * @param signFlag <code>true</code> if prevKey was trusted to sign CRLs * @param prevKey the public key of the issuer of cert * @param reasonsMask the reason code mask * @param trustAnchors a <code>Set</code> of <code>TrustAnchor</code>s> * @return a collection of approved crls (or an empty collection) */ private static final boolean[] ALL_REASONS = {true, true, true, true, true, true, true, true, true}; private Collection<X509CRL> verifyPossibleCRLs(Set<X509CRL> crls, X509Certificate cert, PublicKey prevKey, boolean signFlag, boolean[] reasonsMask, Set<TrustAnchor> anchors) throws CertPathValidatorException { try { X509CertImpl certImpl = X509CertImpl.toImpl(cert); if (debug != null) { debug.println("RevocationChecker.verifyPossibleCRLs: " + "Checking CRLDPs for " + certImpl.getSubjectX500Principal()); } CRLDistributionPointsExtension ext = certImpl.getCRLDistributionPointsExtension(); List<DistributionPoint> points = null; if (ext == null) { // assume a DP with reasons and CRLIssuer fields omitted // and a DP name of the cert issuer. 
// TODO add issuerAltName too X500Name certIssuer = (X500Name)certImpl.getIssuerDN(); DistributionPoint point = new DistributionPoint( new GeneralNames().add(new GeneralName(certIssuer)), null, null); points = Collections.singletonList(point); } else { points = ext.get(CRLDistributionPointsExtension.POINTS); } Set<X509CRL> results = new HashSet<>(); for (DistributionPoint point : points) { for (X509CRL crl : crls) { if (DistributionPointFetcher.verifyCRL( certImpl, point, crl, reasonsMask, signFlag, prevKey, null, params.sigProvider(), anchors, certStores, params.date())) { results.add(crl); } } if (Arrays.equals(reasonsMask, ALL_REASONS)) break; } return results; } catch (CertificateException | CRLException | IOException e) { if (debug != null) { debug.println("Exception while verifying CRL: "+e.getMessage()); e.printStackTrace(); } return Collections.emptySet(); } } /** * We have a cert whose revocation status couldn't be verified by * a CRL issued by the cert that issued the CRL. See if we can * find a valid CRL issued by a separate key that can verify the * revocation status of this certificate. * <p> * Note that this does not provide support for indirect CRLs, * only CRLs signed with a different key (but the same issuer * name) as the certificate being checked. * * @param currCert the <code>X509Certificate</code> to be checked * @param prevKey the <code>PublicKey</code> that failed * @param signFlag <code>true</code> if that key was trusted to sign CRLs * @param stackedCerts a <code>Set</code> of <code>X509Certificate</code>s> * whose revocation status depends on the * non-revoked status of this cert. To avoid * circular dependencies, we assume they're * revoked while checking the revocation * status of this cert. 
* @throws CertPathValidatorException if the cert's revocation status * cannot be verified successfully with another key */ private void verifyWithSeparateSigningKey(X509Certificate cert, PublicKey prevKey, boolean signFlag, Set<X509Certificate> stackedCerts) throws CertPathValidatorException { String msg = "revocation status"; if (debug != null) { debug.println( "RevocationChecker.verifyWithSeparateSigningKey()" + " ---checking " + msg + "..."); } // reject circular dependencies - RFC 3280 is not explicit on how // to handle this, so we feel it is safest to reject them until // the issue is resolved in the PKIX WG. if ((stackedCerts != null) && stackedCerts.contains(cert)) { if (debug != null) { debug.println( "RevocationChecker.verifyWithSeparateSigningKey()" + " circular dependency"); } throw new CertPathValidatorException ("Could not determine revocation status", null, null, -1, BasicReason.UNDETERMINED_REVOCATION_STATUS); } // Try to find another key that might be able to sign // CRLs vouching for this cert. // If prevKey wasn't trusted, maybe we just didn't have the right // path to it. Don't rule that key out. if (!signFlag) { buildToNewKey(cert, null, stackedCerts); } else { buildToNewKey(cert, prevKey, stackedCerts); } } /** * Tries to find a CertPath that establishes a key that can be * used to verify the revocation status of a given certificate. * Ignores keys that have previously been tried. Throws a * CertPathValidatorException if no such key could be found. * * @param currCert the <code>X509Certificate</code> to be checked * @param prevKey the <code>PublicKey</code> of the certificate whose key * cannot be used to vouch for the CRL and should be ignored * @param stackedCerts a <code>Set</code> of <code>X509Certificate</code>s> * whose revocation status depends on the * establishment of this path. 
* @throws CertPathValidatorException on failure */ private static final boolean [] CRL_SIGN_USAGE = { false, false, false, false, false, false, true }; private void buildToNewKey(X509Certificate currCert, PublicKey prevKey, Set<X509Certificate> stackedCerts) throws CertPathValidatorException { if (debug != null) { debug.println("RevocationChecker.buildToNewKey()" + " starting work"); } Set<PublicKey> badKeys = new HashSet<>(); if (prevKey != null) { badKeys.add(prevKey); } X509CertSelector certSel = new RejectKeySelector(badKeys); certSel.setSubject(currCert.getIssuerX500Principal()); certSel.setKeyUsage(CRL_SIGN_USAGE); Set<TrustAnchor> newAnchors = anchor == null ? params.trustAnchors() : Collections.singleton(anchor); PKIXBuilderParameters builderParams; try { builderParams = new PKIXBuilderParameters(newAnchors, certSel); } catch (InvalidAlgorithmParameterException iape) { throw new RuntimeException(iape); // should never occur } builderParams.setInitialPolicies(params.initialPolicies()); builderParams.setCertStores(certStores); builderParams.setExplicitPolicyRequired (params.explicitPolicyRequired()); builderParams.setPolicyMappingInhibited (params.policyMappingInhibited()); builderParams.setAnyPolicyInhibited(params.anyPolicyInhibited()); // Policy qualifiers must be rejected, since we don't have // any way to convey them back to the application. // That's the default, so no need to write code. builderParams.setDate(params.date()); // CertPathCheckers need to be cloned to start from fresh state builderParams.setCertPathCheckers( params.getPKIXParameters().getCertPathCheckers()); builderParams.setSigProvider(params.sigProvider()); // Skip revocation during this build to detect circular // references. But check revocation afterwards, using the // key (or any other that works). 
builderParams.setRevocationEnabled(false); // check for AuthorityInformationAccess extension if (Builder.USE_AIA == true) { X509CertImpl currCertImpl = null; try { currCertImpl = X509CertImpl.toImpl(currCert); } catch (CertificateException ce) { // ignore but log it if (debug != null) { debug.println("RevocationChecker.buildToNewKey: " + "error decoding cert: " + ce); } } AuthorityInfoAccessExtension aiaExt = null; if (currCertImpl != null) { aiaExt = currCertImpl.getAuthorityInfoAccessExtension(); } if (aiaExt != null) { List<AccessDescription> adList = aiaExt.getAccessDescriptions(); if (adList != null) { for (AccessDescription ad : adList) { CertStore cs = URICertStore.getInstance(ad); if (cs != null) { if (debug != null) { debug.println("adding AIAext CertStore"); } builderParams.addCertStore(cs); } } } } } CertPathBuilder builder = null; try { builder = CertPathBuilder.getInstance("PKIX"); } catch (NoSuchAlgorithmException nsae) { throw new CertPathValidatorException(nsae); } while (true) { try { if (debug != null) { debug.println("RevocationChecker.buildToNewKey()" + " about to try build ..."); } PKIXCertPathBuilderResult cpbr = (PKIXCertPathBuilderResult)builder.build(builderParams); if (debug != null) { debug.println("RevocationChecker.buildToNewKey()" + " about to check revocation ..."); } // Now check revocation of all certs in path, assuming that // the stackedCerts are revoked. if (stackedCerts == null) { stackedCerts = new HashSet<X509Certificate>(); } stackedCerts.add(currCert); TrustAnchor ta = cpbr.getTrustAnchor(); PublicKey prevKey2 = ta.getCAPublicKey(); if (prevKey2 == null) { prevKey2 = ta.getTrustedCert().getPublicKey(); } boolean signFlag = true; List<? 
extends Certificate> cpList = cpbr.getCertPath().getCertificates(); try { for (int i = cpList.size()-1; i >= 0; i-- ) { X509Certificate cert = (X509Certificate)cpList.get(i); if (debug != null) { debug.println("RevocationChecker.buildToNewKey()" + " index " + i + " checking " + cert); } checkCRLs(cert, prevKey2, null, signFlag, true, stackedCerts, newAnchors); signFlag = certCanSignCrl(cert); prevKey2 = cert.getPublicKey(); } } catch (CertPathValidatorException cpve) { // ignore it and try to get another key badKeys.add(cpbr.getPublicKey()); continue; } if (debug != null) { debug.println("RevocationChecker.buildToNewKey()" + " got key " + cpbr.getPublicKey()); } // Now check revocation on the current cert using that key and // the corresponding certificate. // If it doesn't check out, try to find a different key. // And if we can't find a key, then return false. PublicKey newKey = cpbr.getPublicKey(); try { checkCRLs(currCert, newKey, (X509Certificate) cpList.get(0), true, false, null, params.trustAnchors()); // If that passed, the cert is OK! return; } catch (CertPathValidatorException cpve) { // If it is revoked, rethrow exception if (cpve.getReason() == BasicReason.REVOKED) { throw cpve; } // Otherwise, ignore the exception and // try to get another key. 
} badKeys.add(newKey); } catch (InvalidAlgorithmParameterException iape) { throw new CertPathValidatorException(iape); } catch (CertPathBuilderException cpbe) { throw new CertPathValidatorException ("Could not determine revocation status", null, null, -1, BasicReason.UNDETERMINED_REVOCATION_STATUS); } } } @Override public RevocationChecker clone() { RevocationChecker copy = (RevocationChecker)super.clone(); // we don't deep-copy the exceptions, but that is ok because they // are never modified after they are instantiated copy.softFailExceptions = new LinkedList<>(softFailExceptions); return copy; } /* * This inner class extends the X509CertSelector to add an additional * check to make sure the subject public key isn't on a particular list. * This class is used by buildToNewKey() to make sure the builder doesn't * end up with a CertPath to a public key that has already been rejected. */ private static class RejectKeySelector extends X509CertSelector { private final Set<PublicKey> badKeySet; /** * Creates a new <code>RejectKeySelector</code>. * * @param badPublicKeys a <code>Set</code> of * <code>PublicKey</code>s that * should be rejected (or <code>null</code> * if no such check should be done) */ RejectKeySelector(Set<PublicKey> badPublicKeys) { this.badKeySet = badPublicKeys; } /** * Decides whether a <code>Certificate</code> should be selected. * * @param cert the <code>Certificate</code> to be checked * @return <code>true</code> if the <code>Certificate</code> should be * selected, <code>false</code> otherwise */ @Override public boolean match(Certificate cert) { if (!super.match(cert)) return(false); if (badKeySet.contains(cert.getPublicKey())) { if (debug != null) debug.println("RejectKeySelector.match: bad key"); return false; } if (debug != null) debug.println("RejectKeySelector.match: returning true"); return true; } /** * Return a printable representation of the <code>CertSelector</code>. 
* * @return a <code>String</code> describing the contents of the * <code>CertSelector</code> */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("RejectKeySelector: [\n"); sb.append(super.toString()); sb.append(badKeySet); sb.append("]"); return sb.toString(); } } }
package org.apache.lucene.search;

/**
 * Copyright 2004 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.Set;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.Bits;

/**
 * A query that matches the union of the documents matched by its disjuncts, scoring each
 * document with the <i>maximum</i> score any single disjunct produced for it, plus
 * {@code tieBreakerMultiplier} times the scores of the remaining matching disjuncts.
 *
 * <p>This is the right tool when the same term is searched across several differently-boosted
 * fields and the fields cannot simply be merged into one: a plain {@link BooleanQuery} would
 * <i>sum</i> the per-field scores, so for the query "albino elephant" a document with "albino"
 * in one field and "elephant" in another could outrank a document where a single field holds
 * the best match. Wrapping the per-field term queries in a DisjunctionMaxQuery (one per term,
 * combined by a BooleanQuery across terms) keeps the primary score tied to the best field.
 *
 * <p>The tie-breaker lets documents matching a term in several fields rank slightly above
 * documents matching it in only the best of those fields, without conflating that with the
 * stronger case of two distinct terms matching.
 */
public class DisjunctionMaxQuery extends Query implements Iterable<Query> {

  /* The subqueries whose per-document maximum we take. */
  private ArrayList<Query> disjuncts = new ArrayList<Query>();

  /* Fraction of each non-maximum disjunct score added into the final score;
   * non-zero values enable tie-breaking. */
  private float tieBreakerMultiplier = 0.0f;

  /**
   * Creates an empty DisjunctionMaxQuery; add disjuncts with {@link #add(Query)}.
   *
   * @param tieBreakerMultiplier each non-maximum disjunct score is multiplied by this value
   *        and added into the final score. If non-zero it should be small (on the order of
   *        0.1), meaning roughly that ten occurrences of a word in a lower-scored field that
   *        is also present in a higher-scored field count as much as a unique word in the
   *        lower-scored field (i.e. one absent from any higher-scored field).
   */
  public DisjunctionMaxQuery(float tieBreakerMultiplier) {
    this.tieBreakerMultiplier = tieBreakerMultiplier;
  }

  /**
   * Creates a DisjunctionMaxQuery over an initial set of disjuncts.
   *
   * @param disjuncts a {@code Collection<Query>} of all the disjuncts to add
   * @param tieBreakerMultiplier the weight given to each matching non-maximum disjunct
   */
  public DisjunctionMaxQuery(Collection<Query> disjuncts, float tieBreakerMultiplier) {
    this.tieBreakerMultiplier = tieBreakerMultiplier;
    add(disjuncts);
  }

  /**
   * Adds one subquery to this disjunction.
   *
   * @param query the disjunct added
   */
  public void add(Query query) {
    disjuncts.add(query);
  }

  /**
   * Adds a collection of disjuncts to this disjunction via {@code Iterable<Query>}.
   *
   * @param disjuncts the queries to add as disjuncts
   */
  public void add(Collection<Query> disjuncts) {
    this.disjuncts.addAll(disjuncts);
  }

  /** @return an {@code Iterator<Query>} over the disjuncts */
  @Override
  public Iterator<Query> iterator() {
    return disjuncts.iterator();
  }

  /** @return the disjuncts (the live internal list, not a copy) */
  public ArrayList<Query> getDisjuncts() {
    return disjuncts;
  }

  /** @return the tie-breaker value applied to non-maximum matches */
  public float getTieBreakerMultiplier() {
    return tieBreakerMultiplier;
  }

  /**
   * Expert: the Weight for DisjunctionMaxQuery, used to normalize, score and explain
   * these queries.
   *
   * <p>NOTE: this API and implementation is subject to change suddenly in the next release.</p>
   */
  protected class DisjunctionMaxWeight extends Weight {

    /** Sub-weights, kept in 1-1 correspondence with {@code disjuncts}. */
    protected ArrayList<Weight> weights = new ArrayList<Weight>();

    /** Builds this weight by recursively creating a weight for every disjunct. */
    public DisjunctionMaxWeight(IndexSearcher searcher) throws IOException {
      for (Query disjunct : disjuncts) {
        weights.add(disjunct.createWeight(searcher));
      }
    }

    /** @return the DisjunctionMaxQuery this weight was created for */
    @Override
    public Query getQuery() {
      return DisjunctionMaxQuery.this;
    }

    /** Computes the normalization value: the max sub-value plus the tie-broken remainder,
     *  scaled by our squared boost. */
    @Override
    public float getValueForNormalization() throws IOException {
      float max = 0.0f;
      float sum = 0.0f;
      for (Weight weight : weights) {
        float sub = weight.getValueForNormalization();
        sum += sub;
        max = Math.max(max, sub);
      }
      float boost = getBoost();
      return (((sum - max) * tieBreakerMultiplier * tieBreakerMultiplier) + max) * boost * boost;
    }

    /** Pushes the normalization factor down to the sub-weights, folding in our boost. */
    @Override
    public void normalize(float norm, float topLevelBoost) {
      topLevelBoost *= getBoost(); // incorporate our boost
      for (Weight weight : weights) {
        weight.normalize(norm, topLevelBoost);
      }
    }

    /** Creates the scorer for our associated DisjunctionMaxQuery on one segment,
     *  or {@code null} when no disjunct can match there. */
    @Override
    public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
                         boolean topScorer, Bits acceptDocs) throws IOException {
      Scorer[] subScorers = new Scorer[weights.size()];
      int count = 0;
      for (Weight weight : weights) {
        // we will advance() the subscorers ourselves
        Scorer subScorer = weight.scorer(context, true, false, acceptDocs);
        if (subScorer != null) {
          subScorers[count++] = subScorer;
        }
      }
      if (count == 0) {
        return null; // no scorer had any documents
      }
      return new DisjunctionMaxScorer(this, tieBreakerMultiplier, subScorers, count);
    }

    /** Explains the score we computed for {@code doc}. */
    @Override
    public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
      if (disjuncts.size() == 1) {
        return weights.get(0).explain(context, doc);
      }
      ComplexExplanation result = new ComplexExplanation();
      float max = 0.0f;
      float sum = 0.0f;
      result.setDescription(tieBreakerMultiplier == 0.0f
          ? "max of:"
          : "max plus " + tieBreakerMultiplier + " times others of:");
      for (Weight weight : weights) {
        Explanation e = weight.explain(context, doc);
        if (e.isMatch()) {
          result.setMatch(Boolean.TRUE);
          result.addDetail(e);
          sum += e.getValue();
          max = Math.max(max, e.getValue());
        }
      }
      result.setValue(max + (sum - max) * tieBreakerMultiplier);
      return result;
    }
  } // end of DisjunctionMaxWeight inner class

  /** Creates the Weight used to score us. */
  @Override
  public Weight createWeight(IndexSearcher searcher) throws IOException {
    return new DisjunctionMaxWeight(searcher);
  }

  /**
   * Optimizes our representation and that of our subqueries.
   *
   * @param reader the IndexReader we query
   * @return an optimized copy of us (which may be {@code this} when nothing can be optimized)
   */
  @Override
  public Query rewrite(IndexReader reader) throws IOException {
    int size = disjuncts.size();
    if (size == 1) {
      // a single disjunct collapses to itself, with our boost folded in
      Query single = disjuncts.get(0);
      Query rewritten = single.rewrite(reader);
      if (getBoost() != 1.0f) {
        if (rewritten == single) {
          rewritten = rewritten.clone();
        }
        rewritten.setBoost(getBoost() * rewritten.getBoost());
      }
      return rewritten;
    }
    // copy-on-write: only clone ourselves if some disjunct actually rewrote
    DisjunctionMaxQuery copy = null;
    for (int i = 0; i < size; i++) {
      Query clause = disjuncts.get(i);
      Query rewritten = clause.rewrite(reader);
      if (rewritten != clause) {
        if (copy == null) {
          copy = this.clone();
        }
        copy.disjuncts.set(i, rewritten);
      }
    }
    return copy == null ? this : copy;
  }

  /**
   * Creates a shallow copy of us — used in rewriting when necessary.
   *
   * @return a copy of us (reusing, not copying, our subqueries)
   */
  @Override
  @SuppressWarnings("unchecked")
  public DisjunctionMaxQuery clone() {
    DisjunctionMaxQuery copy = (DisjunctionMaxQuery) super.clone();
    copy.disjuncts = (ArrayList<Query>) this.disjuncts.clone();
    return copy;
  }

  // inherit javadoc
  @Override
  public void extractTerms(Set<Term> terms) {
    for (Query query : disjuncts) {
      query.extractTerms(terms);
    }
  }

  /**
   * Prettyprints us.
   *
   * @param field the field to which we are applied
   * @return a string of the form "(disjunct1 | disjunct2 | ... | disjunctn)^boost"
   */
  @Override
  public String toString(String field) {
    StringBuilder sb = new StringBuilder();
    sb.append("(");
    int size = disjuncts.size();
    for (int i = 0; i < size; i++) {
      Query subquery = disjuncts.get(i);
      if (subquery instanceof BooleanQuery) { // wrap sub-bools in parens
        sb.append("(");
        sb.append(subquery.toString(field));
        sb.append(")");
      } else {
        sb.append(subquery.toString(field));
      }
      if (i != size - 1) {
        sb.append(" | ");
      }
    }
    sb.append(")");
    if (tieBreakerMultiplier != 0.0f) {
      sb.append("~");
      sb.append(tieBreakerMultiplier);
    }
    if (getBoost() != 1.0) {
      sb.append("^");
      sb.append(getBoost());
    }
    return sb.toString();
  }

  /**
   * Returns true iff we represent the same query as {@code o}.
   *
   * @param o another object
   * @return true iff {@code o} is a DisjunctionMaxQuery with the same boost and the same
   *         subqueries, in the same order, as us
   */
  @Override
  public boolean equals(Object o) {
    if (!(o instanceof DisjunctionMaxQuery)) {
      return false;
    }
    DisjunctionMaxQuery other = (DisjunctionMaxQuery) o;
    return this.getBoost() == other.getBoost()
        && this.tieBreakerMultiplier == other.tieBreakerMultiplier
        && this.disjuncts.equals(other.disjuncts);
  }

  /**
   * Computes a hash code for hashing us.
   *
   * @return the hash code
   */
  @Override
  public int hashCode() {
    return Float.floatToIntBits(getBoost())
        + Float.floatToIntBits(tieBreakerMultiplier)
        + disjuncts.hashCode();
  }
}
/* * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.dao.mysql; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.netflix.conductor.common.metadata.events.EventExecution; import com.netflix.conductor.common.metadata.tasks.PollData; import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.TaskDef; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.core.execution.ApplicationException; import com.netflix.conductor.dao.ExecutionDAO; import com.netflix.conductor.metrics.Monitors; import javax.inject.Inject; import javax.inject.Singleton; import javax.sql.DataSource; import java.sql.Connection; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Comparator; import java.util.Date; import java.util.LinkedList; import java.util.List; import java.util.Optional; import java.util.stream.Collectors; @Singleton public class MySQLExecutionDAO extends MySQLBaseDAO implements ExecutionDAO { private static final String ARCHIVED_FIELD = "archived"; private static final String RAW_JSON_FIELD = "rawJSON"; @Inject public MySQLExecutionDAO(ObjectMapper objectMapper, DataSource dataSource) { super(objectMapper, dataSource); } private static String dateStr(Long timeInMs) { Date date = new 
Date(timeInMs); return dateStr(date); } private static String dateStr(Date date) { SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd"); return format.format(date); } @Override public List<Task> getPendingTasksByWorkflow(String taskDefName, String workflowId) { // @formatter:off String GET_IN_PROGRESS_TASKS_FOR_WORKFLOW = "SELECT json_data FROM task_in_progress tip " + "INNER JOIN task t ON t.task_id = tip.task_id " + "WHERE task_def_name = ? AND workflow_id = ?"; // @formatter:on return queryWithTransaction(GET_IN_PROGRESS_TASKS_FOR_WORKFLOW, q -> q.addParameter(taskDefName).addParameter(workflowId).executeAndFetch(Task.class)); } @Override public List<Task> getTasks(String taskDefName, String startKey, int count) { List<Task> tasks = new ArrayList<>(count); List<Task> pendingTasks = getPendingTasksForTaskType(taskDefName); boolean startKeyFound = startKey == null; int found = 0; for (Task pendingTask : pendingTasks) { if (!startKeyFound) { if (pendingTask.getTaskId().equals(startKey)) { startKeyFound = true; // noinspection ConstantConditions if (startKey != null) { continue; } } } if (startKeyFound && found < count) { tasks.add(pendingTask); found++; } } return tasks; } private static String taskKey(Task task) { return task.getReferenceTaskName() + "_" + task.getRetryCount(); } @Override public List<Task> createTasks(List<Task> tasks) { List<Task> created = Lists.newArrayListWithCapacity(tasks.size()); withTransaction(connection -> { for (Task task : tasks) { validate(task); task.setScheduledTime(System.currentTimeMillis()); final String taskKey = taskKey(task); boolean scheduledTaskAdded = addScheduledTask(connection, task, taskKey); if (!scheduledTaskAdded) { logger.trace("Task already scheduled, skipping the run " + task.getTaskId() + ", ref=" + task.getReferenceTaskName() + ", key=" + taskKey); continue; } insertOrUpdateTaskData(connection, task); addWorkflowToTaskMapping(connection, task); addTaskInProgress(connection, task); updateTask(connection, 
task); created.add(task); } }); return created; } @Override public void updateTask(Task task) { withTransaction(connection -> updateTask(connection, task)); } /** * This is a dummy implementation and this feature is not for Mysql backed * Conductor * * @param task: which needs to be evaluated whether it is rateLimited or not * @return */ @Override public boolean exceedsRateLimitPerFrequency(Task task) { return false; } @Override public boolean exceedsInProgressLimit(Task task) { Optional<TaskDef> taskDefinition = task.getTaskDefinition(); if (!taskDefinition.isPresent()) { return false; } TaskDef taskDef = taskDefinition.get(); int limit = taskDef.concurrencyLimit(); if (limit <= 0) { return false; } long current = getInProgressTaskCount(task.getTaskDefName()); if (current >= limit) { Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit); return true; } logger.info("Task execution count for {}: limit={}, current={}", task.getTaskDefName(), limit, getInProgressTaskCount(task.getTaskDefName())); String taskId = task.getTaskId(); List<String> tasksInProgressInOrderOfArrival = findAllTasksInProgressInOrderOfArrival(task, limit); boolean rateLimited = !tasksInProgressInOrderOfArrival.contains(taskId); if (rateLimited) { logger.info("Task execution count limited. 
{}, limit {}, current {}", task.getTaskDefName(), limit, getInProgressTaskCount(task.getTaskDefName())); Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit); } return rateLimited; } @Override public void updateTasks(List<Task> tasks) { withTransaction(connection -> tasks.forEach(task -> updateTask(connection, task))); } @Override public void removeTask(String taskId) { Task task = getTask(taskId); if (task == null) { logger.warn("No such Task by id {}", taskId); return; } final String taskKey = taskKey(task); withTransaction(connection -> { removeScheduledTask(connection, task, taskKey); removeWorkflowToTaskMapping(connection, task); removeTaskInProgress(connection, task); removeTaskData(connection, task); }); } @Override public Task getTask(String taskId) { String GET_TASK = "SELECT json_data FROM task WHERE task_id = ?"; return queryWithTransaction(GET_TASK, q -> q.addParameter(taskId).executeAndFetchFirst(Task.class)); } @Override public List<Task> getTasks(List<String> taskIds) { if (taskIds.isEmpty()) { return Lists.newArrayList(); } return getWithTransaction(c -> getTasks(c, taskIds)); } @Override public List<Task> getPendingTasksForTaskType(String taskName) { Preconditions.checkNotNull(taskName, "task name cannot be null"); // @formatter:off String GET_IN_PROGRESS_TASKS_FOR_TYPE = "SELECT json_data FROM task_in_progress tip " + "INNER JOIN task t ON t.task_id = tip.task_id " + "WHERE task_def_name = ?"; // @formatter:on return queryWithTransaction(GET_IN_PROGRESS_TASKS_FOR_TYPE, q -> q.addParameter(taskName).executeAndFetch(Task.class)); } @Override public List<Task> getTasksForWorkflow(String workflowId) { String GET_TASKS_FOR_WORKFLOW = "SELECT task_id FROM workflow_to_task WHERE workflow_id = ?"; return getWithTransaction(tx -> query(tx, GET_TASKS_FOR_WORKFLOW, q -> { List<String> taskIds = q.addParameter(workflowId).executeScalarList(String.class); return getTasks(tx, taskIds); })); } @Override public String 
createWorkflow(Workflow workflow) { workflow.setCreateTime(System.currentTimeMillis()); return insertOrUpdateWorkflow(workflow, false); } @Override public String updateWorkflow(Workflow workflow) { workflow.setUpdateTime(System.currentTimeMillis()); return insertOrUpdateWorkflow(workflow, true); } @Override public void removeWorkflow(String workflowId) { Workflow workflow = getWorkflow(workflowId, true); withTransaction(connection -> { removeWorkflowDefToWorkflowMapping(connection, workflow); removeWorkflow(connection, workflowId); removePendingWorkflow(connection, workflow.getWorkflowName(), workflowId); }); for (Task task : workflow.getTasks()) { removeTask(task.getTaskId()); } } @Override public void removeFromPendingWorkflow(String workflowType, String workflowId) { withTransaction(connection -> removePendingWorkflow(connection, workflowType, workflowId)); } @Override public Workflow getWorkflow(String workflowId) { return getWorkflow(workflowId, true); } @Override public Workflow getWorkflow(String workflowId, boolean includeTasks) { Workflow workflow = getWithTransaction(tx -> readWorkflow(tx, workflowId)); if (workflow != null) { if (includeTasks) { List<Task> tasks = getTasksForWorkflow(workflowId); tasks.sort(Comparator.comparingLong(Task::getScheduledTime).thenComparingInt(Task::getSeq)); workflow.setTasks(tasks); } } return workflow; } @Override public List<String> getRunningWorkflowIds(String workflowName) { Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); String GET_PENDING_WORKFLOW_IDS = "SELECT workflow_id FROM workflow_pending WHERE workflow_type = ?"; return queryWithTransaction(GET_PENDING_WORKFLOW_IDS, q -> q.addParameter(workflowName).executeScalarList(String.class)); } @Override public List<Workflow> getPendingWorkflowsByType(String workflowName) { Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); return 
getRunningWorkflowIds(workflowName).stream().map(this::getWorkflow).collect(Collectors.toList()); } @Override public long getPendingWorkflowCount(String workflowName) { Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); String GET_PENDING_WORKFLOW_COUNT = "SELECT COUNT(*) FROM workflow_pending WHERE workflow_type = ?"; return queryWithTransaction(GET_PENDING_WORKFLOW_COUNT, q -> q.addParameter(workflowName).executeCount()); } @Override public long getInProgressTaskCount(String taskDefName) { String GET_IN_PROGRESS_TASK_COUNT = "SELECT COUNT(*) FROM task_in_progress WHERE task_def_name = ? AND in_progress_status = true"; return queryWithTransaction(GET_IN_PROGRESS_TASK_COUNT, q -> q.addParameter(taskDefName).executeCount()); } @Override public List<Workflow> getWorkflowsByType(String workflowName, Long startTime, Long endTime) { Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); Preconditions.checkNotNull(startTime, "startTime cannot be null"); Preconditions.checkNotNull(endTime, "endTime cannot be null"); List<Workflow> workflows = new LinkedList<>(); withTransaction(tx -> { // @formatter:off String GET_ALL_WORKFLOWS_FOR_WORKFLOW_DEF = "SELECT workflow_id FROM workflow_def_to_workflow " + "WHERE workflow_def = ? AND date_str BETWEEN ? 
AND ?"; // @formatter:on List<String> workflowIds = query(tx, GET_ALL_WORKFLOWS_FOR_WORKFLOW_DEF, q -> q.addParameter(workflowName) .addParameter(dateStr(startTime)).addParameter(dateStr(endTime)).executeScalarList(String.class)); workflowIds.forEach(workflowId -> { try { Workflow wf = getWorkflow(workflowId); if (wf.getCreateTime() >= startTime && wf.getCreateTime() <= endTime) { workflows.add(wf); } } catch (Exception e) { logger.error("Unable to load workflow id {} with name {}", workflowId, workflowName, e); } }); }); return workflows; } @Override public List<Workflow> getWorkflowsByCorrelationId(String correlationId, boolean includeTasks) { Preconditions.checkNotNull(correlationId, "correlationId cannot be null"); String GET_WORKFLOWS_BY_CORRELATION_ID = "SELECT workflow_id FROM workflow WHERE correlation_id = ?"; return queryWithTransaction(GET_WORKFLOWS_BY_CORRELATION_ID, q -> q.addParameter(correlationId).executeScalarList(String.class).stream() .map(workflowId -> getWorkflow(workflowId, includeTasks)).collect(Collectors.toList())); } @Override public boolean canSearchAcrossWorkflows() { return true; } @Override public boolean addEventExecution(EventExecution eventExecution) { try { return getWithTransaction(tx -> insertEventExecution(tx, eventExecution)); } catch (Exception e) { throw new ApplicationException(ApplicationException.Code.BACKEND_ERROR, "Unable to add event execution " + eventExecution.getId(), e); } } @Override public void removeEventExecution(EventExecution eventExecution) { try { withTransaction(tx -> removeEventExecution(tx, eventExecution)); } catch (Exception e) { throw new ApplicationException(ApplicationException.Code.BACKEND_ERROR, "Unable to remove event execution " + eventExecution.getId(), e); } } @Override public void updateEventExecution(EventExecution eventExecution) { try { withTransaction(tx -> updateEventExecution(tx, eventExecution)); } catch (Exception e) { throw new 
ApplicationException(ApplicationException.Code.BACKEND_ERROR, "Unable to update event execution " + eventExecution.getId(), e); } } @Override public List<EventExecution> getEventExecutions(String eventHandlerName, String eventName, String messageId, int max) { try { List<EventExecution> executions = Lists.newLinkedList(); withTransaction(tx -> { for (int i = 0; i < max; i++) { String executionId = messageId + "_" + i; // see EventProcessor.handle to understand how the // execution id is set EventExecution ee = readEventExecution(tx, eventHandlerName, eventName, messageId, executionId); if (ee == null) { break; } executions.add(ee); } }); return executions; } catch (Exception e) { String message = String.format( "Unable to get event executions for eventHandlerName=%s, eventName=%s, messageId=%s", eventHandlerName, eventName, messageId); throw new ApplicationException(ApplicationException.Code.BACKEND_ERROR, message, e); } } @Override public void updateLastPoll(String taskDefName, String domain, String workerId) { Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null"); PollData pollData = new PollData(taskDefName, domain, workerId, System.currentTimeMillis()); String effectiveDomain = (domain == null) ? "DEFAULT" : domain; withTransaction(tx -> insertOrUpdatePollData(tx, pollData, effectiveDomain)); } @Override public PollData getPollData(String taskDefName, String domain) { Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null"); String effectiveDomain = (domain == null) ? 
"DEFAULT" : domain; return getWithTransaction(tx -> readPollData(tx, taskDefName, effectiveDomain)); } @Override public List<PollData> getPollData(String taskDefName) { Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null"); return readAllPollData(taskDefName); } private List<Task> getTasks(Connection connection, List<String> taskIds) { if (taskIds.isEmpty()) { return Lists.newArrayList(); } // Generate a formatted query string with a variable number of bind params based // on taskIds.size() final String GET_TASKS_FOR_IDS = String.format( "SELECT json_data FROM task WHERE task_id IN (%s) AND json_data IS NOT NULL", Query.generateInBindings(taskIds.size())); return query(connection, GET_TASKS_FOR_IDS, q -> q.addParameters(taskIds).executeAndFetch(Task.class)); } private String insertOrUpdateWorkflow(Workflow workflow, boolean update) { Preconditions.checkNotNull(workflow, "workflow object cannot be null"); boolean terminal = workflow.getStatus().isTerminal(); if (terminal) { workflow.setEndTime(System.currentTimeMillis()); } List<Task> tasks = workflow.getTasks(); workflow.setTasks(Lists.newLinkedList()); withTransaction(tx -> { if (!update) { addWorkflow(tx, workflow); addWorkflowDefToWorkflowMapping(tx, workflow); } else { updateWorkflow(tx, workflow); } if (terminal) { removePendingWorkflow(tx, workflow.getWorkflowName(), workflow.getWorkflowId()); } else { addPendingWorkflow(tx, workflow.getWorkflowName(), workflow.getWorkflowId()); } }); workflow.setTasks(tasks); return workflow.getWorkflowId(); } private void updateTask(Connection connection, Task task) { task.setUpdateTime(System.currentTimeMillis()); if (task.getStatus() != null && task.getStatus().isTerminal() && task.getEndTime() == 0) { task.setEndTime(System.currentTimeMillis()); } Optional<TaskDef> taskDefinition = task.getTaskDefinition(); if (taskDefinition.isPresent() && taskDefinition.get().concurrencyLimit() > 0) { boolean inProgress = task.getStatus() != null && 
task.getStatus().equals(Task.Status.IN_PROGRESS); updateInProgressStatus(connection, task, inProgress); } insertOrUpdateTaskData(connection, task); if (task.getStatus() != null && task.getStatus().isTerminal()) { removeTaskInProgress(connection, task); } addWorkflowToTaskMapping(connection, task); } private Workflow readWorkflow(Connection connection, String workflowId) { String GET_WORKFLOW = "SELECT json_data FROM workflow WHERE workflow_id = ?"; return query(connection, GET_WORKFLOW, q -> q.addParameter(workflowId).executeAndFetchFirst(Workflow.class)); } private void addWorkflow(Connection connection, Workflow workflow) { String INSERT_WORKFLOW = "INSERT INTO workflow (workflow_id, correlation_id, json_data) VALUES (?, ?, ?)"; execute(connection, INSERT_WORKFLOW, q -> q.addParameter(workflow.getWorkflowId()) .addParameter(workflow.getCorrelationId()).addJsonParameter(workflow).executeUpdate()); } private void updateWorkflow(Connection connection, Workflow workflow) { String UPDATE_WORKFLOW = "UPDATE workflow SET json_data = ?, modified_on = CURRENT_TIMESTAMP WHERE workflow_id = ?"; execute(connection, UPDATE_WORKFLOW, q -> q.addJsonParameter(workflow).addParameter(workflow.getWorkflowId()).executeUpdate()); } private void removeWorkflow(Connection connection, String workflowId) { String REMOVE_WORKFLOW = "DELETE FROM workflow WHERE workflow_id = ?"; execute(connection, REMOVE_WORKFLOW, q -> q.addParameter(workflowId).executeDelete()); } private void addPendingWorkflow(Connection connection, String workflowType, String workflowId) { String INSERT_PENDING_WORKFLOW = "INSERT IGNORE INTO workflow_pending (workflow_type, workflow_id) VALUES (?, ?)"; execute(connection, INSERT_PENDING_WORKFLOW, q -> q.addParameter(workflowType).addParameter(workflowId).executeUpdate()); } private void removePendingWorkflow(Connection connection, String workflowType, String workflowId) { String REMOVE_PENDING_WORKFLOW = "DELETE FROM workflow_pending WHERE workflow_type = ? 
AND workflow_id = ?"; execute(connection, REMOVE_PENDING_WORKFLOW, q -> q.addParameter(workflowType).addParameter(workflowId).executeDelete()); } private void insertOrUpdateTaskData(Connection connection, Task task) { String INSERT_TASK = "INSERT INTO task (task_id, json_data, modified_on) VALUES (?, ?, CURRENT_TIMESTAMP) ON DUPLICATE KEY UPDATE json_data=VALUES(json_data), modified_on=VALUES(modified_on)"; execute(connection, INSERT_TASK, q -> q.addParameter(task.getTaskId()).addJsonParameter(task).executeUpdate()); } private void removeTaskData(Connection connection, Task task) { String REMOVE_TASK = "DELETE FROM task WHERE task_id = ?"; execute(connection, REMOVE_TASK, q -> q.addParameter(task.getTaskId()).executeDelete()); } private void addWorkflowToTaskMapping(Connection connection, Task task) { String INSERT_WORKFLOW_TO_TASK = "INSERT IGNORE INTO workflow_to_task (workflow_id, task_id) VALUES (?, ?)"; execute(connection, INSERT_WORKFLOW_TO_TASK, q -> q.addParameter(task.getWorkflowInstanceId()).addParameter(task.getTaskId()).executeUpdate()); } private void removeWorkflowToTaskMapping(Connection connection, Task task) { String REMOVE_WORKFLOW_TO_TASK = "DELETE FROM workflow_to_task WHERE workflow_id = ? 
AND task_id = ?"; execute(connection, REMOVE_WORKFLOW_TO_TASK, q -> q.addParameter(task.getWorkflowInstanceId()).addParameter(task.getTaskId()).executeDelete()); } private void addWorkflowDefToWorkflowMapping(Connection connection, Workflow workflow) { String INSERT_WORKFLOW_DEF_TO_WORKFLOW = "INSERT INTO workflow_def_to_workflow (workflow_def, date_str, workflow_id) VALUES (?, ?, ?)"; execute(connection, INSERT_WORKFLOW_DEF_TO_WORKFLOW, q -> q.addParameter(workflow.getWorkflowName()).addParameter(dateStr(workflow.getCreateTime())) .addParameter(workflow.getWorkflowId()).executeUpdate()); } private void removeWorkflowDefToWorkflowMapping(Connection connection, Workflow workflow) { String REMOVE_WORKFLOW_DEF_TO_WORKFLOW = "DELETE FROM workflow_def_to_workflow WHERE workflow_def = ? AND date_str = ? AND workflow_id = ?"; execute(connection, REMOVE_WORKFLOW_DEF_TO_WORKFLOW, q -> q.addParameter(workflow.getWorkflowName()).addParameter(dateStr(workflow.getCreateTime())) .addParameter(workflow.getWorkflowId()).executeUpdate()); } @VisibleForTesting boolean addScheduledTask(Connection connection, Task task, String taskKey) { final String INSERT_IGNORE_SCHEDULED_TASK = "INSERT IGNORE INTO task_scheduled (workflow_id, task_key, task_id) VALUES (?, ?, ?)"; int count = query(connection, INSERT_IGNORE_SCHEDULED_TASK, q -> q.addParameter(task.getWorkflowInstanceId()) .addParameter(taskKey).addParameter(task.getTaskId()).executeUpdate()); return count > 0; } private void removeScheduledTask(Connection connection, Task task, String taskKey) { String REMOVE_SCHEDULED_TASK = "DELETE FROM task_scheduled WHERE workflow_id = ? AND task_key = ?"; execute(connection, REMOVE_SCHEDULED_TASK, q -> q.addParameter(task.getWorkflowInstanceId()).addParameter(taskKey).executeDelete()); } private void addTaskInProgress(Connection connection, Task task) { String EXISTS_IN_PROGRESS_TASK = "SELECT EXISTS(SELECT 1 FROM task_in_progress WHERE task_def_name = ? 
AND task_id = ?)"; boolean exist = query(connection, EXISTS_IN_PROGRESS_TASK, q -> q.addParameter(task.getTaskDefName()).addParameter(task.getTaskId()).exists()); if (!exist) { String INSERT_IN_PROGRESS_TASK = "INSERT INTO task_in_progress (task_def_name, task_id, workflow_id) VALUES (?, ?, ?)"; execute(connection, INSERT_IN_PROGRESS_TASK, q -> q.addParameter(task.getTaskDefName()) .addParameter(task.getTaskId()).addParameter(task.getWorkflowInstanceId()).executeUpdate()); } } private void removeTaskInProgress(Connection connection, Task task) { String REMOVE_IN_PROGRESS_TASK = "DELETE FROM task_in_progress WHERE task_def_name = ? AND task_id = ?"; execute(connection, REMOVE_IN_PROGRESS_TASK, q -> q.addParameter(task.getTaskDefName()).addParameter(task.getTaskId()).executeUpdate()); } private void updateInProgressStatus(Connection connection, Task task, boolean inProgress) { String UPDATE_IN_PROGRESS_TASK_STATUS = "UPDATE task_in_progress SET in_progress_status = ?, modified_on = CURRENT_TIMESTAMP " + "WHERE task_def_name = ? 
AND task_id = ?"; execute(connection, UPDATE_IN_PROGRESS_TASK_STATUS, q -> q.addParameter(inProgress) .addParameter(task.getTaskDefName()).addParameter(task.getTaskId()).executeUpdate()); } private boolean insertEventExecution(Connection connection, EventExecution eventExecution) { String INSERT_EVENT_EXECUTION = "INSERT INTO event_execution (event_handler_name, event_name, message_id, execution_id, json_data) " + "VALUES (?, ?, ?, ?, ?)"; int count = query(connection, INSERT_EVENT_EXECUTION, q -> q.addParameter(eventExecution.getName()).addParameter(eventExecution.getEvent()) .addParameter(eventExecution.getMessageId()).addParameter(eventExecution.getId()) .addJsonParameter(eventExecution).executeUpdate()); return count > 0; } private void updateEventExecution(Connection connection, EventExecution eventExecution) { // @formatter:off String UPDATE_EVENT_EXECUTION = "UPDATE event_execution SET " + "json_data = ?, " + "modified_on = CURRENT_TIMESTAMP " + "WHERE event_handler_name = ? " + "AND event_name = ? " + "AND message_id = ? " + "AND execution_id = ?"; // @formatter:on execute(connection, UPDATE_EVENT_EXECUTION, q -> q.addJsonParameter(eventExecution).addParameter(eventExecution.getName()) .addParameter(eventExecution.getEvent()).addParameter(eventExecution.getMessageId()) .addParameter(eventExecution.getId()).executeUpdate()); } private void removeEventExecution(Connection connection, EventExecution eventExecution) { String REMOVE_EVENT_EXECUTION = "DELETE FROM event_execution " + "WHERE event_handler_name = ? " + "AND event_name = ? " + "AND message_id = ? 
" + "AND execution_id = ?"; execute(connection, REMOVE_EVENT_EXECUTION, q -> q.addParameter(eventExecution.getName()).addParameter(eventExecution.getEvent()) .addParameter(eventExecution.getMessageId()).addParameter(eventExecution.getId()) .executeUpdate()); } private EventExecution readEventExecution(Connection connection, String eventHandlerName, String eventName, String messageId, String executionId) { // @formatter:off String GET_EVENT_EXECUTION = "SELECT json_data FROM event_execution " + "WHERE event_handler_name = ? " + "AND event_name = ? " + "AND message_id = ? " + "AND execution_id = ?"; // @formatter:on return query(connection, GET_EVENT_EXECUTION, q -> q.addParameter(eventHandlerName).addParameter(eventName) .addParameter(messageId).addParameter(executionId).executeAndFetchFirst(EventExecution.class)); } private void insertOrUpdatePollData(Connection connection, PollData pollData, String domain) { String INSERT_POLL_DATA = "INSERT INTO poll_data (queue_name, domain, json_data, modified_on) VALUES (?, ?, ?, CURRENT_TIMESTAMP) ON DUPLICATE KEY UPDATE json_data=VALUES(json_data), modified_on=VALUES(modified_on)"; execute(connection, INSERT_POLL_DATA, q -> q.addParameter(pollData.getQueueName()).addParameter(domain) .addJsonParameter(pollData).executeUpdate()); } private PollData readPollData(Connection connection, String queueName, String domain) { String GET_POLL_DATA = "SELECT json_data FROM poll_data WHERE queue_name = ? 
AND domain = ?"; return query(connection, GET_POLL_DATA, q -> q.addParameter(queueName).addParameter(domain).executeAndFetchFirst(PollData.class)); } private List<PollData> readAllPollData(String queueName) { String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data WHERE queue_name = ?"; return queryWithTransaction(GET_ALL_POLL_DATA, q -> q.addParameter(queueName).executeAndFetch(PollData.class)); } private List<String> findAllTasksInProgressInOrderOfArrival(Task task, int limit) { String GET_IN_PROGRESS_TASKS_WITH_LIMIT = "SELECT task_id FROM task_in_progress WHERE task_def_name = ? ORDER BY id LIMIT ?"; return queryWithTransaction(GET_IN_PROGRESS_TASKS_WITH_LIMIT, q -> q.addParameter(task.getTaskDefName()).addParameter(limit).executeScalarList(String.class)); } private void validate(Task task) { Preconditions.checkNotNull(task, "task object cannot be null"); Preconditions.checkNotNull(task.getTaskId(), "Task id cannot be null"); Preconditions.checkNotNull(task.getWorkflowInstanceId(), "Workflow instance id cannot be null"); Preconditions.checkNotNull(task.getReferenceTaskName(), "Task reference name cannot be null"); } }
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.beam.sdk.io; import static com.google.common.base.Preconditions.checkNotNull; import com.google.auto.value.AutoValue; import com.google.common.annotations.VisibleForTesting; import javax.annotation.Nullable; import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import org.apache.beam.sdk.runners.PipelineRunner; import org.apache.beam.sdk.transforms.PTransform; import org.apache.beam.sdk.transforms.display.DisplayData; import org.apache.beam.sdk.values.PBegin; import org.apache.beam.sdk.values.PCollection; import org.apache.beam.sdk.values.PDone; /** Transforms for reading and writing XML files using JAXB mappers. */ public class XmlIO { // CHECKSTYLE.OFF: JavadocStyle /** * Reads XML files. This source reads one or more XML files and creates a {@link PCollection} of a * given type. Please note the example given below. * * <p>The XML file must be of the following form, where {@code root} and {@code record} are XML * element names that are defined by the user: * * <pre>{@code * <root> * <record> ... </record> * <record> ... </record> * <record> ... </record> * ... * <record> ... 
</record> * </root> * }</pre> * * <p>Basically, the XML document should contain a single root element with an inner list * consisting entirely of record elements. The records may contain arbitrary XML content; however, * that content <b>must not</b> contain the start {@code <record>} or end {@code </record>} tags. * This restriction enables reading from large XML files in parallel from different offsets in the * file. * * <p>Root and/or record elements may additionally contain an arbitrary number of XML attributes. * Additionally users must provide a class of a JAXB annotated Java type that can be used convert * records into Java objects and vice versa using JAXB marshalling/unmarshalling mechanisms. * Reading the source will generate a {@code PCollection} of the given JAXB annotated Java type. * Optionally users may provide a minimum size of a bundle that should be created for the source. * * <p>The following example shows how to use this method in a Beam pipeline: * * <pre>{@code * PCollection<String> output = p.apply(XmlIO.<Record>read() * .from(file.toPath().toString()) * .withRootElement("root") * .withRecordElement("record") * .withRecordClass(Record.class)); * }</pre> * * <p>Currently, only XML files that use single-byte characters are supported. Using a file that * contains multi-byte characters may result in data loss or duplication. * * <p>To use this method: * * <ol> * <li>Explicitly declare a dependency on org.codehaus.woodstox:stax2-api * <li>Include a compatible implementation on the classpath at run-time, such as * org.codehaus.woodstox:woodstox-core-asl * </ol> * * <p>These dependencies have been declared as optional in the sdks/java/core/pom.xml file of * Apache Beam. * * <h3>Permissions</h3> * * <p>Permission requirements depend on the {@link org.apache.beam.sdk.runners.PipelineRunner * PipelineRunner} that is used to execute the Beam pipeline. 
Please refer to the documentation of * corresponding {@link PipelineRunner PipelineRunners} for more details. * * @param <T> Type of the objects that represent the records of the XML file. The {@code * PCollection} generated by this source will be of this type. */ // CHECKSTYLE.ON: JavadocStyle public static <T> Read<T> read() { return new AutoValue_XmlIO_Read.Builder<T>() .setMinBundleSize(Read.DEFAULT_MIN_BUNDLE_SIZE) .setCompressionType(Read.CompressionType.AUTO) .build(); } // CHECKSTYLE.OFF: JavadocStyle /** * A {@link FileBasedSink} that outputs records as XML-formatted elements. Writes a {@link * PCollection} of records from JAXB-annotated classes to a single file location. * * <p>Given a PCollection containing records of type T that can be marshalled to XML elements, * this Sink will produce a single file consisting of a single root element that contains all of * the elements in the PCollection. * * <p>XML Sinks are created with a base filename to write to, a root element name that will be * used for the root element of the output files, and a class to bind to an XML element. This * class will be used in the marshalling of records in an input PCollection to their XML * representation and must be able to be bound using JAXB annotations (checked at pipeline * construction time). 
* * <p>XML Sinks can be written to using the {@link Write} transform: * * <pre>{@code * p.apply(XmlIO.<Type>write() * .withRecordClass(Type.class) * .withRootElement(root_element) * .toFilenamePrefix(output_filename)); * }</pre> * * <p>For example, consider the following class with JAXB annotations: * * <pre> * {@literal @}XmlRootElement(name = "word_count_result") * {@literal @}XmlType(propOrder = {"word", "frequency"}) * public class WordFrequency { * private String word; * private long frequency; * * public WordFrequency() { } * * public WordFrequency(String word, long frequency) { * this.word = word; * this.frequency = frequency; * } * * public void setWord(String word) { * this.word = word; * } * * public void setFrequency(long frequency) { * this.frequency = frequency; * } * * public long getFrequency() { * return frequency; * } * * public String getWord() { * return word; * } * } * </pre> * * <p>The following will produce XML output with a root element named "words" from a PCollection * of WordFrequency objects: * * <pre>{@code * p.apply(XmlIO.<WordFrequency>write() * .withRecordClass(WordFrequency.class) * .withRootElement("words") * .toFilenamePrefix(output_file)); * }</pre> * * <p>The output of which will look like: * * <pre>{@code * <words> * * <word_count_result> * <word>decreased</word> * <frequency>1</frequency> * </word_count_result> * * <word_count_result> * <word>War</word> * <frequency>4</frequency> * </word_count_result> * * <word_count_result> * <word>empress'</word> * <frequency>14</frequency> * </word_count_result> * * <word_count_result> * <word>stoops</word> * <frequency>6</frequency> * </word_count_result> * * ... * </words> * }</pre> */ // CHECKSTYLE.ON: JavadocStyle public static <T> Write<T> write() { return new AutoValue_XmlIO_Write.Builder<T>().build(); } /** Implementation of {@link #read}. 
*/
// A PTransform that reads a PCollection<T> from one or more XML files.
// Configuration lives in this AutoValue-generated immutable; each with*() method
// returns a new Read with one setting changed.
@AutoValue
public abstract static class Read<T> extends PTransform<PBegin, PCollection<T>> {

  // Default minimum bundle size (8 KiB). Not referenced in this chunk;
  // presumably applied as the builder default — TODO confirm against the
  // read() factory outside this view.
  private static final int DEFAULT_MIN_BUNDLE_SIZE = 8 * 1024;

  // File or glob pattern to read; null until from() is called.
  @Nullable
  abstract String getFileOrPatternSpec();

  // Name of the XML document's root element; required, see validate().
  @Nullable
  abstract String getRootElement();

  // Name of the per-record element; required, see validate().
  @Nullable
  abstract String getRecordElement();

  // JAXB-annotated class each record is unmarshalled into; required, see validate().
  @Nullable
  abstract Class<T> getRecordClass();

  abstract CompressionType getCompressionType();

  abstract long getMinBundleSize();

  abstract Builder<T> toBuilder();

  // AutoValue builder; mirrors the getters above.
  @AutoValue.Builder
  abstract static class Builder<T> {
    abstract Builder<T> setFileOrPatternSpec(String fileOrPatternSpec);

    abstract Builder<T> setRootElement(String rootElement);

    abstract Builder<T> setRecordElement(String recordElement);

    abstract Builder<T> setRecordClass(Class<T> recordClass);

    abstract Builder<T> setMinBundleSize(long minBundleSize);

    abstract Builder<T> setCompressionType(CompressionType compressionType);

    abstract Read<T> build();
  }

  /** Strategy for determining the compression type of XML files being read. */
  public enum CompressionType {
    /** Automatically determine the compression type based on filename extension. */
    AUTO(""),
    /** Uncompressed (i.e., may be split). */
    UNCOMPRESSED(""),
    /** GZipped. */
    GZIP(".gz"),
    /** BZipped. */
    BZIP2(".bz2"),
    /** Zipped. */
    ZIP(".zip"),
    /** Deflate compressed. */
    DEFLATE(".deflate");

    // Filename extension associated with this compression type ("" for AUTO/UNCOMPRESSED).
    private String filenameSuffix;

    CompressionType(String suffix) {
      this.filenameSuffix = suffix;
    }

    /**
     * Determine if a given filename matches a compression type based on its extension.
     *
     * @param filename the filename to match
     * @return true iff the filename ends with the compression type's known extension.
     */
    public boolean matches(String filename) {
      // NOTE(review): AUTO and UNCOMPRESSED use the empty suffix, so they match
      // every filename; callers must check the specific types first.
      return filename.toLowerCase().endsWith(filenameSuffix.toLowerCase());
    }
  }

  /**
   * Reads a single XML file or a set of XML files defined by a Java "glob" file pattern. Each XML
   * file should be of the form defined in {@link #read}.
   */
  public Read<T> from(String fileOrPatternSpec) {
    return toBuilder().setFileOrPatternSpec(fileOrPatternSpec).build();
  }

  /**
   * Sets name of the root element of the XML document. This will be used to create a valid
   * starting root element when initiating a bundle of records created from an XML document. This
   * is a required parameter.
   */
  public Read<T> withRootElement(String rootElement) {
    return toBuilder().setRootElement(rootElement).build();
  }

  /**
   * Sets name of the record element of the XML document. This will be used to determine offset of
   * the first record of a bundle created from the XML document. This is a required parameter.
   */
  public Read<T> withRecordElement(String recordElement) {
    return toBuilder().setRecordElement(recordElement).build();
  }

  /**
   * Sets a JAXB annotated class that can be populated using a record of the provided XML file.
   * This will be used when unmarshalling record objects from the XML file. This is a required
   * parameter.
   */
  public Read<T> withRecordClass(Class<T> recordClass) {
    return toBuilder().setRecordClass(recordClass).build();
  }

  /**
   * Sets a parameter {@code minBundleSize} for the minimum bundle size of the source. Please
   * refer to {@link OffsetBasedSource} for the definition of minBundleSize. This is an optional
   * parameter.
   */
  public Read<T> withMinBundleSize(long minBundleSize) {
    return toBuilder().setMinBundleSize(minBundleSize).build();
  }

  /**
   * Decompresses all input files using the specified compression type.
   *
   * <p>If no compression type is specified, the default is {@link CompressionType#AUTO}. In this
   * mode, the compression type of the file is determined by its extension. Supports .gz, .bz2,
   * .zip and .deflate compression.
   */
  public Read<T> withCompressionType(CompressionType compressionType) {
    return toBuilder().setCompressionType(compressionType).build();
  }

  // Fails fast at pipeline-construction time if any required setting is missing.
  @Override
  public void validate(PBegin input) {
    checkNotNull(
        getRootElement(),
        "rootElement is null. Use builder method withRootElement() to set this.");
    checkNotNull(
        getRecordElement(),
        "recordElement is null. Use builder method withRecordElement() to set this.");
    checkNotNull(
        getRecordClass(),
        "recordClass is null. Use builder method withRecordClass() to set this.");
  }

  // Surfaces the configuration in the Beam UI/monitoring display data.
  @Override
  public void populateDisplayData(DisplayData.Builder builder) {
    builder
        .addIfNotDefault(
            DisplayData.item("minBundleSize", getMinBundleSize())
                .withLabel("Minimum Bundle Size"),
            1L)
        .add(DisplayData.item("filePattern", getFileOrPatternSpec()).withLabel("File Pattern"))
        .addIfNotNull(
            DisplayData.item("rootElement", getRootElement()).withLabel("XML Root Element"))
        .addIfNotNull(
            DisplayData.item("recordElement", getRecordElement()).withLabel("XML Record Element"))
        .addIfNotNull(
            DisplayData.item("recordClass", getRecordClass()).withLabel("XML Record Class"));
  }

  // Wraps the raw XmlSource in a CompressedSource according to the configured
  // compression type. UNCOMPRESSED returns the splittable source directly.
  @VisibleForTesting
  BoundedSource<T> createSource() {
    XmlSource<T> source = new XmlSource<>(this);
    switch (getCompressionType()) {
      case UNCOMPRESSED:
        return source;
      case AUTO:
        return CompressedSource.from(source);
      case BZIP2:
        return CompressedSource.from(source)
            .withDecompression(CompressedSource.CompressionMode.BZIP2);
      case GZIP:
        return CompressedSource.from(source)
            .withDecompression(CompressedSource.CompressionMode.GZIP);
      case ZIP:
        return CompressedSource.from(source)
            .withDecompression(CompressedSource.CompressionMode.ZIP);
      case DEFLATE:
        return CompressedSource.from(source)
            .withDecompression(CompressedSource.CompressionMode.DEFLATE);
      default:
        throw new IllegalArgumentException("Unknown compression type: " + getCompressionType());
    }
  }

  @Override
  public PCollection<T> expand(PBegin input) {
    return input.apply(org.apache.beam.sdk.io.Read.from(createSource()));
  }
}

/** Implementation of {@link #write}.
*/
// A PTransform that writes a PCollection<T> to XML files via JAXB marshalling.
// Configuration lives in this AutoValue-generated immutable.
@AutoValue
public abstract static class Write<T> extends PTransform<PCollection<T>, PDone> {

  // Output path prefix; required, see validate().
  @Nullable
  abstract String getFilenamePrefix();

  // JAXB-annotated class of the records being written; required, see validate().
  @Nullable
  abstract Class<T> getRecordClass();

  // Root element name wrapping all records in each output file; required, see validate().
  @Nullable
  abstract String getRootElement();

  abstract Builder<T> toBuilder();

  // AutoValue builder; mirrors the getters above.
  @AutoValue.Builder
  abstract static class Builder<T> {
    abstract Builder<T> setFilenamePrefix(String baseOutputFilename);

    abstract Builder<T> setRecordClass(Class<T> recordClass);

    abstract Builder<T> setRootElement(String rootElement);

    abstract Write<T> build();
  }

  /**
   * Writes to files with the given path prefix.
   *
   * <p>Output files will have the name {@literal {filenamePrefix}-0000i-of-0000n.xml} where n is
   * the number of output bundles.
   */
  public Write<T> toFilenamePrefix(String filenamePrefix) {
    return toBuilder().setFilenamePrefix(filenamePrefix).build();
  }

  /**
   * Writes objects of the given class mapped to XML elements using JAXB.
   *
   * <p>The specified class must be able to be used to create a JAXB context.
   */
  public Write<T> withRecordClass(Class<T> recordClass) {
    return toBuilder().setRecordClass(recordClass).build();
  }

  /** Sets the enclosing root element for the generated XML files. */
  public Write<T> withRootElement(String rootElement) {
    return toBuilder().setRootElement(rootElement).build();
  }

  // Fails fast at pipeline-construction time: checks all required settings and
  // verifies the record class can actually be bound to a JAXB context.
  @Override
  public void validate(PCollection<T> input) {
    checkNotNull(getRecordClass(), "Missing a class to bind to a JAXB context.");
    checkNotNull(getRootElement(), "Missing a root element name.");
    checkNotNull(getFilenamePrefix(), "Missing a filename to write to.");
    try {
      JAXBContext.newInstance(getRecordClass());
    } catch (JAXBException e) {
      throw new RuntimeException("Error binding classes to a JAXB Context.", e);
    }
  }

  @Override
  public PDone expand(PCollection<T> input) {
    return input.apply(org.apache.beam.sdk.io.WriteFiles.to(createSink()));
  }

  @VisibleForTesting
  XmlSink<T> createSink() {
    return new XmlSink<>(this);
  }

  @Override
  public void populateDisplayData(DisplayData.Builder builder) {
    // Delegate file-based display data (prefix, sharding) to the sink, then add
    // the XML-specific settings.
    createSink().populateFileBasedDisplayData(builder);
    builder
        .addIfNotNull(
            DisplayData.item("rootElement", getRootElement()).withLabel("XML Root Element"))
        .addIfNotNull(
            DisplayData.item("recordClass", getRecordClass()).withLabel("XML Record Class"));
  }
}
} // end of enclosing XmlIO class
package org.kannel.protocol.kbinds;

import java.io.IOException;
import java.io.OutputStream;
import java.net.InetAddress;
import java.net.Socket;
import java.net.UnknownHostException;
import org.kannel.protocol.packets.AdminKMessage;
import org.kannel.protocol.packets.SMSPacketMessage;
import org.kannel.protocol.tools.DataTypesTools;

/**
 * Socket connected to a Kannel bearerbox (typically).
 *
 * <p>Thin wrapper over {@link Socket} that can additionally identify itself to the
 * bearerbox by sending an {@link AdminKMessage} with {@code CMD_IDENTIFY} and a
 * box-connection id ({@code boxc_id}). The constructors that take a {@code boxc_id}
 * perform that handshake immediately after connecting.
 *
 *@author Oscar Medina Duarte
 *@created April 4, 2005
 */
public class KSocket extends Socket {

	// Box-connection id used by kconnect(); may be null until set.
	private String boxc_id = null;
	// Last identify message sent; rebuilt on every kconnect() call.
	private AdminKMessage admMessage = null;

	/**
	 * Creates an unconnected KSocket.
	 */
	public KSocket() {
		super();
	}

	/**
	 * Creates a KSocket connected to the given address and port.
	 *
	 *@param address remote address to connect to
	 *@param port remote port to connect to
	 *@exception IOException if the connection cannot be established
	 */
	public KSocket(InetAddress address, int port) throws IOException {
		super(address, port);
	}

	/**
	 * Creates a KSocket connected to the given address and port, bound to the
	 * given local address and port.
	 *
	 *@param address remote address to connect to
	 *@param port remote port to connect to
	 *@param localAddr local address to bind to
	 *@param localPort local port to bind to
	 *@exception IOException if the connection cannot be established
	 */
	public KSocket(InetAddress address, int port, InetAddress localAddr, int localPort) throws IOException {
		super(address, port, localAddr, localPort);
	}

	/**
	 * Creates a KSocket connected to the given host and port.
	 *
	 *@param host remote host name to connect to
	 *@param port remote port to connect to
	 *@exception UnknownHostException if the host cannot be resolved
	 *@exception IOException if the connection cannot be established
	 */
	public KSocket(String host, int port) throws UnknownHostException, IOException {
		super(host, port);
	}

	/**
	 * Creates a KSocket connected to the given host and port, bound to the given
	 * local address and port.
	 *
	 *@param host remote host name to connect to
	 *@param port remote port to connect to
	 *@param localAddr local address to bind to
	 *@param localPort local port to bind to
	 *@exception IOException if the connection cannot be established
	 */
	public KSocket(String host, int port, InetAddress localAddr, int localPort) throws IOException {
		super(host, port, localAddr, localPort);
	}

	/**
	 * Creates an unconnected KSocket and stores the boxc_id to be used by a later
	 * call to {@link #kconnect()}.
	 *
	 *@param boxc_id box-connection id to identify as
	 */
	public KSocket(String boxc_id) {
		super();
		this.boxc_id = boxc_id;
	}

	/**
	 * Connects to the given address/port and immediately identifies to the
	 * bearerbox with the given boxc_id.
	 *
	 *@param address remote address to connect to
	 *@param port remote port to connect to
	 *@param boxc_id box-connection id to identify as
	 *@exception IOException if connecting or sending the identify message fails
	 */
	public KSocket(InetAddress address, int port, String boxc_id) throws IOException {
		super(address, port);
		this.boxc_id = boxc_id;
		this.kconnect(this.boxc_id);
	}

	/**
	 * Connects to the given address/port (bound locally) and immediately
	 * identifies to the bearerbox with the given boxc_id.
	 *
	 *@param address remote address to connect to
	 *@param port remote port to connect to
	 *@param localAddr local address to bind to
	 *@param localPort local port to bind to
	 *@param boxc_id box-connection id to identify as
	 *@exception IOException if connecting or sending the identify message fails
	 */
	public KSocket(InetAddress address, int port, InetAddress localAddr, int localPort, String boxc_id) throws IOException {
		super(address, port, localAddr, localPort);
		this.boxc_id = boxc_id;
		this.kconnect(this.boxc_id);
	}

	/**
	 * Connects to the given host/port and immediately identifies to the bearerbox
	 * with the given boxc_id.
	 *
	 *@param host remote host name to connect to
	 *@param port remote port to connect to
	 *@param boxc_id box-connection id to identify as
	 *@exception UnknownHostException if the host cannot be resolved
	 *@exception IOException if connecting or sending the identify message fails
	 */
	public KSocket(String host, int port, String boxc_id) throws UnknownHostException, IOException {
		super(host, port);
		this.boxc_id = boxc_id;
		this.kconnect(this.boxc_id);
	}

	/**
	 * Connects to the given host/port (bound locally) and immediately identifies
	 * to the bearerbox with the given boxc_id.
	 *
	 *@param host remote host name to connect to
	 *@param port remote port to connect to
	 *@param localAddr local address to bind to
	 *@param localPort local port to bind to
	 *@param boxc_id box-connection id to identify as
	 *@exception IOException if connecting or sending the identify message fails
	 */
	public KSocket(String host, int port, InetAddress localAddr, int localPort, String boxc_id) throws IOException {
		super(host, port, localAddr, localPort);
		this.boxc_id = boxc_id;
		this.kconnect(this.boxc_id);
	}

	/**
	 * Sets the boxc_id attribute of the KSocket object.
	 *
	 *@param boxc_id The new boxc_id value
	 */
	public void setBoxc_id(String boxc_id) {
		this.boxc_id = boxc_id;
	}

	/**
	 * Gets the boxc_id attribute of the KSocket object.
	 *
	 *@return The boxc_id value
	 */
	public String getBoxc_id() {
		return boxc_id;
	}

	/**
	 * Initiates a session with a bearer box by sending a CMD_IDENTIFY admin
	 * message carrying the given boxc_id.
	 *
	 *@param boxc_id box-connection id to identify as
	 *@exception IOException if writing to the socket fails
	 */
	public void kconnect(String boxc_id) throws IOException {
		// BUG FIX: the previous implementation only built the identify message
		// when admMessage was null, so a second call with a DIFFERENT boxc_id
		// silently re-sent the first identity. Build it from the argument on
		// every call so the parameter is always honored.
		this.admMessage = new AdminKMessage(AdminKMessage.CMD_IDENTIFY, boxc_id);
		OutputStream os = super.getOutputStream();
		byte[] bAdmMsg = admMessage.getMessage();
		// Debug trace of the outgoing identify packet (kept from original code).
		System.out.println("out:\n" + DataTypesTools.hexDump(bAdmMsg));
		os.write(bAdmMsg);
		os.flush();
	}

	/**
	 * Initiates a session with a bearer box using the stored boxc_id.
	 *
	 *@exception IOException if writing to the socket fails
	 */
	public void kconnect() throws IOException {
		this.kconnect(this.boxc_id);
	}

	/**
	 * Manual test harness: connects to a local bearerbox, identifies with an
	 * empty boxc_id, sends one SMS packet, and closes the socket.
	 *
	 *@param args unused
	 *@exception Exception on any connection or I/O failure
	 */
	public static void main(String[] args) throws Exception {
		KSocket ksckt = new KSocket("localhost", 6667, "");
		OutputStream os = ksckt.getOutputStream();
		SMSPacketMessage smsMsg = new SMSPacketMessage("de_mi", "pa_ti", "CosasRaras", "Y un mensaje de texto !");
		os.write(smsMsg.getMessage());
		os.flush();
		ksckt.close();
	}
}
package controlP5;

/**
 * controlP5 is a processing gui library.
 *
 * 2006-2015 by Andreas Schlegel
 *
 * This library is free software; you can redistribute it
 * and/or modify it under the terms of the GNU Lesser
 * General Public License as published by the Free Software
 * Foundation; either version 2.1 of the License, or (at
 * your option) any later version. This library is
 * distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU Lesser General Public License for more
 * details.
 *
 * You should have received a copy of the GNU Lesser General
 * Public License along with this library; if not, write to
 * the Free Software Foundation, Inc., 59 Temple Place,
 * Suite 330, Boston, MA 02111-1307 USA
 *
 * @author Andreas Schlegel (http://www.sojamo.de)
 * @modified 04/14/2016
 * @version 2.2.6
 *
 */

import processing.core.PGraphics;

/**
 * <p>
 * A button triggers an event after it has been released.
 * Events can be linked to functions and fields inside your
 * program/sketch. For a full documentation of this
 * controller see the {@link Controller} class.
 *
 * @example controllers/ControlP5button
 */
public class Button extends Controller< Button > {

	// true while the mouse button is held down on this controller
	protected boolean isPressed;
	// switch state; only meaningful when isSwitch is true
	protected boolean isOn = false;
	// default size used by the convenience constructor
	public static int autoWidth = 69;
	public static int autoHeight = 19;
	// which mouse event fires activate(); defaults to RELEASE
	protected int activateBy = RELEASE;
	// when true, the button toggles like a switch instead of firing once
	protected boolean isSwitch = false;

	/**
	 * Convenience constructor to extend Button.
	 *
	 * @example use/ControlP5extendController
	 */
	public Button( ControlP5 theControlP5 , String theName ) {
		this( theControlP5 , theControlP5.getDefaultTab( ) , theName , 0 , 0 , 0 , autoWidth , autoHeight );
		theControlP5.register( theControlP5.papplet , theName , this );
	}

	// Full constructor: registers with the parent group and centers the caption label.
	protected Button( ControlP5 theControlP5 , ControllerGroup< ? > theParent , String theName , float theDefaultValue , int theX , int theY , int theWidth , int theHeight ) {
		super( theControlP5 , theParent , theName , theX , theY , theWidth , theHeight );
		_myValue = theDefaultValue;
		_myCaptionLabel.align( CENTER , CENTER );
	}

	/**
	 * @exclude
	 */
	public Button( ) {
		super( null , null , null , 0 , 0 , 1 , 1 );
	}

	// Hover tracking: active while the pointer is over the controller.
	@Override protected void onEnter( ) {
		isActive = true;
	}

	@Override protected void onLeave( ) {
		isActive = false;
		setIsInside( false );
	}

	/**
	 * @exclude
	 */
	@Override @ControlP5.Invisible public void mousePressed( ) {
		isActive = getIsInside( );
		isPressed = true;
		// NOTE(review): this compares against PRESSED while activateBy(int) stores
		// PRESS — confirm upstream that these two constants hold the same value,
		// otherwise press-activation can never trigger here.
		if ( activateBy == PRESSED ) {
			activate( );
		}
	}

	/**
	 * @exclude
	 */
	@Override @ControlP5.Invisible public void mouseReleased( ) {
		isPressed = false;
		if ( activateBy == RELEASE ) {
			activate( );
		}
		isActive = false;
	}

	/**
	 * A button can be activated by a mouse PRESSED or mouse
	 * RELEASE. Default value is RELEASE.
	 *
	 * @param theValue use ControlP5.PRESSED or
	 *            ControlP5.RELEASE as parameter
	 * @return Button
	 */
	public Button activateBy( int theValue ) {
		// Any value other than PRESS falls back to RELEASE.
		if ( theValue == PRESS ) {
			activateBy = PRESS;
		} else {
			activateBy = RELEASE;
		}
		return this;
	}

	// Fires the button: toggles the switch state and broadcasts the value,
	// but only when the pointer is still over the controller (isActive).
	protected void activate( ) {
		if ( isActive ) {
			isActive = false;
			isOn = !isOn;
			setValue( _myValue );
		}
	}

	/**
	 * @exclude
	 */
	@Override @ControlP5.Invisible public void mouseReleasedOutside( ) {
		mouseReleased( );
	}

	/**
	 * {@inheritDoc}
	 */
	@Override public Button setValue( float theValue ) {
		_myValue = theValue;
		broadcast( FLOAT );
		return this;
	}

	/**
	 * {@inheritDoc}
	 */
	@Override public Button update( ) {
		return setValue( _myValue );
	}

	/**
	 * Turns a button into a switch, or use a Toggle
	 * instead. Switch mode broadcasts BOOLEAN events; plain
	 * button mode broadcasts FLOAT events.
	 */
	public Button setSwitch( boolean theFlag ) {
		isSwitch = theFlag;
		if ( isSwitch ) {
			_myBroadcastType = BOOLEAN;
		} else {
			_myBroadcastType = FLOAT;
		}
		return this;
	}

	/**
	 * If the button acts as a switch, setOn will turn on
	 * the switch. Use
	 * {@link controlP5.Button#setSwitch(boolean) setSwitch}
	 * to turn a Button into a Switch.
	 *
	 * @return Button
	 */
	public Button setOn( ) {
		if ( isSwitch ) {
			// isOn is set to the OPPOSITE of the target state on purpose:
			// activate() toggles it, leaving the switch on.
			isOn = false;
			isActive = true;
			activate( );
		}
		return this;
	}

	/**
	 * If the button acts as a switch, setOff will turn off
	 * the switch. Use
	 * {@link controlP5.Button#setSwitch(boolean) setSwitch}
	 * to turn a Button into a Switch.
	 *
	 * @return Button
	 */
	public Button setOff( ) {
		if ( isSwitch ) {
			// see setOn(): activate() toggles, leaving the switch off.
			isOn = true;
			isActive = true;
			activate( );
		}
		return this;
	}

	/**
	 * @return boolean
	 */
	public boolean isOn( ) {
		return isOn;
	}

	public boolean isSwitch( ) {
		return isSwitch;
	}

	/**
	 * @return boolean
	 */
	public boolean isPressed( ) {
		return isPressed;
	}

	/**
	 * Returns true or false and indicates the switch state
	 * of the button. {@link setSwitch(boolean) setSwitch}
	 * should have been set before.
	 *
	 * @see controlP5.Button#setSwitch(boolean)
	 * @return boolean
	 */
	public boolean getBooleanValue( ) {
		return isOn;
	}

	/**
	 * @exclude
	 */
	@Override @ControlP5.Invisible public Button updateDisplayMode( int theMode ) {
		return updateViewMode( theMode );
	}

	/**
	 * @exclude
	 */
	@ControlP5.Invisible public Button updateViewMode( int theMode ) {
		_myDisplayMode = theMode;
		// CUSTOM keeps whatever view the caller installed; DEFAULT/IMAGE install
		// the built-in views below.
		switch ( theMode ) {
		case ( DEFAULT ):
			_myControllerView = new ButtonView( );
			break;
		case ( IMAGE ):
			_myControllerView = new ButtonImageView( );
			break;
		case ( CUSTOM ):
		default:
			break;
		}
		return this;
	}

	// Default flat-color rendering: active color when pressed or switched on,
	// foreground on hover, background otherwise.
	private class ButtonView implements ControllerView< Button > {

		public void display( PGraphics theGraphics , Button theController ) {
			theGraphics.noStroke( );
			if ( isOn && isSwitch ) {
				theGraphics.fill( color.getActive( ) );
			} else {
				if ( getIsInside( ) ) {
					if ( isPressed ) {
						theGraphics.fill( color.getActive( ) );
					} else {
						theGraphics.fill( color.getForeground( ) );
					}
				} else {
					theGraphics.fill( color.getBackground( ) );
				}
			}
			theGraphics.rect( 0 , 0 , getWidth( ) , getHeight( ) );
			if ( isLabelVisible ) {
				_myCaptionLabel.draw( theGraphics , 0 , 0 , theController );
			}
		}
	}

	// Image-based rendering: picks the HIGHLIGHT/ACTIVE/OVER image when available,
	// falling back to the DEFAULT image in every case.
	private class ButtonImageView implements ControllerView< Button > {

		public void display( PGraphics theGraphics , Button theController ) {
			if ( isOn && isSwitch ) {
				theGraphics.image( ( availableImages[ HIGHLIGHT ] == true ) ? images[ HIGHLIGHT ] : images[ DEFAULT ] , 0 , 0 );
				return;
			}
			if ( getIsInside( ) ) {
				if ( isPressed ) {
					theGraphics.image( ( availableImages[ ACTIVE ] == true ) ? images[ ACTIVE ] : images[ DEFAULT ] , 0 , 0 );
				} else {
					theGraphics.image( ( availableImages[ OVER ] == true ) ? images[ OVER ] : images[ DEFAULT ] , 0 , 0 );
				}
			} else {
				theGraphics.image( images[ DEFAULT ] , 0 , 0 );
			}
		}
	}

	/**
	 * @exclude
	 */
	@Override public String getInfo( ) {
		return "type:\tButton\n" + super.getInfo( );
	}

	/**
	 * @exclude
	 */
	@Override public String toString( ) {
		return super.toString( ) + " [ " + getValue( ) + " ] " + "Button" + " (" + this.getClass( ).getSuperclass( ) + ")";
	}
}
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.phoenix.compile; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.phoenix.coprocessor.MetaDataProtocol; import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.parse.AliasedNode; import org.apache.phoenix.parse.BindTableNode; import org.apache.phoenix.parse.ColumnDef; import org.apache.phoenix.parse.CreateTableStatement; import org.apache.phoenix.parse.DMLStatement; import org.apache.phoenix.parse.DerivedTableNode; import org.apache.phoenix.parse.FamilyWildcardParseNode; import org.apache.phoenix.parse.JoinTableNode; import org.apache.phoenix.parse.NamedTableNode; import org.apache.phoenix.parse.ParseNode; import org.apache.phoenix.parse.SelectStatement; import org.apache.phoenix.parse.SingleTableStatement; import org.apache.phoenix.parse.TableName; import 
org.apache.phoenix.parse.TableNode; import org.apache.phoenix.parse.TableNodeVisitor; import org.apache.phoenix.parse.TableWildcardParseNode; import org.apache.phoenix.parse.WildcardParseNode; import org.apache.phoenix.query.ConnectionQueryServices; import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.AmbiguousColumnException; import org.apache.phoenix.schema.AmbiguousTableException; import org.apache.phoenix.schema.ColumnFamilyNotFoundException; import org.apache.phoenix.schema.ColumnNotFoundException; import org.apache.phoenix.schema.ColumnRef; import org.apache.phoenix.schema.MetaDataClient; import org.apache.phoenix.schema.PColumn; import org.apache.phoenix.schema.PColumnFamily; import org.apache.phoenix.schema.PColumnFamilyImpl; import org.apache.phoenix.schema.PColumnImpl; import org.apache.phoenix.schema.PName; import org.apache.phoenix.schema.PNameFactory; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableImpl; import org.apache.phoenix.schema.PTableKey; import org.apache.phoenix.schema.PTableType; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.TableNotFoundException; import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.util.Closeables; import org.apache.phoenix.util.LogUtil; import org.apache.phoenix.util.SchemaUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.ImmutableList; import com.google.common.collect.ListMultimap; import com.google.common.collect.Lists; /** * Validates FROM clause and builds a ColumnResolver for resolving column references * * * @since 0.1 */ public class FromCompiler { private static final Logger logger = LoggerFactory.getLogger(FromCompiler.class); public static final ColumnResolver EMPTY_TABLE_RESOLVER = new ColumnResolver() { @Override public List<TableRef> getTables() { return Collections.emptyList(); } @Override public TableRef 
resolveTable(String schemaName, String tableName) throws SQLException { throw new UnsupportedOperationException(); } @Override public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException { throw new UnsupportedOperationException(); } }; public static ColumnResolver getResolverForCreation(final CreateTableStatement statement, final PhoenixConnection connection) throws SQLException { TableName baseTable = statement.getBaseTableName(); if (baseTable == null) { return EMPTY_TABLE_RESOLVER; } NamedTableNode tableNode = NamedTableNode.create(null, baseTable, Collections.<ColumnDef>emptyList()); // Always use non-tenant-specific connection here try { SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true); return visitor; } catch (TableNotFoundException e) { // Used for mapped VIEW, since we won't be able to resolve that. // Instead, we create a table with just the dynamic columns. // A tenant-specific connection may not create a mapped VIEW. if (connection.getTenantId() == null && statement.getTableType() == PTableType.VIEW) { ConnectionQueryServices services = connection.getQueryServices(); byte[] fullTableName = SchemaUtil.getTableNameAsBytes(baseTable.getSchemaName(), baseTable.getTableName()); HTableInterface htable = null; try { htable = services.getTable(fullTableName); } catch (UnsupportedOperationException ignore) { throw e; // For Connectionless } finally { if (htable != null) Closeables.closeQuietly(htable); } tableNode = NamedTableNode.create(null, baseTable, statement.getColumnDefs()); return new SingleTableColumnResolver(connection, tableNode, e.getTimeStamp()); } throw e; } } /** * Iterate through the nodes in the FROM clause to build a column resolver used to lookup a column given the name * and alias. 
* * @param statement * the select statement * @return the column resolver * @throws SQLException * @throws SQLFeatureNotSupportedException * if unsupported constructs appear in the FROM clause * @throws TableNotFoundException * if table name not found in schema */ public static ColumnResolver getResolverForQuery(SelectStatement statement, PhoenixConnection connection) throws SQLException { TableNode fromNode = statement.getFrom(); if (fromNode instanceof NamedTableNode) return new SingleTableColumnResolver(connection, (NamedTableNode) fromNode, true, 1); MultiTableColumnResolver visitor = new MultiTableColumnResolver(connection, 1); fromNode.accept(visitor); return visitor; } public static ColumnResolver getResolver(NamedTableNode tableNode, PhoenixConnection connection) throws SQLException { SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true); return visitor; } public static ColumnResolver getResolver(SingleTableStatement statement, PhoenixConnection connection) throws SQLException { SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, statement.getTable(), true); return visitor; } public static ColumnResolver getResolverForCompiledDerivedTable(PhoenixConnection connection, TableRef tableRef, RowProjector projector) throws SQLException { List<PColumn> projectedColumns = new ArrayList<PColumn>(); List<Expression> sourceExpressions = new ArrayList<Expression>(); PTable table = tableRef.getTable(); for (PColumn column : table.getColumns()) { Expression sourceExpression = projector.getColumnProjector(column.getPosition()).getExpression(); PColumnImpl projectedColumn = new PColumnImpl(column.getName(), column.getFamilyName(), sourceExpression.getDataType(), sourceExpression.getMaxLength(), sourceExpression.getScale(), sourceExpression.isNullable(), column.getPosition(), sourceExpression.getSortOrder(), column.getArraySize(), column.getViewConstant(), column.isViewReferenced()); 
projectedColumns.add(projectedColumn); sourceExpressions.add(sourceExpression); } PTable t = PTableImpl.makePTable(table, projectedColumns); return new SingleTableColumnResolver(connection, new TableRef(tableRef.getTableAlias(), t, tableRef.getLowerBoundTimeStamp(), tableRef.hasDynamicCols())); } public static ColumnResolver getResolver(TableRef tableRef) throws SQLException { SingleTableColumnResolver visitor = new SingleTableColumnResolver(tableRef); return visitor; } public static ColumnResolver getResolverForMutation(DMLStatement statement, PhoenixConnection connection) throws SQLException { /* * We validate the meta data at commit time for mutations, as this allows us to do many UPSERT VALUES calls * without hitting the server each time to check if the meta data is up-to-date. */ SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, statement.getTable(), false); return visitor; } private static class SingleTableColumnResolver extends BaseColumnResolver { private final List<TableRef> tableRefs; private final String alias; public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode table, long timeStamp) throws SQLException { super(connection, 0); List<PColumnFamily> families = Lists.newArrayListWithExpectedSize(table.getDynamicColumns().size()); for (ColumnDef def : table.getDynamicColumns()) { if (def.getColumnDefName().getFamilyName() != null) { families.add(new PColumnFamilyImpl(PNameFactory.newName(def.getColumnDefName().getFamilyName()),Collections.<PColumn>emptyList())); } } Long scn = connection.getSCN(); PTable theTable = new PTableImpl(connection.getTenantId(), table.getName().getSchemaName(), table.getName().getTableName(), scn == null ? 
HConstants.LATEST_TIMESTAMP : scn, families); theTable = this.addDynamicColumns(table.getDynamicColumns(), theTable); alias = null; tableRefs = ImmutableList.of(new TableRef(alias, theTable, timeStamp, !table.getDynamicColumns().isEmpty())); } public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode tableNode, boolean updateCacheImmediately) throws SQLException { this(connection, tableNode, updateCacheImmediately, 0); } public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode tableNode, boolean updateCacheImmediately, int tsAddition) throws SQLException { super(connection, tsAddition); alias = tableNode.getAlias(); TableRef tableRef = createTableRef(tableNode, updateCacheImmediately); tableRefs = ImmutableList.of(tableRef); } public SingleTableColumnResolver(PhoenixConnection connection, TableRef tableRef) { super(connection, 0); alias = tableRef.getTableAlias(); tableRefs = ImmutableList.of(tableRef); } public SingleTableColumnResolver(TableRef tableRef) throws SQLException { super(null, 0); alias = tableRef.getTableAlias(); tableRefs = ImmutableList.of(tableRef); } @Override public List<TableRef> getTables() { return tableRefs; } @Override public TableRef resolveTable(String schemaName, String tableName) throws SQLException { TableRef tableRef = tableRefs.get(0); /* * The only case we can definitely verify is when both a schemaName and a tableName * are provided. Otherwise, the tableName might be a column family. In this case, * this will be validated by resolveColumn. */ if (schemaName != null || tableName != null) { String resolvedTableName = tableRef.getTable().getTableName().getString(); String resolvedSchemaName = tableRef.getTable().getSchemaName().getString(); if (schemaName != null && tableName != null) { if ( ! ( schemaName.equals(resolvedSchemaName) && tableName.equals(resolvedTableName) ) && ! 
schemaName.equals(alias) ) {
                    // Requested name matched neither the resolved table nor its alias.
                    throw new TableNotFoundException(schemaName, tableName);
                }
            }
        }
        return tableRef;
    }

    @Override
    public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
        // Single-table resolver: every column must resolve against the one and only
        // TableRef; a qualifier may instead name a column family (resolveCF == true).
        TableRef tableRef = tableRefs.get(0);
        boolean resolveCF = false;
        if (schemaName != null || tableName != null) {
            String resolvedTableName = tableRef.getTable().getTableName().getString();
            String resolvedSchemaName = tableRef.getTable().getSchemaName().getString();
            if (schemaName != null && tableName != null) {
                if ( ! ( schemaName.equals(resolvedSchemaName) && tableName.equals(resolvedTableName) )) {
                    // "schema.table" didn't match the resolved table; reinterpret
                    // schemaName as the table alias and tableName as a column family.
                    if (!(resolveCF = schemaName.equals(alias))) {
                        throw new ColumnNotFoundException(schemaName, tableName, null, colName);
                    }
                }
            } else { // schemaName == null && tableName != null
                // Single qualifier: if it is not the alias and not the bare (schema-less)
                // table name, treat it as a column family name.
                if (tableName != null && !tableName.equals(alias) && (!tableName.equals(resolvedTableName) || !resolvedSchemaName.equals(""))) {
                    resolveCF = true;
                }
            }
        }
        PColumn column = resolveCF
                ? tableRef.getTable().getColumnFamily(tableName).getColumn(colName)
                : tableRef.getTable().getColumn(colName);
        return new ColumnRef(tableRef, column.getPosition());
    }
}

/**
 * Shared base for column resolvers. Knows how to materialize a {@link TableRef} for a
 * named table node, consulting (and, when necessary, refreshing) the connection's
 * metadata cache, and how to graft dynamic columns onto a resolved table.
 */
private static abstract class BaseColumnResolver implements ColumnResolver {
    protected final PhoenixConnection connection;
    protected final MetaDataClient client;
    // Fudge factor to add to current time we calculate. We need this when we do a SELECT
    // on Windows because the millis timestamp granularity is so bad we sometimes won't
    // get the data back that we just upsert.
    private final int tsAddition;

    private BaseColumnResolver(PhoenixConnection connection, int tsAddition) {
        this.connection = connection;
        // connection may be null (e.g. in testing contexts); no metadata client then.
        this.client = connection == null ? null : new MetaDataClient(connection);
        this.tsAddition = tsAddition;
    }

    /**
     * Resolves {@code tableNode} to a TableRef, either trusting the client-side
     * metadata cache or forcing a server round trip when {@code updateCacheImmediately}
     * is set (or autocommit is on). Dynamic columns declared on the node are added to
     * the returned table. Throws TableNotFoundException when the table cannot be found.
     */
    protected TableRef createTableRef(NamedTableNode tableNode, boolean updateCacheImmediately) throws SQLException {
        String tableName = tableNode.getName().getTableName();
        String schemaName = tableNode.getName().getSchemaName();
        long timeStamp = QueryConstants.UNSET_TIMESTAMP;
        String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
        PName tenantId = connection.getTenantId();
        PTable theTable = null;
        if (updateCacheImmediately || connection.getAutoCommit()) {
            // Force a metadata refresh so we see the latest table definition.
            MetaDataMutationResult result = client.updateCache(schemaName, tableName);
            timeStamp = result.getMutationTime();
            theTable = result.getTable();
            if (theTable == null) {
                throw new TableNotFoundException(schemaName, tableName, timeStamp);
            }
        } else {
            try {
                theTable = connection.getMetaDataCache().getTable(new PTableKey(tenantId, fullTableName));
            } catch (TableNotFoundException e1) {
                if (tenantId != null) { // Check with null tenantId next
                    try {
                        theTable = connection.getMetaDataCache().getTable(new PTableKey(null, fullTableName));
                    } catch (TableNotFoundException e2) {
                    }
                }
            }
            // We always attempt to update the cache in the event of a TableNotFoundException
            if (theTable == null) {
                MetaDataMutationResult result = client.updateCache(schemaName, tableName);
                if (result.wasUpdated()) {
                    timeStamp = result.getMutationTime();
                    theTable = result.getTable();
                }
            }
            if (theTable == null) {
                throw new TableNotFoundException(schemaName, tableName, timeStamp);
            }
        }
        // Add any dynamic columns to the table declaration
        List<ColumnDef> dynamicColumns = tableNode.getDynamicColumns();
        theTable = addDynamicColumns(dynamicColumns, theTable);
        if (timeStamp != QueryConstants.UNSET_TIMESTAMP) {
            // See tsAddition above: compensate for coarse millis granularity.
            timeStamp += tsAddition;
        }
        TableRef tableRef = new TableRef(tableNode.getAlias(), theTable, timeStamp, !dynamicColumns.isEmpty());
        if (logger.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
            logger.debug(LogUtil.addCustomAnnotations("Re-resolved stale table " + fullTableName
                    + " with seqNum " + tableRef.getTable().getSequenceNumber()
                    + " at timestamp " + tableRef.getTable().getTimeStamp()
                    + " with " + tableRef.getTable().getColumns().size()
                    + " columns: " + tableRef.getTable().getColumns(), connection));
        }
        return tableRef;
    }

    /**
     * Returns {@code theTable} extended with the given dynamic column definitions
     * (appended after the existing columns); returns it unchanged when the list is empty.
     */
    protected PTable addDynamicColumns(List<ColumnDef> dynColumns, PTable theTable) throws SQLException {
        if (!dynColumns.isEmpty()) {
            List<PColumn> allcolumns = new ArrayList<PColumn>();
            List<PColumn> existingColumns = theTable.getColumns();
            // Need to skip the salting column, as it's added in the makePTable call below
            allcolumns.addAll(theTable.getBucketNum() == null ? existingColumns
                    : existingColumns.subList(1, existingColumns.size()));
            // Position still based on with the salting columns
            int position = existingColumns.size();
            PName defaultFamilyName = PNameFactory.newName(SchemaUtil.getEmptyColumnFamily(theTable));
            for (ColumnDef dynColumn : dynColumns) {
                PName familyName = defaultFamilyName;
                PName name = PNameFactory.newName(dynColumn.getColumnDefName().getColumnName());
                String family = dynColumn.getColumnDefName().getFamilyName();
                if (family != null) {
                    theTable.getColumnFamily(family); // Verifies that column family exists
                    familyName = PNameFactory.newName(family);
                }
                allcolumns.add(new PColumnImpl(name, familyName, dynColumn.getDataType(),
                        dynColumn.getMaxLength(), dynColumn.getScale(), dynColumn.isNull(),
                        position, dynColumn.getSortOrder(), dynColumn.getArraySize(), null, false));
                position++;
            }
            theTable = PTableImpl.makePTable(theTable, allcolumns);
        }
        return theTable;
    }
}

/**
 * Resolver for statements referencing several tables (joins, subqueries). Builds a
 * name/alias -> TableRef multimap by visiting the FROM-clause parse-node tree.
 */
private static class MultiTableColumnResolver extends BaseColumnResolver implements TableNodeVisitor<Void> {
    // Maps both aliases and full table names to the TableRefs they denote; a name
    // bound to more than one TableRef makes a reference to it ambiguous.
    private final ListMultimap<String, TableRef> tableMap;
    private final List<TableRef> tables;

    private MultiTableColumnResolver(PhoenixConnection connection, int tsAddition) {
        super(connection, tsAddition);
        tableMap = ArrayListMultimap.<String, TableRef> create();
        tables = Lists.newArrayList();
    }

    @Override
    public List<TableRef> getTables() {
        return tables;
    }

    @Override
    public Void visit(BindTableNode boundTableNode) throws SQLException {
        // Bind tables are not supported in multi-table FROM clauses.
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public Void visit(JoinTableNode joinNode) throws SQLException {
        // Recurse into both sides of the join; each side registers its own tables.
        joinNode.getLHS().accept(this);
        joinNode.getRHS().accept(this);
        return null;
    }

    @Override
    public Void visit(NamedTableNode tableNode) throws SQLException {
        String alias = tableNode.getAlias();
        TableRef tableRef = createTableRef(tableNode, true);
        PTable theTable = tableRef.getTable();

        if (alias != null) {
            tableMap.put(alias, tableRef);
        }

        String name = theTable.getName().getString();
        //avoid having one name mapped to two identical TableRef.
        if (alias == null || !alias.equals(name)) {
            tableMap.put(name, tableRef);
        }
        tables.add(tableRef);
        return null;
    }

    @Override
    public Void visit(DerivedTableNode subselectNode) throws SQLException {
        // Build a synthetic SUBQUERY PTable whose columns mirror the subselect's
        // projection (by alias, or by ordinal position for anonymous columns).
        List<AliasedNode> selectNodes = subselectNode.getSelect().getSelect();
        List<PColumn> columns = new ArrayList<PColumn>();
        int position = 0;
        for (AliasedNode aliasedNode : selectNodes) {
            String alias = aliasedNode.getAlias();
            if (alias == null) {
                ParseNode node = aliasedNode.getNode();
                if (node instanceof WildcardParseNode
                        || node instanceof TableWildcardParseNode
                        || node instanceof FamilyWildcardParseNode)
                    throw new SQLException("Encountered wildcard in subqueries.");

                alias = SchemaUtil.normalizeIdentifier(node.getAlias());
            }
            if (alias == null) {
                // Use position as column name for anonymous columns, which can be
                // referenced by an outer wild-card select.
                alias = String.valueOf(position);
            }
            PColumnImpl column = new PColumnImpl(PNameFactory.newName(alias),
                    PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY),
                    null, 0, 0, true, position++, SortOrder.ASC, null, null, false);
            columns.add(column);
        }
        PTable t = PTableImpl.makePTable(null, PName.EMPTY_NAME, PName.EMPTY_NAME,
                PTableType.SUBQUERY, null, MetaDataProtocol.MIN_TABLE_TIMESTAMP, PTable.INITIAL_SEQ_NUM,
                null, null, columns, null, null, Collections.<PTable>emptyList(),
                false, Collections.<PName>emptyList(), null, null, false, false, null, null, null);

        String alias = subselectNode.getAlias();
        TableRef tableRef = new TableRef(alias, t, MetaDataProtocol.MIN_TABLE_TIMESTAMP, false);
        tableMap.put(alias, tableRef);
        tables.add(tableRef);
        return null;
    }

    // Pairs a column family with the table it was found in, for family-qualified
    // column references.
    private static class ColumnFamilyRef {
        private final TableRef tableRef;
        private final PColumnFamily family;

        ColumnFamilyRef(TableRef tableRef, PColumnFamily family) {
            this.tableRef = tableRef;
            this.family = family;
        }

        public TableRef getTableRef() {
            return tableRef;
        }

        public PColumnFamily getFamily() {
            return family;
        }
    }

    @Override
    public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
        String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
        List<TableRef> tableRefs = tableMap.get(fullTableName);
        if (tableRefs.size() == 0) {
            throw new TableNotFoundException(fullTableName);
        } else if (tableRefs.size() > 1) {
            throw new AmbiguousTableException(tableName);
        } else {
            return tableRefs.get(0);
        }
    }

    /**
     * Finds the (table, family) pair for {@code cfName}. With no table qualifier the
     * family must exist in exactly one of the registered tables.
     */
    private ColumnFamilyRef resolveColumnFamily(String tableName, String cfName) throws SQLException {
        if (tableName == null) {
            ColumnFamilyRef theColumnFamilyRef = null;
            Iterator<TableRef> iterator = tables.iterator();
            while (iterator.hasNext()) {
                TableRef tableRef = iterator.next();
                try {
                    PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(cfName);
                    if (theColumnFamilyRef != null) {
                        // Found in more than one table: the reference is ambiguous.
                        throw new TableNotFoundException(cfName);
                    }
                    theColumnFamilyRef = new ColumnFamilyRef(tableRef, columnFamily);
                } catch (ColumnFamilyNotFoundException e) {}
            }
            if (theColumnFamilyRef != null) {
                return theColumnFamilyRef;
            }
            throw new TableNotFoundException(cfName);
        } else {
            TableRef tableRef = resolveTable(null, tableName);
            PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(cfName);
            return new ColumnFamilyRef(tableRef, columnFamily);
        }
    }

    @Override
    public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
        if (tableName == null) {
            // Unqualified column: it must exist in exactly one of the registered tables.
            int theColumnPosition = -1;
            TableRef theTableRef = null;
            Iterator<TableRef> iterator = tables.iterator();
            while (iterator.hasNext()) {
                TableRef tableRef = iterator.next();
                try {
                    PColumn column = tableRef.getTable().getColumn(colName);
                    if (theTableRef != null) {
                        throw new AmbiguousColumnException(colName);
                    }
                    theTableRef = tableRef;
                    theColumnPosition = column.getPosition();
                } catch (ColumnNotFoundException e) {
                }
            }
            if (theTableRef != null) {
                return new ColumnRef(theTableRef, theColumnPosition);
            }
            throw new ColumnNotFoundException(colName);
        } else {
            try {
                TableRef tableRef = resolveTable(schemaName, tableName);
                PColumn column = tableRef.getTable().getColumn(colName);
                return new ColumnRef(tableRef, column.getPosition());
            } catch (TableNotFoundException e) {
                // Try using the tableName as a columnFamily reference instead
                ColumnFamilyRef cfRef = resolveColumnFamily(schemaName, tableName);
                PColumn column = cfRef.getFamily().getColumn(colName);
                return new ColumnRef(cfRef.getTableRef(), column.getPosition());
            }
        }
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.authc.oidc;

import com.nimbusds.jose.JWSAlgorithm;
import com.nimbusds.jwt.JWT;
import com.nimbusds.jwt.JWTClaimsSet;
import com.nimbusds.oauth2.sdk.ParseException;
import com.nimbusds.oauth2.sdk.ResponseType;
import com.nimbusds.oauth2.sdk.Scope;
import com.nimbusds.oauth2.sdk.id.ClientID;
import com.nimbusds.oauth2.sdk.id.Issuer;
import com.nimbusds.oauth2.sdk.id.State;
import com.nimbusds.openid.connect.sdk.AuthenticationRequest;
import com.nimbusds.openid.connect.sdk.LogoutRequest;
import com.nimbusds.openid.connect.sdk.Nonce;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.core.XPackSettings;
import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectLogoutResponse;
import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectPrepareAuthenticationResponse;
import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
import org.elasticsearch.xpack.core.security.authc.AuthenticationToken;
import org.elasticsearch.xpack.core.security.authc.Realm;
import org.elasticsearch.xpack.core.security.authc.RealmConfig;
import org.elasticsearch.xpack.core.security.authc.RealmSettings;
import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings;
import org.elasticsearch.xpack.core.security.user.User;
import org.elasticsearch.xpack.core.ssl.SSLService;
import org.elasticsearch.xpack.security.authc.TokenService;
import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport;
import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper;

import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.DN_CLAIM;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.GROUPS_CLAIM;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.MAIL_CLAIM;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.NAME_CLAIM;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_ENDSESSION_ENDPOINT;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_ISSUER;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_JWKSET_PATH;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_USERINFO_ENDPOINT;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.POPULATE_USER_METADATA;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.PRINCIPAL_CLAIM;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.RP_CLIENT_ID;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.RP_CLIENT_SECRET;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.RP_POST_LOGOUT_REDIRECT_URI;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.RP_REDIRECT_URI;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.RP_RESPONSE_TYPE;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES;
import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.RP_SIGNATURE_ALGORITHM;

/**
 * An OpenID Connect Relying Party realm. Validates configuration for both the OpenID
 * Provider (OP) and the Relying Party (RP), builds authentication request URIs, consumes
 * {@link OpenIdConnectToken}s and maps the resulting JWT claims to an Elasticsearch
 * {@link User} via the configured claim parsers and role mapper.
 */
public class OpenIdConnectRealm extends Realm implements Releasable {

    public static final String CONTEXT_TOKEN_DATA = "_oidc_tokendata";
    private final OpenIdConnectProviderConfiguration opConfiguration;
    private final RelyingPartyConfiguration rpConfiguration;
    private final OpenIdConnectAuthenticator openIdConnectAuthenticator;
    private final ClaimParser principalAttribute;
    private final ClaimParser groupsAttribute;
    private final ClaimParser dnAttribute;
    private final ClaimParser nameAttribute;
    private final ClaimParser mailAttribute;
    private final Boolean populateUserMetadata;
    private final UserRoleMapper roleMapper;

    // Set once in initialize(); guards against double initialization.
    private DelegatedAuthorizationSupport delegatedRealms;

    public OpenIdConnectRealm(RealmConfig config, SSLService sslService, UserRoleMapper roleMapper,
                              ResourceWatcherService watcherService) {
        super(config);
        this.roleMapper = roleMapper;
        this.rpConfiguration = buildRelyingPartyConfiguration(config);
        this.opConfiguration = buildOpenIdConnectProviderConfiguration(config);
        this.principalAttribute = ClaimParser.forSetting(logger, PRINCIPAL_CLAIM, config, true);
        this.groupsAttribute = ClaimParser.forSetting(logger, GROUPS_CLAIM, config, false);
        this.dnAttribute = ClaimParser.forSetting(logger, DN_CLAIM, config, false);
        this.nameAttribute = ClaimParser.forSetting(logger, NAME_CLAIM, config, false);
        this.mailAttribute = ClaimParser.forSetting(logger, MAIL_CLAIM, config, false);
        this.populateUserMetadata = config.getSetting(POPULATE_USER_METADATA);
        // The realm stores the ID token in the token service, so the latter must be on.
        if (TokenService.isTokenServiceEnabled(config.settings()) == false) {
            throw new IllegalStateException("OpenID Connect Realm requires that the token service be enabled ("
                + XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey() + ")");
        }
        this.openIdConnectAuthenticator =
            new OpenIdConnectAuthenticator(config, opConfiguration, rpConfiguration, sslService, watcherService);
    }

    // For testing
    OpenIdConnectRealm(RealmConfig config, OpenIdConnectAuthenticator authenticator, UserRoleMapper roleMapper) {
        super(config);
        this.roleMapper = roleMapper;
        this.rpConfiguration = buildRelyingPartyConfiguration(config);
        this.opConfiguration = buildOpenIdConnectProviderConfiguration(config);
        this.openIdConnectAuthenticator = authenticator;
        this.principalAttribute = ClaimParser.forSetting(logger, PRINCIPAL_CLAIM, config, true);
        this.groupsAttribute = ClaimParser.forSetting(logger, GROUPS_CLAIM, config, false);
        this.dnAttribute = ClaimParser.forSetting(logger, DN_CLAIM, config, false);
        this.nameAttribute = ClaimParser.forSetting(logger, NAME_CLAIM, config, false);
        this.mailAttribute = ClaimParser.forSetting(logger, MAIL_CLAIM, config, false);
        this.populateUserMetadata = config.getSetting(POPULATE_USER_METADATA);
    }

    @Override
    public void initialize(Iterable<Realm> realms, XPackLicenseState licenseState) {
        if (delegatedRealms != null) {
            throw new IllegalStateException("Realm has already been initialized");
        }
        delegatedRealms = new DelegatedAuthorizationSupport(realms, config, licenseState);
    }

    @Override
    public boolean supports(AuthenticationToken token) {
        return token instanceof OpenIdConnectToken;
    }

    /**
     * Returns true when the token either targets this realm explicitly or does not name
     * a realm at all (in which case every OIDC realm may attempt to consume it).
     */
    private boolean isTokenForRealm(OpenIdConnectToken oidcToken) {
        if (oidcToken.getAuthenticatingRealm() == null) {
            return true;
        } else {
            return oidcToken.getAuthenticatingRealm().equals(this.name());
        }
    }

    @Override
    public AuthenticationToken token(ThreadContext context) {
        // OIDC tokens are not extracted from request headers.
        return null;
    }

    @Override
    public void authenticate(AuthenticationToken token, ActionListener<AuthenticationResult> listener) {
        if (token instanceof OpenIdConnectToken && isTokenForRealm((OpenIdConnectToken) token)) {
            OpenIdConnectToken oidcToken = (OpenIdConnectToken) token;
            openIdConnectAuthenticator.authenticate(oidcToken, ActionListener.wrap(
                jwtClaimsSet -> {
                    buildUserFromClaims(jwtClaimsSet, listener);
                },
                e -> {
                    logger.debug("Failed to consume the OpenIdConnectToken ", e);
                    if (e instanceof ElasticsearchSecurityException) {
                        listener.onResponse(AuthenticationResult.unsuccessful("Failed to authenticate user with OpenID Connect", e));
                    } else {
                        listener.onFailure(e);
                    }
                }));
        } else {
            listener.onResponse(AuthenticationResult.notHandled());
        }
    }

    @Override
    public void lookupUser(String username, ActionListener<User> listener) {
        // This realm does not support user lookup / run-as.
        listener.onResponse(null);
    }

    /**
     * Maps the validated JWT claims to a {@link User}: extracts principal, groups, dn,
     * name and mail via the configured claim parsers, optionally copies all simple
     * claims into user metadata, and resolves roles (delegated or mapped).
     */
    private void buildUserFromClaims(JWTClaimsSet claims, ActionListener<AuthenticationResult> authResultListener) {
        final String principal = principalAttribute.getClaimValue(claims);
        if (Strings.isNullOrEmpty(principal)) {
            // BUGFIX: separator space added so the message doesn't read "...Claim [x]not found in..."
            authResultListener.onResponse(AuthenticationResult.unsuccessful(
                principalAttribute + " not found in " + claims.toJSONObject(), null));
            return;
        }

        final Map<String, Object> tokenMetadata = new HashMap<>();
        tokenMetadata.put("id_token_hint", claims.getClaim("id_token_hint"));
        ActionListener<AuthenticationResult> wrappedAuthResultListener = ActionListener.wrap(auth -> {
            if (auth.isAuthenticated()) {
                // Add the ID Token as metadata on the authentication, so that it can be used for logout requests
                Map<String, Object> metadata = new HashMap<>(auth.getMetadata());
                metadata.put(CONTEXT_TOKEN_DATA, tokenMetadata);
                auth = AuthenticationResult.success(auth.getUser(), metadata);
            }
            authResultListener.onResponse(auth);
        }, authResultListener::onFailure);

        if (delegatedRealms.hasDelegation()) {
            delegatedRealms.resolve(principal, wrappedAuthResultListener);
            return;
        }

        final Map<String, Object> userMetadata;
        if (populateUserMetadata) {
            userMetadata = claims.getClaims().entrySet().stream()
                .filter(entry -> isAllowedTypeForClaim(entry.getValue()))
                .collect(Collectors.toUnmodifiableMap(entry -> "oidc(" + entry.getKey() + ")", Map.Entry::getValue));
        } else {
            userMetadata = Map.of();
        }
        final List<String> groups = groupsAttribute.getClaimValues(claims);
        final String dn = dnAttribute.getClaimValue(claims);
        final String mail = mailAttribute.getClaimValue(claims);
        final String name = nameAttribute.getClaimValue(claims);
        UserRoleMapper.UserData userData = new UserRoleMapper.UserData(principal, dn, groups, userMetadata, config);
        roleMapper.resolveRoles(userData, ActionListener.wrap(roles -> {
            final User user = new User(principal, roles.toArray(Strings.EMPTY_ARRAY), name, mail, userMetadata, true);
            wrappedAuthResultListener.onResponse(AuthenticationResult.success(user));
        }, wrappedAuthResultListener::onFailure));
    }

    /**
     * Builds the Relying Party configuration from the realm settings. URI/type parse
     * failures should have been caught by setting validation, hence the wrapping of
     * "impossible" exceptions in SettingsException.
     */
    private RelyingPartyConfiguration buildRelyingPartyConfiguration(RealmConfig config) {
        final String redirectUriString = require(config, RP_REDIRECT_URI);
        final URI redirectUri;
        try {
            redirectUri = new URI(redirectUriString);
        } catch (URISyntaxException e) {
            // This should never happen as it's already validated in the settings
            throw new SettingsException("Invalid URI:" + RP_REDIRECT_URI.getKey(), e);
        }
        final String postLogoutRedirectUriString = config.getSetting(RP_POST_LOGOUT_REDIRECT_URI);
        final URI postLogoutRedirectUri;
        try {
            postLogoutRedirectUri = new URI(postLogoutRedirectUriString);
        } catch (URISyntaxException e) {
            // This should never happen as it's already validated in the settings
            throw new SettingsException("Invalid URI:" + RP_POST_LOGOUT_REDIRECT_URI.getKey(), e);
        }
        final ClientID clientId = new ClientID(require(config, RP_CLIENT_ID));
        final SecureString clientSecret = config.getSetting(RP_CLIENT_SECRET);
        if (clientSecret.length() == 0) {
            throw new SettingsException("The configuration setting [" + RealmSettings.getFullSettingKey(config, RP_CLIENT_SECRET)
                + "] is required");
        }
        final ResponseType responseType;
        try {
            responseType = ResponseType.parse(require(config, RP_RESPONSE_TYPE));
        } catch (ParseException e) {
            // This should never happen as it's already validated in the settings
            throw new SettingsException("Invalid value for " + RP_RESPONSE_TYPE.getKey(), e);
        }
        final Scope requestedScope = new Scope(config.getSetting(RP_REQUESTED_SCOPES).toArray(Strings.EMPTY_ARRAY));
        if (requestedScope.contains("openid") == false) {
            // The "openid" scope is mandatory for OpenID Connect; add it if missing.
            requestedScope.add("openid");
        }
        final JWSAlgorithm signatureAlgorithm = JWSAlgorithm.parse(require(config, RP_SIGNATURE_ALGORITHM));

        return new RelyingPartyConfiguration(clientId, clientSecret, redirectUri, responseType, requestedScope,
            signatureAlgorithm, postLogoutRedirectUri);
    }

    /**
     * Builds the OpenID Provider configuration from the realm settings. The token
     * endpoint is mandatory for the authorization code grant ("code" response type).
     */
    private OpenIdConnectProviderConfiguration buildOpenIdConnectProviderConfiguration(RealmConfig config) {
        Issuer issuer = new Issuer(require(config, OP_ISSUER));
        String jwkSetUrl = require(config, OP_JWKSET_PATH);
        URI authorizationEndpoint;
        try {
            authorizationEndpoint = new URI(require(config, OP_AUTHORIZATION_ENDPOINT));
        } catch (URISyntaxException e) {
            // This should never happen as it's already validated in the settings
            throw new SettingsException("Invalid URI: " + OP_AUTHORIZATION_ENDPOINT.getKey(), e);
        }
        String responseType = require(config, RP_RESPONSE_TYPE);
        String tokenEndpointString = config.getSetting(OP_TOKEN_ENDPOINT);
        if (responseType.equals("code") && tokenEndpointString.isEmpty()) {
            throw new SettingsException("The configuration setting ["
                + OP_TOKEN_ENDPOINT.getConcreteSettingForNamespace(name()).getKey()
                + "] is required when [" + RP_RESPONSE_TYPE.getConcreteSettingForNamespace(name()).getKey()
                + "] is set to \"code\"");
        }
        URI tokenEndpoint;
        try {
            tokenEndpoint = tokenEndpointString.isEmpty() ? null : new URI(tokenEndpointString);
        } catch (URISyntaxException e) {
            // This should never happen as it's already validated in the settings
            throw new SettingsException("Invalid URL: " + OP_TOKEN_ENDPOINT.getKey(), e);
        }
        URI userinfoEndpoint;
        try {
            userinfoEndpoint = (config.getSetting(OP_USERINFO_ENDPOINT).isEmpty()) ? null :
                new URI(config.getSetting(OP_USERINFO_ENDPOINT));
        } catch (URISyntaxException e) {
            // This should never happen as it's already validated in the settings
            throw new SettingsException("Invalid URI: " + OP_USERINFO_ENDPOINT.getKey(), e);
        }
        URI endsessionEndpoint;
        try {
            endsessionEndpoint = (config.getSetting(OP_ENDSESSION_ENDPOINT).isEmpty()) ? null :
                new URI(config.getSetting(OP_ENDSESSION_ENDPOINT));
        } catch (URISyntaxException e) {
            // This should never happen as it's already validated in the settings
            throw new SettingsException("Invalid URI: " + OP_ENDSESSION_ENDPOINT.getKey(), e);
        }

        return new OpenIdConnectProviderConfiguration(issuer, jwkSetUrl, authorizationEndpoint, tokenEndpoint,
            userinfoEndpoint, endsessionEndpoint);
    }

    // Reads a mandatory string setting, failing fast with a SettingsException when unset.
    private static String require(RealmConfig config, Setting.AffixSetting<String> setting) {
        final String value = config.getSetting(setting);
        if (value.isEmpty()) {
            throw new SettingsException("The configuration setting [" + RealmSettings.getFullSettingKey(config, setting)
                + "] is required");
        }
        return value;
    }

    /**
     * Creates the URI for an OIDC Authentication Request from the realm configuration using URI Query String Serialization and
     * possibly generates a state parameter and a nonce. It then returns the URI, state and nonce encapsulated in a
     * {@link OpenIdConnectPrepareAuthenticationResponse}. A facilitator can provide a state and a nonce parameter in two cases:
     * <ul>
     * <li>In case of Kibana, it allows for a better UX by ensuring that all requests to an OpenID Connect Provider within
     * the same browser context (even across tabs) will use the same state and nonce values.</li>
     * <li>In case of custom facilitators, the implementer might require/support generating the state parameter in order
     * to tie this to an anti-XSRF token.</li>
     * </ul>
     *
     * @param existingState An existing state that can be reused or null if we need to generate one
     * @param existingNonce An existing nonce that can be reused or null if we need to generate one
     * @param loginHint     A String with a login hint to add to the authentication request in case of a 3rd party initiated login
     *
     * @return an {@link OpenIdConnectPrepareAuthenticationResponse}
     */
    public OpenIdConnectPrepareAuthenticationResponse buildAuthenticationRequestUri(@Nullable String existingState,
                                                                                    @Nullable String existingNonce,
                                                                                    @Nullable String loginHint) {
        final State state = existingState != null ? new State(existingState) : new State();
        final Nonce nonce = existingNonce != null ? new Nonce(existingNonce) : new Nonce();
        final AuthenticationRequest.Builder builder = new AuthenticationRequest.Builder(rpConfiguration.getResponseType(),
            rpConfiguration.getRequestedScope(),
            rpConfiguration.getClientId(),
            rpConfiguration.getRedirectUri())
            .endpointURI(opConfiguration.getAuthorizationEndpoint())
            .state(state)
            .nonce(nonce);
        if (Strings.hasText(loginHint)) {
            builder.loginHint(loginHint);
        }
        return new OpenIdConnectPrepareAuthenticationResponse(builder.build().toURI().toString(),
            state.getValue(), nonce.getValue());
    }

    public boolean isIssuerValid(String issuer) {
        return this.opConfiguration.getIssuer().getValue().equals(issuer);
    }

    /**
     * Builds an RP-initiated logout redirect for the OP's end-session endpoint, passing
     * the ID token as a hint; returns an empty response when no endpoint is configured.
     */
    public OpenIdConnectLogoutResponse buildLogoutResponse(JWT idTokenHint) {
        if (opConfiguration.getEndsessionEndpoint() != null) {
            final State state = new State();
            final LogoutRequest logoutRequest = new LogoutRequest(opConfiguration.getEndsessionEndpoint(), idTokenHint,
                rpConfiguration.getPostLogoutRedirectUri(), state);
            return new OpenIdConnectLogoutResponse(logoutRequest.toURI().toString());
        } else {
            return new OpenIdConnectLogoutResponse((String) null);
        }
    }

    @Override
    public void close() {
        openIdConnectAuthenticator.close();
    }

    /*
     * We only map claims that are of Type String, Boolean, or Number, or arrays that contain only these types
     */
    private static boolean isAllowedTypeForClaim(Object o) {
        // Use a wildcard-typed Collection instead of a raw type.
        return (o instanceof String || o instanceof Boolean || o instanceof Number
            || (o instanceof Collection && ((Collection<?>) o).stream()
            .allMatch(c -> c instanceof String || c instanceof Boolean || c instanceof Number)));
    }

    /**
     * Extracts a named claim from a {@link JWTClaimsSet} as a list of strings, optionally
     * post-processing each value through a capturing regular expression.
     */
    static final class ClaimParser {
        private final String name;
        private final Function<JWTClaimsSet, List<String>> parser;

        ClaimParser(String name, Function<JWTClaimsSet, List<String>> parser) {
            this.name = name;
            this.parser = parser;
        }

        List<String> getClaimValues(JWTClaimsSet claims) {
            return parser.apply(claims);
        }

        /** Returns the first value of the claim, or null when the claim is absent/empty. */
        String getClaimValue(JWTClaimsSet claims) {
            List<String> claimValues = parser.apply(claims);
            if (claimValues == null || claimValues.isEmpty()) {
                return null;
            } else {
                return claimValues.get(0);
            }
        }

        @Override
        public String toString() {
            return name;
        }

        private static Collection<String> parseClaimValues(JWTClaimsSet claimsSet, String claimName, String settingKey) {
            Collection<String> values;
            final Object claimValueObject = claimsSet.getClaim(claimName);
            if (claimValueObject == null) {
                values = List.of();
            } else if (claimValueObject instanceof String) {
                values = List.of((String) claimValueObject);
            } else if (claimValueObject instanceof Collection
                && ((Collection<?>) claimValueObject).stream().allMatch(c -> c instanceof String)) {
                @SuppressWarnings("unchecked") // every element was just verified to be a String
                Collection<String> stringValues = (Collection<String>) claimValueObject;
                values = stringValues;
            } else {
                // BUGFIX: balanced brackets in the error message ("[ ... " -> "[...]").
                throw new SettingsException("Setting [" + settingKey + "] expects a claim with String or a String Array value");
            }
            return values;
        }

        /**
         * Builds a ClaimParser for the given claim setting. A configured pattern must
         * capture the desired value in group(1); values that are null, unmatched, or
         * capture an empty group are dropped (with a debug log).
         */
        static ClaimParser forSetting(Logger logger, OpenIdConnectRealmSettings.ClaimSetting setting, RealmConfig realmConfig,
                                      boolean required) {
            if (realmConfig.hasSetting(setting.getClaim())) {
                String claimName = realmConfig.getSetting(setting.getClaim());
                if (realmConfig.hasSetting(setting.getPattern())) {
                    Pattern regex = Pattern.compile(realmConfig.getSetting(setting.getPattern()));
                    return new ClaimParser(
                        "OpenID Connect Claim [" + claimName + "] with pattern [" + regex.pattern() + "] for ["
                            + setting.name(realmConfig) + "]",
                        claims -> {
                            Collection<String> values =
                                parseClaimValues(claims, claimName, RealmSettings.getFullSettingKey(realmConfig, setting.getClaim()));
                            return values.stream().map(s -> {
                                if (s == null) {
                                    logger.debug("OpenID Connect Claim [{}] is null", claimName);
                                    return null;
                                }
                                final Matcher matcher = regex.matcher(s);
                                if (matcher.find() == false) {
                                    logger.debug("OpenID Connect Claim [{}] is [{}], which does not match [{}]",
                                        claimName, s, regex.pattern());
                                    return null;
                                }
                                final String value = matcher.group(1);
                                if (Strings.isNullOrEmpty(value)) {
                                    logger.debug("OpenID Connect Claim [{}] is [{}], which does match [{}] but group(1) is empty",
                                        claimName, s, regex.pattern());
                                    return null;
                                }
                                return value;
                            }).filter(Objects::nonNull).collect(Collectors.toUnmodifiableList());
                        });
                } else {
                    return new ClaimParser(
                        "OpenID Connect Claim [" + claimName + "] for [" + setting.name(realmConfig) + "]",
                        claims -> parseClaimValues(claims, claimName, RealmSettings.getFullSettingKey(realmConfig, setting.getClaim()))
                            .stream()
                            .filter(Objects::nonNull)
                            .collect(Collectors.toUnmodifiableList()));
                }
            } else if (required) {
                throw new SettingsException("Setting [" + RealmSettings.getFullSettingKey(realmConfig, setting.getClaim())
                    + "] is required");
            } else if (realmConfig.hasSetting(setting.getPattern())) {
                throw new SettingsException("Setting [" + RealmSettings.getFullSettingKey(realmConfig, setting.getPattern())
                    + "] cannot be set unless [" + RealmSettings.getFullSettingKey(realmConfig, setting.getClaim())
                    + "] is also set");
            } else {
                return new ClaimParser("No OpenID Connect Claim for [" + setting.name(realmConfig) + "]",
                    attributes -> List.of());
            }
        }
    }
}
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.commons.pool.impl; import java.util.ArrayList; import java.util.BitSet; import java.util.List; import java.util.NoSuchElementException; import org.apache.commons.pool.ObjectPool; import org.apache.commons.pool.PoolableObjectFactory; import org.apache.commons.pool.TestBaseObjectPool; /** * @author Rodney Waldhoff * @author Dirk Verbeeck * @author Sandy McArthur * @version $Revision: 960644 $ $Date: 2010-07-05 10:15:07 -0700 (Mon, 05 Jul 2010) $ */ public class TestStackObjectPool extends TestBaseObjectPool { public TestStackObjectPool(String testName) { super(testName); } protected ObjectPool makeEmptyPool(int mincap) { return new StackObjectPool(new SimpleFactory()); } protected ObjectPool makeEmptyPool(final PoolableObjectFactory factory) { return new StackObjectPool(factory); } protected Object getNthObject(int n) { return String.valueOf(n); } public void testIdleCap() throws Exception { ObjectPool pool = makeEmptyPool(8); Object[] active = new Object[100]; for(int i=0;i<100;i++) { active[i] = pool.borrowObject(); } assertEquals(100,pool.getNumActive()); assertEquals(0,pool.getNumIdle()); for(int i=0;i<100;i++) { pool.returnObject(active[i]); assertEquals(99 - 
i,pool.getNumActive()); assertEquals((i < 8 ? i+1 : 8),pool.getNumIdle()); } } /** * @deprecated - to be removed in pool 2.0 */ public void testPoolWithNullFactory() throws Exception { ObjectPool pool = new StackObjectPool(10); for(int i=0;i<10;i++) { pool.returnObject(new Integer(i)); } for(int j=0;j<3;j++) { Integer[] borrowed = new Integer[10]; BitSet found = new BitSet(); for(int i=0;i<10;i++) { borrowed[i] = (Integer)(pool.borrowObject()); assertNotNull(borrowed); assertTrue(!found.get(borrowed[i].intValue())); found.set(borrowed[i].intValue()); } for(int i=0;i<10;i++) { pool.returnObject(borrowed[i]); } } pool.invalidateObject(pool.borrowObject()); pool.invalidateObject(pool.borrowObject()); pool.clear(); } /** * @deprecated - to be removed in pool 2.0 */ public void testBorrowFromEmptyPoolWithNullFactory() throws Exception { ObjectPool pool = new StackObjectPool(); try { pool.borrowObject(); fail("Expected NoSuchElementException"); } catch(NoSuchElementException e) { // expected } } /** * @deprecated - to be removed in pool 2.0 */ public void testSetFactory() throws Exception { ObjectPool pool = new StackObjectPool(); try { pool.borrowObject(); fail("Expected NoSuchElementException"); } catch(NoSuchElementException e) { // expected } pool.setFactory(new SimpleFactory()); Object obj = pool.borrowObject(); assertNotNull(obj); pool.returnObject(obj); } /** * @deprecated - to be removed in pool 2.0 */ public void testCantResetFactoryWithActiveObjects() throws Exception { ObjectPool pool = new StackObjectPool(); pool.setFactory(new SimpleFactory()); Object obj = pool.borrowObject(); assertNotNull(obj); try { pool.setFactory(new SimpleFactory()); fail("Expected IllegalStateException"); } catch(IllegalStateException e) { // expected } } /** * @deprecated - to be removed in pool 2.0 */ public void testCanResetFactoryWithoutActiveObjects() throws Exception { ObjectPool pool = new StackObjectPool(); { pool.setFactory(new SimpleFactory()); Object obj = 
pool.borrowObject(); assertNotNull(obj); pool.returnObject(obj); } { pool.setFactory(new SimpleFactory()); Object obj = pool.borrowObject(); assertNotNull(obj); pool.returnObject(obj); } } /** * Verifies that validation failures when borrowing newly created instances * from the pool result in NoSuchElementExceptions and passivation failures * result in instances not being returned to the pool. */ public void testBorrowWithSometimesInvalidObjects() throws Exception { SelectiveFactory factory = new SelectiveFactory(); factory.setValidateSelectively(true); // Even numbers fail validation factory.setPassivateSelectively(true); // Multiples of 3 fail passivation ObjectPool pool = new StackObjectPool(factory, 20); Object[] obj = new Object[10]; for(int i=0;i<10;i++) { Object object = null; int k = 0; while (object == null && k < 100) { // bound not really needed try { k++; object = pool.borrowObject(); if (((Integer) object).intValue() % 2 == 0) { fail("Expecting NoSuchElementException"); } else { obj[i] = object; } } catch (NoSuchElementException ex) { // Should fail for evens } } assertEquals("Each time we borrow, get one more active.", i+1, pool.getNumActive()); } // 1,3,5,...,19 pass validation, get checked out for(int i=0;i<10;i++) { pool.returnObject(obj[i]); assertEquals("Each time we return, get one less active.", 9-i, pool.getNumActive()); } // 3, 9, 15 fail passivation. assertEquals(7,pool.getNumIdle()); assertEquals(new Integer(19), pool.borrowObject()); assertEquals(new Integer(17), pool.borrowObject()); assertEquals(new Integer(13), pool.borrowObject()); assertEquals(new Integer(11), pool.borrowObject()); assertEquals(new Integer(7), pool.borrowObject()); assertEquals(new Integer(5), pool.borrowObject()); assertEquals(new Integer(1), pool.borrowObject()); } /** * Verifies that validation and passivation failures returning objects are handled * properly - instances destroyed and not returned to the pool, but no exceptions propagated. 
*/ public void testBorrowReturnWithSometimesInvalidObjects() throws Exception { SelectiveFactory factory = new SelectiveFactory(); ObjectPool pool = new StackObjectPool(factory, 20); Object[] obj = new Object[10]; for(int i=0;i<10;i++) { obj[i] = pool.borrowObject(); assertEquals("Each time we borrow, get one more active.", i+1, pool.getNumActive()); } factory.setValidateSelectively(true); // Even numbers fail validation factory.setPassivateSelectively(true); // Multiples of 3 fail passivation for(int i=0;i<10;i++) { pool.returnObject(obj[i]); assertEquals("Each time we return, get one less active.", 9-i, pool.getNumActive()); } // 0,2,4,6,8 fail validation, 3, 9 fail passivation - 3 left. assertEquals(3,pool.getNumIdle()); } public void testVariousConstructors() throws Exception { { StackObjectPool pool = new StackObjectPool(); assertNotNull(pool); } { StackObjectPool pool = new StackObjectPool(10); assertNotNull(pool); } { StackObjectPool pool = new StackObjectPool(10,5); assertNotNull(pool); } { StackObjectPool pool = new StackObjectPool(null); assertNotNull(pool); } { StackObjectPool pool = new StackObjectPool(null,10); assertNotNull(pool); } { StackObjectPool pool = new StackObjectPool(null,10,5); assertNotNull(pool); } } /** * Verify that out of range constructor arguments are ignored. */ public void testMaxIdleInitCapacityOutOfRange() throws Exception { SimpleFactory factory = new SimpleFactory(); StackObjectPool pool = new StackObjectPool(factory, -1, 0); assertEquals(pool.getMaxSleeping(), StackObjectPool.DEFAULT_MAX_SLEEPING); pool.addObject(); pool.close(); } /** * Verifies that when returning objects cause maxSleeping exceeded, oldest instances * are destroyed to make room for returning objects. 
*/ public void testReturnObjectDiscardOrder() throws Exception { SelectiveFactory factory = new SelectiveFactory(); ObjectPool pool = new StackObjectPool(factory, 3); // borrow more objects than the pool can hold Integer i0 = (Integer)pool.borrowObject(); Integer i1 = (Integer)pool.borrowObject(); Integer i2 = (Integer)pool.borrowObject(); Integer i3 = (Integer)pool.borrowObject(); // tests // return as many as the pool will hold. pool.returnObject(i0); pool.returnObject(i1); pool.returnObject(i2); // the pool should now be full. assertEquals("No returned objects should have been destroyed yet.", 0, factory.getDestroyed().size()); // cause the pool to discard a stale object. pool.returnObject(i3); assertEquals("One object should have been destroyed.", 1, factory.getDestroyed().size()); // check to see what object was destroyed Integer d = (Integer)factory.getDestroyed().get(0); assertEquals("Destoryed object should be the stalest object.", i0, d); } /** * Verifies that exceptions thrown by factory activate method are not propagated to * the caller. Objects that throw on activate are destroyed and if none succeed, * the caller gets NoSuchElementException. */ public void testExceptionOnActivate() throws Exception { SelectiveFactory factory = new SelectiveFactory(); ObjectPool pool = new StackObjectPool(factory); pool.addObject(); pool.addObject(); factory.setThrowOnActivate(true); try { pool.borrowObject(); fail("Expecting NoSuchElementException"); } catch (NoSuchElementException ex) { // expected } assertEquals(0, pool.getNumIdle()); assertEquals(0, pool.getNumActive()); } /** * Verifies that exceptions thrown by factory destroy are swallowed * by both addObject and returnObject. 
*/ public void testExceptionOnDestroy() throws Exception { SelectiveFactory factory = new SelectiveFactory(); ObjectPool pool = new StackObjectPool(factory, 2); factory.setThrowOnDestroy(true); for (int i = 0; i < 3; i++) { pool.addObject(); // Third one will destroy, exception should be swallowed } assertEquals(2, pool.getNumIdle()); Object[] objects = new Object[3]; for (int i = 0; i < 3; i++) { objects[i] = pool.borrowObject(); } for (int i = 0; i < 3; i++) { pool.returnObject(objects[i]); // Third triggers destroy } assertEquals(2, pool.getNumIdle()); } /** * Verifies that addObject propagates exceptions thrown by * factory passivate, but returnObject swallows these. */ public void testExceptionOnPassivate() throws Exception { SelectiveFactory factory = new SelectiveFactory(); ObjectPool pool = new StackObjectPool(factory, 2); factory.setThrowOnPassivate(true); // addObject propagates try { pool.addObject(); fail("Expecting IntegerFactoryException"); } catch (IntegerFactoryException ex) { assertEquals("passivateObject", ex.getType()); assertEquals(0, ex.getValue()); } assertEquals(0, pool.getNumIdle()); // returnObject swallows Object obj = pool.borrowObject(); pool.returnObject(obj); assertEquals(0, pool.getNumIdle()); } /** * Verifies that validation exceptions always propagate */ public void testExceptionOnValidate() throws Exception { SelectiveFactory factory = new SelectiveFactory(); ObjectPool pool = new StackObjectPool(factory, 2); factory.setThrowOnValidate(true); // addObject try { pool.addObject(); fail("Expecting IntegerFactoryException"); } catch (IntegerFactoryException ex) { assertEquals("validateObject", ex.getType()); } assertEquals(0, pool.getNumIdle()); // returnObject factory.setThrowOnValidate(false); Object obj = pool.borrowObject(); factory.setThrowOnValidate(true); try { pool.returnObject(obj); fail("Expecting IntegerFactoryException"); } catch (IntegerFactoryException ex) { assertEquals("validateObject", ex.getType()); } assertEquals(0, 
pool.getNumIdle()); // borrowObject - throws NoSuchElementException try { pool.borrowObject(); fail("Expecting NoSuchElementException"); } catch (NoSuchElementException ex) { // Expected } } /** * Verifies that exceptions thrown by makeObject are propagated. */ public void testExceptionOnMake() throws Exception { SelectiveFactory factory = new SelectiveFactory(); factory.setThrowOnMake(true); ObjectPool pool = new StackObjectPool(factory); try { pool.borrowObject(); fail("Expecting IntegerFactoryException"); } catch (IntegerFactoryException ex) { assertEquals("makeObject", ex.getType()); } try { pool.addObject(); fail("Expecting IntegerFactoryException"); } catch (IntegerFactoryException ex) { assertEquals("makeObject", ex.getType()); } } /** * Verifies NoSuchElementException when the factory returns a null object in borrowObject */ public void testMakeNull() throws Exception { SelectiveFactory factory = new SelectiveFactory(); ObjectPool pool = new StackObjectPool(factory); factory.setMakeNull(true); try { pool.borrowObject(); fail("Expecting NoSuchElementException"); } catch (NoSuchElementException ex) { // Expected } } /** * Verifies that initIdleCapacity is not a hard limit, but maxIdle is. */ public void testInitIdleCapacityExceeded() throws Exception { PoolableObjectFactory factory = new SimpleFactory(); ObjectPool pool = new StackObjectPool(factory, 2, 1); pool.addObject(); pool.addObject(); assertEquals(2, pool.getNumIdle()); pool.close(); pool = new StackObjectPool(factory, 1, 2); pool.addObject(); pool.addObject(); assertEquals(1, pool.getNumIdle()); } /** * Verifies close contract - idle instances are destroyed, returning instances * are destroyed, add/borrowObject throw IllegalStateException. 
*/ public void testClose() throws Exception { SelectiveFactory factory = new SelectiveFactory(); ObjectPool pool = new StackObjectPool(factory); pool.addObject(); // 0 pool.addObject(); // 1 pool.addObject(); // 2 Integer two = (Integer) pool.borrowObject(); assertEquals(2, two.intValue()); pool.close(); assertEquals(0, pool.getNumIdle()); assertEquals(1, pool.getNumActive()); List destroyed = factory.getDestroyed(); assertEquals(2, destroyed.size()); assertTrue(destroyed.contains(new Integer(0))); assertTrue(destroyed.contains(new Integer(0))); pool.returnObject(two); assertTrue(destroyed.contains(two)); try { pool.addObject(); fail("Expecting IllegalStateException"); } catch (IllegalStateException ex) { // Expected } try { pool.borrowObject(); fail("Expecting IllegalStateException"); } catch (IllegalStateException ex) { // Expected } } /** * Simple factory that creates Integers. Validation and other factory methods * always succeed. */ static class SimpleFactory implements PoolableObjectFactory { int counter = 0; public Object makeObject() { return String.valueOf(counter++); } public void destroyObject(Object obj) { } public boolean validateObject(Object obj) { return true; } public void activateObject(Object obj) { } public void passivateObject(Object obj) { } } /** * Integer factory that fails validation and other factory methods "selectively" and * tracks object destruction. 
*/ static class SelectiveFactory implements PoolableObjectFactory { private List destroyed = new ArrayList(); private int counter = 0; private boolean validateSelectively = false; // true <-> validate returns false for even Integers private boolean passivateSelectively = false; // true <-> passivate throws RTE if Integer = 0 mod 3 private boolean throwOnDestroy = false; // true <-> destroy throws RTE (always) private boolean throwOnActivate = false; // true <-> activate throws RTE (always) private boolean throwOnMake = false; // true <-> make throws RTE (always) private boolean throwOnValidate= false; // true <-> validate throws RTE (always) private boolean throwOnPassivate = false; // true <-> passivate throws RTE (always) private boolean makeNull = false; // true <-> make returns null public Object makeObject() { if (throwOnMake) { final int next = counter + 1; throw new IntegerFactoryException("makeObject", next); } else { return makeNull? null : new Integer(counter++); } } public void destroyObject(Object obj) { if (throwOnDestroy) { final Integer integer = (Integer)obj; throw new IntegerFactoryException("destroyObject", integer.intValue()); } destroyed.add(obj); } public boolean validateObject(Object obj) { if (throwOnValidate) { final Integer integer = (Integer)obj; throw new IntegerFactoryException("validateObject", integer.intValue()); } if (validateSelectively) { // only odd objects are valid if(obj instanceof Integer) { return ((((Integer)obj).intValue() % 2) == 1); } else { return false; } } return true; } public void activateObject(Object obj) { if (throwOnActivate) { final Integer integer = (Integer)obj; throw new IntegerFactoryException("activateObject", integer.intValue()); } } public void passivateObject(Object obj) { if (throwOnPassivate) { final Integer integer = (Integer)obj; throw new IntegerFactoryException("passivateObject", integer.intValue()); } if (passivateSelectively) { final Integer integer = (Integer)obj; if (integer.intValue() % 3 == 
0) { throw new IntegerFactoryException("passivateObject", integer.intValue()); } } } public List getDestroyed() { return destroyed; } public void setCounter(int counter) { this.counter = counter; } public void setValidateSelectively(boolean validateSelectively) { this.validateSelectively = validateSelectively; } public void setPassivateSelectively(boolean passivateSelectively) { this.passivateSelectively = passivateSelectively; } public void setThrowOnDestroy(boolean throwOnDestroy) { this.throwOnDestroy = throwOnDestroy; } public void setThrowOnActivate(boolean throwOnActivate) { this.throwOnActivate = throwOnActivate; } public void setThrowOnMake(boolean throwOnMake) { this.throwOnMake = throwOnMake; } public void setThrowOnPassivate(boolean throwOnPassivate) { this.throwOnPassivate = throwOnPassivate; } public void setThrowOnValidate(boolean throwOnValidate) { this.throwOnValidate = throwOnValidate; } public void setMakeNull(boolean makeNull) { this.makeNull = makeNull; } } static class IntegerFactoryException extends RuntimeException { private String type; private int value; public IntegerFactoryException(String type, int value) { super(type + " failed. Value: " + value); this.type = type; this.value = value; } public String getType() { return type; } public int getValue() { return value; } } protected boolean isLifo() { return true; } protected boolean isFifo() { return false; } }
/* * Copyright 2005-2014 The Kuali Foundation * * Licensed under the Educational Community License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.opensource.org/licenses/ecl1.php * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kuali.kra.award.paymentreports.awardreports; import org.kuali.kra.award.contacts.AwardSponsorContact; import org.kuali.kra.infrastructure.KeyConstants; import org.kuali.kra.infrastructure.KraServiceLocator; import org.kuali.kra.rules.ResearchDocumentRuleBase; import org.kuali.rice.krad.service.KeyValuesService; import org.kuali.rice.krad.util.GlobalVariables; import java.util.*; /** * The AwardPaymentScheduleRuleImpl */ public class AwardReportTermRecipientRuleImpl extends ResearchDocumentRuleBase implements AwardReportTermRecipientRule { private static final String AWARD_REPORT_TERM_RECIPIENT_CONTACT_ID_PROPERTY = "contactId"; private static final String AWARD_REPORT_TERM_RECIPIENT_RELODEX_ID_PROPERTY="rolodexId"; private static final String CONTACT_ERROR_PARM = "Contact Type (Contact)"; private static final String ORGANIZATION_ERROR_PARM = "Name/Organization (Organization)"; /** * * @see org.kuali.kra.award.paymentreports.paymentschedule.AwardPaymentScheduleRule#processAwardPaymentScheduleBusinessRules( * org.kuali.kra.award.paymentreports.paymentschedule.AwardPaymentScheduleRuleEvent) */ public boolean processAwardReportTermRecipientBusinessRules(AwardReportTermRecipientRuleEvent event) { return processCommonValidations(event); } /** * * This method processes new AwardPaymentSchedule rules * * @param event * @return */ public boolean 
processAddAwardReportTermRecipientBusinessRules(AddAwardReportTermRecipientRuleEvent event) { AwardReportTermRecipient awardReportTermRecipientItemForValidation = event.getAwardReportTermRecipientItemForValidation(); return areRequiredFieldsComplete(awardReportTermRecipientItemForValidation) && processCommonValidations(event) && validateContactAndOrganizationAreBothNotSelected(awardReportTermRecipientItemForValidation); } private boolean processCommonValidations(AwardReportTermRecipientRuleEvent event) { AwardReportTermRecipient awardReportTermRecipientItem = event.getAwardReportTermRecipientItemForValidation(); List<AwardReportTermRecipient> items = event.getParentAwardReportTerm().getAwardReportTermRecipients(); return isUnique(items, awardReportTermRecipientItem); } /** * An payment schedule item is unique if no other matching items are in the collection * To know if this is a new add or an edit of an existing equipment item, we check * the identifier for nullity. If null, this is an add; otherwise, it's an update * If an update, then we expect to find one match in the collection (itself). 
If an add, * we expect to find no matches in the collection * @param paymentScheduleItems * @param paymentScheduleItem * @return */ protected boolean isUnique(List<AwardReportTermRecipient> awardReportTermRecipientItems, AwardReportTermRecipient awardReportTermRecipientItem) { boolean duplicateFound = false; ArrayList<String> contactRecipients = new ArrayList<String>(); ArrayList<String> rolodexRecipients = new ArrayList<String>(); ArrayList<String> rolodexRecipientsWithNullContactId = new ArrayList<String>(); for (AwardReportTermRecipient listItem : awardReportTermRecipientItems) { if (listItem != null) { if(listItem.getContactId() == null && listItem.getRolodex() != null && !rolodexRecipientsWithNullContactId.contains(listItem.getRolodexId())){ rolodexRecipientsWithNullContactId.add(listItem.getRolodexId().toString()) ; } if (listItem.getContactId() != null) { duplicateFound = checkStringInList(listItem.getContactId().toString(), contactRecipients); } else { duplicateFound = checkStringInList(listItem.getRolodexId().toString(), rolodexRecipients); } if (duplicateFound) { break; } } } if (!duplicateFound && awardReportTermRecipientItem != null) { if (awardReportTermRecipientItem.getContactId() != null && !contactRecipients.isEmpty()) { duplicateFound = checkStringInList(awardReportTermRecipientItem.getContactId().toString(), contactRecipients); if(!duplicateFound){ Integer rolodexId = getRolodexIdFromContactId(awardReportTermRecipientItem.getContactId()); if(rolodexId != null){ duplicateFound = checkStringInList(rolodexId.toString(), rolodexRecipientsWithNullContactId); } } } else { if(awardReportTermRecipientItem.getRolodexId() != null){ duplicateFound = checkStringInList(awardReportTermRecipientItem.getRolodexId().toString(), rolodexRecipients); }else{ if(awardReportTermRecipientItem.getContactId() != null){ Integer rolodexId = getRolodexIdFromContactId(awardReportTermRecipientItem.getContactId()); if(rolodexId != null){ duplicateFound = 
checkStringInList(rolodexId.toString(), rolodexRecipients); } } } } } if (duplicateFound) { if (!hasDuplicateErrorBeenReported()) { reportError("AwardReportTermRecipient", KeyConstants.ERROR_AWARD_REPORT_TERM_RECIPIENT_ITEM_NOT_UNIQUE); } } return !duplicateFound; } private boolean checkStringInList(String receipient, ArrayList<String> recipientList) { boolean exists = false; if (recipientList.contains(receipient)) { exists = true; } else { recipientList.add(receipient); } return exists; } /** * Validate required fields present * @param equipmentItem * @return */ boolean areRequiredFieldsComplete(AwardReportTermRecipient awardReportTermRecipientItem) { boolean itemValid = awardReportTermRecipientItem.getContactId() != null || awardReportTermRecipientItem.getRolodexId() != null; if(!itemValid){ reportError(AWARD_REPORT_TERM_RECIPIENT_RELODEX_ID_PROPERTY, KeyConstants.ERROR_REQUIRED_ORGANIZATION_FIELD); } return itemValid; } /** * This method... * @param awardReportTermRecipientItemForValidation TODO */ boolean validateContactAndOrganizationAreBothNotSelected(AwardReportTermRecipient awardReportTermRecipientItemForValidation) { boolean itemValid = !(awardReportTermRecipientItemForValidation.getContactId() != null && awardReportTermRecipientItemForValidation.getRolodexId() != null); if(!itemValid){ reportError(AWARD_REPORT_TERM_RECIPIENT_CONTACT_ID_PROPERTY, KeyConstants.ERROR_BOTH_SPONSOR_AND_ROLODEX_ARE_SELECTED, CONTACT_ERROR_PARM, ORGANIZATION_ERROR_PARM); } return itemValid; } private boolean hasDuplicateErrorBeenReported() { return GlobalVariables.getMessageMap().containsMessageKey(KeyConstants.ERROR_AWARD_REPORT_TERM_RECIPIENT_ITEM_NOT_UNIQUE); } private Integer getRolodexIdFromContactId(Long contactId){ Collection<AwardSponsorContact> awardSponsorContacts = getSponsorContactsUsingKeyValuesService(contactId); Integer rolodexId = null; if(awardSponsorContacts.size()>1){ throw new MultipleSponsorContactsException(awardSponsorContacts.size()); } 
for(AwardSponsorContact awardSponsorContact: awardSponsorContacts){ rolodexId = awardSponsorContact.getRolodexId(); } return rolodexId; } @SuppressWarnings("all") private Collection<AwardSponsorContact> getSponsorContactsUsingKeyValuesService(Long contactId){ Map<String, Object> map = new HashMap<String, Object>(); map.put("awardContactId", contactId); return getKeyValuesService().findMatching(AwardSponsorContact.class, map); } /** * * This is a wrapper method for the retrieval of KeyValuesService. * * @return */ protected KeyValuesService getKeyValuesService(){ return KraServiceLocator.getService(KeyValuesService.class); } }
package by.premiya.olga.project.util; import by.premiya.olga.project.entity.Accumulator; import by.premiya.olga.project.entity.Image; import by.premiya.olga.project.entity.Wheel; import by.premiya.olga.project.service.ImageService; import by.premiya.olga.project.service.ProductService; import org.apache.commons.fileupload.FileItem; import org.apache.commons.fileupload.disk.DiskFileItemFactory; import org.apache.commons.fileupload.servlet.ServletFileUpload; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.security.authentication.encoding.PasswordEncoder; import org.springframework.stereotype.Component; import javax.imageio.ImageIO; import javax.servlet.ServletOutputStream; import javax.servlet.http.Cookie; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import java.io.File; import java.io.IOException; import java.lang.reflect.Field; import java.util.List; /** * @author vabramov */ @Component public final class Utils { private Logger logger = LoggerFactory.getLogger(Utils.class); @Autowired private ImageService imageService; @Autowired private ProductService productService; @Autowired private PasswordEncoder passwordEncoder; private String OS = System.getProperty("os.name").toLowerCase(); private String classPath = Utils.class.getProtectionDomain().getCodeSource().getLocation().toString(); private String imagePath = classPath.substring(5, classPath.indexOf("WEB-INF")) + "assets/img/";; // { // imagePath = classPath.substring(5, classPath.indexOf("WEB-INF")) + "assets/img/"; // } private Utils() { } public boolean sendImage(HttpServletResponse res, Integer imageId) { ServletOutputStream outputStream = null; Image image = imageService.getById(imageId); if (image == null) return sendDefaultImage(res); try { res.setContentType(image.getContentType()); outputStream = res.getOutputStream(); return ImageIO.write(ImageIO.read(new 
File(image.getPath())), image.getType(), outputStream); } catch (IOException e) { logger.error("File '" + image.getPath() + "' not send"); return sendDefaultImage(res); } } public boolean sendGifImage(HttpServletResponse res, String from, String content) { res.setContentType("image/gif"); try { ServletOutputStream outputStream = res.getOutputStream(); return ImageIO.write(ImageIO.read(new File(imagePath + from + "/" + content + ".gif")), "GIF", outputStream); } catch (IOException e) { logger.error("File '" + content + ".gif' not send"); return sendDefaultImage(res); } } private boolean sendDefaultImage(HttpServletResponse res) { res.setContentType("image/jpeg"); try { ServletOutputStream outputStream = res.getOutputStream(); return ImageIO.write(ImageIO.read(new File(imagePath + "errors/image_not_available.jpg")), "JPEG", outputStream); } catch (IOException e1) { logger.error("Default image 'image_not_available.jpg' not send"); } return false; } public boolean uploadImage(HttpServletRequest request, String productName, String producer, String model) { boolean isMultipart; String filePath = ""; int maxFileSize = 50 * 1024; int maxMemSize = 4 * 1024; isMultipart = ServletFileUpload.isMultipartContent(request); if (!isMultipart) { return false; } DiskFileItemFactory factory = new DiskFileItemFactory(); factory.setSizeThreshold(maxMemSize); if (isWindows()) { filePath = "D:\\Files\\work-project\\" + productName +"\\"; } else if (isUnix()) { filePath = "/home/vlad/Pictures/work-project/" + productName + "/"; } factory.setRepository(new File(filePath)); ServletFileUpload upload = new ServletFileUpload(factory); upload.setSizeMax(maxFileSize); Object product = productService.getProductByModel(productName, model); Image existingImage; try { Field imageId = product.getClass().getDeclaredField("imageId"); imageId.setAccessible(true); existingImage = imageService.getById((Integer)imageId.get(product)); } catch (NoSuchFieldException | IllegalAccessException ignored) { return 
false; } try { List fileItems = upload.parseRequest(request); for (Object fileItem : fileItems) { FileItem fi = (FileItem) fileItem; if (!fi.isFormField()) { String fileName = encodeName(fi.getName(), product); String contentType = fi.getContentType(); File file = new File(filePath + fileName); Image image = new Image(getImageType(fileName), contentType, file.getPath()); if (existingImage != null) { File oldFile = new File(existingImage.getPath()); oldFile.delete(); existingImage.setContentType(image.getContentType()); existingImage.setPath(image.getPath()); existingImage.setType(image.getType()); image = existingImage; } imageService.save(image); productService.updateProduct(setImageId(product,image.getId())); fi.write(file); fi.delete(); return true; } } } catch (Exception ex) { System.out.println(ex); } return false; } private String encodeName(String name, Object product) { String salt = ""; if (product instanceof Wheel) { salt = String.valueOf(((Wheel) product).getId()); } String extension = name.substring(name.lastIndexOf('.')); String fileName = name.substring(0, name.lastIndexOf('.')); return passwordEncoder.encodePassword(fileName, salt) + extension; } private Object setImageId(Object product, Integer imageId) { if (product instanceof Wheel) { ((Wheel)product).setImageId(imageId); } else if (product instanceof Accumulator) { ((Accumulator)product).setImageId(imageId); } return product; } public String getImageType(String imageName) { String extension = imageName.substring(imageName.lastIndexOf('.') + 1); if ("jpg".equalsIgnoreCase(extension)) { return "JPEG"; } else if ("png".equalsIgnoreCase(extension)) { return "PNG"; } return "JPEG"; } public boolean isWindows() { return (OS.contains("win")); } public boolean isMac() { return (OS.contains("mac")); } public boolean isUnix() { return (OS.contains("nix") || OS.contains("nux") || (OS.contains("aix"))); } public boolean isSolaris() { return (OS.contains("sunos")); } public boolean 
hasCookie(HttpServletRequest req, String cookieName) { if (req.getCookies() != null) { for (Cookie cookie : req.getCookies()) { if (cookie.getName().equals(cookieName)) { if (logger.isDebugEnabled()) { logger.debug("In request found cookie '" + cookieName + "'"); } return true; } } } return false; } public void setLogoutCookie(HttpServletResponse response) { response.addCookie(new Cookie("loc", null)); // Cookie lic = new Cookie("lic", null); // lic.setMaxAge(0); // response.addCookie(lic); } public void removeLogoutCookie(HttpServletResponse response) { // response.addCookie(new Cookie("lic",null)); Cookie cookie = new Cookie("loc", null); cookie.setMaxAge(0); response.addCookie(cookie); } }
package com.winterwell.utils.time; import java.text.DateFormat; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Calendar; import java.util.Date; import java.util.TimeZone; import java.util.concurrent.atomic.AtomicBoolean; import java.util.regex.Matcher; import java.util.regex.Pattern; import com.winterwell.utils.MathUtils; import com.winterwell.utils.StrUtils; import com.winterwell.utils.Utils; /** * String -> Time code. * * TODO refactor this, with options and stuff. Maybe even support other languages. * * @author daniel * @testedby {@link TimeParserTest} */ public class TimeParser { /** * Parse a string representing a time/date. Uses the * {@link SimpleDateFormat} format. * <p> * If the pattern does *not* contain a timezone (Z), this method will * enforce GMT. Otherwise Java can get cute and use summer time. * <p> * If calling this method a lot, you should may want to use * {@link SimpleDateFormat} directly to avoid reparsing the pattern. * * @param string * @param format * E.g. "HH:mm:ss dd/MM/yyyy" See {@link SimpleDateFormat} for * details. * <table border=0 cellspacing=3 cellpadding=0 summary= * * * "Chart shows pattern letters, date/time component, presentation, and examples." 
* * * > * <tr bgcolor="#ccccff"> * <th align=left>Letter <th align=left>Date or Time Component * <th align=left>Presentation <th align=left>Examples * <tr> * <td><code>G</code> <td>Era designator <td>Text <td><code>AD * </code> * <tr bgcolor="#eeeeff"> * <td><code>y</code> <td>Year <td>Year <td><code>1996</code>; * <code>96</code> * <tr> * <td><code>M</code> <td>Month in year <td>Month <td><code>July * </code>; <code>Jul</code>; <code>07</code> * <tr bgcolor="#eeeeff"> * <td><code>w</code> <td>Week in year <td>Number <td><code>27 * </code> * <tr> * <td><code>W</code> <td>Week in month <td>Number <td><code>2 * </code> * <tr bgcolor="#eeeeff"> * <td><code>D</code> <td>Day in year <td>Number <td><code>189 * </code> * <tr> * <td><code>d</code> <td>Day in month <td>Number <td><code>10 * </code> * <tr bgcolor="#eeeeff"> * <td><code>F</code> <td>Day of week in month <td>Number <td> * <code>2</code> * <tr> * <td><code>E</code> <td>Day in week <td>Text <td><code>Tuesday * </code>; <code>Tue</code> * <tr bgcolor="#eeeeff"> * <td><code>a</code> <td>Am/pm marker <td>Text <td><code>PM * </code> * <tr> * <td><code>H</code> <td>Hour in day (0-23) <td>Number <td> * <code>0</code> * <tr bgcolor="#eeeeff"> * <td><code>k</code> <td>Hour in day (1-24) <td>Number <td> * <code>24</code> * <tr> * <td><code>K</code> <td>Hour in am/pm (0-11) <td>Number <td> * <code>0</code> * <tr bgcolor="#eeeeff"> * <td><code>h</code> <td>Hour in am/pm (1-12) <td>Number <td> * <code>12</code> * <tr> * <td><code>m</code> <td>Minute in hour <td>Number <td><code>30 * </code> * <tr bgcolor="#eeeeff"> * <td><code>s</code> <td>Second in minute <td>Number <td><code> * 55</code> * <tr> * <td><code>S</code> <td>Millisecond <td>Number <td><code>978 * </code> * <tr bgcolor="#eeeeff"> * <td><code>z</code> <td>Time zone <td>General time zone <td> * <code>Pacific Standard Time</code>; <code>PST</code>; <code> * GMT-08:00</code> * <tr> * <td><code>Z</code> <td>Time zone <td>RFC 822 time zone <td> * <code>-0800</code> * 
</table> * @return * @tesedby {@link TimeUtilsTest} */ public Time parse(String string, String pattern) { assert !pattern.contains("h") : "h is a tricksy bastard - you probably want H in " + pattern; try { SimpleDateFormat format = new SimpleDateFormat(pattern); TimeZone zone = TimeZone.getTimeZone("GMT"); format.setTimeZone(zone); Date date = format.parse(string); return new Time(date); } catch (ParseException e) { throw new IllegalArgumentException(string + " did not match pattern " + pattern, e); } } /** * @param dt * e.g. 10 minutes. Ignores +/- indicators such as "ago" or * "hence". * @return always positive * @testedby TimeUnitTest#testParseDt()} */ public Dt parseDt(String dt) throws IllegalArgumentException { // trim and lower case dt = dt.trim().toLowerCase(); Pattern delay = Pattern .compile("(a|[\\d\\.]+)?\\s*\\b(year|month|week|day|hour|hr|minute|min|second|sec)s?\\b"); String[] bits = StrUtils.find(delay, dt); if (bits == null) throw new IllegalArgumentException("Could not parse dt-spec: " + dt); Double val = bits[1] == null || "a".equals(bits[1]) ? 1 : Double .valueOf(bits[1]); String us = bits[2]; TUnit unit = parseDt2_unit(us); return new Dt(val, unit); } private TUnit parseDt2_unit(String us) { // try the enum try { TUnit unit = TUnit.valueOf(us.toUpperCase()); assert unit != null; return unit; } catch (IllegalArgumentException e) { // ignore } if (us.equals("min")) return TUnit.MINUTE; if (us.equals("sec")) return TUnit.SECOND; throw new IllegalArgumentException(us); } /** * Experimental handling of time strings * * @param s * @return * @testedby TimeUtilsTest#testParseExperimental()} */ public Time parseExperimental(String s) throws IllegalArgumentException { return parseExperimental(s, null); } /** * * @param s * @param isRelative Can be null. 
*            If provided, will be set to true for relative times, such as
	 *            "today" or "yesterday"
	 * @return
	 * 
	 * TODO it'd be nice to use TimeFragment here
	 */
	public Time parseExperimental(String s, AtomicBoolean isRelative) throws IllegalArgumentException {
		// Convenience wrapper: parse as a period and return its start.
		Period period = parsePeriod(s, isRelative);
		return period.first;
	}

	// Reference point for relative expressions ("today", "1 week ago").
	// Defaults to construction time; override via setNow() e.g. for testing.
	Time now = new Time();

	/**
	 * Set the reference "now" used when resolving relative time expressions.
	 */
	public void setNow(Time now) {
		this.now = now;
	}

	// NOTE(review): declared but never read within this class chunk --
	// presumably reserved for timezone support; verify before relying on it.
	String timezone;

	/**
	 * yyyy-MM-dd -- be careful to sanity check values
	 */
	static final Pattern pISODATE = Pattern.compile("^(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d)");

	// 3-letter month tokens separated by '|': match-index / 4 = 0-based month
	static final Pattern pMONTH = Pattern.compile("jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec");

	// 3-letter day-of-week tokens: match-index / 4 = 0-based day (sun = 0)
	static final Pattern pDAY = Pattern.compile("sun|mon|tue|wed|thu|fri|sat");

	/**
	 * TODO break this out into a TimeParser class so we can have language & timezone support.
	 * WARNING: will return a point time (a 0-length period) for many cases
	 * @param s a period or a time. Anything really, but only English.
	 * @param isRelative if not null, set to true when the input is a relative expression
	 * @return the parsed period (possibly zero-length)
	 * @throws IllegalArgumentException if s cannot be parsed
	 */
	public Period parsePeriod(String s, AtomicBoolean isRelative) throws IllegalArgumentException {
		// split? "X to Y" parses each side independently.
		if (s.contains(" to ")) {
			String[] bits = s.split(" to ");
			Time t0 = parseExperimental(bits[0], isRelative);
			Time t1 = parseExperimental(bits[1], isRelative);
			return new Period(t0, t1);
		}
		s = s.trim().toLowerCase();
		// standard? Try the Time(String) constructor first.
		try {
			Time t = new Time(s);
			// HACK: was it a day without a time?
			if (parsePeriod2_isWholeDay(s, t)) {
				return new Period(t, TimeUtils.getEndOfDay(t));
			}
			return new Period(t);
		} catch (Exception e) {
			// oh well -- fall through to the heuristics below
		}
		// TODO use this more
		TimeFragment tf = new TimeFragment();
		{	// Do we have an ISO date? (but a non-ISO time) e.g. "yyyy-MM-dd HH:mm" or "yyyy-MM-dd HH:mm:ss"
			Matcher m = pISODATE.matcher(s);
			if (m.find()) {
				int y = Integer.valueOf(m.group(1));
				int mm = Integer.valueOf(m.group(2));
				int d = Integer.valueOf(m.group(3));
				// sanity check month/day ranges before trusting the match
				if (mm>0 && mm<13 && d>0 && d<32) {
					Time date = new Time(y,mm,d);
					tf.setDate(date);
				}
			}
		}
		// Use regexs to pick out markers for day, month, hour, dt
		// - build a time object based on what we find
		int year = -1;
		{	// year
			Matcher m = TimeUtils.YEAR.matcher(s);
			if (m.find()) {
				year = Integer.valueOf(m.group(1));
				// BC?
				if (m.group().contains("b")) {
					year = -year;
				}
				tf.put(Calendar.YEAR, year);
			} else {
				// maybe a short year? e.g. trailing "21" -> 2021
				Matcher sm = Pattern.compile("\\d\\d$").matcher(s);
				if (sm.find()) {
					year = 2000 + Integer.valueOf(sm.group());
					if (year > new Time().getYear() + 15) {
						year -= 1900; // 99 = 1999
					}
					tf.put(Calendar.YEAR, year);
				}
			}
		}
		String month = null;
		{	// month markers -- 3 letters is enough to id a month
			Matcher m = pMONTH.matcher(s);
			if (m.find()) {
				month = m.group();
				// each pattern token is 3 letters + "|", so index/4 = 0-based month
				int mi = pMONTH.pattern().indexOf(month);
				int mii = mi / 4;
				tf.setMonth(mii+1); // 1=jan
			}
		}
		String day = null;
		{	// day of week
			Matcher m = pDAY.matcher(s);
			if (m.find()) {
				day = m.group();
				// guard against mon = month false match
				if ("mon".equals(day) && s.contains("month")) {
					day = null;
					if (m.find()) {
						day = m.group();
					}
				}
				// to index
				if (day!=null) {
					int idow = pDAY.pattern().indexOf(day);
					int dow = idow / 4;
					// dow: sun = 0
					// cal: sunday = 1, saturday=7
					tf.put(Calendar.DAY_OF_WEEK, dow+1);
				}
			}
		}
		Integer hour = null;
		{	// TODO hour:minute
			Pattern HOUR = Pattern.compile("(\\d\\d):(\\d\\d)|(\\d\\d?)am|(\\d\\d?)pm");
			Matcher m = HOUR.matcher(s);
			if (m.find()) {
				// NB: hourMin and g2 (minutes) are captured but currently unused
				String hourMin = m.group();
				String g1 = m.group(1);
				String g2 = m.group(2);
				String g3 = m.group(3);
				String g4 = m.group(4);
				hour = Integer.valueOf(Utils.or(g1,g3,g4));
				if (g4!=null) {
					// pm?
					hour += 12;
				}
				tf.put(Calendar.HOUR_OF_DAY, hour);
			}
		}
		// put together a date
		if (month != null) {
			if (year==-1) {
				// no explicit year: assume the current one
				year = now.getYear();
				tf.setYear(year);
			}
			// look for a day of month
			Matcher m = Pattern.compile("\\d+").matcher(s);
			while (m.find()) {
				int dayMonth = Integer.parseInt(m.group());
				if (dayMonth == 0 || dayMonth > 31) {
					continue;
				}
				tf.put(Calendar.DAY_OF_MONTH, dayMonth);
				Time hm = tf.getTime();
				if (hm!=null) {
					if (tf.hasTime()) {
						return new Period(hm);
					}
					// date only: treat as the whole day
					Time eod = TimeUtils.getEndOfDay(hm);
					return new Period(hm, eod);
				}
			}
			if (day==null) {
				// month (+year) only: treat as the whole month
				tf.put(Calendar.DAY_OF_MONTH, 1);
				Time t = tf.getTime();
				if (t != null) {
					Time eom = TimeUtils.getEndOfMonth(t.plus(TUnit.DAY));
					return new Period(t, eom);
				}
			}
		}
		// special strings
		if (s.equals("now")) {
			if (isRelative!=null) isRelative.set(true);
			return new Period(now);
		}
		if (s.equals("today")) {
			if (isRelative!=null) isRelative.set(true);
			return new Period(TimeUtils.getStartOfDay(now), TimeUtils.getEndOfDay(now));
		}
		if (s.equals("yesterday")) {
			if (isRelative!=null) isRelative.set(true);
			// rewritten and handled by the dt-spec branch below
			s = "1 day ago";
		}
		if (s.equals("tomorrow")) {
			if (isRelative!=null) isRelative.set(true);
			s = "1 day from now";
		}
		// HACK "start/end" e.g. "start of month" -- strip the prefix, remember which
		Pattern p = Pattern.compile("^(start|end)?([\\- ]of[\\- ])?");
		Matcher m = p.matcher(s);
		String startEnd = null;
		if (m.find()) {
			startEnd = m.group(1);
			s = s.substring(m.end());
		}
		s = s.trim(); //paranoia
		// HACK last next
		if (s.startsWith("last")) {
			if (isRelative!=null) isRelative.set(true);
			if (day!=null) {
				// walk back up to a week to find the most recent matching weekday
				Time lastDay = now;
				for(int i=0; i<7; i++) {
					lastDay = lastDay.minus(TUnit.DAY);
					String lday = lastDay.format("EEE");
					if (lday.toLowerCase().startsWith(day)) {
						return new Period(TimeUtils.getStartOfDay(lastDay), TimeUtils.getEndOfDay(lastDay));
					}
				}
				return new Period(TimeUtils.getStartOfDay(lastDay), TimeUtils.getEndOfDay(lastDay));
			}
			// NB this handles "last week" and "last-week"
			s = "1 "+s.substring(5)+" ago";
		}
		if (s.startsWith("next")) {
			s = "1 "+s.substring(5)+" from now";
		}
		// a step spec, e.g. 1 week ago?
		try {
			Dt dt = parseDt(s);
			if (isRelative!=null) isRelative.set(true);
			Time t;
			if (s.contains("ago")) {
				t = now.minus(dt);
			} else if (s.contains("this") || s.equals("month")) {
				// HACK test for "this month" or "end-of-week"
				// no-op
				t = now;
			} else if (s.equals("week")) {
				// HACK
				t = now;
				KDay dow = TimeUtils.getDayOfWeek(t);
				int dn = dow.ordinal();
				Time t2;
				if ("start".equals(startEnd)) {
					t2 = TimeUtils.getStartOfDay(t.minus(dn, TUnit.DAY));
				} else {
					t2 = TimeUtils.getEndOfDay(t.plus(7 - dn, TUnit.DAY));
				}
				return new Period(t2);
			} else {
				t = now.plus(dt);
			}
			if (startEnd==null) return new Period(t);
			// TODO don't assume month -- also handle "start of last week"
			Time t2 = TimeUtils.getStartOfMonth(t);
			if ("start".equals(startEnd)) {
				return new Period(t2);
			} else {
				// end of month = start of next month minus 1 millisecond
				Time t3 = t2.plus(TUnit.MONTH).minus(TUnit.MILLISECOND);
				return new Period(t3);
			}
		} catch (Exception e) {
			// oh well -- not a dt-spec
		}
		// a time? e.g. "7pm", "7pm today"??
		// TODO an actual time description, like "Monday 1st, December 1968"
		// what do we have?
		if (tf.numset() > 1) {
			Time tft = tf.getTime();
			if (tft!=null) return new Period(tft);
		}
		// parse failed
		throw new IllegalArgumentException(s);
	}

	/**
	 * @return true if s looks like a plain date (start-of-day time, no
	 *         hour/minute markers), so the caller should expand it into a
	 *         whole-day period.
	 */
	private boolean parsePeriod2_isWholeDay(String s, Time t) {
		if ( ! t.equals(TimeUtils.getStartOfDay(t))) {
			return false;
		}
		if (MathUtils.isNumber(s)) return false;
		// HACK: do we have an hour:minute part or other time marker?
		if (s.contains(":") || s.contains("am") || s.contains("pm")) {
			return false;
		}
		// HACK: ad hoc markers
		if (s.contains("start")) return false;
		// no time - so treat as whole day
		return true;
	}
}
/**********************************************************************************
 * $URL$
 * $Id$
 ***********************************************************************************
 *
 * Copyright (c) 2003, 2004, 2005, 2006, 2008 The Sakai Foundation
 *
 * Licensed under the Educational Community License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.opensource.org/licenses/ECL-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **********************************************************************************/

package org.sakaiproject.cheftool.menu;

import java.util.ArrayList;
import java.util.List;

import org.sakaiproject.cheftool.api.MenuItem;

/**
 * <p>
 * MenuEntry is a single clickable leaf entry in a menu: a title plus an
 * optional icon, an action string (or a full URL that overrides it), an
 * enabled flag and a checked state. It never contains sub-items.
 * </p>
 */
public class MenuEntry implements MenuItem
{
	/** Display title. */
	protected String m_title = null;

	/** Icon name, or null for no icon. */
	protected String m_icon = null;

	/** Whether the entry may be clicked. */
	protected boolean m_enabled = true;

	/** Whether the entry renders as the currently-selected item. */
	protected boolean m_current = false;

	/** Action string sent on click; ignored when m_url is set. */
	protected String m_action = null;

	/** Full URL; when set it takes precedence over m_action. */
	protected String m_url = null;

	/** Name of the form whose values accompany this entry's action. */
	protected String m_form = null;

	/** Checked status (see MenuItem for legal values). */
	protected int m_checked = CHECKED_NA;

	/** Accessibility label; getAccessibilityLabel() falls back to the title when null. */
	protected String m_accessibilitylabel = null;

	/**
	 * Construct a fully-specified entry. All other constructors delegate here.
	 */
	public MenuEntry(String title, String accessibilityLabel, String icon, boolean enabled, int checked, String action,
			String form)
	{
		m_title = title;
		m_accessibilitylabel = accessibilityLabel;
		m_icon = icon;
		m_enabled = enabled;
		m_checked = checked;
		m_action = action;
		m_form = form;
	}

	/**
	 * Construct an entry with no accessibility label and no form.
	 */
	public MenuEntry(String title, String icon, boolean enabled, int checked, String action)
	{
		this(title, null, icon, enabled, checked, action, null);
	}

	/**
	 * Construct a plain entry with just a title, enabled flag and action.
	 */
	public MenuEntry(String title, boolean enabled, String action)
	{
		this(title, null, null, enabled, CHECKED_NA, action, null);
	}

	/**
	 * Construct an always-enabled entry with just a title and action.
	 */
	public MenuEntry(String title, String action)
	{
		this(title, null, null, true, CHECKED_NA, action, null);
	}

	/**
	 * Construct an entry with an accessibility label but no form.
	 */
	public MenuEntry(String title, String accessibilityLabel, String icon, boolean enabled, int checked, String action)
	{
		this(title, accessibilityLabel, icon, enabled, checked, action, null);
	}

	/**
	 * Construct an always-enabled entry with a title, accessibility label and action.
	 */
	public MenuEntry(String title, String accessibilityLabel, String action)
	{
		this(title, accessibilityLabel, null, true, CHECKED_NA, action, null);
	}

	/**
	 * Construct an entry with a title, accessibility label, enabled flag and action.
	 */
	public MenuEntry(String title, String accessibilityLabel, boolean enabled, String action)
	{
		this(title, accessibilityLabel, null, enabled, CHECKED_NA, action, null);
	}

	/**
	 * Construct an entry with a form but no accessibility label.
	 */
	public MenuEntry(String title, String icon, boolean enabled, int checked, String action, String form)
	{
		this(title, null, icon, enabled, checked, action, form);
	}

	/**
	 * Set the full URL of the entry. To create an entry with a URL, create one first with a "" action, then call this.
	 *
	 * @param url
	 *        The full URL for the entry.
	 * @return This, for convenience.
	 */
	public MenuEntry setUrl(String url)
	{
		m_url = url;
		return this;
	}

	/**
	 * A MenuEntry is a leaf: it never contains other items.
	 *
	 * @return false, always.
	 */
	public boolean getIsContainer()
	{
		return false;
	}

	/**
	 * A MenuEntry is never a divider.
	 *
	 * @return false, always.
	 */
	public boolean getIsDivider()
	{
		return false;
	}

	/**
	 * Access the display title for the item.
	 *
	 * @return The display title, never null ("" when unset).
	 */
	public String getTitle()
	{
		if (m_title == null) return "";
		return m_title;
	}

	/**
	 * Access the icon name for the item (or null if no icon).
	 *
	 * @return The icon name for the item (or null if no icon).
	 */
	public String getIcon()
	{
		return m_icon;
	}

	/**
	 * Access the enabled flag for the item.
	 *
	 * @return True if the item is enabled, false if not.
	 */
	public boolean getIsEnabled()
	{
		return m_enabled;
	}

	/**
	 * Access the action string for this item; what to do when the user clicks. Returns "" when the entry is disabled or has no action set.
	 *
	 * @return The action string for this item, never null.
	 */
	public String getAction()
	{
		if (m_enabled && (m_action != null)) return m_action;
		return "";
	}

	/**
	 * Access the full URL string for this item. When defined it overrides getAction() (which should then be ""). Returns "" when disabled or unset.
	 *
	 * @return The full URL string for this item, never null.
	 */
	public String getUrl()
	{
		if (m_enabled && (m_url != null)) return m_url;
		return "";
	}

	/**
	 * Access the form name whose values will be used when this item is selected.
	 *
	 * @return The form name whose values will be used when this item is selected.
	 */
	public String getForm()
	{
		return m_form;
	}

	/**
	 * Access the sub-items of the item. A leaf entry has none.
	 *
	 * @return An empty (fresh, mutable) list, always.
	 */
	public List<MenuItem> getItems()
	{
		return new ArrayList<>();
	}

	/**
	 * Access one sub-item of the item. A leaf entry has none.
	 *
	 * @param index
	 *        The index position (0 based) for the sub-item to get.
	 * @return null, always.
	 */
	public MenuItem getItem(int index)
	{
		return null;
	}

	/**
	 * Access the checked status of this item. Possible values:
	 *
	 * @see MenuItem
	 * @return The checked status of this item.
	 */
	public int getChecked()
	{
		return m_checked;
	}

	/**
	 * Access the checked status of this item as a boolean.
	 *
	 * @return True if item is checked, false otherwise.
	 */
	public boolean getIschecked()
	{
		return m_checked == CHECKED_TRUE;
	}

	/**
	 * Count the sub-items of the item. A leaf entry has none.
	 *
	 * @return 0, always.
	 */
	public int size()
	{
		return 0;
	}

	/**
	 * Check if there are any sub-items. A leaf entry has none.
	 *
	 * @return true, always.
	 */
	public boolean isEmpty()
	{
		return true;
	}

	/**
	 * Access the is-field (not a button) flag.
	 *
	 * @return false, always: an entry is a button, not a field.
	 */
	public boolean getIsField()
	{
		return false;
	}

	/**
	 * {@inheritDoc}
	 */
	public boolean getIsCurrent()
	{
		return m_current;
	}

	/**
	 * If you set this to true, this menu item will be displayed as the current selected item.
	 */
	public void setIsCurrent(boolean current)
	{
		m_current = current;
	}

	@Override
	public String getAccessibilityLabel()
	{
		if (m_accessibilitylabel == null) return m_title;
		return m_accessibilitylabel;
	}
}
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.sysml.runtime.matrix; import java.io.PrintWriter; import java.util.HashSet; import java.util.PrimitiveIterator; import java.util.stream.LongStream; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.math3.random.Well1024a; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapred.Counters.Group; import org.apache.hadoop.mapred.JobClient; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RunningJob; import org.apache.sysml.conf.ConfigurationManager; import org.apache.sysml.conf.DMLConfig; import org.apache.sysml.lops.Lop; import org.apache.sysml.runtime.DMLRuntimeException; import org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer; import org.apache.sysml.runtime.instructions.MRInstructionParser; import org.apache.sysml.runtime.instructions.MRJobInstruction; import org.apache.sysml.runtime.instructions.mr.DataGenMRInstruction; import org.apache.sysml.runtime.instructions.mr.MRInstruction; import org.apache.sysml.runtime.instructions.mr.MRInstruction.MRINSTRUCTION_TYPE; import 
org.apache.sysml.runtime.instructions.mr.RandInstruction;
import org.apache.sysml.runtime.instructions.mr.SeqInstruction;
import org.apache.sysml.runtime.io.IOUtilFunctions;
import org.apache.sysml.runtime.matrix.data.InputInfo;
import org.apache.sysml.runtime.matrix.data.LibMatrixDatagen;
import org.apache.sysml.runtime.matrix.data.MatrixIndexes;
import org.apache.sysml.runtime.matrix.data.OutputInfo;
import org.apache.sysml.runtime.matrix.data.TaggedMatrixBlock;
import org.apache.sysml.runtime.matrix.mapred.DataGenMapper;
import org.apache.sysml.runtime.matrix.mapred.GMRCombiner;
import org.apache.sysml.runtime.matrix.mapred.GMRReducer;
import org.apache.sysml.runtime.matrix.mapred.MRConfigurationNames;
import org.apache.sysml.runtime.matrix.mapred.MRJobConfiguration;
import org.apache.sysml.runtime.matrix.mapred.MRJobConfiguration.ConvertTarget;
import org.apache.sysml.runtime.matrix.mapred.MRJobConfiguration.MatrixChar_N_ReducerGroups;
import org.apache.sysml.runtime.util.MapReduceTool;
import org.apache.sysml.runtime.util.UtilFunctions;
import org.apache.sysml.yarn.DMLAppMasterUtils;
import org.apache.sysml.yarn.ropt.YarnClusterAnalyzer;

/**
 * <p>Rand MapReduce job which creates random objects.</p>
 * 
 */
public class DataGenMR
{
	private static final Log LOG = LogFactory.getLog(DataGenMR.class.getName());

	private DataGenMR() {
		//prevent instantiation via private constructor
	}

	/**
	 * <p>Starts a Rand MapReduce job which will produce one or more random objects.</p>
	 * 
	 * @param inst MR job instruction
	 * @param dataGenInstructions array of data gen instructions
	 * @param instructionsInMapper instructions in mapper
	 * @param aggInstructionsInReducer aggregate instructions in reducer
	 * @param otherInstructionsInReducer other instructions in reducer
	 * @param numReducers number of reducers
	 * @param replication file replication
	 * @param resultIndexes result indexes for each random object
	 * @param dimsUnknownFilePrefix file path prefix when dimensions unknown
	 * @param outputs output file for each random object
	 * @param outputInfos output information for each random object
	 * @return matrix characteristics for each random object
	 * @throws Exception if Exception occurs
	 */
	public static JobReturn runJob(MRJobInstruction inst, String[] dataGenInstructions, String instructionsInMapper,
			String aggInstructionsInReducer, String otherInstructionsInReducer, int numReducers, int replication,
			byte[] resultIndexes, String dimsUnknownFilePrefix, String[] outputs, OutputInfo[] outputInfos)
		throws Exception
	{
		JobConf job = new JobConf(DataGenMR.class);
		job.setJobName("DataGen-MR");
		
		//whether use block representation or cell representation
		MRJobConfiguration.setMatrixValueClass(job, true);
		
		// Phase 1: parse each datagen instruction and write one small text file
		// per instruction describing the blocks the mappers must generate.
		byte[] realIndexes=new byte[dataGenInstructions.length];
		for(byte b=0; b<realIndexes.length; b++)
			realIndexes[b]=b;
		
		String[] inputs=new String[dataGenInstructions.length];
		InputInfo[] inputInfos = new InputInfo[dataGenInstructions.length];
		long[] rlens=new long[dataGenInstructions.length];
		long[] clens=new long[dataGenInstructions.length];
		int[] brlens=new int[dataGenInstructions.length];
		int[] bclens=new int[dataGenInstructions.length];
		
		FileSystem fs = FileSystem.get(job);
		String dataGenInsStr="";
		int numblocks=0;
		int maxbrlen=-1, maxbclen=-1;
		double maxsparsity = -1;
		
		for(int i = 0; i < dataGenInstructions.length; i++) {
			dataGenInsStr=dataGenInsStr+Lop.INSTRUCTION_DELIMITOR+dataGenInstructions[i];
			
			MRInstruction mrins = MRInstructionParser.parseSingleInstruction(dataGenInstructions[i]);
			MRINSTRUCTION_TYPE mrtype = mrins.getMRInstructionType();
			DataGenMRInstruction genInst = (DataGenMRInstruction) mrins;
			
			rlens[i] = genInst.getRows();
			clens[i] = genInst.getCols();
			brlens[i] = genInst.getRowsInBlock();
			bclens[i] = genInst.getColsInBlock();
			
			maxbrlen = Math.max(maxbrlen, brlens[i]);
			maxbclen = Math.max(maxbclen, bclens[i]);
			
			if ( mrtype == MRINSTRUCTION_TYPE.Rand ) {
				RandInstruction randInst = (RandInstruction) mrins;
				inputs[i]=LibMatrixDatagen.generateUniqueSeedPath(genInst.getBaseDir());
				maxsparsity = Math.max(maxsparsity, randInst.getSparsity());
				
				// one line per block: "blockRow,blockCol,rows,cols,nnz,seed"
				PrintWriter pw = null;
				try {
					pw = new PrintWriter(fs.create(new Path(inputs[i])));
					
					//for obj reuse and preventing repeated buffer re-allocations
					StringBuilder sb = new StringBuilder();
					
					//seed generation
					Well1024a bigrand = LibMatrixDatagen.setupSeedsForRand(randInst.getSeed());
					LongStream nnz = LibMatrixDatagen.computeNNZperBlock(rlens[i], clens[i], brlens[i], bclens[i], randInst.getSparsity());
					PrimitiveIterator.OfLong nnzIter = nnz.iterator();
					for(long r = 0; r < rlens[i]; r += brlens[i]) {
						long curBlockRowSize = Math.min(brlens[i], (rlens[i] - r));
						for(long c = 0; c < clens[i]; c += bclens[i]) {
							long curBlockColSize = Math.min(bclens[i], (clens[i] - c));
							sb.append((r / brlens[i]) + 1);
							sb.append(',');
							sb.append((c / bclens[i]) + 1);
							sb.append(',');
							sb.append(curBlockRowSize);
							sb.append(',');
							sb.append(curBlockColSize);
							sb.append(',');
							sb.append(nnzIter.nextLong());
							sb.append(',');
							sb.append(bigrand.nextLong());
							pw.println(sb.toString());
							sb.setLength(0);
							numblocks++;
						}
					}
				}
				finally {
					IOUtilFunctions.closeSilently(pw);
				}
				inputInfos[i] = InputInfo.TextCellInputInfo;
			}
			else if ( mrtype == MRINSTRUCTION_TYPE.Seq ) {
				SeqInstruction seqInst = (SeqInstruction) mrins;
				inputs[i]=genInst.getBaseDir() + System.currentTimeMillis()+".seqinput";
				maxsparsity = 1.0; //always dense
				
				double from = seqInst.fromValue;
				double to = seqInst.toValue;
				double incr = seqInst.incrValue;
				
				//handle default 1 to -1 for special case of from>to
				incr = LibMatrixDatagen.updateSeqIncr(from, to, incr);
				
				// Correctness checks on (from, to, incr)
				boolean neg = (from > to);
				if ( incr == 0 )
					throw new DMLRuntimeException("Invalid value for \"increment\" in seq().");
				
				if (neg != (incr < 0) )
					throw new DMLRuntimeException("Wrong sign for the increment in a call to seq()");
				
				// Compute the number of rows in the sequence
				long numrows = UtilFunctions.getSeqLength(from, to, incr);
				if ( rlens[i] > 0 ) {
					if ( numrows != rlens[i] )
						throw new DMLRuntimeException("Unexpected error while processing sequence instruction. Expected number of rows does not match given number: " + rlens[i] + " != " + numrows);
				}
				else {
					rlens[i] = numrows;
				}
				
				if ( clens[i] >0 && clens[i] != 1)
					throw new DMLRuntimeException("Unexpected error while processing sequence instruction. Number of columns (" + clens[i] + ") must be equal to 1.");
				else
					clens[i] = 1;
				
				// one line per block: "blockRow,1,from,to,incr"
				PrintWriter pw = null;
				try {
					pw = new PrintWriter(fs.create(new Path(inputs[i])));
					StringBuilder sb = new StringBuilder();
					double temp = from;
					double block_from, block_to;
					for(long r = 0; r < rlens[i]; r += brlens[i]) {
						long curBlockRowSize = Math.min(brlens[i], (rlens[i] - r));
						
						// block (bid_i,bid_j) generates a sequence from the interval [block_from, block_to] (inclusive of both end points of the interval)
						long bid_i = ((r / brlens[i]) + 1);
						long bid_j = 1;
						block_from = temp;
						block_to = temp+(curBlockRowSize-1)*incr;
						temp = block_to + incr; // next block starts from here
						
						sb.append(bid_i);
						sb.append(',');
						sb.append(bid_j);
						sb.append(',');
						sb.append(block_from);
						sb.append(',');
						sb.append(block_to);
						sb.append(',');
						sb.append(incr);
						pw.println(sb.toString());
						sb.setLength(0);
						numblocks++;
					}
				}
				finally {
					IOUtilFunctions.closeSilently(pw);
				}
				inputInfos[i] = InputInfo.TextCellInputInfo;
			}
			else {
				throw new DMLRuntimeException("Unexpected Data Generation Instruction Type: " + mrtype );
			}
		}
		dataGenInsStr=dataGenInsStr.substring(1);//remove the first ","
		
		RunningJob runjob;
		MatrixCharacteristics[] stats;
		try{
			// Phase 2: configure and submit the MR job; inputs are the
			// per-instruction block-description files written above.
			
			//set up the block size
			MRJobConfiguration.setBlocksSizes(job, realIndexes, brlens, bclens);
			
			//set up the input files and their format information
			MRJobConfiguration.setUpMultipleInputs(job, realIndexes, inputs, inputInfos, brlens, bclens, false, ConvertTarget.BLOCK);
			
			//set up the dimensions of input matrices
			MRJobConfiguration.setMatricesDimensions(job, realIndexes, rlens, clens);
			MRJobConfiguration.setDimsUnknownFilePrefix(job, dimsUnknownFilePrefix);
			
			//set up the block size
			MRJobConfiguration.setBlocksSizes(job, realIndexes, brlens, bclens);
			
			//set up the rand Instructions
			MRJobConfiguration.setRandInstructions(job, dataGenInsStr);
			
			//set up unary instructions that will perform in the mapper
			MRJobConfiguration.setInstructionsInMapper(job, instructionsInMapper);
			
			//set up the aggregate instructions that will happen in the combiner and reducer
			MRJobConfiguration.setAggregateInstructions(job, aggInstructionsInReducer);
			
			//set up the instructions that will happen in the reducer, after the aggregation instrucions
			MRJobConfiguration.setInstructionsInReducer(job, otherInstructionsInReducer);
			
			//set up the replication factor for the results
			job.setInt(MRConfigurationNames.DFS_REPLICATION, replication);
			
			//set up map/reduce memory configurations (if in AM context)
			DMLConfig config = ConfigurationManager.getDMLConfig();
			DMLAppMasterUtils.setupMRJobRemoteMaxMemory(job, config);
			
			//set up custom map/reduce configurations 
			MRJobConfiguration.setupCustomMRConfigurations(job, config);
			
			//determine degree of parallelism (nmappers: 1<=n<=capacity)
			//TODO use maxsparsity whenever we have a way of generating sparse rand data 
			int capacity = InfrastructureAnalyzer.getRemoteParallelMapTasks();
			long dfsblocksize = InfrastructureAnalyzer.getHDFSBlockSize();
			//correction max number of mappers on yarn clusters
			if( InfrastructureAnalyzer.isYarnEnabled() )
				capacity = (int)Math.max( capacity, YarnClusterAnalyzer.getNumCores() );
			int nmapers = Math.max(Math.min((int)(8*maxbrlen*maxbclen*(long)numblocks/dfsblocksize), capacity),1);
			job.setNumMapTasks(nmapers);
			
			//set up what matrices are needed to pass from the mapper to reducer
			HashSet<Byte> mapoutputIndexes=MRJobConfiguration.setUpOutputIndexesForMapper(job, realIndexes, dataGenInsStr, instructionsInMapper, null, aggInstructionsInReducer, otherInstructionsInReducer, resultIndexes);
			
			MatrixChar_N_ReducerGroups ret=MRJobConfiguration.computeMatrixCharacteristics(job, realIndexes, dataGenInsStr, instructionsInMapper, null, aggInstructionsInReducer, null, otherInstructionsInReducer, resultIndexes, mapoutputIndexes, false);
			stats=ret.stats;
			
			//set up the number of reducers
			MRJobConfiguration.setNumReducers(job, ret.numReducerGroups, numReducers);
			
			// print the complete MRJob instruction
			if (LOG.isTraceEnabled())
				inst.printCompleteMRJobInstruction(stats);
			
			// Update resultDimsUnknown based on computed "stats"
			byte[] resultDimsUnknown = new byte[resultIndexes.length];
			for ( int i=0; i < resultIndexes.length; i++ ) {
				if ( stats[i].getRows() == -1 || stats[i].getCols() == -1 ) {
					resultDimsUnknown[i] = (byte) 1;
				}
				else {
					resultDimsUnknown[i] = (byte) 0;
				}
			}
			
			boolean mayContainCtable = instructionsInMapper.contains("ctabletransform") || instructionsInMapper.contains("groupedagg") ;
			
			//set up the multiple output files, and their format information
			MRJobConfiguration.setUpMultipleOutputs(job, resultIndexes, resultDimsUnknown, outputs, outputInfos, true, mayContainCtable);
			
			// configure mapper and the mapper output key value pairs
			job.setMapperClass(DataGenMapper.class);
			if(numReducers==0) {
				job.setMapOutputKeyClass(Writable.class);
				job.setMapOutputValueClass(Writable.class);
			}
			else {
				job.setMapOutputKeyClass(MatrixIndexes.class);
				job.setMapOutputValueClass(TaggedMatrixBlock.class);
			}
			
			//set up combiner
			if(numReducers!=0 && aggInstructionsInReducer!=null && !aggInstructionsInReducer.isEmpty())
				job.setCombinerClass(GMRCombiner.class);
			
			//configure reducer
			job.setReducerClass(GMRReducer.class);
			//job.setReducerClass(PassThroughReducer.class);
			
			// By default, the job executes in "cluster" mode.
			// Determine if we can optimize and run it in "local" mode.
			// NOTE(review): inputStats is built but not read in this method -- presumably
			// a leftover from a local-mode optimization; verify before removing.
			MatrixCharacteristics[] inputStats = new MatrixCharacteristics[inputs.length];
			for ( int i=0; i < inputs.length; i++ ) {
				inputStats[i] = new MatrixCharacteristics(rlens[i], clens[i], brlens[i], bclens[i]);
			}
			
			//set unique working dir
			MRJobConfiguration.setUniqueWorkingDir(job);
			
			runjob=JobClient.runJob(job);
			
			/* Process different counters */
			Group group=runjob.getCounters().getGroup(MRJobConfiguration.NUM_NONZERO_CELLS);
			for(int i=0; i<resultIndexes.length; i++) {
				// number of non-zeros
				stats[i].setNonZeros(group.getCounter(Integer.toString(i)));
			}
			
			String dir = dimsUnknownFilePrefix + "/" + runjob.getID().toString() + "_dimsFile";
			stats = MapReduceTool.processDimsFiles(dir, stats);
			MapReduceTool.deleteFileIfExistOnHDFS(dir);
		}
		finally {
			// always clean up the temporary per-instruction input files
			for(String input: inputs)
				MapReduceTool.deleteFileIfExistOnHDFS(new Path(input), job);
		}
		
		return new JobReturn(stats, outputInfos, runjob.isSuccessful());
	}
}
/*
 * Copyright 2014 Lynden, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.lynden.gmapsfx;

import com.lynden.gmapsfx.javascript.JavaFxWebEngine;
import com.lynden.gmapsfx.javascript.JavascriptRuntime;
import com.lynden.gmapsfx.javascript.event.MapStateEventType;
import com.lynden.gmapsfx.javascript.object.GoogleMap;
import com.lynden.gmapsfx.javascript.object.LatLong;
import com.lynden.gmapsfx.javascript.object.MapOptions;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CyclicBarrier;
import javafx.beans.value.ChangeListener;
import javafx.beans.value.ObservableValue;
import javafx.concurrent.Worker;
import javafx.geometry.Point2D;
import javafx.scene.layout.AnchorPane;
import javafx.scene.web.WebView;
import netscape.javascript.JSObject;

/**
 * JavaFX pane that embeds a Google Map inside a {@link WebView}.
 * <p>
 * The map HTML page is loaded asynchronously by the WebView's load worker;
 * callers must wait for {@link MapComponentInitializedListener#mapInitialized()}
 * before calling {@link #createMap(MapOptions)} or any map-manipulating method,
 * otherwise a {@link MapNotInitializedException} is thrown.
 *
 * @author Rob Terpilowski
 */
public class GoogleMapView extends AnchorPane {

    protected WebView webview;
    protected JavaFxWebEngine webengine;
    // Set to true once the WebView has finished loading the maps HTML page.
    protected boolean initialized = false;
    protected final CyclicBarrier barrier = new CyclicBarrier(2);
    protected final List<MapComponentInitializedListener> mapInitializedListeners = new ArrayList<>();
    protected final List<MapReadyListener> mapReadyListeners = new ArrayList<>();
    protected GoogleMap map;

    /**
     * Creates a new map view with the FireBug debug pane disabled.
     */
    public GoogleMapView() {
        this(false);
    }

    /**
     * Creates a new map view and specifies if the FireBug pane should be displayed in the WebView
     *
     * @param debug true if the FireBug pane should be displayed in the WebView.
     */
    public GoogleMapView(boolean debug) {
        String htmlFile;
        if (debug) {
            htmlFile = "/html/maps-debug.html";
        } else {
            htmlFile = "/html/maps.html";
        }

        webview = new WebView();
        webengine = new JavaFxWebEngine(webview.getEngine());
        JavascriptRuntime.setDefaultWebEngine(webengine);

        // Anchor the WebView on all four sides so it fills this pane.
        setTopAnchor(webview, 0.0);
        setLeftAnchor(webview, 0.0);
        setBottomAnchor(webview, 0.0);
        setRightAnchor(webview, 0.0);
        getChildren().add(webview);

        // BUGFIX: these two listeners were previously registered twice, so every
        // size change fired two JavaScript 'resize' events against the map.
        webview.widthProperty().addListener(e -> mapResized());
        webview.heightProperty().addListener(e -> mapResized());

        webengine.getLoadWorker().stateProperty().addListener(
                new ChangeListener<Worker.State>() {
            public void changed(ObservableValue ov, Worker.State oldState, Worker.State newState) {
                if (newState == Worker.State.SUCCEEDED) {
                    setInitialized(true);
                    fireMapInitializedListeners();
                }
            }
        });
        webengine.load(getClass().getResource(htmlFile).toExternalForm());
    }

    /**
     * Forwards a JavaScript 'resize' event to the map so Google Maps re-lays
     * itself out after the WebView changes size. No-op until initialized.
     */
    private void mapResized() {
        if (initialized) {
            //map.triggerResized();
            System.out.println("GoogleMapView.mapResized: triggering resize event");
            webengine.executeScript("google.maps.event.trigger(" + map.getVariableName() + ", 'resize')");
            System.out.println("GoogleMapView.mapResized: triggering resize event done");
        }
    }

    /**
     * Sets the zoom level of the underlying map.
     *
     * @param zoom the new zoom level
     * @throws MapNotInitializedException if the map page has not finished loading
     */
    public void setZoom(int zoom) {
        checkInitialized();
        map.setZoom(zoom);
    }

    /**
     * Centers the map on the given coordinates.
     *
     * @param latitude  latitude of the new center
     * @param longitude longitude of the new center
     * @throws MapNotInitializedException if the map page has not finished loading
     */
    public void setCenter(double latitude, double longitude) {
        checkInitialized();
        LatLong latLong = new LatLong(latitude, longitude);
        map.setCenter(latLong);
    }

    /**
     * Returns the map created by {@link #createMap(MapOptions)} / {@link #createMap()}.
     *
     * @throws MapNotInitializedException if the map page has not finished loading
     */
    public GoogleMap getMap() {
        checkInitialized();
        return map;
    }

    /**
     * Creates the map with the given options. Registers a projection_changed
     * handler that fires the MapReadyListeners once the projection is available.
     *
     * @param mapOptions options used to construct the map
     * @return the newly created map
     * @throws MapNotInitializedException if the map page has not finished loading
     */
    public GoogleMap createMap(MapOptions mapOptions) {
        checkInitialized();
        map = new GoogleMap(mapOptions);
        map.addStateEventHandler(MapStateEventType.projection_changed, () -> {
            if (map.getProjection() != null) {
                mapResized();
                fireMapReadyListeners();
            }
        });
        return map;
    }

    /**
     * Creates a map with default options.
     * NOTE(review): unlike {@link #createMap(MapOptions)}, this overload neither
     * checks initialization nor registers the projection_changed handler, so
     * MapReadyListeners are never fired for it — preserved as-is for
     * backward compatibility; confirm whether that asymmetry is intended.
     */
    public GoogleMap createMap() {
        map = new GoogleMap();
        return map;
    }

    /**
     * Registers a listener notified when the map HTML page finishes loading.
     * NOTE: the method name contains a historical typo ("Inialized"); it is
     * kept unchanged so existing callers keep compiling.
     */
    public void addMapInializedListener(MapComponentInitializedListener listener) {
        synchronized (mapInitializedListeners) {
            mapInitializedListeners.add(listener);
        }
    }

    /** Unregisters a map-initialized listener. */
    public void removeMapInitializedListener(MapComponentInitializedListener listener) {
        synchronized (mapInitializedListeners) {
            mapInitializedListeners.remove(listener);
        }
    }

    /** Registers a listener notified when the map's projection becomes available. */
    public void addMapReadyListener(MapReadyListener listener) {
        synchronized (mapReadyListeners) {
            mapReadyListeners.add(listener);
        }
    }

    /** Unregisters a map-ready listener. */
    public void removeReadyListener(MapReadyListener listener) {
        synchronized (mapReadyListeners) {
            mapReadyListeners.remove(listener);
        }
    }

    /**
     * Converts a geographic location to pixel coordinates within the map viewport.
     *
     * @throws MapNotInitializedException if the map page has not finished loading
     */
    public Point2D fromLatLngToPoint(LatLong loc) {
        checkInitialized();
        return map.fromLatLngToPoint(loc);
    }

    /**
     * Pans the map by the given pixel offsets.
     *
     * @throws MapNotInitializedException if the map page has not finished loading
     */
    public void panBy(double x, double y) {
        checkInitialized();
        map.panBy(x, y);
    }

    /** Hook for subclasses; intentionally empty. */
    protected void init() {
    }

    protected void setInitialized(boolean initialized) {
        this.initialized = initialized;
    }

    /** Notifies all registered map-initialized listeners. */
    protected void fireMapInitializedListeners() {
        synchronized (mapInitializedListeners) {
            for (MapComponentInitializedListener listener : mapInitializedListeners) {
                listener.mapInitialized();
            }
        }
    }

    /** Notifies all registered map-ready listeners. */
    protected void fireMapReadyListeners() {
        synchronized (mapReadyListeners) {
            for (MapReadyListener listener : mapReadyListeners) {
                listener.mapReady();
            }
        }
    }

    /** Executes the given JavaScript snippet in the WebView and returns its result. */
    protected JSObject executeJavascript(String function) {
        Object returnObject = webengine.executeScript(function);
        return (JSObject) returnObject;
    }

    /**
     * Builds a JavaScript call string of the form {@code methodName(arg1,arg2,...)}.
     * BUGFIX: with zero arguments the old implementation replaced the opening
     * parenthesis with ")" and produced {@code "methodName)"} instead of
     * {@code "methodName()"}.
     */
    protected String getJavascriptMethod(String methodName, Object... args) {
        StringBuilder sb = new StringBuilder();
        sb.append(methodName).append("(");
        for (Object arg : args) {
            sb.append(arg).append(",");
        }
        if (args.length > 0) {
            sb.setLength(sb.length() - 1); // drop the trailing comma
        }
        sb.append(")");
        return sb.toString();
    }

    /**
     * @throws MapNotInitializedException if the map HTML page has not finished loading
     */
    protected void checkInitialized() {
        if (!initialized) {
            throw new MapNotInitializedException();
        }
    }

    /** JavaScript-to-Java bridge object used for logging from the map page. */
    public class JSListener {
        public void log(String text) {
            System.out.println(text);
        }
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

package org.elasticsearch.search.aggregations.bucket.geogrid;

import org.apache.lucene.geo.GeoEncodingUtils;
import org.apache.lucene.util.SloppyMath;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.core.ESSloppyMath;
import org.elasticsearch.geometry.Rectangle;

import java.io.IOException;
import java.util.Locale;

import static org.elasticsearch.common.geo.GeoUtils.normalizeLat;
import static org.elasticsearch.common.geo.GeoUtils.normalizeLon;

/**
 * Implements geotile key hashing, same as used by many map tile implementations.
 * The string key is formatted as "zoom/x/y"
 * The hash value (long) contains all three of those values compacted into a single 64bit value:
 *   bits 58..63 -- zoom (0..29)
 *   bits 29..57 -- X tile index (0..2^zoom)
 *   bits 0..28  -- Y tile index (0..2^zoom)
 */
public final class GeoTileUtils {

    // Utility class: no instances.
    private GeoTileUtils() {}

    private static final double PI_DIV_2 = Math.PI / 2;

    /**
     * Largest number of tiles (precision) to use.
     * This value cannot be more than (64-5)/2 = 29, because 5 bits are used for zoom level itself (0-31)
     * If zoom is not stored inside hash, it would be possible to use up to 32.
     * Note that changing this value will make serialization binary-incompatible between versions.
     * Another consideration is that index optimizes lat/lng storage, loosing some precision.
     * E.g. hash lng=140.74779717298918D lat=45.61884022447444D == "18/233561/93659", but shown as "18/233561/93658"
     */
    public static final int MAX_ZOOM = 29;

    /**
     * The geo-tile map is clipped at 85.05112878 to 90 and -85.05112878 to -90
     */
    public static final double LATITUDE_MASK = 85.0511287798066;

    /**
     * Since shapes are encoded, their boundaries are to be compared to against the encoded/decoded values of <code>LATITUDE_MASK</code>
     */
    public static final double NORMALIZED_LATITUDE_MASK = GeoEncodingUtils.decodeLatitude(GeoEncodingUtils.encodeLatitude(LATITUDE_MASK));
    public static final double NORMALIZED_NEGATIVE_LATITUDE_MASK = GeoEncodingUtils.decodeLatitude(
        GeoEncodingUtils.encodeLatitude(-LATITUDE_MASK)
    );

    /**
     * Bit position of the zoom value within hash - zoom is stored in the most significant 6 bits of a long number.
     */
    private static final int ZOOM_SHIFT = MAX_ZOOM * 2;

    /**
     * Bit mask to extract just the lowest 29 bits of a long
     */
    private static final long X_Y_VALUE_MASK = (1L << MAX_ZOOM) - 1;

    /**
     * Parse an integer precision (zoom level). The {@link ValueType#INT} allows it to be a number or a string.
     *
     * The precision is expressed as a zoom level between 0 and {@link #MAX_ZOOM} (inclusive).
     *
     * @param parser {@link XContentParser} to parse the value from
     * @return int representing precision
     */
    static int parsePrecision(XContentParser parser) throws IOException, ElasticsearchParseException {
        // Accept either a JSON number or a string such as "7"; nodeIntegerValue
        // handles the conversion of both forms.
        final Object node = parser.currentToken().equals(XContentParser.Token.VALUE_NUMBER)
            ? Integer.valueOf(parser.intValue())
            : parser.text();
        return XContentMapValues.nodeIntegerValue(node);
    }

    /**
     * Assert the precision value is within the allowed range, and return it if ok, or throw.
     */
    public static int checkPrecisionRange(int precision) {
        if (precision < 0 || precision > MAX_ZOOM) {
            throw new IllegalArgumentException(
                "Invalid geotile_grid precision of " + precision + ". Must be between 0 and " + MAX_ZOOM + "."
            );
        }
        return precision;
    }

    /**
     * Calculates the x-coordinate in the tile grid for the specified longitude given
     * the number of tile columns for a pre-determined zoom-level.
     *
     * @param longitude the longitude to use when determining the tile x-coordinate
     * @param tiles the number of tiles per row for a pre-determined zoom-level
     */
    public static int getXTile(double longitude, long tiles) {
        // normalizeLon treats this as 180, which is not friendly for tile mapping
        if (longitude == -180) {
            return 0;
        }

        int xTile = (int) Math.floor((normalizeLon(longitude) + 180) / 360 * tiles);

        // Edge values may generate invalid values, and need to be clipped.
        // For example, polar regions (above/below lat 85.05112878) get normalized.
        if (xTile < 0) {
            return 0;
        }
        if (xTile >= tiles) {
            return (int) tiles - 1;
        }
        return xTile;
    }

    /**
     * Calculates the y-coordinate in the tile grid for the specified longitude given
     * the number of tile rows for pre-determined zoom-level.
     *
     * @param latitude the latitude to use when determining the tile y-coordinate
     * @param tiles the number of tiles per column for a pre-determined zoom-level
     */
    public static int getYTile(double latitude, long tiles) {
        // sin(lat) computed as cos(pi/2 - lat) using Lucene's SloppyMath
        // (a faster, approximate cosine).
        double latSin = SloppyMath.cos(PI_DIV_2 - Math.toRadians(normalizeLat(latitude)));
        // Standard Web-Mercator y-tile formula; results outside [0, tiles) are
        // clipped below, mirroring getXTile.
        int yTile = (int) Math.floor((0.5 - (Math.log((1 + latSin) / (1 - latSin)) / (4 * Math.PI))) * tiles);

        if (yTile < 0) {
            yTile = 0;
        }
        if (yTile >= tiles) {
            return (int) tiles - 1;
        }
        return yTile;
    }

    /**
     * Encode lon/lat to the geotile based long format.
     * The resulting hash contains interleaved tile X and Y coordinates.
     * The precision itself is also encoded as a few high bits.
     */
    public static long longEncode(double longitude, double latitude, int precision) {
        // Mathematics for this code was adapted from https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Java

        // Number of tiles for the current zoom level along the X and Y axis
        final long tiles = 1 << checkPrecisionRange(precision);
        long xTile = getXTile(longitude, tiles);
        long yTile = getYTile(latitude, tiles);
        return longEncodeTiles(precision, xTile, yTile);
    }

    /**
     * Encode a geotile hash style string to a long.
     *
     * @param hashAsString String in format "zoom/x/y"
     * @return long encoded value of the given string hash
     */
    public static long longEncode(String hashAsString) {
        int[] parsed = parseHash(hashAsString);
        // NOTE: dispatches to the private (long, long, long) overload, which
        // packs the already-validated z/x/y values without re-deriving tiles.
        return longEncode((long) parsed[0], (long) parsed[1], (long) parsed[2]);
    }

    /**
     * Packs zoom, x-tile and y-tile into the single-long hash layout
     * documented on the class (zoom in the high bits, then x, then y).
     */
    public static long longEncodeTiles(int precision, long xTile, long yTile) {
        // Zoom value is placed in front of all the bits used for the geotile
        // e.g. when max zoom is 29, the largest index would use 58 bits (57th..0th),
        // leaving 5 bits unused for zoom. See MAX_ZOOM comment above.
        return ((long) precision << ZOOM_SHIFT) | (xTile << MAX_ZOOM) | yTile;
    }

    /**
     * Parse geotile hash as zoom, x, y integers.
     */
    private static int[] parseHash(long hash) {
        final int zoom = (int) (hash >>> ZOOM_SHIFT);
        final int xTile = (int) ((hash >>> MAX_ZOOM) & X_Y_VALUE_MASK);
        final int yTile = (int) (hash & X_Y_VALUE_MASK);
        return new int[] { zoom, xTile, yTile };
    }

    /**
     * Packs pre-parsed zoom/x/y long values into the hash; same layout as
     * {@link #longEncodeTiles(int, long, long)} but without widening precision.
     */
    private static long longEncode(long precision, long xTile, long yTile) {
        // Zoom value is placed in front of all the bits used for the geotile
        // e.g. when max zoom is 29, the largest index would use 58 bits (57th..0th),
        // leaving 5 bits unused for zoom. See MAX_ZOOM comment above.
        return (precision << ZOOM_SHIFT) | (xTile << MAX_ZOOM) | yTile;
    }

    /**
     * Parse geotile String hash format in "zoom/x/y" into an array of integers
     */
    private static int[] parseHash(String hashAsString) {
        // Split with limit 4 so a malformed fourth segment is detected rather
        // than silently merged into the third.
        final String[] parts = hashAsString.split("/", 4);
        if (parts.length != 3) {
            throw new IllegalArgumentException(
                "Invalid geotile_grid hash string of " + hashAsString + ". Must be three integers in a form \"zoom/x/y\"."
            );
        }
        try {
            return new int[] { Integer.parseInt(parts[0]), Integer.parseInt(parts[1]), Integer.parseInt(parts[2]) };
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException(
                "Invalid geotile_grid hash string of " + hashAsString + ". Must be three integers in a form \"zoom/x/y\".",
                e
            );
        }
    }

    /**
     * Encode to a geotile string from the geotile based long format
     */
    public static String stringEncode(long hash) {
        int[] res = parseHash(hash);
        validateZXY(res[0], res[1], res[2]);
        return "" + res[0] + "/" + res[1] + "/" + res[2];
    }

    /**
     * Decode long hash as a GeoPoint (center of the tile)
     */
    static GeoPoint hashToGeoPoint(long hash) {
        int[] res = parseHash(hash);
        return zxyToGeoPoint(res[0], res[1], res[2]);
    }

    /**
     * Decode a string bucket key in "zoom/x/y" format to a GeoPoint (center of the tile)
     */
    static GeoPoint keyToGeoPoint(String hashAsString) {
        int[] hashAsInts = parseHash(hashAsString);
        return zxyToGeoPoint(hashAsInts[0], hashAsInts[1], hashAsInts[2]);
    }

    /**
     * Decode a long hash to a bounding box of the tile corners.
     */
    public static Rectangle toBoundingBox(long hash) {
        int[] hashAsInts = parseHash(hash);
        // parseHash returns {zoom, x, y}; toBoundingBox takes (x, y, zoom).
        return toBoundingBox(hashAsInts[1], hashAsInts[2], hashAsInts[0]);
    }

    /**
     * Decode a string bucket key in "zoom/x/y" format to a bounding box of the tile corners
     */
    public static Rectangle toBoundingBox(String hash) {
        int[] hashAsInts = parseHash(hash);
        // parseHash returns {zoom, x, y}; toBoundingBox takes (x, y, zoom).
        return toBoundingBox(hashAsInts[1], hashAsInts[2], hashAsInts[0]);
    }

    /**
     * Computes the bounding box of the given tile using the inverse
     * Web-Mercator projection (ESSloppyMath atan/sinh approximations).
     */
    public static Rectangle toBoundingBox(int xTile, int yTile, int precision) {
        final double tiles = validateZXY(precision, xTile, yTile);
        final double minN = Math.PI - (2.0 * Math.PI * (yTile + 1)) / tiles;
        final double maxN = Math.PI - (2.0 * Math.PI * (yTile)) / tiles;
        final double minY = Math.toDegrees(ESSloppyMath.atan(ESSloppyMath.sinh(minN)));
        final double minX = ((xTile) / tiles * 360.0) - 180;
        final double maxY = Math.toDegrees(ESSloppyMath.atan(ESSloppyMath.sinh(maxN)));
        final double maxX = ((xTile + 1) / tiles * 360.0) - 180;
        // Rectangle constructor order here is (minLon, maxLon, maxLat, minLat).
        return new Rectangle(minX, maxX, maxY, minY);
    }

    /**
     * Validates Zoom, X, and Y values, and returns the total number of allowed tiles along the x/y axis.
     */
    private static int validateZXY(int zoom, int xTile, int yTile) {
        final int tiles = 1 << checkPrecisionRange(zoom);
        if (xTile < 0 || yTile < 0 || xTile >= tiles || yTile >= tiles) {
            throw new IllegalArgumentException(
                String.format(Locale.ROOT, "Zoom/X/Y combination is not valid: %d/%d/%d", zoom, xTile, yTile)
            );
        }
        return tiles;
    }

    /**
     * Converts zoom/x/y integers into a GeoPoint.
     */
    private static GeoPoint zxyToGeoPoint(int zoom, int xTile, int yTile) {
        final int tiles = validateZXY(zoom, xTile, yTile);
        // Use the center of the tile (+0.5 along both axes).
        final double n = Math.PI - (2.0 * Math.PI * (yTile + 0.5)) / tiles;
        final double lat = Math.toDegrees(ESSloppyMath.atan(ESSloppyMath.sinh(n)));
        final double lon = ((xTile + 0.5) / tiles * 360.0) - 180;
        return new GeoPoint(lat, lon);
    }
}
/* * Copyright 1999-2101 Alibaba Group. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.alibaba.fastjson.util; import java.io.Closeable; import java.io.InputStream; import java.nio.ByteBuffer; import java.nio.CharBuffer; import java.nio.charset.CharacterCodingException; import java.nio.charset.Charset; import java.nio.charset.CharsetDecoder; import java.nio.charset.CoderResult; import java.nio.charset.MalformedInputException; import java.security.AccessController; import java.security.PrivilegedAction; import java.util.Arrays; import java.util.Properties; import com.alibaba.fastjson.JSONException; /** * @author wenshao[szujobs@hotmail.com] */ public class IOUtils { public final static String FASTJSON_PROPERTIES ="fastjson.properties"; public final static String FASTJSON_COMPATIBLEWITHJAVABEAN="fastjson.compatibleWithJavaBean"; public final static String FASTJSON_COMPATIBLEWITHFIELDNAME="fastjson.compatibleWithFieldName"; public final static Properties DEFAULT_PROPERTIES =new Properties(); public final static Charset UTF8 = Charset.forName("UTF-8"); public final static char[] DIGITS = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; public final static boolean[] firstIdentifierFlags = new boolean[256]; static { for (char c = 0; c < firstIdentifierFlags.length; ++c) { if (c >= 'A' && c <= 'Z') { firstIdentifierFlags[c] = true; } else if (c >= 'a' && c <= 'z') { firstIdentifierFlags[c] = true; } else if (c == '_') { 
firstIdentifierFlags[c] = true; } } } public final static boolean[] identifierFlags = new boolean[256]; static { for (char c = 0; c < identifierFlags.length; ++c) { if (c >= 'A' && c <= 'Z') { identifierFlags[c] = true; } else if (c >= 'a' && c <= 'z') { identifierFlags[c] = true; } else if (c == '_') { identifierFlags[c] = true; } else if (c >= '0' && c <= '9') { identifierFlags[c] = true; } } } static { try { new PropertiesInitializer().autoConfig(); } catch (Throwable e) { //skip } } static class PropertiesInitializer{ public void autoConfig(){ loadPropertiesFromFile(); TypeUtils.compatibleWithJavaBean ="true".equals(getStringProperty(FASTJSON_COMPATIBLEWITHJAVABEAN)) ; TypeUtils.compatibleWithFieldName ="true".equals(getStringProperty(FASTJSON_COMPATIBLEWITHFIELDNAME)) ; } } public static String getStringProperty(String name) { String prop = null; try { prop = System.getProperty(name); } catch (SecurityException e) { //skip } return (prop == null) ? DEFAULT_PROPERTIES.getProperty(name) : prop; } public static void loadPropertiesFromFile(){ InputStream imputStream = AccessController.doPrivileged(new PrivilegedAction<InputStream>() { public InputStream run() { ClassLoader cl = Thread.currentThread().getContextClassLoader(); if (cl != null) { return cl.getResourceAsStream(FASTJSON_PROPERTIES); } else { return ClassLoader.getSystemResourceAsStream(FASTJSON_PROPERTIES); } } }); if (null != imputStream) { try { DEFAULT_PROPERTIES.load(imputStream); imputStream.close(); } catch (java.io.IOException e) { // skip } } } public final static byte[] specicalFlags_doubleQuotes = new byte[161]; public final static byte[] specicalFlags_singleQuotes = new byte[161]; public final static boolean[] specicalFlags_doubleQuotesFlags = new boolean[161]; public final static boolean[] specicalFlags_singleQuotesFlags = new boolean[161]; public final static char[] replaceChars = new char[93]; static { specicalFlags_doubleQuotes['\0'] = 4; specicalFlags_doubleQuotes['\1'] = 4; 
specicalFlags_doubleQuotes['\2'] = 4; specicalFlags_doubleQuotes['\3'] = 4; specicalFlags_doubleQuotes['\4'] = 4; specicalFlags_doubleQuotes['\5'] = 4; specicalFlags_doubleQuotes['\6'] = 4; specicalFlags_doubleQuotes['\7'] = 4; specicalFlags_doubleQuotes['\b'] = 1; // 8 specicalFlags_doubleQuotes['\t'] = 1; // 9 specicalFlags_doubleQuotes['\n'] = 1; // 10 specicalFlags_doubleQuotes['\u000B'] = 4; // 11 specicalFlags_doubleQuotes['\f'] = 1; // 12 specicalFlags_doubleQuotes['\r'] = 1; // 13 specicalFlags_doubleQuotes['\"'] = 1; // 34 specicalFlags_doubleQuotes['\\'] = 1; // 92 specicalFlags_singleQuotes['\0'] = 4; specicalFlags_singleQuotes['\1'] = 4; specicalFlags_singleQuotes['\2'] = 4; specicalFlags_singleQuotes['\3'] = 4; specicalFlags_singleQuotes['\4'] = 4; specicalFlags_singleQuotes['\5'] = 4; specicalFlags_singleQuotes['\6'] = 4; specicalFlags_singleQuotes['\7'] = 4; specicalFlags_singleQuotes['\b'] = 1; // 8 specicalFlags_singleQuotes['\t'] = 1; // 9 specicalFlags_singleQuotes['\n'] = 1; // 10 specicalFlags_singleQuotes['\u000B'] = 4; // 11 specicalFlags_singleQuotes['\f'] = 1; // 12 specicalFlags_singleQuotes['\r'] = 1; // 13 specicalFlags_singleQuotes['\\'] = 1; // 92 specicalFlags_singleQuotes['\''] = 1; // 39 for (int i = 14; i <= 31; ++i) { specicalFlags_doubleQuotes[i] = 4; specicalFlags_singleQuotes[i] = 4; } for (int i = 127; i <= 160; ++i) { specicalFlags_doubleQuotes[i] = 4; specicalFlags_singleQuotes[i] = 4; } for (int i = 0; i < 161; ++i) { specicalFlags_doubleQuotesFlags[i] = specicalFlags_doubleQuotes[i] != 0; specicalFlags_singleQuotesFlags[i] = specicalFlags_singleQuotes[i] != 0; } replaceChars['\0'] = '0'; replaceChars['\1'] = '1'; replaceChars['\2'] = '2'; replaceChars['\3'] = '3'; replaceChars['\4'] = '4'; replaceChars['\5'] = '5'; replaceChars['\6'] = '6'; replaceChars['\7'] = '7'; replaceChars['\b'] = 'b'; // 8 replaceChars['\t'] = 't'; // 9 replaceChars['\n'] = 'n'; // 10 replaceChars['\u000B'] = 'v'; // 11 replaceChars['\f'] = 'f'; // 
12 replaceChars['\r'] = 'r'; // 13 replaceChars['\"'] = '"'; // 34 replaceChars['\''] = '\''; // 39 replaceChars['/'] = '/'; // 47 replaceChars['\\'] = '\\'; // 92 } public final static char[] ASCII_CHARS = { '0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9', '0', 'A', '0', 'B', '0', 'C', '0', 'D', '0', 'E', '0', 'F', '1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9', '1', 'A', '1', 'B', '1', 'C', '1', 'D', '1', 'E', '1', 'F', '2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9', '2', 'A', '2', 'B', '2', 'C', '2', 'D', '2', 'E', '2', 'F', }; public static void close(Closeable x) { if (x != null) { try { x.close(); } catch (Exception e) { // skip } } } // Requires positive x public static int stringSize(long x) { long p = 10; for (int i = 1; i < 19; i++) { if (x < p) return i; p = 10 * p; } return 19; } public static void getChars(long i, int index, char[] buf) { long q; int r; int charPos = index; char sign = 0; if (i < 0) { sign = '-'; i = -i; } // Get 2 digits/iteration using longs until quotient fits into an int while (i > Integer.MAX_VALUE) { q = i / 100; // really: r = i - (q * 100); r = (int) (i - ((q << 6) + (q << 5) + (q << 2))); i = q; buf[--charPos] = DigitOnes[r]; buf[--charPos] = DigitTens[r]; } // Get 2 digits/iteration using ints int q2; int i2 = (int) i; while (i2 >= 65536) { q2 = i2 / 100; // really: r = i2 - (q * 100); r = i2 - ((q2 << 6) + (q2 << 5) + (q2 << 2)); i2 = q2; buf[--charPos] = DigitOnes[r]; buf[--charPos] = DigitTens[r]; } // Fall thru to fast mode for smaller numbers // assert(i2 <= 65536, i2); for (;;) { q2 = (i2 * 52429) >>> (16 + 3); r = i2 - ((q2 << 3) + (q2 << 1)); // r = i2-(q2*10) ... buf[--charPos] = digits[r]; i2 = q2; if (i2 == 0) break; } if (sign != 0) { buf[--charPos] = sign; } } /** * Places characters representing the integer i into the character array buf. 
The characters are placed into the * buffer backwards starting with the least significant digit at the specified index (exclusive), and working * backwards from there. Will fail if i == Integer.MIN_VALUE */ public static void getChars(int i, int index, char[] buf) { int q, r; int charPos = index; char sign = 0; if (i < 0) { sign = '-'; i = -i; } // Generate two digits per iteration while (i >= 65536) { q = i / 100; // really: r = i - (q * 100); r = i - ((q << 6) + (q << 5) + (q << 2)); i = q; buf[--charPos] = DigitOnes[r]; buf[--charPos] = DigitTens[r]; } // Fall thru to fast mode for smaller numbers // assert(i <= 65536, i); for (;;) { q = (i * 52429) >>> (16 + 3); r = i - ((q << 3) + (q << 1)); // r = i-(q*10) ... buf[--charPos] = digits[r]; i = q; if (i == 0) break; } if (sign != 0) { buf[--charPos] = sign; } } public static void getChars(byte b, int index, char[] buf) { int i = b; int q, r; int charPos = index; char sign = 0; if (i < 0) { sign = '-'; i = -i; } // Fall thru to fast mode for smaller numbers // assert(i <= 65536, i); for (;;) { q = (i * 52429) >>> (16 + 3); r = i - ((q << 3) + (q << 1)); // r = i-(q*10) ... 
buf[--charPos] = digits[r]; i = q; if (i == 0) break; } if (sign != 0) { buf[--charPos] = sign; } } /** * All possible chars for representing a number as a String */ final static char[] digits = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' }; final static char[] DigitTens = { '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '2', '2', '2', '2', '2', '2', '2', '2', '2', '2', '3', '3', '3', '3', '3', '3', '3', '3', '3', '3', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '5', '5', '5', '5', '5', '5', '5', '5', '5', '5', '6', '6', '6', '6', '6', '6', '6', '6', '6', '6', '7', '7', '7', '7', '7', '7', '7', '7', '7', '7', '8', '8', '8', '8', '8', '8', '8', '8', '8', '8', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9', }; final static char[] DigitOnes = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', }; final static int[] sizeTable = { 9, 99, 999, 9999, 99999, 999999, 9999999, 99999999, 999999999, Integer.MAX_VALUE }; // Requires positive x public static int stringSize(int x) { for (int i = 0;; i++) { if (x <= sizeTable[i]) { return i + 1; } } } public static void decode(CharsetDecoder charsetDecoder, ByteBuffer byteBuf, CharBuffer charByte) { try { CoderResult cr = charsetDecoder.decode(byteBuf, charByte, true); if (!cr.isUnderflow()) { cr.throwException(); } cr = charsetDecoder.flush(charByte); if (!cr.isUnderflow()) { 
                cr.throwException();
            }
        } catch (CharacterCodingException x) {
            // Substitution is always enabled,
            // so this shouldn't happen
            throw new JSONException("utf8 decode error, " + x.getMessage(), x);
        }
    }

    /** Returns true if {@code ch} may start a JSON identifier (per the precomputed flag table). */
    public static boolean firstIdentifier(char ch) {
        return ch < IOUtils.firstIdentifierFlags.length && IOUtils.firstIdentifierFlags[ch];
    }

    /** Returns true if {@code ch} may appear inside an identifier (per the precomputed flag table). */
    public static boolean isIdent(char ch) {
        return ch < identifierFlags.length && identifierFlags[ch];
    }

    // Base64 alphabet (encode table) and its inverse (decode table).
    public static final char[] CA = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".toCharArray();
    public static final int[] IA = new int[256];
    static {
        // -1 marks "not a base64 character"; '=' (padding) maps to 0 so it decodes as zero bits.
        Arrays.fill(IA, -1);
        for (int i = 0, iS = CA.length; i < iS; i++)
            IA[CA[i]] = i;
        IA['='] = 0;
    }

    /**
     * Decodes a BASE64 encoded char array that is known to be reasonably well formatted. The method is about twice as
     * fast as #decode(char[]). The preconditions are:<br>
     * + The array must have a line length of 76 chars OR no line separators at all (one line).<br>
     * + Line separator must be "\r\n", as specified in RFC 2045 + The array must not contain illegal characters within
     * the encoded string<br>
     * + The array CAN have illegal characters at the beginning and end, those will be dealt with appropriately.<br>
     *
     * @param chars The source array. Length 0 will return an empty array. <code>null</code> will throw an exception.
     * @return The decoded array of bytes. May be of length 0.
     */
    public static byte[] decodeBase64(char[] chars, int offset, int charsLen) {
        // Check special case
        if (charsLen == 0) {
            return new byte[0];
        }
        int sIx = offset, eIx = offset + charsLen - 1; // Start and end index after trimming.
        // Trim illegal chars from start
        // NOTE(review): IA is indexed without masking to 0-255 here (unlike decodeBase64(String),
        // which uses "& 0xff"); a char >= 256 would throw ArrayIndexOutOfBoundsException — confirm
        // callers only pass base64/ASCII text.
        while (sIx < eIx && IA[chars[sIx]] < 0)
            sIx++;
        // Trim illegal chars from end
        while (eIx > 0 && IA[chars[eIx]] < 0)
            eIx--;
        // get the padding count (=) (0, 1 or 2)
        int pad = chars[eIx] == '=' ? (chars[eIx - 1] == '=' ? 2 : 1) : 0; // Count '=' at end.
        int cCnt = eIx - sIx + 1; // Content count including possible separators
        // NOTE(review): the "\r" probe reads absolute index 76, not offset + 76 — looks wrong for
        // offset != 0; verify against callers before relying on line-separated input with an offset.
        int sepCnt = charsLen > 76 ? (chars[76] == '\r' ? cCnt / 78 : 0) << 1 : 0;
        int len = ((cCnt - sepCnt) * 6 >> 3) - pad; // The number of decoded bytes
        byte[] bytes = new byte[len]; // Preallocate byte[] of exact length

        // Decode all but the last 0 - 2 bytes.
        int d = 0;
        for (int cc = 0, eLen = (len / 3) * 3; d < eLen;) {
            // Assemble three bytes into an int from four "valid" characters.
            int i = IA[chars[sIx++]] << 18 | IA[chars[sIx++]] << 12 | IA[chars[sIx++]] << 6 | IA[chars[sIx++]];
            // Add the bytes
            bytes[d++] = (byte) (i >> 16);
            bytes[d++] = (byte) (i >> 8);
            bytes[d++] = (byte) i;
            // If line separator, jump over it. (19 quads * 4 chars = 76-char line.)
            if (sepCnt > 0 && ++cc == 19) {
                sIx += 2;
                cc = 0;
            }
        }

        if (d < len) {
            // Decode last 1-3 bytes (incl '=') into 1-3 bytes
            int i = 0;
            for (int j = 0; sIx <= eIx - pad; j++)
                i |= IA[chars[sIx++]] << (18 - j * 6);
            for (int r = 16; d < len; r -= 8)
                bytes[d++] = (byte) (i >> r);
        }
        return bytes;
    }

    /**
     * Decodes a base64-encoded region of a String; same algorithm and preconditions as
     * {@link #decodeBase64(char[], int, int)}, operating on {@code charAt} instead of array access.
     */
    public static byte[] decodeBase64(String chars, int offset, int charsLen) {
        // Check special case
        if (charsLen == 0) {
            return new byte[0];
        }
        int sIx = offset, eIx = offset + charsLen - 1; // Start and end index after trimming.
        // Trim illegal chars from start
        // NOTE(review): same unmasked IA lookup and absolute charAt(76) probe as the char[] overload —
        // see the notes there.
        while (sIx < eIx && IA[chars.charAt(sIx)] < 0)
            sIx++;
        // Trim illegal chars from end
        while (eIx > 0 && IA[chars.charAt(eIx)] < 0)
            eIx--;
        // get the padding count (=) (0, 1 or 2)
        int pad = chars.charAt(eIx) == '=' ? (chars.charAt(eIx - 1) == '=' ? 2 : 1) : 0; // Count '=' at end.
        int cCnt = eIx - sIx + 1; // Content count including possible separators
        int sepCnt = charsLen > 76 ? (chars.charAt(76) == '\r' ? cCnt / 78 : 0) << 1 : 0;
        int len = ((cCnt - sepCnt) * 6 >> 3) - pad; // The number of decoded bytes
        byte[] bytes = new byte[len]; // Preallocate byte[] of exact length

        // Decode all but the last 0 - 2 bytes.
        int d = 0;
        for (int cc = 0, eLen = (len / 3) * 3; d < eLen;) {
            // Assemble three bytes into an int from four "valid" characters.
            int i = IA[chars.charAt(sIx++)] << 18 | IA[chars.charAt(sIx++)] << 12 | IA[chars.charAt(sIx++)] << 6 | IA[chars.charAt(sIx++)];
            // Add the bytes
            bytes[d++] = (byte) (i >> 16);
            bytes[d++] = (byte) (i >> 8);
            bytes[d++] = (byte) i;
            // If line separator, jump over it.
            if (sepCnt > 0 && ++cc == 19) {
                sIx += 2;
                cc = 0;
            }
        }

        if (d < len) {
            // Decode last 1-3 bytes (incl '=') into 1-3 bytes
            int i = 0;
            for (int j = 0; sIx <= eIx - pad; j++)
                i |= IA[chars.charAt(sIx++)] << (18 - j * 6);
            for (int r = 16; d < len; r -= 8)
                bytes[d++] = (byte) (i >> r);
        }
        return bytes;
    }

    /**
     * Decodes a BASE64 encoded string that is known to be reasonably well formatted. The method is about twice as fast
     * as decode(String). The preconditions are:<br>
     * + The array must have a line length of 76 chars OR no line separators at all (one line).<br>
     * + Line separator must be "\r\n", as specified in RFC 2045 + The array must not contain illegal characters within
     * the encoded string<br>
     * + The array CAN have illegal characters at the beginning and end, those will be dealt with appropriately.<br>
     *
     * @param s The source string. Length 0 will return an empty array. <code>null</code> will throw an exception.
     * @return The decoded array of bytes. May be of length 0.
     */
    public static byte[] decodeBase64(String s) {
        // Check special case
        int sLen = s.length();
        if (sLen == 0) {
            return new byte[0];
        }
        int sIx = 0, eIx = sLen - 1; // Start and end index after trimming.
        // Trim illegal chars from start (masked to 0-255 here, unlike the offset overloads)
        while (sIx < eIx && IA[s.charAt(sIx) & 0xff] < 0)
            sIx++;
        // Trim illegal chars from end
        while (eIx > 0 && IA[s.charAt(eIx) & 0xff] < 0)
            eIx--;
        // get the padding count (=) (0, 1 or 2)
        int pad = s.charAt(eIx) == '=' ? (s.charAt(eIx - 1) == '=' ? 2 : 1) : 0; // Count '=' at end.
        int cCnt = eIx - sIx + 1; // Content count including possible separators
        int sepCnt = sLen > 76 ? (s.charAt(76) == '\r' ? cCnt / 78 : 0) << 1 : 0;
        int len = ((cCnt - sepCnt) * 6 >> 3) - pad; // The number of decoded bytes
        byte[] dArr = new byte[len]; // Preallocate byte[] of exact length

        // Decode all but the last 0 - 2 bytes.
        int d = 0;
        for (int cc = 0, eLen = (len / 3) * 3; d < eLen;) {
            // Assemble three bytes into an int from four "valid" characters.
            int i = IA[s.charAt(sIx++)] << 18 | IA[s.charAt(sIx++)] << 12 | IA[s.charAt(sIx++)] << 6 | IA[s.charAt(sIx++)];
            // Add the bytes
            dArr[d++] = (byte) (i >> 16);
            dArr[d++] = (byte) (i >> 8);
            dArr[d++] = (byte) i;
            // If line separator, jump over it.
            if (sepCnt > 0 && ++cc == 19) {
                sIx += 2;
                cc = 0;
            }
        }

        if (d < len) {
            // Decode last 1-3 bytes (incl '=') into 1-3 bytes
            int i = 0;
            for (int j = 0; sIx <= eIx - pad; j++)
                i |= IA[s.charAt(sIx++)] << (18 - j * 6);
            for (int r = 16; d < len; r -= 8)
                dArr[d++] = (byte) (i >> r);
        }
        return dArr;
    }

    /**
     * Encodes {@code len} chars of {@code sa} starting at {@code sp} as UTF-8 into {@code da}.
     * Unpaired high surrogates at the end of the input are replaced with '?'; mispaired
     * surrogates elsewhere throw {@link JSONException}.
     *
     * @return the number of bytes written to {@code da} (writing always starts at index 0)
     */
    public static int encodeUTF8(char[] sa, int sp, int len, byte[] da) {
        int sl = sp + len;
        int dp = 0;
        int dlASCII = dp + Math.min(len, da.length);

        // ASCII only optimized loop
        while (dp < dlASCII && sa[sp] < '\u0080') {
            da[dp++] = (byte) sa[sp++];
        }

        while (sp < sl) {
            char c = sa[sp++];
            if (c < 0x80) {
                // Have at most seven bits
                da[dp++] = (byte) c;
            } else if (c < 0x800) {
                // 2 bytes, 11 bits
                da[dp++] = (byte) (0xc0 | (c >> 6));
                da[dp++] = (byte) (0x80 | (c & 0x3f));
            } else if (c >= '\uD800' && c < ('\uDFFF' + 1)) { //Character.isSurrogate(c) but 1.7
                // Surrogate range: try to combine a high/low pair into one supplementary code point.
                final int uc;
                int ip = sp - 1;
                if (Character.isHighSurrogate(c)) {
                    if (sl - ip < 2) {
                        // High surrogate at end of input — no low surrogate available.
                        uc = -1;
                    } else {
                        char d = sa[ip + 1];
                        if (Character.isLowSurrogate(d)) {
                            uc = Character.toCodePoint(c, d);
                        } else {
                            throw new JSONException("encodeUTF8 error", new MalformedInputException(1));
                        }
                    }
                } else {
                    if (Character.isLowSurrogate(c)) {
                        // Low surrogate without a preceding high surrogate.
                        throw new JSONException("encodeUTF8 error", new MalformedInputException(1));
                    } else {
                        uc = c;
                    }
                }

                if (uc < 0) {
                    da[dp++] = (byte) '?';
                } else {
                    // 4 bytes, 21 bits
                    da[dp++] = (byte) (0xf0 | ((uc >> 18)));
                    da[dp++] = (byte) (0x80 | ((uc >> 12) & 0x3f));
                    da[dp++] = (byte) (0x80 | ((uc >> 6) & 0x3f));
                    da[dp++] = (byte) (0x80 | (uc & 0x3f));
                    sp++; // 2 chars
                }
            } else {
                // 3 bytes, 16 bits
                da[dp++] = (byte) (0xe0 | ((c >> 12)));
                da[dp++] = (byte) (0x80 | ((c >> 6) & 0x3f));
                da[dp++] = (byte) (0x80 | (c & 0x3f));
            }
        }
        return dp;
    }

    /**
     * Decodes {@code len} UTF-8 bytes of {@code sa} starting at {@code sp} into {@code da}.
     *
     * @return the number of chars written to {@code da}, or -1 if the input is malformed
     *         (bad continuation byte, overlong form, truncated sequence, or a sequence that
     *         decodes to a surrogate / non-supplementary value)
     */
    public static int decodeUTF8(byte[] sa, int sp, int len, char[] da) {
        final int sl = sp + len;
        int dp = 0;
        int dlASCII = Math.min(len, da.length);

        // ASCII only optimized loop
        while (dp < dlASCII && sa[sp] >= 0)
            da[dp++] = (char) sa[sp++];

        while (sp < sl) {
            int b1 = sa[sp++];
            if (b1 >= 0) {
                // 1 byte, 7 bits: 0xxxxxxx
                da[dp++] = (char) b1;
            } else if ((b1 >> 5) == -2 && (b1 & 0x1e) != 0) {
                // 2 bytes, 11 bits: 110xxxxx 10xxxxxx
                if (sp < sl) {
                    int b2 = sa[sp++];
                    if ((b2 & 0xc0) != 0x80) { // isNotContinuation(b2)
                        return -1;
                    } else {
                        // XOR trick strips the 0xC0/0x80 marker bits while merging payloads.
                        da[dp++] = (char) (((b1 << 6) ^ b2)^ (((byte) 0xC0 << 6) ^ ((byte) 0x80 << 0)));
                    }
                    continue;
                }
                return -1;
            } else if ((b1 >> 4) == -2) {
                // 3 bytes, 16 bits: 1110xxxx 10xxxxxx 10xxxxxx
                if (sp + 1 < sl) {
                    int b2 = sa[sp++];
                    int b3 = sa[sp++];
                    if ((b1 == (byte) 0xe0 && (b2 & 0xe0) == 0x80) //
                        || (b2 & 0xc0) != 0x80 //
                        || (b3 & 0xc0) != 0x80) { // isMalformed3(b1, b2, b3)
                        return -1;
                    } else {
                        char c = (char)((b1 << 12) ^ (b2 << 6) ^ (b3 ^ (((byte) 0xE0 << 12) ^ ((byte) 0x80 << 6) ^ ((byte) 0x80 << 0))));
                        if (Character.isSurrogate(c)) {
                            // UTF-8 must not encode surrogate code points directly.
                            return -1;
                        } else {
                            da[dp++] = c;
                        }
                    }
                    continue;
                }
                return -1;
            } else if ((b1 >> 3) == -2) {
                // 4 bytes, 21 bits: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
                if (sp + 2 < sl) {
                    int b2 = sa[sp++];
                    int b3 = sa[sp++];
                    int b4 = sa[sp++];
                    int uc = ((b1 << 18) ^ (b2 << 12) ^ (b3 << 6) ^ (b4 ^ (((byte) 0xF0 << 18) ^ ((byte) 0x80 << 12) ^ ((byte) 0x80 << 6) ^ ((byte) 0x80 << 0))));
                    if (((b2 & 0xc0) != 0x80 || (b3 & 0xc0) != 0x80 || (b4 & 0xc0) != 0x80) // isMalformed4
                        ||
                        // shortest form check
                        !Character.isSupplementaryCodePoint(uc)) {
                        return -1;
                    } else {
                        // Emit the supplementary code point as a surrogate pair.
                        da[dp++] = Character.highSurrogate(uc);
                        da[dp++] = Character.lowSurrogate(uc);
                    }
                    continue;
                }
                return -1;
            } else {
                return -1;
            }
        }
        return dp;
    }
}
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.druid.indexing.common.task.batch.parallel; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.jsontype.NamedType; import org.apache.druid.common.config.NullHandling; import org.apache.druid.data.input.impl.CsvInputFormat; import org.apache.druid.data.input.impl.DimensionsSpec; import org.apache.druid.data.input.impl.LocalInputSource; import org.apache.druid.data.input.impl.TimestampSpec; import org.apache.druid.indexer.partitions.HashedPartitionsSpec; import org.apache.druid.indexer.partitions.PartitionsSpec; import org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec; import org.apache.druid.indexing.common.TestUtils; import org.apache.druid.indexing.common.task.Task; import org.apache.druid.indexing.common.task.TaskResource; import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.LongSumAggregatorFactory; import org.apache.druid.segment.indexing.DataSchema; import org.apache.druid.segment.indexing.granularity.UniformGranularitySpec; import 
org.apache.druid.segment.realtime.firehose.LocalFirehoseFactory;
import org.hamcrest.CoreMatchers;
import org.joda.time.Interval;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

import javax.annotation.Nullable;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;

/**
 * Serde and validation tests for {@link ParallelIndexSupervisorTask}: round-trips the task
 * through JSON and checks partitions-spec constraints when forceGuaranteedRollup is enabled.
 */
public class ParallelIndexSupervisorTaskSerdeTest
{
  static {
    NullHandling.initializeForTests();
  }

  private static final ObjectMapper OBJECT_MAPPER = createObjectMapper();
  private static final List<Interval> INTERVALS = Collections.singletonList(Intervals.of("2018/2019"));

  private static ObjectMapper createObjectMapper()
  {
    // The test mapper needs the "local" firehose subtype registered so the ioConfig round-trips.
    ObjectMapper mapper = new TestUtils().getTestObjectMapper();
    mapper.registerSubtypes(new NamedType(LocalFirehoseFactory.class, "local"));
    return mapper;
  }

  @Rule
  public ExpectedException expectedException = ExpectedException.none();

  @Test
  public void serde() throws IOException
  {
    // A task serialized to JSON must deserialize back to an equal task.
    ParallelIndexSupervisorTask original = new ParallelIndexSupervisorTaskBuilder()
        .ingestionSpec(
            new ParallelIndexIngestionSpecBuilder()
                .inputIntervals(INTERVALS)
                .build()
        )
        .build();

    String serialized = OBJECT_MAPPER.writeValueAsString(original);
    Assert.assertEquals(original, OBJECT_MAPPER.readValue(serialized, Task.class));
  }

  @Test
  public void forceGuaranteedRollupWithHashPartitionsMissingNumShards()
  {
    // numShards may be omitted for hash partitioning; task construction must still succeed.
    Integer numShards = null;
    ParallelIndexSupervisorTask supervisorTask = new ParallelIndexSupervisorTaskBuilder()
        .ingestionSpec(
            new ParallelIndexIngestionSpecBuilder()
                .forceGuaranteedRollup(true)
                .partitionsSpec(new HashedPartitionsSpec(null, numShards, null))
                .inputIntervals(INTERVALS)
                .build()
        )
        .build();

    PartitionsSpec spec = supervisorTask.getIngestionSchema().getTuningConfig().getPartitionsSpec();
    Assert.assertThat(spec, CoreMatchers.instanceOf(HashedPartitionsSpec.class));
  }

  @Test
  public void forceGuaranteedRollupWithHashPartitionsValid()
  {
    // Fully specified hash partitioning is accepted as-is.
    Integer numShards = 2;
    ParallelIndexSupervisorTask supervisorTask = new ParallelIndexSupervisorTaskBuilder()
        .ingestionSpec(
            new ParallelIndexIngestionSpecBuilder()
                .forceGuaranteedRollup(true)
                .partitionsSpec(new HashedPartitionsSpec(null, numShards, null))
                .inputIntervals(INTERVALS)
                .build()
        )
        .build();

    PartitionsSpec spec = supervisorTask.getIngestionSchema().getTuningConfig().getPartitionsSpec();
    Assert.assertThat(spec, CoreMatchers.instanceOf(HashedPartitionsSpec.class));
  }

  @Test
  public void forceGuaranteedRollupWithSingleDimPartitionsMissingDimension()
  {
    // Single-dim partitioning without a partition dimension must be rejected at build time.
    expectedException.expect(IllegalArgumentException.class);
    expectedException.expectMessage("partitionDimension must be specified");

    new ParallelIndexSupervisorTaskBuilder()
        .ingestionSpec(
            new ParallelIndexIngestionSpecBuilder()
                .forceGuaranteedRollup(true)
                .partitionsSpec(new SingleDimensionPartitionsSpec(1, null, null, true))
                .inputIntervals(INTERVALS)
                .build()
        )
        .build();
  }

  @Test
  public void forceGuaranteedRollupWithSingleDimPartitionsValid()
  {
    // Single-dim partitioning with a dimension name is accepted.
    ParallelIndexSupervisorTask supervisorTask = new ParallelIndexSupervisorTaskBuilder()
        .ingestionSpec(
            new ParallelIndexIngestionSpecBuilder()
                .forceGuaranteedRollup(true)
                .partitionsSpec(new SingleDimensionPartitionsSpec(1, null, "a", true))
                .inputIntervals(INTERVALS)
                .build()
        )
        .build();

    PartitionsSpec spec = supervisorTask.getIngestionSchema().getTuningConfig().getPartitionsSpec();
    Assert.assertThat(spec, CoreMatchers.instanceOf(SingleDimensionPartitionsSpec.class));
  }

  /** Fluent helper that assembles a supervisor task with fixed id/resource/context. */
  private static class ParallelIndexSupervisorTaskBuilder
  {
    private static final String ID = "taskId";

    private final TaskResource taskResource = new TaskResource("group", 1);
    private final Map<String, Object> context = Collections.emptyMap();

    private ParallelIndexIngestionSpec ingestionSpec;

    ParallelIndexSupervisorTaskBuilder ingestionSpec(ParallelIndexIngestionSpec ingestionSpec)
    {
      this.ingestionSpec = ingestionSpec;
      return this;
    }

    ParallelIndexSupervisorTask build()
    {
      return new ParallelIndexSupervisorTask(
          ID,
          null,
          taskResource,
          ingestionSpec,
          context
      );
    }
  }

  /** Fluent helper that assembles an ingestion spec with a fixed schema and local-CSV ioConfig. */
  private static class ParallelIndexIngestionSpecBuilder
  {
    private static final TimestampSpec TIMESTAMP_SPEC = new TimestampSpec("ts", "auto", null);
    private static final DimensionsSpec DIMENSIONS_SPEC = new DimensionsSpec(
        DimensionsSpec.getDefaultSchemas(Arrays.asList("ts", "dim"))
    );

    private final ParallelIndexIOConfig ioConfig = new ParallelIndexIOConfig(
        null,
        new LocalInputSource(new File("tmp"), "test_*"),
        new CsvInputFormat(Arrays.asList("ts", "dim", "val"), null, null, false, 0),
        false,
        null
    );

    // For dataSchema.granularitySpec
    @Nullable
    private List<Interval> inputIntervals = null;

    // For tuningConfig
    @Nullable
    private Boolean forceGuaranteedRollup = null;

    @Nullable
    PartitionsSpec partitionsSpec = null;

    @SuppressWarnings("SameParameterValue")
    ParallelIndexIngestionSpecBuilder inputIntervals(List<Interval> inputIntervals)
    {
      this.inputIntervals = inputIntervals;
      return this;
    }

    @SuppressWarnings("SameParameterValue")
    ParallelIndexIngestionSpecBuilder forceGuaranteedRollup(boolean forceGuaranteedRollup)
    {
      this.forceGuaranteedRollup = forceGuaranteedRollup;
      return this;
    }

    ParallelIndexIngestionSpecBuilder partitionsSpec(PartitionsSpec partitionsSpec)
    {
      this.partitionsSpec = partitionsSpec;
      return this;
    }

    ParallelIndexIngestionSpec build()
    {
      // Only partitionsSpec / forceGuaranteedRollup vary; every other tuning knob stays default.
      ParallelIndexTuningConfig tuningConfig = new ParallelIndexTuningConfig(
          null, null, null, null, null, null, null, null, null, partitionsSpec, null, null, null,
          forceGuaranteedRollup, null, null, null, null, null, null, null, null, null, null, null,
          null, null, null, null, null
      );

      DataSchema dataSchema = new DataSchema(
          "dataSource",
          TIMESTAMP_SPEC,
          DIMENSIONS_SPEC,
          new AggregatorFactory[]{
              new LongSumAggregatorFactory("val", "val")
          },
          new UniformGranularitySpec(Granularities.DAY, Granularities.MINUTE, inputIntervals),
          null
      );

      return new ParallelIndexIngestionSpec(dataSchema, ioConfig, tuningConfig);
    }
  }
}
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.phoenix.mapreduce; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; import 
org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.util.Tool; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; import org.apache.phoenix.jdbc.PhoenixDriver; import org.apache.phoenix.mapreduce.bulkload.TableRowkeyPair; import org.apache.phoenix.mapreduce.bulkload.TargetTableRef; import org.apache.phoenix.mapreduce.bulkload.TargetTableRefFunctions; import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTable.IndexType; import org.apache.phoenix.util.ColumnInfo; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.QueryUtil; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.base.Splitter; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** * Base tool for running MapReduce-based ingests of data. 
*/ public abstract class AbstractBulkLoadTool extends Configured implements Tool { protected static final Logger LOGGER = LoggerFactory.getLogger(AbstractBulkLoadTool.class); static final Option ZK_QUORUM_OPT = new Option("z", "zookeeper", true, "Supply zookeeper connection details (optional)"); static final Option INPUT_PATH_OPT = new Option("i", "input", true, "Input path(s) (comma-separated, mandatory)"); static final Option OUTPUT_PATH_OPT = new Option("o", "output", true, "Output path for temporary HFiles (optional)"); static final Option SCHEMA_NAME_OPT = new Option("s", "schema", true, "Phoenix schema name (optional)"); static final Option TABLE_NAME_OPT = new Option("t", "table", true, "Phoenix table name (mandatory)"); static final Option INDEX_TABLE_NAME_OPT = new Option("it", "index-table", true, "Phoenix index table name when just loading this particualar index table"); static final Option IMPORT_COLUMNS_OPT = new Option("c", "import-columns", true, "Comma-separated list of columns to be imported"); static final Option IGNORE_ERRORS_OPT = new Option("g", "ignore-errors", false, "Ignore input errors"); static final Option HELP_OPT = new Option("h", "help", false, "Show this help and quit"); static final Option SKIP_HEADER_OPT = new Option("k", "skip-header", false, "Skip the first line of CSV files (the header)"); /** * Set configuration values based on parsed command line options. 
* * @param cmdLine supplied command line options * @param importColumns descriptors of columns to be imported * @param conf job configuration */ protected abstract void configureOptions(CommandLine cmdLine, List<ColumnInfo> importColumns, Configuration conf) throws SQLException; protected abstract void setupJob(Job job); protected Options getOptions() { Options options = new Options(); options.addOption(INPUT_PATH_OPT); options.addOption(TABLE_NAME_OPT); options.addOption(INDEX_TABLE_NAME_OPT); options.addOption(ZK_QUORUM_OPT); options.addOption(OUTPUT_PATH_OPT); options.addOption(SCHEMA_NAME_OPT); options.addOption(IMPORT_COLUMNS_OPT); options.addOption(IGNORE_ERRORS_OPT); options.addOption(HELP_OPT); options.addOption(SKIP_HEADER_OPT); return options; } /** * Parses the commandline arguments, throws IllegalStateException if mandatory arguments are * missing. * * @param args supplied command line arguments * @return the parsed command line */ protected CommandLine parseOptions(String[] args) { Options options = getOptions(); CommandLineParser parser = new DefaultParser(false, false); CommandLine cmdLine = null; try { cmdLine = parser.parse(options, args); } catch (ParseException e) { printHelpAndExit("Error parsing command line options: " + e.getMessage(), options); } if (cmdLine.hasOption(HELP_OPT.getOpt())) { printHelpAndExit(options, 0); } if (!cmdLine.hasOption(TABLE_NAME_OPT.getOpt())) { throw new IllegalStateException(TABLE_NAME_OPT.getLongOpt() + " is a mandatory " + "parameter"); } if (!cmdLine.getArgList().isEmpty()) { throw new IllegalStateException("Got unexpected extra parameters: " + cmdLine.getArgList()); } if (!cmdLine.hasOption(INPUT_PATH_OPT.getOpt())) { throw new IllegalStateException(INPUT_PATH_OPT.getLongOpt() + " is a mandatory " + "parameter"); } return cmdLine; } private void printHelpAndExit(String errorMessage, Options options) { System.err.println(errorMessage); printHelpAndExit(options, 1); } private void printHelpAndExit(Options 
options, int exitCode) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("help", options); System.exit(exitCode); } @Override public int run(String[] args) throws Exception { Configuration conf = HBaseConfiguration.create(getConf()); CommandLine cmdLine = null; try { cmdLine = parseOptions(args); } catch (IllegalStateException e) { printHelpAndExit(e.getMessage(), getOptions()); } return loadData(conf, cmdLine); } private int loadData(Configuration conf, CommandLine cmdLine) throws Exception { String tableName = cmdLine.getOptionValue(TABLE_NAME_OPT.getOpt()); String schemaName = cmdLine.getOptionValue(SCHEMA_NAME_OPT.getOpt()); String indexTableName = cmdLine.getOptionValue(INDEX_TABLE_NAME_OPT.getOpt()); String qualifiedTableName = SchemaUtil.getQualifiedTableName(schemaName, tableName); String qualifiedIndexTableName = null; if (indexTableName != null){ qualifiedIndexTableName = SchemaUtil.getQualifiedTableName(schemaName, indexTableName); } if (cmdLine.hasOption(ZK_QUORUM_OPT.getOpt())) { // ZK_QUORUM_OPT is optional, but if it's there, use it for both the conn and the job. String zkQuorum = cmdLine.getOptionValue(ZK_QUORUM_OPT.getOpt()); PhoenixDriver.ConnectionInfo info = PhoenixDriver.ConnectionInfo.create(zkQuorum); LOGGER.info("Configuring HBase connection to {}", info); for (Map.Entry<String,String> entry : info.asProps()) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Setting {} = {}", entry.getKey(), entry.getValue()); } conf.set(entry.getKey(), entry.getValue()); } } // Skip the first line of the CSV file(s)? 
if (cmdLine.hasOption(SKIP_HEADER_OPT.getOpt())) { PhoenixTextInputFormat.setSkipHeader(conf); } final Connection conn = QueryUtil.getConnection(conf); if (LOGGER.isDebugEnabled()) { LOGGER.debug("Reading columns from {} :: {}", ((PhoenixConnection) conn).getURL(), qualifiedTableName); } List<ColumnInfo> importColumns = buildImportColumns(conn, cmdLine, qualifiedTableName); Preconditions.checkNotNull(importColumns); Preconditions.checkArgument(!importColumns.isEmpty(), "Column info list is empty"); FormatToBytesWritableMapper.configureColumnInfoList(conf, importColumns); boolean ignoreInvalidRows = cmdLine.hasOption(IGNORE_ERRORS_OPT.getOpt()); conf.setBoolean(FormatToBytesWritableMapper.IGNORE_INVALID_ROW_CONFKEY, ignoreInvalidRows); conf.set(FormatToBytesWritableMapper.TABLE_NAME_CONFKEY, SchemaUtil.getEscapedFullTableName(qualifiedTableName)); // give subclasses their hook configureOptions(cmdLine, importColumns, conf); String sName = SchemaUtil.normalizeIdentifier(schemaName); String tName = SchemaUtil.normalizeIdentifier(tableName); try { validateTable(conn, sName, tName); } finally { conn.close(); } final String inputPaths = cmdLine.getOptionValue(INPUT_PATH_OPT.getOpt()); final Path outputPath; if (cmdLine.hasOption(OUTPUT_PATH_OPT.getOpt())) { outputPath = new Path(cmdLine.getOptionValue(OUTPUT_PATH_OPT.getOpt())); } else { outputPath = new Path("/tmp/" + UUID.randomUUID()); } List<TargetTableRef> tablesToBeLoaded = new ArrayList<TargetTableRef>(); PTable table = PhoenixRuntime.getTable(conn, qualifiedTableName); tablesToBeLoaded.add(new TargetTableRef(qualifiedTableName, table.getPhysicalName().getString())); boolean hasLocalIndexes = false; for(PTable index: table.getIndexes()) { if (index.getIndexType() == IndexType.LOCAL) { hasLocalIndexes = qualifiedIndexTableName == null ? true : index.getTableName().getString() .equals(qualifiedIndexTableName); if (hasLocalIndexes) break; } } // using conn after it's been closed... 
o.O tablesToBeLoaded.addAll(getIndexTables(conn, qualifiedTableName)); // When loading a single index table, check index table name is correct if (qualifiedIndexTableName != null){ TargetTableRef targetIndexRef = null; for (TargetTableRef tmpTable : tablesToBeLoaded){ if (tmpTable.getLogicalName().compareToIgnoreCase(qualifiedIndexTableName) == 0) { targetIndexRef = tmpTable; break; } } if (targetIndexRef == null){ throw new IllegalStateException("Bulk Loader error: index table " + qualifiedIndexTableName + " doesn't exist"); } tablesToBeLoaded.clear(); tablesToBeLoaded.add(targetIndexRef); } return submitJob(conf, tableName, inputPaths, outputPath, tablesToBeLoaded, hasLocalIndexes); } /** * Submits the jobs to the cluster. * Loads the HFiles onto the respective tables. * @throws Exception */ public int submitJob(final Configuration conf, final String qualifiedTableName, final String inputPaths, final Path outputPath, List<TargetTableRef> tablesToBeLoaded, boolean hasLocalIndexes) throws Exception { Job job = Job.getInstance(conf, "Phoenix MapReduce import for " + qualifiedTableName); FileInputFormat.addInputPaths(job, inputPaths); FileOutputFormat.setOutputPath(job, outputPath); job.setInputFormatClass(PhoenixTextInputFormat.class); job.setMapOutputKeyClass(TableRowkeyPair.class); job.setMapOutputValueClass(ImmutableBytesWritable.class); job.setOutputKeyClass(TableRowkeyPair.class); job.setOutputValueClass(KeyValue.class); job.setReducerClass(FormatToKeyValueReducer.class); byte[][] splitKeysBeforeJob = null; try(org.apache.hadoop.hbase.client.Connection hbaseConn = ConnectionFactory.createConnection(job.getConfiguration())) { RegionLocator regionLocator = null; if(hasLocalIndexes) { try{ regionLocator = hbaseConn.getRegionLocator( TableName.valueOf(qualifiedTableName)); splitKeysBeforeJob = regionLocator.getStartKeys(); } finally { if (regionLocator != null) regionLocator.close(); } } MultiHfileOutputFormat.configureIncrementalLoad(job, tablesToBeLoaded); final 
String tableNamesAsJson = TargetTableRefFunctions.NAMES_TO_JSON .apply(tablesToBeLoaded); final String logicalNamesAsJson = TargetTableRefFunctions.LOGICAL_NAMES_TO_JSON .apply(tablesToBeLoaded); job.getConfiguration().set(FormatToBytesWritableMapper.TABLE_NAMES_CONFKEY, tableNamesAsJson); job.getConfiguration().set(FormatToBytesWritableMapper.LOGICAL_NAMES_CONFKEY, logicalNamesAsJson); // give subclasses their hook setupJob(job); LOGGER.info("Running MapReduce import job from {} to {}", inputPaths, outputPath); boolean success = job.waitForCompletion(true); if (success) { if (hasLocalIndexes) { try { regionLocator = hbaseConn.getRegionLocator( TableName.valueOf(qualifiedTableName)); if(!IndexUtil.matchingSplitKeys(splitKeysBeforeJob, regionLocator.getStartKeys())) { LOGGER.error("The table " + qualifiedTableName + " has local indexes and" + " there is split key mismatch before and after running" + " bulkload job. Please rerun the job otherwise there may be" + " inconsistencies between actual data and index data."); return -1; } } finally { if (regionLocator != null) regionLocator.close(); } } LOGGER.info("Loading HFiles from {}", outputPath); completebulkload(conf,outputPath,tablesToBeLoaded); LOGGER.info("Removing output directory {}", outputPath); if(!outputPath.getFileSystem(conf).delete(outputPath, true)) { LOGGER.error("Failed to delete the output directory {}", outputPath); } return 0; } else { return -1; } } } private void completebulkload(Configuration conf,Path outputPath , List<TargetTableRef> tablesToBeLoaded) throws Exception { Set<String> tableNames = new HashSet<>(tablesToBeLoaded.size()); for(TargetTableRef table : tablesToBeLoaded) { if(tableNames.contains(table.getPhysicalName())){ continue; } tableNames.add(table.getPhysicalName()); LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf); String tableName = table.getPhysicalName(); Path tableOutputPath = CsvBulkImportUtil.getOutputPath(outputPath, tableName); 
try(org.apache.hadoop.hbase.client.Connection hbaseConn = ConnectionFactory.createConnection(conf); Table htable = hbaseConn.getTable(TableName.valueOf(tableName))) { LOGGER.info("Loading HFiles for {} from {}", tableName , tableOutputPath); loader.doBulkLoad(tableOutputPath, hbaseConn.getAdmin(), htable, hbaseConn.getRegionLocator(TableName.valueOf(tableName))); LOGGER.info("Incremental load complete for table=" + tableName); } } } /** * Build up the list of columns to be imported. The list is taken from the command line if * present, otherwise it is taken from the table description. * * @param conn connection to Phoenix * @param cmdLine supplied command line options * @param qualifiedTableName table name (possibly with schema) of the table to be imported * @return the list of columns to be imported */ List<ColumnInfo> buildImportColumns(Connection conn, CommandLine cmdLine, String qualifiedTableName) throws SQLException { List<String> userSuppliedColumnNames = null; if (cmdLine.hasOption(IMPORT_COLUMNS_OPT.getOpt())) { userSuppliedColumnNames = Lists.newArrayList( Splitter.on(",").trimResults().split (cmdLine.getOptionValue(IMPORT_COLUMNS_OPT.getOpt()))); } return SchemaUtil.generateColumnInfo( conn, qualifiedTableName, userSuppliedColumnNames, true); } /** * Perform any required validation on the table being bulk loaded into: * - ensure no column family names start with '_', as they'd be ignored leading to problems. 
* @throws java.sql.SQLException */ private void validateTable(Connection conn, String schemaName, String tableName) throws SQLException { ResultSet rs = conn.getMetaData().getColumns( null, StringUtil.escapeLike(schemaName), StringUtil.escapeLike(tableName), null); while (rs.next()) { String familyName = rs.getString(PhoenixDatabaseMetaData.COLUMN_FAMILY); if (familyName != null && familyName.startsWith("_")) { if (QueryConstants.DEFAULT_COLUMN_FAMILY.equals(familyName)) { throw new IllegalStateException( "Bulk Loader error: All column names that are not part of the " + "primary key constraint must be prefixed with a column family " + "name (i.e. f.my_column VARCHAR)"); } else { throw new IllegalStateException("Bulk Loader error: Column family name " + "must not start with '_': " + familyName); } } } rs.close(); } /** * Get the index tables of current data table * @throws java.sql.SQLException */ private List<TargetTableRef> getIndexTables(Connection conn, String qualifiedTableName) throws SQLException { PTable table = PhoenixRuntime.getTable(conn, qualifiedTableName); List<TargetTableRef> indexTables = new ArrayList<TargetTableRef>(); for(PTable indexTable : table.getIndexes()){ indexTables.add(new TargetTableRef(indexTable.getName().getString(), indexTable .getPhysicalName().getString())); } return indexTables; } }
/* * Copyright 2016 Red Hat, Inc. and/or its affiliates * and other contributors as indicated by the @author tags. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.keycloak.adapters.saml; import org.apache.catalina.Context; import org.apache.catalina.Lifecycle; import org.apache.catalina.LifecycleEvent; import org.apache.catalina.LifecycleListener; import org.apache.catalina.authenticator.FormAuthenticator; import org.apache.catalina.connector.Request; import org.apache.catalina.connector.Response; import org.keycloak.adapters.saml.config.parsers.DeploymentBuilder; import org.keycloak.adapters.saml.config.parsers.ResourceLoader; import org.keycloak.adapters.spi.AuthChallenge; import org.keycloak.adapters.spi.AuthOutcome; import org.keycloak.adapters.spi.HttpFacade; import org.keycloak.adapters.spi.InMemorySessionIdMapper; import org.keycloak.adapters.spi.SessionIdMapper; import org.keycloak.adapters.tomcat.CatalinaHttpFacade; import org.keycloak.adapters.tomcat.CatalinaUserSessionManagement; import org.keycloak.adapters.tomcat.GenericPrincipalFactory; import org.keycloak.saml.common.exceptions.ParsingException; import javax.servlet.RequestDispatcher; import javax.servlet.ServletContext; import javax.servlet.ServletException; import javax.servlet.http.HttpServletResponse; import java.io.ByteArrayInputStream; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.util.logging.Level; 
import java.util.logging.Logger; /** * Keycloak authentication valve * * @author <a href="mailto:ungarida@gmail.com">Davide Ungari</a> * @author <a href="mailto:bill@burkecentral.com">Bill Burke</a> * @version $Revision: 1 $ */ public abstract class AbstractSamlAuthenticatorValve extends FormAuthenticator implements LifecycleListener { public static final String TOKEN_STORE_NOTE = "TOKEN_STORE_NOTE"; private final static Logger log = Logger.getLogger(""+AbstractSamlAuthenticatorValve.class); protected CatalinaUserSessionManagement userSessionManagement = new CatalinaUserSessionManagement(); protected SamlDeploymentContext deploymentContext; protected SessionIdMapper mapper = new InMemorySessionIdMapper(); @Override public void lifecycleEvent(LifecycleEvent event) { if (Lifecycle.START_EVENT.equals(event.getType())) { cache = false; } else if (Lifecycle.AFTER_START_EVENT.equals(event.getType())) { keycloakInit(); } else if (event.getType() == Lifecycle.BEFORE_STOP_EVENT) { beforeStop(); } } protected void logoutInternal(Request request) { CatalinaHttpFacade facade = new CatalinaHttpFacade(null, request); SamlDeployment deployment = deploymentContext.resolveDeployment(facade); SamlSessionStore tokenStore = getSessionStore(request, facade, deployment); tokenStore.logoutAccount(); request.setUserPrincipal(null); } @SuppressWarnings("UseSpecificCatch") public void keycloakInit() { // Possible scenarios: // 1) The deployment has a keycloak.config.resolver specified and it exists: // Outcome: adapter uses the resolver // 2) The deployment has a keycloak.config.resolver and isn't valid (doesn't exists, isn't a resolver, ...) 
: // Outcome: adapter is left unconfigured // 3) The deployment doesn't have a keycloak.config.resolver , but has a keycloak.json (or equivalent) // Outcome: adapter uses it // 4) The deployment doesn't have a keycloak.config.resolver nor keycloak.json (or equivalent) // Outcome: adapter is left unconfigured String configResolverClass = context.getServletContext().getInitParameter("keycloak.config.resolver"); if (configResolverClass != null) { try { throw new RuntimeException("Not implemented yet"); //KeycloakConfigResolver configResolver = (KeycloakConfigResolver) context.getLoader().getClassLoader().loadClass(configResolverClass).newInstance(); //deploymentContext = new SamlDeploymentContext(configResolver); //log.log(Level.INFO, "Using {0} to resolve Keycloak configuration on a per-request basis.", configResolverClass); } catch (Exception ex) { log.log(Level.FINE, "The specified resolver {0} could NOT be loaded. Keycloak is unconfigured and will deny all requests. Reason: {1}", new Object[]{configResolverClass, ex.getMessage()}); //deploymentContext = new AdapterDeploymentContext(new KeycloakDeployment()); } } else { InputStream is = getConfigInputStream(context); final SamlDeployment deployment; if (is == null) { log.info("No adapter configuration. 
Keycloak is unconfigured and will deny all requests."); deployment = new DefaultSamlDeployment(); } else { try { ResourceLoader loader = new ResourceLoader() { @Override public InputStream getResourceAsStream(String resource) { return context.getServletContext().getResourceAsStream(resource); } }; deployment = new DeploymentBuilder().build(is, loader); } catch (ParsingException e) { throw new RuntimeException(e); } } deploymentContext = new SamlDeploymentContext(deployment); log.fine("Keycloak is using a per-deployment configuration."); } context.getServletContext().setAttribute(SamlDeploymentContext.class.getName(), deploymentContext); } protected void beforeStop() { } private static InputStream getConfigFromServletContext(ServletContext servletContext) { String xml = servletContext.getInitParameter(AdapterConstants.AUTH_DATA_PARAM_NAME); if (xml == null) { return null; } log.finest("**** using " + AdapterConstants.AUTH_DATA_PARAM_NAME); log.finest(xml); return new ByteArrayInputStream(xml.getBytes()); } private static InputStream getConfigInputStream(Context context) { InputStream is = getConfigFromServletContext(context.getServletContext()); if (is == null) { String path = context.getServletContext().getInitParameter("keycloak.config.file"); if (path == null) { log.fine("**** using /WEB-INF/keycloak-saml.xml"); is = context.getServletContext().getResourceAsStream("/WEB-INF/keycloak-saml.xml"); } else { try { is = new FileInputStream(path); } catch (FileNotFoundException e) { log.log(Level.SEVERE, "NOT FOUND {0}", path); throw new RuntimeException(e); } } } return is; } @Override public void invoke(Request request, Response response) throws IOException, ServletException { log.fine("*********************** SAML ************"); CatalinaHttpFacade facade = new CatalinaHttpFacade(response, request); SamlDeployment deployment = deploymentContext.resolveDeployment(facade); if (request.getRequestURI().substring(request.getContextPath().length()).endsWith("/saml")) { if 
(deployment != null && deployment.isConfigured()) { SamlSessionStore tokenStore = getSessionStore(request, facade, deployment); SamlAuthenticator authenticator = new CatalinaSamlEndpoint(facade, deployment, tokenStore); executeAuthenticator(request, response, facade, deployment, authenticator); return; } } try { getSessionStore(request, facade, deployment).isLoggedIn(); // sets request UserPrincipal if logged in. we do this so that the UserPrincipal is available on unsecured, unconstrainted URLs super.invoke(request, response); } finally { } } protected abstract GenericPrincipalFactory createPrincipalFactory(); protected abstract boolean forwardToErrorPageInternal(Request request, HttpServletResponse response, Object loginConfig) throws IOException; protected void forwardToLogoutPage(Request request, HttpServletResponse response,SamlDeployment deployment) { RequestDispatcher disp = request.getRequestDispatcher(deployment.getLogoutPage()); //make sure the login page is never cached response.setHeader("Cache-Control", "no-cache, no-store, must-revalidate"); response.setHeader("Pragma", "no-cache"); response.setHeader("Expires", "0"); try { disp.forward(request.getRequest(), response); } catch (ServletException e) { throw new RuntimeException(e); } catch (IOException e) { throw new RuntimeException(e); } } protected boolean authenticateInternal(Request request, HttpServletResponse response, Object loginConfig) throws IOException { log.fine("authenticateInternal"); CatalinaHttpFacade facade = new CatalinaHttpFacade(response, request); SamlDeployment deployment = deploymentContext.resolveDeployment(facade); if (deployment == null || !deployment.isConfigured()) { log.fine("deployment not configured"); return false; } SamlSessionStore tokenStore = getSessionStore(request, facade, deployment); SamlAuthenticator authenticator = new CatalinaSamlAuthenticator(facade, deployment, tokenStore); return executeAuthenticator(request, response, facade, deployment, authenticator); } 
protected boolean executeAuthenticator(Request request, HttpServletResponse response, CatalinaHttpFacade facade, SamlDeployment deployment, SamlAuthenticator authenticator) { AuthOutcome outcome = authenticator.authenticate(); if (outcome == AuthOutcome.AUTHENTICATED) { log.fine("AUTHENTICATED"); if (facade.isEnded()) { return false; } return true; } if (outcome == AuthOutcome.LOGGED_OUT) { logoutInternal(request); if (deployment.getLogoutPage() != null) { forwardToLogoutPage(request, response, deployment); } log.fine("Logging OUT"); return false; } AuthChallenge challenge = authenticator.getChallenge(); if (challenge != null) { log.fine("challenge"); challenge.challenge(facade); } return false; } public void keycloakSaveRequest(Request request) throws IOException { saveRequest(request, request.getSessionInternal(true)); } public boolean keycloakRestoreRequest(Request request) { try { return restoreRequest(request, request.getSessionInternal()); } catch (IOException e) { throw new RuntimeException(e); } } protected SamlSessionStore getSessionStore(Request request, HttpFacade facade, SamlDeployment resolvedDeployment) { SamlSessionStore store = (SamlSessionStore)request.getNote(TOKEN_STORE_NOTE); if (store != null) { return store; } store = createSessionStore(request, facade, resolvedDeployment); request.setNote(TOKEN_STORE_NOTE, store); return store; } protected SamlSessionStore createSessionStore(Request request, HttpFacade facade, SamlDeployment resolvedDeployment) { SamlSessionStore store; store = new CatalinaSamlSessionStore(userSessionManagement, createPrincipalFactory(), mapper, request, this, facade, resolvedDeployment); return store; } }
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.commons.fileupload2;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.stream.Stream;

import org.apache.commons.fileupload2.portlet.PortletFileUploadTest;
import org.apache.commons.fileupload2.servlet.ServletFileUploadTest;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

/**
 * Common tests for implementations of {@link FileUpload}. This is a parameterized test.
 * Tests must be valid and common to all implementations of FileUpload added as parameter
 * in this class.
 *
 * @see ServletFileUploadTest
 * @see PortletFileUploadTest
 * @since 1.4
 */
public class FileUploadTest {

    /**
     * @return {@link FileUpload} classes under test.
     */
    public static Stream<FileUpload> data() {
        return Util.fileUploadImplementations().stream();
    }

    // --- Test methods common to all implementations of a FileUpload

    /**
     * Parses a four-part multipart body (one file part, one simple field, and a
     * repeated "multi" field) and verifies each resulting {@link FileItem}.
     */
    @ParameterizedTest
    @MethodSource("data")
    public void testFileUpload(final FileUpload upload)
            throws IOException, FileUploadException {
        final List<FileItem> fileItems = Util.parseUpload(upload,
                "-----1234\r\n" +
                "Content-Disposition: " +
                "form-data; name=\"file\"; filename=\"foo.tab\"\r\n" +
                "Content-Type: text/whatever\r\n" +
                "\r\n" +
                "This is the content of the file\n" +
                "\r\n" +
                "-----1234\r\n" +
                "Content-Disposition: form-data; name=\"field\"\r\n" +
                "\r\n" +
                "fieldValue\r\n" +
                "-----1234\r\n" +
                "Content-Disposition: form-data; name=\"multi\"\r\n" +
                "\r\n" +
                "value1\r\n" +
                "-----1234\r\n" +
                "Content-Disposition: form-data; name=\"multi\"\r\n" +
                "\r\n" +
                "value2\r\n" +
                "-----1234--\r\n");
        assertEquals(4, fileItems.size());

        final FileItem file = fileItems.get(0);
        assertEquals("file", file.getFieldName());
        assertFalse(file.isFormField());
        assertEquals("This is the content of the file\n", file.getString());
        assertEquals("text/whatever", file.getContentType());
        assertEquals("foo.tab", file.getName());

        final FileItem field = fileItems.get(1);
        assertEquals("field", field.getFieldName());
        assertTrue(field.isFormField());
        assertEquals("fieldValue", field.getString());

        final FileItem multi0 = fileItems.get(2);
        assertEquals("multi", multi0.getFieldName());
        assertTrue(multi0.isFormField());
        assertEquals("value1", multi0.getString());

        final FileItem multi1 = fileItems.get(3);
        assertEquals("multi", multi1.getFieldName());
        assertTrue(multi1.isFormField());
        assertEquals("value2", multi1.getString());
    }

    /**
     * Field names and file names must be preserved case-sensitively
     * (mixed-case "FiLe" / upper-case "FOO.tab" survive parsing unchanged).
     */
    @ParameterizedTest
    @MethodSource("data")
    public void testFilenameCaseSensitivity(final FileUpload upload)
            throws IOException, FileUploadException {
        final List<FileItem> fileItems = Util.parseUpload(upload,
                "-----1234\r\n" +
                "Content-Disposition: form-data; " +
                "name=\"FiLe\"; filename=\"FOO.tab\"\r\n" +
                "Content-Type: text/whatever\r\n" +
                "\r\n" +
                "This is the content of the file\n" +
                "\r\n" +
                "-----1234--\r\n");
        assertEquals(1, fileItems.size());

        final FileItem file = fileItems.get(0);
        assertEquals("FiLe", file.getFieldName());
        assertEquals("FOO.tab", file.getName());
    }

    /**
     * This is what the browser does if you submit the form without choosing a file.
     */
    @ParameterizedTest
    @MethodSource("data")
    public void testEmptyFile(final FileUpload upload)
            throws UnsupportedEncodingException, FileUploadException {
        final List<FileItem> fileItems = Util.parseUpload (upload,
                "-----1234\r\n" +
                "Content-Disposition: form-data; name=\"file\"; filename=\"\"\r\n" +
                "\r\n" +
                "\r\n" +
                "-----1234--\r\n");
        assertEquals(1, fileItems.size());

        final FileItem file = fileItems.get(0);
        assertFalse(file.isFormField());
        // An empty filename yields an empty (but non-null) content and name.
        assertEquals("", file.getString());
        assertEquals("", file.getName());
    }

    /**
     * Internet Explorer 5 for the Mac has a bug where the carriage
     * return is missing on any boundary line immediately preceding
     * an input with type=image. (type=submit does not have the bug.)
     */
    @ParameterizedTest
    @MethodSource("data")
    public void testIE5MacBug(final FileUpload upload)
            throws UnsupportedEncodingException, FileUploadException {
        final List<FileItem> fileItems = Util.parseUpload(upload,
                "-----1234\r\n" +
                "Content-Disposition: form-data; name=\"field1\"\r\n" +
                "\r\n" +
                "fieldValue\r\n" +
                "-----1234\n" + // NOTE \r missing
                "Content-Disposition: form-data; name=\"submitName.x\"\r\n" +
                "\r\n" +
                "42\r\n" +
                "-----1234\n" + // NOTE \r missing
                "Content-Disposition: form-data; name=\"submitName.y\"\r\n" +
                "\r\n" +
                "21\r\n" +
                "-----1234\r\n" +
                "Content-Disposition: form-data; name=\"field2\"\r\n" +
                "\r\n" +
                "fieldValue2\r\n" +
                "-----1234--\r\n");
        assertEquals(4, fileItems.size());

        final FileItem field1 = fileItems.get(0);
        assertEquals("field1", field1.getFieldName());
        assertTrue(field1.isFormField());
        assertEquals("fieldValue", field1.getString());

        final FileItem submitX = fileItems.get(1);
        assertEquals("submitName.x", submitX.getFieldName());
        assertTrue(submitX.isFormField());
        assertEquals("42", submitX.getString());

        final FileItem submitY = fileItems.get(2);
        assertEquals("submitName.y", submitY.getFieldName());
        assertTrue(submitY.isFormField());
        assertEquals("21", submitY.getString());

        final FileItem field2 = fileItems.get(3);
        assertEquals("field2", field2.getFieldName());
        assertTrue(field2.isFormField());
        assertEquals("fieldValue2", field2.getString());
    }

    /**
     * Test for <a href="https://issues.apache.org/jira/browse/FILEUPLOAD-62">FILEUPLOAD-62</a>
     * (nested multipart/mixed part containing two attachments).
     */
    @ParameterizedTest
    @MethodSource("data")
    public void testFILEUPLOAD62(final FileUpload upload) throws Exception {
        final String contentType = "multipart/form-data; boundary=AaB03x";
        final String request =
            "--AaB03x\r\n" +
            "content-disposition: form-data; name=\"field1\"\r\n" +
            "\r\n" +
            "Joe Blow\r\n" +
            "--AaB03x\r\n" +
            "content-disposition: form-data; name=\"pics\"\r\n" +
            "Content-type: multipart/mixed; boundary=BbC04y\r\n" +
            "\r\n" +
            "--BbC04y\r\n" +
            "Content-disposition: attachment; filename=\"file1.txt\"\r\n" +
            "Content-Type: text/plain\r\n" +
            "\r\n" +
            "... contents of file1.txt ...\r\n" +
            "--BbC04y\r\n" +
            "Content-disposition: attachment; filename=\"file2.gif\"\r\n" +
            "Content-type: image/gif\r\n" +
            "Content-Transfer-Encoding: binary\r\n" +
            "\r\n" +
            "...contents of file2.gif...\r\n" +
            "--BbC04y--\r\n" +
            "--AaB03x--";
        final List<FileItem> fileItems = Util.parseUpload(upload, request.getBytes(StandardCharsets.US_ASCII), contentType);
        assertEquals(3, fileItems.size());

        final FileItem item0 = fileItems.get(0);
        assertEquals("field1", item0.getFieldName());
        assertNull(item0.getName());
        // NOTE(review): new String(byte[]) uses the platform default charset here;
        // the payload is ASCII so the comparison is charset-independent in practice.
        assertEquals("Joe Blow", new String(item0.get()));

        final FileItem item1 = fileItems.get(1);
        // Nested attachments inherit the field name of the enclosing "pics" part.
        assertEquals("pics", item1.getFieldName());
        assertEquals("file1.txt", item1.getName());
        assertEquals("... contents of file1.txt ...", new String(item1.get()));

        final FileItem item2 = fileItems.get(2);
        assertEquals("pics", item2.getFieldName());
        assertEquals("file2.gif", item2.getName());
        assertEquals("...contents of file2.gif...", new String(item2.get()));
    }

    /**
     * Test for <a href="https://issues.apache.org/jira/browse/FILEUPLOAD-111">FILEUPLOAD-111</a>
     * (RFC 2047 style folded headers: continuation lines start with TAB or SP).
     */
    @ParameterizedTest
    @MethodSource("data")
    public void testFoldedHeaders(final FileUpload upload)
            throws IOException, FileUploadException {
        final List<FileItem> fileItems = Util.parseUpload(upload,
                "-----1234\r\n" +
                "Content-Disposition: form-data; name=\"file\"; filename=\"foo.tab\"\r\n" +
                "Content-Type: text/whatever\r\n" +
                "\r\n" +
                "This is the content of the file\n" +
                "\r\n" +
                "-----1234\r\n" +
                "Content-Disposition: form-data; \r\n" +
                "\tname=\"field\"\r\n" +
                "\r\n" +
                "fieldValue\r\n" +
                "-----1234\r\n" +
                "Content-Disposition: form-data;\r\n" +
                "     name=\"multi\"\r\n" +
                "\r\n" +
                "value1\r\n" +
                "-----1234\r\n" +
                "Content-Disposition: form-data; name=\"multi\"\r\n" +
                "\r\n" +
                "value2\r\n" +
                "-----1234--\r\n");
        assertEquals(4, fileItems.size());

        final FileItem file = fileItems.get(0);
        assertEquals("file", file.getFieldName());
        assertFalse(file.isFormField());
        assertEquals("This is the content of the file\n", file.getString());
        assertEquals("text/whatever", file.getContentType());
        assertEquals("foo.tab", file.getName());

        final FileItem field = fileItems.get(1);
        assertEquals("field", field.getFieldName());
        assertTrue(field.isFormField());
        assertEquals("fieldValue", field.getString());

        final FileItem multi0 = fileItems.get(2);
        assertEquals("multi", multi0.getFieldName());
        assertTrue(multi0.isFormField());
        assertEquals("value1", multi0.getString());

        final FileItem multi1 = fileItems.get(3);
        assertEquals("multi", multi1.getFieldName());
        assertTrue(multi1.isFormField());
        assertEquals("value2", multi1.getString());
    }

    /**
     * Test case for <a href="https://issues.apache.org/jira/browse/FILEUPLOAD-130">FILEUPLOAD-130</a>
     * (arbitrary additional part headers must be exposed via getHeaders()).
     */
    @ParameterizedTest
    @MethodSource("data")
    public void testFileUpload130(final FileUpload upload) throws Exception {
        final String[] headerNames = new String[]
        {
            "SomeHeader", "OtherHeader", "YetAnotherHeader", "WhatAHeader"
        };
        final String[] headerValues = new String[]
        {
            "present", "Is there", "Here", "Is That"
        };
        final List<FileItem> fileItems = Util.parseUpload(upload,
                "-----1234\r\n" +
                "Content-Disposition: form-data; name=\"file\"; " +
                "filename=\"foo.tab\"\r\n" +
                "Content-Type: text/whatever\r\n" +
                headerNames[0] + ": " + headerValues[0] + "\r\n" +
                "\r\n" +
                "This is the content of the file\n" +
                "\r\n" +
                "-----1234\r\n" +
                "Content-Disposition: form-data; \r\n" +
                "\tname=\"field\"\r\n" +
                headerNames[1] + ": " + headerValues[1] + "\r\n" +
                "\r\n" +
                "fieldValue\r\n" +
                "-----1234\r\n" +
                "Content-Disposition: form-data;\r\n" +
                "     name=\"multi\"\r\n" +
                headerNames[2] + ": " + headerValues[2] + "\r\n" +
                "\r\n" +
                "value1\r\n" +
                "-----1234\r\n" +
                "Content-Disposition: form-data; name=\"multi\"\r\n" +
                headerNames[3] + ": " + headerValues[3] + "\r\n" +
                "\r\n" +
                "value2\r\n" +
                "-----1234--\r\n");
        assertEquals(4, fileItems.size());

        final FileItem file = fileItems.get(0);
        assertHeaders(headerNames, headerValues, file, 0);

        final FileItem field = fileItems.get(1);
        assertHeaders(headerNames, headerValues, field, 1);

        final FileItem multi0 = fileItems.get(2);
        assertHeaders(headerNames, headerValues, multi0, 2);

        final FileItem multi1 = fileItems.get(3);
        assertHeaders(headerNames, headerValues, multi1, 3);
    }

    /**
     * Test for <a href="https://issues.apache.org/jira/browse/FILEUPLOAD-239">FILEUPLOAD-239</a>
     * (a part with Content-type "multipart/mixed, boundary=..." — note the comma —
     * is treated as an ordinary attachment, not a nested multipart).
     */
    @ParameterizedTest
    @MethodSource("data")
    public void testContentTypeAttachment(final FileUpload upload)
            throws IOException, FileUploadException {
        final List<FileItem> fileItems = Util.parseUpload(upload,
                "-----1234\r\n" +
                "content-disposition: form-data; name=\"field1\"\r\n" +
                "\r\n" +
                "Joe Blow\r\n" +
                "-----1234\r\n" +
                "content-disposition: form-data; name=\"pics\"\r\n" +
                "Content-type: multipart/mixed, boundary=---9876\r\n" +
                "\r\n" +
                "-----9876\r\n" +
                "Content-disposition: attachment; filename=\"file1.txt\"\r\n" +
                "Content-Type: text/plain\r\n" +
                "\r\n" +
                "... contents of file1.txt ...\r\n" +
                "-----9876--\r\n" +
                "-----1234--\r\n");
        assertEquals(2, fileItems.size());

        final FileItem field = fileItems.get(0);
        assertEquals("field1", field.getFieldName());
        assertTrue(field.isFormField());
        assertEquals("Joe Blow", field.getString());

        final FileItem file = fileItems.get(1);
        assertEquals("pics", file.getFieldName());
        assertFalse(file.isFormField());
        assertEquals("... contents of file1.txt ...", file.getString());
        assertEquals("text/plain", file.getContentType());
        assertEquals("file1.txt", file.getName());
    }

    /**
     * Asserts that the item carries exactly the header at {@code pIndex}
     * and none of the other candidate headers.
     */
    private void assertHeaders(final String[] pHeaderNames, final String[] pHeaderValues,
            final FileItem pItem, final int pIndex) {
        for (int i = 0; i < pHeaderNames.length; i++) {
            final String value = pItem.getHeaders().getHeader(pHeaderNames[i]);
            if (i == pIndex) {
                assertEquals(pHeaderValues[i], value);
            } else {
                assertNull(value);
            }
        }
    }

}
package com.thinkbiganalytics.spark.conf.model;

/*-
 * #%L
 * Spark Shell Core
 * %%
 * Copyright (C) 2017 ThinkBig Analytics
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */

/**
 * Configuration bean describing how the kylo-spark-shell client process is
 * launched and how this service registers with it. Plain getters/setters so
 * the properties can be bound from external configuration.
 */
public class SparkShellProperties {

    /** Path to the kylo-spark-shell-client jar file */
    private String appResource;

    /** Startup timeout in seconds */
    private int clientTimeout = 60;

    /** Spark deploy mode */
    private String deployMode = "client";

    /** Additional files to be submitted with the application */
    private String files;

    /** Request timeout in seconds */
    private int idleTimeout = 900;

    /** Additional jars to be submitted with the Spark application */
    private String jars;

    /** The {@code JAVA_HOME} for launching the Spark application */
    private String javaHome;

    /** Spark master */
    private String master = "yarn";

    /** Maximum port number */
    private int portMax = 45999;

    /** Minimum port number */
    private int portMin = 45000;

    /** Custom properties file with Spark configuration for the application */
    private String propertiesFile;

    /** Enables user impersonation */
    private boolean proxyUser = false;

    /** Password for keystore */
    private String registrationKeystorePassword;

    /** Path to keystore */
    private String registrationKeystorePath;

    /** Registration URL */
    private String registrationUrl;

    /** Externally managed process */
    private SparkShellServerProperties server;

    /** Additional command-line options */
    private String sparkArgs;

    /** Custom Spark installation location */
    private String sparkHome;

    /** Enables verbose reporting for Spark Submit */
    private boolean verbose;

    public String getAppResource() {
        return appResource;
    }

    public void setAppResource(String appResource) {
        this.appResource = appResource;
    }

    public int getClientTimeout() {
        return clientTimeout;
    }

    public void setClientTimeout(int clientTimeout) {
        this.clientTimeout = clientTimeout;
    }

    public String getDeployMode() {
        return deployMode;
    }

    public void setDeployMode(String deployMode) {
        this.deployMode = deployMode;
    }

    public String getFiles() {
        return files;
    }

    public void setFiles(String files) {
        this.files = files;
    }

    public int getIdleTimeout() {
        return idleTimeout;
    }

    public void setIdleTimeout(int idleTimeout) {
        this.idleTimeout = idleTimeout;
    }

    public String getJars() {
        return jars;
    }

    public void setJars(String jars) {
        this.jars = jars;
    }

    public String getJavaHome() {
        return javaHome;
    }

    public void setJavaHome(String javaHome) {
        this.javaHome = javaHome;
    }

    public String getMaster() {
        return master;
    }

    public void setMaster(String master) {
        this.master = master;
    }

    public int getPortMax() {
        return portMax;
    }

    public void setPortMax(int portMax) {
        this.portMax = portMax;
    }

    public int getPortMin() {
        return portMin;
    }

    public void setPortMin(int portMin) {
        this.portMin = portMin;
    }

    public String getPropertiesFile() {
        return propertiesFile;
    }

    public void setPropertiesFile(String propertiesFile) {
        this.propertiesFile = propertiesFile;
    }

    public boolean isProxyUser() {
        return proxyUser;
    }

    public void setProxyUser(boolean proxyUser) {
        this.proxyUser = proxyUser;
    }

    public String getRegistrationKeystorePassword() {
        return registrationKeystorePassword;
    }

    public void setRegistrationKeystorePassword(String registrationKeystorePassword) {
        this.registrationKeystorePassword = registrationKeystorePassword;
    }

    public String getRegistrationKeystorePath() {
        return registrationKeystorePath;
    }

    public void setRegistrationKeystorePath(String registrationKeystorePath) {
        this.registrationKeystorePath = registrationKeystorePath;
    }

    public String getRegistrationUrl() {
        return registrationUrl;
    }

    public void setRegistrationUrl(String registrationUrl) {
        this.registrationUrl = registrationUrl;
    }

    public SparkShellServerProperties getServer() {
        return server;
    }

    public void setServer(SparkShellServerProperties server) {
        this.server = server;
    }

    public String getSparkArgs() {
        return sparkArgs;
    }

    public void setSparkArgs(String sparkArgs) {
        this.sparkArgs = sparkArgs;
    }

    public String getSparkHome() {
        return sparkHome;
    }

    public void setSparkHome(String sparkHome) {
        this.sparkHome = sparkHome;
    }

    public boolean isVerbose() {
        return verbose;
    }

    public void setVerbose(boolean verbose) {
        this.verbose = verbose;
    }
}
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hive.metastore; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.client.builder.GetTablesRequestBuilder; import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.ExtendedTableInfo; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.GetTablesExtRequestFields; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.Type; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import 
org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.conf.Configuration; import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.ACCESSTYPE_NONE; import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.ACCESSTYPE_READONLY; import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.ACCESSTYPE_READWRITE; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.fail; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.parse.WarehouseInstance; import org.apache.hadoop.util.StringUtils; import com.google.common.collect.Lists; import org.apache.thrift.TException; import org.junit.Before; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class TestTenantBasedStorageHierarchy { private static final Logger LOG = LoggerFactory.getLogger(TestTenantBasedStorageHierarchy.class); protected static HiveMetaStoreClient client; protected static Configuration conf; File ext_wh = null; File wh = null; protected static boolean isThriftClient = true; private static final String CAPABILITIES_KEY = "OBJCAPABILITIES"; private static final String DATABASE_WAREHOUSE_SUFFIX = ".db"; @Before public void setUp() throws Exception { conf = MetastoreConf.newMetastoreConf(); wh = new File(System.getProperty("java.io.tmpdir") + File.separator + "hive" + File.separator + "warehouse" + File.separator + "hive" + File.separator); wh.mkdirs(); ext_wh = new File(System.getProperty("java.io.tmpdir") + File.separator + "hive" + File.separator + "warehouse" + File.separator + "hive-external" + File.separator); ext_wh.mkdirs(); MetastoreConf.setVar(conf, 
ConfVars.METASTORE_METADATA_TRANSFORMER_CLASS, "org.apache.hadoop.hive.metastore.MetastoreDefaultTransformer"); MetastoreConf.setBoolVar(conf, ConfVars.ALLOW_TENANT_BASED_STORAGE, true); MetastoreConf.setBoolVar(conf, ConfVars.HIVE_IN_TEST, false); MetastoreConf.setVar(conf, ConfVars.WAREHOUSE, wh.getCanonicalPath()); MetastoreConf.setVar(conf, ConfVars.WAREHOUSE_EXTERNAL, ext_wh.getCanonicalPath()); client = new HiveMetaStoreClient(conf); } private static void silentDropDatabase(String dbName) throws TException { try { for (String tableName : client.getTables(dbName, "*")) { client.dropTable(dbName, tableName); } client.dropDatabase(dbName); } catch (NoSuchObjectException|InvalidOperationException e) { // NOP } } private void resetHMSClient() { client.setProcessorIdentifier(null); client.setProcessorCapabilities(null); } private void setHMSClient(String id, String[] caps) { client.setProcessorIdentifier(id); client.setProcessorCapabilities(caps); } private File getManagedRootForTenant(String tenant) { return new File(System.getProperty("java.io.tmpdir") + File.separator + tenant + File.separator + "hive" + File.separator + "warehouse" + File.separator + "managed" + File.separator); } private File getExternalRootForTenant(String tenant) { return new File(System.getProperty("java.io.tmpdir") + File.separator + tenant + File.separator + "hive" + File.separator + "warehouse" + File.separator + "external" + File.separator); } @Test public void testCreateDatabaseOldSyntax() throws Exception { try { resetHMSClient(); final String dbName = "db1"; String basetblName = "oldstyletable"; Map<String, Object> tProps = new HashMap<>(); Database db = createDatabase("hive", dbName, null, null); Database db2 = client.getDatabase("hive", dbName); assertNull(db2.getManagedLocationUri()); assertNotNull("Database's locationUri is expected to be not null:actual=" + db2.getLocationUri()); String tblName = "ext_" + basetblName; tProps.put("DBNAME", dbName); tProps.put("TBLNAME", tblName); 
tProps.put("TBLTYPE", TableType.EXTERNAL_TABLE); StringBuilder properties = new StringBuilder(); properties.append("EXTERNAL").append("=").append("TRUE"); properties.append(";"); tProps.put("PROPERTIES", properties.toString()); Table tbl = createTableWithCapabilities(tProps); setHMSClient("testCreateDatabaseOldSyntax", (new String[] { "HIVEBUCKET2", "EXTREAD", "EXTWRITE"})); Table tbl2 = client.getTable(dbName, tblName); assertEquals("Created and retrieved tables do not match:" + tbl2.getTableName() + ":" + tblName, tbl2.getTableName(), tblName); assertTrue("Database location not as expected:actual=" + db2.getLocationUri(), tbl2.getSd().getLocation().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL.getVarname()))); resetHMSClient(); tblName = "mgd_" + basetblName; tProps.put("DBNAME", dbName); tProps.put("TBLNAME", tblName); tProps.put("TBLTYPE", TableType.MANAGED_TABLE); properties = new StringBuilder(); properties.append("transactional=true"); properties.append(";"); properties.append("transactional_properties=insert_only"); tProps.put("PROPERTIES", properties.toString()); setHMSClient("createTable", new String[] {"HIVEMANAGEDINSERTWRITE", "HIVEFULLACIDWRITE"}); tbl = createTableWithCapabilities(tProps); tbl2 = client.getTable(dbName, tblName); assertEquals("Created and retrieved tables do not match:" + tbl2.getTableName() + ":" + tblName, tbl2.getTableName(), tblName); assertTrue("Database location not as expected:actual=" + db2.getLocationUri(), tbl2.getSd().getLocation().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE.getVarname()))); } catch (Exception e) { fail("testCreateDatabaseOldSyntax failed with " + e.getMessage()); } finally { silentDropDatabase("db1"); resetHMSClient(); } } @Test public void testCreateDatabaseWithOldLocation() throws Exception { try { resetHMSClient(); final String dbName = "dbx"; String basetblName = "oldstyletable"; Map<String, Object> tProps = new HashMap<>(); String location = 
ext_wh.getAbsolutePath().concat(File.separator).concat(dbName).concat(DATABASE_WAREHOUSE_SUFFIX); Database db = createDatabase("hive", dbName, location, null); Database db2 = client.getDatabase("hive", dbName); assertNull(db2.getManagedLocationUri()); assertNotNull("Database's locationUri is expected to be not null:actual=" + db2.getLocationUri()); String tblName = basetblName; tProps.put("DBNAME", dbName); tProps.put("TBLNAME", tblName); tProps.put("TBLTYPE", TableType.EXTERNAL_TABLE); StringBuilder properties = new StringBuilder(); properties.append("EXTERNAL").append("=").append("TRUE"); properties.append(";"); tProps.put("PROPERTIES", properties.toString()); Table tbl = createTableWithCapabilities(tProps); setHMSClient("testCreateDatabaseWithOldLocation", (new String[] { "HIVEBUCKET2", "EXTREAD", "EXTWRITE"})); Table tbl2 = client.getTable(dbName, tblName); assertEquals("Created and retrieved tables do not match:" + tbl2.getTableName() + ":" + tblName, tbl2.getTableName(), tblName); assertTrue("External table location not as expected:actual=" + db2.getLocationUri(), tbl2.getSd().getLocation().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL.getVarname()))); tblName = "mgd_" + basetblName; tProps.put("DBNAME", dbName); tProps.put("TBLNAME", tblName); tProps.put("TBLTYPE", TableType.MANAGED_TABLE); properties = new StringBuilder(); properties.append("transactional=true"); properties.append(";"); properties.append("transactional_properties=insert_only"); tProps.put("PROPERTIES", properties.toString()); setHMSClient("createTable", new String[] {"HIVEMANAGEDINSERTWRITE", "HIVEFULLACIDWRITE"}); tbl = createTableWithCapabilities(tProps); tbl2 = client.getTable(dbName, tblName); assertEquals("Created and retrieved tables do not match:" + tbl2.getTableName() + ":" + tblName, tbl2.getTableName(), tblName); assertEquals("Database's locationUri is expected to be equal to set value", Path.getPathWithoutSchemeAndAuthority(new Path(location)), 
Path.getPathWithoutSchemeAndAuthority(new Path(db2.getLocationUri()))); assertTrue("Managed table location not as expected:actual=" + db2.getLocationUri(), tbl2.getSd().getLocation().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE.getVarname()))); } catch (Exception e) { fail("testCreateDatabaseWithOldLocation failed with " + e.getMessage()); } finally { silentDropDatabase("dbx"); resetHMSClient(); } } @Test public void testCreateDatabaseWithNewLocation() throws Exception { try { resetHMSClient(); String dbName = "dbx"; String basetblName = "newstyletable"; Map<String, Object> tProps = new HashMap<>(); String tenant1 = "tenant1"; String tenant2 = "tenant2"; String location = getExternalRootForTenant(tenant1).getAbsolutePath(); Database db = createDatabase("hive", dbName, location, null); Database db2 = client.getDatabase("hive", dbName); assertNull(db2.getManagedLocationUri()); assertNotNull("Database's locationUri is expected to be not null:actual=" + db2.getLocationUri()); assertEquals("Expected location is different from actual location", Path.getPathWithoutSchemeAndAuthority(new Path(db2.getLocationUri())), Path.getPathWithoutSchemeAndAuthority(new Path(location))); String tblName = basetblName; tProps.put("DBNAME", dbName); tProps.put("TBLNAME", tblName); tProps.put("TBLTYPE", TableType.EXTERNAL_TABLE); StringBuilder properties = new StringBuilder(); properties.append("EXTERNAL").append("=").append("TRUE"); properties.append(";"); tProps.put("PROPERTIES", properties.toString()); Table tbl = createTableWithCapabilities(tProps); setHMSClient("testCreateDatabaseWithNewLocation", (new String[] { "HIVEBUCKET2", "EXTREAD", "EXTWRITE"})); Table tbl2 = client.getTable(dbName, tblName); assertEquals("Created and retrieved tables do not match:" + tbl2.getTableName() + ":" + tblName, tbl2.getTableName(), tblName); assertTrue("External table location not as expected:actual=" + db2.getLocationUri(), 
tbl2.getSd().getLocation().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL.getVarname()))); dbName = "dbm"; String mgdLocation = getManagedRootForTenant(tenant2).getAbsolutePath(); location = getExternalRootForTenant(tenant2).getAbsolutePath(); db = createDatabase("hive", dbName, location, mgdLocation); db2 = client.getDatabase("hive", dbName); assertNotNull("Database's managedLocationUri is expected to be not null" + db2.getManagedLocationUri()); assertNotNull("Database's locationUri is expected to be not null" + db2.getLocationUri()); assertEquals("Expected location is different from actual location", Path.getPathWithoutSchemeAndAuthority(new Path(location)), Path.getPathWithoutSchemeAndAuthority(new Path(db2.getLocationUri()))); tblName = "mgd_" + basetblName; tProps.put("DBNAME", dbName); tProps.put("TBLNAME", tblName); tProps.put("TBLTYPE", TableType.MANAGED_TABLE); properties = new StringBuilder(); properties.append("transactional=true"); properties.append(";"); properties.append("transactional_properties=insert_only"); tProps.put("PROPERTIES", properties.toString()); setHMSClient("createTable", new String[] {"HIVEMANAGEDINSERTWRITE", "HIVEFULLACIDWRITE"}); tbl = createTableWithCapabilities(tProps); tbl2 = client.getTable(dbName, tblName); assertEquals("Created and retrieved tables do not match:" + tbl2.getTableName() + ":" + tblName, tbl2.getTableName(), tblName); assertTrue("Managed table location not as expected:actual=" + db2.getLocationUri(), tbl2.getSd().getLocation().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE.getVarname()))); } catch (Exception e) { fail("testCreateDatabaseWithNewLocation failed with " + e.getMessage()); } finally { silentDropDatabase("dbx"); resetHMSClient(); } } @Test public void testCreateDatabaseWithExtAndManagedLocations() throws Exception { try { resetHMSClient(); final String dbName = "dbxm"; String basetblName = "newstyletable"; Map<String, Object> tProps = new HashMap<>(); String location = 
ext_wh.getAbsolutePath().concat(File.separator).concat(dbName).concat(DATABASE_WAREHOUSE_SUFFIX); String mgdLocation = wh.getAbsolutePath().concat(File.separator).concat(dbName).concat(DATABASE_WAREHOUSE_SUFFIX); Database db = createDatabase("hive", dbName, location, mgdLocation); Database db2 = client.getDatabase("hive", dbName); assertNotNull("Database's managedLocationUri is expected to be not null" + db2.getManagedLocationUri()); assertNotNull("Database's locationUri is expected to be not null" + db2.getLocationUri()); String tblName = basetblName; tProps.put("DBNAME", dbName); tProps.put("TBLNAME", tblName); tProps.put("TBLTYPE", TableType.EXTERNAL_TABLE); StringBuilder properties = new StringBuilder(); properties.append("EXTERNAL").append("=").append("TRUE"); properties.append(";"); tProps.put("PROPERTIES", properties.toString()); Table tbl = createTableWithCapabilities(tProps); setHMSClient("testCreateDatabaseWithLocation", (new String[] { "HIVEBUCKET2", "EXTREAD", "EXTWRITE"})); Table tbl2 = client.getTable(dbName, tblName); assertEquals("Created and retrieved tables do not match:" + tbl2.getTableName() + ":" + tblName, tbl2.getTableName(), tblName); assertTrue("External table location not as expected:actual=" + db2.getLocationUri(), tbl2.getSd().getLocation().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL.getVarname()))); tblName = "mgd_" + basetblName; tProps.put("DBNAME", dbName); tProps.put("TBLNAME", tblName); tProps.put("TBLTYPE", TableType.MANAGED_TABLE); properties = new StringBuilder(); properties.append("transactional=true"); properties.append(";"); properties.append("transactional_properties=insert_only"); tProps.put("PROPERTIES", properties.toString()); setHMSClient("createTable", new String[] {"HIVEMANAGEDINSERTWRITE", "HIVEFULLACIDWRITE"}); tbl = createTableWithCapabilities(tProps); tbl2 = client.getTable(dbName, tblName); assertEquals("Created and retrieved tables do not match:" + tbl2.getTableName() + ":" + tblName, 
tbl2.getTableName(), tblName); assertEquals("Database's locationUri is expected to be equal to set value", Path.getPathWithoutSchemeAndAuthority(new Path(location)), Path.getPathWithoutSchemeAndAuthority(new Path(db2.getLocationUri()))); assertTrue("Managed table location not as expected:actual=" + db2.getLocationUri(), tbl2.getSd().getLocation().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE.getVarname()))); } catch (Exception e) { fail("testCreateDatabaseWithLocation failed with " + e.getMessage()); } finally { silentDropDatabase("dbxm"); resetHMSClient(); } } @Test public void testAlterDatabase() throws Exception { try { resetHMSClient(); final String dbName = "dbalter"; Database db = createDatabase("hive", dbName, null, null); Database db2 = client.getDatabase("hive", dbName); assertNull(db2.getManagedLocationUri()); assertNotNull("Database's locationUri is expected to be not null:actual=" + db2.getLocationUri()); String mgdLocation = wh.getAbsolutePath().concat(File.separator).concat(dbName).concat(DATABASE_WAREHOUSE_SUFFIX); db.setManagedLocationUri(mgdLocation); client.alterDatabase(dbName, db); db2 = client.getDatabase("hive", dbName); assertNotNull("Database's managedLocationUri is expected to be not null" + db2.getManagedLocationUri()); assertEquals("Database's managed location is expected to be equal", db2.getManagedLocationUri(), mgdLocation); String location = ext_wh.getAbsolutePath().concat(File.separator).concat(dbName).concat(DATABASE_WAREHOUSE_SUFFIX); db.setLocationUri(location); db2 = client.getDatabase("hive", dbName); assertEquals("Database's managed location is expected to be equal", Path.getPathWithoutSchemeAndAuthority(new Path(db2.getManagedLocationUri())), Path.getPathWithoutSchemeAndAuthority(new Path(mgdLocation))); assertEquals("Database's location is expected to be equal", Path.getPathWithoutSchemeAndAuthority(new Path(db2.getLocationUri())), Path.getPathWithoutSchemeAndAuthority(new Path(location))); } catch (Exception e) { 
System.err.println(org.apache.hadoop.util.StringUtils.stringifyException(e)); System.err.println("testAlterDatabase() failed."); fail("testAlterDatabase failed:" + e.getMessage()); } finally { silentDropDatabase("dbalter"); resetHMSClient(); } } private Database createDatabase(String catName, String dbName, String location, String managedLocation) throws Exception { if (catName == null) catName = "hive"; DatabaseBuilder builder = new DatabaseBuilder() .setName(dbName) .setCatalogName(catName); if (location != null) builder.setLocation(location); if (managedLocation != null) builder.setManagedLocation(managedLocation); return builder.create(client, conf); } private Table createTableWithCapabilities(Map<String, Object> props) throws Exception { String catalog = (String)props.getOrDefault("CATALOG", MetaStoreUtils.getDefaultCatalog(conf)); String dbName = (String)props.getOrDefault("DBNAME", "simpdb"); String tblName = (String)props.getOrDefault("TBLNAME", "test_table"); TableType type = (TableType)props.getOrDefault("TBLTYPE", TableType.MANAGED_TABLE); int buckets = ((Integer)props.getOrDefault("BUCKETS", -1)).intValue(); String properties = (String)props.getOrDefault("PROPERTIES", ""); String location = (String)(props.get("LOCATION")); boolean dropDb = ((Boolean)props.getOrDefault("DROPDB", Boolean.TRUE)).booleanValue(); int partitionCount = ((Integer)props.getOrDefault("PARTITIONS", 0)).intValue(); final String typeName = "Person"; if (type == TableType.EXTERNAL_TABLE) { if (!properties.contains("EXTERNAL=TRUE")) { properties.concat(";EXTERNAL=TRUE;"); } } Map<String,String> table_params = new HashMap(); if (properties.length() > 0) { String[] propArray = properties.split(";"); for (String prop : propArray) { String[] keyValue = prop.split("="); table_params.put(keyValue[0], keyValue[1]); } } Catalog cat = null; try { cat = client.getCatalog(catalog); } catch (NoSuchObjectException e) { LOG.debug("Catalog does not exist, creating a new one"); try { if (cat == null) 
{ cat = new Catalog(); cat.setName(catalog.toLowerCase()); Warehouse wh = new Warehouse(conf); cat.setLocationUri(wh.getWhRootExternal().toString() + File.separator + catalog); cat.setDescription("Non-hive catalog"); client.createCatalog(cat); LOG.debug("Catalog " + catalog + " created"); } } catch (Exception ce) { LOG.warn("Catalog " + catalog + " could not be created"); } } catch (Exception e) { LOG.error("Creation of a new catalog failed, aborting test"); throw e; } try { client.dropTable(dbName, tblName); } catch (Exception e) { LOG.info("Drop table failed for " + dbName + "." + tblName); } try { if (dropDb) silentDropDatabase(dbName); } catch (Exception e) { LOG.info("Drop database failed for " + dbName); } if (dropDb) new DatabaseBuilder() .setName(dbName) .setCatalogName(catalog) .create(client, conf); try { client.dropType(typeName); } catch (Exception e) { LOG.info("Drop type failed for " + typeName); } Type typ1 = new Type(); typ1.setName(typeName); typ1.setFields(new ArrayList<>(2)); typ1.getFields().add( new FieldSchema("name", ColumnType.STRING_TYPE_NAME, "")); typ1.getFields().add( new FieldSchema("income", ColumnType.INT_TYPE_NAME, "")); client.createType(typ1); TableBuilder builder = new TableBuilder() .setCatName(catalog) .setDbName(dbName) .setTableName(tblName) .setCols(typ1.getFields()) .setType(type.name()) .setLocation(location) .setNumBuckets(buckets) .setTableParams(table_params) .addBucketCol("name") .addStorageDescriptorParam("test_param_1", "Use this for comments etc"); if (location != null) builder.setLocation(location); if (buckets > 0) builder.setNumBuckets(buckets).addBucketCol("name"); if (partitionCount > 0) { builder.addPartCol("partcol", "string"); } if (type == TableType.MANAGED_TABLE) { if (properties.contains("transactional=true") && !properties.contains("transactional_properties=insert_only")) { builder.setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"); 
builder.setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"); builder.setSerdeLib("org.apache.hadoop.hive.ql.io.orc.OrcSerde"); builder.addStorageDescriptorParam("inputFormat", "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"); builder.addStorageDescriptorParam("outputFormat", "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"); } } Table tbl = builder.create(client, conf); LOG.info("Table " + tbl.getTableName() + " created:type=" + tbl.getTableType()); if (partitionCount > 0) { List<Partition> partitions = new ArrayList<>(); List<List<String>> partValues = new ArrayList<>(); for (int i = 1; i <= partitionCount; i++) { partValues.add(Lists.newArrayList("" + i)); } for(List<String> vals : partValues){ addPartition(client, tbl, vals); } } if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' // object when the client is a thrift client and the code below relies // on the location being present in the 'tbl' object - so get the table // from the metastore tbl = client.getTable(catalog, dbName, tblName); LOG.info("Fetched Table " + tbl.getTableName() + " created:type=" + tbl.getTableType()); } return tbl; } private void addPartition(IMetaStoreClient client, Table table, List<String> values) throws TException { PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table); values.forEach(val -> partitionBuilder.addValue(val)); Partition p = partitionBuilder.build(conf); p.getSd().setNumBuckets(-1); // PartitionBuilder uses 0 as default whereas we use -1 for Tables. client.add_partition(p); } }
// ======================================================================== // Copyright (c) 1997-2009 Mort Bay Consulting Pty. Ltd. // ------------------------------------------------------------------------ // All rights reserved. This program and the accompanying materials // are made available under the terms of the Eclipse Public License v1.0 // and Apache License v2.0 which accompanies this distribution. // The Eclipse Public License is available at // http://www.eclipse.org/legal/epl-v10.html // The Apache License v2.0 is available at // http://www.opensource.org/licenses/apache2.0.php // You may elect to redistribute this code under either of these licenses. // ======================================================================== package org.eclipse.jetty.server; import java.io.IOException; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Writer; import java.util.Locale; import java.util.TimeZone; import javax.servlet.http.Cookie; import org.eclipse.jetty.http.HttpHeaders; import org.eclipse.jetty.http.PathMap; import org.eclipse.jetty.util.DateCache; import org.eclipse.jetty.util.RolloverFileOutputStream; import org.eclipse.jetty.util.StringUtil; import org.eclipse.jetty.util.component.AbstractLifeCycle; import org.eclipse.jetty.util.log.Log; /** * This {@link RequestLog} implementation outputs logs in the pseudo-standard * NCSA common log format. Configuration options allow a choice between the * standard Common Log Format (as used in the 3 log format) and the Combined Log * Format (single log format). This log format can be output by most web * servers, and almost all web log analysis software can understand these * formats. 
 * @org.apache.xbean.XBean element="ncsaLog"
 */
public class NCSARequestLog extends AbstractLifeCycle implements RequestLog
{
    // Configuration (set before start).
    private String _filename;
    private boolean _extended;
    private boolean _append;
    private int _retainDays;
    private boolean _closeOut;
    private boolean _preferProxiedForAddress;
    private String _logDateFormat = "dd/MMM/yyyy:HH:mm:ss Z";
    private String _filenameDateFormat = null;
    private Locale _logLocale = Locale.getDefault();
    private String _logTimeZone = "GMT";
    private String[] _ignorePaths;
    private boolean _logLatency = false;
    private boolean _logCookies = false;
    private boolean _logServer = false;
    private boolean _logDispatch = false;

    // Runtime state (created in doStart, torn down in doStop).
    private transient OutputStream _out;
    private transient OutputStream _fileOut;
    private transient DateCache _logDateCache;
    private transient PathMap _ignorePathMap;
    private transient Writer _writer;

    /* ------------------------------------------------------------ */
    /**
     * Create request log object with default settings
     * (extended format, append, retain 31 days).
     */
    public NCSARequestLog()
    {
        _extended = true;
        _append = true;
        _retainDays = 31;
    }

    /* ------------------------------------------------------------ */
    /**
     * Create request log object with specified output file name.
     *
     * @param filename the file name for the request log.
     *                 This may be in the format expected
     *                 by {@link RolloverFileOutputStream}
     */
    public NCSARequestLog(String filename)
    {
        _extended = true;
        _append = true;
        _retainDays = 31;
        setFilename(filename);
    }

    /* ------------------------------------------------------------ */
    /**
     * Set the output file name of the request log.
     * The file name may be in the format expected by
     * {@link RolloverFileOutputStream}. Blank names are normalized to null
     * (which means log to System.err).
     *
     * @param filename file name of the request log
     */
    public void setFilename(String filename)
    {
        if (filename != null)
        {
            filename = filename.trim();
            if (filename.length() == 0)
                filename = null;
        }
        _filename = filename;
    }

    /* ------------------------------------------------------------ */
    /**
     * Retrieve the output file name of the request log.
     *
     * @return file name of the request log
     */
    public String getFilename()
    {
        return _filename;
    }

    /* ------------------------------------------------------------ */
    /**
     * Retrieve the file name of the request log with the expanded
     * date wildcard if the output is written to the disk using
     * {@link RolloverFileOutputStream}.
     *
     * @return file name of the request log, or null if not applicable
     */
    public String getDatedFilename()
    {
        if (_fileOut instanceof RolloverFileOutputStream)
            return ((RolloverFileOutputStream)_fileOut).getDatedFilename();
        return null;
    }

    /* ------------------------------------------------------------ */
    /**
     * Set the timestamp format for request log entries in the file.
     * If this is not set, the pre-formated request timestamp is used.
     *
     * @param format timestamp format string
     */
    public void setLogDateFormat(String format)
    {
        _logDateFormat = format;
    }

    /* ------------------------------------------------------------ */
    /**
     * Retrieve the timestamp format string for request log entries.
     *
     * @return timestamp format string.
     */
    public String getLogDateFormat()
    {
        return _logDateFormat;
    }

    /* ------------------------------------------------------------ */
    /**
     * Set the locale of the request log.
     *
     * @param logLocale locale object
     */
    public void setLogLocale(Locale logLocale)
    {
        _logLocale = logLocale;
    }

    /* ------------------------------------------------------------ */
    /**
     * Retrieve the locale of the request log.
     *
     * @return locale object
     */
    public Locale getLogLocale()
    {
        return _logLocale;
    }

    /* ------------------------------------------------------------ */
    /**
     * Set the timezone of the request log.
     *
     * @param tz timezone string
     */
    public void setLogTimeZone(String tz)
    {
        _logTimeZone = tz;
    }

    /* ------------------------------------------------------------ */
    /**
     * Retrieve the timezone of the request log.
     *
     * @return timezone string
     */
    public String getLogTimeZone()
    {
        return _logTimeZone;
    }

    /* ------------------------------------------------------------ */
    /**
     * Set the number of days before rotated log files are deleted.
     *
     * @param retainDays number of days to keep a log file
     */
    public void setRetainDays(int retainDays)
    {
        _retainDays = retainDays;
    }

    /* ------------------------------------------------------------ */
    /**
     * Retrieve the number of days before rotated log files are deleted.
     *
     * @return number of days to keep a log file
     */
    public int getRetainDays()
    {
        return _retainDays;
    }

    /* ------------------------------------------------------------ */
    /**
     * Set the extended request log format flag.
     *
     * @param extended true - log the extended request information,
     *                 false - do not log the extended request information
     */
    public void setExtended(boolean extended)
    {
        _extended = extended;
    }

    /* ------------------------------------------------------------ */
    /**
     * Retrieve the extended request log format flag.
     *
     * @return value of the flag
     */
    public boolean isExtended()
    {
        return _extended;
    }

    /* ------------------------------------------------------------ */
    /**
     * Set append to log flag.
     *
     * @param append true - request log file will be appended after restart,
     *               false - request log file will be overwritten after restart
     */
    public void setAppend(boolean append)
    {
        _append = append;
    }

    /* ------------------------------------------------------------ */
    /**
     * Retrieve append to log flag.
     *
     * @return value of the flag
     */
    public boolean isAppend()
    {
        return _append;
    }

    /* ------------------------------------------------------------ */
    /**
     * Set request paths that will not be logged.
     *
     * @param ignorePaths array of request paths
     */
    public void setIgnorePaths(String[] ignorePaths)
    {
        _ignorePaths = ignorePaths;
    }

    /* ------------------------------------------------------------ */
    /**
     * Retrieve the request paths that will not be logged.
     *
     * @return array of request paths
     */
    public String[] getIgnorePaths()
    {
        return _ignorePaths;
    }

    /* ------------------------------------------------------------ */
    /**
     * Controls logging of the request cookies.
     *
     * @param logCookies true - values of request cookies will be logged,
     *                   false - values of request cookies will not be logged
     */
    public void setLogCookies(boolean logCookies)
    {
        _logCookies = logCookies;
    }

    /* ------------------------------------------------------------ */
    /**
     * Retrieve log cookies flag
     *
     * @return value of the flag
     */
    public boolean getLogCookies()
    {
        return _logCookies;
    }

    /* ------------------------------------------------------------ */
    /**
     * Controls logging of the request hostname.
     *
     * @param logServer true - request hostname will be logged,
     *                  false - request hostname will not be logged
     */
    public void setLogServer(boolean logServer)
    {
        _logServer = logServer;
    }

    /* ------------------------------------------------------------ */
    /**
     * Retrieve log hostname flag.
     *
     * @return value of the flag
     */
    public boolean getLogServer()
    {
        return _logServer;
    }

    /* ------------------------------------------------------------ */
    /**
     * Controls logging of request processing time.
     *
     * @param logLatency true - request processing time will be logged
     *                   false - request processing time will not be logged
     */
    public void setLogLatency(boolean logLatency)
    {
        _logLatency = logLatency;
    }

    /* ------------------------------------------------------------ */
    /**
     * Retrieve log request processing time flag.
     *
     * @return value of the flag
     */
    public boolean getLogLatency()
    {
        return _logLatency;
    }

    /* ------------------------------------------------------------ */
    /**
     * Controls whether the actual IP address of the connection or
     * the IP address from the X-Forwarded-For header will be logged.
     *
     * @param preferProxiedForAddress true - IP address from header will be logged,
     *                                false - IP address from the connection will be logged
     */
    public void setPreferProxiedForAddress(boolean preferProxiedForAddress)
    {
        _preferProxiedForAddress = preferProxiedForAddress;
    }

    /* ------------------------------------------------------------ */
    /**
     * Retrieved log X-Forwarded-For IP address flag.
     *
     * @return value of the flag
     */
    public boolean getPreferProxiedForAddress()
    {
        return _preferProxiedForAddress;
    }

    /* ------------------------------------------------------------ */
    /**
     * Set the log file name date format.
     * @see RolloverFileOutputStream#RolloverFileOutputStream(String, boolean, int, TimeZone, String, String)
     *
     * @param logFileDateFormat format string that is passed to {@link RolloverFileOutputStream}
     */
    public void setFilenameDateFormat(String logFileDateFormat)
    {
        _filenameDateFormat = logFileDateFormat;
    }

    /* ------------------------------------------------------------ */
    /**
     * Retrieve the file name date format string.
     *
     * @return the log File Date Format
     */
    public String getFilenameDateFormat()
    {
        return _filenameDateFormat;
    }

    /* ------------------------------------------------------------ */
    /**
     * Controls logging of the request dispatch time
     *
     * @param value true - request dispatch time will be logged
     *              false - request dispatch time will not be logged
     */
    public void setLogDispatch(boolean value)
    {
        _logDispatch = value;
    }

    /* ------------------------------------------------------------ */
    /**
     * Retrieve request dispatch time logging flag
     *
     * @return value of the flag
     */
    public boolean isLogDispatch()
    {
        return _logDispatch;
    }

    /* ------------------------------------------------------------ */
    /**
     * Writes the request and response information to the output stream.
     * The entry is assembled in a local StringBuilder and written/flushed
     * under a lock, so concurrent calls produce whole, unmixed lines.
     *
     * @see org.eclipse.jetty.server.RequestLog#log(org.eclipse.jetty.server.Request, org.eclipse.jetty.server.Response)
     */
    public void log(Request request, Response response)
    {
        try
        {
            // Skip paths configured to be ignored, and do nothing before doStart().
            if (_ignorePathMap != null && _ignorePathMap.getMatch(request.getRequestURI()) != null)
                return;

            if (_fileOut == null)
                return;

            StringBuilder buf= new StringBuilder(256);

            if (_logServer)
            {
                buf.append(request.getServerName());
                buf.append(' ');
            }

            String addr = null;
            if (_preferProxiedForAddress)
            {
                addr = request.getHeader(HttpHeaders.X_FORWARDED_FOR);
            }

            // Fall back to the connection address when no forwarded header is present.
            if (addr == null)
                addr = request.getRemoteAddr();

            buf.append(addr);
            buf.append(" - ");
            Authentication authentication=request.getAuthentication();
            if (authentication instanceof Authentication.User)
                buf.append(((Authentication.User)authentication).getUserIdentity().getUserPrincipal().getName());
            else
                buf.append(" - ");

            buf.append(" [");
            if (_logDateCache != null)
                buf.append(_logDateCache.format(request.getTimeStamp()));
            else
                buf.append(request.getTimeStampBuffer().toString());

            buf.append("] \"");
            buf.append(request.getMethod());
            buf.append(' ');
            buf.append(request.getUri().toString());
            buf.append(' ');
            buf.append(request.getProtocol());
            buf.append("\" ");
            if (request.getAsyncContinuation().isInitial())
            {
                int status = response.getStatus();
                if (status <= 0)
                    status = 404;
                // Emit the three status digits without formatting a string.
                buf.append((char)('0' + ((status / 100) % 10)));
                buf.append((char)('0' + ((status / 10) % 10)));
                buf.append((char)('0' + (status % 10)));
            }
            else
                buf.append("Async");

            long responseLength = response.getContentCount();
            if (responseLength >= 0)
            {
                buf.append(' ');
                if (responseLength > 99999)
                    buf.append(responseLength);
                else
                {
                    // Manual digit emission for lengths up to 5 digits, avoiding
                    // Long.toString allocation on the hot path.
                    if (responseLength > 9999)
                        buf.append((char)('0' + ((responseLength / 10000) % 10)));
                    if (responseLength > 999)
                        buf.append((char)('0' + ((responseLength / 1000) % 10)));
                    if (responseLength > 99)
                        buf.append((char)('0' + ((responseLength / 100) % 10)));
                    if (responseLength > 9)
                        buf.append((char)('0' + ((responseLength / 10) % 10)));
                    buf.append((char)('0' + (responseLength) % 10));
                }
                buf.append(' ');
            }
            else
                buf.append(" - ");

            if (_extended)
                logExtended(request, response, buf);

            if (_logCookies)
            {
                Cookie[] cookies = request.getCookies();
                if (cookies == null || cookies.length == 0)
                    buf.append(" -");
                else
                {
                    buf.append(" \"");
                    for (int i = 0; i < cookies.length; i++)
                    {
                        if (i != 0)
                            buf.append(';');
                        buf.append(cookies[i].getName());
                        buf.append('=');
                        buf.append(cookies[i].getValue());
                    }
                    buf.append('\"');
                }
            }

            if (_logDispatch || _logLatency)
            {
                long now = System.currentTimeMillis();

                if (_logDispatch)
                {
                    long d = request.getDispatchTime();
                    buf.append(' ');
                    // A dispatch time of 0 means "not dispatched yet"; use the
                    // request timestamp instead.
                    buf.append(now - (d==0 ? request.getTimeStamp():d));
                }

                if (_logLatency)
                {
                    buf.append(' ');
                    buf.append(now - request.getTimeStamp());
                }
            }

            buf.append(StringUtil.__LINE_SEPARATOR);
            String log = buf.toString();
            // Write and flush atomically; _writer may be nulled by doStop().
            synchronized(this)
            {
                if (_writer==null)
                    return;
                _writer.write(log);
                _writer.flush();
            }
        }
        catch (IOException e)
        {
            Log.warn(e);
        }
    }

    /* ------------------------------------------------------------ */
    /**
     * Writes extended request and response information (Referer and
     * User-Agent headers, quoted, or "-" when absent) to the output stream.
     *
     * @param request request object
     * @param response response object
     * @param b StringBuilder to write to
     * @throws IOException
     */
    protected void logExtended(Request request,
                               Response response,
                               StringBuilder b) throws IOException
    {
        String referer = request.getHeader(HttpHeaders.REFERER);
        if (referer == null)
            b.append("\"-\" ");
        else
        {
            b.append('"');
            b.append(referer);
            b.append("\" ");
        }

        String agent = request.getHeader(HttpHeaders.USER_AGENT);
        if (agent == null)
            b.append("\"-\" ");
        else
        {
            b.append('"');
            b.append(agent);
            b.append('"');
        }
    }

    /* ------------------------------------------------------------ */
    /**
     * Set up request logging and open log file. With a filename a rollover
     * file stream is used; otherwise entries go to System.err.
     *
     * @see org.eclipse.jetty.util.component.AbstractLifeCycle#doStart()
     */
    @Override
    protected void doStart() throws Exception
    {
        if (_logDateFormat != null)
        {
            _logDateCache = new DateCache(_logDateFormat,_logLocale);
            _logDateCache.setTimeZoneID(_logTimeZone);
        }

        if (_filename != null)
        {
            _fileOut = new RolloverFileOutputStream(_filename,_append,_retainDays,TimeZone.getTimeZone(_logTimeZone),_filenameDateFormat,null);
            _closeOut = true;
            Log.info("Opened " + getDatedFilename());
        }
        else
            _fileOut = System.err;

        _out = _fileOut;

        if (_ignorePaths != null && _ignorePaths.length > 0)
        {
            _ignorePathMap = new PathMap();
            for (int i = 0; i < _ignorePaths.length; i++)
                _ignorePathMap.put(_ignorePaths[i],_ignorePaths[i]);
        }
        else
            _ignorePathMap = null;

        _writer = new OutputStreamWriter(_out);
        super.doStart();
    }

    /* ------------------------------------------------------------ */
    /**
     * Close the log file and perform cleanup. Synchronized against log()
     * so in-flight writes complete before the writer is torn down.
     *
     * @see org.eclipse.jetty.util.component.AbstractLifeCycle#doStop()
     */
    @Override
    protected void doStop() throws Exception
    {
        synchronized (this)
        {
            super.doStop();
            try
            {
                if (_writer != null)
                    _writer.flush();
            }
            catch (IOException e)
            {
                Log.ignore(e);
            }
            // Only close the stream we opened ourselves (not System.err).
            if (_out != null && _closeOut)
                try
                {
                    _out.close();
                }
                catch (IOException e)
                {
                    Log.ignore(e);
                }

            _out = null;
            _fileOut = null;
            _closeOut = false;
            _logDateCache = null;
            _writer = null;
        }
    }
}
/* * Copyright (C) 2008 ZXing authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.zxing.client.android.result; import com.google.zxing.Result; import com.google.zxing.client.android.Contents; import com.google.zxing.client.android.Intents; import com.google.zxing.client.android.LocaleManager; import com.google.zxing.client.android.PreferencesActivity; import com.google.zxing.client.android.R; import com.google.zxing.client.result.ParsedResult; import com.google.zxing.client.result.ParsedResultType; import com.google.zxing.client.result.ResultParser; import android.app.Activity; import android.app.AlertDialog; import android.content.ActivityNotFoundException; import android.content.ContentValues; import android.content.Intent; import android.content.SharedPreferences; import android.net.Uri; import android.preference.PreferenceManager; import android.provider.ContactsContract; import android.util.Log; import java.io.UnsupportedEncodingException; import java.net.URLEncoder; import java.util.Locale; import java.util.ArrayList; /** * A base class for the Android-specific barcode handlers. These allow the app to polymorphically * suggest the appropriate actions for each data type. * * This class also contains a bunch of utility methods to take common actions like opening a URL. * They could easily be moved into a helper object, but it can't be static because the Activity * instance is needed to launch an intent. 
 *
 * @author dswitkin@google.com (Daniel Switkin)
 * @author Sean Owen
 */
public abstract class ResultHandler {

  private static final String TAG = ResultHandler.class.getSimpleName();

  // Type-name prefixes recognized in parsed barcode payloads. Each *_TYPE_STRINGS array is
  // matched positionally against the corresponding *_TYPE_VALUES array in doToContractType().
  private static final String[] EMAIL_TYPE_STRINGS = {"home", "work", "mobile"};
  private static final String[] PHONE_TYPE_STRINGS = {"home", "work", "mobile", "fax", "pager", "main"};
  private static final String[] ADDRESS_TYPE_STRINGS = {"home", "work"};
  private static final int[] EMAIL_TYPE_VALUES = {
      ContactsContract.CommonDataKinds.Email.TYPE_HOME,
      ContactsContract.CommonDataKinds.Email.TYPE_WORK,
      ContactsContract.CommonDataKinds.Email.TYPE_MOBILE,
  };
  private static final int[] PHONE_TYPE_VALUES = {
      ContactsContract.CommonDataKinds.Phone.TYPE_HOME,
      ContactsContract.CommonDataKinds.Phone.TYPE_WORK,
      ContactsContract.CommonDataKinds.Phone.TYPE_MOBILE,
      ContactsContract.CommonDataKinds.Phone.TYPE_FAX_WORK,
      ContactsContract.CommonDataKinds.Phone.TYPE_PAGER,
      ContactsContract.CommonDataKinds.Phone.TYPE_MAIN,
  };
  private static final int[] ADDRESS_TYPE_VALUES = {
      ContactsContract.CommonDataKinds.StructuredPostal.TYPE_HOME,
      ContactsContract.CommonDataKinds.StructuredPostal.TYPE_WORK,
  };
  // Sentinel returned by doToContractType() when no type-string prefix matches.
  private static final int NO_TYPE = -1;

  public static final int MAX_BUTTON_COUNT = 4;

  private final ParsedResult result;
  private final Activity activity;
  // Raw decode result; may be null (single-arg constructor), used only for %f/%t substitution
  // in fillInCustomSearchURL().
  private final Result rawResult;
  // Custom product-search URL template from preferences, or null when none is configured.
  private final String customProductSearch;

  ResultHandler(Activity activity, ParsedResult result) {
    this(activity, result, null);
  }

  ResultHandler(Activity activity, ParsedResult result, Result rawResult) {
    this.result = result;
    this.activity = activity;
    this.rawResult = rawResult;
    this.customProductSearch = parseCustomSearchURL();
  }

  public final ParsedResult getResult() {
    return result;
  }

  final boolean hasCustomProductSearch() {
    return customProductSearch != null;
  }

  final Activity getActivity() {
    return activity;
  }

  /**
   * Indicates how many buttons the derived class wants shown.
   *
   * @return The integer button count.
   */
  public abstract int getButtonCount();

  /**
   * The text of the nth action button.
   *
   * @param index From 0 to getButtonCount() - 1
   * @return The button text as a resource ID
   */
  public abstract int getButtonText(int index);

  public Integer getDefaultButtonID() {
    return null;
  }

  /**
   * Execute the action which corresponds to the nth button.
   *
   * @param index The button that was clicked.
   */
  public abstract void handleButtonPress(int index);

  /**
   * Some barcode contents are considered secure, and should not be saved to history, copied to
   * the clipboard, or otherwise persisted.
   *
   * @return If true, do not create any permanent record of these contents.
   */
  public boolean areContentsSecure() {
    return false;
  }

  /**
   * Create a possibly styled string for the contents of the current barcode.
   *
   * @return The text to be displayed.
   */
  public CharSequence getDisplayContents() {
    String contents = result.getDisplayResult();
    // Strip carriage returns so CRLF payloads display as plain newlines.
    return contents.replace("\r", "");
  }

  /**
   * A string describing the kind of barcode that was found, e.g. "Found contact info".
   *
   * @return The resource ID of the string.
   */
  public abstract int getDisplayTitle();

  /**
   * A convenience method to get the parsed type. Should not be overridden.
   *
   * @return The parsed type, e.g. URI or ISBN
   */
  public final ParsedResultType getType() {
    return result.getType();
  }

  /** Launches the contact editor pre-filled with only phone numbers. */
  final void addPhoneOnlyContact(String[] phoneNumbers,String[] phoneTypes) {
    addContact(null, null, null, phoneNumbers, phoneTypes, null, null, null, null, null, null,
        null, null, null, null, null);
  }

  /** Launches the contact editor pre-filled with only email addresses. */
  final void addEmailOnlyContact(String[] emails, String[] emailTypes) {
    addContact(null, null, null, null, null, emails, emailTypes, null, null, null, null, null,
        null, null, null, null);
  }

  /**
   * Builds an ACTION_INSERT_OR_EDIT contact intent from the parsed barcode fields and launches it.
   * Any argument may be null; only non-null, non-empty values are attached as extras. Secondary
   * rows (first URL, birthday, first nickname) are passed via the Insert.DATA parcelable list.
   */
  final void addContact(String[] names, String[] nicknames, String pronunciation,
                        String[] phoneNumbers, String[] phoneTypes, String[] emails,
                        String[] emailTypes, String note, String instantMessenger, String address,
                        String addressType, String org, String title, String[] urls,
                        String birthday, String[] geo) {

    // Only use the first name in the array, if present.
    Intent intent = new Intent(Intent.ACTION_INSERT_OR_EDIT, ContactsContract.Contacts.CONTENT_URI);
    intent.setType(ContactsContract.Contacts.CONTENT_ITEM_TYPE);
    putExtra(intent, ContactsContract.Intents.Insert.NAME, names != null ? names[0] : null);
    putExtra(intent, ContactsContract.Intents.Insert.PHONETIC_NAME, pronunciation);
    // Cap at the number of phone extras the Insert API supports (Contents.PHONE_KEYS).
    int phoneCount = Math.min(phoneNumbers != null ? phoneNumbers.length : 0,
                              Contents.PHONE_KEYS.length);
    for (int x = 0; x < phoneCount; x++) {
      putExtra(intent, Contents.PHONE_KEYS[x], phoneNumbers[x]);
      if (phoneTypes != null && x < phoneTypes.length) {
        int type = toPhoneContractType(phoneTypes[x]);
        if (type >= 0) {
          intent.putExtra(Contents.PHONE_TYPE_KEYS[x], type);
        }
      }
    }
    int emailCount = Math.min(emails != null ? emails.length : 0, Contents.EMAIL_KEYS.length);
    for (int x = 0; x < emailCount; x++) {
      putExtra(intent, Contents.EMAIL_KEYS[x], emails[x]);
      if (emailTypes != null && x < emailTypes.length) {
        int type = toEmailContractType(emailTypes[x]);
        if (type >= 0) {
          intent.putExtra(Contents.EMAIL_TYPE_KEYS[x], type);
        }
      }
    }
    ArrayList<ContentValues> data = new ArrayList<>();
    if (urls != null) {
      for (String url : urls) {
        if (url != null && !url.isEmpty()) {
          ContentValues row = new ContentValues(2);
          row.put(ContactsContract.Data.MIMETYPE,
                  ContactsContract.CommonDataKinds.Website.CONTENT_ITEM_TYPE);
          row.put(ContactsContract.CommonDataKinds.Website.URL, url);
          data.add(row);
          // Only the first non-empty URL is attached.
          break;
        }
      }
    }
    if (birthday != null) {
      ContentValues row = new ContentValues(3);
      row.put(ContactsContract.Data.MIMETYPE,
              ContactsContract.CommonDataKinds.Event.CONTENT_ITEM_TYPE);
      row.put(ContactsContract.CommonDataKinds.Event.TYPE,
              ContactsContract.CommonDataKinds.Event.TYPE_BIRTHDAY);
      row.put(ContactsContract.CommonDataKinds.Event.START_DATE, birthday);
      data.add(row);
    }
    if (nicknames != null) {
      for (String nickname : nicknames) {
        if (nickname != null && !nickname.isEmpty()) {
          ContentValues row = new ContentValues(3);
          row.put(ContactsContract.Data.MIMETYPE,
                  ContactsContract.CommonDataKinds.Nickname.CONTENT_ITEM_TYPE);
          row.put(ContactsContract.CommonDataKinds.Nickname.TYPE,
                  ContactsContract.CommonDataKinds.Nickname.TYPE_DEFAULT);
          row.put(ContactsContract.CommonDataKinds.Nickname.NAME, nickname);
          data.add(row);
          // Only the first non-empty nickname is attached.
          break;
        }
      }
    }
    if (!data.isEmpty()) {
      intent.putParcelableArrayListExtra(ContactsContract.Intents.Insert.DATA, data);
    }
    // Fold note and geo coordinates into a single NOTES extra, one per line.
    StringBuilder aggregatedNotes = new StringBuilder();
    if (note != null) {
      aggregatedNotes.append('\n').append(note);
    }
    if (geo != null) {
      aggregatedNotes.append('\n').append(geo[0]).append(',').append(geo[1]);
    }
    if (aggregatedNotes.length() > 0) {
      // Remove extra leading '\n'
      putExtra(intent, ContactsContract.Intents.Insert.NOTES, aggregatedNotes.substring(1));
    }
    putExtra(intent, ContactsContract.Intents.Insert.IM_HANDLE, instantMessenger);
    putExtra(intent, ContactsContract.Intents.Insert.POSTAL, address);
    if (addressType != null) {
      int type = toAddressContractType(addressType);
      if (type >= 0) {
        intent.putExtra(ContactsContract.Intents.Insert.POSTAL_TYPE, type);
      }
    }
    putExtra(intent, ContactsContract.Intents.Insert.COMPANY, org);
    putExtra(intent, ContactsContract.Intents.Insert.JOB_TITLE, title);
    launchIntent(intent);
  }

  private static int toEmailContractType(String typeString) {
    return doToContractType(typeString, EMAIL_TYPE_STRINGS, EMAIL_TYPE_VALUES);
  }

  private static int toPhoneContractType(String typeString) {
    return doToContractType(typeString, PHONE_TYPE_STRINGS, PHONE_TYPE_VALUES);
  }

  private static int toAddressContractType(String typeString) {
    return doToContractType(typeString, ADDRESS_TYPE_STRINGS, ADDRESS_TYPE_VALUES);
  }

  /**
   * Maps a barcode type string to a ContactsContract TYPE_* constant by prefix match
   * (case-insensitive for ASCII upper case), or NO_TYPE when nothing matches.
   */
  private static int doToContractType(String typeString, String[] types, int[] values) {
    if (typeString == null) {
      return NO_TYPE;
    }
    for (int i = 0; i < types.length; i++) {
      String type = types[i];
      if (typeString.startsWith(type) || typeString.startsWith(type.toUpperCase(Locale.ENGLISH))) {
        return values[i];
      }
    }
    return NO_TYPE;
  }

  final void shareByEmail(String contents) {
    sendEmail(null, null, null, null, contents);
  }

  /** Launches an ACTION_SEND mailto intent; null/empty recipient arrays are omitted. */
  final void sendEmail(String[] to, String[] cc, String[] bcc, String subject, String body) {
    Intent intent = new Intent(Intent.ACTION_SEND, Uri.parse("mailto:"));
    if (to != null && to.length != 0) {
      intent.putExtra(Intent.EXTRA_EMAIL, to);
    }
    if (cc != null && cc.length != 0) {
      intent.putExtra(Intent.EXTRA_CC, cc);
    }
    if (bcc != null && bcc.length != 0) {
      intent.putExtra(Intent.EXTRA_BCC, bcc);
    }
    putExtra(intent, Intent.EXTRA_SUBJECT, subject);
    putExtra(intent, Intent.EXTRA_TEXT, body);
    intent.setType("text/plain");
    launchIntent(intent);
  }

  final void shareBySMS(String contents) {
    sendSMSFromUri("smsto:", contents);
  }

  final void sendSMS(String phoneNumber, String body) {
    sendSMSFromUri("smsto:" + phoneNumber, body);
  }

  private void sendSMSFromUri(String uri, String body) {
    Intent intent = new Intent(Intent.ACTION_SENDTO, Uri.parse(uri));
    putExtra(intent, "sms_body", body);
    // Exit the app once the SMS is sent
    intent.putExtra("compose_mode", true);
    launchIntent(intent);
  }

  final void sendMMS(String phoneNumber, String subject, String body) {
    sendMMSFromUri("mmsto:" + phoneNumber, subject, body);
  }

  private void sendMMSFromUri(String uri, String subject, String body) {
    Intent intent = new Intent(Intent.ACTION_SENDTO, Uri.parse(uri));
    // The Messaging app needs to see a valid subject or else it will treat this as an SMS.
    if (subject == null || subject.isEmpty()) {
      putExtra(intent, "subject", activity.getString(R.string.msg_default_mms_subject));
    } else {
      putExtra(intent, "subject", subject);
    }
    putExtra(intent, "sms_body", body);
    intent.putExtra("compose_mode", true);
    launchIntent(intent);
  }

  final void dialPhone(String phoneNumber) {
    launchIntent(new Intent(Intent.ACTION_DIAL, Uri.parse("tel:" + phoneNumber)));
  }

  final void dialPhoneFromUri(String uri) {
    launchIntent(new Intent(Intent.ACTION_DIAL, Uri.parse(uri)));
  }

  final void openMap(String geoURI) {
    launchIntent(new Intent(Intent.ACTION_VIEW, Uri.parse(geoURI)));
  }

  /**
   * Do a geo search using the address as the query.
   *
   * @param address The address to find
   */
  final void searchMap(String address) {
    launchIntent(new Intent(Intent.ACTION_VIEW, Uri.parse("geo:0,0?q=" + Uri.encode(address))));
  }

  final void getDirections(double latitude, double longitude) {
    launchIntent(new Intent(Intent.ACTION_VIEW,
        Uri.parse("http://maps.google." + LocaleManager.getCountryTLD(activity) + "/maps?f=d&daddr="
            + latitude + ',' + longitude)));
  }

  // Uses the mobile-specific version of Product Search, which is formatted for small screens.
  final void openProductSearch(String upc) {
    Uri uri = Uri.parse("http://www.google." + LocaleManager.getProductSearchCountryTLD(activity)
        + "/m/products?q=" + upc + "&source=zxing");
    launchIntent(new Intent(Intent.ACTION_VIEW, uri));
  }

  final void openBookSearch(String isbn) {
    Uri uri = Uri.parse("http://books.google." + LocaleManager.getBookSearchCountryTLD(activity)
        + "/books?vid=isbn" + isbn);
    launchIntent(new Intent(Intent.ACTION_VIEW, uri));
  }

  final void openURL(String url) {
    // Strangely, some Android browsers don't seem to register to handle HTTP:// or HTTPS://.
    // Lower-case these as it should always be OK to lower-case these schemes.
    if (url.startsWith("HTTP://")) {
      url = "http" + url.substring(4);
    } else if (url.startsWith("HTTPS://")) {
      url = "https" + url.substring(5);
    }
    Intent intent = new Intent(Intent.ACTION_VIEW, Uri.parse(url));
    try {
      launchIntent(intent);
    } catch (ActivityNotFoundException ignored) {
      Log.w(TAG, "Nothing available to handle " + intent);
    }
  }

  final void webSearch(String query) {
    Intent intent = new Intent(Intent.ACTION_WEB_SEARCH);
    intent.putExtra("query", query);
    launchIntent(intent);
  }

  /**
   * Like {@link #launchIntent(Intent)} but will tell you if it is not handle-able
   * via {@link ActivityNotFoundException}.
   *
   * @throws ActivityNotFoundException if Intent can't be handled
   */
  final void rawLaunchIntent(Intent intent) {
    if (intent != null) {
      intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_WHEN_TASK_RESET);
      Log.d(TAG, "Launching intent: " + intent + " with extras: " + intent.getExtras());
      activity.startActivity(intent);
    }
  }

  /**
   * Like {@link #rawLaunchIntent(Intent)} but will show a user dialog if nothing is available
   * to handle.
   */
  final void launchIntent(Intent intent) {
    try {
      rawLaunchIntent(intent);
    } catch (ActivityNotFoundException ignored) {
      AlertDialog.Builder builder = new AlertDialog.Builder(activity);
      builder.setTitle(R.string.app_name);
      builder.setMessage(R.string.msg_intent_failed);
      builder.setPositiveButton(R.string.button_ok, null);
      builder.show();
    }
  }

  // Attaches the extra only when the value is non-null and non-empty.
  private static void putExtra(Intent intent, String key, String value) {
    if (value != null && !value.isEmpty()) {
      intent.putExtra(key, value);
    }
  }

  // Reads the user's custom product-search URL template from preferences; blank values are
  // normalized to null so hasCustomProductSearch() is false.
  private String parseCustomSearchURL() {
    SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(activity);
    String customProductSearch = prefs.getString(PreferencesActivity.KEY_CUSTOM_PRODUCT_SEARCH,
        null);
    if (customProductSearch != null && customProductSearch.trim().isEmpty()) {
      return null;
    }
    return customProductSearch;
  }

  /**
   * Substitutes the scanned text into the custom search URL template: %s is replaced by the
   * URL-encoded text, %f by the barcode format, %t by the parsed result type.
   * Returns the text unchanged when no custom search URL is configured.
   */
  final String fillInCustomSearchURL(String text) {
    if (customProductSearch == null) {
      // No template configured; fall back to the raw text.
      return text; // ?
    }
    try {
      text = URLEncoder.encode(text, "UTF-8");
    } catch (UnsupportedEncodingException e) {
      // can't happen; UTF-8 is always supported. Continue, I guess, without encoding
    }
    String url = customProductSearch;
    if (rawResult != null) {
      // Replace %f but only if it doesn't seem to be a hex escape sequence. This remains
      // problematic but avoids the more surprising problem of breaking escapes
      url = url.replaceFirst("%f(?![0-9a-f])", rawResult.getBarcodeFormat().toString());
      if (url.contains("%t")) {
        ParsedResult parsedResultAgain = ResultParser.parseResult(rawResult);
        url = url.replace("%t", parsedResultAgain.getType().toString());
      }
    }
    // Replace %s last as it might contain itself %f or %t
    return url.replace("%s", text);
  }
}
/*******************************************************************************
 * Copyright 2015 MobileMan GmbH
 * www.mobileman.com
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.mobileman.projecth.business;

import java.util.Date;
import java.util.List;

import com.mobileman.projecth.domain.chart.HaqChart;
import com.mobileman.projecth.domain.dto.patient.AnswerFrequency;
import com.mobileman.projecth.domain.dto.patient.PatientQuestionaryAnswerStatistic;
import com.mobileman.projecth.domain.medicine.Medication;
import com.mobileman.projecth.domain.patient.Patient;
import com.mobileman.projecth.domain.patient.PatientQuestionAnswer;
import com.mobileman.projecth.domain.questionary.CustomQuestion;
import com.mobileman.projecth.domain.questionary.QuestionType;
import com.mobileman.projecth.domain.user.connection.UserConnection;
import com.mobileman.projecth.services.ws.mobile.ProjectHPatientDailyPost;
import com.mobileman.projecth.services.ws.mobile.ProjectHPatientInitialPost;
import com.mobileman.projecth.util.Pair;

/**
 * Declares business layer for the {@link Patient}
 *
 * @author MobileMan
 *
 */
public interface PatientService extends SearchService<Patient> {

	/**
	 * @return all patients with account (not deleted)
	 */
	List<Patient> findAllPatientsWithAccounts();

	/**
	 * Processes a batch of daily posts submitted by the mobile client.
	 *
	 * @param posts daily posts to process
	 * @throws IllegalArgumentException if the posts are invalid
	 */
	void processPatientDailyPost(List<ProjectHPatientDailyPost> posts) throws IllegalArgumentException;

	/**
	 * Processes the initial post submitted by the mobile client.
	 *
	 * @param data initial post data
	 * @throws IllegalArgumentException if the data is invalid
	 */
	void processPatientInitialPost(ProjectHPatientInitialPost data) throws IllegalArgumentException;

	/**
	 *
	 * @param patientId
	 * @param diseaseId
	 * @param logDate
	 * @return true if an answer record exists for the given day
	 */
	boolean patientAnswerExists(Long patientId, Long diseaseId, Date logDate);

	/**
	 *
	 * @param patientId
	 * @param diseaseId
	 * @return true if a patient's answer to a one-time question exists for the given disease
	 */
	boolean existsAnswerToOneTimeQuesion(Long patientId, Long diseaseId);

	/**
	 * @return the oldest answer date of all patients in the system
	 */
	Date findOldestPatientsAnswerDate();

	/**
	 *
	 * @param patientId
	 * @return the oldest patient's answer date
	 */
	Date findOldestPatientAnswerDate(Long patientId);

	/**
	 *
	 * @param doctorId
	 * @return UserConnections list of a doctor
	 */
	List<UserConnection> findAllByDoctor(Long doctorId);

	/**
	 *
	 * @param patientId
	 * @return Info about given patient
	 */
	Patient getPatientInfo(Long patientId);

	/*
	 * NOTE(review): the following two Javadoc blocks are orphaned — the methods they
	 * documented appear to have been removed; see computeQuestionaryAnswersReport and
	 * computePositiveAnswerFrequencyReport below for the surviving variants. Consider
	 * deleting them.
	 */
	/**
	 * Computes questionary answers statistics for given patient
	 *
	 * @see {@link #computeQuestionaryAnswersReport}
	 * @param patientId
	 * @param haqId
	 * @param startDate
	 * @param endDate
	 * @return computed statistics for a patient
	 */
	/**
	 * Computes questionary answers time statistics for given patient
	 *
	 * @see {@link #computePositiveAnswerFrequencyReport}
	 * @param patientId
	 * @param haqId
	 * @param startDate
	 * @param endDate
	 * @return computed statistics for a patient - [Date, count]
	 */
	/**
	 * <code>UC5 Medication</code> Creates an association between the usage of
	 * medication and given patient associated disease
	 *
	 * @param patientId
	 * @param diseaseId
	 * @param medicationId
	 * @param standarUnitsTaken
	 * @param consumptionDate
	 * @param comment
	 * @throws IllegalArgumentException
	 *             <li>patientId == null</li> <li>diseaseId == null</li> <li>
	 *             medicationId == null</li> <li>standarUnitsTaken less than or equal to
	 *             zero</li>
	 */
	void addConsumedMedication(Long patientId, Long diseaseId, Long medicationId, double standarUnitsTaken,
			Date consumptionDate, String comment) throws IllegalArgumentException;

	/**
	 * Finds all medications consumed by given patient (consumed medication
	 * history)
	 *
	 * @param patientId
	 * @param diseaseId
	 * @return all medications consumed by given patient (consumed medication
	 *         history)
	 * @throws IllegalArgumentException
	 *             if <li>patientId == null</li> <li>diseaseId == null</li>
	 */
	List<Medication> findConsumedMedications(Long patientId, Long diseaseId) throws IllegalArgumentException;

	/*
	 * NOTE(review): orphaned Javadoc — the deprecated CDAI method it documented has been
	 * removed in favor of PatientKeyPerformanceIndicatorValidation#computeKPITimeline.
	 * Consider deleting this block.
	 */
	/**
	 * Dynamically computes list of patient's CDAI for each day in given time
	 * interval.
	 *
	 * @param patientId
	 * @param startDate
	 * @param endDate
	 * @return list of patient's CDAI for each day in given time interval.
	 * @throws IllegalArgumentException
	 *             if <li>patientId == null</li> <li>startDate == null</li> <li>
	 *             endDate == null</li>
	 * @deprecated use
	 *             {@link PatientKeyPerformanceIndicatorValidation#computeKPITimeline(Long, Long, Date, Date)}
	 */
	/**
	 * <code>UC3050</code> Patient creates custom question for himself
	 *
	 * @param patientId
	 * @param diseaseId
	 * @param text
	 * @param explanation
	 * @param questionTypeId
	 * @return id of new custom questions
	 * @throws IllegalArgumentException
	 *             <li>patientId == null</li> <li>diseaseId == null</li> <li>
	 *             questionText == null</li> <li>questionTypeId == null</li>
	 */
	Long addCustomQuestion(Long patientId, Long diseaseId, String text, String explanation, Long questionTypeId)
			throws IllegalArgumentException;

	/**
	 * <code>UC3050</code> Patient creates custom question for himself with
	 * custom question type (custom answers)
	 *
	 * @param patientId
	 * @param diseaseId
	 * @param text
	 * @param explanation
	 * @param questionType
	 * @return id of new custom questions
	 * @throws IllegalArgumentException
	 *             <li>patientId == null</li> <li>diseaseId == null</li> <li>
	 *             questionText == null</li> <li>questionType == null</li> <li>
	 *             questionType.id == null</li>
	 */
	Long addCustomQuestion(Long patientId, Long diseaseId, String text, String explanation, QuestionType questionType)
			throws IllegalArgumentException;

	/**
	 * Computes time based statistics for given patient, chart and time window
	 *
	 * @param patientId
	 *            patient for whom
	 * @param haqChartId
	 *            id of a {@link HaqChart}
	 * @param startDate
	 * @param endDate
	 * @return computed statistics for a patient - [Date, count]
	 */
	List<AnswerFrequency> computePositiveAnswerFrequencyReport(Long patientId, Long haqChartId, Date startDate,
			Date endDate);

	/**
	 * Computes questionary answers statistics for given patient and chart for
	 * all possible answers
	 *
	 * @param patientId
	 * @param haqChartId
	 * @param startDate
	 * @param endDate
	 * @return questionary statistics for given patient for all possible answers
	 */
	List<Object[]> computeAllAnswersFrequencyReport(Long patientId, Long haqChartId, Date startDate, Date endDate);

	/**
	 * Computes questionary answers statistics for given patient and
	 * all question's possible answers
	 *
	 * @param patientId
	 * @param questionId
	 * @param startDate
	 * @param endDate
	 * @return questionary statistics for given patient for all possible answers
	 */
	List<Object[]> computeQuestionAnswersFrequencyReport(Long patientId, Long questionId, Date startDate, Date endDate);

	/**
	 * Computes questionary answers statistics for given patient and
	 * all custom question's possible answers
	 *
	 * @param patientId
	 * @param questionId
	 * @param startDate
	 * @param endDate
	 * @return questionary statistics for given patient for all possible answers
	 */
	List<Object[]> computeCustomQuestionAnswersFrequencyReport(Long patientId, Long questionId, Date startDate,
			Date endDate);

	/**
	 * Computes questionary answers time statistics for given patient and chart
	 *
	 * @param patientId
	 * @param haqChartId
	 * @param startDate
	 * @param endDate
	 * @return questionary answers time statistics for given patient
	 */
	List<PatientQuestionaryAnswerStatistic> computeQuestionaryAnswersReport(Long patientId, Long haqChartId,
			Date startDate, Date endDate);

	/**
	 * Computes question's answers time statistics for given patient and question
	 *
	 * @param patientId
	 * @param questionId
	 * @param startDate
	 * @param endDate
	 * @return question's answers time statistics for given patient and question
	 */
	List<PatientQuestionaryAnswerStatistic> computeQuestionAnswersReport(Long patientId, Long questionId,
			Date startDate, Date endDate);

	/**
	 * Computes custom question's answers time statistics for given patient and question
	 *
	 * @param patientId
	 * @param questionId
	 * @param startDate
	 * @param endDate
	 * @return question's answers time statistics for given patient and question
	 */
	List<PatientQuestionaryAnswerStatistic> computeCustomQuestionAnswersReport(Long patientId, Long questionId,
			Date startDate, Date endDate);

	/**
	 * Finds all custom questions defined for given patient and disease
	 *
	 * @param patientId
	 * @param diseaseId
	 * @return all custom questions defined for given patient and disease
	 * @throws IllegalArgumentException
	 *             if <li>patientId == null</li> <li>diseaseId == null</li>
	 */
	List<CustomQuestion> findCustomQuestions(Long patientId, Long diseaseId) throws IllegalArgumentException;

	/**
	 * Finds all patient's single text answers for given disease and time window
	 *
	 * @param patientId
	 * @param diseaseId
	 * @param startDate
	 * @param endDate
	 * @return all patient's single text answers for given disease and time
	 *         window
	 * @throws IllegalArgumentException
	 *             if <li>patientId == null</li> <li>diseaseId == null</li> <li>
	 *             startDate == null</li> <li>endDate == null</li>
	 */
	List<PatientQuestionAnswer> findAnswersForSingleAnswerEntryQuestions(Long patientId, Long diseaseId,
			Date startDate, Date endDate) throws IllegalArgumentException;

	/**
	 * Finds patient's initial symptom date and diagnosis date for given disease
	 *
	 * @param patientId
	 * @param diseaseId
	 * @return patient's first symptom date and diagnosis date for given disease
	 * @throws IllegalArgumentException
	 *             if <li>patientId == null</li> <li>diseaseId == null</li>
	 */
	Pair<Date, Date> findFirstSymptomeAndDiagnosisDate(Long patientId, Long diseaseId) throws IllegalArgumentException;
}
package com.googlecode.d2j.smali;

import com.googlecode.d2j.DexConstants;
import com.googlecode.d2j.Field;
import com.googlecode.d2j.Method;
import com.googlecode.d2j.Visibility;
import com.googlecode.d2j.reader.Op;
import com.googlecode.d2j.visitors.DexAnnotationVisitor;

import java.math.BigInteger;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Shared helpers for the smali front end: access-flag name lookup, type-descriptor
 * list parsing, smali numeric/character/string literal parsing, and parsing of
 * method/field references in {@code Lowner;->name...} form.
 */
public class Utils implements DexConstants {

    /**
     * Recursively feeds an annotation element value into a visitor: lists become
     * visitArray, nested {@link Ann}s become visitAnnotation, {@link Field}s become
     * visitEnum, everything else is passed to visit() directly.
     *
     * @param dexAnnotationVisitor target visitor
     * @param k element name (null for array members)
     * @param value element value
     */
    public static void doAccept(DexAnnotationVisitor dexAnnotationVisitor, String k, Object value) {
        if (value instanceof ArrayList) {
            DexAnnotationVisitor a = dexAnnotationVisitor.visitArray(k);
            for (Object o : (ArrayList<?>) value) {
                doAccept(a, null, o);
            }
            a.visitEnd();
        } else if (value instanceof Ann) {
            Ann ann = (Ann) value;
            DexAnnotationVisitor a = dexAnnotationVisitor.visitAnnotation(k, ann.name);
            for (Map.Entry<String, Object> e : ann.elements) {
                doAccept(a, e.getKey(), e.getValue());
            }
            a.visitEnd();
        } else if (value instanceof Field) {
            // A Field value encodes an enum constant reference.
            Field f = (Field) value;
            dexAnnotationVisitor.visitEnum(k, f.getOwner(), f.getName());
        } else {
            dexAnnotationVisitor.visit(k, value);
        }
    }

    /**
     * Maps a smali access-flag keyword to its ACC_* bit; unknown names yield 0.
     *
     * @param name smali keyword, e.g. "public" or "declared-synchronized"
     * @return the matching ACC_* constant, or 0 if unrecognized
     */
    public static int getAcc(String name) {
        if (name.equals("public")) {
            return ACC_PUBLIC;
        } else if (name.equals("private")) {
            return ACC_PRIVATE;
        } else if (name.equals("protected")) {
            return ACC_PROTECTED;
        } else if (name.equals("static")) {
            return ACC_STATIC;
        } else if (name.equals("final")) {
            return ACC_FINAL;
        } else if (name.equals("synchronized")) {
            return ACC_SYNCHRONIZED;
        } else if (name.equals("volatile")) {
            return ACC_VOLATILE;
        } else if (name.equals("bridge")) {
            return ACC_BRIDGE;
        } else if (name.equals("varargs")) {
            return ACC_VARARGS;
        } else if (name.equals("transient")) {
            return ACC_TRANSIENT;
        } else if (name.equals("native")) {
            return ACC_NATIVE;
        } else if (name.equals("interface")) {
            return ACC_INTERFACE;
        } else if (name.equals("abstract")) {
            return ACC_ABSTRACT;
        } else if (name.equals("strict")) {
            return ACC_STRICT;
        } else if (name.equals("synthetic")) {
            return ACC_SYNTHETIC;
        } else if (name.equals("annotation")) {
            return ACC_ANNOTATION;
        } else if (name.equals("enum")) {
            return ACC_ENUM;
        } else if (name.equals("constructor")) {
            return ACC_CONSTRUCTOR;
        } else if (name.equals("declared-synchronized")) {
            return ACC_DECLARED_SYNCHRONIZED;
        }
        return 0;
    }

    /**
     * Splits a concatenation of type descriptors into individual descriptors, e.g.
     * "ILjava/lang/String;[J" -> ["I", "Ljava/lang/String;", "[J"].
     *
     * @param desc concatenated descriptors; null yields an empty list
     * @return list of single descriptors
     * @throws RuntimeException if an unknown descriptor character is found
     */
    public static List<String> listDesc(String desc) {
        // was: raw "new ArrayList(5)" — parameterize to avoid unchecked warnings
        List<String> list = new ArrayList<String>(5);
        if (desc == null) {
            return list;
        }
        char[] chars = desc.toCharArray();
        int i = 0;
        while (i < chars.length) {
            switch (chars[i]) {
            case 'V':
            case 'Z':
            case 'C':
            case 'B':
            case 'S':
            case 'I':
            case 'F':
            case 'J':
            case 'D':
                // Primitive (or void) descriptor is a single character.
                list.add(Character.toString(chars[i]));
                i++;
                break;
            case '[': {
                // Array: consume all '[' dimensions, then the element descriptor.
                int count = 1;
                while (chars[i + count] == '[') {
                    count++;
                }
                if (chars[i + count] == 'L') {
                    count++;
                    while (chars[i + count] != ';') {
                        count++;
                    }
                }
                count++;
                list.add(new String(chars, i, count));
                i += count;
                break;
            }
            case 'L': {
                // Object type: consume through the terminating ';'.
                int count = 1;
                while (chars[i + count] != ';') {
                    ++count;
                }
                count++;
                list.add(new String(chars, i, count));
                i += count;
                break;
            }
            default:
                throw new RuntimeException("can't parse type list: " + desc);
            }
        }
        return list;
    }

    public static String[] toTypeList(String s) {
        return listDesc(s).toArray(new String[0]);
    }

    /** Parses a smali byte literal such as "12t" (trailing type suffix is dropped). */
    public static Byte parseByte(String str) {
        return Byte.valueOf((byte) parseInt(str.substring(0, str.length() - 1)));
    }

    /** Parses a smali short literal such as "12s" (trailing type suffix is dropped). */
    public static Short parseShort(String str) {
        return Short.valueOf((short) parseInt(str.substring(0, str.length() - 1)));
    }

    /**
     * Parses a smali long literal such as "-0x10L"; the trailing 'L'/'l' suffix
     * character is always dropped. Supports +/- sign and hex (0x), octal (leading 0)
     * and decimal forms; BigInteger is used so unsigned 64-bit hex values wrap
     * correctly into a signed long.
     */
    public static Long parseLong(String str) {
        int sof = 0;
        int end = str.length() - 1; // exclude the 'L' suffix
        int x = 1;
        if (str.charAt(sof) == '+') {
            sof++;
        } else if (str.charAt(sof) == '-') {
            sof++;
            x = -1;
        }
        BigInteger v;
        if (str.charAt(sof) == '0') {
            sof++;
            if (sof >= end) {
                return 0L;
            }
            char c = str.charAt(sof);
            if (c == 'x' || c == 'X') { // hex
                sof++;
                v = new BigInteger(str.substring(sof, end), 16);
            } else { // oct
                v = new BigInteger(str.substring(sof, end), 8);
            }
        } else {
            v = new BigInteger(str.substring(sof, end), 10);
        }
        if (x == -1) {
            return v.negate().longValue();
        } else {
            return v.longValue();
        }
    }

    /**
     * Parses a smali float literal; accepts an optional +/- sign, an optional 'f'
     * suffix, and the special values "nan"/"infinity" (case-insensitive).
     */
    public static float parseFloat(String str) {
        str = str.toLowerCase();
        int s = 0;
        float x = 1f;
        if (str.charAt(s) == '+') {
            s++;
        } else if (str.charAt(s) == '-') {
            s++;
            x = -1;
        }
        int e = str.length() - 1;
        if (str.charAt(e) == 'f') {
            e--;
        }
        str = str.substring(s, e + 1);
        if (str.equals("nan")) {
            return Float.NaN;
        }
        if (str.equals("infinity")) {
            return x < 0 ? Float.NEGATIVE_INFINITY : Float.POSITIVE_INFINITY;
        }
        return x * Float.parseFloat(str);
    }

    /**
     * Parses a smali double literal; accepts an optional +/- sign, an optional 'd'
     * suffix, and the special values "nan"/"infinity" (case-insensitive).
     */
    public static double parseDouble(String str) {
        str = str.toLowerCase();
        int s = 0;
        double x = 1;
        if (str.charAt(s) == '+') {
            s++;
        } else if (str.charAt(s) == '-') {
            s++;
            x = -1;
        }
        int e = str.length() - 1;
        if (str.charAt(e) == 'd') {
            e--;
        }
        str = str.substring(s, e + 1);
        if (str.equals("nan")) {
            return Double.NaN;
        }
        if (str.equals("infinity")) {
            return x < 0 ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY;
        }
        return x * Double.parseDouble(str);
    }

    /**
     * Parses an int literal from str[start, end); supports +/- sign, hex (0x),
     * octal (leading 0) and decimal. Parsed as long first so unsigned 32-bit hex
     * values wrap correctly into a signed int.
     */
    public static int parseInt(String str, int start, int end) {
        int sof = start;
        int x = 1;
        if (str.charAt(sof) == '+') {
            sof++;
        } else if (str.charAt(sof) == '-') {
            sof++;
            x = -1;
        }
        long v;
        if (str.charAt(sof) == '0') {
            sof++;
            if (sof >= end) {
                return 0;
            }
            char c = str.charAt(sof);
            if (c == 'x' || c == 'X') { // hex
                sof++;
                v = Long.parseLong(str.substring(sof, end), 16);
            } else { // oct
                v = Long.parseLong(str.substring(sof, end), 8);
            }
        } else {
            v = Long.parseLong(str.substring(sof, end), 10);
        }
        return (int) (v * x);
    }

    public static int parseInt(String str) {
        return parseInt(str, 0, str.length());
    }

    /** Unescapes a quoted smali string literal (quotes included in the input). */
    public static String unescapeStr(String str) {
        return unEscape(str);
    }

    /** Unescapes a quoted smali char literal and returns its first character. */
    public static Character unescapeChar(String str) {
        return unEscape(str).charAt(0);
    }

    public static int[] toIntArray(List<String> ss) {
        // was: C-style "int vs[]" declaration
        int[] vs = new int[ss.size()];
        for (int i = 0; i < ss.size(); i++) {
            vs[i] = parseInt(ss.get(i));
        }
        return vs;
    }

    public static byte[] toByteArray(List<Object> ss) {
        byte[] vs = new byte[ss.size()];
        for (int i = 0; i < ss.size(); i++) {
            vs[i] = ((Number) (ss.get(i))).byteValue();
        }
        return vs;
    }

    // Lookup table from smali display name to opcode; populated once at class load.
    static Map<String, Op> ops = new HashMap<String, Op>();

    static {
        for (Op op : Op.values()) {
            ops.put(op.displayName, op);
        }
    }

    public static Op getOp(String name) {
        return ops.get(name);
    }

    /** Unescapes the content of a quoted literal, dropping the surrounding quotes. */
    public static String unEscape(String str) {
        return unEscape0(str, 1, str.length() - 1);
    }

    /** Unescapes an unquoted identifier. */
    public static String unEscapeId(String str) {
        return unEscape0(str, 0, str.length());
    }

    /**
     * Scans str from start, skipping over escape sequences, and returns the index of
     * the first unescaped occurrence of dEnd, or end if none is found before it.
     *
     * @throws RuntimeException on a malformed escape sequence
     */
    public static int findString(String str, int start, int end, char dEnd) {
        for (int i = start; i < end; ) {
            char c = str.charAt(i);
            if (c == '\\') {
                char d = str.charAt(i + 1);
                switch (d) {
                // ('b'|'t'|'n'|'f'|'r'|'\"'|'\''|'\\')
                case 'b':
                case 't':
                case 'n':
                case 'f':
                case 'r':
                case '\"':
                case '\'':
                case '\\':
                    i += 2;
                    break;
                case 'u':
                    // \ uXXXX — skip the four hex digits (was a dead substring() call)
                    i += 6;
                    break;
                default:
                    // Octal escape: up to three digits 0-7.
                    int x = 0;
                    while (x < 3) {
                        char e = str.charAt(i + 1 + x);
                        if (e >= '0' && e <= '7') {
                            x++;
                        } else {
                            break;
                        }
                    }
                    if (x == 0) {
                        throw new RuntimeException("can't parse string");
                    }
                    i += 1 + x;
                }
            } else {
                if (c == dEnd) {
                    return i;
                }
                i++;
            }
        }
        return end;
    }

    /**
     * Unescapes str[start, end), resolving the standard single-character escapes,
     * \ uXXXX unicode escapes and octal escapes.
     *
     * @throws RuntimeException on a malformed escape sequence
     */
    public static String unEscape0(String str, int start, int end) {
        StringBuilder sb = new StringBuilder();
        for (int i = start; i < end; ) {
            char c = str.charAt(i);
            if (c == '\\') {
                char d = str.charAt(i + 1);
                switch (d) {
                // ('b'|'t'|'n'|'f'|'r'|'\"'|'\''|'\\')
                case 'b':
                    sb.append('\b');
                    i += 2;
                    break;
                case 't':
                    sb.append('\t');
                    i += 2;
                    break;
                case 'n':
                    sb.append('\n');
                    i += 2;
                    break;
                case 'f':
                    sb.append('\f');
                    i += 2;
                    break;
                case 'r':
                    sb.append('\r');
                    i += 2;
                    break;
                case '\"':
                    sb.append('\"');
                    i += 2;
                    break;
                case '\'':
                    sb.append('\'');
                    i += 2;
                    break;
                case '\\':
                    sb.append('\\');
                    i += 2;
                    break;
                case 'u':
                    String sub = str.substring(i + 2, i + 6);
                    sb.append((char) Integer.parseInt(sub, 16));
                    i += 6;
                    break;
                default:
                    // Octal escape: up to three digits 0-7.
                    int x = 0;
                    while (x < 3) {
                        char e = str.charAt(i + 1 + x);
                        if (e >= '0' && e <= '7') {
                            x++;
                        } else {
                            break;
                        }
                    }
                    if (x == 0) {
                        throw new RuntimeException("can't parse string");
                    }
                    sb.append((char) Integer.parseInt(str.substring(i + 1, i + 1 + x), 8));
                    i += 1 + x;
                }
            } else {
                sb.append(c);
                i++;
            }
        }
        return sb.toString();
    }

    /** Mutable builder for a parsed annotation: a type name plus ordered name/value pairs. */
    public static class Ann {
        public String name;
        public List<Map.Entry<String, Object>> elements = new ArrayList<Map.Entry<String, Object>>();

        public void put(String name, Object value) {
            elements.add(new java.util.AbstractMap.SimpleEntry<String, Object>(name, value));
        }
    }

    public static Visibility getAnnVisibility(String name) {
        return Visibility.valueOf(name.toUpperCase());
    }

    /**
     * Number of registers consumed by a method's incoming arguments: one for 'this'
     * unless static, and two for each wide (J/D) parameter.
     */
    public static int methodIns(Method m, boolean isStatic) {
        int a = isStatic ? 0 : 1;
        for (String t : m.getParameterTypes()) {
            switch (t.charAt(0)) {
            case 'J':
            case 'D':
                a += 2;
                break;
            default:
                a += 1;
                break;
            }
        }
        return a;
    }

    /**
     * Maps a register number to the index of the parameter it holds, given the number
     * of local (non-parameter) registers; returns -1 for registers that do not hold
     * a parameter (including the implicit 'this' register).
     */
    public static int reg2ParamIdx(Method m, int reg, int locals, boolean isStatic) {
        int x = reg - locals;
        if (x < 0) {
            return -1;
        }
        int a = isStatic ? 0 : 1;
        String[] parameterTypes = m.getParameterTypes();
        for (int i = 0, parameterTypesLength = parameterTypes.length; i < parameterTypesLength; i++) {
            if (x == a) {
                return i;
            }
            String t = parameterTypes[i];
            switch (t.charAt(0)) {
            case 'J':
            case 'D':
                a += 2;
                break;
            default:
                a += 1;
                break;
            }
        }
        return -1;
    }

    /**
     * Parses a "name(params)ret" method reference with escaped identifiers.
     *
     * @throws RuntimeException if the parentheses are missing or misplaced
     */
    public static Method parseMethodAndUnescape(String owner, String part) throws RuntimeException {
        int x = part.indexOf('(');
        if (x < 0) {
            throw new RuntimeException("method reference has no '(': " + part);
        }
        int y = part.indexOf(')', x);
        if (y < 0) {
            throw new RuntimeException("method reference has no ')': " + part);
        }
        String methodName = unEscapeId(part.substring(0, x));
        String[] params = toTypeList(part.substring(x + 1, y));
        for (int i = 0; i < params.length; i++) {
            params[i] = unEscapeId(params[i]);
        }
        String ret = unEscapeId(part.substring(y + 1));
        return new Method(owner, methodName, params, ret);
    }

    /**
     * Parses a full "Lowner;->name(params)ret" method reference.
     *
     * @throws RuntimeException if the "->" separator is missing
     */
    public static Method parseMethodAndUnescape(String full) throws RuntimeException {
        int x = full.indexOf("->");
        if (x <= 0) {
            throw new RuntimeException("method reference has no '->': " + full);
        }
        return parseMethodAndUnescape(unEscapeId(full.substring(0, x)), full.substring(x + 2));
    }

    /**
     * Parses a "name:type" field reference with escaped identifiers.
     *
     * @throws RuntimeException if the ':' separator is missing
     */
    public static Field parseFieldAndUnescape(String owner, String part) throws RuntimeException {
        int x = part.indexOf(':');
        if (x < 0) {
            throw new RuntimeException("field reference has no ':': " + part);
        }
        return new Field(owner, unEscapeId(part.substring(0, x)), unEscapeId(part.substring(x + 1)));
    }

    /**
     * Parses a full "Lowner;->name:type" field reference.
     *
     * @throws RuntimeException if the "->" separator is missing
     */
    public static Field parseFieldAndUnescape(String full) throws RuntimeException {
        int x = full.indexOf("->");
        if (x <= 0) {
            throw new RuntimeException("field reference has no '->': " + full);
        }
        return parseFieldAndUnescape(unEscapeId(full.substring(0, x)), full.substring(x + 2));
    }
}
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;

/**
 * TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. The job
 * bypasses the HBase servers and reads the underlying files (hfile, recovered edits, wals,
 * etc.) to provide maximum performance. The snapshot does not have to be restored to the
 * live cluster or cloned first, so the job can run against an online or an offline HBase
 * cluster. The snapshot files can also be exported with the
 * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool to a pure-hdfs cluster, and
 * this InputFormat can run the mapreduce job directly over the exported files. The snapshot
 * must not be deleted while jobs are still reading from its files.
 * <p>
 * Usage is similar to TableInputFormat, and
 * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job, boolean, Path)}
 * can be used to configure the job:
 * <pre>{@code
 * Job job = new Job(conf);
 * Scan scan = new Scan();
 * TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
 *     scan, MyTableMapper.class, MyMapKeyOutput.class,
 *     MyMapOutputValueWritable.class, job, true);
 * }
 * </pre>
 * <p>
 * Internally, this input format restores the snapshot into the given tmp directory. By
 * default, and like {@link TableInputFormat}, one InputSplit is created per region, but
 * optionally N mapper tasks can run per region, in which case the region key range is
 * divided into N sub-ranges with one InputSplit per sub-range. Each RecordReader opens the
 * region for reading and uses an internal RegionScanner to execute the
 * {@link org.apache.hadoop.hbase.CellScanner} obtained from the user.
 * <p>
 * HBase owns all the data and snapshot files on the filesystem, and only the 'hbase' user
 * can read them. To read snapshot files directly from the file system, the user running the
 * MR job must therefore have sufficient permissions on the snapshot and reference files:
 * either run the job as the HBase user, or grant the user group/other privileges in the
 * filesystem (see HBASE-8369). Note that granting other users read access to snapshot/data
 * files completely circumvents the access control enforced by HBase.
 * @see org.apache.hadoop.hbase.client.TableSnapshotScanner
 */
@InterfaceAudience.Public
public class TableSnapshotInputFormat extends InputFormat<ImmutableBytesWritable, Result> {

  /** One split of the snapshot; a thin {@link Writable} wrapper around the impl-layer split. */
  public static class TableSnapshotRegionSplit extends InputSplit implements Writable {
    private TableSnapshotInputFormatImpl.InputSplit delegate;

    // constructor for mapreduce framework / Writable
    public TableSnapshotRegionSplit() {
      this.delegate = new TableSnapshotInputFormatImpl.InputSplit();
    }

    public TableSnapshotRegionSplit(TableSnapshotInputFormatImpl.InputSplit delegate) {
      this.delegate = delegate;
    }

    public TableSnapshotRegionSplit(HTableDescriptor htd, HRegionInfo regionInfo,
        List<String> locations, Scan scan, Path restoreDir) {
      this.delegate =
          new TableSnapshotInputFormatImpl.InputSplit(htd, regionInfo, locations, scan, restoreDir);
    }

    @Override
    public long getLength() throws IOException, InterruptedException {
      return delegate.getLength();
    }

    @Override
    public String[] getLocations() throws IOException, InterruptedException {
      return delegate.getLocations();
    }

    @Override
    public void write(DataOutput out) throws IOException {
      delegate.write(out);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
      delegate.readFields(in);
    }

    /**
     * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
     *             Use {@link #getRegion()}
     */
    @Deprecated
    public HRegionInfo getRegionInfo() {
      return delegate.getRegionInfo();
    }

    public RegionInfo getRegion() {
      return delegate.getRegionInfo();
    }

    TableSnapshotInputFormatImpl.InputSplit getDelegate() {
      return this.delegate;
    }
  }

  /** Reads one split by delegating row iteration to the impl-layer record reader. */
  @VisibleForTesting
  static class TableSnapshotRegionRecordReader
      extends RecordReader<ImmutableBytesWritable, Result> {
    private TableSnapshotInputFormatImpl.RecordReader delegate =
        new TableSnapshotInputFormatImpl.RecordReader();
    private TaskAttemptContext context;
    private Method getCounter;

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context)
        throws IOException, InterruptedException {
      this.context = context;
      getCounter = TableRecordReaderImpl.retrieveGetCounterWithStringsParams(context);
      delegate.initialize(((TableSnapshotRegionSplit) split).delegate, context.getConfiguration());
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
      if (!delegate.nextKeyValue()) {
        return false;
      }
      // After each successful row, fold the scanner's metrics into the task counters.
      ScanMetrics metrics = delegate.getScanner().getScanMetrics();
      if (metrics != null && context != null) {
        TableRecordReaderImpl.updateCounters(metrics, 0, getCounter, context, 0);
      }
      return true;
    }

    @Override
    public ImmutableBytesWritable getCurrentKey() throws IOException, InterruptedException {
      return delegate.getCurrentKey();
    }

    @Override
    public Result getCurrentValue() throws IOException, InterruptedException {
      return delegate.getCurrentValue();
    }

    @Override
    public float getProgress() throws IOException, InterruptedException {
      return delegate.getProgress();
    }

    @Override
    public void close() throws IOException {
      delegate.close();
    }
  }

  @Override
  public RecordReader<ImmutableBytesWritable, Result> createRecordReader(
      InputSplit split, TaskAttemptContext context) throws IOException {
    return new TableSnapshotRegionRecordReader();
  }

  @Override
  public List<InputSplit> getSplits(JobContext job) throws IOException, InterruptedException {
    List<InputSplit> wrapped = new ArrayList<>();
    for (TableSnapshotInputFormatImpl.InputSplit rawSplit :
        TableSnapshotInputFormatImpl.getSplits(job.getConfiguration())) {
      wrapped.add(new TableSnapshotRegionSplit(rawSplit));
    }
    return wrapped;
  }

  /**
   * Configures the job to use TableSnapshotInputFormat to read from a snapshot.
   * @param job the job to configure
   * @param snapshotName the name of the snapshot to read from
   * @param restoreDir a temporary directory to restore the snapshot into. Current user should
   * have write permissions to this directory, and this should not be a subdirectory of rootdir.
   * After the job is finished, restoreDir can be deleted.
   * @throws IOException if an error occurs
   */
  public static void setInput(Job job, String snapshotName, Path restoreDir)
      throws IOException {
    TableSnapshotInputFormatImpl.setInput(job.getConfiguration(), snapshotName, restoreDir);
  }

  /**
   * Configures the job to use TableSnapshotInputFormat to read from a snapshot.
   * @param job the job to configure
   * @param snapshotName the name of the snapshot to read from
   * @param restoreDir a temporary directory to restore the snapshot into. Current user should
   * have write permissions to this directory, and this should not be a subdirectory of rootdir.
   * After the job is finished, restoreDir can be deleted.
   * @param splitAlgo split algorithm to generate splits from region
   * @param numSplitsPerRegion how many input splits to generate per one region
   * @throws IOException if an error occurs
   */
  public static void setInput(Job job, String snapshotName, Path restoreDir,
      RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException {
    TableSnapshotInputFormatImpl.setInput(job.getConfiguration(), snapshotName, restoreDir,
        splitAlgo, numSplitsPerRegion);
  }
}
/*
 * Copyright (c) 2008-2016 Haulmont.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.haulmont.cuba.web.gui.components;

import com.haulmont.bali.events.Subscription;
import com.haulmont.chile.core.model.utils.InstanceUtils;
import com.haulmont.cuba.core.global.AppBeans;
import com.haulmont.cuba.gui.components.*;
import com.haulmont.cuba.gui.components.data.ValueSource;
import com.haulmont.cuba.gui.components.data.meta.ValueBinding;
import com.haulmont.cuba.gui.components.data.value.ValueBinder;
import com.haulmont.cuba.web.widgets.compatibility.CubaValueChangeEvent;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.function.Consumer;

/**
 * Base class for web field components backed by a Vaadin 7 {@code AbstractField} widget.
 * Manages value binding to a {@link ValueSource}, the editable state (combined with the
 * parent's editable state), required/validation handling, and value-change event
 * propagation from the underlying widget to framework listeners.
 *
 * @param <T> the concrete Vaadin widget type (inherited {@code component} field)
 * @param <V> the model value type
 */
public abstract class WebAbstractField<T extends com.vaadin.v7.ui.AbstractField, V>
        extends WebAbstractComponent<T> implements Field<V> /* todo ds: move to Field */ {

    // Initial capacity for the lazily created validators list; most fields have few validators.
    protected static final int VALIDATORS_LIST_INITIAL_CAPACITY = 4;

    protected List<Consumer> validators; // lazily initialized list

    // Editable flag requested on this field; the effective state also depends on the parent.
    protected boolean editable = true;
    // Last value seen by componentValueChanged(), used to detect real changes.
    protected V internalValue;
    // Active binding to the current ValueSource, or null when unbound.
    protected ValueBinding<V> valueBinding;

    // VAADIN8: gg, replace with Subscription
    protected Consumer<EditableChangeNotifier.EditableChangeEvent> parentEditableChangeListener;

    @Override
    public ValueSource<V> getValueSource() {
        return valueBinding != null ? valueBinding.getSource() : null;
    }

    @Override
    public void setValueSource(ValueSource<V> valueSource) {
        // Drop the previous binding first so the field is never bound to two sources.
        if (this.valueBinding != null) {
            valueBinding.unbind();
            this.valueBinding = null;
        }

        if (valueSource != null) {
            // todo use ApplicationContextAware and lookup
            ValueBinder binder = AppBeans.get(ValueBinder.class);
            this.valueBinding = binder.bind(this, valueSource);

            // Hook order matters: connected -> activate -> activated.
            valueBindingConnected(valueSource);

            this.valueBinding.activate();

            valueBindingActivated(valueSource);
        }
    }

    // Hook for subclasses: called after the binding has been activated.
    protected void valueBindingActivated(ValueSource<V> valueSource) {
        // hook
    }

    // Hook for subclasses: called after the binding is created but before activation.
    protected void valueBindingConnected(ValueSource<V> valueSource) {
        // hook
    }

    // Hook for subclasses that install a model<->presentation converter on the widget.
    protected void initFieldConverter() {
    }

    @SuppressWarnings("unchecked")
    @Override
    public Subscription addValueChangeListener(Consumer<ValueChangeEvent<V>> listener) {
        return getEventHub().subscribe(ValueChangeEvent.class, (Consumer) listener);
    }

    @SuppressWarnings("unchecked")
    @Override
    public void removeValueChangeListener(Consumer<ValueChangeEvent<V>> listener) {
        unsubscribe(ValueChangeEvent.class, (Consumer) listener);
    }

    @Override
    public boolean isRequired() {
        return component.isRequired();
    }

    @Override
    public void setRequired(boolean required) {
        component.setRequired(required);
    }

    @Override
    public void setRequiredMessage(String msg) {
        component.setRequiredError(msg);
    }

    @Override
    public String getRequiredMessage() {
        return component.getRequiredError();
    }

    @Override
    public V getValue() {
        //noinspection unchecked
        return (V) component.getValue();
    }

    @Override
    public void setValue(V value) {
        setValueToPresentation(convertToPresentation(value));
    }

    /**
     * Re-registers the parent-editable listener when the field is moved between containers:
     * detaches the listener from the old parent (if any), then, if the new parent notifies
     * about editable changes, subscribes and immediately syncs the widget's editable state.
     */
    @Override
    public void setParent(Component parent) {
        if (this.parent instanceof EditableChangeNotifier
                && parentEditableChangeListener != null) {
            ((EditableChangeNotifier) this.parent).removeEditableChangeListener(parentEditableChangeListener);

            parentEditableChangeListener = null;
        }

        super.setParent(parent);

        if (parent instanceof EditableChangeNotifier) {
            parentEditableChangeListener = event -> {
                // Effective editability = parent's editable AND this field's own flag.
                boolean parentEditable = event.getSource().isEditable();
                boolean finalEditable = parentEditable && editable;
                setEditableToComponent(finalEditable);
            };
            ((EditableChangeNotifier) parent).addEditableChangeListener(parentEditableChangeListener);

            Editable parentEditable = (Editable) parent;
            if (!parentEditable.isEditable()) {
                setEditableToComponent(false);
            }
        }
    }

    // Pushes an already-converted presentation value into the widget,
    // clearing any stale validation error first.
    protected void setValueToPresentation(Object value) {
        if (hasValidationError()) {
            setValidationError(null);
        }

        component.setValue(value);
    }

    @Override
    public boolean isEditable() {
        return editable;
    }

    @Override
    public void setEditable(boolean editable) {
        if (this.editable == editable) {
            return;
        }

        this.editable = editable;

        // Combine with the parent's editable state before touching the widget.
        boolean parentEditable = true;
        if (parent instanceof ChildEditableController) {
            parentEditable = ((ChildEditableController) parent).isEditable();
        }
        boolean finalEditable = parentEditable && editable;
        setEditableToComponent(finalEditable);
    }

    // Applies the effective editable state to the Vaadin widget (read-only is the inverse).
    protected void setEditableToComponent(boolean editable) {
        component.setReadOnly(!editable);
    }

    // Wires the Vaadin value-change event to componentValueChanged(); user-originated
    // only when the compatibility event says so.
    @SuppressWarnings("unchecked")
    protected void attachListener(T component) {
        component.addValueChangeListener(event -> {
            Object value = event.getProperty().getValue();
            componentValueChanged(value,
                    event instanceof CubaValueChangeEvent
                            && ((CubaValueChangeEvent) event).isUserOriginated());
        });
    }

    /**
     * Handles a raw value change from the widget: converts to the model type, updates
     * {@link #internalValue}, and, if the value actually changed, clears any validation
     * error and publishes a {@link ValueChangeEvent}.
     */
    protected void componentValueChanged(Object newComponentValue, boolean userOriginated) {
        V value = convertToModel(newComponentValue);
        V oldValue = internalValue;
        internalValue = value;

        if (!fieldValueEquals(value, oldValue)) {
            if (hasValidationError()) {
                setValidationError(null);
            }

            ValueChangeEvent<V> event = new ValueChangeEvent<>(this, oldValue, value, userOriginated);
            publish(ValueChangeEvent.class, event);
        }
    }

    // Identity conversion by default; subclasses override for real conversion.
    @SuppressWarnings("unchecked")
    protected V convertToModel(Object componentRawValue) {
        return (V) componentRawValue;
    }

    // Identity conversion by default; subclasses override for real conversion.
    @SuppressWarnings("unchecked")
    protected Object convertToPresentation(V modelValue) {
        return modelValue;
    }

    // Equality used to decide whether a change event should be fired.
    protected boolean fieldValueEquals(V value, V oldValue) {
        return InstanceUtils.propertyValueEquals(oldValue, value);
    }

    @Override
    public void addValidator(Consumer<? super V> validator) {
        if (validators == null) {
            validators = new ArrayList<>(VALIDATORS_LIST_INITIAL_CAPACITY);
        }
        if (!validators.contains(validator)) {
            validators.add(validator);
        }
    }

    @Override
    public void removeValidator(Consumer<V> validator) {
        if (validators != null) {
            validators.remove(validator);
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public Collection<Consumer<V>> getValidators() {
        if (validators == null) {
            return Collections.emptyList();
        }
        // Read-only view over the live list.
        return (Collection) Collections.unmodifiableCollection(validators);
    }

    @Override
    public boolean isValid() {
        try {
            validate();
            return true;
        } catch (ValidationException e) {
            return false;
        }
    }

    /**
     * Validates the current value: clears any previous error, skips invisible/non-editable/
     * disabled fields, enforces the required constraint on empty values, then runs every
     * registered validator. The first validator failure aborts the run, records the error
     * message on the field, and is rethrown wrapped in a ValidationFailedException.
     */
    @Override
    public void validate() throws ValidationException {
        if (hasValidationError()) {
            setValidationError(null);
        }

        if (!isVisibleRecursive() || !isEditableWithParent() || !isEnabledRecursive()) {
            return;
        }

        Object value = getValue();
        if (isEmpty(value)) {
            if (isRequired()) {
                throw new RequiredValueMissingException(getRequiredMessage(), this);
            } else {
                return;
            }
        }

        if (validators != null) {
            try {
                for (Consumer validator : validators) {
                    validator.accept(value);
                }
            } catch (ValidationException e) {
                setValidationError(e.getDetailsMessage());

                throw new ValidationFailedException(e.getDetailsMessage(), this, e);
            }
        }
    }

    // Buffered-mode passthroughs to the underlying Vaadin widget.
    protected void commit() {
        component.commit();
    }

    protected void discard() {
        component.discard();
    }

    protected boolean isBuffered() {
        return component.isBuffered();
    }

    protected void setBuffered(boolean buffered) {
        component.setBuffered(buffered);
    }

    protected boolean isModified() {
        return component.isModified();
    }

    // Emptiness check used by the required-value validation; subclasses may refine.
    protected boolean isEmpty(Object value) {
        return value == null;
    }

    /*@Override
    public String getContextHelpText() {
        return component.getContextHelpText();
    }

    @Override
    public void setContextHelpText(String contextHelpText) {
        component.setContextHelpText(contextHelpText);
    }

    @Override
    public boolean isContextHelpTextHtmlEnabled() {
        return component.isContextHelpTextHtmlEnabled();
    }

    @Override
    public void setContextHelpTextHtmlEnabled(boolean enabled) {
        component.setContextHelpTextHtmlEnabled(enabled);
    }*/

    /*@Override
    public Consumer<ContextHelpIconClickEvent> getContextHelpIconClickHandler() {
        return contextHelpIconClickHandler;
    }

    @Override
    public void setContextHelpIconClickHandler(Consumer<ContextHelpIconClickEvent> handler) {
        if (!Objects.equals(this.contextHelpIconClickHandler, handler)) {
            this.contextHelpIconClickHandler = handler;
            if (handler == null) {
                // todo vaadin8
                component.removeContextHelpIconClickListener(contextHelpIconClickListener);
                contextHelpIconClickListener = null;
            } else {
                if (contextHelpIconClickListener == null) {
                    contextHelpIconClickListener = (ContextHelpIconClickListener) e -> {
                        ContextHelpIconClickEvent event = new ContextHelpIconClickEvent(WebAbstractField.this);
                        fireContextHelpIconClick(event);
                    };
                    component.addContextHelpIconClickListener(contextHelpIconClickListener);
                }
            }
        }
    }

    protected void fireContextHelpIconClick(ContextHelpIconClickEvent event) {
        if (contextHelpIconClickHandler != null) {
            contextHelpIconClickHandler.accept(event);
        }
    }*/
}
// Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/ads/googleads/v10/services/shared_set_service.proto package com.google.ads.googleads.v10.services; /** * <pre> * Response message for a shared set mutate. * </pre> * * Protobuf type {@code google.ads.googleads.v10.services.MutateSharedSetsResponse} */ public final class MutateSharedSetsResponse extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:google.ads.googleads.v10.services.MutateSharedSetsResponse) MutateSharedSetsResponseOrBuilder { private static final long serialVersionUID = 0L; // Use MutateSharedSetsResponse.newBuilder() to construct. private MutateSharedSetsResponse(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { super(builder); } private MutateSharedSetsResponse() { results_ = java.util.Collections.emptyList(); } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { return new MutateSharedSetsResponse(); } @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private MutateSharedSetsResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 18: { if (!((mutable_bitField0_ & 0x00000001) != 0)) { results_ = new java.util.ArrayList<com.google.ads.googleads.v10.services.MutateSharedSetResult>(); mutable_bitField0_ |= 0x00000001; } results_.add( input.readMessage(com.google.ads.googleads.v10.services.MutateSharedSetResult.parser(), 
extensionRegistry)); break; } case 26: { com.google.rpc.Status.Builder subBuilder = null; if (partialFailureError_ != null) { subBuilder = partialFailureError_.toBuilder(); } partialFailureError_ = input.readMessage(com.google.rpc.Status.parser(), extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(partialFailureError_); partialFailureError_ = subBuilder.buildPartial(); } break; } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) != 0)) { results_ = java.util.Collections.unmodifiableList(results_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.ads.googleads.v10.services.SharedSetServiceProto.internal_static_google_ads_googleads_v10_services_MutateSharedSetsResponse_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.ads.googleads.v10.services.SharedSetServiceProto.internal_static_google_ads_googleads_v10_services_MutateSharedSetsResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.ads.googleads.v10.services.MutateSharedSetsResponse.class, com.google.ads.googleads.v10.services.MutateSharedSetsResponse.Builder.class); } public static final int PARTIAL_FAILURE_ERROR_FIELD_NUMBER = 3; private com.google.rpc.Status partialFailureError_; /** * <pre> * Errors that pertain to operation failures in the partial failure mode. * Returned only when partial_failure = true and all errors occur inside the * operations. If any errors occur outside the operations (e.g. 
auth errors), * we return an RPC level error. * </pre> * * <code>.google.rpc.Status partial_failure_error = 3;</code> * @return Whether the partialFailureError field is set. */ @java.lang.Override public boolean hasPartialFailureError() { return partialFailureError_ != null; } /** * <pre> * Errors that pertain to operation failures in the partial failure mode. * Returned only when partial_failure = true and all errors occur inside the * operations. If any errors occur outside the operations (e.g. auth errors), * we return an RPC level error. * </pre> * * <code>.google.rpc.Status partial_failure_error = 3;</code> * @return The partialFailureError. */ @java.lang.Override public com.google.rpc.Status getPartialFailureError() { return partialFailureError_ == null ? com.google.rpc.Status.getDefaultInstance() : partialFailureError_; } /** * <pre> * Errors that pertain to operation failures in the partial failure mode. * Returned only when partial_failure = true and all errors occur inside the * operations. If any errors occur outside the operations (e.g. auth errors), * we return an RPC level error. * </pre> * * <code>.google.rpc.Status partial_failure_error = 3;</code> */ @java.lang.Override public com.google.rpc.StatusOrBuilder getPartialFailureErrorOrBuilder() { return getPartialFailureError(); } public static final int RESULTS_FIELD_NUMBER = 2; private java.util.List<com.google.ads.googleads.v10.services.MutateSharedSetResult> results_; /** * <pre> * All results for the mutate. * </pre> * * <code>repeated .google.ads.googleads.v10.services.MutateSharedSetResult results = 2;</code> */ @java.lang.Override public java.util.List<com.google.ads.googleads.v10.services.MutateSharedSetResult> getResultsList() { return results_; } /** * <pre> * All results for the mutate. * </pre> * * <code>repeated .google.ads.googleads.v10.services.MutateSharedSetResult results = 2;</code> */ @java.lang.Override public java.util.List<? 
extends com.google.ads.googleads.v10.services.MutateSharedSetResultOrBuilder> getResultsOrBuilderList() { return results_; } /** * <pre> * All results for the mutate. * </pre> * * <code>repeated .google.ads.googleads.v10.services.MutateSharedSetResult results = 2;</code> */ @java.lang.Override public int getResultsCount() { return results_.size(); } /** * <pre> * All results for the mutate. * </pre> * * <code>repeated .google.ads.googleads.v10.services.MutateSharedSetResult results = 2;</code> */ @java.lang.Override public com.google.ads.googleads.v10.services.MutateSharedSetResult getResults(int index) { return results_.get(index); } /** * <pre> * All results for the mutate. * </pre> * * <code>repeated .google.ads.googleads.v10.services.MutateSharedSetResult results = 2;</code> */ @java.lang.Override public com.google.ads.googleads.v10.services.MutateSharedSetResultOrBuilder getResultsOrBuilder( int index) { return results_.get(index); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < results_.size(); i++) { output.writeMessage(2, results_.get(i)); } if (partialFailureError_ != null) { output.writeMessage(3, getPartialFailureError()); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; for (int i = 0; i < results_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, results_.get(i)); } if (partialFailureError_ != null) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(3, getPartialFailureError()); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } 
@java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof com.google.ads.googleads.v10.services.MutateSharedSetsResponse)) { return super.equals(obj); } com.google.ads.googleads.v10.services.MutateSharedSetsResponse other = (com.google.ads.googleads.v10.services.MutateSharedSetsResponse) obj; if (hasPartialFailureError() != other.hasPartialFailureError()) return false; if (hasPartialFailureError()) { if (!getPartialFailureError() .equals(other.getPartialFailureError())) return false; } if (!getResultsList() .equals(other.getResultsList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasPartialFailureError()) { hash = (37 * hash) + PARTIAL_FAILURE_ERROR_FIELD_NUMBER; hash = (53 * hash) + getPartialFailureError().hashCode(); } if (getResultsCount() > 0) { hash = (37 * hash) + RESULTS_FIELD_NUMBER; hash = (53 * hash) + getResultsList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static com.google.ads.googleads.v10.services.MutateSharedSetsResponse parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.ads.googleads.v10.services.MutateSharedSetsResponse parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.ads.googleads.v10.services.MutateSharedSetsResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
com.google.ads.googleads.v10.services.MutateSharedSetsResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.ads.googleads.v10.services.MutateSharedSetsResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.ads.googleads.v10.services.MutateSharedSetsResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.ads.googleads.v10.services.MutateSharedSetsResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static com.google.ads.googleads.v10.services.MutateSharedSetsResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static com.google.ads.googleads.v10.services.MutateSharedSetsResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static com.google.ads.googleads.v10.services.MutateSharedSetsResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static com.google.ads.googleads.v10.services.MutateSharedSetsResponse parseFrom( com.google.protobuf.CodedInputStream input) 
throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static com.google.ads.googleads.v10.services.MutateSharedSetsResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(com.google.ads.googleads.v10.services.MutateSharedSetsResponse prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * <pre> * Response message for a shared set mutate. 
   * </pre>
   *
   * Protobuf type {@code google.ads.googleads.v10.services.MutateSharedSetsResponse}
   */
  // NOTE(review): protoc-generated Builder — do not hand-edit; regenerate from the .proto.
  public static final class Builder extends
      com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
      // @@protoc_insertion_point(builder_implements:google.ads.googleads.v10.services.MutateSharedSetsResponse)
      com.google.ads.googleads.v10.services.MutateSharedSetsResponseOrBuilder {

    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return com.google.ads.googleads.v10.services.SharedSetServiceProto.internal_static_google_ads_googleads_v10_services_MutateSharedSetsResponse_descriptor;
    }

    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.google.ads.googleads.v10.services.SharedSetServiceProto.internal_static_google_ads_googleads_v10_services_MutateSharedSetsResponse_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.google.ads.googleads.v10.services.MutateSharedSetsResponse.class, com.google.ads.googleads.v10.services.MutateSharedSetsResponse.Builder.class);
    }

    // Construct using com.google.ads.googleads.v10.services.MutateSharedSetsResponse.newBuilder()
    private Builder() {
      maybeForceBuilderInitialization();
    }

    private Builder(
        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
      maybeForceBuilderInitialization();
    }

    // Eagerly creates the repeated-field builder when the runtime is configured
    // to always use field builders (nested-builder support).
    private void maybeForceBuilderInitialization() {
      if (com.google.protobuf.GeneratedMessageV3
              .alwaysUseFieldBuilders) {
        getResultsFieldBuilder();
      }
    }

    /** Resets all fields to their defaults (bit 0x1 of bitField0_ tracks results_ mutability). */
    @java.lang.Override
    public Builder clear() {
      super.clear();
      if (partialFailureErrorBuilder_ == null) {
        partialFailureError_ = null;
      } else {
        partialFailureError_ = null;
        partialFailureErrorBuilder_ = null;
      }
      if (resultsBuilder_ == null) {
        results_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
      } else {
        resultsBuilder_.clear();
      }
      return this;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor
        getDescriptorForType() {
      return com.google.ads.googleads.v10.services.SharedSetServiceProto.internal_static_google_ads_googleads_v10_services_MutateSharedSetsResponse_descriptor;
    }

    @java.lang.Override
    public com.google.ads.googleads.v10.services.MutateSharedSetsResponse getDefaultInstanceForType() {
      return com.google.ads.googleads.v10.services.MutateSharedSetsResponse.getDefaultInstance();
    }

    /** Builds the message; throws if required fields are missing (none here, proto3). */
    @java.lang.Override
    public com.google.ads.googleads.v10.services.MutateSharedSetsResponse build() {
      com.google.ads.googleads.v10.services.MutateSharedSetsResponse result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    /** Builds without the initialization check; freezes results_ into an unmodifiable list. */
    @java.lang.Override
    public com.google.ads.googleads.v10.services.MutateSharedSetsResponse buildPartial() {
      com.google.ads.googleads.v10.services.MutateSharedSetsResponse result = new com.google.ads.googleads.v10.services.MutateSharedSetsResponse(this);
      int from_bitField0_ = bitField0_;
      if (partialFailureErrorBuilder_ == null) {
        result.partialFailureError_ = partialFailureError_;
      } else {
        result.partialFailureError_ = partialFailureErrorBuilder_.build();
      }
      if (resultsBuilder_ == null) {
        if (((bitField0_ & 0x00000001) != 0)) {
          results_ = java.util.Collections.unmodifiableList(results_);
          bitField0_ = (bitField0_ & ~0x00000001);
        }
        result.results_ = results_;
      } else {
        result.results_ = resultsBuilder_.build();
      }
      onBuilt();
      return result;
    }

    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }
    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        java.lang.Object value) {
      return super.setField(field, value);
    }
    @java.lang.Override
    public Builder clearField(
        com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }
    @java.lang.Override
    public Builder clearOneof(
        com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }
    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }
    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }

    /** Dynamic-dispatch merge: routes to the typed overload when possible. */
    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof com.google.ads.googleads.v10.services.MutateSharedSetsResponse) {
        return mergeFrom((com.google.ads.googleads.v10.services.MutateSharedSetsResponse)other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    /** Field-by-field merge from another message of the same type; no-op for the default instance. */
    public Builder mergeFrom(com.google.ads.googleads.v10.services.MutateSharedSetsResponse other) {
      if (other == com.google.ads.googleads.v10.services.MutateSharedSetsResponse.getDefaultInstance()) return this;
      if (other.hasPartialFailureError()) {
        mergePartialFailureError(other.getPartialFailureError());
      }
      if (resultsBuilder_ == null) {
        if (!other.results_.isEmpty()) {
          if (results_.isEmpty()) {
            // Cheap path: adopt the other message's (immutable) list by reference.
            results_ = other.results_;
            bitField0_ = (bitField0_ & ~0x00000001);
          } else {
            ensureResultsIsMutable();
            results_.addAll(other.results_);
          }
          onChanged();
        }
      } else {
        if (!other.results_.isEmpty()) {
          if (resultsBuilder_.isEmpty()) {
            // Discard the empty builder and adopt the list directly; recreate the
            // builder lazily only if the runtime forces field builders.
            resultsBuilder_.dispose();
            resultsBuilder_ = null;
            results_ = other.results_;
            bitField0_ = (bitField0_ & ~0x00000001);
            resultsBuilder_ =
              com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                 getResultsFieldBuilder() : null;
          } else {
            resultsBuilder_.addAllMessages(other.results_);
          }
        }
      }
      this.mergeUnknownFields(other.unknownFields);
      onChanged();
      return this;
    }

    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }

    /** Parses from the wire and merges into this builder; partial data is kept even on error. */
    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      com.google.ads.googleads.v10.services.MutateSharedSetsResponse parsedMessage = null;
      try {
        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        parsedMessage = (com.google.ads.googleads.v10.services.MutateSharedSetsResponse) e.getUnfinishedMessage();
        throw e.unwrapIOException();
      } finally {
        if (parsedMessage != null) {
          mergeFrom(parsedMessage);
        }
      }
      return this;
    }

    // Bit 0x00000001: results_ is a private mutable copy (vs. a shared/immutable list).
    private int bitField0_;

    private com.google.rpc.Status partialFailureError_;
    private com.google.protobuf.SingleFieldBuilderV3<
        com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> partialFailureErrorBuilder_;
    /**
     * <pre>
     * Errors that pertain to operation failures in the partial failure mode.
     * Returned only when partial_failure = true and all errors occur inside the
     * operations. If any errors occur outside the operations (e.g. auth errors),
     * we return an RPC level error.
     * </pre>
     *
     * <code>.google.rpc.Status partial_failure_error = 3;</code>
     * @return Whether the partialFailureError field is set.
     */
    public boolean hasPartialFailureError() {
      return partialFailureErrorBuilder_ != null || partialFailureError_ != null;
    }
    /**
     * <code>.google.rpc.Status partial_failure_error = 3;</code>
     * @return The partialFailureError (default instance when unset).
     */
    public com.google.rpc.Status getPartialFailureError() {
      if (partialFailureErrorBuilder_ == null) {
        return partialFailureError_ == null ? com.google.rpc.Status.getDefaultInstance() : partialFailureError_;
      } else {
        return partialFailureErrorBuilder_.getMessage();
      }
    }
    /** Sets {@code partial_failure_error} (field 3); rejects null. */
    public Builder setPartialFailureError(com.google.rpc.Status value) {
      if (partialFailureErrorBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        partialFailureError_ = value;
        onChanged();
      } else {
        partialFailureErrorBuilder_.setMessage(value);
      }
      return this;
    }
    /** Sets {@code partial_failure_error} (field 3) from a builder. */
    public Builder setPartialFailureError(
        com.google.rpc.Status.Builder builderForValue) {
      if (partialFailureErrorBuilder_ == null) {
        partialFailureError_ = builderForValue.build();
        onChanged();
      } else {
        partialFailureErrorBuilder_.setMessage(builderForValue.build());
      }
      return this;
    }
    /** Merges {@code value} into {@code partial_failure_error} (field 3). */
    public Builder mergePartialFailureError(com.google.rpc.Status value) {
      if (partialFailureErrorBuilder_ == null) {
        if (partialFailureError_ != null) {
          partialFailureError_ =
            com.google.rpc.Status.newBuilder(partialFailureError_).mergeFrom(value).buildPartial();
        } else {
          partialFailureError_ = value;
        }
        onChanged();
      } else {
        partialFailureErrorBuilder_.mergeFrom(value);
      }
      return this;
    }
    /** Clears {@code partial_failure_error} (field 3). */
    public Builder clearPartialFailureError() {
      if (partialFailureErrorBuilder_ == null) {
        partialFailureError_ = null;
        onChanged();
      } else {
        partialFailureError_ = null;
        partialFailureErrorBuilder_ = null;
      }
      return this;
    }
    /** Returns a mutable builder for {@code partial_failure_error} (field 3). */
    public com.google.rpc.Status.Builder getPartialFailureErrorBuilder() {
      onChanged();
      return getPartialFailureErrorFieldBuilder().getBuilder();
    }
    /** Read-only view of {@code partial_failure_error} (field 3). */
    public com.google.rpc.StatusOrBuilder getPartialFailureErrorOrBuilder() {
      if (partialFailureErrorBuilder_ != null) {
        return partialFailureErrorBuilder_.getMessageOrBuilder();
      } else {
        return partialFailureError_ == null ?
            com.google.rpc.Status.getDefaultInstance() : partialFailureError_;
      }
    }
    // Lazily creates the single-field builder; once created, the plain field is nulled
    // and all access goes through the builder.
    private com.google.protobuf.SingleFieldBuilderV3<
        com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder>
        getPartialFailureErrorFieldBuilder() {
      if (partialFailureErrorBuilder_ == null) {
        partialFailureErrorBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
            com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder>(
                getPartialFailureError(),
                getParentForChildren(),
                isClean());
        partialFailureError_ = null;
      }
      return partialFailureErrorBuilder_;
    }

    private java.util.List<com.google.ads.googleads.v10.services.MutateSharedSetResult> results_ =
      java.util.Collections.emptyList();
    // Copy-on-write: only makes a private ArrayList copy the first time results_ is mutated.
    private void ensureResultsIsMutable() {
      if (!((bitField0_ & 0x00000001) != 0)) {
        results_ = new java.util.ArrayList<com.google.ads.googleads.v10.services.MutateSharedSetResult>(results_);
        bitField0_ |= 0x00000001;
      }
    }

    private com.google.protobuf.RepeatedFieldBuilderV3<
        com.google.ads.googleads.v10.services.MutateSharedSetResult, com.google.ads.googleads.v10.services.MutateSharedSetResult.Builder, com.google.ads.googleads.v10.services.MutateSharedSetResultOrBuilder> resultsBuilder_;

    /**
     * <pre>
     * All results for the mutate.
     * </pre>
     *
     * <code>repeated .google.ads.googleads.v10.services.MutateSharedSetResult results = 2;</code>
     */
    public java.util.List<com.google.ads.googleads.v10.services.MutateSharedSetResult> getResultsList() {
      if (resultsBuilder_ == null) {
        return java.util.Collections.unmodifiableList(results_);
      } else {
        return resultsBuilder_.getMessageList();
      }
    }
    /** <code>repeated MutateSharedSetResult results = 2;</code> — number of results. */
    public int getResultsCount() {
      if (resultsBuilder_ == null) {
        return results_.size();
      } else {
        return resultsBuilder_.getCount();
      }
    }
    /** <code>repeated MutateSharedSetResult results = 2;</code> — result at {@code index}. */
    public com.google.ads.googleads.v10.services.MutateSharedSetResult getResults(int index) {
      if (resultsBuilder_ == null) {
        return results_.get(index);
      } else {
        return resultsBuilder_.getMessage(index);
      }
    }
    /** Replaces the result at {@code index}; rejects null. */
    public Builder setResults(
        int index, com.google.ads.googleads.v10.services.MutateSharedSetResult value) {
      if (resultsBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureResultsIsMutable();
        results_.set(index, value);
        onChanged();
      } else {
        resultsBuilder_.setMessage(index, value);
      }
      return this;
    }
    /** Replaces the result at {@code index} from a builder. */
    public Builder setResults(
        int index, com.google.ads.googleads.v10.services.MutateSharedSetResult.Builder builderForValue) {
      if (resultsBuilder_ == null) {
        ensureResultsIsMutable();
        results_.set(index, builderForValue.build());
        onChanged();
      } else {
        resultsBuilder_.setMessage(index, builderForValue.build());
      }
      return this;
    }
    /** Appends one result; rejects null. */
    public Builder addResults(com.google.ads.googleads.v10.services.MutateSharedSetResult value) {
      if (resultsBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureResultsIsMutable();
        results_.add(value);
        onChanged();
      } else {
        resultsBuilder_.addMessage(value);
      }
      return this;
    }
    /** Inserts a result at {@code index}; rejects null. */
    public Builder addResults(
        int index, com.google.ads.googleads.v10.services.MutateSharedSetResult value) {
      if (resultsBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureResultsIsMutable();
        results_.add(index, value);
        onChanged();
      } else {
        resultsBuilder_.addMessage(index, value);
      }
      return this;
    }
    /** Appends one result built from {@code builderForValue}. */
    public Builder addResults(
        com.google.ads.googleads.v10.services.MutateSharedSetResult.Builder builderForValue) {
      if (resultsBuilder_ == null) {
        ensureResultsIsMutable();
        results_.add(builderForValue.build());
        onChanged();
      } else {
        resultsBuilder_.addMessage(builderForValue.build());
      }
      return this;
    }
    /** Inserts a result at {@code index} built from {@code builderForValue}. */
    public Builder addResults(
        int index, com.google.ads.googleads.v10.services.MutateSharedSetResult.Builder builderForValue) {
      if (resultsBuilder_ == null) {
        ensureResultsIsMutable();
        results_.add(index, builderForValue.build());
        onChanged();
      } else {
        resultsBuilder_.addMessage(index, builderForValue.build());
      }
      return this;
    }
    /** Appends all of {@code values}. */
    public Builder addAllResults(
        java.lang.Iterable<? extends com.google.ads.googleads.v10.services.MutateSharedSetResult> values) {
      if (resultsBuilder_ == null) {
        ensureResultsIsMutable();
        com.google.protobuf.AbstractMessageLite.Builder.addAll(
            values, results_);
        onChanged();
      } else {
        resultsBuilder_.addAllMessages(values);
      }
      return this;
    }
    /** Clears the {@code results} field (field 2). */
    public Builder clearResults() {
      if (resultsBuilder_ == null) {
        results_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
      } else {
        resultsBuilder_.clear();
      }
      return this;
    }
    /** Removes the result at {@code index}. */
    public Builder removeResults(int index) {
      if (resultsBuilder_ == null) {
        ensureResultsIsMutable();
        results_.remove(index);
        onChanged();
      } else {
        resultsBuilder_.remove(index);
      }
      return this;
    }
    /** Mutable nested builder for the result at {@code index} (forces the field builder). */
    public com.google.ads.googleads.v10.services.MutateSharedSetResult.Builder getResultsBuilder(
        int index) {
      return getResultsFieldBuilder().getBuilder(index);
    }
    /** Read-only view of the result at {@code index}. */
    public com.google.ads.googleads.v10.services.MutateSharedSetResultOrBuilder getResultsOrBuilder(
        int index) {
      if (resultsBuilder_ == null) {
        return results_.get(index);
      } else {
        return resultsBuilder_.getMessageOrBuilder(index);
      }
    }
    /** Read-only views of all results. */
    public java.util.List<? extends com.google.ads.googleads.v10.services.MutateSharedSetResultOrBuilder>
         getResultsOrBuilderList() {
      if (resultsBuilder_ != null) {
        return resultsBuilder_.getMessageOrBuilderList();
      } else {
        return java.util.Collections.unmodifiableList(results_);
      }
    }
    /** Appends and returns a new nested builder initialized to the default instance. */
    public com.google.ads.googleads.v10.services.MutateSharedSetResult.Builder addResultsBuilder() {
      return getResultsFieldBuilder().addBuilder(
          com.google.ads.googleads.v10.services.MutateSharedSetResult.getDefaultInstance());
    }
    /** Inserts and returns a new nested builder at {@code index}. */
    public com.google.ads.googleads.v10.services.MutateSharedSetResult.Builder addResultsBuilder(
        int index) {
      return getResultsFieldBuilder().addBuilder(
          index, com.google.ads.googleads.v10.services.MutateSharedSetResult.getDefaultInstance());
    }
    /** Nested builders for all results. */
    public java.util.List<com.google.ads.googleads.v10.services.MutateSharedSetResult.Builder>
         getResultsBuilderList() {
      return getResultsFieldBuilder().getBuilderList();
    }
    // Lazily creates the repeated-field builder; afterwards results_ is managed by it.
    private com.google.protobuf.RepeatedFieldBuilderV3<
        com.google.ads.googleads.v10.services.MutateSharedSetResult, com.google.ads.googleads.v10.services.MutateSharedSetResult.Builder, com.google.ads.googleads.v10.services.MutateSharedSetResultOrBuilder>
        getResultsFieldBuilder() {
      if (resultsBuilder_ == null) {
        resultsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<
            com.google.ads.googleads.v10.services.MutateSharedSetResult, com.google.ads.googleads.v10.services.MutateSharedSetResult.Builder, com.google.ads.googleads.v10.services.MutateSharedSetResultOrBuilder>(
                results_,
                ((bitField0_ & 0x00000001) != 0),
                getParentForChildren(),
                isClean());
        results_ = null;
      }
      return resultsBuilder_;
    }
    @java.lang.Override
    public final Builder setUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }


    // @@protoc_insertion_point(builder_scope:google.ads.googleads.v10.services.MutateSharedSetsResponse)
  }

  // @@protoc_insertion_point(class_scope:google.ads.googleads.v10.services.MutateSharedSetsResponse)
  private static final com.google.ads.googleads.v10.services.MutateSharedSetsResponse DEFAULT_INSTANCE;
  static {
    DEFAULT_INSTANCE = new com.google.ads.googleads.v10.services.MutateSharedSetsResponse();
  }

  public static com.google.ads.googleads.v10.services.MutateSharedSetsResponse getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<MutateSharedSetsResponse>
      PARSER = new com.google.protobuf.AbstractParser<MutateSharedSetsResponse>() {
    // Parser implementation: delegates to the wire-format parsing constructor of
    // the enclosing generated message (standard protobuf-java codegen pattern).
    @java.lang.Override
    public MutateSharedSetsResponse parsePartialFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return new MutateSharedSetsResponse(input, extensionRegistry);
    }
  };

  public static com.google.protobuf.Parser<MutateSharedSetsResponse> parser() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.protobuf.Parser<MutateSharedSetsResponse> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.ads.googleads.v10.services.MutateSharedSetsResponse getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }

}
/* * Copyright 2000-2015 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.ui; import com.intellij.icons.AllIcons; import com.intellij.ide.CopyProvider; import com.intellij.ide.DataManager; import com.intellij.openapi.Disposable; import com.intellij.openapi.actionSystem.*; import com.intellij.openapi.application.ApplicationManager; import com.intellij.openapi.ide.CopyPasteManager; import com.intellij.openapi.project.DumbService; import com.intellij.openapi.project.IndexNotReadyException; import com.intellij.openapi.project.Project; import com.intellij.openapi.util.Computable; import com.intellij.openapi.util.Disposer; import com.intellij.openapi.vfs.VirtualFile; import com.intellij.openapi.wm.IdeFocusManager; import com.intellij.pom.Navigatable; import com.intellij.psi.PsiElement; import com.intellij.ui.components.JBLabel; import com.intellij.ui.components.JBList; import com.intellij.ui.speedSearch.ListWithFilter; import com.intellij.util.ArrayUtil; import com.intellij.util.Function; import com.intellij.util.ui.UIUtil; import com.intellij.util.ui.update.MergingUpdateQueue; import com.intellij.util.ui.update.Update; import org.jetbrains.annotations.NonNls; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import javax.swing.*; import javax.swing.event.ListSelectionEvent; import javax.swing.event.ListSelectionListener; import java.awt.*; import java.awt.datatransfer.StringSelection; import 
java.awt.event.KeyEvent;
import java.awt.event.MouseEvent;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * A "Finder"-style (macOS column view) recursive master/detail panel: the left
 * side is a list of items, the right side shows either a default placeholder or
 * another {@code FinderRecursivePanel} for the selected item's children.
 *
 * @param <T> List item type. Must implement {@code equals()/hashCode()} correctly.
 * @since 13.0
 */
public abstract class FinderRecursivePanel<T> extends JBSplitter implements DataProvider, Disposable {

  @NotNull
  private final Project myProject;
  @Nullable
  private final String myGroupId;  // used to persist the splitter proportion per panel chain
  @Nullable
  private final FinderRecursivePanel myParent;  // null for the root panel
  @Nullable
  private JComponent myChild = null;  // current right-hand component (may itself be a FinderRecursivePanel)

  protected JBList myList;
  protected final CollectionListModel<T> myListModel = new CollectionListModel<T>();

  // Coalesces rapid updatePanel() calls into one update per 100ms.
  private final MergingUpdateQueue myMergingUpdateQueue = new MergingUpdateQueue("FinderRecursivePanel", 100, true, this, this);
  // Guards against selection-listener re-entrancy while mergeListItems() rewrites the model.
  private volatile boolean isMergeListItemsRunning;
  // True while updateSelectedPath() drives selection programmatically.
  private final AtomicBoolean myUpdateSelectedPathModeActive = new AtomicBoolean();

  private final CopyProvider myCopyProvider = new CopyProvider() {
    @Override
    public void performCopy(@NotNull DataContext dataContext) {
      // NOTE(review): selection could become null between isCopyEnabled() and here;
      // getItemText(null) would then be passed to the abstract method — confirm callers tolerate this.
      final T value = getSelectedValue();
      CopyPasteManager.getInstance().setContents(new StringSelection(getItemText(value)));
    }

    @Override
    public boolean isCopyEnabled(@NotNull DataContext dataContext) {
      return getSelectedValue() != null;
    }

    @Override
    public boolean isCopyVisible(@NotNull DataContext dataContext) {
      return false;
    }
  };

  /** Creates a child panel chained to {@code parent} (shares project and group id). */
  protected FinderRecursivePanel(@NotNull FinderRecursivePanel parent) {
    this(parent.getProject(), parent, parent.getGroupId());
  }

  /** Creates a root panel. */
  protected FinderRecursivePanel(@NotNull Project project, @Nullable String groupId) {
    this(project, null, groupId);
  }

  protected FinderRecursivePanel(@NotNull Project project,
                                 @Nullable FinderRecursivePanel parent,
                                 @Nullable String groupId) {
    super(false, 0f);  // vertical=false, proportion 0: left column collapsed until loaded
    myProject = project;
    myParent = parent;
    myGroupId = groupId;

    // Tie this panel's lifecycle to its parent so the whole chain is disposed together.
    if (myParent != null) {
      Disposer.register(myParent, this);
    }
  }

  /** Builds the UI and triggers the initial (asynchronous) list load. */
  public void init() {
    initWithoutUpdatePanel();
    updatePanel();
  }

  // Builds the UI only; used by setSelectedValue() which loads items synchronously itself.
  private void initWithoutUpdatePanel() {
    setFirstComponent(createLeftComponent());
    setSecondComponent(createDefaultRightComponent());

    if (getGroupId() != null) {
      // Persist splitter proportion per position in the panel chain.
      setAndLoadSplitterProportionKey(getGroupId() + "[" + getIndex() + "]");
    }
    setDividerWidth(3);
    setShowDividerIcon(false);
    setShowDividerControls(true);
  }

  /**
   * Called in read action.
   *
   * @return Items for list.
   */
  @NotNull
  protected abstract List<T> getListItems();

  protected String getListEmptyText() {
    return "No entries";
  }

  @NotNull
  protected abstract String getItemText(T t);

  @Nullable
  protected Icon getItemIcon(T t) {
    return null;
  }

  /** Whether selecting {@code t} should open another child panel to the right. */
  protected abstract boolean hasChildren(T t);

  /**
   * To determine item list background color (if enabled).
   *
   * @param t Current item.
   * @return Containing file.
   */
  @Nullable
  protected VirtualFile getContainingFile(T t) {
    return null;
  }

  protected boolean isEditable() {
    return getSelectedValue() != null;
  }

  /** Right-hand component for a selected item; override to show details or a child panel. */
  @Nullable
  protected JComponent createRightComponent(T t) {
    return new JPanel();
  }

  /** Placeholder shown when nothing is selected. */
  @Nullable
  protected JComponent createDefaultRightComponent() {
    final JBLabel label = new JBLabel("Nothing selected", SwingConstants.CENTER);
    label.setFontColor(UIUtil.FontColor.BRIGHTER);
    return label;
  }

  protected JComponent createLeftComponent() {
    myList = createList();

    final JScrollPane pane =
      ScrollPaneFactory.createScrollPane(myList,
                                         ScrollPaneConstants.VERTICAL_SCROLLBAR_AS_NEEDED,
                                         ScrollPaneConstants.HORIZONTAL_SCROLLBAR_NEVER);
    return ListWithFilter.wrap(myList, pane, new Function<T, String>() {
      @Override
      public String fun(T o) {
        return getItemText(o);
      }
    });
  }

  protected JBList createList() {
    final JBList list = new JBList(myListModel);
    list.setSelectionMode(ListSelectionModel.SINGLE_SELECTION);
    list.setEmptyText(getListEmptyText());
    list.setCellRenderer(createListCellRenderer());
    installListActions(list);

    list.addListSelectionListener(new ListSelectionListener() {
      @Override
      public void valueChanged(ListSelectionEvent event) {
        if (event.getValueIsAdjusting()) return;
        // Ignore selection events fired while the model is being rewritten or
        // while updateSelectedPath() drives selection programmatically.
        if (isMergeListItemsRunning()) return;
        if (myUpdateSelectedPathModeActive.get()) return;
        updateRightComponent(true);
      }
    });
    ScrollingUtil.installActions(list);
//    installSpeedSearch(list); // TODO
    installEditOnDoubleClick(list);
    return list;
  }

  // Left-arrow: move focus back to this (parent) panel's list.
  private void handleGotoPrevious() {
    IdeFocusManager.getInstance(myProject).requestFocus(myList, true);
  }

  // Right-arrow target: select the first item if nothing is selected, then take focus.
  private void handleGotoNext() {
    if (!myList.isEmpty()) {
      if (myList.getSelectedValue() == null) {
        myList.setSelectedIndex(0);
        updateRightComponent(true);
      }
    }
    IdeFocusManager.getInstance(myProject).requestFocus(myList, true);
  }

  // Installs keyboard navigation (left/right/enter) and the context-menu action group.
  private void installListActions(JBList list) {
    AnAction previousPanelAction = new AnAction("Previous", null, AllIcons.Actions.Back) {
      @Override
      public void update(AnActionEvent e) {
        e.getPresentation().setEnabled(!isRootPanel());
      }

      @Override
      public void actionPerformed(AnActionEvent e) {
        // Only enabled for non-root panels, so myParent must be non-null here.
        assert myParent != null;
        myParent.handleGotoPrevious();
      }
    };
    previousPanelAction.registerCustomShortcutSet(KeyEvent.VK_LEFT, 0, list);

    AnAction nextPanelAction = new AnAction("Next", null, AllIcons.Actions.Forward) {
      @Override
      public void update(AnActionEvent e) {
        final T value = getSelectedValue();
        e.getPresentation().setEnabled(value != null &&
                                       hasChildren(value) &&
                                       getSecondComponent() instanceof FinderRecursivePanel);
      }

      @Override
      public void actionPerformed(AnActionEvent e) {
        FinderRecursivePanel finderRecursivePanel = (FinderRecursivePanel)getSecondComponent();
        finderRecursivePanel.handleGotoNext();
      }
    };
    nextPanelAction.registerCustomShortcutSet(KeyEvent.VK_RIGHT, 0, list);

    AnAction editAction = new AnAction("Edit", null, AllIcons.Actions.Edit) {
      @Override
      public void update(AnActionEvent e) {
        e.getPresentation().setEnabled(isEditable());
      }

      @Override
      public void actionPerformed(AnActionEvent e) {
        performEditAction();
      }
    };
    editAction.registerCustomShortcutSet(CommonShortcuts.ENTER, list);

    AnAction[] actions = new AnAction[]{
      previousPanelAction, nextPanelAction, Separator.getInstance(), editAction};
    final AnAction[] customActions = getCustomListActions();
    if (customActions.length > 0) {
      actions = ArrayUtil.append(actions, Separator.getInstance());
      actions = ArrayUtil.mergeArrays(actions, customActions);
    }

    ActionGroup contextActionGroup = new DefaultActionGroup(actions);
    PopupHandler.installUnknownPopupHandler(list, contextActionGroup, ActionManager.getInstance());
  }

  /** Extension point: extra actions appended (after a separator) to the list context menu. */
  protected AnAction[] getCustomListActions() {
    return AnAction.EMPTY_ARRAY;
  }

  // Currently unused (see TODO in createList()).
  private void installSpeedSearch(JBList list) {
    final ListSpeedSearch search = new ListSpeedSearch(list, new Function<Object, String>() {
      @Override
      public String fun(Object o) {
        //noinspection unchecked
        return getItemText((T)o);
      }
    });
    search.setComparator(new SpeedSearchComparator(false));
  }

  private void installEditOnDoubleClick(JBList list) {
    new DoubleClickListener() {
      @Override
      protected boolean onDoubleClick(MouseEvent event) {
        performEditAction();
        return true;
      }
    }.installOn(list);
  }

  /**
   * Navigates to the selected item if it is navigatable.
   * NOTE(review): always returns {@code false}, even after a successful
   * navigate(true) — looks like it should return true in that branch; confirm
   * against callers before changing.
   */
  protected boolean performEditAction() {
    Navigatable data = CommonDataKeys.NAVIGATABLE.getData(DataManager.getInstance().getDataContext(myList));
    if (data != null && data.canNavigate()) {
      data.navigate(true);
    }
    return false;
  }

  // Renders each item with icon/text, file-color background, and a ">" arrow when it has children.
  protected ListCellRenderer createListCellRenderer() {
    return new ColoredListCellRenderer() {

      private final FileColorManager myFileColorManager = FileColorManager.getInstance(getProject());

      public Component getListCellRendererComponent(JList list,
                                                    Object value,
                                                    int index,
                                                    boolean isSelected,
                                                    boolean cellHasFocus) {
        mySelected = isSelected;
        myForeground = UIUtil.getTreeTextForeground();
        mySelectionForeground = cellHasFocus ? list.getSelectionForeground() : UIUtil.getTreeTextForeground();

        clear();
        setFont(UIUtil.getListFont());

        //noinspection unchecked
        final T t = (T)value;
        setIcon(getItemIcon(t));
        try {
          append(getItemText(t));
        }
        catch (IndexNotReadyException e) {
          // Indexing in progress — show a placeholder instead of failing the paint.
          append("loading...");
        }
        doCustomizeCellRenderer(this, list, t, index, isSelected, cellHasFocus);

        Color bg = isSelected ? UIUtil.getTreeSelectionBackground(cellHasFocus) : UIUtil.getTreeTextBackground();
        if (!isSelected && myFileColorManager.isEnabled()) {
          final Color fileBgColor = myFileColorManager.getRendererBackground(getContainingFile(t));
          bg = fileBgColor == null ? bg : fileBgColor;
        }
        setBackground(bg);

        if (hasChildren(t)) {
          // Wrap the renderer so a "next step" arrow appears at the right edge.
          JPanel result = new JPanel(new BorderLayout(0, 0));
          JLabel childrenLabel = new JLabel();
          childrenLabel.setOpaque(true);
          childrenLabel.setVisible(true);
          childrenLabel.setBackground(bg);

          final boolean isDark = ColorUtil.isDark(UIUtil.getListSelectionBackground());
          childrenLabel.setIcon(isSelected ? isDark ? AllIcons.Icons.Ide.NextStepInverted
                                                    : AllIcons.Icons.Ide.NextStep
                                           : AllIcons.Icons.Ide.NextStepGrayed);
          result.add(this, BorderLayout.CENTER);
          result.add(childrenLabel, BorderLayout.EAST);
          return result;
        }

        return this;
      }

      // Sealed: all customization goes through getListCellRendererComponent above.
      @Override
      protected final void customizeCellRenderer(JList list,
                                                 Object value,
                                                 int index,
                                                 boolean selected,
                                                 boolean hasFocus) {
      }
    };
  }

  /** Hook for subclasses to append extra fragments to the rendered cell. */
  protected void doCustomizeCellRenderer(SimpleColoredComponent comp,
                                         JList list,
                                         T value,
                                         int index,
                                         boolean selected,
                                         boolean hasFocus) {
  }

  @Nullable
  @Override
  public Object getData(@NonNls String dataId) {
    Object selectedValue = getSelectedValue();
    if (selectedValue == null) return null;

    if (CommonDataKeys.PSI_ELEMENT.is(dataId) && selectedValue instanceof PsiElement) {
      return selectedValue;
    }
    if (CommonDataKeys.NAVIGATABLE.is(dataId) && selectedValue instanceof Navigatable) {
      return selectedValue;
    }
    // NOTE(review): a DataProvider item shadows the COPY_PROVIDER branch below
    // even for dataIds it does not handle — confirm this ordering is intended.
    if (selectedValue instanceof DataProvider) {
      return ((DataProvider)selectedValue).getData(dataId);
    }
    if (PlatformDataKeys.COPY_PROVIDER.is(dataId)) {
      return myCopyProvider;
    }
    return null;
  }

  @Override
  public void dispose() {
    super.dispose();
    myMergingUpdateQueue.cancelAllUpdates();
  }

  @SuppressWarnings("unchecked")
  @Nullable
  public T getSelectedValue() {
    return (T)myList.getSelectedValue();
  }

  /**
   * Performs recursive update selecting given values.
   *
   * @param pathToSelect Values to select.
* @since 14 */ public void updateSelectedPath(Object... pathToSelect) { if (!myUpdateSelectedPathModeActive.compareAndSet(false, true)) return; FinderRecursivePanel panel = this; for (int i = 0; i < pathToSelect.length; i++) { Object selectedValue = pathToSelect[i]; panel.setSelectedValue(selectedValue); if (i < pathToSelect.length - 1) { final JComponent component = panel.getSecondComponent(); assert component instanceof FinderRecursivePanel : Arrays.toString(pathToSelect); panel = (FinderRecursivePanel)component; } } IdeFocusManager.getInstance(myProject).requestFocus(panel.myList, true); myUpdateSelectedPathModeActive.set(false); } private void setSelectedValue(final Object value) { if (value.equals(myList.getSelectedValue())) { return; } // load list items synchronously myList.setPaintBusy(true); try { final List<T> listItems = ApplicationManager.getApplication().runReadAction(new Computable<List<T>>() { @Override public List<T> compute() { return getListItems(); } }); mergeListItems(myListModel, myList, listItems); } finally { myList.setPaintBusy(false); } myList.setSelectedValue(value, true); // always recreate since instance might depend on this one's selected value createRightComponent(false); } @NotNull public Project getProject() { return myProject; } @Nullable public FinderRecursivePanel getParentPanel() { return myParent; } @Nullable protected String getGroupId() { return myGroupId; } public void updatePanel() { if (myUpdateSelectedPathModeActive.get()) { return; } myList.setPaintBusy(true); myMergingUpdateQueue.queue(new Update("update") { @Override public void run() { final T oldValue = getSelectedValue(); final int oldIndex = myList.getSelectedIndex(); ApplicationManager.getApplication().executeOnPooledThread(new Runnable() { @Override public void run() { DumbService.getInstance(getProject()).runReadActionInSmartMode(new Runnable() { @Override public void run() { try { final List<T> listItems = getListItems(); SwingUtilities.invokeLater(new 
Runnable() { @Override public void run() { mergeListItems(myListModel, myList, listItems); if (myList.isEmpty()) { createRightComponent(true); } else if (myList.getSelectedIndex() < 0) { myList.setSelectedIndex(myListModel.getSize() > oldIndex ? oldIndex : 0); } else { Object newValue = myList.getSelectedValue(); updateRightComponent(oldValue == null || !oldValue.equals(newValue) || myList.isEmpty()); } } }); } finally { myList.setPaintBusy(false); } } }); } }); } }); } protected void mergeListItems(@NotNull CollectionListModel<T> listModel, @NotNull JList list, @NotNull List<T> newItems) { setMergeListItemsRunning(true); try { if (listModel.getSize() == 0) { listModel.add(newItems); } else if (newItems.size() == 0) { listModel.removeAll(); } else { int newSelectedIndex = -1; T selection = (T)list.getSelectedValue(); if (selection != null) { newSelectedIndex = newItems.indexOf(selection); } listModel.removeAll(); listModel.add(newItems); list.setSelectedIndex(newSelectedIndex); } } finally { setMergeListItemsRunning(false); } } public boolean isMergeListItemsRunning() { return isMergeListItemsRunning; } protected void setMergeListItemsRunning(boolean isListMergeRunning) { this.isMergeListItemsRunning = isListMergeRunning; } public void updateRightComponent(boolean force) { if (force) { createRightComponent(true); } else if (myChild instanceof FinderRecursivePanel) { ((FinderRecursivePanel)myChild).updatePanel(); } } private void createRightComponent(boolean withUpdatePanel) { if (myChild instanceof Disposable) { Disposer.dispose((Disposable)myChild); } T value = getSelectedValue(); if (value != null) { myChild = createRightComponent(value); if (myChild instanceof FinderRecursivePanel) { final FinderRecursivePanel childPanel = (FinderRecursivePanel)myChild; if (withUpdatePanel) { childPanel.init(); } else { childPanel.initWithoutUpdatePanel(); } } } else { myChild = createDefaultRightComponent(); } setSecondComponent(myChild); } private int getIndex() { int index = 
0; FinderRecursivePanel parent = myParent; while (parent != null) { index++; parent = parent.getParentPanel(); } return index; } protected boolean isRootPanel() { return getParentPanel() == null; } @Override public void doLayout() { if (myProportion == 0) { int total = getOrientation() ? getHeight() : getWidth(); float proportion = (float)getFirstComponentPreferredSize() / (total - getDividerWidth()); if (proportion > .0f && proportion < 1.0f) { setProportion(proportion); } } super.doLayout(); } protected int getFirstComponentPreferredSize() { return 200; } }
/*
 * Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.cloudformation.model;

import java.io.Serializable;
import java.util.Objects;

import javax.annotation.Generated;

import com.amazonaws.AmazonWebServiceRequest;

/**
 * <p>
 * The input for the <a>GetTemplateSummary</a> action.
 * </p>
 * <p>
 * Exactly one of {@code StackName}, {@code TemplateBody}, or {@code TemplateURL} must be specified.
 * </p>
 *
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/GetTemplateSummary" target="_top">AWS
 *      API Documentation</a>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class GetTemplateSummaryRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {

    /**
     * Structure containing the template body, minimum length 1 byte, maximum length 51,200 bytes. See <a
     * href="http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html">Template Anatomy</a>
     * in the AWS CloudFormation User Guide. Mutually exclusive with {@code StackName} and {@code TemplateURL}.
     */
    private String templateBody;

    /**
     * URL of a template file (max size: 460,800 bytes) located in an Amazon S3 bucket. See <a
     * href="http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html">Template Anatomy</a>
     * in the AWS CloudFormation User Guide. Mutually exclusive with {@code StackName} and {@code TemplateBody}.
     */
    private String templateURL;

    /**
     * The name or the stack ID associated with the stack. For running stacks either form works; for deleted stacks the
     * unique stack ID is required. Mutually exclusive with {@code TemplateBody} and {@code TemplateURL}.
     */
    private String stackName;

    /**
     * Sets the template body (1 to 51,200 bytes). Mutually exclusive with {@code StackName} and {@code TemplateURL}.
     *
     * @param templateBody
     *        Structure containing the template body.
     */
    public void setTemplateBody(String templateBody) {
        this.templateBody = templateBody;
    }

    /**
     * Returns the template body, or {@code null} if it was not set.
     *
     * @return Structure containing the template body.
     */
    public String getTemplateBody() {
        return this.templateBody;
    }

    /**
     * Fluent variant of {@link #setTemplateBody(String)}.
     *
     * @param templateBody
     *        Structure containing the template body.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public GetTemplateSummaryRequest withTemplateBody(String templateBody) {
        setTemplateBody(templateBody);
        return this;
    }

    /**
     * Sets the S3 URL of the template file (max size: 460,800 bytes). Mutually exclusive with {@code StackName} and
     * {@code TemplateBody}.
     *
     * @param templateURL
     *        Location of the file containing the template body.
     */
    public void setTemplateURL(String templateURL) {
        this.templateURL = templateURL;
    }

    /**
     * Returns the template URL, or {@code null} if it was not set.
     *
     * @return Location of the file containing the template body.
     */
    public String getTemplateURL() {
        return this.templateURL;
    }

    /**
     * Fluent variant of {@link #setTemplateURL(String)}.
     *
     * @param templateURL
     *        Location of the file containing the template body.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public GetTemplateSummaryRequest withTemplateURL(String templateURL) {
        setTemplateURL(templateURL);
        return this;
    }

    /**
     * Sets the stack name or unique stack ID. Mutually exclusive with {@code TemplateBody} and {@code TemplateURL}.
     *
     * @param stackName
     *        The name or the stack ID associated with the stack.
     */
    public void setStackName(String stackName) {
        this.stackName = stackName;
    }

    /**
     * Returns the stack name or stack ID, or {@code null} if it was not set.
     *
     * @return The name or the stack ID associated with the stack.
     */
    public String getStackName() {
        return this.stackName;
    }

    /**
     * Fluent variant of {@link #setStackName(String)}.
     *
     * @param stackName
     *        The name or the stack ID associated with the stack.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public GetTemplateSummaryRequest withStackName(String stackName) {
        setStackName(stackName);
        return this;
    }

    /**
     * Returns a string representation of this object; useful for testing and debugging.
     *
     * @return A string representation of this object.
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        // Output format is kept identical to the generated original.
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getTemplateBody() != null)
            sb.append("TemplateBody: ").append(getTemplateBody()).append(",");
        if (getTemplateURL() != null)
            sb.append("TemplateURL: ").append(getTemplateURL()).append(",");
        if (getStackName() != null)
            sb.append("StackName: ").append(getStackName());
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        // instanceof is false for null, so this also handles obj == null.
        if (!(obj instanceof GetTemplateSummaryRequest))
            return false;
        GetTemplateSummaryRequest other = (GetTemplateSummaryRequest) obj;
        // Objects.equals collapses the generated null-XOR + equals checks.
        return Objects.equals(getTemplateBody(), other.getTemplateBody())
                && Objects.equals(getTemplateURL(), other.getTemplateURL())
                && Objects.equals(getStackName(), other.getStackName());
    }

    @Override
    public int hashCode() {
        // Objects.hash computes 31 * (31 * (31 * 1 + h1) + h2) + h3 with 0 for
        // nulls — bit-identical to the generated prime-accumulator version.
        return Objects.hash(getTemplateBody(), getTemplateURL(), getStackName());
    }

    @Override
    public GetTemplateSummaryRequest clone() {
        return (GetTemplateSummaryRequest) super.clone();
    }

}
/* * Copyright (c) WSO2 Inc. (http://www.wso2.org) All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.wso2.developerstudio.eclipse.gmf.esb.diagram.custom.provider; import static org.wso2.developerstudio.eclipse.platform.core.registry.util.Constants.REGISTRY_RESOURCE; import java.io.File; import java.io.IOException; import java.util.Map; import org.apache.maven.model.Plugin; import org.apache.maven.model.PluginExecution; import org.apache.maven.model.Repository; import org.apache.maven.project.MavenProject; import org.codehaus.plexus.util.xml.Xpp3Dom; import org.eclipse.core.resources.IProject; import org.eclipse.core.resources.IResource; import org.eclipse.core.resources.ResourcesPlugin; import org.eclipse.core.runtime.CoreException; import org.eclipse.core.runtime.IPath; import org.eclipse.core.runtime.NullProgressMonitor; import org.eclipse.jface.dialogs.Dialog; import org.eclipse.jface.dialogs.IDialogConstants; import org.eclipse.jface.dialogs.MessageDialog; import org.eclipse.swt.SWT; import org.eclipse.swt.events.ModifyEvent; import org.eclipse.swt.events.ModifyListener; import org.eclipse.swt.events.SelectionAdapter; import org.eclipse.swt.events.SelectionEvent; import org.eclipse.swt.graphics.Point; import org.eclipse.swt.layout.FormAttachment; import org.eclipse.swt.layout.FormData; import org.eclipse.swt.layout.FormLayout; import org.eclipse.swt.widgets.Button; import org.eclipse.swt.widgets.Combo; import org.eclipse.swt.widgets.Composite; 
import org.eclipse.swt.widgets.Control; import org.eclipse.swt.widgets.Event; import org.eclipse.swt.widgets.Label; import org.eclipse.swt.widgets.Link; import org.eclipse.swt.widgets.List; import org.eclipse.swt.widgets.Listener; import org.eclipse.swt.widgets.Shell; import org.eclipse.swt.widgets.Text; import org.eclipse.ui.PlatformUI; import org.wso2.developerstudio.eclipse.esb.core.ESBMavenConstants; import org.wso2.developerstudio.eclipse.capp.maven.utils.MavenConstants; import org.wso2.developerstudio.eclipse.esb.core.utils.EsbTemplateFormatter; import org.wso2.developerstudio.eclipse.general.project.artifact.GeneralProjectArtifact; import org.wso2.developerstudio.eclipse.general.project.artifact.RegistryArtifact; import org.wso2.developerstudio.eclipse.general.project.utils.GeneralProjectUtils; import org.wso2.developerstudio.eclipse.general.project.artifact.GeneralProjectArtifact; import org.wso2.developerstudio.eclipse.general.project.artifact.RegistryArtifact; import org.wso2.developerstudio.eclipse.general.project.artifact.bean.RegistryElement; import org.wso2.developerstudio.eclipse.general.project.artifact.bean.RegistryItem; import org.wso2.developerstudio.eclipse.general.project.utils.GeneralProjectUtils; import org.wso2.developerstudio.eclipse.logging.core.IDeveloperStudioLog; import org.wso2.developerstudio.eclipse.logging.core.Logger; import org.wso2.developerstudio.eclipse.maven.util.MavenUtils; import org.wso2.developerstudio.eclipse.platform.core.registry.util.RegistryResourceInfo; import org.wso2.developerstudio.eclipse.platform.core.registry.util.RegistryResourceInfoDoc; import org.wso2.developerstudio.eclipse.platform.core.registry.util.RegistryResourceUtils; import org.wso2.developerstudio.eclipse.platform.core.templates.ArtifactTemplate; import org.wso2.developerstudio.eclipse.platform.core.templates.ArtifactTemplateHandler; import org.wso2.developerstudio.eclipse.platform.core.utils.Constants; import 
org.wso2.developerstudio.eclipse.utils.data.ITemporaryFileTag;
import org.wso2.developerstudio.eclipse.utils.file.FileUtils;

/**
 * Dialog for creating a new registry resource from a template inside a
 * general (registry) project: the user picks a target project, a file name,
 * a registry path and a template; OK materializes the resource and registers
 * it in the project's artifact.xml.
 *
 * NOTE(review): this excerpt is cut off at both ends — the preceding line
 * holds the start of the first import above, and the class continues past
 * the end of this chunk (addGeneralProjectPlugin is incomplete here).
 */
public class NewResourceTemplateDialog extends Dialog {

  private static IDeveloperStudioLog log = Logger
      .getLog("org.wso2.developerstudio.eclipse.esb.editor");

  // Widgets and state backing the dialog fields.
  private Text txtResourceName;
  private Text txtRegistryPath;
  private ArtifactTemplate[] artifactTemplates;
  private List lstTemplates;                 // SWT List of template names
  private int emptyXmlFileTemplate;          // index of the "Empty XML file" entry
  private int emptyTextFileTemplate;         // index of the "Empty text file" entry
  private String selectedPath;               // registry path of the created resource
  private String ipathOfSelection;           // workspace path of the created resource
  private Map<String, java.util.List<String>> filters;
  private Combo cmbProject;
  private Button btnOpenResourceOnce;

  /**
   * Create the dialog.
   *
   * @param parentShell parent SWT shell.
   * @param filters template filters passed to ArtifactTemplateHandler.
   */
  public NewResourceTemplateDialog(Shell parentShell,
      Map<String, java.util.List<String>> filters) {
    super(parentShell);
    setFilters(filters);
  }

  /**
   * Create contents of the dialog.
   *
   * Builds the FormLayout UI. Note: several FormData objects are mutated
   * again after later widgets are created (e.g. fd_lblArtifactName.right,
   * fd_cmbProject.right, fd_list.top) — the statement order here is
   * significant and must not be rearranged.
   *
   * @param parent parent composite.
   */
  @Override
  protected Control createDialogArea(Composite parent) {
    Composite container = (Composite) super.createDialogArea(parent);
    container.setLayout(new FormLayout());
    Label lblProject = new Label(container, SWT.NONE);
    FormData fd_lblProject = new FormData();
    fd_lblProject.top = new FormAttachment(0, 31);
    fd_lblProject.left = new FormAttachment(0, 10);
    lblProject.setLayoutData(fd_lblProject);
    lblProject.setText("Project");
    cmbProject = new Combo(container, SWT.READ_ONLY);
    cmbProject.addModifyListener(new ModifyListener() {
      public void modifyText(ModifyEvent arg0) {
        validate();
      }
    });
    FormData fd_cmbProject = new FormData();
    fd_cmbProject.left = new FormAttachment(lblProject, 16);
    fd_cmbProject.top = new FormAttachment(lblProject, -5, SWT.TOP);
    cmbProject.setLayoutData(fd_cmbProject);
    // Link that lets the user create a new general project on the fly and
    // auto-selects it in the combo.
    Link linkButton = new Link(container, SWT.NULL);
    linkButton.setText("<a>Create New Project</a>");
    FormData fd_linkButton = new FormData();
    fd_linkButton.left = new FormAttachment(lblProject, 16);
    fd_linkButton.top = new FormAttachment(cmbProject, 40, SWT.TOP);
    linkButton.setLayoutData(fd_linkButton);
    linkButton.addListener (SWT.Selection, new Listener () {
      public void handleEvent(Event event) {
        Shell shell = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getShell();
        IProject generalProject = GeneralProjectUtils.createGeneralProject(shell,null);
        if(generalProject!=null){
          cmbProject.add(generalProject.getName());
          if(cmbProject.getItems().length>0){
            cmbProject.select(cmbProject.getItems().length -1);
          }
        }
      }
    });
    Label lblArtifactName = new Label(container, SWT.NONE);
    FormData fd_lblArtifactName = new FormData();
    fd_lblArtifactName.top = new FormAttachment(linkButton, 22);
    fd_lblArtifactName.right = new FormAttachment(cmbProject, 22, SWT.RIGHT);
    fd_lblArtifactName.left = new FormAttachment(0, 10);
    lblArtifactName.setLayoutData(fd_lblArtifactName);
    lblArtifactName.setText("File name");
    txtResourceName = new Text(container, SWT.BORDER);
    txtResourceName.addModifyListener(new ModifyListener() {
      public void modifyText(ModifyEvent arg0) {
        validate();
      }
    });
    txtResourceName.setText("NewResource");
    FormData fd_txtResourceName = new FormData();
    fd_txtResourceName.top = new FormAttachment(lblArtifactName, 6);
    fd_txtResourceName.right = new FormAttachment(cmbProject, 0, SWT.RIGHT);
    fd_txtResourceName.left = new FormAttachment(0, 32);
    txtResourceName.setLayoutData(fd_txtResourceName);
    Label lblRegistryPath = new Label(container, SWT.NONE);
    FormData fd_lblRegistryPath = new FormData();
    fd_lblRegistryPath.top = new FormAttachment(txtResourceName, 17);
    fd_lblRegistryPath.left = new FormAttachment(lblProject, 0, SWT.LEFT);
    lblRegistryPath.setLayoutData(fd_lblRegistryPath);
    lblRegistryPath.setText("Registry path");
    txtRegistryPath = new Text(container, SWT.BORDER);
    txtRegistryPath.addModifyListener(new ModifyListener() {
      public void modifyText(ModifyEvent arg0) {
        validate();
      }
    });
    txtRegistryPath.setText("/_system/config/myresources");
    FormData fd_txtRegistryPath = new FormData();
    fd_txtRegistryPath.top = new FormAttachment(lblRegistryPath, 6);
    fd_txtRegistryPath.left = new FormAttachment(lblProject, 22, SWT.LEFT);
    fd_txtRegistryPath.right = new FormAttachment(cmbProject, 0, SWT.RIGHT);
    txtRegistryPath.setLayoutData(fd_txtRegistryPath);
    // Vertical separator between the form (left) and template list (right).
    Label label = new Label(container, SWT.SEPARATOR | SWT.VERTICAL);
    fd_lblArtifactName.right = new FormAttachment(label, -122);
    fd_cmbProject.right = new FormAttachment(label, -14);
    FormData fd_label = new FormData();
    fd_label.left = new FormAttachment(0, 250);
    fd_label.top = new FormAttachment(0, 10);
    fd_label.bottom = new FormAttachment(0, 198);
    label.setLayoutData(fd_label);
    lstTemplates = new List(container, SWT.BORDER | SWT.H_SCROLL | SWT.V_SCROLL);
    lstTemplates.addSelectionListener(new SelectionAdapter() {
      @Override
      public void widgetSelected(SelectionEvent e) {
        // Keep the file-name extension in sync with the chosen template.
        updateResourceFileName();
      }
    });
    FormData fd_list = new FormData();
    fd_list.right = new FormAttachment(label, 182, SWT.RIGHT);
    lstTemplates.setLayoutData(fd_list);
    Label lblSelectTemplate = new Label(container, SWT.NONE);
    fd_label.right = new FormAttachment(lblSelectTemplate, -6);
    fd_list.top = new FormAttachment(lblSelectTemplate, 6);
    fd_list.left = new FormAttachment(lblSelectTemplate, 10, SWT.LEFT);
    FormData fd_lblSelectTemplate = new FormData();
    fd_lblSelectTemplate.top = new FormAttachment(0, 26);
    fd_lblSelectTemplate.left = new FormAttachment(0, 258);
    lblSelectTemplate.setLayoutData(fd_lblSelectTemplate);
    lblSelectTemplate.setText("Select template...");
    btnOpenResourceOnce = new Button(container, SWT.CHECK);
    fd_list.bottom = new FormAttachment(btnOpenResourceOnce, 0, SWT.BOTTOM);
    FormData fd_btnOpenResourceOnce = new FormData();
    fd_btnOpenResourceOnce.bottom = new FormAttachment(100, -10);
    fd_btnOpenResourceOnce.left = new FormAttachment(lblProject, 0, SWT.LEFT);
    btnOpenResourceOnce.setLayoutData(fd_btnOpenResourceOnce);
    btnOpenResourceOnce.setText("Open resource once created");
    loadCAppProjects();
    loadTemplateList();
    return container;
  }

  /**
   * Fills the project combo with all open workspace projects that have the
   * general-project nature, selecting the first one.
   */
  private void loadCAppProjects() {
    IProject[] projects = ResourcesPlugin.getWorkspace().getRoot()
        .getProjects();
    cmbProject.removeAll();
    for (IProject project : projects) {
      try {
        if (project.isOpen()
            && project.hasNature(Constants.GENERAL_PROJECT_NATURE)) {
          cmbProject.add(project.getName());
        }
      } catch (Exception e) {
        // Best-effort: a project that cannot report its nature is simply skipped.
        /*ignore*/
      }
    }
    if (cmbProject.getItemCount() > 0) {
      cmbProject.select(0);
    }
  }

  /**
   * Enables the OK button only when project, file name and registry path are
   * all non-empty. Safe to call before the button bar exists (okButton null).
   */
  private void validate() {
    Button okButton = getButton(IDialogConstants.OK_ID);
    boolean okButtonState = true;
    if (cmbProject.getText() == null || cmbProject.getText().equals("")
        || txtResourceName.getText().trim().equals("")
        || txtRegistryPath.getText().trim().equals("")) {
      okButtonState = false;
    }
    if (okButton!=null){
      okButton.setEnabled(okButtonState);
    }
  }

  /**
   * Rewrites the file-name field to carry the extension of the currently
   * selected template (no extension if the template has none).
   */
  private void updateResourceFileName(){
    String resourceName = txtResourceName.getText();
    String resourceFileName = FileUtils.getResourceFileName(resourceName);
    String selectedTemplateExtension = getSelectedTemplateExtension();
    if (selectedTemplateExtension==null){
      txtResourceName.setText(resourceFileName);
    }else{
      txtResourceName.setText(resourceFileName+"."+selectedTemplateExtension);
    }
  }

  /**
   * Extension for the selected template: "xml"/"txt" for the two built-in
   * empty templates, otherwise the template's own default extension.
   */
  private String getSelectedTemplateExtension() {
    int selectionIndex = lstTemplates.getSelectionIndex();
    if (selectionIndex == emptyXmlFileTemplate) {
      return "xml";
    } else if (selectionIndex == emptyTextFileTemplate) {
      return "txt";
    } else {
      return artifactTemplates[selectionIndex].getDefaultExtension();
    }
  }

  /**
   * Populates the template list: filtered artifact templates first, then the
   * two built-in "empty" entries, recording their indices for
   * getSelectedTemplateExtension().
   */
  private void loadTemplateList() {
    lstTemplates.removeAll();
    artifactTemplates = ArtifactTemplateHandler
        .getArtifactTemplates(getFilters());
    for (ArtifactTemplate artifactTemplate : artifactTemplates) {
      lstTemplates.add(artifactTemplate.getName());
    }
    emptyXmlFileTemplate = lstTemplates.getItemCount();
    lstTemplates.add("Empty XML file");
    emptyTextFileTemplate = lstTemplates.getItemCount();
    lstTemplates.add("Empty text file");
    lstTemplates.select(0);
    updateResourceFileName();
  }

  /**
   * OK handler: resolves the template content, writes the resource into the
   * selected project, registers it as a registry artifact and records the
   * resulting registry/workspace paths. Any failure is reported in an error
   * dialog and keeps this dialog open.
   */
  @Override
  protected void okPressed() {
    int selectionIndex = lstTemplates.getSelectionIndex();
    try {
      String templateString;
      if (selectionIndex == emptyXmlFileTemplate) {
        templateString = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>";
      } else if (selectionIndex == emptyTextFileTemplate) {
        templateString = "";
      } else {
        ArtifactTemplate esbArtifactTemplate = artifactTemplates[selectionIndex];
        templateString = FileUtils.getContentAsString(esbArtifactTemplate.getTemplateUrl());
      }
      String name = txtResourceName.getText();
      String content=EsbTemplateFormatter.stripParameters(templateString, name);
      // Temp tag scopes temporary files created below; cleared before exit.
      ITemporaryFileTag createNewTempTag = FileUtils.createNewTempTag();
      File tempFile = new File(FileUtils.createTempDirectory(),name);
      tempFile.getParentFile().mkdirs();
      FileUtils.writeContent(tempFile, content);
      String projectName = cmbProject.getText();
      IProject project = ResourcesPlugin.getWorkspace().getRoot()
          .getProject(projectName);
      String path = txtRegistryPath.getText();
      boolean ret = createRegistryArtifact(project,name,path,content);
      if (!ret) {
        // false -> a file with this name already exists in the project.
        MessageDialog.openInformation(getShell(), "Resource creation",
            "A resource already exists with name \'" + name + " on " + projectName);
        return;
      }
      path=path.endsWith("/")?path+name:path+"/"+name;
      setSelectedPath(path);
      setIPathOfSelection(project.getFullPath().append(name).toString());
      createNewTempTag.clearAndEnd();
    } catch (IOException e) {
      log.error(e);
      MessageDialog.openError(getShell(), "Template creation", e
          .getMessage());
      return;
    } catch (CoreException e) {
      log.error(e);
      MessageDialog.openError(getShell(), "Resource creation", e
          .getMessage());
      return;
    } catch (Exception e) {
      log.error(e);
      MessageDialog.openError(getShell(), "Resource creation", e
          .getMessage());
      return;
    }
    super.okPressed();
  }

  /**
   * Create contents of the button bar.
   *
   * @param parent parent composite.
   */
  @Override
  protected void createButtonsForButtonBar(Composite parent) {
    createButton(parent, IDialogConstants.OK_ID, IDialogConstants.OK_LABEL,
        true);
    createButton(parent, IDialogConstants.CANCEL_ID,
        IDialogConstants.CANCEL_LABEL, false);
    // Now that the OK button exists, apply the initial enablement state.
    validate();
  }

  /**
   * Return the initial size of the dialog.
   */
  @Override
  protected Point getInitialSize() {
    return new Point(446, 340);
  }

  public void setSelectedPath(String selectedPath) {
    this.selectedPath = selectedPath;
  }

  public String getSelectedPath() {
    return selectedPath;
  }

  public void setFilters(Map<String, java.util.List<String>> filters) {
    this.filters = filters;
  }

  public Map<String, java.util.List<String>> getFilters() {
    return filters;
  }

  /**
   * Group id read from the given pom, falling back to "org.wso2.carbon" when
   * the pom is missing or unreadable.
   */
  private String getMavenGroupId(File pomLocation){
    String groupId = "org.wso2.carbon";
    if(pomLocation!=null && pomLocation.exists()){
      try {
        MavenProject mavenProject = MavenUtils.getMavenProject(pomLocation);
        groupId = mavenProject.getGroupId();
      } catch (Exception e) {
        log.error("error reading pom file", e);
      }
    }
    return groupId;
  }

  /**
   * Writes the resource file into the project, builds its registry metadata
   * and records it in artifact.xml, then ensures the general-project maven
   * plugin is configured.
   *
   * @return false if a file with that name already exists (nothing written);
   *         true on success.
   * @throws Exception propagated from file/maven/artifact operations.
   */
  private boolean createRegistryArtifact(IProject project,String fileName,
      String registryPath,String content) throws Exception{
    File destFile = project.getFile(fileName).getLocation().toFile();
    String resourceName = FileUtils.getResourceFileName(fileName);
    if(destFile.exists()){
      return false;
    }
    String groupId = getMavenGroupId(project.getFile("pom.xml").getLocation().toFile());
    groupId += ".resource";
    FileUtils.createFile(destFile, content);
    RegistryResourceInfoDoc regResInfoDoc = new RegistryResourceInfoDoc();
    RegistryResourceUtils.createMetaDataForFolder(registryPath, project
        .getLocation().toFile());
    RegistryResourceUtils.addRegistryResourceInfo(destFile, regResInfoDoc,
        project.getLocation().toFile(), registryPath);
    GeneralProjectArtifact generalProjectArtifact = new GeneralProjectArtifact();
    generalProjectArtifact.fromFile(project.getFile("artifact.xml")
        .getLocation().toFile());
    RegistryArtifact artifact = new RegistryArtifact();
    artifact.setName(resourceName);
    artifact.setVersion("1.0.0");
    artifact.setType("registry/resource");
    artifact.setServerRole("EnterpriseServiceBus");
    artifact.setGroupId(groupId);
    java.util.List<RegistryResourceInfo> registryResources = regResInfoDoc
        .getRegistryResources();
    for (RegistryResourceInfo registryResourceInfo : registryResources) {
      RegistryElement item = null;
      if (registryResourceInfo.getType() == REGISTRY_RESOURCE) {
        item = new RegistryItem();
        ((RegistryItem) item).setFile(registryResourceInfo
            .getResourceBaseRelativePath());
        // Strip any trailing "/" from the deploy path.
        item.setPath(registryResourceInfo.getDeployPath().replaceAll("/$", ""));
        artifact.addRegistryElement(item);
      }
    }
    generalProjectArtifact.addArtifact(artifact);
    generalProjectArtifact.toFile();
    addGeneralProjectPlugin(project);
    project.refreshLocal(IResource.DEPTH_INFINITE, new NullProgressMonitor());
    return true;
  }

  /**
   * Adds the wso2-general-project-plugin ("pom-gen" at process-resources)
   * to the project's pom unless an older entry already exists.
   * NOTE(review): method body continues past this excerpt.
   */
  private void addGeneralProjectPlugin(IProject project) throws Exception{
    MavenProject mavenProject;
    File mavenProjectPomLocation = project.getFile("pom.xml").getLocation().toFile();
    if(!mavenProjectPomLocation.exists()){
      mavenProject = MavenUtils.createMavenProject("org.wso2.carbon",
          project.getName(), "1.0.0","pom");
    } else {
      mavenProject = MavenUtils.getMavenProject(mavenProjectPomLocation);
    }
    boolean pluginExists = MavenUtils.checkOldPluginEntry(mavenProject,
        "org.wso2.maven", "wso2-general-project-plugin",
        ESBMavenConstants.WSO2_GENERAL_PROJECT_VERSION);
    if(pluginExists){
      return ;
    }
    mavenProject = MavenUtils.getMavenProject(mavenProjectPomLocation);
    Plugin plugin = MavenUtils.createPluginEntry(mavenProject, "org.wso2.maven",
        "wso2-general-project-plugin",
        ESBMavenConstants.WSO2_GENERAL_PROJECT_VERSION, true);
    PluginExecution pluginExecution;
    pluginExecution = new PluginExecution();
    pluginExecution.addGoal("pom-gen");
    pluginExecution.setPhase("process-resources");
    pluginExecution.setId("registry");
    plugin.addExecution(pluginExecution);
    Xpp3Dom configurationNode = MavenUtils.createMainConfigurationNode();
    Xpp3Dom /* NOTE(review): declaration continues in the next chunk */
artifactLocationNode = MavenUtils.createXpp3Node(configurationNode, "artifactLocation"); artifactLocationNode.setValue("."); Xpp3Dom typeListNode = MavenUtils.createXpp3Node(configurationNode, "typeList"); typeListNode.setValue("${artifact.types}"); pluginExecution.setConfiguration(configurationNode); Repository repo = new Repository(); repo.setUrl("http://dist.wso2.org/maven2"); repo.setId("wso2-maven2-repository-1"); Repository repo1 = new Repository(); repo1.setUrl("http://maven.wso2.org/nexus/content/groups/wso2-public/"); repo1.setId("wso2-nexus-maven2-repository-1"); if (!mavenProject.getRepositories().contains(repo)) { mavenProject.getModel().addRepository(repo); mavenProject.getModel().addPluginRepository(repo); } if (!mavenProject.getRepositories().contains(repo1)) { mavenProject.getModel().addRepository(repo1); mavenProject.getModel().addPluginRepository(repo1); } MavenUtils.saveMavenProject(mavenProject, mavenProjectPomLocation); } public void setIPathOfSelection(String ipath) { ipathOfSelection = ipath; } public String getIPathOfSelection() { return ipathOfSelection; } }
package com.seed.pontointeligente.api.entities;

import java.io.Serializable;
import java.math.BigDecimal;
import java.util.Date;
import java.util.List;
import java.util.Optional;

import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.EnumType;
import javax.persistence.Enumerated;
import javax.persistence.FetchType;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.ManyToOne;
import javax.persistence.OneToMany;
import javax.persistence.PrePersist;
import javax.persistence.PreUpdate;
import javax.persistence.Table;
import javax.persistence.Transient;

import com.seed.pontointeligente.api.enums.PerfilEnum;

/**
 * JPA entity mapped to the "funcionario" (employee) table.
 *
 * Mapping is property-based (annotations on getters). Optional numeric fields
 * expose {@code @Transient} Optional-returning accessors for null-safe reads.
 * Creation/update timestamps are maintained by the JPA lifecycle callbacks
 * {@link #prePersist()} and {@link #preUpdate()}.
 */
@Entity
@Table(name = "funcionario")
public class Funcionario implements Serializable {

    private static final long serialVersionUID = -1749427231274347907L;

    private Long id;
    private String nome;
    private String email;
    private String senha;
    private String cpf;
    private BigDecimal valorHora;
    private Float qtdHorasTrabalhoDia;
    private Float qtdHorasAlmoco;
    private PerfilEnum perfil;
    private Date dataCriacao;
    private Date dataAtualizacao;
    private Empresa empresa;
    private List<Lancamento> lancamentos;

    @Id
    @GeneratedValue(strategy = GenerationType.AUTO)
    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    @Column(name = "nome", nullable = false)
    public String getNome() {
        return nome;
    }

    public void setNome(String nome) {
        this.nome = nome;
    }

    @Column(name = "email", nullable = false)
    public String getEmail() {
        return email;
    }

    public void setEmail(String email) {
        this.email = email;
    }

    @Column(name = "senha", nullable = false)
    public String getSenha() {
        return senha;
    }

    public void setSenha(String senha) {
        this.senha = senha;
    }

    @Column(name = "cpf", nullable = false)
    public String getCpf() {
        return cpf;
    }

    public void setCpf(String cpf) {
        this.cpf = cpf;
    }

    @Column(name = "valor_hora", nullable = true)
    public BigDecimal getValorHora() {
        return valorHora;
    }

    /** Null-safe view of the nullable hourly-rate column. Not persisted. */
    @Transient
    public Optional<BigDecimal> getValorHoraOpt() {
        return Optional.ofNullable(valorHora);
    }

    public void setValorHora(BigDecimal valorHora) {
        this.valorHora = valorHora;
    }

    @Column(name = "qtd_horas_trabalho_dia", nullable = true)
    public Float getQtdHorasTrabalhoDia() {
        return qtdHorasTrabalhoDia;
    }

    /** Null-safe view of the nullable daily-hours column. Not persisted. */
    @Transient
    public Optional<Float> getQtdHorasTrabalhoDiaOpt() {
        return Optional.ofNullable(qtdHorasTrabalhoDia);
    }

    public void setQtdHorasTrabalhoDia(Float qtdHorasTrabalhoDia) {
        this.qtdHorasTrabalhoDia = qtdHorasTrabalhoDia;
    }

    @Column(name = "qtd_horas_almoco", nullable = true)
    public Float getQtdHorasAlmoco() {
        return qtdHorasAlmoco;
    }

    /** Null-safe view of the nullable lunch-hours column. Not persisted. */
    @Transient
    public Optional<Float> getQtdHorasAlmocoOpt() {
        return Optional.ofNullable(qtdHorasAlmoco);
    }

    public void setQtdHorasAlmoco(Float qtdHorasAlmoco) {
        this.qtdHorasAlmoco = qtdHorasAlmoco;
    }

    @Enumerated(EnumType.STRING)
    @Column(name = "perfil", nullable = false)
    public PerfilEnum getPerfil() {
        return perfil;
    }

    public void setPerfil(PerfilEnum perfil) {
        this.perfil = perfil;
    }

    @Column(name = "data_criacao", nullable = false)
    public Date getDataCriacao() {
        return dataCriacao;
    }

    public void setDataCriacao(Date dataCriacao) {
        this.dataCriacao = dataCriacao;
    }

    @Column(name = "data_atualizacao", nullable = false)
    public Date getDataAtualizacao() {
        return dataAtualizacao;
    }

    public void setDataAtualizacao(Date dataAtualizacao) {
        this.dataAtualizacao = dataAtualizacao;
    }

    @ManyToOne(fetch = FetchType.EAGER)
    public Empresa getEmpresa() {
        return empresa;
    }

    public void setEmpresa(Empresa empresa) {
        this.empresa = empresa;
    }

    @OneToMany(mappedBy = "funcionario", fetch = FetchType.LAZY, cascade = CascadeType.ALL)
    public List<Lancamento> getLancamentos() {
        return lancamentos;
    }

    public void setLancamentos(List<Lancamento> lancamentos) {
        this.lancamentos = lancamentos;
    }

    /** Refreshes the update timestamp before every UPDATE. */
    @PreUpdate
    public void preUpdate() {
        dataAtualizacao = new Date();
    }

    /** Stamps both timestamps with the same instant before the first INSERT. */
    @PrePersist
    public void prePersist() {
        final Date atual = new Date();
        dataCriacao = atual;
        dataAtualizacao = atual;
    }

    /** Identity is the database id only, consistent with {@link #equals(Object)}. */
    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((id == null) ? 0 : id.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        Funcionario other = (Funcionario) obj;
        if (id == null) {
            if (other.id != null)
                return false;
        } else if (!id.equals(other.id))
            return false;
        return true;
    }

    @Override
    public String toString() {
        // FIX: the password (senha) was previously included in toString(), leaking
        // credentials into any log that prints this entity. It is now masked.
        return "Funcionario [id=" + id + ", nome=" + nome + ", email=" + email + ", senha=[PROTECTED]"
                + ", cpf=" + cpf + ", valorHora=" + valorHora + ", qtdHorasTrabalhoDia="
                + qtdHorasTrabalhoDia + ", qtdHorasAlmoco=" + qtdHorasAlmoco + ", perfil=" + perfil
                + ", dataCriacao=" + dataCriacao + ", dataAtualizacao=" + dataAtualizacao
                + ", empresa=" + empresa + ", lancamentos=" + lancamentos + "]";
    }
}
/*
 * Copyright 2015 Open Networking Laboratory
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.onosproject.app.impl;

import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Reference;
import org.apache.felix.scr.annotations.ReferenceCardinality;
import org.apache.felix.scr.annotations.Service;
import org.apache.karaf.features.Feature;
import org.apache.karaf.features.FeaturesService;
import org.onosproject.app.ApplicationAdminService;
import org.onosproject.app.ApplicationEvent;
import org.onosproject.app.ApplicationListener;
import org.onosproject.app.ApplicationService;
import org.onosproject.app.ApplicationState;
import org.onosproject.app.ApplicationStore;
import org.onosproject.app.ApplicationStoreDelegate;
import org.onosproject.core.Application;
import org.onosproject.core.ApplicationId;
import org.onosproject.core.Permission;
import org.onosproject.event.ListenerRegistry;
import org.onosproject.event.EventDeliveryService;
import org.slf4j.Logger;

import java.io.InputStream;
import java.util.Set;

import static com.google.common.base.Preconditions.checkNotNull;
import static org.onosproject.app.ApplicationEvent.Type.*;
import static org.onosproject.security.AppGuard.checkPermission;
import static org.slf4j.LoggerFactory.getLogger;

/**
 * Implementation of the application management service.
 *
 * Lifecycle state lives in the injected {@link ApplicationStore}; this class
 * reacts to store notifications by installing/uninstalling the corresponding
 * Karaf feature repositories and features, then republishes the event to
 * registered listeners.
 */
@Component(immediate = true)
@Service
public class ApplicationManager implements ApplicationService, ApplicationAdminService {

    private final Logger log = getLogger(getClass());

    private static final String APP_ID_NULL = "Application ID cannot be null";

    // Dispatch registry for ApplicationListener callbacks; events are posted
    // through eventDispatcher after the store delegate has processed them.
    protected final ListenerRegistry<ApplicationEvent, ApplicationListener>
            listenerRegistry = new ListenerRegistry<>();

    // Receives store notifications (possibly originating on remote cluster nodes).
    private final ApplicationStoreDelegate delegate = new InternalStoreDelegate();

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected ApplicationStore store;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected FeaturesService featuresService;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected EventDeliveryService eventDispatcher;

    // True only while store.setDelegate() replays initial state during activate();
    // used to suppress feature-not-found warnings from the staggered cluster install.
    private boolean initializing;

    @Activate
    public void activate() {
        eventDispatcher.addSink(ApplicationEvent.class, listenerRegistry);
        initializing = true;
        store.setDelegate(delegate);
        initializing = false;
        log.info("Started");
    }

    @Deactivate
    public void deactivate() {
        eventDispatcher.removeSink(ApplicationEvent.class);
        store.unsetDelegate(delegate);
        log.info("Stopped");
    }

    @Override
    public Set<Application> getApplications() {
        checkPermission(Permission.APP_READ);
        return store.getApplications();
    }

    @Override
    public ApplicationId getId(String name) {
        checkPermission(Permission.APP_READ);
        checkNotNull(name, "Name cannot be null");
        return store.getId(name);
    }

    @Override
    public Application getApplication(ApplicationId appId) {
        checkPermission(Permission.APP_READ);
        checkNotNull(appId, APP_ID_NULL);
        return store.getApplication(appId);
    }

    @Override
    public ApplicationState getState(ApplicationId appId) {
        checkPermission(Permission.APP_READ);
        checkNotNull(appId, APP_ID_NULL);
        return store.getState(appId);
    }

    @Override
    public Set<Permission> getPermissions(ApplicationId appId) {
        checkPermission(Permission.APP_READ);
        checkNotNull(appId, APP_ID_NULL);
        return store.getPermissions(appId);
    }

    @Override
    public Application install(InputStream appDescStream) {
        checkNotNull(appDescStream, "Application archive stream cannot be null");
        // Actual feature installation happens later, via the APP_INSTALLED event.
        return store.create(appDescStream);
    }

    @Override
    public void uninstall(ApplicationId appId) {
        checkNotNull(appId, APP_ID_NULL);
        try {
            store.remove(appId);
        } catch (Exception e) {
            // Best-effort: removal failures are reported but do not propagate.
            log.warn("Unable to purge application directory for {}", appId.name());
        }
    }

    @Override
    public void activate(ApplicationId appId) {
        checkNotNull(appId, APP_ID_NULL);
        store.activate(appId);
    }

    @Override
    public void deactivate(ApplicationId appId) {
        checkNotNull(appId, APP_ID_NULL);
        store.deactivate(appId);
    }

    @Override
    public void setPermissions(ApplicationId appId, Set<Permission> permissions) {
        checkNotNull(appId, APP_ID_NULL);
        checkNotNull(permissions, "Permissions cannot be null");
        store.setPermissions(appId, permissions);
    }

    @Override
    public void addListener(ApplicationListener listener) {
        checkPermission(Permission.APP_EVENT);
        listenerRegistry.addListener(listener);
    }

    @Override
    public void removeListener(ApplicationListener listener) {
        checkPermission(Permission.APP_EVENT);
        listenerRegistry.removeListener(listener);
    }

    /**
     * Translates store lifecycle events into Karaf feature operations and then
     * republishes each event through the event dispatcher.
     */
    private class InternalStoreDelegate implements ApplicationStoreDelegate {
        @Override
        public void notify(ApplicationEvent event) {
            ApplicationEvent.Type type = event.type();
            Application app = event.subject();
            try {
                if (type == APP_ACTIVATED) {
                    if (installAppFeatures(app)) {
                        log.info("Application {} has been activated", app.id().name());
                    }
                } else if (type == APP_DEACTIVATED) {
                    if (uninstallAppFeatures(app)) {
                        log.info("Application {} has been deactivated", app.id().name());
                    }
                } else if (type == APP_INSTALLED) {
                    if (installAppArtifacts(app)) {
                        log.info("Application {} has been installed", app.id().name());
                    }
                } else if (type == APP_UNINSTALLED) {
                    // NOTE(review): || short-circuits, so uninstallAppArtifacts() only
                    // runs when uninstallAppFeatures() returned false — verify the
                    // feature repository is intentionally left in place in that case.
                    if (uninstallAppFeatures(app) || uninstallAppArtifacts(app)) {
                        log.info("Application {} has been uninstalled", app.id().name());
                    }
                }
                // Event is forwarded to listeners even when no feature change occurred.
                eventDispatcher.post(event);
            } catch (Exception e) {
                log.warn("Unable to perform operation on application " + app.id().name(), e);
            }
        }
    }

    // The following methods are fully synchronized to guard against remote vs.
    // locally induced feature service interactions.

    // Adds the app's feature repository to Karaf; returns true if a change was made.
    private synchronized boolean installAppArtifacts(Application app) throws Exception {
        if (app.featuresRepo().isPresent() &&
                featuresService.getRepository(app.featuresRepo().get()) == null) {
            featuresService.addRepository(app.featuresRepo().get());
            return true;
        }
        return false;
    }

    // Removes the app's feature repository from Karaf; returns true if a change was made.
    private synchronized boolean uninstallAppArtifacts(Application app) throws Exception {
        if (app.featuresRepo().isPresent() &&
                featuresService.getRepository(app.featuresRepo().get()) != null) {
            featuresService.removeRepository(app.featuresRepo().get());
            return true;
        }
        return false;
    }

    // Installs any of the app's features not yet installed; returns true if any were.
    private synchronized boolean installAppFeatures(Application app) throws Exception {
        boolean changed = false;
        for (String name : app.features()) {
            Feature feature = featuresService.getFeature(name);
            if (feature != null && !featuresService.isInstalled(feature)) {
                featuresService.installFeature(name);
                changed = true;
            } else if (feature == null && !initializing) {
                // Suppress feature-not-found reporting during startup since these
                // can arise naturally from the staggered cluster install.
                log.warn("Feature {} not found", name);
            }
        }
        return changed;
    }

    // Uninstalls any of the app's features currently installed; returns true if any were.
    private synchronized boolean uninstallAppFeatures(Application app) throws Exception {
        boolean changed = false;
        for (String name : app.features()) {
            Feature feature = featuresService.getFeature(name);
            if (feature != null && featuresService.isInstalled(feature)) {
                featuresService.uninstallFeature(name);
                changed = true;
            } else if (feature == null) {
                log.warn("Feature {} not found", name);
            }
        }
        return changed;
    }

}
/*
 * Copyright 2015 Red Hat, Inc. and/or its affiliates
 * and other contributors as indicated by the @author tags.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.hawkular.agent.monitor.log;

import java.util.List;

import javax.management.MalformedObjectNameException;

import org.hawkular.agent.monitor.inventory.MonitoredEndpoint;
import org.hawkular.agent.monitor.protocol.EndpointService;
import org.hawkular.agent.monitor.protocol.ProtocolException;
import org.jboss.logging.BasicLogger;
import org.jboss.logging.Logger;
import org.jboss.logging.Logger.Level;
import org.jboss.logging.annotations.Cause;
import org.jboss.logging.annotations.LogMessage;
import org.jboss.logging.annotations.Message;
import org.jboss.logging.annotations.MessageLogger;
import org.jboss.logging.annotations.ValidIdRange;

/**
 * JBoss Logging typed message logger for the Hawkular monitor agent.
 * Implementations are generated from the annotations; message IDs are
 * confined to the 10000-19999 range declared by {@code @ValidIdRange}.
 */
@MessageLogger(projectCode = "HAWKMONITOR")
@ValidIdRange(min = 10000, max = 19999)
public interface MsgLogger extends BasicLogger {
    MsgLogger LOG = Logger.getMessageLogger(MsgLogger.class, "org.hawkular.agent.monitor");

    @LogMessage(level = Level.INFO)
    @Message(id = 10000, value = "Starting Hawkular Monitor service")
    void infoStarting();

    @LogMessage(level = Level.INFO)
    @Message(id = 10001, value = "Stopping Hawkular Monitor service")
    void infoStopping();

    @LogMessage(level = Level.INFO)
    @Message(id = 10002, value = "Hawkular Monitor subsystem is disabled; service will not be started")
    void infoSubsystemDisabled();

    @LogMessage(level = Level.INFO)
    @Message(id = 10003, value = "JNDI binding [%s]: bound to object of type [%s]")
    void infoBindJndiResource(String jndiName, String objectTypeName);

    @LogMessage(level = Level.INFO)
    @Message(id = 10004, value = "JNDI binding [%s]: unbound")
    void infoUnbindJndiResource(String jndiName);

    @LogMessage(level = Level.INFO)
    @Message(id = 10005, value = "No diagnostics configuration - diagnostics will be disabled")
    void infoNoDiagnosticsConfig();

    @LogMessage(level = Level.INFO)
    @Message(id = 10006, value = "There are no enabled %s metric sets")
    void infoNoEnabledMetricsConfigured(String type);

    @LogMessage(level = Level.INFO)
    @Message(id = 10007, value = "There are no enabled %s availability check sets")
    void infoNoEnabledAvailsConfigured(String type);

    @LogMessage(level = Level.ERROR)
    @Message(id = 10008, value = "A metric collection failed")
    void errorMetricCollectionFailed(@Cause Throwable t);

    @LogMessage(level = Level.ERROR)
    @Message(id = 10009, value = "An availability check failed")
    void errorAvailCheckFailed(@Cause Throwable t);

    @LogMessage(level = Level.ERROR)
    @Message(id = 10010, value = "Failed to store metric data: %s")
    void errorFailedToStoreMetricData(@Cause Throwable t, String data);

    @LogMessage(level = Level.ERROR)
    @Message(id = 10011, value = "Failed to store avail data: %s")
    void errorFailedToStoreAvailData(@Cause Throwable t, String data);

    @LogMessage(level = Level.INFO)
    @Message(id = 10012, value = "Starting scheduler")
    void infoStartingScheduler();

    @LogMessage(level = Level.INFO)
    @Message(id = 10013, value = "Stopping scheduler")
    void infoStoppingScheduler();

    // FIX: first parameter was misspelled "expectedCound"; renamed to expectedCount.
    // Safe: jboss-logging binds message parameters positionally, not by name.
    @LogMessage(level = Level.WARN)
    @Message(id = 10014, value = "Batch operation requested [%d] values but received [%d]")
    void warnBatchResultsDoNotMatchRequests(int expectedCount, int actualCount);

    @LogMessage(level = Level.WARN)
    @Message(id = 10015, value = "Comma in name! This will interfere with comma-separators in lists. [%s]")
    void warnCommaInName(String name);

    @LogMessage(level = Level.WARN)
    @Message(id = 10016, value = "The resource type [%s] wants to use an unknown metric set [%s]")
    void warnMetricSetDoesNotExist(String resourceTypeName, String metricSetName);

    @LogMessage(level = Level.WARN)
    @Message(id = 10017, value = "The resource type [%s] wants to use an unknown avail set [%s]")
    void warnAvailSetDoesNotExist(String resourceTypeName, String availSetName);

    @LogMessage(level = Level.WARN)
    @Message(id = 10018, value = "Cannot obtain server identifiers for [%s]: %s")
    void warnCannotObtainServerIdentifiersForDMREndpoint(String endpoint, String errorString);

    @LogMessage(level = Level.INFO)
    @Message(id = 10019, value = "Managed server [%s] is disabled. It will not be monitored.")
    void infoManagedServerDisabled(String name);

    @LogMessage(level = Level.WARN)
    @Message(id = 10020, value = "The managed server [%s] wants to use an unknown resource type set [%s]")
    void warnResourceTypeSetDoesNotExist(String managedServerName, String resourceTypeSetName);

    @LogMessage(level = Level.INFO)
    @Message(id = 10021, value = "There are no enabled %s resource type sets")
    void infoNoEnabledResourceTypesConfigured(String type);

    // NOTE(review): message text reads "all if its" — likely meant "all of its".
    // Left unchanged here to avoid altering an emitted log message without sign-off.
    @LogMessage(level = Level.INFO)
    @Message(id = 10022, value = "Resource type [%s] is disabled - all if its child types will also be disabled: %s")
    void infoDisablingResourceTypes(Object disabledType, List<?> toBeDisabled);

    @LogMessage(level = Level.ERROR)
    @Message(id = 10023, value = "Discovery failed while probing endpoint [%s]")
    void errorDiscoveryFailed(@Cause Exception e, MonitoredEndpoint endpoint);

    @LogMessage(level = Level.ERROR)
    @Message(id = 10024, value = "Failed to store inventory data")
    void errorFailedToStoreInventoryData(@Cause Throwable t);

    @LogMessage(level = Level.INFO)
    @Message(id = 10025, value = "Will talk to Hawkular at URL [%s]")
    void infoUsingServerSideUrl(String url);

    @LogMessage(level = Level.ERROR)
    @Message(id = 10026, value = "Can't do anything without a feed; aborting startup")
    void errorCannotDoAnythingWithoutFeed(@Cause Throwable t);

    @LogMessage(level = Level.ERROR)
    @Message(id = 10027, value = "To use standalone Hawkular Metrics, you must configure a tenant ID")
    void errorMustHaveTenantIdConfigured();

    @LogMessage(level = Level.ERROR)
    @Message(id = 10028, value = "Cannot start storage adapter; aborting startup")
    void errorCannotStartStorageAdapter(@Cause Throwable t);

    @LogMessage(level = Level.ERROR)
    @Message(id = 10029, value = "Scheduler failed to initialize; aborting startup")
    void errorCannotInitializeScheduler(@Cause Throwable t);

    @LogMessage(level = Level.INFO)
    @Message(id = 10030, value = "Using keystore at [%s]")
    void infoUseKeystore(String keystorePath);

    @LogMessage(level = Level.INFO)
    @Message(id = 10031, value = "The storage adapter URL is explicitly specified [%s], so useSSL will be set to [%s]")
    void infoUsingSSL(String url, boolean useSSL);

    @LogMessage(level = Logger.Level.ERROR)
    @Message(id = 10032, value = "Server provided an invalid command request: [%s]")
    void errorInvalidCommandRequestFeed(String requestClassName);

    @LogMessage(level = Logger.Level.ERROR)
    @Message(id = 10033, value = "Failed to execute command [%s] for server")
    void errorCommandExecutionFailureFeed(String requestClassName, @Cause Throwable t);

    @LogMessage(level = Logger.Level.INFO)
    @Message(id = 10034, value = "Opened feed WebSocket connection to endpoint [%s]")
    void infoOpenedFeedComm(String endpoint);

    @LogMessage(level = Logger.Level.INFO)
    @Message(id = 10035, value = "Closed feed WebSocket connection to endpoint [%s]. Code=[%d], Reason=[%s]")
    void infoClosedFeedComm(String endpoint, int reasonCode, String reason);

    @LogMessage(level = Logger.Level.WARN)
    @Message(id = 10036, value = "Feed communications channel encountered a failure. Response=[%s]")
    void warnFeedCommFailure(String response, @Cause Exception e);

    @LogMessage(level = Logger.Level.ERROR)
    @Message(id = 10037, value = "Failed to send message [%s] over the feed communications channel")
    void errorFailedToSendOverFeedComm(String command, @Cause Throwable t);

    @LogMessage(level = Logger.Level.WARN)
    @Message(id = 10038, value = "Failed to close web socket with code=[%d] and reason=[%s]")
    void warnFailedToCloseWebSocket(int code, String reason, @Cause Exception e);

    @LogMessage(level = Logger.Level.INFO)
    @Message(id = 10039, value = "The command-gateway URL is [%s]")
    void infoFeedCommUrl(String feedcommUrl);

    @LogMessage(level = Logger.Level.ERROR)
    @Message(id = 10040, value = "Cannot re-establish websocket connection")
    void errorCannotReconnectToWebSocket(@Cause Exception e);

    @LogMessage(level = Logger.Level.ERROR)
    @Message(id = 10041, value = "Cannot close command-gateway websocket")
    void errorCannotCloseWebSocketCall(@Cause Exception e);

    @LogMessage(level = Logger.Level.ERROR)
    @Message(id = 10042, value = "Cannot connect to the server over the feed communications channel.")
    void errorCannotEstablishFeedComm(@Cause Exception e);

    @LogMessage(level = Logger.Level.WARN)
    @Message(id = 10043, value = "Received the following error message and stack trace from server: %s\n%s")
    void warnReceivedGenericErrorResponse(String errorMessage, String stackTrace);

    @LogMessage(level = Logger.Level.ERROR)
    @Message(id = 10044, value = "Failed to execute [%s] for request [%s]")
    void errorFailedToExecuteCommand(@Cause Exception e, String commandClassName, Object request);

    @LogMessage(level = Level.INFO)
    @Message(id = 10045, value = "No platform configuration - platform metrics will be disabled")
    void infoNoPlatformConfig();

    @LogMessage(level = Level.ERROR)
    @Message(id = 10046, value = "Got response code [%d] when storing entity of type [%s] under path [%s] to inventory")
    void errorFailedToStorePathToInventory(int code, String entityType, String path);

    @LogMessage(level = Level.WARN)
    @Message(id = 10047, value = "Failed to locate [%s] at location [%s] relative to [%s]")
    void warnFailedToLocate(@Cause ProtocolException e, String typeName, String location, String parentLocation);

    @LogMessage(level = Level.WARN)
    @Message(id = 10048, value = "Malformed JMX object name: [%s]")
    void warnMalformedJMXObjectName(String objectName, @Cause MalformedObjectNameException e);

    @LogMessage(level = Level.ERROR)
    @Message(id = 10049, value = "Could not access resources of endpoint [%s]")
    void errorCouldNotAccess(EndpointService<?, ?> endpoint, @Cause Throwable e);

    @LogMessage(level = Level.WARN)
    @Message(id = 10050, value = "The tenant ID [%s] set in standalone.xml or domain.xml or similar xml file will be"
            + " ignored in favor of tenant ID [%s] retrieved from Hawkular Accounts")
    void warnIgnoringTenantIdFromXml(String tenantIdXml, String tenantIdAccounts);

    @LogMessage(level = Level.ERROR)
    @Message(id = 10051, value = "The tenant ID could not be retrieved from Hawkular Accounts")
    void errorNoTenantIdFromAccounts();

    @LogMessage(level = Level.ERROR)
    @Message(id = 10052, value = "Could not store metrics for monitored endpoint [%s]")
    void errorFailedToStoreMetrics(String endpoint, @Cause Throwable t);

    @LogMessage(level = Level.ERROR)
    @Message(id = 10053, value = "Could not store availability data for monitored endpoint [%s]")
    void errorFailedToStoreAvails(String endpoint, @Cause Throwable t);

    @LogMessage(level = Level.ERROR)
    @Message(id = 10054, value = "Agent encountered errors during start up and will be stopped.")
    void errorFailedToStartAgent(@Cause Throwable t);

    @LogMessage(level = Level.WARN)
    @Message(id = 10055, value = "Agent encountered errors during shutdown.")
    void warnFailedToStopAgent(@Cause Throwable t);
}
package org.testobject.kernel.imgproc.diff;

import static org.testobject.kernel.imgproc.diff.TopDownTreeDiffTest.Util.toList;

import java.util.ArrayList;
import java.util.List;

import junit.framework.Assert;

import org.junit.Test;

import org.testobject.commons.util.distances.StringDistances;
import org.testobject.kernel.imgproc.diff.TopDownTreeDiff;

/**
 * Tests for {@link TopDownTreeDiff} over a tiny Leaf/Branch tree model.
 *
 * @author nijkamp
 *
 *         TODO more test-cases which reflect tab issue etc.
 */
public class TopDownTreeDiffTest {
    public interface Node {}

    public interface Container extends Node {
        List<Node> getChilds();
    }

    // FIX: made static — these node classes never use the enclosing test instance,
    // and non-static inner classes carry a hidden reference to it.
    public static class Leaf implements Node {
        public final String label;

        public Leaf(String label) {
            this.label = label;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof Leaf)) {
                return false;
            }
            Leaf leaf = (Leaf) obj;
            return label.equals(leaf.label);
        }

        // FIX: equals() was overridden without hashCode(), breaking the Object contract.
        @Override
        public int hashCode() {
            return label.hashCode();
        }
    }

    public static class Branch implements Container {
        public final String label;
        public final List<Node> childs;

        public Branch(String label, List<Node> childs) {
            this.label = label;
            this.childs = childs;
        }

        @Override
        public List<Node> getChilds() {
            return childs;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof Branch)) {
                return false;
            }
            Branch branch = (Branch) obj;
            // FIX: the original returned false whenever ANY child was shared (and never
            // compared labels), so a branch did not even equal an identical copy of
            // itself. Compare by label and child list instead.
            return label.equals(branch.label) && childs.equals(branch.childs);
        }

        // FIX: hashCode consistent with the corrected equals().
        @Override
        public int hashCode() {
            return 31 * label.hashCode() + childs.hashCode();
        }
    }

    /** Small helpers for building child lists inline. */
    public static class Util {
        public static <T> List<T> toList(T t1) {
            List<T> list = new ArrayList<T>();
            list.add(t1);
            return list;
        }

        public static <T> List<T> toList(T t1, T t2) {
            List<T> list = toList(t1);
            list.add(t2);
            return list;
        }
    }

    /**
     * Builds a diff instance whose similarity is Levenshtein-based for leaves
     * and child-count-based for branches.
     */
    private static TopDownTreeDiff<Node> newDiff() {
        TopDownTreeDiff.Adapter<Node> adapter = new TopDownTreeDiff.Adapter<Node>() {
            @Override
            public boolean isContainer(Node node) {
                return node instanceof Container;
            }

            @Override
            public List<Node> getChilds(Node node) {
                Container container = (Container) node;
                return container.getChilds();
            }

            @Override
            public float similarity(Node node1, Node node2) {
                if (node1.getClass() != node2.getClass()) {
                    return 0;
                }
                if (node1 instanceof Leaf) {
                    Leaf leaf1 = (Leaf) node1;
                    Leaf leaf2 = (Leaf) node2;
                    // Normalize edit distance by the longer label.
                    float length = leaf1.label.length() > leaf2.label.length()
                            ? leaf1.label.length() : leaf2.label.length();
                    float distance = StringDistances.getLevenshteinDistance(leaf1.label, leaf2.label) / length;
                    return 1f - distance;
                }
                if (node1 instanceof Branch) {
                    Branch branch1 = (Branch) node1;
                    Branch branch2 = (Branch) node2;
                    // Similarity from relative difference in child counts.
                    float max = branch1.childs.size() > branch2.childs.size()
                            ? branch1.childs.size() : branch2.childs.size();
                    return 1f - (Math.abs(branch1.childs.size() - branch2.childs.size()) / max);
                }
                return 0f;
            }
        };
        return new TopDownTreeDiff<Node>(adapter);
    }

    @Test
    public void testInsert() {
        /*
         * before => after
         *   a         a
         *   A       A   B
         */
        Node before;
        {
            Node A = new Leaf("A");
            before = new Branch("a", toList(A));
        }
        Node after;
        {
            Node A = new Leaf("A");
            Node B = new Leaf("B");
            after = new Branch("a", toList(B, A));
        }
        List<Node> inserts = newDiff().inserts(before, after);
        {
            Assert.assertEquals(1, inserts.size());
            Node node = inserts.get(0);
            Assert.assertTrue(node instanceof Leaf);
            Leaf leaf = (Leaf) node;
            Assert.assertEquals("B", leaf.label);
        }
    }

    @Test
    public void testInsertLong() {
        /*
         * before => after
         *   a         a
         *   A       A   b
         *                c
         *                B
         */
        Node before;
        {
            Node A = new Leaf("A");
            before = new Branch("a", toList(A));
        }
        Node after;
        {
            Node A = new Leaf("A");
            Node B = new Leaf("B");
            Branch c = new Branch("c", toList(B));
            Branch b = new Branch("b", Util.<Node>toList(c));
            after = new Branch("a", toList(A, b));
        }
        List<Node> inserts = newDiff().inserts(before, after);
        {
            Assert.assertEquals(1, inserts.size());
            Node node = inserts.get(0);
            Assert.assertTrue(node instanceof Branch);
            Branch branch = (Branch) node;
            Assert.assertEquals("b", branch.label);
        }
    }

    @Test
    public void testInsertBranch() {
        /*
         * before => after
         *   a         a
         *   A       A   b
         *              B C
         */
        Node before;
        {
            Node A = new Leaf("A");
            before = new Branch("a", toList(A));
        }
        Node after;
        {
            Node A = new Leaf("A");
            Node B = new Leaf("B");
            Node C = new Leaf("C");
            Branch b = new Branch("b", toList(C, B));
            after = new Branch("a", toList(A, b));
        }
        List<Node> inserts = newDiff().inserts(before, after);
        {
            Assert.assertEquals(1, inserts.size());
            Node node = inserts.get(0);
            Assert.assertTrue(node instanceof Branch);
            Branch branch = (Branch) node;
            Assert.assertEquals("b", branch.label);
        }
    }

    @Test
    public void testRemoveInsert() {
        /*
         * before => after
         *   a         a
         *  A B      A   b
         *              D C
         */
        Node before;
        {
            Node A = new Leaf("A");
            Node B = new Leaf("B");
            before = new Branch("a", toList(A, B));
        }
        Node after;
        {
            Node A = new Leaf("A");
            Node C = new Leaf("C");
            Node D = new Leaf("D");
            Branch b = new Branch("b", toList(C, D));
            after = new Branch("a", toList(A, b));
        }
        List<Node> inserts = newDiff().inserts(before, after);
        {
            Assert.assertEquals(1, inserts.size());
            Node node = inserts.get(0);
            Assert.assertTrue(node instanceof Branch);
            Branch branch = (Branch) node;
            Assert.assertEquals("b", branch.label);
        }
    }
}
// Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package com.intellij.openapi.vfs.encoding; import com.intellij.concurrency.JobSchedulerImpl; import com.intellij.ide.AppLifecycleListener; import com.intellij.openapi.Disposable; import com.intellij.openapi.application.ApplicationManager; import com.intellij.openapi.application.ReadAction; import com.intellij.openapi.components.PersistentStateComponent; import com.intellij.openapi.components.State; import com.intellij.openapi.components.Storage; import com.intellij.openapi.diagnostic.Logger; import com.intellij.openapi.editor.Document; import com.intellij.openapi.editor.EditorFactory; import com.intellij.openapi.editor.event.DocumentEvent; import com.intellij.openapi.editor.event.DocumentListener; import com.intellij.openapi.editor.event.EditorFactoryEvent; import com.intellij.openapi.editor.event.EditorFactoryListener; import com.intellij.openapi.fileEditor.FileDocumentManager; import com.intellij.openapi.fileEditor.FileEditorManager; import com.intellij.openapi.fileEditor.impl.LoadTextUtil; import com.intellij.openapi.project.Project; import com.intellij.openapi.project.ProjectLocator; import com.intellij.openapi.project.ProjectManager; import com.intellij.openapi.util.Comparing; import com.intellij.openapi.util.Disposer; import com.intellij.openapi.util.Key; import com.intellij.openapi.util.UserDataHolderEx; import com.intellij.openapi.vfs.CharsetToolkit; import com.intellij.openapi.vfs.VirtualFile; import com.intellij.util.ObjectUtils; import com.intellij.util.concurrency.AppExecutorUtil; import com.intellij.util.concurrency.BoundedTaskExecutor; import com.intellij.util.messages.MessageBus; import com.intellij.util.xmlb.annotations.Attribute; import gnu.trove.Equality; import gnu.trove.THashSet; import org.jetbrains.annotations.NonNls; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; 
import org.jetbrains.ide.PooledThreadExecutor;

import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;
import java.beans.PropertyChangeSupport;
import java.lang.ref.Reference;
import java.lang.ref.WeakReference;
import java.nio.charset.Charset;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Application-level encoding manager. Persists the IDE-wide default charset (in
 * {@code encoding.xml}) and re-detects the charset of changed/opened documents from
 * their content on a bounded background executor. Per-file and per-project settings
 * are delegated to the {@link EncodingProjectManager} of the project guessed for the file.
 */
@State(name = "Encoding", storages = @Storage("encoding.xml"))
public class EncodingManagerImpl extends EncodingManager implements PersistentStateComponent<EncodingManagerImpl.State>, Disposable {
  private static final Logger LOG = Logger.getInstance(EncodingManagerImpl.class);

  // Compares two references by identity of their referents; a null reference is only
  // equal to another null reference (REFERENCE_EQUALITY itself is used as a non-null
  // sentinel so a cleared reference never matches a null one).
  private static final Equality<Reference<Document>> REFERENCE_EQUALITY = new Equality<Reference<Document>>() {
    @Override
    public boolean equals(Reference<Document> o1, Reference<Document> o2) {
      Object v1 = o1 == null ? REFERENCE_EQUALITY : o1.get();
      Object v2 = o2 == null ? REFERENCE_EQUALITY : o2.get();
      return v1 == v2;
    }
  };

  private final PropertyChangeSupport myPropertyChangeSupport = new PropertyChangeSupport(this);

  /** Serialized component state: the application-wide default charset. */
  static class State {
    @NotNull
    private Charset myDefaultEncoding = CharsetToolkit.UTF8_CHARSET;

    // NO_ENCODING is persisted as the empty string in encoding.xml.
    @Attribute("default_encoding")
    @NotNull
    public String getDefaultCharsetName() {
      return myDefaultEncoding == ChooseFileEncodingAction.NO_ENCODING ? "" : myDefaultEncoding.name();
    }

    // An unknown charset name falls back to the system default charset.
    public void setDefaultCharsetName(@NotNull String name) {
      myDefaultEncoding = name.isEmpty() ? ChooseFileEncodingAction.NO_ENCODING
                                         : ObjectUtils.notNull(CharsetToolkit.forName(name), CharsetToolkit.getDefaultSystemCharset());
    }
  }

  private State myState = new State();
  private static final Key<Charset> CACHED_CHARSET_FROM_CONTENT = Key.create("CACHED_CHARSET_FROM_CONTENT");

  // Bounded pool on which charset re-detection requests run; shut down when this
  // component (passed as the parent Disposable) is disposed.
  private final ExecutorService changedDocumentExecutor = AppExecutorUtil.createBoundedApplicationPoolExecutor(
    "EncodingManagerImpl Document Pool", PooledThreadExecutor.INSTANCE, JobSchedulerImpl.getJobPoolParallelism(), this);

  private final AtomicBoolean myDisposed = new AtomicBoolean();

  public EncodingManagerImpl(@NotNull EditorFactory editorFactory, MessageBus messageBus) {
    messageBus.connect().subscribe(AppLifecycleListener.TOPIC, new AppLifecycleListener() {
      @Override
      public void appClosing() {
        // should call before dispose in write action
        // prevent any further re-detection and wait for the queue to clear
        myDisposed.set(true);
        clearDocumentQueue();
      }
    });
    // re-detect the charset whenever an open document's content changes ...
    editorFactory.getEventMulticaster().addDocumentListener(new DocumentListener() {
      @Override
      public void documentChanged(@NotNull DocumentEvent e) {
        Document document = e.getDocument();
        if (isEditorOpenedFor(document)) {
          queueUpdateEncodingFromContent(document);
        }
      }
    }, this);
    // ... and when an editor is first opened for it
    editorFactory.addEditorFactoryListener(new EditorFactoryListener() {
      @Override
      public void editorCreated(@NotNull EditorFactoryEvent event) {
        queueUpdateEncodingFromContent(event.getEditor().getDocument());
      }
    }, this);
  }

  private static boolean isEditorOpenedFor(@NotNull Document document) {
    VirtualFile virtualFile = FileDocumentManager.getInstance().getFile(document);
    if (virtualFile == null) return false;
    Project project = guessProject(virtualFile);
    return project != null && !project.isDisposed() && FileEditorManager.getInstance(project).isFileOpen(virtualFile);
  }

  @NonNls public static final String PROP_CACHED_ENCODING_CHANGED = "cachedEncoding";

  /**
   * Executed on the background pool: re-detects the charset from the document content
   * and caches it, looping for as long as new re-detect requests arrived while the
   * detection was in progress (tracked via {@link #addNumberOfRequestedRedetects}).
   */
  private void handleDocument(@NotNull final Document document) {
    VirtualFile virtualFile = FileDocumentManager.getInstance().getFile(document);
    if (virtualFile == null) return;
    Project project = guessProject(virtualFile);
    while(true) {
      if (project != null && project.isDisposed()) break;
      int nRequests = addNumberOfRequestedRedetects(document, 0);
      Charset charset = LoadTextUtil.charsetFromContentOrNull(project, virtualFile, document.getImmutableCharSequence());
      Charset oldCached = getCachedCharsetFromContent(document);
      if (!Comparing.equal(charset, oldCached)) {
        setCachedCharsetFromContent(charset, oldCached, document);
      }
      // more requests were queued while we were detecting -> go around once more
      if (addNumberOfRequestedRedetects(document, -nRequests) == 0) break;
    }
  }

  private void setCachedCharsetFromContent(Charset charset, Charset oldCached, @NotNull Document document) {
    document.putUserData(CACHED_CHARSET_FROM_CONTENT, charset);
    firePropertyChange(document, PROP_CACHED_ENCODING_CHANGED, oldCached, charset);
  }

  // Synchronous variant: computes (and caches) the content charset under a read action.
  @Nullable("returns null if charset set cannot be determined from content")
  Charset computeCharsetFromContent(@NotNull final VirtualFile virtualFile) {
    final Document document = FileDocumentManager.getInstance().getDocument(virtualFile);
    if (document == null) {
      return null;
    }
    Charset cached = EncodingManager.getInstance().getCachedCharsetFromContent(document);
    if (cached != null) {
      return cached;
    }

    final Project project = ProjectLocator.getInstance().guessProjectForFile(virtualFile);
    return ReadAction.compute(() -> {
      Charset charsetFromContent = LoadTextUtil.charsetFromContentOrNull(project, virtualFile, document.getImmutableCharSequence());
      if (charsetFromContent != null) {
        setCachedCharsetFromContent(charsetFromContent, null, document);
      }
      return charsetFromContent;
    });
  }

  @Override
  public void dispose() {
    myDisposed.set(true);
  }

  // stores number of re-detection requests for this document
  private static final Key<AtomicInteger> RUNNING_REDETECTS_KEY = Key.create("DETECTING_ENCODING_KEY");

  // Atomically adjusts the per-document request counter, creating it on first use.
  private static int addNumberOfRequestedRedetects(@NotNull Document document, int delta) {
    AtomicInteger oldData = document.getUserData(RUNNING_REDETECTS_KEY);
    if (oldData == null) {
      oldData = ((UserDataHolderEx)document).putUserDataIfAbsent(RUNNING_REDETECTS_KEY, new AtomicInteger());
    }
    return oldData.addAndGet(delta);
  }

  void queueUpdateEncodingFromContent(@NotNull Document document) {
    if (myDisposed.get()) return; // ignore re-detect requests on app close
    // only submit a task for the first outstanding request; handleDocument's loop
    // services any requests that pile up afterwards
    if (addNumberOfRequestedRedetects(document, 1) == 1) {
      changedDocumentExecutor.execute(new DocumentEncodingDetectRequest(document, myDisposed));
    }
  }

  // Holds the document weakly so a queued request does not keep it alive.
  private static class DocumentEncodingDetectRequest implements Runnable {
    private final Reference<Document> ref;
    @NotNull private final AtomicBoolean myDisposed;

    private DocumentEncodingDetectRequest(@NotNull Document document, @NotNull AtomicBoolean disposed) {
      ref = new WeakReference<>(document);
      myDisposed = disposed;
    }

    @Override
    public void run() {
      if (myDisposed.get()) return;
      Document document = ref.get();
      if (document == null) return; // document gced, don't bother
      ((EncodingManagerImpl)getInstance()).handleDocument(document);
    }
  }

  @Override
  @Nullable
  public Charset getCachedCharsetFromContent(@NotNull Document document) {
    return document.getUserData(CACHED_CHARSET_FROM_CONTENT);
  }

  @Override
  @NotNull
  public State getState() {
    return myState;
  }

  @Override
  public void loadState(@NotNull State state) {
    myState = state;
  }

  // Union of all open projects' favorite charsets plus the widely known ones.
  @Override
  @NotNull
  public Collection<Charset> getFavorites() {
    Collection<Charset> result = new THashSet<>();
    Project[] projects = ProjectManager.getInstance().getOpenProjects();
    for (Project project : projects) {
      result.addAll(EncodingProjectManager.getInstance(project).getFavorites());
    }
    result.addAll(EncodingProjectManagerImpl.widelyKnownCharsets());
    return result;
  }

  @Override
  @Nullable
  public Charset getEncoding(@Nullable VirtualFile virtualFile, boolean useParentDefaults) {
    Project project = guessProject(virtualFile);
    if (project == null) return null;
    EncodingProjectManager encodingManager = EncodingProjectManager.getInstance(project);
    if (encodingManager == null) return null; //tests
    return encodingManager.getEncoding(virtualFile, useParentDefaults);
  }

  public void clearDocumentQueue() {
    if (ApplicationManager.getApplication().isWriteAccessAllowed()) {
      throw new IllegalStateException("Must not call clearDocumentQueue() from under write action because some queued detectors require read action");
    }
    ((BoundedTaskExecutor)changedDocumentExecutor).clearAndCancelAll();
    // after clear and canceling all queued tasks, make sure they all are finished
    waitAllTasksExecuted(1, TimeUnit.MINUTES);
  }

  void waitAllTasksExecuted(long timeout, @NotNull TimeUnit unit) {
    try {
      ((BoundedTaskExecutor)changedDocumentExecutor).waitAllTasksExecuted(timeout, unit);
    }
    catch (Exception e) {
      LOG.error(e);
    }
  }

  @Nullable
  private static Project guessProject(@Nullable VirtualFile virtualFile) {
    return ProjectLocator.getInstance().guessProjectForFile(virtualFile);
  }

  @Override
  public void setEncoding(@Nullable VirtualFile virtualFileOrDir, @Nullable Charset charset) {
    Project project = guessProject(virtualFileOrDir);
    if (project != null) {
      EncodingProjectManager.getInstance(project).setEncoding(virtualFileOrDir, charset);
    }
  }

  @Override
  public boolean isNative2Ascii(@NotNull final VirtualFile virtualFile) {
    Project project = guessProject(virtualFile);
    return project != null && EncodingProjectManager.getInstance(project).isNative2Ascii(virtualFile);
  }

  @Override
  public boolean isNative2AsciiForPropertiesFiles() {
    Project project = guessProject(null);
    return project != null && EncodingProjectManager.getInstance(project).isNative2AsciiForPropertiesFiles();
  }

  @Override
  public void setNative2AsciiForPropertiesFiles(final VirtualFile virtualFile, final boolean native2Ascii) {
    Project project = guessProject(virtualFile);
    if (project == null) return;
    EncodingProjectManager.getInstance(project).setNative2AsciiForPropertiesFiles(virtualFile, native2Ascii);
  }

  // NO_ENCODING means "no explicit default": fall back to the system default charset.
  @Override
  @NotNull
  public Charset getDefaultCharset() {
    return myState.myDefaultEncoding == ChooseFileEncodingAction.NO_ENCODING ? CharsetToolkit.getDefaultSystemCharset() : myState.myDefaultEncoding;
  }

  @Override
  @NotNull
  public String getDefaultCharsetName() {
    return myState.getDefaultCharsetName();
  }

  @Override
  public void setDefaultCharsetName(@NotNull String name) {
    myState.setDefaultCharsetName(name);
  }

  @Override
  @Nullable
  public Charset getDefaultCharsetForPropertiesFiles(@Nullable final VirtualFile virtualFile) {
    Project project = guessProject(virtualFile);
    if (project == null) return null;
    return EncodingProjectManager.getInstance(project).getDefaultCharsetForPropertiesFiles(virtualFile);
  }

  @Override
  public void setDefaultCharsetForPropertiesFiles(@Nullable final VirtualFile virtualFile, final Charset charset) {
    Project project = guessProject(virtualFile);
    if (project == null) return;
    EncodingProjectManager.getInstance(project).setDefaultCharsetForPropertiesFiles(virtualFile, charset);
  }

  @Override
  public void addPropertyChangeListener(@NotNull final PropertyChangeListener listener, @NotNull Disposable parentDisposable) {
    myPropertyChangeSupport.addPropertyChangeListener(listener);
    // auto-unsubscribe when the parent disposable goes away
    Disposer.register(parentDisposable, () -> removePropertyChangeListener(listener));
  }

  private void removePropertyChangeListener(@NotNull PropertyChangeListener listener){
    myPropertyChangeSupport.removePropertyChangeListener(listener);
  }

  // Fires with the document as the event source (or this manager when document is null).
  void firePropertyChange(@Nullable Document document, @NotNull String propertyName, final Object oldValue, final Object newValue) {
    Object source = document == null ? this : document;
    myPropertyChangeSupport.firePropertyChange(new PropertyChangeEvent(source, propertyName, oldValue, newValue));
  }
}
/* * Copyright (C) 2011 Everit Kft. (http://www.everit.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.everit.jetty.server.ecm.internal; import javax.servlet.http.HttpSessionAttributeListener; import javax.servlet.http.HttpSessionListener; import org.eclipse.jetty.server.session.SessionHandler; import org.everit.jetty.server.SessionCacheFactory; import org.everit.jetty.server.SessionHandlerFactory; import org.everit.jetty.server.ecm.SessionHandlerFactoryConstants; import org.everit.osgi.ecm.annotation.Component; import org.everit.osgi.ecm.annotation.ConfigurationPolicy; import org.everit.osgi.ecm.annotation.Service; import org.everit.osgi.ecm.annotation.ServiceRef; import org.everit.osgi.ecm.annotation.attribute.BooleanAttribute; import org.everit.osgi.ecm.annotation.attribute.IntegerAttribute; import org.everit.osgi.ecm.annotation.attribute.StringAttribute; import org.everit.osgi.ecm.annotation.attribute.StringAttributes; import org.everit.osgi.ecm.extender.ExtendComponent; import org.osgi.framework.Constants; /** * Configurable component that creates a {@link SessionHandler} instance. 
*/ @ExtendComponent @Component(componentId = SessionHandlerFactoryConstants.SERVICE_FACTORY_PID, configurationPolicy = ConfigurationPolicy.FACTORY, label = "Everit Jetty SessionHandler Factory") @StringAttributes({ @StringAttribute(attributeId = Constants.SERVICE_DESCRIPTION, optional = true, priority = SessionHandlerFactoryAttributePriority.P01_SERVICE_DESCRIPTION, label = "Service description", description = "Optional description for SessionHandler Factory service.") }) @Service public class SessionHandlerFactoryComponent implements SessionHandlerFactory { private boolean checkingRemoteSessionIdEncoding; private String cookieName; private boolean httpOnly; private int maxInactiveInterval; private boolean nodeIdInSessionId; private int refreshCookieAge; private boolean secureRequestOnly; private HttpSessionAttributeListener[] sessionAttributeListeners; private SessionCacheFactory sessionCacheFactory; private String sessionIdParameterName; private HttpSessionListener[] sessionListeners; private boolean usingCookies; @Override public synchronized SessionHandler createSessionHandler() { SessionHandler sessionHandler = new SessionHandler(); sessionHandler.setCheckingRemoteSessionIdEncoding(this.checkingRemoteSessionIdEncoding); sessionHandler.setHttpOnly(this.httpOnly); sessionHandler.setMaxInactiveInterval(this.maxInactiveInterval); sessionHandler.setNodeIdInSessionId(this.nodeIdInSessionId); sessionHandler.setRefreshCookieAge(this.refreshCookieAge); sessionHandler.setSecureRequestOnly(this.secureRequestOnly); if (this.sessionCacheFactory != null) { sessionHandler.setSessionCache(this.sessionCacheFactory.createSessionCache(sessionHandler)); } sessionHandler.setSessionCookie(this.cookieName); // TODO sessionHandler.setSessionIdManager(metaManager); sessionHandler.setSessionIdPathParameterName(this.sessionIdParameterName); sessionHandler.setUsingCookies(this.usingCookies); sessionHandler.clearEventListeners(); if (this.sessionAttributeListeners != null) { for 
(HttpSessionAttributeListener sessionAttributeListener : this.sessionAttributeListeners) { sessionHandler.addEventListener(sessionAttributeListener); } } if (this.sessionListeners != null) { for (HttpSessionListener sessionListener : this.sessionListeners) { sessionHandler.addEventListener(sessionListener); } } // TODO handle more listener types return sessionHandler; } @BooleanAttribute( attributeId = SessionHandlerFactoryConstants.ATTR_CHECKING_REMOTE_SESSION_ID_ENCODING, defaultValue = SessionHandlerFactoryConstants.DEFAULT_CHECKING_REMOTE_SESSION_ID_ENCODING, priority = SessionHandlerFactoryAttributePriority.P12_CHECKING_REMOTE_SESSION_ID_ENCODING, label = "Checking remote session id encoding", description = "True if absolute URLs are check for remoteness before being session encoded.") public void setCheckingRemoteSessionIdEncoding(final boolean checkingRemoteSessionIdEncoding) { this.checkingRemoteSessionIdEncoding = checkingRemoteSessionIdEncoding; } @StringAttribute(attributeId = SessionHandlerFactoryConstants.ATTR_COOKIE_NAME, defaultValue = SessionHandler.__DefaultSessionCookie, priority = SessionHandlerFactoryAttributePriority.P08_COOKIE_NAME, label = "Cookie name", description = "The name of the cookie.") public void setCookieName(final String cookieName) { this.cookieName = cookieName; } @BooleanAttribute(attributeId = SessionHandlerFactoryConstants.ATTR_HTTP_ONLY, defaultValue = SessionHandlerFactoryConstants.DEFAULT_HTTP_ONLY, priority = SessionHandlerFactoryAttributePriority.P16_HTTP_ONLY, label = "HTTP only", description = "") public void setHttpOnly(final boolean httpOnly) { this.httpOnly = httpOnly; } /** * Sets the session-timeout on the component and on all referenced session managers. 
*/ @IntegerAttribute(attributeId = SessionHandlerFactoryConstants.ATTR_MAX_INACTIVE_INTERVAL, defaultValue = SessionHandlerFactoryConstants.DEFAULT_MAX_INACTIVE_INTERVAL, dynamic = true, priority = SessionHandlerFactoryAttributePriority.P02_MAX_INACTIVE_INTERVAL, label = "Max inactive interval", description = "The max period of inactivity, after which the session is invalidated, " + "in seconds.") public synchronized void setMaxInactiveInterval(final int maxInactiveInterval) { this.maxInactiveInterval = maxInactiveInterval; } @BooleanAttribute(attributeId = SessionHandlerFactoryConstants.ATTR_NODE_IN_SESSION_ID, defaultValue = SessionHandlerFactoryConstants.DEFAULT_NODE_IN_SESSION_ID, priority = SessionHandlerFactoryAttributePriority.P18_NODE_IN_SESSION_ID, label = "Node in session id", description = "Wether the cluster node id (worker id) will be returned as part of the " + "session id by HttpSession.getId() or not.") public void setNodeIdInSessionId(final boolean nodeIdInSessionId) { this.nodeIdInSessionId = nodeIdInSessionId; } @IntegerAttribute(attributeId = SessionHandlerFactoryConstants.ATTR_REFRESH_COOKIE_AGE, defaultValue = SessionHandlerFactoryConstants.DEFAULT_REFRESH_COOKIE_AGE, priority = SessionHandlerFactoryAttributePriority.P19_REFRESH_COOKIE_AGE, label = "Refresh cookie age", description = "Time before a session cookie is re-set in seconds.") public void setRefreshCookieAge(final int refreshCookieAge) { this.refreshCookieAge = refreshCookieAge; } @BooleanAttribute(attributeId = SessionHandlerFactoryConstants.ATTR_SECURE_REQUEST_ONLY, defaultValue = SessionHandlerFactoryConstants.DEFAULT_SECURE_REQUEST_ONLY, priority = SessionHandlerFactoryAttributePriority.P07_SECURE_REQUEST_ONLY, label = "Secure request only", description = "HTTPS request. 
Can be overridden by setting " + "SessionCookieConfig.setSecure(true), in which case the session cookie will be marked " + "as secure on both HTTPS and HTTP.") public void setSecureRequestOnly(final boolean secureRequestOnly) { this.secureRequestOnly = secureRequestOnly; } @ServiceRef(referenceId = SessionHandlerFactoryConstants.ATTR_SESSION_ATTRIBUTE_LISTENERS, optional = true, attributePriority = SessionHandlerFactoryAttributePriority.P06_SESSION_ATTRIBUTE_LISTENERS, // CS_DISABLE_LINE_LENGTH label = "Session attribute listeners (target)", description = "Zero or more filter expression for HttpSessionAttributeListener services") public void setSessionAttributeListeners( final HttpSessionAttributeListener[] sessionAttributeListeners) { this.sessionAttributeListeners = sessionAttributeListeners; } @ServiceRef(referenceId = SessionHandlerFactoryConstants.ATTR_SESSION_CACHE_FACTORY, optional = true, attributePriority = SessionHandlerFactoryAttributePriority.P04_SESSION_CACHE_FACTORY, label = "Session cache Factory (target)", description = "Filter expression for Session cache factory services.") public void setSessionCacheFactory(SessionCacheFactory sessionCacheFactory) { this.sessionCacheFactory = sessionCacheFactory; } @StringAttribute(attributeId = SessionHandlerFactoryConstants.SESSION_ID_PARAMETER_NAME, defaultValue = SessionHandler.__DefaultSessionIdPathParameterName, priority = SessionHandlerFactoryAttributePriority.P10_SESSION_ID_PARAMETER_NAME, label = "Session id parameter name", description = "The URL path parameter name for session id URL rewriting " + "(\"none\" for no rewriting).") public void setSessionIdParameterName(final String sessionIdParameterName) { this.sessionIdParameterName = sessionIdParameterName; } @ServiceRef(referenceId = SessionHandlerFactoryConstants.ATTR_SESSION_LISTENERS, optional = true, attributePriority = SessionHandlerFactoryAttributePriority.P05_SESSION_LISTENERS, label = "Session listeners (target)", description = "Zero or more 
filter expression for HttpSessionListener services.") public void setSessionListeners(final HttpSessionListener[] sessionListeners) { this.sessionListeners = sessionListeners; } @BooleanAttribute(attributeId = SessionHandlerFactoryConstants.ATTR_USING_COOKIES, defaultValue = SessionHandlerFactoryConstants.DEFAULT_USING_COOKIES, priority = SessionHandlerFactoryAttributePriority.P09_USING_COOKIES, label = "Using cookies") public void setUsingCookies(final boolean usingCookies) { this.usingCookies = usingCookies; } }
package extra.inpro.synthesis.visual; import java.awt.Color; import java.awt.Dimension; import java.awt.Graphics; import java.awt.Image; import java.awt.Point; import java.awt.Rectangle; import java.awt.Toolkit; import java.awt.event.ActionEvent; import java.awt.event.MouseEvent; import java.util.Arrays; import java.util.List; import javax.swing.Action; import javax.swing.JComponent; import javax.swing.JFrame; import javax.swing.JMenuItem; import javax.swing.SwingUtilities; /** * A MenuItem that shows the relevant parts of an IPA table and returns the * selected value via a registered Action (segmentAction) * @author timo */ @SuppressWarnings("serial") public class SegmentSelector extends JMenuItem { private static final int HOTSPOT_SIZE = 10; Dimension size = new Dimension(522, 200); Image vowelImage = Toolkit.getDefaultToolkit().createImage(SegmentSelector.class.getResource("vowels-small.png")); List<StringHotSpot> vowelHotSpots = Arrays.<StringHotSpot>asList( new StringHotSpot("i:", 6, 7), new StringHotSpot("y:", 27, 9), new StringHotSpot("I", 56, 21), new StringHotSpot("Y", 79, 21), new StringHotSpot("e:", 31, 48), new StringHotSpot("2:", 55, 48), new StringHotSpot("E", 60, 88), new StringHotSpot("9", 84, 88), new StringHotSpot("E:", 73, 108), new StringHotSpot("a:", 88, 127), new StringHotSpot("a", 161, 127), new StringHotSpot("6", 131, 108), new StringHotSpot("@", 119, 76), new StringHotSpot("aI", 105, 91), new StringHotSpot("aU", 151, 87), new StringHotSpot("OY", 114, 43), new StringHotSpot("u:", 183, 7), new StringHotSpot("U", 160, 21), new StringHotSpot("o:", 183, 48), new StringHotSpot("O", 183, 88) ); Point vowelImageOffset = new Point(15, 55); Image consonantImage = Toolkit.getDefaultToolkit().createImage(SegmentSelector.class.getResource("consonants-small.png")); List<StringHotSpot> consonantHotSpots = Arrays.<StringHotSpot>asList( new StringHotSpot("m", 33, 15), new StringHotSpot("n", 115, 15), new StringHotSpot("N", 238, 15), new StringHotSpot("p", 
12, 42), new StringHotSpot("b", 33, 41), new StringHotSpot("t", 94, 41), new StringHotSpot("d", 115, 41), new StringHotSpot("k", 217, 41), new StringHotSpot("g", 238, 41), new StringHotSpot("f", 53, 67), new StringHotSpot("v", 73, 69), new StringHotSpot("s", 94, 69), new StringHotSpot("z", 115, 69), new StringHotSpot("S", 136, 69), new StringHotSpot("Z", 156, 69), new StringHotSpot("C", 177, 69), new StringHotSpot("x", 218, 69), new StringHotSpot("h", 260, 69), new StringHotSpot("pf", 56, 93), new StringHotSpot("ts", 95, 93), new StringHotSpot("tS", 136, 93), new StringHotSpot("l", 115, 119), new StringHotSpot("j", 196, 121), new StringHotSpot("R", 238, 119) ); Point consonantImageOffset = new Point(215, 55); Rectangle silenceRect; String mostRecentSound = null; private final Action segmentAction; public SegmentSelector(Action segmentAction) { this.segmentAction = segmentAction; } public SegmentSelector() { this(null); } @Override protected void processMouseEvent(MouseEvent e) { super.processMouseEvent(e); if (e.getID() == MouseEvent.MOUSE_RELEASED) { mostRecentSound = getSoundAt(e.getPoint()); if (segmentAction != null) { ActionEvent event = new ActionEvent(this, 0, mostRecentSound); segmentAction.actionPerformed(event); } } } private String getSoundAt(Point p) { if (inImageArea(p, vowelImageOffset, vowelImage)) { return getHotSpotAt(p, vowelImageOffset, vowelHotSpots); } if (inImageArea(p, consonantImageOffset, consonantImage)) { return getHotSpotAt(p, consonantImageOffset, consonantHotSpots); } if (silenceRect.contains(p)) { return "_"; } return null; } private boolean inImageArea(Point p, Point imageOffset, Image image) { return p.x >= imageOffset.x && p.x <= imageOffset.x + image.getWidth(null) && p.y >= imageOffset.y && p.y <= imageOffset.y + image.getHeight(null); } private String getHotSpotAt(Point p, Point imageOffset, List<StringHotSpot> hotspots) { Point relativePosition = new Point(p.x - imageOffset.x, p.y - imageOffset.y); for (StringHotSpot hs : 
hotspots) { if (hs.matches(relativePosition)) { return hs.getItem(); } } return null; } @Override public Dimension getPreferredSize() { return size; } private void printCentered(Graphics g, String str, int x, int y) { int width = g.getFontMetrics().stringWidth(str); g.drawString(str, x - width / 2, y); } private void drawSilenceSpot(Graphics g) { g.setColor(Color.WHITE); String cmd = "Stille"; int width = g.getFontMetrics().stringWidth(cmd); int height = g.getFontMetrics().getHeight(); silenceRect = new Rectangle(200 - 1, 20 - height, width + 1, height + 4); g.fillRect(silenceRect.x, silenceRect.y, silenceRect.width, silenceRect.height); g.setColor(Color.BLACK); g.drawString(cmd, 200, 20); } @Override protected void paintComponent(Graphics g) { super.paintComponent(g); if (segmentAction != null) { String name = segmentAction.getValue(Action.NAME).toString(); g.drawString(name, 0, 20); } printCentered(g, "Vokale", 110, 48); printCentered(g, "Konsonanten", 343, 48); drawSilenceSpot(g); if (!g.drawImage(vowelImage, vowelImageOffset.x, vowelImageOffset.y, null)) repaint(); if (!g.drawImage(consonantImage, consonantImageOffset.x, consonantImageOffset.y, null)) repaint(); } /** a hostspot with a fixed size, containing a string */ class StringHotSpot extends HotSpot<String> { StringHotSpot(String item, int x, int y) { super(new Point(x, y), item, HOTSPOT_SIZE); } } private static void createAndShowGUI() { JFrame frame = new JFrame("SegmentSelector Test"); frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); // add our object JComponent mainPanel = new SegmentSelector(); frame.setContentPane(mainPanel); // Display the window. frame.pack(); frame.setVisible(true); } public static void main(String[] args) { SwingUtilities.invokeLater(new Runnable() { public void run() { createAndShowGUI(); } }); } }
/* * The MIT License (MIT) * * Copyright (c) Despector <https://despector.voxelgenesis.com> * Copyright (c) contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ package org.spongepowered.despector.ast.stmt.branch; import static com.google.common.base.Preconditions.checkNotNull; import com.google.common.collect.Lists; import org.spongepowered.despector.ast.AstVisitor; import org.spongepowered.despector.ast.Locals.LocalInstance; import org.spongepowered.despector.ast.stmt.Statement; import org.spongepowered.despector.ast.stmt.StatementBlock; import org.spongepowered.despector.ast.stmt.StatementVisitor; import org.spongepowered.despector.util.serialization.AstSerializer; import org.spongepowered.despector.util.serialization.MessagePacker; import java.io.IOException; import java.util.List; import javax.annotation.Nullable; /** * A try-catch block. 
*/
public class TryCatch implements Statement {

    private StatementBlock block;
    final List<CatchBlock> catch_blocks = Lists.newArrayList();

    public TryCatch(StatementBlock block) {
        this.block = checkNotNull(block, "block");
    }

    /**
     * Gets the body of the try block.
     */
    public StatementBlock getTryBlock() {
        return this.block;
    }

    /**
     * Sets the body of the try block.
     */
    public void setBlock(StatementBlock block) {
        this.block = checkNotNull(block, "block");
    }

    /**
     * Gets all attached catch blocks, in the order they were created.
     */
    public List<CatchBlock> getCatchBlocks() {
        return this.catch_blocks;
    }

    @Override
    public void accept(AstVisitor visitor) {
        // Visit this node first, then descend into the try body and each
        // catch block, matching the serialization order in writeTo.
        if (visitor instanceof StatementVisitor) {
            ((StatementVisitor) visitor).visitTryCatch(this);
        }
        for (Statement stmt : this.block.getStatements()) {
            stmt.accept(visitor);
        }
        for (CatchBlock catch_block : this.catch_blocks) {
            catch_block.accept(visitor);
        }
    }

    @Override
    public void writeTo(MessagePacker pack) throws IOException {
        // Serialized as a map of {id, body, catch}; each catch entry is a map
        // of {exceptions, block, local|dummy_name}.
        pack.startMap(3);
        pack.writeString("id").writeInt(AstSerializer.STATEMENT_ID_TRY_CATCH);
        pack.writeString("body");
        pack.startArray(this.block.getStatementCount());
        for (Statement stmt : this.block.getStatements()) {
            stmt.writeTo(pack);
        }
        pack.endArray();
        pack.writeString("catch").startArray(this.catch_blocks.size());
        for (CatchBlock cat : this.catch_blocks) {
            pack.startMap(3);
            pack.writeString("exceptions").startArray(cat.getExceptions().size());
            for (String ex : cat.getExceptions()) {
                pack.writeString(ex);
            }
            pack.endArray();
            pack.writeString("block");
            pack.startArray(cat.getBlock().getStatementCount());
            for (Statement stmt : cat.getBlock().getStatements()) {
                stmt.writeTo(pack);
            }
            pack.endArray();
            if (cat.getExceptionLocal() != null) {
                pack.writeString("local");
                cat.getExceptionLocal().writeToSimple(pack);
            } else {
                pack.writeString("dummy_name").writeString(cat.getDummyName());
            }
            pack.endMap();
        }
        pack.endArray();
        pack.endMap();
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        if (!(obj instanceof TryCatch)) {
            return false;
        }
        TryCatch insn = (TryCatch) obj;
        return this.block.equals(insn.block)
                && this.catch_blocks.equals(insn.catch_blocks);
    }

    @Override
    public int hashCode() {
        int h = 1;
        h = h * 37 + this.block.hashCode();
        for (CatchBlock cat : this.catch_blocks) {
            h = h * 37 + cat.hashCode();
        }
        return h;
    }

    /**
     * Null-safe equality check. Needed because a {@link CatchBlock} always has
     * exactly one of its exception local / dummy name set to null.
     */
    private static boolean equalsNullable(Object a, Object b) {
        return a == null ? b == null : a.equals(b);
    }

    /**
     * Null-safe hash code; null hashes to 0.
     */
    private static int hashNullable(Object o) {
        return o == null ? 0 : o.hashCode();
    }

    /**
     * A catch block. Note: constructing a catch block automatically attaches
     * it to the enclosing {@link TryCatch} (it is a non-static inner class).
     */
    public class CatchBlock {

        private final List<String> exceptions;
        private StatementBlock block;
        // Invariant: exactly one of exception_local / dummy_name is non-null.
        private LocalInstance exception_local;
        private String dummy_name;

        public CatchBlock(LocalInstance exception_local, List<String> ex, StatementBlock block) {
            this.exception_local = checkNotNull(exception_local, "local");
            this.dummy_name = null;
            this.exceptions = ex;
            this.block = block;
            TryCatch.this.catch_blocks.add(this);
        }

        public CatchBlock(String dummy_name, List<String> ex, StatementBlock block) {
            this.exception_local = null;
            this.dummy_name = checkNotNull(dummy_name, "name");
            this.exceptions = ex;
            this.block = block;
            TryCatch.this.catch_blocks.add(this);
        }

        /**
         * Gets the local that the exception is placed into, or null if only a
         * dummy name is available.
         */
        @Nullable
        public LocalInstance getExceptionLocal() {
            return this.exception_local;
        }

        /**
         * Sets the local that the exception is placed into. May only be set to
         * null if a dummy name is present.
         */
        public void setExceptionLocal(@Nullable LocalInstance local) {
            if (local == null && this.dummy_name == null) {
                throw new IllegalStateException("Cannot have both a null exception local and dummy name in catch block.");
            }
            this.exception_local = local;
        }

        /**
         * Gets the dummy name for this variable. This name is ignored if the
         * {@link #getExceptionLocal()} is not null.
         */
        public String getDummyName() {
            if (this.exception_local != null) {
                return this.exception_local.getName();
            }
            return this.dummy_name;
        }

        /**
         * Sets the dummy name for this variable. This name is ignored if the
         * {@link #getExceptionLocal()} is not null. May only be set to null if
         * an exception local is present.
         */
        public void setDummyName(String name) {
            if (name == null && this.exception_local == null) {
                throw new IllegalStateException("Cannot have both a null exception local and dummy name in catch block.");
            }
            this.dummy_name = name;
        }

        /**
         * Gets all exceptions that this catch block is catching.
         */
        public List<String> getExceptions() {
            return this.exceptions;
        }

        /**
         * Gets the body of this catch block.
         */
        public StatementBlock getBlock() {
            return this.block;
        }

        /**
         * Sets the body of this catch block.
         */
        public void setBlock(StatementBlock block) {
            this.block = checkNotNull(block, "block");
        }

        /**
         * Accepts the given visitor.
         */
        public void accept(AstVisitor visitor) {
            if (visitor instanceof StatementVisitor) {
                ((StatementVisitor) visitor).visitCatchBlock(this);
            }
            for (Statement stmt : this.block.getStatements()) {
                stmt.accept(visitor);
            }
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == this) {
                return true;
            }
            if (!(obj instanceof CatchBlock)) {
                return false;
            }
            CatchBlock insn = (CatchBlock) obj;
            // BUG FIX: exactly one of exception_local / dummy_name is always
            // null (enforced by the constructors), so the previous direct
            // .equals() calls threw NullPointerException for every instance.
            return equalsNullable(this.exception_local, insn.exception_local)
                    && this.exceptions.equals(insn.exceptions)
                    && this.block.equals(insn.block)
                    && equalsNullable(this.dummy_name, insn.dummy_name);
        }

        @Override
        public int hashCode() {
            // BUG FIX: null-safe hashing for the same reason as equals().
            int h = 1;
            h = h * 37 + hashNullable(this.exception_local);
            h = h * 37 + this.exceptions.hashCode();
            h = h * 37 + this.block.hashCode();
            h = h * 37 + hashNullable(this.dummy_name);
            return h;
        }
    }
}
/*
 * Copyright 2000-2016 Vaadin Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package com.vaadin.v7.ui;

import java.io.OutputStream;
import java.io.Serializable;
import java.lang.reflect.Method;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.Map;

import com.vaadin.server.NoInputStreamException;
import com.vaadin.server.NoOutputStreamException;
import com.vaadin.server.PaintException;
import com.vaadin.server.PaintTarget;
import com.vaadin.server.StreamVariable.StreamingProgressEvent;
import com.vaadin.shared.EventId;
import com.vaadin.ui.Component;
import com.vaadin.ui.LegacyComponent;
import com.vaadin.util.ReflectTools;
import com.vaadin.v7.shared.ui.upload.UploadClientRpc;
import com.vaadin.v7.shared.ui.upload.UploadServerRpc;
import com.vaadin.v7.shared.ui.upload.UploadState;

/**
 * Component for uploading files from client to server.
 *
 * <p>
 * The visible component consists of a file name input box and a browse button
 * and an upload submit button to start uploading.
 *
 * <p>
 * The Upload component needs a java.io.OutputStream to write the uploaded data.
 * You need to implement the Upload.Receiver interface and return the output
 * stream in the receiveUpload() method.
 *
 * <p>
 * You can get an event regarding starting (StartedEvent), progress
 * (ProgressEvent), and finishing (FinishedEvent) of upload by implementing
 * StartedListener, ProgressListener, and FinishedListener, respectively. The
 * FinishedListener is called for both failed and succeeded uploads. If you wish
 * to separate between these two cases, you can use SucceededListener
 * (SucceededEvent) and FailedListener (FailedEvent).
 *
 * <p>
 * The upload component does not itself show upload progress, but you can use
 * the ProgressIndicator for providing progress feedback by implementing
 * ProgressListener and updating the indicator in updateProgress().
 *
 * <p>
 * Setting upload component immediate initiates the upload as soon as a file is
 * selected, instead of the common pattern of file selection field and upload
 * button.
 *
 * <p>
 * Note! Because of browser dependent implementations of <input type="file">
 * element, setting size for Upload component is not supported. For some
 * browsers setting size may work to some extent.
 *
 * @author Vaadin Ltd.
 * @since 3.0
 *
 * @deprecated As of 8.0 replaced by {@link com.vaadin.ui.Upload} that is by
 *             default in immediate mode.
 */
@SuppressWarnings("serial")
@Deprecated
public class Upload extends AbstractLegacyComponent
        implements Component.Focusable, LegacyComponent {

    /**
     * Should the field be focused on next repaint?
     */
    private final boolean focus = false;

    /**
     * The tab order number of this field.
     */
    private int tabIndex = 0;

    /**
     * The output of the upload is redirected to this receiver.
     */
    private Receiver receiver;

    // True while a stream is actively being received; guards against double
    // uploads (see startUpload()/endUpload()).
    private boolean isUploading;

    // Size of the file currently being uploaded; -1 when no upload is active.
    private long contentLength = -1;

    // NOTE(review): never assigned after initialization anywhere in this
    // class (and it is private), so getBytesRead() always returns 0 —
    // confirm whether this field is dead or should be updated in onProgress.
    private int totalBytes;

    private String buttonCaption = "Upload";

    /**
     * ProgressListeners to which information about progress is sent during
     * upload
     */
    private LinkedHashSet<ProgressListener> progressListeners;

    // Set by interruptUpload(); polled by the stream variable's
    // isInterrupted() from the receiving thread.
    private boolean interrupted = false;

    private boolean notStarted;

    // Incremented per upload; used to correlate client "pollForStart"
    // requests with the current upload attempt.
    private int nextid;

    /**
     * Creates a new instance of Upload.
     *
     * The receiver must be set before performing an upload.
     */
    public Upload() {
        registerRpc(new UploadServerRpc() {
            @Override
            public void change(String filename) {
                fireEvent(new ChangeEvent(Upload.this, filename));
            }

            @Override
            public void poll() {
                // Nothing to do, called only to visit the server
            }
        });
    }

    public Upload(String caption, Receiver uploadReceiver) {
        this();
        setCaption(caption);
        receiver = uploadReceiver;
    }

    /**
     * Invoked when the value of a variable has changed.
     *
     * @see com.vaadin.ui.AbstractComponent#changeVariables(java.lang.Object,
     *      java.util.Map)
     */
    @Override
    public void changeVariables(Object source, Map<String, Object> variables) {
        if (variables.containsKey("pollForStart")) {
            int id = (Integer) variables.get("pollForStart");
            if (!isUploading && id == nextid) {
                notStarted = true;
                markAsDirty();
            } else {
                // no-op: poll refers to a stale upload id or an upload that
                // has already started
            }
        }
    }

    /**
     * Paints the content of this component.
     *
     * @param target
     *            Target to paint the content on.
     * @throws PaintException
     *             if the paint operation failed.
     */
    @Override
    public void paintContent(PaintTarget target) throws PaintException {
        if (notStarted) {
            // One-shot flag: tell the client the polled upload never started,
            // then skip painting the rest of the state.
            target.addAttribute("notStarted", true);
            notStarted = false;
            return;
        }
        // The field should be focused
        if (focus) {
            target.addAttribute("focus", true);
        }

        // The tab ordering number
        if (tabIndex >= 0) {
            target.addAttribute("tabindex", tabIndex);
        }

        target.addAttribute("state", isUploading);

        if (buttonCaption != null) {
            target.addAttribute("buttoncaption", buttonCaption);
        }

        target.addAttribute("nextid", nextid);

        // Post file to this stream variable
        target.addVariable(this, "action", getStreamVariable());
    }

    /**
     * Interface that must be implemented by the upload receivers to provide the
     * Upload component an output stream to write the uploaded data.
     *
     * @author Vaadin Ltd.
     * @since 3.0
     */
    @Deprecated
    public interface Receiver extends Serializable {

        /**
         * Invoked when a new upload arrives.
         *
         * @param filename
         *            the desired filename of the upload, usually as specified
         *            by the client.
         * @param mimeType
         *            the MIME type of the uploaded file.
         * @return Stream to which the uploaded file should be written.
         */
        public OutputStream receiveUpload(String filename, String mimeType);
    }

    /* Upload events */

    private static final Method UPLOAD_FINISHED_METHOD;

    private static final Method UPLOAD_FAILED_METHOD;

    private static final Method UPLOAD_SUCCEEDED_METHOD;

    private static final Method UPLOAD_STARTED_METHOD;

    static {
        try {
            UPLOAD_FINISHED_METHOD = FinishedListener.class.getDeclaredMethod(
                    "uploadFinished", new Class[] { FinishedEvent.class });
            UPLOAD_FAILED_METHOD = FailedListener.class.getDeclaredMethod(
                    "uploadFailed", new Class[] { FailedEvent.class });
            UPLOAD_STARTED_METHOD = StartedListener.class.getDeclaredMethod(
                    "uploadStarted", new Class[] { StartedEvent.class });
            UPLOAD_SUCCEEDED_METHOD = SucceededListener.class.getDeclaredMethod(
                    "uploadSucceeded", new Class[] { SucceededEvent.class });
        } catch (final java.lang.NoSuchMethodException e) {
            // This should never happen
            // NOTE(review): the cause 'e' is dropped here; chaining it would
            // preserve the stack trace if this ever fires.
            throw new java.lang.RuntimeException(
                    "Internal error finding methods in Upload");
        }
    }

    /**
     * Upload.FinishedEvent is sent when the upload receives a file, regardless
     * of whether the reception was successful or failed. If you wish to
     * distinguish between the two cases, use either SucceededEvent or
     * FailedEvent, which are both subclasses of the FinishedEvent.
     *
     * @author Vaadin Ltd.
     * @since 3.0
     */
    @Deprecated
    public static class FinishedEvent extends Component.Event {

        /**
         * Length of the received file.
         */
        private final long length;

        /**
         * MIME type of the received file.
         */
        private final String type;

        /**
         * Received file name.
         */
        private final String filename;

        /**
         *
         * @param source
         *            the source of the file.
         * @param filename
         *            the received file name.
         * @param MIMEType
         *            the MIME type of the received file.
         * @param length
         *            the length of the received file.
         */
        public FinishedEvent(Upload source, String filename, String MIMEType,
                long length) {
            super(source);
            type = MIMEType;
            this.filename = filename;
            this.length = length;
        }

        /**
         * Upload where the event occurred.
         *
         * @return the Source of the event.
         */
        public Upload getUpload() {
            return (Upload) getSource();
        }

        /**
         * Gets the file name.
         *
         * @return the filename.
         */
        public String getFilename() {
            return filename;
        }

        /**
         * Gets the MIME Type of the file.
         *
         * @return the MIME type.
         */
        public String getMIMEType() {
            return type;
        }

        /**
         * Gets the length of the file.
         *
         * @return the length.
         */
        public long getLength() {
            return length;
        }
    }

    /**
     * Upload.FailedEvent event is sent when the upload is received, but the
     * reception is interrupted for some reason.
     *
     * @author Vaadin Ltd.
     * @since 3.0
     */
    @Deprecated
    public static class FailedEvent extends FinishedEvent {

        private Exception reason = null;

        /**
         *
         * @param source
         * @param filename
         * @param MIMEType
         * @param length
         * @param reason
         *            the exception that caused the failure
         */
        public FailedEvent(Upload source, String filename, String MIMEType,
                long length, Exception reason) {
            this(source, filename, MIMEType, length);
            this.reason = reason;
        }

        /**
         *
         * @param source
         * @param filename
         * @param MIMEType
         * @param length
         */
        public FailedEvent(Upload source, String filename, String MIMEType,
                long length) {
            super(source, filename, MIMEType, length);
        }

        /**
         * Gets the exception that caused the failure.
         *
         * @return the exception that caused the failure, null if n/a
         */
        public Exception getReason() {
            return reason;
        }
    }

    /**
     * FailedEvent that indicates that an output stream could not be obtained.
     */
    @Deprecated
    public static class NoOutputStreamEvent extends FailedEvent {

        /**
         *
         * @param source
         * @param filename
         * @param MIMEType
         * @param length
         */
        public NoOutputStreamEvent(Upload source, String filename,
                String MIMEType, long length) {
            super(source, filename, MIMEType, length);
        }
    }

    /**
     * FailedEvent that indicates that an input stream could not be obtained.
     */
    @Deprecated
    public static class NoInputStreamEvent extends FailedEvent {

        /**
         *
         * @param source
         * @param filename
         * @param MIMEType
         * @param length
         */
        public NoInputStreamEvent(Upload source, String filename,
                String MIMEType, long length) {
            super(source, filename, MIMEType, length);
        }
    }

    /**
     * Upload.SucceededEvent event is sent when the upload is received
     * successfully.
     *
     * @author Vaadin Ltd.
     * @since 3.0
     */
    @Deprecated
    public static class SucceededEvent extends FinishedEvent {

        /**
         *
         * @param source
         * @param filename
         * @param MIMEType
         * @param length
         */
        public SucceededEvent(Upload source, String filename, String MIMEType,
                long length) {
            super(source, filename, MIMEType, length);
        }
    }

    /**
     * Upload.StartedEvent event is sent when the upload starts to be received.
     *
     * @author Vaadin Ltd.
     * @since 5.0
     */
    @Deprecated
    public static class StartedEvent extends Component.Event {

        private final String filename;

        private final String type;

        /**
         * Length of the received file.
         */
        private final long length;

        /**
         *
         * @param source
         * @param filename
         * @param MIMEType
         * @param contentLength
         */
        public StartedEvent(Upload source, String filename, String MIMEType,
                long contentLength) {
            super(source);
            this.filename = filename;
            type = MIMEType;
            length = contentLength;
        }

        /**
         * Upload where the event occurred.
         *
         * @return the Source of the event.
         */
        public Upload getUpload() {
            return (Upload) getSource();
        }

        /**
         * Gets the file name.
         *
         * @return the filename.
         */
        public String getFilename() {
            return filename;
        }

        /**
         * Gets the MIME Type of the file.
         *
         * @return the MIME type.
         */
        public String getMIMEType() {
            return type;
        }

        /**
         * @return the length of the file that is being uploaded
         */
        public long getContentLength() {
            return length;
        }
    }

    /**
     * Upload.ChangeEvent event is sent when the value (filename) of the upload
     * changes.
     *
     * @since 7.2
     */
    @Deprecated
    public static class ChangeEvent extends Component.Event {

        private final String filename;

        public ChangeEvent(Upload source, String filename) {
            super(source);
            this.filename = filename;
        }

        /**
         * Upload where the event occurred.
         *
         * @return the Source of the event.
         */
        @Override
        public Upload getSource() {
            return (Upload) super.getSource();
        }

        /**
         * Gets the file name.
         *
         * @return the filename.
         */
        public String getFilename() {
            return filename;
        }
    }

    /**
     * Receives the events when the upload starts.
     *
     * @author Vaadin Ltd.
     * @since 5.0
     */
    @Deprecated
    public interface StartedListener extends Serializable {

        /**
         * Upload has started.
         *
         * @param event
         *            the Upload started event.
         */
        public void uploadStarted(StartedEvent event);
    }

    /**
     * Receives the events when the uploads are ready.
     *
     * @author Vaadin Ltd.
     * @since 3.0
     */
    @Deprecated
    public interface FinishedListener extends Serializable {

        /**
         * Upload has finished.
         *
         * @param event
         *            the Upload finished event.
         */
        public void uploadFinished(FinishedEvent event);
    }

    /**
     * Receives events when the uploads are finished, but unsuccessful.
     *
     * @author Vaadin Ltd.
     * @since 3.0
     */
    @Deprecated
    public interface FailedListener extends Serializable {

        /**
         * Upload has finished unsuccessfully.
         *
         * @param event
         *            the Upload failed event.
         */
        public void uploadFailed(FailedEvent event);
    }

    /**
     * Receives events when the uploads are successfully finished.
     *
     * @author Vaadin Ltd.
     * @since 3.0
     */
    @Deprecated
    public interface SucceededListener extends Serializable {

        /**
         * Upload successful.
         *
         * @param event
         *            the Upload successful event.
         */
        public void uploadSucceeded(SucceededEvent event);
    }

    /**
     * Listener for {@link ChangeEvent}
     *
     * @since 7.2
     */
    @Deprecated
    public interface ChangeListener extends Serializable {

        Method FILENAME_CHANGED = ReflectTools.findMethod(ChangeListener.class,
                "filenameChanged", ChangeEvent.class);

        /**
         * A file has been selected but upload has not yet started.
         *
         * @param event
         *            the change event
         */
        public void filenameChanged(ChangeEvent event);
    }

    /**
     * Adds the upload started event listener.
     *
     * @param listener
     *            the Listener to be added, not null
     */
    public void addStartedListener(StartedListener listener) {
        addListener(StartedEvent.class, listener, UPLOAD_STARTED_METHOD);
    }

    /**
     * @deprecated As of 7.0, replaced by
     *             {@link #addStartedListener(StartedListener)}
     **/
    @Deprecated
    public void addListener(StartedListener listener) {
        addStartedListener(listener);
    }

    /**
     * Removes the upload started event listener.
     *
     * @param listener
     *            the Listener to be removed.
     */
    public void removeStartedListener(StartedListener listener) {
        removeListener(StartedEvent.class, listener, UPLOAD_STARTED_METHOD);
    }

    /**
     * @deprecated As of 7.0, replaced by
     *             {@link #removeStartedListener(StartedListener)}
     **/
    @Deprecated
    public void removeListener(StartedListener listener) {
        removeStartedListener(listener);
    }

    /**
     * Adds the upload received event listener.
     *
     * @param listener
     *            the Listener to be added, not null
     */
    public void addFinishedListener(FinishedListener listener) {
        addListener(FinishedEvent.class, listener, UPLOAD_FINISHED_METHOD);
    }

    /**
     * @deprecated As of 7.0, replaced by
     *             {@link #addFinishedListener(FinishedListener)}
     **/
    @Deprecated
    public void addListener(FinishedListener listener) {
        addFinishedListener(listener);
    }

    /**
     * Removes the upload received event listener.
     *
     * @param listener
     *            the Listener to be removed.
     */
    public void removeFinishedListener(FinishedListener listener) {
        removeListener(FinishedEvent.class, listener, UPLOAD_FINISHED_METHOD);
    }

    /**
     * @deprecated As of 7.0, replaced by
     *             {@link #removeFinishedListener(FinishedListener)}
     **/
    @Deprecated
    public void removeListener(FinishedListener listener) {
        removeFinishedListener(listener);
    }

    /**
     * Adds the upload interrupted event listener.
     *
     * @param listener
     *            the Listener to be added, not null
     */
    public void addFailedListener(FailedListener listener) {
        addListener(FailedEvent.class, listener, UPLOAD_FAILED_METHOD);
    }

    /**
     * @deprecated As of 7.0, replaced by
     *             {@link #addFailedListener(FailedListener)}
     **/
    @Deprecated
    public void addListener(FailedListener listener) {
        addFailedListener(listener);
    }

    /**
     * Removes the upload interrupted event listener.
     *
     * @param listener
     *            the Listener to be removed.
     */
    public void removeFailedListener(FailedListener listener) {
        removeListener(FailedEvent.class, listener, UPLOAD_FAILED_METHOD);
    }

    /**
     * @deprecated As of 7.0, replaced by
     *             {@link #removeFailedListener(FailedListener)}
     **/
    @Deprecated
    public void removeListener(FailedListener listener) {
        removeFailedListener(listener);
    }

    /**
     * Adds the upload success event listener.
     *
     * @param listener
     *            the Listener to be added, not null
     */
    public void addSucceededListener(SucceededListener listener) {
        addListener(SucceededEvent.class, listener, UPLOAD_SUCCEEDED_METHOD);
    }

    /**
     * @deprecated As of 7.0, replaced by
     *             {@link #addSucceededListener(SucceededListener)}
     **/
    @Deprecated
    public void addListener(SucceededListener listener) {
        addSucceededListener(listener);
    }

    /**
     * Removes the upload success event listener.
     *
     * @param listener
     *            the Listener to be removed.
     */
    public void removeSucceededListener(SucceededListener listener) {
        removeListener(SucceededEvent.class, listener, UPLOAD_SUCCEEDED_METHOD);
    }

    /**
     * @deprecated As of 7.0, replaced by
     *             {@link #removeSucceededListener(SucceededListener)}
     **/
    @Deprecated
    public void removeListener(SucceededListener listener) {
        removeSucceededListener(listener);
    }

    /**
     * Adds the upload progress event listener.
     *
     * @param listener
     *            the progress listener to be added
     */
    public void addProgressListener(ProgressListener listener) {
        // Lazily created; progress listeners are dispatched directly (see
        // fireUpdateProgress) instead of through the event router.
        if (progressListeners == null) {
            progressListeners = new LinkedHashSet<ProgressListener>();
        }
        progressListeners.add(listener);
    }

    /**
     * @deprecated As of 7.0, replaced by
     *             {@link #addProgressListener(ProgressListener)}
     **/
    @Deprecated
    public void addListener(ProgressListener listener) {
        addProgressListener(listener);
    }

    /**
     * Removes the upload progress event listener.
     *
     * @param listener
     *            the progress listener to be removed
     */
    public void removeProgressListener(ProgressListener listener) {
        if (progressListeners != null) {
            progressListeners.remove(listener);
        }
    }

    /**
     * Adds a filename change event listener
     *
     * @param listener
     *            the Listener to add, not null
     */
    public void addChangeListener(ChangeListener listener) {
        super.addListener(EventId.CHANGE, ChangeEvent.class, listener,
                ChangeListener.FILENAME_CHANGED);
    }

    /**
     * Removes a filename change event listener
     *
     * @param listener
     *            the listener to be removed
     */
    public void removeChangeListener(ChangeListener listener) {
        super.removeListener(EventId.CHANGE, ChangeEvent.class, listener);
    }

    /**
     * @deprecated As of 7.0, replaced by
     *             {@link #removeProgressListener(ProgressListener)}
     **/
    @Deprecated
    public void removeListener(ProgressListener listener) {
        removeProgressListener(listener);
    }

    /**
     * Emits the upload started event. The content length is taken from the
     * {@link #contentLength} field set when streaming began.
     *
     * @param filename
     * @param MIMEType
     */
    protected void fireStarted(String filename, String MIMEType) {
        fireEvent(new Upload.StartedEvent(this, filename, MIMEType,
                contentLength));
    }

    /**
     * Emits the upload failed event.
     *
     * @param filename
     * @param MIMEType
     * @param length
     */
    protected void fireUploadInterrupted(String filename, String MIMEType,
            long length) {
        fireEvent(new Upload.FailedEvent(this, filename, MIMEType, length));
    }

    protected void fireNoInputStream(String filename, String MIMEType,
            long length) {
        fireEvent(new Upload.NoInputStreamEvent(this, filename, MIMEType,
                length));
    }

    protected void fireNoOutputStream(String filename, String MIMEType,
            long length) {
        fireEvent(new Upload.NoOutputStreamEvent(this, filename, MIMEType,
                length));
    }

    protected void fireUploadInterrupted(String filename, String MIMEType,
            long length, Exception e) {
        fireEvent(new Upload.FailedEvent(this, filename, MIMEType, length, e));
    }

    /**
     * Emits the upload success event.
     *
     * @param filename
     * @param MIMEType
     * @param length
     *
     */
    protected void fireUploadSuccess(String filename, String MIMEType,
            long length) {
        fireEvent(new Upload.SucceededEvent(this, filename, MIMEType, length));
    }

    /**
     * Emits the progress event.
     *
     * @param totalBytes
     *            bytes received so far
     * @param contentLength
     *            actual size of the file being uploaded, if known
     *
     */
    protected void fireUpdateProgress(long totalBytes, long contentLength) {
        // this is implemented differently than other listeners to maintain
        // backwards compatibility
        if (progressListeners != null) {
            for (Iterator<ProgressListener> it = progressListeners
                    .iterator(); it.hasNext();) {
                ProgressListener l = it.next();
                l.updateProgress(totalBytes, contentLength);
            }
        }
    }

    /**
     * Returns the current receiver.
     *
     * @return the StreamVariable.
     */
    public Receiver getReceiver() {
        return receiver;
    }

    /**
     * Sets the receiver.
     *
     * @param receiver
     *            the receiver to set.
     */
    public void setReceiver(Receiver receiver) {
        this.receiver = receiver;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void focus() {
        super.focus();
    }

    /**
     * Gets the Tabulator index of this Focusable component.
     *
     * @see com.vaadin.ui.Component.Focusable#getTabIndex()
     */
    @Override
    public int getTabIndex() {
        return tabIndex;
    }

    /**
     * Sets the Tabulator index of this Focusable component.
     *
     * @see com.vaadin.ui.Component.Focusable#setTabIndex(int)
     */
    @Override
    public void setTabIndex(int tabIndex) {
        this.tabIndex = tabIndex;
    }

    /**
     * Go into upload state. This is to prevent double uploading on same
     * component.
     *
     * Warning: this is an internal method used by the framework and should not
     * be used by user of the Upload component. Using it results in the Upload
     * component going in wrong state and not working. It is currently public
     * because it is used by another class.
     */
    public void startUpload() {
        if (isUploading) {
            throw new IllegalStateException("uploading already started");
        }
        isUploading = true;
        nextid++;
    }

    /**
     * Interrupts the upload currently being received. The interruption will be
     * done by the receiving thread so this method will return immediately and
     * the actual interrupt will happen a bit later.
     */
    public void interruptUpload() {
        if (isUploading) {
            interrupted = true;
        }
    }

    /**
     * Go into state where new uploading can begin.
     *
     * Warning: this is an internal method used by the framework and should not
     * be used by user of the Upload component.
     */
    private void endUpload() {
        isUploading = false;
        contentLength = -1;
        interrupted = false;
        markAsDirty();
    }

    public boolean isUploading() {
        return isUploading;
    }

    /**
     * Gets read bytes of the file currently being uploaded.
     *
     * @return bytes
     */
    public long getBytesRead() {
        return totalBytes;
    }

    /**
     * Returns size of file currently being uploaded. Value sane only during
     * upload.
     *
     * @return size in bytes
     */
    public long getUploadSize() {
        return contentLength;
    }

    /**
     * ProgressListener receives events to track progress of upload.
     */
    @Deprecated
    public interface ProgressListener extends Serializable {

        /**
         * Updates progress to listener
         *
         * @param readBytes
         *            bytes transferred
         * @param contentLength
         *            total size of file currently being uploaded, -1 if unknown
         */
        public void updateProgress(long readBytes, long contentLength);
    }

    /**
     * @return String to be rendered into button that fires uploading
     */
    public String getButtonCaption() {
        return buttonCaption;
    }

    /**
     * In addition to the actual file chooser, upload components have button
     * that starts actual upload progress. This method is used to set text in
     * that button.
     * <p>
     * In case the button text is set to null, the button is hidden. In this
     * case developer must explicitly initiate the upload process with
     * {@link #submitUpload()}.
     * <p>
     * In case the Upload is used in immediate mode using
     * {@link #setImmediate(boolean)}, the file choose (html input with type
     * "file") is hidden and only the button with this text is shown.
     * <p>
     *
     * <p>
     * <strong>Note</strong> the string given is set as is to the button. HTML
     * formatting is not stripped. Be sure to properly validate your value
     * according to your needs.
     *
     * @param buttonCaption
     *            text for upload components button.
     */
    public void setButtonCaption(String buttonCaption) {
        this.buttonCaption = buttonCaption;
        markAsDirty();
    }

    /**
     * Forces the upload to send the selected file to the server.
     * <p>
     * In case developer wants to use this feature, he/she will most probably
     * want to hide the uploads internal submit button by setting its caption to
     * null with {@link #setButtonCaption(String)} method.
     * <p>
     * Note, that the upload runs asynchronous. Developer should use normal
     * upload listeners to track the process of upload. If the field is empty
     * uploaded the file name will be empty string and file length 0 in the
     * upload finished event.
     * <p>
     * Also note, that the developer should not remove or modify the upload in
     * the same user transaction where the upload submit is requested. The
     * upload may safely be hidden or removed once the upload started event is
     * fired.
     */
    public void submitUpload() {
        markAsDirty();
        getRpcProxy(UploadClientRpc.class).submitUpload();
    }

    @Override
    public void markAsDirty() {
        super.markAsDirty();
    }

    /*
     * Handle to terminal via Upload monitors and controls the upload during it
     * is being streamed.
     */
    private com.vaadin.server.StreamVariable streamVariable;

    protected com.vaadin.server.StreamVariable getStreamVariable() {
        // Lazily built; the anonymous implementation bridges the terminal's
        // streaming callbacks to this component's events and state flags.
        if (streamVariable == null) {
            streamVariable = new com.vaadin.server.StreamVariable() {
                // Held between streamingStarted() and getOutputStream() so
                // the receiver can be given the file name and MIME type.
                private StreamingStartEvent lastStartedEvent;

                @Override
                public boolean listenProgress() {
                    return progressListeners != null
                            && !progressListeners.isEmpty();
                }

                @Override
                public void onProgress(StreamingProgressEvent event) {
                    fireUpdateProgress(event.getBytesReceived(),
                            event.getContentLength());
                }

                @Override
                public boolean isInterrupted() {
                    return interrupted;
                }

                @Override
                public OutputStream getOutputStream() {
                    if (getReceiver() == null) {
                        throw new IllegalStateException(
                                "Upload cannot be performed without a receiver set");
                    }
                    OutputStream receiveUpload = getReceiver().receiveUpload(
                            lastStartedEvent.getFileName(),
                            lastStartedEvent.getMimeType());
                    lastStartedEvent = null;
                    return receiveUpload;
                }

                @Override
                public void streamingStarted(StreamingStartEvent event) {
                    startUpload();
                    contentLength = event.getContentLength();
                    fireStarted(event.getFileName(), event.getMimeType());
                    lastStartedEvent = event;
                }

                @Override
                public void streamingFinished(StreamingEndEvent event) {
                    fireUploadSuccess(event.getFileName(), event.getMimeType(),
                            event.getContentLength());
                    endUpload();
                }

                @Override
                public void streamingFailed(StreamingErrorEvent event) {
                    try {
                        // Map the specific stream-acquisition failures to
                        // their dedicated event types; everything else is a
                        // generic interrupted/failed upload.
                        Exception exception = event.getException();
                        if (exception instanceof NoInputStreamException) {
                            fireNoInputStream(event.getFileName(),
                                    event.getMimeType(), 0);
                        } else if (exception instanceof NoOutputStreamException) {
                            fireNoOutputStream(event.getFileName(),
                                    event.getMimeType(), 0);
                        } else {
                            fireUploadInterrupted(event.getFileName(),
                                    event.getMimeType(), 0, exception);
                        }
                    } finally {
                        endUpload();
                    }
                }
            };
        }
        return streamVariable;
    }

    @Override
    public java.util.Collection<?> getListeners(java.lang.Class<?> eventType) {
        if (StreamingProgressEvent.class.isAssignableFrom(eventType)) {
            if (progressListeners == null) {
                // NOTE(review): Collections.emptyList() would avoid the raw
                // type here.
                return Collections.EMPTY_LIST;
            } else {
                return Collections.unmodifiableCollection(progressListeners);
            }
        }
        return super.getListeners(eventType);
    }

    /**
     * Returns the immediate mode of the component.
     * <p>
     * An immediate mode Upload component displays the browser file choosing
     * button immediately, whereas a non-immediate upload only shows a Vaadin
     * button.
     * <p>
     * The default mode of an Upload component is non-immediate.
     *
     * @return true if the component is in immediate mode, false if the
     *         component if not in immediate mode
     */
    @Override
    public boolean isImmediate() {
        if (getExplicitImmediateValue() != null) {
            return getExplicitImmediateValue();
        } else {
            // Unlike most components, Upload defaults to non-immediate.
            return false;
        }
    }

    @Override
    protected UploadState getState() {
        return (UploadState) super.getState();
    }
}
// Copyright 2015 The Project Buendia Authors // // Licensed under the Apache License, Version 2.0 (the "License"); you may not // use this file except in compliance with the License. You may obtain a copy // of the License at: http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software distrib- // uted under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES // OR CONDITIONS OF ANY KIND, either express or implied. See the License for // specific language governing permissions and limitations under the License. package org.projectbuendia.client.ui.login; import android.test.AndroidTestCase; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.mockito.Mock; import org.mockito.MockitoAnnotations; import org.projectbuendia.client.diagnostics.Troubleshooter; import org.projectbuendia.client.diagnostics.TroubleshootingAction; import org.projectbuendia.client.events.diagnostics.TroubleshootingActionsChangedEvent; import org.projectbuendia.client.events.user.KnownUsersLoadFailedEvent; import org.projectbuendia.client.events.user.KnownUsersLoadedEvent; import org.projectbuendia.client.events.user.UserAddFailedEvent; import org.projectbuendia.client.events.user.UserAddedEvent; import org.projectbuendia.client.json.JsonNewUser; import org.projectbuendia.client.json.JsonUser; import org.projectbuendia.client.ui.FakeEventBus; import org.projectbuendia.client.user.UserManager; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; /** Tests for {@link LoginController}. 
*/ public class LoginControllerTest extends AndroidTestCase { private LoginController mController; @Mock private UserManager mMockUserManager; @Mock private LoginController.Ui mMockUi; @Mock private LoginController.FragmentUi mMockFragmentUi; @Mock private Troubleshooter mTroubleshooter; private FakeEventBus mFakeEventBus; /** Tests that init() attempts to load known users. */ public void testInit_SetsKnownUserLoadGoing() { // WHEN the controller is inited mController.init(); // THEN it requests that the user manager loads the list of users mMockUserManager.loadKnownUsers(); } /** Tests that suspend() unregisters subscribers from the event bus. */ public void testSuspend_UnregistersFromEventBus() { // GIVEN an initialized controller mController.init(); // WHEN the controller is suspended mController.suspend(); // THEN the controller unregisters from the event bus assertEquals(0, mFakeEventBus.countRegisteredReceivers()); } /** Tests that the UI updates when users are loaded. */ public void testKnownUsersLoadedEvent_UpdatesUi() throws Exception { // GIVEN the controller is inited mController.init(); // WHEN a KnownUsersLoadedEvent is sent over the event bus JsonUser user = new JsonUser("idA", "nameA"); mFakeEventBus.post(new KnownUsersLoadedEvent(ImmutableSet.of(user))); // THEN the UI is updated verify(mMockFragmentUi).showUsers(ImmutableList.of(user)); } /** Tests that settings are shown when requested. */ public void testSettingsPress_ShowsSettings() { // GIVEN an inited controller mController.init(); // WHEN the settings button is pressed mController.onSettingsPressed(); // THEN the settings screen is opened verify(mMockUi).showSettings(); } /** Tests that selecting a user causes a transition to the tent selection screen. 
*/ public void testSelectingUser_SetsUserAndOpensTentSelection() throws Exception { // GIVEN an controller inited controller with users loaded mController.init(); JsonUser user = new JsonUser("idA", "nameA"); mFakeEventBus.post(new KnownUsersLoadedEvent(ImmutableSet.of(user))); // WHEN one of the users is selected mController.onUserSelected(user); // THEN that user is set as the active user verify(mMockUserManager).setActiveUser(user); // THEN the tent selection screen is shown verify(mMockUi).showTentSelectionScreen(); } /** Tests that spinner is shown when the controller is first initialized. */ public void testInit_showsSpinner() { // WHEN controller is inited mController.init(); // THEN spinner is shown verify(mMockFragmentUi).showSpinner(true); } /** Tests that successful user load hides the spinner. */ public void testUsersLoaded_hidesSpinner() { // GIVEN initialized controller mController.init(); // WHEN users are loaded JsonUser user = new JsonUser("idA", "nameA"); mFakeEventBus.post(new KnownUsersLoadedEvent(ImmutableSet.of(user))); // THEN the spinner is hidden verify(mMockFragmentUi).showSpinner(false); } /** Tests that the sync failed dialog appears when loading users fails. */ public void testUserLoadFails_showsSyncFailedDialog() { // GIVEN initialized controller mController.init(); // WHEN users fail to load mFakeEventBus.post(new KnownUsersLoadFailedEvent(KnownUsersLoadFailedEvent.REASON_UNKNOWN)); // THEN the sync fail dialog is shown verify(mMockUi).showSyncFailedDialog(true); } /** Tests that the sync failed dialog is hidden when users are successfully loaded. 
*/ public void testUserLoaded_hidesSyncFailedDialog() { // GIVEN initialized controller mController.init(); // WHEN users are loaded JsonUser user = new JsonUser("idA", "nameA"); mFakeEventBus.post(new KnownUsersLoadedEvent(ImmutableSet.of(user))); // THEN the sync fail dialog is hidden verify(mMockUi).showSyncFailedDialog(false); } /** Tests that users are requested when a retry is requested. */ public void testOnSyncRetry_requestsUsers() { // GIVEN initialized controller mController.init(); // WHEN onSyncRetry is called mController.onSyncRetry(); // THEN users are requested // Note: already called once in init(). verify(mMockUserManager, times(2)).loadKnownUsers(); } /** Tests that the spinner is shown when a retry is requested. */ public void testOnSyncRetry_showsSpinner() { // GIVEN initialized controller mController.init(); // WHEN onSyncRetry is called mController.onSyncRetry(); // THEN spinner is shown // Note: already shown once in init(). verify(mMockFragmentUi, times(2)).showSpinner(true); } /** Tests that the spinner is hidden whenever a user is added. */ public void testOnUserAdded_showsSpinner() { // GIVEN initialized controller mController.init(); // WHEN a user is added JsonUser user = new JsonUser("idA", "nameA"); mFakeEventBus.post(new UserAddedEvent(user)); // THEN spinner is hidden verify(mMockFragmentUi).showSpinner(false); } /** Tests that the spinner is hidden whenever a user add operation fails. */ public void testOnUserAddFailed_showsSpinner() { // GIVEN initialized controller mController.init(); // WHEN a user fails to be added JsonUser user = new JsonUser("idA", "nameA"); mFakeEventBus.post(new UserAddFailedEvent(new JsonNewUser(), 0)); // THEN spinner is hidden verify(mMockFragmentUi).showSpinner(false); } /** Tests that users are reloaded if the server becomes healthy and users are unavailable. 
*/ public void testOnServerHealthy_reloadsUsersIfNotAvailable() { // GIVEN initialized controller, no users loaded, server unhealthy when(mTroubleshooter.isServerHealthy()).thenReturn(false); mController.init(); // WHEN server becomes healthy when(mTroubleshooter.isServerHealthy()).thenReturn(true); mFakeEventBus.post(new TroubleshootingActionsChangedEvent( ImmutableSet.of(TroubleshootingAction.CHECK_PACKAGE_SERVER_CONFIGURATION), null)); // THEN users are reloaded // Note: already called once in init() verify(mMockUserManager, times(2)).loadKnownUsers(); } /** Tests that users are not reloaded if the server becomes healthy and users are available. */ public void testOnServerHealthy_doesNothingIfUsersAvailable() { // GIVEN initialized controller, users loaded, server unhealthy when(mTroubleshooter.isServerHealthy()).thenReturn(false); mController.init(); JsonUser user = new JsonUser("idA", "nameA"); mFakeEventBus.post(new KnownUsersLoadedEvent(ImmutableSet.of(user))); // WHEN server becomes healthy when(mTroubleshooter.isServerHealthy()).thenReturn(true); mFakeEventBus.post(new TroubleshootingActionsChangedEvent( ImmutableSet.of(TroubleshootingAction.CHECK_PACKAGE_SERVER_CONFIGURATION), null)); // THEN users are not reloaded verify(mMockUserManager, times(1)).loadKnownUsers(); } /** * Tests that TroubleshootingActionsChangedEvents do not trigger user reload if server is still * unhealthy. */ public void testOnTroubleshootingActionsChanged_checksServerHealthy() { // GIVEN initialized controller, no users loaded, server unhealthy when(mTroubleshooter.isServerHealthy()).thenReturn(false); mController.init(); // WHEN TroubleshootingActions change but server is still unhealthy mFakeEventBus.post(new TroubleshootingActionsChangedEvent( ImmutableSet.of(TroubleshootingAction.CHECK_PACKAGE_SERVER_CONFIGURATION), null)); // THEN users are not reloaded // Note: this function is called once during init(), so expect it to be called once, but // only once. 
verify(mMockUserManager, times(1)).loadKnownUsers(); } @Override protected void setUp() throws Exception { super.setUp(); MockitoAnnotations.initMocks(this); mFakeEventBus = new FakeEventBus(); mController = new LoginController( mMockUserManager, mFakeEventBus, mTroubleshooter, mMockUi, mMockFragmentUi); } }
/* Derby - Class org.apache.derby.impl.store.access.btree.index.B2IFactory Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package org.apache.derby.impl.store.access.btree.index; import java.util.Properties; import org.apache.derby.iapi.reference.SQLState; import org.apache.derby.iapi.services.monitor.ModuleControl; import org.apache.derby.iapi.services.monitor.Monitor; import org.apache.derby.shared.common.sanity.SanityManager; import org.apache.derby.catalog.UUID; import org.apache.derby.iapi.services.uuid.UUIDFactory; import org.apache.derby.iapi.error.StandardException; import org.apache.derby.iapi.store.access.conglomerate.Conglomerate; import org.apache.derby.iapi.store.access.conglomerate.ConglomerateFactory; import org.apache.derby.iapi.store.access.conglomerate.TransactionManager; import org.apache.derby.iapi.store.access.ColumnOrdering; import org.apache.derby.iapi.store.access.TransactionController; import org.apache.derby.iapi.store.raw.ContainerKey; import org.apache.derby.iapi.store.raw.ContainerHandle; import org.apache.derby.iapi.store.raw.LockingPolicy; import org.apache.derby.iapi.store.raw.RawStoreFactory; import org.apache.derby.iapi.types.DataValueDescriptor; import org.apache.derby.impl.store.access.btree.BTree; import 
org.apache.derby.impl.store.access.btree.ControlRow; /** The "B2I" (acronym for b-tree secondary index) factory manages b-tree conglomerates implemented on the raw store which are used as secondary indexes. <p> Most of this code is generic to all conglomerates. This class might be more easily maintained as an abstract class in Raw/Conglomerate/Generic. The concrete ConglomerateFactories would simply have to supply the IMPLEMENTATIONID, FORMATUUIDSTRING, and implement createConglomerate and defaultProperties. Conglomerates which support more than one format would have to override supportsFormat, and conglomerates which support more than one implementation would have to override supportsImplementation. **/ public class B2IFactory implements ConglomerateFactory, ModuleControl { private static final String IMPLEMENTATIONID = "BTREE"; private static final String FORMATUUIDSTRING = "C6CEEEF0-DAD3-11d0-BB01-0060973F0942"; private UUID formatUUID; /* ** Methods of MethodFactory (via ConglomerateFactory) */ /** Return the default properties for this kind of conglomerate. @see org.apache.derby.iapi.store.access.conglomerate.MethodFactory#defaultProperties **/ public Properties defaultProperties() { // XXX (nat) Need to return the default b-tree secondary index properties. return new Properties(); } /** Return whether this access method implements the implementation type given in the argument string. The btree only has one implementation type, "BTREE". @see org.apache.derby.iapi.store.access.conglomerate.MethodFactory#supportsImplementation **/ public boolean supportsImplementation(String implementationId) { return implementationId.equals(IMPLEMENTATIONID); } /** Return the primary implementation type for this access method. The btree only has one implementation type, "BTREE". 
@see org.apache.derby.iapi.store.access.conglomerate.MethodFactory#primaryImplementationType **/ public String primaryImplementationType() { return IMPLEMENTATIONID; } /** Return whether this access method supports the format supplied in the argument. The btree currently only supports one format. @see org.apache.derby.iapi.store.access.conglomerate.MethodFactory#supportsFormat **/ public boolean supportsFormat(UUID formatid) { return formatid.equals(formatUUID); } /** Return the primary format that this access method supports. The btree currently only supports one format. @see org.apache.derby.iapi.store.access.conglomerate.MethodFactory#primaryFormat **/ public UUID primaryFormat() { return formatUUID; } /* ** Methods of ConglomerateFactory */ /** * Return the conglomerate factory id. * <p> * Return a number in the range of 0-15 which identifies this factory. * Code which names conglomerates depends on this range currently, but * could be easily changed to handle larger ranges. One hex digit seemed * reasonable for the number of conglomerate types being currently * considered (heap, btree, gist, gist btree, gist rtree, hash, others? ). * <p> * @see ConglomerateFactory#getConglomerateFactoryId * * @return an unique identifier used to the factory into the conglomid. **/ public int getConglomerateFactoryId() { return(ConglomerateFactory.BTREE_FACTORY_ID); } /** Create the conglomerate and return a conglomerate object for it. @see ConglomerateFactory#createConglomerate @exception StandardException Standard exception policy. 
**/ public Conglomerate createConglomerate( TransactionManager xact_mgr, int segment, long input_containerid, DataValueDescriptor[] template, ColumnOrdering[] columnOrder, int[] collationIds, Properties properties, int temporaryFlag) throws StandardException { B2I btree = null; if ((temporaryFlag & TransactionController.IS_TEMPORARY) != 0 && xact_mgr.getAccessManager().isReadOnly()) { // If this is a temporary conglomerate created for a read-only // database, we don't really care which disk format we use, since // it is not used for persisting data in the database. Use the // current format. A special case is needed because checkVersion() // throws an exception in read-only databases (DERBY-2354). btree = new B2I(); } else if (xact_mgr.checkVersion( RawStoreFactory.DERBY_STORE_MAJOR_VERSION_10, RawStoreFactory.DERBY_STORE_MINOR_VERSION_4, null)) { // on disk databases with version higher than 10.3 should use // current disk format B2I. This includes new databases or // hard upgraded databases. btree = new B2I(); } else if (xact_mgr.checkVersion( RawStoreFactory.DERBY_STORE_MAJOR_VERSION_10, RawStoreFactory.DERBY_STORE_MINOR_VERSION_3, null)) { // Old databases that are running in new versions of the software, // but are running in soft upgrade mode at release level 10.3 // use the 10.3 B2I version. This version will // continue to write metadata that can be read by 10.3. btree = new B2I_10_3(); } else { // Old databases that are running in new versions of the software, // but are running in soft upgrade mode at release level 10.2 // and before should use the old B2I version. This version will // continue to write metadata that can be read by 10.2 and previous // versions. btree = new B2I_v10_2(); } btree.create( xact_mgr, segment, input_containerid, template, columnOrder, collationIds, properties, temporaryFlag); return(btree); } /** * Return Conglomerate object for conglomerate with conglomid. * <p> * Return the Conglomerate Object. 
This is implementation specific. * Examples of what will be done is using the id to find the file where * the conglomerate is located, and then executing implementation specific * code to instantiate an object from reading a "special" row from a * known location in the file. In the btree case the btree conglomerate * is stored as a column in the control row on the root page. * <p> * This operation is costly so it is likely an implementation using this * will cache the conglomerate row in memory so that subsequent accesses * need not perform this operation. * <p> * The btree object returned by this routine may be installed in a cache * so the object must not change. * * @return An instance of the conglomerate. * * @exception StandardException Standard exception policy. **/ public Conglomerate readConglomerate( TransactionManager xact_manager, ContainerKey container_key) throws StandardException { Conglomerate btree = null; ContainerHandle container = null; ControlRow root = null; try { // open readonly, with no locks. Dirty read is ok as it is the // responsibility of client code to make sure this data is not // changing while being read. The only changes that currently // happen to this data is creation and deletion - no updates // ever happen to btree conglomerates. container = (xact_manager.getRawStoreXact()).openContainer( container_key, (LockingPolicy) null, ContainerHandle.MODE_READONLY); if (container == null) { // thrown a "known" error if the conglomerate does not exist // which is checked for explicitly by callers of the store // interface. throw StandardException.newException( SQLState.STORE_CONGLOMERATE_DOES_NOT_EXIST, new Long(container_key.getContainerId())); } // The conglomerate is located in the control row on the root page. root = ControlRow.get(container, BTree.ROOTPAGEID); if (SanityManager.DEBUG) SanityManager.ASSERT(root.getPage().isLatched()); // read the Conglomerate from it's entry in the control row. 
btree = (Conglomerate) root.getConglom(B2I.FORMAT_NUMBER); if (SanityManager.DEBUG) SanityManager.ASSERT(btree instanceof B2I); } finally { if (root != null) root.release(); if (container != null) container.close(); } // if any error, just return null - meaning can't access the container. return(btree); } /* ** Methods of ModuleControl. */ public boolean canSupport(Properties startParams) { String impl = startParams.getProperty("derby.access.Conglomerate.type"); if (impl == null) return false; return supportsImplementation(impl); } public void boot(boolean create, Properties startParams) throws StandardException { // Find the UUID factory. UUIDFactory uuidFactory = Monitor.getMonitor().getUUIDFactory(); // Make a UUID that identifies this conglomerate's format. formatUUID = uuidFactory.recreateUUID(FORMATUUIDSTRING); } public void stop() { } }
/**
 * Copyright mcplissken.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.cradle.platform.vertx.httpgateway;

import java.nio.ByteBuffer;
import java.util.Map;

import org.cradle.platform.document.DocumentReader;
import org.cradle.platform.httpgateway.spi.BasicHttpAdapter;
import org.cradle.platform.httpgateway.spi.GatewayRequest;
import org.vertx.java.core.buffer.Buffer;
import org.vertx.java.core.http.HttpServerRequest;
import org.vertx.java.core.http.HttpServerResponse;
import org.vertx.java.core.streams.Pump;

/**
 * HTTP adapter that bridges the gateway SPI onto a Vert.x request/response
 * pair.
 *
 * @author Sherief Shawky
 * @email  mcrakens@gmail.com
 * @date   Aug 11, 2014
 */
public class VertxHttpAdapter extends BasicHttpAdapter {

	private HttpServerResponse response;
	private HttpServerRequest request;

	public VertxHttpAdapter(HttpServerRequest request, HttpServerResponse response) {
		this.response = response;
		this.request = request;
		// Stream the request body straight through to the response.
		Pump.createPump(request, response).start();
	}

	/* (non-Javadoc)
	 * @see org.cradle.osgi.gateway.HttpAdapter#readParameter(java.lang.String)
	 */
	@Override
	public String readParameter(String name) {
		return request.params().get(name);
	}

	/* (non-Javadoc)
	 * @see org.cradle.osgi.gateway.HttpAdapter#writeHeader(java.lang.String, java.lang.String)
	 */
	@Override
	public void writeHeader(String name, String value) {
		response.putHeader(name, value);
	}

	/* (non-Javadoc)
	 * @see org.cradle.osgi.gateway.HttpAdapter#readHeader(java.lang.String)
	 */
	@Override
	public String readHeader(String name) {
		return request.headers().get(name);
	}

	/* (non-Javadoc)
	 * @see org.cradle.osgi.gateway.HttpAdapter#writeResponse(java.nio.ByteBuffer)
	 */
	@Override
	public void writeResponse(ByteBuffer buffer) {
		// Copy the remaining bytes of the buffer and end the response with them.
		int length = buffer.limit();
		byte[] bytesBuff = new byte[length];
		buffer.get(bytesBuff);
		response.end(new Buffer(bytesBuff));
	}

	/**
	 * @return the request
	 */
	public HttpServerRequest getRequest() {
		return request;
	}

	/**
	 * @return the response
	 */
	public HttpServerResponse getResponse() {
		return response;
	}

	/* (non-Javadoc)
	 * @see org.cradle.gateway.HttpAdapter#sessionId()
	 */
	@Override
	public String sessionId() {
		// Lazily extract the session id from the Cookie header; an empty
		// string means no cookie was sent.
		if (sessionId == null) {
			String cookie = request.headers().get(COOKIE);
			if (cookie != null)
				extractSessionId(cookie);
			else
				return "";
		}
		return sessionId;
	}

	/* (non-Javadoc)
	 * @see org.cradle.gateway.HttpAdapter#setCookie()
	 */
	@Override
	public void setCookie(String name, String value) {
		String cookie = response.headers().get(COOKIE);
		cookie = createCookie(name, value, cookie);
		response.putHeader("Set-Cookie", cookie);
	}

	/* (non-Javadoc)
	 * @see org.cradle.gateway.HttpAdapter#method()
	 */
	@Override
	public String method() {
		return request.method();
	}

	protected void handleErrorResponse(int errorCode, String message) {
		response.setStatusCode(errorCode);
		if (message != null) {
			response.end(message);
		} else {
			response.end("Internal server error");
		}
	}

	/* (non-Javadoc)
	 * @see org.cradle.gateway.HttpAdapter#path()
	 */
	@Override
	public String path() {
		return request.path();
	}

	@Override
	public String getContentType() {
		// FIX: the original dereferenced the header unconditionally and threw
		// a NullPointerException when no Content-Type header was present.
		// Null-check first, matching the behavior of getContentLangauge().
		String header = readHeader(CONTENT_TYPE);
		if (header == null)
			return null;
		String[] headerParts = header.split(HEADER_SEP);
		return headerParts[0];
	}

	/* (non-Javadoc)
	 * @see org.cradle.gateway.HttpAdapter#getContentLangauge()
	 */
	@Override
	public String getContentLangauge() {
		String header = readHeader(CONTENT_LANGUAGE);
		if (header == null)
			return null;
		String[] headerParts = header.split(HEADER_SEP);
		return headerParts[0];
	}

	/* (non-Javadoc)
	 * @see org.cradle.gateway.HttpAdapter#sendRedirect(java.lang.String)
	 */
	@Override
	public void sendRedirect(String uri) {
		response.setStatusCode(REDIRECT_RESPONSE_CODE);
		response.putHeader("Location", uri);
		response.end();
	}

	/* (non-Javadoc)
	 * @see org.cradle.platform.httpgateway.HttpAdapter#createGatewayRequest()
	 */
	@Override
	public GatewayRequest createGatewayRequest(Map<String, DocumentReader> documentReaders) {
		return new VertxAsynchronusRequest(this, documentReaders);
	}
}
package com.ifightmonsters.yarra.data; import android.content.ContentProvider; import android.content.ContentUris; import android.content.ContentValues; import android.content.UriMatcher; import android.database.Cursor; import android.database.SQLException; import android.database.sqlite.SQLiteDatabase; import android.database.sqlite.SQLiteQueryBuilder; import android.net.Uri; public class YarraProvider extends ContentProvider { private static final int STATUS = 100; public static final int STATUS_ID = 101; private static final int SONG = 200; private static final int SONG_ID = 201; public static final UriMatcher sUriMatcher = buildUriMatcher(); private YarraDbHelper mOpenHelper; private static UriMatcher buildUriMatcher() { final UriMatcher matcher = new UriMatcher(UriMatcher.NO_MATCH); final String authority = YarraContract.CONTENT_AUTHORITY; matcher.addURI(authority, YarraContract.PATH_STATUS, STATUS); matcher.addURI(authority, YarraContract.PATH_STATUS + "/#", STATUS_ID); matcher.addURI(authority, YarraContract.PATH_SONG, SONG); matcher.addURI(authority, YarraContract.PATH_SONG + "/#", SONG_ID); return matcher; } private static final SQLiteQueryBuilder sStatusIdQueryBuilder; static { sStatusIdQueryBuilder = new SQLiteQueryBuilder(); sStatusIdQueryBuilder.setTables( YarraContract.Status.TABLE_NAME + " INNER JOIN " + YarraContract.Song.TABLE_NAME + " ON " + YarraContract.Status.TABLE_NAME + "." + YarraContract.Status._ID + " = " + YarraContract.Song.TABLE_NAME + "." + YarraContract.Song.COLUMN_STATUS_ID); } private Cursor getStatusById(Uri uri, String[] projection, String sortOrder) { return sStatusIdQueryBuilder.query(mOpenHelper.getReadableDatabase(), projection, YarraContract.Status.TABLE_NAME + "." 
+ YarraContract.Status._ID + " = '" + ContentUris.parseId(uri) + "'", null, null, null, sortOrder); } @Override public boolean onCreate() { mOpenHelper = new YarraDbHelper(getContext()); return true; } @Override public Cursor query(Uri uri, String[] projection, String selection, String[] selectionArgs, String sortOrder) { Cursor cursor; switch (sUriMatcher.match(uri)) { case STATUS: cursor = mOpenHelper.getReadableDatabase().query( YarraContract.Status.TABLE_NAME, projection, selection, selectionArgs, null, null, sortOrder ); break; case STATUS_ID: cursor = getStatusById(uri, projection, sortOrder); break; case SONG: cursor = mOpenHelper.getReadableDatabase().query( YarraContract.Song.TABLE_NAME, projection, selection, selectionArgs, null, null, sortOrder ); break; case SONG_ID: cursor = mOpenHelper.getReadableDatabase().query( YarraContract.Song.TABLE_NAME, projection, YarraContract.Song._ID + " = '" + ContentUris.parseId(uri) + "'", null, null, null, sortOrder ); break; default: throw new UnsupportedOperationException("Unknown uri: " + uri); } cursor.setNotificationUri(getContext().getContentResolver(), uri); return cursor; } @Override public String getType(Uri uri) { final int match = sUriMatcher.match(uri); switch (match) { case STATUS: return YarraContract.Status.CONTENT_TYPE; case STATUS_ID: return YarraContract.Status.CONTENT_ITEM_TYPE; case SONG: return YarraContract.Song.CONTENT_TYPE; case SONG_ID: return YarraContract.Song.CONTENT_ITEM_TYPE; default: throw new UnsupportedOperationException("Unknown uri: " + uri); } } @Override public Uri insert(Uri uri, ContentValues values) { final SQLiteDatabase db = mOpenHelper.getWritableDatabase(); final int match = sUriMatcher.match(uri); Uri returnUri; switch (match) { case STATUS: { long _id = db.insert(YarraContract.Status.TABLE_NAME, null, values); if (_id > 0) { returnUri = YarraContract.Status.buildStatusUri(_id); } else { throw new SQLException("Failed to insert row into " + uri); } } break; case SONG: { long 
_id = db.insert(YarraContract.Song.TABLE_NAME, null, values); if (_id > 0) { returnUri = YarraContract.Song.buildSongUri(_id); } else { throw new SQLException("Failed to insert row into " + uri); } break; } default: throw new UnsupportedOperationException("Unknown uri: " + uri); } getContext().getContentResolver().notifyChange(uri, null); return returnUri; } @Override public int delete(Uri uri, String selection, String[] selectionArgs) { final SQLiteDatabase db = mOpenHelper.getWritableDatabase(); final int match = sUriMatcher.match(uri); int rowsDeleted; switch (match) { case STATUS: rowsDeleted = db.delete(YarraContract.Status.TABLE_NAME, selection, selectionArgs); break; case SONG: rowsDeleted = db.delete(YarraContract.Song.TABLE_NAME, selection, selectionArgs); break; default: throw new UnsupportedOperationException("Unknown uri: " + uri); } if (selection == null || rowsDeleted != 0) { getContext().getContentResolver().notifyChange(uri, null); } return rowsDeleted; } @Override public int update(Uri uri, ContentValues values, String selection, String[] selectionArgs) { final SQLiteDatabase db = mOpenHelper.getWritableDatabase(); final int match = sUriMatcher.match(uri); int rowsUpdated; switch (match) { case STATUS: rowsUpdated = db.update(YarraContract.Status.TABLE_NAME, values, selection, selectionArgs); break; case SONG: rowsUpdated = db.update(YarraContract.Song.TABLE_NAME, values, selection, selectionArgs); break; default: throw new UnsupportedOperationException("Unknown uri: " + uri); } if (rowsUpdated != 0) { getContext().getContentResolver().notifyChange(uri, null); } return rowsUpdated; } @Override public int bulkInsert(Uri uri, ContentValues[] values) { final SQLiteDatabase db = mOpenHelper.getWritableDatabase(); final int match = sUriMatcher.match(uri); switch (match) { case SONG: db.beginTransaction(); int returnCount = 0; try { for (ContentValues value : values) { long _id = db.insert(YarraContract.Song.TABLE_NAME, null, value); if (_id != -1) { 
returnCount++; } } db.setTransactionSuccessful(); } finally { db.endTransaction(); } getContext().getContentResolver().notifyChange(uri, null); return returnCount; default: return super.bulkInsert(uri, values); } } }
package org.bouncycastle.cert.ocsp.test;

import java.io.IOException;
import java.math.BigInteger;
import java.security.KeyPair;
import java.security.Security;
import java.util.Date;
import java.util.Random;
import java.util.Set;
import java.util.Vector;

import org.bouncycastle.asn1.ASN1Encodable;
import org.bouncycastle.asn1.ASN1Exception;
import org.bouncycastle.asn1.ASN1OctetString;
import org.bouncycastle.asn1.DEROctetString;
import org.bouncycastle.asn1.ocsp.OCSPObjectIdentifiers;
import org.bouncycastle.asn1.x509.Extension;
import org.bouncycastle.asn1.x509.ExtensionsGenerator;
import org.bouncycastle.asn1.x509.GeneralName;
import org.bouncycastle.asn1.x509.X509Name;
import org.bouncycastle.cert.CertIOException;
import org.bouncycastle.cert.X509CertificateHolder;
import org.bouncycastle.cert.jcajce.JcaX509CertificateHolder;
import org.bouncycastle.cert.ocsp.BasicOCSPResp;
import org.bouncycastle.cert.ocsp.BasicOCSPRespBuilder;
import org.bouncycastle.cert.ocsp.CertificateID;
import org.bouncycastle.cert.ocsp.CertificateStatus;
import org.bouncycastle.cert.ocsp.OCSPReq;
import org.bouncycastle.cert.ocsp.OCSPReqBuilder;
import org.bouncycastle.cert.ocsp.OCSPResp;
import org.bouncycastle.cert.ocsp.OCSPRespBuilder;
import org.bouncycastle.cert.ocsp.Req;
import org.bouncycastle.cert.ocsp.RespID;
import org.bouncycastle.cert.ocsp.SingleResp;
import org.bouncycastle.cert.ocsp.jcajce.JcaBasicOCSPRespBuilder;
import org.bouncycastle.jce.X509Principal;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.bouncycastle.operator.DigestCalculatorProvider;
import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
import org.bouncycastle.operator.jcajce.JcaContentVerifierProviderBuilder;
import org.bouncycastle.operator.jcajce.JcaDigestCalculatorProviderBuilder;
import org.bouncycastle.util.encoders.Base64;
import org.bouncycastle.util.test.SimpleTest;

/**
 * Exercises the OCSP request/response API (RFC 2560 style) against the BC
 * provider: request building (signed/unsigned, with and without a nonce
 * extension), encode/decode round-trips, response parsing of canned
 * responses, and failure handling for malformed input.
 */
public class OCSPTest
    extends SimpleTest
{
    // Canned, base64-encoded DER OCSP response used by performTest()
    // ("response parsing - test 1"); expected to parse and verify.
    byte[] testResp1 = Base64.decode(
        "MIIFnAoBAKCCBZUwggWRBgkrBgEFBQcwAQEEggWCMIIFfjCCARehgZ8wgZwx" +
        "CzAJBgNVBAYTAklOMRcwFQYDVQQIEw5BbmRocmEgcHJhZGVzaDESMBAGA1UE" +
        "BxMJSHlkZXJhYmFkMQwwCgYDVQQKEwNUQ1MxDDAKBgNVBAsTA0FUQzEeMBwG" +
        "A1UEAxMVVENTLUNBIE9DU1AgUmVzcG9uZGVyMSQwIgYJKoZIhvcNAQkBFhVv" +
        "Y3NwQHRjcy1jYS50Y3MuY28uaW4YDzIwMDMwNDAyMTIzNDU4WjBiMGAwOjAJ" +
        "BgUrDgMCGgUABBRs07IuoCWNmcEl1oHwIak1BPnX8QQUtGyl/iL9WJ1VxjxF" +
        "j0hAwJ/s1AcCAQKhERgPMjAwMjA4MjkwNzA5MjZaGA8yMDAzMDQwMjEyMzQ1" +
        "OFowDQYJKoZIhvcNAQEFBQADgYEAfbN0TCRFKdhsmvOdUoiJ+qvygGBzDxD/" +
        "VWhXYA+16AphHLIWNABR3CgHB3zWtdy2j7DJmQ/R7qKj7dUhWLSqclAiPgFt" +
        "QQ1YvSJAYfEIdyHkxv4NP0LSogxrumANcDyC9yt/W9yHjD2ICPBIqCsZLuLk" +
        "OHYi5DlwWe9Zm9VFwCGgggPMMIIDyDCCA8QwggKsoAMCAQICAQYwDQYJKoZI" +
        "hvcNAQEFBQAwgZQxFDASBgNVBAMTC1RDUy1DQSBPQ1NQMSYwJAYJKoZIhvcN" +
        "AQkBFhd0Y3MtY2FAdGNzLWNhLnRjcy5jby5pbjEMMAoGA1UEChMDVENTMQww" +
        "CgYDVQQLEwNBVEMxEjAQBgNVBAcTCUh5ZGVyYWJhZDEXMBUGA1UECBMOQW5k" +
        "aHJhIHByYWRlc2gxCzAJBgNVBAYTAklOMB4XDTAyMDgyOTA3MTE0M1oXDTAz" +
        "MDgyOTA3MTE0M1owgZwxCzAJBgNVBAYTAklOMRcwFQYDVQQIEw5BbmRocmEg" +
        "cHJhZGVzaDESMBAGA1UEBxMJSHlkZXJhYmFkMQwwCgYDVQQKEwNUQ1MxDDAK" +
        "BgNVBAsTA0FUQzEeMBwGA1UEAxMVVENTLUNBIE9DU1AgUmVzcG9uZGVyMSQw" +
        "IgYJKoZIhvcNAQkBFhVvY3NwQHRjcy1jYS50Y3MuY28uaW4wgZ8wDQYJKoZI" +
        "hvcNAQEBBQADgY0AMIGJAoGBAM+XWW4caMRv46D7L6Bv8iwtKgmQu0SAybmF" +
        "RJiz12qXzdvTLt8C75OdgmUomxp0+gW/4XlTPUqOMQWv463aZRv9Ust4f8MH" +
        "EJh4ekP/NS9+d8vEO3P40ntQkmSMcFmtA9E1koUtQ3MSJlcs441JjbgUaVnm" +
        "jDmmniQnZY4bU3tVAgMBAAGjgZowgZcwDAYDVR0TAQH/BAIwADALBgNVHQ8E" +
        "BAMCB4AwEwYDVR0lBAwwCgYIKwYBBQUHAwkwNgYIKwYBBQUHAQEEKjAoMCYG" +
        "CCsGAQUFBzABhhpodHRwOi8vMTcyLjE5LjQwLjExMDo3NzAwLzAtBgNVHR8E" +
        "JjAkMCKgIKAehhxodHRwOi8vMTcyLjE5LjQwLjExMC9jcmwuY3JsMA0GCSqG" +
        "SIb3DQEBBQUAA4IBAQB6FovM3B4VDDZ15o12gnADZsIk9fTAczLlcrmXLNN4" +
        "PgmqgnwF0Ymj3bD5SavDOXxbA65AZJ7rBNAguLUo+xVkgxmoBH7R2sBxjTCc" +
        "r07NEadxM3HQkt0aX5XYEl8eRoifwqYAI9h0ziZfTNes8elNfb3DoPPjqq6V" +
        "mMg0f0iMS4W8LjNPorjRB+kIosa1deAGPhq0eJ8yr0/s2QR2/WFD5P4aXc8I" +
        "KWleklnIImS3zqiPrq6tl2Bm8DZj7vXlTOwmraSQxUwzCKwYob1yGvNOUQTq" +
        "pG6jxn7jgDawHU1+WjWQe4Q34/pWeGLysxTraMa+Ug9kPe+jy/qRX2xwvKBZ");

    // Second canned response ("test 2" in performTest()); also expected to
    // parse and verify against its embedded certificate.
    byte[] testResp2 = Base64.decode(
        "MIII1QoBAKCCCM4wggjKBgkrBgEFBQcwAQEEggi7MIIItzCBjqADAgEAoSMw" +
        "ITEfMB0GA1UEAxMWT0NTUCBjZXJ0LVFBLUNMSUVOVC04NxgPMjAwMzA1MTky" +
        "MDI2MzBaMFEwTzA6MAkGBSsOAwIaBQAEFJniwiUuyrhKIEF2TjVdVdCAOw0z" +
        "BBR2olPKrPOJUVyGZ7BXOC4L2BmAqgIBL4AAGA8yMDAzMDUxOTIwMjYzMFow" +
        "DQYJKoZIhvcNAQEEBQADggEBALImFU3kUtpNVf4tIFKg/1sDHvGpk5Pk0uhH" +
        "TiNp6vdPfWjOgPkVXskx9nOTabVOBE8RusgwEcK1xeBXSHODb6mnjt9pkfv3" +
        "ZdbFLFvH/PYjOb6zQOgdIOXhquCs5XbcaSFCX63hqnSaEqvc9w9ctmQwds5X" +
        "tCuyCB1fWu/ie8xfuXR5XZKTBf5c6dO82qFE65gTYbGOxJBYiRieIPW1XutZ" +
        "A76qla4m+WdxubV6SPG8PVbzmAseqjsJRn4jkSKOGenqSOqbPbZn9oBsU0Ku" +
        "hul3pwsNJvcBvw2qxnWybqSzV+n4OvYXk+xFmtTjw8H9ChV3FYYDs8NuUAKf" +
        "jw1IjWegggcOMIIHCjCCAzMwggIboAMCAQICAQIwDQYJKoZIhvcNAQEEBQAw" +
        "bzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAk1BMRAwDgYDVQQHEwdXYWx0aGFt" +
        "MRYwFAYDVQQKEw1Gb3J1bSBTeXN0ZW1zMQswCQYDVQQLEwJRQTEcMBoGA1UE" +
        "AxMTQ2VydGlmaWNhdGUgTWFuYWdlcjAeFw0wMzAzMjEwNTAwMDBaFw0yNTAz" +
        "MjEwNTAwMDBaMCExHzAdBgNVBAMTFk9DU1AgY2VydC1RQS1DTElFTlQtODcw" +
        "ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDVuxRCZgJAYAftYuRy" +
        "9axdtsHrkIJyVVRorLCTWOoLmx2tlrGqKbHOGKmvqEPEpeCDYQk+0WIlWMuM" +
        "2pgiYAolwqSFBwCjkjQN3fCIHXiby0JBgCCLoe7wa0pZffE+8XZH0JdSjoT3" +
        "2OYD19wWZeY2VB0JWJFWYAnIL+R5Eg7LwJ5QZSdvghnOWKTv60m/O1rC0see" +
        "9lbPO+3jRuaDyCUKYy/YIKBYC9rtC4hS47jg70dTfmE2nccjn7rFCPBrVr4M" +
        "5szqdRzwu3riL9W+IE99LTKXOH/24JX0S4woeGXMS6me7SyZE6x7P2tYkNXM" +
        "OfXk28b3SJF75K7vX6T6ecWjAgMBAAGjKDAmMBMGA1UdJQQMMAoGCCsGAQUF" +
        "BwMJMA8GCSsGAQUFBzABBQQCBQAwDQYJKoZIhvcNAQEEBQADggEBAKNSn7pp" +
        "UEC1VTN/Iqk8Sc2cAYM7KSmeB++tuyes1iXY4xSQaEgOxRa5AvPAKnXKSzfY" +
        "vqi9WLdzdkpTo4AzlHl5nqU/NCUv3yOKI9lECVMgMxLAvZgMALS5YXNZsqrs" +
        "hP3ASPQU99+5CiBGGYa0PzWLstXLa6SvQYoHG2M8Bb2lHwgYKsyrUawcfc/s" +
        "jE3jFJeyCyNwzH0eDJUVvW1/I3AhLNWcPaT9/VfyIWu5qqZU+ukV/yQXrKiB" +
        "glY8v4QDRD4aWQlOuiV2r9sDRldOPJe2QSFDBe4NtBbynQ+MRvF2oQs/ocu+" +
        "OAHX7uiskg9GU+9cdCWPwJf9cP/Zem6MemgwggPPMIICt6ADAgECAgEBMA0G" +
        "CSqGSIb3DQEBBQUAMG8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJNQTEQMA4G" +
        "A1UEBxMHV2FsdGhhbTEWMBQGA1UEChMNRm9ydW0gU3lzdGVtczELMAkGA1UE" +
        "CxMCUUExHDAaBgNVBAMTE0NlcnRpZmljYXRlIE1hbmFnZXIwHhcNMDMwMzIx" +
        "MDUwMDAwWhcNMjUwMzIxMDUwMDAwWjBvMQswCQYDVQQGEwJVUzELMAkGA1UE" +
        "CBMCTUExEDAOBgNVBAcTB1dhbHRoYW0xFjAUBgNVBAoTDUZvcnVtIFN5c3Rl" +
        "bXMxCzAJBgNVBAsTAlFBMRwwGgYDVQQDExNDZXJ0aWZpY2F0ZSBNYW5hZ2Vy" +
        "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4VeU+48VBjI0mGRt" +
        "9qlD+WAhx3vv4KCOD5f3HWLj8D2DcoszVTVDqtRK+HS1eSpO/xWumyXhjV55" +
        "FhG2eYi4e0clv0WyswWkGLqo7IxYn3ZhVmw04ohdTjdhVv8oS+96MUqPmvVW" +
        "+MkVRyqm75HdgWhKRr/lEpDNm+RJe85xMCipkyesJG58p5tRmAZAAyRs3jYw" +
        "5YIFwDOnt6PCme7ui4xdas2zolqOlynMuq0ctDrUPKGLlR4mVBzgAVPeatcu" +
        "ivEQdB3rR6UN4+nv2jx9kmQNNb95R1M3J9xHfOWX176UWFOZHJwVq8eBGF9N" +
        "pav4ZGBAyqagW7HMlo7Hw0FzUwIDAQABo3YwdDARBglghkgBhvhCAQEEBAMC" +
        "AJcwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU64zBxl1yKES8tjU3/rBA" +
        "NaeBpjkwHwYDVR0jBBgwFoAU64zBxl1yKES8tjU3/rBANaeBpjkwDgYDVR0P" +
        "AQH/BAQDAgGGMA0GCSqGSIb3DQEBBQUAA4IBAQAzHnf+Z+UgxDVOpCu0DHF+" +
        "qYZf8IaUQxLhUD7wjwnt3lJ0QV1z4oyc6Vs9J5xa8Mvf7u1WMmOxvN8r8Kb0" +
        "k8DlFszLd0Qwr+NVu5NQO4Vn01UAzCtH4oX2bgrVzotqDnzZ4TcIr11EX3Nb" +
        "tO8yWWl+xWIuxKoAO8a0Rh97TyYfAj4++GIm43b2zIvRXEWAytjz7rXUMwRC" +
        "1ipRQwSA9gyw2y0s8emV/VwJQXsTe9xtDqlEC67b90V/BgL/jxck5E8yrY9Z" +
        "gNxlOgcqscObisAkB5I6GV+dfa+BmZrhSJ/bvFMUrnFzjLFvZp/9qiK11r5K" +
        "A5oyOoNv0w+8bbtMNEc1");

    /**
     * extra version number encoding.
     * (OCSP request with an explicitly-encoded version field; used by
     * testIrregularVersionReq() to check the parser tolerates it.)
     */
    private static byte[] irregReq = Base64.decode(
        "MIIQpTBUoAMCAQAwTTBLMEkwCQYFKw4DAhoFAAQUIcFvFFVjPem15pKox4cfcnzF" +
        "Kf4EFJf8OQzmVmyJ/hc4EhitQbXcqAzDAhB9ePsP19SuP6CsAgFwQuEAoIIQSzCC" +
        "EEcwDQYJKoZIhvcNAQEFBQADgYEAlq/Tjl8OtFM8Tib1JYTiaPy9vFDr8UZhqXJI" +
        "FyrdgtUyyDt0EcrgnBGacAeRZzF5sokIC6DjXweU7EItGqrpw/RaCUPUWFpPxR6y" +
        "HjuzrLmICocTI9MH7dRUXm0qpxoY987sx1PtWB4pSR99ixBtq3OPNdsI0uJ+Qkei" +
        "LbEZyvWggg+wMIIPrDCCA5owggKCoAMCAQICEEAxXx/eFe7gm/NX7AkcS68wDQYJ" +
        "KoZIhvcNAQEFBQAwgZoxCzAJBgNVBAYTAlNFMTMwMQYDVQQKDCpMw6Ruc2bDtnJz" +
        "w6RrcmluZ2FyIEJhbmsgQWt0aWVib2xhZyAocHVibCkxFTATBgNVBAUTDDExMTEx" +
        "MTExMTExMTE/MD0GA1UEAww2TMOkbnNmw7Zyc8Oka3JpbmdhciBCYW5rIFB1cmNo" +
        "YXNlciBDQTEgZm9yIEJhbmtJRCBURVNUMB4XDTA4MTAwNjIyMDAwMFoXDTEwMTAx" +
        "MDIxNTk1OVowgZExCzAJBgNVBAYTAlNFMTMwMQYDVQQKDCpMw6Ruc2bDtnJzw6Rr" +
        "cmluZ2FyIEJhbmsgQWt0aWVib2xhZyAocHVibCkxFTATBgNVBAUTDDExMTExMTEx" +
        "MTExMTE2MDQGA1UEAwwtTMOkbnNmw7Zyc8Oka3JpbmdhciBCYW5rIE9DU1AgZm9y" +
        "IEJhbmtJRCBURVNUMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC5e/h6aL2m" +
        "DVpWeu5e5p1Ps9kbvuuGeAp9zJDYLbZz7uzT67X+s59HaViroD2+2my/gg7rX7tK" +
        "H9VXpJad1W9O19SjfNyxgeAMwVMkrbb4IlrQwu0v/Ub8JPxSWwZZXYiODq5abeXA" +
        "abMYIHxSaSkhrsUj1dpSAohHLJRlq707swIDAQABo2cwZTAfBgNVHSMEGDAWgBTR" +
        "vcp2QyNdNGZ+q7TjKSrrHZqxmDATBgNVHSAEDDAKMAgGBiqFcDwBBjAOBgNVHQ8B" +
        "Af8EBAMCBkAwHQYDVR0OBBYEFF/3557FEvkA8iiPv2XcBclxKnTdMA0GCSqGSIb3" +
        "DQEBBQUAA4IBAQAOxRvHO89XJ0v83BZdPFzEBA4B2Tqc1oABUn13S6fAkcGWvOmG" +
        "eY61MK16aMnLPNDadZrAqJc6PEtVY57uaywE9acwv9XpHO0bcS94tLwvZZJ2KBt0" +
        "Oq96gaI6gnJViUjyWjm+qBZvod0QPOLGv6wUPoiNcCpSid/COTjKpLYpCJj3ZWUV" +
        "nsTRWSRVXsdY/xI0gs/A8/c5P1PuTxoi99RTmcruoFxvV4MmhWyX7IGqG4OAtLdo" +
        "yefz/90FPGOrmqY9OgEb+gNuTM26YDvSs1dfarPl89d8jjwxHgNbZjh2VHFqKolJ" +
        "8TB8ZS5aNvhHPumOOE47y95rTBxrxSmGvKb8MIIENDCCAxygAwIBAgIRAJAFaeOw" +
        "7XbxH/DN/Vvhjx8wDQYJKoZIhvcNAQEFBQAwgZUxCzAJBgNVBAYTAlNFMTMwMQYD" +
        "VQQKDCpMw6Ruc2bDtnJzw6RrcmluZ2FyIEJhbmsgQWt0aWVib2xhZyAocHVibCkx" +
        "FTATBgNVBAUTDDExMTExMTExMTExMTE6MDgGA1UEAwwxTMOkbnNmw7Zyc8Oka3Jp" +
        "bmdhciBCYW5rIFJvb3QgQ0ExIGZvciBCYW5rSUQgVEVTVDAeFw0wNzEwMDExMjAw" +
        "MzdaFw0yOTA3MDExMjAwMzdaMIGaMQswCQYDVQQGEwJTRTEzMDEGA1UECgwqTMOk" +
        "bnNmw7Zyc8Oka3JpbmdhciBCYW5rIEFrdGllYm9sYWcgKHB1YmwpMRUwEwYDVQQF" +
        "EwwxMTExMTExMTExMTExPzA9BgNVBAMMNkzDpG5zZsO2cnPDpGtyaW5nYXIgQmFu" +
        "ayBQdXJjaGFzZXIgQ0ExIGZvciBCYW5rSUQgVEVTVDCCASIwDQYJKoZIhvcNAQEB" +
        "BQADggEPADCCAQoCggEBAMK5WbYojYRX1ZKrbxJBgbd4x503LfMWgr67sVD5L0NY" +
        "1RPhZVFJRKJWvawE5/eXJ4oNQwc831h2jiOgINXuKyGXqdAVGBcpFwIxTfzxwT4l" +
        "fvztr8pE6wk7mLLwKUvIjbM3EF1IL3zUI3UU/U5ioyGmcb/o4GGN71kMmvV/vrkU" +
        "02/s7xicXNxYej4ExLiCkS5+j/+3sR47Uq5cL9e8Yg7t5/6FyLGQjKoS8HU/abYN" +
        "4kpx/oyrxzrXMhnMVDiI8QX9NYGJwI8KZ/LU6GDq/NnZ3gG5v4l4UU1GhgUbrk4I" +
        "AZPDu99zvwCtkdj9lJN0eDv8jdyEPZ6g1qPBE0pCNqcCAwEAAaN4MHYwDwYDVR0T" +
        "AQH/BAUwAwEB/zATBgNVHSAEDDAKMAgGBiqFcDwBBjAOBgNVHQ8BAf8EBAMCAQYw" +
        "HwYDVR0jBBgwFoAUnkjp1bkQUOrkRiLgxpxwAe2GQFYwHQYDVR0OBBYEFNG9ynZD" +
        "I100Zn6rtOMpKusdmrGYMA0GCSqGSIb3DQEBBQUAA4IBAQAPVSC4HEd+yCtSgL0j" +
        "NI19U2hJeP28lAD7OA37bcLP7eNrvfU/2tuqY7rEn1m44fUbifewdgR8x2DzhM0m" +
        "fJcA5Z12PYUb85L9z8ewGQdyHLNlMpKSTP+0lebSc/obFbteC4jjuvux60y5KVOp" +
        "osXbGw2qyrS6uhZJrTDP1B+bYg/XBttG+i7Qzx0S5Tq//VU9OfAQZWpvejadKAk9" +
        "WCcXq6zALiJcxsUwOHZRvvHDxkHuf5eZpPvm1gaqa+G9CtV+oysZMU1eTRasBHsB" +
        "NRWYfOSXggsyqRHfIAVieB4VSsB8WhZYm8UgYoLhAQfSJ5Xq5cwBOHkVj33MxAyP" +
        "c7Y5MIID/zCCAuegAwIBAgIRAOXEoBcV4gV3Z92gk5AuRgwwDQYJKoZIhvcNAQEF" +
        "BQAwZjEkMCIGA1UECgwbRmluYW5zaWVsbCBJRC1UZWtuaWsgQklEIEFCMR8wHQYD" +
        "VQQLDBZCYW5rSUQgTWVtYmVyIEJhbmtzIENBMR0wGwYDVQQDDBRCYW5rSUQgUm9v" +
        "dCBDQSBURVNUMjAeFw0wNzEwMDExMTQ1NDlaFw0yOTA4MDExMTU4MjVaMIGVMQsw" +
        "CQYDVQQGEwJTRTEzMDEGA1UECgwqTMOkbnNmw7Zyc8Oka3JpbmdhciBCYW5rIEFr" +
        "dGllYm9sYWcgKHB1YmwpMRUwEwYDVQQFEwwxMTExMTExMTExMTExOjA4BgNVBAMM" +
        "MUzDpG5zZsO2cnPDpGtyaW5nYXIgQmFuayBSb290IENBMSBmb3IgQmFua0lEIFRF" +
        "U1QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDBzn7IXIpyOGCCTuzL" +
        "DKE/T+pFRTgFh3QgKtifZ4zxdvB2Sd5+90vUEGcGExUhzpgb9gOUrT1eE0XhdiUR" +
        "YuYYpJI/nzPQWTsRtEaql7NHBPKnEauoA9oAhCT4pE5gLlqpTfkB8nAsRTI2XqpI" +
        "hQ7vTvnTRx20xog21NIbz1GztV8H1kBH2eDvRX7cXGiugp6CXV/le9cB+/4TBNUN" +
        "Xqupt79dM49KCoDuYr72W7Hv4BSWw3IInEN2m8T2X6UBpBGkCiGwLQy/+KOmYRK7" +
        "1PSFC0rXDwOJ0HJ/8fHwx6vLMxHAQ6s/9vOW10MjgjSQlbVqH/4Pa+TlpWumSV4E" +
        "l0z9AgMBAAGjeDB2MA8GA1UdEwEB/wQFMAMBAf8wEwYDVR0gBAwwCjAIBgYqhXA8" +
        "AQYwDgYDVR0PAQH/BAQDAgEGMB8GA1UdIwQYMBaAFJuTMPljHcYdrRO9sEi1amb4" +
        "tE3VMB0GA1UdDgQWBBSeSOnVuRBQ6uRGIuDGnHAB7YZAVjANBgkqhkiG9w0BAQUF" +
        "AAOCAQEArnW/9n+G+84JOgv1Wn4tsBBS7QgJp1rdCoiNrZPx2du/7Wz3wQVNKBjL" +
        "eMCyLjg0OVHuq4hpCv9MZpUqdcUW8gpp4dLDAAd1uE7xqVuG8g4Ir5qocxbZHQew" +
        "fnqSJJDlEZgDeZIzod92OO+htv0MWqKWbr3Mo2Hqhn+t0+UVWsW4k44e7rUw3xQq" +
        "r2VdMJv/C68BXUgqh3pplUDjWyXfreiACTT0q3HT6v6WaihKCa2WY9Kd1IkDcLHb" +
        "TZk8FqMmGn72SgJw3H5Dvu7AiZijjNAUulMnMpxBEKyFTU2xRBlZZVcp50VJ2F7+" +
        "siisxbcYOAX4GztLMlcyq921Ov/ipDCCA88wggK3oAMCAQICEQCmaX+5+m5bF5us" +
        "CtyMq41SMA0GCSqGSIb3DQEBBQUAMGYxJDAiBgNVBAoMG0ZpbmFuc2llbGwgSUQt" +
        "VGVrbmlrIEJJRCBBQjEfMB0GA1UECwwWQmFua0lEIE1lbWJlciBCYW5rcyBDQTEd" +
        "MBsGA1UEAwwUQmFua0lEIFJvb3QgQ0EgVEVTVDIwHhcNMDQwODEzMDcyMDEwWhcN" +
        "MjkwODEyMTIwMjQ2WjBmMSQwIgYDVQQKDBtGaW5hbnNpZWxsIElELVRla25payBC" +
        "SUQgQUIxHzAdBgNVBAsMFkJhbmtJRCBNZW1iZXIgQmFua3MgQ0ExHTAbBgNVBAMM" +
        "FEJhbmtJRCBSb290IENBIFRFU1QyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB" +
        "CgKCAQEA25D0f1gipbACk4Bg3t6ODUlCWOU0TWeTkzAHR7IRB5T++yvsVosedMMW" +
        "6KYYTbPONeJSt5kydX+wZi9nVNdlhkNULLbDKWfRY7x+B9MR1Q0Kq/e4VR0uRsak" +
        "Bv5iwEYZ7cSR63HfBaPTqQsGobq+wtGH5JeTBrmCt4A3kN1UWgX32Dv/I3m7v8bK" +
        "iwh4cnvAD9PIOtq6pOmAkSvLvp8jCy3qFLe9KAxm8M/ZAmnxYaRV8DVEg57FGoG6" +
        "oiG3Ixx8PSVVdzpFY4kuUFLi4ueMPwjnXFiBhhWJJeOtFG3Lc2aW3zvcDbD/MsDm" +
        "rSZNTmtbOOou8xuMKjlNY9PU5MHIaQIDAQABo3gwdjAPBgNVHRMBAf8EBTADAQH/" +
        "MBMGA1UdIAQMMAowCAYGKoVwPAEGMA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAW" +
        "gBSbkzD5Yx3GHa0TvbBItWpm+LRN1TAdBgNVHQ4EFgQUm5Mw+WMdxh2tE72wSLVq" +
        "Zvi0TdUwDQYJKoZIhvcNAQEFBQADggEBAIQ4ZBHWssA38pfNzH5A+H3SXpAlI8Jc" +
        "LuoMVOIwwbfd1Up0xopCs+Ay41v8FZtcTMFqCVTih2nzVusTgnFBPMPJ2cnTlRue" +
        "kAtVRNsiWn2/Ool/OXoYf5YnpgYu8t9jLCBCoDS5YJg714r9V9hCwfey8TCWBU80" +
        "vL7EIfjK13nUxf8d49GzZlFMNqGDMjfMp1FYrHBGLZBr8br/G/7em1Cprw7iR8cw" +
        "pddz+QXXFIrIz5Y9D/x1RrwoLibPw0kMrSwI2G4aCvoBySfbD6cpnJf6YHRctdSb" +
        "755zhdBW7XWTl6ReUVuEt0hTFms4F60kFAi5hIbDRSN1Slv5yP2b0EA=");

    // Deliberately malformed response (contains a zero-length ENUMERATED);
    // testInvalidResp() expects parsing to fail with a CertIOException.
    private static byte[] invalidResp = Base64.decode(
        "MIIGggoAoIIGfDCCBngGCSsGAQUFBzABAQSCBmkwggZlMIHeoTQwMjELMAkG" +
        "A1UEBhMCVVMxDTALBgNVBAoMBGlXYXkxFDASBgNVBAMMC2lXYXkgT3BlbkNB" +
        "GA8yMDEyMDEyMzIxMjkxMVowbjBsMEQwCQYFKw4DAhoFAAQUPA5ymcOyHyZJ" +
        "d7DAidsEh79Uh6QEFMHnDLGSc/VElMBzr5f0+LQnpN2YAgsA5xIzv2Ln0dAa" +
        "94IAGA8yMDEyMDEyMzIxMjkxMVqgERgPMjAxMjAxMjMyMTM0MTFaoSUwIzAh" +
        "BgkrBgEFBQcwAQIEFCHEdgCz5w64KgppPIetaRzxewinMA0GCSqGSIb3DQEB" +
        "CwUAA4IBAQBsW8cXR4eOLgclY/uRodjso/5xkHIAiJy+DpgqELRrnzKe87HO" +
        "Km7DCicz1nwsPJskK14xtIw1rfQ8nzgztriComAUVc/pxJ9wQWGZI3d2dNbW" +
        "AmecKb/mG0QrJrt3U5D0+CFTUq5u7NOs1jZRe+df9TDLBr0vIA6a0I6K9M9F" +
        "ZOPWU/j5KVjoi0/kv4wnxRzQ2zc4Z3b5gm9T0MXMH5bST3z4yhOs/NRezNTA" +
        "fBQvimS60d4fybH0pXcVYUH81y5fm9rCpuwQ6rMt2vi0ZKrfyVom4OIAr/gh" +
        "Doj8Yh/LdtI1RvFkAL3pvzs06cfg3qM38b9Uh9w93w4/Hguw14eroIIEbDCC" +
        "BGgwggRkMIIDTKADAgECAgEBMA0GCSqGSIb3DQEBCwUAMDIxCzAJBgNVBAYT" +
        "AlVTMQ0wCwYDVQQKDARpV2F5MRQwEgYDVQQDDAtpV2F5IE9wZW5DQTAeFw0x" +
        "MjAxMjAxNTIyMjFaFw0zMjAxMTUxNTIyMjFaMDIxCzAJBgNVBAYTAlVTMQ0w" +
        "CwYDVQQKDARpV2F5MRQwEgYDVQQDDAtpV2F5IE9wZW5DQTCCASIwDQYJKoZI" +
        "hvcNAQEBBQADggEPADCCAQoCggEBALOnLWYPvGNLxodQQ16tqCKflpEQF2OA" +
        "0inZbIeUVxOgph5Qf562XV1Mtbv5Agv+z4/LSLbwuo28NTkhSlEEwf1k9vL9" +
        "/wFvpPZ4ecpqXOS6LJ6khmMh53IwK/QpG8CeF9UxTZskjQzD9XgnNGYd2BIj" +
        "qVbzU5qWhsPYPRrsAaE2jS6My5+xfiw46/Xj26VZQ/PR/rVURsc40fpCE30y" +
        "TyORQeeZfjb/LxXH3e/3wjya04MBACv+uX89n5YXG7OH6zTriMAOn/aiXPfE" +
        "E8g834RKvVS7ruELWG/IcZDC+Eoy2qtgG7y1rFlXd3H/6rny+Xd+BZrt0WP/" +
        "hfezklVw3asCAwEAAaOCAYMwggF/MA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0P" +
        "BAQDAgEGMB0GA1UdDgQWBBTB5wyxknP1RJTAc6+X9Pi0J6TdmDAfBgNVHSME" +
        "GDAWgBTB5wyxknP1RJTAc6+X9Pi0J6TdmDAjBgNVHREEHDAagRhzdXBwb3J0" +
        "QGl3YXlzb2Z0d2FyZS5jb20wIwYDVR0SBBwwGoEYc3VwcG9ydEBpd2F5c29m" +
        "dHdhcmUuY29tMIGYBggrBgEFBQcBAQSBizCBiDA5BggrBgEFBQcwAoYtaHR0" +
        "cDovL2l3NTRjZW50LXZtMi9wa2kvcHViL2NhY2VydC9jYWNlcnQuY3J0MCUG" +
        "CCsGAQUFBzABhhlodHRwOi8vaXc1NGNlbnQtdm0yOjI1NjAvMCQGCCsGAQUF" +
        "BzAMhhhodHRwOi8vaXc1NGNlbnQtdm0yOjgzMC8wOgYDVR0fBDMwMTAvoC2g" +
        "K4YpaHR0cDovL2l3NTRjZW50LXZtMi9wa2kvcHViL2NybC9jYWNybC5jcmww" +
        "DQYJKoZIhvcNAQELBQADggEBAE9wBjQ1c+HAO2gIzT+J5Gqgrcu/m7t4hnHN" +
        "m5eyIfwXD1T6wOhovFmzPTaO9BSNsi4G5R7yZxOHeLN4PIY2kwFIbSkg7mwe" +
        "5aGp2RPIuK/MtzMZT6pq8uMGhzyHGsqtdkz7p26/G0anU2u59eimcvISdwNE" +
        "QXOIp/KNUC+Vx+Pmfw8PuFYDNacZ6YXp5qKoEjyUoBhNicmVINTNfDu0CQhu" +
        "pDr2UmDMDT2cdmTSRC0rcTe3BNzWqtsXNmIBFL1oB7B0PZbmFm8Bgvk1azxa" +
        "ClrcOKZWKOWa14XJy/DJk6nlOiq5W2AglUt8JVOpa5oVdiNRIT2WoGnpqVV9" +
        "tUeoWog=");

    // Provider name used for all JCA/JCE lookups in this test.
    private static final String BC = "BC";

    public String getName()
    {
        return "OCSP";
    }

    /**
     * Round-trips OCSP request building/verification and basic response
     * generation using an ECDSA signing key ("SHA1withECDSA").
     */
    private void testECDSA()
        throws Exception
    {
        String signDN = "O=Bouncy Castle, C=AU";
        KeyPair signKP = OCSPTestUtil.makeECKeyPair();
        X509CertificateHolder testCert = new JcaX509CertificateHolder(OCSPTestUtil.makeECDSACertificate(signKP, signDN, signKP, signDN));
        DigestCalculatorProvider digCalcProv = new JcaDigestCalculatorProviderBuilder().setProvider(BC).build();
        String origDN = "CN=Eric H. Echidna, E=eric@bouncycastle.org, O=Bouncy Castle, C=AU";
        GeneralName origName = new GeneralName(new X509Name(origDN));

        //
        // general id value for our test issuer cert and a serial number.
        //
        CertificateID id = new CertificateID(digCalcProv.get(CertificateID.HASH_SHA1), testCert, BigInteger.valueOf(1));

        //
        // basic request generation
        //
        OCSPReqBuilder gen = new OCSPReqBuilder();
        gen.addRequest(id);
        OCSPReq req = gen.build();
        // An unsigned request must report itself unsigned and carry no certs.
        if (req.isSigned())
        {
            fail("signed but shouldn't be");
        }
        X509CertificateHolder[] certs = req.getCerts();
        if (certs.length != 0)
        {
            fail("0 certs expected, but not found");
        }
        Req[] requests = req.getRequestList();
        if (!requests[0].getCertID().equals(id))
        {
            fail("Failed isFor test");
        }

        //
        // request generation with signing
        //
        X509CertificateHolder[] chain = new X509CertificateHolder[1];
        gen = new OCSPReqBuilder();
        gen.setRequestorName(new GeneralName(GeneralName.directoryName, new X509Principal("CN=fred")));
        gen.addRequest(
            new CertificateID(digCalcProv.get(CertificateID.HASH_SHA1), testCert, BigInteger.valueOf(1)));
        chain[0] = testCert;
        req = gen.build(new JcaContentSignerBuilder("SHA1withECDSA").setProvider(BC).build(
            signKP.getPrivate()), chain);
        if (!req.isSigned())
        {
            fail("not signed but should be");
        }
        if (!req.isSignatureValid(new JcaContentVerifierProviderBuilder().setProvider(BC).build(signKP.getPublic())))
        {
            fail("signature failed to verify");
        }
        requests = req.getRequestList();
        if (!requests[0].getCertID().equals(id))
        {
            fail("Failed isFor test");
        }
        certs = req.getCerts();
        if (certs == null)
        {
            fail("null certs found");
        }
        if (certs.length != 1 || !certs[0].equals(testCert))
        {
            fail("incorrect certs found in request");
        }

        //
        // encoding test
        //
        // DER round-trip: re-parse the encoding and re-verify the signature.
        byte[] reqEnc = req.getEncoded();
        OCSPReq newReq = new OCSPReq(reqEnc);
        if (!newReq.isSignatureValid(new JcaContentVerifierProviderBuilder().setProvider(BC).build(signKP.getPublic())))
        {
            fail("newReq signature failed to verify");
        }

        //
        // request generation with signing and nonce
        //
        chain = new X509CertificateHolder[1];
        gen = new OCSPReqBuilder();
        Vector oids = new Vector();
        Vector values = new Vector();
        byte[] sampleNonce = new byte[16];
        Random rand = new Random();
        rand.nextBytes(sampleNonce);
        gen.setRequestorName(new GeneralName(GeneralName.directoryName, new X509Principal("CN=fred")));
        // Attach the nonce as a non-critical request extension.
        ExtensionsGenerator extGen = new ExtensionsGenerator();
        extGen.addExtension(OCSPObjectIdentifiers.id_pkix_ocsp_nonce, false, new DEROctetString(sampleNonce));
        gen.setRequestExtensions(extGen.generate());
        gen.addRequest(
            new CertificateID(digCalcProv.get(CertificateID.HASH_SHA1), testCert, BigInteger.valueOf(1)));
        chain[0] = testCert;
        req = gen.build(new JcaContentSignerBuilder("SHA1withECDSA").setProvider(BC).build(signKP.getPrivate()), chain);
        if (!req.isSigned())
        {
            fail("not signed but should be");
        }
        if (!req.isSignatureValid(new JcaContentVerifierProviderBuilder().setProvider(BC).build(signKP.getPublic())))
        {
            fail("signature failed to verify");
        }

        //
        // extension check.
        //
        Set extOids = req.getCriticalExtensionOIDs();
        if (extOids.size() != 0)
        {
            fail("wrong number of critical extensions in OCSP request.");
        }
        extOids = req.getNonCriticalExtensionOIDs();
        if (extOids.size() != 1)
        {
            fail("wrong number of non-critical extensions in OCSP request.");
        }
        Extension extValue = req.getExtension(OCSPObjectIdentifiers.id_pkix_ocsp_nonce);
        ASN1Encodable extObj = extValue.getParsedValue();
        if (!(extObj instanceof ASN1OctetString))
        {
            fail("wrong extension type found.");
        }
        if (!areEqual(((ASN1OctetString)extObj).getOctets(), sampleNonce))
        {
            fail("wrong extension value found.");
        }

        //
        // request list check
        //
        requests = req.getRequestList();
        if (!requests[0].getCertID().equals(id))
        {
            fail("Failed isFor test");
        }

        //
        // response generation
        //
        BasicOCSPRespBuilder respGen = new JcaBasicOCSPRespBuilder(signKP.getPublic(), digCalcProv.get(RespID.HASH_SHA1));
        respGen.addResponse(id, CertificateStatus.GOOD);
        BasicOCSPResp resp = respGen.build(new JcaContentSignerBuilder("SHA1withECDSA").setProvider(BC).build(signKP.getPrivate()), chain, new Date());
    }

    /**
     * Same round-trip as {@link #testECDSA()} but with an RSA key pair and
     * "SHA1withRSA"; additionally wraps the basic response in an OCSPResp
     * and encodes it.
     */
    private void testRSA()
        throws Exception
    {
        String signDN = "O=Bouncy Castle, C=AU";
        KeyPair signKP = OCSPTestUtil.makeKeyPair();
        X509CertificateHolder testCert = new JcaX509CertificateHolder(OCSPTestUtil.makeCertificate(signKP, signDN, signKP, signDN));
        DigestCalculatorProvider digCalcProv = new JcaDigestCalculatorProviderBuilder().setProvider(BC).build();
        String origDN = "CN=Eric H. Echidna, E=eric@bouncycastle.org, O=Bouncy Castle, C=AU";
        GeneralName origName = new GeneralName(new X509Name(origDN));

        //
        // general id value for our test issuer cert and a serial number.
        //
        CertificateID id = new CertificateID(digCalcProv.get(CertificateID.HASH_SHA1), testCert, BigInteger.valueOf(1));

        //
        // basic request generation
        //
        OCSPReqBuilder gen = new OCSPReqBuilder();
        gen.addRequest(
            new CertificateID(digCalcProv.get(CertificateID.HASH_SHA1), testCert, BigInteger.valueOf(1)));
        OCSPReq req = gen.build();
        if (req.isSigned())
        {
            fail("signed but shouldn't be");
        }
        X509CertificateHolder[] certs = req.getCerts();
        if (certs.length != 0)
        {
            fail("0 certs expected, but not found");
        }
        Req[] requests = req.getRequestList();
        if (!requests[0].getCertID().equals(id))
        {
            fail("Failed isFor test");
        }

        //
        // request generation with signing
        //
        X509CertificateHolder[] chain = new X509CertificateHolder[1];
        gen = new OCSPReqBuilder();
        gen.setRequestorName(new GeneralName(GeneralName.directoryName, new X509Principal("CN=fred")));
        gen.addRequest(
            new CertificateID(digCalcProv.get(CertificateID.HASH_SHA1), testCert, BigInteger.valueOf(1)));
        chain[0] = testCert;
        req = gen.build(new JcaContentSignerBuilder("SHA1withRSA").setProvider(BC).build(signKP.getPrivate()), chain);
        if (!req.isSigned())
        {
            fail("not signed but should be");
        }
        if (!req.isSignatureValid(new JcaContentVerifierProviderBuilder().setProvider(BC).build(signKP.getPublic())))
        {
            fail("signature failed to verify");
        }
        requests = req.getRequestList();
        if (!requests[0].getCertID().equals(id))
        {
            fail("Failed isFor test");
        }
        certs = req.getCerts();
        if (certs == null)
        {
            fail("null certs found");
        }
        if (certs.length != 1 || !certs[0].equals(testCert))
        {
            fail("incorrect certs found in request");
        }

        //
        // encoding test
        //
        byte[] reqEnc = req.getEncoded();
        OCSPReq newReq = new OCSPReq(reqEnc);
        if (!newReq.isSignatureValid(new JcaContentVerifierProviderBuilder().setProvider(BC).build(signKP.getPublic())))
        {
            fail("newReq signature failed to verify");
        }

        //
        // request generation with signing and nonce
        //
        chain = new X509CertificateHolder[1];
        gen = new OCSPReqBuilder();
        byte[] sampleNonce = new byte[16];
        Random rand = new Random();
        rand.nextBytes(sampleNonce);
        gen.setRequestorName(new GeneralName(GeneralName.directoryName, new X509Principal("CN=fred")));
        ExtensionsGenerator extGen = new ExtensionsGenerator();
        extGen.addExtension(OCSPObjectIdentifiers.id_pkix_ocsp_nonce, false, new DEROctetString(sampleNonce));
        gen.setRequestExtensions(extGen.generate());
        gen.addRequest(
            new CertificateID(digCalcProv.get(CertificateID.HASH_SHA1), testCert, BigInteger.valueOf(1)));
        chain[0] = testCert;
        req = gen.build(new JcaContentSignerBuilder("SHA1withRSA").setProvider(BC).build(signKP.getPrivate()), chain);
        if (!req.isSigned())
        {
            fail("not signed but should be");
        }
        if (!req.isSignatureValid(new JcaContentVerifierProviderBuilder().setProvider(BC).build(signKP.getPublic())))
        {
            fail("signature failed to verify");
        }

        //
        // extension check.
        //
        Set extOids = req.getCriticalExtensionOIDs();
        if (extOids.size() != 0)
        {
            fail("wrong number of critical extensions in OCSP request.");
        }
        extOids = req.getNonCriticalExtensionOIDs();
        if (extOids.size() != 1)
        {
            fail("wrong number of non-critical extensions in OCSP request.");
        }
        Extension ext = req.getExtension(OCSPObjectIdentifiers.id_pkix_ocsp_nonce);
        ASN1Encodable extObj = ext.getParsedValue();
        if (!(extObj instanceof ASN1OctetString))
        {
            fail("wrong extension type found.");
        }
        if (!areEqual(((ASN1OctetString)extObj).getOctets(), sampleNonce))
        {
            fail("wrong extension value found.");
        }

        //
        // request list check
        //
        requests = req.getRequestList();
        if (!requests[0].getCertID().equals(id))
        {
            fail("Failed isFor test");
        }

        //
        // response generation
        //
        BasicOCSPRespBuilder respGen = new JcaBasicOCSPRespBuilder(signKP.getPublic(), digCalcProv.get(RespID.HASH_SHA1));
        respGen.addResponse(id, CertificateStatus.GOOD);
        BasicOCSPResp resp = respGen.build(new JcaContentSignerBuilder("SHA1withRSA").setProvider(BC).build(signKP.getPrivate()), chain, new Date());
        // Wrap in the outer OCSPResponse structure and make sure it encodes.
        OCSPRespBuilder rGen = new OCSPRespBuilder();
        byte[] enc = rGen.build(OCSPRespBuilder.SUCCESSFUL, resp).getEncoded();
    }

    /**
     * Checks that a request with an explicitly-encoded version number
     * (irregReq) still parses and its signature verifies.
     */
    private void testIrregularVersionReq()
        throws Exception
    {
        OCSPReq ocspRequest = new OCSPReq(irregReq);
        X509CertificateHolder cert = ocspRequest.getCerts()[0];
        if (!ocspRequest.isSignatureValid(new JcaContentVerifierProviderBuilder().setProvider(BC).build(cert)))
        {
            fail("extra version encoding test failed");
        }
    }

    /**
     * Checks that parsing the malformed invalidResp blob fails with a
     * CertIOException whose root cause is "ENUMERATED has zero length".
     */
    public void testInvalidResp()
        throws Exception
    {
        try
        {
            OCSPResp response = new OCSPResp(invalidResp);
        }
        catch (CertIOException e)
        {
            if (e.getCause() instanceof ASN1Exception)
            {
                Throwable c = ((ASN1Exception)e.getCause()).getCause();
                if (!c.getMessage().equals("ENUMERATED has zero length"))
                {
                    fail("parsing failed, but for wrong reason: " + c.getMessage());
                }
            }
            else
            {
                fail("parsing failed, but for wrong reason: " + e.getMessage());
            }
        }
    }

    /**
     * Main driver: repeats the RSA request round-trip, parses the two canned
     * responses, regenerates a response, then delegates to the focused tests
     * and finishes with empty-input error checks.
     */
    public void performTest()
        throws Exception
    {
        String signDN = "O=Bouncy Castle, C=AU";
        KeyPair signKP = OCSPTestUtil.makeKeyPair();
        X509CertificateHolder testCert = new JcaX509CertificateHolder(OCSPTestUtil.makeCertificate(signKP, signDN, signKP, signDN));
        String origDN = "CN=Eric H. Echidna, E=eric@bouncycastle.org, O=Bouncy Castle, C=AU";
        GeneralName origName = new GeneralName(new X509Name(origDN));
        DigestCalculatorProvider digCalcProv = new JcaDigestCalculatorProviderBuilder().setProvider(BC).build();

        //
        // general id value for our test issuer cert and a serial number.
        //
        CertificateID id = new CertificateID(digCalcProv.get(CertificateID.HASH_SHA1), testCert, BigInteger.valueOf(1));

        //
        // basic request generation
        //
        OCSPReqBuilder gen = new OCSPReqBuilder();
        gen.addRequest(
            new CertificateID(digCalcProv.get(CertificateID.HASH_SHA1), testCert, BigInteger.valueOf(1)));
        OCSPReq req = gen.build();
        if (req.isSigned())
        {
            fail("signed but shouldn't be");
        }
        X509CertificateHolder[] certs = req.getCerts();
        if (certs.length != 0)
        {
            fail("0 certs expected, but not found");
        }
        Req[] requests = req.getRequestList();
        if (!requests[0].getCertID().equals(id))
        {
            fail("Failed isFor test");
        }

        //
        // request generation with signing
        //
        X509CertificateHolder[] chain = new X509CertificateHolder[1];
        gen = new OCSPReqBuilder();
        gen.setRequestorName(new GeneralName(GeneralName.directoryName, new X509Principal("CN=fred")));
        gen.addRequest(
            new CertificateID(digCalcProv.get(CertificateID.HASH_SHA1), testCert, BigInteger.valueOf(1)));
        chain[0] = testCert;
        req = gen.build(new JcaContentSignerBuilder("SHA1withRSA").setProvider(BC).build(signKP.getPrivate()), chain);
        if (!req.isSigned())
        {
            fail("not signed but should be");
        }
        if (!req.isSignatureValid(new JcaContentVerifierProviderBuilder().setProvider(BC).build(signKP.getPublic())))
        {
            fail("signature failed to verify");
        }
        requests = req.getRequestList();
        if (!requests[0].getCertID().equals(id))
        {
            fail("Failed isFor test");
        }
        certs = req.getCerts();
        if (certs == null)
        {
            fail("null certs found");
        }
        if (certs.length != 1 || !certs[0].equals(testCert))
        {
            fail("incorrect certs found in request");
        }

        //
        // encoding test
        //
        byte[] reqEnc = req.getEncoded();
        OCSPReq newReq = new OCSPReq(reqEnc);
        if (!newReq.isSignatureValid(new JcaContentVerifierProviderBuilder().setProvider(BC).build(signKP.getPublic())))
        {
            fail("newReq signature failed to verify");
        }

        //
        // request generation with signing and nonce
        //
        chain = new X509CertificateHolder[1];
        gen = new OCSPReqBuilder();
        Vector oids = new Vector();
        Vector values = new Vector();
        byte[] sampleNonce = new byte[16];
        Random rand = new Random();
        rand.nextBytes(sampleNonce);
        gen.setRequestorName(new GeneralName(GeneralName.directoryName, new X509Principal("CN=fred")));
        ExtensionsGenerator extGen = new ExtensionsGenerator();
        extGen.addExtension(OCSPObjectIdentifiers.id_pkix_ocsp_nonce, false, new DEROctetString(sampleNonce));
        gen.setRequestExtensions(extGen.generate());
        gen.addRequest(
            new CertificateID(digCalcProv.get(CertificateID.HASH_SHA1), testCert, BigInteger.valueOf(1)));
        chain[0] = testCert;
        req = gen.build(new JcaContentSignerBuilder("SHA1withRSA").setProvider(BC).build(signKP.getPrivate()), chain);
        if (!req.isSigned())
        {
            fail("not signed but should be");
        }
        if (!req.isSignatureValid(new JcaContentVerifierProviderBuilder().setProvider(BC).build(signKP.getPublic())))
        {
            fail("signature failed to verify");
        }

        //
        // extension check.
        //
        Set extOids = req.getCriticalExtensionOIDs();
        if (extOids.size() != 0)
        {
            fail("wrong number of critical extensions in OCSP request.");
        }
        extOids = req.getNonCriticalExtensionOIDs();
        if (extOids.size() != 1)
        {
            fail("wrong number of non-critical extensions in OCSP request.");
        }
        Extension ext = req.getExtension(OCSPObjectIdentifiers.id_pkix_ocsp_nonce);
        ASN1Encodable extObj = ext.getParsedValue();
        if (!(extObj instanceof ASN1OctetString))
        {
            fail("wrong extension type found.");
        }
        if (!areEqual(((ASN1OctetString)extObj).getOctets(), sampleNonce))
        {
            fail("wrong extension value found.");
        }

        //
        // request list check
        //
        requests = req.getRequestList();
        if (!requests[0].getCertID().equals(id))
        {
            fail("Failed isFor test");
        }

        //
        // response parsing - test 1
        //
        OCSPResp response = new OCSPResp(testResp1);
        if (response.getStatus() != 0)
        {
            fail("response status not zero.");
        }
        BasicOCSPResp brep = (BasicOCSPResp)response.getResponseObject();
        chain = brep.getCerts();
        // Verify against the responder certificate embedded in the response.
        if (!brep.isSignatureValid(new JcaContentVerifierProviderBuilder().setProvider(BC).build(chain[0])))
        {
            fail("response 1 failed to verify.");
        }

        //
        // test 2
        //
        SingleResp[] singleResp = brep.getResponses();
        response = new OCSPResp(testResp2);
        if (response.getStatus() != 0)
        {
            fail("response status not zero.");
        }
        brep = (BasicOCSPResp)response.getResponseObject();
        chain = brep.getCerts();
        if (!brep.isSignatureValid(new JcaContentVerifierProviderBuilder().setProvider(BC).build(chain[0])))
        {
            fail("response 2 failed to verify.");
        }
        singleResp = brep.getResponses();

        //
        // simple response generation
        //
        OCSPRespBuilder respGen = new OCSPRespBuilder();
        OCSPResp resp = respGen.build(OCSPRespBuilder.SUCCESSFUL, response.getResponseObject());
        if (!resp.getResponseObject().equals(response.getResponseObject()))
        {
            fail("response fails to match");
        }

        testECDSA();
        testRSA();
        testIrregularVersionReq();
        testInvalidResp();

        //
        // Empty data test
        //
        // Both parsers must reject empty input with a specific message.
        try
        {
            response = new OCSPResp(new byte[0]);
            fail("no exception thrown");
        }
        catch (IOException e)
        {
            if (!e.getMessage().equals("malformed response: no response data found"))
            {
                fail("wrong exception");
            }
        }
        try
        {
            req = new OCSPReq(new byte[0]);
            fail("no exception thrown");
        }
        catch (IOException e)
        {
            if (!e.getMessage().equals("malformed request: no request data found"))
            {
                fail("wrong exception");
            }
        }
    }

    public static void main(
        String[] args)
    {
        Security.addProvider(new BouncyCastleProvider());
        runTest(new OCSPTest());
    }
}
package com.jecapps.dumpMe; import java.io.*; import java.net.HttpURLConnection; import java.net.MalformedURLException; import java.net.ProtocolException; import java.net.URL; import java.text.ParseException; import java.util.ArrayList; import java.util.Map; import java.util.Scanner; import javax.json.*; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.json.simple.JSONObject; import org.json.simple.parser.JSONParser; import org.springframework.security.authentication.AnonymousAuthenticationToken; import org.springframework.security.core.Authentication; import org.springframework.security.core.context.SecurityContextHolder; import org.springframework.security.core.userdetails.UserDetails; import org.springframework.security.web.authentication.logout.SecurityContextLogoutHandler; import org.springframework.stereotype.Controller; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.servlet.ModelAndView; import com.google.gson.Gson; import com.google.gson.reflect.TypeToken; @Controller public class MainController { @RequestMapping(value = { "/" }, method = RequestMethod.GET) public ModelAndView defaultPage() { ModelAndView mav = new ModelAndView("redirect:/dashboard"); return mav; } @RequestMapping(value = "/dashboard", method = RequestMethod.GET) public ModelAndView dashboardPage() { ArrayList<JSONObject> addressesList = null; String[] result = null; try { URL url = new URL("https://yeg4tfkkal.execute-api.us-west-2.amazonaws.com/prod/address"); InputStream input = url.openStream(); addressesList = ReadJSON(input, "UTF-8"); } catch (IOException d) { System.out.println(d); } ArrayList<String> addresses = new ArrayList<String>(); ArrayList<String> deviceId = new ArrayList<String>(); for (JSONObject obj : addressesList) { JSONObject something = obj; for 
(JSONObject address : (ArrayList<JSONObject>) something.get("Items")) { if (address.get("deviceId").toString().toLowerCase().contains("web")) { addresses.add((String) address.get("streetAddress")); deviceId.add((String) address.get("deviceId")); } } } ModelAndView model = new ModelAndView(); model.addObject("addresses", addresses); model.addObject("devices", deviceId); model.setViewName("dashboard"); return model; } @RequestMapping(value = "/schedule", params = { "schedule" }, method = RequestMethod.POST) public ModelAndView schedule(@RequestParam("address") String address, @RequestParam("type") String type, @RequestParam(value = "schedule") String buttonType) { if (buttonType.equals("Schedule Pickup")) { try { String url = "https://yeg4tfkkal.execute-api.us-west-2.amazonaws.com/prod/request"; URL object = new URL(url); HttpURLConnection con = (HttpURLConnection) object.openConnection(); con.setDoOutput(true); con.setDoInput(true); con.setRequestProperty("Content-Type", "application/json"); con.setRequestProperty("Accept", "application/json"); con.setRequestMethod("POST"); JsonObject request = Json.createObjectBuilder().add("deviceId", address).add("deviceType", type) .add("status", "requested").build(); OutputStreamWriter wr = new OutputStreamWriter(con.getOutputStream()); wr.write(request.toString()); wr.flush(); // display what returns the POST request StringBuilder sb = new StringBuilder(); int HttpResult = con.getResponseCode(); if (HttpResult == HttpURLConnection.HTTP_OK) { BufferedReader br = new BufferedReader(new InputStreamReader(con.getInputStream(), "utf-8")); String line = null; while ((line = br.readLine()) != null) { sb.append(line + "\n"); } br.close(); System.out.println("" + sb.toString()); } else { System.out.println(con.getResponseMessage()); } } catch (IOException ex) { System.out.println(ex); } ModelAndView model = new ModelAndView(); model.addObject("address", address); model.addObject("type", type); model.setViewName("scheduleResp"); return 
model; } else if (buttonType.equals("Get History")) { ArrayList<JSONObject> addressesList = null; ArrayList<JSONObject> scheduleHistory = null; String[] result = null; try { URL url = new URL("https://yeg4tfkkal.execute-api.us-west-2.amazonaws.com/prod/address"); InputStream input = url.openStream(); addressesList = ReadJSON(input, "UTF-8"); } catch (IOException d) { System.out.println(d); } ArrayList<String> deviceIds = new ArrayList<String>(); for (JSONObject obj : addressesList) { JSONObject something = obj; System.out.println(address); for (JSONObject address1 : (ArrayList<JSONObject>) something.get("Items")) { if (address1.get("streetAddress").toString().contains(address)) { deviceIds.add((String) address1.get("deviceId")); System.out.println(address1.get("deviceId")); } else System.out.println(address1.get("streetAddress")); } } ArrayList<String> timeHist = new ArrayList<String>(); ArrayList<String> typeHist = new ArrayList<String>(); for (String ID : deviceIds) { try { URL url = new URL("https://yeg4tfkkal.execute-api.us-west-2.amazonaws.com/history/" + ID); InputStream input = url.openStream(); scheduleHistory = ReadJSON(input, "UTF-8"); for (JSONObject obj : scheduleHistory) { JSONObject something = obj; for (JSONObject ids : (ArrayList<JSONObject>) something.get("Items")) { timeHist.add((String) ids.get("timestamp")); typeHist.add((String) ids.get("deviceType")); } } } catch (IOException d) { System.out.println(d); } } ModelAndView model = new ModelAndView(); model.addObject("time", timeHist); model.addObject("type", typeHist); model.setViewName("scheduleHist"); return model; } else throw new IllegalArgumentException("Need either approve or deny!"); } // for 403 access denied page @RequestMapping(value = "/403", method = RequestMethod.GET) public ModelAndView accesssDenied() { ModelAndView model = new ModelAndView(); // check if user is login Authentication auth = SecurityContextHolder.getContext().getAuthentication(); if (!(auth instanceof 
AnonymousAuthenticationToken)) { UserDetails userDetail = (UserDetails) auth.getPrincipal(); model.addObject("username", userDetail.getUsername()); } model.setViewName("403"); return model; } public static synchronized ArrayList<JSONObject> ReadJSON(InputStream input, String Encoding) { Scanner scn = new Scanner(input, Encoding); ArrayList<JSONObject> json = new ArrayList<JSONObject>(); // Reading and Parsing Strings to Json while (scn.hasNext()) { JSONObject obj = null; try { obj = (JSONObject) new JSONParser().parse(scn.nextLine()); } catch (org.json.simple.parser.ParseException e) { // TODO Auto-generated catch block e.printStackTrace(); } json.add(obj); } // Here Printing Json Objects return json; } }
package net.ripe.db.whois.api.mail.dequeue; import com.google.common.base.Charsets; import net.ripe.db.whois.api.MimeMessageProvider; import net.ripe.db.whois.api.mail.MailMessage; import net.ripe.db.whois.common.Message; import net.ripe.db.whois.update.domain.*; import net.ripe.db.whois.update.log.LoggerContext; import org.apache.commons.lang.StringUtils; import org.joda.time.DateTime; import org.joda.time.format.DateTimeFormat; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; import javax.mail.MessagingException; import javax.mail.internet.ContentType; import javax.mail.internet.MimeMessage; import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.List; import static junit.framework.Assert.assertTrue; import static org.hamcrest.Matchers.*; import static org.junit.Assert.assertThat; import static org.mockito.Matchers.any; import static org.mockito.Mockito.*; @RunWith(MockitoJUnitRunner.class) public class MessageParserTest { @Mock MimeMessage mimeMessage; @Mock UpdateContext updateContext; @Mock LoggerContext loggerContext; @InjectMocks MessageParser subject; @Before public void setUp() throws Exception { when(mimeMessage.getContentType()).thenReturn("text/plain"); when(mimeMessage.getContent()).thenReturn("1234"); } @Test public void parseKeywords_validOnes() throws Exception { final Keyword[] validKeywords = new Keyword[]{Keyword.HELP, Keyword.HOWTO, Keyword.NEW, Keyword.NONE}; for (Keyword keyword : validKeywords) { final String keywordKeyword = keyword.getKeyword(); if (keywordKeyword == null) { continue; } when(mimeMessage.getSubject()).thenReturn(keywordKeyword); final MailMessage message = subject.parse(mimeMessage, updateContext); assertThat(keyword.toString(), message.getKeyword(), is(keyword)); verify(updateContext, never()).addGlobalMessage(any(Message.class)); } } @Test public void 
parseKeywords_diff() throws Exception { final Keyword keyword = Keyword.DIFF; when(mimeMessage.getSubject()).thenReturn(keyword.getKeyword()); final MailMessage message = subject.parse(mimeMessage, updateContext); assertThat(keyword.toString(), message.getKeyword(), is(keyword)); verify(updateContext, never()).addGlobalMessage(any(Message.class)); } @Test public void parse_set_delivery_date() throws Exception { MimeMessage simpleTextUnsignedMessage = MimeMessageProvider.getMessageSimpleTextUnsigned(); final MailMessage result = subject.parse(simpleTextUnsignedMessage, updateContext); assertThat(result.getDate(), is("Mon, 28 May 2012 00:04:45 +0200")); } @Test public void parse_set_default_date() throws Exception { when(mimeMessage.getSubject()).thenReturn("NEW"); final MailMessage message = subject.parse(mimeMessage, updateContext); assertThat(message.getDate().length(), not(is(0))); final String timezone = DateTimeFormat.forPattern("zzz").print(new DateTime()); assertThat(message.getDate(), containsString(timezone)); final String year = DateTimeFormat.forPattern("yyyy").print(new DateTime()); assertThat(message.getDate(), containsString(year)); } @Test public void parseKeywords_mixedCaps() throws Exception { when(mimeMessage.getSubject()).thenReturn("nEw"); final MailMessage message = subject.parse(mimeMessage, updateContext); assertThat(message.getKeyword(), is(Keyword.NEW)); verify(updateContext, never()).addGlobalMessage(any(Message.class)); } @Test public void parseSending_stringWithKeyword() throws Exception { final String keywordString = "sending my new objects"; when(mimeMessage.getSubject()).thenReturn(keywordString); final MailMessage message = subject.parse(mimeMessage, updateContext); assertThat(message.getKeyword(), is(Keyword.NONE)); verify(updateContext, times(1)).addGlobalMessage(UpdateMessages.invalidKeywordsFound(keywordString)); verify(updateContext, times(1)).addGlobalMessage(UpdateMessages.allKeywordsIgnored()); } @Test public void 
parseKeywords_keyWords() throws Exception { final String keywordString = "KEYWORDS:"; when(mimeMessage.getSubject()).thenReturn(keywordString); final MailMessage message = subject.parse(mimeMessage, updateContext); assertThat(message.getKeyword(), is(Keyword.NONE)); verify(updateContext, times(1)).addGlobalMessage(UpdateMessages.invalidKeywordsFound(keywordString)); verify(updateContext, times(1)).addGlobalMessage(UpdateMessages.allKeywordsIgnored()); } @Test public void parseKeywords_keyWordsAndNew() throws Exception { final String keywordString = "KEYWORDS: new"; when(mimeMessage.getSubject()).thenReturn(keywordString); final MailMessage message = subject.parse(mimeMessage, updateContext); assertThat(message.getKeyword(), is(Keyword.NONE)); verify(updateContext, times(1)).addGlobalMessage(UpdateMessages.invalidKeywordsFound(keywordString)); verify(updateContext, times(1)).addGlobalMessage(UpdateMessages.allKeywordsIgnored()); } @Test public void parseKeywords_twoValidKeywords() throws Exception { final String keywordString = "new help"; when(mimeMessage.getSubject()).thenReturn(keywordString); final MailMessage message = subject.parse(mimeMessage, updateContext); assertThat(message.getKeyword(), is(Keyword.NONE)); assertThat(message.getContentWithCredentials(), hasSize(1)); final ContentWithCredentials contentWithCredentials = message.getContentWithCredentials().get(0); assertThat(contentWithCredentials.getContent(), containsString("1234")); assertThat(contentWithCredentials.getCredentials(), hasSize(0)); assertThat(message.getUpdateMessage(), is("1234")); verify(updateContext, times(1)).addGlobalMessage(UpdateMessages.invalidKeywordsFound(keywordString)); verify(updateContext, times(1)).addGlobalMessage(UpdateMessages.allKeywordsIgnored()); } @Test public void parse_invalid_reply_to() throws Exception { MimeMessage messageWithInvalidReplyTo = new MimeMessage(null, new ByteArrayInputStream("Reply-To: <respondera: ventas@amusing.cl>".getBytes())); MailMessage 
result = subject.parse(messageWithInvalidReplyTo, updateContext); assertTrue(StringUtils.isBlank(result.getReplyTo())); } @Test public void parse_missing_reply_to() throws Exception { MimeMessage messageWithoutReplyTo = new MimeMessage(null, new ByteArrayInputStream("From: minimal@mailclient.org".getBytes())); MailMessage result = subject.parse(messageWithoutReplyTo, updateContext); assertThat(result.getReplyTo(), is("minimal@mailclient.org")); assertThat(result.getFrom(), is("minimal@mailclient.org")); } @Test public void parse_plain_text_unsigned_message() throws Exception { final MailMessage message = subject.parse(MimeMessageProvider.getMessageSimpleTextUnsigned(), updateContext); assertThat(message.getSubject(), is("")); assertThat(message.getFrom(), is("\"foo@test.de\" <bitbucket@ripe.net>")); assertThat(message.getId(), is("<20120527220444.GA6565@XXXsource.test.de>")); assertThat(message.getReplyTo(), is("\"foo@test.de\" <bitbucket@ripe.net>")); assertThat(message.getKeyword(), is(Keyword.NONE)); final String expectedValue = "" + "inetnum: 109.69.68.0 - 109.69.68.7\n" + "netname: delete\n" + "descr: Description\n" + "country: DE\n" + "admin-c: T1-RIPE\n" + "tech-c: T1-RIPE\n" + "status: ASSIGNED PA\n" + "mnt-by: TEST-MNT\n" + "password: password\n" + "changed: changed@ripe.net 20120528\n" + "source: RIPE\n" + "\n" + "inetnum: 109.69.68.0 - 109.69.68.7\n" + "netname: delete\n" + "descr: Description\n" + "country: DE\n" + "admin-c: T1-RIPE\n" + "tech-c: T1-RIPE\n" + "status: ASSIGNED PA\n" + "mnt-by: TEST-MNT\n" + "password: password\n" + "changed: changed@ripe.net 20120528\n" + "source: RIPE\n" + "delete: new subnet size\n"; assertThat(message.getUpdateMessage(), is(expectedValue)); assertThat(message.getContentWithCredentials(), hasSize(1)); final ContentWithCredentials contentWithCredentials = message.getContentWithCredentials().get(0); assertThat(contentWithCredentials.getContent(), containsString(expectedValue)); 
assertThat(contentWithCredentials.getCredentials(), hasSize(0)); } @Test public void parse_text_html_message() throws Exception { final MailMessage message = subject.parse(MimeMessageProvider.getUpdateMessage("simpleHtmlTextUnsigned.mail"), updateContext); assertThat(message.getId(), is("<20120508173357.B3369481C7@ip-10-251-81-156.ec2.internal>")); assertThat(message.getReplyTo(), is("bitbucket@ripe.net")); assertThat(message.getKeyword(), is(Keyword.NONE)); assertThat(message.getContentWithCredentials(), hasSize(0)); } @Test public void parse_plain_text_utf8_encoded_message() throws Exception { final MailMessage message = subject.parse(MimeMessageProvider.getUpdateMessage("simplePlainTextUtf8Encoded.mail"), updateContext); assertThat(message.getId(), is("<20120824155310.258DB8F65CA@mx2.bogus.com>")); assertThat(message.getReplyTo(), is("test@foo.com")); assertThat(message.getKeyword(), is(Keyword.NONE)); final String expectedValue = "" + "inetnum: 10.0.0.0 - 10.0.0.255\n" + "status:ASSIGNED PI\n" + "mnt-by:T2-MNT\n" + "mnt-by:TEST-DBM-MNT\n" + "tech-c:VB1-TEST\n" + "tech-c:AA1-TEST\n" + "notify:test@foo.com\n" + "netname:TEST-NETWORK\n" + "org:ORG-VBO1-TEST\n" + "admin-c:VB1-TEST\n" + "admin-c:AA2-TEST\n" + "password:password\n" + "descr:\u042E\u043D\u0438\u043A\u043E\u0434\u043D\u043E\u0435 \u043E\u043F\u0438\u0441\u0430\u043D\u0438\u0435\n" + "country:US\n" + "country:BY\n" + "country:RU\n" + "changed:test@foo.com 20120824\n" + "source:TEST"; assertThat(message.getUpdateMessage(), is(expectedValue)); assertThat(message.getContentWithCredentials(), hasSize(1)); final ContentWithCredentials contentWithCredentials = message.getContentWithCredentials().get(0); assertThat(contentWithCredentials.getContent(), containsString("inetnum: 10.0.0.0 - 10.0.0.255")); assertThat(contentWithCredentials.getCredentials(), hasSize(0)); } @Test public void parse_plain_text_inline_pgp_signature() throws Exception { final MailMessage message = 
subject.parse(MimeMessageProvider.getUpdateMessage("inlinePgpSigned.mail"), updateContext); assertThat(message.getId(), is("<ABC593474ADCF6EC3E43BC80@XXX[10.41.147.77]>")); assertThat(message.getReplyTo(), is("John Doe <bitbucket@ripe.net>")); assertThat(message.getKeyword(), is(Keyword.NONE)); final String expectedValue = "" + "-----BEGIN PGP SIGNED MESSAGE-----\n" + "Hash: SHA1\n" + "\n" + "route: 194.39.132.0/24\n" + "descr: Description\n" + "origin: AS12510\n" + "notify: foo@bar.com\n" + "mnt-by: BOGUS-MNT\n" + "mnt-by: T8-MNT\n" + "changed: nm@bogus.com 20120504\n" + "changed: nm@bogus.com 20120529\n" + "source: RIPE\n" + "delete: no longer required for AS12510\n" + "-----BEGIN PGP SIGNATURE-----\n" + "Version: GnuPG v1.4.9 (SunOS)\n" + "\n" + "iEYEARECAAYFAk/FbSMACgkQsAWoDcAb7KJmJgCfe2PjxUFIeHycZ85jteosU1ez\n" + "kL0An3ypg8F75jlPyTYIUuiCQEcP/9sz\n" + "=j7tD\n" + "-----END PGP SIGNATURE-----"; assertThat(message.getUpdateMessage(), is(expectedValue)); assertThat(message.getContentWithCredentials(), hasSize(1)); final ContentWithCredentials contentWithCredentials = message.getContentWithCredentials().get(0); assertThat(contentWithCredentials.getContent(), containsString(expectedValue)); assertThat(contentWithCredentials.getCredentials(), hasSize(0)); } @Test public void parse_multipart_text_plain_detached_pgp_signature() throws Exception { final MailMessage message = subject.parse(MimeMessageProvider.getMessageMultipartPgpSigned(), updateContext); assertThat(message.getId(), is("<0C4C4196-55E6-4E8B-BE54-F8A92DEBD1A0@ripe.net>")); assertThat(message.getReplyTo(), is("User <user@ripe.net>")); assertThat(message.getKeyword(), is(Keyword.NEW)); final String expectedValue = "" + "key-cert: AUTO-1\n" + "method: X509\n" + "owner: /CN=4a96eecf-9d1c-4e12-8add-5ea5522976d8\n" + "fingerpr: 82:7C:C5:40:D1:DB:AE:6A:FA:F8:40:3E:3C:9C:27:7C\n" + "certif: -----BEGIN CERTIFICATE-----\n" + "certif: -----END CERTIFICATE-----\n" + "mnt-by: TEST-DBM-MNT\n" + "remarks: remark\n" + 
"changed: noreply@ripe.net 20121001\n" + "source: TEST"; assertThat(message.getUpdateMessage(), is(expectedValue)); List<ContentWithCredentials> contentWithCredentials = message.getContentWithCredentials(); assertThat(contentWithCredentials, hasSize(1)); assertThat(contentWithCredentials.get(0).getContent(), is(expectedValue)); assertTrue(contentWithCredentials.get(0).getCredentials().get(0) instanceof PgpCredential); } @Test public void parse_multipart_alternative_unsigned() throws Exception { final MailMessage message = subject.parse(MimeMessageProvider.getUpdateMessage("multipartAlternativeUnsigned.mail"), updateContext); final String expectedValue = "" + "password: password\npassword: password\n\n" + "as-set: AS198792:AS-TEST\n" + "descr: Description\n" + "tech-c: P1-RIPE\n" + "admin-c: P1-RIPE\n" + "mnt-by: TEST-MNT\n" + "mnt-by: TEST-MNT\n" + "changed: test@bogus.fr 20120531\n" + "source: RIPE\n\n\n" + "as-set: AS1:AS-FOO-BOGUS\n" + "descr: Description\n" + "tech-c: P1-RIPE\n" + "admin-c: P1-RIPE\n" + "mnt-by: TEST-MNT\n" + "mnt-by: TEST-MNT\n" + "changed: test@bogus.fr 20120531\n" + "source: RIPE\n"; assertThat(message.getUpdateMessage(), is(expectedValue)); assertThat(message.getId(), is("<CACt+-2p6YfJAgtZLYSf=stuVV7M+6gBvbDLKWdBzXVC+vmxkbg@XXXmail.gmail.com>")); assertThat(message.getReplyTo(), is("John Doe <john@doe.com>")); assertThat(message.getKeyword(), is(Keyword.NONE)); assertThat(message.getContentWithCredentials(), hasSize(1)); final ContentWithCredentials contentWithCredentials = message.getContentWithCredentials().get(0); assertThat(contentWithCredentials.getContent(), containsString(expectedValue)); assertThat(contentWithCredentials.getCredentials(), hasSize(0)); } @Test public void parse_multipart_alternative_inline_pgp_signature() throws Exception { final MailMessage message = subject.parse(MimeMessageProvider.getMessageMultipartAlternativePgpSigned(), updateContext); final String expectedValue = "" + "-----BEGIN PGP SIGNED MESSAGE-----\n" + 
"\n" + "route: 165.251.168.0/22\n" + "descr: Description\n" + "origin: AS1\n" + "notify: ripe-admin@foo.net\n" + "mnt-by: TEST-MNT\n" + "changed: john@doe.net 20120530\n" + "source: RIPE\n" + "\n" + "-----BEGIN PGP SIGNATURE-----\n" + "Version: PGP 6.5.8\n" + "\n" + "iQCVAwUBT8amRPVKpQwG/7ZlAQFM0gP+N3d2N2IivRzte0o6bvU3nqN84yGC4l3r\n" + "zeZKi7dvU3R2betF8IElvL4x/bpBPAHXQWO+QaYMg3Yz6HCBKLJwMFgyWbmcJtD0\n" + "zL1HUOJmGyNv/eFjNSMgfpeZEsPZ3R+Pz9gSjEAW5aAj1wLdpXvVK9rYOQPc3TVc\n" + "z2xhfX4BqpQ=\n" + "=ODtr\n" + "-----END PGP SIGNATURE-----"; assertThat(message.getUpdateMessage(), is(expectedValue)); assertThat(message.getId(), is("<D8447F003C9F4CA891CFA626760EBE8E@XXXRaysOfficePC>")); assertThat(message.getReplyTo(), is("John Doe <john@doe.net>")); assertThat(message.getKeyword(), is(Keyword.NONE)); assertThat(message.getContentWithCredentials(), hasSize(1)); final ContentWithCredentials contentWithCredentials = message.getContentWithCredentials().get(0); assertThat(contentWithCredentials.getContent(), containsString(expectedValue)); assertThat(contentWithCredentials.getCredentials(), hasSize(0)); } @Test public void parse_multipart_alternative_detached_pgp_signature() throws Exception { final MimeMessage message = getMessage("" + "From: noreply@ripe.net\n" + "Content-Type: multipart/signed;\n" + "\tboundary=\"Apple-Mail=_8CAC1D90-3ABC-4010-9219-07F34D68A205\";\n" + "\tprotocol=\"application/pgp-signature\";\n" + "\tmicalg=pgp-sha1\n" + "Subject: NEW\n" + "Date: Wed, 2 Jan 2013 16:53:25 +0100\n" + "Message-Id: <220284EA-D739-4453-BBD2-807C87666F23@ripe.net>\n" + "To: test-dbm@ripe.net\n" + "Mime-Version: 1.0 (Apple Message framework v1283)\n" + "\n" + "\n" + "--Apple-Mail=_8CAC1D90-3ABC-4010-9219-07F34D68A205\n" + "Content-Type: multipart/alternative;\n" + "\tboundary=\"Apple-Mail=_40C18EAF-8C7D-479F-9001-D91F1181EEDA\"\n" + "\n" + "\n" + "--Apple-Mail=_40C18EAF-8C7D-479F-9001-D91F1181EEDA\n" + "Content-Transfer-Encoding: 7bit\n" + "Content-Type: text/plain;\n" + 
"\tcharset=us-ascii\n" + "\n" + "person: First Person\n" + "address: St James Street\n" + "address: Burnley\n" + "address: UK\n" + "phone: +44 282 420469\n" + "nic-hdl: FP1-TEST\n" + "mnt-by: OWNER-MNT\n" + "changed: denis@ripe.net 20121016\n" + "source: TEST\n" + "\n" + "\n" + "--Apple-Mail=_40C18EAF-8C7D-479F-9001-D91F1181EEDA\n" + "Content-Transfer-Encoding: 7bit\n" + "Content-Type: text/html;\n" + "\tcharset=us-ascii\n" + "\n" + "<html><head></head><body style=\"word-wrap: break-word; -webkit-nbsp-mode: space; -webkit-line-break: after-white-space; \"><div style=\"font-size: 13px; \"><b>person: &nbsp;First Person</b></div><div style=\"font-size: 13px; \"><b>address: St James Street</b></div><div style=\"font-size: 13px; \"><b>address: Burnley</b></div><div style=\"font-size: 13px; \"><b>address: UK</b></div><div style=\"font-size: 13px; \"><b>phone: &nbsp; +44 282 420469</b></div><div style=\"font-size: 13px; \"><b>nic-hdl: FP1-TEST</b></div><div style=\"font-size: 13px; \"><b>mnt-by: &nbsp;OWNER-MNT</b></div><div style=\"font-size: 13px; \"><b>changed: <a href=\"mailto:denis@ripe.net\">denis@ripe.net</a> 20121016</b></div><div style=\"font-size: 13px; \"><b>source: &nbsp;TEST</b></div><div><br></div></body></html>\n" + "--Apple-Mail=_40C18EAF-8C7D-479F-9001-D91F1181EEDA--\n" + "\n" + "--Apple-Mail=_8CAC1D90-3ABC-4010-9219-07F34D68A205\n" + "Content-Transfer-Encoding: 7bit\n" + "Content-Disposition: attachment;\n" + "\tfilename=signature.asc\n" + "Content-Type: application/pgp-signature;\n" + "\tname=signature.asc\n" + "Content-Description: Message signed with OpenPGP using GPGMail\n" + "\n" + "-----BEGIN PGP SIGNATURE-----\n" + "Version: GnuPG v1.4.12 (Darwin)\n" + "\n" + "iQEcBAEBAgAGBQJQ5Ff1AAoJEO6ZHuIo9s1sQxoIAJYdnvbYjCwRyKgz7sB6/Lmh\n" + "Ca7A9FrKuRFXHH2IUM6FIlC8hvFpAlXfkSWtJ03PL4If3od0jL9pwge8hov75+nL\n" + "FnhCG2ktb6CfzjoaeumTvzbt5oSbq2itgvaQ15V6Rpb2LIh7yfAcoJ7UgK5X1XEI\n" + "OhZvuGy9M49unziI3oF0WwHl4b2bAt/r7/7DNgxlT00pMFqrcI3n00TXEAJphpzH\n" + 
"7Ym5+7PYvTtanxb5x8pMCmgtsKgF5RoHQv4ZBaSS0z00WVivk3cuCugziyTrwI2+\n" + "4IkFu75GfD+xKAldd2of09SrFEaOJfXNslq+BZoqc3hGOV+b7vpNARp0s7zsq4E=\n" + "=O7qu\n" + "-----END PGP SIGNATURE-----\n" + "\n" + "--Apple-Mail=_8CAC1D90-3ABC-4010-9219-07F34D68A205--"); final MailMessage mailMessage = subject.parse(message, updateContext); assertThat(mailMessage.getContentWithCredentials(), hasSize(1)); final ContentWithCredentials contentWithCredentials = mailMessage.getContentWithCredentials().get(0); assertThat(contentWithCredentials.getCredentials(), hasSize(1)); assertTrue(contentWithCredentials.getCredentials().get(0) instanceof PgpCredential); assertThat(contentWithCredentials.getContent(), is("" + "person: First Person\n" + "address: St James Street\n" + "address: Burnley\n" + "address: UK\n" + "phone: +44 282 420469\n" + "nic-hdl: FP1-TEST\n" + "mnt-by: OWNER-MNT\n" + "changed: denis@ripe.net 20121016\n" + "source: TEST\n\n")); } @Test public void parse_smime_multipart_text_plain() throws Exception { final MimeMessage message = getMessage("" + "From: <Registration.Ripe@company.com>\n" + "To: <auto-dbm@ripe.net>\n" + "Subject: Bogus\n" + "Date: Mon, 20 Aug 2012 10:52:39 +0000\n" + "Message-ID: <3723299.113919.1345459955655.JavaMail.trustmail@ss000807>\n" + "Accept-Language: de-CH, en-US\n" + "Content-Language: de-DE\n" + "Content-Type: multipart/signed; protocol=\"application/pkcs7-signature\"; micalg=sha1; \n" + "\tboundary=\"----=_Part_113918_874669.1345459955655\"\n" + "MIME-Version: 1.0\n" + "\n" + "------=_Part_113918_874669.1345459955655\n" + "Content-Type: text/plain; charset=\"iso-8859-1\"\n" + "Content-Transfer-Encoding: 7bit\n" + "Content-Language: de-DE\n" + "\n" + "inetnum: 217.193.127.137 - 217.193.127.137\n" + "netname: NET-NAME\n" + "descr: Bogus\n" + "descr: Address\n" + "country: CH\n" + "admin-c: TEST-RIPE\n" + "tech-c: TEST-RIPE\n" + "status: ASSIGNED PA\n" + "mnt-by: TEST-MNT\n" + "changed: email@foobar.net 20120312\n" + "source: RIPE\n" + "\n" + "changed: 
email@foobar.net\n" + "\n" + "\n" + "\n" + "\n" + "------=_Part_113918_874669.1345459955655\n" + "Content-Type: application/pkcs7-signature; name=smime.p7s; smime-type=signed-data\n" + "Content-Transfer-Encoding: base64\n" + "Content-Disposition: attachment; filename=\"smime.p7s\"\n" + "Content-Description: S/MIME Cryptographic Signature\n" + "\n" + "MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAQAAoIIQ/TCCBVkw\n" + "ggNBoAMCAQICECYdlHUPbJ2C1O/M47kPYTowDQYJKoZIhvcNAQEFBQAwZDELMAkGA1UEBhMCY2gx\n" + "ETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdpdGFsIENlcnRpZmljYXRlIFNlcnZpY2Vz\n" + "MRswGQYDVQQDExJTd2lzc2NvbSBSb290IENBIDEwHhcNMDYwMjIzMDk1MzEyWhcNMTYwMjIzMDk1\n" + "MzEyWjBlMQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwg\n" + "Q2VydGlmaWNhdGUgU2VydmljZXMxHDAaBgNVBAMTE1N3aXNzY29tIFJ1YmluIENBIDEwggEiMA0G\n" + "CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGWTjNZ38DiwRxLNBue9D2Bn85oUpE1lIpRjp2qm3P\n" + "BE+K9/smk2OvKBJ9PlLsROK4t3Hsc7De+BkG/f3eyyR9+4Acvx36nv4/74rS3+GNTGIFBIhcpjyY\n" + "s7w+TJNHAG7Pdok7hLUst6PwCU16YNcRdUIbDBWknzVs6hqsyYJRlzzYGEs9UuwvpQJUXE/gCxc0\n" + "x5zUyCFtZZjuQ32v+8ID7KWNvdp8HlOv8EEC/XrR/NoFq4CX5fHaJQf7YwMbkeM3/55za0cD8rDt\n" + "S4fRekIfeRJM5W+5vCEXzjJ1e+UoBrLzYxjZlgwEA/UXut1ulQHlKB88qDTJ+lLVg1JuAUGTAgMB\n" + "AAGjggEEMIIBADBABggrBgEFBQcBAQQ0MDIwMAYIKwYBBQUHMAKGJGh0dHA6Ly93d3cuc3dpc3Nk\n" + "aWdpY2VydC5jaC9kb3dubG9hZDASBgNVHRMBAf8ECDAGAQH/AgEAMBMGA1UdIAQMMAowCAYGYIV0\n" + "AVMEMEMGA1UdHwQ8MDowOKA2oDSGMmh0dHA6Ly93d3cuc3dpc3NkaWdpY2VydC5jaC9kb3dubG9h\n" + "ZC9zZGNzLXJvb3QuY3JsMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULcKno2M+P4NHq0gzNoGF\n" + "99TprMAwHwYDVR0jBBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wDQYJKoZIhvcNAQEFBQADggIB\n" + "ALXafISxFtQcm29o2r5cMUTRoUroEcdITEFjKa8ZQZf63JpHZYWQrziklNcvJOt5TmWtUsUc9KQK\n" + "y8AQN6m4AJR+Ol/7U8+3IFaqyRt9Bp38iaPv3AZSHU4Uf83UhW6zI/SlvXZxuKMkFP+l08eJCIFo\n" + "HHdHVUi1fzN9B15s3qFnSlCsNHWB3A5j81P8bKc7QdpIj4pUte6QTZd4ESchKEiUNzzqawVfVCW8\n" + 
"Btmq5xMv2r1cn34pBAjBGF/p3AE30DH5he4Yqn3xhnvBMOhHGbkoeaR5XXQKpvMmFSYo9eH2Yj6h\n" + "mQ/1R27eqdjaR9eGjbqJCz5HU2kiSgceI2YZgafoFjhlrPuq/7V4ZVKJbf7/eKqRWVuUh7/6wUsJ\n" + "lqiXtEIjM0R7hD0kjQzvOCAthV3byvB/V5VIzEViKRp0DQbNRGhog9nGP13xE4FF6xTMxFP7urip\n" + "elCDWc9Yc/pp50Vi7f0isynOC3BRjM0WXrHJumu83zDxzxNYUV12XIg6CL/SasRJpEheTsc9LcBU\n" + "3pxcsVQfxAl4SQG6nbdLUc8EwpCZs0fGxnRSstJ6394tIk8VqI1Dr/B8/a709t2rOptea4MgpEo7\n" + "BikJvbyRP8DPobdmQu4R66QOFdhlfZUWvxTZJvv0wXaKycYyNO8e3lpAnU2gQ4WJjnG9I9zynDLr\n" + "MIIFvzCCBKegAwIBAgIQGm9r2E0WLw04jBcdooo7hTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG\n" + "EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy\n" + "dmljZXMxHDAaBgNVBAMTE1N3aXNzY29tIFJ1YmluIENBIDEwHhcNMTEwODEyMTI1NDUyWhcNMTQw\n" + "ODEyMTI1NDUyWjCBpTENMAsGA1UEBwwEQmVybjELMAkGA1UECAwCQkUxDTALBgNVBAsMBFNDSVMx\n" + "IDAeBgNVBAoMF1N3aXNzY29tIElUIFNlcnZpY2VzIEFHMQswCQYDVQQGEwJDSDEaMBgGA1UEAwwR\n" + "UmVnaXN0cmF0aW9uIFJpcGUxLTArBgkqhkiG9w0BCQEWHnJlZ2lzdHJhdGlvbi5yaXBlQHN3aXNz\n" + "Y29tLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAID/R0iVboEhBcA9loz1DW2z\n" + "2ZAb2mlDoKGSW6wXK/Eu77AvRYWwjwqUBaRA1bLcoLiHt/q6f6b1jOc5YZqdMJ+s09gVcmYmvjMr\n" + "G6FCW/hM+/JjgovowwZbZcmQONJDYjAGL8YU23IFFy1mK0B0ZyYA2LeFGkHmqX1dhXk4zm34/XXs\n" + "9GZylRBAOH4qa2KviV+2VdufVl12u14aD6KaogCZhkAPCW8MTdZ2ZGNOu984KQMie4yHeFBM05/S\n" + "vMNs7ND9mPx67r1iJM4samc7KJx3VYa75427Hfr4Y9F6q9MRAB5wMirx1a3TS880NpWF5UoZpCNJ\n" + "Gr1L3Y/shbEGRJ8CAwEAAaOCAigwggIkMH8GCCsGAQUFBwEBBHMwcTAuBggrBgEFBQcwAYYiaHR0\n" + "cDovL29jc3Auc3dpc3NkaWdpY2VydC5jaC9ydWJpbjA/BggrBgEFBQcwAoYzaHR0cDovL3d3dy5z\n" + "d2lzc2RpZ2ljZXJ0LmNoL2Rvd25sb2FkL3NkY3MtcnViaW4uY3J0MB8GA1UdIwQYMBaAFC3Cp6Nj\n" + "Pj+DR6tIMzaBhffU6azAMEgGA1UdIARBMD8wPQYGYIV0AVMEMDMwMQYIKwYBBQUHAgEWJWh0dHA6\n" + "Ly93d3cuc3dpc3NkaWdpY2VydC5jaC9kb2N1bWVudHMwgbwGA1UdHwSBtDCBsTB0oHKgcIZubGRh\n" + "cDovL2xkYXAuc3dpc3NkaWdpY2VydC5jaC9DTj1Td2lzc2NvbSBSdWJpbiBDQSAxLGRjPXJ1Ymlu\n" + "LGRjPXN3aXNzZGlnaWNlcnQsZGM9Y2g/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGlzdD8wOaA3oDWG\n" + 
"M2h0dHA6Ly93d3cuc3dpc3NkaWdpY2VydC5jaC9kb3dubG9hZC9zZGNzLXJ1YmluLmNybDAdBgNV\n" + "HSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwDgYDVR0PAQH/BAQDAgSwMCkGA1UdEQQiMCCBHnJl\n" + "Z2lzdHJhdGlvbi5yaXBlQHN3aXNzY29tLmNvbTAdBgNVHQ4EFgQUI/zu8XKWjoVtr4oMzM1hkaP/\n" + "BCswDQYJKoZIhvcNAQEFBQADggEBAHaM6iCWIZJDYTBy+3k2ER4RhXTk1ksPIEg6a6Sp0UBNXDyI\n" + "Z3CfeDOheNuHC95psPryyuvFQUhzki/LYIYxYobwqsaO3CwbO07iABWmCOcEpuJSA8FdZ/BwnEGS\n" + "EIv3/a/ve/nRC4dXz1WL9r7Y/UBFD0+m2LSDme1Awmsk0ri0RQYlEXkTb3zAklPHKclb1yXRzmmN\n" + "i122nPl0Ax3GhfqFJY9gUmsb28QVDvLHnax/C+IsxQhIAi9zmgrTixGKOEUSECYKN9v1ug60yeqi\n" + "SFIKTXI2iZp6WY74HGQRGk5NRmnVFx5/69yFW27gk3vkuxHOsEJlDVOr8vAwgb5CYXowggXZMIID\n" + "waADAgECAhBcC4VcC+dZQd9XzD9/nag2MA0GCSqGSIb3DQEBBQUAMGQxCzAJBgNVBAYTAmNoMREw\n" + "DwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGlnaXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEb\n" + "MBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMB4XDTA1MDgxODEyMDYyMFoXDTI1MDgxODIyMDYy\n" + "MFowZDELMAkGA1UEBhMCY2gxETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdpdGFsIENl\n" + "cnRpZmljYXRlIFNlcnZpY2VzMRswGQYDVQQDExJTd2lzc2NvbSBSb290IENBIDEwggIiMA0GCSqG\n" + "SIb3DQEBAQUAA4ICDwAwggIKAoICAQDQubCoDNm7PyH4G9Uzk4AWZSB1sj2bYG1GyIwxbxfD+pps\n" + "Vu08xZFXw82rlkmQKhlLHqNtV93xK2IodUVeqtZb+gsl2KEW+RzELuaVKmfM0CluPIU0OGFJsQCf\n" + "1jpxX01tzl+5qeSJf2pS+sqb8typ+Z2ZRz9OKV+0po1dewuZEQMD/ufb26P/HaXNkB4BHzWwfwDb\n" + "kG/GfnvR7np6p6oMV2+kbcUTO7Cl2e0yHLReZ4tU3HOH5dMXfGZQcl3UGljB2c/YiQJvp0m0Nl3Q\n" + "pN4HLLZ1tyiR1pe+KPWYHupbJsm9sJdz2q6RJutowfk5FdZnSwptT8vPsORCcYxTeefu4dsdoG4d\n" + "jBp3NVwWHitTHzSL0Wz88mcHevWt7daaq6GxS+HMN1/9f81NrrgfnEP5KlhVQ0W8ls1wDvzJ42a6\n" + "To07gcsVZHu5lOhdM1KFcS5PjqIGEVHJ48uhbjEIZAzC0jz1NujX0A54IyCRySQqZSlbIvchzoNe\n" + "pPPeS9Noj0Z1XIMJbilrxHCM9Z3XIC//RtIrOMIvdRw9ftql7x5ghWlC08z4Y/4eQzmFprZjQRCz\n" + "cx680/rKfRZH4qfV0KOKCgiWYlZuNNvZArkwdeME0uePwrARQAqs1XECYosxvt3GI1gxQkMtdPnG\n" + "nqaKD+n+v4PmQ1ckuu9GNKrXEgE47QIDAQABo4GGMIGDMA4GA1UdDwEB/wQEAwIBhjAdBgNVHSEE\n" + "FjAUMBIGB2CFdAFTAAEGB2CFdAFTAAEwEgYDVR0TAQH/BAgwBgEB/wIBBzAfBgNVHSMEGDAWgBQD\n" + 
"JS/eb4IBOlws3CuhabVn1IzT/TAdBgNVHQ4EFgQUAyUv3m+CATpcLNwroWm1Z9SM0/0wDQYJKoZI\n" + "hvcNAQEFBQADggIBADUQy+ymBA0ND83A26uo8oiXDN+TL018QFYxeuukD2DNevO+wyeOAz6k3RLv\n" + "fh50Bjw/MfIce5ExIbTw0GyX1OmXsiRWHlbDNb2IBQ9bEBpk4ceCMPkyrZ5QLOd4BdAxsVqYinVO\n" + "kFxqFCrgUkeCYOYe2oGx+xQLWvGf0pW6PtAb1hUdo76G1dsPwElkuy5QGUvSJPjdHgdW0DiglXAg\n" + "dozX3R7en3HEI++DE1yjJBVNKUA8asSp2LemRKUN9OCddx5AcCb82tk25HnktT+8m2W+uxGWz9vG\n" + "KDk6CM5HW1NaxZn+Xand70zUxqWtAuaMBxIebwPRb6Cj8ym9EsdQorB/iKmZd5qxwKU5Llx8aeIs\n" + "sOo3aqThWuH1UOWD76W7KojnjNv9bV6XGah+ZnVrceq/scdvoPSOpOw0UVuMJgNwoXfVARJXADXb\n" + "I94OiiiZ/bEQb0v/OC1gTiyc62e1rUnuSx+sr/sNkFpmYHBdqs141CTuyEGgkwGSnGqe/LkkxbMV\n" + "gn6+rpUr67HA2uMBYAteaayEVmG+cRf+HRMP/saHRen+MqAaDROklFVxpRaLusqJsLLH/I/YVLWT\n" + "Yp3Oz1n7PRjOKss1FYJd/1QiW3FS+7fJ/mCbAEFk8Koq7LZCQ86JZoHIi585VAMl0xY1joTQX/ow\n" + "GvWabPQOU/k6W9EcMYIDTzCCA0sCAQEweTBlMQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Nj\n" + "b20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxHDAaBgNVBAMTE1N3aXNz\n" + "Y29tIFJ1YmluIENBIDECEBpva9hNFi8NOIwXHaKKO4UwCQYFKw4DAhoFAKCCAaswGAYJKoZIhvcN\n" + "AQkDMQsGCSqGSIb3DQEHATAcBgkqhkiG9w0BCQUxDxcNMTIwODIwMTA1MjM1WjAjBgkqhkiG9w0B\n" + "CQQxFgQUZIBQXsg/8X8GMBoZ1TakgRgWyz4wNAYJKoZIhvcNAQkPMScwJTAKBggqhkiG9w0DBzAO\n" + "BggqhkiG9w0DAgICAIAwBwYFKw4DAgcwgYgGCSsGAQQBgjcQBDF7MHkwZTELMAkGA1UEBhMCY2gx\n" + "ETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdpdGFsIENlcnRpZmljYXRlIFNlcnZpY2Vz\n" + "MRwwGgYDVQQDExNTd2lzc2NvbSBSdWJpbiBDQSAxAhAab2vYTRYvDTiMFx2iijuFMIGKBgsqhkiG\n" + "9w0BCRACCzF7MHkwZTELMAkGA1UEBhMCY2gxETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxE\n" + "aWdpdGFsIENlcnRpZmljYXRlIFNlcnZpY2VzMRwwGgYDVQQDExNTd2lzc2NvbSBSdWJpbiBDQSAx\n" + "AhAab2vYTRYvDTiMFx2iijuFMA0GCSqGSIb3DQEBAQUABIIBAC13K/ZrP6tmTtSLshOjv0txup1u\n" + "VFpHvP5i1nU2ly/SR9BkB8MnN0bpMAvb/QM5VydRST9IFT9m4pdHRFpbfx+N8LGm7TBt9T2e3/mL\n" + "cj5lZrZUoNOlOeoMODR3HSoPl9NYPvCUKirP4H0jgFb9fxZSWIBvIg6CMbAyF2rE9aRMHKa13xte\n" + "WqgO1Ml0AIDQ0BnVPyLMF1BPtu4e4VCXWDbnwf714TBkS+Qx/8YkmzTEd5hkVA3A11b5JChrcRex\n" + 
"E362Cd8e5dhRKLV9FrbGZ3UPpnFcmuuQPjYCRP/eqDJzIfyoeJwEY4WrmZqGrTQNWz27t7Ov6214\n" + "rJme/XmWwocAAAAAAAA=\n" + "------=_Part_113918_874669.1345459955655--"); final MailMessage result = subject.parse(message, updateContext); assertThat(result.getId(), is("<3723299.113919.1345459955655.JavaMail.trustmail@ss000807>")); assertThat(result.getReplyTo(), is("Registration.Ripe@company.com")); assertThat(result.getKeyword(), is(Keyword.NONE)); List<ContentWithCredentials> contentWithCredentialsList = result.getContentWithCredentials(); assertThat(contentWithCredentialsList, hasSize(1)); assertThat(contentWithCredentialsList.get(0).getContent(), is("" + "inetnum: 217.193.127.137 - 217.193.127.137\n" + "netname: NET-NAME\n" + "descr: Bogus\n" + "descr: Address\n" + "country: CH\n" + "admin-c: TEST-RIPE\n" + "tech-c: TEST-RIPE\n" + "status: ASSIGNED PA\n" + "mnt-by: TEST-MNT\n" + "changed: email@foobar.net 20120312\n" + "source: RIPE\n\n" + "changed: email@foobar.net\n\n\n\n")); assertThat(contentWithCredentialsList.get(0).getCredentials(), hasSize(1)); assertTrue(contentWithCredentialsList.get(0).getCredentials().get(0) instanceof X509Credential); } @Test public void parse_smime_multipart_text_plain_no_headers_in_content() throws Exception { final MailMessage result = subject.parse(MimeMessageProvider.getUpdateMessage("multipartPkcs7Signed.mail"), updateContext); final String expectedValue = "first line\nsecond line\nthird line\n"; assertThat(result.getSubject(), is("Bogus")); assertThat(result.getFrom(), is("Registration.Ripe@company.com")); assertThat(result.getUpdateMessage(), is(expectedValue)); List<ContentWithCredentials> contentWithCredentialsList = result.getContentWithCredentials(); assertThat(contentWithCredentialsList, hasSize(1)); assertThat(contentWithCredentialsList.get(0).getContent(), is(expectedValue)); assertThat(contentWithCredentialsList.get(0).getCredentials(), hasSize(1)); assertTrue(contentWithCredentialsList.get(0).getCredentials().get(0) instanceof 
X509Credential); } @Test public void parse_smime_multipart_alternative() throws Exception { final MimeMessage input = getMessage("" + "Message-ID: <28483859.46585.1352362093823.JavaMail.trustmail@ss000807>\n" + "MIME-Version: 1.0\n" + "Content-Type: multipart/signed; protocol=\"application/pkcs7-signature\"; micalg=sha1; \n" + "\tboundary=\"----=_Part_46584_13090458.1352362093823\"\n" + "From: <Registration.Ripe@company.com>\n" + "To: <auto-dbm@ripe.net>\n" + "Subject: IP-Request for FOO\n" + "Date: Thu, 8 Nov 2012 08:08:07 +0000\n" + "Accept-Language: de-CH, en-US\n" + "Content-Language: de-DE\n" + "\n" + "------=_Part_46584_13090458.1352362093823\n" + "Content-Type: multipart/alternative;\n" + "\tboundary=\"_000_54F98DDECB052B4A98783EDEF1B77C4C30649470SG000708corproo_\"\n" + "Content-Language: de-DE\n" + "\n" + "--_000_54F98DDECB052B4A98783EDEF1B77C4C30649470SG000708corproo_\n" + "Content-Type: text/plain; charset=\"us-ascii\"\n" + "Content-Transfer-Encoding: quoted-printable\n" + "\n" + "inetnum: 217.193.204.248 - 217.193.204.255\n" + "netname: TEST-NET\n" + "descr: FOO\n" + "descr: BAR\n" + "country: CH\n" + "admin-c: TEST-RIPE\n" + "tech-c: TEST-RIPE\n" + "status: ASSIGNED PA\n" + "mnt-by: TEST-MNT\n" + "changed: email@foobar.net\n" + "source: RIPE\n" + "\n" + "\n" + "--_000_54F98DDECB052B4A98783EDEF1B77C4C30649470SG000708corproo_\n" + "Content-Type: text/html; charset=\"us-ascii\"\n" + "Content-Transfer-Encoding: quoted-printable\n" + "\n" + "<html>\n" + "<head>\n" + "<meta http-equiv=3D\"Content-Type\" content=3D\"text/html; charset=3Dus-ascii\"=\n" + ">\n" + "<meta name=3D\"Generator\" content=3D\"Microsoft Exchange Server\">\n" + "<!-- converted from rtf -->\n" + "<style><!-- .EmailQuote { margin-left: 1pt; padding-left: 4pt; border-left:=\n" + " #800000 2px solid; } --></style>\n" + "</head>\n" + "<body>\n" + "<font face=3D\"Bogus\" size=3D\"2\"><span style=3D\"font-size:10pt;\">\n" + "<div>inetnum: 217.193.204.248 - 217.193.204.255</div>\n" + 
"<div>netname: TEST-NET</div>\n" + "<div>descr: FOO</div>\n" + "<div>descr: BAR</div>\n" + "<div>country: CH</div>\n" + "<div>admin-c: TEST-RIPE</div>\n" + "<div>tech-c: TEST-RIPE</div>\n" + "<div>status: ASSIGNED PA</div>\n" + "<div>mnt-by: TEST-MNT</div>\n" + "<div>changed: email@foobar.net</div>\n" + "<div>source: RIPE</div>\n" + "<div><font face=3D\"Calibri\" size=3D\"2\"><span style=3D\"font-size:11pt;\">&nbs=\n" + "p;</span></font></div>\n" + "</span></font>\n" + "</body>\n" + "</html>\n" + "\n" + "--_000_54F98DDECB052B4A98783EDEF1B77C4C30649470SG000708corproo_--\n" + "\n" + "------=_Part_46584_13090458.1352362093823\n" + "Content-Type: application/pkcs7-signature; name=smime.p7s; smime-type=signed-data\n" + "Content-Transfer-Encoding: base64\n" + "Content-Disposition: attachment; filename=\"smime.p7s\"\n" + "Content-Description: S/MIME Cryptographic Signature\n" + "\n" + "MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAQAAoIIQ/TCCBVkw\n" + "NuXtlQgW7sAAAAAAAAA=\n" + "------=_Part_46584_13090458.1352362093823--\n"); final MailMessage message = subject.parse(input, updateContext); List<ContentWithCredentials> contentWithCredentialsList = message.getContentWithCredentials(); assertThat(contentWithCredentialsList, hasSize(1)); assertThat(contentWithCredentialsList.get(0).getContent(), is("" + "inetnum: 217.193.204.248 - 217.193.204.255\n" + "netname: TEST-NET\n" + "descr: FOO\n" + "descr: BAR\n" + "country: CH\n" + "admin-c: TEST-RIPE\n" + "tech-c: TEST-RIPE\n" + "status: ASSIGNED PA\n" + "mnt-by: TEST-MNT\n" + "changed: email@foobar.net\n" + "source: RIPE\n\n")); assertThat(contentWithCredentialsList.get(0).getCredentials(), hasSize(1)); assertTrue(contentWithCredentialsList.get(0).getCredentials().get(0) instanceof X509Credential); } @Test public void illegal_charset() throws Exception { assertThat(subject.getCharset(new ContentType("text/plain;\n\tcharset=\"_iso-2022-jp$ESC\"")), is(Charsets.ISO_8859_1)); } private MimeMessage getMessage(final 
String message) throws MessagingException, IOException { return new MimeMessage(null, new ByteArrayInputStream(message.getBytes())); } }
// Copyright 2018 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.devtools.build.lib.starlarkbuildapi.cpp; import com.google.devtools.build.lib.collect.nestedset.Depset; import com.google.devtools.build.lib.starlarkbuildapi.FileApi; import com.google.devtools.build.lib.starlarkbuildapi.StarlarkActionFactoryApi; import com.google.devtools.build.lib.starlarkbuildapi.StarlarkRuleContextApi; import com.google.devtools.build.lib.starlarkbuildapi.platform.ConstraintValueInfoApi; import com.google.devtools.build.lib.syntax.EvalException; import com.google.devtools.build.lib.syntax.NoneType; import com.google.devtools.build.lib.syntax.Sequence; import com.google.devtools.build.lib.syntax.StarlarkThread; import com.google.devtools.build.lib.syntax.Tuple; import net.starlark.java.annot.Param; import net.starlark.java.annot.ParamType; import net.starlark.java.annot.StarlarkBuiltin; import net.starlark.java.annot.StarlarkMethod; /** Utilites related to C++ support. 
*/ @StarlarkBuiltin( name = "cc_common", doc = "Utilities for C++ compilation, linking, and command line generation.") public interface BazelCcModuleApi< StarlarkActionFactoryT extends StarlarkActionFactoryApi, FileT extends FileApi, ConstraintValueT extends ConstraintValueInfoApi, StarlarkRuleContextT extends StarlarkRuleContextApi<ConstraintValueT>, CcToolchainProviderT extends CcToolchainProviderApi<FeatureConfigurationT>, FeatureConfigurationT extends FeatureConfigurationApi, CompilationContextT extends CcCompilationContextApi<FileT>, CompilationOutputsT extends CcCompilationOutputsApi<FileT>, LinkingOutputsT extends CcLinkingOutputsApi<FileT>, LinkerInputT extends LinkerInputApi<LibraryToLinkT, FileT>, LibraryToLinkT extends LibraryToLinkApi<FileT>, LinkingContextT extends CcLinkingContextApi<FileT>, CcToolchainVariablesT extends CcToolchainVariablesApi, CcToolchainConfigInfoT extends CcToolchainConfigInfoApi> extends CcModuleApi< StarlarkActionFactoryT, FileT, CcToolchainProviderT, FeatureConfigurationT, CompilationContextT, LinkerInputT, LinkingContextT, LibraryToLinkT, CcToolchainVariablesT, ConstraintValueT, StarlarkRuleContextT, CcToolchainConfigInfoT, CompilationOutputsT> { @StarlarkMethod( name = "compile", doc = "Should be used for C++ compilation. 
Returns tuple of " + "(<code>CompilationContext</code>, <code>CcCompilationOutputs</code>).", useStarlarkThread = true, parameters = { @Param( name = "actions", type = StarlarkActionFactoryApi.class, positional = false, named = true, doc = "<code>actions</code> object."), @Param( name = "feature_configuration", doc = "<code>feature_configuration</code> to be queried.", positional = false, named = true, type = FeatureConfigurationApi.class), @Param( name = "cc_toolchain", doc = "<code>CcToolchainInfo</code> provider to be used.", positional = false, named = true, type = CcToolchainProviderApi.class), @Param( name = "srcs", doc = "The list of source files to be compiled.", positional = false, named = true, defaultValue = "[]", type = Sequence.class), @Param( name = "public_hdrs", doc = "List of headers needed for compilation of srcs and may be included by dependent " + "rules transitively.", positional = false, named = true, defaultValue = "[]", type = Sequence.class), @Param( name = "private_hdrs", doc = "List of headers needed for compilation of srcs and NOT to be included by" + " dependent rules.", positional = false, named = true, defaultValue = "[]", type = Sequence.class), @Param( name = "includes", doc = "Search paths for header files referenced both by angle bracket and quotes. " + "Usually passed with -I. Propagated to dependents transitively.", positional = false, named = true, defaultValue = "[]", type = Sequence.class), @Param( name = "quote_includes", doc = "Search paths for header files referenced by quotes, " + "e.g. #include \"foo/bar/header.h\". They can be either relative to the exec " + "root or absolute. Usually passed with -iquote. Propagated to dependents " + "transitively.", positional = false, named = true, defaultValue = "[]", type = Sequence.class), @Param( name = "system_includes", doc = "Search paths for header files referenced by angle brackets, e.g. #include" + " &lt;foo/bar/header.h&gt;. 
They can be either relative to the exec root or" + " absolute. Usually passed with -isystem. Propagated to dependents " + "transitively.", positional = false, named = true, defaultValue = "[]", type = Sequence.class), @Param( name = "framework_includes", doc = "Search paths for header files from Apple frameworks. They can be either relative " + "to the exec root or absolute. Usually passed with -F. Propagated to " + "dependents transitively.", positional = false, named = true, defaultValue = "[]", type = Sequence.class), @Param( name = "defines", doc = "Set of defines needed to compile this target. Each define is a string. Propagated" + " to dependents transitively.", positional = false, named = true, defaultValue = "[]", type = Sequence.class), @Param( name = "local_defines", doc = "Set of defines needed to compile this target. Each define is a string. Not" + " propagated to dependents transitively.", positional = false, named = true, defaultValue = "[]", type = Sequence.class), @Param( name = "include_prefix", doc = "The prefix to add to the paths of the headers of this rule. When set, the " + "headers in the hdrs attribute of this rule are accessible at is the " + "value of this attribute prepended to their repository-relative path. " + "The prefix in the strip_include_prefix attribute is removed before this " + "prefix is added.", positional = false, named = true, defaultValue = "''", type = String.class), @Param( name = "strip_include_prefix", doc = "The prefix to strip from the paths of the headers of this rule. When set, the" + " headers in the hdrs attribute of this rule are accessible at their path" + " with this prefix cut off. If it's a relative path, it's taken as a" + " package-relative one. If it's an absolute one, it's understood as a" + " repository-relative path. 
The prefix in the include_prefix attribute is" + " added after this prefix is stripped.", positional = false, named = true, defaultValue = "''", type = String.class), @Param( name = "user_compile_flags", doc = "Additional list of compilation options.", positional = false, named = true, defaultValue = "[]", type = Sequence.class), @Param( name = "compilation_contexts", doc = "Headers from dependencies used for compilation.", positional = false, named = true, defaultValue = "[]", type = Sequence.class), @Param( name = "name", doc = "This is used for naming the output artifacts of actions created by this " + "method.", positional = false, named = true, type = String.class), @Param( name = "disallow_pic_outputs", doc = "Whether PIC outputs should be created.", positional = false, named = true, defaultValue = "False", type = Boolean.class), @Param( name = "disallow_nopic_outputs", doc = "Whether NOPIC outputs should be created.", positional = false, named = true, defaultValue = "False", type = Boolean.class), @Param( name = "additional_inputs", doc = "List of additional files needed for compilation of srcs", positional = false, named = true, defaultValue = "[]", type = Sequence.class), }) Tuple<Object> compile( StarlarkActionFactoryT starlarkActionFactoryApi, FeatureConfigurationT starlarkFeatureConfiguration, CcToolchainProviderT starlarkCcToolchainProvider, Sequence<?> sources, // <FileT> expected Sequence<?> publicHeaders, // <FileT> expected Sequence<?> privateHeaders, // <FileT> expected Sequence<?> includes, // <String> expected Sequence<?> quoteIncludes, // <String> expected Sequence<?> systemIncludes, // <String> expected Sequence<?> frameworkIncludes, // <String> expected Sequence<?> defines, // <String> expected Sequence<?> localDefines, // <String> expected String includePrefix, String stripIncludePrefix, Sequence<?> userCompileFlags, // <String> expected Sequence<?> ccCompilationContexts, // <CompilationContextT> expected String name, boolean 
disallowPicOutputs, boolean disallowNopicOutputs, Sequence<?> additionalInputs, // <FileT> expected StarlarkThread thread) throws EvalException, InterruptedException; @StarlarkMethod( name = "link", doc = "Should be used for C++ transitive linking.", useStarlarkThread = true, parameters = { @Param( name = "actions", type = StarlarkActionFactoryApi.class, positional = false, named = true, doc = "<code>actions</code> object."), @Param( name = "feature_configuration", doc = "<code>feature_configuration</code> to be queried.", positional = false, named = true, type = FeatureConfigurationApi.class), @Param( name = "cc_toolchain", doc = "<code>CcToolchainInfo</code> provider to be used.", positional = false, named = true, type = CcToolchainProviderApi.class), @Param( name = "compilation_outputs", doc = "Compilation outputs containing object files to link.", positional = false, named = true, defaultValue = "None", noneable = true, allowedTypes = { @ParamType(type = CcCompilationOutputsApi.class), @ParamType(type = NoneType.class) }), @Param( name = "user_link_flags", doc = "Additional list of linker options.", positional = false, named = true, defaultValue = "[]", type = Sequence.class), @Param( name = "linking_contexts", doc = "Linking contexts from dependencies to be linked into the linking context " + "generated by this rule.", positional = false, named = true, defaultValue = "[]", type = Sequence.class), @Param( name = "name", doc = "This is used for naming the output artifacts of actions created by this " + "method.", positional = false, named = true, type = String.class), @Param( name = "language", doc = "Only C++ supported for now. 
Do not use this parameter.", positional = false, named = true, defaultValue = "'c++'", type = String.class), @Param( name = "output_type", doc = "Can be either 'executable' or 'dynamic_library'.", positional = false, named = true, defaultValue = "'executable'", type = String.class), @Param( name = "link_deps_statically", doc = " True to link dependencies statically, False dynamically.", positional = false, named = true, defaultValue = "True", type = Boolean.class), @Param( name = "stamp", doc = "Whether to include build information in the linked executable, if output_type is " + "'executable'. If 1, build information is always included. If 0 (the " + "default build information is always excluded. If -1, uses the default " + "behavior, which may be overridden by the --[no]stamp flag. This should be " + "unset (or set to 0) when generating the executable output for test rules.", positional = false, named = true, defaultValue = "0", type = Integer.class), @Param( name = "additional_inputs", doc = "For additional inputs to the linking action, e.g.: linking scripts.", positional = false, named = true, defaultValue = "[]", type = Sequence.class), @Param( name = "grep_includes", positional = false, named = true, noneable = true, defaultValue = "None", allowedTypes = {@ParamType(type = FileApi.class), @ParamType(type = NoneType.class)}), }) LinkingOutputsT link( StarlarkActionFactoryT starlarkActionFactoryApi, FeatureConfigurationT starlarkFeatureConfiguration, CcToolchainProviderT starlarkCcToolchainProvider, Object compilationOutputs, Sequence<?> userLinkFlags, // <String> expected Sequence<?> linkingContexts, // <LinkingContextT> expected String name, String language, String outputType, boolean linkDepsStatically, int stamp, Sequence<?> additionalInputs, // <FileT> expected Object grepIncludes, StarlarkThread thread) throws InterruptedException, EvalException; @StarlarkMethod( name = "create_compilation_outputs", doc = "Create compilation outputs object.", parameters = 
{ @Param( name = "objects", doc = "List of object files.", positional = false, named = true, noneable = true, defaultValue = "None", allowedTypes = {@ParamType(type = Depset.class), @ParamType(type = NoneType.class)}), @Param( name = "pic_objects", doc = "List of pic object files.", positional = false, named = true, noneable = true, defaultValue = "None", allowedTypes = {@ParamType(type = Depset.class), @ParamType(type = NoneType.class)}), }) CompilationOutputsT createCompilationOutputsFromStarlark( Object objectsObject, Object picObjectsObject) throws EvalException; @StarlarkMethod( name = "merge_compilation_outputs", doc = "Merge compilation outputs.", parameters = { @Param( name = "compilation_outputs", positional = false, named = true, defaultValue = "[]", type = Sequence.class), }) CompilationOutputsT mergeCcCompilationOutputsFromStarlark( Sequence<?> compilationOutputs) // <CompilationOutputsT> expected throws EvalException; }
/*
 * Copyright 2015-present Open Networking Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.onosproject.vtnrsc;

import static com.google.common.base.Preconditions.checkNotNull;

import java.util.Objects;

import org.onlab.packet.IpPrefix;

import com.google.common.base.MoreObjects;

/**
 * Provides Default flow classifier.
 * <p>
 * Instances are immutable; use {@link Builder} to construct one. Optional
 * attributes that the builder never sets fall back to {@code null} (for
 * reference-typed attributes), {@link #NULL_PORT} (for port ranges) or
 * {@link #DEFAULT_CLASSIFIER_PRIORITY} (for priority).
 */
public final class DefaultFlowClassifier implements FlowClassifier {

    private final FlowClassifierId flowClassifierId;
    private final TenantId tenantId;
    private final String name;
    private final String description;
    private final String etherType;
    private final String protocol;
    private final int priority;
    private final int minSrcPortRange;
    private final int maxSrcPortRange;
    private final int minDstPortRange;
    private final int maxDstPortRange;
    private final IpPrefix srcIpPrefix;
    private final IpPrefix dstIpPrefix;
    private final VirtualPortId srcPort;
    private final VirtualPortId dstPort;

    // Sentinel meaning "port range attribute not set".
    private static final int NULL_PORT = 0;
    private static final String FLOW_CLASSIFIER_ID_NOT_NULL = "FlowClassifier id can not be null.";
    private static final String TENANT_ID_NOT_NULL = "Tenant id can not be null.";
    private static final String NAME_NOT_NULL = "Name can not be null.";
    private static final String ETHER_TYPE_NOT_NULL = "Ether Type can not be null.";
    private static final int DEFAULT_CLASSIFIER_PRIORITY = 0xCB20;

    /**
     * Constructor to create default flow classifier.
     *
     * @param flowClassifierId flow classifier Id
     * @param tenantId Tenant ID
     * @param name flow classifier name
     * @param description flow classifier description
     * @param etherType etherType
     * @param protocol IP protocol
     * @param priority priority for classification
     * @param minSrcPortRange Minimum Source port range
     * @param maxSrcPortRange Maximum Source port range
     * @param minDstPortRange Minimum destination port range
     * @param maxDstPortRange Maximum destination port range
     * @param srcIpPrefix Source IP prefix
     * @param dstIpPrefix destination IP prefix
     * @param srcPort Source VirtualPort
     * @param dstPort destination VirtualPort
     */
    private DefaultFlowClassifier(FlowClassifierId flowClassifierId, TenantId tenantId, String name,
            String description, String etherType, String protocol, int priority, int minSrcPortRange,
            int maxSrcPortRange, int minDstPortRange, int maxDstPortRange, IpPrefix srcIpPrefix,
            IpPrefix dstIpPrefix, VirtualPortId srcPort, VirtualPortId dstPort) {
        this.flowClassifierId = flowClassifierId;
        this.tenantId = tenantId;
        this.name = name;
        this.description = description;
        this.etherType = etherType;
        this.protocol = protocol;
        this.priority = priority;
        this.minSrcPortRange = minSrcPortRange;
        this.maxSrcPortRange = maxSrcPortRange;
        this.minDstPortRange = minDstPortRange;
        this.maxDstPortRange = maxDstPortRange;
        this.srcIpPrefix = srcIpPrefix;
        this.dstIpPrefix = dstIpPrefix;
        this.srcPort = srcPort;
        this.dstPort = dstPort;
    }

    @Override
    public FlowClassifierId flowClassifierId() {
        return flowClassifierId;
    }

    @Override
    public TenantId tenantId() {
        return tenantId;
    }

    @Override
    public String name() {
        return name;
    }

    @Override
    public String description() {
        return description;
    }

    @Override
    public String etherType() {
        return etherType;
    }

    @Override
    public String protocol() {
        return protocol;
    }

    @Override
    public int priority() {
        return priority;
    }

    @Override
    public int minSrcPortRange() {
        return minSrcPortRange;
    }

    @Override
    public int maxSrcPortRange() {
        return maxSrcPortRange;
    }

    @Override
    public int minDstPortRange() {
        return minDstPortRange;
    }

    @Override
    public int maxDstPortRange() {
        return maxDstPortRange;
    }

    @Override
    public IpPrefix srcIpPrefix() {
        return srcIpPrefix;
    }

    @Override
    public IpPrefix dstIpPrefix() {
        return dstIpPrefix;
    }

    @Override
    public VirtualPortId srcPort() {
        return srcPort;
    }

    @Override
    public VirtualPortId dstPort() {
        return dstPort;
    }

    /**
     * Builder class for constructing Flow classifier.
     */
    public static class Builder implements FlowClassifier.Builder {

        private FlowClassifierId flowClassifierId;
        private TenantId tenantId;
        private String name;
        private String description;
        private boolean isFlowClassifierDescriptionSet = false;
        private String etherType;
        private String protocol;
        private boolean isProtocolSet = false;
        private int priority;
        private boolean isPrioritySet = false;
        private int minSrcPortRange;
        private boolean isMinSrcPortRangeSet = false;
        private int maxSrcPortRange;
        private boolean isMaxSrcPortRangeSet = false;
        private int minDstPortRange;
        private boolean isMinDstPortRangeSet = false;
        private int maxDstPortRange;
        private boolean isMaxDstPortRangeSet = false;
        private IpPrefix srcIpPrefix;
        private boolean isSrcIpPrefixSet = false;
        private IpPrefix dstIpPrefix;
        private boolean isDstIpPrefixSet = false;
        private VirtualPortId srcPort;
        private boolean isSrcPortSet = false;
        private VirtualPortId dstPort;
        private boolean isDstPortSet = false;

        @Override
        public FlowClassifier build() {
            // Mandatory attributes.
            checkNotNull(flowClassifierId, FLOW_CLASSIFIER_ID_NOT_NULL);
            checkNotNull(tenantId, TENANT_ID_NOT_NULL);
            checkNotNull(name, NAME_NOT_NULL);
            checkNotNull(etherType, ETHER_TYPE_NOT_NULL);

            // Optional attributes: start from defaults, then apply only the
            // values the caller explicitly set.
            String description = null;
            String protocol = null;
            int priority = DEFAULT_CLASSIFIER_PRIORITY;
            int minSrcPortRange = NULL_PORT;
            int maxSrcPortRange = NULL_PORT;
            int minDstPortRange = NULL_PORT;
            int maxDstPortRange = NULL_PORT;
            IpPrefix srcIpPrefix = null;
            IpPrefix dstIpPrefix = null;
            VirtualPortId srcPort = null;
            VirtualPortId dstPort = null;

            if (isFlowClassifierDescriptionSet) {
                description = this.description;
            }
            if (isProtocolSet) {
                protocol = this.protocol;
            }
            if (isPrioritySet) {
                priority = this.priority;
            }
            if (isMinSrcPortRangeSet) {
                minSrcPortRange = this.minSrcPortRange;
            }
            if (isMaxSrcPortRangeSet) {
                maxSrcPortRange = this.maxSrcPortRange;
            }
            if (isMinDstPortRangeSet) {
                minDstPortRange = this.minDstPortRange;
            }
            if (isMaxDstPortRangeSet) {
                maxDstPortRange = this.maxDstPortRange;
            }
            if (isSrcIpPrefixSet) {
                srcIpPrefix = this.srcIpPrefix;
            }
            if (isDstIpPrefixSet) {
                dstIpPrefix = this.dstIpPrefix;
            }
            if (isSrcPortSet) {
                srcPort = this.srcPort;
            }
            if (isDstPortSet) {
                dstPort = this.dstPort;
            }

            return new DefaultFlowClassifier(flowClassifierId, tenantId, name, description, etherType, protocol,
                    priority, minSrcPortRange, maxSrcPortRange, minDstPortRange, maxDstPortRange, srcIpPrefix,
                    dstIpPrefix, srcPort, dstPort);
        }

        @Override
        public Builder setFlowClassifierId(FlowClassifierId flowClassifierId) {
            this.flowClassifierId = flowClassifierId;
            return this;
        }

        @Override
        public Builder setTenantId(TenantId tenantId) {
            this.tenantId = tenantId;
            return this;
        }

        @Override
        public Builder setName(String name) {
            this.name = name;
            return this;
        }

        @Override
        public Builder setDescription(String description) {
            this.description = description;
            this.isFlowClassifierDescriptionSet = true;
            return this;
        }

        @Override
        public Builder setEtherType(String etherType) {
            this.etherType = etherType;
            return this;
        }

        @Override
        public Builder setProtocol(String protocol) {
            this.protocol = protocol;
            this.isProtocolSet = true;
            return this;
        }

        @Override
        public Builder setPriority(int priority) {
            this.priority = priority;
            this.isPrioritySet = true;
            return this;
        }

        @Override
        public Builder setMinSrcPortRange(int minSrcPortRange) {
            this.minSrcPortRange = minSrcPortRange;
            this.isMinSrcPortRangeSet = true;
            return this;
        }

        @Override
        public Builder setMaxSrcPortRange(int maxSrcPortRange) {
            this.maxSrcPortRange = maxSrcPortRange;
            this.isMaxSrcPortRangeSet = true;
            return this;
        }

        @Override
        public Builder setMinDstPortRange(int minDstPortRange) {
            this.minDstPortRange = minDstPortRange;
            this.isMinDstPortRangeSet = true;
            return this;
        }

        @Override
        public Builder setMaxDstPortRange(int maxDstPortRange) {
            this.maxDstPortRange = maxDstPortRange;
            this.isMaxDstPortRangeSet = true;
            return this;
        }

        @Override
        public Builder setSrcIpPrefix(IpPrefix srcIpPrefix) {
            this.srcIpPrefix = srcIpPrefix;
            this.isSrcIpPrefixSet = true;
            return this;
        }

        @Override
        public Builder setDstIpPrefix(IpPrefix dstIpPrefix) {
            this.dstIpPrefix = dstIpPrefix;
            this.isDstIpPrefixSet = true;
            return this;
        }

        @Override
        public Builder setSrcPort(VirtualPortId srcPort) {
            this.srcPort = srcPort;
            this.isSrcPortSet = true;
            return this;
        }

        @Override
        public Builder setDstPort(VirtualPortId dstPort) {
            this.dstPort = dstPort;
            this.isDstPortSet = true;
            return this;
        }
    }

    @Override
    public int hashCode() {
        // Fix: include priority, which equals() compares, so equal objects
        // hash over the same field set.
        return Objects.hash(flowClassifierId, tenantId, name, description, etherType, protocol, priority,
                minSrcPortRange, maxSrcPortRange, minDstPortRange, maxDstPortRange, srcIpPrefix, dstIpPrefix,
                srcPort, dstPort);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj instanceof DefaultFlowClassifier) {
            DefaultFlowClassifier other = (DefaultFlowClassifier) obj;
            return Objects.equals(this.flowClassifierId, other.flowClassifierId)
                    && Objects.equals(this.tenantId, other.tenantId)
                    && Objects.equals(this.name, other.name)
                    && Objects.equals(this.description, other.description)
                    && Objects.equals(this.etherType, other.etherType)
                    && Objects.equals(this.protocol, other.protocol)
                    && Objects.equals(this.priority, other.priority)
                    && Objects.equals(this.minSrcPortRange, other.minSrcPortRange)
                    && Objects.equals(this.maxSrcPortRange, other.maxSrcPortRange)
                    && Objects.equals(this.minDstPortRange, other.minDstPortRange)
                    && Objects.equals(this.maxDstPortRange, other.maxDstPortRange)
                    && Objects.equals(this.srcIpPrefix, other.srcIpPrefix)
                    && Objects.equals(this.dstIpPrefix, other.dstIpPrefix)
                    && Objects.equals(this.srcPort, other.srcPort)
                    && Objects.equals(this.dstPort, other.dstPort);
        }
        return false;
    }

    @Override
    public boolean exactMatch(FlowClassifier flowClassifier) {
        // NOTE(review): the attribute-by-attribute comparisons below are redundant once
        // equals() has passed (equals already compares every field), but they are kept
        // to preserve the original behavior for all FlowClassifier implementations.
        return this.equals(flowClassifier)
                && Objects.equals(this.flowClassifierId, flowClassifier.flowClassifierId())
                && Objects.equals(this.tenantId, flowClassifier.tenantId())
                && Objects.equals(this.name, flowClassifier.name())
                && Objects.equals(this.description, flowClassifier.description())
                && Objects.equals(this.etherType, flowClassifier.etherType())
                && Objects.equals(this.protocol, flowClassifier.protocol())
                && Objects.equals(this.priority, flowClassifier.priority())
                && Objects.equals(this.minSrcPortRange, flowClassifier.minSrcPortRange())
                && Objects.equals(this.maxSrcPortRange, flowClassifier.maxSrcPortRange())
                && Objects.equals(this.minDstPortRange, flowClassifier.minDstPortRange())
                && Objects.equals(this.maxDstPortRange, flowClassifier.maxDstPortRange())
                && Objects.equals(this.srcIpPrefix, flowClassifier.srcIpPrefix())
                && Objects.equals(this.dstIpPrefix, flowClassifier.dstIpPrefix())
                && Objects.equals(this.srcPort, flowClassifier.srcPort())
                && Objects.equals(this.dstPort, flowClassifier.dstPort());
    }

    @Override
    public String toString() {
        return MoreObjects.toStringHelper(getClass())
                .add("FlowClassifierId", flowClassifierId)
                .add("TenantId", tenantId)
                .add("Name", name)
                .add("Description", description)
                // Fix: label was the copy-pasted literal "String"; use the field's name.
                .add("EtherType", etherType)
                .add("Protocol", protocol)
                .add("Priority", priority)
                .add("MinSrcPortRange", minSrcPortRange)
                .add("MaxSrcPortRange", maxSrcPortRange)
                .add("MinDstPortRange", minDstPortRange)
                .add("MaxDstPortRange", maxDstPortRange)
                .add("SrcIpPrefix", srcIpPrefix)
                .add("DstIpPrefix", dstIpPrefix)
                .add("SrcPort", srcPort)
                .add("DstPort", dstPort)
                .toString();
    }
}