index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/accumulo-wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/output/BufferingRFileRecordWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.output;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.data.ColumnUpdate;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.examples.wikisearch.ingest.WikipediaConfiguration;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
 * A {@link RecordWriter} that buffers mutations per destination table and, whenever the
 * total buffered size reaches {@code maxSize}, flushes the table with the largest buffer
 * to Accumulo through a {@link BatchWriter}.
 */
final class BufferingRFileRecordWriter extends RecordWriter<Text,Mutation> {
  /** Total buffered bytes across all tables at which a flush is triggered. */
  private final long maxSize;
  private final Configuration conf;
  /** Sum of all per-table buffer sizes, in bytes (key + value sizes only). */
  private long size;

  /** Per-table sorted key/value buffers. */
  private Map<Text,TreeMap<Key,Value>> buffers = new HashMap<Text,TreeMap<Key,Value>>();
  /** Per-table byte counts, kept in step with {@link #buffers}. */
  private Map<Text,Long> bufferSizes = new HashMap<Text,Long>();

  BufferingRFileRecordWriter(long maxSize, Configuration conf) {
    this.maxSize = maxSize;
    this.conf = conf;
  }

  /** Returns the buffer for {@code tablename}, creating (and size-tracking) it on first use. */
  private TreeMap<Key,Value> getBuffer(Text tablename) {
    TreeMap<Key,Value> buffer = buffers.get(tablename);
    if (buffer == null) {
      buffer = new TreeMap<Key,Value>();
      buffers.put(tablename, buffer);
      bufferSizes.put(tablename, 0L);
    }
    return buffer;
  }

  /** Returns the table with the largest buffered byte count, or {@code null} if all are empty. */
  private Text getLargestTablename() {
    long max = 0;
    Text table = null;
    for (Entry<Text,Long> e : bufferSizes.entrySet()) {
      if (e.getValue() > max) {
        max = e.getValue();
        table = e.getKey();
      }
    }
    return table;
  }

  /**
   * Writes the largest table's buffered entries to Accumulo and resets that buffer.
   *
   * @throws IOException if the flush fails; the buffer is left intact in that case
   */
  private void flushLargestTable() throws IOException {
    Text tablename = getLargestTablename();
    if (tablename == null)
      return;
    long bufferSize = bufferSizes.get(tablename);
    TreeMap<Key,Value> buffer = buffers.get(tablename);
    if (buffer.isEmpty())
      return;
    try {
      Connector conn = WikipediaConfiguration.getConnector(conf);
      BatchWriter writer = conn.createBatchWriter(tablename.toString(), new BatchWriterConfig());
      try {
        for (Entry<Key,Value> e : buffer.entrySet()) {
          Key k = e.getKey();
          // Carry the row over from the buffered key; the previous code used the
          // no-arg Mutation constructor, which produced mutations with no row.
          Mutation m = new Mutation(k.getRow());
          m.put(k.getColumnFamily(), k.getColumnQualifier(), e.getValue());
          writer.addMutation(m);
        }
      } finally {
        writer.close();
      }
    } catch (AccumuloException | AccumuloSecurityException | TableNotFoundException e) {
      // Fail loudly instead of printing to stderr and silently discarding the data.
      throw new IOException("Unable to flush buffered mutations for table " + tablename, e);
    }
    // TODO get the table configuration for the given table?
    size -= bufferSize;
    buffer.clear();
    bufferSizes.put(tablename, 0L);
  }

  @Override
  public void close(TaskAttemptContext arg0) throws IOException, InterruptedException {
    // Drain every buffer before the task finishes.
    while (size > 0)
      flushLargestTable();
  }

  @Override
  public void write(Text table, Mutation mutation) throws IOException, InterruptedException {
    TreeMap<Key,Value> buffer = getBuffer(table);
    int mutationSize = 0;
    for (ColumnUpdate update : mutation.getUpdates()) {
      Key k = new Key(mutation.getRow(), update.getColumnFamily(), update.getColumnQualifier(),
          update.getColumnVisibility(), update.getTimestamp(), update.isDeleted());
      Value v = new Value(update.getValue());
      // TODO account for object overhead
      mutationSize += k.getSize();
      mutationSize += v.getSize();
      buffer.put(k, v);
    }
    size += mutationSize;
    // TODO use a MutableLong instead
    bufferSizes.put(table, bufferSizes.get(table) + mutationSize);
    while (size >= maxSize) {
      flushLargestTable();
    }
  }
}
| 6,200 |
0 | Create_ds/accumulo-wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/iterator/TextIndexCombiner.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.iterator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.accumulo.core.client.lexicoder.Encoder;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.TypedValueCombiner;
import org.apache.accumulo.core.iterators.ValueFormatException;
import org.apache.accumulo.examples.wikisearch.protobuf.TermWeight;
import com.google.protobuf.InvalidProtocolBufferException;
/**
*
*/
/**
 * Combiner that merges partial {@link TermWeight.Info} protobuf values for a term:
 * word offsets from every partial value are combined into one sorted list and the
 * normalized term frequencies are summed.
 */
public class TextIndexCombiner extends TypedValueCombiner<TermWeight.Info> {
  public static final Encoder<TermWeight.Info> TERMWEIGHT_INFO_ENCODER =
      new TermWeightInfoEncoder();

  /**
   * Merges all {@code TermWeight.Info} values for {@code key} into a single value.
   *
   * @param key the key whose values are being combined
   * @param iter partial values; {@code null} entries (from empty serialized values) are skipped
   * @return the merged value with sorted word offsets and summed term frequency
   */
  @Override
  public TermWeight.Info typedReduce(Key key, Iterator<TermWeight.Info> iter) {
    TermWeight.Info.Builder builder = TermWeight.Info.newBuilder();
    List<Integer> offsets = new ArrayList<>();
    float normalizedTermFrequency = 0f;
    while (iter.hasNext()) {
      TermWeight.Info info = iter.next();
      if (null == info)
        continue;
      // Gather all offsets and sort once at the end; the previous
      // binary-search-insertion approach was O(n^2) in the offset count.
      offsets.addAll(info.getWordOffsetList());
      if (info.getNormalizedTermFrequency() > 0) {
        normalizedTermFrequency += info.getNormalizedTermFrequency();
      }
    }
    Collections.sort(offsets);
    for (Integer offset : offsets) {
      builder.addWordOffset(offset);
    }
    builder.setNormalizedTermFrequency(normalizedTermFrequency);
    return builder.build();
  }

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) throws IOException {
    super.init(source, options, env);
    setEncoder(TERMWEIGHT_INFO_ENCODER);
  }

  /** Encoder that (de)serializes {@code TermWeight.Info} via its protobuf wire form. */
  public static class TermWeightInfoEncoder implements Encoder<TermWeight.Info> {
    @Override
    public byte[] encode(TermWeight.Info v) {
      return v.toByteArray();
    }

    @Override
    public TermWeight.Info decode(byte[] b) {
      // An empty value decodes to null; typedReduce skips nulls.
      if (b.length == 0)
        return null;
      try {
        return TermWeight.Info.parseFrom(b);
      } catch (InvalidProtocolBufferException e) {
        throw new ValueFormatException(
            "Value passed to aggregator was not of type TermWeight.Info");
      }
    }
  }
}
| 6,201 |
0 | Create_ds/accumulo-wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/iterator/GlobalIndexUidCombiner.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.iterator;
import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import org.apache.accumulo.core.client.lexicoder.Encoder;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.TypedValueCombiner;
import org.apache.accumulo.core.iterators.ValueFormatException;
import org.apache.accumulo.examples.wikisearch.protobuf.Uid;
import com.google.protobuf.InvalidProtocolBufferException;
/**
*
*/
/**
 * Combiner that merges {@code Uid.List} protobuf values for a global index entry.
 * Document UIDs are unioned and counts summed; once more than {@link #MAX} distinct
 * UIDs have been seen (or any input already had IGNORE set), the UID list is dropped
 * and IGNORE is set, while the count continues to accumulate.
 */
public class GlobalIndexUidCombiner extends TypedValueCombiner<Uid.List> {
  public static final Encoder<Uid.List> UID_LIST_ENCODER = new UidListEncoder();
  /** Maximum number of distinct UIDs retained before collapsing to a count-only entry. */
  public static final int MAX = 20;

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) throws IOException {
    super.init(source, options, env);
    setEncoder(UID_LIST_ENCODER);
  }

  @Override
  public Uid.List typedReduce(Key key, Iterator<Uid.List> iter) {
    HashSet<String> uniqueIds = new HashSet<>();
    long total = 0;
    boolean ignoreSeen = false;
    while (iter.hasNext()) {
      Uid.List partial = iter.next();
      if (partial == null) {
        continue;
      }
      total += partial.getCOUNT();
      ignoreSeen |= partial.getIGNORE();
      uniqueIds.addAll(partial.getUIDList());
    }
    Uid.List.Builder merged = Uid.List.newBuilder();
    // The count is always preserved, even when the UID list itself is discarded.
    merged.setCOUNT(total);
    if (ignoreSeen || uniqueIds.size() > MAX) {
      // Too many UIDs to track individually: drop the list and flag the entry.
      merged.setIGNORE(true);
      merged.clearUID();
    } else {
      merged.setIGNORE(false);
      merged.addAllUID(uniqueIds);
    }
    return merged.build();
  }

  /** Encoder that (de)serializes {@code Uid.List} values via their protobuf wire form. */
  public static class UidListEncoder implements Encoder<Uid.List> {
    @Override
    public byte[] encode(Uid.List v) {
      return v.toByteArray();
    }

    @Override
    public Uid.List decode(byte[] b) {
      // An empty value decodes to null; typedReduce skips nulls.
      if (b.length == 0) {
        return null;
      }
      try {
        return Uid.List.parseFrom(b);
      } catch (InvalidProtocolBufferException e) {
        throw new ValueFormatException("Value passed to aggregator was not of type Uid.List");
      }
    }
  }
}
| 6,202 |
0 | Create_ds/accumulo-wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/normalizer/LcNoDiacriticsNormalizer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.normalizer;
import java.text.Normalizer;
import java.text.Normalizer.Form;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* An {@link Normalizer} which performs the following steps:
* <ol>
* <li>Unicode canonical decomposition ({@link Form#NFD})</li>
* <li>Removal of diacritical marks</li>
* <li>Unicode canonical composition ({@link Form#NFC})</li>
* <li>lower casing in the {@link Locale#ENGLISH English local}
* </ol>
*/
public class LcNoDiacriticsNormalizer implements org.apache.accumulo.examples.wikisearch.normalizer.Normalizer {
private static final Pattern diacriticals = Pattern.compile("\\p{InCombiningDiacriticalMarks}");
public String normalizeFieldValue(String fieldName, Object fieldValue) {
String decomposed = Normalizer.normalize(fieldValue.toString(), Form.NFD);
String noDiacriticals = removeDiacriticalMarks(decomposed);
String recomposed = Normalizer.normalize(noDiacriticals, Form.NFC);
return recomposed.toLowerCase(Locale.ENGLISH);
}
private String removeDiacriticalMarks(String str) {
Matcher matcher = diacriticals.matcher(str);
return matcher.replaceAll("");
}
}
| 6,203 |
0 | Create_ds/accumulo-wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/normalizer/Normalizer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.normalizer;
/**
 * Strategy for producing normalized field content at ingest time.
 */
public interface Normalizer {
  /**
   * Creates normalized content for ingest based upon implemented logic.
   *
   * @param field the field being normalized
   * @param value the value to normalize
   * @return a normalized value
   */
  String normalizeFieldValue(String field, Object value);
}
| 6,204 |
0 | Create_ds/accumulo-wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/normalizer/NoOpNormalizer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.normalizer;
public class NoOpNormalizer implements Normalizer {
public String normalizeFieldValue(String field, Object value) {
return value.toString();
}
}
| 6,205 |
0 | Create_ds/accumulo-wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/protobuf/TermWeight.java | // Generated by the protocol buffer compiler. DO NOT EDIT!
// source: TermWeight.proto
package org.apache.accumulo.examples.wikisearch.protobuf;
public final class TermWeight {
private TermWeight() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistryLite registry) {
}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
registerAllExtensions(
(com.google.protobuf.ExtensionRegistryLite) registry);
}
public interface InfoOrBuilder extends
// @@protoc_insertion_point(interface_extends:org.apache.accumulo.examples.wikisearch.protobuf.Info)
com.google.protobuf.MessageOrBuilder {
/**
* <code>required float normalizedTermFrequency = 1;</code>
* @return Whether the normalizedTermFrequency field is set.
*/
boolean hasNormalizedTermFrequency();
/**
* <code>required float normalizedTermFrequency = 1;</code>
* @return The normalizedTermFrequency.
*/
float getNormalizedTermFrequency();
/**
* <code>repeated uint32 wordOffset = 2;</code>
* @return A list containing the wordOffset.
*/
java.util.List<java.lang.Integer> getWordOffsetList();
/**
* <code>repeated uint32 wordOffset = 2;</code>
* @return The count of wordOffset.
*/
int getWordOffsetCount();
/**
* <code>repeated uint32 wordOffset = 2;</code>
* @param index The index of the element to return.
* @return The wordOffset at the given index.
*/
int getWordOffset(int index);
}
/**
* Protobuf type {@code org.apache.accumulo.examples.wikisearch.protobuf.Info}
*/
public static final class Info extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:org.apache.accumulo.examples.wikisearch.protobuf.Info)
InfoOrBuilder {
private static final long serialVersionUID = 0L;
// Use Info.newBuilder() to construct.
private Info(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private Info() {
wordOffset_ = emptyIntList();
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new Info();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// NOTE(review): protobuf-generated code (file is marked DO NOT EDIT); comments below
// are review annotations only and would be lost on regeneration.
// Parses a serialized Info message from the wire. Tag values follow the protobuf
// wire format: tag = (field number << 3) | wire type.
private Info(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
// Tag 0 marks end of input.
done = true;
break;
case 13: {
// Field 1, wire type 5 (32-bit): required float normalizedTermFrequency.
bitField0_ |= 0x00000001;
normalizedTermFrequency_ = input.readFloat();
break;
}
case 16: {
// Field 2, wire type 0 (varint): one unpacked uint32 wordOffset element.
if (!((mutable_bitField0_ & 0x00000002) != 0)) {
wordOffset_ = newIntList();
mutable_bitField0_ |= 0x00000002;
}
wordOffset_.addInt(input.readUInt32());
break;
}
case 18: {
// Field 2, wire type 2 (length-delimited): packed uint32 wordOffset elements.
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000002) != 0) && input.getBytesUntilLimit() > 0) {
wordOffset_ = newIntList();
mutable_bitField0_ |= 0x00000002;
}
while (input.getBytesUntilLimit() > 0) {
wordOffset_.addInt(input.readUInt32());
}
input.popLimit(limit);
break;
}
default: {
// Unknown field: preserve it in unknownFields; stop if end-group/EOF.
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
// Always seal the repeated field and unknown-field set, even on error paths.
if (((mutable_bitField0_ & 0x00000002) != 0)) {
wordOffset_.makeImmutable(); // C
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.internal_static_org_apache_accumulo_examples_wikisearch_protobuf_Info_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.internal_static_org_apache_accumulo_examples_wikisearch_protobuf_Info_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info.class, org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info.Builder.class);
}
private int bitField0_;
public static final int NORMALIZEDTERMFREQUENCY_FIELD_NUMBER = 1;
private float normalizedTermFrequency_;
/**
* <code>required float normalizedTermFrequency = 1;</code>
* @return Whether the normalizedTermFrequency field is set.
*/
@java.lang.Override
public boolean hasNormalizedTermFrequency() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* <code>required float normalizedTermFrequency = 1;</code>
* @return The normalizedTermFrequency.
*/
@java.lang.Override
public float getNormalizedTermFrequency() {
return normalizedTermFrequency_;
}
public static final int WORDOFFSET_FIELD_NUMBER = 2;
private com.google.protobuf.Internal.IntList wordOffset_;
/**
* <code>repeated uint32 wordOffset = 2;</code>
* @return A list containing the wordOffset.
*/
@java.lang.Override
public java.util.List<java.lang.Integer>
getWordOffsetList() {
return wordOffset_;
}
/**
* <code>repeated uint32 wordOffset = 2;</code>
* @return The count of wordOffset.
*/
public int getWordOffsetCount() {
return wordOffset_.size();
}
/**
* <code>repeated uint32 wordOffset = 2;</code>
* @param index The index of the element to return.
* @return The wordOffset at the given index.
*/
public int getWordOffset(int index) {
return wordOffset_.getInt(index);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasNormalizedTermFrequency()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeFloat(1, normalizedTermFrequency_);
}
for (int i = 0; i < wordOffset_.size(); i++) {
output.writeUInt32(2, wordOffset_.getInt(i));
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeFloatSize(1, normalizedTermFrequency_);
}
{
int dataSize = 0;
for (int i = 0; i < wordOffset_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeUInt32SizeNoTag(wordOffset_.getInt(i));
}
size += dataSize;
size += 1 * getWordOffsetList().size();
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info)) {
return super.equals(obj);
}
org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info other = (org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info) obj;
if (hasNormalizedTermFrequency() != other.hasNormalizedTermFrequency()) return false;
if (hasNormalizedTermFrequency()) {
if (java.lang.Float.floatToIntBits(getNormalizedTermFrequency())
!= java.lang.Float.floatToIntBits(
other.getNormalizedTermFrequency())) return false;
}
if (!getWordOffsetList()
.equals(other.getWordOffsetList())) return false;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasNormalizedTermFrequency()) {
hash = (37 * hash) + NORMALIZEDTERMFREQUENCY_FIELD_NUMBER;
hash = (53 * hash) + java.lang.Float.floatToIntBits(
getNormalizedTermFrequency());
}
if (getWordOffsetCount() > 0) {
hash = (37 * hash) + WORDOFFSET_FIELD_NUMBER;
hash = (53 * hash) + getWordOffsetList().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code org.apache.accumulo.examples.wikisearch.protobuf.Info}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:org.apache.accumulo.examples.wikisearch.protobuf.Info)
org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.InfoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.internal_static_org_apache_accumulo_examples_wikisearch_protobuf_Info_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.internal_static_org_apache_accumulo_examples_wikisearch_protobuf_Info_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info.class, org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info.Builder.class);
}
// Construct using org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
normalizedTermFrequency_ = 0F;
bitField0_ = (bitField0_ & ~0x00000001);
wordOffset_ = emptyIntList();
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.internal_static_org_apache_accumulo_examples_wikisearch_protobuf_Info_descriptor;
}
@java.lang.Override
public org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info getDefaultInstanceForType() {
return org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info.getDefaultInstance();
}
@java.lang.Override
public org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info build() {
org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info buildPartial() {
org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info result = new org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.normalizedTermFrequency_ = normalizedTermFrequency_;
to_bitField0_ |= 0x00000001;
}
if (((bitField0_ & 0x00000002) != 0)) {
wordOffset_.makeImmutable();
bitField0_ = (bitField0_ & ~0x00000002);
}
result.wordOffset_ = wordOffset_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info) {
return mergeFrom((org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info other) {
if (other == org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info.getDefaultInstance()) return this;
if (other.hasNormalizedTermFrequency()) {
setNormalizedTermFrequency(other.getNormalizedTermFrequency());
}
if (!other.wordOffset_.isEmpty()) {
if (wordOffset_.isEmpty()) {
wordOffset_ = other.wordOffset_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureWordOffsetIsMutable();
wordOffset_.addAll(other.wordOffset_);
}
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasNormalizedTermFrequency()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private float normalizedTermFrequency_ ;
/**
* <code>required float normalizedTermFrequency = 1;</code>
* @return Whether the normalizedTermFrequency field is set.
*/
@java.lang.Override
public boolean hasNormalizedTermFrequency() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* <code>required float normalizedTermFrequency = 1;</code>
* @return The normalizedTermFrequency.
*/
@java.lang.Override
public float getNormalizedTermFrequency() {
return normalizedTermFrequency_;
}
/**
* <code>required float normalizedTermFrequency = 1;</code>
* @param value The normalizedTermFrequency to set.
* @return This builder for chaining.
*/
public Builder setNormalizedTermFrequency(float value) {
bitField0_ |= 0x00000001;
normalizedTermFrequency_ = value;
onChanged();
return this;
}
/**
* <code>required float normalizedTermFrequency = 1;</code>
* @return This builder for chaining.
*/
public Builder clearNormalizedTermFrequency() {
bitField0_ = (bitField0_ & ~0x00000001);
normalizedTermFrequency_ = 0F;
onChanged();
return this;
}
private com.google.protobuf.Internal.IntList wordOffset_ = emptyIntList();
private void ensureWordOffsetIsMutable() {
if (!((bitField0_ & 0x00000002) != 0)) {
wordOffset_ = mutableCopy(wordOffset_);
bitField0_ |= 0x00000002;
}
}
/**
* <code>repeated uint32 wordOffset = 2;</code>
* @return A list containing the wordOffset.
*/
public java.util.List<java.lang.Integer>
getWordOffsetList() {
return ((bitField0_ & 0x00000002) != 0) ?
java.util.Collections.unmodifiableList(wordOffset_) : wordOffset_;
}
/**
* <code>repeated uint32 wordOffset = 2;</code>
* @return The count of wordOffset.
*/
public int getWordOffsetCount() {
return wordOffset_.size();
}
/**
* <code>repeated uint32 wordOffset = 2;</code>
* @param index The index of the element to return.
* @return The wordOffset at the given index.
*/
public int getWordOffset(int index) {
return wordOffset_.getInt(index);
}
/**
* <code>repeated uint32 wordOffset = 2;</code>
* @param index The index to set the value at.
* @param value The wordOffset to set.
* @return This builder for chaining.
*/
public Builder setWordOffset(
int index, int value) {
ensureWordOffsetIsMutable();
wordOffset_.setInt(index, value);
onChanged();
return this;
}
/**
* <code>repeated uint32 wordOffset = 2;</code>
* @param value The wordOffset to add.
* @return This builder for chaining.
*/
public Builder addWordOffset(int value) {
ensureWordOffsetIsMutable();
wordOffset_.addInt(value);
onChanged();
return this;
}
/**
* <code>repeated uint32 wordOffset = 2;</code>
* @param values The wordOffset to add.
* @return This builder for chaining.
*/
public Builder addAllWordOffset(
java.lang.Iterable<? extends java.lang.Integer> values) {
ensureWordOffsetIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, wordOffset_);
onChanged();
return this;
}
/**
* <code>repeated uint32 wordOffset = 2;</code>
* @return This builder for chaining.
*/
public Builder clearWordOffset() {
wordOffset_ = emptyIntList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:org.apache.accumulo.examples.wikisearch.protobuf.Info)
}
// @@protoc_insertion_point(class_scope:org.apache.accumulo.examples.wikisearch.protobuf.Info)
private static final org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info();
}
public static org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final com.google.protobuf.Parser<Info>
PARSER = new com.google.protobuf.AbstractParser<Info>() {
@java.lang.Override
public Info parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Info(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<Info> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<Info> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.accumulo.examples.wikisearch.protobuf.TermWeight.Info getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
private static final com.google.protobuf.Descriptors.Descriptor
internal_static_org_apache_accumulo_examples_wikisearch_protobuf_Info_descriptor;
private static final
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_org_apache_accumulo_examples_wikisearch_protobuf_Info_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\020TermWeight.proto\0220org.apache.accumulo." +
"examples.wikisearch.protobuf\";\n\004Info\022\037\n\027" +
"normalizedTermFrequency\030\001 \002(\002\022\022\n\nwordOff" +
"set\030\002 \003(\rB4\n0org.apache.accumulo.example" +
"s.wikisearch.protobufH\001"
};
descriptor = com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
});
internal_static_org_apache_accumulo_examples_wikisearch_protobuf_Info_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_org_apache_accumulo_examples_wikisearch_protobuf_Info_fieldAccessorTable = new
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_org_apache_accumulo_examples_wikisearch_protobuf_Info_descriptor,
new java.lang.String[] { "NormalizedTermFrequency", "WordOffset", });
}
// @@protoc_insertion_point(outer_class_scope)
}
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: Uid.proto
package org.apache.accumulo.examples.wikisearch.protobuf;
public final class Uid {
private Uid() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistryLite registry) {
}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
registerAllExtensions(
(com.google.protobuf.ExtensionRegistryLite) registry);
}
public interface ListOrBuilder extends
// @@protoc_insertion_point(interface_extends:org.apache.accumulo.examples.wikisearch.protobuf.List)
com.google.protobuf.MessageOrBuilder {
/**
* <code>required bool IGNORE = 1;</code>
* @return Whether the iGNORE field is set.
*/
boolean hasIGNORE();
/**
* <code>required bool IGNORE = 1;</code>
* @return The iGNORE.
*/
boolean getIGNORE();
/**
* <code>required uint64 COUNT = 2;</code>
* @return Whether the cOUNT field is set.
*/
boolean hasCOUNT();
/**
* <code>required uint64 COUNT = 2;</code>
* @return The cOUNT.
*/
long getCOUNT();
/**
* <code>repeated string UID = 3;</code>
* @return A list containing the uID.
*/
java.util.List<java.lang.String>
getUIDList();
/**
* <code>repeated string UID = 3;</code>
* @return The count of uID.
*/
int getUIDCount();
/**
* <code>repeated string UID = 3;</code>
* @param index The index of the element to return.
* @return The uID at the given index.
*/
java.lang.String getUID(int index);
/**
* <code>repeated string UID = 3;</code>
* @param index The index of the value to return.
* @return The bytes of the uID at the given index.
*/
com.google.protobuf.ByteString
getUIDBytes(int index);
}
/**
* Protobuf type {@code org.apache.accumulo.examples.wikisearch.protobuf.List}
*/
public static final class List extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:org.apache.accumulo.examples.wikisearch.protobuf.List)
ListOrBuilder {
private static final long serialVersionUID = 0L;
// Use List.newBuilder() to construct.
private List(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private List() {
uID_ = com.google.protobuf.LazyStringArrayList.EMPTY;
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new List();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private List(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
bitField0_ |= 0x00000001;
iGNORE_ = input.readBool();
break;
}
case 16: {
bitField0_ |= 0x00000002;
cOUNT_ = input.readUInt64();
break;
}
case 26: {
com.google.protobuf.ByteString bs = input.readBytes();
if (!((mutable_bitField0_ & 0x00000004) != 0)) {
uID_ = new com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000004;
}
uID_.add(bs);
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000004) != 0)) {
uID_ = uID_.getUnmodifiableView();
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.accumulo.examples.wikisearch.protobuf.Uid.internal_static_org_apache_accumulo_examples_wikisearch_protobuf_List_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.accumulo.examples.wikisearch.protobuf.Uid.internal_static_org_apache_accumulo_examples_wikisearch_protobuf_List_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.accumulo.examples.wikisearch.protobuf.Uid.List.class, org.apache.accumulo.examples.wikisearch.protobuf.Uid.List.Builder.class);
}
private int bitField0_;
public static final int IGNORE_FIELD_NUMBER = 1;
private boolean iGNORE_;
/**
* <code>required bool IGNORE = 1;</code>
* @return Whether the iGNORE field is set.
*/
@java.lang.Override
public boolean hasIGNORE() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* <code>required bool IGNORE = 1;</code>
* @return The iGNORE.
*/
@java.lang.Override
public boolean getIGNORE() {
return iGNORE_;
}
public static final int COUNT_FIELD_NUMBER = 2;
private long cOUNT_;
/**
* <code>required uint64 COUNT = 2;</code>
* @return Whether the cOUNT field is set.
*/
@java.lang.Override
public boolean hasCOUNT() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* <code>required uint64 COUNT = 2;</code>
* @return The cOUNT.
*/
@java.lang.Override
public long getCOUNT() {
return cOUNT_;
}
public static final int UID_FIELD_NUMBER = 3;
private com.google.protobuf.LazyStringList uID_;
/**
* <code>repeated string UID = 3;</code>
* @return A list containing the uID.
*/
public com.google.protobuf.ProtocolStringList
getUIDList() {
return uID_;
}
/**
* <code>repeated string UID = 3;</code>
* @return The count of uID.
*/
public int getUIDCount() {
return uID_.size();
}
/**
* <code>repeated string UID = 3;</code>
* @param index The index of the element to return.
* @return The uID at the given index.
*/
public java.lang.String getUID(int index) {
return uID_.get(index);
}
/**
* <code>repeated string UID = 3;</code>
* @param index The index of the value to return.
* @return The bytes of the uID at the given index.
*/
public com.google.protobuf.ByteString
getUIDBytes(int index) {
return uID_.getByteString(index);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasIGNORE()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCOUNT()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeBool(1, iGNORE_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeUInt64(2, cOUNT_);
}
for (int i = 0; i < uID_.size(); i++) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 3, uID_.getRaw(i));
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(1, iGNORE_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, cOUNT_);
}
{
int dataSize = 0;
for (int i = 0; i < uID_.size(); i++) {
dataSize += computeStringSizeNoTag(uID_.getRaw(i));
}
size += dataSize;
size += 1 * getUIDList().size();
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.accumulo.examples.wikisearch.protobuf.Uid.List)) {
return super.equals(obj);
}
org.apache.accumulo.examples.wikisearch.protobuf.Uid.List other = (org.apache.accumulo.examples.wikisearch.protobuf.Uid.List) obj;
if (hasIGNORE() != other.hasIGNORE()) return false;
if (hasIGNORE()) {
if (getIGNORE()
!= other.getIGNORE()) return false;
}
if (hasCOUNT() != other.hasCOUNT()) return false;
if (hasCOUNT()) {
if (getCOUNT()
!= other.getCOUNT()) return false;
}
if (!getUIDList()
.equals(other.getUIDList())) return false;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasIGNORE()) {
hash = (37 * hash) + IGNORE_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getIGNORE());
}
if (hasCOUNT()) {
hash = (37 * hash) + COUNT_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getCOUNT());
}
if (getUIDCount() > 0) {
hash = (37 * hash) + UID_FIELD_NUMBER;
hash = (53 * hash) + getUIDList().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.accumulo.examples.wikisearch.protobuf.Uid.List parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.Uid.List parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.Uid.List parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.Uid.List parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.Uid.List parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.Uid.List parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.Uid.List parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.Uid.List parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.Uid.List parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.Uid.List parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.Uid.List parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.accumulo.examples.wikisearch.protobuf.Uid.List parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.accumulo.examples.wikisearch.protobuf.Uid.List prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code org.apache.accumulo.examples.wikisearch.protobuf.List}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:org.apache.accumulo.examples.wikisearch.protobuf.List)
org.apache.accumulo.examples.wikisearch.protobuf.Uid.ListOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.accumulo.examples.wikisearch.protobuf.Uid.internal_static_org_apache_accumulo_examples_wikisearch_protobuf_List_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.accumulo.examples.wikisearch.protobuf.Uid.internal_static_org_apache_accumulo_examples_wikisearch_protobuf_List_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.accumulo.examples.wikisearch.protobuf.Uid.List.class, org.apache.accumulo.examples.wikisearch.protobuf.Uid.List.Builder.class);
}
// Construct using org.apache.accumulo.examples.wikisearch.protobuf.Uid.List.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
iGNORE_ = false;
bitField0_ = (bitField0_ & ~0x00000001);
cOUNT_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
uID_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.accumulo.examples.wikisearch.protobuf.Uid.internal_static_org_apache_accumulo_examples_wikisearch_protobuf_List_descriptor;
}
@java.lang.Override
public org.apache.accumulo.examples.wikisearch.protobuf.Uid.List getDefaultInstanceForType() {
return org.apache.accumulo.examples.wikisearch.protobuf.Uid.List.getDefaultInstance();
}
@java.lang.Override
public org.apache.accumulo.examples.wikisearch.protobuf.Uid.List build() {
org.apache.accumulo.examples.wikisearch.protobuf.Uid.List result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.accumulo.examples.wikisearch.protobuf.Uid.List buildPartial() {
org.apache.accumulo.examples.wikisearch.protobuf.Uid.List result = new org.apache.accumulo.examples.wikisearch.protobuf.Uid.List(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.iGNORE_ = iGNORE_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.cOUNT_ = cOUNT_;
to_bitField0_ |= 0x00000002;
}
if (((bitField0_ & 0x00000004) != 0)) {
uID_ = uID_.getUnmodifiableView();
bitField0_ = (bitField0_ & ~0x00000004);
}
result.uID_ = uID_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.accumulo.examples.wikisearch.protobuf.Uid.List) {
return mergeFrom((org.apache.accumulo.examples.wikisearch.protobuf.Uid.List)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.accumulo.examples.wikisearch.protobuf.Uid.List other) {
if (other == org.apache.accumulo.examples.wikisearch.protobuf.Uid.List.getDefaultInstance()) return this;
if (other.hasIGNORE()) {
setIGNORE(other.getIGNORE());
}
if (other.hasCOUNT()) {
setCOUNT(other.getCOUNT());
}
if (!other.uID_.isEmpty()) {
if (uID_.isEmpty()) {
uID_ = other.uID_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureUIDIsMutable();
uID_.addAll(other.uID_);
}
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasIGNORE()) {
return false;
}
if (!hasCOUNT()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.accumulo.examples.wikisearch.protobuf.Uid.List parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.accumulo.examples.wikisearch.protobuf.Uid.List) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private boolean iGNORE_ ;
/**
* <code>required bool IGNORE = 1;</code>
* @return Whether the iGNORE field is set.
*/
@java.lang.Override
public boolean hasIGNORE() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* <code>required bool IGNORE = 1;</code>
* @return The iGNORE.
*/
@java.lang.Override
public boolean getIGNORE() {
return iGNORE_;
}
/**
* <code>required bool IGNORE = 1;</code>
* @param value The iGNORE to set.
* @return This builder for chaining.
*/
public Builder setIGNORE(boolean value) {
bitField0_ |= 0x00000001;
iGNORE_ = value;
onChanged();
return this;
}
/**
* <code>required bool IGNORE = 1;</code>
* @return This builder for chaining.
*/
public Builder clearIGNORE() {
bitField0_ = (bitField0_ & ~0x00000001);
iGNORE_ = false;
onChanged();
return this;
}
private long cOUNT_ ;
/**
* <code>required uint64 COUNT = 2;</code>
* @return Whether the cOUNT field is set.
*/
@java.lang.Override
public boolean hasCOUNT() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* <code>required uint64 COUNT = 2;</code>
* @return The cOUNT.
*/
@java.lang.Override
public long getCOUNT() {
return cOUNT_;
}
/**
* <code>required uint64 COUNT = 2;</code>
* @param value The cOUNT to set.
* @return This builder for chaining.
*/
public Builder setCOUNT(long value) {
bitField0_ |= 0x00000002;
cOUNT_ = value;
onChanged();
return this;
}
/**
* <code>required uint64 COUNT = 2;</code>
* @return This builder for chaining.
*/
public Builder clearCOUNT() {
bitField0_ = (bitField0_ & ~0x00000002);
cOUNT_ = 0L;
onChanged();
return this;
}
private com.google.protobuf.LazyStringList uID_ = com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureUIDIsMutable() {
if (!((bitField0_ & 0x00000004) != 0)) {
uID_ = new com.google.protobuf.LazyStringArrayList(uID_);
bitField0_ |= 0x00000004;
}
}
/**
* <code>repeated string UID = 3;</code>
* @return A list containing the uID.
*/
public com.google.protobuf.ProtocolStringList
getUIDList() {
return uID_.getUnmodifiableView();
}
/**
* <code>repeated string UID = 3;</code>
* @return The count of uID.
*/
public int getUIDCount() {
return uID_.size();
}
/**
* <code>repeated string UID = 3;</code>
* @param index The index of the element to return.
* @return The uID at the given index.
*/
public java.lang.String getUID(int index) {
return uID_.get(index);
}
/**
* <code>repeated string UID = 3;</code>
* @param index The index of the value to return.
* @return The bytes of the uID at the given index.
*/
public com.google.protobuf.ByteString
getUIDBytes(int index) {
return uID_.getByteString(index);
}
/**
* <code>repeated string UID = 3;</code>
* @param index The index to set the value at.
* @param value The uID to set.
* @return This builder for chaining.
*/
public Builder setUID(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureUIDIsMutable();
uID_.set(index, value);
onChanged();
return this;
}
/**
* <code>repeated string UID = 3;</code>
* @param value The uID to add.
* @return This builder for chaining.
*/
public Builder addUID(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureUIDIsMutable();
uID_.add(value);
onChanged();
return this;
}
/**
* <code>repeated string UID = 3;</code>
* @param values The uID to add.
* @return This builder for chaining.
*/
public Builder addAllUID(
java.lang.Iterable<java.lang.String> values) {
ensureUIDIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, uID_);
onChanged();
return this;
}
/**
* <code>repeated string UID = 3;</code>
* @return This builder for chaining.
*/
public Builder clearUID() {
uID_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
return this;
}
/**
* <code>repeated string UID = 3;</code>
* @param value The bytes of the uID to add.
* @return This builder for chaining.
*/
public Builder addUIDBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureUIDIsMutable();
uID_.add(value);
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:org.apache.accumulo.examples.wikisearch.protobuf.List)
}
// @@protoc_insertion_point(class_scope:org.apache.accumulo.examples.wikisearch.protobuf.List)
    // Eagerly-created singleton returned by getDefaultInstance()/getDefaultInstanceForType().
    private static final org.apache.accumulo.examples.wikisearch.protobuf.Uid.List DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.accumulo.examples.wikisearch.protobuf.Uid.List();
    }
    /** Returns the shared default (all-fields-unset) {@code List} instance. */
    public static org.apache.accumulo.examples.wikisearch.protobuf.Uid.List getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }
    // Deprecated in favor of parser(); retained for generated-code compatibility.
    @java.lang.Deprecated public static final com.google.protobuf.Parser<List>
        PARSER = new com.google.protobuf.AbstractParser<List>() {
      @java.lang.Override
      public List parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new List(input, extensionRegistry);
      }
    };
    /** Preferred accessor for the message parser. */
    public static com.google.protobuf.Parser<List> parser() {
      return PARSER;
    }
    @java.lang.Override
    public com.google.protobuf.Parser<List> getParserForType() {
      return PARSER;
    }
    @java.lang.Override
    public org.apache.accumulo.examples.wikisearch.protobuf.Uid.List getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }
}
  // Descriptor handles for the List message; populated by the static initializer below.
  private static final com.google.protobuf.Descriptors.Descriptor
    internal_static_org_apache_accumulo_examples_wikisearch_protobuf_List_descriptor;
  private static final 
    com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_org_apache_accumulo_examples_wikisearch_protobuf_List_fieldAccessorTable;
  public static com.google.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  private static  com.google.protobuf.Descriptors.FileDescriptor
      descriptor;
  static {
    // Serialized form of Uid.proto; generated — the escaped bytes must not be edited by hand.
    java.lang.String[] descriptorData = {
      "\n\tUid.proto\0220org.apache.accumulo.example" +
      "s.wikisearch.protobuf\"2\n\004List\022\016\n\006IGNORE\030" +
      "\001 \002(\010\022\r\n\005COUNT\030\002 \002(\004\022\013\n\003UID\030\003 \003(\tB4\n0org" +
      ".apache.accumulo.examples.wikisearch.pro" +
      "tobufH\001"
    };
    descriptor = com.google.protobuf.Descriptors.FileDescriptor
      .internalBuildGeneratedFileFrom(descriptorData,
        new com.google.protobuf.Descriptors.FileDescriptor[] {
        });
    internal_static_org_apache_accumulo_examples_wikisearch_protobuf_List_descriptor =
      getDescriptor().getMessageTypes().get(0);
    internal_static_org_apache_accumulo_examples_wikisearch_protobuf_List_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_org_apache_accumulo_examples_wikisearch_protobuf_List_descriptor,
        new java.lang.String[] { "IGNORE", "COUNT", "UID", });
  }
// @@protoc_insertion_point(outer_class_scope)
}
| 6,207 |
0 | Create_ds/accumulo-wikisearch/query/src/test/hadoop2/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/test/hadoop2/org/apache/accumulo/examples/wikisearch/logic/TestQueryLogic.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.logic;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map.Entry;
import junit.framework.Assert;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.mock.MockInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.wikisearch.ingest.WikipediaConfiguration;
import org.apache.accumulo.examples.wikisearch.ingest.WikipediaIngester;
import org.apache.accumulo.examples.wikisearch.ingest.WikipediaInputFormat.WikipediaInputSplit;
import org.apache.accumulo.examples.wikisearch.ingest.WikipediaMapper;
import org.apache.accumulo.examples.wikisearch.parser.RangeCalculator;
import org.apache.accumulo.examples.wikisearch.reader.AggregatingRecordReader;
import org.apache.accumulo.examples.wikisearch.sample.Document;
import org.apache.accumulo.examples.wikisearch.sample.Field;
import org.apache.accumulo.examples.wikisearch.sample.Results;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.security.Credentials;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.Before;
import org.junit.Test;
public class TestQueryLogic {

  private static final String METADATA_TABLE_NAME = "wikiMetadata";
  private static final String TABLE_NAME = "wiki";
  private static final String INDEX_TABLE_NAME = "wikiIndex";
  private static final String RINDEX_TABLE_NAME = "wikiReverseIndex";

  /** Every table written during ingest; each gets its own BatchWriter in {@link #setup()}. */
  private static final String[] TABLE_NAMES = {METADATA_TABLE_NAME, TABLE_NAME, RINDEX_TABLE_NAME, INDEX_TABLE_NAME};

  /**
   * RecordWriter that routes each mutation to the BatchWriter registered for its
   * destination table in {@link #writerMap}, so map output lands in the MockInstance.
   */
  private class MockAccumuloRecordWriter extends RecordWriter<Text,Mutation> {
    @Override
    public void write(Text key, Mutation value) throws IOException, InterruptedException {
      try {
        writerMap.get(key).addMutation(value);
      } catch (MutationsRejectedException e) {
        throw new IOException("Error adding mutation", e);
      }
    }

    /** Flushes and closes all writers so buffered mutations become visible to scans. */
    @Override
    public void close(TaskAttemptContext context) throws IOException, InterruptedException {
      try {
        for (BatchWriter w : writerMap.values()) {
          w.flush();
          w.close();
        }
      } catch (MutationsRejectedException e) {
        throw new IOException("Error closing Batch Writer", e);
      }
    }
  }

  private Connector c = null;
  private Configuration conf = new Configuration();
  private HashMap<Text,BatchWriter> writerMap = new HashMap<Text,BatchWriter>();
  private QueryLogic table = null;

  /**
   * Ingests the bundled enwiki sample through {@link WikipediaMapper} into a
   * MockInstance, then configures the {@link QueryLogic} under test against the
   * resulting tables. Hadoop 2 made {@code Mapper.Context} abstract, so an
   * anonymous shim below delegates every Context method to a {@link MapContextImpl}.
   */
  @Before
  public void setup() throws Exception {
    Logger.getLogger(AbstractQueryLogic.class).setLevel(Level.DEBUG);
    Logger.getLogger(QueryLogic.class).setLevel(Level.DEBUG);
    Logger.getLogger(RangeCalculator.class).setLevel(Level.DEBUG);
    conf.set(AggregatingRecordReader.START_TOKEN, "<page>");
    conf.set(AggregatingRecordReader.END_TOKEN, "</page>");
    conf.set(WikipediaConfiguration.TABLE_NAME, TABLE_NAME);
    conf.set(WikipediaConfiguration.NUM_PARTITIONS, "1");
    conf.set(WikipediaConfiguration.NUM_GROUPS, "1");

    MockInstance i = new MockInstance();
    c = i.getConnector("root", new PasswordToken(""));
    WikipediaIngester.createTables(c.tableOperations(), TABLE_NAME, false);
    // Renamed from "table" to avoid shadowing the QueryLogic field of the same name.
    for (String tableName : TABLE_NAMES) {
      writerMap.put(new Text(tableName), c.createBatchWriter(tableName, 1000L, 1000L, 1));
    }

    TaskAttemptID id = new TaskAttemptID("fake", 1, TaskType.MAP, 1, 1);
    TaskAttemptContext context = new TaskAttemptContextImpl(conf, id);

    RawLocalFileSystem fs = new RawLocalFileSystem();
    fs.setConf(conf);

    URL url = ClassLoader.getSystemResource("enwiki-20110901-001.xml");
    Assert.assertNotNull(url);
    File data = new File(url.toURI());
    Path tmpFile = new Path(data.getAbsolutePath());

    // Set up the Mapper plumbing: one split covering the entire sample file.
    WikipediaInputSplit split = new WikipediaInputSplit(new FileSplit(tmpFile, 0, fs.pathToFile(tmpFile).length(), null), 0);
    AggregatingRecordReader rr = new AggregatingRecordReader();
    Path ocPath = new Path(tmpFile, "oc");
    OutputCommitter oc = new FileOutputCommitter(ocPath, context);
    fs.deleteOnExit(ocPath);
    StandaloneStatusReporter sr = new StandaloneStatusReporter();
    rr.initialize(split, context);
    MockAccumuloRecordWriter rw = new MockAccumuloRecordWriter();
    WikipediaMapper mapper = new WikipediaMapper();

    // Backing context that the anonymous Mapper.Context shim delegates to.
    final MapContextImpl<LongWritable,Text,Text,Mutation> mapContext = new MapContextImpl<LongWritable,Text,Text,Mutation>(conf, id, rr, rw, oc, sr, split);

    // Load data into Mock Accumulo. Every method simply forwards to mapContext.
    Mapper<LongWritable,Text,Text,Mutation>.Context con = mapper.new Context() {
      @Override
      public InputSplit getInputSplit() {
        return mapContext.getInputSplit();
      }

      @Override
      public LongWritable getCurrentKey() throws IOException, InterruptedException {
        return mapContext.getCurrentKey();
      }

      @Override
      public Text getCurrentValue() throws IOException, InterruptedException {
        return mapContext.getCurrentValue();
      }

      @Override
      public boolean nextKeyValue() throws IOException, InterruptedException {
        return mapContext.nextKeyValue();
      }

      @Override
      public Counter getCounter(Enum<?> counterName) {
        return mapContext.getCounter(counterName);
      }

      @Override
      public Counter getCounter(String groupName, String counterName) {
        return mapContext.getCounter(groupName, counterName);
      }

      @Override
      public OutputCommitter getOutputCommitter() {
        return mapContext.getOutputCommitter();
      }

      @Override
      public void write(Text key, Mutation value) throws IOException,
          InterruptedException {
        mapContext.write(key, value);
      }

      @Override
      public String getStatus() {
        return mapContext.getStatus();
      }

      @Override
      public TaskAttemptID getTaskAttemptID() {
        return mapContext.getTaskAttemptID();
      }

      @Override
      public void setStatus(String msg) {
        mapContext.setStatus(msg);
      }

      @Override
      public Path[] getArchiveClassPaths() {
        return mapContext.getArchiveClassPaths();
      }

      @Override
      public String[] getArchiveTimestamps() {
        return mapContext.getArchiveTimestamps();
      }

      @Override
      public URI[] getCacheArchives() throws IOException {
        return mapContext.getCacheArchives();
      }

      @Override
      public URI[] getCacheFiles() throws IOException {
        // Bug fix: this previously delegated to getCacheArchives(), returning the
        // archive list instead of the cache-file list.
        return mapContext.getCacheFiles();
      }

      @Override
      public Class<? extends Reducer<?, ?, ?, ?>> getCombinerClass()
          throws ClassNotFoundException {
        return mapContext.getCombinerClass();
      }

      @Override
      public Configuration getConfiguration() {
        return mapContext.getConfiguration();
      }

      @Override
      public Path[] getFileClassPaths() {
        return mapContext.getFileClassPaths();
      }

      @Override
      public String[] getFileTimestamps() {
        return mapContext.getFileTimestamps();
      }

      @Override
      public RawComparator<?> getGroupingComparator() {
        return mapContext.getGroupingComparator();
      }

      @Override
      public Class<? extends InputFormat<?, ?>> getInputFormatClass()
          throws ClassNotFoundException {
        return mapContext.getInputFormatClass();
      }

      @Override
      public String getJar() {
        return mapContext.getJar();
      }

      @Override
      public JobID getJobID() {
        return mapContext.getJobID();
      }

      @Override
      public String getJobName() {
        return mapContext.getJobName();
      }

      @Override
      public boolean getJobSetupCleanupNeeded() {
        return mapContext.getJobSetupCleanupNeeded();
      }

      @Override
      public boolean getTaskCleanupNeeded() {
        return mapContext.getTaskCleanupNeeded();
      }

      @Override
      public Path[] getLocalCacheArchives() throws IOException {
        return mapContext.getLocalCacheArchives();
      }

      @Override
      public Path[] getLocalCacheFiles() throws IOException {
        return mapContext.getLocalCacheFiles();
      }

      @Override
      public Class<?> getMapOutputKeyClass() {
        return mapContext.getMapOutputKeyClass();
      }

      @Override
      public Class<?> getMapOutputValueClass() {
        return mapContext.getMapOutputValueClass();
      }

      @Override
      public Class<? extends Mapper<?, ?, ?, ?>> getMapperClass()
          throws ClassNotFoundException {
        return mapContext.getMapperClass();
      }

      @Override
      public int getMaxMapAttempts() {
        return mapContext.getMaxMapAttempts();
      }

      @Override
      public int getMaxReduceAttempts() {
        return mapContext.getMaxReduceAttempts();
      }

      @Override
      public int getNumReduceTasks() {
        return mapContext.getNumReduceTasks();
      }

      @Override
      public Class<? extends OutputFormat<?, ?>> getOutputFormatClass()
          throws ClassNotFoundException {
        return mapContext.getOutputFormatClass();
      }

      @Override
      public Class<?> getOutputKeyClass() {
        return mapContext.getOutputKeyClass();
      }

      @Override
      public Class<?> getOutputValueClass() {
        return mapContext.getOutputValueClass();
      }

      @Override
      public Class<? extends Partitioner<?, ?>> getPartitionerClass()
          throws ClassNotFoundException {
        return mapContext.getPartitionerClass();
      }

      @Override
      public Class<? extends Reducer<?, ?, ?, ?>> getReducerClass()
          throws ClassNotFoundException {
        return mapContext.getReducerClass();
      }

      @Override
      public RawComparator<?> getSortComparator() {
        return mapContext.getSortComparator();
      }

      @Override
      public boolean getSymlink() {
        return mapContext.getSymlink();
      }

      @Override
      public Path getWorkingDirectory() throws IOException {
        return mapContext.getWorkingDirectory();
      }

      @Override
      public void progress() {
        mapContext.progress();
      }

      @Override
      public boolean getProfileEnabled() {
        return mapContext.getProfileEnabled();
      }

      @Override
      public String getProfileParams() {
        return mapContext.getProfileParams();
      }

      @Override
      public IntegerRanges getProfileTaskRange(boolean isMap) {
        return mapContext.getProfileTaskRange(isMap);
      }

      @Override
      public String getUser() {
        return mapContext.getUser();
      }

      @Override
      public Credentials getCredentials() {
        return mapContext.getCredentials();
      }

      @Override
      public float getProgress() {
        return mapContext.getProgress();
      }
    };
    mapper.run(con);

    // Flush and close record writers so the scans issued by runQuery() see the data.
    rw.close(context);

    table = new QueryLogic();
    table.setMetadataTableName(METADATA_TABLE_NAME);
    table.setTableName(TABLE_NAME);
    table.setIndexTableName(INDEX_TABLE_NAME);
    table.setReverseIndexTableName(RINDEX_TABLE_NAME);
    table.setUseReadAheadIterator(false);
    table.setUnevaluatedFields(Collections.singletonList("TEXT"));
  }

  /** Dumps every entry of the given table to stdout; handy when a test fails. */
  void debugQuery(String tableName) throws Exception {
    Scanner s = c.createScanner(tableName, new Authorizations("all"));
    Range r = new Range();
    s.setRange(r);
    for (Entry<Key,Value> entry : s)
      System.out.println(entry.getKey().toString() + " " + entry.getValue().toString());
  }

  /** Verifies TITLE equality queries and an unevaluated-field (TEXT) query. */
  @Test
  public void testTitle() throws Exception {
    Logger.getLogger(AbstractQueryLogic.class).setLevel(Level.OFF);
    Logger.getLogger(RangeCalculator.class).setLevel(Level.OFF);
    List<String> auths = new ArrayList<String>();
    auths.add("enwiki");

    Results results = table.runQuery(c, auths, "TITLE == 'asphalt' or TITLE == 'abacus' or TITLE == 'acid' or TITLE == 'acronym'", null, null, null);
    List<Document> docs = results.getResults();
    assertEquals(4, docs.size());

    results = table.runQuery(c, auths, "TEXT == 'abacus'", null, null, null);
    docs = results.getResults();
    assertEquals(1, docs.size());
    for (Document doc : docs) {
      System.out.println("id: " + doc.getId());
      for (Field field : doc.getFields())
        System.out.println(field.getFieldName() + " -> " + field.getFieldValue());
    }
  }
}
| 6,208 |
0 | Create_ds/accumulo-wikisearch/query/src/test/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/test/java/org/apache/accumulo/examples/wikisearch/logic/StandaloneStatusReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.logic;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.StatusReporter;
public class StandaloneStatusReporter extends StatusReporter {

  /** Accumulates counters reported by a mapper running outside a real cluster. */
  private Counters counters = new Counters();

  private long filesProcessed = 0;
  private long recordsProcessed = 0;

  public Counters getCounters() {
    return counters;
  }

  @Override
  public Counter getCounter(Enum<?> name) {
    return counters.findCounter(name);
  }

  @Override
  public Counter getCounter(String group, String name) {
    return counters.findCounter(group, name);
  }

  /** No-op: progress reporting has no consumer in standalone runs. */
  @Override
  public void progress() {}

  /** No-op: status strings are discarded in standalone runs. */
  @Override
  public void setStatus(String status) {}

  public long getFilesProcessed() {
    return filesProcessed;
  }

  public long getRecordsProcessed() {
    return recordsProcessed;
  }

  /** Marks one file complete and resets the per-file record counter. */
  public void incrementFilesProcessed() {
    filesProcessed++;
    recordsProcessed = 0;
  }

  public void incrementRecordsProcessed() {
    recordsProcessed++;
  }

  // Deliberately not @Override: getProgress() is abstract only in the Hadoop 2
  // StatusReporter, and this class is shared with the Hadoop 1 build.
  public float getProgress() {
    return 0;
  }
}
| 6,209 |
0 | Create_ds/accumulo-wikisearch/query/src/test/hadoop1/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/test/hadoop1/org/apache/accumulo/examples/wikisearch/logic/TestQueryLogic.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.logic;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map.Entry;
import junit.framework.Assert;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.mock.MockInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.wikisearch.ingest.WikipediaConfiguration;
import org.apache.accumulo.examples.wikisearch.ingest.WikipediaIngester;
import org.apache.accumulo.examples.wikisearch.ingest.WikipediaInputFormat.WikipediaInputSplit;
import org.apache.accumulo.examples.wikisearch.ingest.WikipediaMapper;
import org.apache.accumulo.examples.wikisearch.parser.RangeCalculator;
import org.apache.accumulo.examples.wikisearch.reader.AggregatingRecordReader;
import org.apache.accumulo.examples.wikisearch.sample.Document;
import org.apache.accumulo.examples.wikisearch.sample.Field;
import org.apache.accumulo.examples.wikisearch.sample.Results;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.Before;
import org.junit.Test;
public class TestQueryLogic {
  private static final String METADATA_TABLE_NAME = "wikiMetadata";
  private static final String TABLE_NAME = "wiki";
  private static final String INDEX_TABLE_NAME = "wikiIndex";
  private static final String RINDEX_TABLE_NAME = "wikiReverseIndex";
  // Every table written during ingest; each receives its own BatchWriter in setup().
  private static final String TABLE_NAMES[] = {METADATA_TABLE_NAME, TABLE_NAME, RINDEX_TABLE_NAME, INDEX_TABLE_NAME};
  // RecordWriter that routes each mutation to the BatchWriter registered for its
  // destination table in writerMap, so map output lands in the MockInstance.
  private class MockAccumuloRecordWriter extends RecordWriter<Text,Mutation> {
    @Override
    public void write(Text key, Mutation value) throws IOException, InterruptedException {
      try {
        writerMap.get(key).addMutation(value);
      } catch (MutationsRejectedException e) {
        throw new IOException("Error adding mutation", e);
      }
    }
    // Flushes and closes all writers so buffered mutations become visible to scans.
    @Override
    public void close(TaskAttemptContext context) throws IOException, InterruptedException {
      try {
        for (BatchWriter w : writerMap.values()) {
          w.flush();
          w.close();
        }
      } catch (MutationsRejectedException e) {
        throw new IOException("Error closing Batch Writer", e);
      }
    }
  }
  private Connector c = null;
  private Configuration conf = new Configuration();
  private HashMap<Text,BatchWriter> writerMap = new HashMap<Text,BatchWriter>();
  private QueryLogic table = null;
  // Ingests the bundled enwiki sample through WikipediaMapper into a MockInstance,
  // then configures the QueryLogic under test. Statement order matters: the record
  // reader must be initialized before mapper.run, and the writers must be flushed
  // (rw.close) before any query scans the tables.
  @Before
  public void setup() throws Exception {
    Logger.getLogger(AbstractQueryLogic.class).setLevel(Level.DEBUG);
    Logger.getLogger(QueryLogic.class).setLevel(Level.DEBUG);
    Logger.getLogger(RangeCalculator.class).setLevel(Level.DEBUG);
    conf.set(AggregatingRecordReader.START_TOKEN, "<page>");
    conf.set(AggregatingRecordReader.END_TOKEN, "</page>");
    conf.set(WikipediaConfiguration.TABLE_NAME, TABLE_NAME);
    conf.set(WikipediaConfiguration.NUM_PARTITIONS, "1");
    conf.set(WikipediaConfiguration.NUM_GROUPS, "1");
    MockInstance i = new MockInstance();
    c = i.getConnector("root", new PasswordToken(""));
    WikipediaIngester.createTables(c.tableOperations(), TABLE_NAME, false);
    for (String table : TABLE_NAMES) {
      writerMap.put(new Text(table), c.createBatchWriter(table, 1000L, 1000L, 1));
    }
    TaskAttemptID id = new TaskAttemptID();
    TaskAttemptContext context = new TaskAttemptContext(conf, id);
    RawLocalFileSystem fs = new RawLocalFileSystem();
    fs.setConf(conf);
    URL url = ClassLoader.getSystemResource("enwiki-20110901-001.xml");
    Assert.assertNotNull(url);
    File data = new File(url.toURI());
    Path tmpFile = new Path(data.getAbsolutePath());
    // Setup the Mapper: one split covering the entire sample file.
    WikipediaInputSplit split = new WikipediaInputSplit(new FileSplit(tmpFile, 0, fs.pathToFile(tmpFile).length(), null), 0);
    AggregatingRecordReader rr = new AggregatingRecordReader();
    Path ocPath = new Path(tmpFile, "oc");
    OutputCommitter oc = new FileOutputCommitter(ocPath, context);
    fs.deleteOnExit(ocPath);
    StandaloneStatusReporter sr = new StandaloneStatusReporter();
    rr.initialize(split, context);
    MockAccumuloRecordWriter rw = new MockAccumuloRecordWriter();
    WikipediaMapper mapper = new WikipediaMapper();
    // Load data into Mock Accumulo. Hadoop 1's Mapper.Context is concrete, so it
    // can be instantiated directly (unlike the Hadoop 2 variant of this test).
    Mapper<LongWritable,Text,Text,Mutation>.Context con = mapper.new Context(conf, id, rr, rw, oc, sr, split);
    mapper.run(con);
    // Flush and close record writers so the scans issued by runQuery() see the data.
    rw.close(context);
    table = new QueryLogic();
    table.setMetadataTableName(METADATA_TABLE_NAME);
    table.setTableName(TABLE_NAME);
    table.setIndexTableName(INDEX_TABLE_NAME);
    table.setReverseIndexTableName(RINDEX_TABLE_NAME);
    table.setUseReadAheadIterator(false);
    table.setUnevaluatedFields(Collections.singletonList("TEXT"));
  }
  // Dumps every entry of the given table to stdout; handy when a test fails.
  void debugQuery(String tableName) throws Exception {
    Scanner s = c.createScanner(tableName, new Authorizations("all"));
    Range r = new Range();
    s.setRange(r);
    for (Entry<Key,Value> entry : s)
      System.out.println(entry.getKey().toString() + " " + entry.getValue().toString());
  }
  // Verifies TITLE equality queries and an unevaluated-field (TEXT) query.
  @Test
  public void testTitle() throws Exception {
    Logger.getLogger(AbstractQueryLogic.class).setLevel(Level.OFF);
    Logger.getLogger(RangeCalculator.class).setLevel(Level.OFF);
    List<String> auths = new ArrayList<String>();
    auths.add("enwiki");
    Results results = table.runQuery(c, auths, "TITLE == 'asphalt' or TITLE == 'abacus' or TITLE == 'acid' or TITLE == 'acronym'", null, null, null);
    List<Document> docs = results.getResults();
    assertEquals(4, docs.size());
    results = table.runQuery(c, auths, "TEXT == 'abacus'", null, null, null);
    docs = results.getResults();
    assertEquals(1, docs.size());
    for (Document doc : docs) {
      System.out.println("id: " + doc.getId());
      for (Field field : doc.getFields())
        System.out.println(field.getFieldName() + " -> " + field.getFieldValue());
    }
  }
}
| 6,210 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/util/BaseKeyParser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.util;
import java.util.HashMap;
import java.util.Map;
import org.apache.accumulo.core.data.Key;
public class BaseKeyParser {
  public static final String ROW_FIELD = "row";
  public static final String COLUMN_FAMILY_FIELD = "columnFamily";
  public static final String COLUMN_QUALIFIER_FIELD = "columnQualifier";

  // Field name -> parsed value for the most recently parsed key.
  protected Map<String,String> keyFields = new HashMap<>();
  protected Key key = null;

  /**
   * Splits {@code key} into its row, column family, and column qualifier and
   * caches them for the accessors. Any previously parsed state is discarded
   * first, so a single parser instance can be reused across many keys.
   */
  public void parse(Key key) {
    this.key = key;
    keyFields.clear();
    keyFields.put(ROW_FIELD, key.getRow().toString());
    keyFields.put(COLUMN_FAMILY_FIELD, key.getColumnFamily().toString());
    keyFields.put(COLUMN_QUALIFIER_FIELD, key.getColumnQualifier().toString());
  }

  /** Returns the parsed value for {@code fieldName}, or null if absent. */
  public String getFieldValue(String fieldName) {
    return keyFields.get(fieldName);
  }

  /** Returns the names of all fields captured by the last parse. */
  public String[] getFieldNames() {
    return keyFields.keySet().toArray(new String[0]);
  }

  /** Returns a fresh parser of the same concrete type, with no parsed state. */
  public BaseKeyParser duplicate() {
    return new BaseKeyParser();
  }

  public String getRow() {
    return getFieldValue(ROW_FIELD);
  }

  public String getColumnFamily() {
    return getFieldValue(COLUMN_FAMILY_FIELD);
  }

  public String getColumnQualifier() {
    return getFieldValue(COLUMN_QUALIFIER_FIELD);
  }

  /** Returns the last key handed to {@link #parse(Key)}, or null. */
  public Key getKey() {
    return this.key;
  }
}
| 6,211 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/util/FieldIndexKeyParser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.util;
import org.apache.accumulo.core.data.Key;
/**
 * Key parser for field-index entries.
 *
 * <p>The previous version of this class re-implemented {@code parse} and every
 * accessor with bodies byte-for-byte identical to the {@link KeyParser}
 * superclass (including the same {@code "\0"} delimiter), so all of that logic
 * is now simply inherited. Only {@link #duplicate()} is overridden, because it
 * must return a {@code FieldIndexKeyParser} rather than a {@code KeyParser}.
 * The public surface (including {@code DELIMITER}) is unchanged.
 */
public class FieldIndexKeyParser extends KeyParser {

  /** Component separator within key parts; same value as {@link KeyParser#DELIMITER}. */
  public static final String DELIMITER = "\0";

  @Override
  public BaseKeyParser duplicate() {
    return new FieldIndexKeyParser();
  }
}
| 6,212 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/util/KeyParser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.util;
import org.apache.accumulo.core.data.Key;
public class KeyParser extends BaseKeyParser {
  public static final String SELECTOR_FIELD = "selector";
  public static final String DATATYPE_FIELD = "dataType";
  public static final String FIELDNAME_FIELD = "fieldName";
  public static final String UID_FIELD = "uid";
  public static final String DELIMITER = "\0";

  /**
   * Parses a key. Beyond the base row/family/qualifier fields, this records the
   * second null-delimited component of the column family as the field name, and
   * the first three null-delimited components of the column qualifier as the
   * selector, data type, and uid. Missing components become the empty string.
   */
  @Override
  public void parse(Key key) {
    super.parse(key);
    String[] famParts = keyFields.get(BaseKeyParser.COLUMN_FAMILY_FIELD).split(DELIMITER);
    keyFields.put(FIELDNAME_FIELD, partOrEmpty(famParts, 1));
    String[] qualParts = keyFields.get(BaseKeyParser.COLUMN_QUALIFIER_FIELD).split(DELIMITER);
    keyFields.put(SELECTOR_FIELD, partOrEmpty(qualParts, 0));
    keyFields.put(DATATYPE_FIELD, partOrEmpty(qualParts, 1));
    keyFields.put(UID_FIELD, partOrEmpty(qualParts, 2));
  }

  // Returns parts[index], or "" when the array has no such element.
  private static String partOrEmpty(String[] parts, int index) {
    return index < parts.length ? parts[index] : "";
  }

  @Override
  public BaseKeyParser duplicate() {
    return new KeyParser();
  }

  public String getSelector() {
    return keyFields.get(SELECTOR_FIELD);
  }

  public String getDataType() {
    return keyFields.get(DATATYPE_FIELD);
  }

  public String getFieldName() {
    return keyFields.get(FIELDNAME_FIELD);
  }

  public String getUid() {
    return keyFields.get(UID_FIELD);
  }

  /** Returns the data type and uid joined by the null delimiter. */
  public String getDataTypeUid() {
    return getDataType() + DELIMITER + getUid();
  }

  /** Alias for {@link #getSelector()}. */
  public String getFieldValue() {
    return getSelector();
  }
}
| 6,213 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/logic/QueryLogic.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.logic;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.wikisearch.iterator.EvaluatingIterator;
import org.apache.accumulo.examples.wikisearch.normalizer.LcNoDiacriticsNormalizer;
import org.apache.accumulo.examples.wikisearch.normalizer.Normalizer;
import org.apache.accumulo.examples.wikisearch.parser.QueryParser.QueryTerm;
import org.apache.accumulo.examples.wikisearch.parser.RangeCalculator;
import org.apache.accumulo.examples.wikisearch.protobuf.Uid;
import org.apache.accumulo.examples.wikisearch.util.TextUtil;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Logger;
import com.google.common.collect.Multimap;
import com.google.protobuf.InvalidProtocolBufferException;
/**
* <pre>
* <h2>Overview</h2>
* QueryTable implementation that works with the JEXL grammar. This QueryTable
* uses the metadata, global index, and partitioned table to return
* results based on the query. Example queries:
*
* <b>Single Term Query</b>
* 'foo' - looks in global index for foo, and if any entries are found, then the query
* is rewritten to be field1 == 'foo' or field2 == 'foo', etc. This is then passed
* down the optimized query path which uses the intersecting iterators on the shard
* table.
*
* <b>Boolean expression</b>
* field == 'foo' - For fielded queries, those that contain a field, an operator, and a literal (string or number),
* the query is parsed and the set of eventFields in the query that are indexed is determined by
* querying the metadata table. Depending on the conjunctions in the query (or, and, not) and the
* eventFields that are indexed, the query may be sent down the optimized path or the full scan path.
*
* We are not supporting all of the operators that JEXL supports at this time. We are supporting the following operators:
*
* ==, !=, >, ≥, <, ≤, =~, and !~
*
* Custom functions can be created and registered with the Jexl engine. The functions can be used in the queries in conjunction
* with other supported operators. A sample function has been created, called between, and is bound to the 'f' namespace. An
* example using this function is : "f:between(LATITUDE,60.0, 70.0)"
*
* <h2>Constraints on Query Structure</h2>
* Queries that are sent to this class need to be formatted such that there is a space on either side of the operator. We are
* rewriting the query in some cases and the current implementation is expecting a space on either side of the operator. Users
* should also be aware that the literals used in the query need to match the data in the table. If an error occurs in the evaluation
* we are skipping the event.
*
* <h2>Notes on Optimization</h2>
* Queries that meet any of the following criteria will perform a full scan of the events in the partitioned table:
*
* 1. An 'or' conjunction exists in the query but not all of the terms are indexed.
* 2. No indexed terms exist in the query
* 3. An unsupported operator exists in the query
*
* </pre>
*
*/
public class QueryLogic extends AbstractQueryLogic {

  protected static Logger log = Logger.getLogger(QueryLogic.class);

  public QueryLogic() {
    super();
  }

  /**
   * Looks up the supplied fielded terms in the global and reverse index tables via a
   * {@link RangeCalculator}, which computes the candidate ranges for the optimized query path.
   *
   * @return the executed range calculator holding the computed ranges and cardinalities
   */
  @Override
  protected RangeCalculator getTermIndexInformation(Connector c, Authorizations auths, Multimap<String,Normalizer> indexedTerms,
      Multimap<String,QueryTerm> terms, String indexTableName, String reverseIndexTableName, String queryString, int queryThreads, Set<String> typeFilter)
      throws TableNotFoundException, org.apache.commons.jexl2.parser.ParseException {
    RangeCalculator calc = new RangeCalculator();
    calc.execute(c, auths, indexedTerms, terms, queryString, this, typeFilter);
    return calc;
  }

  /**
   * Full-scan fallback: a single unbounded range covering the entire partitioned table.
   * The begin/end dates and terms are ignored by this implementation.
   */
  @Override
  protected Collection<Range> getFullScanRange(Date begin, Date end, Multimap<String,QueryTerm> terms) {
    return Collections.singletonList(new Range());
  }

  /**
   * Performs a lookup in the global index for a single non-fielded term and returns the
   * partition/event ranges where the normalized term occurs.
   *
   * @param value raw (un-normalized) term the user searched for; surrounding single quotes
   *        are stripped after normalization
   * @param typeFilter optional set of datatypes to restrict the lookup to (null = all)
   */
  @Override
  protected IndexRanges getTermIndexInformation(Connector c, Authorizations auths, String value, Set<String> typeFilter) throws TableNotFoundException {
    final String dummyTermName = "DUMMY";
    UnionIndexRanges indexRanges = new UnionIndexRanges();
    // The entries in the index are normalized. Since we don't have a field, just try the
    // LcNoDiacriticsNormalizer.
    String normalizedFieldValue = new LcNoDiacriticsNormalizer().normalizeFieldValue("", value);
    // Remove the begin and end ' marks from a quoted literal.
    if (normalizedFieldValue.startsWith("'") && normalizedFieldValue.endsWith("'")) {
      normalizedFieldValue = normalizedFieldValue.substring(1, normalizedFieldValue.length() - 1);
    }
    Text fieldValue = new Text(normalizedFieldValue);
    if (log.isDebugEnabled()) {
      log.debug("Querying index table : " + this.getIndexTableName() + " for normalized indexed term: " + fieldValue);
    }
    Scanner scanner = c.createScanner(this.getIndexTableName(), auths);
    Range r = new Range(fieldValue);
    scanner.setRange(r);
    if (log.isDebugEnabled()) {
      log.debug("Range for index query: " + r.toString());
    }
    for (Entry<Key,Value> entry : scanner) {
      if (log.isDebugEnabled()) {
        log.debug("Index entry: " + entry.getKey().toString());
      }
      // The colq holds the shard id and, after a null byte, the datatype.
      String fieldName = entry.getKey().getColumnFamily().toString();
      String colq = entry.getKey().getColumnQualifier().toString();
      int separator = colq.indexOf(EvaluatingIterator.NULL_BYTE_STRING);
      String shardId;
      String datatype = null;
      if (separator != -1) {
        shardId = colq.substring(0, separator);
        datatype = colq.substring(separator + 1);
      } else {
        shardId = colq;
      }
      // Skip this entry if its datatype is excluded by the filter.
      if (null != datatype && null != typeFilter && !typeFilter.contains(datatype))
        continue;
      // Parse the UID.List object from the value. On failure we still know which shard the
      // term is in, so fall back to a whole-shard range below instead of dereferencing a
      // null list (the previous code threw a NullPointerException here).
      Uid.List uidList = null;
      try {
        uidList = Uid.List.parseFrom(entry.getValue().get());
      } catch (InvalidProtocolBufferException e) {
        log.warn("Unable to parse Uid.List for index entry: " + entry.getKey(), e);
      }
      // Add the count for this shard to the total count for the term.
      long entryCount = (null == uidList) ? 0 : uidList.getCOUNT();
      Long storedCount = indexRanges.getTermCardinality().get(dummyTermName);
      long count = (null == storedCount) ? entryCount : entryCount + storedCount;
      indexRanges.getTermCardinality().put(dummyTermName, count);
      // Record the field name under which this value was found.
      indexRanges.getFieldNamesAndValues().put(fieldName, normalizedFieldValue);
      // Create the scan ranges.
      Text shard = new Text(shardId);
      if (null == uidList || uidList.getIGNORE() || null == datatype) {
        // Whole-shard range: either the UID list was absent/unparseable, its IGNORE flag is
        // set, or we have no datatype with which to build event-level ranges.
        indexRanges.add(dummyTermName, new Range(shard));
      } else {
        // We have UIDs: create one event range per UID.
        for (String uuid : uidList.getUIDList()) {
          Text cf = new Text(datatype);
          TextUtil.textAppend(cf, uuid);
          Key startKey = new Key(shard, cf);
          Key endKey = new Key(shard, new Text(cf.toString() + EvaluatingIterator.NULL_BYTE_STRING));
          Range eventRange = new Range(startKey, true, endKey, false);
          indexRanges.add(dummyTermName, eventRange);
        }
      }
    }
    if (log.isDebugEnabled()) {
      log.debug("Found " + indexRanges.getRanges().size() + " entries in the index for field value: " + normalizedFieldValue);
    }
    return indexRanges;
  }
}
| 6,214 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/logic/AbstractQueryLogic.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.logic;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeSet;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.user.RegExFilter;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.wikisearch.ingest.WikipediaMapper;
import org.apache.accumulo.examples.wikisearch.iterator.BooleanLogicIterator;
import org.apache.accumulo.examples.wikisearch.iterator.EvaluatingIterator;
import org.apache.accumulo.examples.wikisearch.iterator.OptimizedQueryIterator;
import org.apache.accumulo.examples.wikisearch.iterator.ReadAheadIterator;
import org.apache.accumulo.examples.wikisearch.normalizer.LcNoDiacriticsNormalizer;
import org.apache.accumulo.examples.wikisearch.normalizer.Normalizer;
import org.apache.accumulo.examples.wikisearch.parser.EventFields;
import org.apache.accumulo.examples.wikisearch.parser.EventFields.FieldValue;
import org.apache.accumulo.examples.wikisearch.parser.FieldIndexQueryReWriter;
import org.apache.accumulo.examples.wikisearch.parser.JexlOperatorConstants;
import org.apache.accumulo.examples.wikisearch.parser.QueryParser;
import org.apache.accumulo.examples.wikisearch.parser.QueryParser.QueryTerm;
import org.apache.accumulo.examples.wikisearch.parser.RangeCalculator;
import org.apache.accumulo.examples.wikisearch.sample.Document;
import org.apache.accumulo.examples.wikisearch.sample.Field;
import org.apache.accumulo.examples.wikisearch.sample.Results;
import org.apache.commons.jexl2.parser.ParserTreeConstants;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.time.StopWatch;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Logger;
import com.esotericsoftware.kryo.Kryo;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
/**
* <pre>
* <h2>Overview</h2>
* Query implementation that works with the JEXL grammar. This
* uses the metadata, global index, and partitioned table to return
* results based on the query. Example queries:
*
* <b>Single Term Query</b>
* 'foo' - looks in global index for foo, and if any entries are found, then the query
* is rewritten to be field1 == 'foo' or field2 == 'foo', etc. This is then passed
* down the optimized query path which uses the intersecting iterators on the partitioned
* table.
*
* <b>Boolean expression</b>
* field == 'foo' - For fielded queries, those that contain a field, an operator, and a literal (string or number),
* the query is parsed and the set of eventFields in the query that are indexed is determined by
* querying the metadata table. Depending on the conjunctions in the query (or, and, not) and the
* eventFields that are indexed, the query may be sent down the optimized path or the full scan path.
*
* We are not supporting all of the operators that JEXL supports at this time. We are supporting the following operators:
*
* ==, !=, >, ≥, <, ≤, =~, and !~
*
* Custom functions can be created and registered with the Jexl engine. The functions can be used in the queries in conjunction
* with other supported operators. A sample function has been created, called between, and is bound to the 'f' namespace. An
* example using this function is : "f:between(LATITUDE,60.0, 70.0)"
*
* <h2>Constraints on Query Structure</h2>
* Queries that are sent to this class need to be formatted such that there is a space on either side of the operator. We are
* rewriting the query in some cases and the current implementation is expecting a space on either side of the operator. If
* an error occurs in the evaluation we are skipping the event.
*
* <h2>Notes on Optimization</h2>
* Queries that meet any of the following criteria will perform a full scan of the events in the partitioned table:
*
* 1. An 'or' conjunction exists in the query but not all of the terms are indexed.
* 2. No indexed terms exist in the query
* 3. An unsupported operator exists in the query
*
* </pre>
*
*/
public abstract class AbstractQueryLogic {
protected static Logger log = Logger.getLogger(AbstractQueryLogic.class);
/**
 * Query-parameter key naming the set of datatypes the query should be limited to.
 */
public static final String DATATYPE_FILTER_SET = "datatype.filter.set";
// Internal control-flow exception: thrown when the index lookup finds nothing usable so that
// the caller abandons the optimized path and falls back to a full scan.
private static class DoNotPerformOptimizedQueryException extends Exception {
private static final long serialVersionUID = 1L;
}
/**
 * Object that is used to hold ranges found in the index. Subclasses may compute the final range
 * set in various ways.
 */
public static abstract class IndexRanges {
// Mapping of normalized index values back to the literals the user originally supplied.
private Map<String,String> indexValuesToOriginalValues = null;
// Field name -> normalized value(s) under which the term was found in the global index.
private Multimap<String,String> fieldNamesAndValues = HashMultimap.create();
// Term -> total number of matching entries observed in the index.
private Map<String,Long> termCardinality = new HashMap<>();
// Term -> sorted ranges; subclasses decide how these are merged in getRanges().
protected Map<String,TreeSet<Range>> ranges = new HashMap<>();
public Multimap<String,String> getFieldNamesAndValues() {
return fieldNamesAndValues;
}
public void setFieldNamesAndValues(Multimap<String,String> fieldNamesAndValues) {
this.fieldNamesAndValues = fieldNamesAndValues;
}
public final Map<String,Long> getTermCardinality() {
return termCardinality;
}
public Map<String,String> getIndexValuesToOriginalValues() {
return indexValuesToOriginalValues;
}
public void setIndexValuesToOriginalValues(Map<String,String> indexValuesToOriginalValues) {
this.indexValuesToOriginalValues = indexValuesToOriginalValues;
}
// Adds a range found in the index for the given term.
public abstract void add(String term, Range r);
// Computes the final set of ranges to scan; merge semantics are subclass-specific.
public abstract Set<Range> getRanges();
}
/**
 * Object that computes the ranges by unioning all of the ranges for all of the terms together. In
 * the case where ranges overlap, the largest range is used.
 */
public static class UnionIndexRanges extends IndexRanges {

  // All ranges are stored under this single key; the term argument to add() is ignored.
  public static final String DEFAULT_KEY = "default";

  public UnionIndexRanges() {
    this.ranges.put(DEFAULT_KEY, new TreeSet<Range>());
  }

  /**
   * Returns the union of all added ranges, dropping event-specific ranges whose partition is
   * already fully covered by a whole-partition range. Relies on the TreeSet ordering: ranges
   * covering an entire partition sort before event ranges in the same partition.
   */
  @Override
  public Set<Range> getRanges() {
    Set<Text> shardsAdded = new HashSet<>();
    Set<Range> returnSet = new HashSet<>();
    for (Range r : ranges.get(DEFAULT_KEY)) {
      if (!shardsAdded.contains(r.getStartKey().getRow())) {
        // Only remember the partition when the range covers it entirely.
        // NOTE(review): a whole-partition range built via new Range(row) appears to have an
        // *empty* column family rather than null, so this null test may never fire — confirm
        // whether the intended check is getColumnFamily().getLength() == 0.
        if (r.getStartKey().getColumnFamily() == null) {
          shardsAdded.add(r.getStartKey().getRow());
        }
        returnSet.add(r);
      } else if (log.isDebugEnabled()) {
        // Previously this was an unconditional log.info per skipped range (a debugging
        // leftover — note the commented-out isTraceEnabled guard); demoted to guarded DEBUG
        // to avoid flooding the logs on large queries.
        log.debug("Skipping event specific range: " + r.toString()
            + " because its partition is already covered by a whole-partition range");
      }
    }
    return returnSet;
  }

  @Override
  public void add(String term, Range r) {
    ranges.get(DEFAULT_KEY).add(r);
  }
}
// Name of the metadata table (maps field names to datatype/normalizer information).
private String metadataTableName;
// Name of the global (forward) index table.
private String indexTableName;
// Name of the global reverse index table; presumably used for leading-wildcard lookups —
// confirm against RangeCalculator usage.
private String reverseIndexTableName;
// Name of the partitioned event table holding the documents themselves.
private String tableName;
// Number of threads used for index lookups (passed to getTermIndexInformation).
private int queryThreads = 8;
// ReadAheadIterator configuration; kept as Strings as received from configuration.
private String readAheadQueueSize;
private String readAheadTimeOut;
private boolean useReadAheadIterator;
// Kryo instance used to deserialize EventFields values in createDocument().
private Kryo kryo = new Kryo();
// Shared scratch buffer for deserialization; makes createDocument() non-thread-safe.
private EventFields eventFields = new EventFields();
// Fields that are returned to the caller but excluded from query-time evaluation.
private List<String> unevaluatedFields = null;
// Cache of instantiated Normalizer objects keyed by class, so each class is built once.
private Map<Class<? extends Normalizer>,Normalizer> normalizerCacheMap = new HashMap<>();
private static final String NULL_BYTE = "\u0000";
public AbstractQueryLogic() {
super();
// Register the EventFields serializer with this instance's Kryo before any values are decoded.
EventFields.initializeKryo(kryo);
}
/**
 * Queries the metadata table to determine which of the supplied query terms are indexed and,
 * for each, which {@link Normalizer} classes apply per datatype.
 *
 * @param c Accumulo connector
 * @param auths authorizations used for the metadata scan
 * @param queryLiterals field names extracted from the query (looked up upper-cased)
 * @param datatypes optional set of datatypes; entries for other types are skipped (null = all)
 * @return map of indexed field name to a multimap of datatype to normalizer classes
 * @throws TableNotFoundException if the metadata table does not exist
 * @throws InstantiationException if a normalizer class cannot be instantiated
 * @throws IllegalAccessException if a normalizer constructor is not accessible
 */
protected Map<String,Multimap<String,Class<? extends Normalizer>>> findIndexedTerms(Connector c,
    Authorizations auths, Set<String> queryLiterals, Set<String> datatypes)
    throws TableNotFoundException, InstantiationException, IllegalAccessException {
  Map<String,Multimap<String,Class<? extends Normalizer>>> results = new HashMap<>();
  for (String literal : queryLiterals) {
    if (log.isDebugEnabled()) {
      log.debug("Querying " + this.getMetadataTableName() + " table for " + literal);
    }
    // Metadata rows are stored upper-cased.
    Range range = new Range(literal.toUpperCase());
    Scanner scanner = c.createScanner(this.getMetadataTableName(), auths);
    scanner.setRange(range);
    scanner.fetchColumnFamily(new Text(WikipediaMapper.METADATA_INDEX_COLUMN_FAMILY));
    for (Entry<Key,Value> entry : scanner) {
      if (!results.containsKey(literal)) {
        Multimap<String,Class<? extends Normalizer>> m = HashMultimap.create();
        results.put(literal, m);
      }
      // The column qualifier contains the datatype and normalizer class, NULL-byte separated.
      String colq = entry.getKey().getColumnQualifier().toString();
      if (null == colq) {
        log.warn("ColumnQualifier null in EventMetadata for key: " + entry.getKey().toString());
        continue;
      }
      int idx = colq.indexOf('\0');
      if (idx == -1) {
        // Fix: this warning was previously nested inside a contains("\0") guard and could
        // never fire; malformed entries were mis-reported as having a null qualifier.
        log.warn("EventMetadata entry did not contain NULL byte: " + entry.getKey().toString());
        continue;
      }
      String type = colq.substring(0, idx);
      // If types are specified and this type is not in the list then skip it.
      if (null != datatypes && !datatypes.contains(type)) {
        continue;
      }
      try {
        @SuppressWarnings("unchecked")
        Class<? extends Normalizer> clazz =
            (Class<? extends Normalizer>) Class.forName(colq.substring(idx + 1));
        if (!normalizerCacheMap.containsKey(clazz)) {
          // Instantiate each normalizer class at most once.
          normalizerCacheMap.put(clazz, clazz.newInstance());
        }
        results.get(literal).put(type, clazz);
      } catch (ClassNotFoundException e) {
        log.error("Unable to find normalizer on class path: " + colq.substring(idx + 1), e);
        // Fall back to the default normalizer so the term remains usable.
        results.get(literal).put(type, LcNoDiacriticsNormalizer.class);
      }
    }
  }
  if (log.isDebugEnabled()) {
    log.debug("METADATA RESULTS: " + results.toString());
  }
  return results;
}
/**
 * Performs a lookup in the global index for a single non-fielded term.
 *
 * @param c
 * Accumulo connection
 * @param auths
 * authorizations for the index scan
 * @param value
 * the raw (un-normalized) term the user searched for
 * @param datatypes
 * - optional list of types (null = all)
 * @return ranges for the partitions/events where the term occurs
 * @throws TableNotFoundException
 * if the index table does not exist
 */
protected abstract IndexRanges getTermIndexInformation(Connector c, Authorizations auths,
String value, Set<String> datatypes) throws TableNotFoundException;
/**
 * Performs a lookup in the global index / reverse index and returns a RangeCalculator
 *
 * @param c
 * Accumulo connection
 * @param auths
 * authset for queries
 * @param indexedTerms
 * multimap of indexed field name and Normalizers used
 * @param terms
 * multimap of field name and QueryTerm object
 * @param indexTableName
 * name of the forward index table
 * @param reverseIndexTableName
 * name of the reverse index table
 * @param queryString
 * original query string
 * @param queryThreads
 * number of threads to use for the index scans
 * @param datatypes
 * - optional list of types (null = all)
 * @return range calculator, already executed against the index
 */
protected abstract RangeCalculator getTermIndexInformation(Connector c, Authorizations auths,
Multimap<String,Normalizer> indexedTerms, Multimap<String,QueryTerm> terms,
String indexTableName, String reverseIndexTableName, String queryString, int queryThreads,
Set<String> datatypes)
throws TableNotFoundException, org.apache.commons.jexl2.parser.ParseException;
/**
 * Computes the range(s) to scan when the optimized query path cannot be taken and the
 * partitioned table must be scanned in full.
 *
 * @param begin
 * begin date of the query (implementations may ignore it)
 * @param end
 * end date of the query (implementations may ignore it)
 * @param terms
 * multimap of field name and QueryTerm object from the parsed query
 * @return ranges covering everything that must be scanned
 */
protected abstract Collection<Range> getFullScanRange(Date begin, Date end,
Multimap<String,QueryTerm> terms);
// --- Simple accessors for table names and scan-tuning parameters ---
public String getMetadataTableName() {
return metadataTableName;
}
public String getIndexTableName() {
return indexTableName;
}
public String getTableName() {
return tableName;
}
public void setMetadataTableName(String metadataTableName) {
this.metadataTableName = metadataTableName;
}
public void setIndexTableName(String indexTableName) {
this.indexTableName = indexTableName;
}
public void setTableName(String tableName) {
this.tableName = tableName;
}
public int getQueryThreads() {
return queryThreads;
}
public void setQueryThreads(int queryThreads) {
this.queryThreads = queryThreads;
}
public String getReadAheadQueueSize() {
return readAheadQueueSize;
}
public String getReadAheadTimeOut() {
return readAheadTimeOut;
}
public boolean isUseReadAheadIterator() {
return useReadAheadIterator;
}
public void setReadAheadQueueSize(String readAheadQueueSize) {
this.readAheadQueueSize = readAheadQueueSize;
}
public void setReadAheadTimeOut(String readAheadTimeOut) {
this.readAheadTimeOut = readAheadTimeOut;
}
public void setUseReadAheadIterator(boolean useReadAheadIterator) {
this.useReadAheadIterator = useReadAheadIterator;
}
public String getReverseIndexTableName() {
return reverseIndexTableName;
}
public void setReverseIndexTableName(String reverseIndexTableName) {
this.reverseIndexTableName = reverseIndexTableName;
}
public List<String> getUnevaluatedFields() {
return unevaluatedFields;
}
public void setUnevaluatedFields(List<String> unevaluatedFields) {
this.unevaluatedFields = unevaluatedFields;
}
// Convenience overload: accepts a comma-separated list. Note the entries are NOT trimmed,
// so callers must not include whitespace around the commas.
public void setUnevaluatedFields(String unevaluatedFieldList) {
this.unevaluatedFields = new ArrayList<>();
for (String field : unevaluatedFieldList.split(",")) {
this.unevaluatedFields.add(field);
}
}
/**
 * Deserializes the EventFields payload in {@code value} into a Document, setting the document
 * id from the column family and appending a DOCUMENT pointer field for content retrieval.
 * NOTE(review): reuses the shared {@code eventFields}/{@code kryo} instance state, so this
 * method is not thread-safe — confirm callers never share a logic instance across threads.
 */
public Document createDocument(Key key, Value value) {
Document doc = new Document();
eventFields.clear();
ByteBuffer buf = ByteBuffer.wrap(value.get());
eventFields.readObjectData(kryo, buf);
// Set the id to the document id which is located in the colf
String row = key.getRow().toString();
String colf = key.getColumnFamily().toString();
// Column family layout is datatype \0 id; a missing NULL byte here would make idx == -1
// and break the substrings below — assumed guaranteed by the ingest format.
int idx = colf.indexOf(NULL_BYTE);
String type = colf.substring(0, idx);
String id = colf.substring(idx + 1);
doc.setId(id);
// Copy every deserialized field/value pair into the Document (values are UTF-8 bytes).
for (Entry<String,Collection<FieldValue>> entry : eventFields.asMap().entrySet()) {
for (FieldValue fv : entry.getValue()) {
Field val = new Field();
val.setFieldName(entry.getKey());
val.setFieldValue(new String(fv.getValue(), Charset.forName("UTF-8")));
doc.getFields().add(val);
}
}
// Add the pointer for the content.
Field docPointer = new Field();
docPointer.setFieldName("DOCUMENT");
docPointer.setFieldValue("DOCUMENT:" + row + "/" + type + "/" + id);
doc.getFields().add(docPointer);
return doc;
}
/**
 * Returns the grouping key for a result entry: the column family of the event key,
 * which contains the datatype and UID.
 */
public String getResultsKey(Entry<Key,Value> key) {
// Use the colf from the table, it contains the uuid and datatype
return key.getKey().getColumnFamily().toString();
}
public Results runQuery(Connector connector, List<String> authorizations, String query,
Date beginDate, Date endDate, Set<String> types) {
if (StringUtils.isEmpty(query)) {
throw new IllegalArgumentException(
"NULL QueryNode reference passed to " + this.getClass().getSimpleName());
}
Set<Range> ranges = new HashSet<>();
Set<String> typeFilter = types;
String array[] = authorizations.toArray(new String[0]);
Authorizations auths = new Authorizations(array);
Results results = new Results();
// Get the query string
String queryString = query;
StopWatch abstractQueryLogic = new StopWatch();
StopWatch optimizedQuery = new StopWatch();
StopWatch queryGlobalIndex = new StopWatch();
StopWatch optimizedEventQuery = new StopWatch();
StopWatch fullScanQuery = new StopWatch();
StopWatch processResults = new StopWatch();
abstractQueryLogic.start();
StopWatch parseQuery = new StopWatch();
parseQuery.start();
QueryParser parser;
try {
if (log.isDebugEnabled()) {
log.debug("ShardQueryLogic calling QueryParser.execute");
}
parser = new QueryParser();
parser.execute(queryString);
} catch (org.apache.commons.jexl2.parser.ParseException e1) {
throw new IllegalArgumentException("Error parsing query", e1);
}
int hash = parser.getHashValue();
parseQuery.stop();
if (log.isDebugEnabled()) {
log.debug(hash + " Query: " + queryString);
}
Set<String> fields = new HashSet<>();
for (String f : parser.getQueryIdentifiers()) {
fields.add(f);
}
if (log.isDebugEnabled()) {
log.debug("getQueryIdentifiers: " + parser.getQueryIdentifiers().toString());
}
// Remove any negated fields from the fields list, we don't want to lookup negated fields
// in the index.
fields.removeAll(parser.getNegatedTermsForOptimizer());
if (log.isDebugEnabled()) {
log.debug("getQueryIdentifiers: " + parser.getQueryIdentifiers().toString());
}
// Get the mapping of field name to QueryTerm object from the query. The query term object
// contains the operator, whether its negated or not, and the literal to test against.
Multimap<String,QueryTerm> terms = parser.getQueryTerms();
// Find out which terms are indexed
// TODO: Should we cache indexed terms or does that not make sense since we are always
// loading data.
StopWatch queryMetadata = new StopWatch();
queryMetadata.start();
Map<String,Multimap<String,Class<? extends Normalizer>>> metadataResults;
try {
metadataResults = findIndexedTerms(connector, auths, fields, typeFilter);
} catch (Exception e1) {
throw new RuntimeException("Error in metadata lookup", e1);
}
// Create a map of indexed term to set of normalizers for it
Multimap<String,Normalizer> indexedTerms = HashMultimap.create();
for (Entry<String,Multimap<String,Class<? extends Normalizer>>> entry : metadataResults
.entrySet()) {
// Get the normalizer from the normalizer cache
for (Class<? extends Normalizer> clazz : entry.getValue().values()) {
indexedTerms.put(entry.getKey(), normalizerCacheMap.get(clazz));
}
}
queryMetadata.stop();
if (log.isDebugEnabled()) {
log.debug(hash + " Indexed Terms: " + indexedTerms.toString());
}
Set<String> orTerms = parser.getOrTermsForOptimizer();
// Iterate over the query terms to get the operators specified in the query.
ArrayList<String> unevaluatedExpressions = new ArrayList<>();
boolean unsupportedOperatorSpecified = false;
for (Entry<String,QueryTerm> entry : terms.entries()) {
if (null == entry.getValue()) {
continue;
}
if (null != this.unevaluatedFields
&& this.unevaluatedFields.contains(entry.getKey().trim())) {
unevaluatedExpressions.add(entry.getKey().trim() + " " + entry.getValue().getOperator()
+ " " + entry.getValue().getValue());
}
int operator = JexlOperatorConstants.getJJTNodeType(entry.getValue().getOperator());
if (!(operator == ParserTreeConstants.JJTEQNODE || operator == ParserTreeConstants.JJTNENODE
|| operator == ParserTreeConstants.JJTLENODE || operator == ParserTreeConstants.JJTLTNODE
|| operator == ParserTreeConstants.JJTGENODE || operator == ParserTreeConstants.JJTGTNODE
|| operator == ParserTreeConstants.JJTERNODE)) {
unsupportedOperatorSpecified = true;
break;
}
}
if (null != unevaluatedExpressions) {
unevaluatedExpressions.trimToSize();
}
if (log.isDebugEnabled()) {
log.debug(hash + " unsupportedOperators: " + unsupportedOperatorSpecified + " indexedTerms: "
+ indexedTerms.toString() + " orTerms: " + orTerms.toString()
+ " unevaluatedExpressions: " + unevaluatedExpressions.toString());
}
// We can use the intersecting iterator over the field index as an optimization under the
// following conditions
//
// 1. No unsupported operators in the query.
// 2. No 'or' operators and at least one term indexed
// or
// 1. No unsupported operators in the query.
// 2. and all terms indexed
// or
// 1. All or'd terms are indexed. NOTE, this will potentially skip some queries and push to a
// full table scan
// // WE should look into finding a better way to handle whether we do an optimized query or
// not.
boolean optimizationSucceeded = false;
boolean orsAllIndexed = false;
if (orTerms.isEmpty()) {
orsAllIndexed = false;
} else {
orsAllIndexed = indexedTerms.keySet().containsAll(orTerms);
}
if (log.isDebugEnabled()) {
log.debug("All or terms are indexed");
}
if (!unsupportedOperatorSpecified
&& (((null == orTerms || orTerms.isEmpty()) && indexedTerms.size() > 0)
|| (fields.size() > 0 && indexedTerms.size() == fields.size()) || orsAllIndexed)) {
optimizedQuery.start();
// Set up intersecting iterator over field index.
// Get information from the global index for the indexed terms. The results object will
// contain the term
// mapped to an object that contains the total count, and partitions where this term is
// located.
// TODO: Should we cache indexed term information or does that not make sense since we are
// always loading data
queryGlobalIndex.start();
IndexRanges termIndexInfo;
try {
// If fields is null or zero, then it's probably the case that the user entered a value
// to search for with no fields. Check for the value in index.
if (fields.isEmpty()) {
termIndexInfo = this.getTermIndexInformation(connector, auths, queryString, typeFilter);
if (null != termIndexInfo && termIndexInfo.getRanges().isEmpty()) {
// Then we didn't find anything in the index for this query. This may happen for an
// indexed term that has wildcards
// in unhandled locations.
// Break out of here by throwing a named exception and do full scan
throw new DoNotPerformOptimizedQueryException();
}
// We need to rewrite the query string here so that it's valid.
if (termIndexInfo instanceof UnionIndexRanges) {
UnionIndexRanges union = (UnionIndexRanges) termIndexInfo;
StringBuilder buf = new StringBuilder();
String sep = "";
for (String fieldName : union.getFieldNamesAndValues().keySet()) {
buf.append(sep).append(fieldName).append(" == ");
if (!(queryString.startsWith("'") && queryString.endsWith("'"))) {
buf.append("'").append(queryString).append("'");
} else {
buf.append(queryString);
}
sep = " or ";
}
if (log.isDebugEnabled()) {
log.debug("Rewrote query for non-fielded single term query: " + queryString + " to "
+ buf.toString());
}
queryString = buf.toString();
} else {
throw new RuntimeException("Unexpected IndexRanges implementation");
}
} else {
RangeCalculator calc = this.getTermIndexInformation(connector, auths, indexedTerms, terms,
this.getIndexTableName(), this.getReverseIndexTableName(), queryString,
this.queryThreads, typeFilter);
if (null == calc.getResult() || calc.getResult().isEmpty()) {
// Then we didn't find anything in the index for this query. This may happen for an
// indexed term that has wildcards
// in unhandled locations.
// Break out of here by throwing a named exception and do full scan
throw new DoNotPerformOptimizedQueryException();
}
termIndexInfo = new UnionIndexRanges();
termIndexInfo.setIndexValuesToOriginalValues(calc.getIndexValues());
termIndexInfo.setFieldNamesAndValues(calc.getIndexEntries());
termIndexInfo.getTermCardinality().putAll(calc.getTermCardinalities());
for (Range r : calc.getResult()) {
// foo is a placeholder and is ignored.
termIndexInfo.add("foo", r);
}
}
} catch (TableNotFoundException e) {
log.error(this.getIndexTableName() + "not found", e);
throw new RuntimeException(this.getIndexTableName() + "not found", e);
} catch (org.apache.commons.jexl2.parser.ParseException e) {
throw new RuntimeException("Error determining ranges for query: " + queryString, e);
} catch (DoNotPerformOptimizedQueryException e) {
log.info("Indexed fields not found in index, performing full scan");
termIndexInfo = null;
}
queryGlobalIndex.stop();
// Determine if we should proceed with optimized query based on results from the global index
boolean proceed = false;
if (null == termIndexInfo || termIndexInfo.getFieldNamesAndValues().values().size() == 0) {
proceed = false;
} else if (null != orTerms && orTerms.size() > 0
&& (termIndexInfo.getFieldNamesAndValues().values().size() == indexedTerms.size())) {
proceed = true;
} else if (termIndexInfo.getFieldNamesAndValues().values().size() > 0) {
proceed = true;
} else if (orsAllIndexed) {
proceed = true;
} else {
proceed = false;
}
if (log.isDebugEnabled()) {
log.debug("Proceed with optimized query: " + proceed);
if (null != termIndexInfo) {
log.debug("termIndexInfo.getTermsFound().size(): "
+ termIndexInfo.getFieldNamesAndValues().values().size() + " indexedTerms.size: "
+ indexedTerms.size() + " fields.size: " + fields.size());
}
}
if (proceed) {
if (log.isDebugEnabled()) {
log.debug(hash + " Performing optimized query");
}
// Use the scan ranges from the GlobalIndexRanges object as the ranges for the batch scanner
ranges = termIndexInfo.getRanges();
if (log.isDebugEnabled()) {
log.info(hash + " Ranges: count: " + ranges.size() + ", " + ranges.toString());
}
// Create BatchScanner, set the ranges, and setup the iterators.
optimizedEventQuery.start();
BatchScanner bs = null;
try {
bs = connector.createBatchScanner(this.getTableName(), auths, queryThreads);
bs.setRanges(ranges);
IteratorSetting si = new IteratorSetting(21, "eval", OptimizedQueryIterator.class);
if (log.isDebugEnabled()) {
log.debug(
"Setting scan option: " + EvaluatingIterator.QUERY_OPTION + " to " + queryString);
}
// Set the query option
si.addOption(EvaluatingIterator.QUERY_OPTION, queryString);
// Set the Indexed Terms List option. This is the field name and normalized field value
// pair separated
// by a comma.
StringBuilder buf = new StringBuilder();
String sep = "";
for (Entry<String,String> entry : termIndexInfo.getFieldNamesAndValues().entries()) {
buf.append(sep);
buf.append(entry.getKey());
buf.append(":");
buf.append(termIndexInfo.getIndexValuesToOriginalValues().get(entry.getValue()));
buf.append(":");
buf.append(entry.getValue());
if (sep.equals("")) {
sep = ";";
}
}
if (log.isDebugEnabled()) {
log.debug("Setting scan option: " + FieldIndexQueryReWriter.INDEXED_TERMS_LIST + " to "
+ buf.toString());
}
FieldIndexQueryReWriter rewriter = new FieldIndexQueryReWriter();
String q = "";
try {
q = queryString;
q = rewriter.applyCaseSensitivity(q, true, false);// Set upper/lower case for
// fieldname/fieldvalue
Map<String,String> opts = new HashMap<>();
opts.put(FieldIndexQueryReWriter.INDEXED_TERMS_LIST, buf.toString());
q = rewriter.removeNonIndexedTermsAndInvalidRanges(q, opts);
q = rewriter.applyNormalizedTerms(q, opts);
if (log.isDebugEnabled()) {
log.debug("runServerQuery, FieldIndex Query: " + q);
}
} catch (org.apache.commons.jexl2.parser.ParseException ex) {
log.error("Could not parse query, Jexl ParseException: " + ex);
} catch (Exception ex) {
log.error("Problem rewriting query, Exception: " + ex.getMessage());
}
si.addOption(BooleanLogicIterator.FIELD_INDEX_QUERY, q);
// Set the term cardinality option
sep = "";
buf.delete(0, buf.length());
for (Entry<String,Long> entry : termIndexInfo.getTermCardinality().entrySet()) {
buf.append(sep);
buf.append(entry.getKey());
buf.append(":");
buf.append(entry.getValue());
sep = ",";
}
if (log.isDebugEnabled()) {
log.debug("Setting scan option: " + BooleanLogicIterator.TERM_CARDINALITIES + " to "
+ buf.toString());
}
si.addOption(BooleanLogicIterator.TERM_CARDINALITIES, buf.toString());
if (this.useReadAheadIterator) {
if (log.isDebugEnabled()) {
log.debug("Enabling read ahead iterator with queue size: " + this.readAheadQueueSize
+ " and timeout: " + this.readAheadTimeOut);
}
si.addOption(ReadAheadIterator.QUEUE_SIZE, this.readAheadQueueSize);
si.addOption(ReadAheadIterator.TIMEOUT, this.readAheadTimeOut);
}
if (null != unevaluatedExpressions) {
StringBuilder unevaluatedExpressionList = new StringBuilder();
String sep2 = "";
for (String exp : unevaluatedExpressions) {
unevaluatedExpressionList.append(sep2).append(exp);
sep2 = ",";
}
if (log.isDebugEnabled()) {
log.debug("Setting scan option: " + EvaluatingIterator.UNEVALUTED_EXPRESSIONS + " to "
+ unevaluatedExpressionList.toString());
}
si.addOption(EvaluatingIterator.UNEVALUTED_EXPRESSIONS,
unevaluatedExpressionList.toString());
}
bs.addScanIterator(si);
processResults.start();
processResults.suspend();
long count = 0;
for (Entry<Key,Value> entry : bs) {
count++;
// The key that is returned by the EvaluatingIterator is not the same key that is in
// the table. The value that is returned by the EvaluatingIterator is a kryo
// serialized EventFields object.
processResults.resume();
Document d = this.createDocument(entry.getKey(), entry.getValue());
results.getResults().add(d);
processResults.suspend();
}
log.info(count + " matching entries found in optimized query.");
optimizationSucceeded = true;
processResults.stop();
} catch (TableNotFoundException e) {
log.error(this.getTableName() + "not found", e);
throw new RuntimeException(this.getIndexTableName() + "not found", e);
} finally {
if (bs != null) {
bs.close();
}
}
optimizedEventQuery.stop();
}
optimizedQuery.stop();
}
// WE should look into finding a better way to handle whether we do an optimized query or not.
// We are not setting up an else condition here because we may have aborted the logic early in
// the if statement.
if (!optimizationSucceeded || ((null != orTerms && orTerms.size() > 0)
&& (indexedTerms.size() != fields.size()) && !orsAllIndexed)) {
// if (!optimizationSucceeded || ((null != orTerms && orTerms.size() > 0) &&
// (indexedTerms.size() != fields.size()))) {
fullScanQuery.start();
if (log.isDebugEnabled()) {
log.debug(hash + " Performing full scan query");
}
// Set up a full scan using the date ranges from the query
// Create BatchScanner, set the ranges, and setup the iterators.
BatchScanner bs = null;
try {
// The ranges are the start and end dates
Collection<Range> r = getFullScanRange(beginDate, endDate, terms);
ranges.addAll(r);
if (log.isDebugEnabled()) {
log.debug(hash + " Ranges: count: " + ranges.size() + ", " + ranges.toString());
}
bs = connector.createBatchScanner(this.getTableName(), auths, queryThreads);
bs.setRanges(ranges);
IteratorSetting si = new IteratorSetting(22, "eval", EvaluatingIterator.class);
// Create datatype regex if needed
if (null != typeFilter) {
StringBuilder buf = new StringBuilder();
String s = "";
for (String type : typeFilter) {
buf.append(s).append(type).append(".*");
s = "|";
}
if (log.isDebugEnabled()) {
log.debug("Setting colf regex iterator to: " + buf.toString());
}
IteratorSetting ri = new IteratorSetting(21, "typeFilter", RegExFilter.class);
RegExFilter.setRegexs(ri, null, buf.toString(), null, null, false);
bs.addScanIterator(ri);
}
if (log.isDebugEnabled()) {
log.debug(
"Setting scan option: " + EvaluatingIterator.QUERY_OPTION + " to " + queryString);
}
si.addOption(EvaluatingIterator.QUERY_OPTION, queryString);
if (null != unevaluatedExpressions) {
StringBuilder unevaluatedExpressionList = new StringBuilder();
String sep2 = "";
for (String exp : unevaluatedExpressions) {
unevaluatedExpressionList.append(sep2).append(exp);
sep2 = ",";
}
if (log.isDebugEnabled()) {
log.debug("Setting scan option: " + EvaluatingIterator.UNEVALUTED_EXPRESSIONS + " to "
+ unevaluatedExpressionList.toString());
}
si.addOption(EvaluatingIterator.UNEVALUTED_EXPRESSIONS,
unevaluatedExpressionList.toString());
}
bs.addScanIterator(si);
long count = 0;
processResults.start();
processResults.suspend();
for (Entry<Key,Value> entry : bs) {
count++;
// The key that is returned by the EvaluatingIterator is not the same key that is in
// the partition table. The value that is returned by the EvaluatingIterator is a kryo
// serialized EventFields object.
processResults.resume();
Document d = this.createDocument(entry.getKey(), entry.getValue());
results.getResults().add(d);
processResults.suspend();
}
processResults.stop();
log.info(count + " matching entries found in full scan query.");
} catch (TableNotFoundException e) {
log.error(this.getTableName() + "not found", e);
} finally {
if (bs != null) {
bs.close();
}
}
fullScanQuery.stop();
}
log.info("AbstractQueryLogic: " + queryString + " " + timeString(abstractQueryLogic.getTime()));
log.info(" 1) parse query " + timeString(parseQuery.getTime()));
log.info(" 2) query metadata " + timeString(queryMetadata.getTime()));
log.info(" 3) full scan query " + timeString(fullScanQuery.getTime()));
log.info(" 3) optimized query " + timeString(optimizedQuery.getTime()));
log.info(" 1) process results " + timeString(processResults.getTime()));
log.info(" 1) query global index " + timeString(queryGlobalIndex.getTime()));
log.info(hash + " Query completed.");
return results;
}
private static String timeString(long millis) {
return String.format("%4.2f", millis / 1000.);
}
}
| 6,215 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/logic/ContentLogic.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.logic;
import java.util.List;
import java.util.Map.Entry;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.wikisearch.ingest.WikipediaMapper;
import org.apache.accumulo.examples.wikisearch.sample.Document;
import org.apache.accumulo.examples.wikisearch.sample.Field;
import org.apache.accumulo.examples.wikisearch.sample.Results;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
/**
* This query table implementation returns a Results object that contains documents from the wiki table. The query will contain the partition id, wikitype, and
* UID so that we can seek directly to the document. The document is stored as base64 compressed binary in the Accumulo table. We will decompress the data so
* that it is base64 encoded binary data in the Results object.
*
* The query that needs to be passed to the web service is: DOCUMENT:partitionId/wikitype/uid.
*
*/
public class ContentLogic {

  private static final Logger log = Logger.getLogger(ContentLogic.class);

  private static final String NULL_BYTE = "\u0000";

  // Expected query form: DOCUMENT:partitionId/wikitype/uid.
  // Pattern is immutable and thread-safe, so compile it once instead of per instance.
  private static final Pattern QUERY_PATTERN = Pattern.compile("^DOCUMENT:(.*)/(.*)/(.*)$");

  private String tableName = null;

  public String getTableName() {
    return tableName;
  }

  public void setTableName(String tableName) {
    this.tableName = tableName;
  }

  /**
   * Fetches the document identified by the query from the wiki table and returns it
   * base64-decoded inside a {@link Results} object.
   *
   * @param connector Accumulo connector used to create the scanner
   * @param query query of the form {@code DOCUMENT:partitionId/wikitype/uid}
   * @param authorizations scan authorizations, joined with '|' into a single token
   * @return Results holding one Document per matching table entry (normally at most one)
   * @throws IllegalArgumentException if the query does not match the expected pattern
   * @throws RuntimeException if the configured table does not exist
   */
  public Results runQuery(Connector connector, String query, List<String> authorizations) {
    Results results = new Results();
    Authorizations auths = new Authorizations(StringUtils.join(authorizations, "|"));

    Matcher match = QUERY_PATTERN.matcher(query);
    if (!match.matches()) {
      // Fail fast on malformed queries before touching Accumulo.
      throw new IllegalArgumentException("Query does not match the pattern: DOCUMENT:partitionId/wikitype/uid, your query: " + query);
    }

    String partitionId = match.group(1);
    String wikitype = match.group(2);
    String id = match.group(3);
    log.debug("Received pieces: " + partitionId + ", " + wikitype + ", " + id);

    // Range covering exactly one document: [wikitype\0id, wikitype\0id\0)
    Key startKey = new Key(partitionId, WikipediaMapper.DOCUMENT_COLUMN_FAMILY, wikitype + NULL_BYTE + id);
    Key endKey = new Key(partitionId, WikipediaMapper.DOCUMENT_COLUMN_FAMILY, wikitype + NULL_BYTE + id + NULL_BYTE);
    Range r = new Range(startKey, true, endKey, false);
    log.debug("Setting range: " + r);

    try {
      Scanner scanner = connector.createScanner(this.getTableName(), auths);
      scanner.setRange(r);
      // This should in theory only match one thing.
      for (Entry<Key,Value> entry : scanner) {
        Document doc = new Document();
        doc.setId(id);
        Field val = new Field();
        val.setFieldName("DOCUMENT");
        // Stored cell value is base64 text; decode it back to the raw document bytes.
        // NOTE(review): new String(byte[]) uses the platform default charset — confirm the
        // ingest side produced charset-safe data, or pass an explicit charset here.
        val.setFieldValue(new String(Base64.decodeBase64(entry.getValue().toString())));
        doc.getFields().add(val);
        results.getResults().add(doc);
      }
    } catch (TableNotFoundException e) {
      throw new RuntimeException("Table not found: " + this.getTableName(), e);
    }
    return results;
  }
}
| 6,216 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/function/QueryFunctions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.function;
import org.apache.commons.lang.math.NumberUtils;
import org.apache.log4j.Logger;
public class QueryFunctions {

  protected static Logger log = Logger.getLogger(QueryFunctions.class);

  /**
   * Tests whether the field value, parsed as a double, lies in the inclusive range
   * [left, right].
   *
   * @param fieldValue the value to parse and test
   * @param left inclusive lower bound
   * @param right inclusive upper bound
   * @return true if the value parses and is within bounds, false otherwise
   */
  public static boolean between(String fieldValue, double left, double right) {
    try {
      // Primitive parse — no need to box into a Double.
      double value = Double.parseDouble(fieldValue);
      return value >= left && value <= right;
    } catch (NumberFormatException nfe) {
      // Unparseable values simply fail the predicate rather than aborting the query.
      return false;
    }
  }

  /**
   * Tests whether the field value, parsed as a long, lies in the inclusive range
   * [left, right].
   *
   * @param fieldValue the value to parse and test
   * @param left inclusive lower bound
   * @param right inclusive upper bound
   * @return true if the value parses and is within bounds, false otherwise
   */
  public static boolean between(String fieldValue, long left, long right) {
    try {
      long value = Long.parseLong(fieldValue);
      return value >= left && value <= right;
    } catch (NumberFormatException nfe) {
      return false;
    }
  }

  /**
   * Returns the absolute value of the field value, preserving the numeric type chosen by
   * {@code NumberUtils.createNumber(String)} where possible.
   *
   * @param fieldValue the value to parse
   * @return the absolute value, or {@code Integer.MIN_VALUE} if the value cannot be parsed
   */
  public static Number abs(String fieldValue) {
    try {
      Number value = NumberUtils.createNumber(fieldValue);
      if (null == value) {
        return (Number) Integer.MIN_VALUE;
      }
      if (value instanceof Long) {
        return Math.abs(value.longValue());
      }
      if (value instanceof Double) {
        return Math.abs(value.doubleValue());
      }
      if (value instanceof Float) {
        return Math.abs(value.floatValue());
      }
      if (value instanceof Integer) {
        return Math.abs(value.intValue());
      }
      // createNumber can also return BigInteger/BigDecimal for large inputs; previously those
      // fell through every branch and this method returned null, risking an NPE in callers.
      // Fall back to a double-based absolute value instead.
      return Math.abs(value.doubleValue());
    } catch (NumberFormatException nfe) {
      return (Number) Integer.MIN_VALUE;
    }
  }
}
| 6,217 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/parser/FieldIndexQueryReWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.parser;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import javax.swing.tree.DefaultMutableTreeNode;
import org.apache.accumulo.examples.wikisearch.parser.QueryParser.QueryTerm;
import org.apache.accumulo.examples.wikisearch.parser.RangeCalculator.RangeBounds;
import org.apache.commons.jexl2.parser.ASTAndNode;
import org.apache.commons.jexl2.parser.ASTEQNode;
import org.apache.commons.jexl2.parser.ASTERNode;
import org.apache.commons.jexl2.parser.ASTGENode;
import org.apache.commons.jexl2.parser.ASTGTNode;
import org.apache.commons.jexl2.parser.ASTJexlScript;
import org.apache.commons.jexl2.parser.ASTLENode;
import org.apache.commons.jexl2.parser.ASTLTNode;
import org.apache.commons.jexl2.parser.ASTNENode;
import org.apache.commons.jexl2.parser.ASTNRNode;
import org.apache.commons.jexl2.parser.ASTNotNode;
import org.apache.commons.jexl2.parser.ASTOrNode;
import org.apache.commons.jexl2.parser.ParseException;
import org.apache.commons.jexl2.parser.ParserTreeConstants;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
/**
* The server-side field index queries can only support operations on indexed fields. Additionally,
* queries that have differing ranges (i.e. one range at the fieldname level and another at the
* fieldValue level) are not currently supported. This class removes these conflicts from the query
* as well as sets proper capitalization configurations etc.
*
* Once the query has been modified, you can pass it to the BooleanLogicIterator on the server-side
* via the options map.
*
*/
public class FieldIndexQueryReWriter {
protected static final Logger log = Logger.getLogger(FieldIndexQueryReWriter.class);
public static final String INDEXED_TERMS_LIST = "INDEXED_TERMS_LIST"; // comma separated list of
// indexed terms.
public static Set<Integer> rangeNodeSet;
static {
rangeNodeSet = new HashSet<>();
rangeNodeSet.add(ParserTreeConstants.JJTLENODE);
rangeNodeSet.add(ParserTreeConstants.JJTLTNODE);
rangeNodeSet.add(ParserTreeConstants.JJTGENODE);
rangeNodeSet.add(ParserTreeConstants.JJTGTNODE);
rangeNodeSet = Collections.unmodifiableSet(rangeNodeSet);
}
/*
* Given a JEXL Query, rewrite it and return it.
*
* 1. ParseQuery 2. Transform query 3. Refactor query 4. remove non-indexed terms a. remove any
* tree conflicts b. collapse any branches 7. add normalized values 8. adjust for case sensitivity
* 9. add prefix.. but jexl chokes on null byte
*/
  /**
   * Sets the log4j level on this class's static logger; intended for debugging and tests.
   *
   * @param lev the log4j {@code Level} to apply
   */
  public static void setLogLevel(Level lev) {
    log.setLevel(lev);
  }
/**
*
* @return String representation of a given query.
*/
public String removeNonIndexedTermsAndInvalidRanges(String query, Map<String,String> options)
throws ParseException, Exception {
Multimap<String,String> indexedTerms = parseIndexedTerms(options);
RewriterTreeNode node = parseJexlQuery(query);
if (log.isDebugEnabled()) {
log.debug("Tree: " + node.getContents());
}
node = removeNonIndexedTerms(node, indexedTerms);
node = removeTreeConflicts(node, indexedTerms);
node = collapseBranches(node);
node = removeNegationViolations(node);
if (log.isDebugEnabled()) {
log.debug("Tree -NonIndexed: " + node.getContents());
}
return rebuildQueryFromTree(node);
}
/**
*
* @return String representation of a given query.
*/
public String applyNormalizedTerms(String query, Map<String,String> options)
throws ParseException, Exception {
if (log.isDebugEnabled()) {
log.debug("applyNormalizedTerms, query: " + query);
}
Multimap<String,String> normalizedTerms = parseIndexedTerms(options);
RewriterTreeNode node = parseJexlQuery(query);
if (log.isDebugEnabled()) {
log.debug("applyNormalizedTerms, Tree: " + node.getContents());
}
node = orNormalizedTerms(node, normalizedTerms);
if (log.isDebugEnabled()) {
log.debug("applyNormalizedTerms,Normalized: " + node.getContents());
}
return rebuildQueryFromTree(node);
}
/**
*
* @return String representation of a given query.
*/
public String applyCaseSensitivity(String query, boolean fNameUpper, boolean fValueUpper)
throws ParseException {
RewriterTreeNode node = parseJexlQuery(query);
if (log.isDebugEnabled()) {
log.debug("Tree: " + node.getContents());
}
node = applyCaseSensitivity(node, fNameUpper, fValueUpper);
if (log.isDebugEnabled()) {
log.debug("Case: " + node.getContents());
}
return rebuildQueryFromTree(node);
}
private String rebuildQueryFromTree(RewriterTreeNode node) {
if (node.isLeaf()) {
String fName = node.getFieldName();
String fValue = node.getFieldValue();
String operator = node.getOperator();
if (node.isNegated()) {
if (node.getType() == JexlOperatorConstants.JJTEQNODE) {
operator = JexlOperatorConstants.getOperator(JexlOperatorConstants.JJTNENODE);
} else if (node.getType() == JexlOperatorConstants.JJTERNODE) {
operator = JexlOperatorConstants.getOperator(JexlOperatorConstants.JJTNRNODE);
} else if (node.getType() == JexlOperatorConstants.JJTLTNODE) {
operator = JexlOperatorConstants.getOperator(JexlOperatorConstants.JJTGENODE);
} else if (node.getType() == JexlOperatorConstants.JJTLENODE) {
operator = JexlOperatorConstants.getOperator(JexlOperatorConstants.JJTGTNODE);
} else if (node.getType() == JexlOperatorConstants.JJTGTNODE) {
operator = JexlOperatorConstants.getOperator(JexlOperatorConstants.JJTLENODE);
} else if (node.getType() == JexlOperatorConstants.JJTGENODE) {
operator = JexlOperatorConstants.getOperator(JexlOperatorConstants.JJTLTNODE);
}
}
return fName + operator + "'" + fValue + "'";
} else {
List<String> parts = new ArrayList<>();
Enumeration<?> children = node.children();
while (children.hasMoreElements()) {
RewriterTreeNode child = (RewriterTreeNode) children.nextElement();
parts.add(rebuildQueryFromTree(child));
}
if (node.getType() == ParserTreeConstants.JJTJEXLSCRIPT) {
return org.apache.commons.lang.StringUtils.join(parts, "");
}
String op = " " + JexlOperatorConstants.getOperator(node.getType()) + " ";
if (log.isDebugEnabled()) {
log.debug("Operator: " + op);
}
String query = org.apache.commons.lang.StringUtils.join(parts, op);
query = "(" + query + ")";
return query;
}
}
/*
* Don't use this, Jexl currently chokes on null bytes in the query
*/
// public String applyFieldNamePrefix(String query, String prefix) throws ParseException {
// NuwaveTreeNode node = parseJexlQuery(query);
// if (log.isDebugEnabled()) {
// log.debug("Tree: " + node.getContents());
// }
// node = applyFieldNamePrefix(node, prefix);
// if (log.isDebugEnabled()) {
// log.debug("Prefix: " + node.getContents());
// }
// return null;
// }
private RewriterTreeNode parseJexlQuery(String query) throws ParseException {
if (log.isDebugEnabled()) {
log.debug("parseJexlQuery, query: " + query);
}
QueryParser parser = new QueryParser();
parser.execute(query);
TreeNode tree = parser.getIteratorTree();
RewriterTreeNode root = transformTreeNode(tree);
if (log.isDebugEnabled()) {
log.debug("parseJexlQuery, transformedTree: " + root.getContents());
}
root = refactorTree(root);
if (log.isDebugEnabled()) {
log.debug("parseJexlQuery, refactorTree: " + root.getContents());
}
return root;
}
/*
*
*/
private RewriterTreeNode transformTreeNode(TreeNode node) throws ParseException {
if (node.getType().equals(ASTEQNode.class) || node.getType().equals(ASTNENode.class)) {
if (log.isDebugEnabled()) {
log.debug("transformTreeNode, Equals Node");
}
Multimap<String,QueryTerm> terms = node.getTerms();
for (String fName : terms.keySet()) {
Collection<QueryTerm> values = terms.get(fName);
for (QueryTerm t : values) {
if (null == t || null == t.getValue()) {
continue;
}
String fValue = t.getValue().toString();
fValue = fValue.replaceAll("'", "");
boolean negated = t.getOperator().equals("!=");
RewriterTreeNode child =
new RewriterTreeNode(ParserTreeConstants.JJTEQNODE, fName, fValue, negated);
return child;
}
}
}
if (node.getType().equals(ASTERNode.class) || node.getType().equals(ASTNRNode.class)) {
if (log.isDebugEnabled()) {
log.debug("transformTreeNode, Regex Node");
}
Multimap<String,QueryTerm> terms = node.getTerms();
for (String fName : terms.keySet()) {
Collection<QueryTerm> values = terms.get(fName);
for (QueryTerm t : values) {
if (null == t || null == t.getValue()) {
continue;
}
String fValue = t.getValue().toString();
fValue = fValue.replaceAll("'", "");
boolean negated = node.getType().equals(ASTNRNode.class);
RewriterTreeNode child =
new RewriterTreeNode(ParserTreeConstants.JJTERNODE, fName, fValue, negated);
return child;
}
}
}
if (node.getType().equals(ASTLTNode.class) || node.getType().equals(ASTLENode.class)
|| node.getType().equals(ASTGTNode.class) || node.getType().equals(ASTGENode.class)) {
if (log.isDebugEnabled()) {
log.debug("transformTreeNode, LT/LE/GT/GE node");
}
Multimap<String,QueryTerm> terms = node.getTerms();
for (String fName : terms.keySet()) {
Collection<QueryTerm> values = terms.get(fName);
for (QueryTerm t : values) {
if (null == t || null == t.getValue()) {
continue;
}
String fValue = t.getValue().toString();
fValue = fValue.replaceAll("'", "").toLowerCase();
boolean negated = false; // to be negated, must be child of Not, which is handled
// elsewhere.
int mytype = JexlOperatorConstants.getJJTNodeType(t.getOperator());
RewriterTreeNode child = new RewriterTreeNode(mytype, fName, fValue, negated);
return child;
}
}
}
RewriterTreeNode returnNode = null;
if (node.getType().equals(ASTAndNode.class) || node.getType().equals(ASTOrNode.class)) {
int parentType = node.getType().equals(ASTAndNode.class) ? ParserTreeConstants.JJTANDNODE
: ParserTreeConstants.JJTORNODE;
if (log.isDebugEnabled()) {
log.debug("transformTreeNode, AND/OR node: " + parentType);
}
if (node.isLeaf() || !node.getTerms().isEmpty()) {
returnNode = new RewriterTreeNode(parentType);
Multimap<String,QueryTerm> terms = node.getTerms();
for (String fName : terms.keySet()) {
Collection<QueryTerm> values = terms.get(fName);
for (QueryTerm t : values) {
if (null == t || null == t.getValue()) {
continue;
}
String fValue = t.getValue().toString();
fValue = fValue.replaceAll("'", "");
boolean negated = t.getOperator().equals("!=");
int childType = JexlOperatorConstants.getJJTNodeType(t.getOperator());
RewriterTreeNode child = new RewriterTreeNode(childType, fName, fValue, negated);
if (log.isDebugEnabled()) {
log.debug("adding child node: " + child.getContents());
}
returnNode.add(child);
}
}
} else {
returnNode = new RewriterTreeNode(parentType);
}
} else if (node.getType().equals(ASTNotNode.class)) {
if (log.isDebugEnabled()) {
log.debug("transformTreeNode, NOT node");
}
if (node.isLeaf()) {
// NOTE: this should be cleaned up a bit.
Multimap<String,QueryTerm> terms = node.getTerms();
for (String fName : terms.keySet()) {
Collection<QueryTerm> values = terms.get(fName);
for (QueryTerm t : values) {
if (null == t || null == t.getValue()) {
continue;
}
String fValue = t.getValue().toString();
fValue = fValue.replaceAll("'", "").toLowerCase();
boolean negated = !t.getOperator().equals("!=");
int mytype = JexlOperatorConstants.getJJTNodeType(t.getOperator());
return new RewriterTreeNode(mytype, fName, fValue, negated);
}
}
} else {
returnNode = new RewriterTreeNode(ParserTreeConstants.JJTNOTNODE);
}
} else if (node.getType().equals(ASTJexlScript.class)
|| node.getType().getSimpleName().equals("RootNode")) {
if (log.isDebugEnabled()) {
log.debug("transformTreeNode, ROOT/JexlScript node");
}
if (node.isLeaf()) {
returnNode = new RewriterTreeNode(ParserTreeConstants.JJTJEXLSCRIPT);
// NOTE: this should be cleaned up a bit.
Multimap<String,QueryTerm> terms = node.getTerms();
for (String fName : terms.keySet()) {
Collection<QueryTerm> values = terms.get(fName);
for (QueryTerm t : values) {
if (null == t || null == t.getValue()) {
continue;
}
String fValue = t.getValue().toString();
fValue = fValue.replaceAll("'", "");
boolean negated = t.getOperator().equals("!=");
int mytype = JexlOperatorConstants.getJJTNodeType(t.getOperator());
RewriterTreeNode child = new RewriterTreeNode(mytype, fName, fValue, negated);
returnNode.add(child);
return returnNode;
}
}
} else {
returnNode = new RewriterTreeNode(ParserTreeConstants.JJTJEXLSCRIPT);
}
} else {
log.error("transformTreeNode, Currently Unsupported Node type: " + node.getClass().getName()
+ " \t" + node.getType());
}
for (TreeNode child : node.getChildren()) {
returnNode.add(transformTreeNode(child));
}
return returnNode;
}
  /**
   * Removes from the tree every leaf term that does not appear in the indexed-terms multimap
   * (keyed as {@code fieldName:fieldValue}), then cleans up AND/OR nodes left with zero or
   * one child. Range nodes are left untouched. The tree is modified in place.
   *
   * @param root the tree to prune
   * @param indexedTerms multimap whose keys are the {@code fieldName:fieldValue} pairs that
   *        are indexed
   * @return the (mutated) root
   * @throws Exception if indexedTerms is empty, or if pruning leaves the head node with no
   *         children (nothing left to search on)
   */
  private RewriterTreeNode removeNonIndexedTerms(RewriterTreeNode root,
      Multimap<String,String> indexedTerms) throws Exception {
    // public void removeNonIndexedTerms(BooleanLogicTreeNodeJexl myroot, String indexedTerms)
    // throws Exception {
    if (indexedTerms.isEmpty()) {
      throw new Exception("removeNonIndexedTerms, indexed Terms empty");
    }
    // NOTE: doing a depth first enumeration didn't work when I started
    // removing nodes halfway through. The following method does work,
    // it's essentially a reverse breadth first traversal.
    // Snapshot the nodes first so removal below cannot invalidate the live enumeration.
    List<RewriterTreeNode> nodes = new ArrayList<>();
    Enumeration<?> bfe = root.breadthFirstEnumeration();
    while (bfe.hasMoreElements()) {
      RewriterTreeNode node = (RewriterTreeNode) bfe.nextElement();
      nodes.add(node);
    }
    // walk backwards (deepest nodes first, so children are pruned before their parents
    // are inspected)
    for (int i = nodes.size() - 1; i >= 0; i--) {
      RewriterTreeNode node = nodes.get(i);
      if (log.isDebugEnabled()) {
        log.debug(
            "removeNonIndexedTerms, analyzing node: " + node.toString() + " " + node.printNode());
      }
      if (node.getType() == ParserTreeConstants.JJTANDNODE
          || node.getType() == ParserTreeConstants.JJTORNODE) {
        // If all of your children are gone, AND/OR has no purpose, remove
        if (node.getChildCount() == 0) {
          node.removeFromParent();
          // If AND/OR has only 1 child, attach it to the parent directly.
        } else if (node.getChildCount() == 1) {
          RewriterTreeNode p = (RewriterTreeNode) node.getParent();
          RewriterTreeNode c = (RewriterTreeNode) node.getFirstChild();
          node.removeFromParent();
          p.add(c);
        }
      } else if (node.getType() == ParserTreeConstants.JJTJEXLSCRIPT) { // Head node
        // If head node has no children, we have nothing to search on.
        // NOTE(review): this exception carries no message — consider adding one for
        // easier diagnosis.
        if (node.getChildCount() == 0) {
          throw new Exception();
        }
      } else if (rangeNodeSet.contains(node.getType())) { // leave it alone
        // leave ranges untouched, they'll be handled elsewhere.
        continue;
      } else {
        // Leaf term: keep it only if fieldName:fieldValue is a key in indexedTerms.
        if (log.isDebugEnabled()) {
          log.debug("removeNonIndexedTerms, Testing: " + node.getFieldName() + ":"
              + node.getFieldValue());
        }
        if (!indexedTerms
            .containsKey(node.getFieldName().toString() + ":" + node.getFieldValue().toString())) {
          if (log.isDebugEnabled()) {
            log.debug(node.getFieldName() + ":" + node.getFieldValue() + " is NOT indexed");
          }
          node.removeFromParent();
        } else {
          if (log.isDebugEnabled()) {
            log.debug(node.getFieldName() + ":" + node.getFieldValue() + " is indexed");
          }
        }
      }
    }
    return root;
  }
/**
 * Replaces each indexed leaf term with its normalized form(s). A term with several normalized
 * values is converted into an OR of one EQ child per value (the original negation flag is
 * pushed down onto each new child); a term with exactly one normalized value has its field
 * value replaced in place. AND/OR and head nodes are left untouched.
 *
 * @param myroot head of the query tree
 * @param indexedTerms multimap keyed by the original "field:value" query token, whose values
 *        are the normalized terms
 * @return the normalized tree (same instance as {@code myroot})
 * @throws Exception if {@code indexedTerms} is empty, the head node has no children, or any
 *         error occurs during the rewrite
 */
private RewriterTreeNode orNormalizedTerms(RewriterTreeNode myroot,
    Multimap<String,String> indexedTerms) throws Exception {
  // we have multimap of FieldName to multiple FieldValues
  if (indexedTerms.isEmpty()) {
    throw new Exception("indexed Terms empty");
  }
  try {
    // NOTE: doing a depth first enumeration didn't work when I started
    // removing nodes halfway through. The following method does work,
    // it's essentially a reverse breadth first traversal.
    List<RewriterTreeNode> nodes = new ArrayList<>();
    Enumeration<?> bfe = myroot.breadthFirstEnumeration();
    while (bfe.hasMoreElements()) {
      RewriterTreeNode node = (RewriterTreeNode) bfe.nextElement();
      nodes.add(node);
    }
    // walk backwards (children before parents)
    for (int i = nodes.size() - 1; i >= 0; i--) {
      RewriterTreeNode node = nodes.get(i);
      if (log.isDebugEnabled()) {
        log.debug(
            "orNormalizedTerms, analyzing node: " + node.toString() + " " + node.printNode());
      }
      if (node.getType() == ParserTreeConstants.JJTANDNODE
          || node.getType() == ParserTreeConstants.JJTORNODE) {
        continue;
      } else if (node.getType() == ParserTreeConstants.JJTJEXLSCRIPT) {
        if (node.getChildCount() == 0) {
          if (log.isDebugEnabled()) {
            log.debug("orNormalizedTerms: Head node has no children!");
          }
          throw new Exception(); // Head node has no children.
        }
      } else {
        if (log.isDebugEnabled()) {
          log.debug("Testing data location: " + node.getFieldName());
        }
        String fName = node.getFieldName().toString();
        String fValue = node.getFieldValue().toString();
        if (indexedTerms.containsKey(fName + ":" + fValue)) {
          if (indexedTerms.get(fName + ":" + fValue).size() > 1) {
            // Replace node with an OR, and make children from the multimap collection
            node.setType(ParserTreeConstants.JJTORNODE);
            boolean neg = node.isNegated();
            // The negation moves onto each new child; the OR itself is positive.
            node.setNegated(false);
            node.setFieldName(null);
            node.setFieldValue(null);
            Collection<String> values = indexedTerms.get(fName + ":" + fValue);
            for (String value : values) {
              RewriterTreeNode n =
                  new RewriterTreeNode(ParserTreeConstants.JJTEQNODE, fName, value, neg);
              node.add(n);
            }
          } else if (indexedTerms.get(fName + ":" + fValue).size() == 1) {
            // Straight replace
            Collection<String> values = indexedTerms.get(fName + ":" + fValue);
            for (String val : values) {
              // should only be 1
              node.setFieldValue(val);
            }
          }
        } else {
          // Non-indexed terms are deliberately left as-is here.
          // throw new Exception("orNormalizedTerms, encountered a non-indexed term: " +
          // node.getFieldName().toString());
        }
      }
    }
  } catch (Exception e) {
    log.debug("Caught exception in orNormalizedTerms(): " + e);
    throw new Exception("exception in: orNormalizedTerms");
  }
  return myroot;
}
/**
 * We only want to pass ranges on if they meet a very narrow set of conditions. All ranges must
 * be bounded i.e. x between(1,5) so their parent is an AND. We will only pass a range if:
 * 1. the AND is the direct child of the HEAD node, or
 * 2. the AND is a child of an OR which is a direct child of the HEAD node.
 *
 * If there is an HEAD-AND[x,OR[b,AND[range]]], and you remove the range, this turns the tree
 * into HEAD-AND[X,OR[B]] which becomes HEAD-AND[X,B] which will miss entries, so you need to
 * cut out the entire OR at this point and let the positive side of the AND pick it up.
 *
 * @param root head of the query tree
 * @param indexedTerms multimap keyed by the original "field:value" query token
 * @return the tree with disallowed ranges (and conflicting subtrees) removed
 */
private RewriterTreeNode removeTreeConflicts(RewriterTreeNode root,
    Multimap<String,String> indexedTerms) {
  if (log.isDebugEnabled()) {
    log.debug("removeTreeConflicts");
  }
  /*
   * You can't modify the enumeration, so save it into a list. We want to walk backwards in a
   * breadthFirstEnumeration. So we don't throw null pointers when we erase nodes and shorten our
   * list.
   */
  List<RewriterTreeNode> nodeList = new ArrayList<>();
  Enumeration<?> nodes = root.breadthFirstEnumeration();
  while (nodes.hasMoreElements()) {
    RewriterTreeNode child = (RewriterTreeNode) nodes.nextElement();
    nodeList.add(child);
  }
  // walk backwards
  for (int i = nodeList.size() - 1; i >= 0; i--) {
    RewriterTreeNode node = nodeList.get(i);
    // Subtrees flagged for removal on an earlier iteration are dropped first.
    if (node.isRemoval()) {
      node.removeFromParent();
      continue;
    }
    RewriterTreeNode parent = (RewriterTreeNode) node.getParent();
    /*
     * All ranges must be bounded! This means the range must be part of an AND, and the parent of
     * AND must be a HEAD node or an OR whose parent is a HEAD node.
     */
    if (node.getType() == ParserTreeConstants.JJTANDNODE && (node.getLevel() == 1
        || (parent.getType() == ParserTreeConstants.JJTORNODE && parent.getLevel() == 1))) {
      if (log.isDebugEnabled()) {
        log.debug("AND at level 1 or with OR parent at level 1");
      }
      Map<Text,RangeBounds> rangeMap = getBoundedRangeMap(node);
      // can't modify the enumeration... save children to a list.
      List<RewriterTreeNode> childList = new ArrayList<>();
      Enumeration<?> children = node.children();
      while (children.hasMoreElements()) {
        RewriterTreeNode child = (RewriterTreeNode) children.nextElement();
        childList.add(child);
      }
      for (int j = childList.size() - 1; j >= 0; j--) {
        RewriterTreeNode child = childList.get(j);
        // currently we are not allowing unbounded ranges, so they must sit under an AND node.
        if (rangeNodeSet.contains(child.getType())) {
          if (log.isDebugEnabled()) {
            log.debug("child type: " + JexlOperatorConstants.getOperator(child.getType()));
          }
          if (rangeMap == null) {
            // remove: no bounded range exists under this AND
            child.removeFromParent();
          } else {
            if (!rangeMap.containsKey(new Text(child.getFieldName()))) {
              // this particular field has no bounded range; drop the range term
              child.removeFromParent();
            } else {
              // check if it has a single non-range sibling
              boolean singleSib = false;
              if (log.isDebugEnabled()) {
                log.debug("checking for singleSib.");
              }
              Enumeration<?> sibs = child.getParent().children();
              while (sibs.hasMoreElements()) {
                RewriterTreeNode sib = (RewriterTreeNode) sibs.nextElement();
                if (!rangeNodeSet.contains(sib.getType())) {
                  singleSib = true;
                  break;
                }
              }
              if (singleSib) {
                // a non-range sibling can carry the query; the range is redundant here
                child.removeFromParent();
              } else {
                if (indexedTerms
                    .containsKey(child.getFieldName() + ":" + child.getFieldValue())) {
                  if (log.isDebugEnabled()) {
                    log.debug("removeTreeConflicts, node: " + node.getContents());
                  }
                  // swap parent AND with an OR
                  node.removeAllChildren();
                  node.setType(ParserTreeConstants.JJTORNODE);
                  Collection<String> values =
                      indexedTerms.get(child.getFieldName() + ":" + child.getFieldValue());
                  for (String value : values) {
                    RewriterTreeNode n = new RewriterTreeNode(ParserTreeConstants.JJTEQNODE,
                        child.getFieldName(), value, child.isNegated());
                    node.add(n);
                  }
                  if (log.isDebugEnabled()) {
                    log.debug("removeTreeConflicts, node: " + node.getContents());
                  }
                  break;
                } else {
                  child.removeFromParent();
                }
              }
            }
          }
        }
      } // end inner for
    } else { // remove all ranges!
      if (node.isLeaf()) {
        continue;
      }
      // can't modify the enumeration...
      List<RewriterTreeNode> childList = new ArrayList<>();
      Enumeration<?> children = node.children();
      while (children.hasMoreElements()) {
        RewriterTreeNode child = (RewriterTreeNode) children.nextElement();
        childList.add(child);
      }
      // walk backwards
      for (int j = childList.size() - 1; j >= 0; j--) {
        RewriterTreeNode child = childList.get(j);
        if (log.isDebugEnabled()) {
          log.debug("removeTreeConflicts, looking at node: " + node);
        }
        if (rangeNodeSet.contains(child.getType())) {
          // if grand parent is an OR and not top level, mark whole thing for removal.
          RewriterTreeNode grandParent = (RewriterTreeNode) child.getParent().getParent();
          if (grandParent.getType() == ParserTreeConstants.JJTORNODE
              && grandParent.getLevel() != 1) {
            grandParent.setRemoval(true);
          }
          child.removeFromParent();
        }
      }
    }
  } // end outer for
  return root;
}
/**
 * Validates the top-level operator node (the head node's single child) for negation
 * violations: an OR may contain no negated children (they are removed), and an AND must keep
 * at least one positive child.
 *
 * @param node head of the query tree
 * @return the same tree, possibly with negated OR children removed
 * @throws Exception if the top-level node cannot be satisfied after the check
 */
private RewriterTreeNode removeNegationViolations(RewriterTreeNode node) throws Exception {
  // The head node always has exactly one child: the top-level operator.
  RewriterTreeNode top = (RewriterTreeNode) node.getFirstChild();
  // Snapshot the children; we may remove some while looping.
  ArrayList<RewriterTreeNode> kids = new ArrayList<>();
  for (Enumeration<?> e = top.children(); e.hasMoreElements();) {
    kids.add((RewriterTreeNode) e.nextElement());
  }
  if (top.getType() == JexlOperatorConstants.JJTORNODE) {
    // Negated children of an OR are disallowed; strip them out.
    for (RewriterTreeNode kid : kids) {
      if (kid.isNegated()) {
        kid.removeFromParent();
      }
    }
    if (top.getChildCount() == 0) {
      throw new Exception("FieldIndexQueryReWriter: Top level query node cannot be processed.");
    }
  } else if (top.getType() == JexlOperatorConstants.JJTANDNODE) {
    // An AND needs at least one positive child to anchor the intersection.
    boolean hasPositive = false;
    for (RewriterTreeNode kid : kids) {
      if (!kid.isNegated()) {
        hasPositive = true;
        break;
      }
    }
    if (!hasPositive) {
      throw new Exception("FieldIndexQueryReWriter: Top level query node cannot be processed.");
    }
  }
  return node;
}
/**
 * Collapses branches left over after conflict removal: AND/OR nodes with no children are
 * deleted, AND/OR nodes with a single child are replaced by that child, and an empty head
 * node is an error.
 *
 * @param myroot head of the query tree
 * @return the collapsed tree (same instance as {@code myroot})
 * @throws Exception if the head node ends up with no children
 */
private RewriterTreeNode collapseBranches(RewriterTreeNode myroot) throws Exception {
  // Capture a breadth-first ordering up front, then walk it in reverse so that
  // leaves are processed before their ancestors; mutating the tree mid-enumeration
  // is not safe.
  List<RewriterTreeNode> ordering = new ArrayList<>();
  for (Enumeration<?> e = myroot.breadthFirstEnumeration(); e.hasMoreElements();) {
    ordering.add((RewriterTreeNode) e.nextElement());
  }
  for (int idx = ordering.size() - 1; idx >= 0; idx--) {
    RewriterTreeNode current = ordering.get(idx);
    if (log.isDebugEnabled()) {
      log.debug(
          "collapseBranches, inspecting node: " + current.toString() + " " + current.printNode());
    }
    int nodeType = current.getType();
    if (nodeType == ParserTreeConstants.JJTANDNODE || nodeType == ParserTreeConstants.JJTORNODE) {
      if (current.getChildCount() == 0) {
        // A childless boolean operator serves no purpose; drop it.
        current.removeFromParent();
      } else if (current.getChildCount() == 1) {
        // A single-child operator is redundant; splice the child into its place.
        RewriterTreeNode parent = (RewriterTreeNode) current.getParent();
        RewriterTreeNode onlyChild = (RewriterTreeNode) current.getFirstChild();
        current.removeFromParent();
        parent.add(onlyChild);
      }
    } else if (nodeType == ParserTreeConstants.JJTJEXLSCRIPT && current.getChildCount() == 0) {
      // Head node with nothing beneath it means there is no query left.
      throw new Exception();
    }
  }
  return myroot;
}
/**
 * Parses the serialized indexed-terms option into a multimap keyed by the original query token
 * (e.g. {@code color:red}) whose values are the normalized terms for that token.
 *
 * <p>The option value is a semicolon-separated list of entries, each of the form
 * {@code fieldName:originalValue:normalizedValue[:normalizedValue...]}. Entries with fewer
 * than three parts contribute nothing.
 *
 * @param options scan-time options map
 * @return multimap of original token to normalized values, or null if the option is absent
 */
public Multimap<String,String> parseIndexedTerms(Map<String,String> options) {
  if (options.get(INDEXED_TERMS_LIST) != null) {
    Multimap<String,String> mmap = HashMultimap.create();
    String[] items = options.get(INDEXED_TERMS_LIST).split(";");
    for (String item : items) {
      item = item.trim();
      // (Removed an empty "if (log.isDebugEnabled()) {}" statement that did nothing.)
      String[] parts = item.split(":");
      if (log.isDebugEnabled()) {
        log.debug("adding: " + parts[0]);
      }
      // parts[0]=field name, parts[1]=original value, parts[2..]=normalized values.
      for (int i = 2; i < parts.length; i++) {
        // key is original query token, i.e. color:red
        mmap.put(parts[0] + ":" + parts[1], parts[i]);
      }
    }
    if (log.isDebugEnabled()) {
      log.debug("multimap: " + mmap);
    }
    return mmap;
  }
  if (log.isDebugEnabled()) {
    log.debug("parseIndexedTerms: returning null");
  }
  return null;
}
/**
 * Rewrites NOT nodes out of the tree by marking their operands as negated, then pushes any
 * remaining negation flags down through interior nodes via De Morgan's laws.
 *
 * @param root head of the query tree
 * @return the same tree with all NOT nodes removed and negations distributed
 */
public RewriterTreeNode refactorTree(RewriterTreeNode root) {
  // Pass 1: replace each NOT node with its (now negated) operand.
  Enumeration<?> firstPass = root.breadthFirstEnumeration();
  while (firstPass.hasMoreElements()) {
    RewriterTreeNode current = (RewriterTreeNode) firstPass.nextElement();
    if (current.getType() == ParserTreeConstants.JJTNOTNODE) {
      RewriterTreeNode operand = (RewriterTreeNode) current.getChildAt(0);
      operand.setNegated(true);
      RewriterTreeNode parent = (RewriterTreeNode) current.getParent();
      parent.remove(current);
      parent.add(operand);
    }
  }
  // Pass 2: distribute negation flags downward (De Morgan) for negated interior nodes.
  Enumeration<?> secondPass = root.breadthFirstEnumeration();
  while (secondPass.hasMoreElements()) {
    RewriterTreeNode current = (RewriterTreeNode) secondPass.nextElement();
    if (current.isNegated() && current.getChildCount() > 0) {
      demorganSubTree(current);
    }
  }
  return root;
}
/**
 * Applies De Morgan's law at {@code root}: clears the node's own negation flag, swaps
 * AND with OR (and vice versa), and flips the negation flag on every direct child.
 * EQ/ER leaves keep their type; any other type is logged as unsupported.
 */
private void demorganSubTree(RewriterTreeNode root) {
  root.setNegated(false);
  int nodeType = root.getType();
  if (nodeType == ParserTreeConstants.JJTANDNODE) {
    root.setType(ParserTreeConstants.JJTORNODE);
  } else if (nodeType == ParserTreeConstants.JJTORNODE) {
    root.setType(ParserTreeConstants.JJTANDNODE);
  } else if (nodeType != ParserTreeConstants.JJTEQNODE
      && nodeType != ParserTreeConstants.JJTERNODE) {
    // EQ/ER are left untouched; anything else is unexpected here.
    log.error("refactorSubTree, node type not supported");
  }
  // Distribute the negation: invert the flag on every direct child.
  for (Enumeration<?> kids = root.children(); kids.hasMoreElements();) {
    RewriterTreeNode kid = (RewriterTreeNode) kids.nextElement();
    kid.setNegated(!kid.isNegated());
  }
}
/**
 * Normalizes the case of every leaf's field name and field value.
 *
 * @param root tree to walk
 * @param fnUpper true to upper-case field names, false to lower-case them
 * @param fvUpper true to upper-case field values, false to lower-case them
 * @return the same tree with leaf casing normalized
 */
private RewriterTreeNode applyCaseSensitivity(RewriterTreeNode root, boolean fnUpper,
    boolean fvUpper) {
  Enumeration<?> walker = root.breadthFirstEnumeration();
  while (walker.hasMoreElements()) {
    RewriterTreeNode candidate = (RewriterTreeNode) walker.nextElement();
    if (!candidate.isLeaf()) {
      continue; // only leaves carry field name/value
    }
    String name = candidate.getFieldName();
    candidate.setFieldName(fnUpper ? name.toUpperCase() : name.toLowerCase());
    String value = candidate.getFieldValue();
    candidate.setFieldValue(fvUpper ? value.toUpperCase() : value.toLowerCase());
  }
  return root;
}
/**
 * Builds a map of field name to range bounds from the direct children of an AND/OR node.
 * A bounded range requires both bound slots of {@link RangeBounds} to be populated for the
 * same field; entries with a missing bound are discarded.
 *
 * @param node candidate AND/OR node
 * @return map of field name to its bounds, or null if {@code node} is not an AND/OR node or
 *         no fully-bounded range exists among its children
 */
private Map<Text,RangeBounds> getBoundedRangeMap(RewriterTreeNode node) {
  if (node.getType() != ParserTreeConstants.JJTANDNODE
      && node.getType() != ParserTreeConstants.JJTORNODE) {
    return null;
  }
  Enumeration<?> children = node.children();
  Map<Text,RangeBounds> rangeMap = new HashMap<>();
  while (children.hasMoreElements()) {
    RewriterTreeNode child = (RewriterTreeNode) children.nextElement();
    // NOTE(review): LE/LT children populate the "lower" slot and GE/GT the "upper" slot,
    // which looks inverted; preserved exactly from the original logic — confirm intent.
    if (child.getType() == ParserTreeConstants.JJTLENODE
        || child.getType() == ParserTreeConstants.JJTLTNODE) {
      Text fName = new Text(child.getFieldName());
      if (rangeMap.containsKey(fName)) {
        RangeBounds rb = rangeMap.get(fName);
        if (rb.getLower() != null) {
          log.error("testBoundedRangeExistence, two lower bounds exist for bounded range.");
        }
        rb.setLower(new Text(child.getFieldValue()));
      } else {
        RangeBounds rb = new RangeBounds();
        rb.setLower(new Text(child.getFieldValue()));
        rangeMap.put(new Text(child.getFieldName()), rb);
      }
    } else if (child.getType() == ParserTreeConstants.JJTGENODE
        || child.getType() == ParserTreeConstants.JJTGTNODE) {
      Text fName = new Text(child.getFieldName());
      if (rangeMap.containsKey(fName)) {
        RangeBounds rb = rangeMap.get(fName);
        if (rb.getUpper() != null) {
          log.error("testBoundedRangeExistence, two Upper bounds exist for bounded range.");
        }
        rb.setUpper(new Text(child.getFieldValue()));
      } else {
        RangeBounds rb = new RangeBounds();
        rb.setUpper(new Text(child.getFieldValue()));
        rangeMap.put(new Text(child.getFieldName()), rb);
      }
    }
  }
  // BUGFIX: the original removed entries from rangeMap while iterating its entrySet with a
  // for-each loop, which throws ConcurrentModificationException whenever an unbounded range
  // is present. Collect the keys first, then remove.
  List<Text> unbounded = new ArrayList<>();
  for (Entry<Text,RangeBounds> entry : rangeMap.entrySet()) {
    RangeBounds rb = entry.getValue();
    if (rb.getLower() == null || rb.getUpper() == null) {
      if (log.isDebugEnabled()) {
        log.debug(
            "testBoundedRangeExistence: Unbounded Range detected, removing entry from rangeMap");
      }
      unbounded.add(entry.getKey());
    }
  }
  for (Text key : unbounded) {
    rangeMap.remove(key);
  }
  return rangeMap.isEmpty() ? null : rangeMap;
}
/**
* INNER CLASSES
*/
public class RewriterTreeNode extends DefaultMutableTreeNode {
private static final long serialVersionUID = 1L;
private boolean negated = false;
private String fieldName;
private String fieldValue;
private String operator;
private int type;
private boolean removal = false;
public RewriterTreeNode(int type) {
super();
this.type = type;
}
public RewriterTreeNode(int type, String fName, String fValue) {
super();
init(type, fName, fValue);
}
public RewriterTreeNode(int type, String fName, String fValue, boolean negate) {
super();
init(type, fName, fValue, negate);
}
private void init(int type, String fName, String fValue) {
init(type, fName, fValue, false);
}
private void init(int type, String fName, String fValue, boolean negate) {
this.type = type;
this.fieldName = fName;
this.fieldValue = fValue;
this.negated = negate;
this.operator = JexlOperatorConstants.getOperator(type);
if (log.isDebugEnabled()) {
log.debug("FN: " + this.fieldName + " FV: " + this.fieldValue + " Op: " + this.operator);
}
}
/**
* @return The field name.
*/
public String getFieldName() {
return fieldName;
}
public void setFieldName(String fieldName) {
this.fieldName = fieldName;
}
/**
*
* @return The field value.
*/
public String getFieldValue() {
return fieldValue;
}
public void setFieldValue(String fieldValue) {
this.fieldValue = fieldValue;
}
/**
*
* @return true if negated, otherwise false.
*/
public boolean isNegated() {
return negated;
}
public void setNegated(boolean negated) {
this.negated = negated;
}
/**
*
* @return The operator.
*/
public String getOperator() {
return operator;
}
public void setOperator(String operator) {
this.operator = operator;
}
/**
*
* @return The type.
*/
public int getType() {
return type;
}
public void setType(int type) {
this.type = type;
}
public boolean isRemoval() {
return removal;
}
public void setRemoval(boolean removal) {
this.removal = removal;
}
public String getContents() {
StringBuilder s = new StringBuilder("[");
s.append(toString());
if (children != null) {
Enumeration<?> e = this.children();
while (e.hasMoreElements()) {
RewriterTreeNode n = (RewriterTreeNode) e.nextElement();
s.append(",");
s.append(n.getContents());
}
}
s.append("]");
return s.toString();
}
/**
*
* @return A string represenation of the field name and value.
*/
public String printNode() {
StringBuilder s = new StringBuilder("[");
s.append("Full Location & Term = ");
if (this.fieldName != null) {
s.append(this.fieldName.toString());
} else {
s.append("BlankDataLocation");
}
s.append(" ");
if (this.fieldValue != null) {
s.append(this.fieldValue.toString());
} else {
s.append("BlankTerm");
}
s.append("]");
return s.toString();
}
@Override
public String toString() {
switch (type) {
case ParserTreeConstants.JJTEQNODE:
return fieldName + ":" + fieldValue + ":negated=" + isNegated();
case ParserTreeConstants.JJTNENODE:
return fieldName + ":" + fieldValue + ":negated=" + isNegated();
case ParserTreeConstants.JJTERNODE:
return fieldName + ":" + fieldValue + ":negated=" + isNegated();
case ParserTreeConstants.JJTNRNODE:
return fieldName + ":" + fieldValue + ":negated=" + isNegated();
case ParserTreeConstants.JJTLENODE:
return fieldName + ":" + fieldValue + ":negated=" + isNegated();
case ParserTreeConstants.JJTLTNODE:
return fieldName + ":" + fieldValue + ":negated=" + isNegated();
case ParserTreeConstants.JJTGENODE:
return fieldName + ":" + fieldValue + ":negated=" + isNegated();
case ParserTreeConstants.JJTGTNODE:
return fieldName + ":" + fieldValue + ":negated=" + isNegated();
case ParserTreeConstants.JJTJEXLSCRIPT:
return "HEAD";
case ParserTreeConstants.JJTANDNODE:
return "AND";
case ParserTreeConstants.JJTNOTNODE:
return "NOT";
case ParserTreeConstants.JJTORNODE:
return "OR";
default:
System.out.println("Problem in NuwaveTreeNode.toString()");
return null;
}
}
}
}
| 6,218 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/parser/TreeNode.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.parser;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Vector;
import org.apache.accumulo.examples.wikisearch.parser.QueryParser.QueryTerm;
import org.apache.commons.jexl2.parser.JexlNode;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
/**
 * Simple n-ary tree used to hold a parsed query. Each node records the JEXL AST node class it
 * was derived from, parent/child links, and the query terms attached to it. Postorder
 * (depth-first) and breadth-first enumerations are provided for tree walks.
 */
public class TreeNode {

  // JEXL AST node class this tree node was created from.
  private Class<? extends JexlNode> type = null;
  /* navigation elements */
  private TreeNode parent = null;
  private List<TreeNode> children = new ArrayList<>();
  // Field name -> query terms associated with this node.
  private Multimap<String,QueryTerm> terms = HashMultimap.create();

  public TreeNode() {
    super();
  }

  public Class<? extends JexlNode> getType() {
    return type;
  }

  public TreeNode getParent() {
    return parent;
  }

  public List<TreeNode> getChildren() {
    return children;
  }

  public Enumeration<TreeNode> getChildrenAsEnumeration() {
    return Collections.enumeration(children);
  }

  public Multimap<String,QueryTerm> getTerms() {
    return terms;
  }

  public void setType(Class<? extends JexlNode> type) {
    this.type = type;
  }

  public void setParent(TreeNode parent) {
    this.parent = parent;
  }

  public void setChildren(List<TreeNode> children) {
    this.children = children;
  }

  public void setTerms(Multimap<String,QueryTerm> terms) {
    this.terms = terms;
  }

  public boolean isLeaf() {
    return children.isEmpty();
  }

  @Override
  public String toString() {
    StringBuilder buf = new StringBuilder();
    // BUGFIX: guard type against null as well as terms, so toString never throws an NPE
    // on a partially-initialized node (the default-constructed type is null).
    buf.append("Type: ").append(type == null ? "null" : type.getSimpleName());
    buf.append(" Terms: ");
    if (null == terms) {
      buf.append("null");
    } else {
      buf.append(terms.toString());
    }
    return buf.toString();
  }

  /** @return a postorder (children before parent) enumeration of this subtree. */
  public final Enumeration<?> depthFirstEnumeration() {
    return new PostorderEnumeration(this);
  }

  /** @return a breadth-first (level order) enumeration of this subtree. */
  public Enumeration<?> breadthFirstEnumeration() {
    return new BreadthFirstEnumeration(this);
  }

  /** Postorder traversal: recursively exhausts each child subtree, then yields the root. */
  public final class PostorderEnumeration implements Enumeration<TreeNode> {

    protected TreeNode root;
    protected Enumeration<TreeNode> children;
    protected Enumeration<TreeNode> subtree;

    public PostorderEnumeration(TreeNode rootNode) {
      super();
      root = rootNode;
      children = root.getChildrenAsEnumeration();
      subtree = EMPTY_ENUMERATION;
    }

    public boolean hasMoreElements() {
      return root != null;
    }

    public TreeNode nextElement() {
      TreeNode retval;
      if (subtree.hasMoreElements()) {
        retval = subtree.nextElement();
      } else if (children.hasMoreElements()) {
        subtree = new PostorderEnumeration(children.nextElement());
        retval = subtree.nextElement();
      } else {
        retval = root;
        root = null; // marks this enumeration exhausted
      }
      return retval;
    }
  } // End of class PostorderEnumeration

  /** Shared, always-empty enumeration used to terminate postorder recursion. */
  public static final Enumeration<TreeNode> EMPTY_ENUMERATION = new Enumeration<TreeNode>() {
    public boolean hasMoreElements() {
      return false;
    }

    public TreeNode nextElement() {
      throw new NoSuchElementException("No more elements");
    }
  };

  /** Level-order traversal backed by a queue of per-level child enumerations. */
  final class BreadthFirstEnumeration implements Enumeration<TreeNode> {

    protected Queue queue;

    public BreadthFirstEnumeration(TreeNode rootNode) {
      super();
      queue = new Queue();
      // Seed the queue with a one-element enumeration holding the root
      // (resolves the old "don't really need a vector" note).
      queue.enqueue(Collections.enumeration(Collections.singletonList(rootNode)));
    }

    public boolean hasMoreElements() {
      return (!queue.isEmpty() && ((Enumeration<?>) queue.firstObject()).hasMoreElements());
    }

    public TreeNode nextElement() {
      Enumeration<?> enumer = (Enumeration<?>) queue.firstObject();
      TreeNode node = (TreeNode) enumer.nextElement();
      Enumeration<?> children = node.getChildrenAsEnumeration();
      if (!enumer.hasMoreElements()) {
        queue.dequeue();
      }
      if (children.hasMoreElements()) {
        queue.enqueue(children);
      }
      return node;
    }

    // A simple queue with a linked list data structure.
    final class Queue {

      QNode head; // null if empty
      QNode tail;

      final class QNode {
        public Object object;
        public QNode next; // null if end

        public QNode(Object object, QNode next) {
          this.object = object;
          this.next = next;
        }
      }

      public void enqueue(Object anObject) {
        if (head == null) {
          head = tail = new QNode(anObject, null);
        } else {
          tail.next = new QNode(anObject, null);
          tail = tail.next;
        }
      }

      public Object dequeue() {
        if (head == null) {
          throw new NoSuchElementException("No more elements");
        }
        Object retval = head.object;
        QNode oldHead = head;
        head = head.next;
        if (head == null) {
          tail = null;
        } else {
          oldHead.next = null;
        }
        return retval;
      }

      public Object firstObject() {
        if (head == null) {
          throw new NoSuchElementException("No more elements");
        }
        return head.object;
      }

      public boolean isEmpty() {
        return head == null;
      }
    } // End of class Queue
  } // End of class BreadthFirstEnumeration
}
| 6,219 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/parser/QueryEvaluator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.parser;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.accumulo.examples.wikisearch.function.QueryFunctions;
import org.apache.accumulo.examples.wikisearch.jexl.Arithmetic;
import org.apache.accumulo.examples.wikisearch.parser.EventFields.FieldValue;
import org.apache.accumulo.examples.wikisearch.parser.QueryParser.QueryTerm;
import org.apache.commons.jexl2.Expression;
import org.apache.commons.jexl2.JexlContext;
import org.apache.commons.jexl2.JexlEngine;
import org.apache.commons.jexl2.MapContext;
import org.apache.commons.jexl2.Script;
import org.apache.commons.jexl2.parser.ParseException;
import org.apache.commons.jexl2.parser.ParserTreeConstants;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import com.google.common.collect.Multimap;
/**
 * This class evaluates events against a query. The query is passed to the constructor and then
 * parsed. It is evaluated against an event in the evaluate method.
 */
public class QueryEvaluator {
  private static Logger log = Logger.getLogger(QueryEvaluator.class);
  // According to the JEXL 2.0 docs, the engine is thread-safe. Let's create 1 engine per VM and
  // cache 128 expressions
  private static JexlEngine engine = new JexlEngine(null, new Arithmetic(false), null, null);
  static {
    engine.setSilent(false);
    engine.setCache(128);
    // Register query helper functions under the "f" namespace.
    Map<String,Object> functions = new HashMap<>();
    functions.put("f", QueryFunctions.class);
    engine.setFunctions(functions);
  }
  // The (possibly lower-cased) query this evaluator was built from.
  private String query = null;
  // Identifiers (field names) referenced by the query.
  private Set<String> literals = null;
  // Field name -> query terms referencing that field.
  private Multimap<String,QueryTerm> terms = null;
  // Query text as rewritten by the most recent evaluate() call.
  private String modifiedQuery = null;
  // JEXL variable context, repopulated per event.
  private JexlContext ctx = new MapContext();
  // When true, field names and values are lower-cased before matching.
  private boolean caseInsensitive = true;
/**
 * Creates an evaluator with case-insensitive matching (the default).
 *
 * @param query the JEXL query to parse
 * @throws ParseException if the query cannot be parsed
 */
public QueryEvaluator(String query) throws ParseException {
  // Delegate to the two-argument constructor so the parsing logic lives in one place;
  // the original duplicated that constructor's body verbatim.
  this(query, true);
}
/**
 * Creates an evaluator.
 *
 * @param query the JEXL query to parse
 * @param insensitive true for case-insensitive matching (query and identifiers lower-cased)
 * @throws ParseException if the query cannot be parsed
 */
public QueryEvaluator(String query, boolean insensitive) throws ParseException {
  this.caseInsensitive = insensitive;
  this.query = insensitive ? query.toLowerCase() : query;
  QueryParser parser = new QueryParser();
  parser.execute(this.query);
  this.terms = parser.getQueryTerms();
  if (!this.caseInsensitive) {
    this.literals = parser.getQueryIdentifiers();
  } else {
    // Lower-case each identifier so lookups match the lower-cased event fields.
    this.literals = new HashSet<>();
    for (String identifier : parser.getQueryIdentifiers()) {
      this.literals.add(identifier.toLowerCase());
    }
  }
}
/**
 * @return the (possibly lower-cased) query this evaluator was constructed with
 */
public String getQuery() {
  return this.query;
}
/** Prints each query identifier to stdout; debugging aid only. */
public void printLiterals() {
  for (String literal : literals) {
    System.out.println("literal: " + literal);
  }
}
/** Sets the log level for this class's logger. */
public void setLevel(Level lev) {
  log.setLevel(lev);
}
/**
 * Rewrites the query to handle a field with multiple values in one event. All values are
 * placed into the JEXL context as an array, a small script is prepended that ORs the field's
 * predicates across every value into a {@code _fieldName} flag, and each original predicate
 * for the field is replaced with a check of that flag.
 *
 * @param query current (mutable) query text; modified in place
 * @param fieldName name of the multi-valued field
 * @param fieldValues all values observed for the field in this event
 * @return the rewritten query (same instance as {@code query})
 */
public StringBuilder rewriteQuery(StringBuilder query, String fieldName,
    Collection<FieldValue> fieldValues) {
  if (log.isDebugEnabled()) {
    log.debug("rewriteQuery");
  }
  if (caseInsensitive) {
    fieldName = fieldName.toLowerCase();
  }
  if (log.isDebugEnabled()) {
    log.debug("Modifying original query: " + query);
  }
  // Pull the values out of the FieldValue objects.
  String[] values = new String[fieldValues.size()];
  int idx = 0;
  for (FieldValue fv : fieldValues) {
    String v = new String(fv.getValue());
    values[idx++] = caseInsensitive ? v.toLowerCase() : v;
  }
  // Expose all values to JEXL as an array under the field's name.
  ctx.set(fieldName, values);
  Collection<QueryTerm> qt = terms.get(fieldName);
  // Prepend a script that sets _<fieldName> = true if any value satisfies any predicate.
  StringBuilder script = new StringBuilder();
  script.append("_").append(fieldName).append(" = false;\n");
  script.append("for (field : ").append(fieldName).append(") {\n");
  for (QueryTerm t : qt) {
    if (!t.getOperator()
        .equals(JexlOperatorConstants.getOperator(ParserTreeConstants.JJTFUNCTIONNODE))) {
      script.append("\tif (_").append(fieldName).append(" == false && field ")
          .append(t.getOperator()).append(" ").append(t.getValue()).append(") { \n");
    } else {
      // Function term: substitute the loop variable for the field name inside the call.
      script.append("\tif (_").append(fieldName).append(" == false && ")
          .append(t.getValue().toString().replace(fieldName, "field")).append(") { \n");
    }
    script.append("\t\t_").append(fieldName).append(" = true;\n");
    script.append("\t}\n");
  }
  script.append("}\n");
  // Add the script to the beginning of the query.
  query.insert(0, script.toString());
  // Replace each original predicate on this field with a check of the computed flag.
  StringBuilder newPredicate = new StringBuilder();
  newPredicate.append("_").append(fieldName).append(" == true");
  for (QueryTerm t : qt) {
    // Reconstruct the predicate text so we can locate it in the query.
    StringBuilder predicate = new StringBuilder();
    if (!t.getOperator()
        .equals(JexlOperatorConstants.getOperator(ParserTreeConstants.JJTFUNCTIONNODE))) {
      predicate.append(fieldName).append(" ").append(t.getOperator()).append(" ")
          .append(t.getValue());
    } else {
      // NOTE(review): a prior comment said "find the second occurrence" here, but indexOf
      // finds the first; behavior preserved as-is.
      predicate.append(t.getValue().toString());
    }
    int start = query.indexOf(predicate.toString());
    if (-1 == start) {
      // BUGFIX: the original logged this warning but then still called
      // query.replace(-1, ...), which throws StringIndexOutOfBoundsException.
      // Skip predicates we cannot locate.
      log.warn("Unable to find predicate: " + predicate.toString() + " in rewritten query: "
          + query.toString());
      continue;
    }
    // Now modify the query to check the value of _<fieldName>.
    query.replace(start, start + predicate.length(), newPredicate.toString());
  }
  if (log.isDebugEnabled()) {
    log.debug("leaving rewriteQuery with: " + query.toString());
  }
  return query;
}
/**
 * Evaluates the query against an event.
 *
 * <p>Single-valued fields are bound directly into the JEXL context; a multi-valued field
 * triggers a query rewrite (see {@code rewriteQuery}) and script-based evaluation. Query
 * identifiers absent from the event are bound to null. Evaluation errors are logged and
 * treated as a non-match.
 *
 * @param eventFields field name to values for the event under test
 * @return true if the event satisfies the query, false otherwise
 */
public boolean evaluate(EventFields eventFields) {
  this.modifiedQuery = null;
  boolean rewritten = false;
  // Copy the query
  StringBuilder q = new StringBuilder(query);
  // Copy the literals, we are going to remove elements from this set
  // when they are added to the JEXL context. This will allow us to
  // determine which items in the query where *NOT* in the data.
  HashSet<String> literalsCopy = new HashSet<>(literals);
  // Loop through the event fields and add them to the JexlContext.
  for (Entry<String,Collection<FieldValue>> field : eventFields.asMap().entrySet()) {
    String fName = field.getKey();
    if (caseInsensitive) {
      fName = fName.toLowerCase();
    }
    // If this field is not part of the expression, then skip it.
    if (!literals.contains(fName)) {
      continue;
    } else {
      literalsCopy.remove(fName);
    }
    // This field may have multiple values.
    if (field.getValue().size() == 0) {
      continue;
    } else if (field.getValue().size() == 1) {
      // We are explicitly converting these bytes to a String.
      if (caseInsensitive) {
        ctx.set(field.getKey().toLowerCase(),
            (new String(field.getValue().iterator().next().getValue())).toLowerCase());
      } else {
        ctx.set(field.getKey(), new String(field.getValue().iterator().next().getValue()));
      }
    } else {
      // Multi-valued field: rewrite the query to OR across all values.
      // q = queryRewrite(q, field.getKey(), field.getValue());
      q = rewriteQuery(q, field.getKey(), field.getValue());
      rewritten = true;
    } // End of if
  } // End of loop
  // For any literals in the query that were not found in the data, add them to the context
  // with a null value.
  for (String lit : literalsCopy) {
    ctx.set(lit, null);
  }
  if (log.isDebugEnabled()) {
    log.debug("Evaluating query: " + q.toString());
  }
  this.modifiedQuery = q.toString();
  Boolean result = null;
  // A rewritten query contains statements and must run as a script;
  // otherwise it is a single expression.
  if (rewritten) {
    Script script = engine.createScript(this.modifiedQuery);
    try {
      result = (Boolean) script.execute(ctx);
    } catch (Exception e) {
      log.error("Error evaluating script: " + this.modifiedQuery + " against event"
          + eventFields.toString(), e);
    }
  } else {
    Expression expr = engine.createExpression(this.modifiedQuery);
    try {
      result = (Boolean) expr.evaluate(ctx);
    } catch (Exception e) {
      log.error("Error evaluating expression: " + this.modifiedQuery + " against event"
          + eventFields.toString(), e);
    }
  }
  if (null != result && result) {
    return true;
  } else {
    return false;
  }
} // End of method
  /**
   * Returns the (possibly rewritten) query string that was evaluated against the most recent
   * event, or null if {@code evaluate} has not yet been called.
   *
   * @return rewritten query that was evaluated against the most recent event
   */
  public String getModifiedQuery() {
    return this.modifiedQuery;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.parser;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeSet;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.wikisearch.iterator.EvaluatingIterator;
import org.apache.accumulo.examples.wikisearch.logic.AbstractQueryLogic;
import org.apache.accumulo.examples.wikisearch.normalizer.Normalizer;
import org.apache.accumulo.examples.wikisearch.protobuf.Uid;
import org.apache.accumulo.examples.wikisearch.util.TextUtil;
import org.apache.commons.jexl2.parser.ASTAndNode;
import org.apache.commons.jexl2.parser.ASTEQNode;
import org.apache.commons.jexl2.parser.ASTERNode;
import org.apache.commons.jexl2.parser.ASTFalseNode;
import org.apache.commons.jexl2.parser.ASTFunctionNode;
import org.apache.commons.jexl2.parser.ASTGENode;
import org.apache.commons.jexl2.parser.ASTGTNode;
import org.apache.commons.jexl2.parser.ASTLENode;
import org.apache.commons.jexl2.parser.ASTLTNode;
import org.apache.commons.jexl2.parser.ASTNENode;
import org.apache.commons.jexl2.parser.ASTNRNode;
import org.apache.commons.jexl2.parser.ASTNullLiteral;
import org.apache.commons.jexl2.parser.ASTOrNode;
import org.apache.commons.jexl2.parser.ASTTrueNode;
import org.apache.commons.jexl2.parser.ParseException;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang.builder.ToStringBuilder;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Logger;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import com.google.protobuf.InvalidProtocolBufferException;
/**
 * This class is used to query the global indices to determine the set of ranges to use when
 * querying the shard table. The RangeCalculator looks at each term in the query to determine if it
 * is an equivalence, range, or wildcard comparison, and queries the appropriate index to find the
 * ranges for the terms, which are then cached. The final set of ranges is computed as the AST is
 * traversed.
 */
public class RangeCalculator extends QueryParser {
/**
* Container used as map keys in this class
*
*/
public static class MapKey implements Comparable<MapKey> {
private String fieldName = null;
private String fieldValue = null;
private String originalQueryValue = null;
public MapKey(String fieldName, String fieldValue) {
super();
this.fieldName = fieldName;
this.fieldValue = fieldValue;
}
public String getFieldName() {
return fieldName;
}
public String getFieldValue() {
return fieldValue;
}
public void setFieldName(String fieldName) {
this.fieldName = fieldName;
}
public void setFieldValue(String fieldValue) {
this.fieldValue = fieldValue;
}
public String getOriginalQueryValue() {
return originalQueryValue;
}
public void setOriginalQueryValue(String originalQueryValue) {
this.originalQueryValue = originalQueryValue;
}
@Override
public int hashCode() {
return new HashCodeBuilder(17, 37).append(fieldName).append(fieldValue).toHashCode();
}
@Override
public String toString() {
return this.fieldName + " " + this.fieldValue;
}
@Override
public boolean equals(Object other) {
if (other == null) {
return false;
}
if (other instanceof MapKey) {
MapKey o = (MapKey) other;
return (this.fieldName.equals(o.fieldName) && this.fieldValue.equals(o.fieldValue));
} else {
return false;
}
}
@Override
public int compareTo(MapKey o) {
int result = this.fieldName.compareTo(o.fieldName);
if (result != 0) {
return this.fieldValue.compareTo(o.fieldValue);
} else {
return result;
}
}
}
/**
* Container used to hold the lower and upper bound of a range
*
*/
public static class RangeBounds {
private String originalLower = null;
private Text lower = null;
private String originalUpper = null;
private Text upper = null;
public Text getLower() {
return lower;
}
public Text getUpper() {
return upper;
}
public void setLower(Text lower) {
this.lower = lower;
}
public void setUpper(Text upper) {
this.upper = upper;
}
public String getOriginalLower() {
return originalLower;
}
public String getOriginalUpper() {
return originalUpper;
}
public void setOriginalLower(String originalLower) {
this.originalLower = originalLower;
}
public void setOriginalUpper(String originalUpper) {
this.originalUpper = originalUpper;
}
}
/**
*
* Object that is used to hold ranges found in the index. Subclasses may compute the final range
* set in various ways.
*/
protected static class TermRange implements Comparable<TermRange> {
private String fieldName = null;
private Object fieldValue = null;
private Set<Range> ranges = new TreeSet<>();
public TermRange(String name, Object fieldValue) {
this.fieldName = name;
this.fieldValue = fieldValue;
}
public String getFieldName() {
return this.fieldName;
}
public Object getFieldValue() {
return this.fieldValue;
}
public void addAll(Set<Range> r) {
ranges.addAll(r);
}
public void add(Range r) {
ranges.add(r);
}
public Set<Range> getRanges() {
return ranges;
}
@Override
public String toString() {
ToStringBuilder tsb = new ToStringBuilder(this);
tsb.append("fieldName", fieldName);
tsb.append("fieldValue", fieldValue);
tsb.append("ranges", ranges);
return tsb.toString();
}
@Override
public int compareTo(TermRange o) {
int result = this.fieldName.compareTo(o.fieldName);
if (result == 0) {
return ((Integer) ranges.size()).compareTo(o.ranges.size());
} else {
return result;
}
}
}
  /**
   * Object used to store context information as the AST is being traversed.
   */
  static class EvaluationContext {
    // True while visiting the subtree of an OR node.
    boolean inOrContext = false;
    // True while visiting the subtree of a NOT node; flips term negation.
    boolean inNotContext = false;
    // True while visiting the subtree of an AND node.
    boolean inAndContext = false;
    // Range result produced by the most recently visited node.
    TermRange lastRange = null;
    // Field name (or synthetic "OR_RESULT"/"AND_RESULT") of the last processed term.
    String lastProcessedTerm = null;
  }
  protected static Logger log = Logger.getLogger(RangeCalculator.class);
  // Literal substring searched for in normalized values to detect a regex wildcard (".*").
  // NOTE(review): these could be static final constants — confirm no subclass reassigns them.
  private static String WILDCARD = ".*";
  // Literal substring for an escaped dot ("\."), the single-character wildcard form.
  private static String SINGLE_WILDCARD = "\\.";
  // Accumulo connection and scan authorizations, set by execute().
  protected Connector c;
  protected Authorizations auths;
  // Field name -> normalizer(s) for terms that are present in the global index.
  protected Multimap<String,Normalizer> indexedTerms;
  // Copy of the query terms, augmented as the AST is visited.
  protected Multimap<String,QueryTerm> termsCopy = HashMultimap.create();
  // Names of the forward and reverse global index tables.
  protected String indexTableName;
  protected String reverseIndexTableName;
  // Number of threads used by the BatchScanner for index lookups.
  protected int queryThreads = 8;
  /* final results of index lookups, ranges for the shard table */
  protected Set<Range> result = null;
  /* map of field names to values found in the index */
  protected Multimap<String,String> indexEntries = HashMultimap.create();
  /* map of value in the index to the original query values */
  protected Map<String,String> indexValues = new HashMap<>();
  /* map of values in the query to map keys used */
  protected Multimap<String,MapKey> originalQueryValues = HashMultimap.create();
  /* map of field name to cardinality */
  protected Map<String,Long> termCardinalities = new HashMap<>();
  /* cached results of all ranges found global index lookups */
  protected Map<MapKey,TermRange> globalIndexResults = new HashMap<>();
  /**
   * Parses the query, looks up every indexable term in the global (and reverse) index tables,
   * caches the resulting shard ranges, and then traverses the AST to compute the final range set
   * (retrievable via {@code getResult()}).
   *
   * @param c
   *          Accumulo connector used for index scans
   * @param auths
   *          scan authorizations
   * @param indexedTerms
   *          field name to normalizer(s) for fields present in the index
   * @param terms
   *          field name to query term mappings extracted from the query
   * @param query
   *          the raw query string
   * @param logic
   *          supplies index table names and scanner thread count
   * @param typeFilter
   *          optional set of datatypes to restrict results to
   * @throws ParseException
   *           if the query cannot be parsed
   */
  public void execute(Connector c, Authorizations auths, Multimap<String,Normalizer> indexedTerms,
      Multimap<String,QueryTerm> terms, String query, AbstractQueryLogic logic,
      Set<String> typeFilter) throws ParseException {
    super.execute(query);
    this.c = c;
    this.auths = auths;
    this.indexedTerms = indexedTerms;
    this.termsCopy.putAll(terms);
    this.indexTableName = logic.getIndexTableName();
    this.reverseIndexTableName = logic.getReverseIndexTableName();
    this.queryThreads = logic.getQueryThreads();
    // Per-category buckets of index ranges to scan: equality, trailing-wildcard (forward index),
    // leading-wildcard (reverse index), and bounded ranges keyed by field name.
    Map<MapKey,Set<Range>> indexRanges = new HashMap<>();
    Map<MapKey,Set<Range>> trailingWildcardRanges = new HashMap<>();
    Map<MapKey,Set<Range>> leadingWildcardRanges = new HashMap<>();
    Map<Text,RangeBounds> rangeMap = new HashMap<>();
    // Here we iterate over all of the terms in the query to determine if they are an equivalence,
    // wildcard, or range type operator
    for (Entry<String,QueryTerm> entry : terms.entries()) {
      if (entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTEQNode.class))
          || entry.getValue().getOperator()
              .equals(JexlOperatorConstants.getOperator(ASTERNode.class))
          || entry.getValue().getOperator()
              .equals(JexlOperatorConstants.getOperator(ASTLTNode.class))
          || entry.getValue().getOperator()
              .equals(JexlOperatorConstants.getOperator(ASTLENode.class))
          || entry.getValue().getOperator()
              .equals(JexlOperatorConstants.getOperator(ASTGTNode.class))
          || entry.getValue().getOperator()
              .equals(JexlOperatorConstants.getOperator(ASTGENode.class))) {
        // If this term is not in the set of indexed terms, then bail
        if (!indexedTerms.containsKey(entry.getKey())) {
          termCardinalities.put(entry.getKey().toUpperCase(), 0L);
          continue;
        }
        // In the case of function calls, the query term could be null. Dont query the index for it.
        // NOTE(review): this check is dead — getOperator() was already invoked on
        // entry.getValue() above, so a null value would have thrown before reaching here.
        if (null == entry.getValue()) {
          termCardinalities.put(entry.getKey().toUpperCase(), 0L);
          continue;
        }
        // In the case where we are looking for 'null', then skip.
        if (null == entry.getValue().getValue()
            || ((String) entry.getValue().getValue()).equals("null")) {
          termCardinalities.put(entry.getKey().toUpperCase(), 0L);
          continue;
        }
        // Remove the begin and end ' marks
        String value = null;
        if (((String) entry.getValue().getValue()).startsWith("'")
            && ((String) entry.getValue().getValue()).endsWith("'")) {
          value = ((String) entry.getValue().getValue()).substring(1,
              ((String) entry.getValue().getValue()).length() - 1);
        } else {
          value = (String) entry.getValue().getValue();
        }
        // The entries in the index are normalized
        for (Normalizer normalizer : indexedTerms.get(entry.getKey())) {
          String normalizedFieldValue = normalizer.normalizeFieldValue(null, value);
          Text fieldValue = new Text(normalizedFieldValue);
          Text fieldName = new Text(entry.getKey().toUpperCase());
          // EQUALS
          if (entry.getValue().getOperator()
              .equals(JexlOperatorConstants.getOperator(ASTEQNode.class))) {
            // Index row = field value, colf = field name; range covers exactly that row.
            Key startRange = new Key(fieldValue, fieldName);
            Range r = new Range(startRange, true, startRange.followingKey(PartialKey.ROW), true);
            MapKey key = new MapKey(fieldName.toString(), fieldValue.toString());
            key.setOriginalQueryValue(value);
            this.originalQueryValues.put(value, key);
            if (!indexRanges.containsKey(key)) {
              indexRanges.put(key, new HashSet<Range>());
            }
            indexRanges.get(key).add(r);
            // WILDCARD
          } else if (entry.getValue().getOperator()
              .equals(JexlOperatorConstants.getOperator(ASTERNode.class))) {
            // This is a wildcard query using regex. We can only support leading and trailing
            // wildcards at this time. Leading
            // wildcards will need be reversed and sent to the global reverse index. Trailing
            // wildcard queries will be sent to the
            // global index. In all cases, the range for the wilcard will be the range of possible
            // UNICODE codepoints, hex 0 to 10FFFF.
            int loc = normalizedFieldValue.indexOf(WILDCARD);
            if (-1 == loc) {
              loc = normalizedFieldValue.indexOf(SINGLE_WILDCARD);
            }
            if (-1 == loc) {
              // Then no wildcard in the query? Treat like the equals case above.
              Key startRange = new Key(fieldValue, fieldName);
              Range r = new Range(startRange, true, startRange.followingKey(PartialKey.ROW), true);
              MapKey key = new MapKey(fieldName.toString(), fieldValue.toString());
              key.setOriginalQueryValue(value);
              this.originalQueryValues.put(value, key);
              if (!indexRanges.containsKey(key)) {
                indexRanges.put(key, new HashSet<Range>());
              }
              indexRanges.get(key).add(r);
            } else {
              if (loc == 0) {
                // Then we have a leading wildcard, reverse the term and use the global reverse
                // index.
                // NOTE(review): substring(2) assumes the wildcard token is exactly 2 chars
                // (".*" or "\."); confirm for other regex prefixes.
                StringBuilder buf = new StringBuilder(normalizedFieldValue.substring(2));
                normalizedFieldValue = buf.reverse().toString();
                Key startRange = new Key(new Text(normalizedFieldValue + "\u0000"), fieldName);
                Key endRange = new Key(new Text(normalizedFieldValue + "\u10FFFF"), fieldName);
                Range r = new Range(startRange, true, endRange, true);
                MapKey key = new MapKey(fieldName.toString(), normalizedFieldValue);
                key.setOriginalQueryValue(value);
                this.originalQueryValues.put(value, key);
                if (!leadingWildcardRanges.containsKey(key)) {
                  leadingWildcardRanges.put(key, new HashSet<Range>());
                }
                leadingWildcardRanges.get(key).add(r);
              } else if (loc == (normalizedFieldValue.length() - 2)) {
                normalizedFieldValue = normalizedFieldValue.substring(0, loc);
                // Then we have a trailing wildcard character.
                Key startRange = new Key(new Text(normalizedFieldValue + "\u0000"), fieldName);
                Key endRange = new Key(new Text(normalizedFieldValue + "\u10FFFF"), fieldName);
                Range r = new Range(startRange, true, endRange, true);
                MapKey key = new MapKey(fieldName.toString(), normalizedFieldValue);
                key.setOriginalQueryValue(value);
                this.originalQueryValues.put(value, key);
                if (!trailingWildcardRanges.containsKey(key)) {
                  trailingWildcardRanges.put(key, new HashSet<Range>());
                }
                trailingWildcardRanges.get(key).add(r);
              } else {
                // throw new RuntimeException("Unsupported wildcard location. Only trailing or
                // leading wildcards are supported: " + normalizedFieldValue);
                // Don't throw an exception, there must be a wildcard in the query, we'll treat it
                // as a filter on the results since it is not
                // leading or trailing.
              }
            }
            // RANGES
          } else if (entry.getValue().getOperator()
              .equals(JexlOperatorConstants.getOperator(ASTGTNode.class))
              || entry.getValue().getOperator()
                  .equals(JexlOperatorConstants.getOperator(ASTGENode.class))) {
            // Then we have a lower bound to a range query
            if (!rangeMap.containsKey(fieldName)) {
              rangeMap.put(fieldName, new RangeBounds());
            }
            rangeMap.get(fieldName).setLower(fieldValue);
            rangeMap.get(fieldName).setOriginalLower(value);
          } else if (entry.getValue().getOperator()
              .equals(JexlOperatorConstants.getOperator(ASTLTNode.class))
              || entry.getValue().getOperator()
                  .equals(JexlOperatorConstants.getOperator(ASTLENode.class))) {
            // Then we have an upper bound to a range query
            if (!rangeMap.containsKey(fieldName)) {
              rangeMap.put(fieldName, new RangeBounds());
            }
            rangeMap.get(fieldName).setUpper(fieldValue);
            rangeMap.get(fieldName).setOriginalUpper(value);
          }
        }
      }
    }
    // INDEX RANGE QUERY
    // Now that we have figured out the range bounds, create the index ranges.
    for (Entry<Text,RangeBounds> entry : rangeMap.entrySet()) {
      if (entry.getValue().getLower() != null && entry.getValue().getUpper() != null) {
        // Figure out the key order
        Key lk = new Key(entry.getValue().getLower());
        Key up = new Key(entry.getValue().getUpper());
        Text lower = lk.getRow();
        Text upper = up.getRow();
        // Swith the order if needed.
        if (lk.compareTo(up) > 0) {
          lower = up.getRow();
          upper = lk.getRow();
        }
        Key startRange = new Key(lower, entry.getKey());
        Key endRange = new Key(upper, entry.getKey());
        Range r = new Range(startRange, true, endRange, true);
        // For the range queries we need to query the global index and then handle the results a
        // little differently.
        Map<MapKey,Set<Range>> ranges = new HashMap<>();
        MapKey key = new MapKey(entry.getKey().toString(), entry.getValue().getLower().toString());
        key.setOriginalQueryValue(entry.getValue().getOriginalLower().toString());
        this.originalQueryValues.put(entry.getValue().getOriginalLower().toString(), key);
        ranges.put(key, new HashSet<Range>());
        ranges.get(key).add(r);
        // Now query the global index and override the field value used in the results map
        try {
          Map<MapKey,TermRange> lowerResults = queryGlobalIndex(ranges, entry.getKey().toString(),
              this.indexTableName, false, key, typeFilter);
          // Add the results to the global index results for both the upper and lower field values.
          Map<MapKey,TermRange> upperResults = new HashMap<>();
          for (Entry<MapKey,TermRange> e : lowerResults.entrySet()) {
            MapKey key2 =
                new MapKey(e.getKey().getFieldName(), entry.getValue().getUpper().toString());
            key2.setOriginalQueryValue(entry.getValue().getOriginalUpper().toString());
            upperResults.put(key2, e.getValue());
            this.originalQueryValues.put(entry.getValue().getOriginalUpper(), key2);
          }
          this.globalIndexResults.putAll(lowerResults);
          this.globalIndexResults.putAll(upperResults);
        } catch (TableNotFoundException e) {
          log.error("index table not found", e);
          throw new RuntimeException(" index table not found", e);
        }
      } else {
        log.warn("Unbounded range detected, not querying index for it. Field "
            + entry.getKey().toString() + " in query: " + query);
      }
    }
    // Now that we have calculated all of the ranges, query the global index.
    try {
      // Query for the trailing wildcards if we have any
      for (Entry<MapKey,Set<Range>> trailing : trailingWildcardRanges.entrySet()) {
        Map<MapKey,Set<Range>> m = new HashMap<>();
        m.put(trailing.getKey(), trailing.getValue());
        if (log.isDebugEnabled()) {
          log.debug("Ranges for Wildcard Global Index query: " + m.toString());
        }
        this.globalIndexResults.putAll(queryGlobalIndex(m, trailing.getKey().getFieldName(),
            this.indexTableName, false, trailing.getKey(), typeFilter));
      }
      // Query for the leading wildcards if we have any
      for (Entry<MapKey,Set<Range>> leading : leadingWildcardRanges.entrySet()) {
        Map<MapKey,Set<Range>> m = new HashMap<>();
        m.put(leading.getKey(), leading.getValue());
        if (log.isDebugEnabled()) {
          log.debug("Ranges for Wildcard Global Reverse Index query: " + m.toString());
        }
        this.globalIndexResults.putAll(queryGlobalIndex(m, leading.getKey().getFieldName(),
            this.reverseIndexTableName, true, leading.getKey(), typeFilter));
      }
      // Query for the equals case
      for (Entry<MapKey,Set<Range>> equals : indexRanges.entrySet()) {
        Map<MapKey,Set<Range>> m = new HashMap<>();
        m.put(equals.getKey(), equals.getValue());
        if (log.isDebugEnabled()) {
          log.debug("Ranges for Global Index query: " + m.toString());
        }
        this.globalIndexResults.putAll(queryGlobalIndex(m, equals.getKey().getFieldName(),
            this.indexTableName, false, equals.getKey(), typeFilter));
      }
    } catch (TableNotFoundException e) {
      log.error("index table not found", e);
      throw new RuntimeException(" index table not found", e);
    }
    if (log.isDebugEnabled()) {
      log.debug("Ranges from Global Index query: " + globalIndexResults.toString());
    }
    // Now traverse the AST
    EvaluationContext ctx = new EvaluationContext();
    this.getAST().childrenAccept(this, ctx);
    // NOTE(review): ctx.lastRange may still be null if no visit method set it (e.g. a query with
    // no supported terms), which would NPE below — confirm the visitor always sets it.
    if (ctx.lastRange.getRanges().size() == 0) {
      log.debug("No resulting range set");
    } else {
      if (log.isDebugEnabled()) {
        log.debug("Setting range results to: " + ctx.lastRange.getRanges().toString());
      }
      this.result = ctx.lastRange.getRanges();
    }
  }
  /**
   * Gets the final set of shard table ranges computed by {@code execute}.
   *
   * @return set of ranges to use for the shard table
   */
  public Set<Range> getResult() {
    return result;
  }

  /**
   * Gets the normalized values that were actually found in the global index.
   *
   * @return map of field names to index field values
   */
  public Multimap<String,String> getIndexEntries() {
    return indexEntries;
  }

  /**
   * @return map of each value found in the index to the original query value that produced it
   */
  public Map<String,String> getIndexValues() {
    return indexValues;
  }

  /**
   * Gets the per-field counts accumulated from the index lookups.
   *
   * @return Cardinality for each field name.
   */
  public Map<String,Long> getTermCardinalities() {
    return termCardinalities;
  }
/**
*
* @param isReverse
* switch that determines whether or not to reverse the results
* @param override
* mapKey for wildcard and range queries that specify which mapkey to use in the results
* @param typeFilter
* - optional list of datatypes
*/
protected Map<MapKey,TermRange> queryGlobalIndex(Map<MapKey,Set<Range>> indexRanges,
String specificFieldName, String tableName, boolean isReverse, MapKey override,
Set<String> typeFilter) throws TableNotFoundException {
// The results map where the key is the field name and field value and the
// value is a set of ranges. The mapkey will always be the field name
// and field value that was passed in the original query. The TermRange
// will contain the field name and field value found in the index.
Map<MapKey,TermRange> results = new HashMap<>();
// Seed the results map and create the range set for the batch scanner
Set<Range> rangeSuperSet = new HashSet<>();
for (Entry<MapKey,Set<Range>> entry : indexRanges.entrySet()) {
rangeSuperSet.addAll(entry.getValue());
TermRange tr = new TermRange(entry.getKey().getFieldName(), entry.getKey().getFieldValue());
if (null == override) {
results.put(entry.getKey(), tr);
} else {
results.put(override, tr);
}
}
if (log.isDebugEnabled()) {
log.debug("Querying global index table: " + tableName + ", range: " + rangeSuperSet.toString()
+ " colf: " + specificFieldName);
}
BatchScanner bs = this.c.createBatchScanner(tableName, this.auths, this.queryThreads);
bs.setRanges(rangeSuperSet);
if (null != specificFieldName) {
bs.fetchColumnFamily(new Text(specificFieldName));
}
for (Entry<Key,Value> entry : bs) {
if (log.isDebugEnabled()) {
log.debug("Index entry: " + entry.getKey().toString());
}
String fieldValue = null;
if (!isReverse) {
fieldValue = entry.getKey().getRow().toString();
} else {
StringBuilder buf = new StringBuilder(entry.getKey().getRow().toString());
fieldValue = buf.reverse().toString();
}
String fieldName = entry.getKey().getColumnFamily().toString();
// Get the shard id and datatype from the colq
String colq = entry.getKey().getColumnQualifier().toString();
int separator = colq.indexOf(EvaluatingIterator.NULL_BYTE_STRING);
String shardId = null;
String datatype = null;
if (separator != -1) {
shardId = colq.substring(0, separator);
datatype = colq.substring(separator + 1);
} else {
shardId = colq;
}
// Skip this entry if the type is not correct
if (null != datatype && null != typeFilter && !typeFilter.contains(datatype)) {
continue;
}
// Parse the UID.List object from the value
Uid.List uidList = null;
try {
uidList = Uid.List.parseFrom(entry.getValue().get());
} catch (InvalidProtocolBufferException e) {
// Don't add UID information, at least we know what shards
// it is located in.
}
// Add the count for this shard to the total count for the term.
long count = 0;
Long storedCount = termCardinalities.get(fieldName);
if (null == storedCount || 0 == storedCount) {
count = uidList.getCOUNT();
} else {
count = uidList.getCOUNT() + storedCount;
}
termCardinalities.put(fieldName, count);
this.indexEntries.put(fieldName, fieldValue);
if (null == override) {
this.indexValues.put(fieldValue, fieldValue);
} else {
this.indexValues.put(fieldValue, override.getOriginalQueryValue());
}
// Create the keys
Text shard = new Text(shardId);
if (uidList.getIGNORE()) {
// Then we create a scan range that is the entire shard
if (null == override) {
results.get(new MapKey(fieldName, fieldValue)).add(new Range(shard));
} else {
results.get(override).add(new Range(shard));
}
} else {
// We should have UUIDs, create event ranges
for (String uuid : uidList.getUIDList()) {
Text cf = new Text(datatype);
TextUtil.textAppend(cf, uuid);
Key startKey = new Key(shard, cf);
Key endKey =
new Key(shard, new Text(cf.toString() + EvaluatingIterator.NULL_BYTE_STRING));
Range eventRange = new Range(startKey, true, endKey, false);
if (null == override) {
results.get(new MapKey(fieldName, fieldValue)).add(eventRange);
} else {
results.get(override).add(eventRange);
}
}
}
}
bs.close();
return results;
}
  /**
   * Computes the union of the range sets produced by the two children of an OR node, deduplicating
   * event-specific ranges whose shard is already covered by a whole-shard range. The combined
   * cardinality is the sum of both sides.
   */
  @Override
  public Object visit(ASTOrNode node, Object data) {
    boolean previouslyInOrContext = false;
    EvaluationContext ctx = null;
    if (null != data && data instanceof EvaluationContext) {
      ctx = (EvaluationContext) data;
      previouslyInOrContext = ctx.inOrContext;
    } else {
      ctx = new EvaluationContext();
    }
    ctx.inOrContext = true;
    // Process both sides of this node. Left branch first
    node.jjtGetChild(0).jjtAccept(this, ctx);
    Long leftCardinality = this.termCardinalities.get(ctx.lastProcessedTerm);
    if (null == leftCardinality) {
      leftCardinality = 0L;
    }
    TermRange leftRange = ctx.lastRange;
    if (log.isDebugEnabled()) {
      log.debug("[OR-left] term: " + ctx.lastProcessedTerm + ", cardinality: " + leftCardinality
          + ", ranges: " + leftRange.getRanges().size());
    }
    // Process the right branch
    node.jjtGetChild(1).jjtAccept(this, ctx);
    Long rightCardinality = this.termCardinalities.get(ctx.lastProcessedTerm);
    if (null == rightCardinality) {
      rightCardinality = 0L;
    }
    TermRange rightRange = ctx.lastRange;
    if (log.isDebugEnabled()) {
      log.debug("[OR-right] term: " + ctx.lastProcessedTerm + ", cardinality: " + rightCardinality
          + ", ranges: " + rightRange.getRanges().size());
    }
    // reset the state
    if (null != data && !previouslyInOrContext) {
      ctx.inOrContext = false;
    }
    // Add the ranges for the left and right branches to a TreeSet to sort them
    // (whole-shard ranges sort before the event ranges within the same shard).
    Set<Range> ranges = new TreeSet<>();
    ranges.addAll(leftRange.getRanges());
    ranges.addAll(rightRange.getRanges());
    // Now create the union set
    Set<Text> shardsAdded = new HashSet<>();
    Set<Range> returnSet = new HashSet<>();
    for (Range r : ranges) {
      if (!shardsAdded.contains(r.getStartKey().getRow())) {
        // Only add ranges with a start key for the entire shard.
        // NOTE(review): this relies on getColumnFamily() returning null for whole-shard
        // ranges — confirm, since Accumulo keys often use an empty Text instead of null.
        if (r.getStartKey().getColumnFamily() == null) {
          shardsAdded.add(r.getStartKey().getRow());
        }
        returnSet.add(r);
      } else {
        // if (log.isTraceEnabled())
        log.info("Skipping event specific range: " + r.toString()
            + " because shard range has already been added: "
            + shardsAdded.contains(r.getStartKey().getRow()));
      }
    }
    // Clear the ranges from the context and add the result in its place
    TermRange orRange = new TermRange("OR_RESULT", "foo");
    orRange.addAll(returnSet);
    if (log.isDebugEnabled()) {
      log.debug("[OR] results: " + orRange.getRanges().toString());
    }
    ctx.lastRange = orRange;
    ctx.lastProcessedTerm = "OR_RESULT";
    this.termCardinalities.put("OR_RESULT", (leftCardinality + rightCardinality));
    return null;
  }
  /**
   * Computes the result of an AND node by keeping the range set of whichever child has the
   * smaller non-zero cardinality (the cheaper side to scan); a side with zero cardinality is
   * treated as "no index information" and the other side is used.
   */
  @Override
  public Object visit(ASTAndNode node, Object data) {
    boolean previouslyInAndContext = false;
    EvaluationContext ctx = null;
    if (null != data && data instanceof EvaluationContext) {
      ctx = (EvaluationContext) data;
      previouslyInAndContext = ctx.inAndContext;
    } else {
      ctx = new EvaluationContext();
    }
    ctx.inAndContext = true;
    // Process both sides of this node.
    node.jjtGetChild(0).jjtAccept(this, ctx);
    String leftTerm = ctx.lastProcessedTerm;
    Long leftCardinality = this.termCardinalities.get(leftTerm);
    if (null == leftCardinality) {
      leftCardinality = 0L;
    }
    TermRange leftRange = ctx.lastRange;
    if (log.isDebugEnabled()) {
      log.debug("[AND-left] term: " + ctx.lastProcessedTerm + ", cardinality: " + leftCardinality
          + ", ranges: " + leftRange.getRanges().size());
    }
    // Process the right branch
    node.jjtGetChild(1).jjtAccept(this, ctx);
    String rightTerm = ctx.lastProcessedTerm;
    Long rightCardinality = this.termCardinalities.get(rightTerm);
    if (null == rightCardinality) {
      rightCardinality = 0L;
    }
    TermRange rightRange = ctx.lastRange;
    if (log.isDebugEnabled()) {
      log.debug("[AND-right] term: " + ctx.lastProcessedTerm + ", cardinality: " + rightCardinality
          + ", ranges: " + rightRange.getRanges().size());
    }
    // reset the state
    if (null != data && !previouslyInAndContext) {
      ctx.inAndContext = false;
    }
    long card = 0L;
    TermRange andRange = new TermRange("AND_RESULT", "foo");
    // Pick the side with the lower non-zero cardinality; when both are zero neither branch
    // matches and andRange stays empty.
    if ((leftCardinality > 0 && leftCardinality <= rightCardinality) || rightCardinality == 0) {
      card = leftCardinality;
      andRange.addAll(leftRange.getRanges());
    } else if ((rightCardinality > 0 && rightCardinality <= leftCardinality)
        || leftCardinality == 0) {
      card = rightCardinality;
      andRange.addAll(rightRange.getRanges());
    }
    if (log.isDebugEnabled()) {
      log.debug("[AND] results: " + andRange.getRanges().toString());
    }
    ctx.lastRange = andRange;
    ctx.lastProcessedTerm = "AND_RESULT";
    this.termCardinalities.put("AND_RESULT", card);
    return null;
  }
  /**
   * Handles an equality comparison: records the term and installs the shard ranges previously
   * cached for it by the global index lookup in {@code execute}. If the term was not found in the
   * cache, an empty range is installed.
   */
  @Override
  public Object visit(ASTEQNode node, Object data) {
    StringBuilder fieldName = new StringBuilder();
    ObjectHolder value = new ObjectHolder();
    // Process both sides of this node.
    Object left = node.jjtGetChild(0).jjtAccept(this, data);
    Object right = node.jjtGetChild(1).jjtAccept(this, data);
    // Ignore functions in the query
    if (left instanceof FunctionResult || right instanceof FunctionResult) {
      return null;
    }
    decodeResults(left, right, fieldName, value);
    // We need to check to see if we are in a NOT context. If so,
    // then we need to reverse the negation.
    boolean negated = false;
    if (null != data && data instanceof EvaluationContext) {
      EvaluationContext ctx = (EvaluationContext) data;
      if (ctx.inNotContext) {
        negated = !negated;
      }
    }
    QueryTerm term = new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()),
        value.getObject());
    termsCopy.put(fieldName.toString(), term);
    // Get the terms from the global index
    // Remove the begin and end ' marks
    String termValue = null;
    if (((String) term.getValue()).startsWith("'") && ((String) term.getValue()).endsWith("'")) {
      termValue = ((String) term.getValue()).substring(1, ((String) term.getValue()).length() - 1);
    } else {
      termValue = (String) term.getValue();
    }
    // Get the values found in the index for this query term.
    // If multiple cached keys match this field name, the last one wins.
    TermRange ranges = null;
    for (MapKey key : this.originalQueryValues.get(termValue)) {
      if (key.getFieldName().equalsIgnoreCase(fieldName.toString())) {
        ranges = this.globalIndexResults.get(key);
        if (log.isDebugEnabled()) {
          log.debug("Results for cached index ranges for key: " + key + " are " + ranges);
        }
      }
    }
    // If no result for this field name and value, then add empty range
    if (null == ranges) {
      ranges = new TermRange(fieldName.toString(), term.getValue());
    }
    if (null != data && data instanceof EvaluationContext) {
      EvaluationContext ctx = (EvaluationContext) data;
      ctx.lastRange = ranges;
      ctx.lastProcessedTerm = fieldName.toString();
    }
    return null;
  }
@Override
public Object visit(ASTNENode node, Object data) {
  // Resolve the field name and literal value from this node's children.
  StringBuilder fieldName = new StringBuilder();
  ObjectHolder value = new ObjectHolder();
  Object left = node.jjtGetChild(0).jjtAccept(this, data);
  Object right = node.jjtGetChild(1).jjtAccept(this, data);
  // Functions in the query are ignored.
  if (left instanceof FunctionResult || right instanceof FunctionResult) {
    return null;
  }
  decodeResults(left, right, fieldName, value);
  // != is negated by default; an enclosing NOT context flips it back.
  boolean negated = true;
  if (data instanceof EvaluationContext && ((EvaluationContext) data).inNotContext) {
    negated = false;
  }
  if (negated) {
    negatedTerms.add(fieldName.toString());
  }
  QueryTerm term = new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()),
      value.getObject());
  termsCopy.put(fieldName.toString(), term);
  // The global index only supports equality; record a fake (empty) range and a
  // zero cardinality so the optimizer will not drive the query off this term.
  if (data instanceof EvaluationContext) {
    EvaluationContext ctx = (EvaluationContext) data;
    ctx.lastRange = new TermRange(fieldName.toString(), term.getValue());
    ctx.lastProcessedTerm = fieldName.toString();
    termCardinalities.put(fieldName.toString(), 0L);
  }
  return null;
}
@Override
public Object visit(ASTLTNode node, Object data) {
  StringBuilder fieldName = new StringBuilder();
  ObjectHolder value = new ObjectHolder();
  // Evaluate both children; decodeResults determines which side is the field
  // name and which is the literal value.
  Object left = node.jjtGetChild(0).jjtAccept(this, data);
  Object right = node.jjtGetChild(1).jjtAccept(this, data);
  // Functions in the query are ignored for range calculation.
  if (left instanceof FunctionResult || right instanceof FunctionResult) {
    return null;
  }
  decodeResults(left, right, fieldName, value);
  // An enclosing NOT context reverses the negation of this term.
  // (instanceof already rejects null, so no separate null check is needed.)
  boolean negated = false;
  if (data instanceof EvaluationContext) {
    EvaluationContext ctx = (EvaluationContext) data;
    if (ctx.inNotContext) {
      negated = !negated;
    }
  }
  QueryTerm term = new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()),
      value.getObject());
  termsCopy.put(fieldName.toString(), term);
  // Strip the surrounding single-quote marks from the literal, if present. The
  // length check avoids a StringIndexOutOfBoundsException for the degenerate
  // one-character value "'".
  String termValue = (String) term.getValue();
  if (termValue.length() > 1 && termValue.startsWith("'") && termValue.endsWith("'")) {
    termValue = termValue.substring(1, termValue.length() - 1);
  }
  // Look up the cached global-index ranges for this field/value pair. Guard
  // against a missing entry so an unknown term value cannot cause an NPE.
  TermRange ranges = null;
  if (null != this.originalQueryValues.get(termValue)) {
    for (MapKey key : this.originalQueryValues.get(termValue)) {
      if (key.getFieldName().equalsIgnoreCase(fieldName.toString())) {
        ranges = this.globalIndexResults.get(key);
        if (log.isDebugEnabled()) {
          log.debug("Results for cached index ranges for key: " + key + " are " + ranges);
        }
      }
    }
  }
  // If no result for this field name and value, then record an empty range.
  if (null == ranges) {
    ranges = new TermRange(fieldName.toString(), term.getValue());
  }
  // Publish the resolved range to the evaluation context for the caller.
  if (data instanceof EvaluationContext) {
    EvaluationContext ctx = (EvaluationContext) data;
    ctx.lastRange = ranges;
    ctx.lastProcessedTerm = fieldName.toString();
  }
  return null;
}
@Override
public Object visit(ASTGTNode node, Object data) {
  StringBuilder fieldName = new StringBuilder();
  ObjectHolder value = new ObjectHolder();
  // Evaluate both children; decodeResults determines which side is the field
  // name and which is the literal value.
  Object left = node.jjtGetChild(0).jjtAccept(this, data);
  Object right = node.jjtGetChild(1).jjtAccept(this, data);
  // Functions in the query are ignored for range calculation.
  if (left instanceof FunctionResult || right instanceof FunctionResult) {
    return null;
  }
  decodeResults(left, right, fieldName, value);
  // An enclosing NOT context reverses the negation of this term.
  // (instanceof already rejects null, so no separate null check is needed.)
  boolean negated = false;
  if (data instanceof EvaluationContext) {
    EvaluationContext ctx = (EvaluationContext) data;
    if (ctx.inNotContext) {
      negated = !negated;
    }
  }
  QueryTerm term = new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()),
      value.getObject());
  termsCopy.put(fieldName.toString(), term);
  // Strip the surrounding single-quote marks from the literal, if present. The
  // length check avoids a StringIndexOutOfBoundsException for the degenerate
  // one-character value "'".
  String termValue = (String) term.getValue();
  if (termValue.length() > 1 && termValue.startsWith("'") && termValue.endsWith("'")) {
    termValue = termValue.substring(1, termValue.length() - 1);
  }
  // Look up the cached global-index ranges for this field/value pair. Guard
  // against a missing entry so an unknown term value cannot cause an NPE.
  TermRange ranges = null;
  if (null != this.originalQueryValues.get(termValue)) {
    for (MapKey key : this.originalQueryValues.get(termValue)) {
      if (key.getFieldName().equalsIgnoreCase(fieldName.toString())) {
        ranges = this.globalIndexResults.get(key);
        if (log.isDebugEnabled()) {
          log.debug("Results for cached index ranges for key: " + key + " are " + ranges);
        }
      }
    }
  }
  // If no result for this field name and value, then record an empty range.
  if (null == ranges) {
    ranges = new TermRange(fieldName.toString(), term.getValue());
  }
  // Publish the resolved range to the evaluation context for the caller.
  if (data instanceof EvaluationContext) {
    EvaluationContext ctx = (EvaluationContext) data;
    ctx.lastRange = ranges;
    ctx.lastProcessedTerm = fieldName.toString();
  }
  return null;
}
@Override
public Object visit(ASTLENode node, Object data) {
  StringBuilder fieldName = new StringBuilder();
  ObjectHolder value = new ObjectHolder();
  // Evaluate both children; decodeResults determines which side is the field
  // name and which is the literal value.
  Object left = node.jjtGetChild(0).jjtAccept(this, data);
  Object right = node.jjtGetChild(1).jjtAccept(this, data);
  // Functions in the query are ignored for range calculation.
  if (left instanceof FunctionResult || right instanceof FunctionResult) {
    return null;
  }
  decodeResults(left, right, fieldName, value);
  // An enclosing NOT context reverses the negation of this term.
  // (instanceof already rejects null, so no separate null check is needed.)
  boolean negated = false;
  if (data instanceof EvaluationContext) {
    EvaluationContext ctx = (EvaluationContext) data;
    if (ctx.inNotContext) {
      negated = !negated;
    }
  }
  QueryTerm term = new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()),
      value.getObject());
  termsCopy.put(fieldName.toString(), term);
  // Strip the surrounding single-quote marks from the literal, if present. The
  // length check avoids a StringIndexOutOfBoundsException for the degenerate
  // one-character value "'".
  String termValue = (String) term.getValue();
  if (termValue.length() > 1 && termValue.startsWith("'") && termValue.endsWith("'")) {
    termValue = termValue.substring(1, termValue.length() - 1);
  }
  // Look up the cached global-index ranges for this field/value pair. Guard
  // against a missing entry so an unknown term value cannot cause an NPE.
  TermRange ranges = null;
  if (null != this.originalQueryValues.get(termValue)) {
    for (MapKey key : this.originalQueryValues.get(termValue)) {
      if (key.getFieldName().equalsIgnoreCase(fieldName.toString())) {
        ranges = this.globalIndexResults.get(key);
        if (log.isDebugEnabled()) {
          log.debug("Results for cached index ranges for key: " + key + " are " + ranges);
        }
      }
    }
  }
  // If no result for this field name and value, then record an empty range.
  if (null == ranges) {
    ranges = new TermRange(fieldName.toString(), term.getValue());
  }
  // Publish the resolved range to the evaluation context for the caller.
  if (data instanceof EvaluationContext) {
    EvaluationContext ctx = (EvaluationContext) data;
    ctx.lastRange = ranges;
    ctx.lastProcessedTerm = fieldName.toString();
  }
  return null;
}
@Override
public Object visit(ASTGENode node, Object data) {
  StringBuilder fieldName = new StringBuilder();
  ObjectHolder value = new ObjectHolder();
  // Evaluate both children; decodeResults determines which side is the field
  // name and which is the literal value.
  Object left = node.jjtGetChild(0).jjtAccept(this, data);
  Object right = node.jjtGetChild(1).jjtAccept(this, data);
  // Functions in the query are ignored for range calculation.
  if (left instanceof FunctionResult || right instanceof FunctionResult) {
    return null;
  }
  decodeResults(left, right, fieldName, value);
  // An enclosing NOT context reverses the negation of this term.
  // (instanceof already rejects null, so no separate null check is needed.)
  boolean negated = false;
  if (data instanceof EvaluationContext) {
    EvaluationContext ctx = (EvaluationContext) data;
    if (ctx.inNotContext) {
      negated = !negated;
    }
  }
  QueryTerm term = new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()),
      value.getObject());
  termsCopy.put(fieldName.toString(), term);
  // Strip the surrounding single-quote marks from the literal, if present. The
  // length check avoids a StringIndexOutOfBoundsException for the degenerate
  // one-character value "'".
  String termValue = (String) term.getValue();
  if (termValue.length() > 1 && termValue.startsWith("'") && termValue.endsWith("'")) {
    termValue = termValue.substring(1, termValue.length() - 1);
  }
  // Look up the cached global-index ranges for this field/value pair. Guard
  // against a missing entry so an unknown term value cannot cause an NPE.
  TermRange ranges = null;
  if (null != this.originalQueryValues.get(termValue)) {
    for (MapKey key : this.originalQueryValues.get(termValue)) {
      if (key.getFieldName().equalsIgnoreCase(fieldName.toString())) {
        ranges = this.globalIndexResults.get(key);
        if (log.isDebugEnabled()) {
          log.debug("Results for cached index ranges for key: " + key + " are " + ranges);
        }
      }
    }
  }
  // If no result for this field name and value, then record an empty range.
  if (null == ranges) {
    ranges = new TermRange(fieldName.toString(), term.getValue());
  }
  // Publish the resolved range to the evaluation context for the caller.
  if (data instanceof EvaluationContext) {
    EvaluationContext ctx = (EvaluationContext) data;
    ctx.lastRange = ranges;
    ctx.lastProcessedTerm = fieldName.toString();
  }
  return null;
}
@Override
public Object visit(ASTERNode node, Object data) {
  StringBuilder fieldName = new StringBuilder();
  ObjectHolder value = new ObjectHolder();
  // Evaluate both children; decodeResults determines which side is the field
  // name and which is the literal value.
  Object left = node.jjtGetChild(0).jjtAccept(this, data);
  Object right = node.jjtGetChild(1).jjtAccept(this, data);
  // Functions in the query are ignored for range calculation.
  if (left instanceof FunctionResult || right instanceof FunctionResult) {
    return null;
  }
  decodeResults(left, right, fieldName, value);
  // An enclosing NOT context reverses the negation of this term.
  // (instanceof already rejects null, so no separate null check is needed.)
  boolean negated = false;
  if (data instanceof EvaluationContext) {
    EvaluationContext ctx = (EvaluationContext) data;
    if (ctx.inNotContext) {
      negated = !negated;
    }
  }
  QueryTerm term = new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()),
      value.getObject());
  termsCopy.put(fieldName.toString(), term);
  // Strip the surrounding single-quote marks from the literal, if present. The
  // length check avoids a StringIndexOutOfBoundsException for the degenerate
  // one-character value "'".
  String termValue = (String) term.getValue();
  if (termValue.length() > 1 && termValue.startsWith("'") && termValue.endsWith("'")) {
    termValue = termValue.substring(1, termValue.length() - 1);
  }
  // Look up the cached global-index ranges for this field/value pair. Guard
  // against a missing entry so an unknown term value cannot cause an NPE.
  TermRange ranges = null;
  if (null != this.originalQueryValues.get(termValue)) {
    for (MapKey key : this.originalQueryValues.get(termValue)) {
      if (key.getFieldName().equalsIgnoreCase(fieldName.toString())) {
        ranges = this.globalIndexResults.get(key);
        if (log.isDebugEnabled()) {
          log.debug("Results for cached index ranges for key: " + key + " are " + ranges);
        }
      }
    }
  }
  // If no result for this field name and value, then record an empty range.
  if (null == ranges) {
    ranges = new TermRange(fieldName.toString(), term.getValue());
  }
  // Publish the resolved range to the evaluation context for the caller.
  if (data instanceof EvaluationContext) {
    EvaluationContext ctx = (EvaluationContext) data;
    ctx.lastRange = ranges;
    ctx.lastProcessedTerm = fieldName.toString();
  }
  return null;
}
@Override
public Object visit(ASTNRNode node, Object data) {
  // Resolve the field name and literal value from this node's children.
  StringBuilder fieldName = new StringBuilder();
  ObjectHolder value = new ObjectHolder();
  Object left = node.jjtGetChild(0).jjtAccept(this, data);
  Object right = node.jjtGetChild(1).jjtAccept(this, data);
  // Functions in the query are ignored.
  if (left instanceof FunctionResult || right instanceof FunctionResult) {
    return null;
  }
  decodeResults(left, right, fieldName, value);
  // !~ is negated by default; an enclosing NOT context flips it back.
  boolean negated = true;
  if (data instanceof EvaluationContext && ((EvaluationContext) data).inNotContext) {
    negated = false;
  }
  if (negated) {
    negatedTerms.add(fieldName.toString());
  }
  QueryTerm term = new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()),
      value.getObject());
  termsCopy.put(fieldName.toString(), term);
  // The global index only supports equality; record a fake (empty) range and a
  // zero cardinality so the optimizer will not drive the query off this term.
  if (data instanceof EvaluationContext) {
    EvaluationContext ctx = (EvaluationContext) data;
    ctx.lastRange = new TermRange(fieldName.toString(), term.getValue());
    ctx.lastProcessedTerm = fieldName.toString();
    termCardinalities.put(fieldName.toString(), 0L);
  }
  return null;
}
@Override
public Object visit(ASTNullLiteral node, Object data) {
  // Publish a placeholder range and zero cardinality for the null literal.
  if (data instanceof EvaluationContext) {
    EvaluationContext ctx = (EvaluationContext) data;
    ctx.lastRange = new TermRange("null", "null");
    ctx.lastProcessedTerm = "null";
    termCardinalities.put("null", 0L);
  }
  return new LiteralResult(node.image);
}
@Override
public Object visit(ASTTrueNode node, Object data) {
  // Publish a placeholder range and zero cardinality for the boolean literal.
  if (data instanceof EvaluationContext) {
    EvaluationContext ctx = (EvaluationContext) data;
    ctx.lastRange = new TermRange("true", "true");
    ctx.lastProcessedTerm = "true";
    termCardinalities.put("true", 0L);
  }
  return new LiteralResult(node.image);
}
@Override
public Object visit(ASTFalseNode node, Object data) {
  // Publish a placeholder range and zero cardinality for the boolean literal.
  if (data instanceof EvaluationContext) {
    EvaluationContext ctx = (EvaluationContext) data;
    ctx.lastRange = new TermRange("false", "false");
    ctx.lastProcessedTerm = "false";
    termCardinalities.put("false", 0L);
  }
  return new LiteralResult(node.image);
}
@Override
public Object visit(ASTFunctionNode node, Object data) {
// Collects the terms referenced by a function call's arguments.
// objectNode 0 is the prefix (namespace)
// objectNode 1 is the identifier (function name); the others are parameters.
// process the remaining arguments
FunctionResult fr = new FunctionResult();
int argc = node.jjtGetNumChildren() - 2;
for (int i = 0; i < argc; i++) {
// Visit each argument; only field-name (TermResult) arguments are recorded.
Object result = node.jjtGetChild(i + 2).jjtAccept(this, data);
if (result instanceof TermResult) {
TermResult tr = (TermResult) result;
fr.getTerms().add(tr);
// Record the field name with no QueryTerm: functions are evaluated per
// event, not via the global index.
termsCopy.put((String) tr.value, null);
}
}
if (null != data && data instanceof EvaluationContext) {
// Publish a placeholder range keyed on the function prefix/name and a zero
// cardinality so the optimizer does not drive the query off this function.
EvaluationContext ctx = (EvaluationContext) data;
ctx.lastRange = new TermRange(node.jjtGetChild(0).image, node.jjtGetChild(1).image);
ctx.lastProcessedTerm = node.jjtGetChild(0).image;
termCardinalities.put(node.jjtGetChild(0).image, 0L);
}
return fr;
}
}
| 6,221 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/parser/QueryParser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.parser;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.collections.map.LRUMap;
import org.apache.commons.jexl2.parser.ASTAdditiveNode;
import org.apache.commons.jexl2.parser.ASTAdditiveOperator;
import org.apache.commons.jexl2.parser.ASTAmbiguous;
import org.apache.commons.jexl2.parser.ASTAndNode;
import org.apache.commons.jexl2.parser.ASTArrayAccess;
import org.apache.commons.jexl2.parser.ASTArrayLiteral;
import org.apache.commons.jexl2.parser.ASTAssignment;
import org.apache.commons.jexl2.parser.ASTBitwiseAndNode;
import org.apache.commons.jexl2.parser.ASTBitwiseComplNode;
import org.apache.commons.jexl2.parser.ASTBitwiseOrNode;
import org.apache.commons.jexl2.parser.ASTBitwiseXorNode;
import org.apache.commons.jexl2.parser.ASTBlock;
import org.apache.commons.jexl2.parser.ASTConstructorNode;
import org.apache.commons.jexl2.parser.ASTDivNode;
import org.apache.commons.jexl2.parser.ASTEQNode;
import org.apache.commons.jexl2.parser.ASTERNode;
import org.apache.commons.jexl2.parser.ASTEmptyFunction;
import org.apache.commons.jexl2.parser.ASTFalseNode;
import org.apache.commons.jexl2.parser.ASTFloatLiteral;
import org.apache.commons.jexl2.parser.ASTForeachStatement;
import org.apache.commons.jexl2.parser.ASTFunctionNode;
import org.apache.commons.jexl2.parser.ASTGENode;
import org.apache.commons.jexl2.parser.ASTGTNode;
import org.apache.commons.jexl2.parser.ASTIdentifier;
import org.apache.commons.jexl2.parser.ASTIfStatement;
import org.apache.commons.jexl2.parser.ASTIntegerLiteral;
import org.apache.commons.jexl2.parser.ASTJexlScript;
import org.apache.commons.jexl2.parser.ASTLENode;
import org.apache.commons.jexl2.parser.ASTLTNode;
import org.apache.commons.jexl2.parser.ASTMapEntry;
import org.apache.commons.jexl2.parser.ASTMapLiteral;
import org.apache.commons.jexl2.parser.ASTMethodNode;
import org.apache.commons.jexl2.parser.ASTModNode;
import org.apache.commons.jexl2.parser.ASTMulNode;
import org.apache.commons.jexl2.parser.ASTNENode;
import org.apache.commons.jexl2.parser.ASTNRNode;
import org.apache.commons.jexl2.parser.ASTNotNode;
import org.apache.commons.jexl2.parser.ASTNullLiteral;
import org.apache.commons.jexl2.parser.ASTOrNode;
import org.apache.commons.jexl2.parser.ASTReference;
import org.apache.commons.jexl2.parser.ASTSizeFunction;
import org.apache.commons.jexl2.parser.ASTSizeMethod;
import org.apache.commons.jexl2.parser.ASTStringLiteral;
import org.apache.commons.jexl2.parser.ASTTernaryNode;
import org.apache.commons.jexl2.parser.ASTTrueNode;
import org.apache.commons.jexl2.parser.ASTUnaryMinusNode;
import org.apache.commons.jexl2.parser.ASTWhileStatement;
import org.apache.commons.jexl2.parser.ParseException;
import org.apache.commons.jexl2.parser.Parser;
import org.apache.commons.jexl2.parser.ParserVisitor;
import org.apache.commons.jexl2.parser.SimpleNode;
import org.apache.hadoop.util.hash.Hash;
import org.apache.hadoop.util.hash.MurmurHash;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
/**
* Parses the query for the purposes of extracting terms, operators, and literals for query optimization. This class does not necessarily understand how to
* parse all of the possible combinations of the JEXL syntax, but that does not mean that the query will not evaluate against the event objects. It means that
* the unsupported operators will not be parsed and included in the optimization step.
*
*/
public class QueryParser implements ParserVisitor {
/**
 * A single comparison extracted from the query: the JEXL operator, the literal value the field
 * was compared against, and whether the comparison appeared in a negated context.
 */
public static class QueryTerm {
  private boolean negated = false;
  private String operator = null;
  private Object value = null;

  public QueryTerm(boolean negated, String operator, Object value) {
    this.negated = negated;
    this.operator = operator;
    this.value = value;
  }

  /** @return true if this term appeared in a negated context */
  public boolean isNegated() {
    return negated;
  }

  /** @return the JEXL operator string, e.g. {@code "=="} */
  public String getOperator() {
    return operator;
  }

  /** @return the literal value the field was compared against */
  public Object getValue() {
    return value;
  }

  public void setNegated(boolean negated) {
    this.negated = negated;
  }

  public void setOperator(String operator) {
    this.operator = operator;
  }

  public void setValue(Object value) {
    this.value = value;
  }

  @Override
  public String toString() {
    StringBuilder buf = new StringBuilder();
    buf.append("negated: ").append(negated).append(", operator: ").append(operator)
        .append(", value: ").append(value);
    return buf.toString();
  }
}
/**
 * Mutable holder used to hand a decoded literal value back from {@code decodeResults}.
 */
static class ObjectHolder {
  Object object;

  public Object getObject() {
    return this.object;
  }

  public void setObject(Object object) {
    this.object = object;
  }
}
/**
 * Result of visiting a function node; accumulates the terms (field names) referenced by the
 * function's arguments.
 */
static class FunctionResult {
  private List<TermResult> terms = new ArrayList<TermResult>();

  public List<TermResult> getTerms() {
    return this.terms;
  }
}
/**
 * Holder object for a term (i.e. field name) encountered while walking the AST.
 */
static class TermResult {
// The field name, as produced by the identifier visit.
Object value;
public TermResult(Object value) {
this.value = value;
}
}
/**
 * Holder object for a literal (integer, float, string, or null literal) value encountered while
 * walking the AST.
 */
static class LiteralResult {
// The literal's value (String, Integer, Float, etc.).
Object value;
public LiteralResult(Object value) {
this.value = value;
}
}
/**
 * Object used to store context information as the AST is being iterated over.
 */
static class EvaluationContext {
// True while visiting the children of an OR node.
boolean inOrContext = false;
// True when inside a NOT context; the comparison visits use this to flip
// the negation of a term.
boolean inNotContext = false;
// True while visiting the children of an AND node.
boolean inAndContext = false;
}
/**
 * Immutable record of the results of parsing one query. Instances are stored in the shared LRU
 * cache keyed by the query's hash, so the referenced collections must not be mutated after
 * construction.
 */
private static class CacheEntry {
  private final Set<String> negatedTerms;
  private final Set<String> andTerms;
  private final Set<String> orTerms;
  private final Set<Object> literals;
  private final Multimap<String,QueryTerm> terms;
  private final ASTJexlScript rootNode;
  private final TreeNode tree;

  public CacheEntry(Set<String> negatedTerms, Set<String> andTerms, Set<String> orTerms,
      Set<Object> literals, Multimap<String,QueryTerm> terms, ASTJexlScript rootNode,
      TreeNode tree) {
    this.negatedTerms = negatedTerms;
    this.andTerms = andTerms;
    this.orTerms = orTerms;
    this.literals = literals;
    this.terms = terms;
    this.rootNode = rootNode;
    this.tree = tree;
  }

  public Set<String> getNegatedTerms() {
    return negatedTerms;
  }

  public Set<String> getAndTerms() {
    return andTerms;
  }

  public Set<String> getOrTerms() {
    return orTerms;
  }

  public Set<Object> getLiterals() {
    return literals;
  }

  public Multimap<String,QueryTerm> getTerms() {
    return terms;
  }

  public ASTJexlScript getRootNode() {
    return rootNode;
  }

  public TreeNode getTree() {
    return tree;
  }
}
// Seed for the MurmurHash used to key the query cache.
private static final int SEED = 650567;
// Cache of previously parsed queries, keyed by the MurmurHash of the normalized query string.
// Shared across all QueryParser instances; access is guarded by synchronized blocks in execute().
private static LRUMap cache = new LRUMap();
// Field names that appear in a negated context.
protected Set<String> negatedTerms = new HashSet<String>();
// Field names that appear under an AND clause.
private Set<String> andTerms = new HashSet<String>();
// Field names that appear under an OR clause.
private Set<String> orTerms = new HashSet<String>();
/**
 * List of String, Integer, Float, etc literals that were passed in the query
 */
private Set<Object> literals = new HashSet<Object>();
/**
 * Map of terms (field names) to QueryTerm objects.
 */
private Multimap<String,QueryTerm> terms = HashMultimap.create();
// Root of the parsed JEXL AST for the most recently executed query.
private ASTJexlScript rootNode = null;
// Iterator tree built from the AST by TreeBuilder.
private TreeNode tree = null;
// MurmurHash of the most recently executed (normalized) query string.
private int hashVal = 0;
public QueryParser() {}
/**
 * Discards state from any previous query before a new parse.
 *
 * Allocates fresh collections rather than clearing the existing ones: after a cache hit in
 * execute(), this parser's fields reference the very same collection instances held by the
 * CacheEntry stored in the shared cache, so calling clear() here would silently corrupt the
 * cached results for that query. (terms was already replaced, not cleared, for this reason.)
 */
private void reset() {
  this.negatedTerms = new HashSet<String>();
  this.andTerms = new HashSet<String>();
  this.orTerms = new HashSet<String>();
  this.literals = new HashSet<Object>();
  this.terms = HashMultimap.create();
}
/**
 * Parses the supplied JEXL query, populating the term/literal collections, the AST, and the
 * iterator tree. Results for previously seen queries are served from a shared LRU cache keyed
 * by a MurmurHash of the normalized query string.
 *
 * @param query the JEXL query string (may use uppercase AND/OR/NOT)
 * @throws ParseException if the query cannot be parsed
 */
public void execute(String query) throws ParseException {
reset();
// Normalize SQL-style uppercase boolean keywords to JEXL's lowercase forms.
query = query.replaceAll("\\s+AND\\s+", " and ");
query = query.replaceAll("\\s+OR\\s+", " or ");
query = query.replaceAll("\\s+NOT\\s+", " not ");
// Check to see if its in the cache
Hash hash = MurmurHash.getInstance();
this.hashVal = hash.hash(query.getBytes(), SEED);
CacheEntry entry = null;
synchronized (cache) {
entry = (CacheEntry) cache.get(hashVal);
}
if (entry != null) {
// Cache hit: adopt the previously computed results. NOTE(review): these are
// the same collection instances held by the cache entry, so they must not be
// mutated in place afterwards.
this.negatedTerms = entry.getNegatedTerms();
this.andTerms = entry.getAndTerms();
this.orTerms = entry.getOrTerms();
this.literals = entry.getLiterals();
this.terms = entry.getTerms();
this.rootNode = entry.getRootNode();
this.tree = entry.getTree();
} else {
// Cache miss: parse the query and walk the AST to collect terms. The ";"
// pre-parse initializes the reusable Parser instance.
Parser p = new Parser(new StringReader(";"));
rootNode = p.parse(new StringReader(query), null);
rootNode.childrenAccept(this, null);
TreeBuilder builder = new TreeBuilder(rootNode);
tree = builder.getRootNode();
entry = new CacheEntry(this.negatedTerms, this.andTerms, this.orTerms, this.literals, this.terms, rootNode, tree);
synchronized (cache) {
cache.put(hashVal, entry);
}
}
}
/**
 * @return this query's hash value, as computed by execute()
 */
public int getHashValue() {
return this.hashVal;
}
/**
 * @return the iterator tree built from the most recently executed query, or null if execute() has not been called
 */
public TreeNode getIteratorTree() {
return this.tree;
}
/**
 * @return JEXL abstract syntax tree for the most recently executed query
 */
public ASTJexlScript getAST() {
return this.rootNode;
}
/**
 * @return Set of field names to use in the optimizer for nots. As a general rule none of these terms should be used to find an event; they should
 *         be evaluated on each event after being found.
 */
public Set<String> getNegatedTermsForOptimizer() {
return negatedTerms;
}
/**
 * @return Set of field names to use in the optimizer for ands. As a general rule any one term of an and clause can be used to find associated events.
 */
public Set<String> getAndTermsForOptimizer() {
return andTerms;
}
/**
 * @return Set of field names to use in the optimizer for ors. As a general rule any terms that are part of an or clause need to be searched to find the
 *         associated events.
 */
public Set<String> getOrTermsForOptimizer() {
return orTerms;
}
/**
 * @return String, Integer, and Float literals used in the query.
 */
public Set<Object> getQueryLiterals() {
return literals;
}
/**
 * @return Set of all identifiers (field names) in the query.
 */
public Set<String> getQueryIdentifiers() {
return terms.keySet();
}
/**
 * @return map of term (field name) to QueryTerm object
 */
public Multimap<String,QueryTerm> getQueryTerms() {
return terms;
}
// The JEXL constructs below carry no field/value information usable for term
// extraction, so they are intentionally ignored (see class javadoc: unsupported
// operators are simply not included in the optimization step).
public Object visit(SimpleNode node, Object data) {
return null;
}
public Object visit(ASTJexlScript node, Object data) {
return null;
}
public Object visit(ASTBlock node, Object data) {
return null;
}
public Object visit(ASTAmbiguous node, Object data) {
return null;
}
public Object visit(ASTIfStatement node, Object data) {
return null;
}
public Object visit(ASTWhileStatement node, Object data) {
return null;
}
public Object visit(ASTForeachStatement node, Object data) {
return null;
}
public Object visit(ASTAssignment node, Object data) {
return null;
}
public Object visit(ASTTernaryNode node, Object data) {
return null;
}
public Object visit(ASTOrNode node, Object data) {
  // Mark that we are inside an OR while both operands are visited, then
  // restore the caller's state afterwards.
  EvaluationContext ctx;
  boolean wasInOr = false;
  if (data instanceof EvaluationContext) {
    ctx = (EvaluationContext) data;
    wasInOr = ctx.inOrContext;
  } else {
    ctx = new EvaluationContext();
  }
  ctx.inOrContext = true;
  node.jjtGetChild(0).jjtAccept(this, ctx);
  node.jjtGetChild(1).jjtAccept(this, ctx);
  // Only clear the flag if the caller supplied the context and was not
  // already inside an OR.
  if (null != data && !wasInOr) {
    ctx.inOrContext = false;
  }
  return null;
}
public Object visit(ASTAndNode node, Object data) {
  // Mark that we are inside an AND while both operands are visited, then
  // restore the caller's state afterwards.
  EvaluationContext ctx;
  boolean wasInAnd = false;
  if (data instanceof EvaluationContext) {
    ctx = (EvaluationContext) data;
    wasInAnd = ctx.inAndContext;
  } else {
    ctx = new EvaluationContext();
  }
  ctx.inAndContext = true;
  node.jjtGetChild(0).jjtAccept(this, ctx);
  node.jjtGetChild(1).jjtAccept(this, ctx);
  // Only clear the flag if the caller supplied the context and was not
  // already inside an AND.
  if (null != data && !wasInAnd) {
    ctx.inAndContext = false;
  }
  return null;
}
// Bitwise operators carry no field/value information; they are ignored.
public Object visit(ASTBitwiseOrNode node, Object data) {
return null;
}
public Object visit(ASTBitwiseXorNode node, Object data) {
return null;
}
public Object visit(ASTBitwiseAndNode node, Object data) {
return null;
}
public Object visit(ASTEQNode node, Object data) {
  // Evaluate both operands; decodeResults works out which side is the
  // identifier and which is the literal.
  Object lhs = node.jjtGetChild(0).jjtAccept(this, data);
  Object rhs = node.jjtGetChild(1).jjtAccept(this, data);
  // Function calls are not recorded as query terms.
  if (lhs instanceof FunctionResult || rhs instanceof FunctionResult) {
    return null;
  }
  StringBuilder field = new StringBuilder();
  ObjectHolder holder = new ObjectHolder();
  decodeResults(lhs, rhs, field, holder);
  // An enclosing NOT inverts the (default false) negation of this term.
  boolean negated = false;
  if (data instanceof EvaluationContext && ((EvaluationContext) data).inNotContext) {
    negated = true;
  }
  terms.put(field.toString(),
      new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()), holder.getObject()));
  return null;
}
public Object visit(ASTNENode node, Object data) {
  // Evaluate both operands; decodeResults works out which side is the
  // identifier and which is the literal.
  Object lhs = node.jjtGetChild(0).jjtAccept(this, data);
  Object rhs = node.jjtGetChild(1).jjtAccept(this, data);
  // Function calls are not recorded as query terms.
  if (lhs instanceof FunctionResult || rhs instanceof FunctionResult) {
    return null;
  }
  StringBuilder field = new StringBuilder();
  ObjectHolder holder = new ObjectHolder();
  decodeResults(lhs, rhs, field, holder);
  // != is negated by default; an enclosing NOT flips it back to positive.
  boolean negated = true;
  if (data instanceof EvaluationContext && ((EvaluationContext) data).inNotContext) {
    negated = false;
  }
  if (negated) {
    negatedTerms.add(field.toString());
  }
  terms.put(field.toString(),
      new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()), holder.getObject()));
  return null;
}
public Object visit(ASTLTNode node, Object data) {
  // Evaluate both operands; decodeResults works out which side is the
  // identifier and which is the literal.
  Object lhs = node.jjtGetChild(0).jjtAccept(this, data);
  Object rhs = node.jjtGetChild(1).jjtAccept(this, data);
  // Function calls are not recorded as query terms.
  if (lhs instanceof FunctionResult || rhs instanceof FunctionResult) {
    return null;
  }
  StringBuilder field = new StringBuilder();
  ObjectHolder holder = new ObjectHolder();
  decodeResults(lhs, rhs, field, holder);
  // An enclosing NOT inverts the (default false) negation of this term.
  boolean negated = false;
  if (data instanceof EvaluationContext && ((EvaluationContext) data).inNotContext) {
    negated = true;
  }
  terms.put(field.toString(),
      new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()), holder.getObject()));
  return null;
}
public Object visit(ASTGTNode node, Object data) {
  // Evaluate both operands; decodeResults works out which side is the
  // identifier and which is the literal.
  Object lhs = node.jjtGetChild(0).jjtAccept(this, data);
  Object rhs = node.jjtGetChild(1).jjtAccept(this, data);
  // Function calls are not recorded as query terms.
  if (lhs instanceof FunctionResult || rhs instanceof FunctionResult) {
    return null;
  }
  StringBuilder field = new StringBuilder();
  ObjectHolder holder = new ObjectHolder();
  decodeResults(lhs, rhs, field, holder);
  // An enclosing NOT inverts the (default false) negation of this term.
  boolean negated = false;
  if (data instanceof EvaluationContext && ((EvaluationContext) data).inNotContext) {
    negated = true;
  }
  terms.put(field.toString(),
      new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()), holder.getObject()));
  return null;
}
public Object visit(ASTLENode node, Object data) {
  // Evaluate both operands; decodeResults works out which side is the
  // identifier and which is the literal.
  Object lhs = node.jjtGetChild(0).jjtAccept(this, data);
  Object rhs = node.jjtGetChild(1).jjtAccept(this, data);
  // Function calls are not recorded as query terms.
  if (lhs instanceof FunctionResult || rhs instanceof FunctionResult) {
    return null;
  }
  StringBuilder field = new StringBuilder();
  ObjectHolder holder = new ObjectHolder();
  decodeResults(lhs, rhs, field, holder);
  // An enclosing NOT inverts the (default false) negation of this term.
  boolean negated = false;
  if (data instanceof EvaluationContext && ((EvaluationContext) data).inNotContext) {
    negated = true;
  }
  terms.put(field.toString(),
      new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()), holder.getObject()));
  return null;
}
public Object visit(ASTGENode node, Object data) {
  // Evaluate both operands; decodeResults works out which side is the
  // identifier and which is the literal.
  Object lhs = node.jjtGetChild(0).jjtAccept(this, data);
  Object rhs = node.jjtGetChild(1).jjtAccept(this, data);
  // Function calls are not recorded as query terms.
  if (lhs instanceof FunctionResult || rhs instanceof FunctionResult) {
    return null;
  }
  StringBuilder field = new StringBuilder();
  ObjectHolder holder = new ObjectHolder();
  decodeResults(lhs, rhs, field, holder);
  // An enclosing NOT inverts the (default false) negation of this term.
  boolean negated = false;
  if (data instanceof EvaluationContext && ((EvaluationContext) data).inNotContext) {
    negated = true;
  }
  terms.put(field.toString(),
      new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()), holder.getObject()));
  return null;
}
public Object visit(ASTERNode node, Object data) {
StringBuilder fieldName = new StringBuilder();
ObjectHolder value = new ObjectHolder();
// Process both sides of this node.
Object left = node.jjtGetChild(0).jjtAccept(this, data);
Object right = node.jjtGetChild(1).jjtAccept(this, data);
// Ignore functions in the query
if (left instanceof FunctionResult || right instanceof FunctionResult)
return null;
decodeResults(left, right, fieldName, value);
// We need to check to see if we are in a NOT context. If so,
// then we need to reverse the negation.
boolean negated = false;
if (null != data && data instanceof EvaluationContext) {
EvaluationContext ctx = (EvaluationContext) data;
if (ctx.inNotContext)
negated = !negated;
}
QueryTerm term = new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()), value.getObject());
terms.put(fieldName.toString(), term);
return null;
}
public Object visit(ASTNRNode node, Object data) {
StringBuilder fieldName = new StringBuilder();
ObjectHolder value = new ObjectHolder();
// Process both sides of this node.
Object left = node.jjtGetChild(0).jjtAccept(this, data);
Object right = node.jjtGetChild(1).jjtAccept(this, data);
// Ignore functions in the query
if (left instanceof FunctionResult || right instanceof FunctionResult)
return null;
decodeResults(left, right, fieldName, value);
// We need to check to see if we are in a NOT context. If so,
// then we need to reverse the negation.
boolean negated = true;
if (null != data && data instanceof EvaluationContext) {
EvaluationContext ctx = (EvaluationContext) data;
if (ctx.inNotContext)
negated = !negated;
}
if (negated)
negatedTerms.add(fieldName.toString());
QueryTerm term = new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()), value.getObject());
terms.put(fieldName.toString(), term);
return null;
}
// Arithmetic and bitwise-complement nodes carry no field/value pair to index, so
// these visitors are no-ops that contribute nothing to the collected terms.
public Object visit(ASTAdditiveNode node, Object data) {
return null;
}
public Object visit(ASTAdditiveOperator node, Object data) {
return null;
}
public Object visit(ASTMulNode node, Object data) {
return null;
}
public Object visit(ASTDivNode node, Object data) {
return null;
}
public Object visit(ASTModNode node, Object data) {
return null;
}
public Object visit(ASTUnaryMinusNode node, Object data) {
return null;
}
public Object visit(ASTBitwiseComplNode node, Object data) {
return null;
}
/**
 * Visits a NOT node: raises the inNotContext flag on the evaluation context (creating a
 * fresh context if the caller did not supply one), walks the negated subtree, then lowers
 * the flag again -- but only if this node was the one that raised it, so nested NOTs keep
 * the outer flag intact.
 */
public Object visit(ASTNotNode node, Object data) {
  final EvaluationContext ctx;
  boolean wasAlreadyInNotContext = false;
  if (data instanceof EvaluationContext) {
    ctx = (EvaluationContext) data;
    wasAlreadyInNotContext = ctx.inNotContext;
  } else {
    ctx = new EvaluationContext();
  }
  ctx.inNotContext = true;
  // Walk the single negated child with the NOT flag raised.
  node.jjtGetChild(0).jjtAccept(this, ctx);
  // Restore the previous state only when this node introduced the flag.
  if (data != null && !wasAlreadyInNotContext) {
    ctx.inNotContext = false;
  }
  return null;
}
/**
 * Visits an identifier: records its image under every boolean context currently active
 * (AND, NOT, OR) and returns the name wrapped in a TermResult for the parent visitor.
 */
public Object visit(ASTIdentifier node, Object data) {
  String image = node.image;
  if (data instanceof EvaluationContext) {
    EvaluationContext ctx = (EvaluationContext) data;
    if (ctx.inAndContext) {
      andTerms.add(image);
    }
    if (ctx.inNotContext) {
      negatedTerms.add(image);
    }
    if (ctx.inOrContext) {
      orTerms.add(image);
    }
  }
  return new TermResult(image);
}
// Literal visitors: each wraps the node image in a LiteralResult so the comparison
// visitors can pair it with a field name. Null/integer/float/string literals are also
// recorded in the literals collection; note that true/false are NOT recorded there.
public Object visit(ASTNullLiteral node, Object data) {
literals.add(node.image);
return new LiteralResult(node.image);
}
public Object visit(ASTTrueNode node, Object data) {
return new LiteralResult(node.image);
}
public Object visit(ASTFalseNode node, Object data) {
return new LiteralResult(node.image);
}
public Object visit(ASTIntegerLiteral node, Object data) {
literals.add(node.image);
return new LiteralResult(node.image);
}
public Object visit(ASTFloatLiteral node, Object data) {
literals.add(node.image);
return new LiteralResult(node.image);
}
// String literals are re-quoted with single quotes before being recorded.
public Object visit(ASTStringLiteral node, Object data) {
literals.add("'" + node.image + "'");
return new LiteralResult("'" + node.image + "'");
}
// Composite literals and the built-in empty()/size() functions are ignored.
public Object visit(ASTArrayLiteral node, Object data) {
return null;
}
public Object visit(ASTMapLiteral node, Object data) {
return null;
}
public Object visit(ASTMapEntry node, Object data) {
return null;
}
public Object visit(ASTEmptyFunction node, Object data) {
return null;
}
public Object visit(ASTSizeFunction node, Object data) {
return null;
}
// Visits a function call (e.g. f:name(arg1, arg2)): rebuilds the call's textual form
// from the AST images and records that text as the QueryTerm value for every TermResult
// argument of the function. Returns a FunctionResult holding the term arguments.
public Object visit(ASTFunctionNode node, Object data) {
// We need to check to see if we are in a NOT context. If so,
// then we need to reverse the negation.
// NOTE(review): the default here is negated = true, the opposite of most comparison
// visitors in this class -- confirm this is intended for function terms.
boolean negated = true;
if (null != data && data instanceof EvaluationContext) {
EvaluationContext ctx = (EvaluationContext) data;
if (ctx.inNotContext)
negated = !negated;
}
// used to rebuild function call from the AST
StringBuilder buf = new StringBuilder();
String sep = "";
// objectNode 0 is the prefix
buf.append(node.jjtGetChild(0).image).append(":");
// objectNode 1 is the identifier , the others are parameters.
buf.append(node.jjtGetChild(1).image).append("(");
// process the remaining arguments
FunctionResult fr = new FunctionResult();
int argc = node.jjtGetNumChildren() - 2;
for (int i = 0; i < argc; i++) {
// Process both sides of this node.
Object result = node.jjtGetChild(i + 2).jjtAccept(this, data);
// Term arguments are collected on the FunctionResult; any other argument type is
// appended to the rebuilt call text using its raw AST image.
if (result instanceof TermResult) {
TermResult tr = (TermResult) result;
fr.getTerms().add(tr);
buf.append(sep).append(tr.value);
sep = ", ";
} else {
buf.append(sep).append(node.jjtGetChild(i + 2).image);
sep = ", ";
}
}
buf.append(")");
// Capture the entire function call for each function parameter
for (TermResult tr : fr.terms)
terms.put((String) tr.value, new QueryTerm(negated, JexlOperatorConstants.getOperator(node.getClass()), buf.toString()));
return fr;
}
// Method calls, constructors and array access contribute no indexable terms and are
// ignored; a reference node simply forwards to its first child.
public Object visit(ASTMethodNode node, Object data) {
return null;
}
public Object visit(ASTSizeMethod node, Object data) {
return null;
}
public Object visit(ASTConstructorNode node, Object data) {
return null;
}
public Object visit(ASTArrayAccess node, Object data) {
return null;
}
public Object visit(ASTReference node, Object data) {
return node.jjtGetChild(0).jjtAccept(this, data);
}
/**
 * Extracts the field name and literal value from the results of visiting the two
 * children of a comparison node. Exactly one side must be a TermResult (the field
 * name); the other side must be a LiteralResult (the value).
 *
 * @param left result of visiting the left child
 * @param right result of visiting the right child
 * @param fieldName receives the decoded field name
 * @param holder receives the decoded literal value
 * @throws IllegalArgumentException if neither side is a term, or the opposite side is
 *           not a literal
 */
protected void decodeResults(Object left, Object right, StringBuilder fieldName, ObjectHolder holder) {
  TermResult term;
  Object literalSide;
  if (left instanceof TermResult) {
    term = (TermResult) left;
    literalSide = right;
  } else if (right instanceof TermResult) {
    term = (TermResult) right;
    literalSide = left;
  } else {
    throw new IllegalArgumentException("No Term specified in query");
  }
  fieldName.append((String) term.value);
  if (!(literalSide instanceof LiteralResult)) {
    throw new IllegalArgumentException("Object mismatch");
  }
  holder.setObject(((LiteralResult) literalSide).value);
}
}
// ==== dataset row boundary: next content is query/src/main/java/org/apache/accumulo/examples/wikisearch/parser/EventFields.java ====
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.parser;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.examples.wikisearch.parser.EventFields.FieldValue;
import com.esotericsoftware.kryo.CustomSerialization;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.serialize.ArraySerializer;
import com.esotericsoftware.kryo.serialize.IntSerializer;
import com.esotericsoftware.kryo.serialize.StringSerializer;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multiset;
import com.google.common.collect.SetMultimap;
/**
* Object used to hold the fields in an event. This is a multimap because fields can be repeated.
*/
/**
 * Holds the fields of a single event as a field-name to FieldValue multimap (a multimap
 * because fields can be repeated). Implements Kryo's CustomSerialization so results can
 * be shipped in a compact binary form via readObjectData/writeObjectData.
 */
public class EventFields implements SetMultimap<String,FieldValue>, CustomSerialization {
// One-time guard for registering the shared byte[] serializer with Kryo.
private static boolean kryoInitialized = false;
// Serializer for the visibility/value byte arrays; configured in initializeKryo().
private static ArraySerializer valueSerializer = null;
// Backing store; HashMultimap gives Set semantics per key.
private Multimap<String,FieldValue> map = null;
/**
 * One field value: the column visibility it was stored under plus the raw value bytes.
 * NOTE(review): equals()/hashCode() are not overridden, so the Set semantics of the
 * backing HashMultimap deduplicate by object identity rather than content -- confirm
 * that is intended before relying on deduplication.
 */
public static class FieldValue {
ColumnVisibility visibility;
byte[] value;
public FieldValue(ColumnVisibility visibility, byte[] value) {
super();
this.visibility = visibility;
this.value = value;
}
public ColumnVisibility getVisibility() {
return visibility;
}
public byte[] getValue() {
return value;
}
public void setVisibility(ColumnVisibility visibility) {
this.visibility = visibility;
}
public void setValue(byte[] value) {
this.value = value;
}
// Serialized payload size in bytes: flattened visibility expression plus value bytes.
public int size() {
return visibility.flatten().length + value.length;
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder();
// NOTE(review): new String(byte[]) uses the platform default charset -- acceptable
// for debug output, but confirm if this string is ever consumed programmatically.
if (null != visibility)
buf.append(" visibility: ").append(new String(visibility.flatten()));
if (null != value)
buf.append(" value size: ").append(value.length);
if (null != value)
buf.append(" value: ").append(new String(value));
return buf.toString();
}
}
public EventFields() {
map = HashMultimap.create();
}
// --- SetMultimap implementation: straight delegation to the backing HashMultimap. ---
public int size() {
return map.size();
}
public boolean isEmpty() {
return map.isEmpty();
}
public boolean containsKey(Object key) {
return map.containsKey(key);
}
public boolean containsValue(Object value) {
return map.containsValue(value);
}
public boolean containsEntry(Object key, Object value) {
return map.containsEntry(key, value);
}
public boolean put(String key, FieldValue value) {
return map.put(key, value);
}
public boolean remove(Object key, Object value) {
return map.remove(key, value);
}
public boolean putAll(String key, Iterable<? extends FieldValue> values) {
return map.putAll(key, values);
}
public boolean putAll(Multimap<? extends String,? extends FieldValue> multimap) {
return map.putAll(multimap);
}
public void clear() {
map.clear();
}
public Set<String> keySet() {
return map.keySet();
}
public Multiset<String> keys() {
return map.keys();
}
public Collection<FieldValue> values() {
return map.values();
}
// The casts below are safe because the backing map is a HashMultimap, whose views are Sets.
public Set<FieldValue> get(String key) {
return (Set<FieldValue>) map.get(key);
}
public Set<FieldValue> removeAll(Object key) {
return (Set<FieldValue>) map.removeAll(key);
}
public Set<FieldValue> replaceValues(String key, Iterable<? extends FieldValue> values) {
return (Set<FieldValue>) map.replaceValues(key, values);
}
public Set<Entry<String,FieldValue>> entries() {
return (Set<Entry<String,FieldValue>>) map.entries();
}
public Map<String,Collection<FieldValue>> asMap() {
return map.asMap();
}
// Approximate serialized size of all entries: key bytes plus each value's size().
// NOTE(review): getBytes() uses the platform default charset, so the count can vary by
// JVM configuration -- confirm UTF-8 is not required here.
public int getByteSize() {
int count = 0;
for (Entry<String,FieldValue> e : map.entries()) {
count += e.getKey().getBytes().length + e.getValue().size();
}
return count;
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder();
for (Entry<String,FieldValue> entry : map.entries()) {
buf.append("\tkey: ").append(entry.getKey()).append(" -> ").append(entry.getValue().toString()).append("\n");
}
return buf.toString();
}
// Registers the byte[] serializer with Kryo exactly once (guarded by kryoInitialized).
// Synchronized so concurrent first-time callers do not race the registration.
public static synchronized void initializeKryo(Kryo kryo) {
if (kryoInitialized)
return;
valueSerializer = new ArraySerializer(kryo);
valueSerializer.setDimensionCount(1);
valueSerializer.setElementsAreSameType(true);
valueSerializer.setCanBeNull(false);
valueSerializer.setElementsCanBeNull(false);
kryo.register(byte[].class, valueSerializer);
kryoInitialized = true;
}
// Deserializes entries written by writeObjectData: an entry count, then per entry a
// key string, flattened visibility bytes, and value bytes.
// NOTE(review): entries are appended to the existing map without clearing it first --
// confirm callers always deserialize into a fresh EventFields.
public void readObjectData(Kryo kryo, ByteBuffer buf) {
if (!kryoInitialized)
EventFields.initializeKryo(kryo);
// Read in the number of map entries
int entries = IntSerializer.get(buf, true);
for (int i = 0; i < entries; i++) {
// Read in the key
String key = StringSerializer.get(buf);
// Read in the fields in the value
ColumnVisibility vis = new ColumnVisibility(valueSerializer.readObjectData(buf, byte[].class));
byte[] value = valueSerializer.readObjectData(buf, byte[].class);
map.put(key, new FieldValue(vis, value));
}
}
// Serializes the map in the format consumed by readObjectData above.
public void writeObjectData(Kryo kryo, ByteBuffer buf) {
if (!kryoInitialized)
EventFields.initializeKryo(kryo);
// Write out the number of entries;
IntSerializer.put(buf, map.size(), true);
for (Entry<String,FieldValue> entry : map.entries()) {
// Write the key
StringSerializer.put(buf, entry.getKey());
// Write the fields in the value
valueSerializer.writeObjectData(buf, entry.getValue().getVisibility().flatten());
valueSerializer.writeObjectData(buf, entry.getValue().getValue());
}
}
}
// ==== dataset row boundary: next content is query/src/main/java/org/apache/accumulo/examples/wikisearch/parser/TreeBuilder.java ====
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.parser;
import java.io.StringReader;
import org.apache.accumulo.examples.wikisearch.parser.QueryParser.EvaluationContext;
import org.apache.accumulo.examples.wikisearch.parser.QueryParser.FunctionResult;
import org.apache.accumulo.examples.wikisearch.parser.QueryParser.LiteralResult;
import org.apache.accumulo.examples.wikisearch.parser.QueryParser.ObjectHolder;
import org.apache.accumulo.examples.wikisearch.parser.QueryParser.QueryTerm;
import org.apache.accumulo.examples.wikisearch.parser.QueryParser.TermResult;
import org.apache.commons.jexl2.parser.ASTAdditiveNode;
import org.apache.commons.jexl2.parser.ASTAdditiveOperator;
import org.apache.commons.jexl2.parser.ASTAmbiguous;
import org.apache.commons.jexl2.parser.ASTAndNode;
import org.apache.commons.jexl2.parser.ASTArrayAccess;
import org.apache.commons.jexl2.parser.ASTArrayLiteral;
import org.apache.commons.jexl2.parser.ASTAssignment;
import org.apache.commons.jexl2.parser.ASTBitwiseAndNode;
import org.apache.commons.jexl2.parser.ASTBitwiseComplNode;
import org.apache.commons.jexl2.parser.ASTBitwiseOrNode;
import org.apache.commons.jexl2.parser.ASTBitwiseXorNode;
import org.apache.commons.jexl2.parser.ASTBlock;
import org.apache.commons.jexl2.parser.ASTConstructorNode;
import org.apache.commons.jexl2.parser.ASTDivNode;
import org.apache.commons.jexl2.parser.ASTEQNode;
import org.apache.commons.jexl2.parser.ASTERNode;
import org.apache.commons.jexl2.parser.ASTEmptyFunction;
import org.apache.commons.jexl2.parser.ASTFalseNode;
import org.apache.commons.jexl2.parser.ASTFloatLiteral;
import org.apache.commons.jexl2.parser.ASTForeachStatement;
import org.apache.commons.jexl2.parser.ASTFunctionNode;
import org.apache.commons.jexl2.parser.ASTGENode;
import org.apache.commons.jexl2.parser.ASTGTNode;
import org.apache.commons.jexl2.parser.ASTIdentifier;
import org.apache.commons.jexl2.parser.ASTIfStatement;
import org.apache.commons.jexl2.parser.ASTIntegerLiteral;
import org.apache.commons.jexl2.parser.ASTJexlScript;
import org.apache.commons.jexl2.parser.ASTLENode;
import org.apache.commons.jexl2.parser.ASTLTNode;
import org.apache.commons.jexl2.parser.ASTMapEntry;
import org.apache.commons.jexl2.parser.ASTMapLiteral;
import org.apache.commons.jexl2.parser.ASTMethodNode;
import org.apache.commons.jexl2.parser.ASTModNode;
import org.apache.commons.jexl2.parser.ASTMulNode;
import org.apache.commons.jexl2.parser.ASTNENode;
import org.apache.commons.jexl2.parser.ASTNRNode;
import org.apache.commons.jexl2.parser.ASTNotNode;
import org.apache.commons.jexl2.parser.ASTNullLiteral;
import org.apache.commons.jexl2.parser.ASTOrNode;
import org.apache.commons.jexl2.parser.ASTReference;
import org.apache.commons.jexl2.parser.ASTSizeFunction;
import org.apache.commons.jexl2.parser.ASTSizeMethod;
import org.apache.commons.jexl2.parser.ASTStringLiteral;
import org.apache.commons.jexl2.parser.ASTTernaryNode;
import org.apache.commons.jexl2.parser.ASTTrueNode;
import org.apache.commons.jexl2.parser.ASTUnaryMinusNode;
import org.apache.commons.jexl2.parser.ASTWhileStatement;
import org.apache.commons.jexl2.parser.JexlNode;
import org.apache.commons.jexl2.parser.ParseException;
import org.apache.commons.jexl2.parser.Parser;
import org.apache.commons.jexl2.parser.ParserVisitor;
import org.apache.commons.jexl2.parser.SimpleNode;
import com.google.common.collect.Multimap;
/**
* Class that parses the query and returns a tree of TreeNode's. This class rolls up clauses that
* are below like conjunctions (AND, OR) for the purposes of creating intersecting iterators.
*
*/
public class TreeBuilder implements ParserVisitor {
// Synthetic JexlNode subtype used solely to tag the artificial root TreeNode of the
// term tree (see the constructors); it never appears in a parsed script.
class RootNode extends JexlNode {
public RootNode(int id) {
super(id);
}
public RootNode(Parser p, int id) {
super(p, id);
}
}
// Root of the term tree being built; its type is tagged with RootNode.
private TreeNode rootNode = null;
// Tree node that new children/terms are attached to as the visitor walks the script.
private TreeNode currentNode = null;
// True while checkChildren() is re-walking a subtree to roll terms up; the AND/OR
// visitors consult it to avoid creating new tree nodes during that second pass.
// NOTE(review): this instance flag makes the builder non-reentrant.
private boolean currentlyInCheckChildren = false;
/**
 * Builds the term tree for a textual query by first parsing it with the JEXL parser.
 *
 * @param query the query expression to parse
 * @throws ParseException if the expression is not valid JEXL
 */
public TreeBuilder(String query) throws ParseException {
  Parser p = new Parser(new StringReader(";"));
  buildTree(p.parse(new StringReader(query), null));
}

/**
 * Builds the term tree from an already-parsed JEXL script.
 *
 * @param script the parsed script to walk
 */
public TreeBuilder(ASTJexlScript script) {
  buildTree(script);
}

/**
 * Shared constructor logic: creates the synthetic root node and walks the script,
 * rolling up like conjunctions (AND/OR) as it goes.
 */
private void buildTree(ASTJexlScript script) {
  // Check to see if the child node is an AND or OR. If not, then
  // there must be just a single value in the query expression
  rootNode = new TreeNode();
  rootNode.setType(RootNode.class);
  currentNode = rootNode;
  EvaluationContext ctx = new EvaluationContext();
  script.childrenAccept(this, ctx);
}
// Returns the synthetic root of the rolled-up term tree built by the constructor.
public TreeNode getRootNode() {
return this.rootNode;
}
// Script-structure node types below carry no query terms, so they are no-ops.
@Override
public Object visit(SimpleNode node, Object data) {
return null;
}
@Override
public Object visit(ASTJexlScript node, Object data) {
return null;
}
@Override
public Object visit(ASTBlock node, Object data) {
return null;
}
@Override
public Object visit(ASTAmbiguous node, Object data) {
return null;
}
@Override
public Object visit(ASTIfStatement node, Object data) {
return null;
}
@Override
public Object visit(ASTWhileStatement node, Object data) {
return null;
}
@Override
public Object visit(ASTForeachStatement node, Object data) {
return null;
}
@Override
public Object visit(ASTAssignment node, Object data) {
return null;
}
@Override
public Object visit(ASTTernaryNode node, Object data) {
return null;
}
/**
 * Recursively scans a subtree looking for disqualifying nodes.
 *
 * @param node root of the subtree to scan
 * @param failClass the node class that disqualifies the subtree from rollup
 * @return false if any node in the subtree is of the fail class or is a NOT node;
 *         true otherwise
 */
private boolean nodeCheck(JexlNode node, Class<?> failClass) {
  Class<?> nodeClass = node.getClass();
  if (nodeClass.equals(failClass) || nodeClass.equals(ASTNotNode.class)) {
    return false;
  }
  int childCount = node.jjtGetNumChildren();
  for (int child = 0; child < childCount; child++) {
    if (!nodeCheck(node.jjtGetChild(child), failClass)) {
      return false;
    }
  }
  return true;
}
/**
 * Checks to see if all of the child nodes are of the same type (AND/OR) and if so then aggregates
 * all of the child terms. If not returns null.
 *
 * @return Map of field names to query terms or null
 */
private Multimap<String,QueryTerm> checkChildren(JexlNode parent, EvaluationContext ctx) {
// If the current node is an AND, then make sure that there is no
// OR descendant node, and vice versa. If this is true, then we call
// roll up all of the descendent values.
// Flag tells the AND/OR visitors not to create new tree nodes during the re-walk.
this.currentlyInCheckChildren = true;
Multimap<String,QueryTerm> rolledUpTerms = null;
// NOTE(review): if parent has no children, result stays false and no rollup occurs.
boolean result = false;
if (parent.getClass().equals(ASTOrNode.class)) {
for (int i = 0; i < parent.jjtGetNumChildren(); i++) {
result = nodeCheck(parent.jjtGetChild(i), ASTAndNode.class);
if (!result) {
break;
}
}
} else {
for (int i = 0; i < parent.jjtGetNumChildren(); i++) {
result = nodeCheck(parent.jjtGetChild(i), ASTOrNode.class);
if (!result) {
break;
}
}
}
if (result) {
// Set current node to a fake node and
// roll up the children from this node using the visitor pattern.
TreeNode rollupFakeNode = new TreeNode();
TreeNode previous = this.currentNode;
this.currentNode = rollupFakeNode;
// Run the visitor with the fake node.
parent.childrenAccept(this, ctx);
// Get the terms from the fake node
rolledUpTerms = this.currentNode.getTerms();
// Reset the current node pointer
this.currentNode = previous;
}
this.currentlyInCheckChildren = false;
return rolledUpTerms;
}
// Visits an OR node: during a checkChildren() re-walk it just recurses, otherwise it
// creates an OR TreeNode under the current node and either rolls all descendant terms
// up onto it (when the subtree is uniformly OR) or recurses into both children.
// NOTE(review): the children are visited with the caller's original `data`, not the
// possibly fresh `ctx` -- when data was null the raised inOrContext flag is not seen
// by the children; confirm this is intended.
@Override
public Object visit(ASTOrNode node, Object data) {
boolean previouslyInOrContext = false;
EvaluationContext ctx = null;
if (null != data && data instanceof EvaluationContext) {
ctx = (EvaluationContext) data;
previouslyInOrContext = ctx.inOrContext;
} else {
ctx = new EvaluationContext();
}
ctx.inOrContext = true;
// Are we being called from the checkChildren method? If so, then we
// are rolling up terms. If not, then we need to call check children.
if (currentlyInCheckChildren) {
// Process both sides of this node.
node.jjtGetChild(0).jjtAccept(this, data);
node.jjtGetChild(1).jjtAccept(this, data);
} else {
// Create a new OR node under the current node.
TreeNode orNode = new TreeNode();
orNode.setType(ASTOrNode.class);
orNode.setParent(this.currentNode);
this.currentNode.getChildren().add(orNode);
Multimap<String,QueryTerm> terms = checkChildren(node, ctx);
if (terms == null) {
// Then there was no rollup, set the current node to the orNode
// and process the children. Be sure to set the current Node to
// the or node in between calls because we could be processing
// an AND node below and the current node will have been switched.
// Process both sides of this node.
currentNode = orNode;
node.jjtGetChild(0).jjtAccept(this, data);
currentNode = orNode;
node.jjtGetChild(1).jjtAccept(this, data);
} else {
// There was a rollup, don't process the children and set the terms
// on the or node.
orNode.setTerms(terms);
}
}
// reset the state
if (null != data && !previouslyInOrContext) {
ctx.inOrContext = false;
}
return null;
}
// Visits an AND node: mirror image of visit(ASTOrNode) -- during a checkChildren()
// re-walk it just recurses, otherwise it creates an AND TreeNode and either rolls all
// descendant terms up onto it or recurses into both children.
// NOTE(review): as with the OR visitor, children receive the original `data` rather
// than the possibly fresh `ctx` -- confirm intended.
@Override
public Object visit(ASTAndNode node, Object data) {
boolean previouslyInAndContext = false;
EvaluationContext ctx = null;
if (null != data && data instanceof EvaluationContext) {
ctx = (EvaluationContext) data;
previouslyInAndContext = ctx.inAndContext;
} else {
ctx = new EvaluationContext();
}
ctx.inAndContext = true;
// Are we being called from the checkChildren method? If so, then we
// are rolling up terms. If not, then we need to call check children.
if (currentlyInCheckChildren) {
// Process both sides of this node.
node.jjtGetChild(0).jjtAccept(this, data);
node.jjtGetChild(1).jjtAccept(this, data);
} else {
// Create a new And node under the current node.
TreeNode andNode = new TreeNode();
andNode.setType(ASTAndNode.class);
andNode.setParent(this.currentNode);
this.currentNode.getChildren().add(andNode);
Multimap<String,QueryTerm> terms = checkChildren(node, ctx);
if (terms == null) {
// Then there was no rollup, set the current node to the orNode
// and process the children. Be sure to set the current Node to
// the and node in between calls because we could be processing
// an OR node below and the current node will have been switched.
// Process both sides of this node.
currentNode = andNode;
node.jjtGetChild(0).jjtAccept(this, data);
currentNode = andNode;
node.jjtGetChild(1).jjtAccept(this, data);
} else {
// There was a rollup, don't process the children and set the terms
// on the or node.
andNode.setTerms(terms);
}
}
if (null != data && !previouslyInAndContext) {
ctx.inAndContext = false;
}
return null;
}
// Bitwise operators carry no query terms; ignored.
@Override
public Object visit(ASTBitwiseOrNode node, Object data) {
return null;
}
@Override
public Object visit(ASTBitwiseXorNode node, Object data) {
return null;
}
@Override
public Object visit(ASTBitwiseAndNode node, Object data) {
return null;
}
/** Equality ("==") comparison: records a non-negated term on the current tree node. */
@Override
public Object visit(ASTEQNode node, Object data) {
  return addComparisonTerm(node.jjtGetChild(0).jjtAccept(this, data),
      node.jjtGetChild(1).jjtAccept(this, data),
      JexlOperatorConstants.getOperator(node.getClass()), data, false);
}

/** Inequality ("!=") comparison: the term's default sense is negated. */
@Override
public Object visit(ASTNENode node, Object data) {
  return addComparisonTerm(node.jjtGetChild(0).jjtAccept(this, data),
      node.jjtGetChild(1).jjtAccept(this, data),
      JexlOperatorConstants.getOperator(node.getClass()), data, true);
}

/** Less-than ("&lt;") comparison. */
@Override
public Object visit(ASTLTNode node, Object data) {
  return addComparisonTerm(node.jjtGetChild(0).jjtAccept(this, data),
      node.jjtGetChild(1).jjtAccept(this, data),
      JexlOperatorConstants.getOperator(node.getClass()), data, false);
}

/** Greater-than ("&gt;") comparison. */
@Override
public Object visit(ASTGTNode node, Object data) {
  return addComparisonTerm(node.jjtGetChild(0).jjtAccept(this, data),
      node.jjtGetChild(1).jjtAccept(this, data),
      JexlOperatorConstants.getOperator(node.getClass()), data, false);
}

/** Less-than-or-equal ("&lt;=") comparison. */
@Override
public Object visit(ASTLENode node, Object data) {
  return addComparisonTerm(node.jjtGetChild(0).jjtAccept(this, data),
      node.jjtGetChild(1).jjtAccept(this, data),
      JexlOperatorConstants.getOperator(node.getClass()), data, false);
}

/** Greater-than-or-equal ("&gt;=") comparison. */
@Override
public Object visit(ASTGENode node, Object data) {
  return addComparisonTerm(node.jjtGetChild(0).jjtAccept(this, data),
      node.jjtGetChild(1).jjtAccept(this, data),
      JexlOperatorConstants.getOperator(node.getClass()), data, false);
}

/** Regex-match ("=~") comparison. */
@Override
public Object visit(ASTERNode node, Object data) {
  return addComparisonTerm(node.jjtGetChild(0).jjtAccept(this, data),
      node.jjtGetChild(1).jjtAccept(this, data),
      JexlOperatorConstants.getOperator(node.getClass()), data, true == false ? true : false);
}

/**
 * Negative-regex-match ("!~") comparison: default sense is negated. The operator is now
 * looked up via JexlOperatorConstants like every other visitor in this class instead of
 * the previous hard-coded "!~" literal, for consistency with the sibling QueryParser.
 */
@Override
public Object visit(ASTNRNode node, Object data) {
  return addComparisonTerm(node.jjtGetChild(0).jjtAccept(this, data),
      node.jjtGetChild(1).jjtAccept(this, data),
      JexlOperatorConstants.getOperator(node.getClass()), data, true);
}

/**
 * Shared implementation for all comparison visitors above: decodes the field name and
 * literal value from the two child visit results, reverses the negation when inside a
 * NOT context, and records the resulting QueryTerm on the current tree node.
 *
 * @param left result of visiting the left child
 * @param right result of visiting the right child
 * @param operator textual operator for the node type being visited
 * @param data evaluation context passed down the visitor (may be null or another type)
 * @param negated the operator's default negation sense
 * @return always null; terms are accumulated on currentNode
 */
private Object addComparisonTerm(Object left, Object right, String operator, Object data,
    boolean negated) {
  // Ignore functions in the query
  if (left instanceof FunctionResult || right instanceof FunctionResult) {
    return null;
  }
  StringBuilder fieldName = new StringBuilder();
  ObjectHolder value = new ObjectHolder();
  decodeResults(left, right, fieldName, value);
  // We need to check to see if we are in a NOT context. If so, reverse the negation.
  if (data instanceof EvaluationContext && ((EvaluationContext) data).inNotContext) {
    negated = !negated;
  }
  QueryTerm term = new QueryTerm(negated, operator, value.getObject());
  this.currentNode.getTerms().put(fieldName.toString(), term);
  return null;
}
  // The arithmetic and bitwise operator nodes below cannot be translated into
  // index lookups, so each visit is a no-op returning null.
  @Override
  public Object visit(ASTAdditiveNode node, Object data) {
    return null;
  }
  @Override
  public Object visit(ASTAdditiveOperator node, Object data) {
    return null;
  }
  @Override
  public Object visit(ASTMulNode node, Object data) {
    return null;
  }
  @Override
  public Object visit(ASTDivNode node, Object data) {
    return null;
  }
  @Override
  public Object visit(ASTModNode node, Object data) {
    return null;
  }
  @Override
  public Object visit(ASTUnaryMinusNode node, Object data) {
    return null;
  }
  @Override
  public Object visit(ASTBitwiseComplNode node, Object data) {
    return null;
  }
  @Override
  public Object visit(ASTNotNode node, Object data) {
    // Remember whether an enclosing NOT was already active so nested NOTs
    // unwind correctly when this visit returns.
    boolean previouslyInNotContext = false;
    EvaluationContext ctx = null;
    if (null != data && data instanceof EvaluationContext) {
      ctx = (EvaluationContext) data;
      previouslyInNotContext = ctx.inNotContext;
    } else {
      // No context was handed down; start a fresh one for this subtree.
      ctx = new EvaluationContext();
    }
    ctx.inNotContext = true;
    // Create a new node in the tree to represent the NOT
    // Create a new And node under the current node.
    TreeNode notNode = new TreeNode();
    notNode.setType(ASTNotNode.class);
    notNode.setParent(this.currentNode);
    this.currentNode.getChildren().add(notNode);
    this.currentNode = notNode;
    // NOTE(review): currentNode is not restored to the parent after the child
    // is processed, so later siblings attach under this NOT node — confirm
    // this is the intended tree shape.
    // Process both sides of this node.
    node.jjtGetChild(0).jjtAccept(this, ctx);
    // reset the state
    if (null != data && !previouslyInNotContext) {
      ctx.inNotContext = false;
    }
    return null;
  }
  // Leaf nodes: identifiers become TermResults (field names) and literals
  // become LiteralResults (values); decodeResults(...) later pairs them up.
  @Override
  public Object visit(ASTIdentifier node, Object data) {
    return new TermResult(node.image);
  }
  @Override
  public Object visit(ASTNullLiteral node, Object data) {
    return new LiteralResult(node.image);
  }
  @Override
  public Object visit(ASTTrueNode node, Object data) {
    return new LiteralResult(node.image);
  }
  @Override
  public Object visit(ASTFalseNode node, Object data) {
    return new LiteralResult(node.image);
  }
  @Override
  public Object visit(ASTIntegerLiteral node, Object data) {
    return new LiteralResult(node.image);
  }
  @Override
  public Object visit(ASTFloatLiteral node, Object data) {
    return new LiteralResult(node.image);
  }
  @Override
  public Object visit(ASTStringLiteral node, Object data) {
    // Re-wrap the image in single quotes so it is preserved as a quoted
    // string literal in the resulting query term.
    return new LiteralResult("'" + node.image + "'");
  }
  // Composite literals and size/empty functions carry no indexable term.
  @Override
  public Object visit(ASTArrayLiteral node, Object data) {
    return null;
  }
  @Override
  public Object visit(ASTMapLiteral node, Object data) {
    return null;
  }
  @Override
  public Object visit(ASTMapEntry node, Object data) {
    return null;
  }
  @Override
  public Object visit(ASTEmptyFunction node, Object data) {
    return null;
  }
  @Override
  public Object visit(ASTSizeFunction node, Object data) {
    return null;
  }
@Override
public Object visit(ASTFunctionNode node, Object data) {
// objectNode 0 is the prefix
// objectNode 1 is the identifier , the others are parameters.
// process the remaining arguments
FunctionResult fr = new FunctionResult();
int argc = node.jjtGetNumChildren() - 2;
for (int i = 0; i < argc; i++) {
// Process both sides of this node.
Object result = node.jjtGetChild(i + 2).jjtAccept(this, data);
if (result instanceof TermResult) {
TermResult tr = (TermResult) result;
fr.getTerms().add(tr);
}
}
return fr;
}
  // Method calls, constructors and array accesses are not indexable; ignore.
  @Override
  public Object visit(ASTMethodNode node, Object data) {
    return null;
  }
  @Override
  public Object visit(ASTSizeMethod node, Object data) {
    return null;
  }
  @Override
  public Object visit(ASTConstructorNode node, Object data) {
    return null;
  }
  @Override
  public Object visit(ASTArrayAccess node, Object data) {
    return null;
  }
  @Override
  public Object visit(ASTReference node, Object data) {
    // A reference wraps a single child (e.g. an identifier); unwrap it.
    return node.jjtGetChild(0).jjtAccept(this, data);
  }
private void decodeResults(Object left, Object right, StringBuilder fieldName,
ObjectHolder holder) {
if (left instanceof TermResult) {
TermResult tr = (TermResult) left;
fieldName.append((String) tr.value);
// Then the right has to be the value
if (right instanceof LiteralResult) {
holder.setObject(((LiteralResult) right).value);
} else {
throw new IllegalArgumentException("Object mismatch");
}
} else if (right instanceof TermResult) {
TermResult tr = (TermResult) right;
fieldName.append((String) tr.value);
if (left instanceof LiteralResult) {
holder.setObject(((LiteralResult) left).value);
} else {
throw new IllegalArgumentException("Object mismatch");
}
} else {
throw new IllegalArgumentException("No Term specified in query");
}
}
}
| 6,224 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/parser/JexlOperatorConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.parser;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.jexl2.parser.ASTAndNode;
import org.apache.commons.jexl2.parser.ASTEQNode;
import org.apache.commons.jexl2.parser.ASTERNode;
import org.apache.commons.jexl2.parser.ASTFunctionNode;
import org.apache.commons.jexl2.parser.ASTGENode;
import org.apache.commons.jexl2.parser.ASTGTNode;
import org.apache.commons.jexl2.parser.ASTLENode;
import org.apache.commons.jexl2.parser.ASTLTNode;
import org.apache.commons.jexl2.parser.ASTNENode;
import org.apache.commons.jexl2.parser.ASTNRNode;
import org.apache.commons.jexl2.parser.ASTOrNode;
import org.apache.commons.jexl2.parser.JexlNode;
import org.apache.commons.jexl2.parser.ParserTreeConstants;
/**
 * Static lookup tables translating between JEXL AST node classes, their operator strings (e.g.
 * "==", "=~"), and the generated {@link ParserTreeConstants} node-type ids.
 */
public class JexlOperatorConstants implements ParserTreeConstants {
  // node class -> operator string (includes the connectives "and"/"or")
  private static final Map<Class<? extends JexlNode>,String> operatorMap = new ConcurrentHashMap<Class<? extends JexlNode>,String>();
  // operator string -> node class.
  // NOTE(review): unlike operatorMap this has no "and"/"or" entries, so
  // getClass("and") returns null — confirm callers never look up connectives.
  private static final Map<String,Class<? extends JexlNode>> classMap = new ConcurrentHashMap<String,Class<? extends JexlNode>>();
  // jjt node-type id -> operator string
  private static final Map<Integer,String> jjtOperatorMap = new ConcurrentHashMap<Integer,String>();
  // operator string -> jjt node-type id
  private static final Map<String,Integer> jjtTypeMap = new ConcurrentHashMap<String,Integer>();
  static {
    operatorMap.put(ASTEQNode.class, "==");
    operatorMap.put(ASTNENode.class, "!=");
    operatorMap.put(ASTLTNode.class, "<");
    operatorMap.put(ASTLENode.class, "<=");
    operatorMap.put(ASTGTNode.class, ">");
    operatorMap.put(ASTGENode.class, ">=");
    operatorMap.put(ASTERNode.class, "=~");
    operatorMap.put(ASTNRNode.class, "!~");
    operatorMap.put(ASTFunctionNode.class, "f");
    operatorMap.put(ASTAndNode.class, "and");
    operatorMap.put(ASTOrNode.class, "or");
    classMap.put("==", ASTEQNode.class);
    classMap.put("!=", ASTNENode.class);
    classMap.put("<", ASTLTNode.class);
    classMap.put("<=", ASTLENode.class);
    classMap.put(">", ASTGTNode.class);
    classMap.put(">=", ASTGENode.class);
    classMap.put("=~", ASTERNode.class);
    classMap.put("!~", ASTNRNode.class);
    classMap.put("f", ASTFunctionNode.class);
    jjtOperatorMap.put(JJTEQNODE, "==");
    jjtOperatorMap.put(JJTNENODE, "!=");
    jjtOperatorMap.put(JJTLTNODE, "<");
    jjtOperatorMap.put(JJTLENODE, "<=");
    jjtOperatorMap.put(JJTGTNODE, ">");
    jjtOperatorMap.put(JJTGENODE, ">=");
    jjtOperatorMap.put(JJTERNODE, "=~");
    jjtOperatorMap.put(JJTNRNODE, "!~");
    jjtOperatorMap.put(JJTFUNCTIONNODE, "f");
    jjtOperatorMap.put(JJTANDNODE, "and");
    jjtOperatorMap.put(JJTORNODE, "or");
    jjtTypeMap.put("==", JJTEQNODE);
    jjtTypeMap.put("!=", JJTNENODE);
    jjtTypeMap.put("<", JJTLTNODE);
    jjtTypeMap.put("<=", JJTLENODE);
    jjtTypeMap.put(">", JJTGTNODE);
    jjtTypeMap.put(">=", JJTGENODE);
    jjtTypeMap.put("=~", JJTERNODE);
    jjtTypeMap.put("!~", JJTNRNODE);
    jjtTypeMap.put("f", JJTFUNCTIONNODE);
  }
  /**
   * @return the operator string for the given node class, or null if unknown
   */
  public static String getOperator(Class<? extends JexlNode> nodeType) {
    return operatorMap.get(nodeType);
  }
  /**
   * @return the operator string for the given jjt node-type id, or null if unknown
   */
  public static String getOperator(Integer jjtNode) {
    return jjtOperatorMap.get(jjtNode);
  }
  /**
   * @return the node class for the given operator string, or null if unknown
   */
  public static Class<? extends JexlNode> getClass(String operator) {
    return classMap.get(operator);
  }
  /**
   * @return the jjt node-type id for the given operator string
   * @throws NullPointerException
   *           if the operator is unknown (the boxed Integer is unboxed to int)
   */
  public static int getJJTNodeType(String operator) {
    return jjtTypeMap.get(operator);
  }
}
| 6,225 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/jexl/Arithmetic.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.jexl;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.jexl2.JexlArithmetic;
import org.apache.commons.lang.math.NumberUtils;
/**
 * JexlArithmetic extension used during query evaluation: regex matching honors embedded newlines,
 * and comparison operands are coerced toward a common type before delegating to the parent.
 */
public class Arithmetic extends JexlArithmetic {
  public Arithmetic(boolean lenient) {
    super(lenient);
  }
  /**
   * This method differs from the parent in that we are not calling String.matches() because it does not match on a newline. Instead we are handling this case.
   *
   * @param left
   *          first value
   * @param right
   *          second value
   * @return test result.
   */
  @Override
  public boolean matches(Object left, Object right) {
    if (left == null && right == null) {
      // if both are null L == R
      return true;
    }
    if (left == null || right == null) {
      // we know both aren't null, therefore L != R
      return false;
    }
    final String arg = left.toString();
    if (right instanceof java.util.regex.Pattern) {
      return ((java.util.regex.Pattern) right).matcher(arg).matches();
    } else {
      // Compile with DOTALL (instead of String.matches) so '.' also matches
      // line terminators embedded in the value.
      Pattern p = Pattern.compile(right.toString(), Pattern.DOTALL);
      Matcher m = p.matcher(arg);
      return m.matches();
    }
  }
  /**
   * This method differs from the parent class in that we are going to try and do a better job of coercing the types. As a last resort we will do a string
   * comparison and try not to throw a NumberFormatException. The JexlArithmetic class performs coercion to a particular type if either the left or the right
   * match a known type. We will look at the type of the right operator and try to make the left of the same type.
   */
  @Override
  public boolean equals(Object left, Object right) {
    Object fixedLeft = fixLeft(left, right);
    return super.equals(fixedLeft, right);
  }
  @Override
  public boolean lessThan(Object left, Object right) {
    Object fixedLeft = fixLeft(left, right);
    return super.lessThan(fixedLeft, right);
  }
  /**
   * Coerces the left operand toward the type of the right operand so the parent class compares
   * like types.
   *
   * @return the left operand, possibly converted to right's type
   */
  protected Object fixLeft(Object left, Object right) {
    if (null == left || null == right)
      return left;
    if (!(right instanceof Number) && left instanceof Number) {
      // may throw NumberFormatException when right is not numeric text
      right = NumberUtils.createNumber(right.toString());
    }
    if (right instanceof Number && left instanceof Number) {
      // BUG FIX: the previous code returned the *right* operand's value here,
      // so equals()/lessThan() ended up comparing right against itself (e.g.
      // equals(1, 2) evaluated true). Convert the LEFT value to right's type.
      Number leftNumber = (Number) left;
      if (right instanceof Double)
        return leftNumber.doubleValue();
      else if (right instanceof Float)
        return leftNumber.floatValue();
      else if (right instanceof Long)
        return leftNumber.longValue();
      else if (right instanceof Integer)
        return leftNumber.intValue();
      else if (right instanceof Short)
        return leftNumber.shortValue();
      else if (right instanceof Byte)
        return leftNumber.byteValue();
      else
        // unknown Number subtype (e.g. BigDecimal): leave left unchanged
        return left;
    }
    if (right instanceof Number && left instanceof String) {
      Number num = NumberUtils.createNumber(left.toString());
      // Let's try to cast left as right's type.
      if (this.isFloatingPointNumber(right) && this.isFloatingPointNumber(left))
        return num;
      else if (this.isFloatingPointNumber(right))
        return num.doubleValue();
      else if (right instanceof Number)
        return num.longValue();
    } else if (right instanceof Boolean && left instanceof String) {
      if (left.equals("true") || left.equals("false"))
        return Boolean.parseBoolean(left.toString());
      // Fall back to 1/0 numeric booleans.
      Number num = NumberUtils.createNumber(left.toString());
      if (num.intValue() == 1)
        return (Boolean) true;
      else if (num.intValue() == 0)
        return (Boolean) false;
    }
    return left;
  }
}
| 6,226 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/iterator/AndIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.iterator;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.util.TextUtil;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Logger;
/**
 * Intersecting ("AND") iterator over the wikisearch partitioned index. Each TermSource iterates
 * one (dataLocation, term) column; the intersection advances by document id within a row
 * partition. Key layout: row = {RowID}, family = {dataLocation}, qualifier =
 * {term}\0{dataType}\0{UID}.
 */
public class AndIterator implements SortedKeyValueIterator<Key,Value> {
  protected static final Logger log = Logger.getLogger(AndIterator.class);
  // The terms being intersected; init(...) guarantees sources[0] is not negated.
  private TermSource[] sources;
  private int sourcesCount = 0;
  protected Text nullText = new Text();
  protected final byte[] emptyByteArray = new byte[0];
  // Key currently exposed to the caller; the value is always the empty Value.
  private Key topKey = null;
  protected Value value = new Value(emptyByteArray);
  // Seek range given to this iterator; a null currentRow means exhausted.
  private Range overallRange;
  private Text currentRow = null;
  private Text currentTerm = new Text(emptyByteArray);
  private Text currentDocID = new Text(emptyByteArray);
  private static boolean SEEK_INCLUSIVE = true;
  private Text parentEndRow;
/**
* Used in representing a Term that is intersected on.
*/
protected static class TermSource {
public SortedKeyValueIterator<Key,Value> iter;
public Text dataLocation;
public Text term;
public boolean notFlag;
private Collection<ByteSequence> seekColumnFamilies;
private TermSource(TermSource other) {
this(other.iter, other.dataLocation, other.term, other.notFlag);
}
public TermSource(SortedKeyValueIterator<Key,Value> iter, Text dataLocation, Text term) {
this(iter, dataLocation, term, false);
}
public TermSource(SortedKeyValueIterator<Key,Value> iter, Text dataLocation, Text term,
boolean notFlag) {
this.iter = iter;
this.dataLocation = dataLocation;
ByteSequence bs = new ArrayByteSequence(dataLocation.getBytes(), 0, dataLocation.getLength());
this.seekColumnFamilies = Collections.singletonList(bs);
this.term = term;
this.notFlag = notFlag;
}
public String getTermString() {
return (this.term == null) ? new String("Iterator") : this.term.toString();
}
}
  /*
   * | Row | Column Family | Column Qualifier | Value | {RowID} | {dataLocation} |
   * {term}\0{dataType}\0{UID} | Empty
   */
  /**
   * @return the partition (row id) of the given key
   */
  protected Text getPartition(Key key) {
    return key.getRow();
  }
  /**
   * Returns the given key's dataLocation (stored in the column family)
   *
   * @param key
   *          the key to inspect
   * @return The given key's dataLocation
   */
  protected Text getDataLocation(Key key) {
    return key.getColumnFamily();
  }
/**
* Returns the given key's term
*
* @return The given key's term
*/
protected Text getTerm(Key key) {
int idx = 0;
String sKey = key.getColumnQualifier().toString();
idx = sKey.indexOf("\0");
return new Text(sKey.substring(0, idx));
}
/**
* Returns the given key's DocID
*
* @return The given key's DocID
*/
protected Text getDocID(Key key) {
int idx = 0;
String sKey = key.getColumnQualifier().toString();
idx = sKey.indexOf("\0");
return new Text(sKey.substring(idx + 1));
}
/**
* Returns the given key's UID
*
* @return The given key's UID
*/
protected String getUID(Key key) {
int idx = 0;
String sKey = key.getColumnQualifier().toString();
idx = sKey.indexOf("\0");
return sKey.substring(idx + 1);
}
/**
* Build a key from the given row and dataLocation
*
* @param row
* The desired row
* @param dataLocation
* The desired dataLocation
* @return A Key object built from the given row and dataLocation.
*/
protected Key buildKey(Text row, Text dataLocation) {
return new Key(row, (dataLocation == null) ? nullText : dataLocation);
}
/**
* Build a key from the given row, dataLocation, and term
*
* @param row
* The desired row
* @param dataLocation
* The desired dataLocation
* @param term
* The desired term
* @return A Key object built from the given row, dataLocation, and term.
*/
protected Key buildKey(Text row, Text dataLocation, Text term) {
return new Key(row, (dataLocation == null) ? nullText : dataLocation,
(term == null) ? nullText : term);
}
  /**
   * Return the key that directly follows the given key's row (the smallest key in the next
   * partition).
   *
   * @param key
   *          The key who will be directly before the returned key
   * @return The key directly following the given key.
   */
  protected Key buildFollowingPartitionKey(Key key) {
    return key.followingKey(PartialKey.ROW);
  }
  /**
   * Empty default constructor; state is populated later via init(...).
   */
  public AndIterator() {}
  @Override
  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    // Delegates to the copy constructor below.
    return new AndIterator(this, env);
  }
public AndIterator(AndIterator other, IteratorEnvironment env) {
if (other.sources != null) {
sourcesCount = other.sourcesCount;
sources = new TermSource[sourcesCount];
for (int i = 0; i < sourcesCount; i++) {
sources[i] = new TermSource(other.sources[i].iter.deepCopy(env),
other.sources[i].dataLocation, other.sources[i].term);
}
}
}
  @Override
  public Key getTopKey() {
    return topKey;
  }
  @Override
  public Value getTopValue() {
    // The intersection only produces keys; the value is always empty.
    return value;
  }
  @Override
  public boolean hasTop() {
    // currentRow is set to null when any source is exhausted or out of range.
    return currentRow != null;
  }
  /**
   * Find the next key in the given TermSource that is at or beyond the cursor (currentRow,
   * currentTerm, currentDocID), advancing the cursor itself when the source overshoots it.
   *
   * @param ts
   *          the source (one entry of <code>sources</code>) to position
   * @return True if the source advanced beyond the cursor
   * @throws IOException
   *           if the wrapped iterator fails to seek or advance
   */
  private boolean seekOneSource(TermSource ts) throws IOException {
    /*
     * Within this loop progress must be made in one of the following forms: - currentRow,
     * currentTerm, or curretDocID must be increased - the given source must advance its iterator
     * This loop will end when any of the following criteria are met - the iterator for the given
     * source is pointing to the key (currentRow, columnFamilies[sourceID], currentTerm,
     * currentDocID) - the given source is out of data and currentRow is set to null - the given
     * source has advanced beyond the endRow and currentRow is set to null
     */
    // precondition: currentRow is not null
    boolean advancedCursor = false;
    while (true) {
      if (ts.iter.hasTop() == false) {
        if (log.isDebugEnabled()) {
          log.debug("The current iterator no longer has a top");
        }
        // If we got to the end of an iterator, found a Match if it's a NOT
        if (ts.notFlag) {
          break;
        }
        currentRow = null;
        // setting currentRow to null counts as advancing the cursor
        return true;
      }
      // check if we're past the end key
      int endCompare = -1;
      if (log.isDebugEnabled()) {
        log.debug("Current topKey = " + ts.iter.getTopKey());
      }
      // we should compare the row to the end of the range
      if (overallRange.getEndKey() != null) {
        if (log.isDebugEnabled()) {
          log.debug("II.seekOneSource overallRange.getEndKey() != null");
        }
        endCompare = overallRange.getEndKey().getRow().compareTo(ts.iter.getTopKey().getRow());
        if ((!overallRange.isEndKeyInclusive() && endCompare <= 0) || endCompare < 0) {
          if (log.isDebugEnabled()) {
            log.debug("II.seekOneSource at the end of the tablet server");
          }
          currentRow = null;
          // setting currentRow to null counts as advancing the cursor
          return true;
        }
      } else {
        if (log.isDebugEnabled()) {
          log.debug("II.seekOneSource overallRange.getEndKey() == null");
        }
      }
      // Compare the Row IDs
      int partitionCompare = currentRow.compareTo(getPartition(ts.iter.getTopKey()));
      if (log.isDebugEnabled()) {
        log.debug("Current partition: " + currentRow);
      }
      // check if this source is already at or beyond currentRow
      // if not, then seek to at least the current row
      if (partitionCompare > 0) {
        if (log.isDebugEnabled()) {
          log.debug("Need to seek to the current row");
          // seek to at least the currentRow
          log.debug("ts.dataLocation = " + ts.dataLocation.getBytes());
          log.debug("Term = " + new Text(ts.term + "\0" + currentDocID).getBytes());
        }
        Key seekKey = buildKey(currentRow, ts.dataLocation, nullText);// new Text(ts.term + "\0" +
                                                                     // currentDocID));
        if (log.isDebugEnabled()) {
          log.debug("Seeking to: " + seekKey);
        }
        ts.iter.seek(new Range(seekKey, true, null, false), ts.seekColumnFamilies, SEEK_INCLUSIVE);
        continue;
      }
      // check if this source has gone beyond currentRow
      // if so, advance currentRow
      if (partitionCompare < 0) {
        if (log.isDebugEnabled()) {
          log.debug("Went too far beyond the currentRow");
        }
        if (ts.notFlag) {
          break;
        }
        currentRow.set(getPartition(ts.iter.getTopKey()));
        currentDocID.set(emptyByteArray);
        advancedCursor = true;
        continue;
      }
      // we have verified that the current source is positioned in currentRow
      // now we must make sure we're in the right columnFamily in the current row
      if (ts.dataLocation != null) {
        int dataLocationCompare = ts.dataLocation.compareTo(getDataLocation(ts.iter.getTopKey()));
        if (log.isDebugEnabled()) {
          log.debug("Comparing dataLocations");
          log.debug("dataLocation = " + ts.dataLocation);
          log.debug("newDataLocation = " + getDataLocation(ts.iter.getTopKey()));
        }
        // check if this source is already on the right columnFamily
        // if not, then seek forwards to the right columnFamily
        if (dataLocationCompare > 0) {
          if (log.isDebugEnabled()) {
            log.debug("Need to seek to the right dataLocation");
          }
          Key seekKey = buildKey(currentRow, ts.dataLocation, nullText);// , new Text(ts.term + "\0"
                                                                       // + currentDocID));
          if (log.isDebugEnabled()) {
            log.debug("Seeking to: " + seekKey);
          }
          ts.iter.seek(new Range(seekKey, true, null, false), ts.seekColumnFamilies,
              SEEK_INCLUSIVE);
          if (!ts.iter.hasTop()) {
            currentRow = null;
            return true;
          }
          continue;
        }
        // check if this source is beyond the right columnFamily
        // if so, then seek to the next row
        if (dataLocationCompare < 0) {
          if (log.isDebugEnabled()) {
            log.debug("Went too far beyond the dataLocation");
          }
          if (endCompare == 0) {
            // we're done
            currentRow = null;
            // setting currentRow to null counts as advancing the cursor
            return true;
          }
          // Seeking beyond the current dataLocation gives a valid negated result
          if (ts.notFlag) {
            break;
          }
          Key seekKey = buildFollowingPartitionKey(ts.iter.getTopKey());
          if (log.isDebugEnabled()) {
            log.debug("Seeking to: " + seekKey);
          }
          ts.iter.seek(new Range(seekKey, true, null, false), ts.seekColumnFamilies,
              SEEK_INCLUSIVE);
          if (!ts.iter.hasTop()) {
            currentRow = null;
            return true;
          }
          continue;
        }
      }
      // Compare the Terms
      int termCompare = ts.term.compareTo(getTerm(ts.iter.getTopKey()));
      if (log.isDebugEnabled()) {
        log.debug("term = " + ts.term);
        log.debug("newTerm = " + getTerm(ts.iter.getTopKey()));
      }
      // We need to seek down farther into the data
      if (termCompare > 0) {
        if (log.isDebugEnabled()) {
          log.debug("Need to seek to the right term");
        }
        Key seekKey = buildKey(currentRow, ts.dataLocation, new Text(ts.term + "\0"));// new
                                                                                     // Text(ts.term
                                                                                     // + "\0" +
                                                                                     // currentDocID));
        if (log.isDebugEnabled()) {
          log.debug("Seeking to: " + seekKey);
        }
        ts.iter.seek(new Range(seekKey, true, null, false), ts.seekColumnFamilies, SEEK_INCLUSIVE);
        if (!ts.iter.hasTop()) {
          currentRow = null;
          return true;
        }
        // currentTerm = getTerm(ts.iter.getTopKey());
        if (log.isDebugEnabled()) {
          log.debug("topKey after seeking to correct term: " + ts.iter.getTopKey());
        }
        continue;
      }
      // We've jumped out of the current term, set the new term as currentTerm and start looking
      // again
      if (termCompare < 0) {
        if (log.isDebugEnabled()) {
          log.debug("TERM: Need to jump to the next row");
        }
        if (endCompare == 0) {
          currentRow = null;
          return true;
        }
        if (ts.notFlag) {
          break;
        }
        Key seekKey = buildFollowingPartitionKey(ts.iter.getTopKey());
        if (log.isDebugEnabled()) {
          log.debug("Using this key to find the next key: " + ts.iter.getTopKey());
          log.debug("Seeking to: " + seekKey);
        }
        ts.iter.seek(new Range(seekKey, true, null, false), ts.seekColumnFamilies, SEEK_INCLUSIVE);
        if (!ts.iter.hasTop()) {
          currentRow = null;
          return true;
        }
        currentTerm = getTerm(ts.iter.getTopKey());
        continue;
      }
      // Compare the DocIDs
      Text docid = getDocID(ts.iter.getTopKey());
      int docidCompare = currentDocID.compareTo(docid);
      if (log.isDebugEnabled()) {
        log.debug("Comparing DocIDs");
        log.debug("currentDocID = " + currentDocID);
        log.debug("docid = " + docid);
      }
      // The source isn't at the right DOC
      if (docidCompare > 0) {
        if (log.isDebugEnabled()) {
          log.debug("Need to seek to the correct docid");
        }
        // seek forwards
        Key seekKey =
            buildKey(currentRow, ts.dataLocation, new Text(ts.term + "\0" + currentDocID));
        if (log.isDebugEnabled()) {
          log.debug("Seeking to: " + seekKey);
        }
        ts.iter.seek(new Range(seekKey, true, null, false), ts.seekColumnFamilies, SEEK_INCLUSIVE);
        continue;
      }
      // if this source has advanced beyond the current column qualifier then advance currentCQ and
      // return true
      if (docidCompare < 0) {
        if (ts.notFlag) {
          break;
        }
        if (log.isDebugEnabled()) {
          log.debug(
              "We went too far, update the currentDocID to be the location of where were seek'ed to");
        }
        currentDocID.set(docid);
        advancedCursor = true;
        break;
      }
      // Set the term as currentTerm (in case we found this record on the first try)
      currentTerm = getTerm(ts.iter.getTopKey());
      if (log.isDebugEnabled()) {
        log.debug("currentTerm = " + currentTerm);
      }
      // If we're negated, next() the first TermSource since we guaranteed it was not a NOT term
      if (ts.notFlag) {
        sources[0].iter.next();
        advancedCursor = true;
      }
      // If we got here, we have a match
      break;
    }
    return advancedCursor;
  }
  /**
   * Advances past the current match: steps the first (never-negated) source forward and re-runs
   * the intersection; clears topKey if the new match falls outside the overall range.
   */
  @Override
  public void next() throws IOException {
    if (log.isDebugEnabled()) {
      log.debug("In ModifiedIntersectingIterator.next()");
    }
    if (currentRow == null) {
      return;
    }
    // precondition: the current row is set up and the sources all have the same column qualifier
    // while we don't have a match, seek in the source with the smallest column qualifier
    sources[0].iter.next();
    advanceToIntersection();
    if (hasTop()) {
      if (overallRange != null && !overallRange.contains(topKey)) {
        topKey = null;
      }
    }
  }
  /**
   * Repositions every source onto the cursor until no source moves it any further, then publishes
   * the resulting (row, term, docID) as topKey. Sets topKey to null when any source exhausts.
   */
  protected void advanceToIntersection() throws IOException {
    if (log.isDebugEnabled()) {
      log.debug("In AndIterator.advanceToIntersection()");
    }
    boolean cursorChanged = true;
    while (cursorChanged) {
      // seek all of the sources to at least the highest seen column qualifier in the current row
      cursorChanged = false;
      for (TermSource ts : sources) {
        if (currentRow == null) {
          topKey = null;
          return;
        }
        if (seekOneSource(ts)) {
          // the cursor moved: every source must be re-checked against it
          cursorChanged = true;
          break;
        }
      }
    }
    topKey = buildKey(currentRow, currentTerm, currentDocID);
    if (log.isDebugEnabled()) {
      log.debug("ModifiedIntersectingIterator: Got a match: " + topKey);
    }
  }
public static String stringTopKey(SortedKeyValueIterator<Key,Value> iter) {
if (iter.hasTop()) {
return iter.getTopKey().toString();
}
return "";
}
  // Option names used by init(...) to receive the Base64-encoded column
  // families, term values, and NOT flags.
  public static final String columnFamiliesOptionName = "columnFamilies";
  public static final String termValuesOptionName = "termValues";
  public static final String notFlagsOptionName = "notFlags";
/**
* Encode a <code>Text</code> array of all the columns to intersect on
*
* @param columns
* The columns to be encoded
* @return A Base64 encoded string (using a \n delimiter) of all columns to intersect on.
*/
public static String encodeColumns(Text[] columns) {
StringBuilder sb = new StringBuilder();
for (Text column : columns) {
sb.append(new String(Base64.encodeBase64(TextUtil.getBytes(column))));
sb.append('\n');
}
return sb.toString();
}
/**
* Encode a <code>Text</code> array of all of the terms to intersect on. The terms should match
* the columns in a one-to-one manner
*
* @param terms
* The terms to be encoded
* @return A Base64 encoded string (using a \n delimiter) of all terms to intersect on.
*/
public static String encodeTermValues(Text[] terms) {
StringBuilder sb = new StringBuilder();
for (Text term : terms) {
sb.append(new String(Base64.encodeBase64(TextUtil.getBytes(term))));
sb.append('\n');
}
return sb.toString();
}
/**
* Encode an array of <code>booleans</code> denoted which columns are NOT'ed
*
* @param flags
* The array of NOTs
* @return A base64 encoded string of which columns are NOT'ed
*/
public static String encodeBooleans(boolean[] flags) {
byte[] bytes = new byte[flags.length];
for (int i = 0; i < flags.length; i++) {
if (flags[i]) {
bytes[i] = 1;
} else {
bytes[i] = 0;
}
}
return new String(Base64.encodeBase64(bytes));
}
/**
* Decode the encoded columns into a <code>Text</code> array
*
* @param columns
* The Base64 encoded String of the columns
* @return A Text array of the decoded columns
*/
public static Text[] decodeColumns(String columns) {
String[] columnStrings = columns.split("\n");
Text[] columnTexts = new Text[columnStrings.length];
for (int i = 0; i < columnStrings.length; i++) {
columnTexts[i] = new Text(Base64.decodeBase64(columnStrings[i].getBytes()));
}
return columnTexts;
}
/**
* Decode the encoded terms into a <code>Text</code> array
*
* @param terms
* The Base64 encoded String of the terms
* @return A Text array of decoded terms.
*/
public static Text[] decodeTermValues(String terms) {
String[] termStrings = terms.split("\n");
Text[] termTexts = new Text[termStrings.length];
for (int i = 0; i < termStrings.length; i++) {
termTexts[i] = new Text(Base64.decodeBase64(termStrings[i].getBytes()));
}
return termTexts;
}
/**
* Decode the encoded NOT flags into a <code>boolean</code> array
*
* @return A boolean array of decoded NOT flags
*/
public static boolean[] decodeBooleans(String flags) {
// return null of there were no flags
if (flags == null) {
return null;
}
byte[] bytes = Base64.decodeBase64(flags.getBytes());
boolean[] bFlags = new boolean[bytes.length];
for (int i = 0; i < bytes.length; i++) {
if (bytes[i] == 1) {
bFlags[i] = true;
} else {
bFlags[i] = false;
}
}
return bFlags;
}
/**
 * Initialize from serialized options: decode the column families (dataLocations), the term
 * values, and the optional NOT flags; reorder so the first term is not negated; then build
 * one deep-copied source per term for the intersection.
 *
 * @param source
 *          the parent iterator, deep-copied once per term
 * @param options
 *          map holding the Base64-encoded column families, terms, and NOT flags
 * @param env
 *          environment used for the deep copies
 * @throws IllegalArgumentException
 *           if fewer than two terms are given, or if every term is negated
 */
@Override
public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
IteratorEnvironment env) throws IOException {
if (log.isDebugEnabled()) {
log.debug("In AndIterator.init()");
}
Text[] dataLocations = decodeColumns(options.get(columnFamiliesOptionName));
Text[] terms = decodeTermValues(options.get(termValuesOptionName));
boolean[] notFlags = decodeBooleans(options.get(notFlagsOptionName));
if (terms.length < 2) {
// NOTE(review): message typo "columns families"; left as-is since it is runtime text.
throw new IllegalArgumentException("AndIterator requires two or more columns families");
}
// Scan the not flags.
// There must be at least one term that isn't negated
// And we are going to re-order such that the first term is not a ! term
// No flags supplied: treat every term as positive (non-negated).
if (notFlags == null) {
notFlags = new boolean[terms.length];
for (int i = 0; i < terms.length; i++) {
notFlags[i] = false;
}
}
// Make sure that the first dataLocation/Term is not a NOT by swapping it with a later
// dataLocation/Term
if (notFlags[0]) {
for (int i = 1; i < notFlags.length; i++) {
if (notFlags[i] == false) {
// Swap the terms
Text swap = new Text(terms[0]);
terms[0].set(terms[i]);
terms[i].set(swap);
// Swap the dataLocations
swap.set(dataLocations[0]);
dataLocations[0].set(dataLocations[i]);
dataLocations[i].set(swap);
// Flip the notFlags
notFlags[0] = false;
notFlags[i] = true;
break;
}
}
// Still negated after the scan: every term was a NOT, which cannot be intersected.
if (notFlags[0]) {
throw new IllegalArgumentException(
"AndIterator requires at least one column family without not");
}
}
// Build up the array of sources that are to be intersected
sources = new TermSource[dataLocations.length];
for (int i = 0; i < dataLocations.length; i++) {
sources[i] = new TermSource(source.deepCopy(env), dataLocations[i], terms[i], notFlags[i]);
}
sourcesCount = dataLocations.length;
}
/**
 * Seek all sources into the given range. Resets the current row and document-id
 * bookkeeping to a clean state, then delegates the per-source positioning to doSeek.
 */
@Override
public void seek(Range range, Collection<ByteSequence> seekColumnFamilies, boolean inclusive)
    throws IOException {
  if (log.isDebugEnabled()) {
    log.debug("In AndIterator.seek()");
    log.debug("AndIterator.seek Given range => " + range);
  }
  // Start from scratch: empty document id and a fresh, empty current row.
  currentDocID.set(emptyByteArray);
  currentRow = new Text();
  doSeek(range);
}
/**
 * Position every source at the start of the given range and advance to the first
 * intersection. Records the overall range and parent end row so later advances can tell
 * when they have run past the requested range.
 */
private void doSeek(Range range) throws IOException {
overallRange = new Range(range);
if (range.getEndKey() != null && range.getEndKey().getRow() != null) {
this.parentEndRow = range.getEndKey().getRow();
}
// seek each of the sources to the right column family within the row given by key
for (int i = 0; i < sourcesCount; i++) {
Key sourceKey;
Text dataLocation = (sources[i].dataLocation == null) ? nullText : sources[i].dataLocation;
if (range.getStartKey() != null) {
// Build a key with the DocID if one is given
if (range.getStartKey().getColumnFamily() != null) {
// The doc id rides in the start key's column family; append it to the term
// (null-byte separated) so the source lands on that exact document.
sourceKey = buildKey(getPartition(range.getStartKey()), dataLocation,
(sources[i].term == null) ? nullText
: new Text(sources[i].term + "\0" + range.getStartKey().getColumnFamily()));
} // Build a key with just the term.
else {
sourceKey = buildKey(getPartition(range.getStartKey()), dataLocation,
(sources[i].term == null) ? nullText : sources[i].term);
}
if (!range.isStartKeyInclusive()) {
// Exclusive start: step past the constructed key before seeking.
sourceKey = sourceKey.followingKey(PartialKey.ROW_COLFAM_COLQUAL);
}
sources[i].iter.seek(new Range(sourceKey, true, null, false), sources[i].seekColumnFamilies,
SEEK_INCLUSIVE);
} else {
sources[i].iter.seek(range, sources[i].seekColumnFamilies, SEEK_INCLUSIVE);
}
}
advanceToIntersection();
if (hasTop()) {
if (overallRange != null && !overallRange.contains(topKey)) {
// The intersection landed beyond the requested range; report no top.
topKey = null;
if (log.isDebugEnabled()) {
log.debug("doSeek, topKey is outside of overall range: " + overallRange);
}
}
}
}
/**
 * Add a source that has no dataLocation; equivalent to invoking the five-argument
 * overload with a null dataLocation.
 */
public void addSource(SortedKeyValueIterator<Key,Value> source, IteratorEnvironment env,
    Text term, boolean notFlag) {
  this.addSource(source, env, null, term, notFlag);
}
/**
 * Add a new source to be intersected. The sources array is grown by one slot, the
 * existing entries are carried over via the TermSource copy constructor, and the new
 * source (deep-copied for this environment) is appended at the end.
 */
public void addSource(SortedKeyValueIterator<Key,Value> source, IteratorEnvironment env,
    Text dataLocation, Text term, boolean notFlag) {
  if (sources == null) {
    // First source ever added: start with a single-slot array.
    sources = new TermSource[1];
  } else {
    // Grow by one and copy each existing entry into the new array.
    TermSource[] grown = new TermSource[sources.length + 1];
    for (int i = 0; i < sources.length; i++) {
      grown[i] = new TermSource(sources[i]);
    }
    sources = grown;
  }
  sources[sourcesCount] = new TermSource(source.deepCopy(env), dataLocation, term, notFlag);
  sourcesCount++;
}
/**
 * Move this iterator forward to (at least) the given key, used when another branch of
 * the query has already advanced further. Returns true if a top key remains afterwards.
 *
 * NOTE(review): in the equal-row branch below, getUID can return null, in which case
 * myUid.compareTo(jumpUid) would throw a NullPointerException — confirm callers always
 * supply keys with a parsable column qualifier.
 */
public boolean jump(Key jumpKey) throws IOException {
if (log.isDebugEnabled()) {
log.debug("jump: " + jumpKey);
}
// is the jumpKey outside my overall range?
if (parentEndRow != null && parentEndRow.compareTo(jumpKey.getRow()) < 0) {
// can't go there.
if (log.isDebugEnabled()) {
log.debug(
"jumpRow: " + jumpKey.getRow() + " is greater than my parentEndRow: " + parentEndRow);
}
return false;
}
if (!hasTop()) {
// TODO: will need to add current/last row if you want to measure if
// we don't have topkey because we hit end of tablet.
if (log.isDebugEnabled()) {
log.debug("jump called, but topKey is null, must need to move to next row");
}
return false;
} else {
int comp = this.topKey.getRow().compareTo(jumpKey.getRow());
// compare rows
if (comp > 0) {
if (log.isDebugEnabled()) {
log.debug("jump, our row is ahead of jumpKey.");
log.debug("jumpRow: " + jumpKey.getRow() + " myRow: " + topKey.getRow() + " parentEndRow"
+ parentEndRow);
}
return hasTop(); // do nothing, we're ahead of jumpKey row
} else if (comp < 0) { // a row behind jump key, need to move forward
if (log.isDebugEnabled()) {
log.debug("II jump, row jump");
}
// Re-seek the whole iterator from the jump row up to the parent end row (if any).
Key endKey = null;
if (parentEndRow != null) {
endKey = new Key(parentEndRow);
}
Key sKey = new Key(jumpKey.getRow());
Range fake = new Range(sKey, true, endKey, false);
this.seek(fake, null, false);
return hasTop();
} else {
// need to check uid
String myUid = this.topKey.getColumnQualifier().toString();
String jumpUid = getUID(jumpKey);
if (log.isDebugEnabled()) {
if (myUid == null) {
log.debug("myUid is null");
} else {
log.debug("myUid: " + myUid);
}
if (jumpUid == null) {
log.debug("jumpUid is null");
} else {
log.debug("jumpUid: " + jumpUid);
}
}
int ucomp = myUid.compareTo(jumpUid);
if (ucomp < 0) { // need to move all sources forward
if (log.isDebugEnabled()) {
log.debug("jump, uid jump");
}
// Same row, smaller uid: re-seek within the row starting at the jump key's uid.
Text row = jumpKey.getRow();
Range range = new Range(row);
this.currentRow = row;
this.currentDocID = new Text(this.getUID(jumpKey));
doSeek(range);
// make sure it is in the range if we have one.
if (hasTop() && parentEndRow != null && topKey.getRow().compareTo(parentEndRow) > 0) {
topKey = null;
}
if (log.isDebugEnabled() && hasTop()) {
log.debug("jump, topKey is now: " + topKey);
}
return hasTop();
} // else do nothing
if (hasTop() && parentEndRow != null && topKey.getRow().compareTo(parentEndRow) > 0) {
topKey = null;
}
return hasTop();
}
}
}
| 6,227 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/iterator/OrIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.iterator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.PriorityQueue;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Logger;
/**
* An iterator that handles "OR" query constructs on the server side. This code has been
* adapted/merged from Heap and Multi Iterators.
*/
public class OrIterator implements SortedKeyValueIterator<Key,Value> {
// The source currently supplying the top key/value; held outside the priority queue.
private TermSource currentTerm;
private ArrayList<TermSource> sources;
// Remaining sources with a valid top, ordered by UID so the merged stream is UID-sorted.
private PriorityQueue<TermSource> sorted = new PriorityQueue<>(5);
private static final Text nullText = new Text();
private Key topKey = null;
private Range overallRange;
private Collection<ByteSequence> columnFamilies;
private boolean inclusive;
protected static final Logger log = Logger.getLogger(OrIterator.class);
private Text parentEndRow;
/** One branch of the OR: an underlying iterator plus the dataLocation/term it matches. */
protected static class TermSource implements Comparable<TermSource> {
public SortedKeyValueIterator<Key,Value> iter;
public Text dataLocation;
public Text term;
public Text docid;
public Text fieldTerm;
public Key topKey;
public boolean atEnd;
public TermSource(TermSource other) {
this.iter = other.iter;
this.term = other.term;
this.dataLocation = other.dataLocation;
this.atEnd = other.atEnd;
}
public TermSource(SortedKeyValueIterator<Key,Value> iter, Text term) {
this.iter = iter;
this.term = term;
this.atEnd = false;
}
public TermSource(SortedKeyValueIterator<Key,Value> iter, Text dataLocation, Text term) {
this.iter = iter;
this.dataLocation = dataLocation;
this.term = term;
this.atEnd = false;
}
// Refresh topKey/fieldTerm/docid from the underlying iterator's current top
// (column qualifier parsed as term "\0" docid), or clear them all if exhausted.
public void setNew() {
if (!this.atEnd && this.iter.hasTop()) {
this.topKey = this.iter.getTopKey();
if (log.isDebugEnabled()) {
log.debug("OI.TermSource.setNew TS.iter.topKey >>" + topKey + "<<");
}
if (this.term == null) {
this.docid = this.topKey.getColumnQualifier();
} else {
String cqString = this.topKey.getColumnQualifier().toString();
int idx = cqString.indexOf("\0");
this.fieldTerm = new Text(cqString.substring(0, idx));
this.docid = new Text(cqString.substring(idx + 1));
}
} else {
if (log.isDebugEnabled()) {
log.debug("OI.TermSource.setNew Setting to null...");
}
// this.term = null;
// this.dataLocation = null;
this.topKey = null;
this.fieldTerm = null;
this.docid = null;
}
}
@Override
public int compareTo(TermSource o) {
// NOTE: If your implementation can have more than one row in a tablet,
// you must compare row key here first, then column qualifier.
// NOTE2: A null check is not needed because things are only added to the
// sorted after they have been determined to be valid.
// return this.docid.compareTo(o.docid);
// return this.topKey.compareTo(o.topKey);
// NOTE! We need to compare UID's, not Keys!
Key k1 = topKey;
Key k2 = o.topKey;
// return t1.compareTo(t2);
String uid1 = getUID(k1);
String uid2 = getUID(k2);
// A null UID sorts after every non-null UID, so unparsable entries drain last.
if (uid1 != null && uid2 != null) {
return uid1.compareTo(uid2);
} else if (uid1 == null && uid2 == null) {
return 0;
} else if (uid1 == null) {
return 1;
} else {
return -1;
}
}
@Override
public String toString() {
return "TermSource: " + this.dataLocation + " " + this.term;
}
public boolean hasTop() {
return this.topKey != null;
}
}
/**
 * Returns the given key's row
 *
 * @return The given key's row
 */
protected Text getPartition(Key key) {
return key.getRow();
}
/**
 * Returns the given key's dataLocation
 *
 * @return The given key's dataLocation
 */
protected Text getDataLocation(Key key) {
return key.getColumnFamily();
}
/**
 * Returns the given key's term
 *
 * @return The given key's term
 */
protected Text getTerm(Key key) {
// Column qualifier format: term "\0" docid; the term is everything before the null byte.
int idx = 0;
String sKey = key.getColumnQualifier().toString();
idx = sKey.indexOf("\0");
return new Text(sKey.substring(0, idx));
}
/**
 * Returns the given key's DocID
 *
 * @return The given key's DocID
 */
protected Text getDocID(Key key) {
// Column qualifier format: term "\0" docid; the docid is everything after the null byte.
int idx = 0;
String sKey = key.getColumnQualifier().toString();
idx = sKey.indexOf("\0");
return new Text(sKey.substring(idx + 1));
}
/**
 * Returns the given key's UID
 *
 * @return The given key's UID, or null if the column qualifier cannot be parsed
 */
static protected String getUID(Key key) {
try {
int idx = 0;
String sKey = key.getColumnQualifier().toString();
idx = sKey.indexOf("\0");
return sKey.substring(idx + 1);
} catch (Exception e) {
// Any parse failure (including a null key) yields null; compareTo orders these last.
return null;
}
}
public OrIterator() {
this.sources = new ArrayList<>();
}
private OrIterator(OrIterator other, IteratorEnvironment env) {
this.sources = new ArrayList<>();
for (TermSource TS : other.sources) {
this.sources.add(new TermSource(TS.iter.deepCopy(env), TS.dataLocation, TS.term));
}
}
@Override
public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
return new OrIterator(this, env);
}
public void addTerm(SortedKeyValueIterator<Key,Value> source, Text term,
IteratorEnvironment env) {
if (log.isDebugEnabled()) {
log.debug("OI.addTerm Added source w/o family");
log.debug("OI.addTerm term >>" + term + "<<");
}
// Don't deepcopy an iterator
if (term == null) {
this.sources.add(new TermSource(source, term));
} else {
this.sources.add(new TermSource(source.deepCopy(env), term));
}
}
public void addTerm(SortedKeyValueIterator<Key,Value> source, Text dataLocation, Text term,
IteratorEnvironment env) {
if (log.isDebugEnabled()) {
log.debug("OI.addTerm Added source ");
log.debug("OI.addTerm family >>" + dataLocation + "<< term >>" + term + "<<");
}
// Don't deepcopy an iterator
if (term == null) {
this.sources.add(new TermSource(source, dataLocation, term));
} else {
this.sources.add(new TermSource(source.deepCopy(env), dataLocation, term));
}
}
/**
 * Construct the topKey given the current <code>TermSource</code>
 *
 * NOTE(review): the debug line prints (row, dataLocation, docid), but the returned key
 * copies the source's column family/qualifier verbatim — the two can differ.
 *
 * @return The top Key for a given TermSource
 */
protected Key buildTopKey(TermSource TS) {
if ((TS == null) || (TS.topKey == null)) {
return null;
}
if (log.isDebugEnabled()) {
log.debug("OI.buildTopKey New topKey >>"
+ new Key(TS.topKey.getRow(), TS.dataLocation, TS.docid) + "<<");
}
return new Key(TS.topKey.getRow(), TS.topKey.getColumnFamily(), TS.topKey.getColumnQualifier());
}
@Override
final public void next() throws IOException {
if (log.isDebugEnabled()) {
log.debug("OI.next Enter: sorted.size = " + sorted.size() + " currentTerm = "
+ ((currentTerm == null) ? "null" : "not null"));
}
if (currentTerm == null) {
if (log.isDebugEnabled()) {
log.debug("OI.next currentTerm is NULL... returning");
}
topKey = null;
return;
}
// Advance currentTerm
currentTerm.iter.next();
advanceToMatch(currentTerm);
currentTerm.setNew();
// See if currentTerm is still valid, remove if not
if (log.isDebugEnabled()) {
log.debug("OI.next Checks (correct = 0,0,0): " + ((currentTerm.topKey != null) ? "0," : "1,")
+ ((currentTerm.dataLocation != null) ? "0," : "1,")
+ ((currentTerm.term != null && currentTerm.fieldTerm != null)
? (currentTerm.term.compareTo(currentTerm.fieldTerm)) : "0"));
}
if (currentTerm.topKey == null || ((currentTerm.dataLocation != null)
&& (currentTerm.term.compareTo(currentTerm.fieldTerm) != 0))) {
if (log.isDebugEnabled()) {
log.debug("OI.next removing entry:" + currentTerm.term);
}
currentTerm = null;
}
// optimization.
// if size == 0, currentTerm is the only item left,
// OR there are no items left.
// In either case, we don't need to use the PriorityQueue
if (sorted.size() > 0) {
// sort the term back in
if (currentTerm != null) {
sorted.add(currentTerm);
}
// and get the current top item out.
currentTerm = sorted.poll();
}
if (log.isDebugEnabled()) {
log.debug("OI.next CurrentTerm is " + ((currentTerm == null) ? "null" : currentTerm));
}
topKey = buildTopKey(currentTerm);
if (hasTop()) {
if (overallRange != null && !overallRange.contains(topKey)) {
topKey = null;
}
}
}
@Override
public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
throws IOException {
overallRange = new Range(range);
if (log.isDebugEnabled()) {
log.debug("seek, overallRange: " + overallRange);
}
if (range.getEndKey() != null && range.getEndKey().getRow() != null) {
this.parentEndRow = range.getEndKey().getRow();
}
if (log.isDebugEnabled()) {
log.debug("OI.seek Entry - sources.size = " + sources.size());
log.debug("OI.seek Entry - currentTerm = "
+ ((currentTerm == null) ? "false" : currentTerm.iter.getTopKey()));
log.debug(
"OI.seek Entry - Key from Range = " + ((range == null) ? "false" : range.getStartKey()));
}
// If sources.size is 0, there is nothing to process, so just return.
if (sources.isEmpty()) {
currentTerm = null;
topKey = null;
return;
}
this.columnFamilies = columnFamilies;
this.inclusive = inclusive;
Range newRange = range;
Key sourceKey = null;
Key startKey = null;
if (range != null) {
startKey = range.getStartKey();
}
// Clear the PriorityQueue so that we can re-populate it.
sorted.clear();
TermSource TS = null;
Iterator<TermSource> iter = sources.iterator();
// For each term, seek forward.
// if a hit is not found, delete it from future searches.
int counter = 1;
while (iter.hasNext()) {
TS = iter.next();
TS.atEnd = false;
if (sources.size() == 1) {
currentTerm = TS;
}
if (log.isDebugEnabled()) {
log.debug("OI.seek on TS >>" + TS + "<<");
log.debug("OI.seek seeking source >>" + counter + "<< ");
}
counter++;
newRange = range;
sourceKey = null;
if (startKey != null) {
// Construct the new key for the range
if (log.isDebugEnabled()) {
log.debug("OI.seek startKey >>" + startKey + "<<");
}
if (startKey.getColumnQualifier() != null) {
sourceKey =
new Key(startKey.getRow(), (TS.dataLocation == null) ? nullText : TS.dataLocation,
new Text(((TS.term == null) ? "" : TS.term + "\0")
+ range.getStartKey().getColumnQualifier()));
} else {
sourceKey =
new Key(startKey.getRow(), (TS.dataLocation == null) ? nullText : TS.dataLocation,
(TS.term == null) ? nullText : TS.term);
}
if (log.isDebugEnabled()) {
log.debug("OI.seek Seeking to the key => " + sourceKey);
}
newRange = new Range(sourceKey, true, sourceKey.followingKey(PartialKey.ROW), false);
} else {
if (log.isDebugEnabled()) {
log.debug("OI.seek Using the range Seek() argument to seek => " + newRange);
}
}
TS.iter.seek(newRange, columnFamilies, inclusive);
TS.setNew();
// Make sure we're on a key with the correct dataLocation and term
advanceToMatch(TS);
TS.setNew();
if (log.isDebugEnabled()) {
log.debug("OI.seek sourceKey >>" + sourceKey + "<< ");
log.debug("OI.seek topKey >>" + ((TS.topKey == null) ? "false" : TS.topKey) + "<< ");
log.debug("OI.seek TS.fieldTerm == " + TS.fieldTerm);
log.debug("OI.seek Checks (correct = 0,0,0 / 0,1,1): " + ((TS.topKey != null) ? "0," : "1,")
+ ((TS.dataLocation != null) ? "0," : "1,")
+ (((TS.term != null && TS.fieldTerm != null) && (TS.term.compareTo(TS.fieldTerm) != 0))
? "0" : "1"));
}
// Exhausted/mismatched sources are intentionally NOT removed here (removal code is
// disabled below); they are simply not added to the priority queue.
if ((TS.topKey == null)
|| ((TS.dataLocation != null) && (TS.term.compareTo(TS.fieldTerm) != 0))) {
// log.debug("OI.seek Removing " + TS.term);
// iter.remove();
} // Optimization if we only have one element
// NOTE(review): sources is non-empty at this point, so sources.size() > 0 is always
// true and the final 'else' below appears unreachable — confirm intent.
else if (sources.size() > 0 || iter.hasNext()) {
// We have more than one source to search for, use the priority queue
sorted.add(TS);
} else {
// Don't need to continue, only had one item to search
if (log.isDebugEnabled()) {
log.debug("OI.seek new topKey >>" + ((topKey == null) ? "false" : topKey) + "<< ");
}
// make sure it is in the range if we have one.
if (hasTop()) {
if (overallRange != null && !overallRange.contains(topKey)) {
if (log.isDebugEnabled()) {
log.debug("seek, topKey: " + topKey + " is not in the overallRange: " + overallRange);
}
topKey = null;
}
}
return;
}
}
// And set currentTerm = the next valid key/term.
currentTerm = sorted.poll();
if (log.isDebugEnabled()) {
log.debug("OI.seek currentTerm = " + currentTerm);
}
topKey = buildTopKey(currentTerm);
if (topKey == null) {
if (log.isDebugEnabled()) {
log.debug("OI.seek() topKey is null");
}
}
if (log.isDebugEnabled()) {
log.debug("OI.seek new topKey >>" + ((topKey == null) ? "false" : topKey) + "<< ");
}
if (hasTop()) {
if (overallRange != null && !overallRange.contains(topKey)) {
if (log.isDebugEnabled()) {
log.debug("seek, topKey: " + topKey + " is not in the overallRange: " + overallRange);
}
topKey = null;
}
}
}
@Override
final public Key getTopKey() {
if (log.isDebugEnabled()) {
log.debug("OI.getTopKey key >>" + topKey);
}
return topKey;
}
@Override
final public Value getTopValue() {
// NOTE(review): throws NullPointerException if called when currentTerm is null
// (i.e. when hasTop() is false) — callers must check hasTop() first.
if (log.isDebugEnabled()) {
log.debug("OI.getTopValue key >>" + currentTerm.iter.getTopValue());
}
return currentTerm.iter.getTopValue();
}
@Override
final public boolean hasTop() {
if (log.isDebugEnabled()) {
log.debug("OI.hasTop = " + ((topKey == null) ? "false" : "true"));
}
return topKey != null;
}
@Override
public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
IteratorEnvironment env) throws IOException {
// OrIterator is configured programmatically via addTerm, never from options.
throw new UnsupportedOperationException();
}
/**
 * Ensures that the current <code>TermSource</code> is pointing to a key with the correct
 * <code>dataLocation</code> and <code>term</code> or sets <code>topKey</code> to null if there is
 * no such key remaining.
 *
 * @param TS
 *          The <code>TermSource</code> to advance
 */
private void advanceToMatch(TermSource TS) throws IOException {
boolean matched = false;
while (!matched) {
if (!TS.iter.hasTop()) {
TS.topKey = null;
return;
}
Key iterTopKey = TS.iter.getTopKey();
if (log.isDebugEnabled()) {
log.debug("OI.advanceToMatch current topKey = " + iterTopKey);
}
// we should compare the row to the end of the range
if (overallRange.getEndKey() != null) {
if (overallRange != null && !overallRange.contains(TS.iter.getTopKey())) {
if (log.isDebugEnabled()) {
log.debug("overallRange: " + overallRange + " does not contain TS.iter.topKey: "
+ TS.iter.getTopKey());
log.debug("OI.advanceToMatch at the end, returning");
}
TS.atEnd = true;
TS.topKey = null;
return;
} else {
if (log.isDebugEnabled()) {
log.debug("OI.advanceToMatch not at the end");
}
}
} else {
if (log.isDebugEnabled()) {
log.debug("OI.advanceToMatch overallRange.getEndKey() == null");
}
}
// Advance to the correct dataLocation
if (log.isDebugEnabled()) {
log.debug("Comparing dataLocations.");
log.debug("OI.advanceToMatch dataLocationCompare: " + getDataLocation(iterTopKey) + " == "
+ TS.dataLocation);
}
int dataLocationCompare = getDataLocation(iterTopKey).compareTo(TS.dataLocation);
if (log.isDebugEnabled()) {
log.debug("OI.advanceToMatch dataLocationCompare = " + dataLocationCompare);
}
// Make sure we're at a row for this dataLocation
if (dataLocationCompare < 0) {
if (log.isDebugEnabled()) {
log.debug("OI.advanceToMatch seek to desired dataLocation");
}
Key seekKey = new Key(iterTopKey.getRow(), TS.dataLocation, nullText);
if (log.isDebugEnabled()) {
log.debug("OI.advanceToMatch seeking to => " + seekKey);
}
TS.iter.seek(new Range(seekKey, true, null, false), columnFamilies, inclusive);
continue;
} else if (dataLocationCompare > 0) {
if (log.isDebugEnabled()) {
log.debug("OI.advanceToMatch advanced beyond desired dataLocation, seek to next row");
}
// Gone past the current dataLocation, seek to the next row
Key seekKey = iterTopKey.followingKey(PartialKey.ROW);
if (log.isDebugEnabled()) {
log.debug("OI.advanceToMatch seeking to => " + seekKey);
}
TS.iter.seek(new Range(seekKey, true, null, false), columnFamilies, inclusive);
continue;
}
// Advance to the correct term
if (log.isDebugEnabled()) {
log.debug("OI.advanceToMatch termCompare: " + getTerm(iterTopKey) + " == " + TS.term);
}
int termCompare = getTerm(iterTopKey).compareTo(TS.term);
if (log.isDebugEnabled()) {
log.debug("OI.advanceToMatch termCompare = " + termCompare);
}
// Make sure we're at a row for this term
if (termCompare < 0) {
if (log.isDebugEnabled()) {
log.debug("OI.advanceToMatch seek to desired term");
}
Key seekKey = new Key(iterTopKey.getRow(), iterTopKey.getColumnFamily(), TS.term);
if (log.isDebugEnabled()) {
log.debug("OI.advanceToMatch seeking to => " + seekKey);
}
TS.iter.seek(new Range(seekKey, true, null, false), columnFamilies, inclusive);
continue;
} else if (termCompare > 0) {
if (log.isDebugEnabled()) {
log.debug("OI.advanceToMatch advanced beyond desired term, seek to next row");
}
// Gone past the current term, seek to the next row
Key seekKey = iterTopKey.followingKey(PartialKey.ROW);
if (log.isDebugEnabled()) {
log.debug("OI.advanceToMatch seeking to => " + seekKey);
}
TS.iter.seek(new Range(seekKey, true, null, false), columnFamilies, inclusive);
continue;
}
// If we made it here, we found a match
matched = true;
}
}
/**
 * Move every source forward to (at least) the given key and rebuild the priority queue.
 * Returns true if a top key remains after the jump.
 */
public boolean jump(Key jumpKey) throws IOException {
if (log.isDebugEnabled()) {
log.debug("OR jump: " + jumpKey);
printTopKeysForTermSources();
}
// is the jumpKey outside my overall range?
if (parentEndRow != null && parentEndRow.compareTo(jumpKey.getRow()) < 0) {
// can't go there.
if (log.isDebugEnabled()) {
log.debug(
"jumpRow: " + jumpKey.getRow() + " is greater than my parentEndRow: " + parentEndRow);
}
return false;
}
// Clear the PriorityQueue so that we can re-populate it.
sorted.clear();
// check each term source and jump it if necessary.
for (TermSource ts : sources) {
int comp;
if (!ts.hasTop()) {
if (log.isDebugEnabled()) {
log.debug("jump called, but ts.topKey is null, this one needs to move to next row.");
}
Key startKey = new Key(jumpKey.getRow(), ts.dataLocation,
new Text(ts.term + "\0" + jumpKey.getColumnFamily()));
Key endKey = null;
if (parentEndRow != null) {
endKey = new Key(parentEndRow);
}
Range newRange = new Range(startKey, true, endKey, false);
ts.iter.seek(newRange, columnFamilies, inclusive);
ts.setNew();
advanceToMatch(ts);
ts.setNew();
} else {
// check row, then uid
// NOTE(review): this compares the iterator-level topKey, not ts.topKey — confirm
// that is intended for per-source jump decisions.
comp = this.topKey.getRow().compareTo(jumpKey.getRow());
if (comp > 0) {
if (log.isDebugEnabled()) {
log.debug("jump, our row is ahead of jumpKey.");
log.debug("jumpRow: " + jumpKey.getRow() + " myRow: " + topKey.getRow()
+ " parentEndRow" + parentEndRow);
}
if (ts.hasTop()) {
sorted.add(ts);
}
// do nothing, we're ahead of jumpKey row and have topkey
} else if (comp < 0) { // a row behind jump key, need to move forward
if (log.isDebugEnabled()) {
log.debug("OR jump, row jump");
}
Key endKey = null;
if (parentEndRow != null) {
endKey = new Key(parentEndRow);
}
Key sKey = new Key(jumpKey.getRow());
Range fake = new Range(sKey, true, endKey, false);
ts.iter.seek(fake, columnFamilies, inclusive);
ts.setNew();
advanceToMatch(ts);
ts.setNew();
} else {
// need to check uid
String myUid = getUID(ts.topKey);
String jumpUid = getUID(jumpKey);
if (log.isDebugEnabled()) {
if (myUid == null) {
log.debug("myUid is null");
} else {
log.debug("myUid: " + myUid);
}
if (jumpUid == null) {
log.debug("jumpUid is null");
} else {
log.debug("jumpUid: " + jumpUid);
}
}
int ucomp = myUid.compareTo(jumpUid);
if (ucomp < 0) {
// need to move forward
// create range and seek it.
Text row = ts.topKey.getRow();
Text cf = ts.topKey.getColumnFamily();
String cq = ts.topKey.getColumnQualifier().toString().replaceAll(myUid, jumpUid);
Text cq_text = new Text(cq);
Key sKey = new Key(row, cf, cq_text);
Key eKey = null;
if (parentEndRow != null) {
eKey = new Key(parentEndRow);
}
Range fake = new Range(sKey, true, eKey, false);
if (log.isDebugEnabled()) {
log.debug("uid jump, new ts.iter.seek range: " + fake);
}
ts.iter.seek(fake, columnFamilies, inclusive);
ts.setNew();
advanceToMatch(ts);
ts.setNew();
if (log.isDebugEnabled()) {
if (ts.iter.hasTop()) {
log.debug("ts.iter.topkey: " + ts.iter.getTopKey());
} else {
log.debug("ts.iter.topKey is null");
}
}
} // else do nothing, we're ahead of jump key
}
}
// ts should have moved, validate this particular ts.
if (ts.hasTop()) {
if (overallRange != null) {
// NOTE(review): this checks the iterator-level topKey rather than ts.topKey —
// confirm ts.topKey was the intended subject of the containment test.
if (overallRange.contains(topKey)) {
// if (topKey.getRow().compareTo(parentEndRow) < 0) {
sorted.add(ts);
}
} else {
sorted.add(ts);
}
}
}
// now get the top key from all TermSources.
currentTerm = sorted.poll();
if (log.isDebugEnabled()) {
log.debug("OI.jump currentTerm = " + currentTerm);
}
topKey = buildTopKey(currentTerm);
if (log.isDebugEnabled()) {
log.debug("OI.jump new topKey >>" + ((topKey == null) ? "false" : topKey) + "<< ");
}
return hasTop();
}
// Debug helper: logs each source's top key plus the iterator's current top key.
private void printTopKeysForTermSources() {
if (log.isDebugEnabled()) {
for (TermSource ts : sources) {
if (ts != null) {
if (ts.topKey == null) {
log.debug(ts.toString() + " topKey is null");
} else {
log.debug(ts.toString() + " topKey: " + ts.topKey);
}
} else {
log.debug("ts is null");
}
}
if (topKey != null) {
log.debug("OrIterator current topKey: " + topKey);
} else {
log.debug("OrIterator current topKey is null");
}
}
}
}
| 6,228 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/iterator/OptimizedQueryIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.iterator;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.OptionDescriber;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.log4j.Logger;
/**
* This iterator internally uses the BooleanLogicIterator to find event UIDs in the field index portion of the partition and uses the EvaluatingIterator to
* evaluate the events against an expression. The key and value that are emitted from this iterator are the key and value that come from the EvaluatingIterator.
*/
public class OptimizedQueryIterator implements SortedKeyValueIterator<Key,Value>, OptionDescriber {
private static Logger log = Logger.getLogger(OptimizedQueryIterator.class);
private EvaluatingIterator event = null;
private SortedKeyValueIterator<Key,Value> index = null;
private Key key = null;
private Value value = null;
private boolean eventSpecificRange = false;
/**
 * Describes the options this iterator understands: the full query expression, the field
 * index query, and the optional read-ahead queue size and timeout.
 */
public IteratorOptions describeOptions() {
  Map<String,String> named = new HashMap<String,String>();
  named.put(ReadAheadIterator.QUEUE_SIZE, "parallel queue size");
  named.put(ReadAheadIterator.TIMEOUT, "parallel iterator timeout");
  named.put(EvaluatingIterator.QUERY_OPTION, "full query expression");
  named.put(BooleanLogicIterator.FIELD_INDEX_QUERY, "modified query for the field index query portion");
  return new IteratorOptions(getClass().getSimpleName(),
      "evaluates event objects against an expression using the field index", named, null);
}
/**
 * Validates that both required options are present.
 *
 * @param options
 *          the iterator options to check
 * @return true if both the evaluation query and the field index query options are set
 */
public boolean validateOptions(Map<String,String> options) {
  // Both the event-evaluation query and the field-index query are required.
  return options.containsKey(EvaluatingIterator.QUERY_OPTION)
      && options.containsKey(BooleanLogicIterator.FIELD_INDEX_QUERY);
}
/**
 * Wires up the two-stage pipeline: an EvaluatingIterator over a deep copy of the source
 * for full-event evaluation, and a BooleanLogicIterator over the field index to find
 * candidate event UIDs (wrapped in a ReadAheadIterator when both queue size and timeout
 * options are configured).
 *
 * @throws IllegalArgumentException
 *           if the required query options are missing
 */
public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
if (!validateOptions(options)) {
throw new IllegalArgumentException("Invalid options");
}
// Setup the EvaluatingIterator
event = new EvaluatingIterator();
event.init(source.deepCopy(env), options, env);
// if queue size and timeout are set, then use the read ahead iterator
if (options.containsKey(ReadAheadIterator.QUEUE_SIZE) && options.containsKey(ReadAheadIterator.TIMEOUT)) {
BooleanLogicIterator bli = new BooleanLogicIterator();
bli.init(source, options, env);
index = new ReadAheadIterator();
index.init(bli, options, env);
} else {
index = new BooleanLogicIterator();
// index.setDebug(Level.DEBUG);
index.init(source, options, env);
}
}
/** Required no-arg constructor for the iterator framework; init() completes setup. */
public OptimizedQueryIterator() {}
/**
 * Copy constructor used by deepCopy.
 *
 * NOTE(review): this shares the event and index iterators with the original and ignores
 * env (members are not deep-copied) — confirm callers never advance both copies.
 */
public OptimizedQueryIterator(OptimizedQueryIterator other, IteratorEnvironment env) {
this.event = other.event;
this.index = other.index;
}
public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
return new OptimizedQueryIterator(this, env);
}
public Key getTopKey() {
if (log.isDebugEnabled()) {
log.debug("getTopKey: " + key);
}
return key;
}
public Value getTopValue() {
if (log.isDebugEnabled()) {
log.debug("getTopValue: " + value);
}
return value;
}
public boolean hasTop() {
if (log.isDebugEnabled()) {
log.debug("hasTop: returned: " + (key != null));
}
return (key != null);
}
public void next() throws IOException {
if (log.isDebugEnabled()) {
log.debug("next");
}
if (key != null) {
key = null;
value = null;
}
if (eventSpecificRange) {
// Then this will probably return nothing
event.next();
if (event.hasTop()) {
key = event.getTopKey();
value = event.getTopValue();
}
} else {
do {
index.next();
// If the index has a match, then seek the event to the key
if (index.hasTop()) {
Key eventKey = index.getTopKey();
Key endKey = eventKey.followingKey(PartialKey.ROW_COLFAM);
Key startKey = new Key(eventKey.getRow(), eventKey.getColumnFamily());
Range eventRange = new Range(startKey, endKey);
HashSet<ByteSequence> cf = new HashSet<ByteSequence>();
cf.add(eventKey.getColumnFamilyData());
event.seek(eventRange, cf, true);
if (event.hasTop()) {
key = event.getTopKey();
value = event.getTopValue();
}
}
} while (key == null && index.hasTop());
}
// Sanity check. Make sure both returnValue and returnKey are null or both are not null
if (!((key == null && value == null) || (key != null && value != null))) {
log.warn("Key: " + ((key == null) ? "null" : key.toString()));
log.warn("Value: " + ((value == null) ? "null" : value.toString()));
throw new IOException("Return values are inconsistent");
}
}
public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
if (log.isDebugEnabled()) {
log.debug("seek, range:" + range);
}
// Test the range to see if it is event specific.
if (null != range.getEndKey() && range.getEndKey().getColumnFamily() != null && range.getEndKey().getColumnFamily().getLength() != 0) {
if (log.isDebugEnabled()) {
log.debug("Jumping straight to the event");
}
// Then this range is for a specific event. We don't need to use the index iterator to find it, we can just
// seek to it with the event iterator and evaluate it.
eventSpecificRange = true;
event.seek(range, columnFamilies, inclusive);
if (event.hasTop()) {
key = event.getTopKey();
value = event.getTopValue();
}
} else {
if (log.isDebugEnabled()) {
log.debug("Using BooleanLogicIteratorJexl");
}
// Seek the boolean logic iterator
index.seek(range, columnFamilies, inclusive);
// If the index has a match, then seek the event to the key
if (index.hasTop()) {
Key eventKey = index.getTopKey();
// Range eventRange = new Range(eventKey, eventKey);
Range eventRange = new Range(eventKey.getRow());
HashSet<ByteSequence> cf = new HashSet<ByteSequence>();
cf.add(eventKey.getColumnFamilyData());
event.seek(eventRange, cf, true);
if (event.hasTop()) {
key = event.getTopKey();
value = event.getTopValue();
} else {
next();
}
}
}
}
}
| 6,229 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/iterator/BooleanLogicIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.iterator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.PriorityQueue;
import java.util.Set;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.OptionDescriber;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.examples.wikisearch.parser.JexlOperatorConstants;
import org.apache.accumulo.examples.wikisearch.parser.QueryParser;
import org.apache.accumulo.examples.wikisearch.parser.QueryParser.QueryTerm;
import org.apache.accumulo.examples.wikisearch.parser.RangeCalculator.RangeBounds;
import org.apache.accumulo.examples.wikisearch.parser.TreeNode;
import org.apache.accumulo.examples.wikisearch.util.FieldIndexKeyParser;
import org.apache.commons.jexl2.parser.ASTAndNode;
import org.apache.commons.jexl2.parser.ASTEQNode;
import org.apache.commons.jexl2.parser.ASTERNode;
import org.apache.commons.jexl2.parser.ASTGENode;
import org.apache.commons.jexl2.parser.ASTGTNode;
import org.apache.commons.jexl2.parser.ASTJexlScript;
import org.apache.commons.jexl2.parser.ASTLENode;
import org.apache.commons.jexl2.parser.ASTLTNode;
import org.apache.commons.jexl2.parser.ASTNENode;
import org.apache.commons.jexl2.parser.ASTNRNode;
import org.apache.commons.jexl2.parser.ASTNotNode;
import org.apache.commons.jexl2.parser.ASTOrNode;
import org.apache.commons.jexl2.parser.ParseException;
import org.apache.commons.jexl2.parser.ParserTreeConstants;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import com.google.common.collect.Multimap;
public class BooleanLogicIterator implements SortedKeyValueIterator<Key,Value>, OptionDescriber {
/** Empty column-family set used when seeking the raw source iterator. */
private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<>();
protected static final Logger log = Logger.getLogger(BooleanLogicIterator.class);
/** Option key carrying the query expression. */
public static final String QUERY_OPTION = "expr";
public static final String TERM_CARDINALITIES = "TERM_CARDINALITIES"; // comma separated list of
// term : count
public static final String FIELD_INDEX_QUERY = "FIELD_INDEX_QUERY";
/** Column-family prefix ("fi" + null byte) marking field-index entries. */
public static final String FIELD_NAME_PREFIX = "fi\0";
// --------------------------------------------------------------------------
// NOTE(review): class-level static environment, apparently used when deep-copying the source
// for sub-iterators — confirm this is intentional rather than the env passed to init().
private static IteratorEnvironment env = new DefaultIteratorEnvironment();
protected Text nullText = new Text();
// Current top key/value; null when exhausted.
private Key topKey = null;
private Value topValue = null;
private SortedKeyValueIterator<Key,Value> sourceIterator;
// Root of the boolean logic tree built from the parsed query.
private BooleanLogicTreeNode root;
// Non-negated leaves, ordered by their current top key.
private PriorityQueue<BooleanLogicTreeNode> positives;
// Negated leaves, handled separately from the positive queue.
private ArrayList<BooleanLogicTreeNode> negatives = new ArrayList<>();
// Leaves representing range comparisons.
private ArrayList<BooleanLogicTreeNode> rangerators;
// Query string after any rewriting performed during option validation.
private String updatedQuery;
private Map<String,Long> termCardinalities = new HashMap<>();
// Overall range this iterator was seeked with.
private Range overallRange = null;
private FieldIndexKeyParser keyParser;
/** Default constructor: prepares the field-index key parser and the range-iterator list. */
public BooleanLogicIterator() {
  this.rangerators = new ArrayList<>();
  this.keyParser = new FieldIndexKeyParser();
}
/**
 * Copy constructor used by {@code deepCopy}: deep-copies the source iterator (when present) and
 * initializes fresh parser/range-iterator state.
 */
public BooleanLogicIterator(BooleanLogicIterator other, IteratorEnvironment env) {
  if (null != other.sourceIterator) {
    this.sourceIterator = other.sourceIterator.deepCopy(env);
  }
  this.keyParser = new FieldIndexKeyParser();
  this.rangerators = new ArrayList<>();
  log.debug("Congratulations, you've reached the BooleanLogicIterator");
}
/** Sets the log level for this class's logger. */
public static void setLogLevel(Level level) {
  log.setLevel(level);
}
/** Instance-level convenience for adjusting the (shared, static) logger's level. */
public void setDebug(Level level) {
  log.setLevel(level);
}
/** Returns a deep copy of this iterator backed by a deep copy of the source. */
@Override
public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
  BooleanLogicIterator copy = new BooleanLogicIterator(this, env);
  return copy;
}
/**
 * <b>init</b> is responsible for setting up the iterator. It will pull the serialized boolean
 * parse tree from the options mapping and construct the appropriate sub-iterators.
 *
 * Once initialized, this iterator will automatically seek to the first matching instance. If no
 * top key exists, that means an event matching the boolean logic did not exist in the partition.
 * Subsequent calls to next will move the iterator and all sub-iterators to the next match.
 *
 * @param source
 *          The underlying SortedKeyValueIterator.
 * @param options
 *          A map of options; must contain the field index query (see validateOptions).
 * @param env
 *          The iterator environment
 * @throws IllegalArgumentException
 *           if the query cannot be parsed or no indexed terms are found
 */
@Override
public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
    IteratorEnvironment env) throws IOException {
  // Side effect: validateOptions populates this.updatedQuery, which is parsed below.
  validateOptions(options);
  try {
    if (log.isDebugEnabled()) {
      log.debug("Congratulations, you've reached the BooleanLogicIterator.init method");
    }
    // Copy the source iterator
    sourceIterator = source.deepCopy(env);
    // Potentially take advantage of term cardinalities
    String[] terms = null;
    if (null != options.get(TERM_CARDINALITIES)) {
      // Format: comma-separated "term:count" pairs.
      terms = options.get(TERM_CARDINALITIES).split(",");
      for (String term : terms) {
        int idx = term.indexOf(":");
        if (-1 != idx) {
          termCardinalities.put(term.substring(0, idx), Long.parseLong(term.substring(idx + 1)));
        }
      }
    }
    // Step 1: Parse the query
    if (log.isDebugEnabled()) {
      log.debug("QueryParser");
    }
    QueryParser qp = new QueryParser();
    qp.execute(this.updatedQuery); // validateOptions updates the updatedQuery
    // need to build the query tree based on jexl parsing.
    // Step 2: refactor QueryTree - inplace modification
    if (log.isDebugEnabled()) {
      log.debug("transformTreeNode");
    }
    TreeNode tree = qp.getIteratorTree();
    this.root = transformTreeNode(tree);
    if (log.isDebugEnabled()) {
      log.debug("refactorTree");
    }
    this.root = refactorTree(this.root);
    if (log.isDebugEnabled()) {
      log.debug("collapseBranches");
    }
    collapseBranches(root);
    // Step 3: create iterators where we need them.
    createIteratorTree(this.root);
    if (log.isDebugEnabled()) {
      log.debug("Query tree after iterator creation:\n\t" + this.root.getContents());
    }
    // Step 4: split the positive and negative leaves
    splitLeaves(this.root);
  } catch (ParseException ex) {
    log.error("ParseException in init: " + ex);
    throw new IllegalArgumentException("Failed to parse query", ex);
  } catch (Exception ex) {
    throw new IllegalArgumentException("probably had no indexed terms", ex);
  }
}
/*
 * ************************************************************************* Methods for sub
 * iterator creation.
 */
/**
 * Walks the query tree and attaches the concrete sub-iterators. First pass: interior AND/OR
 * nodes whose subtrees qualify (see canRollUp) are collapsed into a single intersecting or OR
 * iterator and their children removed. Second pass: any remaining leaves get a basic
 * FieldIndexIterator.
 */
private void createIteratorTree(BooleanLogicTreeNode root) throws IOException {
  if (log.isDebugEnabled()) {
    log.debug("BoolLogic createIteratorTree()");
  }
  // Walk the tree, if all of your children are leaves, roll you into the
  // appropriate iterator.
  Enumeration<?> dfe = root.depthFirstEnumeration();
  while (dfe.hasMoreElements()) {
    BooleanLogicTreeNode node = (BooleanLogicTreeNode) dfe.nextElement();
    if (!node.isLeaf() && node.getType() != ParserTreeConstants.JJTJEXLSCRIPT) {
      // try to roll up.
      if (canRollUp(node)) {
        node.setRollUp(true);
        if (node.getType() == ParserTreeConstants.JJTANDNODE) {
          if (log.isDebugEnabled()) {
            log.debug("creating IntersectingIterator");
          }
          node.setUserObject(createIntersectingIterator(node));
        } else if (node.getType() == ParserTreeConstants.JJTORNODE) {
          node.setUserObject(createOrIterator(node));
        } else {
          // Unexpected node type: log and leave the node as-is (no iterator attached).
          log.debug("createIteratorTree, encounterd a node type I do not know about: "
              + node.getType());
          log.debug("createIteratorTree, node contents: " + node.getContents());
        }
        node.removeAllChildren();
      }
    }
  }
  // now for remaining leaves, create basic iterators.
  // you can add in specialized iterator mappings here if necessary.
  dfe = root.depthFirstEnumeration();
  while (dfe.hasMoreElements()) {
    BooleanLogicTreeNode node = (BooleanLogicTreeNode) dfe.nextElement();
    if (node.isLeaf() && node.getType() != ParserTreeConstants.JJTANDNODE
        && node.getType() != ParserTreeConstants.JJTORNODE) {
      node.setUserObject(createFieldIndexIterator(node));
    }
  }
}
/**
 * Builds an AndIterator over all children of the given AND node: each child's field name,
 * value, and negation flag become parallel option arrays for the iterator.
 */
private AndIterator createIntersectingIterator(BooleanLogicTreeNode node) throws IOException {
  if (log.isDebugEnabled()) {
    log.debug("createIntersectingIterator(node)");
    log.debug("fName: " + node.getFieldName() + " , fValue: " + node.getFieldValue()
        + " , operator: " + node.getFieldOperator());
  }
  Text[] columnFamilies = new Text[node.getChildCount()];
  Text[] termValues = new Text[node.getChildCount()];
  boolean[] negationMask = new boolean[node.getChildCount()];
  Enumeration<?> children = node.children();
  int i = 0;
  while (children.hasMoreElements()) {
    BooleanLogicTreeNode child = (BooleanLogicTreeNode) children.nextElement();
    columnFamilies[i] = child.getFieldName();
    termValues[i] = child.getFieldValue();
    negationMask[i] = child.isNegated();
    i++;
  }
  AndIterator ii = new AndIterator();
  Map<String,String> options = new HashMap<>();
  options.put(AndIterator.columnFamiliesOptionName, AndIterator.encodeColumns(columnFamilies));
  options.put(AndIterator.termValuesOptionName, AndIterator.encodeTermValues(termValues));
  options.put(AndIterator.notFlagsOptionName, AndIterator.encodeBooleans(negationMask));
  // NOTE(review): 'env' here resolves to the class-level static DefaultIteratorEnvironment,
  // not the env passed to init() — confirm this is intended.
  ii.init(sourceIterator.deepCopy(env), options, env);
  return ii;
}
/**
 * Builds an OrIterator over all children of the given OR node, adding one term per child
 * (field name + value).
 */
private OrIterator createOrIterator(BooleanLogicTreeNode node) throws IOException {
  if (log.isDebugEnabled()) {
    log.debug("createOrIterator(node)");
    log.debug("fName: " + node.getFieldName() + " , fValue: " + node.getFieldValue()
        + " , operator: " + node.getFieldOperator());
  }
  Enumeration<?> children = node.children();
  ArrayList<Text> fams = new ArrayList<>();
  ArrayList<Text> quals = new ArrayList<>();
  while (children.hasMoreElements()) {
    BooleanLogicTreeNode child = (BooleanLogicTreeNode) children.nextElement();
    fams.add(child.getFieldName());
    quals.add(child.getFieldValue());
  }
  OrIterator iter = new OrIterator();
  // NOTE(review): a single deep copy of the source is shared across all addTerm calls —
  // presumably OrIterator.addTerm deep-copies it internally; confirm.
  SortedKeyValueIterator<Key,Value> source = sourceIterator.deepCopy(env);
  for (int i = 0; i < fams.size(); i++) {
    iter.addTerm(source, fams.get(i), quals.get(i), env);
  }
  return iter;
}
/*
 * This takes the place of the SortedKeyIterator used previously. This iterator is bound to the
 * partitioned table structure. When next is called it will jump rows as necessary internally
 * versus needing to do it externally as was the case with the SortedKeyIterator.
 */
/**
 * Creates a FieldIndexIterator for a single leaf term. The source is seeked to the start of the
 * partition to discover the current row id, which the iterator is bound to.
 */
private FieldIndexIterator createFieldIndexIterator(BooleanLogicTreeNode node)
    throws IOException {
  if (log.isDebugEnabled()) {
    log.debug("BoolLogic.createFieldIndexIterator()");
    log.debug("fName: " + node.getFieldName() + " , fValue: " + node.getFieldValue()
        + " , operator: " + node.getFieldOperator());
  }
  Text rowId = null;
  // Seek over everything to find the first row in the partition; rowId stays null if empty.
  sourceIterator.seek(new Range(), EMPTY_COL_FAMS, false);
  if (sourceIterator.hasTop()) {
    rowId = sourceIterator.getTopKey().getRow();
  }
  FieldIndexIterator iter = new FieldIndexIterator(node.getType(), rowId, node.getFieldName(),
      node.getFieldValue(), node.isNegated(), node.getFieldOperator());
  Map<String,String> options = new HashMap<>();
  iter.init(sourceIterator.deepCopy(env), options, env);
  // Mirror this class's log level onto the sub-iterator (static level on FieldIndexIterator).
  if (log.isDebugEnabled()) {
    FieldIndexIterator.setLogLevel(Level.DEBUG);
  } else {
    FieldIndexIterator.setLogLevel(Level.OFF);
  }
  return iter;
}
/*
 * ************************************************************************* Methods for testing
 * the tree WRT boolean logic.
 */
// After all iterator pointers have been advanced, test if the current
// record passes the boolean logic.
/**
 * Re-evaluates validity of every node in the tree (depth-first, so children are visited before
 * their parents) and returns whether the root is currently valid. Interior nodes are delegated
 * to handleAND/handleOR/handleHEAD; leaves are valid iff their sub-iterator has a top.
 */
private boolean testTreeState() {
  if (log.isDebugEnabled()) {
    log.debug("BoolLogic testTreeState() begin");
  }
  Enumeration<?> dfe = this.root.depthFirstEnumeration();
  while (dfe.hasMoreElements()) {
    BooleanLogicTreeNode node = (BooleanLogicTreeNode) dfe.nextElement();
    if (!node.isLeaf()) {
      int type = node.getType();
      if (type == ParserTreeConstants.JJTANDNODE) { // BooleanLogicTreeNode.NodeType.AND) {
        handleAND(node);
      } else if (type == ParserTreeConstants.JJTORNODE) {// BooleanLogicTreeNode.NodeType.OR) {
        handleOR(node);
      } else if (type == ParserTreeConstants.JJTJEXLSCRIPT) {// BooleanLogicTreeNode.NodeType.HEAD)
        // {
        handleHEAD(node);
      } else if (type == ParserTreeConstants.JJTNOTNODE) { // BooleanLogicTreeNode.NodeType.NOT) {
        // there should not be any "NOT"s (refactoring pushes negation down to the leaves).
        // throw new Exception();
      }
    } else {
      // it is a leaf, if it is an AND or OR do something
      if (node.getType() == ParserTreeConstants.JJTORNODE) {// BooleanLogicTreeNode.NodeType.OR) {
        // Leaf OR is backed by an OrIterator: refresh its set with the current top key.
        node.setValid(node.hasTop());
        node.reSet();
        node.addToSet(node.getTopKey());
      } else if (node.getType() == ParserTreeConstants.JJTANDNODE
          || node.getType() == ParserTreeConstants.JJTEQNODE
          || node.getType() == ParserTreeConstants.JJTERNODE
          || node.getType() == ParserTreeConstants.JJTLENODE
          || node.getType() == ParserTreeConstants.JJTLTNODE
          || node.getType() == ParserTreeConstants.JJTGENODE
          || node.getType() == ParserTreeConstants.JJTGTNODE) {
        // sub iterator guarantees it is in its internal range,
        // otherwise, no top.
        node.setValid(node.hasTop());
      }
    }
  }
  if (log.isDebugEnabled()) {
    log.debug("BoolLogic.testTreeState end, treeState:: " + this.root.getContents()
        + " , valid: " + root.isValid());
  }
  return this.root.isValid();
}
/**
 * Propagates validity and top key from the (single effective) child up to the HEAD/root node.
 * The root is valid only if it is valid AND has a top key at the end.
 */
private void handleHEAD(BooleanLogicTreeNode node) {
  Enumeration<?> children = node.children();
  while (children.hasMoreElements()) {
    BooleanLogicTreeNode child = (BooleanLogicTreeNode) children.nextElement();
    if (child.getType() == ParserTreeConstants.JJTANDNODE) {// BooleanLogicTreeNode.NodeType.AND)
      // {
      node.setValid(child.isValid());
      node.setTopKey(child.getTopKey());
    } else if (child.getType() == ParserTreeConstants.JJTORNODE) {// BooleanLogicTreeNode.NodeType.OR)
      // {
      node.setValid(child.isValid());
      node.setTopKey(child.getTopKey());
    } else if (child.getType() == ParserTreeConstants.JJTEQNODE
        || child.getType() == ParserTreeConstants.JJTERNODE
        || child.getType() == ParserTreeConstants.JJTGTNODE
        || child.getType() == ParserTreeConstants.JJTGENODE
        || child.getType() == ParserTreeConstants.JJTLTNODE
        || child.getType() == ParserTreeConstants.JJTLENODE) {// BooleanLogicTreeNode.NodeType.SEL)
      // {
      // Simple selector child: valid exactly when it has a top key.
      node.setValid(true);
      node.setTopKey(child.getTopKey());
      if (child.getTopKey() == null) {
        node.setValid(false);
      }
    }
  } // end while
  // I have to be valid AND have a top key
  if (node.isValid() && !node.hasTop()) {
    node.setValid(false);
  }
}
/**
 * Evaluates an interior AND node. Tracks a "goodSet" of candidate keys (from non-negated
 * children) and a "badSet" (from negated children); the node is invalid as soon as a child
 * fails to intersect the goodSet or a negated child's key appears in it. On success the node's
 * top key becomes the minimum of the surviving goodSet.
 */
private void handleAND(BooleanLogicTreeNode me) {
  if (log.isDebugEnabled()) {
    log.debug("handleAND::" + me.getContents());
  }
  Enumeration<?> children = me.children();
  me.setValid(true); // it's easier to prove false than true
  HashSet<Key> goodSet = new HashSet<>();
  HashSet<Key> badSet = new HashSet<>();
  while (children.hasMoreElements()) {
    BooleanLogicTreeNode child = (BooleanLogicTreeNode) children.nextElement();
    if (child.getType() == ParserTreeConstants.JJTEQNODE
        || child.getType() == ParserTreeConstants.JJTANDNODE
        || child.getType() == ParserTreeConstants.JJTERNODE
        || child.getType() == ParserTreeConstants.JJTNENODE
        || child.getType() == ParserTreeConstants.JJTGENODE
        || child.getType() == ParserTreeConstants.JJTLENODE
        || child.getType() == ParserTreeConstants.JJTGTNODE
        || child.getType() == ParserTreeConstants.JJTLTNODE) {
      if (child.isNegated()) {
        if (child.hasTop()) {
          badSet.add(child.getTopKey());
          if (goodSet.contains(child.getTopKey())) {
            me.setValid(false);
            return;
          }
          if (child.isValid()) {
            me.setValid(false);
            return;
          }
        }
      } else {
        if (child.hasTop()) {
          if (log.isDebugEnabled()) {
            log.debug("handleAND, child node: " + child.getContents());
          }
          // if you're in the bad set, you're done.
          if (badSet.contains(child.getTopKey())) {
            if (log.isDebugEnabled()) {
              log.debug("handleAND, child is in bad set, setting parent false");
            }
            me.setValid(false);
            return;
          }
          // if good set is empty, add it.
          if (goodSet.isEmpty()) {
            if (log.isDebugEnabled()) {
              log.debug("handleAND, goodSet is empty, adding child: " + child.getContents());
            }
            goodSet.add(child.getTopKey());
          } else {
            // must be in the good set & not in the bad set
            // if either fails, I'm false.
            if (!goodSet.contains(child.getTopKey())) {
              if (log.isDebugEnabled()) {
                log.debug(
                    "handleAND, goodSet is not empty, and does NOT contain child, setting false. child: "
                        + child.getContents());
              }
              me.setValid(false);
              return;
            } else {
              // trim the good set to this one value
              // (handles the case were the initial encounters were ORs)
              goodSet = new HashSet<>();
              goodSet.add(child.getTopKey());
              if (log.isDebugEnabled()) {
                log.debug(
                    "handleAND, child in goodset, trim to this value: " + child.getContents());
              }
            }
          }
        } else {
          // Child has no top. It may still be acceptable if all of its own children are
          // "false" negations (i.e. the subtree is pure negation); otherwise this AND fails.
          if (child.getChildCount() > 0) {
            Enumeration<?> subchildren = child.children();
            boolean allFalse = true;
            while (subchildren.hasMoreElements()) {
              BooleanLogicTreeNode subchild = (BooleanLogicTreeNode) subchildren.nextElement();
              if (!subchild.isNegated()) {
                allFalse = false;
                break;
              } else if (subchild.isNegated() && subchild.hasTop()) {
                allFalse = false;
                break;
              }
            }
            if (!allFalse) {
              me.setValid(false);
              return;
            }
          } else {
            // child returned a null value and is not a negation, this in turn makes me false.
            me.setValid(false);
            return;
          }
        }
      }
    } else if (child.getType() == ParserTreeConstants.JJTORNODE) {// BooleanLogicTreeNode.NodeType.OR)
      // {
      // NOTE: The OR may be an OrIterator in which case it will only produce
      // a single unique identifier, or it may be a pure logical construct and
      // be capable of producing multiple unique identifiers.
      // This should handle all cases.
      Iterator<?> iter = child.getSetIterator();
      boolean goodSetEmpty = goodSet.isEmpty();
      boolean matchedOne = false;
      boolean pureNegations = true;
      if (!child.isValid()) {
        if (log.isDebugEnabled()) {
          log.debug("handleAND, child is an OR and it is not valid, setting false, ALL NEGATED?: "
              + child.isChildrenAllNegated());
        }
        me.setValid(false); // I'm an AND if one of my children is false, I'm false.
        return;
      } else if (child.isValid() && !child.hasTop()) {
        // pure negation, do nothing
      } else if (child.isValid() && child.hasTop()) { // I need to match one
        if (log.isDebugEnabled()) {
          log.debug("handleAND, child OR, valid and has top, means not pureNegations");
        }
        pureNegations = false;
        while (iter.hasNext()) {
          Key i = (Key) iter.next();
          if (child.isNegated()) {
            badSet.add(i);
            if (goodSet.contains(i)) {
              if (log.isDebugEnabled()) {
                log.debug("handleAND, child OR, goodSet contains bad value: " + i);
              }
              me.setValid(false);
              return;
            }
          } else {
            // if the good set is empty, then push all of my ids.
            if (goodSetEmpty && !badSet.contains(i)) {
              goodSet.add(i);
              matchedOne = true;
            } else {
              // I need at least one to match
              if (goodSet.contains(i)) {
                matchedOne = true;
              }
            }
          }
        }
      }
      // is the goodSet still empty? that means we were only negations
      // otherwise, if it's not empty and we didn't match one, false
      if (child.isNegated()) {
        // we're ok
      } else {
        if (goodSet.isEmpty() && !pureNegations) {
          if (log.isDebugEnabled()) {
            log.debug("handleAND, child OR, empty goodset && !pureNegations, set false");
          }
          // that's bad, we weren't negated, should've pushed something in there.
          me.setValid(false);
          return;
        } else if (!goodSet.isEmpty() && !pureNegations) { // goodSet contains values.
          if (!matchedOne) { // but we didn't match any.
            if (log.isDebugEnabled()) {
              log.debug("handleAND, child OR, goodSet had values but I didn't match any, false");
            }
            me.setValid(false);
            return;
          }
          // we matched something, trim the set.
          // i.e. two child ORs
          goodSet = child.getIntersection(goodSet);
        }
      }
    }
  } // end while
  if (goodSet.isEmpty()) { // && log.isDebugEnabled()) {
    if (log.isDebugEnabled()) {
      log.debug("handleAND-> goodSet is empty, pure negations?");
    }
  } else {
    // Smallest surviving candidate key becomes this node's top key.
    me.setTopKey(Collections.min(goodSet));
    if (log.isDebugEnabled()) {
      log.debug("End of handleAND, this node's topKey: " + me.getTopKey());
    }
  }
}
/**
 * Evaluates an interior OR node: collects the top keys of all valid, non-negated children into
 * this node's set, and chooses the minimum unique id as its top key. If every child is negated,
 * the node is valid only when no negated child currently has a top key.
 */
private void handleOR(BooleanLogicTreeNode me) {
  Enumeration<?> children = me.children();
  // I'm an OR node, need at least one positive.
  me.setValid(false);
  me.reSet();
  me.setTopKey(null);
  boolean allNegated = true;
  while (children.hasMoreElements()) {
    // 3 cases for child: SEL, AND, OR
    // and negation
    BooleanLogicTreeNode child = (BooleanLogicTreeNode) children.nextElement();
    if (child.getType() == ParserTreeConstants.JJTEQNODE
        || child.getType() == ParserTreeConstants.JJTNENODE
        || child.getType() == ParserTreeConstants.JJTANDNODE
        || child.getType() == ParserTreeConstants.JJTERNODE
        || child.getType() == ParserTreeConstants.JJTNRNODE
        || child.getType() == ParserTreeConstants.JJTLENODE
        || child.getType() == ParserTreeConstants.JJTLTNODE
        || child.getType() == ParserTreeConstants.JJTGENODE
        || child.getType() == ParserTreeConstants.JJTGTNODE) {
      if (child.hasTop()) {
        if (child.isNegated()) {
          // do nothing.
        } else {
          allNegated = false;
          // I have something add it to my set.
          if (child.isValid()) {
            me.addToSet(child.getTopKey());
          }
        }
      } else if (!child.isNegated()) { // I have a non-negated child
        allNegated = false;
        // that child could be pure negations in which case I'm true
        me.setValid(child.isValid());
      }
    } else if (child.getType() == ParserTreeConstants.JJTORNODE) {// BooleanLogicTreeNode.NodeType.OR)
      // {
      if (child.hasTop()) {
        if (!child.isNegated()) {
          allNegated = false;
          // add its rowIds to my rowIds
          Iterator<?> iter = child.getSetIterator();
          while (iter.hasNext()) {
            Key i = (Key) iter.next();
            if (i != null) {
              me.addToSet(i);
            }
          }
        }
      } else {
        // Or node that doesn't have a top, check if it's valid or not
        // because it could be pure negations itself.
        if (child.isValid()) {
          me.setValid(true);
        }
      }
    }
  } // end while
  if (allNegated) {
    // Pure-negation OR: valid only if at least one negated child currently has no top.
    // do all my children have top?
    children = me.children();
    while (children.hasMoreElements()) {
      BooleanLogicTreeNode child = (BooleanLogicTreeNode) children.nextElement();
      if (!child.hasTop()) {
        me.setValid(true);
        me.setTopKey(null);
        return;
      }
    }
    me.setValid(false);
  } else {
    Key k = me.getMinUniqueID();
    if (k == null) {
      me.setValid(false);
    } else {
      me.setValid(true);
      me.setTopKey(k);
    }
  }
}
/*
 * ************************************************************************* Utility methods.
 */
// Transforms the TreeNode tree of query.parser into the
// BooleanLogicTreeNodeJexl form.
/**
 * Recursively converts a parser TreeNode into a BooleanLogicTreeNode. Leaf comparison nodes
 * (EQ/NE/ER/NR/LT/LE/GT/GE) return immediately after the first usable term; AND/OR/NOT/root
 * nodes create a parent and recurse over children. Field names are prefixed with the field
 * index prefix ("fi\0") when not already present.
 *
 * NOTE(review): if an unsupported node type is encountered, returnNode stays null and the
 * trailing child loop would NPE — presumably unreachable for well-formed queries; confirm.
 */
public BooleanLogicTreeNode transformTreeNode(TreeNode node) throws ParseException {
  if (node.getType().equals(ASTEQNode.class) || node.getType().equals(ASTNENode.class)) {
    if (log.isDebugEnabled()) {
      log.debug("Equals Node");
    }
    Multimap<String,QueryTerm> terms = node.getTerms();
    for (String fName : terms.keySet()) {
      Collection<QueryTerm> values = terms.get(fName);
      for (QueryTerm t : values) {
        if (null == t || null == t.getValue()) {
          continue;
        }
        String fValue = t.getValue().toString();
        fValue = fValue.replace("'", "");
        boolean negated = t.getOperator().equals("!=");
        if (!fName.startsWith(FIELD_NAME_PREFIX)) {
          fName = FIELD_NAME_PREFIX + fName;
        }
        // Returns on the first usable term.
        BooleanLogicTreeNode child =
            new BooleanLogicTreeNode(ParserTreeConstants.JJTEQNODE, fName, fValue, negated);
        return child;
      }
    }
  }
  if (node.getType().equals(ASTERNode.class) || node.getType().equals(ASTNRNode.class)) {
    if (log.isDebugEnabled()) {
      log.debug("Regex Node");
    }
    Multimap<String,QueryTerm> terms = node.getTerms();
    for (String fName : terms.keySet()) {
      Collection<QueryTerm> values = terms.get(fName);
      for (QueryTerm t : values) {
        if (null == t || null == t.getValue()) {
          continue;
        }
        String fValue = t.getValue().toString();
        fValue = fValue.replaceAll("'", "");
        // NR (not-regex) nodes are represented as negated ER nodes.
        boolean negated = node.getType().equals(ASTNRNode.class);
        if (!fName.startsWith(FIELD_NAME_PREFIX)) {
          fName = FIELD_NAME_PREFIX + fName;
        }
        BooleanLogicTreeNode child =
            new BooleanLogicTreeNode(ParserTreeConstants.JJTERNODE, fName, fValue, negated);
        return child;
      }
    }
  }
  if (node.getType().equals(ASTLTNode.class) || node.getType().equals(ASTLENode.class)
      || node.getType().equals(ASTGTNode.class) || node.getType().equals(ASTGENode.class)) {
    Multimap<String,QueryTerm> terms = node.getTerms();
    for (String fName : terms.keySet()) {
      Collection<QueryTerm> values = terms.get(fName);
      if (!fName.startsWith(FIELD_NAME_PREFIX)) {
        fName = FIELD_NAME_PREFIX + fName;
      }
      for (QueryTerm t : values) {
        if (null == t || null == t.getValue()) {
          continue;
        }
        String fValue = t.getValue().toString();
        fValue = fValue.replaceAll("'", "").toLowerCase();
        boolean negated = false; // to be negated, must be child of Not, which is handled
        // elsewhere.
        int mytype = JexlOperatorConstants.getJJTNodeType(t.getOperator());
        BooleanLogicTreeNode child = new BooleanLogicTreeNode(mytype, fName, fValue, negated);
        if (log.isDebugEnabled()) {
          log.debug("adding child node: " + child.getContents());
        }
        return child;
      }
    }
  }
  BooleanLogicTreeNode returnNode = null;
  if (node.getType().equals(ASTAndNode.class) || node.getType().equals(ASTOrNode.class)) {
    int parentType = node.getType().equals(ASTAndNode.class) ? ParserTreeConstants.JJTANDNODE
        : ParserTreeConstants.JJTORNODE;
    if (log.isDebugEnabled()) {
      log.debug("AND/OR node: " + parentType);
    }
    if (node.isLeaf() || !node.getTerms().isEmpty()) {
      returnNode = new BooleanLogicTreeNode(parentType);
      Multimap<String,QueryTerm> terms = node.getTerms();
      for (String fName : terms.keySet()) {
        Collection<QueryTerm> values = terms.get(fName);
        if (!fName.startsWith(FIELD_NAME_PREFIX)) {
          fName = FIELD_NAME_PREFIX + fName;
        }
        for (QueryTerm t : values) {
          if (null == t || null == t.getValue()) {
            continue;
          }
          String fValue = t.getValue().toString();
          fValue = fValue.replaceAll("'", "");
          boolean negated = t.getOperator().equals("!=");
          int mytype = JexlOperatorConstants.getJJTNodeType(t.getOperator());
          BooleanLogicTreeNode child = new BooleanLogicTreeNode(mytype, fName, fValue, negated);
          if (log.isDebugEnabled()) {
            log.debug("adding child node: " + child.getContents());
          }
          returnNode.add(child);
        }
      }
    } else {
      returnNode = new BooleanLogicTreeNode(parentType);
    }
  } else if (node.getType().equals(ASTNotNode.class)) {
    if (log.isDebugEnabled()) {
      log.debug("NOT node");
    }
    if (node.isLeaf()) {
      // NOTE: this should be cleaned up a bit.
      Multimap<String,QueryTerm> terms = node.getTerms();
      for (String fName : terms.keySet()) {
        Collection<QueryTerm> values = terms.get(fName);
        if (!fName.startsWith(FIELD_NAME_PREFIX)) {
          fName = FIELD_NAME_PREFIX + fName;
        }
        for (QueryTerm t : values) {
          if (null == t || null == t.getValue()) {
            continue;
          }
          String fValue = t.getValue().toString();
          fValue = fValue.replaceAll("'", "").toLowerCase();
          // Under a NOT, the negation sense of the operator is inverted.
          boolean negated = !t.getOperator().equals("!=");
          int mytype = JexlOperatorConstants.getJJTNodeType(t.getOperator());
          if (!fName.startsWith(FIELD_NAME_PREFIX)) {
            fName = FIELD_NAME_PREFIX + fName;
          }
          return new BooleanLogicTreeNode(mytype, fName, fValue, negated);
        }
      }
    } else {
      returnNode = new BooleanLogicTreeNode(ParserTreeConstants.JJTNOTNODE);
    }
  } else if (node.getType().equals(ASTJexlScript.class)
      || node.getType().getSimpleName().equals("RootNode")) {
    if (log.isDebugEnabled()) {
      log.debug("ROOT/JexlScript node");
    }
    if (node.isLeaf()) {
      returnNode = new BooleanLogicTreeNode(ParserTreeConstants.JJTJEXLSCRIPT);
      // NOTE: this should be cleaned up a bit.
      Multimap<String,QueryTerm> terms = node.getTerms();
      for (String fName : terms.keySet()) {
        Collection<QueryTerm> values = terms.get(fName);
        if (!fName.startsWith(FIELD_NAME_PREFIX)) {
          fName = FIELD_NAME_PREFIX + fName;
        }
        for (QueryTerm t : values) {
          if (null == t || null == t.getValue()) {
            continue;
          }
          String fValue = t.getValue().toString();
          fValue = fValue.replaceAll("'", "").toLowerCase();
          boolean negated = t.getOperator().equals("!=");
          int mytype = JexlOperatorConstants.getJJTNodeType(t.getOperator());
          BooleanLogicTreeNode child = new BooleanLogicTreeNode(mytype, fName, fValue, negated);
          returnNode.add(child);
          return returnNode;
        }
      }
    } else {
      returnNode = new BooleanLogicTreeNode(ParserTreeConstants.JJTJEXLSCRIPT);
    }
  } else {
    log.error(
        "Currently Unsupported Node type: " + node.getClass().getName() + " \t" + node.getType());
  }
  // Recurse into children and attach their transformed subtrees.
  for (TreeNode child : node.getChildren()) {
    returnNode.add(transformTreeNode(child));
  }
  return returnNode;
}
// After tree conflicts have been resolved, we can collapse branches where
// leaves have been pruned.
/**
 * Collapses AND/OR branches whose leaves have been pruned: removes empty non-range
 * AND/OR nodes and splices single-child AND/OR nodes out of the tree by promoting
 * the lone child to the grandparent.
 *
 * @param myroot the root (HEAD) of the boolean logic tree, mutated in place
 * @throws Exception if the head node ends up with no children (nothing left to evaluate)
 */
public static void collapseBranches(BooleanLogicTreeNode myroot) throws Exception {
  // NOTE: doing a depth first enumeration didn't work when I started
  // removing nodes halfway through. The following method does work,
  // it's essentially a reverse breadth first traversal.
  List<BooleanLogicTreeNode> nodes = new ArrayList<>();
  Enumeration<?> bfe = myroot.breadthFirstEnumeration();
  while (bfe.hasMoreElements()) {
    BooleanLogicTreeNode node = (BooleanLogicTreeNode) bfe.nextElement();
    nodes.add(node);
  }
  // walk backwards (children before parents) so removals don't invalidate the walk
  for (int i = nodes.size() - 1; i >= 0; i--) {
    BooleanLogicTreeNode node = nodes.get(i);
    if (log.isDebugEnabled()) {
      log.debug(
          "collapseBranches, inspecting node: " + node.toString() + " " + node.printNode());
    }
    if (node.getType() == ParserTreeConstants.JJTANDNODE
        || node.getType() == ParserTreeConstants.JJTORNODE) {
      // Range nodes are intentionally kept even when childless.
      if (node.getChildCount() == 0 && !node.isRangeNode()) {
        node.removeFromParent();
      } else if (node.getChildCount() == 1) {
        // A one-child AND/OR is a no-op wrapper: promote the child.
        BooleanLogicTreeNode p = (BooleanLogicTreeNode) node.getParent();
        BooleanLogicTreeNode c = (BooleanLogicTreeNode) node.getFirstChild();
        node.removeFromParent();
        p.add(c);
      }
    } else if (node.getType() == ParserTreeConstants.JJTJEXLSCRIPT) {
      if (node.getChildCount() == 0) {
        if (log.isDebugEnabled()) {
          log.debug("collapseBranches, headNode has no children");
        }
        throw new Exception("Head node has no children.");
      }
    }
  }
}
/**
 * Walks the tree bottom-up and (1) marks AND/OR nodes whose children are all negated and
 * (2) rewrites bounded range comparisons (&gt;, &gt;=, &lt;, &lt;=) found under an AND node
 * into range nodes, which are registered in {@code rangerators} for later OR-iterator setup.
 *
 * @param myroot the root of the boolean logic tree
 * @return the same root, mutated in place
 */
public BooleanLogicTreeNode refactorTree(BooleanLogicTreeNode myroot) {
  List<BooleanLogicTreeNode> nodes = new ArrayList<>();
  Enumeration<?> bfe = myroot.breadthFirstEnumeration();
  while (bfe.hasMoreElements()) {
    BooleanLogicTreeNode node = (BooleanLogicTreeNode) bfe.nextElement();
    nodes.add(node);
  }
  // walk backwards
  for (int i = nodes.size() - 1; i >= 0; i--) {
    BooleanLogicTreeNode node = nodes.get(i);
    if (node.getType() == ParserTreeConstants.JJTANDNODE
        || node.getType() == ParserTreeConstants.JJTORNODE) {
      // 1. check to see if all children are negated
      // 2. check to see if we have to handle ranges.
      Map<Text,RangeBounds> ranges = new HashMap<>();
      Enumeration<?> children = node.children();
      boolean allNegated = true;
      while (children.hasMoreElements()) {
        BooleanLogicTreeNode child = (BooleanLogicTreeNode) children.nextElement();
        if (!child.isNegated()) {
          allNegated = false;
          // break;
        }
        // currently we are not allowing unbounded ranges, so they must sit under an AND node.
        if (node.getType() == ParserTreeConstants.JJTANDNODE) {
          // check for ranges
          // NOTE(review): GT/GE and LT/LE are handled identically below, so bound
          // inclusivity is not distinguished by RangeBounds — confirm intentional.
          if (child.getType() == JexlOperatorConstants.JJTGTNODE) {
            if (log.isDebugEnabled()) {
              log.debug("refactor: GT " + child.getContents());
            }
            if (ranges.containsKey(child.getFieldName())) {
              RangeBounds rb = ranges.get(child.getFieldName());
              rb.setLower(child.getFieldValue());
            } else {
              RangeBounds rb = new RangeBounds();
              rb.setLower(child.getFieldValue());
              ranges.put(child.getFieldName(), rb);
            }
          } else if (child.getType() == JexlOperatorConstants.JJTGENODE) {
            if (log.isDebugEnabled()) {
              log.debug("refactor: GE " + child.getContents());
            }
            if (ranges.containsKey(child.getFieldName())) {
              RangeBounds rb = ranges.get(child.getFieldName());
              rb.setLower(child.getFieldValue());
            } else {
              RangeBounds rb = new RangeBounds();
              rb.setLower(child.getFieldValue());
              ranges.put(child.getFieldName(), rb);
            }
          } else if (child.getType() == JexlOperatorConstants.JJTLTNODE) {
            if (log.isDebugEnabled()) {
              log.debug("refactor: LT " + child.getContents());
            }
            if (ranges.containsKey(child.getFieldName())) {
              RangeBounds rb = ranges.get(child.getFieldName());
              rb.setUpper(child.getFieldValue());
            } else {
              RangeBounds rb = new RangeBounds();
              rb.setUpper(child.getFieldValue());
              ranges.put(child.getFieldName(), rb);
            }
          } else if (child.getType() == JexlOperatorConstants.JJTLENODE) {
            if (log.isDebugEnabled()) {
              log.debug("refactor: LE " + child.getContents());
            }
            if (ranges.containsKey(child.getFieldName())) {
              RangeBounds rb = ranges.get(child.getFieldName());
              rb.setUpper(child.getFieldValue());
            } else {
              RangeBounds rb = new RangeBounds();
              rb.setUpper(child.getFieldValue());
              ranges.put(child.getFieldName(), rb);
            }
          }
        }
      }
      if (allNegated) {
        node.setChildrenAllNegated(true);
      }
      // see if the AND node had a range.
      if (node.getType() == ParserTreeConstants.JJTANDNODE) {
        // if(ranges.containsKey(node.getFieldName())){
        if (!ranges.isEmpty()) {
          // we have a range, process it
          if (node.getChildCount() <= 2 && ranges.size() == 1) {
            if (log.isDebugEnabled()) {
              log.debug("AND range 2 children or less");
            }
            // only has a range, modify the node in place into a range node
            node.setType(ParserTreeConstants.JJTORNODE);
            node.removeAllChildren();
            // RangeBounds rb = ranges.get(node.getFieldName());
            for (Entry<Text,RangeBounds> entry : ranges.entrySet()) {
              Text fName = entry.getKey();
              RangeBounds rb = entry.getValue();
              node.setFieldName(fName);
              node.setFieldValue(new Text(""));
              node.setLowerBound(rb.getLower());
              node.setUpperBound(rb.getUpper());
              node.setRangeNode(true);
            }
            rangerators.add(node);
            if (log.isDebugEnabled()) {
              log.debug("refactor: " + node.getContents());
              log.debug("refactor: " + node.getLowerBound() + " " + node.getUpperBound());
            }
          } else {
            if (log.isDebugEnabled()) {
              log.debug("AND range more than 2 children");
            }
            // node has range plus other children, create another node from the range
            // remove lt,le,gt,ge from parent and push in a single node
            // removing nodes via enumeration doesn't work, push into a list
            // and walk backwards
            List<BooleanLogicTreeNode> temp = new ArrayList<>();
            Enumeration<?> e = node.children();
            while (e.hasMoreElements()) {
              BooleanLogicTreeNode c = (BooleanLogicTreeNode) e.nextElement();
              temp.add(c);
            }
            for (int j = temp.size() - 1; j >= 0; j--) {
              BooleanLogicTreeNode c = temp.get(j);
              if (c.getType() == JexlOperatorConstants.JJTLENODE
                  || c.getType() == JexlOperatorConstants.JJTLTNODE
                  || c.getType() == JexlOperatorConstants.JJTGENODE
                  || c.getType() == JexlOperatorConstants.JJTGTNODE) {
                c.removeFromParent();
              }
            }
            // one synthetic range child per field that had bounds
            for (Entry<Text,RangeBounds> entry : ranges.entrySet()) {
              Text fName = entry.getKey();
              BooleanLogicTreeNode nchild =
                  new BooleanLogicTreeNode(ParserTreeConstants.JJTORNODE, fName.toString(), "");
              RangeBounds rb = entry.getValue();
              nchild.setFieldValue(new Text(""));
              nchild.setLowerBound(rb.getLower());
              nchild.setUpperBound(rb.getUpper());
              nchild.setRangeNode(true);
              node.add(nchild);
              rangerators.add(nchild);
            }
            if (log.isDebugEnabled()) {
              log.debug("refactor: " + node.getContents());
            }
          }
        }
      }
    }
  }
  return myroot;
}
// If all children are of type SEL, roll this up into an AND or OR node.
/**
 * Reports whether the given AND/OR node is eligible for roll-up: it must have at
 * least one child, and every child must be a plain (non-negated, non-wildcard)
 * equality node.
 *
 * @param parent the node whose children are inspected
 * @return true only when all children are simple JJTEQNODE leaves
 */
private static boolean canRollUp(BooleanLogicTreeNode parent) {
  if (log.isDebugEnabled()) {
    log.debug("canRollUp: testing " + parent.getContents());
  }
  if (parent.getChildCount() < 1) {
    if (log.isDebugEnabled()) {
      log.debug("canRollUp: child count < 1, return false");
    }
    return false;
  }
  for (Enumeration<?> kids = parent.children(); kids.hasMoreElements();) {
    BooleanLogicTreeNode kid = (BooleanLogicTreeNode) kids.nextElement();
    if (kid.getType() != ParserTreeConstants.JJTEQNODE) {
      if (log.isDebugEnabled()) {
        log.debug(
            "canRollUp: child.getType -> " + ParserTreeConstants.jjtNodeName[kid.getType()]
                + " int: " + kid.getType() + " return false");
      }
      return false;
    }
    if (kid.isNegated()) {
      if (log.isDebugEnabled()) {
        log.debug("canRollUp: child.isNegated, return false");
      }
      return false;
    }
    if (kid.getFieldValue().toString().contains("*")) {
      if (log.isDebugEnabled()) {
        log.debug("canRollUp: child has wildcard: " + kid.getFieldValue());
      }
      return false;
    }
  }
  return true;
}
/**
 * Small utility function to print out the depth-first enumeration of the tree. Specify the root
 * or sub root of the tree you wish to view.
 *
 * @param root
 *          The root node of the tree or sub-tree.
 */
public static void showDepthFirstTraversal(BooleanLogicTreeNode root) {
  System.out.println("DepthFirstTraversal");
  int i = -1;
  for (Enumeration<?> e = root.depthFirstEnumeration(); e.hasMoreElements();) {
    i += 1;
    BooleanLogicTreeNode n = (BooleanLogicTreeNode) e.nextElement();
    System.out.println(i + " : " + n);
  }
}
/**
 * Prints (to stdout) and debug-logs the breadth-first enumeration of the tree rooted
 * at {@code root}, one node per line prefixed by its visit index.
 *
 * @param root the root node of the tree or sub-tree to display
 */
public static void showBreadthFirstTraversal(BooleanLogicTreeNode root) {
  System.out.println("BreadthFirstTraversal");
  // Guard debug calls like the rest of this class so the message strings are
  // not built when debug logging is disabled.
  if (log.isDebugEnabled()) {
    log.debug("BooleanLogicIterator.showBreadthFirstTraversal()");
  }
  Enumeration<?> e = root.breadthFirstEnumeration();
  int i = -1;
  while (e.hasMoreElements()) {
    i += 1;
    BooleanLogicTreeNode n = (BooleanLogicTreeNode) e.nextElement();
    System.out.println(i + " : " + n);
    if (log.isDebugEnabled()) {
      log.debug(i + " : " + n);
    }
  }
}
/**
 * Partitions the leaves under {@code node} into the positive (non-negated) priority
 * queue and the negatives list. {@code positives} is rebuilt from scratch and
 * {@code negatives} is cleared first.
 */
private void splitLeaves(BooleanLogicTreeNode node) {
  if (log.isDebugEnabled()) {
    log.debug("BoolLogic: splitLeaves()");
  }
  positives = new PriorityQueue<>(10, new BooleanLogicTreeNodeComparator());
  negatives.clear();
  for (Enumeration<?> e = node.depthFirstEnumeration(); e.hasMoreElements();) {
    BooleanLogicTreeNode leaf = (BooleanLogicTreeNode) e.nextElement();
    if (!leaf.isLeaf()) {
      continue;
    }
    if (leaf.isNegated()) {
      negatives.add(leaf);
    } else {
      positives.add(leaf);
    }
  }
}
/**
 * Rebuilds the {@code positives} priority queue from all non-negated leaves
 * under {@code node}, restoring proper heap order.
 */
private void reHeapPriorityQueue(BooleanLogicTreeNode node) {
  positives.clear();
  for (Enumeration<?> e = node.depthFirstEnumeration(); e.hasMoreElements();) {
    BooleanLogicTreeNode leaf = (BooleanLogicTreeNode) e.nextElement();
    if (leaf.isLeaf() && !leaf.isNegated()) {
      positives.add(leaf);
    }
  }
}
/*
 * ************************************************************************* The iterator
 * interface methods.
 */
/** @return true while this iterator has a current top key. */
@Override
public boolean hasTop() {
  return topKey != null;
}
/** Returns the current top key; null when the iterator is exhausted. */
@Override
public Key getTopKey() {
  final Key current = topKey;
  if (log.isDebugEnabled()) {
    log.debug("getTopKey: " + current);
  }
  return current;
}
/**
 * Sets the top key, nulling it when it falls outside the overall seek range.
 * A null end key on the range means "to the end of the tablet", so containment
 * is only enforced when an end key exists.
 */
private void setTopKey(Key key) {
  if (this.overallRange != null && key != null && overallRange.getEndKey() != null
      && !this.overallRange.contains(key)) {
    topKey = null;
    return;
  }
  topKey = key;
}
/** Returns the (always empty) top value, creating it lazily on first access. */
@Override
public Value getTopValue() {
  if (null == topValue) {
    topValue = new Value(new byte[0]);
  }
  return topValue;
}
/** Clears the top key of every negated leaf and marks each one valid again. */
private void resetNegatives() {
  for (BooleanLogicTreeNode negative : negatives) {
    negative.setTopKey(null);
    negative.setValid(true);
  }
}
/**
 * Extracts the uid portion of an event key, which lives in the column family.
 * Returns null when the key or its column family is absent.
 */
private String getEventKeyUid(Key k) {
  if (k != null && k.getColumnFamily() != null) {
    return k.getColumnFamily().toString();
  }
  return null;
}
/**
 * Extracts the uid portion of an index key: the part of the column qualifier that
 * follows the first null byte (the whole qualifier when no null byte is present).
 *
 * @param k an index-format key; may be null
 * @return the uid string, or null when the key or its column qualifier is missing
 */
private String getIndexKeyUid(Key k) {
  // Explicit null checks instead of the previous catch-all exception handler;
  // behavior is unchanged but the control flow no longer relies on NPEs.
  if (k == null || k.getColumnQualifier() == null) {
    return null;
  }
  String sKey = k.getColumnQualifier().toString();
  // indexOf yields -1 when no null byte exists, so substring(0) returns the
  // whole qualifier — same result as before.
  return sKey.substring(sKey.indexOf('\0') + 1);
}
/*
 * Remember, the Key in the BooleanLogicTreeNode is different structurally than the Key in its sub
 * iterator because the key BooleanLogic needs to return is an event key created from the index
 * key (which is what the sub iterators are looking at!)
 */
/**
 * Computes, bottom-up, the next key every branch of the tree could agree on:
 * AND nodes take the maximum of their children's advance keys, OR nodes the
 * minimum, and the HEAD node turns its child's advance key into a jump key.
 *
 * @return an index-format jump key built from the head node's child, or null
 *         when no further advance is possible
 */
private Key getOptimizedAdvanceKey() throws IOException {
  if (log.isDebugEnabled()) {
    log.debug("getOptimizedAdvanceKey() called");
  }
  // Seed every non-negated node's advance key from its current top key.
  Enumeration<?> bfe = root.breadthFirstEnumeration();
  ArrayList<BooleanLogicTreeNode> bfl = new ArrayList<>();
  while (bfe.hasMoreElements()) {
    BooleanLogicTreeNode node = (BooleanLogicTreeNode) bfe.nextElement();
    if (!node.isNegated()) {
      node.setAdvanceKey(node.getTopKey());
      node.setDone(false);
      bfl.add(node);
    }
  }
  // walk the tree backwards (children before parents)
  for (int i = bfl.size() - 1; i >= 0; i--) {
    if (bfl.get(i).isLeaf() || bfl.get(i).isNegated()) {
      if (log.isDebugEnabled()) {
        log.debug("leaf, isDone?: " + bfl.get(i).isDone());
      }
      continue;
    }
    BooleanLogicTreeNode node = bfl.get(i);
    node.setDone(false);
    if (log.isDebugEnabled()) {
      log.debug("for loop, node: " + node + " isDone? " + node.isDone());
    }
    if (node.getType() == ParserTreeConstants.JJTANDNODE) {
      // get max — an AND can only match where ALL children can match.
      BooleanLogicTreeNode max = null;
      Enumeration<?> children = node.children();
      boolean firstTime = true;
      while (children.hasMoreElements()) {
        BooleanLogicTreeNode child = (BooleanLogicTreeNode) children.nextElement();
        if (child.isNegated() || child.isChildrenAllNegated()) {
          continue;
        }
        // all advance keys were initially set from topkey for the leaves.
        if (child.getAdvanceKey() == null) {
          log.debug("\tchild does not advance key: " + child.printNode());
          // i'm an and, i have a child that's done, mark me as done.
          node.setDone(true);
          break;
        } else {
          log.debug("\tchild advanceKey: " + child.getAdvanceKey());
        }
        if (firstTime) {
          firstTime = false;
          max = child;
          if (log.isDebugEnabled()) {
            log.debug("\tAND block, first valid child: " + child);
          }
          continue;
        }
        log.debug("\tAND block, max: " + max);
        log.debug("\tAND block, child: " + child);
        // first test row
        if (max.getAdvanceKey().getRow().compareTo(child.getAdvanceKey().getRow()) < 0) {
          max = child;
          if (log.isDebugEnabled()) {
            log.debug("\tAND block, child row greater, new max.");
          }
          continue;
        }
        // if rows are equal, test uids
        String uid_max = getEventKeyUid(max.getAdvanceKey());
        String uid_child = getEventKeyUid(child.getAdvanceKey());
        if (log.isDebugEnabled()) {
          if (uid_max == null) {
            log.debug("\tuid_max is currently null");
          } else {
            log.debug("\tuid_max: " + uid_max);
          }
          if (uid_child == null) {
            log.debug("\tuid_child is null");
          } else {
            log.debug("\tuid_child: " + uid_child);
          }
        }
        if (uid_max != null && uid_child != null) {
          if (uid_max.compareTo(uid_child) < 0) {
            max = child;
          }
        } else if (uid_child == null) { // one or the other is null so we want the next row
          max = child;
          log.debug("uid_child is null, we need to grab the next row.");
          break;
        } else {
          log.debug("max is null and child is not, who should we keep? child: " + child);
          break;
        }
      } // end while
      if (log.isDebugEnabled()) {
        log.debug("attemptOptimization: AND with children, max: " + max);
      }
      if (max != null) {
        node.setAdvanceKey(max.getAdvanceKey());
      } else {
        if (log.isDebugEnabled()) {
          log.debug("AND block finished, max is null");
        }
        node.setDone(true);
      }
    } else if (node.getType() == ParserTreeConstants.JJTORNODE) {
      // get min — an OR can match wherever ANY child can match.
      BooleanLogicTreeNode min = null;
      Enumeration<?> children = node.children();
      boolean firstTime = true;
      int numChildren = node.getChildCount();
      int allChildrenDone = 0;
      while (children.hasMoreElements()) {
        BooleanLogicTreeNode child = (BooleanLogicTreeNode) children.nextElement();
        if (log.isDebugEnabled()) {
          log.debug("\tOR block start, child: " + child);
        }
        if (child.isNegated() || child.isChildrenAllNegated()) {
          if (log.isDebugEnabled()) {
            log.debug("\tskip negated child: " + child);
          }
          numChildren -= 1;
          continue;
        }
        if (child.isDone()) {
          if (log.isDebugEnabled()) {
            log.debug("\tchild is done: " + child);
          }
          allChildrenDone += 1;
          if (numChildren == allChildrenDone) {
            if (log.isDebugEnabled()) {
              log.debug("\tnumChildren==allChildrenDone, setDone & break");
            }
            // we're done here
            node.setDone(true);
            break;
          }
        }
        if (child.getAdvanceKey() == null) {
          log.debug("\tOR child doesn't have top or an AdvanceKey");
          continue;
        }
        if (firstTime) {
          if (log.isDebugEnabled()) {
            log.debug("\tOR block, first valid node, min=child: " + child + " advanceKey: "
                + child.getAdvanceKey());
          }
          firstTime = false;
          min = child;
          continue;
        }
        if (log.isDebugEnabled()) {
          log.debug("\tOR block, min: " + min);
          log.debug("\tOR block, child: " + child);
        }
        if (min.getAdvanceKey().getRow().toString()
            .compareTo(child.getAdvanceKey().getRow().toString()) > 0) {
          // child row is less than min, set min to child
          min = child;
          if (log.isDebugEnabled()) {
            log.debug("\tmin row was greater than child, min=child: " + min);
          }
          continue;
        } else if (min.getAdvanceKey().getRow().compareTo(child.getAdvanceKey().getRow()) < 0) {
          // min row is less child, skip
          if (log.isDebugEnabled()) {
            log.debug("\tmin row less than childs, keep min: " + min);
          }
          continue;
        } else { // they're equal, test uids
          String uid_min = getEventKeyUid(min.getAdvanceKey());
          String uid_child = getEventKeyUid(child.getAdvanceKey());
          if (log.isDebugEnabled()) {
            log.debug("\ttesting uids, uid_min: " + uid_min + " uid_child: " + uid_child);
          }
          if (uid_min != null && uid_child != null) {
            if (uid_min.compareTo(uid_child) > 0) {
              min = child;
              if (log.isDebugEnabled()) {
                log.debug("\tuid_min > uid_child, set min to child: " + min);
              }
            }
          } else if (uid_min == null) {
            if (log.isDebugEnabled()) {
              log.debug("\tuid_min is null, take childs: " + uid_child);
            }
            min = child;
          }
        }
      } // end while
      if (log.isDebugEnabled()) {
        log.debug("attemptOptimization: OR with children, min: " + min);
      }
      if (min != null) {
        if (log.isDebugEnabled()) {
          log.debug("OR block, min != null, advanceKey? " + min.getAdvanceKey());
        }
        node.setAdvanceKey(min.getAdvanceKey());
      } else {
        log.debug("OR block, min is null..." + min);
        node.setAdvanceKey(null);
        node.setDone(true);
      }
    } else if (node.getType() == ParserTreeConstants.JJTJEXLSCRIPT) { // HEAD node
      if (log.isDebugEnabled()) {
        log.debug("getOptimizedAdvanceKey, HEAD node");
      }
      BooleanLogicTreeNode child = (BooleanLogicTreeNode) node.getFirstChild();
      if (child.isDone()) {
        if (log.isDebugEnabled()) {
          log.debug("Head node's child is done, need to move to the next row");
        }
        Key k = child.getAdvanceKey();
        if (k == null) {
          if (log.isDebugEnabled()) {
            log.debug("HEAD node, advance key is null, try to grab next row from topKey");
          }
          if (hasTop()) {
            k = this.getTopKey();
            // "\1" appended to the row forces the seek past the current row.
            child.setAdvanceKey(new Key(new Text(k.getRow().toString() + "\1")));
          } else {
            return null;
          }
        } else {
          Text row = new Text(k.getRow().toString() + "\1");
          k = new Key(row);
          child.setAdvanceKey(k);
        }
      }
      if (log.isDebugEnabled()) {
        log.debug("advance Key: " + child.getAdvanceKey());
      }
      // NOTE(review): the column family is passed for both the CF and CQ slots of
      // the new Key — looks like a copy/paste oddity (CQ would normally come from
      // getColumnQualifier); confirm this is the intended jump-key format.
      Key key = new Key(child.getAdvanceKey().getRow(), child.getAdvanceKey().getColumnFamily(),
          child.getAdvanceKey().getColumnFamily());
      return key;
    } // end else
  } // end for
  return null;
}
/*
 * The incoming jump key has been formatted into the structure of an index key, but the leaves are
 * eventkeys
 */
/**
 * Clears every node's advance key, then asks each positive leaf to jump forward
 * to the row/uid identified by {@code jumpKey}.
 *
 * @return always true (the per-leaf jump results are not aggregated)
 */
private boolean jump(Key jumpKey) throws IOException {
  if (log.isDebugEnabled()) {
    log.debug("JUMP!");
  }
  // Wipe any previously computed advance keys throughout the tree.
  for (Enumeration<?> e = root.breadthFirstEnumeration(); e.hasMoreElements();) {
    ((BooleanLogicTreeNode) e.nextElement()).setAdvanceKey(null);
  } // now advance all nodes to the advance key
  if (log.isDebugEnabled()) {
    log.debug("jump, All leaves need to advance to: " + jumpKey);
    log.debug("advanceUid => " + getIndexKeyUid(jumpKey));
  }
  for (BooleanLogicTreeNode leaf : positives) {
    leaf.jump(jumpKey);
  }
  return true;
}
/**
 * Advances the iterator to the next event key satisfying the boolean logic tree.
 * Tries an optimized "jump" (seeking every leaf to a computed advance key) first;
 * when jumping is not applicable it falls back to advancing the smallest positive
 * leaf one step at a time. Sets {@code topKey} to null when exhausted.
 */
@Override
@SuppressWarnings("unused")
public void next() throws IOException {
  if (log.isDebugEnabled()) {
    log.debug("next() method called");
  }
  boolean finished = false;
  boolean ok = true;
  if (positives.isEmpty()) {
    setTopKey(null);
    return;
  }
  Key previousJumpKey = null;
  while (!finished) {
    Key jumpKey = this.getOptimizedAdvanceKey();
    if (jumpKey == null) { // stop?
      if (log.isDebugEnabled()) {
        log.debug("next(), jump key is null, stopping");
      }
      setTopKey(null);
      return;
    }
    if (log.isDebugEnabled()) {
      if (jumpKey != null) {
        log.debug("next(), jumpKey: " + jumpKey);
      } else {
        log.debug("jumpKey is null");
      }
    }
    boolean same = false;
    if (jumpKey != null && topKey != null) {
      // check that the uid's are not the same
      same = getIndexKeyUid(jumpKey).equals(getEventKeyUid(topKey));
      if (log.isDebugEnabled()) {
        log.debug(
            "jumpKeyUid: " + getIndexKeyUid(jumpKey) + " topKeyUid: " + getEventKeyUid(topKey));
      }
    }
    if (log.isDebugEnabled()) {
      log.debug("previousJumpKey: " + previousJumpKey);
      log.debug("current JumpKey: " + jumpKey);
    }
    if (jumpKey != null && !this.overallRange.contains(jumpKey)) {
      if (log.isDebugEnabled()) {
        log.debug(
            "jumpKey is outside of range, that means the next key is out of range, stopping");
        log.debug("jumpKey: " + jumpKey + " overallRange.endKey: " + overallRange.getEndKey());
      }
      // stop
      setTopKey(null);
      return;
    }
    // A repeated jump key means jumping again would make no progress.
    boolean previousSame = false;
    if (previousJumpKey != null && jumpKey != null) {
      previousSame = previousJumpKey.equals(jumpKey);
    }
    // -----------------------------------
    // OPTIMIZED block
    if (jumpKey != null && !same && !previousSame && ok) {
      previousJumpKey = jumpKey;
      ok = jump(jumpKey); // attempt to jump everybody forward to this row and uid.
      // tryJump = false;
      // now test the tree state.
      if (testTreeState()) {
        Key tempKey = root.getTopKey();
        // it is potentially valid, now we need to seek all of the negatives
        if (!negatives.isEmpty()) {
          advanceNegatives(this.root.getTopKey());
          if (!testTreeState()) {
            continue;
          }
        }
        // Negatives may have shifted root's top key; only accept if it is unchanged.
        if (root.getTopKey().equals(tempKey)) {
          // it's valid set nextKey and make sure it's not the same as topKey.
          if (log.isDebugEnabled()) {
            if (this.root.hasTop()) {
              log.debug("this.root.getTopKey()->" + this.root.getTopKey());
            } else {
              log.debug("next, this.root.getTopKey() is null");
            }
            if (topKey != null) {
              log.debug("topKey->" + topKey);
            } else {
              log.debug("topKey is null");
            }
          }
          if (compare(topKey, this.root.getTopKey()) != 0) {
            // topKey = this.root.getTopKey();
            setTopKey(this.root.getTopKey());
            return;
          }
        }
      }
      // --------------------------------------
      // Regular next block
    } else {
      reHeapPriorityQueue(this.root);
      BooleanLogicTreeNode node;
      // Pull the smallest positive leaf that still has work to do.
      while (true) {
        node = positives.poll();
        if (!node.isDone() && node.hasTop()) {
          break;
        }
        if (positives.isEmpty()) {
          setTopKey(null);
          return;
        }
      }
      if (log.isDebugEnabled()) {
        if (jumpKey == null) {
          log.debug("no jump, jumpKey is null");
        } else if (topKey == null) {
          log.debug("no jump, jumpKey: " + jumpKey + " topKey: null");
        } else {
          log.debug("no jump, jumpKey: " + jumpKey + " topKey: " + topKey);
        }
        log.debug("next, (no jump) min node: " + node);
        log.debug(node);
      }
      node.next();
      resetNegatives();
      if (!node.hasTop()) {
        // it may be part of an or, so it could be ok.
        node.setValid(false);
        if (testTreeState()) {
          // it's valid set nextKey and make sure it's not the same as topKey.
          if (!topKey.equals(this.root.getTopKey())) {
            // topKey = this.root.getTopKey();
            if (this.overallRange != null) {
              if (this.overallRange.contains(root.getTopKey())) {
                setTopKey(this.root.getTopKey());
                return;
              } else {
                setTopKey(null);
                finished = true;
                return;
              }
            } else {
              setTopKey(this.root.getTopKey());
              return;
            }
          }
        }
      } else {
        if (overallRange.contains(node.getTopKey())) {
          // the node had something so push it back into priority queue
          positives.add(node);
        }
        // now test the tree state.
        if (testTreeState()) {
          Key tempKey = root.getTopKey();
          // it is potentially valid, now we need to seek all of the negatives
          if (!negatives.isEmpty()) {
            advanceNegatives(this.root.getTopKey());
            if (!testTreeState()) {
              continue;
            }
          }
          if (root.getTopKey().equals(tempKey)) {
            // it's valid set nextKey and make sure it's not the same as topKey.
            if (log.isDebugEnabled()) {
              if (this.root.hasTop()) {
                log.debug("this.root.getTopKey()->" + this.root.getTopKey());
              } else {
                log.debug("next, this.root.getTopKey() is null");
              }
              if (topKey != null) {
                log.debug("topKey->" + topKey);
              } else {
                log.debug("topKey is null");
              }
            }
            if (compare(topKey, this.root.getTopKey()) != 0) {
              // topKey = this.root.getTopKey();
              if (this.overallRange != null) {
                if (overallRange.contains(this.root.getTopKey())) {
                  setTopKey(this.root.getTopKey());
                  return;
                } else {
                  topKey = null;
                  finished = true;
                  return;
                }
              } else {
                setTopKey(this.root.getTopKey());
                return;
              }
            }
          }
        }
      }
      // is the priority queue empty?
      if (positives.isEmpty()) {
        finished = true;
        topKey = null;
      }
    }
  }
}
/*
 * create a range for the given row of the
 */
/**
 * Seeks every negated leaf to the exact event identified by key {@code k}
 * (row + uid in the column family). A negated leaf that finds a match is
 * marked invalid, since the negation then fails for this event.
 */
private void advanceNegatives(Key k) throws IOException {
  if (log.isDebugEnabled()) {
    log.debug("advancingNegatives for Key: " + k);
  }
  Text rowID = k.getRow();
  Text colFam = k.getColumnFamily();
  for (BooleanLogicTreeNode neg : negatives) {
    // Restrict the seek to exactly this event: [fieldValue\0uid, fieldValue\0uid\1)
    Text start = new Text(neg.getFieldValue() + "\0" + colFam);
    Text end = new Text(neg.getFieldValue() + "\0" + colFam + "\1");
    Range range = new Range(new Key(rowID, neg.getFieldName(), start), true,
        new Key(rowID, neg.getFieldName(), end), false);
    if (log.isDebugEnabled()) {
      log.debug("range: " + range);
    }
    neg.seek(range, EMPTY_COL_FAMS, false);
    if (neg.hasTop()) {
      neg.setValid(false);
    }
    if (log.isDebugEnabled()) {
      if (neg.hasTop()) {
        log.debug("neg top key: " + neg.getTopKey());
      } else {
        log.debug("neg has no top");
      }
    }
  }
}
/**
 * Positions the iterator at the first event in {@code range} that satisfies the
 * boolean logic tree: sets up range nodes, seeks every positive leaf, rebuilds the
 * priority queue, checks tree validity (including negatives), and falls back to
 * {@link #next()} when the initial position is not a match.
 */
@Override
public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
    throws IOException {
  this.overallRange = range;
  if (log.isDebugEnabled()) {
    log.debug("seek, overallRange: " + overallRange);
  }
  // Given some criteria, advance all iterators to that position.
  // NOTE: All of our iterators exist in the leaves.
  topKey = null;
  root.setTopKey(null);
  // set up the range iterators for the given seek range.
  // these should exist in the positives as OR iterators, but need special setup.
  setupRangerators(range);
  // don't take this out, if you jump rows on the tablet you could have
  // pulled nodes out of the positives priority queue. On a call to seek
  // it is usually jumping rows, so everything needs to become possibly
  // valid again.
  reHeapPriorityQueue(this.root);
  for (BooleanLogicTreeNode node : positives) {
    node.setDone(false);
    node.seek(range, columnFamilies, inclusive);
    if (log.isDebugEnabled()) {
      String tk = "empty";
      if (node.hasTop()) {
        tk = node.getTopKey().toString();
      }
      log.debug("leaf: " + node.getContents() + " topKey: " + tk);
    }
  }
  // Now that all nodes have been seek'd recreate the priorityQueue to sort them properly.
  splitLeaves(this.root);
  resetNegatives();
  // test Tree, if it's not valid, call next
  if (testTreeState() && overallRange.contains(root.getTopKey())) {
    if (!negatives.isEmpty()) {
      // now advance negatives
      advanceNegatives(this.root.getTopKey());
      if (!testTreeState()) {
        next();
      }
    }
    if (log.isDebugEnabled()) {
      log.debug("overallRange " + overallRange + " topKey " + this.root.getTopKey() + " contains "
          + overallRange.contains(this.root.getTopKey()));
    }
    if (overallRange.contains(this.root.getTopKey()) && this.root.isValid()) {
      setTopKey(this.root.getTopKey());
    } else {
      setTopKey(null);
      return;
    }
  } else {
    // seek failed in the logic test, but there may be other possible
    // values which satisfy the logic tree. Make sure our iterators aren't
    // all null, and then call next.
    // if(!root.hasTop()){
    if (log.isDebugEnabled()) {
      log.debug("seek, testTreeState is false, HEAD(root) does not have top");
    }
    // check nodes in positives to see if they're all null/outside range
    // or if nothing percolated up to root yet.
    List<BooleanLogicTreeNode> removals = new ArrayList<>();
    for (BooleanLogicTreeNode node : positives) {
      if (!node.hasTop() || !overallRange.contains(node.getTopKey())) {
        removals.add(node);
      }
    }
    for (BooleanLogicTreeNode node : removals) {
      positives.remove(node);
    }
    next();
    return;
  }
}
/**
 * Null-tolerant key comparison. A null key sorts after any non-null key because
 * null here means "exhausted", i.e. conceptually closest to the end of the table.
 */
private int compare(Key k1, Key k2) {
  if (k1 == null && k2 == null) {
    return 0;
  }
  if (k1 == null) {
    return 1;
  }
  if (k2 == null) {
    return -1;
  }
  return k1.compareTo(k2);
}
/**
 * For each range node collected during tree refactoring, scans the seek range for
 * the distinct field values inside the node's bounds, builds an OrIterator with one
 * term per value, and attaches it to the node as its user object.
 */
private void setupRangerators(Range range) throws IOException {
  if (rangerators == null || rangerators.isEmpty()) {
    return;
  }
  for (BooleanLogicTreeNode node : rangerators) {
    Set<String> fValues = new HashSet<>();
    OrIterator orIter = new OrIterator();
    SortedKeyValueIterator<Key,Value> siter = sourceIterator.deepCopy(env);
    // create UniqFieldNameValueIterator to find uniq field names values
    UniqFieldNameValueIterator uniq = new UniqFieldNameValueIterator(node.getFieldName(),
        node.getLowerBound(), node.getUpperBound());
    uniq.setSource(siter);
    uniq.seek(range, EMPTY_COL_FAMS, false);
    for (; uniq.hasTop(); uniq.next()) {
      keyParser.parse(uniq.getTopKey());
      String val = keyParser.getFieldValue();
      if (fValues.add(val)) { // Set.add returns false when val was already seen
        orIter.addTerm(siter, node.getFieldName(), new Text(val), env);
        if (log.isDebugEnabled()) {
          log.debug("setupRangerators, adding to OR: " + node.getFieldName() + ":" + val);
        }
      } else {
        log.debug("already have this one: " + val);
      }
    }
    node.setUserObject(orIter);
  }
}
/*
 * ************************************************************************* Inner classes
 */
/**
 * Orders tree nodes by their current top key; nodes without a top key sort last
 * (a null top key means the underlying source is exhausted).
 */
public class BooleanLogicTreeNodeComparator implements Comparator<Object> {
  @Override
  public int compare(Object o1, Object o2) {
    Key k1 = ((BooleanLogicTreeNode) o1).getTopKey();
    Key k2 = ((BooleanLogicTreeNode) o2).getTopKey();
    if (log.isDebugEnabled()) {
      String t1 = (k1 == null) ? "null"
          : k1.getRow().toString() + "\0" + k1.getColumnFamily().toString();
      String t2 = (k2 == null) ? "null"
          : k2.getRow().toString() + "\0" + k2.getColumnFamily().toString();
      log.debug("BooleanLogicTreeNodeComparator \tt1: " + t1 + " t2: " + t2);
    }
    if (k1 == null && k2 == null) {
      return 0;
    }
    if (k1 == null) {
      return 1;
    }
    if (k2 == null) {
      return -1;
    }
    return k1.compareTo(k2);
  }
}
/** Describes this iterator and its single named {@code query} option. */
@Override
public IteratorOptions describeOptions() {
  Map<String,String> namedOptions = Collections.singletonMap(QUERY_OPTION, "query expression");
  return new IteratorOptions(getClass().getSimpleName(),
      "evaluates event objects against an expression", namedOptions, null);
}
/**
 * Validates that both the query expression and the field-index query options are
 * present; caches the field-index query on success.
 *
 * @return true when both required options exist
 */
@Override
public boolean validateOptions(Map<String,String> options) {
  if (!options.containsKey(QUERY_OPTION) || !options.containsKey(FIELD_INDEX_QUERY)) {
    return false;
  }
  this.updatedQuery = options.get(FIELD_INDEX_QUERY);
  return true;
}
}
| 6,230 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/iterator/DefaultIteratorEnvironment.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.iterator;
import java.io.IOException;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.system.MapFileIterator;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
/**
 * Minimal {@code IteratorEnvironment} for running iterators outside a tablet server
 * (e.g. in tests). Only configuration access, map-file reading, and the sampling /
 * user-compaction flags are supported; every other operation throws
 * {@code UnsupportedOperationException}.
 */
public class DefaultIteratorEnvironment implements IteratorEnvironment {

  AccumuloConfiguration conf;

  public DefaultIteratorEnvironment() {
    this.conf = DefaultConfiguration.getInstance();
  }

  @Override
  public SortedKeyValueIterator<Key,Value> reserveMapFileReader(String mapFileName)
      throws IOException {
    // Named hadoopConf to avoid shadowing the AccumuloConfiguration field 'conf'.
    Configuration hadoopConf = new Configuration();
    FileSystem fs = FileSystem.get(hadoopConf);
    return new MapFileIterator(fs, mapFileName, hadoopConf);
  }

  @Override
  public AccumuloConfiguration getConfig() {
    return conf;
  }

  @Override
  public boolean isSamplingEnabled() {
    return false;
  }

  @Override
  public IteratorScope getIteratorScope() {
    throw new UnsupportedOperationException();
  }

  @Override
  public boolean isFullMajorCompaction() {
    throw new UnsupportedOperationException();
  }

  @Override
  public void registerSideChannel(SortedKeyValueIterator<Key,Value> iter) {
    throw new UnsupportedOperationException();
  }

  @Override
  public Authorizations getAuthorizations() {
    throw new UnsupportedOperationException();
  }

  @Override
  public SamplerConfiguration getSamplerConfiguration() {
    throw new UnsupportedOperationException();
  }

  @Override
  public IteratorEnvironment cloneWithSamplingEnabled() {
    throw new UnsupportedOperationException();
  }

  @Override
  public boolean isUserCompaction() {
    return false;
  }
}
| 6,231 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/iterator/AbstractEvaluatingIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.iterator;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.OptionDescriber;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.examples.wikisearch.parser.EventFields;
import org.apache.accumulo.examples.wikisearch.parser.QueryEvaluator;
import org.apache.commons.jexl2.parser.ParseException;
import org.apache.log4j.Logger;
import com.esotericsoftware.kryo.Kryo;
/**
*
* This iterator aggregates rows together using the specified key comparator. Subclasses will
* provide their own implementation of fillMap which will fill the supplied EventFields object with
* field names (key) and field values (value). After all fields have been put into the aggregated
* object (by aggregating all columns with the same key), the EventFields object will be compared
* against the supplied expression. If the expression returns true, then the return key and return
* value can be retrieved via getTopKey() and getTopValue().
*
* Optionally, the caller can set an expression (field operator value) that should not be evaluated
* against the event. For example, if the query is "A == 'foo' and B == 'bar'", but for some reason
* B may not be in the data, then setting the UNEVALUATED_EXPRESSIONS option to "B == 'bar'" will
* allow the events to be evaluated against the remainder of the expression and still return as
* true.
*
* By default this iterator will return all Events in the shard. If the START_DATE and END_DATE are
* specified, then this iterator will evaluate the timestamp of the key against the start and end
* dates. If the event date is not within the range of start to end, then it is skipped.
*
* This iterator will return up the stack an EventFields object serialized using Kryo in the cell
* Value.
*
*/
public abstract class AbstractEvaluatingIterator
    implements SortedKeyValueIterator<Key,Value>, OptionDescriber {

  private static final Logger log = Logger.getLogger(AbstractEvaluatingIterator.class);
  protected static final byte[] NULL_BYTE = new byte[0];

  /** Iterator option holding the JEXL query expression. */
  public static final String QUERY_OPTION = "expr";
  /** Iterator option: comma separated list of "field op literal" expressions to skip. */
  public static final String UNEVALUTED_EXPRESSIONS = "unevaluated.expressions";

  // Comparator (supplied by the subclass) that decides which keys aggregate into one event.
  private PartialKey comparator = null;
  protected SortedKeyValueIterator<Key,Value> iterator;
  private Key currentKey = new Key();
  private Key returnKey;
  private Value returnValue;
  private String expression;
  private QueryEvaluator evaluator;
  private EventFields event = null;
  // Shared across instances; used only to serialize EventFields into the return Value.
  private static Kryo kryo = new Kryo();
  private Range seekRange = null;
  private Set<String> skipExpressions = null;

  protected AbstractEvaluatingIterator(AbstractEvaluatingIterator other, IteratorEnvironment env) {
    iterator = other.iterator.deepCopy(env);
    event = other.event;
  }

  public AbstractEvaluatingIterator() {}

  /**
   * Implementations will return the PartialKey value to use for comparing keys for aggregating
   * events
   *
   * @return the type of comparator to use
   */
  public abstract PartialKey getKeyComparator();

  /**
   * When the query expression evaluates to true against the event, the event fields will be
   * serialized into the Value and returned up the iterator stack. Implemenations will need to
   * provide a key to be used with the event.
   *
   * @return the key that should be returned with the map of values.
   */
  public abstract Key getReturnKey(Key k) throws Exception;

  /**
   * Implementations will need to fill the map with field visibilities, names, and values. When all
   * fields have been aggregated the event will be evaluated against the query expression.
   *
   * @param event
   *          Multimap of event names and fields.
   * @param key
   *          current Key
   * @param value
   *          current Value
   */
  public abstract void fillMap(EventFields event, Key key, Value value) throws Exception;

  /**
   * Provides the ability to skip this key and all of the following ones that match using the
   * comparator.
   *
   * @return true if the key should be acted upon, otherwise false.
   */
  public abstract boolean isKeyAccepted(Key key) throws IOException;

  /**
   * Reset state.
   */
  public void reset() {
    event.clear();
  }

  /**
   * Consumes every source key equal to the current top key (under the subclass comparator),
   * filling {@code event}, and computes {@code returnKey} for the aggregated event.
   */
  private void aggregateRowColumn(EventFields event) throws IOException {
    currentKey.set(iterator.getTopKey());
    try {
      fillMap(event, iterator.getTopKey(), iterator.getTopValue());
      iterator.next();
      while (iterator.hasTop() && iterator.getTopKey().equals(currentKey, this.comparator)) {
        fillMap(event, iterator.getTopKey(), iterator.getTopValue());
        iterator.next();
      }
      // Get the return key
      returnKey = getReturnKey(currentKey);
    } catch (Exception e) {
      throw new IOException("Error aggregating event", e);
    }
  }

  /**
   * Aggregates events until one satisfies the query expression; on success, serializes the
   * EventFields into {@code returnValue} with Kryo.
   */
  private void findTop() throws IOException {
    do {
      reset();
      // check if aggregation is needed
      if (iterator.hasTop()) {
        // Check to see if the current key is accepted. For example in the wiki
        // table there are field index rows. We don't want to process those in
        // some cases so return right away. Consume all of the non-accepted keys
        while (iterator.hasTop() && !isKeyAccepted(iterator.getTopKey())) {
          iterator.next();
        }
        if (iterator.hasTop()) {
          aggregateRowColumn(event);
          // Evaluate the event against the expression
          if (event.size() > 0 && this.evaluator.evaluate(event)) {
            if (log.isDebugEnabled()) {
              log.debug("Event evaluated to true, key = " + returnKey);
            }
            // Create a byte array sized with headroom for Kryo's per-entry overhead
            byte[] serializedMap = new byte[event.getByteSize() + (event.size() * 20)];
            // Wrap in ByteBuffer to work with Kryo
            ByteBuffer buf = ByteBuffer.wrap(serializedMap);
            // Serialize the EventFields object
            event.writeObjectData(kryo, buf);
            // Truncate array to the used size.
            returnValue = new Value(Arrays.copyOfRange(serializedMap, 0, buf.position()));
          } else {
            returnKey = null;
            returnValue = null;
          }
        } else {
          if (log.isDebugEnabled()) {
            log.debug("Iterator no longer has top.");
          }
        }
      } else {
        log.debug("Iterator.hasTop() == false");
      }
    } while (returnValue == null && iterator.hasTop());
    // Sanity check. Make sure both returnValue and returnKey are null or both are not null
    if (!((returnKey == null && returnValue == null)
        || (returnKey != null && returnValue != null))) {
      log.warn("Key: " + ((returnKey == null) ? "null" : returnKey.toString()));
      log.warn("Value: " + ((returnValue == null) ? "null" : returnValue.toString()));
      throw new IOException("Return values are inconsistent");
    }
  }

  @Override
  public Key getTopKey() {
    if (returnKey != null) {
      return returnKey;
    }
    return iterator.getTopKey();
  }

  @Override
  public Value getTopValue() {
    if (returnValue != null) {
      return returnValue;
    }
    return iterator.getTopValue();
  }

  @Override
  public boolean hasTop() {
    return returnKey != null || iterator.hasTop();
  }

  @Override
  public void next() throws IOException {
    if (returnKey != null) {
      returnKey = null;
      returnValue = null;
    } else if (iterator.hasTop()) {
      iterator.next();
    }
    findTop();
  }

  /**
   * Copy of IteratorUtil.maximizeStartKeyTimeStamp due to IllegalAccessError
   */
  static Range maximizeStartKeyTimeStamp(Range range) {
    Range seekRange = range;
    if (range.getStartKey() != null && range.getStartKey().getTimestamp() != Long.MAX_VALUE) {
      Key seekKey = new Key(seekRange.getStartKey());
      seekKey.setTimestamp(Long.MAX_VALUE);
      seekRange = new Range(seekKey, true, range.getEndKey(), range.isEndKeyInclusive());
    }
    return seekRange;
  }

  @Override
  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
      throws IOException {
    // do not want to seek to the middle of a value that should be
    // aggregated...
    seekRange = maximizeStartKeyTimeStamp(range);
    iterator.seek(seekRange, columnFamilies, inclusive);
    findTop();
    if (range.getStartKey() != null) {
      while (hasTop() && getTopKey().equals(range.getStartKey(), this.comparator)
          && getTopKey().getTimestamp() > range.getStartKey().getTimestamp()) {
        // the value has a more recent time stamp, so
        // pass it up
        // log.debug("skipping "+getTopKey());
        next();
      }
      while (hasTop() && range.beforeStartKey(getTopKey())) {
        next();
      }
    }
  }

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) throws IOException {
    validateOptions(options);
    event = new EventFields();
    this.comparator = getKeyComparator();
    this.iterator = source;
    try {
      // Replace any expressions that we should not evaluate.
      if (null != this.skipExpressions && this.skipExpressions.size() != 0) {
        for (String skip : this.skipExpressions) {
          // Expression should have form: field<sp>operator<sp>literal.
          // We are going to replace the expression with field == null.
          // BUGFIX: substring's end index is exclusive, so indexOf(" ") already excludes
          // the space; the previous "- 1" silently dropped the field's last character.
          String field = skip.substring(0, skip.indexOf(" "));
          // BUGFIX: use literal replace() rather than regex replaceAll() — the skip
          // expression is plain text and may contain regex metacharacters.
          this.expression = this.expression.replace(skip, field + " == null");
        }
      }
      this.evaluator = new QueryEvaluator(this.expression);
    } catch (ParseException e) {
      throw new IllegalArgumentException("Failed to parse query", e);
    }
    EventFields.initializeKryo(kryo);
  }

  @Override
  public IteratorOptions describeOptions() {
    Map<String,String> options = new HashMap<>();
    options.put(QUERY_OPTION, "query expression");
    options.put(UNEVALUTED_EXPRESSIONS, "comma separated list of expressions to skip");
    return new IteratorOptions(getClass().getSimpleName(),
        "evaluates event objects against an expression", options, null);
  }

  @Override
  public boolean validateOptions(Map<String,String> options) {
    if (!options.containsKey(QUERY_OPTION)) {
      return false;
    } else {
      this.expression = options.get(QUERY_OPTION);
    }
    if (options.containsKey(UNEVALUTED_EXPRESSIONS)) {
      String expressionList = options.get(UNEVALUTED_EXPRESSIONS);
      if (expressionList != null && !expressionList.trim().isEmpty()) {
        this.skipExpressions = new HashSet<>(Arrays.asList(expressionList.split(",")));
      }
    }
    return true;
  }

  public String getQueryExpression() {
    return this.expression;
  }
}
| 6,232 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/iterator/BooleanLogicTreeNode.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.iterator;
import java.io.IOException;
import java.util.Collection;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Iterator;
import javax.swing.tree.DefaultMutableTreeNode;
import javax.swing.tree.TreeNode;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.examples.wikisearch.parser.JexlOperatorConstants;
import org.apache.commons.jexl2.parser.ParserTreeConstants;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Logger;
/**
*
*
*/
public class BooleanLogicTreeNode extends DefaultMutableTreeNode {
private static final long serialVersionUID = 1L;
protected static final Logger log = Logger.getLogger(BooleanLogicTreeNode.class);
private Key myTopKey = null;
private Key advanceKey = null;
private Text fValue = null;
private Text fName = null;
private boolean negated = false;
private int type;
private boolean done = false;
private boolean valid = false;
private boolean rollUp = false;
private String fOperator = null;
private boolean childrenAllNegated = false;
private HashSet<Key> uids;
private Text upperBound;
private Text lowerBound;
private boolean rangeNode;
public BooleanLogicTreeNode() {
super();
uids = new HashSet<Key>();
}
public BooleanLogicTreeNode(int type) {
super();
this.type = type;
uids = new HashSet<Key>();
setOperator();
}
public BooleanLogicTreeNode(int type, boolean negate) {
super();
this.type = type;
this.negated = negate;
uids = new HashSet<Key>();
setOperator();
}
public BooleanLogicTreeNode(int type, String fieldName, String fieldValue) {
super();
this.type = type;
if (fieldValue != null) {
this.fValue = new Text(fieldValue);
}
if (fieldName != null) {
this.fName = new Text(fieldName);
}
uids = new HashSet<Key>();
setOperator();
}
public BooleanLogicTreeNode(int type, String fieldName, String fieldValue, boolean negated) {
super();
this.type = type;
if (fieldValue != null) {
this.fValue = new Text(fieldValue);
}
if (fieldName != null) {
this.fName = new Text(fieldName);
}
uids = new HashSet<Key>();
this.negated = negated;
setOperator();
}
public void setValid(boolean b) {
this.valid = b;
}
public boolean isValid() {
return this.valid;
}
public void setType(int t) {
this.type = t;
}
public int getType() {
return this.type;
}
public void setChildrenAllNegated(boolean childrenAllNegated) {
this.childrenAllNegated = childrenAllNegated;
}
public boolean isChildrenAllNegated() {
return childrenAllNegated;
}
public void setAdvanceKey(Key advanceKey) {
this.advanceKey = advanceKey;
}
public Key getAdvanceKey() {
return advanceKey;
}
public void setNegated(boolean b) {
this.negated = b;
}
public boolean isNegated() {
return negated;
}
public void setTopKey(Key id) {
this.myTopKey = id;
}
public Key getTopKey() {
return myTopKey;
}
public void setDone(boolean done) {
this.done = done;
}
public boolean isDone() {
return done;
}
public void setRollUp(boolean rollUp) {
this.rollUp = rollUp;
}
public boolean isRollUp() {
return rollUp;
}
public Text getFieldValue() {
return fValue;
}
public void setFieldValue(Text term) {
this.fValue = term;
}
public Text getFieldName() {
return fName;
}
public void setFieldName(Text dataLocation) {
this.fName = dataLocation;
}
public String getFieldOperator() {
return fOperator;
}
private void setOperator() {
this.fOperator = JexlOperatorConstants.getOperator(type);
if (negated && this.fOperator.equals("!=")) {
this.fOperator = JexlOperatorConstants.getOperator(JexlOperatorConstants.JJTEQNODE);
}
}
public Text getLowerBound() {
return lowerBound;
}
public void setLowerBound(Text lowerBound) {
this.lowerBound = lowerBound;
}
public Text getUpperBound() {
return upperBound;
}
public void setUpperBound(Text upperBound) {
this.upperBound = upperBound;
}
public boolean isRangeNode() {
return rangeNode;
}
public void setRangeNode(boolean rangeNode) {
this.rangeNode = rangeNode;
}
public String getContents() {
StringBuilder s = new StringBuilder("[");
s.append(toString());
if (children != null) {
Enumeration<?> e = this.children();
while (e.hasMoreElements()) {
BooleanLogicTreeNode n = (BooleanLogicTreeNode) e.nextElement();
s.append(",");
s.append(n.getContents());
}
}
s.append("]");
return s.toString();
}
public String printNode() {
StringBuilder s = new StringBuilder("[");
s.append("Full Location & Term = ");
if (this.fName != null) {
s.append(this.fName.toString());
} else {
s.append("BlankDataLocation");
}
s.append(" ");
if (this.fValue != null) {
s.append(this.fValue.toString());
} else {
s.append("BlankTerm");
}
s.append("]");
return s.toString();
}
@Override
public String toString() {
String uidStr = "none";
if (myTopKey != null) {
String cf = myTopKey.getColumnFamily().toString();
uidStr = cf;
}
switch (type) {
case ParserTreeConstants.JJTEQNODE:
return fName.toString() + ":" + fValue.toString() + ", uid=" + uidStr + " , negation=" + this.isNegated();
case ParserTreeConstants.JJTNENODE:
return fName.toString() + ":" + fValue.toString() + ", uid=" + uidStr + " , negation=" + this.isNegated();
case ParserTreeConstants.JJTERNODE:
return fName.toString() + ":" + fValue.toString() + ", uid=" + uidStr + " , negation=" + this.isNegated();
case ParserTreeConstants.JJTNRNODE:
return fName.toString() + ":" + fValue.toString() + ", uid=" + uidStr + " , negation=" + this.isNegated();
case ParserTreeConstants.JJTLENODE:
return "<=:" + fName.toString() + ":" + fValue.toString() + ", uid=" + uidStr + " , negation=" + this.isNegated();
case ParserTreeConstants.JJTLTNODE:
return "<:" + fName.toString() + ":" + fValue.toString() + ", uid=" + uidStr + " , negation=" + this.isNegated();
case ParserTreeConstants.JJTGENODE:
return ">=:" + fName.toString() + ":" + fValue.toString() + ", uid=" + uidStr + " , negation=" + this.isNegated();
case ParserTreeConstants.JJTGTNODE:
return ">:" + fName.toString() + ":" + fValue.toString() + ", uid=" + uidStr + " , negation=" + this.isNegated();
case ParserTreeConstants.JJTJEXLSCRIPT:
return "HEAD" + ":" + uidStr + ":" + isValid();
case ParserTreeConstants.JJTANDNODE:
return "AND" + ":" + uidStr + ":" + isValid();
case ParserTreeConstants.JJTNOTNODE:
return "NOT";
case ParserTreeConstants.JJTORNODE:
return "OR" + ":" + uidStr + ":" + isValid();
default:
System.out.println("Problem in BLTNODE.toString()");
return null;
}
}
public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
// always start fresh
this.setTopKey(null);
this.setDone(false);
// get my user object which should be an iterator
SortedKeyValueIterator<?,?> iter = (SortedKeyValueIterator<?,?>) this.getUserObject();
if (iter != null) {
iter.seek(range, columnFamilies, inclusive);
if (iter.hasTop()) {
Key key = (Key) iter.getTopKey();
key = buildKey(key);
this.setTopKey(key);
if (log.isDebugEnabled()) {
log.debug("BLTNODE.seek() -> found: " + this.getTopKey());
}
} else {
if (log.isDebugEnabled()) {
log.debug("BLTNODE.seek() -> hasTop::false");
}
this.setDone(true);
}
} else {
if (log.isDebugEnabled()) {
log.debug("BLTNODE.seek(), The iterator was null!");
}
this.setTopKey(null);
}
}
public String buildTreePathString(TreeNode[] path) {
StringBuilder s = new StringBuilder("[");
for (TreeNode p : path) {
s.append(p.toString());
s.append(",");
}
s.deleteCharAt(s.length() - 1);
s.append("]");
return s.toString();
}
public void next() throws IOException {
// always start fresh
this.setTopKey(null);
if (log.isDebugEnabled()) {
TreeNode[] path = this.getPath();
log.debug("BLTNODE.next() path-> " + this.buildTreePathString(path));
}
// have I been marked as done?
if (this.isDone()) {
if (log.isDebugEnabled()) {
log.debug("I've been marked as done, returning");
}
return;
}
SortedKeyValueIterator<?,?> iter = (SortedKeyValueIterator<?,?>) this.getUserObject();
iter.next();
if (iter.hasTop()) {
Key key = (Key) iter.getTopKey();
// I have a valid topKey, pull out the piece I want
key = buildKey(key);
this.setTopKey(key);
if (log.isDebugEnabled()) {
log.debug("BLTNODE.next() -> found: " + this.getTopKey());
}
} else {
// no top value has been returned, I'm done.
if (log.isDebugEnabled()) {
log.debug("BLTNODE.next() -> Nothing found");
}
this.setTopKey(null);
this.setDone(true);
}
}
public boolean jump(Key jumpKey) throws IOException {
boolean ok = true;
if (this.getType() == ParserTreeConstants.JJTEQNODE) {
FieldIndexIterator iter = (FieldIndexIterator) this.getUserObject();
ok = iter.jump(jumpKey);
if (iter.hasTop()) {
Key key = (Key) iter.getTopKey();
key = buildKey(key);
this.setTopKey(key);
if (log.isDebugEnabled()) {
log.debug("BLTNODE.jump() -> found: " + this.getTopKey());
}
} else {
if (log.isDebugEnabled()) {
log.debug("FieldIndexIteratorJexl does not have top after jump, marking done.");
}
this.setTopKey(null);
this.setDone(true);
}
} else if (this.getType() == ParserTreeConstants.JJTANDNODE) {
AndIterator iter = (AndIterator) this.getUserObject();
ok = iter.jump(jumpKey);
if (iter.hasTop()) {
Key key = (Key) iter.getTopKey();
key = buildKey(key);
this.setTopKey(key);
if (log.isDebugEnabled()) {
log.debug("BLTNODE.jump() -> found: " + this.getTopKey());
}
} else {
if (log.isDebugEnabled()) {
log.debug("IntersectingIteratorJexl does not have top after jump, marking done.");
}
this.setTopKey(null);
this.setDone(true);
}
} else if (this.getType() == ParserTreeConstants.JJTORNODE) {
OrIterator iter = (OrIterator) this.getUserObject();
ok = iter.jump(jumpKey);
if (iter.hasTop()) {
Key key = (Key) iter.getTopKey();
key = buildKey(key);
this.setTopKey(key);
if (log.isDebugEnabled()) {
log.debug("BLTNODE.jump() -> found: " + this.getTopKey());
}
} else {
if (log.isDebugEnabled()) {
log.debug("OrIteratorJexl does not have top after jump, marking done.");
}
this.setTopKey(null);
this.setDone(true);
}
}
return ok;
}
public void addToSet(Key i) {
uids.add(i);
}
public void reSet() {
uids = new HashSet<Key>();
}
public boolean inSet(Key t) {
return uids.contains(t);
}
public Iterator<Key> getSetIterator() {
return uids.iterator();
}
public HashSet<Key> getIntersection(HashSet<Key> h) {
h.retainAll(uids);
return h;
}
public Key getMinUniqueID() {
Iterator<Key> iter = uids.iterator();
Key min = null;
while (iter.hasNext()) {
Key t = (Key) iter.next();
if (log.isDebugEnabled()) {
log.debug("OR set member: " + t);
}
if (t != null) {
if (min == null) {
min = t;
} else if (t.compareTo(min) < 0) {
min = t;
}
}
}
return min;
}
public boolean hasTop() {
// This part really needs to be cleaned up.
// It was created before I knew what was being passed back.
if (this.getType() == ParserTreeConstants.JJTORNODE) {
// Are you a Logical OR or an OR Iterator
if (!this.isLeaf()) { // logical construct
// I have a set of keys
return this.uids.size() > 0;
} else { // or iterator, you only have possible key
if (this.getTopKey() == null) {
return false;
} else {
return true;
}
}
} else {
return this.getTopKey() != null;
}
}
public static Key buildKey(Key key) {
if (key == null) {
log.error("Problem in BooleanLogicTreeNodeJexl.buildKey");
return null;
}
// Build Key(Text row, Text colfam) where colFam is dataype\0uid
String[] cq = key.getColumnQualifier().toString().split("\0");
Text uuid = new Text(cq[cq.length - 2] + "\0" + cq[cq.length - 1]);
Text row = key.getRow();
if (log.isDebugEnabled()) {
log.debug("Key-> r:" + row + " fam:" + uuid);
}
// System.out.println("Key-> r:"+row+" fam:"+uuid);
Key k = new Key(row, uuid);
// System.out.println("Key: "+k);
return k;
}
}
| 6,233 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/iterator/FieldIndexIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.iterator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.WrappingIterator;
import org.apache.accumulo.examples.wikisearch.function.QueryFunctions;
import org.apache.accumulo.examples.wikisearch.util.FieldIndexKeyParser;
import org.apache.commons.jexl2.Expression;
import org.apache.commons.jexl2.JexlContext;
import org.apache.commons.jexl2.JexlEngine;
import org.apache.commons.jexl2.MapContext;
import org.apache.commons.jexl2.parser.ParserTreeConstants;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
/**
* This iterator should only return keys from the fi\0{fieldName}:{fieldValue} part of the shard table. Expect topKey to be CF, {datatype}\0{UID}
*/
public class FieldIndexIterator extends WrappingIterator {
// Current top entry exposed to callers; null when exhausted.
private Key topKey = null;
private Value topValue = null;
// Range currently being scanned (rebuilt per row); distinct from parentRange below.
private Range range = null;
private Text currentRow;
// Field index column family (expected to carry an "fi\0" prefix) and its bare name.
private Text fName = null;
private String fNameString = null;
private Text fValue = null;
private String fOperator = null;
// Compiled JEXL expression "fieldName operator 'fieldValue'" used by matches().
private Expression expr = null;
private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<ByteSequence>();
protected static final Logger log = Logger.getLogger(FieldIndexIterator.class);
private boolean negated = false;
// JEXL parser node type, see ParserTreeConstants.
private int type;
private static final String NULL_BYTE = "\0";
private static final String ONE_BYTE = "\1";
// According to the JEXL 2.0 docs, the engine is thread-safe. Let's create 1 engine per VM and
// cache 128 expressions
private static JexlEngine engine = new JexlEngine();
// The caller-supplied seek range and its end row; scanning never proceeds past parentEndRow.
private Range parentRange;
private Text parentEndRow = null;
private FieldIndexKeyParser keyParser;
static {
engine.setCache(128);
Map<String,Object> functions = new HashMap<String,Object>();
functions.put("f", QueryFunctions.class);
engine.setFunctions(functions);
}
public static void setLogLevel(Level l) {
log.setLevel(l);
}
// -------------------------------------------------------------------------
// ------------- Constructors

/** No-arg constructor required by the iterator framework. */
public FieldIndexIterator() {}

/**
 * Creates a non-negated field-index term iterator. Delegates to the negation-aware
 * constructor (the two were previously copy-paste duplicates).
 */
public FieldIndexIterator(int type, Text rowId, Text fieldName, Text fieldValue, String operator) {
  this(type, rowId, fieldName, fieldValue, false, operator);
}

/**
 * @param type JEXL parser node type (see ParserTreeConstants)
 * @param rowId shard row this iterator starts scanning from
 * @param fieldName field index column family; the first 3 chars (the "fi\0" prefix —
 *        TODO confirm) are stripped to form the JEXL variable name
 * @param fieldValue literal value compared against
 * @param neg whether this term is negated
 * @param operator JEXL comparison operator used to build the match expression
 */
public FieldIndexIterator(int type, Text rowId, Text fieldName, Text fieldValue, boolean neg,
    String operator) {
  this.fName = fieldName;
  this.fNameString = fName.toString().substring(3);
  this.fValue = fieldValue;
  this.fOperator = operator;
  this.range = buildRange(rowId);
  this.negated = neg;
  this.type = type;
  // Create the Jexl expression, we need to add the ' around the field value
  StringBuilder buf = new StringBuilder();
  buf.append(fNameString).append(" ").append(this.fOperator).append(" ").append("'")
      .append(fValue.toString()).append("'");
  this.expr = engine.createExpression(buf.toString());
  // Set a default KeyParser
  keyParser = createDefaultKeyParser();
}

/**
 * Deep-copy constructor used by {@code deepCopy}.
 *
 * NOTE(review): only the source iterator and key parser are carried over — fName, fValue,
 * expr, range, etc. remain null. Presumably the framework re-initializes copies before use;
 * TODO confirm.
 */
public FieldIndexIterator(FieldIndexIterator other, IteratorEnvironment env) {
  setSource(other.getSource().deepCopy(env));
  // Set a default KeyParser
  keyParser = createDefaultKeyParser();
}
/** Builds the key parser used to pick apart field-index entries. */
private FieldIndexKeyParser createDefaultKeyParser() {
  return new FieldIndexKeyParser();
}

@Override
public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
  return new FieldIndexIterator(this, env);
}

@Override
public Key getTopKey() {
  return topKey;
}

@Override
public Value getTopValue() {
  return topValue;
}

@Override
public boolean hasTop() {
  return topKey != null;
}
/**
 * Advances to the next field-index entry that both falls inside the current per-row range and
 * satisfies the JEXL expression. When the source walks off the end of a row's fi\0 section the
 * iterator rolls forward to the next candidate row (bounded by parentEndRow when one was set),
 * rebuilds the range, and re-seeks. Clears topKey/topValue when exhausted.
 * matches(), getNextRow(), and buildRange() are defined elsewhere in this class.
 */
@Override
public void next() throws IOException {
if (log.isDebugEnabled()) {
log.debug("next()");
}
// Remember the row we are on so row transitions can be detected below.
if (this.hasTop()) {
currentRow = topKey.getRow();
}
getSource().next();
while (true) {
log.debug("next(), Range: " + range);
if (getSource().hasTop()) {
Key k = getSource().getTopKey();
if (range.contains(k)) {
// In range: accept only if the JEXL expression matches; otherwise skip.
if (matches(k)) {
topKey = k;
topValue = getSource().getTopValue();
return;
} else {
getSource().next();
}
} else {
// Out of range: advance currentRow, honoring the parent's end row if present.
if (parentEndRow != null) { // need to check it
if (k.getRow().equals(currentRow)) {
currentRow = getNextRow();
} else if (currentRow == null || k.getRow().compareTo(currentRow) > 0) {
currentRow = k.getRow();
}
if (currentRow == null || parentEndRow.compareTo(currentRow) < 0) {
// you're done
topKey = null;
topValue = null;
return;
}
} else { // we can go to end of the tablet
if (k.getRow().equals(currentRow)) {
currentRow = getNextRow();
if (currentRow == null) {
topKey = null;
topValue = null;
return;
}
} else if (currentRow == null || (k.getRow().compareTo(currentRow) > 0)) {
currentRow = k.getRow();
}
}
// construct new range and seek the source
range = buildRange(currentRow);
if (log.isDebugEnabled()) {
log.debug("next, range: " + range);
}
getSource().seek(range, EMPTY_COL_FAMS, false);
}
} else {
// Underlying source exhausted.
topKey = null;
topValue = null;
return;
}
}
}
/*
* NOTE: there is some special magic here with range modification. If it's negated, assume the range is explicitly set and don't mess with it (this is how
* it's called by the BooleanLogicIterator) Otherwise, modify the range to start at the beginning and set an explicit end point.
*
* In the future, maybe all we need to do is look for an endKey and modifying that.
*/
/**
 * Seeks this iterator. When the term is negated the incoming range is used verbatim (the
 * BooleanLogicIterator sets it explicitly); otherwise the range is rebuilt to cover the fi\0
 * section of the starting row, and the iterator rolls forward row-by-row (bounded by the
 * parent's end row) until a matching key is found or the data is exhausted.
 * getFirstRow(), getNextRow(), buildRange(), and matches() are defined elsewhere in this class.
 */
@Override
public void seek(Range r, Collection<ByteSequence> columnFamilies, boolean inclusive)
    throws IOException {
  parentRange = r;
  if (log.isDebugEnabled()) {
    log.debug("begin seek, range: " + r);
  }
  // Remember the parent's end row so row-rolling knows when to stop.
  if (parentRange.getEndKey() != null) {
    if (parentRange.getEndKey().getRow() != null) {
      parentEndRow = parentRange.getEndKey().getRow();
      if (log.isDebugEnabled()) {
        log.debug("begin seek, parentEndRow: " + parentEndRow);
      }
    }
  }
  try {
    if (isNegated()) {
      range = r;
      if (log.isDebugEnabled()) {
        log.debug("seek, negation, skipping range modification.");
      }
    } else {
      if (r.getStartKey() != null) {
        if (r.getStartKey().getRow() == null || r.getStartKey().getRow().toString().isEmpty()) {
          currentRow = getFirstRow();
        } else {
          currentRow = r.getStartKey().getRow();
        }
        this.range = buildRange(currentRow);
      } else {
        currentRow = getFirstRow();
        this.range = buildRange(currentRow);
      }
    }
    setTopKey(null);
    setTopValue(null);
    if (log.isDebugEnabled()) {
      log.debug("seek, incoming range: " + range);
    }
    getSource().seek(range, columnFamilies, inclusive);
    while (topKey == null) {
      if (getSource().hasTop()) {
        if (log.isDebugEnabled()) {
          log.debug("seek, source has top: " + getSource().getTopKey());
        }
        Key k = getSource().getTopKey();
        if (range.contains(k)) {
          if (matches(k)) {
            topKey = k;
            topValue = getSource().getTopValue();
            if (log.isDebugEnabled()) {
              log.debug("seek, source has top in valid range");
            }
          } else {
            getSource().next();
          }
        } else {
          if (log.isDebugEnabled()) {
            log.debug("seek, top out of range");
            String pEndRow = "empty";
            if (parentEndRow != null) {
              pEndRow = parentEndRow.toString();
            }
            log.debug("source.topKey.row: " + k.getRow() + "\t currentRow: " + currentRow
                + "\t parentEndRow: " + pEndRow);
          }
          if (isNegated()) {
            topKey = null;
            topValue = null;
            return;
          }
          if (parentEndRow != null) {
            // check it
            if (k.getRow().equals(currentRow)) {
              currentRow = getNextRow();
            }
            if (currentRow == null || parentEndRow.compareTo(currentRow) < 0) {
              // you're done
              topKey = null;
              topValue = null;
              return;
            }
          } else { // can go to end of the tablet
            if (k.getRow().equals(currentRow)) {
              currentRow = getNextRow();
              if (currentRow == null) {
                topKey = null;
                topValue = null;
                return;
              }
            }
          }
          // construct new range and seek the source
          range = buildRange(currentRow);
          if (log.isDebugEnabled()) {
            log.debug("currentRow: " + currentRow);
            log.debug("seek, range: " + range);
          }
          getSource().seek(range, columnFamilies, inclusive);
        }
      } else {
        if (log.isDebugEnabled()) {
          log.debug("seek, underlying source had no top key.");
        }
        topKey = null;
        topValue = null;
        return;
      }
    }
    if (log.isDebugEnabled()) {
      log.debug("seek, topKey found: " + topKey);
    }
  } catch (IOException e) {
    topKey = null;
    topValue = null;
    // BUGFIX: propagate the underlying cause instead of throwing a bare IOException,
    // which made seek failures impossible to diagnose.
    throw new IOException(e);
  }
}
// -------------------------------------------------------------------------
// ------------- Public stuff

/** @return true if this term is negated; negated terms skip the range narrowing in seek(). */
public boolean isNegated() {
  return negated;
}

/** @return the row currently being scanned; may be null before the first seek. */
public Text getCurrentRow() {
  return currentRow;
}

/** @return the field name this iterator matches (used as the column family when building ranges). */
public Text getfName() {
  return fName;
}

/** @return the field value this iterator matches. */
public Text getfValue() {
  return fValue;
}
/**
 * Works like seek, but avoids range issues: advances this iterator to (or past) the given
 * key without disturbing the parent range bookkeeping, re-seeking the source as needed.
 * Compares first by row, then by the uid parsed from the keys when the rows are equal.
 *
 * @param jumpKey the key to advance to
 * @return true if a valid top key/value exists at or beyond the jump point
 * @throws IOException if the underlying source fails
 */
public boolean jump(Key jumpKey) throws IOException {
  if (log.isDebugEnabled()) {
    String pEndRow = "empty";
    if (parentEndRow != null) {
      pEndRow = parentEndRow.toString();
    }
    log.debug("jump, current range: " + range + " parentEndRow is: " + pEndRow);
  }
  if (parentEndRow != null && jumpKey.getRow().compareTo(parentEndRow) > 0) {
    // can't go there.
    if (log.isDebugEnabled()) {
      log.debug("jumpRow: " + jumpKey.getRow() + " is greater than my parentEndRow: " + parentEndRow);
    }
    return false;
  }
  int comp;
  if (!this.hasTop()) {
    if (log.isDebugEnabled()) {
      log.debug("current row: " + this.currentRow);
    }
    /*
     * if I don't have a top, then I should be out of my range for my current row. Need to check parent range to see if I'm supposed to continue to next row
     * or not. Current row can be null because maybe I never found anything in this row.
     */
    if (parentEndRow != null) {
      // if jumpKey row is greater than parentEndRow, stop
      if (jumpKey.getRow().compareTo(parentEndRow) > 0) {
        if (log.isDebugEnabled()) {
          log.debug("jumpKey row is greater than my parentEndRow, done");
        }
        return false;
      }
      // if my current row is null, I must have hit the end of the tablet
      if (currentRow == null) {
        if (log.isDebugEnabled()) {
          log.debug("I have parentEndRow, but no current row, must have hit end of tablet, done");
        }
        return false;
      }
      // if my current row is greater than jump row stop, a seek will be
      // called to get me going again. If my row is equal, but i don't
      // have a topkey, i'm done
      if (currentRow.compareTo(jumpKey.getRow()) >= 0) {
        if (log.isDebugEnabled()) {
          log.debug("I have parentEndRow, but topKey, and my currentRow is >= jumpRow, done");
        }
        return false;
      }
    } else { // we're allowed to go to the end of the tablet
      // if my current row is null, I must have hit the end of the tablet
      if (currentRow == null) {
        if (log.isDebugEnabled()) {
          log.debug("no parentEndRow and current Row is null, must have hit end of tablet, done");
        }
        return false;
      }
      if (currentRow.compareTo(jumpKey.getRow()) >= 0) {
        // i'm past or equal to the jump point and have no top,
        // jumping's not going to help
        if (log.isDebugEnabled()) {
          log.debug("no parentEndRow, no topKey, and currentRow is >= jumpRow, done");
        }
        return false;
      }
    }
    // ok, jumpKey is ahead of me I'll mark it and allow the normal
    // flow to jump there and see if I have top.
    if (log.isDebugEnabled()) {
      log.debug("no topKey, but jumpRow is ahead and I'm allowed to go to it, marking");
    }
    comp = -1;
  } else { // I have a topKey, I can do the normal comparisons
    if (log.isDebugEnabled()) {
      log.debug("have top, can do normal comparisons");
    }
    comp = this.topKey.getRow().compareTo(jumpKey.getRow());
  }
  // ------------------
  // compare rows
  if (comp > 0) { // my row is ahead of jump key
    if (canBeInNextRow()) {
      if (log.isDebugEnabled()) {
        log.debug("I'm ahead of jump row & it's ok.");
        log.debug("jumpRow: " + jumpKey.getRow() + " myRow: " + topKey.getRow() + " parentEndRow: " + parentEndRow);
      }
      return true;
    } else {
      if (log.isDebugEnabled()) {
        log.debug("I'm ahead of jump row & can't be here, or at end of tablet.");
      }
      topKey = null;
      topValue = null;
      return false;
    }
  } else if (comp < 0) { // a row behind jump key, need to move forward
    if (log.isDebugEnabled()) {
      String myRow = "";
      if (hasTop()) {
        myRow = topKey.getRow().toString();
      } else if (currentRow != null) {
        myRow = currentRow.toString();
      }
      log.debug("My row " + myRow + " is less than jump row: " + jumpKey.getRow() + " seeking");
    }
    range = buildRange(jumpKey.getRow());
    // this.seek(range, EMPTY_COL_FAMS, false);
    boolean success = jumpSeek(range);
    if (log.isDebugEnabled() && success) {
      log.debug("uid forced jump, found topKey: " + topKey);
    }
    if (!this.hasTop()) {
      log.debug("seeked with new row and had no top");
      topKey = null;
      topValue = null;
      return false;
    } else if (parentEndRow != null && currentRow.compareTo(parentEndRow) > 0) {
      if (log.isDebugEnabled()) {
        log.debug("myRow: " + getTopKey().getRow() + " is past parentEndRow: " + parentEndRow);
      }
      topKey = null;
      topValue = null;
      return false;
    }
    if (log.isDebugEnabled()) {
      log.debug("jumped, valid top: " + getTopKey());
    }
    return true;
  } else { // rows are equal, check the uid!
    keyParser.parse(topKey);
    String myUid = keyParser.getUid();
    keyParser.parse(jumpKey);
    String jumpUid = keyParser.getUid();
    int ucomp = myUid.compareTo(jumpUid);
    if (log.isDebugEnabled()) {
      log.debug("topKeyUid: " + myUid + " jumpUid: " + jumpUid + " myUid.compareTo(jumpUid)->" + ucomp);
    }
    if (ucomp < 0) { // need to move up
      log.debug("my uid is less than jumpUid, topUid: " + myUid + " jumpUid: " + jumpUid);
      // Strip everything up to and including the NULL separator so only the uid remains.
      Text cq = jumpKey.getColumnQualifier();
      int index = cq.find(NULL_BYTE);
      if (0 <= index) {
        cq.set(cq.getBytes(), index + 1, cq.getLength() - index - 1);
      } else {
        log.error("Expected a NULL separator in the column qualifier");
        this.topKey = null;
        this.topValue = null;
        return false;
      }
      // note my internal range stays the same, I just need to move forward
      Key startKey = new Key(topKey.getRow(), fName, new Text(fValue + NULL_BYTE + cq));
      Key endKey = new Key(topKey.getRow(), fName, new Text(fValue + ONE_BYTE));
      range = new Range(startKey, true, endKey, false);
      log.debug("Using range: " + range + " to seek");
      // source.seek(range, EMPTY_COL_FAMS, false);
      boolean success = jumpSeek(range);
      if (log.isDebugEnabled() && success) {
        log.debug("uid forced jump, found topKey: " + topKey);
      }
      return success;
    } else { // else do nothing
      log.debug("my uid is greater than jumpUid, topKey: " + topKey + " jumpKey: " + jumpKey);
      log.debug("doing nothing");
    }
  }
  return hasTop();
}
// -------------------------------------------------------------------------
// ------------- Private stuff, KEEP OUT!!

/** Sets the cached top key (null clears it). */
private void setTopKey(Key key) {
  topKey = key;
}

/** Sets the cached top value (null clears it). */
private void setTopValue(Value v) {
  this.topValue = v;
}
/**
 * Whether scanning may legally continue into the next row: always true when no parent
 * end row is set; false once the current row is exhausted (null) or has moved past the
 * parent's end row.
 */
private boolean canBeInNextRow() {
  if (parentEndRow == null) {
    return true;
  }
  if (currentRow == null) {
    return false;
  }
  return currentRow.compareTo(parentEndRow) <= 0;
}
/**
 * Builds the seek range for the given row. Range-comparison node types (GT/GE/LT/LE/ER/NR)
 * must scan every value of the field within the row, so the range covers the whole field
 * name; all other node types scan only the single fieldName/fieldValue pair.
 */
private Range buildRange(Text rowId) {
  boolean rangeComparison = type == ParserTreeConstants.JJTGTNODE || type == ParserTreeConstants.JJTGENODE
      || type == ParserTreeConstants.JJTLTNODE || type == ParserTreeConstants.JJTLENODE
      || type == ParserTreeConstants.JJTERNODE || type == ParserTreeConstants.JJTNRNODE;
  final Key startKey;
  final Key endKey;
  if (rangeComparison) {
    // Whole field: [row fName, row fName\0)
    startKey = new Key(rowId, fName);
    endKey = new Key(rowId, new Text(fName + NULL_BYTE));
  } else {
    // Single value: [row fName fValue\0, row fName fValue\1)
    startKey = new Key(rowId, fName, new Text(fValue + NULL_BYTE));
    endKey = new Key(rowId, fName, new Text(fValue + ONE_BYTE));
  }
  return new Range(startKey, true, endKey, false);
}
/**
 * Seeks the source just past the end of the current row to discover the next row id.
 *
 * @return the next row in the tablet, or null when the source is exhausted
 * @throws IOException if the underlying seek fails
 */
private Text getNextRow() throws IOException {
  if (log.isDebugEnabled()) {
    log.debug("getNextRow()");
  }
  // A key at currentRow + \0 sorts immediately after every key in the current row.
  Key afterCurrentRow = new Key(new Text(currentRow + NULL_BYTE));
  getSource().seek(new Range(afterCurrentRow, afterCurrentRow), EMPTY_COL_FAMS, false);
  return getSource().hasTop() ? getSource().getTopKey().getRow() : null;
}
/**
 * Seeks the underlying source to the very beginning of the tablet and returns the row of
 * its first key.
 *
 * @return the first row held by the underlying source
 * @throws IOException if the source holds no data at all (previously thrown with no
 *           message, which made this failure mode hard to diagnose)
 */
private Text getFirstRow() throws IOException {
  getSource().seek(new Range(), EMPTY_COL_FAMS, false);
  if (!getSource().hasTop()) {
    throw new IOException("unable to find first row, underlying source iterator has no data");
  }
  return getSource().getTopKey().getRow();
}
/**
 * Evaluates this term's JEXL expression against the field value parsed from the given
 * key's column qualifier.
 *
 * @param k the key to test
 * @return true if the expression evaluates to Boolean TRUE for the key's field value
 */
private boolean matches(Key k) {
  if (log.isDebugEnabled()) {
    log.debug("You've reached the match function!");
  }
  JexlContext ctx = new MapContext();
  // Add the field value from the key to the context; keyParser handles splitting the
  // column qualifier on the NULL separator.
  keyParser.parse(k);
  String fieldValue = keyParser.getFieldValue();
  ctx.set(fNameString, fieldValue);
  Object o = expr.evaluate(ctx);
  // Boolean.TRUE.equals(o) is null-safe and replaces the previous
  // "o instanceof Boolean && ((Boolean) o) == true", which relied on unboxing.
  boolean matched = Boolean.TRUE.equals(o);
  if (log.isDebugEnabled()) {
    if (matched) {
      log.debug("matches:: fName: " + fName + " , fValue: " + fieldValue + " , operator: " + fOperator + " , key: " + k);
    } else {
      log.debug("NO MATCH:: fName: " + fName + " , fValue: " + fieldValue + " , operator: " + fOperator + " , key: " + k);
    }
  }
  return matched;
}
/**
 * Re-seeks the source to the given range and scans forward (possibly across rows) until a
 * matching key is found or the parent range / tablet is exhausted. Used by jump() so the
 * parent range bookkeeping set up by seek() is left undisturbed.
 *
 * @param r the range to seek to
 * @return true if a valid top key/value was found
 * @throws IOException if the underlying source fails
 */
private boolean jumpSeek(Range r) throws IOException {
  range = r;
  setTopKey(null);
  setTopValue(null);
  getSource().seek(range, EMPTY_COL_FAMS, false);
  while (topKey == null) {
    if (getSource().hasTop()) {
      if (log.isDebugEnabled()) {
        log.debug("jump, source has top: " + getSource().getTopKey());
      }
      Key k = getSource().getTopKey();
      if (range.contains(k)) {
        if (matches(k)) {
          topKey = k;
          topValue = getSource().getTopValue();
          if (log.isDebugEnabled()) {
            log.debug("jump, source has top in valid range");
          }
        } else {
          getSource().next();
        }
      } else {
        if (log.isDebugEnabled()) {
          log.debug("jump, top out of range");
          String pEndRow = "empty";
          if (parentEndRow != null) {
            pEndRow = parentEndRow.toString();
          }
          log.debug("source.topKey.row: " + k.getRow() + "\t currentRow: " + currentRow + "\t parentEndRow: " + pEndRow);
        }
        if (parentEndRow != null) {
          if (currentRow == null) {
            topKey = null;
            topValue = null;
            return false;
          }
          // check it
          if (k.getRow().equals(currentRow)) {
            currentRow = getNextRow();
          } else if (k.getRow().compareTo(currentRow) > 0) {
            // the source has already advanced past our row; adopt its row
            currentRow = k.getRow();
          }
          if (currentRow == null || parentEndRow.compareTo(currentRow) < 0) {
            // you're done
            topKey = null;
            topValue = null;
            return false;
          }
        } else { // can go to end of the tablet
          if (currentRow == null || k.getRow() == null) {
            topKey = null;
            topValue = null;
            return false;
          }
          if (k.getRow().equals(currentRow)) {
            currentRow = getNextRow();
            if (currentRow == null) {
              topKey = null;
              topValue = null;
              return false;
            }
          } else if (k.getRow().compareTo(currentRow) > 0) {
            currentRow = k.getRow();
          }
        }
        // construct new range and seek the source
        range = buildRange(currentRow);
        if (log.isDebugEnabled()) {
          log.debug("jump, new build range: " + range);
        }
        getSource().seek(range, EMPTY_COL_FAMS, false);
      }
    } else {
      if (log.isDebugEnabled()) {
        log.debug("jump, underlying source had no top key.");
      }
      topKey = null;
      topValue = null;
      return false;
    }
  }// end while
  return hasTop();
}
}
| 6,234 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/iterator/ReadAheadIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.iterator;
import java.io.IOException;
import java.lang.Thread.State;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.log4j.Logger;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.OptionDescriber;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
/**
* This iterator takes the source iterator (the one below it in the iterator stack) and puts it in a background thread. The background thread continues
* processing and fills a queue with the Keys and Values from the source iterator. When seek() is called on this iterator, it pauses the background thread,
* clears the queue, calls seek() on the source iterator, then resumes the thread filling the queue.
*
* Users of this iterator can set the queue size, default is five elements. Users must be aware of the potential for OutOfMemory errors when using this iterator
* with large queue sizes or large objects. This iterator copies the Key and Value from the source iterator and puts them into the queue.
*
* This iterator introduces some parallelism into the server side iterator stack. One use case for this would be when an iterator takes a relatively long time
* to process each K,V pair and causes the iterators above it to wait. By putting the longer running iterator in a background thread we should be able to
* achieve greater throughput.
*
* NOTE: Experimental!
*
*/
public class ReadAheadIterator implements SortedKeyValueIterator<Key,Value>, OptionDescriber {
  private static Logger log = Logger.getLogger(ReadAheadIterator.class);
  /** Option name controlling the capacity of the read-ahead queue. */
  public static final String QUEUE_SIZE = "queue.size";
  /** Option name controlling the producer's offer timeout, in seconds. */
  public static final String TIMEOUT = "timeout";
  // Sentinel placed on the queue by the producer thread to signal end of data.
  private static final QueueElement noMoreDataElement = new QueueElement();
  private int queueSize = 5;
  private int timeout = 60;
  /**
   *
   * Class to hold key and value from the producing thread. The constructor takes
   * defensive copies of both the Key and the Value so the producer can advance the
   * source without invalidating queued elements.
   *
   */
  static class QueueElement {
    Key key = null;
    Value value = null;
    public QueueElement() {}
    public QueueElement(Key key, Value value) {
      super();
      this.key = new Key(key);
      this.value = new Value(value.get(), true);
    }
    public Key getKey() {
      return key;
    }
    public Value getValue() {
      return value;
    }
  }
  /**
   *
   * Thread that produces data from the source iterator and places the results in a queue.
   * Extends ReentrantLock so that seek() can pause the producer by acquiring the lock
   * while it clears the queue and re-seeks the source.
   *
   */
  class ProducerThread extends ReentrantLock implements Runnable {
    private static final long serialVersionUID = 1L;
    private Exception e = null;
    private int waitTime = timeout;
    private SortedKeyValueIterator<Key,Value> sourceIter = null;
    public ProducerThread(SortedKeyValueIterator<Key,Value> source) {
      this.sourceIter = source;
    }
    public void run() {
      boolean hasMoreData = true;
      // Keep this thread running while there is more data to read
      // and items left in the queue to be read off.
      while (hasMoreData || queue.size() > 0) {
        try {
          // Acquire the lock, this will wait if the lock is being
          // held by the ReadAheadIterator.seek() method.
          this.lock();
          // Check to see if there is more data from the iterator below.
          hasMoreData = sourceIter.hasTop();
          // Break out of the loop if no more data.
          if (!hasMoreData)
            continue;
          // Put the next K,V onto the queue.
          try {
            QueueElement e = new QueueElement(sourceIter.getTopKey(), sourceIter.getTopValue());
            boolean inserted = false;
            try {
              inserted = queue.offer(e, this.waitTime, TimeUnit.SECONDS);
            } catch (InterruptedException ie) {
              // Record the interruption as the terminal error and stop producing.
              this.e = ie;
              break;
            }
            if (!inserted) {
              // Then we either got a timeout, set the error and break out of the loop
              this.e = new TimeoutException("Background thread has exceeded wait time of " + this.waitTime + " seconds, aborting...");
              break;
            }
            // Move the iterator to the next K,V for the next iteration of this loop
            sourceIter.next();
          } catch (Exception e) {
            this.e = e;
            log.error("Error calling next on source iterator", e);
            break;
          }
        } finally {
          this.unlock();
        }
      }
      // If we broke out of the loop because of an error, then don't put the marker on the queue, just to do end.
      if (!hasError()) {
        // Put the special end of data marker into the queue
        try {
          queue.put(noMoreDataElement);
        } catch (InterruptedException e) {
          this.e = e;
          log.error("Error putting End of Data marker onto queue");
        }
      }
    }
    public boolean hasError() {
      return (this.e != null);
    }
    public Exception getError() {
      return this.e;
    }
  }
  private SortedKeyValueIterator<Key,Value> source;
  // Bounded queue the producer fills and next() drains; created in init().
  private ArrayBlockingQueue<QueueElement> queue = null;
  private QueueElement currentElement = new QueueElement();
  private ProducerThread thread = null;
  private Thread t = null;
  // NOTE(review): the copy receives only a deep-copied source; queue and thread are
  // created in init(), which presumably must be called on the copy before use — confirm.
  protected ReadAheadIterator(ReadAheadIterator other, IteratorEnvironment env) {
    source = other.source.deepCopy(env);
  }
  public ReadAheadIterator() {}
  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    return new ReadAheadIterator(this, env);
  }
  public Key getTopKey() {
    return currentElement.getKey();
  }
  public Value getTopValue() {
    return currentElement.getValue();
  }
  public boolean hasTop() {
    // The sentinel element means the producer finished and everything has been consumed.
    if (currentElement == noMoreDataElement)
      return false;
    // NOTE(review): reads queue.size() and source.hasTop() from the consumer thread
    // without holding the producer lock — verify this race is acceptable here.
    return currentElement != null || queue.size() > 0 || source.hasTop();
  }
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
    // validateOptions also applies QUEUE_SIZE/TIMEOUT to this instance as a side effect.
    validateOptions(options);
    this.source = source;
    queue = new ArrayBlockingQueue<QueueElement>(queueSize);
    thread = new ProducerThread(this.source);
    t = new Thread(thread, "ReadAheadIterator-SourceThread");
    t.start();
  }
  /**
   * Populate the key and value
   */
  public void next() throws IOException {
    // Thread startup race condition, need to make sure that the
    // thread has started before we call this the first time.
    while (t.getState().equals(State.NEW)) {
      try {
        Thread.sleep(1);
      } catch (InterruptedException e) {}
    }
    if (t.getState().equals(State.TERMINATED)) {
      // Thread encountered an error.
      if (thread.hasError()) {
        // and it should
        throw new IOException("Background thread has died", thread.getError());
      }
    }
    // Pull an element off the queue, this will wait if there is no data yet.
    try {
      if (thread.hasError())
        throw new IOException("background thread has error", thread.getError());
      QueueElement nextElement = null;
      while (null == nextElement) {
        try {
          nextElement = queue.poll(1, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
          // TODO: Do we need to do anything here?
          // NOTE(review): the interrupt is swallowed without re-interrupting the
          // thread (Thread.currentThread().interrupt()) — confirm this is intended.
        }
        if (null == nextElement) {
          // Then we have no data and timed out, check for error condition in the read ahead thread
          if (thread.hasError()) {
            throw new IOException("background thread has error", thread.getError());
          }
        }
      }
      currentElement = nextElement;
    } catch (IOException e) {
      throw new IOException("Error getting element from source iterator", e);
    }
  }
  /**
   * Seek to the next matching cell and call next to populate the key and value.
   */
  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
    if (t.isAlive()) {
      // Check for error
      if (thread.hasError())
        throw new IOException("background thread has error", thread.getError());
      try {
        // Acquire the lock, or wait until its unlocked by the producer thread.
        thread.lock();
        // Discard anything read ahead for the old position before moving the source.
        queue.clear();
        currentElement = null;
        source.seek(range, columnFamilies, inclusive);
      } finally {
        thread.unlock();
      }
      next();
    } else {
      throw new IOException("source iterator thread has died.");
    }
  }
  public IteratorOptions describeOptions() {
    Map<String,String> options = new HashMap<String,String>();
    options.put(QUEUE_SIZE, "read ahead queue size");
    options.put(TIMEOUT, "timeout in seconds before background thread thinks that the client has aborted");
    return new IteratorOptions(getClass().getSimpleName(), "Iterator that puts the source in another thread", options, null);
  }
  // NOTE(review): mutates queueSize/timeout as a side effect of validation; init() relies
  // on this behavior.
  public boolean validateOptions(Map<String,String> options) {
    if (options.containsKey(QUEUE_SIZE))
      queueSize = Integer.parseInt(options.get(QUEUE_SIZE));
    if (options.containsKey(TIMEOUT))
      timeout = Integer.parseInt(options.get(TIMEOUT));
    return true;
  }
}
| 6,235 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/iterator/EvaluatingIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.iterator;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.examples.wikisearch.parser.EventFields;
import org.apache.accumulo.examples.wikisearch.parser.EventFields.FieldValue;
import org.apache.commons.collections.map.LRUMap;
import org.apache.hadoop.io.Text;
/**
 * Concrete evaluating iterator for the partitioned wikisearch table: extracts field
 * name/value pairs from column qualifiers and evaluates events against a query expression
 * (evaluation itself lives in AbstractEvaluatingIterator).
 */
public class EvaluatingIterator extends AbstractEvaluatingIterator {
  public static final String NULL_BYTE_STRING = "\u0000";
  // Cache of parsed ColumnVisibility objects, keyed by the raw visibility Text.
  LRUMap visibilityMap = new LRUMap();
  public EvaluatingIterator() {
    super();
  }
  public EvaluatingIterator(AbstractEvaluatingIterator other, IteratorEnvironment env) {
    super(other, env);
  }
  @Override
  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    return new EvaluatingIterator(this, env);
  }
  @Override
  public PartialKey getKeyComparator() {
    // Events are grouped per row + column family (document id).
    return PartialKey.ROW_COLFAM;
  }
  @Override
  public Key getReturnKey(Key k) {
    // If we were using column visibility, then we would get the merged visibility here and use it
    // in the key.
    // Remove the COLQ from the key and use the combined visibility
    Key r = new Key(k.getRowData().getBackingArray(), k.getColumnFamilyData().getBackingArray(),
        NULL_BYTE, k.getColumnVisibility().getBytes(), k.getTimestamp(), k.isDeleted(), false);
    return r;
  }
  @Override
  public void fillMap(EventFields event, Key key, Value value) {
    // If we were using column visibility, we would have to merge them here.
    // Pull the datatype from the colf in case we need to do anything datatype specific.
    // String colf = key.getColumnFamily().toString();
    // String datatype = colf.substring(0, colf.indexOf(NULL_BYTE_STRING));
    // For the partitioned table, the field name and field value are stored in the column qualifier
    // separated by a \0.
    String colq = key.getColumnQualifier().toString();// .toLowerCase();
    int idx = colq.indexOf(NULL_BYTE_STRING);
    String fieldName = colq.substring(0, idx);
    String fieldValue = colq.substring(idx + 1);
    // NOTE(review): getBytes() uses the platform default charset; presumably the ingest
    // side wrote UTF-8 — confirm before relying on this off a UTF-8 platform.
    event.put(fieldName, new FieldValue(getColumnVisibility(key), fieldValue.getBytes()));
  }
  /**
   * Parses (and caches in an LRU map) the column visibility of the given key.
   *
   * @return The column visibility
   */
  public ColumnVisibility getColumnVisibility(Key key) {
    ColumnVisibility result = (ColumnVisibility) visibilityMap.get(key.getColumnVisibility());
    if (result != null) {
      return result;
    }
    result = new ColumnVisibility(key.getColumnVisibility().getBytes());
    visibilityMap.put(key.getColumnVisibility(), result);
    return result;
  }
  /**
   * Don't accept this key if the colf starts with 'fi'
   */
  @Override
  public boolean isKeyAccepted(Key key) throws IOException {
    // NOTE(review): every path here returns true; the seek/recursion only repositions
    // this.iterator past the field-index ('fi') section — confirm that side effect is the
    // actual intent, since the method never rejects a key.
    if (key.getColumnFamily().toString().startsWith("fi")) {
      Key copy = new Key(key.getRow(), new Text("fi\01"));
      Collection<ByteSequence> columnFamilies = Collections.emptyList();
      this.iterator.seek(new Range(copy, copy), columnFamilies, true);
      if (this.iterator.hasTop()) {
        return isKeyAccepted(this.iterator.getTopKey());
      }
      return true;
    }
    return true;
  }
}
| 6,236 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/iterator/UniqFieldNameValueIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.iterator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.WrappingIterator;
import org.apache.accumulo.examples.wikisearch.util.FieldIndexKeyParser;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
public class UniqFieldNameValueIterator extends WrappingIterator {
protected static final Logger log = Logger.getLogger(UniqFieldNameValueIterator.class);
// Wrapping iterator only accesses its private source in setSource and getSource
// Since this class overrides these methods, it's safest to keep the source declaration here
private SortedKeyValueIterator<Key,Value> source;
private FieldIndexKeyParser keyParser;
private Key topKey = null;
private Value topValue = null;
private Range overallRange = null;
private Range currentSubRange;
private Text fieldName = null;
private Text fieldValueLowerBound = null;
private Text fieldValueUpperBound = null;
private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<ByteSequence>();
private static final String ONE_BYTE = "\1";
private boolean multiRow = false;
private boolean seekInclusive = false;
// -------------------------------------------------------------------------
// ------------- Static Methods

/** Sets the log4j level for this class's logger (used by tests/debugging). */
public static void setLogLevel(Level l) {
  log.setLevel(l);
}
// -------------------------------------------------------------------------
// ------------- Constructors

/**
 * Creates an iterator that returns unique fieldName/fieldValue entries where the value
 * falls within [fValLower, fValUpper].
 */
public UniqFieldNameValueIterator(Text fName, Text fValLower, Text fValUpper) {
  this.fieldName = fName;
  this.fieldValueLowerBound = fValLower;
  this.fieldValueUpperBound = fValUpper;
  keyParser = createDefaultKeyParser();
}

/** Copy constructor used by deepCopy; deep-copies the other iterator's source. */
public UniqFieldNameValueIterator(UniqFieldNameValueIterator other, IteratorEnvironment env) {
  source = other.getSource().deepCopy(env);
  // Set a default KeyParser
  keyParser = createDefaultKeyParser();
}
// -------------------------------------------------------------------------
// ------------- Overrides
/**
 * Initializes this iterator and captures the wrapped source in this class's own field
 * (this class overrides setSource/getSource with its own source reference).
 */
@Override
public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
  super.init(source, options, env);
  // Bug fix: the previous code assigned to the method parameter ("source = super.getSource();"),
  // a no-op that left this iterator's own field unset. Assign the field explicitly.
  this.source = super.getSource();
}
/** Stores the wrapped source locally (WrappingIterator keeps its own private reference). */
@Override
protected void setSource(SortedKeyValueIterator<Key,Value> source) {
  this.source = source;
}

/** @return the locally-held wrapped source. */
@Override
protected SortedKeyValueIterator<Key,Value> getSource() {
  return source;
}
/** @return a deep copy of this iterator backed by a deep copy of its source. */
@Override
public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
  return new UniqFieldNameValueIterator(this, env);
}

/** @return the current top key, or null when exhausted. */
@Override
public Key getTopKey() {
  return this.topKey;
}

/** @return the current top value, or null when exhausted. */
@Override
public Value getTopValue() {
  return this.topValue;
}

/** @return true while a valid top key is cached. */
@Override
public boolean hasTop() {
  return (topKey != null);
}
/**
 * Advances to the next unique field value: seeks the source just past the current value
 * (value + \1) so duplicate entries for the same value are skipped, then scans forward —
 * rolling over to the next row when in multi-row mode — until a value within
 * [fieldValueLowerBound, fieldValueUpperBound] is found or the overall range is exhausted.
 */
@Override
public void next() throws IOException {
  if (log.isDebugEnabled()) {
    log.debug("next()");
  }
  if (!source.hasTop()) {
    topKey = null;
    topValue = null;
    return;
  }
  Key currentKey = topKey;
  keyParser.parse(topKey);
  String fValue = keyParser.getFieldValue();
  Text currentRow = currentKey.getRow();
  Text currentFam = currentKey.getColumnFamily();
  if (overallRange.getEndKey() != null && overallRange.getEndKey().getRow().compareTo(currentRow) < 0) {
    if (log.isDebugEnabled()) {
      log.debug("next, overall endRow: " + overallRange.getEndKey().getRow() + " currentRow: " + currentRow);
    }
    topKey = null;
    topValue = null;
    return;
  }
  if (fValue.compareTo(this.fieldValueUpperBound.toString()) > 0) {
    topKey = null;
    topValue = null;
    return;
  }
  // Seeking to fValue + \1 skips every remaining entry with the same field value.
  Key followingKey = new Key(currentKey.getRow(), this.fieldName, new Text(fValue + ONE_BYTE));
  if (log.isDebugEnabled()) {
    log.debug("next, followingKey to seek on: " + followingKey);
  }
  Range r = new Range(followingKey, followingKey);
  source.seek(r, EMPTY_COL_FAMS, false);
  while (true) {
    if (!source.hasTop()) {
      topKey = null;
      topValue = null;
      return;
    }
    Key k = source.getTopKey();
    if (!overallRange.contains(k)) {
      topKey = null;
      topValue = null;
      return;
    }
    if (log.isDebugEnabled()) {
      log.debug("next(), key: " + k + " subrange: " + this.currentSubRange);
    }
    // if (this.currentSubRange.contains(k)) {
    keyParser.parse(k);
    Text currentVal = new Text(keyParser.getFieldValue());
    if (k.getRow().equals(currentRow) && k.getColumnFamily().equals(currentFam) && currentVal.compareTo(fieldValueUpperBound) <= 0) {
      topKey = k;
      topValue = source.getTopValue();
      return;
    } else { // need to move to next row.
      if (this.overallRange.contains(k) && this.multiRow) {
        // need to find the next sub range
        // STEPS
        // 1. check if you moved past your current row on last call to next
        // 2. figure out next row
        // 3. build new start key with lowerbound fvalue
        // 4. seek the source
        // 5. test the subrange.
        if (k.getRow().equals(currentRow)) {
          // get next row
          currentRow = getNextRow();
          if (currentRow == null) {
            topKey = null;
            topValue = null;
            return;
          }
        } else {
          // i'm already in the next row
          currentRow = source.getTopKey().getRow();
        }
        // build new startKey
        Key sKey = new Key(currentRow, fieldName, fieldValueLowerBound);
        Key eKey = new Key(currentRow, fieldName, fieldValueUpperBound);
        currentSubRange = new Range(sKey, eKey);
        source.seek(currentSubRange, EMPTY_COL_FAMS, seekInclusive);
      } else { // not multi-row or outside overall range, we're done
        topKey = null;
        topValue = null;
        return;
      }
    }
  }
}
@Override
public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
  // Positions this iterator at the first key within `range` that falls inside a
  // per-row fieldName/fieldValue sub-range. When the seek range spans multiple
  // rows, we repeatedly construct narrow sub-ranges per row so we can skip over
  // irrelevant keys instead of scanning every key in between (see NOTE below).
  if (log.isDebugEnabled()) {
    log.debug("seek, range: " + range);
  }
  // Remember the overall seek parameters; next() consults overallRange to know
  // when iteration has run off the end of the requested span.
  this.overallRange = range;
  this.seekInclusive = inclusive;
  source.seek(range, EMPTY_COL_FAMS, inclusive);
  topKey = null;
  topValue = null;
  Key sKey;
  Key eKey;
  if (range.isInfiniteStartKey()) {
    // No explicit start key: anchor the scan at whatever key the source landed on.
    sKey = source.getTopKey();
    if (sKey == null) {
      // Source is empty; leave topKey/topValue null so hasTop() reports false.
      return;
    }
  } else {
    sKey = range.getStartKey();
  }
  if (range.isInfiniteStopKey()) {
    eKey = null;
    this.multiRow = true; // assume we will go to the end of the tablet.
  } else {
    eKey = range.getEndKey();
    // Single-row ranges can be satisfied with one sub-range; multi-row ranges
    // require re-seeking per row in the loop below.
    if (sKey.getRow().equals(eKey.getRow())) {
      this.multiRow = false;
    } else {
      this.multiRow = true;
    }
  }
  if (log.isDebugEnabled()) {
    log.debug("seek, multiRow:" + multiRow + " range:" + range);
  }
  /*
   * NOTE: If the seek range spans multiple rows, we are only interested in the fieldName:fieldValue subranges in each row. Keys will exist in the
   * overallRange that we will want to skip over so we need to create subranges per row so we don't have to examine every key in between.
   */
  // Build the first per-row sub-range: [row, fieldName, lowerBound] .. [row, fieldName, upperBound].
  Text sRow = sKey.getRow();
  Key ssKey = new Key(sRow, this.fieldName, this.fieldValueLowerBound);
  Key eeKey = new Key(sRow, this.fieldName, this.fieldValueUpperBound);
  this.currentSubRange = new Range(ssKey, eeKey);
  if (log.isDebugEnabled()) {
    log.debug("seek, currentSubRange: " + currentSubRange);
  }
  source.seek(this.currentSubRange, columnFamilies, inclusive);
  // cycle until we find a valid topKey, or we get ejected b/c we hit the
  // end of the tablet or exceeded the overallRange.
  while (topKey == null) {
    if (source.hasTop()) {
      Key k = source.getTopKey();
      if (log.isDebugEnabled()) {
        log.debug("seek, source.topKey: " + k);
      }
      if (currentSubRange.contains(k)) {
        // Found a key inside the current row's fieldName/fieldValue sub-range.
        topKey = k;
        topValue = source.getTopValue();
        if (log.isDebugEnabled()) {
          log.debug("seek, source has top in valid range");
        }
      } else { // outside of subRange.
        // if multiRow mode, get the next row and seek to it
        if (multiRow && overallRange.contains(k)) {
          // NOTE(review): this advances past the row of sKey (the sub-range start),
          // not the row of k (the key actually observed) — confirm this is intended.
          Key fKey = sKey.followingKey(PartialKey.ROW);
          Range fRange = new Range(fKey, eKey);
          source.seek(fRange, columnFamilies, inclusive);
          if (source.hasTop()) {
            // Rebuild the sub-range anchored at the row we just landed on and
            // re-seek; the loop will re-test the new top key against it.
            Text row = source.getTopKey().getRow();
            Key nKey = new Key(row, this.fieldName, this.fieldValueLowerBound);
            this.currentSubRange = new Range(nKey, eKey);
            sKey = this.currentSubRange.getStartKey();
            Range nextRange = new Range(sKey, eKey);
            source.seek(nextRange, columnFamilies, inclusive);
          } else {
            // Ran off the end of the tablet while looking for the next row.
            topKey = null;
            topValue = null;
            return;
          }
        } else { // not multi row & outside range, we're done.
          topKey = null;
          topValue = null;
          return;
        }
      }
    } else { // source does not have top, we're done
      topKey = null;
      topValue = null;
      return;
    }
  }
}
// -------------------------------------------------------------------------
// ------------- Internal Methods
/** Builds the key parser used when none is supplied externally. */
private FieldIndexKeyParser createDefaultKeyParser() {
  // A freshly constructed parser needs no further configuration.
  return new FieldIndexKeyParser();
}
/**
 * Advances the underlying source past the current row and returns the row of
 * the first key found there, or {@code null} when the tablet is exhausted.
 *
 * @return the next row's Text, or null if the source has no further keys
 * @throws IOException if the underlying seek fails
 */
private Text getNextRow() throws IOException {
  if (log.isDebugEnabled()) {
    log.debug("getNextRow()");
  }
  // Jump to the key immediately following the current row, then probe it.
  Key nextRowKey = new Key(source.getTopKey().followingKey(PartialKey.ROW));
  Range probe = new Range(nextRowKey, nextRowKey);
  source.seek(probe, EMPTY_COL_FAMS, false);
  return source.hasTop() ? source.getTopKey().getRow() : null;
}
}
| 6,237 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/sample/Document.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.sample;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
/**
 * A single document in a wikisearch query result set: an identifier plus the
 * list of fields returned for it. JAXB serializes the private fields directly
 * (FIELD access); the XML element name {@code field} is part of the wire
 * format and must not be renamed.
 */
@XmlAccessorType(XmlAccessType.FIELD)
public class Document {

  /** Unique identifier of the document. */
  @XmlElement
  private String id = null;

  /** The document's fields; element name "field" is fixed by the XML schema. */
  @XmlElement
  private List<Field> field = new ArrayList<>();

  /** No-arg constructor required by JAXB. */
  public Document() {
    super();
  }

  /**
   * @param id the document identifier
   * @param fields the fields belonging to this document
   */
  public Document(String id, List<Field> fields) {
    super();
    this.id = id;
    this.field = fields;
  }

  public String getId() {
    return id;
  }

  public List<Field> getFields() {
    return field;
  }

  public void setId(String id) {
    this.id = id;
  }

  public void setFields(List<Field> fields) {
    this.field = fields;
  }
}
| 6,238 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/sample/Results.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.sample;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
/**
 * Root JAXB element for a wikisearch query response: a list of matching
 * {@link Document}s. JAXB serializes the private field directly (FIELD
 * access); the element name {@code document} is part of the wire format.
 */
@XmlRootElement
@XmlAccessorType(XmlAccessType.FIELD)
public class Results {

  /** The matched documents; element name "document" is fixed by the XML schema. */
  @XmlElement
  private List<Document> document = new ArrayList<>();

  /** No-arg constructor required by JAXB. */
  public Results() {
    super();
  }

  public List<Document> getResults() {
    return document;
  }

  public void setResults(List<Document> results) {
    this.document = results;
  }

  /**
   * @return the number of documents, or 0 when the list has been set to null
   */
  public int size() {
    return document == null ? 0 : document.size();
  }
}
| 6,239 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/sample/Field.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.sample;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlValue;
/**
 * A single name/value pair within a result {@code Document}. The name is
 * serialized as an XML attribute and the value as the element's text content.
 */
@XmlAccessorType(XmlAccessType.FIELD)
public class Field {

  /** Field name, rendered as an XML attribute. */
  @XmlAttribute
  private String name = null;

  /** Field value, rendered as the element's text body. */
  @XmlValue
  private String value = null;

  /** No-arg constructor required by JAXB. */
  public Field() {
    super();
  }

  /**
   * @param name the field's name
   * @param value the field's value
   */
  public Field(String name, String value) {
    super();
    this.name = name;
    this.value = value;
  }

  public String getFieldName() {
    return name;
  }

  public void setFieldName(String fieldName) {
    this.name = fieldName;
  }

  public String getFieldValue() {
    return value;
  }

  public void setFieldValue(String fieldValue) {
    this.value = fieldValue;
  }
}
| 6,240 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/query/IQuery.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.query;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import org.apache.accumulo.examples.wikisearch.sample.Results;
/**
 * JAX-RS endpoint contract for running wikisearch queries. Every operation
 * accepts the same two query parameters — {@code query} (the query expression)
 * and {@code auths} (comma-separated Accumulo authorizations) — and differs
 * only in the response representation.
 */
@Path("/Query")
public interface IQuery {

  /** Runs the query and returns the results rendered as an HTML page. */
  @GET
  @POST
  @Path("/html")
  @Consumes("*/*")
  public String html(@QueryParam("query") String query, @QueryParam("auths") String auths);

  /** Runs the query and returns the results marshalled as XML. */
  @GET
  @POST
  @Path("/xml")
  @Consumes("*/*")
  @Produces("application/xml")
  public Results xml(@QueryParam("query") String query, @QueryParam("auths") String auths);

  /** Runs the query and returns the results marshalled as JSON. */
  @GET
  @POST
  @Path("/json")
  @Consumes("*/*")
  @Produces("application/json")
  public Results json(@QueryParam("query") String query, @QueryParam("auths") String auths);

  /** Runs the query and returns the results marshalled as YAML. */
  @GET
  @POST
  @Path("/yaml")
  @Consumes("*/*")
  @Produces("text/x-yaml")
  public Results yaml(@QueryParam("query") String query, @QueryParam("auths") String auths);

  /** Runs a content query (against the content table) and returns XML results. */
  @GET
  @POST
  @Path("/content")
  @Consumes("*/*")
  @Produces("application/xml")
  public Results content(@QueryParam("query") String query, @QueryParam("auths") String auths);
}
| 6,241 |
0 | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch | Create_ds/accumulo-wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/query/Query.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.wikisearch.query;
import java.io.IOException;
import java.io.InputStream;
import java.io.StringReader;
import java.io.StringWriter;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.annotation.Resource;
import javax.ejb.EJBException;
import javax.ejb.Local;
import javax.ejb.Stateless;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;
import javax.xml.transform.Templates;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.stream.StreamSource;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.examples.wikisearch.logic.ContentLogic;
import org.apache.accumulo.examples.wikisearch.logic.QueryLogic;
import org.apache.accumulo.examples.wikisearch.sample.Results;
import org.apache.log4j.Logger;
/**
 * Stateless EJB implementing {@link IQuery}. Connects to Accumulo using
 * settings injected from ejb-jar.xml and dispatches queries to the
 * wikisearch query logic. The previously duplicated connector-creation and
 * authorization-parsing code in {@code content} and {@code query} is now
 * shared via private helpers.
 */
@Stateless
@Local(IQuery.class)
public class Query implements IQuery {

  private static final Logger log = Logger.getLogger(Query.class);

  // Inject values from XML configuration file (ejb-jar.xml).
  @Resource(name = "instanceName")
  private String instanceName;
  @Resource(name = "zooKeepers")
  private String zooKeepers;
  @Resource(name = "username")
  private String username;
  @Resource(name = "password")
  private String password;
  @Resource(name = "tableName")
  private String tableName;
  @Resource(name = "threads")
  private int threads;

  /** Webapp-relative location of the XSL stylesheet used to render HTML results. */
  private static final String XSL = "/accumulo-wikisearch/style.xsl";

  @PostConstruct
  public void init() {
    log.info("Post Construct");
  }

  @PreDestroy
  public void close() {
    log.info("Close called.");
  }

  /**
   * Validates the injected connection parameters and opens an Accumulo
   * connector via ZooKeeper.
   *
   * @return a live Accumulo {@link Connector}
   * @throws EJBException if any required parameter is unset or the connection fails
   */
  private Connector getConnector() {
    if (null == instanceName || null == zooKeepers || null == username || null == password) {
      throw new EJBException("Required parameters not set. [instanceName = " + this.instanceName
          + ", zookeepers = " + this.zooKeepers + ", username = " + this.username
          + (password == null ? ", password = null" : "") + "]. Check values in ejb-jar.xml");
    }
    Instance instance = new ZooKeeperInstance(this.instanceName, this.zooKeepers);
    try {
      log.info("Connecting to [instanceName = " + this.instanceName + ", zookeepers = "
          + this.zooKeepers + ", username = " + this.username + "].");
      return instance.getConnector(this.username, new PasswordToken(this.password.getBytes()));
    } catch (Exception e) {
      throw new EJBException("Error getting connector from instance", e);
    }
  }

  /**
   * Splits a comma-separated authorization string into a list.
   *
   * @param auths comma-separated authorizations; may be null or empty
   * @return the parsed authorizations, empty when none were supplied
   */
  private static List<String> parseAuthorizations(String auths) {
    List<String> authorizations = new ArrayList<>();
    if (auths != null && auths.length() > 0) {
      for (String a : auths.split(",")) {
        authorizations.add(a);
      }
    }
    return authorizations;
  }

  /*
   * (non-Javadoc)
   *
   * @see sample.query.IQuery#html(java.lang.String, java.lang.String)
   */
  @Override
  public String html(String query, String auths) {
    log.info("HTML query: " + query);
    // Resolve the stylesheet from this server's own web container.
    URL u;
    try {
      u = new URL("http://" + System.getProperty("jboss.bind.address") + ":"
          + System.getProperty("jboss.web.http.port") + XSL);
    } catch (MalformedURLException e1) {
      throw new EJBException("Unable to load XSL stylesheet", e1);
    }
    InputStream xslContent;
    try {
      xslContent = u.openStream();
    } catch (IOException e1) {
      throw new EJBException("Unable to get xsl content", e1);
    }
    StringWriter xml = new StringWriter();
    StringWriter html = new StringWriter();
    Results results = query(query, auths);
    try {
      // Marshall the query results object
      JAXBContext ctx = JAXBContext.newInstance(Results.class);
      Marshaller m = ctx.createMarshaller();
      m.marshal(results, xml);
      // Perform XSL transform on the xml.
      StringReader reader = new StringReader(xml.toString());
      TransformerFactory tf = TransformerFactory.newInstance();
      // Create the transformer from the xsl
      Templates xsl = tf.newTemplates(new StreamSource(xslContent));
      Transformer t = xsl.newTransformer();
      t.transform(new StreamSource(reader), new StreamResult(html));
    } catch (Exception e) {
      throw new EJBException("Error processing query results", e);
    } finally {
      try {
        xslContent.close();
      } catch (IOException e) {
        throw new EJBException("Unable to close input stream", e);
      }
    }
    return html.toString();
  }

  /*
   * (non-Javadoc)
   *
   * @see sample.query.IQuery#xml(java.lang.String, java.lang.String)
   */
  @Override
  public Results xml(String query, String auths) {
    log.info("XML query: " + query);
    return query(query, auths);
  }

  /*
   * (non-Javadoc)
   *
   * @see sample.query.IQuery#json(java.lang.String, java.lang.String)
   */
  @Override
  public Results json(String query, String auths) {
    log.info("JSON query: " + query);
    return query(query, auths);
  }

  /*
   * (non-Javadoc)
   *
   * @see sample.query.IQuery#yaml(java.lang.String, java.lang.String)
   */
  @Override
  public Results yaml(String query, String auths) {
    log.info("YAML query: " + query);
    return query(query, auths);
  }

  /*
   * (non-Javadoc)
   *
   * @see sample.query.IQuery#content(java.lang.String, java.lang.String)
   */
  @Override
  public Results content(String query, String auths) {
    log.info("Content query: " + query);
    Connector connector = getConnector();
    List<String> authorizations = parseAuthorizations(auths);
    ContentLogic table = new ContentLogic();
    table.setTableName(tableName);
    return table.runQuery(connector, query, authorizations);
  }

  /**
   * calls the query logic with the parameters, returns results
   *
   * @return The results of a query
   */
  public Results query(String query, String auths) {
    Connector connector = getConnector();
    List<String> authorizations = parseAuthorizations(auths);
    QueryLogic table = new QueryLogic();
    table.setTableName(tableName);
    table.setMetadataTableName(tableName + "Metadata");
    table.setIndexTableName(tableName + "Index");
    table.setReverseIndexTableName(tableName + "ReverseIndex");
    table.setQueryThreads(threads);
    table.setUnevaluatedFields("TEXT");
    table.setUseReadAheadIterator(false);
    return table.runQuery(connector, authorizations, query, null, null, null);
  }
}
| 6,242 |
0 | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic/SyntheticSourceJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.synthetic;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.executor.LocalJobExecutorNetworked;
import io.mantisrx.sourcejob.synthetic.core.TaggedData;
import io.mantisrx.sourcejob.synthetic.sink.QueryRequestPostProcessor;
import io.mantisrx.sourcejob.synthetic.sink.QueryRequestPreProcessor;
import io.mantisrx.sourcejob.synthetic.sink.TaggedDataSourceSink;
import io.mantisrx.sourcejob.synthetic.source.SyntheticSource;
import io.mantisrx.sourcejob.synthetic.stage.TaggingStage;
/**
* A sample queryable source job that generates synthetic request events.
* Clients connect to this job via the Sink port using an MQL expression. The job then sends only the data
* that matches the query to the client. The client can be another Mantis Job or a user manually running a GET request.
*
* Run this sample by executing the main method of this class. Then look for the SSE port where the output of this job
* will be available for streaming. E.g Serving modern HTTP SSE server sink on port: 8299
* Usage: curl "localhost:<sseport>?clientId=<myId>&subscriptionId=<someid>&criterion=<valid mql query>
*
* E.g <code>curl "localhost:8498?subscriptionId=nj&criterion=select%20country%20from%20stream%20where%20status%3D%3D500&clientId=nj2"</code>
* Here the user is submitted an MQL query select country from stream where status==500.
*/
public class SyntheticSourceJob extends MantisJobProvider<TaggedData> {

  /**
   * Wires the three stages of the job: a synthetic request-event source, a
   * tagging stage that matches events against registered MQL queries, and a
   * sink that registers/deregisters those queries from client requests.
   */
  @Override
  public Job<TaggedData> getJobInstance() {
    // Synthetic source generates random RequestEvents.
    SyntheticSource source = new SyntheticSource();
    // Custom sink whose pre/post processors manage MQL query subscriptions.
    TaggedDataSourceSink sink =
        new TaggedDataSourceSink(new QueryRequestPreProcessor(), new QueryRequestPostProcessor());
    return MantisJob
        .source(source)
        // Tags events with the queries they match.
        .stage(new TaggingStage(), TaggingStage.config())
        .sink(sink)
        .create();
  }

  /** Runs the job locally; watch the log for the SSE sink port. */
  public static void main(String[] args) {
    LocalJobExecutorNetworked.execute(new SyntheticSourceJob().getJobInstance());
  }
}
| 6,243 |
0 | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic/core/MQL.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.synthetic.core;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import io.mantisrx.mql.jvm.core.Query;
import io.mantisrx.mql.shaded.clojure.java.api.Clojure;
import io.mantisrx.mql.shaded.clojure.lang.IFn;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Func1;
/**
* The MQL class provides a Java/Scala friendly static interface to MQL functionality which is written in Clojure.
* This class provides a few pieces of functionality;
* - It wraps the Clojure interop so that the user interacts with typed methods via the static interface.
* - It provides methods for accessing individual bits of query functionality, allowing interesting uses
* such as aggregator-mql which uses these components to implement the query in a horizontally scalable / distributed
* fashion on Mantis.
* - It functions as an Rx Transformer of MantisServerSentEvent to MQLResult allowing a user to inline all MQL
* functionality quickly as such: `myObservable.compose(MQL.parse(myQuery));`
*/
public class MQL {
//
// Clojure Interop
//
private static IFn require = Clojure.var("io.mantisrx.mql.shaded.clojure.core", "require");
static {
require.invoke(Clojure.read("io.mantisrx.mql.jvm.interfaces.core"));
require.invoke(Clojure.read("io.mantisrx.mql.jvm.interfaces.server"));
}
private static IFn cljMakeQuery = Clojure.var("io.mantisrx.mql.jvm.interfaces.server", "make-query");
private static IFn cljSuperset = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "queries->superset-projection");
private static IFn parser = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "parser");
private static IFn parses = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "parses?");
private static IFn getParseError = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "get-parse-error");
private static IFn queryToGroupByFn = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "query->groupby");
private static IFn queryToHavingPred = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "query->having-pred");
private static IFn queryToOrderBy = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "query->orderby");
private static IFn queryToLimit = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "query->limit");
private static IFn queryToExtrapolationFn = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "query->extrapolator");
private static IFn queryToAggregateFn = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "agg-query->projection");
private static IFn queryToWindow = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "query->window");
private static Logger logger = LoggerFactory.getLogger(MQL.class);
private static ConcurrentHashMap<HashSet<Query>, IFn> superSetProjectorCache = new ConcurrentHashMap<>();
private final String query;
private final boolean threadingEnabled;
private final Optional<String> sourceJobName;
public static void init() {
logger.info("Initializing MQL runtime.");
}
//
// Constructors and Static Factory Methods
//
public MQL(String query, boolean threadingEnabled) {
if (query == null) {
throw new IllegalArgumentException("MQL cannot be used as an operator with a null query.");
}
this.query = transformLegacyQuery(query);
if (!parses(query)) {
throw new IllegalArgumentException(getParseError(query));
}
this.threadingEnabled = threadingEnabled;
this.sourceJobName = Optional.empty();
}
public MQL(String query, String sourceJobName) {
if (query == null) {
throw new IllegalArgumentException("MQL cannot be used as an operator with a null query.");
}
this.query = transformLegacyQuery(query);
if (!parses(query)) {
throw new IllegalArgumentException(getParseError(query));
}
this.threadingEnabled = false;
this.sourceJobName = Optional.ofNullable(sourceJobName);
}
public static MQL parse(String query) {
return new MQL(query, false);
}
public static MQL parse(String query, boolean threadingEnabled) { return new MQL(query, threadingEnabled); }
public static MQL parse(String query, String sourceName) { return new MQL(query, sourceName); }
//
// Source Job Integration
//
/**
* Constructs an object implementing the Query interface.
* This includes functions;
* matches (Map<String, Object>>) -> Boolean
* Returns true iff the data contained within the map parameter satisfies the query's WHERE clause.
* project (Map<String, Object>>) -> Map<String, Object>>
* Returns the provided map in accordance with the SELECT clause of the query.
* sample (Map<String, Object>>) -> Boolean
* Returns true if the data should be sampled, this function is a tautology if no SAMPLE clause is provided.
*
* @param subscriptionId The ID representing the subscription.
* @param query The (valid) MQL query to parse.
*
* @return An object implementing the Query interface.
*/
public static Query makeQuery(String subscriptionId, String query) {
/*
if (!parses(query)) {
String error = getParseError(query);
logger.error("Failed to parse query [" + query + "]\nError: " + error + ".");
throw new IllegalArgumentException(error);
}
*/
return (Query) cljMakeQuery.invoke(subscriptionId, query.trim());
}
@SuppressWarnings("unchecked")
private static IFn computeSuperSetProjector(HashSet<Query> queries) {
ArrayList<String> qs = new ArrayList<>(queries.size());
for (Query query : queries) {
qs.add(query.getRawQuery());
}
return (IFn) cljSuperset.invoke(new ArrayList(qs));
}
/**
* Projects a single Map<String, Object> which contains a superset of all fields for the provided queries.
* This is useful in use cases such as the mantis-realtime-events library in which we desire to minimize the data
* egressed off box. This should minimize JSON serialization time as well as network bandwidth used to transmit
* the events.
* <p>
* NOTE: This function caches the projectors for performance reasons, this has implications for memory usage as each
* combination of queries results in a new cached function. In practice this has had little impact for <= 100
* queries.
*
* @param queries A Collection of Query objects generated using #makeQuery(String subscriptionId, String query).
* @param datum A Map representing the input event to be projected.
*
* @return A Map representing the union (superset) of all fields required for processing all queries passed in.
*/
@SuppressWarnings("unchecked")
public static Map<String, Object> projectSuperSet(Collection<Query> queries, Map<String, Object> datum) {
IFn superSetProjector = superSetProjectorCache.computeIfAbsent(new HashSet<Query>(queries), (qs) -> {
return computeSuperSetProjector(qs);
});
return (Map<String, Object>) superSetProjector.invoke(datum);
}
//
// Partial Query Functionality
//
public static Func1<Map<String, Object>, Object> getGroupByFn(String query) {
IFn func = (IFn) queryToGroupByFn.invoke(query);
return func::invoke;
}
@SuppressWarnings("unchecked")
public static Func1<Map<String, Object>, Boolean> getHavingPredicate(String query) {
IFn func = (IFn) queryToHavingPred.invoke(query);
return (datum) -> (Boolean) func.invoke(datum);
}
@SuppressWarnings("unchecked")
public static Func1<Observable<Map<String, Object>>, Observable<Map<String, Object>>> getAggregateFn(String query) {
IFn func = (IFn) queryToAggregateFn.invoke(query);
return (obs) -> (Observable<Map<String, Object>>) func.invoke(obs);
}
@SuppressWarnings("unchecked")
public static Func1<Map<String, Object>, Map<String, Object>> getExtrapolationFn(String query) {
IFn func = (IFn) queryToExtrapolationFn.invoke(query);
return (datum) -> (Map<String, Object>) func.invoke(datum);
}
@SuppressWarnings("unchecked")
public static Func1<Observable<Map<String, Object>>, Observable<Map<String, Object>>> getOrderBy(String query) {
IFn func = (IFn) queryToOrderBy.invoke(query);
return obs -> (Observable<Map<String, Object>>) func.invoke(obs);
}
// public static List<Long> getWindow(String query) {
// clojure.lang.PersistentVector result = (clojure.lang.PersistentVector)queryToWindow.invoke(query);
// Long window = (Long)result.nth(0);
// Long shift = (Long)result.nth(1);
// return Arrays.asList(window, shift);
// }
public static Long getLimit(String query) {
return (Long) queryToLimit.invoke(query);
}
//
// Helper Functions
//
/**
* A predicate which indicates whether or not the MQL parser considers query to be a valid query.
*
* @param query A String representing the MQL query.
*
* @return A boolean indicating whether or not the query successfully parses.
*/
public static Boolean parses(String query) {
return (Boolean) parses.invoke(query);
}
/**
* A convenience function allowing a caller to determine what went wrong if a call to #parses(String query) returns
* false.
*
* @param query A String representing the MQL query.
*
* @return A String representing the parse error for an MQL query, null if no parse error occurred.
*/
public static String getParseError(String query) {
return (String) getParseError.invoke(query);
}
/**
* A helper which converts bare true/false queries to MQL.
*
* @param criterion A Mantis Query (old query language) query.
*
* @return A valid MQL query string assuming the input was valid.
*/
public static String transformLegacyQuery(String criterion) {
return criterion.toLowerCase().equals("true") ? "select * where true" :
criterion.toLowerCase().equals("false") ? "select * where false" :
criterion;
}
// Ad-hoc smoke test: compiles a sample query via makeQuery and prints the result.
public static void main(String[] args) {
System.out.println(MQL.makeQuery("abc", "select * from stream where true"));
}
}
| 6,244 |
0 | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic/core/TaggedData.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.synthetic.core;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.common.codec.Codec;
import io.mantisrx.runtime.codec.JsonType;
public class TaggedData implements JsonType {

    // Ids (clientId_subId or bare subId) of subscribers whose query matched this event.
    private final Set<String> matchedClients = new HashSet<>();
    private Map<String, Object> payLoad;

    /**
     * @param data the event payload this instance wraps; tags are added later
     *             as queries match.
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public TaggedData(@JsonProperty("data") Map<String, Object> data) {
        this.payLoad = data;
    }

    public Set<String> getMatchedClients() {
        return matchedClients;
    }

    /** @return true when this event was tagged for the given subscriber id. */
    public boolean matchesClient(String clientId) {
        return matchedClients.contains(clientId);
    }

    public void addMatchedClient(String clientId) {
        matchedClients.add(clientId);
    }

    public Map<String, Object> getPayload() {
        return this.payLoad;
    }

    public void setPayload(Map<String, Object> newPayload) {
        this.payLoad = newPayload;
    }

    /**
     * Placeholder codec: decode ignores the bytes and yields an empty payload,
     * encode emits a fixed 128-byte array. NOTE(review): only safe where no real
     * cross-worker transport of TaggedData occurs — confirm before reusing.
     */
    public static Codec<TaggedData> taggedDataCodec() {
        return new Codec<TaggedData>() {
            @Override
            public TaggedData decode(byte[] bytes) {
                return new TaggedData(new HashMap<>());
            }

            @Override
            public byte[] encode(final TaggedData value) {
                return new byte[128];
            }
        };
    }
}
| 6,245 |
0 | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic/core/MQLQueryManager.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.synthetic.core;
import java.util.Collection;
import java.util.concurrent.ConcurrentHashMap;
import io.mantisrx.mql.jvm.core.Query;
public class MQLQueryManager {

    /** Initialization-on-demand holder idiom: lazy, thread-safe singleton creation. */
    static class LazyHolder {
        private static final MQLQueryManager INSTANCE = new MQLQueryManager();
    }

    // Registered queries keyed by subscription id.
    private final ConcurrentHashMap<String, Query> queries = new ConcurrentHashMap<>();

    public static MQLQueryManager getInstance() {
        return LazyHolder.INSTANCE;
    }

    private MQLQueryManager() { }

    /** Compiles the (possibly legacy true/false) query string and stores it under id. */
    public void registerQuery(String id, String query) {
        String mql = MQL.transformLegacyQuery(query);
        queries.put(id, MQL.makeQuery(id, mql));
    }

    public void deregisterQuery(String id) {
        queries.remove(id);
    }

    public Collection<Query> getRegisteredQueries() {
        return queries.values();
    }

    public void clear() {
        queries.clear();
    }

    /** Ad-hoc smoke test for query registration and parse diagnostics. */
    public static void main(String[] args) throws Exception {
        MQLQueryManager qm = getInstance();
        String query = "SELECT * WHERE true SAMPLE {\"strategy\":\"RANDOM\",\"threshold\":1}";
        qm.registerQuery("fake2", query);
        System.out.println(MQL.parses(MQL.transformLegacyQuery(query)));
        System.out.println(MQL.getParseError(MQL.transformLegacyQuery(query)));
        System.out.println(qm.getRegisteredQueries());
    }
}
| 6,246 |
0 | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic/proto/RequestEvent.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.synthetic.proto;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import io.mantisrx.common.codec.Codec;
import lombok.Builder;
import lombok.Data;
/**
* Represents a Request Event a service may receive.
*/
@Data
@Builder
public class RequestEvent {

    // Shared, thread-safe Jackson instances, created once.
    private static final ObjectMapper mapper = new ObjectMapper();
    private static final ObjectReader requestEventReader = mapper.readerFor(RequestEvent.class);

    private final String userId;
    private final String uri;
    private final int status;
    private final String country;
    private final String deviceType;

    /**
     * Flattens this event into a field-name-to-value map.
     *
     * @return a mutable map containing all five event fields.
     */
    public Map<String, Object> toMap() {
        Map<String, Object> fields = new HashMap<>();
        fields.put("userId", userId);
        fields.put("uri", uri);
        fields.put("status", status);
        fields.put("country", country);
        fields.put("deviceType", deviceType);
        return fields;
    }

    /**
     * Serializes this event as JSON.
     *
     * @return the JSON string, or null when serialization fails (the failure is
     *         printed to stderr; callers are expected to filter out nulls).
     */
    public String toJsonString() {
        try {
            return mapper.writeValueAsString(this);
        } catch (JsonProcessingException e) {
            e.printStackTrace();
            return null;
        }
    }

    /**
     * The codec defines how this class should be serialized before transporting across network.
     * @return a JSON-backed codec for RequestEvent.
     */
    public static Codec<RequestEvent> requestEventCodec() {
        return new Codec<RequestEvent>() {
            @Override
            public RequestEvent decode(byte[] bytes) {
                try {
                    return requestEventReader.readValue(bytes);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }

            @Override
            public byte[] encode(final RequestEvent value) {
                try {
                    return mapper.writeValueAsBytes(value);
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        };
    }
}
| 6,247 |
0 | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic/source/SyntheticSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.synthetic.source;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.IntParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import io.mantisrx.sourcejob.synthetic.proto.RequestEvent;
import lombok.extern.slf4j.Slf4j;
import net.andreinc.mockneat.MockNeat;
import rx.Observable;
/**
* Generates random set of RequestEvents at a preconfigured interval.
*/
@Slf4j
public class SyntheticSource implements Source<String> {

    private static final String DATA_GENERATION_RATE_MSEC_PARAM = "dataGenerationRate";

    private MockNeat mockDataGenerator;
    private int dataGenerateRateMsec = 250;

    /**
     * Emits a single inner observable that produces one random JSON-encoded
     * request event per tick at the configured rate. Events that fail to
     * serialize (null) are dropped.
     */
    @Override
    public Observable<Observable<String>> call(Context context, Index index) {
        Observable<String> eventStream = Observable
                .interval(dataGenerateRateMsec, TimeUnit.MILLISECONDS)
                .map(tick -> generateEvent())
                .map(RequestEvent::toJsonString)
                .filter(Objects::nonNull)
                .doOnNext(event -> log.debug("Generated Event {}", event));
        return Observable.just(eventStream);
    }

    /** Reads the generation-rate job parameter and sets up the mock-data generator. */
    @Override
    public void init(Context context, Index index) {
        mockDataGenerator = MockNeat.threadLocal();
        dataGenerateRateMsec = (int) context.getParameters().get(DATA_GENERATION_RATE_MSEC_PARAM, 250);
    }

    /** Declares the single tunable parameter: event generation interval in msec. */
    @Override
    public List<ParameterDefinition<?>> getParameters() {
        List<ParameterDefinition<?>> params = new ArrayList<>();
        ParameterDefinition<?> rate = new IntParameter()
                .name(DATA_GENERATION_RATE_MSEC_PARAM)
                .description("Rate at which to generate data")
                .validator(Validators.range(100, 1000000))
                .defaultValue(250)
                .build();
        params.add(rate);
        return params;
    }

    /** Builds one randomized RequestEvent using weighted choices. */
    private RequestEvent generateEvent() {
        final String requestPath = mockDataGenerator.probabilites(String.class)
                .add(0.1, "/login")
                .add(0.2, "/genre/horror")
                .add(0.5, "/genre/comedy")
                .add(0.2, "/mylist")
                .get();
        final String device = mockDataGenerator.probabilites(String.class)
                .add(0.1, "ps4")
                .add(0.1, "xbox")
                .add(0.2, "browser")
                .add(0.3, "ios")
                .add(0.3, "android")
                .get();
        final String user = mockDataGenerator.strings().size(10).get();
        // NOTE(review): 500 is weighted twice (0.1 and 0.2) and no 4xx status is
        // generated — confirm whether one of these was meant to be e.g. 404.
        final int statusCode = mockDataGenerator.probabilites(Integer.class)
                .add(0.1, 500)
                .add(0.7, 200)
                .add(0.2, 500)
                .get();
        final String countryName = mockDataGenerator.countries().names().get();
        return RequestEvent.builder()
                .status(statusCode)
                .uri(requestPath)
                .country(countryName)
                .userId(user)
                .deviceType(device)
                .build();
    }
}
| 6,248 |
0 | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic/sink/QueryRequestPostProcessor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.synthetic.sink;
import static com.mantisrx.common.utils.MantisSourceJobConstants.CRITERION_PARAM_NAME;
import static com.mantisrx.common.utils.MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME;
import java.util.List;
import java.util.Map;
import io.mantisrx.runtime.Context;
import lombok.extern.slf4j.Slf4j;
import org.apache.log4j.Logger;
import rx.functions.Func2;
/**
* This is a callback that is invoked after a client connected to the sink of this job disconnects. This is used
* to cleanup the queries the client had registered.
*/
@Slf4j
public class QueryRequestPostProcessor implements Func2<Map<String, List<String>>, Context, Void> {

    public QueryRequestPostProcessor() { }

    /**
     * Deregisters the disconnecting client's query reference, keyed by
     * clientId_subId when a clientId was supplied, otherwise by subId alone.
     *
     * @param queryParams query parameters of the original sink connection; may be null.
     * @param context     job context (unused).
     *
     * @return always null (Void).
     */
    @Override
    public Void call(Map<String, List<String>> queryParams, Context context) {
        log.info("RequestPostProcessor:queryParams: " + queryParams);
        if (queryParams != null
                && queryParams.containsKey(SUBSCRIPTION_ID_PARAM_NAME)
                && queryParams.containsKey(CRITERION_PARAM_NAME)) {
            final String subId = queryParams.get(SUBSCRIPTION_ID_PARAM_NAME).get(0);
            final String query = queryParams.get(CRITERION_PARAM_NAME).get(0);
            // Fix: the original called queryParams.get("clientId").get(0) unconditionally,
            // which throws NPE when no clientId parameter was supplied.
            final List<String> clientIds = queryParams.get("clientId");
            final String clientId = (clientIds == null || clientIds.isEmpty()) ? null : clientIds.get(0);
            if (subId != null && query != null) {
                try {
                    if (clientId != null && !clientId.isEmpty()) {
                        deregisterQuery(clientId + "_" + subId);
                    } else {
                        deregisterQuery(subId);
                    }
                } catch (Throwable t) {
                    log.error("Error propagating unsubscription notification", t);
                }
            }
        }
        return null;
    }

    /** Drops one reference to the query; removal happens when the last ref is gone. */
    private void deregisterQuery(String subId) {
        QueryRefCountMap.INSTANCE.removeQuery(subId);
    }
}
| 6,249 |
0 | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic/sink/TaggedDataSourceSink.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.synthetic.sink;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.PortRequest;
import io.mantisrx.runtime.sink.ServerSentEventsSink;
import io.mantisrx.runtime.sink.Sink;
import io.mantisrx.runtime.sink.predicate.Predicate;
import io.mantisrx.sourcejob.synthetic.core.TaggedData;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
import rx.functions.Func2;
/**
* A custom sink that allows clients to connect to this job with an MQL expression and in turn receive events
* matching this expression.
*/
@Slf4j
public class TaggedDataSourceSink implements Sink<TaggedData> {
// Callbacks invoked on client connect/disconnect; default to no-ops.
private Func2<Map<String, List<String>>, Context, Void> preProcessor = new NoOpProcessor();
private Func2<Map<String, List<String>>, Context, Void> postProcessor = new NoOpProcessor();
// Jackson ObjectMapper is thread-safe; shared across all connections.
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
// Default callback that does nothing.
static class NoOpProcessor implements Func2<Map<String, List<String>>, Context, Void> {
@Override
public Void call(Map<String, List<String>> t1, Context t2) {
return null;
}
}
public TaggedDataSourceSink() {
}
/**
 * @param preProcessor  invoked when a client connects (e.g. to register its query).
 * @param postProcessor invoked when a client disconnects (e.g. to deregister it).
 */
public TaggedDataSourceSink(Func2<Map<String, List<String>>, Context, Void> preProcessor,
Func2<Map<String, List<String>>, Context, Void> postProcessor) {
this.postProcessor = postProcessor;
this.preProcessor = preProcessor;
}
/**
 * Streams tagged events to connected SSE clients: empty payloads are dropped,
 * each payload is JSON-encoded, and the per-client predicate (TaggedEventFilter)
 * limits delivery to matching subscribers.
 */
@Override
public void call(Context context, PortRequest portRequest,
Observable<TaggedData> observable) {
observable = observable
.filter((t1) -> !t1.getPayload().isEmpty());
ServerSentEventsSink<TaggedData> sink = new ServerSentEventsSink.Builder<TaggedData>()
.withEncoder((data) -> {
try {
return OBJECT_MAPPER.writeValueAsString(data.getPayload());
} catch (JsonProcessingException e) {
e.printStackTrace();
// NOTE(review): e.getMessage() is not quoted/escaped, so this error
// document may itself be malformed JSON.
return "{\"error\":" + e.getMessage() + "}";
}
})
.withPredicate(new Predicate<>("description", new TaggedEventFilter()))
.withRequestPreprocessor(preProcessor)
.withRequestPostprocessor(postProcessor)
.build();
// NOTE(review): this bare subscribe() opens a second, independent subscription
// to the upstream observable in addition to the sink's own subscription below;
// confirm it is intentional — it can duplicate upstream side effects.
observable.subscribe();
sink.call(context, portRequest, observable);
}
}
| 6,250 |
0 | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic/sink/TaggedEventFilter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.synthetic.sink;
import static com.mantisrx.common.utils.MantisSourceJobConstants.CLIENT_ID_PARAMETER_NAME;
import static com.mantisrx.common.utils.MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import io.mantisrx.sourcejob.synthetic.core.TaggedData;
import lombok.extern.slf4j.Slf4j;
import rx.functions.Func1;
/**
* This is a predicate that decides what data to send to the downstream client. The data is tagged with the clientId
* and subscriptionId of the intended recipient.
*/
@Slf4j
public class TaggedEventFilter implements Func1<Map<String, List<String>>, Func1<TaggedData, Boolean>> {

    /**
     * Builds the per-connection predicate. When a subscriptionId parameter is
     * present, only events tagged for that subscriber pass (the tag is prefixed
     * with the clientId when one is supplied); otherwise everything passes.
     *
     * @param parameters query parameters of the sink connection; may be null.
     *
     * @return a predicate over tagged events.
     */
    @Override
    public Func1<TaggedData, Boolean> call(Map<String, List<String>> parameters) {
        Func1<TaggedData, Boolean> filter = t1 -> true;
        if (parameters != null && parameters.containsKey(SUBSCRIPTION_ID_PARAM_NAME)) {
            String subId = parameters.get(SUBSCRIPTION_ID_PARAM_NAME).get(0);
            // Fix: the original dereferenced parameters.get(CLIENT_ID_PARAMETER_NAME)
            // unconditionally, throwing NPE when no clientId parameter was supplied.
            List<String> clientIds = parameters.get(CLIENT_ID_PARAMETER_NAME);
            String clientId = (clientIds == null || clientIds.isEmpty()) ? null : clientIds.get(0);
            List<String> terms = new ArrayList<>();
            if (clientId != null && !clientId.isEmpty()) {
                terms.add(clientId + "_" + subId);
            } else {
                terms.add(subId);
            }
            filter = new SourceEventFilter(terms);
        }
        return filter;
    }

    /** Passes an event only when it is tagged with every required term. */
    private static class SourceEventFilter implements Func1<TaggedData, Boolean> {

        private String jobId = "UNKNOWN";
        private String jobName = "UNKNOWN";
        private List<String> terms;

        SourceEventFilter(List<String> terms) {
            this.terms = terms;
            String jId = System.getenv("JOB_ID");
            if (jId != null && !jId.isEmpty()) {
                jobId = jId;
            }
            String jName = System.getenv("JOB_NAME");
            if (jName != null && !jName.isEmpty()) {
                jobName = jName;
            }
            log.info("Created SourceEventFilter! for subId " + terms.toString() + " in Job : " + jobName + " with Id " + jobId);
        }

        @Override
        public Boolean call(TaggedData data) {
            for (String term : terms) {
                if (!data.matchesClient(term)) {
                    return false;
                }
            }
            return true;
        }
    }
}
| 6,251 |
0 | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic/sink/QueryRequestPreProcessor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.synthetic.sink;
import static com.mantisrx.common.utils.MantisSourceJobConstants.CRITERION_PARAM_NAME;
import static com.mantisrx.common.utils.MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME;
import java.util.List;
import java.util.Map;
import io.mantisrx.runtime.Context;
import lombok.extern.slf4j.Slf4j;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.functions.Func2;
/**
* This is a callback that is invoked when a new client connects to this sink of this job.
* The callback is used to extract useful query parameters the user may have set on the GET request such as
* the clientId, subscriptionId and the criterion.
* The clientId identifies a group of connections belonging to the same consumer. Data is sent round-robin amongst
* all clients with the same clientId
* The subscriptionId tracks this particular client.
* The criterion is a valid MQL query. It indicates what data this client is interested in.
*/
@Slf4j
public class QueryRequestPreProcessor implements Func2<Map<String, List<String>>, Context, Void> {

    public QueryRequestPreProcessor() { }

    /**
     * Registers the connecting client's MQL query, keyed by clientId_subId when a
     * clientId is present, otherwise by subId alone.
     *
     * @param queryParams query parameters of the sink connection; may be null.
     * @param context     job context (unused).
     *
     * @return always null (Void).
     */
    @Override
    public Void call(Map<String, List<String>> queryParams, Context context) {
        log.info("QueryRequestPreProcessor:queryParams: {}", queryParams);
        if (queryParams != null
                && queryParams.containsKey(SUBSCRIPTION_ID_PARAM_NAME)
                && queryParams.containsKey(CRITERION_PARAM_NAME)) {
            final String subId = queryParams.get(SUBSCRIPTION_ID_PARAM_NAME).get(0);
            final String query = queryParams.get(CRITERION_PARAM_NAME).get(0);
            // Fix: the original called queryParams.get("clientId").get(0) unconditionally,
            // which throws NPE when no clientId parameter is supplied.
            final List<String> clientIds = queryParams.get("clientId");
            final String clientId = (clientIds == null || clientIds.isEmpty()) ? null : clientIds.get(0);
            if (subId != null && query != null) {
                try {
                    log.info("Registering query {}", query);
                    if (clientId != null && !clientId.isEmpty()) {
                        registerQuery(clientId + "_" + subId, query);
                    } else {
                        registerQuery(subId, query);
                    }
                } catch (Throwable t) {
                    log.error("Error registering query", t);
                }
            }
        }
        return null;
    }

    /** Adds one reference to the query, registering it with MQL on first use. */
    private static synchronized void registerQuery(String subId, String query) {
        QueryRefCountMap.INSTANCE.addQuery(subId, query);
    }
}
| 6,252 |
0 | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic/sink/QueryRefCountMap.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.synthetic.sink;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import io.mantisrx.sourcejob.synthetic.core.MQLQueryManager;
import lombok.extern.slf4j.Slf4j;
/**
* This class keeps track of number of clients that have the exact same query registered for
* deduplication purposes.
* When all references to a query are gone the query is deregistered.
*/
@Slf4j
final class QueryRefCountMap {

    public static final QueryRefCountMap INSTANCE = new QueryRefCountMap();

    // subId -> number of live subscribers sharing that exact query.
    private final ConcurrentHashMap<String, AtomicInteger> refCntMap = new ConcurrentHashMap<>();

    private QueryRefCountMap() { }

    /**
     * Adds a reference to the query identified by subId, registering it with the
     * MQL query manager on first use.
     *
     * Fix: the original containsKey/get/putIfAbsent sequence was a check-then-act
     * race — two threads could both observe the key absent and register twice, or
     * an increment could land on an entry being removed concurrently. compute()
     * makes register-and-count a single atomic step.
     */
    void addQuery(String subId, String query) {
        log.info("adding query " + subId + " query " + query);
        refCntMap.compute(subId, (id, refCnt) -> {
            if (refCnt == null) {
                // First reference: register the query before publishing the count.
                MQLQueryManager.getInstance().registerQuery(id, query);
                log.info("new query registering it");
                return new AtomicInteger(1);
            }
            log.info("query exists already incrementing refcnt to " + refCnt.incrementAndGet());
            return refCnt;
        });
    }

    /**
     * Drops one reference to the query; deregisters and removes it when the last
     * reference is gone. Unknown subIds are only logged.
     */
    void removeQuery(String subId) {
        if (!refCntMap.containsKey(subId)) {
            log.warn("No query with subscriptionId " + subId);
            return;
        }
        // Returning null from the remapping function atomically removes the entry.
        refCntMap.computeIfPresent(subId, (id, refCnt) -> {
            int currVal = refCnt.decrementAndGet();
            if (currVal == 0) {
                MQLQueryManager.getInstance().deregisterQuery(id);
                log.info("All references to query are gone removing query");
                return null;
            }
            log.info("References to query still exist. decrementing refcnt to " + currVal);
            return refCnt;
        });
    }

    /**
     * For testing
     *
     * @param subId subscription id to look up.
     *
     * @return the current reference count, 0 when the subId is unknown.
     */
    int getQueryRefCount(String subId) {
        AtomicInteger refCnt = refCntMap.get(subId);
        return (refCnt == null) ? 0 : refCnt.get();
    }
}
| 6,253 |
0 | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic | Create_ds/mantis-examples/synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic/stage/TaggingStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.synthetic.stage;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean;
import com.mantisrx.common.utils.JsonUtility;
import io.mantisrx.common.codec.Codec;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.mql.jvm.core.Query;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.sourcejob.synthetic.core.MQLQueryManager;
import io.mantisrx.sourcejob.synthetic.core.TaggedData;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
/**
* Tags incoming events with ids of queries that evaluate to true against the data.
*
* Each event is first transformed into a Map, next each query from the list of Registered MQL queries
* is applied against the event. The event is tagged with the ids of queries that matched.
*
*/
@Slf4j
public class TaggingStage implements ScalarComputation<String, TaggedData> {
// Metadata keys injected into every projected event so consumers can identify provenance.
public static final String MANTIS_META_SOURCE_NAME = "mantis.meta.sourceName";
public static final String MANTIS_META_SOURCE_TIMESTAMP = "mantis.meta.timestamp";
// Counter names registered under the "mql" metric group in init().
// NOTE(review): MANTIS_QUERY_COUNTER and MQL_COUNTER are registered but never
// incremented in this class — confirm whether that is intentional.
public static final String MANTIS_QUERY_COUNTER = "mantis_query_out";
public static final String MQL_COUNTER = "mql_out";
public static final String MQL_FAILURE = "mql_failure";
public static final String MQL_CLASSLOADER_ERROR = "mql_classloader_error";
public static final String SYNTHETIC_REQUEST_SOURCE = "SyntheticRequestSource";
// One-shot flag so the Error branch in tagData logs at most once per worker.
private AtomicBoolean errorLogged = new AtomicBoolean(false);
/**
 * Parses each incoming JSON event into a map and fans it out as one TaggedData
 * per matching registered query. Unparseable events are logged and dropped.
 */
@Override
public Observable<TaggedData> call(Context context, Observable<String> dataO) {
return dataO
.map((event) -> {
try {
return JsonUtility.jsonToMap(event);
} catch (Exception e) {
log.error(e.getMessage());
// Null marks an unparseable event; filtered out below.
return null;
}
})
.filter(Objects::nonNull)
.flatMapIterable(d -> tagData(d, context));
}
/** Registers the "mql" metric group containing this stage's counters. */
@Override
public void init(Context context) {
context.getMetricsRegistry().registerAndGet(new Metrics.Builder()
.name("mql")
.addCounter(MQL_COUNTER)
.addCounter(MQL_FAILURE)
.addCounter(MQL_CLASSLOADER_ERROR)
.addCounter(MANTIS_QUERY_COUNTER).build());
}
/**
 * Evaluates every registered query against the event map. For each match, the
 * query's projection of the event (plus source-name and timestamp metadata) is
 * emitted as a TaggedData tagged with that query's subscription id. Evaluation
 * failures increment failure counters and never abort the other queries.
 */
private List<TaggedData> tagData(Map<String, Object> d, Context context) {
List<TaggedData> taggedDataList = new ArrayList<>();
Metrics metrics = context.getMetricsRegistry().getMetric(new MetricGroupId("mql"));
Collection<Query> queries = MQLQueryManager.getInstance().getRegisteredQueries();
Iterator<Query> it = queries.iterator();
while (it.hasNext()) {
Query query = it.next();
try {
if (query.matches(d)) {
Map<String, Object> projected = query.project(d);
projected.put(MANTIS_META_SOURCE_NAME, SYNTHETIC_REQUEST_SOURCE);
projected.put(MANTIS_META_SOURCE_TIMESTAMP, System.currentTimeMillis());
TaggedData tg = new TaggedData(projected);
tg.addMatchedClient(query.getSubscriptionId());
taggedDataList.add(tg);
}
} catch (Exception ex) {
// ClassNotFoundException is tracked separately: it signals an MQL
// classloading problem rather than a bad query/datum.
if (ex instanceof ClassNotFoundException) {
log.error("Error loading MQL: " + ex.getMessage());
ex.printStackTrace();
metrics.getCounter(MQL_CLASSLOADER_ERROR).increment();
} else {
ex.printStackTrace();
metrics.getCounter(MQL_FAILURE).increment();
log.error("MQL Error: " + ex.getMessage());
log.error("MQL Query: " + query.getRawQuery());
log.error("MQL Datum: " + d);
}
} catch (Error e) {
// Errors (e.g. linkage failures) are counted every time but logged only
// once, guarded by errorLogged, to avoid log flooding on a hot path.
metrics.getCounter(MQL_FAILURE).increment();
if (!errorLogged.get()) {
log.error("caught Error when processing MQL {} on {}", query.getRawQuery(), d.toString(), e);
errorLogged.set(true);
}
}
}
return taggedDataList;
}
/** Stage config: concurrent input with the placeholder TaggedData codec. */
public static ScalarToScalar.Config<String, TaggedData> config() {
return new ScalarToScalar.Config<String, TaggedData>()
.concurrentInput()
.codec(TaggingStage.taggedDataCodec());
}
/**
 * Placeholder codec: decode ignores the bytes and yields an empty payload,
 * encode emits a fixed 128-byte array. NOTE(review): duplicates
 * TaggedData.taggedDataCodec(); only safe where no real cross-worker transport
 * of TaggedData occurs — confirm before reusing.
 */
public static Codec<TaggedData> taggedDataCodec() {
return new Codec<TaggedData>() {
@Override
public TaggedData decode(byte[] bytes) {
return new TaggedData(new HashMap<>());
}
@Override
public byte[] encode(final TaggedData value) {
return new byte[128];
}
};
}
}
| 6,254 |
0 | Create_ds/mantis-examples/mantis-publish-web-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/web | Create_ds/mantis-examples/mantis-publish-web-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/web/servlet/HelloServlet.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample.web.servlet;
import java.io.IOException;
import javax.inject.Inject;
import javax.inject.Singleton;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.netflix.mantis.examples.mantispublishsample.web.service.MyService;
/**
* A simple servlet that looks for the existence of a name parameter in the request and responds
* with a Hello message.
*/
@Singleton
public class HelloServlet extends HttpServlet {
@Inject
MyService myService;
protected void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
String name = request.getParameter("name");
if (name == null) name = "Universe";
String result = myService.hello(name);
response.getWriter().print(result);
}
protected void doPost(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
String name = request.getParameter("name");
if (name == null) name = "World";
request.setAttribute("user", name);
request.getRequestDispatcher("response.jsp").forward(request, response);
}
} | 6,255 |
0 | Create_ds/mantis-examples/mantis-publish-web-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/web | Create_ds/mantis-examples/mantis-publish-web-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/web/config/DefaultGuiceServletConfig.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample.web.config;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.mantis.examples.mantispublishsample.web.filter.CaptureRequestEventFilter;
import com.netflix.mantis.examples.mantispublishsample.web.service.MyService;
import com.netflix.mantis.examples.mantispublishsample.web.service.MyServiceImpl;
import com.netflix.mantis.examples.mantispublishsample.web.servlet.HelloServlet;
import com.netflix.spectator.nflx.SpectatorModule;
import io.mantisrx.publish.netty.guice.MantisRealtimeEventsPublishModule;
/**
 * Wire up the servlets, filters and other modules.
 */
public class DefaultGuiceServletConfig extends GuiceServletContextListener {

    @Override
    protected Injector getInjector() {
        return Guice.createInjector(
                // Archaius supplies configuration, the Mantis module wires the
                // real-time event publishing pipeline, Spectator provides metrics.
                new ArchaiusModule(), new MantisRealtimeEventsPublishModule(), new SpectatorModule(),
                new ServletModule() {
                    @Override
                    protected void configureServlets() {
                        // Every request passes through the capture filter, which publishes
                        // request/response data to Mantis after the chain completes.
                        filter("/*").through(CaptureRequestEventFilter.class);
                        serve("/hello").with(HelloServlet.class);
                        bind(MyService.class).to(MyServiceImpl.class);
                    }
                }
        );
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample.web.filter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadLocalRandom;
import javax.inject.Inject;
import javax.inject.Singleton;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpServletResponseWrapper;
import io.mantisrx.publish.api.Event;
import io.mantisrx.publish.api.EventPublisher;
import io.mantisrx.publish.api.PublishStatus;
import lombok.extern.slf4j.Slf4j;
/**
 * A sample filter that captures Request and Response headers and sends them to
 * Mantis using the mantis-publish library.
 */
@Slf4j
@Singleton
public class CaptureRequestEventFilter implements Filter {

    private static final String RESPONSE_HEADER_PREFIX = "response.header.";
    private static final String REQUEST_HEADER_PREFIX = "request.header.";
    private static final String VALUE_SEPARATOR = ",";

    /** Hands completed request events to the mantis-publish pipeline. */
    @Inject
    private EventPublisher publisher;

    @Override
    public void init(FilterConfig filterConfig) {
        log.info("Capture Request data filter inited");
    }

    @Override
    public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain)
            throws IOException, ServletException {
        final HttpServletRequest req = (HttpServletRequest) servletRequest;
        final HttpServletResponse res = (HttpServletResponse) servletResponse;
        log.debug("In do filter");
        final long startMillis = System.currentTimeMillis();
        // Wrap the response so headers/cookies/status written downstream are recorded.
        final ResponseSpy responseSpy = new ResponseSpy(res);
        // Send request down the filter chain.
        filterChain.doFilter(servletRequest, responseSpy);
        // Request is complete; gather all the request data and send to Mantis.
        processPostFilter(startMillis, req, responseSpy);
    }

    /**
     * Invoked after the request has been completed. Gathers all the request and response
     * data associated with this request and publishes it to Mantis.
     *
     * @param startMillis the time processing began for this request
     * @param req the servlet request object
     * @param responseSpy the spy servlet response
     */
    private void processPostFilter(long startMillis, HttpServletRequest req, ResponseSpy responseSpy) {
        try {
            Map<String, Object> event = new HashMap<>();
            postProcess(req, responseSpy, event);
            Event rEvent = new Event(event);
            final long duration = System.currentTimeMillis() - startMillis;
            rEvent.set("duration", duration);
            // Fixed: format string had two placeholders ("{} to stream {}") but only one argument.
            log.info("sending event {}", rEvent);
            CompletionStage<PublishStatus> sendResult = publisher.publish(rEvent);
            sendResult.whenCompleteAsync((status, throwable) -> {
                log.info("Filter send event status=> {}", status);
            });
        } catch (Exception e) {
            log.error("failed to process event", e);
        }
    }

    /**
     * Captures the request and response data associated with this request into the event map.
     * Any failure is recorded under the "exception" key instead of propagating.
     */
    private void postProcess(HttpServletRequest httpServletRequest, ResponseSpy responseSpy, Map<String, Object> event) {
        try {
            // Non-negative pseudo-random request id. Masking the sign bit avoids the
            // Integer.MIN_VALUE overflow that the previous "rdm * (-1)" produced.
            int rdm = ThreadLocalRandom.current().nextInt() & Integer.MAX_VALUE;
            event.put("request.uuid", rdm);
            captureRequestData(event, httpServletRequest);
            captureResponseData(event, responseSpy);
        } catch (Exception e) {
            event.put("exception", e.toString());
            log.error("Error capturing data in api.RequestEventInfoCollector filter! uri=" +
                    httpServletRequest.getRequestURI(), e);
        }
    }

    /**
     * Captures response headers, cookies and the status code into the event map.
     */
    private void captureResponseData(Map<String, Object> event, ResponseSpy res) {
        log.debug("Capturing response data");
        // Response headers: multi-valued headers are joined with commas.
        for (Map.Entry<String, List<String>> header : res.headers.entrySet()) {
            event.put(RESPONSE_HEADER_PREFIX + header.getKey(),
                    String.join(VALUE_SEPARATOR, header.getValue()));
        }
        // Set-Cookie: rebuild each captured cookie's attributes into one header value.
        if (!res.cookies.isEmpty()) {
            Iterator<Cookie> cookies = res.cookies.iterator();
            StringBuilder setCookies = new StringBuilder();
            while (cookies.hasNext()) {
                Cookie cookie = cookies.next();
                setCookies.append(cookie.getName()).append("=").append(cookie.getValue());
                String domain = cookie.getDomain();
                if (domain != null) {
                    setCookies.append("; Domain=").append(domain);
                }
                int maxAge = cookie.getMaxAge();
                if (maxAge >= 0) {
                    setCookies.append("; Max-Age=").append(maxAge);
                }
                String path = cookie.getPath();
                if (path != null) {
                    setCookies.append("; Path=").append(path);
                }
                if (cookie.getSecure()) {
                    setCookies.append("; Secure");
                }
                if (cookie.isHttpOnly()) {
                    setCookies.append("; HttpOnly");
                }
                if (cookies.hasNext()) {
                    setCookies.append(VALUE_SEPARATOR);
                }
            }
            event.put(RESPONSE_HEADER_PREFIX + "set-cookie", setCookies.toString());
        }
        // Status of the request.
        event.put("status", res.statusCode);
    }

    /**
     * Captures basic request properties, request headers and (for GETs) query parameters.
     */
    private void captureRequestData(Map<String, Object> event, HttpServletRequest req) {
        // Basic request properties.
        String path = req.getRequestURI();
        if (path == null) {
            path = "/";
        }
        event.put("path", path);
        event.put("host", req.getHeader("host"));
        event.put("query", req.getQueryString());
        event.put("method", req.getMethod());
        event.put("currentTime", System.currentTimeMillis());
        // Request headers: multi-valued headers are joined with commas.
        for (final Enumeration<String> names = req.getHeaderNames(); names.hasMoreElements(); ) {
            final String name = names.nextElement();
            final StringBuilder valBuilder = new StringBuilder();
            boolean firstValue = true;
            for (final Enumeration<String> vals = req.getHeaders(name); vals.hasMoreElements(); ) {
                // Only prepend the separator for non-first header values.
                if (firstValue) {
                    firstValue = false;
                } else {
                    valBuilder.append(VALUE_SEPARATOR);
                }
                valBuilder.append(vals.nextElement());
            }
            event.put(REQUEST_HEADER_PREFIX + name, valBuilder.toString());
        }
        // Request params, GET only: HTTP POSTs send a param with a weird encoded name,
        // so they are skipped entirely.
        if ("GET".equals(req.getMethod())) {
            for (final Map.Entry<String, String[]> param : req.getParameterMap().entrySet()) {
                // getParameterMap values are always String[]; unwrap single values.
                final String[] vals = param.getValue();
                final String valString = (vals.length == 1) ? vals[0] : Arrays.asList(vals).toString();
                event.put("param." + param.getKey(), valString);
            }
        }
    }

    @Override
    public void destroy() {
    }

    /**
     * A simple wrapper for {@link HttpServletResponseWrapper} that is used to capture headers
     * and cookies associated with the response.
     *
     * <p>NOTE(review): only {@code setStatus(int)} is intercepted; a status set through
     * {@code sendError(...)} is not captured -- confirm that is acceptable.
     */
    private static final class ResponseSpy extends HttpServletResponseWrapper {

        int statusCode = 200; // default unless setStatus is called
        final Map<String, List<String>> headers = new ConcurrentHashMap<>();
        final List<Cookie> cookies = new ArrayList<>();

        private ResponseSpy(HttpServletResponse response) {
            super(response);
        }

        @Override
        public void setStatus(int sc) {
            super.setStatus(sc);
            this.statusCode = sc;
        }

        @Override
        public void addCookie(Cookie cookie) {
            cookies.add(cookie);
            super.addCookie(cookie);
        }

        @Override
        public void setHeader(String name, String value) {
            // set* replaces any previously captured values for this header.
            List<String> values = new ArrayList<>();
            values.add(value);
            headers.put(name, values);
            super.setHeader(name, value);
        }

        @Override
        public void addHeader(String name, String value) {
            headers.computeIfAbsent(name, k -> new ArrayList<>()).add(value);
            super.addHeader(name, value);
        }

        @Override
        public void setDateHeader(String name, long date) {
            List<String> values = new ArrayList<>();
            values.add(Long.toString(date));
            headers.put(name, values);
            super.setDateHeader(name, date);
        }

        @Override
        public void setIntHeader(String name, int val) {
            List<String> values = new ArrayList<>();
            values.add(Integer.toString(val));
            headers.put(name, values);
            super.setIntHeader(name, val);
        }
    }
}
| 6,257 |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample.web.service;
/**
 * Greeting service used by {@code HelloServlet}; bound to its implementation in the
 * Guice servlet module.
 */
public interface MyService {

    /**
     * Produces a greeting for the given name.
     *
     * @param name the name to greet
     * @return the greeting text
     */
    String hello(String name);
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample.web.service;
public class MyServiceImpl implements MyService {
@Override
public String hello(String name) {
return "Hello, " + name;
}
} | 6,259 |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.core;
import lombok.Data;
/**
 * A simple class that holds a word and a count of how many times it has occurred.
 * Lombok's {@code @Data} generates the getters, {@code equals}/{@code hashCode}
 * and {@code toString}; the final fields make instances immutable.
 */
@Data
public class WordCountPair {
    // The word being counted.
    private final String word;
    // Number of occurrences recorded for this word.
    private final int count;
}
| 6,260 |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.core;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import rx.Observable;
import rx.subjects.PublishSubject;
import rx.subjects.Subject;
/**
* An Observable that acts as a blocking queue. It is backed by a <code>Subject</code>
*
* @param <T>
*/
public class ObservableQueue<T> implements BlockingQueue<T>, Closeable {
private final Subject<T, T> subject = PublishSubject.<T>create().toSerialized();
public Observable<T> observe() {
return subject;
}
@Override
public boolean add(T t) {
return offer(t);
}
@Override
public boolean offer(T t) {
subject.onNext(t);
return true;
}
@Override
public void close() throws IOException {
subject.onCompleted();
}
@Override
public T remove() {
return noSuchElement();
}
@Override
public T poll() {
return null;
}
@Override
public T element() {
return noSuchElement();
}
private T noSuchElement() {
throw new NoSuchElementException();
}
@Override
public T peek() {
return null;
}
@Override
public void put(T t) throws InterruptedException {
offer(t);
}
@Override
public boolean offer(T t, long timeout, TimeUnit unit) throws InterruptedException {
return offer(t);
}
@Override
public T take() throws InterruptedException {
throw new UnsupportedOperationException("Use observe() instead");
}
@Override
public T poll(long timeout, TimeUnit unit) throws InterruptedException {
return null;
}
@Override
public int remainingCapacity() {
return 0;
}
@Override
public boolean remove(Object o) {
return false;
}
@Override
public boolean containsAll(Collection<?> c) {
return false;
}
@Override
public boolean addAll(Collection<? extends T> c) {
c.forEach(this::offer);
return true;
}
@Override
public boolean removeAll(Collection<?> c) {
return false;
}
@Override
public boolean retainAll(Collection<?> c) {
return false;
}
@Override
public void clear() {
}
@Override
public int size() {
return 0;
}
@Override
public boolean isEmpty() {
return true;
}
@Override
public boolean contains(Object o) {
return false;
}
@Override
public Iterator<T> iterator() {
return Collections.emptyIterator();
}
@Override
public Object[] toArray() {
return new Object[0];
}
@Override
public <T> T[] toArray(T[] a) {
return a;
}
@Override
public int drainTo(Collection<? super T> c) {
return 0;
}
@Override
public int drainTo(Collection<? super T> c, int maxElements) {
return 0;
}
} | 6,261 |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.config;
import java.util.Map;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.runtime.KeyToScalar;
import io.mantisrx.runtime.ScalarToKey;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.codec.JacksonCodecs;
/**
 * Shared stage-configuration factories used by the example jobs to declare how data
 * is encoded between stages.
 */
public class StageConfigs {

    private StageConfigs() {
        // Utility class: static factory methods only, never instantiated.
    }

    /** Config for a scalar stage whose input and output are plain strings. */
    public static ScalarToScalar.Config<String, String> scalarToScalarConfig() {
        return new ScalarToScalar.Config<String, String>()
                .codec(Codecs.string());
    }

    /** Config for a keyed stage that aggregates grouped events back into strings. */
    public static KeyToScalar.Config<String, Map<String, Object>, String> keyToScalarConfig() {
        return new KeyToScalar.Config<String, Map<String, Object>, String>()
                .description("sum events ")
                .keyExpireTimeSeconds(10)
                .codec(Codecs.string());
    }

    /** Config for a stage that groups string events into keyed maps (grouped by ip). */
    public static ScalarToKey.Config<String, String, Map<String, Object>> scalarToKeyConfig() {
        return new ScalarToKey.Config<String, String, Map<String, Object>>()
                .description("Group event data by ip")
                .concurrentInput()
                .keyExpireTimeSeconds(1)
                .codec(JacksonCodecs.mapStringObject());
    }
}
| 6,262 |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.mantis.examples.sinefunction;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import io.mantisrx.mantis.examples.sinefunction.core.Point;
import io.mantisrx.mantis.examples.sinefunction.stages.SinePointGeneratorStage;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.codec.JacksonCodecs;
import io.mantisrx.runtime.executor.LocalJobExecutorNetworked;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.runtime.parameter.type.BooleanParameter;
import io.mantisrx.runtime.parameter.type.DoubleParameter;
import io.mantisrx.runtime.parameter.type.IntParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.runtime.sink.SelfDocumentingSink;
import io.mantisrx.runtime.sink.ServerSentEventsSink;
import io.mantisrx.runtime.sink.predicate.Predicate;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import rx.Observable;
import rx.functions.Func1;
import rx.schedulers.Schedulers;
/**
 * Example Mantis job that emits an infinite stream of points along the curve
 * f(x) = amplitude * sin(frequency * x + phase), served to clients over SSE.
 */
public class SineFunctionJob extends MantisJobProvider<Point> {

    // Names of the user-supplied job parameters declared in getJobInstance().
    public static final String INTERVAL_SEC = "intervalSec";
    public static final String RANGE_MAX = "max";
    public static final String RANGE_MIN = "min";
    public static final String AMPLITUDE = "amplitude";
    public static final String FREQUENCY = "frequency";
    public static final String PHASE = "phase";
    public static final String RANDOM_RATE = "randomRate";
    public static final String USE_RANDOM_FLAG = "useRandom";

    /**
     * The SSE sink sets up an SSE server that can be connected to using SSE clients(curl etc.) to see
     * a real-time stream of (x, y) tuples on a sine curve. Supports an optional "filter"
     * query parameter ("even"/"odd") that filters points by the parity of their x value.
     */
    private final SelfDocumentingSink<Point> sseSink = new ServerSentEventsSink.Builder<Point>()
            .withEncoder(point -> String.format("{\"x\": %f, \"y\": %f}", point.getX(), point.getY()))
            .withPredicate(new Predicate<>(
                    "filter=even, returns even x parameters; filter=odd, returns odd x parameters.",
                    parameters -> {
                        // Default filter passes every point through.
                        Func1<Point, Boolean> filter = point -> {
                            return true;
                        };
                        if (parameters != null && parameters.containsKey("filter")) {
                            String filterBy = parameters.get("filter").get(0);
                            // create filter function based on parameter value
                            filter = point -> {
                                // filter by evens or odds for x values
                                if ("even".equalsIgnoreCase(filterBy)) {
                                    return (point.getX() % 2 == 0);
                                } else if ("odd".equalsIgnoreCase(filterBy)) {
                                    return (point.getX() % 2 != 0);
                                }
                                return true; // if not even/odd
                            };
                        }
                        return filter;
                    }
            ))
            .build();

    /**
     * The Stage com.netflix.mantis.examples.config defines how the output of the stage is serialized onto the next stage or sink.
     */
    static ScalarToScalar.Config<Integer, Point> stageConfig() {
        return new ScalarToScalar.Config<Integer, Point>()
                .codec(JacksonCodecs.pojo(Point.class));
    }

    /**
     * Run this in the IDE and look for
     * {@code AbstractServer:95 main - Rx server started at port: <PORT_NUMBER>} in the console output.
     * Connect to the port using {@code curl localhost:<PORT_NUMBER>}
     * to see a stream of (x, y) coordinates on a sine curve.
     */
    public static void main(String[] args) {
        LocalJobExecutorNetworked.execute(new SineFunctionJob().getJobInstance(),
                new Parameter("useRandom", "false"));
    }

    @Override
    public Job<Point> getJobInstance() {
        return MantisJob
                // Define the data source for this job.
                .source(new TimerSource())
                // Add stages to transform the event stream received from the Source.
                .stage(new SinePointGeneratorStage(), stageConfig())
                // Define a sink to output the transformed stream over SSE or an external system like Cassandra, etc.
                .sink(sseSink)
                // Add Job parameters that can be passed in by the user when submitting a job.
                .parameterDefinition(new BooleanParameter()
                        .name(USE_RANDOM_FLAG)
                        .required()
                        .description("If true, produce a random sequence of integers. If false,"
                                + " produce a sequence of integers starting at 0 and increasing by 1.")
                        .build())
                .parameterDefinition(new DoubleParameter()
                        .name(RANDOM_RATE)
                        .defaultValue(1.0)
                        .description("The chance a random integer is generated, for the given period")
                        .validator(Validators.range(0, 1))
                        .build())
                .parameterDefinition(new IntParameter()
                        .name(INTERVAL_SEC)
                        .defaultValue(1)
                        .description("Period at which to generate a random integer value to send to sine function")
                        .validator(Validators.range(1, 60))
                        .build())
                .parameterDefinition(new IntParameter()
                        .name(RANGE_MIN)
                        .defaultValue(0)
                        .description("Minimun of random integer value")
                        .validator(Validators.range(0, 100))
                        .build())
                .parameterDefinition(new IntParameter()
                        .name(RANGE_MAX)
                        .defaultValue(100)
                        .description("Maximum of random integer value")
                        .validator(Validators.range(1, 100))
                        .build())
                .parameterDefinition(new DoubleParameter()
                        .name(AMPLITUDE)
                        .defaultValue(10.0)
                        .description("Amplitude for sine function")
                        .validator(Validators.range(1, 100))
                        .build())
                .parameterDefinition(new DoubleParameter()
                        .name(FREQUENCY)
                        .defaultValue(1.0)
                        .description("Frequency for sine function")
                        .validator(Validators.range(1, 100))
                        .build())
                .parameterDefinition(new DoubleParameter()
                        .name(PHASE)
                        .defaultValue(0.0)
                        .description("Phase for sine function")
                        .validator(Validators.range(0, 100))
                        .build())
                .metadata(new Metadata.Builder()
                        .name("Sine function")
                        .description("Produces an infinite stream of points, along the sine function, using the"
                                + " following function definition: f(x) = amplitude * sin(frequency * x + phase)."
                                + " The input to the function is either random between [min, max], or an integer sequence starting "
                                + " at 0. The output is served via HTTP server using SSE protocol.")
                        .build())
                .create();
    }

    /**
     * This source generates a monotonically increasingly value per tick as per INTERVAL_SEC Job parameter.
     * If USE_RANDOM_FLAG is set, the source generates a random value per tick.
     */
    class TimerSource implements Source<Integer> {

        @Override
        public Observable<Observable<Integer>> call(Context context, Index index) {
            // If you want to be informed of scaleup/scale down of the source stage of this job you can subscribe
            // to getTotalNumWorkersObservable like the following.
            index.getTotalNumWorkersObservable().subscribeOn(Schedulers.io()).subscribe((workerCount) -> {
                System.out.println("Total worker count changed to -> " + workerCount);
            });
            // Read the user-supplied job parameters that drive generation.
            final int period = (int)
                    context.getParameters().get(INTERVAL_SEC);
            final int max = (int)
                    context.getParameters().get(RANGE_MAX);
            final int min = (int)
                    context.getParameters().get(RANGE_MIN);
            final double randomRate = (double)
                    context.getParameters().get(RANDOM_RATE);
            final boolean useRandom = (boolean)
                    context.getParameters().get(USE_RANDOM_FLAG);
            final Random randomNumGenerator = new Random();
            final Random randomRateVariable = new Random();
            return Observable.just(
                    Observable.interval(0, period, TimeUnit.SECONDS)
                            .map(time -> {
                                // Either a uniform random int in [min, max] or the tick index itself.
                                if (useRandom) {
                                    return randomNumGenerator.nextInt((max - min) + 1) + min;
                                } else {
                                    return (int) (long) time;
                                }
                            })
                            .filter(x -> {
                                // Randomly drop ticks so roughly randomRate of them pass through.
                                double value = randomRateVariable.nextDouble();
                                return (value <= randomRate);
                            })
            );
        }
    }
}
| 6,263 |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.mantis.examples.sinefunction.core;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.runtime.codec.JsonType;
/**
 * Immutable (x, y) coordinate produced by the sine-function job and serialized to
 * JSON by Jackson.
 */
public class Point implements JsonType {

    // Made final: this is a value type with no setters, so the coordinates never change.
    private final double x;
    private final double y;

    // NOTE(review): @JsonIgnoreProperties is conventionally placed on the class rather
    // than the creator -- confirm the constructor placement behaves as intended here.
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public Point(@JsonProperty("x") double x,
                 @JsonProperty("y") double y) {
        this.x = x;
        this.y = y;
    }

    /** @return the x coordinate (the input value fed to the sine function). */
    public double getX() {
        return x;
    }

    /** @return the y coordinate (the sine-function output). */
    public double getY() {
        return y;
    }
}
| 6,264 |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.mantis.examples.sinefunction.stages;
import io.mantisrx.mantis.examples.sinefunction.SineFunctionJob;
import io.mantisrx.mantis.examples.sinefunction.core.Point;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.computation.ScalarComputation;
import rx.Observable;
/**
 * Mantis scalar-computation stage that maps each integer received from the Source to a
 * {@link Point} on a sine curve, f(x) = amplitude * sin(frequency * x + phase), using
 * the AMPLITUDE, FREQUENCY and PHASE job parameters. Odd inputs are dropped.
 */
public class SinePointGeneratorStage implements ScalarComputation<Integer, Point> {

    @Override
    public Observable<Point> call(Context context, Observable<Integer> upstream) {
        // Pull the curve coefficients from the job parameters once per stage invocation.
        final double amplitude = (double) context.getParameters().get(SineFunctionJob.AMPLITUDE);
        final double frequency = (double) context.getParameters().get(SineFunctionJob.FREQUENCY);
        final double phase = (double) context.getParameters().get(SineFunctionJob.PHASE);

        return upstream
                .filter(value -> value % 2 == 0)
                .map(value -> new Point(value, amplitude * Math.sin((frequency * value) + phase)));
    }
}
| 6,265 |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.wordcount;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import com.netflix.mantis.examples.config.StageConfigs;
import com.netflix.mantis.examples.core.WordCountPair;
import com.netflix.mantis.examples.wordcount.sources.IlliadSource;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.executor.LocalJobExecutorNetworked;
import io.mantisrx.runtime.sink.Sinks;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
/**
 * This sample demonstrates ingesting data from a text file and counting the number of occurrences of words within a 10
 * sec hopping window.
 * Run the main method of this class and then look for the SSE port in the output
 * E.g
 * <code> Serving modern HTTP SSE server sink on port: 8650 </code>
 * You can curl this port <code> curl localhost:8650</code> to view the output of the job.
 *
 * To run via gradle
 * ./gradlew :mantis-examples-wordcount:execute
 */
@Slf4j
public class WordCountJob extends MantisJobProvider<String> {

    // Precompiled once: the original called String#replaceAll per token, which
    // recompiled this pattern for every word.
    private static final Pattern WHITESPACE = Pattern.compile("\\s*");

    @Override
    public Job<String> getJobInstance() {
        return MantisJob
                .source(new IlliadSource())
                // Tokenize the incoming text and count word occurrences per window.
                .stage((context, dataO) -> dataO
                        // Tokenize
                        .flatMap((text) -> Observable.from(tokenize(text)))
                        // On a hopping window of 10 seconds
                        .window(10, TimeUnit.SECONDS)
                        .flatMap((wordCountPairObservable) -> wordCountPairObservable
                                // count how many times a word appears
                                .groupBy(WordCountPair::getWord)
                                .flatMap((groupO) -> groupO.reduce(0, (cnt, wordCntPair) -> cnt + 1)
                                        .map((cnt) -> new WordCountPair(groupO.getKey(), cnt))))
                        .map(WordCountPair::toString)
                        , StageConfigs.scalarToScalarConfig())
                // Reuse built in sink that eagerly subscribes and delivers data over SSE
                .sink(Sinks.eagerSubscribe(Sinks.sse((String data) -> data)))
                .metadata(new Metadata.Builder()
                        .name("WordCount")
                        .description("Reads Homer's The Illiad faster than we can.")
                        .build())
                .create();
    }

    /**
     * Splits the given text into lower-cased words, each paired with a count of 1.
     *
     * @param text raw text to split on whitespace
     * @return one {@link WordCountPair} per token, in order of appearance
     */
    private List<WordCountPair> tokenize(String text) {
        StringTokenizer tokenizer = new StringTokenizer(text);
        List<WordCountPair> wordCountPairs = new ArrayList<>();
        while (tokenizer.hasMoreTokens()) {
            // Strip any residual whitespace and normalize case before counting.
            String word = WHITESPACE.matcher(tokenizer.nextToken()).replaceAll("").toLowerCase();
            wordCountPairs.add(new WordCountPair(word, 1));
        }
        return wordCountPairs;
    }

    public static void main(String[] args) {
        LocalJobExecutorNetworked.execute(new WordCountJob().getJobInstance());
    }
}
| 6,266 |
0 | Create_ds/mantis-examples/wordcount/src/main/java/com/netflix/mantis/examples/wordcount | Create_ds/mantis-examples/wordcount/src/main/java/com/netflix/mantis/examples/wordcount/sources/IlliadSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.wordcount.sources;
import java.io.IOException;
import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import lombok.extern.log4j.Log4j;
import rx.Observable;
/**
* Ignore the contents of this file for the tutorial. The purpose is just to generate a stream of interesting data
* on which we can word count.
*/
@Log4j
public class IlliadSource implements Source<String> {

    /**
     * Re-emits the bundled illiad.txt resource as a stream of lines, once every 10 seconds.
     *
     * @param context runtime information about the current job (unused here)
     * @param index   worker index information (unused here)
     * @return an Observable of inner Observables, each replaying the file line by line
     */
    @Override
    public Observable<Observable<String>> call(Context context, Index index) {
        return Observable.interval(10, TimeUnit.SECONDS)
                .map(__ -> {
                    try {
                        // Locate the text file packaged in this job's resources.
                        Path path = Paths.get(getClass().getClassLoader()
                                .getResource("illiad.txt").toURI());
                        // Deferred iterable: a fresh line iterator is created per subscription.
                        return Observable.from(() -> {
                            try {
                                // NOTE(review): the stream returned by Files.lines is never
                                // closed here, which can leak the underlying file handle —
                                // consider a closing wrapper; confirm before changing.
                                return Files.lines(path).iterator();
                            } catch (IOException ex) {
                                log.error("IOException while reading illiad.txt from resources", ex);
                            }
                            // Fall back to an empty iterator if the file could not be read.
                            return Stream.<String>empty().iterator();
                        }
                        );
                    } catch (URISyntaxException ex) {
                        log.error("URISyntaxException while loading illiad.txt from resources.", ex);
                    }
                    // If the resource could not be resolved, emit nothing for this tick.
                    return Observable.empty();
                });
    }
}
| 6,267 |
0 | Create_ds/mantis-examples/twitter-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis-examples/twitter-sample/src/main/java/com/netflix/mantis/examples/core/WordCountPair.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.core;
import lombok.Data;
/**
* A simple class that holds a word and a count of how many times it has occurred.
*/
@Data
public class WordCountPair {

    // The word being counted.
    private final String word;
    // Number of occurrences attributed to this word.
    private final int count;
}
| 6,268 |
0 | Create_ds/mantis-examples/twitter-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis-examples/twitter-sample/src/main/java/com/netflix/mantis/examples/core/ObservableQueue.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.core;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import rx.Observable;
import rx.subjects.PublishSubject;
import rx.subjects.Subject;
/**
* An Observable that acts as a blocking queue. It is backed by a <code>Subject</code>
*
* @param <T>
*/
public class ObservableQueue<T> implements BlockingQueue<T>, Closeable {
private final Subject<T, T> subject = PublishSubject.<T>create().toSerialized();
public Observable<T> observe() {
return subject;
}
@Override
public boolean add(T t) {
return offer(t);
}
@Override
public boolean offer(T t) {
subject.onNext(t);
return true;
}
@Override
public void close() throws IOException {
subject.onCompleted();
}
@Override
public T remove() {
return noSuchElement();
}
@Override
public T poll() {
return null;
}
@Override
public T element() {
return noSuchElement();
}
private T noSuchElement() {
throw new NoSuchElementException();
}
@Override
public T peek() {
return null;
}
@Override
public void put(T t) throws InterruptedException {
offer(t);
}
@Override
public boolean offer(T t, long timeout, TimeUnit unit) throws InterruptedException {
return offer(t);
}
@Override
public T take() throws InterruptedException {
throw new UnsupportedOperationException("Use observe() instead");
}
@Override
public T poll(long timeout, TimeUnit unit) throws InterruptedException {
return null;
}
@Override
public int remainingCapacity() {
return 0;
}
@Override
public boolean remove(Object o) {
return false;
}
@Override
public boolean containsAll(Collection<?> c) {
return false;
}
@Override
public boolean addAll(Collection<? extends T> c) {
c.forEach(this::offer);
return true;
}
@Override
public boolean removeAll(Collection<?> c) {
return false;
}
@Override
public boolean retainAll(Collection<?> c) {
return false;
}
@Override
public void clear() {
}
@Override
public int size() {
return 0;
}
@Override
public boolean isEmpty() {
return true;
}
@Override
public boolean contains(Object o) {
return false;
}
@Override
public Iterator<T> iterator() {
return Collections.emptyIterator();
}
@Override
public Object[] toArray() {
return new Object[0];
}
@Override
public <T> T[] toArray(T[] a) {
return a;
}
@Override
public int drainTo(Collection<? super T> c) {
return 0;
}
@Override
public int drainTo(Collection<? super T> c, int maxElements) {
return 0;
}
} | 6,269 |
0 | Create_ds/mantis-examples/twitter-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis-examples/twitter-sample/src/main/java/com/netflix/mantis/examples/wordcount/TwitterJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.wordcount;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
import java.util.concurrent.TimeUnit;
import com.mantisrx.common.utils.JsonUtility;
import com.netflix.mantis.examples.config.StageConfigs;
import com.netflix.mantis.examples.core.WordCountPair;
import com.netflix.mantis.examples.wordcount.sources.TwitterSource;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.executor.LocalJobExecutorNetworked;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.runtime.sink.Sinks;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
/**
 * This sample demonstrates connecting to a twitter feed and counting the number of occurrences of words within a 10
 * sec hopping window.
 * Run the main method of this class and then look for the SSE port in the output.
 * E.g.
 * <code> Serving modern HTTP SSE server sink on port: 8650 </code>
 * You can curl this port <code> curl localhost:8650</code> to view the output of the job.
 *
 * To run via gradle:
 * ../gradlew execute --args='consumerKey consumerSecret token tokensecret'
 */
@Slf4j
public class TwitterJob extends MantisJobProvider<String> {

    /**
     * Assembles the job: Twitter source -> one scalar stage that filters English tweets,
     * tokenizes them, and counts words per 10-second window -> SSE sink.
     *
     * @return a runnable Mantis {@link Job} definition
     */
    @Override
    public Job<String> getJobInstance() {
        return MantisJob
                .source(new TwitterSource())
                .stage((context, dataO) -> dataO
                        // Each event is the raw JSON payload of a tweet.
                        .map(JsonUtility::jsonToMap)
                        // Keep only English tweets that have a text body.
                        .filter((eventMap) -> {
                            if (eventMap.containsKey("lang") && eventMap.containsKey("text")) {
                                String lang = (String) eventMap.get("lang");
                                return "en".equalsIgnoreCase(lang);
                            }
                            return false;
                        }).map((eventMap) -> (String) eventMap.get("text"))
                        // Tokenize the tweets into words.
                        .flatMap((text) -> Observable.from(tokenize(text)))
                        // On a hopping window of 10 seconds.
                        .window(10, TimeUnit.SECONDS)
                        .flatMap((wordCountPairObservable) -> wordCountPairObservable
                                // Count how many times each word appears within the window.
                                .groupBy(WordCountPair::getWord)
                                .flatMap((groupO) -> groupO.reduce(0, (cnt, wordCntPair) -> cnt + 1)
                                        .map((cnt) -> new WordCountPair(groupO.getKey(), cnt))))
                        .map(WordCountPair::toString)
                        .doOnNext((cnt) -> log.info(cnt))
                        , StageConfigs.scalarToScalarConfig())
                // Reuse built in sink that eagerly subscribes and delivers data over SSE.
                .sink(Sinks.eagerSubscribe(Sinks.sse((String data) -> data)))
                .metadata(new Metadata.Builder()
                        .name("TwitterSample")
                        .description("Connects to a Twitter feed")
                        .build())
                .create();
    }

    /**
     * Splits a tweet's text into lower-cased words, each paired with a count of 1.
     *
     * @param text the tweet's text body
     * @return one {@link WordCountPair} per whitespace-delimited token
     */
    private List<WordCountPair> tokenize(String text) {
        StringTokenizer tokenizer = new StringTokenizer(text);
        List<WordCountPair> wordCountPairs = new ArrayList<>();
        while (tokenizer.hasMoreTokens()) {
            // StringTokenizer tokens never contain whitespace, so the original
            // replaceAll("\\s*", "") was a no-op and has been removed.
            String word = tokenizer.nextToken().toLowerCase();
            wordCountPairs.add(new WordCountPair(word, 1));
        }
        return wordCountPairs;
    }

    /**
     * Entry point. Expects exactly four arguments:
     * consumerKey consumerSecret token tokenSecret.
     */
    public static void main(String[] args) {
        String consumerKey = null;
        String consumerSecret = null;
        String token = null;
        String tokenSecret = null;
        if (args.length != 4) {
            // Fixed the truncated usage message (the closing '>' was missing) and exit
            // with a non-zero status so callers can detect the usage error.
            System.out.println("Usage: java com.netflix.mantis.examples.TwitterJob <consumerKey> <consumerSecret> <token> <tokenSecret>");
            System.exit(1);
        } else {
            consumerKey = args[0].trim();
            consumerSecret = args[1].trim();
            token = args[2].trim();
            tokenSecret = args[3].trim();
        }
        LocalJobExecutorNetworked.execute(new TwitterJob().getJobInstance(),
                new Parameter(TwitterSource.CONSUMER_KEY_PARAM, consumerKey),
                new Parameter(TwitterSource.CONSUMER_SECRET_PARAM, consumerSecret),
                new Parameter(TwitterSource.TOKEN_PARAM, token),
                new Parameter(TwitterSource.TOKEN_SECRET_PARAM, tokenSecret)
        );
    }
}
| 6,270 |
0 | Create_ds/mantis-examples/twitter-sample/src/main/java/com/netflix/mantis/examples/wordcount | Create_ds/mantis-examples/twitter-sample/src/main/java/com/netflix/mantis/examples/wordcount/sources/TwitterSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.wordcount.sources;
import java.util.Arrays;
import java.util.List;
import com.google.common.collect.Lists;
import com.netflix.mantis.examples.core.ObservableQueue;
import com.twitter.hbc.ClientBuilder;
import com.twitter.hbc.core.Constants;
import com.twitter.hbc.core.endpoint.StatusesFilterEndpoint;
import com.twitter.hbc.core.processor.StringDelimitedProcessor;
import com.twitter.hbc.httpclient.BasicClient;
import com.twitter.hbc.httpclient.auth.Authentication;
import com.twitter.hbc.httpclient.auth.OAuth1;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import rx.Observable;
/**
 * A Mantis Source that wraps an underlying Twitter source based on Twitter's Hosebird Client (hbc).
 */
public class TwitterSource implements Source<String> {

    // Job parameter names under which OAuth credentials and tracked terms are supplied.
    public static final String CONSUMER_KEY_PARAM = "consumerKey";
    public static final String CONSUMER_SECRET_PARAM = "consumerSecret";
    public static final String TOKEN_PARAM = "token";
    public static final String TOKEN_SECRET_PARAM = "tokenSecret";
    public static final String TERMS_PARAM = "terms";

    // Bridge between the push-based hbc client and the Observable returned by call().
    private final ObservableQueue<String> twitterObservable = new ObservableQueue<>();

    // transient: holds live network state and must not be serialized with the source.
    private transient BasicClient client;

    /**
     * Returns the stream of raw tweet payloads (strings) pushed in by the hbc client.
     *
     * @param context runtime information about the current job (unused here)
     * @param index   worker index information (unused here)
     * @return a single inner Observable carrying the tweet stream
     */
    @Override
    public Observable<Observable<String>> call(Context context, Index index) {
        return Observable.just(twitterObservable.observe());
    }

    /**
     * Define parameters required by this source. The four credential parameters are
     * required; the terms parameter defaults to "Netflix,Dark".
     *
     * @return the list of parameter definitions
     */
    @Override
    public List<ParameterDefinition<?>> getParameters() {
        List<ParameterDefinition<?>> params = Lists.newArrayList();
        // Consumer key
        params.add(new StringParameter()
                .name(CONSUMER_KEY_PARAM)
                .description("twitter consumer key")
                .validator(Validators.notNullOrEmpty())
                .required()
                .build());
        params.add(new StringParameter()
                .name(CONSUMER_SECRET_PARAM)
                .description("twitter consumer secret")
                .validator(Validators.notNullOrEmpty())
                .required()
                .build());
        params.add(new StringParameter()
                .name(TOKEN_PARAM)
                .description("twitter token")
                .validator(Validators.notNullOrEmpty())
                .required()
                .build());
        params.add(new StringParameter()
                .name(TOKEN_SECRET_PARAM)
                .description("twitter token secret")
                .validator(Validators.notNullOrEmpty())
                .required()
                .build());
        params.add(new StringParameter()
                .name(TERMS_PARAM)
                .description("terms to follow")
                .validator(Validators.notNullOrEmpty())
                .defaultValue("Netflix,Dark")
                .build());
        return params;
    }

    /**
     * Init method is called only once during initialization. It is the ideal place to perform one time
     * configuration actions. Here it reads the OAuth credentials and tracked terms from the job
     * parameters, builds the hbc client, and opens the connection to the Twitter stream.
     *
     * @param context Provides access to Mantis system information like JobId, Job parameters etc
     * @param index   This provides access to the unique workerIndex assigned to this container. It also provides
     *                the total number of workers of this job.
     */
    @Override
    public void init(Context context, Index index) {
        String consumerKey = (String) context.getParameters().get(CONSUMER_KEY_PARAM);
        String consumerSecret = (String) context.getParameters().get(CONSUMER_SECRET_PARAM);
        String token = (String) context.getParameters().get(TOKEN_PARAM);
        String tokenSecret = (String) context.getParameters().get(TOKEN_SECRET_PARAM);
        String terms = (String) context.getParameters().get(TERMS_PARAM);
        Authentication auth = new OAuth1(consumerKey,
                consumerSecret,
                token,
                tokenSecret);
        // Track only statuses matching the comma-separated terms list.
        StatusesFilterEndpoint endpoint = new StatusesFilterEndpoint();
        String[] termArray = terms.split(",");
        List<String> termsList = Arrays.asList(termArray);
        endpoint.trackTerms(termsList);
        // The StringDelimitedProcessor writes each received payload into twitterObservable.
        client = new ClientBuilder()
                .name("twitter-source")
                .hosts(Constants.STREAM_HOST)
                .endpoint(endpoint)
                .authentication(auth)
                .processor(new StringDelimitedProcessor(twitterObservable))
                .build();
        client.connect();
    }
}
| 6,271 |
0 | Create_ds/mantis-examples/twitter-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis-examples/twitter-sample/src/main/java/com/netflix/mantis/examples/config/StageConfigs.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.config;
import java.util.Map;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.runtime.KeyToScalar;
import io.mantisrx.runtime.ScalarToKey;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.codec.JacksonCodecs;
/**
 * Factory for the stage configurations shared by the example jobs. Each method bundles a
 * codec (how events are serialized between workers) with stage-specific settings.
 */
public class StageConfigs {

    private StageConfigs() {
        // Non-instantiable utility class.
    }

    /** Config for a scalar-to-scalar stage exchanging plain strings. */
    public static ScalarToScalar.Config<String, String> scalarToScalarConfig() {
        return new ScalarToScalar.Config<String, String>()
                .codec(Codecs.string());
    }

    /** Config for a keyed-to-scalar stage; idle keys expire after 10 seconds. */
    public static KeyToScalar.Config<String, Map<String, Object>, String> keyToScalarConfig() {
        return new KeyToScalar.Config<String, Map<String, Object>, String>()
                .description("sum events ")
                .keyExpireTimeSeconds(10)
                .codec(Codecs.string());
    }

    /** Config for a scalar-to-keyed stage that groups event maps (e.g. by ip). */
    public static ScalarToKey.Config<String, String, Map<String, Object>> scalarToKeyConfig() {
        return new ScalarToKey.Config<String, String, Map<String, Object>>()
                .description("Group event data by ip")
                .concurrentInput()
                .keyExpireTimeSeconds(1)
                .codec(JacksonCodecs.mapStringObject());
    }
}
| 6,272 |
0 | Create_ds/mantis-examples/groupby-sample/src/main/java/com/netflix/mantis | Create_ds/mantis-examples/groupby-sample/src/main/java/com/netflix/mantis/samples/RequestAggregationJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples;
import com.netflix.mantis.samples.source.RandomRequestSource;
import com.netflix.mantis.samples.stage.AggregationStage;
import com.netflix.mantis.samples.stage.CollectStage;
import com.netflix.mantis.samples.stage.GroupByStage;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.executor.LocalJobExecutorNetworked;
import io.mantisrx.runtime.sink.Sinks;
import lombok.extern.slf4j.Slf4j;
/**
 * This sample demonstrates the use of a multi-stage job in Mantis. Multi-stage jobs are useful when a single
 * container is incapable of processing the entire stream of events.
 * Each stage represents one of these types of
 * computations: Scalar->Scalar, Scalar->Group, Group->Scalar, Group->Group.
 *
 * At deploy time the user can configure the number of workers for each stage and the resource requirements for each worker.
 * This sample has 3 stages:
 * 1. {@link GroupByStage} Receives the raw events, groups them by their category and sends them to the workers of stage 2 in such a way
 * that all events for a particular group will land on the exact same worker of stage 2.
 * 2. {@link AggregationStage} Receives events tagged by their group from the previous stage. Windows over them and
 * sums up the counts of each group it has seen.
 * 3. {@link CollectStage} Receives the aggregates generated by the previous stage, collects them over a window and
 * generates a consolidated report which is sent to the default Server Sent Event (SSE) sink.
 *
 * Run this sample by executing the main method of this class. Then look for the SSE port where the output of this job
 * will be available for streaming. E.g. Serving modern HTTP SSE server sink on port: 8299
 * Via the command line do: ../gradlew execute
 */
@Slf4j
public class RequestAggregationJob extends MantisJobProvider<String> {

    /**
     * Assembles the three-stage job: random request source -> group by path ->
     * per-path counts -> consolidated report, delivered over SSE.
     *
     * @return a runnable Mantis {@link Job} definition
     */
    @Override
    public Job<String> getJobInstance() {
        return MantisJob
                // Stream Request Events from our random data generator source.
                .source(new RandomRequestSource())
                // Groups requests by path.
                .stage(new GroupByStage(), GroupByStage.config())
                // Computes count per path over a window.
                .stage(new AggregationStage(), AggregationStage.config())
                // Collects the data and makes it available over SSE.
                .stage(new CollectStage(), CollectStage.config())
                // Reuse built in sink that eagerly subscribes and delivers data over SSE.
                .sink(Sinks.eagerSubscribe(
                        Sinks.sse((String data) -> data)))
                .metadata(new Metadata.Builder()
                        .name("GroupByPath")
                        .description("Connects to a random data generator source"
                                + " and counts the number of requests for each uri within a window")
                        .build())
                .create();
    }

    /** Entry point: runs the job locally within this JVM. */
    public static void main(String[] args) {
        // To run locally we use the LocalJobExecutor.
        LocalJobExecutorNetworked.execute(new RequestAggregationJob().getJobInstance());
    }
}
0 | Create_ds/mantis-examples/groupby-sample/src/main/java/com/netflix/mantis/samples | Create_ds/mantis-examples/groupby-sample/src/main/java/com/netflix/mantis/samples/proto/RequestEvent.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples.proto;
import java.io.IOException;
import io.mantisrx.common.codec.Codec;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import lombok.Builder;
import lombok.Data;
/**
* A simple POJO that holds data about a request event.
*/
@Data
@Builder
public class RequestEvent {

    // Shared Jackson mapper/reader reused for all (de)serialization of this type.
    private static final ObjectMapper mapper = new ObjectMapper();
    private static final ObjectReader requestEventReader = mapper.readerFor(RequestEvent.class);

    // URI path that was requested, e.g. "/login".
    private final String requestPath;
    // IP address of the client that issued the request.
    private final String ipAddress;

    /**
     * The codec defines how this class should be serialized before transporting across network.
     * Encoding is JSON via Jackson; decode failures surface as {@link RuntimeException}.
     *
     * @return a JSON-backed {@link Codec} for {@link RequestEvent}
     */
    public static Codec<RequestEvent> requestEventCodec() {
        return new Codec<RequestEvent>() {
            @Override
            public RequestEvent decode(byte[] bytes) {
                try {
                    return requestEventReader.readValue(bytes);
                } catch (IOException e) {
                    // Wrap and rethrow, preserving the cause for diagnostics.
                    throw new RuntimeException(e);
                }
            }

            @Override
            public byte[] encode(final RequestEvent value) {
                try {
                    return mapper.writeValueAsBytes(value);
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        };
    }
}
| 6,274 |
0 | Create_ds/mantis-examples/groupby-sample/src/main/java/com/netflix/mantis/samples | Create_ds/mantis-examples/groupby-sample/src/main/java/com/netflix/mantis/samples/proto/RequestAggregation.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples.proto;
import java.io.IOException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import io.mantisrx.common.codec.Codec;
import lombok.Builder;
import lombok.Data;
/**
* A simple POJO that holds the count of how many times a particular request path was invoked.
*/
@Data
@Builder
public class RequestAggregation {

    // One Jackson mapper/reader pair, shared by every codec instance for this type.
    private static final ObjectMapper mapper = new ObjectMapper();
    private static final ObjectReader requestAggregationReader = mapper.readerFor(RequestAggregation.class);

    // Request path and the number of times it was seen in the window.
    private final String path;
    private final int count;

    /**
     * Supplies the codec used to (de)serialize {@link RequestAggregation} instances when
     * they cross worker boundaries. Serialization is plain JSON via Jackson; any failure
     * is rethrown as a {@link RuntimeException} with its cause preserved.
     *
     * @return a JSON codec for this type
     */
    public static Codec<RequestAggregation> requestAggregationCodec() {
        return new Codec<RequestAggregation>() {

            @Override
            public byte[] encode(final RequestAggregation aggregation) {
                try {
                    return mapper.writeValueAsBytes(aggregation);
                } catch (Exception writeError) {
                    throw new RuntimeException(writeError);
                }
            }

            @Override
            public RequestAggregation decode(byte[] payload) {
                try {
                    return requestAggregationReader.readValue(payload);
                } catch (IOException readError) {
                    throw new RuntimeException(readError);
                }
            }
        };
    }
}
| 6,275 |
0 | Create_ds/mantis-examples/groupby-sample/src/main/java/com/netflix/mantis/samples | Create_ds/mantis-examples/groupby-sample/src/main/java/com/netflix/mantis/samples/proto/AggregationReport.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples.proto;
import java.util.Map;
import lombok.Data;
/**
* A simple POJO which holds the result of aggregating counts per request path.
*/
@Data
public class AggregationReport {

    // Map of request path -> number of requests counted for that path.
    private final Map<String, Integer> pathToCountMap;
}
| 6,276 |
0 | Create_ds/mantis-examples/groupby-sample/src/main/java/com/netflix/mantis/samples | Create_ds/mantis-examples/groupby-sample/src/main/java/com/netflix/mantis/samples/source/RandomRequestSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples.source;
import java.util.concurrent.TimeUnit;
import com.netflix.mantis.samples.proto.RequestEvent;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import lombok.extern.slf4j.Slf4j;
import net.andreinc.mockneat.MockNeat;
import rx.Observable;
/**
* Generates random set of RequestEvents at a preconfigured interval.
*/
@Slf4j
public class RandomRequestSource implements Source<RequestEvent> {

    // Initialized in init(); produces the random IPs and request paths below.
    private MockNeat mockDataGenerator;

    /**
     * Emits a randomly generated {@link RequestEvent} every 250 milliseconds.
     *
     * @param context runtime information about the current job (unused here)
     * @param index   worker index information (unused here)
     * @return a single inner Observable carrying the generated events
     */
    @Override
    public Observable<Observable<RequestEvent>> call(Context context, Index index) {
        return Observable.just(Observable.interval(250, TimeUnit.MILLISECONDS).map((tick) -> {
            String ip = mockDataGenerator.ipv4s().get();
            // Pick a request path with a fixed probability distribution.
            // ("probabilites" appears to be the MockNeat library's own method
            // spelling — do not "fix" it here; confirm against the MockNeat API.)
            String path = mockDataGenerator.probabilites(String.class)
                    .add(0.1, "/login")
                    .add(0.2, "/genre/horror")
                    .add(0.5, "/genre/comedy")
                    .add(0.2, "/mylist")
                    .get();
            return RequestEvent.builder().ipAddress(ip).requestPath(path).build();
        }).doOnNext((event) -> {
            log.debug("Generated Event {}", event);
        }));
    }

    /** One-time setup: obtain the thread-local MockNeat data generator. */
    @Override
    public void init(Context context, Index index) {
        mockDataGenerator = MockNeat.threadLocal();
    }
}
| 6,277 |
0 | Create_ds/mantis-examples/groupby-sample/src/main/java/com/netflix/mantis/samples | Create_ds/mantis-examples/groupby-sample/src/main/java/com/netflix/mantis/samples/stage/AggregationStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples.stage;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.netflix.mantis.samples.proto.RequestAggregation;
import com.netflix.mantis.samples.proto.RequestEvent;
import io.mantisrx.common.MantisGroup;
import io.mantisrx.common.codec.Codec;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.GroupToScalar;
import io.mantisrx.runtime.computation.GroupToScalarComputation;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.IntParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
/**
* This is the 2nd stage of this three stage job. It receives events from {@link GroupByStage}
* This stage converts Grouped Events to Scalar events {@link GroupToScalarComputation} Typically used in the
* reduce portion of a map reduce computation.
*
* This stage receives an <code>Observable<MantisGroup<String,RequestEvent>></code>. This represents a stream of
* request events tagged by the URI Path they belong to.
* This stage then groups the events by the path and and counts the number of invocations of each path over a window.
*/
@Slf4j
public class AggregationStage implements GroupToScalarComputation<String, RequestEvent, RequestAggregation> {

    public static final String AGGREGATION_DURATION_MSEC_PARAM = "AggregationDurationMsec";

    // Window length in milliseconds; populated from job parameters in init().
    int aggregationDurationMsec;

    /**
     * The call method is invoked by the Mantis runtime while executing the job.
     *
     * @param context Provides metadata information related to the current job.
     * @param mantisGroupO This is an Observable of {@link MantisGroup} events. Each event is a pair of the Key -> uri Path and
     *                     the {@link RequestEvent} event itself.
     * @return a stream of per-path request counts, one batch per window
     */
    @Override
    public Observable<RequestAggregation> call(Context context, Observable<MantisGroup<String, RequestEvent>> mantisGroupO) {
        return mantisGroupO
                .window(aggregationDurationMsec, TimeUnit.MILLISECONDS)
                .flatMap((omg) -> omg.groupBy(MantisGroup::getKeyValue)
                        // Count events per key. (The original reduce reassigned its lambda
                        // parameter — "accumulator = accumulator + 1" — which evaluates to
                        // the same value but reads misleadingly; a plain expression is used.)
                        .flatMap((go) -> go.reduce(0, (accumulator, value) -> accumulator + 1)
                                .map((count) -> RequestAggregation.builder().count(count).path(go.getKey()).build())
                                .doOnNext((aggregate) -> {
                                    log.debug("Generated aggregate {}", aggregate);
                                })
                        ));
    }

    /**
     * Invoked only once during job startup. A good place to add one time initialization actions.
     *
     * @param context provides access to the job parameters
     */
    @Override
    public void init(Context context) {
        aggregationDurationMsec = (int) context.getParameters().get(AGGREGATION_DURATION_MSEC_PARAM, 1000);
    }

    /**
     * Provides the Mantis runtime configuration information about the type of computation done by this stage.
     * E.g in this case it specifies this is a GroupToScalar computation and also provides a {@link Codec} on how to
     * serialize the {@link RequestAggregation} events before sending it to the {@link CollectStage}
     *
     * @return the stage configuration
     */
    public static GroupToScalar.Config<String, RequestEvent, RequestAggregation> config() {
        return new GroupToScalar.Config<String, RequestEvent, RequestAggregation>()
                .description("sum events for a path")
                .codec(RequestAggregation.requestAggregationCodec())
                .withParameters(getParameters());
    }

    /**
     * Here we declare stage specific parameters.
     *
     * @return the list of parameter definitions for this stage
     */
    public static List<ParameterDefinition<?>> getParameters() {
        List<ParameterDefinition<?>> params = new ArrayList<>();
        // Aggregation duration: window size in milliseconds, bounded to a sane range.
        params.add(new IntParameter()
                .name(AGGREGATION_DURATION_MSEC_PARAM)
                .description("window size for aggregation")
                .validator(Validators.range(100, 10000))
                .defaultValue(5000)
                .build());
        return params;
    }
}
| 6,278 |
0 | Create_ds/mantis-examples/groupby-sample/src/main/java/com/netflix/mantis/samples | Create_ds/mantis-examples/groupby-sample/src/main/java/com/netflix/mantis/samples/stage/CollectStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples.stage;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.mantis.samples.proto.RequestAggregation;
import com.netflix.mantis.samples.proto.AggregationReport;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.computation.ScalarComputation;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
/**
* This is the final stage of this 3 stage job. It receives events from {@link AggregationStage}
* The role of this stage is to collect aggregates generated by the previous stage for all the groups within
* a window and generate a unified report of them.
*/
@Slf4j
public class CollectStage implements ScalarComputation<RequestAggregation,String> {
private static final ObjectMapper mapper = new ObjectMapper();
@Override
public Observable<String> call(Context context, Observable<RequestAggregation> requestAggregationO) {
return requestAggregationO
.window(5, TimeUnit.SECONDS)
.flatMap((requestAggO) -> requestAggO
.reduce(new RequestAggregationAccumulator(),(acc, requestAgg) -> acc.addAggregation(requestAgg))
.map(RequestAggregationAccumulator::generateReport)
.doOnNext((report) -> {
log.debug("Generated Collection report {}", report);
})
)
.map((report) -> {
try {
return mapper.writeValueAsString(report);
} catch (JsonProcessingException e) {
log.error(e.getMessage());
return null;
}
}).filter(Objects::nonNull);
}
@Override
public void init(Context context) {
}
public static ScalarToScalar.Config<RequestAggregation,String> config(){
return new ScalarToScalar.Config<RequestAggregation,String>()
.codec(Codecs.string());
}
/**
* The accumulator class as the name suggests accumulates all aggregates seen during a window and
* generates a consolidated report at the end.
*/
static class RequestAggregationAccumulator {
private final Map<String, Integer> pathToCountMap = new HashMap<>();
public RequestAggregationAccumulator() {}
public RequestAggregationAccumulator addAggregation(RequestAggregation agg) {
pathToCountMap.put(agg.getPath(), agg.getCount());
return this;
}
public AggregationReport generateReport() {
log.info("Generated report from=> {}", pathToCountMap);
return new AggregationReport(pathToCountMap);
}
}
}
| 6,279 |
0 | Create_ds/mantis-examples/groupby-sample/src/main/java/com/netflix/mantis/samples | Create_ds/mantis-examples/groupby-sample/src/main/java/com/netflix/mantis/samples/stage/GroupByStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples.stage;
import java.util.ArrayList;
import java.util.List;
import com.netflix.mantis.samples.proto.RequestEvent;
import io.mantisrx.common.MantisGroup;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.ScalarToGroup;
import io.mantisrx.runtime.computation.ToGroupComputation;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
/**
* This is the first stage of this 3 stage job. It is at the head of the computation DAG
* This stage converts Scalar Events to Grouped Events {@link ToGroupComputation}. The grouped events are then
* send to the next stage of the Mantis Job in a way such that all events belonging to a particular group will
* land on the same worker of the next stage.
*
* It receives a stream of {@link RequestEvent} and groups them by either the path or the IP address
* based on the parameters passed by the user.
*/
@Slf4j
public class GroupByStage implements ToGroupComputation<RequestEvent, String, RequestEvent> {

    private static final String GROUPBY_FIELD_PARAM = "groupByField";

    // True when grouping by request path (the default); false groups by client IP address.
    private boolean groupByPath = true;

    /**
     * Tags every incoming {@link RequestEvent} with its group key so the Mantis runtime can route
     * all events of a group to the same worker of the next stage.
     *
     * @param context job metadata supplied by the Mantis runtime.
     * @param requestEventO the raw event stream from the source.
     * @return a stream of {@link MantisGroup} pairs of (group key, event).
     */
    @Override
    public Observable<MantisGroup<String, RequestEvent>> call(Context context, Observable<RequestEvent> requestEventO) {
        return requestEventO
                .map((requestEvent) -> {
                    String key = groupByPath ? requestEvent.getRequestPath() : requestEvent.getIpAddress();
                    return new MantisGroup<>(key, requestEvent);
                });
    }

    /** Resolves the group-by field from job parameters once at startup. */
    @Override
    public void init(Context context) {
        String groupByField = (String) context.getParameters().get(GROUPBY_FIELD_PARAM, "path");
        // equalsIgnoreCase already yields the boolean; the redundant "? true : false" is removed.
        groupByPath = groupByField.equalsIgnoreCase("path");
    }

    /**
     * Here we declare stage specific parameters.
     *
     * @return the parameter definitions for this stage.
     */
    public static List<ParameterDefinition<?>> getParameters() {
        List<ParameterDefinition<?>> params = new ArrayList<>();
        // Group by field
        params.add(new StringParameter()
                .name(GROUPBY_FIELD_PARAM)
                .description("The key to group events by")
                .validator(Validators.notNullOrEmpty())
                .defaultValue("path")
                .build());
        return params;
    }

    /** Mantis stage config: scalar-to-group with concurrent input and a custom RequestEvent codec. */
    public static ScalarToGroup.Config<RequestEvent, String, RequestEvent> config() {
        return new ScalarToGroup.Config<RequestEvent, String, RequestEvent>()
                .description("Group event data by path/ip")
                .concurrentInput() // signifies events can be processed in parallel
                .withParameters(getParameters())
                .codec(RequestEvent.requestEventCodec());
    }
}
| 6,280 |
0 | Create_ds/mantis-examples/mantis-publish-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis-examples/mantis-publish-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/Application.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.spectator.nflx.SpectatorModule;
import io.mantisrx.publish.api.EventPublisher;
import io.mantisrx.publish.netty.guice.MantisRealtimeEventsPublishModule;
/**
* A simple example that uses Guice to inject the {@link EventPublisher} part of the mantis-publish library
* to send events to Mantis.
*
* The mantis-publish library provides on-demand source side filtering via MQL. When a user publishes
* events via this library the events may not be actually shipped to Mantis. A downstream consumer needs
* to first register a query and the query needs to match events published by the user.
*/
public class Application {

    /**
     * Wires the Guice object graph — Archaius configuration, the Mantis realtime publish module,
     * Spectator metrics, plus the local {@link BasicModule} bindings — then starts publishing
     * sample events to Mantis via the bound {@link IDataPublisher} implementation.
     * Note: generateAndSendEventsToMantis blocks (the bound SampleDataPublisher subscribes via
     * toBlocking), so main does not return under normal operation.
     */
    public static void main(String [] args) {
        Injector injector = Guice.createInjector(new BasicModule(), new ArchaiusModule(),
                new MantisRealtimeEventsPublishModule(), new SpectatorModule());
        IDataPublisher publisher = injector.getInstance(IDataPublisher.class);
        publisher.generateAndSendEventsToMantis();
    }
}
| 6,281 |
0 | Create_ds/mantis-examples/mantis-publish-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis-examples/mantis-publish-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/BasicModule.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample;
import com.google.inject.AbstractModule;
/** Guice module binding this sample's interfaces to their concrete implementations. */
public class BasicModule extends AbstractModule {
    @Override
    protected void configure() {
        // IDataPublisher -> SampleDataPublisher: publishes generated events via mantis-publish.
        bind(IDataPublisher.class).to(SampleDataPublisher.class);
        // IDataGenerator -> DataGenerator: produces the random RequestEvent stream.
        bind(IDataGenerator.class).to(DataGenerator.class);
    }
}
| 6,282 |
0 | Create_ds/mantis-examples/mantis-publish-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis-examples/mantis-publish-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/SampleDataPublisher.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample;
import com.google.inject.Inject;
import io.mantisrx.publish.api.Event;
import io.mantisrx.publish.api.EventPublisher;
import io.mantisrx.publish.api.PublishStatus;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
/**
* A simple example that uses Guice to inject the {@link EventPublisher} part of the mantis-publish library
* to send events to Mantis.
*
* The mantis-publish library provides on-demand source side filtering via MQL. When a user publishes
* events via this library the events may not be actually shipped to Mantis. A downstream consumer needs
* to first register a query and the query needs to match events published by the user.
*
*/
@Slf4j
public class SampleDataPublisher implements IDataPublisher {

    @Inject
    EventPublisher publisher;

    @Inject
    DataGenerator dataGenerator;

    /**
     * Pulls randomly generated request events and hands each one to the mantis-publish
     * {@link EventPublisher}, blocking on the stream and logging the send status of every event.
     * Events go to the default stream; whether an event actually leaves the process depends on a
     * downstream consumer having registered a matching MQL query.
     */
    @Override
    public void generateAndSendEventsToMantis() {
        Observable<PublishStatus> sendStatusO = dataGenerator
                .generateEvents()
                .map(requestEvent -> new Event(requestEvent.toMap()))
                .flatMap(event -> Observable.from(publisher.publish(event).toCompletableFuture()));
        sendStatusO
                .toBlocking()
                .subscribe(status -> log.info("Mantis publish JavaApp send event status => {}", status));
    }
}
| 6,283 |
0 | Create_ds/mantis-examples/mantis-publish-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis-examples/mantis-publish-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/IDataPublisher.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample;
/** Publishes a stream of generated events to Mantis via the mantis-publish library. */
public interface IDataPublisher {
    /** Generates events and sends them to Mantis; implementations may block indefinitely. */
    void generateAndSendEventsToMantis();
}
| 6,284 |
0 | Create_ds/mantis-examples/mantis-publish-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis-examples/mantis-publish-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/IDataGenerator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample;
import com.netflix.mantis.examples.mantispublishsample.proto.RequestEvent;
import rx.Observable;
/**
 * A data generator that generates a stream of {@link RequestEvent} at a fixed interval.
 */
public interface IDataGenerator {
    /**
     * @return a stream of randomly generated {@link RequestEvent}s emitted at a fixed interval
     *     (see DataGenerator, the bound implementation).
     */
    Observable<RequestEvent> generateEvents();
}
| 6,285 |
0 | Create_ds/mantis-examples/mantis-publish-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis-examples/mantis-publish-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/DataGenerator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample;
import java.util.concurrent.TimeUnit;
import com.netflix.mantis.examples.mantispublishsample.proto.RequestEvent;
import net.andreinc.mockneat.MockNeat;
import rx.Observable;
/**
 * Uses MockNeat to generate a random stream of events. Each event represents a hypothetical
 * request made by an end user to this service.
 */
public class DataGenerator implements IDataGenerator {

    // Emission period for the event stream, in milliseconds.
    private final int rateMs = 1000;
    private final MockNeat mockDataGenerator = MockNeat.threadLocal();

    /** Emits one randomly generated {@link RequestEvent} every {@code rateMs} milliseconds. */
    @Override
    public Observable<RequestEvent> generateEvents() {
        return Observable
                .interval(rateMs, TimeUnit.MILLISECONDS)
                .map(tick -> generateEvent());
    }

    /** Draws each field of a RequestEvent from a weighted distribution. */
    private RequestEvent generateEvent() {
        String uriPath = mockDataGenerator.probabilites(String.class)
                .add(0.1, "/login")
                .add(0.2, "/genre/horror")
                .add(0.5, "/genre/comedy")
                .add(0.2, "/mylist")
                .get();
        String device = mockDataGenerator.probabilites(String.class)
                .add(0.1, "ps4")
                .add(0.1, "xbox")
                .add(0.2, "browser")
                .add(0.3, "ios")
                .add(0.3, "android")
                .get();
        String randomUserId = mockDataGenerator.strings().size(10).get();
        // NOTE(review): 500 appears twice (weights 0.1 and 0.2, i.e. 30% overall) and no 4xx code
        // is present — possibly one entry was meant to be a different status. Preserved as-is.
        int httpStatus = mockDataGenerator.probabilites(Integer.class)
                .add(0.1, 500)
                .add(0.7, 200)
                .add(0.2, 500)
                .get();
        String countryName = mockDataGenerator.countries().names().get();
        return RequestEvent.builder()
                .status(httpStatus)
                .uri(uriPath)
                .country(countryName)
                .userId(randomUserId)
                .deviceType(device)
                .build();
    }
}
| 6,286 |
0 | Create_ds/mantis-examples/mantis-publish-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample | Create_ds/mantis-examples/mantis-publish-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/proto/RequestEvent.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample.proto;
import java.util.HashMap;
import java.util.Map;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import lombok.Builder;
import lombok.Data;
/**
 * Represents a Request Event a service may receive. Immutable value object (Lombok {@code @Data}
 * + {@code @Builder}) carrying the fields that get published to Mantis.
 */
@Data
@Builder
public class RequestEvent {

    private static final ObjectMapper mapper = new ObjectMapper();
    // Reader for deserializing RequestEvent JSON. Not referenced within this class;
    // presumably kept for callers or future use — TODO confirm before removing.
    private static final ObjectReader requestEventReader = mapper.readerFor(RequestEvent.class);

    private final String userId;
    private final String uri;
    private final int status;
    private final String country;
    private final String deviceType;

    /** Flattens this event into a map keyed by field name (the shape the mantis-publish Event API consumes). */
    public Map<String,Object> toMap() {
        Map<String,Object> data = new HashMap<>();
        data.put("userId", userId);
        data.put("uri", uri);
        data.put("status", status);
        data.put("country", country);
        data.put("deviceType", deviceType);
        return data;
    }

    /**
     * Serializes this event to JSON.
     *
     * @return the JSON string, or {@code null} if serialization fails.
     *     NOTE(review): failures are reported via printStackTrace and swallowed;
     *     consider a logger and/or propagating the error.
     */
    public String toJsonString() {
        try {
            return mapper.writeValueAsString(this);
        } catch (JsonProcessingException e) {
            e.printStackTrace();
            return null;
        }
    }
}
| 6,287 |
0 | Create_ds/mantis-examples/jobconnector-sample/src/main/java/com/netflix/mantis | Create_ds/mantis-examples/jobconnector-sample/src/main/java/com/netflix/mantis/samples/JobConnectorJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.mantis.samples.stage.EchoStage;
import io.mantisrx.connector.job.core.MantisSourceJobConnector;
import io.mantisrx.connector.job.source.JobSource;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.executor.LocalJobExecutorNetworked;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.runtime.sink.Sinks;
import lombok.extern.slf4j.Slf4j;
/**
* This sample demonstrates how to connect to the output of another job using the {@link JobSource}
* If the target job is a source job then you can request a filtered stream of events from the source job
* by passing an MQL query.
* In this example we connect to the latest running instance of SyntheticSourceJob using the query
* select country from stream where status==500 and simply echo the output.
*
* Run this sample by executing the main method of this class. Then look for the SSE port where the output of this job
* will be available for streaming. E.g Serving modern HTTP SSE server sink on port: 8299
* via command line do ../gradlew execute
*
* Note: this sample may not work in your IDE as the Mantis runtime needs to discover the location of the
* SyntheticSourceJob.
*/
@Slf4j
public class JobConnectorJob extends MantisJobProvider<String> {
@Override
public Job<String> getJobInstance() {
return MantisJob
// Stream Events from a job specified via job parameters
.source(new JobSource())
// Simple echoes the data
.stage(new EchoStage(), EchoStage.config())
// Reuse built in sink that eagerly subscribes and delivers data over SSE
.sink(Sinks.eagerSubscribe(
Sinks.sse((String data) -> data)))
.metadata(new Metadata.Builder()
.name("ConnectToJob")
.description("Connects to the output of another job"
+ " and simply echoes the data")
.build())
.create();
}
public static void main(String[] args) throws JsonProcessingException {
Map<String,Object> targetMap = new HashMap<>();
List<JobSource.TargetInfo> targetInfos = new ArrayList<>();
JobSource.TargetInfo targetInfo = new JobSource.TargetInfoBuilder().withClientId("abc")
.withSourceJobName("SyntheticSourceJob")
.withQuery("select country from stream where status==500")
.build();
targetInfos.add(targetInfo);
targetMap.put("targets",targetInfos);
ObjectMapper mapper = new ObjectMapper();
String target = mapper.writeValueAsString(targetMap);
// To run locally we use the LocalJobExecutor
LocalJobExecutorNetworked.execute(new JobConnectorJob().getJobInstance(),
new Parameter(MantisSourceJobConnector.MANTIS_SOURCEJOB_TARGET_KEY, target));
}
} | 6,288 |
0 | Create_ds/mantis-examples/jobconnector-sample/src/main/java/com/netflix/mantis/samples | Create_ds/mantis-examples/jobconnector-sample/src/main/java/com/netflix/mantis/samples/stage/EchoStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples.stage;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.computation.ScalarComputation;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
/**
* A simple stage that extracts data from the incoming {@link MantisServerSentEvent} and echoes it.
*/
@Slf4j
public class EchoStage implements ScalarComputation<MantisServerSentEvent, String> {

    // Removed: an unused "private static final ObjectMapper mapper" field — nothing in this
    // class serializes or deserializes JSON.

    /**
     * Extracts the payload of each incoming {@link MantisServerSentEvent}, logs it, and re-emits
     * it unchanged.
     *
     * @param context job metadata supplied by the Mantis runtime.
     * @param eventsO stream of server-sent events from the upstream job.
     * @return the event payload strings, unchanged.
     */
    @Override
    public Observable<String> call(Context context, Observable<MantisServerSentEvent> eventsO) {
        return eventsO
                .map(MantisServerSentEvent::getEventAsString)
                .map((event) -> {
                    log.info("Received: {}", event);
                    return event;
                });
    }

    @Override
    public void init(Context context) {
    }

    /** Stage config: scalar-to-scalar with a plain string codec. */
    public static ScalarToScalar.Config<MantisServerSentEvent, String> config() {
        return new ScalarToScalar.Config<MantisServerSentEvent, String>()
                .codec(Codecs.string());
    }
}
| 6,289 |
0 | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2 | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2/savan/EventingExpirationTypesTest.java | /*
* Copyright 2004,2005 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.axis2.savan;
import junit.framework.TestCase;
import org.apache.axiom.om.OMAbstractFactory;
import org.apache.axiom.soap.SOAPEnvelope;
import org.apache.axis2.addressing.EndpointReference;
import org.apache.axis2.client.Options;
import org.apache.axis2.context.MessageContext;
import org.apache.axis2.databinding.types.Duration;
import org.apache.axis2.databinding.utils.ConverterUtil;
import org.apache.savan.SavanMessageContext;
import org.apache.savan.eventing.EventingSubscriptionProcessor;
import org.apache.savan.subscription.ExpirationBean;
import org.apache.savan.util.CommonUtil;
import javax.xml.namespace.QName;
import java.io.File;
import java.io.IOException;
import java.util.Date;
public class EventingExpirationTypesTest extends TestCase {

    /** A renew message carrying an xs:duration expiry must yield a duration-typed ExpirationBean. */
    public void testDuration() throws Exception {
        SavanMessageContext renewMessage = getRenewMessage("eventing-renew-duration.xml");
        EventingSubscriptionProcessor processor = new EventingSubscriptionProcessor();
        ExpirationBean expirationBean = processor.getExpirationBean(renewMessage);
        assertTrue(expirationBean.isDuration());
        Duration duration = ConverterUtil.convertToDuration("P1Y2M3DT10H30M");
        assertEquals(duration, expirationBean.getDurationValue());
        assertEquals(expirationBean.getSubscriberID(), "UUID:DummySubscriberID");
    }

    /** A renew message carrying an absolute xs:dateTime expiry must yield a date-typed ExpirationBean. */
    public void testDateTime() throws Exception {
        SavanMessageContext renewMessage = getRenewMessage("eventing-renew-datetime.xml");
        EventingSubscriptionProcessor processor = new EventingSubscriptionProcessor();
        ExpirationBean expirationBean = processor.getExpirationBean(renewMessage);
        assertFalse(expirationBean.isDuration());
        Date date = ConverterUtil.convertToDateTime("2004-06-26T21:07:00.000-08:00").getTime();
        assertEquals(expirationBean.getDateValue(), date);
        assertEquals(expirationBean.getSubscriberID(), "UUID:DummySubscriberID");
    }

    /**
     * Loads the named SOAP 1.2 envelope from src/test/resources and wraps it in a
     * SavanMessageContext.
     *
     * NOTE(review): the Options instance built below is never attached to the MessageContext,
     * so the To/ReplyTo/Action values have no effect on the returned context. Also setTo(...)
     * is invoked twice — the second call (passing replyToEPR) overwrites the first and was
     * presumably meant to be setReplyTo(...). Confirm intent before relying on these settings.
     */
    private SavanMessageContext getRenewMessage(String name) throws IOException {
        File baseDir = new File("");
        // Path to the module's src/test/resources directory (variable name kept as-is).
        String testRource = baseDir.getAbsolutePath() + File.separator + "src" + File.separator +
                "test" + File.separator + "resources";
        SOAPEnvelope envelope = CommonUtil
                .getTestEnvelopeFromFile(testRource, name, OMAbstractFactory.getSOAP12Factory());
        MessageContext mc = new MessageContext();
        SavanMessageContext smc = new SavanMessageContext(mc);
        mc.setEnvelope(envelope);
        Options options = new Options();
        options.setTo(new EndpointReference("http://DummyToAddress/"));
        EndpointReference replyToEPR = new EndpointReference("http://DummyReplyToAddress/");
        replyToEPR.addReferenceParameter(new QName("RefParam1"), "RefParamVal1");
        options.setTo(replyToEPR);
        options.setAction("urn:uuid:DummyAction");
        return smc;
    }
}
| 6,290 |
0 | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2 | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2/savan/ConfigurationManagerTest.java | /*
* Copyright 2004,2005 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.axis2.savan;
import junit.framework.TestCase;
import org.apache.savan.SavanException;
import org.apache.savan.configuration.ConfigurationManager;
import java.io.File;
public class ConfigurationManagerTest extends TestCase {

    /** Loads the test savan-config XML from src/test/resources and runs it through the manager. */
    public void testFromXMLFile() throws SavanException {
        String resourceDir = new File("").getAbsolutePath() + File.separator + "src"
                + File.separator + "test" + File.separator + "resources";
        File configFile = new File(resourceDir + File.separator + "savan-config-test.xml");
        if (!configFile.isFile()) {
            throw new SavanException("Cant find the test configuration file");
        }
        ConfigurationManager configurationManager = new ConfigurationManager();
        configurationManager.configure(configFile);
    }
}
| 6,291 |
0 | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2 | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2/savan/XPathBasedFilterTest.java | /*
* Copyright 1999-2004 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.axis2.savan;
import junit.framework.TestCase;
import org.apache.axiom.om.OMAbstractFactory;
import org.apache.axiom.om.OMElement;
import org.apache.axiom.om.OMNode;
import org.apache.axiom.om.OMText;
import org.apache.axiom.soap.SOAPEnvelope;
import org.apache.axiom.soap.SOAPFactory;
import org.apache.axis2.AxisFault;
import org.apache.savan.filters.Filter;
import org.apache.savan.filters.XPathBasedFilter;
public class XPathBasedFilterTest extends TestCase {

    String filterString = "//elem1";

    /** An envelope containing an elem1 subtree must satisfy the "//elem1" XPath filter. */
    public void testMessageFiltering() throws AxisFault {
        SOAPEnvelope testEnvelope = createTestEnvelope();
        Filter xpathFilter = new XPathBasedFilter();
        xpathFilter.setUp(getFilterElement());
        assertTrue(xpathFilter.checkCompliance(testEnvelope));
    }

    /** Builds a SOAP 1.1 envelope whose body holds the nested elem1/elem2/elem3 structure. */
    private SOAPEnvelope createTestEnvelope() {
        SOAPFactory soapFactory = OMAbstractFactory.getSOAP11Factory();
        SOAPEnvelope envelope = soapFactory.getDefaultEnvelope();
        OMElement outer = soapFactory.createOMElement("elem1", null);
        OMElement middle = soapFactory.createOMElement("elem2", null);
        OMElement inner = soapFactory.createOMElement("elem3", null);
        middle.addChild(inner);
        outer.addChild(middle);
        envelope.getBody().addChild(outer);
        soapFactory.createOMDocument().addChild(envelope);
        return envelope;
    }

    /** Wraps the XPath expression in a text node, as expected by XPathBasedFilter.setUp. */
    private OMNode getFilterElement() {
        return OMAbstractFactory.getSOAP11Factory().createOMText(filterString);
    }
}
| 6,292 |
0 | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2 | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2/savan/EventingSubscripitonProcessorTest.java | /*
* Copyright 2004,2005 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.axis2.savan;
import junit.framework.TestCase;
import org.apache.axiom.om.OMAbstractFactory;
import org.apache.axiom.soap.SOAPEnvelope;
import org.apache.axis2.addressing.EndpointReference;
import org.apache.axis2.client.Options;
import org.apache.axis2.context.ConfigurationContext;
import org.apache.axis2.context.MessageContext;
import org.apache.axis2.databinding.utils.ConverterUtil;
import org.apache.axis2.description.AxisService;
import org.apache.axis2.engine.AxisConfiguration;
import org.apache.savan.SavanConstants;
import org.apache.savan.SavanMessageContext;
import org.apache.savan.configuration.ConfigurationManager;
import org.apache.savan.configuration.Protocol;
import org.apache.savan.eventing.EventingConstants;
import org.apache.savan.eventing.EventingSubscriptionProcessor;
import org.apache.savan.eventing.subscribers.EventingSubscriber;
import org.apache.savan.storage.DefaultSubscriberStore;
import org.apache.savan.storage.SubscriberStore;
import org.apache.savan.subscription.ExpirationBean;
import org.apache.savan.util.CommonUtil;
import javax.xml.namespace.QName;
import java.io.File;
import java.io.IOException;
import java.util.Date;
public class EventingSubscripitonProcessorTest extends TestCase {

    /** Savan configuration file expected under src/test/resources. */
    private final String TEST_SAVAN_CONFIG = "savan-config-test.xml";

    /** Name of the protocol entry looked up in the Savan configuration. */
    private final String EVENTING_PROTOCOL_NAME = "eventing";

    /**
     * Reads an eventing subscription message from the test resources and
     * verifies that {@link EventingSubscriptionProcessor} extracts a
     * subscriber whose delivery, filter, end-to EPR, id, and expiration are
     * all populated with the values carried in the message.
     */
    public void testSubscriberExtraction() throws Exception {
        SavanMessageContext smc = getSubscriptionMessage();

        SubscriberStore store = new DefaultSubscriberStore();
        smc.setSubscriberStore(store);

        EventingSubscriptionProcessor esp = new EventingSubscriptionProcessor();
        EventingSubscriber eventingSubscriber =
                (EventingSubscriber)esp.getSubscriberFromMessage(smc);

        assertNotNull(eventingSubscriber);
        assertNotNull(eventingSubscriber.getDelivery());
        assertNotNull(eventingSubscriber.getDelivery().getDeliveryEPR());
        assertNotNull(eventingSubscriber.getFilter());
        assertNotNull(eventingSubscriber.getEndToEPr());
        assertNotNull(eventingSubscriber.getId());
        assertNotNull(eventingSubscriber.getSubscriptionEndingTime());
        assertEquals(eventingSubscriber.getDelivery().getDeliveryMode(),
                     EventingConstants.DEFAULT_DELIVERY_MODE);
        assertEquals(eventingSubscriber.getDelivery().getDeliveryEPR().getAddress(),
                     "http://www.other.example.com/OnStormWarning");
        assertEquals(eventingSubscriber.getEndToEPr().getAddress(),
                     "http://www.example.com/MyEventSink");

        Date date = ConverterUtil.convertToDateTime("2004-06-26T21:07:00.000-08:00").getTime();
        assertEquals(eventingSubscriber.getSubscriptionEndingTime(), date);
    }

    /**
     * Reads an eventing renew message and verifies that the expiration bean
     * extracted from it carries a subscriber id and the requested date/time
     * value.
     */
    public void testExpirationBeanExtraction() throws Exception {
        SavanMessageContext smc = getRenewMessage();

        EventingSubscriptionProcessor esp = new EventingSubscriptionProcessor();
        ExpirationBean expirationBean = esp.getExpirationBean(smc);

        assertNotNull(expirationBean);
        assertNotNull(expirationBean.getSubscriberID());

        Date date = ConverterUtil.convertToDateTime("2004-06-26T21:07:00.000-08:00").getTime();
        assertEquals(expirationBean.getDateValue(), date);
    }

    /**
     * Builds a {@link SavanMessageContext} around the eventing-subscription.xml
     * test envelope, with a configured Savan {@link ConfigurationManager} and
     * the eventing protocol attached.
     *
     * @throws IOException if a required test resource file is missing
     */
    private SavanMessageContext getSubscriptionMessage() throws IOException {
        File baseDir = new File("");
        String testResourceDir = baseDir.getAbsolutePath() + File.separator + "src" + File.separator +
                "test" + File.separator + "resources";
        SOAPEnvelope envelope = CommonUtil.getTestEnvelopeFromFile(testResourceDir,
                "eventing-subscription.xml",
                OMAbstractFactory.getSOAP12Factory());

        AxisConfiguration axisConfiguration = new AxisConfiguration();
        ConfigurationContext configurationContext = new ConfigurationContext(axisConfiguration);

        MessageContext mc = new MessageContext();
        SavanMessageContext smc = new SavanMessageContext(mc);
        mc.setEnvelope(envelope);
        mc.setConfigurationContext(configurationContext);

        Options options = new Options();
        options.setTo(new EndpointReference("http://DummyToAddress/"));
        EndpointReference replyToEPR = new EndpointReference("http://DummyReplyToAddress/");
        replyToEPR.addReferenceParameter(new QName("RefParam1"), "RefParamVal1");
        // Was options.setTo(replyToEPR), which silently overwrote the To
        // address set just above; the reply-to EPR belongs in setReplyTo.
        // (The options object is never attached to the MessageContext, so
        // this does not alter the behavior of the tests above.)
        options.setReplyTo(replyToEPR);

        // Dummy AxisService to avoid NullPointerExceptions during processing.
        mc.setAxisService(new AxisService("DummyService"));
        options.setAction("urn:uuid:DummyAction");

        String savanConfigPath = testResourceDir + File.separator + TEST_SAVAN_CONFIG;
        File file = new File(savanConfigPath);
        if (!file.exists())
            throw new IOException(TEST_SAVAN_CONFIG + " file is not available in test-resources.");

        ConfigurationManager configurationManager = new ConfigurationManager();
        configurationManager.configure(file);
        configurationContext
                .setProperty(SavanConstants.CONFIGURATION_MANAGER, configurationManager);

        Protocol protocol = configurationManager.getProtocol(EVENTING_PROTOCOL_NAME);
        smc.setProtocol(protocol);

        return smc;
    }

    /**
     * Builds a {@link SavanMessageContext} around the eventing-renew-datetime.xml
     * test envelope. No configuration context is attached; the renew-processing
     * path under test only needs the envelope.
     *
     * @throws IOException if the test resource cannot be read
     */
    private SavanMessageContext getRenewMessage() throws IOException {
        File baseDir = new File("");
        String testResourceDir = baseDir.getAbsolutePath() + File.separator + "src" + File.separator +
                "test" + File.separator + "resources";
        SOAPEnvelope envelope = CommonUtil.getTestEnvelopeFromFile(testResourceDir,
                "eventing-renew-datetime.xml",
                OMAbstractFactory.getSOAP12Factory());

        MessageContext mc = new MessageContext();
        SavanMessageContext smc = new SavanMessageContext(mc);
        mc.setEnvelope(envelope);

        Options options = new Options();
        options.setTo(new EndpointReference("http://DummyToAddress/"));
        EndpointReference replyToEPR = new EndpointReference("http://DummyReplyToAddress/");
        replyToEPR.addReferenceParameter(new QName("RefParam1"), "RefParamVal1");
        // Same setTo/setReplyTo copy-paste fix as in getSubscriptionMessage().
        options.setReplyTo(replyToEPR);
        options.setAction("urn:uuid:DummyAction");

        return smc;
    }
}
| 6,293 |
0 | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2/savan | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2/savan/atom/UtilServer.java | package org.apache.axis2.savan.atom;
import junit.framework.TestCase;
import org.apache.axis2.AxisFault;
import org.apache.axis2.context.ConfigurationContext;
import org.apache.axis2.context.ConfigurationContextFactory;
import org.apache.axis2.context.ServiceContext;
import org.apache.axis2.context.ServiceGroupContext;
import org.apache.axis2.deployment.DeploymentEngine;
import org.apache.axis2.description.AxisModule;
import org.apache.axis2.description.AxisService;
import org.apache.axis2.description.AxisServiceGroup;
import org.apache.axis2.engine.ListenerManager;
import org.apache.axis2.transport.http.SimpleHTTPServer;
import javax.xml.namespace.QName;
import java.io.File;
import java.io.FilenameFilter;
/**
*
*/
/**
 * Shared {@link SimpleHTTPServer} harness for integration tests.
 * {@link #start()} / {@link #stop()} are reference-counted so nested test
 * setups reuse a single running server instance.
 */
public class UtilServer {
    // Number of outstanding start() calls; the server is torn down only when
    // the count returns to zero. NOTE(review): calling stop() without a
    // matching start() drives the count negative — assumed never to happen
    // in practice; confirm against callers.
    private static int count = 0;
    private static SimpleHTTPServer receiver;
    public static final int TESTING_PORT = 5555;

    /** Registers {@code service} with the running server's configuration. */
    public static synchronized void deployService(AxisService service)
            throws AxisFault {
        receiver.getConfigurationContext().getAxisConfiguration().addService(
                service);
    }

    /** Removes the service named by {@code service}'s local part. */
    public static synchronized void unDeployService(QName service)
            throws AxisFault {
        receiver.getConfigurationContext().getAxisConfiguration()
                .removeService(service.getLocalPart());
    }

    /** Removes the implicit client-side "AnonymousService", if deployed. */
    public static synchronized void unDeployClientService() throws AxisFault {
        if (receiver.getConfigurationContext().getAxisConfiguration() != null) {
            receiver.getConfigurationContext().getAxisConfiguration()
                    .removeService("AnonymousService");
        }
    }

    /** Starts the server against the default testing repository. */
    public static synchronized void start() throws Exception {
        start(org.apache.axis2.Constants.TESTING_REPOSITORY);
    }

    /**
     * Starts the server on the first call; subsequent calls only bump the
     * reference count (the {@code repository} argument is ignored once the
     * server is already running).
     */
    public static synchronized void start(String repository) throws Exception {
        if (count == 0) {
            ConfigurationContext er = getNewConfigurationContext(repository);
            receiver = new SimpleHTTPServer(er, TESTING_PORT);
            receiver.start();
            System.out
                    .print("Server started on port " + TESTING_PORT + ".....");
            try {
                // Give the listener a moment to come up before tests hit it.
                Thread.sleep(2000);
            } catch (InterruptedException e1) {
                // Restore the interrupt status before surfacing the fault.
                Thread.currentThread().interrupt();
                throw new AxisFault("Thread interupted", e1);
            }
        }
        count++;
    }

    /**
     * Builds a ConfigurationContext from a file-system repository.
     *
     * @throws Exception if the repository directory does not exist
     */
    public static ConfigurationContext getNewConfigurationContext(
            String repository) throws Exception {
        File file = new File(repository);
        if (!file.exists()) {
            throw new Exception("repository directory "
                    + file.getAbsolutePath() + " does not exists");
        }
        return ConfigurationContextFactory
                .createConfigurationContextFromFileSystem(file
                        .getAbsolutePath(), file.getAbsolutePath() + "/conf/axis2.xml");
    }

    /**
     * Decrements the reference count; on the final call, stops the server and
     * waits for it to shut down. Always stops the listener manager afterwards.
     */
    public static synchronized void stop() throws AxisFault {
        if (count == 1) {
            receiver.stop();
            while (receiver.isRunning()) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e1) {
                    // Was silently swallowed, which made this loop busy-spin
                    // forever once interrupted (an interrupted thread's
                    // sleep() fails immediately). Preserve the interrupt
                    // status and stop waiting instead.
                    Thread.currentThread().interrupt();
                    break;
                }
            }
            count = 0;
            System.out.print("Server stopped .....");
        } else {
            count--;
        }
        ListenerManager listenerManager = receiver.getConfigurationContext()
                .getListenerManager();
        if (listenerManager != null) {
            listenerManager.stop();
        }
    }

    /** Returns the running server's ConfigurationContext. */
    public static ConfigurationContext getConfigurationContext() {
        return receiver.getConfigurationContext();
    }

    /** Matches the addressing module archive (addressing*.mar). */
    static class AddressingFilter implements FilenameFilter {
        public boolean accept(File dir, String name) {
            return name.startsWith("addressing") && name.endsWith(".mar");
        }
    }

    /**
     * Locates the single addressing .mar in the testing repository, asserting
     * that exactly one exists.
     */
    private static File getAddressingMARFile() {
        File dir = new File(org.apache.axis2.Constants.TESTING_REPOSITORY);
        File[] files = dir.listFiles(new AddressingFilter());
        TestCase.assertTrue(files.length == 1);
        File file = files[0];
        TestCase.assertTrue(file.exists());
        return file;
    }

    /**
     * Builds a client-side ServiceContext with the addressing module deployed
     * and {@code service} registered, using the default integration repo.
     */
    public static ServiceContext createAdressedEnabledClientSide(
            AxisService service) throws AxisFault {
        File file = getAddressingMARFile();
        TestCase.assertTrue(file.exists());
        ConfigurationContext configContext = ConfigurationContextFactory
                .createConfigurationContextFromFileSystem(
                        "target/test-resources/integrationRepo", null);
        AxisModule axisModule = DeploymentEngine.buildModule(file,
                configContext.getAxisConfiguration());
        configContext.getAxisConfiguration().addModule(axisModule);
        configContext.getAxisConfiguration().addService(service);
        return new ServiceGroupContext(configContext,
                (AxisServiceGroup)service.getParent())
                .getServiceContext(service);
    }

    /**
     * Builds a client ConfigurationContext with the addressing module both
     * deployed and engaged globally.
     */
    public static ConfigurationContext createClientConfigurationContext()
            throws AxisFault {
        File file = getAddressingMARFile();
        TestCase.assertTrue(file.exists());
        ConfigurationContext configContext = ConfigurationContextFactory
                .createConfigurationContextFromFileSystem(
                        "target/test-resources/integrationRepo", null);
        AxisModule axisModule = DeploymentEngine.buildModule(file,
                configContext.getAxisConfiguration());
        configContext.getAxisConfiguration().addModule(axisModule);
        configContext.getAxisConfiguration().engageModule(
                new QName("addressing"));
        return configContext;
    }

    /**
     * Variant of {@link #createAdressedEnabledClientSide(AxisService)} that
     * reads the client repository from {@code clientHome}.
     */
    public static ServiceContext createAdressedEnabledClientSide(
            AxisService service, String clientHome) throws AxisFault {
        File file = getAddressingMARFile();
        TestCase.assertTrue(file.exists());
        ConfigurationContext configContext = ConfigurationContextFactory
                .createConfigurationContextFromFileSystem(clientHome, null);
        AxisModule axisModule = DeploymentEngine.buildModule(file,
                configContext.getAxisConfiguration());
        configContext.getAxisConfiguration().addModule(axisModule);
        configContext.getAxisConfiguration().addService(service);
        return new ServiceGroupContext(configContext,
                (AxisServiceGroup)service.getParent())
                .getServiceContext(service);
    }
}
| 6,294 |
0 | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2/savan | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2/savan/atom/PublisherService.java | /*
* Copyright 2004,2005 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.axis2.savan.atom;
import org.apache.axiom.om.OMAbstractFactory;
import org.apache.axiom.om.OMElement;
import org.apache.axiom.om.OMNamespace;
import org.apache.axiom.soap.SOAPFactory;
import org.apache.axis2.AxisFault;
import org.apache.axis2.context.ServiceContext;
import org.apache.savan.publication.client.PublicationClient;
import org.apache.savan.storage.SubscriberStore;
import org.apache.savan.util.CommonUtil;
import java.util.Random;
/**
 * Test service that, once initialized, spawns a background thread publishing
 * a numbered event to all subscribers every ten seconds.
 */
public class PublisherService {

    ServiceContext serviceContext = null;
    private String eventName = "testTopic";

    /**
     * Service lifecycle hook: remembers the service context and starts the
     * publisher thread, giving it a head start before init returns.
     */
    public void init(ServiceContext serviceContext) throws AxisFault {
        try {
            System.out.println("Eventing Service INIT called");
            this.serviceContext = serviceContext;
            PublisherThread thread = new PublisherThread();
            thread.start();
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            // Restore the interrupt status before surfacing the fault.
            Thread.currentThread().interrupt();
            throw AxisFault.makeFault(e);
        }
    }

    /** Placeholder operation so the service exposes an invocable method. */
    public void dummyMethod(OMElement param) throws Exception {
        System.out.println("Eventing Service dummy method called");
    }

    /** Background loop that publishes one event per ten-second cycle. */
    private class PublisherThread extends Thread {

        String Publication = "Publication";
        String publicationNamespaceValue = "http://eventing.sample";
        Random r = new Random();

        // NOTE(review): never incremented, so every publication reads
        // "Event 0" — confirm whether a counter was intended.
        private int eventID = 0;

        public void run() {
            try {
                while (true) {
                    System.out.println("Publishing next publication...");
                    SubscriberStore store =
                            CommonUtil.getSubscriberStore(serviceContext.getAxisService());
                    if (store != null) {
                        OMElement envelope = getNextPublicationEvent();
                        PublicationClient client =
                                new PublicationClient(serviceContext.getConfigurationContext());
                        client.sendPublication(envelope, serviceContext.getAxisService(), null);
                    }
                    // Sleep on every iteration. Previously the sleep sat
                    // inside the store != null branch, so the loop busy-spun
                    // at full CPU whenever no subscriber store existed yet.
                    Thread.sleep(10000);
                }
            } catch (InterruptedException e) {
                // Asked to stop: restore the interrupt status and end.
                Thread.currentThread().interrupt();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }

        /**
         * Builds the next publish payload:
         * {@code <publish><Publication><foo>Event N</foo></Publication></publish>}.
         */
        public OMElement getNextPublicationEvent() {
            SOAPFactory factory = OMAbstractFactory.getSOAP11Factory();
            OMNamespace namespace = factory.createOMNamespace(publicationNamespaceValue, "ns1");
            OMElement publicationElement = factory.createOMElement(Publication, namespace);
            factory.createOMElement("foo", namespace, publicationElement)
                    .setText("Event " + eventID);
            OMElement publishMethod = factory.createOMElement("publish", namespace);
            publishMethod.addChild(publicationElement);
            return publishMethod;
        }
    }
}
| 6,295 |
0 | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2/savan | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2/savan/atom/UtilServerBasedTestCase.java | /*
* Copyright 2004-2006 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.axis2.savan.atom;
import junit.extensions.TestSetup;
import junit.framework.Test;
import junit.framework.TestCase;
public class UtilServerBasedTestCase extends TestCase {
public UtilServerBasedTestCase() {
super(UtilServerBasedTestCase.class.getName());
}
public UtilServerBasedTestCase(java.lang.String string) {
super(string);
}
protected static Test getTestSetup(Test test) {
return new TestSetup(test) {
public void setUp() throws Exception {
UtilServer.start();
}
public void tearDown() throws Exception {
UtilServer.stop();
}
};
}
protected static Test getTestSetup2(Test test, final String param) {
return new TestSetup(test) {
public void setUp() throws Exception {
UtilServer.start(param);
}
public void tearDown() throws Exception {
UtilServer.stop();
}
};
}
protected static Test getTestSetup3(Test test, final String param1, final String param2) {
return new TestSetup(test) {
public void setUp() throws Exception {
UtilServer.start(param1);
}
public void tearDown() throws Exception {
UtilServer.stop();
}
};
}
}
| 6,296 |
0 | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2/savan | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2/savan/atom/AtomTest.java | /*
* Copyright 2004,2005 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.axis2.savan.atom;
//todo
import com.wso2.eventing.atom.CreateFeedResponseDocument.CreateFeedResponse;
import org.apache.axiom.om.*;
import org.apache.axis2.addressing.EndpointReference;
import org.apache.axis2.client.Options;
import org.apache.axis2.client.ServiceClient;
import org.apache.axis2.context.ConfigurationContext;
import org.apache.axis2.context.ConfigurationContextFactory;
import org.apache.axis2.context.MessageContext;
import org.apache.axis2.context.ServiceContext;
import org.apache.axis2.description.AxisService;
import org.apache.axis2.engine.AxisConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.savan.atom.AtomEventingClient;
import org.apache.savan.eventing.client.EventingClient;
import javax.xml.namespace.QName;
import java.io.IOException;
import java.io.InputStream;
/**
 * Integration test for Atom-based eventing. Starts the shared UtilServer
 * against a local repository, then exercises the AtomEventingClient
 * end-to-end: create a feed, publish via plain SOAP, SOAP publication, and
 * REST, fetch the feed twice, and delete it.
 *
 * Requires the "target/repository/" Axis2 repository (with the addressing
 * module) to be present at runtime.
 */
public class AtomTest extends UtilServerBasedTestCase {
    private static final Log log = LogFactory.getLog(AtomTest.class);
    protected QName transportName = new QName("http://localhost/my",
            "NullTransport");
    private final int MIN_OPTION = 1;
    private final int MAX_OPTION = 9;
    private final String SUBSCRIBER_1_ID = "subscriber1";
    private final String SUBSCRIBER_2_ID = "subscriber2";
    // Axis2 repository used for both the server and the test client.
    private final String AXIS2_REPO = "target/repository/";
    private ServiceClient serviceClient = null;
    private Options options = null;
    private EventingClient eventingClient = null;
    // Address fragments appended to http://serverIP:port below.
    private String toAddressPart = "/axis2/services/PublisherService";
    private String listener1AddressPart = "/axis2/services/ListenerService1";
    private String listener2AddressPart = "/axis2/services/ListenerService2";
    private final String applicationNamespaceName = "http://tempuri.org/";
    private final String dummyMethod = "dummyMethod";
    private static String repo = null;
    // NOTE(review): port 5556 here differs from UtilServer.TESTING_PORT
    // (5555); UtilServer.start(AXIS2_REPO) presumably binds per the repo's
    // axis2.xml — confirm the two agree.
    private static int port = 5556;
    private static String serverIP = "127.0.0.1";
    protected AxisConfiguration engineRegistry;
    protected MessageContext mc;
    protected ServiceContext serviceContext;
    protected AxisService service;
    private QName serviceName = new QName("PublisherService");
    protected boolean finish = false;

    public AtomTest() {
        super(AtomTest.class.getName());
    }

    public AtomTest(String testName) {
        super(testName);
    }

    /** Starts the shared server on the test repository before each test. */
    protected void setUp() throws Exception {
        UtilServer.start(AXIS2_REPO);
        // service = Utils.createSimpleService(serviceName,
        // PublisherService.class.getName(),
        // new QName("dummyMethod"));
        // service.addModuleref("savan");
        //
        // UtilServer.deployService(service);
        // AxisService service1 = Utils.createSimpleService(new QName("ListenerService1"),
        // SavenTest.class.getName(),
        // new QName("publish"));
        // UtilServer.deployService(service1);
    }

    /** Removes the client-side anonymous service after each test. */
    protected void tearDown() throws Exception {
        //UtilServer.unDeployService(serviceName);
        UtilServer.unDeployClientService();
    }

    /**
     * Full Atom eventing round trip: create a feed, publish three events
     * (fire-and-forget SOAP, SOAP publication, REST publication), fetch and
     * print the feed twice, then delete it.
     */
    public void testAtomSubcription() throws Exception {
        //Thread.sleep(1000*60*100);
        ConfigurationContext configContext = ConfigurationContextFactory
                .createConfigurationContextFromFileSystem(AXIS2_REPO,
                        AXIS2_REPO + "/conf/axis2.xml");
        serviceClient = new ServiceClient(configContext, null); //TODO give a repo
        options = new Options();
        serviceClient.setOptions(options);
        serviceClient.engageModule(new QName("addressing"));
        eventingClient = new EventingClient(serviceClient);
        String toAddress = "http://" + serverIP + ":" + port + toAddressPart;
        //String toAddress = "http://" + serverIP + ":" + port + "/axis2/services/RMSampleService";
        options.setTo(new EndpointReference(toAddress));
        AtomEventingClient atomEventingClient = new AtomEventingClient(serviceClient);
        CreateFeedResponse createFeedResponse =
                atomEventingClient.createFeed("test Title", "Srinath Perera");
        options.setAction("http://wso2.com/eventing/dummyMethod");
        serviceClient.fireAndForget(getDummyMethodRequestElement(0));
        // options.setAction(EventingConstants.Actions.Publish);
        // serviceClient.fireAndForget(getDummyMethodRequestElement ());
        atomEventingClient.publishWithSOAP(toAddress, getDummyMethodRequestElement(1), null);
        atomEventingClient.publishWithREST(toAddress, getDummyMethodRequestElement(2), null);
        //Thread.sleep(1000*10*1000);
        // int i = 0;
        // while(i<1){
        System.out.println(createFeedResponse.getFeedUrl());
        OMElement feedAsXml = atomEventingClient.fetchFeed(createFeedResponse.getFeedUrl());
        feedAsXml.serialize(System.out, new OMOutputFormat());
        // URL url = new URL(createFeedResponse.getFeedUrl());
        // System.out.println(readFromStream(url.openStream()));
        // Thread.sleep(1000*10);
        // i++;
        // }
        //
        feedAsXml = atomEventingClient.fetchFeed(createFeedResponse.getFeedUrl());
        feedAsXml.serialize(System.out, new OMOutputFormat());
        atomEventingClient.deleteFeed();
    }

    // public void testEvents() throws Exception{
    // initClient ();
    // performAction (1);
    // }

    /**
     * Callback invoked when a publication arrives; logs the payload to
     * stdout. (Named per the eventing listener contract used by the
     * commented-out subscription flow below.)
     */
    public void publish(OMElement param) throws Exception {
        System.out.println("\n");
        System.out.println("'1' got a new publication...");
        System.out.println(param);
        System.out.println("\n");
    }

    // private void initClient () throws AxisFault {
    //
    //// String CLIENT_REPO = null;
    //// String AXIS2_XML = null;
    ////
    //// if (repo!=null) {
    //// CLIENT_REPO = repo;
    //// AXIS2_XML = repo + File.separator + "axis2.xml";
    //// } else {
    ////// throw new AxisFault ("Please specify the client repository as a program argument.Use '-h' for help.");
    //// }
    //
    // ConfigurationContext configContext = ConfigurationContextFactory.createConfigurationContextFromFileSystem(AXIS2_REPO,AXIS2_REPO+"/conf/axis2.xml");
    // serviceClient = new ServiceClient (configContext,null); //TODO give a repo
    //
    // options = new Options ();
    // serviceClient.setOptions(options);
    // serviceClient.engageModule(new QName ("addressing"));
    //
    // eventingClient = new EventingClient (serviceClient);
    //
    // String toAddress = "http://" + serverIP + ":" + port + toAddressPart;
    //
    // //String toAddress = "http://" + serverIP + ":" + port + "/axis2/services/RMSampleService";
    // options.setTo(new EndpointReference (toAddress));
    // }
    //
    // private void performAction (int action) throws Exception {
    //
    // switch (action) {
    // case 1:
    // doSubscribe(SUBSCRIBER_1_ID);
    // break;
    // case 2:
    // doSubscribe(SUBSCRIBER_2_ID);
    // break;
    // case 3:
    // doSubscribe(SUBSCRIBER_1_ID);
    // doSubscribe(SUBSCRIBER_2_ID);
    // break;
    // case 4:
    // doUnsubscribe(SUBSCRIBER_1_ID);
    // break;
    // case 5:
    // doUnsubscribe(SUBSCRIBER_2_ID);
    // break;
    // case 6:
    // doUnsubscribe(SUBSCRIBER_1_ID);
    // doUnsubscribe(SUBSCRIBER_2_ID);
    // break;
    // case 7:
    // doGetStatus(SUBSCRIBER_1_ID);
    // break;
    // case 8:
    // doGetStatus(SUBSCRIBER_2_ID);
    // break;
    // case 9:
    // System.exit(0);
    // break;
    // default:
    // break;
    // }
    // }

    // private void doSubscribe (String ID) throws Exception {
    // EventingClientBean bean = new EventingClientBean ();
    //
    // String subscribingAddress = null;
    // if (SUBSCRIBER_1_ID.equals(ID)) {
    // subscribingAddress = "http://" + serverIP + ":" + port + listener1AddressPart;
    // } else if (SUBSCRIBER_2_ID.equals(ID)) {
    // subscribingAddress = "http://" + serverIP + ":" + port + listener2AddressPart;
    // }
    //
    // bean.setDeliveryEPR(new EndpointReference (subscribingAddress));
    //
    // //uncomment following to set an expiration time of 10 minutes.
    //// Date date = new Date ();
    //// date.setMinutes(date.getMinutes()+10);
    //// bean.setExpirationTime(date);
    //
    // eventingClient.subscribe(bean,ID);
    // Thread.sleep(1000); //TODO remove if not sequired
    // }
    //
    // private void doUnsubscribe (String ID) throws Exception {
    // eventingClient.unsubscribe(ID);
    // Thread.sleep(1000); //TODO remove if not sequired
    // }
    //
    // private void doGetStatus (String ID) throws Exception {
    // SubscriptionStatus status = eventingClient.getSubscriptionStatus(ID);
    // Thread.sleep(1000); //TODO remove if not sequired
    //
    // String statusValue = status.getExpirationValue();
    // System.out.println("Status of the subscriber '" + ID +"' is" + statusValue);
    // }

    /**
     * Builds a {@code dummyMethod} request element in the application
     * namespace whose text content is the event number {@code i}.
     */
    private OMElement getDummyMethodRequestElement(int i) {
        OMFactory fac = OMAbstractFactory.getOMFactory();
        OMNamespace namespace = fac.createOMNamespace(applicationNamespaceName, "ns1");
        OMElement de = fac.createOMElement(dummyMethod, namespace);
        de.setText(String.valueOf(i));
        return de;
    }

    /**
     * Reads the stream to exhaustion into a String and closes it.
     * NOTE(review): decodes each buffer with the platform default charset,
     * which can split multi-byte characters across reads — acceptable for
     * this test helper.
     *
     * @throws Exception wrapping any IOException from the read
     */
    public static String readFromStream(InputStream in) throws Exception {
        try {
            StringBuffer wsdlStr = new StringBuffer();
            int read;
            byte[] buf = new byte[1024];
            while ((read = in.read(buf)) > 0) {
                wsdlStr.append(new String(buf, 0, read));
            }
            in.close();
            return wsdlStr.toString();
        } catch (IOException e) {
            throw new Exception(e);
        }
    }
}
| 6,297 |
0 | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2/savan | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2/savan/atom/DerbyTest.java | package org.apache.axis2.savan.atom;
import junit.framework.TestCase;
import org.apache.axiom.om.OMAbstractFactory;
import org.apache.axiom.om.OMElement;
import org.apache.axiom.om.OMFactory;
import org.apache.axiom.om.OMNamespace;
import org.apache.savan.atom.AtomDataSource;
import java.io.StringWriter;
import java.util.Date;
import java.util.Random;
public class DerbyTest extends TestCase {
/* the default framework is embedded*/
public String framework = "embedded";
public String driver = "org.apache.derby.jdbc.EmbeddedDriver";
public String protocol = "jdbc:derby:";
public void testDataSource() throws Exception {
AtomDataSource dataSource = new AtomDataSource();
String id = "id" + new Random().nextDouble();
dataSource.addFeed(id, "foo", new Date(), "Srinath");
dataSource.addEntry(id, getDummyMethodRequestElement());
StringWriter w = new StringWriter();
OMElement result = dataSource.getFeedAsXml(id);
// Iterator it = result.getChildElements();
// while(it.hasNext()){
// System.out.println(it.next());
// }
result.serialize(w);
System.out.println(w.getBuffer().toString());
}
// public static void main(String[] args)
// {
// new DerbyTest().go(args);
// }
// void go(String[] args)
// {
// /* parse the arguments to determine which framework is desired*/
// parseArguments(args);
//
// System.out.println("SimpleApp starting in " + framework + " mode.");
//
// try
// {
// /*
// The driver is installed by loading its class.
// In an embedded environment, this will start up Derby, since it is not already running.
// */
// Class.forName(driver).newInstance();
// System.out.println("Loaded the appropriate driver.");
//
// Connection conn = null;
// Properties props = new Properties();
// props.put("user", "user1");
// props.put("password", "user1");
//
// /*
// The connection specifies create=true to cause
// the database to be created. To remove the database,
// remove the directory derbyDB and its contents.
// The directory derbyDB will be created under
// the directory that the system property
// derby.system.home points to, or the current
// directory if derby.system.home is not set.
// */
// conn = DriverManager.getConnection(protocol +
// "derbyDB;create=true", props);
//
// System.out.println("Connected to and created database derbyDB");
//
// conn.setAutoCommit(false);
//
// /*
// Creating a statement lets us issue commands against
// the connection.
// */
// Statement s = conn.createStatement();
//
// /*
// We create a table, add a few rows, and update one.
// */
// s.execute("create table derbyDB(num int, addr varchar(40))");
// System.out.println("Created table derbyDB");
// s.execute("insert into derbyDB values (1956,'Webster St.')");
// System.out.println("Inserted 1956 Webster");
// s.execute("insert into derbyDB values (1910,'Union St.')");
// System.out.println("Inserted 1910 Union");
// s.execute(
// "update derbyDB set num=180, addr='Grand Ave.' where num=1956");
// System.out.println("Updated 1956 Webster to 180 Grand");
//
// s.execute(
// "update derbyDB set num=300, addr='Lakeshore Ave.' where num=180");
// System.out.println("Updated 180 Grand to 300 Lakeshore");
//
// /*
// We select the rows and verify the results.
// */
// ResultSet rs = s.executeQuery(
// "SELECT num, addr FROM derbyDB ORDER BY num");
//
// if (!rs.next())
// {
// throw new Exception("Wrong number of rows");
// }
//
// if (rs.getInt(1) != 300)
// {
// throw new Exception("Wrong row returned");
// }
//
// if (!rs.next())
// {
// throw new Exception("Wrong number of rows");
// }
//
// if (rs.getInt(1) != 1910)
// {
// throw new Exception("Wrong row returned");
// }
//
// if (rs.next())
// {
// throw new Exception("Wrong number of rows");
// }
//
// System.out.println("Verified the rows");
//
// s.execute("drop table derbyDB");
// System.out.println("Dropped table derbyDB");
//
// /*
// We release the result and statement resources.
// */
// rs.close();
// s.close();
// System.out.println("Closed result set and statement");
//
// /*
// We end the transaction and the connection.
// */
// conn.commit();
// conn.close();
// System.out.println("Committed transaction and closed connection");
//
// /*
// In embedded mode, an application should shut down Derby.
// If the application fails to shut down Derby explicitly,
// the Derby does not perform a checkpoint when the JVM shuts down, which means
// that the next connection will be slower.
// Explicitly shutting down Derby with the URL is preferred.
// This style of shutdown will always throw an "exception".
// */
// boolean gotSQLExc = false;
//
// if (framework.equals("embedded"))
// {
// try
// {
// DriverManager.getConnection("jdbc:derby:;shutdown=true");
// }
// catch (SQLException se)
// {
// gotSQLExc = true;
// }
//
// if (!gotSQLExc)
// {
// System.out.println("Database did not shut down normally");
// }
// else
// {
// System.out.println("Database shut down normally");
// }
// }
// }
// catch (Throwable e)
// {
// System.out.println("exception thrown:");
//
// if (e instanceof SQLException)
// {
// printSQLError((SQLException) e);
// }
// else
// {
// e.printStackTrace();
// }
// }
//
// System.out.println("SimpleApp finished");
// }
//
// static void printSQLError(SQLException e)
// {
// while (e != null)
// {
// System.out.println(e.toString());
// e = e.getNextException();
// }
// }
//
// private void parseArguments(String[] args)
// {
// int length = args.length;
//
// for (int index = 0; index < length; index++)
// {
// if (args[index].equalsIgnoreCase("jccjdbcclient"))
// {
// framework = "jccjdbc";
// driver = "com.ibm.db2.jcc.DB2Driver";
// protocol = "jdbc:derby:net://localhost:1527/";
// }
// if (args[index].equalsIgnoreCase("derbyclient"))
// {
// framework = "derbyclient";
// driver = "org.apache.derby.jdbc.ClientDriver";
// protocol = "jdbc:derby://localhost:1527/";
// }
// }
// }
private final String applicationNamespaceName = "http://tempuri.org/";
private final String dummyMethod = "dummyMethod";
/**
 * Builds the payload for the dummy service operation: an empty
 * {@code <ns1:dummyMethod>} element qualified by the application namespace.
 *
 * @return the request payload element
 */
private OMElement getDummyMethodRequestElement() {
    final OMFactory factory = OMAbstractFactory.getOMFactory();
    final OMNamespace ns = factory.createOMNamespace(applicationNamespaceName, "ns1");
    final OMElement request = factory.createOMElement(dummyMethod, ns);
    return request;
}
}
| 6,298 |
0 | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2/savan | Create_ds/axis-axis2-java-savan/modules/core/src/test/java/org/apache/axis2/savan/atom/AtomSample.java | package org.apache.axis2.savan.atom;
import com.wso2.eventing.atom.CreateFeedResponseDocument.CreateFeedResponse;
import org.apache.axiom.om.*;
import org.apache.savan.atom.AtomEventingClient;
public class AtomSample {

    /** Namespace URI qualifying the dummy publish payload. */
    private static final String applicationNamespaceName = "http://tempuri.org/";

    /** Local name of the dummy operation element. */
    private static final String dummyMethod = "dummyMethod";

    /**
     * Demonstrates the Savan Atom eventing client end to end: create a feed,
     * publish to it over SOAP and over REST, fetch the feed via HTTP GET, and
     * finally delete it.
     * <p/>
     * Prerequisites: <ol> <li>Axis2 installed with the addressing module</li>
     * <li>A service deployed with Savan engaged</li> <li>A client-side Axis2
     * repository containing the addressing module</li> </ol>
     * Invoke with the service URL
     * (http://serviceHost:servicePort/services/&lt;Service-Name&gt;) and the
     * client repository location as the two arguments.
     *
     * @param args service URL and client repository path
     */
    public static void main(String[] args) {
        if (args.length != 2) {
            System.out.println("Usage: serviceUrl clientRepository");
            return;
        }
        try {
            final String endpoint = args[0];
            final AtomEventingClient client = new AtomEventingClient(endpoint, args[1]);

            // Create a feed on the service and report its URL.
            final CreateFeedResponse feed =
                    client.createFeed("test Title", "Srinath Perera");
            System.out.println(
                    "Created Feed " + feed.getFeedUrl() + " Sucessfully");

            // Publish one event through SOAP, then another through REST.
            client.publishWithSOAP(endpoint, getDummyMethodRequestElement(1), null);
            client.publishWithREST(endpoint, getDummyMethodRequestElement(2), null);

            // Retrieve the feed with a plain HTTP GET and dump it to stdout.
            final OMElement feedXml = client.fetchFeed(feed.getFeedUrl());
            feedXml.serialize(System.out, new OMOutputFormat());
            System.out.println("Fetch Feed using HTTP Get, copy and paste url " +
                    feed.getFeedUrl() +
                    " in browser to retirve the feed ");

            // Wait for the user before cleaning up the feed.
            System.out.println("Press any key to delete the feed");
            System.in.read();
            client.deleteFeed();
        } catch (Exception e) {
            // Sample code: report any failure to the console and exit.
            e.printStackTrace();
        }
    }

    /**
     * Builds the payload for the dummy service operation: a
     * {@code <ns1:dummyMethod>} element in the application namespace whose
     * text content is the given sequence number.
     *
     * @param i sequence number embedded as the element's text
     * @return the request payload element
     */
    private static OMElement getDummyMethodRequestElement(int i) {
        final OMFactory factory = OMAbstractFactory.getOMFactory();
        final OMNamespace ns = factory.createOMNamespace(applicationNamespaceName, "ns1");
        final OMElement element = factory.createOMElement(dummyMethod, ns);
        element.setText(String.valueOf(i));
        return element;
    }
}
| 6,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.