index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/mapreduce/RowHash.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.mapreduce;
import java.io.IOException;
import java.util.Base64;
import java.util.Collections;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.apache.accumulo.hadoop.mapreduce.AccumuloInputFormat;
import org.apache.accumulo.hadoop.mapreduce.AccumuloOutputFormat;
import org.apache.accumulo.hadoop.mapreduce.InputFormatBuilder;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import com.beust.jcommander.Parameter;
public class RowHash {

  /**
   * Mapper that, for every input entry, writes back a mutation holding the Base64-encoded MD5
   * digest of the entry's value under column cf-HASHTYPE:cq-MD5BASE64.
   */
  public static class HashDataMapper extends Mapper<Key,Value,Text,Mutation> {
    @Override
    public void map(Key row, Value data, Context context) throws IOException, InterruptedException {
      byte[] digest = MD5Hash.digest(data.toString()).getDigest();
      byte[] encoded = Base64.getEncoder().encode(digest);

      Mutation m = new Mutation(row.getRow());
      m.put("cf-HASHTYPE", "cq-MD5BASE64", new Value(encoded));

      // A null key routes the mutation to the output format's default table.
      context.write(null, m);
      context.progress();
    }

    @Override
    public void setup(Context job) {}
  }

  private static class Opts extends ClientOpts {
    @Parameter(names = {"-t", "--table"}, required = true, description = "table to use")
    String tableName;
    @Parameter(names = "--column", required = true)
    String column;
  }

  /**
   * Configures and submits a map-only job that reads the requested column of a table and writes
   * an MD5 hash of each value back into the same table.
   */
  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs(RowHash.class.getName(), args);

    Job job = Job.getInstance(opts.getHadoopConfig());
    job.setJobName(RowHash.class.getName());
    job.setJarByClass(RowHash.class);

    job.setInputFormatClass(AccumuloInputFormat.class);
    InputFormatBuilder.InputFormatOptions<Job> inputOpts = AccumuloInputFormat.configure()
        .clientProperties(opts.getClientProperties()).table(opts.tableName);

    // Split "--column" into family[:qualifier]; no ':' means family only.
    String col = opts.column;
    int idx = col.indexOf(":");
    String cf;
    String cq;
    if (idx < 0) {
      cf = col;
      cq = null;
    } else {
      cf = col.substring(0, idx);
      cq = col.substring(idx + 1);
    }
    if (!cf.isEmpty()) {
      inputOpts.fetchColumns(Collections.singleton(new IteratorSetting.Column(cf, cq)));
    }
    inputOpts.store(job);

    job.setMapperClass(HashDataMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Mutation.class);
    job.setNumReduceTasks(0);

    job.setOutputFormatClass(AccumuloOutputFormat.class);
    AccumuloOutputFormat.configure().clientProperties(opts.getClientProperties())
        .defaultTable(opts.tableName).store(job);

    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
| 3,300 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/mapreduce/UniqueColumns.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.mapreduce;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.apache.accumulo.hadoop.mapreduce.AccumuloInputFormat;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import com.beust.jcommander.Parameter;
/**
* A simple map reduce job that computes the unique column families and column qualifiers in a
* table. This example shows one way to run against an offline table.
*/
public class UniqueColumns {

  private static final Text EMPTY = new Text();

  /**
   * Emits one record per column family ("cf:&lt;family&gt;") and one per column qualifier
   * ("cq:&lt;qualifier&gt;") seen in each key; the reducer collapses duplicates.
   */
  public static class UMapper extends Mapper<Key,Value,Text,Text> {
    private final Text temp = new Text();
    private static final Text CF = new Text("cf:");
    private static final Text CQ = new Text("cq:");

    @Override
    public void map(Key key, Value value, Context context)
        throws IOException, InterruptedException {
      temp.set(CF);
      ByteSequence cf = key.getColumnFamilyData();
      temp.append(cf.getBackingArray(), cf.offset(), cf.length());
      context.write(temp, EMPTY);

      temp.set(CQ);
      ByteSequence cq = key.getColumnQualifierData();
      temp.append(cq.getBackingArray(), cq.offset(), cq.length());
      context.write(temp, EMPTY);
    }
  }

  /** Writes each distinct column name once; also serves as the combiner. */
  public static class UReducer extends Reducer<Text,Text,Text,Text> {
    @Override
    public void reduce(Text key, Iterable<Text> values, Context context)
        throws IOException, InterruptedException {
      context.write(key, EMPTY);
    }
  }

  static class Opts extends ClientOpts {
    @Parameter(names = {"-t", "--table"}, required = true, description = "table to use")
    String tableName;
    @Parameter(names = "--output", description = "output directory")
    String output;
    @Parameter(names = "--reducers", description = "number of reducers to use", required = true)
    int reducers;
    @Parameter(names = "--offline", description = "run against an offline table")
    boolean offline = false;
  }

  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs(UniqueColumns.class.getName(), args);

    boolean success;
    try (AccumuloClient client = opts.createAccumuloClient()) {
      Job job = Job.getInstance(opts.getHadoopConfig());
      // One timestamped name shared by the MR job and the cloned table so they can be
      // correlated. (Previously two different System.currentTimeMillis() values were used.)
      String jobName = UniqueColumns.class.getSimpleName() + "_" + System.currentTimeMillis();
      job.setJobName(jobName);
      job.setJarByClass(UniqueColumns.class);

      job.setInputFormatClass(AccumuloInputFormat.class);

      String table = opts.tableName;
      if (opts.offline) {
        /*
         * this example clones the table and takes it offline. If you plan to run map reduce jobs
         * over a table many times, it may be more efficient to compact the table, clone it, and
         * then keep using the same clone as input for map reduce.
         */
        table = opts.tableName + "_" + jobName;
        client.tableOperations().clone(opts.tableName, table, true, new HashMap<>(),
            new HashSet<>());
        client.tableOperations().offline(table);
      }

      AccumuloInputFormat.configure().clientProperties(opts.getClientProperties()).table(table)
          .offlineScan(opts.offline).store(job);

      job.setMapperClass(UMapper.class);
      job.setMapOutputKeyClass(Text.class);
      job.setMapOutputValueClass(Text.class);

      job.setCombinerClass(UReducer.class);
      job.setReducerClass(UReducer.class);
      job.setNumReduceTasks(opts.reducers);

      job.setOutputFormatClass(TextOutputFormat.class);
      TextOutputFormat.setOutputPath(job, new Path(opts.output));

      success = job.waitForCompletion(true);

      // Remove the temporary clone before exiting.
      if (opts.offline) {
        client.tableOperations().delete(table);
      }
    }
    // Exit AFTER the try block so the client is closed by try-with-resources. The original
    // called System.exit() inside the try (skipping the implicit close) and left an
    // unreachable "System.exit(1)" after the block.
    System.exit(success ? 0 : 1);
  }
}
| 3,301 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/mapreduce/TeraSortIngest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.apache.accumulo.hadoop.mapreduce.AccumuloOutputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.beust.jcommander.Parameter;
/**
* Generate the *almost* official terasort input data set. (See below) The user specifies the number
* of rows and the output directory and this class runs a map/reduce program to generate the data.
* The format of the data is:
* <ul>
* <li>(10 bytes key) (10 bytes rowid) (78 bytes filler) \r \n
* <li>The keys are random characters from the set ' ' .. '~'.
* <li>The rowid is the right justified row id as a int.
* <li>The filler consists of 7 runs of 10 characters from 'A' to 'Z'.
* </ul>
*
* This TeraSort is slightly modified to allow for variable length key sizes and value sizes. The
* row length isn't variable. To generate a terabyte of data in the same way TeraSort does use
* 10000000000 rows and 10/10 byte key length and 78/78 byte value length. Along with the 10 byte
* row id and \r\n this gives you 100 byte row * 10000000000 rows = 1tb. Min/Max ranges for key and
* value parameters are inclusive/inclusive respectively.
*/
public class TeraSortIngest {

  private static final Logger log = LoggerFactory.getLogger(TeraSortIngest.class);

  /**
   * An input format that assigns ranges of longs to each mapper.
   */
  static class RangeInputFormat extends InputFormat<LongWritable,NullWritable> {
    /**
     * An input split consisting of a range on numbers.
     */
    static class RangeInputSplit extends InputSplit implements Writable {
      long firstRow; // first row id covered by this split
      long rowCount; // number of rows generated by this split

      public RangeInputSplit() {}

      public RangeInputSplit(long offset, long length) {
        firstRow = offset;
        rowCount = length;
      }

      @Override
      public long getLength() {
        // Byte length is meaningless for a synthetic range; 0 is acceptable here.
        return 0;
      }

      @Override
      public String[] getLocations() {
        // No data locality: rows are generated, not read from storage.
        return new String[] {};
      }

      @Override
      public void readFields(DataInput in) throws IOException {
        firstRow = WritableUtils.readVLong(in);
        rowCount = WritableUtils.readVLong(in);
      }

      @Override
      public void write(DataOutput out) throws IOException {
        WritableUtils.writeVLong(out, firstRow);
        WritableUtils.writeVLong(out, rowCount);
      }
    }

    /**
     * A record reader that will generate a range of numbers.
     */
    static class RangeRecordReader extends RecordReader<LongWritable,NullWritable> {
      final long startRow;
      long finishedRows; // rows emitted so far within this split
      final long totalRows;

      public RangeRecordReader(RangeInputSplit split) {
        startRow = split.firstRow;
        finishedRows = 0;
        totalRows = split.rowCount;
      }

      @Override
      public void close() throws IOException {}

      @Override
      public float getProgress() {
        return finishedRows / (float) totalRows;
      }

      @Override
      public LongWritable getCurrentKey() {
        // Absolute row id; nextKeyValue() has already advanced finishedRows.
        return new LongWritable(startRow + finishedRows);
      }

      @Override
      public NullWritable getCurrentValue() {
        return NullWritable.get();
      }

      @Override
      public void initialize(InputSplit split, TaskAttemptContext context) {}

      @Override
      public boolean nextKeyValue() {
        if (finishedRows < totalRows) {
          ++finishedRows;
          return true;
        }
        return false;
      }
    }

    @Override
    public RecordReader<LongWritable,NullWritable> createRecordReader(InputSplit split,
        TaskAttemptContext context) {
      return new RangeRecordReader((RangeInputSplit) split);
    }

    /**
     * Create the desired number of splits, dividing the number of rows between the mappers.
     */
    @Override
    public List<InputSplit> getSplits(JobContext job) {
      long totalRows = job.getConfiguration().getLong(NUMROWS, 0);
      int numSplits = job.getConfiguration().getInt(NUMSPLITS, 1);
      long rowsPerSplit = totalRows / numSplits;
      log.info(
          "Generating " + totalRows + " using " + numSplits + " maps with step of " + rowsPerSplit);
      ArrayList<InputSplit> splits = new ArrayList<>(numSplits);
      long currentRow = 0;
      for (int split = 0; split < numSplits - 1; ++split) {
        splits.add(new RangeInputSplit(currentRow, rowsPerSplit));
        currentRow += rowsPerSplit;
      }
      // Last split absorbs the remainder when totalRows is not evenly divisible by numSplits.
      splits.add(new RangeInputSplit(currentRow, totalRows - currentRow));
      log.info("Done Generating.");
      return splits;
    }
  }

  // Configuration keys: number of splits to create and total rows to generate.
  private static final String NUMSPLITS = "terasort.overridesplits";
  private static final String NUMROWS = "terasort.numrows";

  /**
   * Linear congruential generator (32-bit state) with precomputed seed checkpoints so a mapper
   * can cheaply skip ahead to an arbitrary iteration instead of iterating from zero.
   */
  static class RandomGenerator {
    private long seed = 0;
    private static final long mask32 = (1L << 32) - 1;

    /**
     * The number of iterations separating the precomputed seeds.
     */
    private static final int seedSkip = 128 * 1024 * 1024;

    /**
     * The precomputed seed values after every seedSkip iterations. There should be enough values so
     * that a 2**32 iterations are covered.
     */
    private static final long[] seeds = new long[] {0L, 4160749568L, 4026531840L, 3892314112L,
        3758096384L, 3623878656L, 3489660928L, 3355443200L, 3221225472L, 3087007744L, 2952790016L,
        2818572288L, 2684354560L, 2550136832L, 2415919104L, 2281701376L, 2147483648L, 2013265920L,
        1879048192L, 1744830464L, 1610612736L, 1476395008L, 1342177280L, 1207959552L, 1073741824L,
        939524096L, 805306368L, 671088640L, 536870912L, 402653184L, 268435456L, 134217728L,};

    /**
     * Start the random number generator on the given iteration.
     *
     * @param initalIteration
     *          the iteration number to start on
     */
    RandomGenerator(long initalIteration) {
      // Jump to the nearest checkpoint seed, then step forward the remaining iterations.
      int baseIndex = (int) ((initalIteration & mask32) / seedSkip);
      seed = seeds[baseIndex];
      for (int i = 0; i < initalIteration % seedSkip; ++i) {
        next();
      }
    }

    RandomGenerator() {
      this(0);
    }

    long next() {
      seed = (seed * 3141592621L + 663896637) & mask32;
      return seed;
    }
  }

  /**
   * The Mapper class that given a row number, will generate the appropriate output line.
   */
  public static class SortGenMapper extends Mapper<LongWritable,NullWritable,Text,Mutation> {
    private Text tableName = null;
    private int minkeylength = 0;
    private int maxkeylength = 0;
    private int minvaluelength = 0;
    private int maxvaluelength = 0;

    private final Text key = new Text();
    private final Text value = new Text();
    private RandomGenerator rand;
    // NOTE(review): this pad string is a single space here, but getRowIdString() may request up
    // to 10 pad characters from it — looks like the original 10-space literal was collapsed;
    // confirm against the upstream example source.
    private final String spaces = " ";
    private final byte[][] filler = new byte[26][];
    // Instance initializer: filler[i] is ten repetitions of the letter ('A' + i).
    {
      for (int i = 0; i < 26; ++i) {
        filler[i] = new byte[10];
        for (int j = 0; j < 10; ++j) {
          filler[i][j] = (byte) ('A' + i);
        }
      }
    }

    /**
     * Add a random key to the text
     */
    private final Random random = new Random();

    private void addKey() {
      int range = random.nextInt(maxkeylength - minkeylength + 1);
      int keylen = range + minkeylength;
      // Round up to a multiple of 4: each rand.next() call below yields 4 printable bytes.
      int keyceil = keylen + (4 - (keylen % 4));
      byte[] keyBytes = new byte[keyceil];

      long temp = 0;
      for (int i = 0; i < keyceil / 4; i++) {
        temp = rand.next() / 52;
        // Decompose temp base-95 into four characters from the printable range ' '..'~'.
        keyBytes[3 + 4 * i] = (byte) (' ' + (temp % 95));
        temp /= 95;
        keyBytes[2 + 4 * i] = (byte) (' ' + (temp % 95));
        temp /= 95;
        keyBytes[1 + 4 * i] = (byte) (' ' + (temp % 95));
        temp /= 95;
        keyBytes[4 * i] = (byte) (' ' + (temp % 95));
      }
      // Only the first keylen bytes are used; the rounding padding is discarded.
      key.set(keyBytes, 0, keylen);
    }

    /**
     * Add the rowid to the row.
     */
    private String getRowIdString(long rowId) {
      StringBuilder paddedRowIdString = new StringBuilder();
      String rowid = Integer.toString((int) rowId);
      int padSpace = 10 - rowid.length();
      if (padSpace > 0) {
        // Right-justify the row id within a 10-character field.
        paddedRowIdString.append(spaces, 0, padSpace);
      }
      paddedRowIdString.append(rowid, 0, Math.min(rowid.length(), 10));
      return paddedRowIdString.toString();
    }

    /**
     * Add the required filler bytes. Each row consists of 7 blocks of 10 characters and 1 block of
     * 8 characters.
     *
     * @param rowId
     *          the current row number
     */
    private void addFiller(long rowId) {
      int base = (int) ((rowId * 8) % 26);

      // Get Random var
      Random random = new Random(rand.seed);

      int range = random.nextInt(maxvaluelength - minvaluelength + 1);
      int valuelen = range + minvaluelength;

      // Append whole 10-byte blocks, then the final partial block.
      while (valuelen > 10) {
        value.append(filler[(base + valuelen) % 26], 0, 10);
        valuelen -= 10;
      }

      if (valuelen > 0)
        value.append(filler[(base + valuelen) % 26], 0, valuelen);
    }

    @Override
    public void map(LongWritable row, NullWritable ignored, Context context)
        throws IOException, InterruptedException {
      context.setStatus("Entering");
      long rowId = row.get();
      if (rand == null) {
        // we use 3 random numbers per a row
        rand = new RandomGenerator(rowId * 3);
      }
      addKey();
      value.clear();
      // addRowId(rowId);
      addFiller(rowId);

      // New
      Mutation m = new Mutation(key);
      // NOTE(review): getBytes() uses the platform default charset; filler is ASCII-only so
      // the result is the same on common platforms, but an explicit charset would be safer.
      m.put("c", // column family
          getRowIdString(rowId), // column qual
          new Value(value.toString().getBytes())); // data

      context.setStatus("About to add to accumulo");
      context.write(tableName, m);
      context.setStatus("Added to accumulo " + key);
    }

    @Override
    public void setup(Context job) {
      // Pull the generator parameters distributed through the job configuration in main().
      minkeylength = job.getConfiguration().getInt("cloudgen.minkeylength", 0);
      maxkeylength = job.getConfiguration().getInt("cloudgen.maxkeylength", 0);
      minvaluelength = job.getConfiguration().getInt("cloudgen.minvaluelength", 0);
      maxvaluelength = job.getConfiguration().getInt("cloudgen.maxvaluelength", 0);
      tableName = new Text(job.getConfiguration().get("cloudgen.tablename"));
    }
  }

  static class Opts extends ClientOpts {
    @Parameter(names = {"-t", "--table"}, required = true, description = "table to use")
    String tableName;
    @Parameter(names = "--count", description = "number of rows to ingest", required = true)
    long numRows;
    @Parameter(names = {"-nk", "--minKeySize"}, description = "miniumum key size", required = true)
    int minKeyLength;
    @Parameter(names = {"-xk", "--maxKeySize"}, description = "maximum key size", required = true)
    int maxKeyLength;
    @Parameter(names = {"-nv", "--minValueSize"}, description = "minimum key size", required = true)
    int minValueLength;
    @Parameter(names = {"-xv", "--maxValueSize"}, description = "maximum key size", required = true)
    int maxValueLength;
    @Parameter(names = "--splits", description = "number of splits to create in the table")
    int splits = 0;
  }

  /**
   * Configures and submits the map-only generation job: RangeInputFormat fans row-id ranges out
   * to SortGenMapper instances, which write mutations through AccumuloOutputFormat.
   */
  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs(TeraSortIngest.class.getName(), args);

    Job job = Job.getInstance(opts.getHadoopConfig());
    job.setJobName(TeraSortIngest.class.getName());
    job.setJarByClass(TeraSortIngest.class);

    job.setInputFormatClass(RangeInputFormat.class);
    job.setMapperClass(SortGenMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Mutation.class);

    job.setNumReduceTasks(0);

    job.setOutputFormatClass(AccumuloOutputFormat.class);
    AccumuloOutputFormat.configure().clientProperties(opts.getClientProperties())
        .defaultTable(opts.tableName).createTables(true).store(job);

    Configuration conf = job.getConfiguration();
    conf.setLong(NUMROWS, opts.numRows);
    conf.setInt("cloudgen.minkeylength", opts.minKeyLength);
    conf.setInt("cloudgen.maxkeylength", opts.maxKeyLength);
    conf.setInt("cloudgen.minvaluelength", opts.minValueLength);
    conf.setInt("cloudgen.maxvaluelength", opts.maxValueLength);
    conf.set("cloudgen.tablename", opts.tableName);

    if (opts.splits != 0)
      conf.setInt(NUMSPLITS, opts.splits);

    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
| 3,302 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/mapreduce/RegexExample.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.mapreduce;
import java.io.IOException;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.user.RegExFilter;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.apache.accumulo.hadoop.mapreduce.AccumuloInputFormat;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.beust.jcommander.Parameter;
public class RegexExample {

  private static final Logger log = LoggerFactory.getLogger(RegexExample.class);

  /** Identity mapper: forwards every entry that survived the server-side regex filter. */
  public static class RegexMapper extends Mapper<Key,Value,Key,Value> {
    @Override
    public void map(Key row, Value data, Context context) throws IOException, InterruptedException {
      context.write(row, data);
    }
  }

  static class Opts extends ClientOpts {
    @Parameter(names = {"-t", "--table"}, required = true, description = "table to use")
    String tableName;
    @Parameter(names = "--rowRegex")
    String rowRegex;
    @Parameter(names = "--columnFamilyRegex")
    String columnFamilyRegex;
    @Parameter(names = "--columnQualifierRegex")
    String columnQualifierRegex;
    @Parameter(names = "--valueRegex")
    String valueRegex;
    @Parameter(names = "--output", required = true)
    String destination;
  }

  /**
   * Runs a map-only job that scans a table through a server-side RegExFilter iterator and writes
   * the matching entries as text files under the requested output directory.
   */
  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs(RegexExample.class.getName(), args);

    Job job = Job.getInstance(opts.getHadoopConfig());
    job.setJobName(RegexExample.class.getSimpleName());
    job.setJarByClass(RegexExample.class);

    // Filtering happens inside the tablet servers via a RegExFilter iterator at priority 50;
    // unset regexes (null) leave the corresponding field unconstrained.
    IteratorSetting regex = new IteratorSetting(50, "regex", RegExFilter.class);
    RegExFilter.setRegexs(regex, opts.rowRegex, opts.columnFamilyRegex, opts.columnQualifierRegex,
        opts.valueRegex, false);

    job.setInputFormatClass(AccumuloInputFormat.class);
    AccumuloInputFormat.configure().clientProperties(opts.getClientProperties())
        .table(opts.tableName).addIterator(regex).store(job);

    job.setMapperClass(RegexMapper.class);
    job.setMapOutputKeyClass(Key.class);
    job.setMapOutputValueClass(Value.class);
    job.setNumReduceTasks(0);

    job.setOutputFormatClass(TextOutputFormat.class);
    TextOutputFormat.setOutputPath(job, new Path(opts.destination));

    log.info("setRowRegex: " + opts.rowRegex);
    log.info("setColumnFamilyRegex: " + opts.columnFamilyRegex);
    log.info("setColumnQualifierRegex: " + opts.columnQualifierRegex);
    log.info("setValueRegex: " + opts.valueRegex);

    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
| 3,303 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/mapreduce/NGramIngest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.mapreduce;
import java.io.IOException;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.admin.NewTableConfiguration;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.apache.accumulo.hadoop.mapreduce.AccumuloOutputFormat;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.beust.jcommander.Parameter;
/**
* Map job to ingest n-gram files from
* http://storage.googleapis.com/books/ngrams/books/datasetsv2.html
*/
public class NGramIngest {

  private static final Logger log = LoggerFactory.getLogger(NGramIngest.class);

  static class Opts extends ClientOpts {
    @Parameter(names = {"-t", "--table"}, required = true, description = "table to use")
    String tableName;
    @Parameter(names = {"-i", "--input"}, required = true, description = "HDFS input directory")
    String inputDirectory;
  }

  /**
   * Parses one tab-separated n-gram line and writes it as a mutation: row = ngram (field 0),
   * family = field 1, qualifier = zero-padded numeric field 2, value = trimmed field 3. Lines
   * with fewer than four fields are skipped.
   */
  static class NGramMapper extends Mapper<LongWritable,Text,Text,Mutation> {
    @Override
    protected void map(LongWritable location, Text value, Context context)
        throws IOException, InterruptedException {
      String[] parts = value.toString().split("\\t");
      if (parts.length >= 4) {
        Mutation m = new Mutation(parts[0]);
        // Zero-pad the count so lexicographic key order matches numeric order. Encode the value
        // with an explicit charset: the no-arg getBytes() depends on the platform default.
        m.put(parts[1], String.format("%010d", Long.parseLong(parts[2])),
            new Value(parts[3].trim().getBytes(java.nio.charset.StandardCharsets.UTF_8)));
        // A null key routes the mutation to the output format's default table.
        context.write(null, m);
      }
    }
  }

  /**
   * Creates the destination table (pre-split on [1-9a-zA-Z]) if absent, then submits a map-only
   * job that ingests the n-gram files from the input directory.
   */
  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs(NGramIngest.class.getName(), args);

    Job job = Job.getInstance(opts.getHadoopConfig());
    job.setJobName(NGramIngest.class.getSimpleName());
    job.setJarByClass(NGramIngest.class);

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(AccumuloOutputFormat.class);
    AccumuloOutputFormat.configure().clientProperties(opts.getClientProperties())
        .defaultTable(opts.tableName).store(job);

    job.setMapperClass(NGramMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Mutation.class);

    job.setNumReduceTasks(0);
    // Duplicate speculative attempts would double-write mutations; disable them.
    job.setSpeculativeExecution(false);

    try (AccumuloClient client = opts.createAccumuloClient()) {
      if (!client.tableOperations().exists(opts.tableName)) {
        String[] numbers = "1 2 3 4 5 6 7 8 9".split("\\s");
        String[] lower = "a b c d e f g h i j k l m n o p q r s t u v w x y z".split("\\s");
        String[] upper = "A B C D E F G H I J K L M N O P Q R S T U V W X Y Z".split("\\s");
        SortedSet<Text> splits = Stream.of(numbers, lower, upper).flatMap(Stream::of).map(Text::new)
            .collect(Collectors.toCollection(TreeSet::new));
        var newTableConfig = new NewTableConfiguration().withSplits(splits);
        log.info("Creating table " + opts.tableName);
        Common.createTableWithNamespace(client, opts.tableName, newTableConfig);
      }
    }

    TextInputFormat.addInputPath(job, new Path(opts.inputDirectory));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
| 3,304 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/mapreduce/TableToFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.mapreduce;
import java.io.IOException;
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.apache.accumulo.examples.util.FormatUtil;
import org.apache.accumulo.hadoop.mapreduce.AccumuloInputFormat;
import org.apache.accumulo.hadoop.mapreduce.InputFormatBuilder;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import com.beust.jcommander.Parameter;
/**
* Takes a table and outputs the specified column to a set of part files on hdfs
*/
public class TableToFile {

  static class Opts extends ClientOpts {
    @Parameter(names = {"-t", "--table"}, required = true, description = "table to use")
    String tableName;
    @Parameter(names = "--output", required = true, description = "output directory")
    String output;
    @Parameter(names = "--columns", description = "columns to extract, in cf:cq{,cf:cq,...} form")
    String columns = "";
  }

  /**
   * Formats each scanned entry as a single line of text and writes it with a null key, so the
   * output files contain only the formatted entries.
   */
  public static class TTFMapper extends Mapper<Key,Value,NullWritable,Text> {
    @Override
    public void map(Key row, Value data, Context context) throws IOException, InterruptedException {
      Map.Entry<Key,Value> entry = new SimpleImmutableEntry<>(row, data);
      context.write(NullWritable.get(), new Text(FormatUtil.formatTableEntry(entry, false)));
      // Fixed typo in the task status message ("Outputed" -> "Output").
      context.setStatus("Output Value");
    }
  }

  /**
   * Configures and submits a map-only job that scans the requested columns of a table and dumps
   * the formatted entries to text part files under the output directory.
   */
  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs(TableToFile.class.getName(), args);

    // Parse "--columns" into (family, qualifier) pairs; "cf" alone fetches the whole family.
    // An empty family (e.g. the default empty --columns value) is ignored.
    List<IteratorSetting.Column> columnsToFetch = new ArrayList<>();
    for (String col : opts.columns.split(",")) {
      int idx = col.indexOf(":");
      String cf = idx < 0 ? col : col.substring(0, idx);
      String cq = idx < 0 ? null : col.substring(idx + 1);
      if (!cf.isEmpty()) {
        columnsToFetch.add(new IteratorSetting.Column(cf, cq));
      }
    }

    Job job = Job.getInstance(opts.getHadoopConfig());
    job.setJobName(TableToFile.class.getSimpleName() + "_" + System.currentTimeMillis());
    job.setJarByClass(TableToFile.class);

    job.setInputFormatClass(AccumuloInputFormat.class);
    InputFormatBuilder.InputFormatOptions<Job> inputOpts = AccumuloInputFormat.configure()
        .clientProperties(opts.getClientProperties()).table(opts.tableName);
    if (!columnsToFetch.isEmpty()) {
      inputOpts.fetchColumns(columnsToFetch);
    }
    inputOpts.store(job);

    job.setMapperClass(TTFMapper.class);
    job.setMapOutputKeyClass(NullWritable.class);
    job.setMapOutputValueClass(Text.class);
    job.setNumReduceTasks(0);

    job.setOutputFormatClass(TextOutputFormat.class);
    TextOutputFormat.setOutputPath(job, new Path(opts.output));

    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
| 3,305 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/mapreduce/WordCount.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.mapreduce;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Collections;
import java.util.Date;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.admin.NewTableConfiguration;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.iterators.user.SummingCombiner;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.apache.accumulo.hadoop.mapreduce.AccumuloOutputFormat;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.beust.jcommander.Parameter;
/**
 * A simple MapReduce job that inserts word counts into Accumulo. See docs/mapred.md
 */
public final class WordCount {

  private static final Logger log = LoggerFactory.getLogger(WordCount.class);

  private WordCount() {}

  /** Command-line options: output table name, HDFS input directory, optional client-props path. */
  static class Opts extends ClientOpts {
    @Parameter(names = {"-t", "--table"}, description = "Name of output Accumulo table")
    String tableName = Common.NAMESPACE + ".wordcount";
    @Parameter(names = {"-i", "--input"}, required = true, description = "HDFS input directory")
    String inputDirectory;
    @Parameter(names = {"-d", "--dfsPath"},
        description = "HDFS Path where accumulo-client.properties exists")
    String hdfsPath;
  }

  /**
   * Emits one Mutation per whitespace-separated word; the SummingCombiner attached to the table
   * in {@code main} aggregates the per-word "1" values into daily counts.
   */
  public static class MapClass extends Mapper<LongWritable,Text,Text,Mutation> {

    // SimpleDateFormat is not thread-safe, but a mapper task is single-threaded, so one
    // reusable instance is safe here and avoids allocating a new formatter per input record.
    private final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyyMMdd");

    @Override
    public void map(LongWritable key, Text value, Context output) throws IOException {
      String today = dateFormat.format(new Date());
      String[] words = value.toString().split("\\s+");
      for (String word : words) {
        Mutation mutation = new Mutation(word);
        mutation.at().family("count").qualifier(today).put("1");
        try {
          // Null table name: AccumuloOutputFormat's configured default table receives the write.
          output.write(null, mutation);
        } catch (InterruptedException e) {
          log.error("Could not write mutation to Context.", e);
        }
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs(WordCount.class.getName(), args);

    // Create Accumulo table with Summing iterator attached
    try (AccumuloClient client = opts.createAccumuloClient()) {
      IteratorSetting is = new IteratorSetting(10, SummingCombiner.class);
      SummingCombiner.setColumns(is,
          Collections.singletonList(new IteratorSetting.Column("count")));
      SummingCombiner.setEncodingType(is, SummingCombiner.Type.STRING);
      Common.createTableWithNamespace(client, opts.tableName,
          new NewTableConfiguration().attachIterator(is));
    }

    // Create M/R job
    Job job = Job.getInstance(opts.getHadoopConfig());
    job.setJobName(WordCount.class.getName());
    job.setJarByClass(WordCount.class);
    job.setInputFormatClass(TextInputFormat.class);
    TextInputFormat.setInputPaths(job, new Path(opts.inputDirectory));
    job.setMapperClass(MapClass.class);
    job.setNumReduceTasks(0);
    job.setOutputFormatClass(AccumuloOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Mutation.class);
    if (opts.hdfsPath != null) {
      // Load client properties from HDFS so every node can read them.
      AccumuloOutputFormat.configure().clientPropertiesPath(opts.hdfsPath)
          .defaultTable(opts.tableName).store(job);
    } else {
      AccumuloOutputFormat.configure().clientProperties(opts.getClientProperties())
          .defaultTable(opts.tableName).store(job);
    }
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
| 3,306 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/mapreduce | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/VerifyIngest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.mapreduce.bulk;
import java.util.Iterator;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Scans the bulk-ingest table and verifies that every expected row/value pair written by
 * {@link BulkIngestExample} is present and correct. Exits 0 on success, 1 on any mismatch.
 */
public final class VerifyIngest {

  private static final Logger log = LoggerFactory.getLogger(VerifyIngest.class);
  private static final String ROW_FORMAT = "row_%010d";
  private static final String VALUE_FORMAT = "value_%010d";

  private VerifyIngest() {}

  public static void main(String[] args) throws TableNotFoundException {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(VerifyIngest.class.getName(), args);

    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build();
        Scanner scanner = client.createScanner(SetupTable.BULK_INGEST_TABLE,
            Authorizations.EMPTY)) {
      // Start at the first expected row; the open-ended range covers everything after it.
      scanner.setRange(new Range(String.format(ROW_FORMAT, 0), null));
      Iterator<Entry<Key,Value>> si = scanner.iterator();
      boolean ok = true;
      for (int i = 0; i < BulkIngestExample.numRows; i++) {
        // Format each expected string once per iteration instead of once per comparison.
        String expectedRow = String.format(ROW_FORMAT, i);
        if (!si.hasNext()) {
          log.error("no more rows, expected {}", expectedRow);
          ok = false;
          break;
        }
        Entry<Key,Value> entry = si.next();
        if (!entry.getKey().getRow().toString().equals(expectedRow)) {
          log.error("unexpected row key {}; expected {}", entry.getKey().getRow(), expectedRow);
          ok = false;
        }
        String expectedValue = String.format(VALUE_FORMAT, i);
        if (!entry.getValue().toString().equals(expectedValue)) {
          log.error("unexpected value {}; expected {}", entry.getValue(), expectedValue);
          ok = false;
        }
      }

      if (ok) {
        log.info("Data verification succeeded!");
        System.exit(0);
      } else {
        log.info("Data verification failed!");
        System.exit(1);
      }
    }
  }
}
| 3,307 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/mapreduce | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/SetupTable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.mapreduce.bulk;
import java.util.TreeSet;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.admin.NewTableConfiguration;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.apache.hadoop.io.Text;
/**
 * Creates the pre-split table that {@link BulkIngestExample} bulk-imports into.
 */
public final class SetupTable {

  static final String BULK_INGEST_TABLE = Common.NAMESPACE + ".test_bulk";

  private SetupTable() {}

  public static void main(String[] args)
      throws AccumuloSecurityException, TableNotFoundException, AccumuloException {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(SetupTable.class.getName(), args);

    // Pre-split so the bulk-imported files are distributed across multiple tablets.
    TreeSet<Text> initialPartitions = new TreeSet<>();
    initialPartitions.add(new Text("row_00000333"));
    initialPartitions.add(new Text("row_00000666"));

    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
      // create a table with initial partitions
      Common.createTableWithNamespace(client, BULK_INGEST_TABLE,
          new NewTableConfiguration().withSplits(initialPartitions));
    }
  }
}
| 3,308 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/mapreduce | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.mapreduce.bulk;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Base64;
import java.util.Collection;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.apache.accumulo.hadoop.mapreduce.AccumuloFileOutputFormat;
import org.apache.accumulo.hadoop.mapreduce.partition.RangePartitioner;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
/**
 * Example map reduce job that bulk ingest data into an accumulo table. The expected input is text
 * files containing tab separated key value pairs on each line.
 */
public final class BulkIngestExample {

  static final String workDir = "tmp/bulkWork";
  static final String inputDir = "bulk";
  static final String outputFile = "bulk/test_1.txt";
  static final int numRows = 1000;
  static final String SLASH_FILES = "/files";
  static final String FAILURES = "failures";
  static final String SPLITS_TXT = "/splits.txt";

  private BulkIngestExample() {}

  /** Splits each input line at the first tab into an output (key, value) pair. */
  public static class MapClass extends Mapper<LongWritable,Text,Text,Text> {
    private final Text outputKey = new Text();
    private final Text outputValue = new Text();

    @Override
    public void map(LongWritable key, Text value, Context output)
        throws IOException, InterruptedException {
      // split on tab
      int index = -1;
      for (int i = 0; i < value.getLength(); i++) {
        if (value.getBytes()[i] == '\t') {
          index = i;
          break;
        }
      }
      // Lines with no tab, or with an empty key before the tab, are silently dropped.
      if (index > 0) {
        outputKey.set(value.getBytes(), 0, index);
        outputValue.set(value.getBytes(), index + 1, value.getLength() - (index + 1));
        output.write(outputKey, outputValue);
      }
    }
  }

  /** Turns each grouped (row, values) pair into sorted Accumulo key/values for an rfile. */
  public static class ReduceClass extends Reducer<Text,Text,Key,Value> {
    @Override
    public void reduce(Text key, Iterable<Text> values, Context output)
        throws IOException, InterruptedException {
      // be careful with the timestamp... if you run on a cluster
      // where the time is whacked you may not see your updates in
      // accumulo if there is already an existing value with a later
      // timestamp in accumulo... so make sure ntp is running on the
      // cluster or consider using logical time... one options is
      // to let accumulo set the time
      long timestamp = System.currentTimeMillis();
      int index = 0;
      for (Text value : values) {
        // Distinct qualifiers keep multiple values for the same row as separate entries.
        Key outputKey = new Key(key, new Text("colf"), new Text(String.format("col_%07d", index)),
            timestamp);
        index++;
        Value outputValue = new Value(value.getBytes(), 0, value.getLength());
        output.write(outputKey, outputValue);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(BulkIngestExample.class.getName(), args);

    FileSystem fs = FileSystem.get(opts.getHadoopConfig());
    generateTestData(fs);
    // Bug fix: the result of ingestTestData used to be discarded, so a failed bulk
    // import still exited with code 0. Propagate it like the other M/R examples.
    System.exit(ingestTestData(fs, opts));
  }

  /** Writes numRows tab-separated row/value pairs to the HDFS input file. */
  private static void generateTestData(FileSystem fs) throws IOException {
    try (PrintStream out = new PrintStream(
        new BufferedOutputStream(fs.create(new Path(outputFile))))) {
      for (int i = 0; i < numRows; i++) {
        out.printf("row_%010d\tvalue_%010d%n", i, i);
      }
    }
  }

  /**
   * Runs the M/R job that writes rfiles, then bulk-imports them into the pre-split table.
   *
   * @return 0 if the job succeeded, 1 otherwise
   */
  private static int ingestTestData(FileSystem fs, ClientOpts opts) throws Exception {
    Job job = Job.getInstance(opts.getHadoopConfig());
    job.setJobName(BulkIngestExample.class.getSimpleName());
    job.setJarByClass(BulkIngestExample.class);
    job.setInputFormatClass(TextInputFormat.class);
    job.setMapperClass(MapClass.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);
    job.setReducerClass(ReduceClass.class);
    job.setOutputFormatClass(AccumuloFileOutputFormat.class);
    TextInputFormat.setInputPaths(job, new Path(inputDir));
    AccumuloFileOutputFormat.configure().outputPath(new Path(workDir + SLASH_FILES)).store(job);

    try (AccumuloClient client = opts.createAccumuloClient()) {
      // Write the table's current split points so the RangePartitioner can align
      // one reducer (and therefore one rfile) with each tablet.
      try (PrintStream out = new PrintStream(
          new BufferedOutputStream(fs.create(new Path(workDir + SPLITS_TXT))))) {
        Collection<Text> splits = client.tableOperations().listSplits(SetupTable.BULK_INGEST_TABLE,
            100);
        for (Text split : splits) {
          out.println(Base64.getEncoder().encodeToString(split.copyBytes()));
        }
        job.setNumReduceTasks(splits.size() + 1);
      }

      job.setPartitionerClass(RangePartitioner.class);
      RangePartitioner.setSplitFile(job, workDir + SPLITS_TXT);

      job.waitForCompletion(true);
      Path failures = new Path(workDir, FAILURES);
      fs.delete(failures, true);
      fs.mkdirs(new Path(workDir, FAILURES));
      // With HDFS permissions on, we need to make sure the Accumulo user can read/move the rfiles
      FsShell fsShell = new FsShell(opts.getHadoopConfig());
      fsShell.run(new String[] {"-chmod", "-R", "777", workDir});
      System.err.println("Importing Directory '" + workDir + SLASH_FILES + "' to table '"
          + SetupTable.BULK_INGEST_TABLE + "'");
      client.tableOperations().importDirectory(workDir + SLASH_FILES)
          .to(SetupTable.BULK_INGEST_TABLE).load();
    }
    return job.isSuccessful() ? 0 : 1;
  }
}
| 3,309 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/client/TracingExample.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.client;
import java.time.Instant;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.ClientOnDefaultTable;
import org.apache.accumulo.examples.cli.ScannerOpts;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.beust.jcommander.Parameter;
import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.Tracer;
import io.opentelemetry.context.Scope;
/**
 * A simple example showing how to use the distributed tracing API in client code
 *
 */
public class TracingExample {
  private static final Logger log = LoggerFactory.getLogger(TracingExample.class);
  private static final String DEFAULT_TABLE_NAME = "test";
  // Created once in the constructor and reused for every traced operation.
  private final AccumuloClient client;
  private final Tracer tracer;
  /** Command-line flags selecting which traced table operations to perform. */
  static class Opts extends ClientOnDefaultTable {
    @Parameter(names = {"--createtable"}, description = "create table before doing anything")
    boolean createtable = false;
    @Parameter(names = {"--deletetable"}, description = "delete table when finished")
    boolean deletetable = false;
    @Parameter(names = {"--create"}, description = "create entries before any deletes")
    boolean createEntries = false;
    @Parameter(names = {"--read"}, description = "read entries after any creates/deletes")
    boolean readEntries = false;
    public Opts() {
      super(DEFAULT_TABLE_NAME);
      auths = new Authorizations();
    }
  }
  private TracingExample(AccumuloClient client) {
    this.client = client;
    // Obtain a tracer from whatever OpenTelemetry SDK has been registered globally.
    this.tracer = GlobalOpenTelemetry.get().getTracer(TracingExample.class.getSimpleName());
  }
  // Runs the selected operations inside a single top-level span. The Scope makes the
  // span current for this thread so nested spans become its children; span.end() must
  // run even on failure, hence the finally block.
  private void execute(Opts opts)
      throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
    Span span = tracer.spanBuilder("trace example").startSpan();
    try (Scope scope = span.makeCurrent()) {
      if (opts.createtable) {
        Common.createTableWithNamespace(client, opts.getTableName());
      }
      if (opts.createEntries) {
        createEntries(opts);
      }
      if (opts.readEntries) {
        readEntries(opts);
      }
      if (opts.deletetable) {
        client.tableOperations().delete(opts.getTableName());
      }
    } finally {
      span.end();
    }
  }
  private void createEntries(Opts opts) throws TableNotFoundException, AccumuloException {
    // Trace the write operation. Note, unless you flush the BatchWriter, you will not capture
    // the write operation as it is occurs asynchronously. You can optionally create additional
    // Spans
    // within a given Trace as seen below around the flush
    Span span = tracer.spanBuilder("createEntries").startSpan();
    try (Scope scope = span.makeCurrent()) {
      try (BatchWriter batchWriter = client.createBatchWriter(opts.getTableName())) {
        Mutation m = new Mutation("row");
        m.put("cf", "cq", "value");
        batchWriter.addMutation(m);
        // You can add timeline annotations to Spans which will be able to be viewed in the Monitor
        span.addEvent("Initiating Flush", Instant.now());
        batchWriter.flush();
      }
    } finally {
      span.end();
    }
  }
  private void readEntries(Opts opts) throws TableNotFoundException {
    try (Scanner scanner = client.createScanner(opts.getTableName(), opts.auths)) {
      // Trace the read operation.
      Span span = tracer.spanBuilder("readEntries").startSpan();
      try (Scope scope = span.makeCurrent()) {
        // peek() prints each entry as a side effect; count() drives the stream and
        // gives us the total for the span attribute below.
        long numberOfEntriesRead = scanner.stream().peek(entry -> System.out
            .println(entry.getKey().toString() + " -> " + entry.getValue().toString())).count();
        // You can add additional metadata (key, values) to Spans
        span.setAttribute("Number of Entries Read", numberOfEntriesRead);
      } finally {
        span.end();
      }
    }
  }
  public static void main(String[] args) {
    Opts opts = new Opts();
    ScannerOpts scannerOpts = new ScannerOpts();
    opts.parseArgs(TracingExample.class.getName(), args, scannerOpts);
    try (AccumuloClient client = opts.createAccumuloClient()) {
      TracingExample tracingExample = new TracingExample(client);
      tracingExample.execute(opts);
    } catch (Exception e) {
      log.error("Caught exception running TraceExample", e);
      System.exit(1);
    }
  }
}
| 3,310 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.client;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Writes ten rows to a demo table, scans them back, then deletes the table.
 */
public class ReadWriteExample {

  private static final Logger log = LoggerFactory.getLogger(ReadWriteExample.class);
  private static final String READWRITE_TABLE = Common.NAMESPACE + ".readwrite";

  private ReadWriteExample() {}

  public static void main(String[] args) throws AccumuloSecurityException, AccumuloException {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(ReadWriteExample.class.getName(), args);

    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
      Common.createTableWithNamespace(client, READWRITE_TABLE);

      // write data: hello0/cf:cq -> world0 ... hello9/cf:cq -> world9
      try (BatchWriter batchWriter = client.createBatchWriter(READWRITE_TABLE)) {
        for (int row = 0; row < 10; row++) {
          Mutation mutation = new Mutation("hello" + row);
          mutation.put("cf", "cq", new Value("world" + row));
          batchWriter.addMutation(mutation);
        }
      } catch (TableNotFoundException e) {
        log.error("Could not find table {}: {}", e.getTableName(), e.getMessage());
        System.exit(1);
      }

      // read data back with empty authorizations and log every entry
      try (Scanner scanner = client.createScanner(READWRITE_TABLE, Authorizations.EMPTY)) {
        for (Entry<Key,Value> kv : scanner) {
          log.info("{} -> {}", kv.getKey().toString(), kv.getValue().toString());
        }
      } catch (TableNotFoundException e) {
        log.error("Could not find table {}: {}", e.getTableName(), e.getMessage());
        System.exit(1);
      }

      // delete the demo table
      try {
        client.tableOperations().delete(READWRITE_TABLE);
      } catch (TableNotFoundException e) {
        log.error("Unable to delete table '{}': {}", e.getTableName(), e.getMessage());
      }
    }
  }
}
| 3,311 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/client/Flush.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.client;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
/**
 * Simple example for using tableOperations() (like create, delete, flush, etc).
 */
public class Flush {

  public static void main(String[] args)
      throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
    ClientOnRequiredTable opts = new ClientOnRequiredTable();
    opts.parseArgs(Flush.class.getName(), args);

    // Flush the whole table (null start/end rows) and wait for it to finish.
    try (AccumuloClient accumuloClient = opts.createAccumuloClient()) {
      accumuloClient.tableOperations().flush(opts.getTableName(), null, null, true);
    }
  }
}
| 3,312 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.client;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.security.SecurityErrorCode;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.examples.cli.BatchWriterOpts;
import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
import com.beust.jcommander.Parameter;
/**
 * Simple example for writing random data to Accumulo.
 *
 * The rows of the entries will be randomly generated numbers between a specified min and max
 * (prefixed by "row_"). The column families will be "foo" and column qualifiers will be "1". The
 * values will be random byte arrays of a specified size.
 */
public class RandomBatchWriter {

  /**
   * Creates a random byte array of specified size using the specified seed.
   *
   * @param rowid
   *          the seed to use for the random number generator
   * @param dataSize
   *          the size of the array
   * @return a random byte array
   */
  public static byte[] createValue(long rowid, int dataSize) {
    Random r = new Random(rowid);
    byte[] value = new byte[dataSize];
    r.nextBytes(value);

    // transform to printable chars
    for (int j = 0; j < value.length; j++) {
      value[j] = (byte) (((0xff & value[j]) % 92) + ' ');
    }

    return value;
  }

  /**
   * Creates a mutation on a specified row with column family "foo", column qualifier "1", specified
   * visibility, and a random value of specified size.
   *
   * @param rowid
   *          the row of the mutation
   * @param dataSize
   *          the size of the random value
   * @param visibility
   *          the visibility of the entry to insert
   * @return a mutation
   */
  public static Mutation createMutation(long rowid, int dataSize, ColumnVisibility visibility) {
    Mutation m = new Mutation(String.format("row_%010d", rowid));
    // create a random value that is a function of the
    // row id for verification purposes
    byte[] value = createValue(rowid, dataSize);
    m.put("foo", "1", visibility, new Value(value));
    return m;
  }

  static class Opts extends ClientOnRequiredTable {
    @Parameter(names = "--num", required = true)
    int num = 0;
    @Parameter(names = "--min")
    long min = 0;
    @Parameter(names = "--max")
    long max = Long.MAX_VALUE;
    @Parameter(names = "--size", required = true, description = "size of the value to write")
    int size = 0;
    // NOTE: the field name keeps its historical misspelling so external callers are unaffected.
    @Parameter(names = "--vis", converter = VisibilityConverter.class)
    ColumnVisibility visiblity = new ColumnVisibility("");
    @Parameter(names = "--seed", description = "seed for pseudo-random number generator")
    Long seed = null;
  }

  /**
   * Returns a non-negative absolute value; Math.abs(Long.MIN_VALUE) stays negative, so it maps
   * to 0 instead.
   */
  public static long abs(long l) {
    l = Math.abs(l); // abs(Long.MIN_VALUE) == Long.MIN_VALUE...
    if (l < 0) {
      return 0;
    }
    return l;
  }

  /**
   * Writes a specified number of entries to Accumulo using a {@link BatchWriter}.
   */
  public static void main(String[] args) throws AccumuloException, TableNotFoundException {
    Opts opts = new Opts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
    if ((opts.max - opts.min) < 1L * opts.num) { // right-side multiplied by 1L to convert to long
                                                 // in a way that doesn't trigger FindBugs
      System.err.printf(
          "You must specify a min and a max that allow for at least num possible values. "
              + "For example, you requested %d rows, but a min of %d and a max of %d (exclusive), which only allows for %d rows.%n",
          opts.num, opts.min, opts.max, (opts.max - opts.min));
      System.exit(1);
    }
    // Seeded runs are reproducible (useful with RandomBatchScanner verification).
    Random r = (opts.seed == null) ? new Random() : new Random(opts.seed);

    try (AccumuloClient client = opts.createAccumuloClient();
        BatchWriter bw = client.createBatchWriter(opts.getTableName(),
            bwOpts.getBatchWriterConfig())) {
      // reuse the ColumnVisibility object to improve performance
      ColumnVisibility cv = opts.visiblity;

      // Generate num unique row ids in the given range
      HashSet<Long> rowids = new HashSet<>(opts.num);
      while (rowids.size() < opts.num) {
        rowids.add((abs(r.nextLong()) % (opts.max - opts.min)) + opts.min);
      }
      for (long rowid : rowids) {
        Mutation m = createMutation(rowid, opts.size, cv);
        bw.addMutation(m);
      }
    } catch (MutationsRejectedException e) {
      // Summarize security errors per table so the operator can fix authorizations.
      if (!e.getSecurityErrorCodes().isEmpty()) {
        HashMap<String,Set<SecurityErrorCode>> tables = new HashMap<>();
        for (Entry<TabletId,Set<SecurityErrorCode>> ke : e.getSecurityErrorCodes().entrySet()) {
          String tableId = ke.getKey().getTable().toString();
          Set<SecurityErrorCode> secCodes = tables.computeIfAbsent(tableId, k -> new HashSet<>());
          secCodes.addAll(ke.getValue());
        }
        System.err.println("ERROR : Not authorized to write to tables : " + tables);
      }
      if (!e.getConstraintViolationSummaries().isEmpty()) {
        System.err.println(
            "ERROR : Constraint violations occurred : " + e.getConstraintViolationSummaries());
      }
      System.exit(1);
    }
  }
}
| 3,313 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/client/CountingVerifyingReceiver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.client;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.util.Arrays;
import java.util.HashMap;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Internal class used to verify validity of data read.
*/
/**
 * Internal class used to verify validity of data read.
 *
 * <p>For each key/value received, recomputes the expected value from the row id (values written
 * by RandomBatchWriter are a deterministic function of the row) and compares it to the value
 * actually read, logging any mismatch. Also tracks which expected rows have been seen.
 */
class CountingVerifyingReceiver {
  private static final Logger log = LoggerFactory.getLogger(CountingVerifyingReceiver.class);

  // total number of entries received so far
  long count = 0;
  // size, in bytes, of every expected value
  int expectedValueSize = 0;
  // row -> whether the row has been received; all values start out false
  final HashMap<String,Boolean> expectedRows;

  CountingVerifyingReceiver(HashMap<String,Boolean> expectedRows, int expectedValueSize) {
    this.expectedRows = expectedRows;
    this.expectedValueSize = expectedValueSize;
  }

  /**
   * Verifies a single entry: logs an error if the value does not match the recomputed expected
   * value or if the row was not expected, and marks the row as seen otherwise.
   */
  public void receive(Key key, Value value) {
    String row = key.getRow().toString();
    // rows look like "row_%010d"; parse as long, since RandomBatchWriter draws row ids up to
    // Long.MAX_VALUE and Integer.parseInt would throw for ids beyond the int range
    long rowid = Long.parseLong(row.split("_")[1]);

    byte[] expectedValue = RandomBatchWriter.createValue(rowid, expectedValueSize);

    if (!Arrays.equals(expectedValue, value.get())) {
      // parameterized logging avoids building the message when the level is disabled
      log.error("Got unexpected value for {} expected : {} got : {}", key,
          new String(expectedValue, UTF_8), new String(value.get(), UTF_8));
    }

    if (!expectedRows.containsKey(row)) {
      log.error("Got unexpected key {}", key);
    } else {
      expectedRows.put(row, true);
    }

    count++;
  }
}
| 3,314 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.client;
import java.util.Random;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.beust.jcommander.Parameter;
/**
* Simple example for writing random data in sequential order to Accumulo.
*/
/**
 * Simple example for writing random data in sequential order to Accumulo.
 */
public final class SequentialBatchWriter {

  private static final Logger log = LoggerFactory.getLogger(SequentialBatchWriter.class);

  static final String BATCH_TABLE = Common.NAMESPACE + ".batch";

  private SequentialBatchWriter() {}

  /**
   * Produces a value derived deterministically from {@code rowId}: the row id seeds a
   * pseudo-random generator and each byte is folded into the printable ASCII range, so readers
   * can recompute and verify the value later.
   *
   * @param rowId seed for the generator
   * @param size number of bytes in the value
   * @return a {@link Value} of {@code size} printable bytes
   */
  public static Value createValue(long rowId, int size) {
    byte[] data = new byte[size];
    new Random(rowId).nextBytes(data);
    // fold each byte into the printable ASCII range [' ', ' ' + 91]
    for (int i = 0; i < data.length; i++) {
      data[i] = (byte) (((0xff & data[i]) % 92) + ' ');
    }
    return new Value(data);
  }

  static class Opts extends ClientOpts {
    @Parameter(names = {"-t"}, description = "table to use")
    public String tableName = BATCH_TABLE;
    @Parameter(names = {"--start"}, description = "starting row")
    public Integer start = 0;
    @Parameter(names = {"--num"}, description = "number of rows")
    public Integer num = 10_000;
    @Parameter(names = {"--size"}, description = "size of values")
    public Integer size = 50;
  }

  /**
   * Writes {@code --num} entries (default 10,000) to Accumulo using a {@link BatchWriter}. Rows
   * are sequential starting from {@code --start}; every entry uses column family "foo" and
   * qualifier "1", and a value of {@code --size} bytes derived from the row id.
   */
  public static void main(String[] args)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    Opts opts = new Opts();
    opts.parseArgs(SequentialBatchWriter.class.getName(), args);

    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
      Common.createTableWithNamespace(client, opts.tableName);
      try (BatchWriter writer = client.createBatchWriter(opts.tableName)) {
        for (int i = 0; i < opts.num; i++) {
          int row = i + opts.start;
          Mutation mutation = new Mutation(String.format("row_%010d", row));
          // the value is a pure function of the row id, for later verification
          mutation.put("foo", "1", createValue(row, opts.size));
          writer.addMutation(mutation);
          if (i % 1000 == 0) {
            log.trace("wrote {} entries", i);
          }
        }
      }
    }
  }
}
| 3,315 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.client;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.accumulo.examples.client.RandomBatchWriter.abs;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map.Entry;
import java.util.Random;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Simple example for reading random batches of data from Accumulo.
*/
/**
 * Simple example for reading random batches of data from Accumulo.
 */
public final class RandomBatchScanner {

  private static final Logger log = LoggerFactory.getLogger(RandomBatchScanner.class);

  private RandomBatchScanner() {}

  /**
   * Reads 1000 random rows (from the 10,000 written by {@link SequentialBatchWriter}) with a
   * {@link BatchScanner}, verifies each value against the deterministic value function, and
   * reports timing. Exits with status 1 if any requested row was not returned.
   */
  public static void main(String[] args) throws TableNotFoundException {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(RandomBatchScanner.class.getName(), args);

    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
      if (!client.tableOperations().exists(SequentialBatchWriter.BATCH_TABLE)) {
        log.error(
            "Table " + SequentialBatchWriter.BATCH_TABLE + " does not exist. Nothing to scan!");
        log.error("Try running './bin/runex client.SequentialBatchWriter' first");
        return;
      }

      int totalLookups = 1000;
      int totalEntries = 10000;
      Random r = new Random();
      HashSet<Range> ranges = new HashSet<>();
      // row -> whether the row has been seen in the scan results
      HashMap<String,Boolean> expectedRows = new HashMap<>();

      log.info("Generating {} random ranges for BatchScanner to read", totalLookups);
      // the HashSet de-duplicates, so loop until we hold totalLookups distinct rows
      while (ranges.size() < totalLookups) {
        long rowId = abs(r.nextLong()) % totalEntries;
        String row = String.format("row_%010d", rowId);
        ranges.add(new Range(row));
        expectedRows.put(row, false);
      }

      long t1 = System.currentTimeMillis();
      long lookups = 0;

      log.info("Reading ranges using BatchScanner");
      try (BatchScanner scan = client.createBatchScanner(SequentialBatchWriter.BATCH_TABLE,
          Authorizations.EMPTY, 20)) {
        scan.setRanges(ranges);
        for (Entry<Key,Value> entry : scan) {
          Key key = entry.getKey();
          Value value = entry.getValue();
          String row = key.getRow().toString();
          // parse as long: row ids are declared long and formatted with %010d, which can
          // represent values beyond the int range
          long rowId = Long.parseLong(row.split("_")[1]);
          // the stored value is a deterministic function of the row id; recompute and compare
          Value expectedValue = SequentialBatchWriter.createValue(rowId, 50);

          if (!Arrays.equals(expectedValue.get(), value.get())) {
            log.error("Unexpected value for key: {} expected: {} actual: {}", key,
                new String(expectedValue.get(), UTF_8), new String(value.get(), UTF_8));
          }

          // mark the row as seen; replace returns null only when the row was never requested
          if (expectedRows.replace(row, true) == null) {
            log.error("Encountered unexpected key: {} ", key);
          }

          lookups++;
          if (lookups % 100 == 0) {
            log.trace("{} lookups", lookups);
          }
        }
      }

      long t2 = System.currentTimeMillis();
      log.info(String.format("Scan finished! %6.2f lookups/sec, %.2f secs, %d results",
          lookups / ((t2 - t1) / 1000.0), ((t2 - t1) / 1000.0), lookups));

      // any row still mapped to false was requested but never returned by the scan
      int count = 0;
      for (Entry<String,Boolean> entry : expectedRows.entrySet()) {
        if (!entry.getValue()) {
          count++;
        }
      }

      if (count > 0) {
        log.warn("Did not find {} rows", count);
        System.exit(1);
      }

      log.info("All expected rows were scanned");
    }
  }
}
| 3,316 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/client/RowOperations.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.client;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A demonstration of reading entire rows and deleting entire rows.
*/
/**
 * A demonstration of reading entire rows and deleting entire rows.
 */
public final class RowOperations {

  private static final Logger log = LoggerFactory.getLogger(RowOperations.class);

  static final String ROWOPS_TABLE = Common.NAMESPACE + ".rowops";

  private RowOperations() {}

  /** Logs every key/value pair currently in the table. */
  private static void printAll(AccumuloClient client) throws TableNotFoundException {
    try (Scanner scanner = client.createScanner(ROWOPS_TABLE, Authorizations.EMPTY)) {
      for (Entry<Key,Value> kv : scanner) {
        log.info("Key: {} Value: {}", kv.getKey().toString(), kv.getValue().toString());
      }
    }
  }

  /** Logs every key/value pair in the single given row. */
  private static void printRow(String row, AccumuloClient client) throws TableNotFoundException {
    try (Scanner scanner = client.createScanner(ROWOPS_TABLE, Authorizations.EMPTY)) {
      scanner.setRange(Range.exact(row));
      for (Entry<Key,Value> kv : scanner) {
        log.info("Key: {} Value: {}", kv.getKey().toString(), kv.getValue().toString());
      }
    }
  }

  /**
   * Deletes a whole row by scanning it to discover its columns, then writing one delete per
   * column in a single mutation.
   */
  private static void deleteRow(String row, AccumuloClient client, BatchWriter bw)
      throws MutationsRejectedException, TableNotFoundException {
    Mutation deletions = new Mutation(row);
    try (Scanner scanner = client.createScanner(ROWOPS_TABLE, Authorizations.EMPTY)) {
      scanner.setRange(Range.exact(row));
      for (Entry<Key,Value> kv : scanner) {
        deletions.putDelete(kv.getKey().getColumnFamily(), kv.getKey().getColumnQualifier());
      }
    }
    bw.addMutation(deletions);
    bw.flush();
  }

  public static void main(String[] args)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(RowOperations.class.getName(), args);

    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
      Common.createTableWithNamespace(client, ROWOPS_TABLE);

      try (BatchWriter bw = client.createBatchWriter(ROWOPS_TABLE)) {
        // populate three rows, each with columns col:1=v1, col:2=v2, col:3=v3
        for (String row : new String[] {"row1", "row2", "row3"}) {
          Mutation mut = new Mutation(row);
          for (int q = 1; q <= 3; q++) {
            mut.put("col", Integer.toString(q), "v" + q);
          }
          bw.addMutation(mut);
        }
        // Force a send
        bw.flush();

        log.info("This is only row2");
        printRow("row2", client);

        log.info("This is everything");
        printAll(client);

        deleteRow("row2", client, bw);

        log.info("This is row1 and row3");
        printAll(client);

        deleteRow("row1", client, bw);
      }

      log.info("This is just row3");
      printAll(client);

      client.tableOperations().delete(ROWOPS_TABLE);
    }
  }
}
| 3,317 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/shard/Index.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.shard;
import java.io.File;
import java.io.FileReader;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.examples.cli.ClientOpts;
import com.beust.jcommander.Parameter;
/**
* This program indexes a set of documents given on the command line into a shard table.
*
* What it writes to the table is row = partition id, column family = term, column qualifier =
* document id.
*/
/**
 * This program indexes a set of documents given on the command line into a shard table.
 *
 * What it writes to the table is row = partition id, column family = term, column qualifier =
 * document id.
 */
public class Index {

  /** Formats a partition number as a fixed-width hex row id. */
  static String genPartition(int partition) {
    return String.format("%08x", Math.abs(partition));
  }

  /**
   * Indexes a single document: tokenizes it with {@code splitRegex}, and writes one entry per
   * distinct lower-cased term into the partition selected by the document's hash.
   *
   * @param numPartitions number of shards the table is divided into
   * @param docId column qualifier identifying the document
   * @param doc full document text
   * @param splitRegex regex used to split the document into terms
   * @param bw writer for the shard table
   */
  public static void index(int numPartitions, String docId, String doc, String splitRegex,
      BatchWriter bw) throws Exception {

    String[] tokens = doc.split(splitRegex);

    String partition = genPartition(doc.hashCode() % numPartitions);

    Mutation m = new Mutation(partition);

    HashSet<String> tokensSeen = new HashSet<>();

    for (String token : tokens) {
      // NOTE(review): default-locale lower-casing; queries must lower-case the same way
      token = token.toLowerCase();

      // HashSet.add returns false for duplicates, so each term is written at most once
      // (avoids the contains-then-add double lookup)
      if (tokensSeen.add(token)) {
        m.put(token, docId, new Value(new byte[0]));
      }
    }

    if (m.size() > 0)
      bw.addMutation(m);
  }

  /**
   * Recursively indexes a file or directory tree. Each regular file is read fully into memory
   * and indexed under its absolute path as the document id.
   */
  public static void index(int numPartitions, File src, String splitRegex, BatchWriter bw)
      throws Exception {
    if (src.isDirectory()) {
      File[] files = src.listFiles();
      if (files != null) {
        for (File child : files) {
          index(numPartitions, child, splitRegex, bw);
        }
      }
    } else {
      StringBuilder sb = new StringBuilder();

      // NOTE(review): FileReader uses the platform default charset; consider an explicit
      // charset (e.g. UTF-8) if documents may be indexed on differently-configured hosts
      try (FileReader fr = new FileReader(src)) {
        char[] data = new char[4096];
        int len;
        while ((len = fr.read(data)) != -1) {
          sb.append(data, 0, len);
        }
      }

      index(numPartitions, src.getAbsolutePath(), sb.toString(), splitRegex, bw);
    }
  }

  static class IndexOpts extends ClientOpts {

    @Parameter(names = {"-t", "--table"}, required = true, description = "table to use")
    private String tableName;

    @Parameter(names = "--partitions", required = true,
        description = "the number of shards to create")
    int partitions;

    @Parameter(required = true, description = "<file> { <file> ... }")
    List<String> files = new ArrayList<>();
  }

  public static void main(String[] args) throws Exception {
    IndexOpts opts = new IndexOpts();
    opts.parseArgs(Index.class.getName(), args);

    String splitRegex = "\\W+";

    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build();
        BatchWriter bw = client.createBatchWriter(opts.tableName)) {
      for (String filename : opts.files) {
        index(opts.partitions, new File(filename), splitRegex, bw);
      }
    }
  }
}
| 3,318 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/shard/CutoffIntersectingIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.shard;
import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.sample.RowColumnSampler;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.user.IntersectingIterator;
/**
* This iterator uses a sample built from the Column Qualifier to quickly avoid intersecting
* iterator queries that may return too many documents.
*/
/**
 * This iterator uses a sample built from the Column Qualifier to quickly avoid intersecting
 * iterator queries that may return too many documents.
 *
 * <p>It maintains a second {@link IntersectingIterator} over the table's sample data. On each
 * seek, the sample is consulted first; if the number of sample hits exceeds a scaled cutoff,
 * this iterator reports no data for the range instead of running the full intersection.
 */
public class CutoffIntersectingIterator extends IntersectingIterator {

  // intersecting iterator over the sampled copy of the data
  private IntersectingIterator sampleII;
  // maximum number of sample hits allowed before a range is suppressed
  private int sampleMax;
  // false when the sample check concluded the range would return too many documents
  private boolean hasTop;

  /** Stores the document-count cutoff as an option on the iterator setting. */
  public static void setCutoff(IteratorSetting iterCfg, int cutoff) {
    checkArgument(cutoff >= 0);
    iterCfg.addOption("cutoff", cutoff + "");
  }

  @Override
  public boolean hasTop() {
    // also gated by the sample check performed in seek()
    return hasTop && super.hasTop();
  }

  @Override
  public void seek(Range range, Collection<ByteSequence> seekColumnFamilies, boolean inclusive)
      throws IOException {

    sampleII.seek(range, seekColumnFamilies, inclusive);

    // this check will be redone whenever iterator stack is torn down and recreated.
    // count sample hits, stopping as soon as the cutoff is exceeded
    int count = 0;
    while (count <= sampleMax && sampleII.hasTop()) {
      sampleII.next();
      count++;
    }

    if (count > sampleMax) {
      // In a real application would probably want to return a key value that indicates too much
      // data. Since this would execute for each tablet, some tablets
      // may return data. For tablets that did not return data, would want an indication.
      hasTop = false;
    } else {
      hasTop = true;
      super.seek(range, seekColumnFamilies, inclusive);
    }
  }

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) throws IOException {
    super.init(source, options, env);

    IteratorEnvironment sampleEnv = env.cloneWithSamplingEnabled();

    setMax(sampleEnv, options);

    // deep-copy the source against the sampling-enabled environment so sampleII reads sample data
    SortedKeyValueIterator<Key,Value> sampleDC = source.deepCopy(sampleEnv);
    sampleII = new IntersectingIterator();
    // NOTE(review): the original environment (not sampleEnv) is passed to init here — confirm
    // this is intentional; the sampled view appears to come from the deep copy above
    sampleII.init(sampleDC, options, env);
  }

  /** Fails fast unless the table's sample is built on the column qualifier only. */
  static void validateSamplerConfig(SamplerConfiguration sampleConfig) {
    requireNonNull(sampleConfig);
    checkArgument(sampleConfig.getSamplerClassName().equals(RowColumnSampler.class.getName()),
        "Unexpected Sampler " + sampleConfig.getSamplerClassName());
    checkArgument(sampleConfig.getOptions().get("qualifier").equals("true"),
        "Expected sample on column qualifier");
    checkArgument(isNullOrFalse(sampleConfig.getOptions(), "row", "family", "visibility"),
        "Expected sample on column qualifier only");
  }

  /**
   * Derives the per-tablet sample cutoff: the configured cutoff divided by the sampler modulus,
   * i.e. roughly the number of sample hits expected when the full data reaches the cutoff.
   */
  private void setMax(IteratorEnvironment sampleEnv, Map<String,String> options) {
    String cutoffValue = options.get("cutoff");
    SamplerConfiguration sampleConfig = sampleEnv.getSamplerConfiguration();

    // Ensure the sample was constructed in an expected way. If the sample is not built as expected,
    // then can not draw conclusions based on sample.
    requireNonNull(cutoffValue, "Expected cutoff option is missing");
    validateSamplerConfig(sampleConfig);

    int modulus = Integer.parseInt(sampleConfig.getOptions().get("modulus"));

    sampleMax = Math.round(Float.parseFloat(cutoffValue) / modulus);
  }

  // returns true only if none of the given option keys is set to "true"
  private static boolean isNullOrFalse(Map<String,String> options, String... keys) {
    for (String key : keys) {
      String val = options.get(key);
      if (val != null && val.equals("true")) {
        return false;
      }
    }
    return true;
  }
}
| 3,319 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/shard/Reverse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.shard;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.apache.hadoop.io.Text;
import com.beust.jcommander.Parameter;
/**
* The program reads an accumulo table written by {@link Index} and writes out to another table. It
* writes out a mapping of documents to terms. The document to term mapping is used by
* {@link ContinuousQuery}.
*/
/**
 * The program reads an accumulo table written by {@link Index} and writes out to another table. It
 * writes out a mapping of documents to terms. The document to term mapping is used by
 * {@link ContinuousQuery}.
 */
public class Reverse {

  static class Opts extends ClientOpts {
    @Parameter(names = "--shardTable", description = "name of the shard table")
    String shardTable;
    @Parameter(names = "--doc2Term", description = "name of the doc2Term table")
    String doc2TermTable;
  }

  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs(Reverse.class.getName(), args);

    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build();
        Scanner scanner = client.createScanner(opts.shardTable, Authorizations.EMPTY);
        BatchWriter bw = client.createBatchWriter(opts.doc2TermTable)) {
      // shard table layout: row = partition, family = term, qualifier = docId;
      // invert it so the output row is the docId and the output family is the term
      for (Entry<Key,Value> kv : scanner) {
        Key indexKey = kv.getKey();
        Mutation inverted = new Mutation(indexKey.getColumnQualifier());
        inverted.put(indexKey.getColumnFamily(), new Text(), new Value(new byte[0]));
        bw.addMutation(inverted);
      }
    }
  }
}
| 3,320 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/shard/Query.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.shard;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.user.IntersectingIterator;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.apache.hadoop.io.Text;
import com.beust.jcommander.Parameter;
/**
* This program queries a set of terms in the shard table (populated by {@link Index}) using the
* {@link IntersectingIterator}.
*/
/**
 * This program queries a set of terms in the shard table (populated by {@link Index}) using the
 * {@link IntersectingIterator}.
 */
public class Query {

  static class QueryOpts extends ClientOpts {
    @Parameter(description = " term { <term> ... }")
    List<String> terms = new ArrayList<>();

    @Parameter(names = {"-t", "--table"}, required = true, description = "table to use")
    String tableName;

    @Parameter(names = {"--sample"},
        description = "Do queries against sample, useful when sample is built using column qualifier")
    boolean useSample = false;

    @Parameter(names = {"--sampleCutoff"},
        description = "Use sample data to determine if a query might return a number of documents over the cutoff. This check is per tablet.")
    Integer sampleCutoff = null;
  }

  /**
   * Finds the documents containing every one of the given terms.
   *
   * @param bs scanner over the shard table
   * @param terms terms that must all appear in a matching document
   * @param cutoff if non-null, per-tablet document cutoff enforced via
   *        {@link CutoffIntersectingIterator}; otherwise the plain {@link IntersectingIterator}
   *        is used
   * @return the ids (column qualifiers) of the matching documents
   */
  public static List<String> query(BatchScanner bs, List<String> terms, Integer cutoff) {

    // each query term becomes a column family for the intersecting iterator
    Text[] columns = new Text[terms.size()];
    int i = 0;
    for (String term : terms) {
      columns[i++] = new Text(term);
    }

    IteratorSetting ii;

    if (cutoff != null) {
      ii = new IteratorSetting(20, "ii", CutoffIntersectingIterator.class);
      CutoffIntersectingIterator.setCutoff(ii, cutoff);
    } else {
      ii = new IteratorSetting(20, "ii", IntersectingIterator.class);
    }

    IntersectingIterator.setColumnFamilies(ii, columns);
    bs.addScanIterator(ii);
    // the intersecting iterator handles partition filtering; scan the whole table
    bs.setRanges(Collections.singleton(new Range()));
    List<String> result = new ArrayList<>();
    for (Entry<Key,Value> entry : bs) {
      result.add(entry.getKey().getColumnQualifier().toString());
    }
    return result;
  }

  public static void main(String[] args) throws Exception {
    QueryOpts opts = new QueryOpts();
    opts.parseArgs(Query.class.getName(), args);

    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build();
        BatchScanner bs = client.createBatchScanner(opts.tableName, Authorizations.EMPTY, 10)) {
      if (opts.useSample) {
        // fetch the sampler configuration once and reuse it: the original fetched it twice,
        // issuing a redundant server round trip and risking a config change between the calls
        SamplerConfiguration samplerConfig = client.tableOperations()
            .getSamplerConfiguration(opts.tableName);
        CutoffIntersectingIterator.validateSamplerConfig(samplerConfig);
        bs.setSamplerConfiguration(samplerConfig);
      }
      for (String entry : query(bs, opts.terms, opts.sampleCutoff)) {
        System.out.println(" " + entry);
      }
    }
  }
}
| 3,321 |
0 | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples | Create_ds/accumulo-examples/src/main/java/org/apache/accumulo/examples/shard/ContinuousQuery.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.examples.shard;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map.Entry;
import java.util.Random;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.user.IntersectingIterator;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.cli.ClientOpts;
import org.apache.hadoop.io.Text;
import com.beust.jcommander.Parameter;
import com.google.common.collect.Iterators;
/**
* Using the doc2word table created by Reverse.java, this program randomly selects N words per
* document. Then it continually queries a random set of words in the shard table (created by
* {@link Index}) using the {@link IntersectingIterator}.
*/
public class ContinuousQuery {
  static class Opts extends ClientOpts {
    @Parameter(names = "--shardTable", required = true, description = "name of the shard table")
    String tableName = null;
    @Parameter(names = "--doc2Term", required = true, description = "name of the doc2Term table")
    String doc2Term;
    @Parameter(names = "--terms", required = true, description = "the number of terms in the query")
    int numTerms;
    @Parameter(names = "--count", description = "the number of queries to run")
    long iterations = Long.MAX_VALUE;
  }

  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs(ContinuousQuery.class.getName(), args);
    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
      ArrayList<Text[]> randTerms;
      try (Scanner scanner = client.createScanner(opts.doc2Term, Authorizations.EMPTY)) {
        randTerms = findRandomTerms(scanner, opts.numTerms);
      }
      // Guard against an empty doc2Term table (or no document having at least numTerms terms).
      // Without this, Random.nextInt(0) below throws an obscure IllegalArgumentException.
      if (randTerms.isEmpty()) {
        System.err.println("No documents with at least " + opts.numTerms
            + " terms were found in table " + opts.doc2Term);
        return;
      }
      Random rand = new Random();
      try (BatchScanner bs = client.createBatchScanner(opts.tableName, Authorizations.EMPTY, 5)) {
        for (long i = 0; i < opts.iterations; i += 1) {
          // Pick a random term set, then reconfigure the scanner for a fresh intersecting query.
          Text[] columns = randTerms.get(rand.nextInt(randTerms.size()));
          bs.clearScanIterators();
          bs.clearColumns();
          IteratorSetting ii = new IteratorSetting(20, "ii", IntersectingIterator.class);
          IntersectingIterator.setColumnFamilies(ii, columns);
          bs.addScanIterator(ii);
          bs.setRanges(Collections.singleton(new Range()));
          long t1 = System.currentTimeMillis();
          int count = Iterators.size(bs.iterator());
          long t2 = System.currentTimeMillis();
          System.out.printf(" %s %,d %6.3f%n", Arrays.asList(columns), count, (t2 - t1) / 1000.0);
        }
      }
    }
  }

  /**
   * Scans the doc2Term table (row = document, column family = term) and, for each document with
   * at least {@code numTerms} terms, selects a random subset of {@code numTerms} terms.
   *
   * @param scanner scanner over the doc2Term table
   * @param numTerms number of terms to select per document
   * @return a list of term arrays, one per qualifying document
   */
  private static ArrayList<Text[]> findRandomTerms(Scanner scanner, int numTerms) {
    Text currentRow = null;
    ArrayList<Text> words = new ArrayList<>();
    ArrayList<Text[]> ret = new ArrayList<>();
    Random rand = new Random();
    for (Entry<Key,Value> entry : scanner) {
      Key key = entry.getKey();
      if (currentRow == null)
        currentRow = key.getRow();
      if (!currentRow.equals(key.getRow())) {
        // Row boundary: finish the previous document before collecting the next one's terms.
        selectRandomWords(words, ret, rand, numTerms);
        words.clear();
        currentRow = key.getRow();
      }
      words.add(key.getColumnFamily());
    }
    // Flush the terms of the final document (no trailing row boundary triggers it in the loop).
    selectRandomWords(words, ret, rand, numTerms);
    return ret;
  }

  /**
   * Shuffles {@code words} and, when the document has at least {@code numTerms} terms, appends
   * the first {@code numTerms} of them to {@code ret}. Documents with fewer terms are skipped.
   */
  private static void selectRandomWords(ArrayList<Text> words, ArrayList<Text[]> ret, Random rand,
      int numTerms) {
    if (words.size() >= numTerms) {
      Collections.shuffle(words, rand);
      ret.add(words.subList(0, numTerms).toArray(new Text[0]));
    }
  }
}
| 3,322 |
0 | Create_ds/cordova-plugin-media-capture/src | Create_ds/cordova-plugin-media-capture/src/android/FileHelper.java | /*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package org.apache.cordova.mediacapture;
import android.net.Uri;
import android.webkit.MimeTypeMap;
import org.apache.cordova.CordovaInterface;
import java.util.Locale;
// TODO: Replace with CordovaResourceApi.getMimeType() post 3.1.
public class FileHelper {
    /**
     * Determines a mime type from the file extension of the given path.
     *
     * @param path a file path or file name; may or may not contain a dot
     * @return the mime type for the extension, or null if the extension is unknown
     */
    public static String getMimeTypeForExtension(String path) {
        // Isolate the text after the final dot; if there is no dot, treat the whole
        // string as the extension (matches MimeTypeMap's lookup expectations).
        int dotIndex = path.lastIndexOf('.');
        String extension = (dotIndex == -1) ? path : path.substring(dotIndex + 1);
        // Lower-case the extension so lookups succeed regardless of input casing (see CB-2185).
        extension = extension.toLowerCase(Locale.getDefault());
        // MimeTypeMap does not know about "3ga"; map it to the 3GPP audio type explicitly.
        return extension.equals("3ga")
                ? "audio/3gpp"
                : MimeTypeMap.getSingleton().getMimeTypeFromExtension(extension);
    }

    /**
     * Returns the mime type of the data specified by the given URI.
     *
     * @param uri the URI of the data
     * @param cordova used to reach the ContentResolver for content:// URIs
     * @return the mime type of the specified data, or null if it cannot be determined
     */
    public static String getMimeType(Uri uri, CordovaInterface cordova) {
        // content:// URIs carry their type in the content provider; everything else is
        // resolved from the path's file extension.
        if ("content".equals(uri.getScheme())) {
            return cordova.getActivity().getContentResolver().getType(uri);
        }
        return getMimeTypeForExtension(uri.getPath());
    }
}
| 3,323 |
0 | Create_ds/cordova-plugin-media-capture/src | Create_ds/cordova-plugin-media-capture/src/android/Capture.java | /*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package org.apache.cordova.mediacapture;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import org.apache.cordova.CallbackContext;
import org.apache.cordova.CordovaPlugin;
import org.apache.cordova.LOG;
import org.apache.cordova.PermissionHelper;
import org.apache.cordova.PluginManager;
import org.apache.cordova.file.FileUtils;
import org.apache.cordova.file.LocalFilesystemURL;
import org.apache.cordova.mediacapture.PendingRequests.Request;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import android.Manifest;
import android.app.Activity;
import android.content.ActivityNotFoundException;
import android.content.ContentResolver;
import android.content.ContentValues;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.pm.PackageManager.NameNotFoundException;
import android.database.Cursor;
import android.graphics.BitmapFactory;
import android.media.MediaPlayer;
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
import android.os.Environment;
import android.provider.MediaStore;
/**
 * Cordova plugin that launches the device's native audio/image/video capture activities and
 * returns the captured media files to JavaScript.
 */
public class Capture extends CordovaPlugin {
    private static final String VIDEO_3GPP = "video/3gpp";
    private static final String VIDEO_MP4 = "video/mp4";
    private static final String AUDIO_3GPP = "audio/3gpp";
    private static final String[] AUDIO_TYPES = new String[] {"audio/3gpp", "audio/aac", "audio/amr", "audio/wav"};
    private static final String IMAGE_JPEG = "image/jpeg";
    private static final int CAPTURE_AUDIO = 0;     // Constant for capture audio
    private static final int CAPTURE_IMAGE = 1;     // Constant for capture image
    private static final int CAPTURE_VIDEO = 2;     // Constant for capture video
    private static final String LOG_TAG = "Capture";
    private static final int CAPTURE_INTERNAL_ERR = 0;
//    private static final int CAPTURE_APPLICATION_BUSY = 1;
//    private static final int CAPTURE_INVALID_ARGUMENT = 2;
    private static final int CAPTURE_NO_MEDIA_FILES = 3;
    private static final int CAPTURE_PERMISSION_DENIED = 4;
    private static final int CAPTURE_NOT_SUPPORTED = 20;

    // On Android 13+ (TIRAMISU) the broad external-storage permissions no longer exist;
    // granular media permissions are appended per request type instead (see the
    // isMissingPermissions overloads below).
    private static String[] storagePermissions;
    static {
        if (android.os.Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
            storagePermissions = new String[]{
            };
        } else {
            storagePermissions = new String[] {
                Manifest.permission.READ_EXTERNAL_STORAGE,
                Manifest.permission.WRITE_EXTERNAL_STORAGE
            };
        }
    }

    private boolean cameraPermissionInManifest;     // Whether or not the CAMERA permission is declared in AndroidManifest.xml

    private final PendingRequests pendingRequests = new PendingRequests();

    private int numPics;                            // Number of pictures before capture activity

    private Uri imageUri;

    @Override
    protected void pluginInitialize() {
        super.pluginInitialize();

        // CB-10670: The CAMERA permission does not need to be requested unless it is declared
        // in AndroidManifest.xml. This plugin does not declare it, but others may and so we must
        // check the package info to determine if the permission is present.
        cameraPermissionInManifest = false;
        try {
            PackageManager packageManager = this.cordova.getActivity().getPackageManager();
            String[] permissionsInPackage = packageManager.getPackageInfo(this.cordova.getActivity().getPackageName(), PackageManager.GET_PERMISSIONS).requestedPermissions;
            if (permissionsInPackage != null) {
                for (String permission : permissionsInPackage) {
                    if (permission.equals(Manifest.permission.CAMERA)) {
                        cameraPermissionInManifest = true;
                        break;
                    }
                }
            }
        } catch (NameNotFoundException e) {
            // We are requesting the info for our package, so this should
            // never be caught
            LOG.e(LOG_TAG, "Failed checking for CAMERA permission in manifest", e);
        }
    }

    /**
     * Dispatches the javascript action ("getFormatData", "captureAudio", "captureImage",
     * "captureVideo") to the corresponding handler.
     *
     * @return true if the action was recognized, false otherwise
     */
    @Override
    public boolean execute(String action, JSONArray args, CallbackContext callbackContext) throws JSONException {
        if (action.equals("getFormatData")) {
            JSONObject obj = getFormatData(args.getString(0), args.getString(1));
            callbackContext.success(obj);
            return true;
        }

        JSONObject options = args.optJSONObject(0);

        if (action.equals("captureAudio")) {
            this.captureAudio(pendingRequests.createRequest(CAPTURE_AUDIO, options, callbackContext));
        }
        else if (action.equals("captureImage")) {
            this.captureImage(pendingRequests.createRequest(CAPTURE_IMAGE, options, callbackContext));
        }
        else if (action.equals("captureVideo")) {
            this.captureVideo(pendingRequests.createRequest(CAPTURE_VIDEO, options, callbackContext));
        }
        else {
            return false;
        }

        return true;
    }

    /**
     * Provides the media data file data depending on it's mime type
     *
     * @param filePath path to the file
     * @param mimeType of the file
     * @return a MediaFileData object
     * @throws JSONException if building the result object fails (should not happen)
     */
    private JSONObject getFormatData(String filePath, String mimeType) throws JSONException {
        Uri fileUrl = filePath.startsWith("file:") ? Uri.parse(filePath) : Uri.fromFile(new File(filePath));
        JSONObject obj = new JSONObject();
        // setup defaults
        obj.put("height", 0);
        obj.put("width", 0);
        obj.put("bitrate", 0);
        obj.put("duration", 0);
        obj.put("codecs", "");

        // If the mimeType isn't set the rest will fail
        // so let's see if we can determine it.
        if (mimeType == null || mimeType.equals("") || "null".equals(mimeType)) {
            mimeType = FileHelper.getMimeType(fileUrl, cordova);
        }
        LOG.d(LOG_TAG, "Mime type = " + mimeType);

        if (mimeType.equals(IMAGE_JPEG) || filePath.endsWith(".jpg")) {
            obj = getImageData(fileUrl, obj);
        }
        else if (Arrays.asList(AUDIO_TYPES).contains(mimeType)) {
            obj = getAudioVideoData(filePath, obj, false);
        }
        else if (mimeType.equals(VIDEO_3GPP) || mimeType.equals(VIDEO_MP4)) {
            obj = getAudioVideoData(filePath, obj, true);
        }
        return obj;
    }

    /**
     * Get the Image specific attributes
     *
     * @param fileUrl url pointing to the file
     * @param obj represents the Media File Data
     * @return a JSONObject that represents the Media File Data
     * @throws JSONException
     */
    private JSONObject getImageData(Uri fileUrl, JSONObject obj) throws JSONException {
        // inJustDecodeBounds reads only the image header, so no bitmap memory is allocated.
        BitmapFactory.Options options = new BitmapFactory.Options();
        options.inJustDecodeBounds = true;
        BitmapFactory.decodeFile(fileUrl.getPath(), options);
        obj.put("height", options.outHeight);
        obj.put("width", options.outWidth);
        return obj;
    }

    /**
     * Get the audio/video specific attributes
     *
     * @param filePath path to the file
     * @param obj represents the Media File Data
     * @param video if true get video attributes as well
     * @return a JSONObject that represents the Media File Data
     * @throws JSONException
     */
    private JSONObject getAudioVideoData(String filePath, JSONObject obj, boolean video) throws JSONException {
        MediaPlayer player = new MediaPlayer();
        try {
            player.setDataSource(filePath);
            player.prepare();
            obj.put("duration", player.getDuration() / 1000);
            if (video) {
                obj.put("height", player.getVideoHeight());
                obj.put("width", player.getVideoWidth());
            }
        } catch (IOException e) {
            LOG.d(LOG_TAG, "Error: loading video file");
        } finally {
            // Always release the MediaPlayer; it holds native resources that are
            // otherwise leaked on every metadata query.
            player.release();
        }
        return obj;
    }

    /**
     * Requests any of the given permissions that are not yet granted.
     *
     * @return true if a permission request was issued (the caller should return and wait
     *         for onRequestPermissionResult), false if all permissions are already held
     */
    private boolean isMissingPermissions(Request req, ArrayList<String> permissions) {
        ArrayList<String> missingPermissions = new ArrayList<>();
        for (String permission: permissions) {
            if (!PermissionHelper.hasPermission(this, permission)) {
                missingPermissions.add(permission);
            }
        }

        boolean isMissingPermissions = missingPermissions.size() > 0;
        if (isMissingPermissions) {
            String[] missing = missingPermissions.toArray(new String[missingPermissions.size()]);
            PermissionHelper.requestPermissions(this, req.requestCode, missing);
        }
        return isMissingPermissions;
    }

    // Storage permissions plus, on Android 13+, the granular media permission for the request type.
    private boolean isMissingPermissions(Request req, String mediaPermission) {
        ArrayList<String> permissions = new ArrayList<>(Arrays.asList(storagePermissions));
        if (mediaPermission != null && android.os.Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
            permissions.add(mediaPermission);
        }
        return isMissingPermissions(req, permissions);
    }

    // Same as above, additionally requesting CAMERA when it is declared in the app manifest.
    private boolean isMissingCameraPermissions(Request req, String mediaPermission) {
        ArrayList<String> cameraPermissions = new ArrayList<>(Arrays.asList(storagePermissions));
        if (cameraPermissionInManifest) {
            cameraPermissions.add(Manifest.permission.CAMERA);
        }
        if (mediaPermission != null && android.os.Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
            cameraPermissions.add(mediaPermission);
        }
        return isMissingPermissions(req, cameraPermissions);
    }

    /**
     * Sets up an intent to capture audio.  Result handled by onActivityResult()
     */
    private void captureAudio(Request req) {
        if (isMissingPermissions(req, Manifest.permission.READ_MEDIA_AUDIO)) return;

        try {
            Intent intent = new Intent(android.provider.MediaStore.Audio.Media.RECORD_SOUND_ACTION);
            this.cordova.startActivityForResult((CordovaPlugin) this, intent, req.requestCode);
        } catch (ActivityNotFoundException ex) {
            pendingRequests.resolveWithFailure(req, createErrorObject(CAPTURE_NOT_SUPPORTED, "No Activity found to handle Audio Capture."));
        }
    }

    /**
     * Sets up an intent to capture images.  Result handled by onActivityResult()
     */
    private void captureImage(Request req) {
        if (isMissingCameraPermissions(req, Manifest.permission.READ_MEDIA_IMAGES)) return;

        // Save the number of images currently on disk for later comparison in
        // checkForDuplicateImage(). Close the cursor to avoid leaking it.
        try (Cursor cursor = queryImgDB(whichContentStore())) {
            this.numPics = cursor.getCount();
        }

        Intent intent = new Intent(android.provider.MediaStore.ACTION_IMAGE_CAPTURE);

        ContentResolver contentResolver = this.cordova.getActivity().getContentResolver();
        ContentValues cv = new ContentValues();
        cv.put(MediaStore.Images.Media.MIME_TYPE, IMAGE_JPEG);
        imageUri = contentResolver.insert(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, cv);

        LOG.d(LOG_TAG, "Taking a picture and saving to: " + imageUri.toString());

        intent.putExtra(android.provider.MediaStore.EXTRA_OUTPUT, imageUri);
        this.cordova.startActivityForResult((CordovaPlugin) this, intent, req.requestCode);
    }

    /**
     * Sets up an intent to capture video.  Result handled by onActivityResult()
     */
    private void captureVideo(Request req) {
        if (isMissingCameraPermissions(req, Manifest.permission.READ_MEDIA_VIDEO)) return;

        Intent intent = new Intent(android.provider.MediaStore.ACTION_VIDEO_CAPTURE);

        // Always true on supported platforms; kept for historical compatibility.
        if(Build.VERSION.SDK_INT > 7){
            intent.putExtra("android.intent.extra.durationLimit", req.duration);
            intent.putExtra("android.intent.extra.videoQuality", req.quality);
        }
        this.cordova.startActivityForResult((CordovaPlugin) this, intent, req.requestCode);
    }

    /**
     * Called when the video view exits.
     *
     * @param requestCode       The request code originally supplied to startActivityForResult(),
     *                          allowing you to identify who this result came from.
     * @param resultCode        The integer result code returned by the child activity through its setResult().
     * @param intent            An Intent, which can return result data to the caller (various data can be attached to Intent "extras").
     * @throws JSONException
     */
    public void onActivityResult(int requestCode, int resultCode, final Intent intent) {
        final Request req = pendingRequests.get(requestCode);

        // Result received okay
        if (resultCode == Activity.RESULT_OK) {
            // Media processing can touch disk/content providers; keep it off the UI thread.
            Runnable processActivityResult = new Runnable() {
                @Override
                public void run() {
                    switch(req.action) {
                        case CAPTURE_AUDIO:
                            onAudioActivityResult(req, intent);
                            break;
                        case CAPTURE_IMAGE:
                            onImageActivityResult(req);
                            break;
                        case CAPTURE_VIDEO:
                            onVideoActivityResult(req, intent);
                            break;
                    }
                }
            };

            this.cordova.getThreadPool().execute(processActivityResult);
        }
        // If canceled
        else if (resultCode == Activity.RESULT_CANCELED) {
            // If we have partial results send them back to the user
            if (req.results.length() > 0) {
                pendingRequests.resolveWithSuccess(req);
            }
            // user canceled the action
            else {
                pendingRequests.resolveWithFailure(req, createErrorObject(CAPTURE_NO_MEDIA_FILES, "Canceled."));
            }
        }
        // If something else
        else {
            // If we have partial results send them back to the user
            if (req.results.length() > 0) {
                pendingRequests.resolveWithSuccess(req);
            }
            // something bad happened
            else {
                pendingRequests.resolveWithFailure(req, createErrorObject(CAPTURE_NO_MEDIA_FILES, "Did not complete!"));
            }
        }
    }

    /** Handles the result of the audio capture activity; loops until req.limit clips are taken. */
    public void onAudioActivityResult(Request req, Intent intent) {
        // Get the uri of the audio clip
        Uri data = intent.getData();
        if (data == null) {
            pendingRequests.resolveWithFailure(req, createErrorObject(CAPTURE_NO_MEDIA_FILES, "Error: data is null"));
            return;
        }

        // Create a file object from the uri
        JSONObject mediaFile = createMediaFile(data);
        if (mediaFile == null) {
            pendingRequests.resolveWithFailure(req, createErrorObject(CAPTURE_INTERNAL_ERR, "Error: no mediaFile created from " + data));
            return;
        }

        req.results.put(mediaFile);

        if (req.results.length() >= req.limit) {
            // Send Uri back to JavaScript for listening to audio
            pendingRequests.resolveWithSuccess(req);
        } else {
            // still need to capture more audio clips
            captureAudio(req);
        }
    }

    /** Handles the result of the image capture activity; loops until req.limit images are taken. */
    public void onImageActivityResult(Request req) {
        // Get the uri of the image
        Uri data = imageUri;
        if (data == null) {
            pendingRequests.resolveWithFailure(req, createErrorObject(CAPTURE_NO_MEDIA_FILES, "Error: data is null"));
            return;
        }

        // Create a file object from the uri
        JSONObject mediaFile = createMediaFile(data);
        if (mediaFile == null) {
            pendingRequests.resolveWithFailure(req, createErrorObject(CAPTURE_INTERNAL_ERR, "Error: no mediaFile created from " + data));
            return;
        }

        req.results.put(mediaFile);

        checkForDuplicateImage();

        if (req.results.length() >= req.limit) {
            // Send Uri back to JavaScript for viewing image
            pendingRequests.resolveWithSuccess(req);
        } else {
            // still need to capture more images
            captureImage(req);
        }
    }

    /** Handles the result of the video capture activity; loops until req.limit clips are taken. */
    public void onVideoActivityResult(Request req, Intent intent) {
        // Get the uri of the video clip
        Uri data = intent.getData();
        if (data == null) {
            pendingRequests.resolveWithFailure(req, createErrorObject(CAPTURE_NO_MEDIA_FILES, "Error: data is null"));
            return;
        }

        // Create a file object from the uri
        JSONObject mediaFile = createMediaFile(data);
        if (mediaFile == null) {
            pendingRequests.resolveWithFailure(req, createErrorObject(CAPTURE_INTERNAL_ERR, "Error: no mediaFile created from " + data));
            return;
        }

        req.results.put(mediaFile);

        if (req.results.length() >= req.limit) {
            // Send Uri back to JavaScript for viewing video
            pendingRequests.resolveWithSuccess(req);
        } else {
            // still need to capture more video clips
            captureVideo(req);
        }
    }

    /**
     * Creates a JSONObject that represents a File from the Uri
     *
     * @param data the Uri of the audio/image/video
     * @return a JSONObject that represents a File, or null if it cannot be built
     * @throws IOException
     */
    private JSONObject createMediaFile(Uri data) {
        File fp = webView.getResourceApi().mapUriToFile(data);
        if (fp == null) {
            return null;
        }

        JSONObject obj = new JSONObject();

        // The PluginManager accessor differs across cordova-android versions; try the
        // getter first, then fall back to the public field.
        Class webViewClass = webView.getClass();
        PluginManager pm = null;
        try {
            Method gpm = webViewClass.getMethod("getPluginManager");
            pm = (PluginManager) gpm.invoke(webView);
        } catch (NoSuchMethodException e) {
            // fall through to the field lookup below
        } catch (IllegalAccessException e) {
        } catch (InvocationTargetException e) {
        }
        if (pm == null) {
            try {
                Field pmf = webViewClass.getField("pluginManager");
                pm = (PluginManager)pmf.get(webView);
            } catch (NoSuchFieldException e) {
            } catch (IllegalAccessException e) {
            }
        }
        if (pm == null) {
            // Without a PluginManager we cannot resolve the File plugin; report failure to
            // the caller instead of crashing with a NullPointerException below.
            LOG.e(LOG_TAG, "Unable to obtain PluginManager to build media file entry");
            return null;
        }
        FileUtils filePlugin = (FileUtils) pm.getPlugin("File");
        LocalFilesystemURL url = filePlugin.filesystemURLforLocalPath(fp.getAbsolutePath());

        try {
            // File properties
            obj.put("name", fp.getName());
            obj.put("fullPath", Uri.fromFile(fp));
            if (url != null) {
                obj.put("localURL", url.toString());
            }
            // Because of an issue with MimeTypeMap.getMimeTypeFromExtension() all .3gpp files
            // are reported as video/3gpp. I'm doing this hacky check of the URI to see if it
            // is stored in the audio or video content store.
            if (fp.getAbsoluteFile().toString().endsWith(".3gp") || fp.getAbsoluteFile().toString().endsWith(".3gpp")) {
                if (data.toString().contains("/audio/")) {
                    obj.put("type", AUDIO_3GPP);
                } else {
                    obj.put("type", VIDEO_3GPP);
                }
            } else {
                obj.put("type", FileHelper.getMimeType(Uri.fromFile(fp), cordova));
            }

            obj.put("lastModifiedDate", fp.lastModified());
            obj.put("size", fp.length());
        } catch (JSONException e) {
            // this will never happen
            e.printStackTrace();
        }
        return obj;
    }

    /** Builds the {code, message} error object returned to the javascript layer. */
    private JSONObject createErrorObject(int code, String message) {
        JSONObject obj = new JSONObject();
        try {
            obj.put("code", code);
            obj.put("message", message);
        } catch (JSONException e) {
            // This will never happen
        }
        return obj;
    }

    /**
     * Creates a cursor that can be used to determine how many images we have.
     * The caller is responsible for closing the returned cursor.
     *
     * @return a cursor
     */
    private Cursor queryImgDB(Uri contentStore) {
        return this.cordova.getActivity().getContentResolver().query(
                contentStore,
                new String[] { MediaStore.Images.Media._ID },
                null,
                null,
                null);
    }

    /**
     * Used to find out if we are in a situation where the Camera Intent adds to images
     * to the content store.
     */
    private void checkForDuplicateImage() {
        Uri contentStore = whichContentStore();
        // try-with-resources ensures the cursor is closed on every path (was leaked before).
        try (Cursor cursor = queryImgDB(contentStore)) {
            int currentNumOfImages = cursor.getCount();

            // delete the duplicate file if the difference is 2
            if ((currentNumOfImages - numPics) == 2) {
                cursor.moveToLast();
                int id = Integer.valueOf(cursor.getString(cursor.getColumnIndex(MediaStore.Images.Media._ID))) - 1;
                Uri uri = Uri.parse(contentStore + "/" + id);
                this.cordova.getActivity().getContentResolver().delete(uri, null, null);
            }
        }
    }

    /**
     * Determine if we are storing the images in internal or external storage
     * @return Uri
     */
    private Uri whichContentStore() {
        if (Environment.getExternalStorageState().equals(Environment.MEDIA_MOUNTED)) {
            return android.provider.MediaStore.Images.Media.EXTERNAL_CONTENT_URI;
        } else {
            return android.provider.MediaStore.Images.Media.INTERNAL_CONTENT_URI;
        }
    }

    /** Re-runs a request, typically after its permissions have been granted. */
    private void executeRequest(Request req) {
        switch (req.action) {
            case CAPTURE_AUDIO:
                this.captureAudio(req);
                break;
            case CAPTURE_IMAGE:
                this.captureImage(req);
                break;
            case CAPTURE_VIDEO:
                this.captureVideo(req);
                break;
        }
    }

    /** Resumes or fails the pending request once the user answers the permission prompt. */
    public void onRequestPermissionResult(int requestCode, String[] permissions,
                                          int[] grantResults) throws JSONException {
        Request req = pendingRequests.get(requestCode);

        if (req != null) {
            // Every requested permission must be granted for the request to proceed.
            boolean success = true;
            for(int r:grantResults) {
                if (r == PackageManager.PERMISSION_DENIED) {
                    success = false;
                    break;
                }
            }

            if (success) {
                executeRequest(req);
            } else {
                pendingRequests.resolveWithFailure(req, createErrorObject(CAPTURE_PERMISSION_DENIED, "Permission denied."));
            }
        }
    }

    public Bundle onSaveInstanceState() {
        return pendingRequests.toBundle();
    }

    public void onRestoreStateForActivityResult(Bundle state, CallbackContext callbackContext) {
        pendingRequests.setLastSavedState(state, callbackContext);
    }
}
| 3,324 |
0 | Create_ds/cordova-plugin-media-capture/src | Create_ds/cordova-plugin-media-capture/src/android/PendingRequests.java | /*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package org.apache.cordova.mediacapture;
import android.os.Bundle;
import android.util.SparseArray;
import org.apache.cordova.CallbackContext;
import org.apache.cordova.LOG;
import org.apache.cordova.PluginResult;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
/**
* Holds the pending javascript requests for the plugin
*/
/**
 * Holds the pending javascript requests for the plugin
 */
public class PendingRequests {
    private static final String LOG_TAG = "PendingCaptureRequests";

    // Bundle keys used by toBundle()/setLastSavedState() when the Activity is destroyed.
    private static final String CURRENT_ID_KEY = "currentReqId";
    private static final String REQUEST_KEY_PREFIX = "request_";

    // Monotonically increasing id; doubles as the Activity/permission request code.
    private int currentReqId = 0;
    // Live requests keyed by their request code.
    private SparseArray<Request> requests = new SparseArray<Request>();

    // State handed back after process death; consumed lazily in get() — see below.
    private Bundle lastSavedState;
    private CallbackContext resumeContext;

    /**
     * Creates a request and adds it to the array of pending requests. Each created request gets a
     * unique result code for use with startActivityForResult() and requestPermission()
     * @param action The action this request corresponds to (capture image, capture audio, etc.)
     * @param options The options for this request passed from the javascript
     * @param callbackContext The CallbackContext to return the result to
     * @return The newly created Request object with a unique result code
     * @throws JSONException
     */
    public synchronized Request createRequest(int action, JSONObject options, CallbackContext callbackContext) throws JSONException {
        Request req = new Request(action, options, callbackContext);
        requests.put(req.requestCode, req);
        return req;
    }

    /**
     * Gets the request corresponding to this request code
     * @param requestCode The request code for the desired request
     * @return The request corresponding to the given request code or null if such a
     * request is not found
     */
    public synchronized Request get(int requestCode) {
        // Check to see if this request was saved
        if (lastSavedState != null && lastSavedState.containsKey(REQUEST_KEY_PREFIX + requestCode)) {
            // Rehydrate the request from the saved bundle, pairing it with the resume
            // callback context so its result can still reach javascript.
            Request r = new Request(lastSavedState.getBundle(REQUEST_KEY_PREFIX + requestCode), this.resumeContext, requestCode);
            requests.put(requestCode, r);

            // Only one of the saved requests will get restored, because that's all cordova-android
            // supports. Having more than one is an extremely unlikely scenario anyway
            this.lastSavedState = null;
            this.resumeContext = null;

            return r;
        }

        return requests.get(requestCode);
    }

    /**
     * Removes the request from the array of pending requests and sends an error plugin result
     * to the CallbackContext that contains the given error object
     * @param req The request to be resolved
     * @param error The error to be returned to the CallbackContext
     */
    public synchronized void resolveWithFailure(Request req, JSONObject error) {
        req.callbackContext.error(error);
        requests.remove(req.requestCode);
    }

    /**
     * Removes the request from the array of pending requests and sends a successful plugin result
     * to the CallbackContext that contains the result of the request
     * @param req The request to be resolved
     */
    public synchronized void resolveWithSuccess(Request req) {
        req.callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK, req.results));
        requests.remove(req.requestCode);
    }


    /**
     * Each request gets a unique ID that represents its request code when calls are made to
     * Activities and for permission requests
     * @return A unique request code
     */
    private synchronized int incrementCurrentReqId() {
        // Post-increment: return the current id, then advance for the next request.
        return currentReqId ++;
    }

    /**
     * Restore state saved by calling toBundle along with a callbackContext to be used in
     * delivering the results of a pending callback
     *
     * @param lastSavedState The bundle received from toBundle()
     * @param resumeContext The callbackContext to return results to
     */
    public synchronized void setLastSavedState(Bundle lastSavedState, CallbackContext resumeContext) {
        this.lastSavedState = lastSavedState;
        this.resumeContext = resumeContext;
        // Resume the id sequence where it left off so new codes don't collide with saved ones.
        this.currentReqId = lastSavedState.getInt(CURRENT_ID_KEY);
    }

    /**
     * Save the current pending requests to a bundle for saving when the Activity gets destroyed.
     *
     * @return A Bundle that can be used to restore state using setLastSavedState()
     */
    public synchronized Bundle toBundle() {
        Bundle bundle = new Bundle();
        bundle.putInt(CURRENT_ID_KEY, currentReqId);

        for (int i = 0; i < requests.size(); i++) {
            Request r = requests.valueAt(i);
            int requestCode = requests.keyAt(i);
            bundle.putBundle(REQUEST_KEY_PREFIX + requestCode, r.toBundle());
        }

        if (requests.size() > 1) {
            // This scenario is hopefully very unlikely because there isn't much that can be
            // done about it. Should only occur if an external Activity is launched while
            // there is a pending permission request and the device is on low memory
            LOG.w(LOG_TAG, "More than one media capture request pending on Activity destruction. Some requests will be dropped!");
        }

        return bundle;
    }

    /**
     * Holds the options and CallbackContext for a capture request made to the plugin.
     */
    public class Request {

        // Keys for use in saving requests to a bundle
        private static final String ACTION_KEY = "action";
        private static final String LIMIT_KEY = "limit";
        private static final String DURATION_KEY = "duration";
        private static final String QUALITY_KEY = "quality";
        private static final String RESULTS_KEY = "results";

        // Unique int used to identify this request in any Android Permission or Activity callbacks
        public int requestCode;

        // The action that this request is performing
        public int action;

        // The number of pics/vids/audio clips to take (CAPTURE_IMAGE, CAPTURE_VIDEO, CAPTURE_AUDIO)
        public long limit = 1;

        // Optional max duration of recording in seconds (CAPTURE_VIDEO only)
        public int duration = 0;

        // Quality level for video capture 0 low, 1 high (CAPTURE_VIDEO only)
        public int quality = 1;

        // The array of results to be returned to the javascript callback on success
        public JSONArray results = new JSONArray();

        // The callback context for this plugin request
        private CallbackContext callbackContext;

        // Builds a fresh request from javascript options; missing options keep their defaults.
        private Request(int action, JSONObject options, CallbackContext callbackContext) throws JSONException {
            this.callbackContext = callbackContext;
            this.action = action;

            if (options != null) {
                this.limit = options.optLong("limit", 1);
                this.duration = options.optInt("duration", 0);
                this.quality = options.optInt("quality", 1);
            }

            this.requestCode = incrementCurrentReqId();
        }

        // Rehydrates a request previously serialized by toBundle() after process death.
        private Request(Bundle bundle, CallbackContext callbackContext, int requestCode) {
            this.callbackContext = callbackContext;
            this.requestCode = requestCode;

            this.action = bundle.getInt(ACTION_KEY);
            this.limit = bundle.getLong(LIMIT_KEY);
            this.duration = bundle.getInt(DURATION_KEY);
            this.quality = bundle.getInt(QUALITY_KEY);

            try {
                this.results = new JSONArray(bundle.getString(RESULTS_KEY));
            } catch(JSONException e) {
                // This should never be caught
                LOG.e(LOG_TAG, "Error parsing results for request from saved bundle", e);
            }
        }

        // Serializes this request (minus the callback context, which cannot be saved).
        private Bundle toBundle() {
            Bundle bundle = new Bundle();
            bundle.putInt(ACTION_KEY, this.action);
            bundle.putLong(LIMIT_KEY, this.limit);
            bundle.putInt(DURATION_KEY, this.duration);
            bundle.putInt(QUALITY_KEY, this.quality);
            bundle.putString(RESULTS_KEY, this.results.toString());
            return bundle;
        }
    }
}
| 3,325 |
0 | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/interface-plugin-class/src/main/java | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/interface-plugin-class/src/main/java/its/TestPluginImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package its;
import org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin;
/**
 * Test fixture: concrete plugin whose DolphinSchedulerPlugin contract is
 * inherited indirectly through the ITestPlugin interface; exercises descriptor
 * generation for interface-based plugin hierarchies.
 */
public class TestPluginImpl implements ITestPlugin {
}
| 3,326 |
0 | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/interface-plugin-class/src/main/java | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/interface-plugin-class/src/main/java/its/ITestPlugin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package its;
import org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin;
/**
 * Test fixture: an intermediate plugin interface extending the
 * DolphinSchedulerPlugin SPI; implemented by TestPluginImpl in this project.
 */
public interface ITestPlugin extends DolphinSchedulerPlugin {
}
| 3,327 |
0 | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/excluded-dependency/src/main/java | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/excluded-dependency/src/main/java/its/SimplestPlugin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package its;
import org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin;
/**
 * Test fixture: minimal DolphinSchedulerPlugin implementation for the
 * excluded-dependency integration-test project.
 */
public class SimplestPlugin implements DolphinSchedulerPlugin {
}
| 3,328 |
0 | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/error-scope-spi/src/main/java | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/error-scope-spi/src/main/java/its/ErrorScopeSpiPlugin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package its;
import org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin;
/**
 * Test fixture: plugin in a project whose SPI dependency is declared with the
 * wrong scope; the spi-dependencies-check goal is expected to fail the build.
 */
public class ErrorScopeSpiPlugin implements DolphinSchedulerPlugin {
}
| 3,329 |
0 | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/simplest/src/main/java | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/simplest/src/main/java/its/SimplestPlugin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package its;
import org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin;
/**
 * Test fixture: minimal DolphinSchedulerPlugin implementation used by the
 * "simplest" integration-test project.
 */
public class SimplestPlugin implements DolphinSchedulerPlugin {
}
| 3,330 |
0 | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/error-scope-dependency/src/main/java | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/error-scope-dependency/src/main/java/its/ErrorScopeDependencyPlugin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package its;
import org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin;
/**
 * Test fixture: plugin in a project that declares a non-SPI dependency with
 * 'provided' scope; the spi-dependencies-check goal is expected to fail.
 */
public class ErrorScopeDependencyPlugin implements DolphinSchedulerPlugin {
}
| 3,331 |
0 | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/more-excluded-dependency/src/main/java | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/more-excluded-dependency/src/main/java/its/SimplestPlugin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package its;
import org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin;
/**
 * Test fixture: minimal DolphinSchedulerPlugin implementation for the
 * more-excluded-dependency integration-test project.
 */
public class SimplestPlugin implements DolphinSchedulerPlugin {
}
| 3,332 |
0 | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/error-scope-but-skip/src/main/java | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/error-scope-but-skip/src/main/java/its/ErrorScopeButSkipPlugin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package its;
import org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin;
/**
 * Test fixture: plugin in a project with a dependency-scope error whose check
 * is configured to be skipped, so the build is expected to succeed anyway.
 */
public class ErrorScopeButSkipPlugin implements DolphinSchedulerPlugin {
}
| 3,333 |
0 | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/abstract-plugin-class/src/main/java | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/abstract-plugin-class/src/main/java/its/AbsTestPlugin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package its;
import org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin;
/**
 * Test fixture: abstract DolphinSchedulerPlugin base class; the descriptor
 * generator must skip this abstract class and pick its concrete subclass.
 */
public abstract class AbsTestPlugin implements DolphinSchedulerPlugin {
}
| 3,334 |
0 | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/abstract-plugin-class/src/main/java | Create_ds/dolphinscheduler-maven-plugin/src/test/projects/abstract-plugin-class/src/main/java/its/TestPluginImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package its;
import org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin;
/**
 * Test fixture: concrete subclass of the abstract AbsTestPlugin; this is the
 * class the generated services descriptor is expected to reference.
 */
public class TestPluginImpl extends AbsTestPlugin {
}
| 3,335 |
0 | Create_ds/dolphinscheduler-maven-plugin/src/test/java/org/apache/dolphinscheduler | Create_ds/dolphinscheduler-maven-plugin/src/test/java/org/apache/dolphinscheduler/maven/SpiDependencyCheckerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.maven;
import io.takari.maven.testing.TestResources;
import io.takari.maven.testing.executor.MavenExecutionResult;
import io.takari.maven.testing.executor.MavenRuntime;
import io.takari.maven.testing.executor.MavenVersions;
import io.takari.maven.testing.executor.junit.MavenJUnitTestRunner;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import java.io.File;
@RunWith(MavenJUnitTestRunner.class)
@MavenVersions({"3.3.9", "3.5.4", "3.6.2"})
@SuppressWarnings({"JUnitTestNG", "PublicField"})
public class SpiDependencyCheckerTest {
@Rule
public final TestResources resources = new TestResources();
public final MavenRuntime maven;
public SpiDependencyCheckerTest(MavenRuntime.MavenRuntimeBuilder mavenRuntimeBuilder) throws Exception {
this.maven = mavenRuntimeBuilder.withCliOptions("-B", "-U").build();
}
@Test
public void testBasic() throws Exception
{
File basedir = resources.getBasedir("simplest");
maven.forProject(basedir)
.execute("verify")
.assertErrorFreeLog();
}
@Test
public void testErrorScopeSpi() throws Exception
{
File basedir = resources.getBasedir("error-scope-spi");
MavenExecutionResult verify = maven.forProject(basedir)
.execute("verify");
verify.assertLogText("[ERROR] Failed to execute goal org.apache.dolphinscheduler:dolphinscheduler-maven-plugin:1.0.0-SNAPSHOT:spi-dependencies-check (default-spi-dependencies-check) on project error-scope-spi: DolphinScheduler plugin dependency org.apache.dolphinscheduler:dolphinscheduler-spi must have scope 'provided'. ");
}
@Test
public void testAbstractPluginClass() throws Exception
{
File basedir = resources.getBasedir("abstract-plugin-class");
maven.forProject(basedir)
.execute("verify")
.assertErrorFreeLog();
}
//
@Test
public void testInterfacePluginClass() throws Exception
{
File basedir = resources.getBasedir("interface-plugin-class");
maven.forProject(basedir)
.execute("verify")
.assertErrorFreeLog();
}
@Test
public void testExcludedDependency() throws Exception
{
File basedir = resources.getBasedir("excluded-dependency");
maven.forProject(basedir)
.execute("verify")
.assertErrorFreeLog();
}
@Test
public void testMoreExcludedDependency() throws Exception
{
File basedir = resources.getBasedir("more-excluded-dependency");
maven.forProject(basedir)
.execute("verify")
.assertErrorFreeLog();
}
@Test
public void testErrorScopeDependency() throws Exception
{
File basedir = resources.getBasedir("error-scope-dependency");
MavenExecutionResult verify = maven.forProject(basedir)
.execute("verify");
verify.assertLogText("[ERROR] Dolphinscheduler plugin dependency com.google.guava:guava must not have scope 'provided'. It is not part of the SPI and will not be available at runtime.");
}
@Test
public void testErrorScopeButSkip() throws Exception
{
File basedir = resources.getBasedir("error-scope-but-skip");
MavenExecutionResult verify = maven.forProject(basedir)
.execute("verify");
verify.assertErrorFreeLog();
}
}
| 3,336 |
0 | Create_ds/dolphinscheduler-maven-plugin/src/test/java/org/apache/dolphinscheduler | Create_ds/dolphinscheduler-maven-plugin/src/test/java/org/apache/dolphinscheduler/maven/DolphinDescriptorGeneratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.maven;
import io.takari.maven.testing.TestResources;
import io.takari.maven.testing.executor.MavenRuntime;
import io.takari.maven.testing.executor.MavenVersions;
import io.takari.maven.testing.executor.junit.MavenJUnitTestRunner;
import org.junit.Rule;
import static java.nio.charset.StandardCharsets.UTF_8;
import org.junit.Test;
import org.junit.runner.RunWith;
import static java.nio.file.Files.readAllLines;
import static org.junit.Assert.assertEquals;
import static java.util.Collections.singletonList;
import java.io.File;
import java.util.List;
/**
 * Integration tests for the descriptor-generator mojo: packages the sample
 * projects and verifies the generated SPI services descriptor names exactly
 * the expected plugin implementation class.
 */
@RunWith(MavenJUnitTestRunner.class)
@MavenVersions({"3.3.9", "3.5.4", "3.6.2"})
@SuppressWarnings({"JUnitTestNG", "PublicField"})
public class DolphinDescriptorGeneratorTest {

    // Relative path of the generated services descriptor inside target/classes.
    private static final String DESCRIPTOR = "META-INF/services/org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin";

    @Rule
    public final TestResources resources = new TestResources();

    public final MavenRuntime maven;

    public DolphinDescriptorGeneratorTest(MavenRuntime.MavenRuntimeBuilder mavenBuilder)
            throws Exception {
        this.maven = mavenBuilder.withCliOptions("-B", "-U").build();
    }

    @Test
    public void testSimplest() throws Exception {
        testProjectPackaging("simplest", "its.SimplestPlugin");
    }

    @Test
    public void testAbstractPluginClass() throws Exception {
        // The concrete subclass, not the abstract base, must appear in the descriptor.
        testProjectPackaging("abstract-plugin-class", "its.TestPluginImpl");
    }

    @Test
    public void testInterfacePluginClass() throws Exception {
        // The implementing class, not the intermediate interface, must appear.
        testProjectPackaging("interface-plugin-class", "its.TestPluginImpl");
    }

    /**
     * Packages the named sample project and asserts the generated descriptor
     * contains exactly one line: the expected plugin class name.
     */
    protected void testProjectPackaging(String projectId, String expectedPluginClass)
            throws Exception {
        File projectDir = resources.getBasedir(projectId);
        maven.forProject(projectDir)
                .execute("package")
                .assertErrorFreeLog();
        File descriptorFile = new File(projectDir, "target/classes/" + DESCRIPTOR);
        List<String> actualLines = readAllLines(descriptorFile.toPath(), UTF_8);
        assertEquals(singletonList(expectedPluginClass), actualLines);
    }
}
| 3,337 |
0 | Create_ds/dolphinscheduler-maven-plugin/src/main/java/org/apache/dolphinscheduler | Create_ds/dolphinscheduler-maven-plugin/src/main/java/org/apache/dolphinscheduler/maven/DolphinDescriptorGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.maven;
import org.apache.maven.artifact.Artifact;
import org.apache.maven.plugin.AbstractMojo;
import static java.nio.charset.StandardCharsets.UTF_8;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugins.annotations.LifecyclePhase;
import org.apache.maven.plugins.annotations.Mojo;
import org.apache.maven.plugins.annotations.Parameter;
import org.apache.maven.plugins.annotations.ResolutionScope;
import org.apache.maven.project.MavenProject;
import org.codehaus.plexus.util.FileUtils;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Modifier;
import java.net.URL;
import java.net.URLClassLoader;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.List;
/**
 * Generates the Java SPI services descriptor
 * (META-INF/services/&lt;plugin-interface&gt;) for a DolphinScheduler plugin,
 * naming the single concrete class in this project that implements the
 * configured plugin interface. Does nothing if the project already ships its
 * own descriptor file.
 */
@Mojo(name = "generate-dolphin-service-descriptor",
        defaultPhase = LifecyclePhase.PACKAGE,
        requiresDependencyResolution = ResolutionScope.COMPILE)
public class DolphinDescriptorGenerator extends AbstractMojo {

    private static final String LS_ALIAS = System.getProperty("line.separator");

    // Fully qualified name of the SPI interface plugin classes must implement.
    @Parameter(defaultValue = "org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin")
    private String pluginClassName;

    @Parameter(defaultValue = "${project.build.outputDirectory}/META-INF/services")
    private File servicesDirectory;

    @Parameter(defaultValue = "${project.build.outputDirectory}")
    private File classesDirectory;

    @Parameter(defaultValue = "${project}")
    private MavenProject project;

    /**
     * Locates the unique plugin implementation class and writes its name into
     * the services descriptor.
     *
     * @throws MojoExecutionException if the descriptor directory cannot be
     *         created, scanning fails, zero or more than one implementation is
     *         found, or the descriptor cannot be written
     */
    @Override
    public void execute()
            throws MojoExecutionException
    {
        File spiServicesFile = new File(servicesDirectory, pluginClassName);
        // If users have already provided their own service file then we will not overwrite it
        if (spiServicesFile.exists()) {
            return;
        }
        ensureParentDirectoryExists(spiServicesFile);

        List<Class<?>> pluginImplClasses;
        // try-with-resources: URLClassLoader holds file handles on the dependency
        // jars until closed. Only Class#getName() is used afterwards, which stays
        // valid once the loader is closed.
        try (URLClassLoader loader = createCLFromCompileTimeDependencies()) {
            pluginImplClasses = findPluginImplClasses(loader);
        }
        catch (Exception e) {
            throw new MojoExecutionException(String.format("%n%nError for find the classes that implements %s.", pluginClassName), e);
        }

        if (pluginImplClasses.isEmpty()) {
            throw new MojoExecutionException(String.format("%n%nNot find classes implements %s, You must have at least one class that implements %s.", pluginClassName, pluginClassName));
        }
        if (pluginImplClasses.size() > 1) {
            StringBuilder sb = new StringBuilder();
            for (Class<?> pluginClass : pluginImplClasses) {
                sb.append(pluginClass.getName()).append(LS_ALIAS);
            }
            throw new MojoExecutionException(String.format("%n%nFound more than one class that implements %s:%n%n%s%nYou can only have one per plugin project.", pluginClassName, sb));
        }
        try {
            Class<?> pluginClass = pluginImplClasses.get(0);
            Files.write(spiServicesFile.toPath(), pluginClass.getName().getBytes(UTF_8));
            getLog().info(String.format("Wrote %s to %s", pluginClass.getName(), spiServicesFile));
        }
        catch (IOException e) {
            // The mojo writes a services descriptor file, not a JAR.
            throw new MojoExecutionException("Failed to write services descriptor file.", e);
        }
    }

    // Creates the descriptor's parent directory, failing the build if it cannot
    // be created.
    private static void ensureParentDirectoryExists(File spiServicesFile)
            throws MojoExecutionException
    {
        File dir = spiServicesFile.getParentFile();
        if (!dir.exists()) {
            dir.mkdirs();
            if (!dir.isDirectory()) {
                throw new MojoExecutionException(String.format("%n%nFailed to create directory: %s", dir));
            }
        }
    }

    /**
     * Builds a classloader over this project's compiled classes and its
     * resolved compile-time dependency artifacts.
     */
    private URLClassLoader createCLFromCompileTimeDependencies()
            throws Exception
    {
        List<URL> classesUrls = new ArrayList<>();
        classesUrls.add(classesDirectory.toURI().toURL());
        for (Artifact artifact : project.getArtifacts()) {
            if (artifact.getFile() != null) {
                classesUrls.add(artifact.getFile().toURI().toURL());
            }
        }
        return new URLClassLoader(classesUrls.toArray(new URL[0]));
    }

    /**
     * Scans the compiled-classes directory for concrete (non-abstract,
     * non-interface) classes implementing the configured plugin interface.
     */
    private List<Class<?>> findPluginImplClasses(URLClassLoader urlClassLoader)
            throws IOException, MojoExecutionException
    {
        // The SPI interface is the same for every candidate: load it once
        // instead of once per .class file.
        Class<?> pluginClass = loadClass(urlClassLoader, pluginClassName);
        List<Class<?>> implementations = new ArrayList<>();
        List<String> classes = FileUtils.getFileNames(classesDirectory, "**/*.class", null, false);
        for (String classPath : classes) {
            // Strip the ".class" suffix and turn the relative path into a binary class name.
            String className = classPath.substring(0, classPath.length() - 6).replace(File.separatorChar, '.');
            Class<?> clazz = loadClass(urlClassLoader, className);
            if (isImplementation(clazz, pluginClass)) {
                implementations.add(clazz);
            }
        }
        return implementations;
    }

    // Wraps loadClass so both call sites report failures the same way.
    private static Class<?> loadClass(URLClassLoader loader, String name)
            throws MojoExecutionException
    {
        try {
            return loader.loadClass(name);
        }
        catch (ClassNotFoundException e) {
            throw new MojoExecutionException("Failed to load class.", e);
        }
    }

    private static boolean isImplementation(Class<?> clazz, Class<?> pluginClass)
    {
        return pluginClass.isAssignableFrom(clazz) && !Modifier.isAbstract(clazz.getModifiers()) && !Modifier.isInterface(clazz.getModifiers());
    }
}
| 3,338 |
0 | Create_ds/dolphinscheduler-maven-plugin/src/main/java/org/apache/dolphinscheduler | Create_ds/dolphinscheduler-maven-plugin/src/main/java/org/apache/dolphinscheduler/maven/SpiDependencyChecker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.maven;
import org.apache.maven.artifact.Artifact;
import org.apache.maven.plugin.AbstractMojo;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugins.annotations.Component;
import org.apache.maven.plugins.annotations.LifecyclePhase;
import org.apache.maven.plugins.annotations.Mojo;
import org.apache.maven.plugins.annotations.Parameter;
import org.apache.maven.plugins.annotations.ResolutionScope;
import org.apache.maven.project.MavenProject;
import org.eclipse.aether.RepositorySystem;
import org.eclipse.aether.RepositorySystemSession;
import org.eclipse.aether.artifact.DefaultArtifact;
import org.eclipse.aether.collection.CollectRequest;
import org.eclipse.aether.collection.CollectResult;
import org.eclipse.aether.collection.DependencyCollectionException;
import org.eclipse.aether.graph.Dependency;
import org.eclipse.aether.graph.DependencyNode;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;
/**
 * Validates dependency scopes of a DolphinScheduler plugin project: the SPI
 * artifact and its (non-optional, direct) dependencies must have scope
 * 'provided', while every other dependency must not be 'provided' unless
 * explicitly allow-listed.
 */
@Mojo(name = "spi-dependencies-check",
defaultPhase = LifecyclePhase.VALIDATE,
requiresDependencyResolution = ResolutionScope.COMPILE_PLUS_RUNTIME)
public class SpiDependencyChecker extends AbstractMojo {
// Coordinates identifying the SPI artifact the plugin must depend on.
@Parameter(defaultValue = "org.apache.dolphinscheduler")
private String spiGroupId;
@Parameter(defaultValue = "dolphinscheduler-spi")
private String spiArtifactId;
// When true, all checks are skipped and the build proceeds.
@Parameter(defaultValue = "false")
private boolean skipCheckSpiDependencies;
@Parameter(defaultValue = "${project}")
private MavenProject mavenProject;
// groupId:artifactId names that MAY use 'provided' scope even though they are
// not part of the SPI.
@Parameter
private final Set<String> allowedProvidedDependencies = new HashSet<>();
@Parameter(defaultValue = "${repositorySystemSession}")
private RepositorySystemSession repositorySession;
@Component
private RepositorySystem repositorySystem;
/**
 * Checks every project artifact against the SPI dependency set and fails the
 * build on the first scope/type/classifier violation.
 *
 * @throws MojoExecutionException if any dependency violates the scope rules
 */
@Override
public void execute() throws MojoExecutionException {
if (skipCheckSpiDependencies) {
getLog().info("Skipping Dolphinscheduler SPI dependency checks");
return;
}
Set<String> spiDependencies = getTheSpiDependencies();
getLog().debug("SPI dependencies: " + spiDependencies);
for (Artifact artifact : mavenProject.getArtifacts()) {
if (isSpiArtifact(artifact)) {
continue;
}
String name = artifact.getGroupId() + ":" + artifact.getArtifactId();
if (spiDependencies.contains(name)) {
// Artifacts that are part of the SPI must be plain 'provided' jars.
if (!"jar".equals(artifact.getType())) {
throw new MojoExecutionException(String.format("%n%nDolphinscheduler plugin dependency %s must have type 'jar'.", name));
}
if (artifact.getClassifier() != null) {
throw new MojoExecutionException(String.format("%n%nDolphinscheduler plugin dependency %s must not have a classifier.", name));
}
if (!"provided".equals(artifact.getScope())) {
throw new MojoExecutionException(String.format("%n%nDolphinscheduler plugin dependency %s must have scope 'provided'. It is part of the SPI and will be provided at runtime.", name));
}
}
else if ("provided".equals(artifact.getScope()) && !allowedProvidedDependencies.contains(name)) {
throw new MojoExecutionException(String.format("%n%nDolphinscheduler plugin dependency %s must not have scope 'provided'. It is not part of the SPI and will not be available at runtime.", name));
}
}
}
/**
 * Resolves the direct, non-optional dependencies of the SPI artifact as
 * "groupId:artifactId" strings.
 */
private Set<String> getTheSpiDependencies()
throws MojoExecutionException
{
return getArtifactDependencies(getSpiDependency())
.getRoot().getChildren().stream()
.filter(node -> !node.getDependency().isOptional())
.map(DependencyNode::getArtifact)
.map(artifact -> artifact.getGroupId() + ":" + artifact.getArtifactId())
.collect(Collectors.toSet());
}
/**
 * Collects the dependency graph of the given artifact via the Aether
 * repository system.
 */
private CollectResult getArtifactDependencies(Artifact artifact)
throws MojoExecutionException
{
try {
org.eclipse.aether.artifact.Artifact artifact1 = aetherArtifact(artifact);
Dependency projectDependency = new Dependency(artifact1, null);
return repositorySystem.collectDependencies(repositorySession, new CollectRequest(projectDependency, null));
}
catch (DependencyCollectionException e) {
throw new MojoExecutionException("Failed to resolve dependencies.", e);
}
}
/**
 * Finds the SPI artifact among the project's dependencies, enforcing that it
 * is declared with scope 'provided'.
 *
 * @throws MojoExecutionException if the SPI dependency is missing or has the
 *         wrong scope
 */
private Artifact getSpiDependency()
throws MojoExecutionException
{
for (Artifact artifact : mavenProject.getArtifacts()) {
if (!isSpiArtifact(artifact)) {
continue;
}
if (!"provided".equals(artifact.getScope())) {
throw new MojoExecutionException(String.format("DolphinScheduler plugin dependency %s must have scope 'provided'.", spiName()));
}
return artifact;
}
throw new MojoExecutionException(String.format("DolphinScheduler plugin must depend on %s.", spiName()));
}
// True when the artifact matches the configured SPI coordinates (plain jar,
// no classifier).
private boolean isSpiArtifact(Artifact artifact)
{
return spiGroupId.equals(artifact.getGroupId())
&& spiArtifactId.equals(artifact.getArtifactId())
&& "jar".equals(artifact.getType())
&& (artifact.getClassifier() == null);
}
private String spiName()
{
return spiGroupId + ":" + spiArtifactId;
}
// NOTE(review): the Maven 'type' is passed where DefaultArtifact expects an
// 'extension'; equivalent for 'jar' but may differ for exotic types — confirm.
private static org.eclipse.aether.artifact.Artifact aetherArtifact(Artifact artifact)
{
return new DefaultArtifact(
artifact.getGroupId(),
artifact.getArtifactId(),
artifact.getClassifier(),
artifact.getType(),
artifact.getVersion());
}
}
| 3,339 |
0 | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix/serialization/SerializationUtils.java | package com.netflix.serialization;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import com.google.common.base.Preconditions;
/**
 * Static helpers for round-tripping objects through {@link Serializer} and
 * {@link Deserializer} implementations using in-memory byte streams. All
 * string conversions use UTF-8.
 */
public class SerializationUtils {

    /**
     * Deserializes an object from the UTF-8 bytes of the given string.
     *
     * @param deserializer the deserializer to apply; must not be null
     * @param content the serialized form as a string
     * @param typeDef descriptor of the target type
     * @return the deserialized object
     * @throws IOException if the deserializer fails to read the content
     */
    public static <T> T deserializeFromString(Deserializer<T> deserializer, String content, TypeDef<T> typeDef)
            throws IOException {
        Preconditions.checkNotNull(deserializer);
        // StandardCharsets.UTF_8 is always available, unlike the charset-name
        // overload which forces a checked UnsupportedEncodingException path.
        ByteArrayInputStream in = new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8));
        return deserializer.deserialize(in, typeDef);
    }

    /**
     * Serializes an object and decodes the resulting bytes as a UTF-8 string.
     *
     * @throws IOException if the serializer fails
     */
    public static <T> String serializeToString(Serializer<T> serializer, T obj, TypeDef<?> typeDef)
            throws IOException {
        return new String(serializeToBytes(serializer, obj, typeDef), StandardCharsets.UTF_8);
    }

    /**
     * Serializes an object into a byte array using an in-memory buffer.
     *
     * @param serializer the serializer to apply; must not be null
     * @throws IOException if the serializer fails
     */
    public static <T> byte[] serializeToBytes(Serializer<T> serializer, T obj, TypeDef<?> typeDef)
            throws IOException {
        // Null-check for consistency with deserializeFromString.
        Preconditions.checkNotNull(serializer);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        serializer.serialize(out, obj, typeDef);
        return out.toByteArray();
    }
}
| 3,340 |
0 | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix/serialization/Deserializer.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.serialization;
import java.io.IOException;
import java.io.InputStream;
/**
 * Strategy interface for deserializing an object of type {@code T} from an input stream.
 */
public interface Deserializer<T> {
    /**
     * Reads and deserializes one object from {@code in}.
     *
     * @param in   stream containing the serialized form; whether it is closed is
     *             implementation-specific
     * @param type runtime type information for the target type
     * @return the deserialized object
     * @throws IOException if reading or deserialization fails
     */
    public T deserialize(InputStream in, TypeDef<T> type) throws IOException;
}
| 3,341 |
0 | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix/serialization/JacksonCodec.java | package com.netflix.serialization;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.lang.reflect.Type;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectWriter;
import org.codehaus.jackson.type.TypeReference;
import com.google.common.base.Charsets;
import com.google.common.io.CharStreams;
/**
 * Jackson-backed {@link Serializer}/{@link Deserializer} pair. A single shared
 * raw instance is exposed via {@link #getInstance()} and cast to the caller's
 * type parameter; this is safe only because the codec keeps no per-type state.
 */
public class JacksonCodec<T extends Object> implements Serializer<T>, Deserializer<T> {
    // Shared raw-typed singleton; see getInstance() for the unchecked hand-out.
    private static final JacksonCodec instance = new JacksonCodec();
    // ObjectMapper is thread-safe once configured, so one per codec suffices.
    private final ObjectMapper mapper = new ObjectMapper();
    @Override
    public T deserialize(InputStream in, TypeDef<T> type)
            throws IOException {
        // Special-case String: return the raw UTF-8 content instead of asking
        // Jackson to parse it as JSON.
        if (String.class.equals(type.getRawType())) {
            return (T) CharStreams.toString(new InputStreamReader(in, Charsets.UTF_8));
        }
        return mapper.readValue(in, new TypeTokenBasedReference<T>(type));
    }
    @Override
    public void serialize(OutputStream out, T object, TypeDef<?> type) throws IOException {
        if (type == null) {
            // No type hint: let Jackson serialize based on the runtime class.
            mapper.writeValue(out, object);
        } else {
            ObjectWriter writer = mapper.writerWithType(new TypeTokenBasedReference(type));
            writer.writeValue(out, object);
        }
    }
    /** Returns the shared singleton, cast (unchecked) to the requested element type. */
    public static final <T> JacksonCodec<T> getInstance() {
        return instance;
    }
}
/**
 * Adapter that exposes a {@link TypeDef} as a Jackson {@link TypeReference},
 * overriding {@link #getType()} so Jackson sees the captured generic type.
 */
class TypeTokenBasedReference<T> extends TypeReference<T> {
    final Type type;
    public TypeTokenBasedReference(TypeDef<T> typeToken) {
        type = typeToken.getType();
    }
    @Override
    public Type getType() {
        return type;
    }
}
| 3,342 |
0 | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix/serialization/TypeDef.java | package com.netflix.serialization;
import static com.google.common.base.Preconditions.checkArgument;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import com.google.common.reflect.TypeToken;
/**
 * A reified generic type holder, similar in spirit to Guava's {@link TypeToken}
 * and Jackson's TypeReference: subclassing (typically via an anonymous class,
 * e.g. {@code new TypeDef<List<String>>() {}}) captures the full type argument
 * at runtime despite erasure.
 */
public abstract class TypeDef<T> {
    // Backing token that performs the actual reflection work.
    private TypeToken<?> delegate;

    /**
     * Captures the actual type argument supplied by the concrete/anonymous
     * subclass; fails fast if the subclass did not parameterize TypeDef.
     */
    @SuppressWarnings("unchecked")
    protected TypeDef() {
        Type superclass = getClass().getGenericSuperclass();
        checkArgument(superclass instanceof ParameterizedType,
                "%s isn't parameterized", superclass);
        Type runtimeType = ((ParameterizedType) superclass).getActualTypeArguments()[0];
        this.delegate = (TypeToken<T>) TypeToken.of(runtimeType);
    }

    /** Creates a TypeDef for a plain (non-generic) class. */
    public static <T> TypeDef<T> fromClass(Class<T> classType) {
        TypeDef<T> spec = new TypeDef<T>() {
        };
        spec.delegate = TypeToken.of(classType);
        return spec;
    }

    /** Creates a TypeDef from a reflective {@link Type} whose static type is unknown. */
    public static TypeDef<?> fromType(Type type) {
        TypeDef<Object> spec = new TypeDef<Object>() {
        };
        spec.delegate = TypeToken.of(type);
        return spec;
    }

    /** Returns the raw (erased) class of the captured type. */
    @SuppressWarnings("unchecked") // delegate was built from T's runtime type
    public Class<? super T> getRawType() {
        return (Class<? super T>) delegate.getRawType();
    }

    /** Returns the full (possibly parameterized) captured type. */
    public Type getType() {
        return delegate.getType();
    }
}
| 3,343 |
0 | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix/serialization/StringDeserializer.java | package com.netflix.serialization;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import com.google.common.base.Charsets;
import com.google.common.io.CharStreams;
import com.google.common.io.Closeables;
/**
 * Singleton {@link Deserializer} that reads an entire input stream as a UTF-8
 * string, always closing the stream when finished.
 */
public class StringDeserializer implements Deserializer<String> {
    private static final StringDeserializer instance = new StringDeserializer();

    private StringDeserializer() {
    }

    /** Returns the shared singleton instance. */
    public static final StringDeserializer getInstance() {
        return instance;
    }

    @Override
    public String deserialize(InputStream in, TypeDef<String> type)
            throws IOException {
        try {
            return CharStreams.toString(new InputStreamReader(in, Charsets.UTF_8));
        } finally {
            // Swallow close() failures: the content has already been read.
            Closeables.close(in, true);
        }
    }
}
| 3,344 |
0 | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix/serialization/Serializer.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.serialization;
import java.io.IOException;
import java.io.OutputStream;
/**
 * Strategy interface for serializing an object of type {@code T} to an output stream.
 */
public interface Serializer<T> {
    /**
     * Writes the serialized form of {@code object} to {@code out}.
     *
     * @param out    destination stream; whether it is closed is implementation-specific
     * @param object the object to serialize
     * @param type   optional runtime type hint; implementations may accept null
     * @throws IOException if writing or serialization fails
     */
    public void serialize(OutputStream out, T object, TypeDef<?> type) throws IOException;
}
| 3,345 |
0 | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix/ribbon/test | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix/ribbon/test/resources/EmbeddedResources.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ribbon.test.resources;
import java.io.IOException;
import java.io.OutputStream;
import java.util.List;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.StatusType;
import javax.ws.rs.core.StreamingOutput;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.Ignore;
import com.google.common.collect.Lists;
@Ignore
@Path("/testAsync")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public class EmbeddedResources {
public static class Person {
public String name;
public int age;
public Person() {}
public Person(String name, int age) {
super();
this.name = name;
this.age = age;
}
@Override
public String toString() {
return "Person [name=" + name + ", age=" + age + "]";
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + age;
result = prime * result + ((name == null) ? 0 : name.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
Person other = (Person) obj;
if (age != other.age)
return false;
if (name == null) {
if (other.name != null)
return false;
} else if (!name.equals(other.name))
return false;
return true;
}
}
private static ObjectMapper mapper = new ObjectMapper();
public static final Person defaultPerson = new Person("ribbon", 1);
public static final List<String> streamContent = Lists.newArrayList();
public static final List<Person> entityStream = Lists.newArrayList();
static {
for (int i = 0; i < 1000; i++) {
streamContent.add("data: line " + i);
}
for (int i = 0; i < 1000; i++) {
entityStream.add(new Person("ribbon", i));
}
}
@GET
@Path("/person")
public Response getPerson() throws IOException {
String content = mapper.writeValueAsString(defaultPerson);
return Response.ok(content).build();
}
@GET
@Path("/context")
@Produces(MediaType.TEXT_PLAIN)
public Response echoContext(@HeaderParam("X-RXNETTY-REQUEST-ID") String requestId) throws IOException {
return Response.ok(requestId).build();
}
@GET
@Path("/noEntity")
public Response getNoEntity() {
return Response.ok().build();
}
@GET
@Path("/readTimeout")
public Response getReadTimeout() throws IOException, InterruptedException {
Thread.sleep(10000);
String content = mapper.writeValueAsString(defaultPerson);
return Response.ok(content).build();
}
@POST
@Path("/person")
public Response createPerson(String content) throws IOException {
Person person = mapper.readValue(content, Person.class);
return Response.ok(mapper.writeValueAsString(person)).build();
}
@GET
@Path("/personQuery")
public Response queryPerson(@QueryParam("name") String name, @QueryParam("age") int age) throws IOException {
Person person = new Person(name, age);
return Response.ok(mapper.writeValueAsString(person)).build();
}
@POST
@Path("/postTimeout")
public Response postWithTimeout(String content) {
try {
Thread.sleep(10000);
} catch (InterruptedException e) {
}
return Response.ok().build();
}
@GET
@Path("/throttle")
public Response throttle() {
return Response.status(Response.Status.SERVICE_UNAVAILABLE).entity("Rate exceeds limit").build();
}
@GET
@Path("/stream")
@Produces("text/event-stream")
public StreamingOutput getStream() {
return new StreamingOutput() {
@Override
public void write(OutputStream output) throws IOException,
WebApplicationException {
for (String line: streamContent) {
String eventLine = line + "\n";
output.write(eventLine.getBytes("UTF-8"));
}
}
};
}
@GET
@Path("/redirect")
public Response redirect(@QueryParam("port") int port) {
return Response.status(301).header("Location", "http://localhost:" + port + "/testAsync/person").build();
}
@GET
@Path("/personStream")
@Produces("text/event-stream")
public StreamingOutput getEntityStream() {
return new StreamingOutput() {
@Override
public void write(OutputStream output) throws IOException,
WebApplicationException {
for (Person person: entityStream) {
String eventLine = "data: " + mapper.writeValueAsString(person) + "\n\n";
output.write(eventLine.getBytes("UTF-8"));
}
}
};
}
}
| 3,346 |
0 | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix/ribbon | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix/ribbon/testutils/TestUtils.java | /*
*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ribbon.testutils;
import static org.junit.Assert.assertTrue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import rx.functions.Func0;
public class TestUtils {
public static void waitUntilTrueOrTimeout(int timeoutMilliseconds, final Func0<Boolean> func) {
final Lock lock = new ReentrantLock();
final Condition condition = lock.newCondition();
final AtomicBoolean stopThread = new AtomicBoolean(false);
if (!func.call()) {
(new Thread() {
@Override
public void run() {
while (!stopThread.get()) {
if (func.call()) {
lock.lock();
try {
condition.signalAll();
} finally {
lock.unlock();
}
}
try {
Thread.sleep(20);
} catch (Exception e) {
e.printStackTrace();
}
}
}
}).start();
lock.lock();
try {
condition.await(timeoutMilliseconds, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
lock.unlock();
stopThread.set(true);
}
}
assertTrue(func.call());
}
}
| 3,347 |
0 | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix/ribbon | Create_ds/ribbon/ribbon-test/src/main/java/com/netflix/ribbon/testutils/MockedDiscoveryServerListTest.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.testutils;
import com.netflix.appinfo.DataCenterInfo;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.MyDataCenterInfo;
import com.netflix.discovery.DefaultEurekaClientConfig;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.DiscoveryManager;
import com.netflix.loadbalancer.Server;
import org.easymock.EasyMock;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.runner.RunWith;
import org.powermock.api.easymock.PowerMock;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import java.util.ArrayList;
import java.util.List;
import static org.easymock.EasyMock.expect;
import static org.powermock.api.easymock.PowerMock.createMock;
import static org.powermock.api.easymock.PowerMock.replay;
/**
 * Base class for tests that need Eureka discovery to return a canned server
 * list. PowerMock statically mocks DiscoveryManager/DiscoveryClient so that
 * lookups for {@link #getVipAddress()} yield {@link #getMockServerList()}.
 * Marked @Ignore so JUnit does not run the abstract base directly.
 */
@RunWith(PowerMockRunner.class)
@PrepareForTest( {DiscoveryManager.class, DiscoveryClient.class} )
@PowerMockIgnore({"javax.management.*", "com.sun.jersey.*", "com.sun.*", "org.apache.*", "weblogic.*", "com.netflix.config.*", "com.sun.jndi.dns.*",
        "javax.naming.*", "com.netflix.logging.*", "javax.ws.*"})
@Ignore
public abstract class MockedDiscoveryServerListTest {
    /** Servers the mocked discovery client should report. */
    protected abstract List<Server> getMockServerList();
    /** VIP address the mocked discovery client responds to. */
    protected abstract String getVipAddress();
    // Wraps each Server in a minimal InstanceInfo under the given app name.
    static List<InstanceInfo> getDummyInstanceInfo(String appName, List<Server> serverList){
        List<InstanceInfo> list = new ArrayList<InstanceInfo>();
        for (Server server: serverList) {
            InstanceInfo info = InstanceInfo.Builder.newBuilder().setAppName(appName)
                    .setHostName(server.getHost())
                    .setPort(server.getPort())
                    .setDataCenterInfo(new MyDataCenterInfo(DataCenterInfo.Name.MyOwn))
                    .build();
            list.add(info);
        }
        return list;
    }
    // Installs the static mocks before every test in the subclass.
    @Before
    public void setupMock(){
        List<InstanceInfo> instances = getDummyInstanceInfo("dummy", getMockServerList());
        PowerMock.mockStatic(DiscoveryManager.class);
        PowerMock.mockStatic(DiscoveryClient.class);
        DiscoveryClient mockedDiscoveryClient = createMock(DiscoveryClient.class);
        DiscoveryManager mockedDiscoveryManager = createMock(DiscoveryManager.class);
        expect(mockedDiscoveryClient.getEurekaClientConfig()).andReturn(new DefaultEurekaClientConfig()).anyTimes();
        expect(DiscoveryManager.getInstance()).andReturn(mockedDiscoveryManager).anyTimes();
        expect(mockedDiscoveryManager.getDiscoveryClient()).andReturn(mockedDiscoveryClient).anyTimes();
        expect(mockedDiscoveryClient.getInstancesByVipAddress(getVipAddress(), false, null)).andReturn(instances).anyTimes();
        replay(DiscoveryManager.class);
        replay(DiscoveryClient.class);
        replay(mockedDiscoveryManager);
        replay(mockedDiscoveryClient);
    }
}
| 3,348 |
0 | Create_ds/ribbon/ribbon-core/src/test/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/test/java/com/netflix/client/config/ClientConfigTest.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client.config;
import static org.junit.Assert.*;
import com.netflix.config.ConfigurationManager;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.FixMethodOrder;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.junit.runners.MethodSorters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Objects;
import java.util.Properties;
/**
* Test cases to verify the correctness of the Client Configuration settings
*
* @author stonse
*
*/
// Tests mutate the global Archaius configuration, so method order is pinned to
// keep cross-test property interference deterministic.
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class ClientConfigTest {
    private static final Logger LOG = LoggerFactory.getLogger(ClientConfigTest.class);
    @Rule
    public TestName testName = new TestName();
    // Per-test keys: built from the method name in setUp() so each test reads
    // and writes its own distinct Archaius properties.
    IClientConfigKey<Integer> INTEGER_PROPERTY;
    IClientConfigKey<Integer> DEFAULT_INTEGER_PROPERTY;
    @Before
    public void setUp() throws Exception {
        INTEGER_PROPERTY = new CommonClientConfigKey<Integer>(
                "niws.loadbalancer.%s." + testName.getMethodName(), 10) {};
        DEFAULT_INTEGER_PROPERTY = new CommonClientConfigKey<Integer>(
                "niws.loadbalancer.default." + testName.getMethodName(), 30) {};
    }
    @AfterClass
    public static void shutdown() throws Exception {
    }
    // Loads client config from Archaius properties, applies a code-level
    // override, and verifies precedence plus ${...} VIP-address interpolation.
    @Test
    public void testNiwsConfigViaProperties() throws Exception {
        DefaultClientConfigImpl clientConfig = new DefaultClientConfigImpl();
        DefaultClientConfigImpl override = new DefaultClientConfigImpl();
        clientConfig.loadDefaultValues();
        Properties props = new Properties();
        final String restClientName = "testRestClient";
        props.setProperty("netflix.appinfo.stack","xbox");
        props.setProperty("netflix.environment","test");
        props.setProperty("appname", "movieservice");
        props.setProperty(restClientName + ".ribbon." + CommonClientConfigKey.AppName.key(), "movieservice");
        props.setProperty(restClientName + ".ribbon." + CommonClientConfigKey.DeploymentContextBasedVipAddresses.key(), "${appname}-${netflix.appinfo.stack}-${netflix.environment},movieservice--${netflix.environment}");
        props.setProperty(restClientName + ".ribbon." + CommonClientConfigKey.EnableZoneAffinity.key(), "false");
        ConfigurationManager.loadProperties(props);
        ConfigurationManager.getConfigInstance().setProperty("testRestClient.ribbon.customProperty", "abc");
        clientConfig.loadProperties(restClientName);
        clientConfig.set(CommonClientConfigKey.ConnectTimeout, 1000);
        override.set(CommonClientConfigKey.Port, 8000);
        override.set(CommonClientConfigKey.ConnectTimeout, 5000);
        clientConfig.applyOverride(override);
        Assert.assertEquals("movieservice", clientConfig.get(CommonClientConfigKey.AppName));
        Assert.assertEquals(false, clientConfig.get(CommonClientConfigKey.EnableZoneAffinity));
        Assert.assertEquals("movieservice-xbox-test,movieservice--test", clientConfig.resolveDeploymentContextbasedVipAddresses());
        // The applied override (5000) wins over the earlier code-level set (1000).
        Assert.assertEquals(5000, clientConfig.get(CommonClientConfigKey.ConnectTimeout).longValue());
        Assert.assertEquals(8000, clientConfig.get(CommonClientConfigKey.Port).longValue());
        System.out.println("AutoVipAddress:" + clientConfig.resolveDeploymentContextbasedVipAddresses());
        // Dynamic Archaius updates are picked up after load.
        ConfigurationManager.getConfigInstance().setProperty("testRestClient.ribbon.EnableZoneAffinity", "true");
        assertEquals(true, clientConfig.get(CommonClientConfigKey.EnableZoneAffinity));
    }
    // Verifies dynamic property updates and that clearing a property falls back
    // to null (with getOrDefault supplying the key's default).
    @Test
    public void testresolveDeploymentContextbasedVipAddresses() throws Exception {
        final String restClientName = "testRestClient2";
        DefaultClientConfigImpl clientConfig = new DefaultClientConfigImpl();
        clientConfig.loadDefaultValues();
        Properties props = new Properties();
        props.setProperty(restClientName + ".ribbon." + CommonClientConfigKey.AppName.key(), "movieservice");
        props.setProperty(restClientName + ".ribbon." + CommonClientConfigKey.DeploymentContextBasedVipAddresses.key(), "${<appname>}-${netflix.appinfo.stack}-${netflix.environment}:${<port>},${<appname>}--${netflix.environment}:${<port>}");
        props.setProperty(restClientName + ".ribbon." + CommonClientConfigKey.Port.key(), "7001");
        props.setProperty(restClientName + ".ribbon." + CommonClientConfigKey.EnableZoneAffinity.key(), "true");
        ConfigurationManager.loadProperties(props);
        clientConfig.loadProperties(restClientName);
        Assert.assertEquals("movieservice", clientConfig.get(CommonClientConfigKey.AppName));
        Assert.assertEquals(true, clientConfig.get(CommonClientConfigKey.EnableZoneAffinity));
        ConfigurationManager.getConfigInstance().setProperty("testRestClient2.ribbon.DeploymentContextBasedVipAddresses", "movieservice-xbox-test:7001");
        assertEquals("movieservice-xbox-test:7001", clientConfig.get(CommonClientConfigKey.DeploymentContextBasedVipAddresses));
        ConfigurationManager.getConfigInstance().clearProperty("testRestClient2.ribbon.EnableZoneAffinity");
        assertNull(clientConfig.get(CommonClientConfigKey.EnableZoneAffinity));
        assertFalse(clientConfig.getOrDefault(CommonClientConfigKey.EnableZoneAffinity));
    }
    // Fallback chain: with neither property set, the fallback key's default (30) wins.
    @Test
    public void testFallback_noneSet() {
        DefaultClientConfigImpl clientConfig = new DefaultClientConfigImpl();
        Property<Integer> prop = clientConfig.getGlobalProperty(INTEGER_PROPERTY.format(testName.getMethodName()))
                .fallbackWith(clientConfig.getGlobalProperty(DEFAULT_INTEGER_PROPERTY));
        Assert.assertEquals(30, prop.getOrDefault().intValue());
    }
    // Fallback chain: an explicit value on the fallback key overrides its default.
    @Test
    public void testFallback_fallbackSet() {
        ConfigurationManager.getConfigInstance().setProperty(DEFAULT_INTEGER_PROPERTY.key(), "100");
        DefaultClientConfigImpl clientConfig = new DefaultClientConfigImpl();
        Property<Integer> prop = clientConfig.getGlobalProperty(INTEGER_PROPERTY.format(testName.getMethodName()))
                .fallbackWith(clientConfig.getGlobalProperty(DEFAULT_INTEGER_PROPERTY));
        Assert.assertEquals(100, prop.getOrDefault().intValue());
    }
    // Fallback chain: a value on the primary key short-circuits the fallback.
    @Test
    public void testFallback_primarySet() {
        ConfigurationManager.getConfigInstance().setProperty(INTEGER_PROPERTY.format(testName.getMethodName()).key(), "200");
        DefaultClientConfigImpl clientConfig = new DefaultClientConfigImpl();
        Property<Integer> prop = clientConfig.getGlobalProperty(INTEGER_PROPERTY.format(testName.getMethodName()))
                .fallbackWith(clientConfig.getGlobalProperty(DEFAULT_INTEGER_PROPERTY));
        Assert.assertEquals(200, prop.getOrDefault().intValue());
    }
    // Value type with a static valueOf(String), used to exercise the config
    // system's string-to-object conversion.
    static class CustomValueOf {
        private final String value;
        public static CustomValueOf valueOf(String value) {
            return new CustomValueOf(value);
        }
        public CustomValueOf(String value) {
            this.value = value;
        }
        public String getValue() {
            return value;
        }
        @Override
        public String toString() {
            return value;
        }
        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            CustomValueOf that = (CustomValueOf) o;
            return Objects.equals(value, that.value);
        }
        @Override
        public int hashCode() {
            return Objects.hash(value);
        }
    }
    public static IClientConfigKey<CustomValueOf> CUSTOM_KEY = new CommonClientConfigKey<CustomValueOf>("CustomValueOf", new CustomValueOf("default")) {};
    // With no property set, the key's declared default is returned.
    @Test
    public void testValueOfWithDefault() {
        DefaultClientConfigImpl clientConfig = new DefaultClientConfigImpl();
        CustomValueOf prop = clientConfig.getOrDefault(CUSTOM_KEY);
        Assert.assertEquals("default", prop.getValue());
    }
    // Dynamic property: updates are converted via valueOf and reflected live.
    @Test
    public void testValueOf() {
        ConfigurationManager.getConfigInstance().setProperty("testValueOf.ribbon.CustomValueOf", "value");
        DefaultClientConfigImpl clientConfig = new DefaultClientConfigImpl();
        clientConfig.loadProperties("testValueOf");
        Property<CustomValueOf> prop = clientConfig.getDynamicProperty(CUSTOM_KEY);
        Assert.assertEquals("value", prop.getOrDefault().getValue());
        ConfigurationManager.getConfigInstance().setProperty("testValueOf.ribbon.CustomValueOf", "value2");
        Assert.assertEquals("value2", prop.getOrDefault().getValue());
    }
    // get() tracks set/clear of the underlying Archaius property, including null after clear.
    @Test
    public void testDynamicConfig() {
        ConfigurationManager.getConfigInstance().setProperty("testValueOf.ribbon.CustomValueOf", "value");
        DefaultClientConfigImpl clientConfig = new DefaultClientConfigImpl();
        clientConfig.loadProperties("testValueOf");
        Assert.assertEquals("value", clientConfig.get(CUSTOM_KEY).getValue());
        ConfigurationManager.getConfigInstance().setProperty("testValueOf.ribbon.CustomValueOf", "value2");
        Assert.assertEquals("value2", clientConfig.get(CUSTOM_KEY).getValue());
        ConfigurationManager.getConfigInstance().clearProperty("testValueOf.ribbon.CustomValueOf");
        Assert.assertNull(clientConfig.get(CUSTOM_KEY));
    }
}
| 3,349 |
0 | Create_ds/ribbon/ribbon-core/src/test/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/test/java/com/netflix/client/config/DefaultClientConfigImplTest.java | package com.netflix.client.config;
import static org.junit.Assert.*;
import java.util.Collections;
import java.util.Date;
import java.util.Map;
import java.util.TreeMap;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import com.netflix.config.ConfigurationManager;
import org.junit.rules.TestName;
/**
 * Tests for DefaultClientConfigImpl covering Archaius-property precedence,
 * generic-type capture on config keys, and prefix-mapped properties. These
 * tests mutate the global Archaius configuration.
 */
public class DefaultClientConfigImplTest {
    // Subclass used to verify type-parameter capture survives another level of inheritance.
    class NewConfigKey<T> extends CommonClientConfigKey<T> {
        protected NewConfigKey(String configKey) {
            super(configKey);
        }
    }
    @Rule
    public TestName testName = new TestName();
    // An Archaius-set property must win over a later code-level set().
    @Test
    public void testTypedValue() {
        ConfigurationManager.getConfigInstance().setProperty("myclient.ribbon." + CommonClientConfigKey.ConnectTimeout, "1500");
        DefaultClientConfigImpl config = new DefaultClientConfigImpl();
        config.loadProperties("myclient");
        assertEquals(1500, config.get(CommonClientConfigKey.ConnectTimeout).intValue());
        config.set(CommonClientConfigKey.ConnectTimeout, 2000);
        // The archaius property should override code override
        assertEquals(1500, config.get(CommonClientConfigKey.ConnectTimeout).intValue());
    }
    // Anonymous subclassing captures the key's generic type at runtime.
    @Test
    public void testNewType() {
        CommonClientConfigKey<Date> key = new CommonClientConfigKey<Date>("date") {};
        assertEquals(Date.class, key.type());
    }
    // Same capture works through an intermediate subclass.
    @Test
    public void testSubClass() {
        NewConfigKey<Date> key = new NewConfigKey<Date>("date") {};
        assertEquals(Date.class, key.type());
    }
    // Value type built from a String map, used for prefix-mapped property tests.
    public static class CustomType {
        private final Map<String, String> value;
        public CustomType(Map<String, String> value) {
            this.value = new TreeMap<>(value);
        }
        public static CustomType valueOf(Map<String, String> value) {
            return new CustomType(value);
        }
    }
    final CommonClientConfigKey<CustomType> CustomTypeKey = new CommonClientConfigKey<CustomType>("customMapped", new CustomType(Collections.emptyMap())) {};
    // Client-scoped sub-keys merge with and override the global "ribbon." defaults.
    @Test
    public void testMappedProperties() {
        String clientName = testName.getMethodName();
        ConfigurationManager.getConfigInstance().setProperty("ribbon.customMapped.a", "1");
        ConfigurationManager.getConfigInstance().setProperty("ribbon.customMapped.b", "2");
        ConfigurationManager.getConfigInstance().setProperty("ribbon.customMapped.c", "3");
        ConfigurationManager.getConfigInstance().setProperty(clientName + ".ribbon.customMapped.c", "4");
        ConfigurationManager.getConfigInstance().setProperty(clientName + ".ribbon.customMapped.d", "5");
        ConfigurationManager.getConfigInstance().setProperty(clientName + ".ribbon.customMapped.e", "6");
        DefaultClientConfigImpl config = new DefaultClientConfigImpl();
        config.loadProperties(clientName);
        CustomType customType = config.getPrefixMappedProperty(CustomTypeKey).get().get();
        TreeMap<String, String> expected = new TreeMap<>();
        expected.put("a", "1");
        expected.put("b", "2");
        expected.put("c", "4");
        expected.put("d", "5");
        expected.put("e", "6");
        Assert.assertEquals(expected, customType.value);
    }
}
| 3,350 |
0 | Create_ds/ribbon/ribbon-core/src/test/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/test/java/com/netflix/client/config/ArchaiusPropertyResolverTest.java | package com.netflix.client.config;
import com.netflix.config.ConfigurationManager;
import org.apache.commons.configuration.AbstractConfiguration;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import java.util.Map;
import java.util.TreeMap;
/**
 * Tests for ArchaiusPropertyResolver.forEach, which enumerates all Archaius
 * properties under a prefix. Keys are namespaced per test method to avoid
 * interference through the global configuration.
 */
public class ArchaiusPropertyResolverTest {
    @Rule
    public TestName testName = new TestName();
    // forEach yields each sub-key (prefix stripped) with its value.
    @Test
    public void mapFromPrefixedKeys() {
        final String prefix = "client.ribbon." + testName.getMethodName();
        final AbstractConfiguration config = ConfigurationManager.getConfigInstance();
        config.setProperty(prefix + ".a", "1");
        config.setProperty(prefix + ".b", "2");
        config.setProperty(prefix + ".c", "3");
        final ArchaiusPropertyResolver resolver = ArchaiusPropertyResolver.INSTANCE;
        final Map<String, String> map = new TreeMap<>();
        resolver.forEach(prefix, map::put);
        final Map<String, String> expected = new TreeMap<>();
        expected.put("a", "1");
        expected.put("b", "2");
        expected.put("c", "3");
        Assert.assertEquals(expected, map);
    }
    // With no matching properties, the callback is never invoked.
    @Test
    public void noCallbackIfNoValues() {
        final String prefix = "client.ribbon." + testName.getMethodName();
        final ArchaiusPropertyResolver resolver = ArchaiusPropertyResolver.INSTANCE;
        final Map<String, String> map = new TreeMap<>();
        resolver.forEach(prefix, map::put);
        Assert.assertTrue(map.toString(), map.isEmpty());
    }
}
| 3,351 |
0 | Create_ds/ribbon/ribbon-core/src/test/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/test/java/com/netflix/client/config/CommonClientConfigKeyTest.java | package com.netflix.client.config;
import static org.junit.Assert.*;
import org.junit.Test;
import com.google.common.collect.Sets;
/**
 * Sanity checks for the CommonClientConfigKey registry: values() and keys()
 * must agree and contain the well-known keys.
 */
public class CommonClientConfigKeyTest {
    @Test
    public void testCommonKeys() {
        IClientConfigKey[] keys = CommonClientConfigKey.values();
        // Loose lower bound guards against the registry being accidentally emptied.
        assertTrue(keys.length > 30);
        assertEquals(Sets.newHashSet(keys), CommonClientConfigKey.keys());
        assertTrue(CommonClientConfigKey.keys().contains(CommonClientConfigKey.ConnectTimeout));
    }
}
| 3,352 |
0 | Create_ds/ribbon/ribbon-core/src/test/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/test/java/com/netflix/client/config/ReloadableClientConfigTest.java | package com.netflix.client.config;
import com.netflix.config.ConfigurationManager;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
/**
 * Tests precedence between code-level overrides and dynamic (Archaius)
 * properties on a reloadable client config. Each test gets a uniquely named
 * key (class + method name) to isolate it from global Archaius state.
 */
public class ReloadableClientConfigTest {
    @Rule
    public TestName testName = new TestName();
    private CommonClientConfigKey<Integer> testKey;
    @Before
    public void before() {
        this.testKey = new CommonClientConfigKey<Integer>(getClass().getName() + "." + testName.getMethodName(), -1) {};
    }
    // applyOverride copies values set on another config instance.
    @Test
    public void testOverrideLoadedConfig() {
        final DefaultClientConfigImpl overrideconfig = new DefaultClientConfigImpl();
        overrideconfig.set(testKey, 123);
        final DefaultClientConfigImpl config = new DefaultClientConfigImpl();
        config.loadDefaultValues();
        config.applyOverride(overrideconfig);
        Assert.assertEquals(123, config.get(testKey).intValue());
    }
    // A dynamic property present before loading beats code-level set(), and
    // clear/set on the dynamic property is tracked live.
    @Test
    public void setBeforeLoading() {
        // Ensure property is set before config is created
        ConfigurationManager.getConfigInstance().setProperty("ribbon." + testKey.key(), "123");
        // Load config and attempt to set value to 0
        final DefaultClientConfigImpl config = new DefaultClientConfigImpl();
        config.loadProperties("foo");
        config.set(testKey, 0);
        // Value should be 123 because of fast property
        Assert.assertEquals(123, config.get(testKey).intValue());
        // Clearing property should make it null
        ConfigurationManager.getConfigInstance().clearProperty("ribbon." + testKey.key());
        Assert.assertNull(config.get(testKey));
        // Setting property again should give new value
        ConfigurationManager.getConfigInstance().setProperty("ribbon." + testKey.key(), "124");
        Assert.assertEquals(124, config.get(testKey).intValue());
    }
    // A dynamic property set after loading also overrides a code-level set().
    @Test
    public void setAfterLoading() {
        final DefaultClientConfigImpl config = new DefaultClientConfigImpl();
        config.loadProperties("foo");
        config.set(testKey, 456);
        ConfigurationManager.getConfigInstance().setProperty("ribbon." + testKey.key(), "123");
        Assert.assertEquals(123, config.get(testKey).intValue());
    }
}
| 3,353 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/VipAddressResolver.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client;
import com.netflix.client.config.IClientConfig;
/**
 * Resolves a "VipAddress" -- a logical name for a target server farm -- into a
 * concrete address usable by a client.
 *
 * @author stonse
 */
public interface VipAddressResolver {

    /**
     * Resolve the given vip address, optionally consulting the supplied client
     * configuration.
     */
    String resolve(String vipAddress, IClientConfig niwsClientConfig);
}
| 3,354 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/IClientConfigAware.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client;
import com.netflix.client.config.IClientConfig;
/**
 * There are multiple classes (and components) that need access to the configuration.
 * Its easier to do this by using {@link IClientConfig} as the object that carries these configurations
 * and to define a common interface that components that need this can implement and hence be aware of.
 *
 * @author stonse
 * @author awang
 *
 */
public interface IClientConfigAware {

    /**
     * Factory capable of instantiating a component by type name together with
     * its client configuration.
     */
    interface Factory {
        Object create(String type, IClientConfig config) throws InstantiationException, IllegalAccessException, ClassNotFoundException;
    }

    /**
     * Concrete implementation should implement this method so that the configuration set via
     * {@link IClientConfig} (which in turn were set via Archaius properties) will be taken into consideration
     *
     * @param clientConfig
     */
    default void initWithNiwsConfig(IClientConfig clientConfig) {
        // Default is a no-op; components override when they need configuration.
    }

    /**
     * Overload that also supplies a {@link Factory}. The default implementation
     * ignores the factory and delegates to {@link #initWithNiwsConfig(IClientConfig)}.
     */
    default void initWithNiwsConfig(IClientConfig clientConfig, Factory factory) {
        initWithNiwsConfig(clientConfig);
    }
}
| 3,355 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/RetryHandler.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client;
import java.net.ConnectException;
/**
 * Strategy used by load balancing clients to decide (a) whether a failed
 * request may be retried and (b) whether a failure should count against a
 * server's circuit breaker so the load balancer can avoid that server.
 *
 * @author awang
 */
public interface RetryHandler {

    /** Shared default handler, limited to common java.net exceptions. */
    RetryHandler DEFAULT = new DefaultLoadBalancerRetryHandler();

    /**
     * Test if an exception is retriable for the load balancer.
     *
     * @param e the original exception
     * @param sameServer if true, determine whether the retry may target the
     *        same server; otherwise whether it may target a different server
     */
    boolean isRetriableException(Throwable e, boolean sameServer);

    /**
     * Test if an exception should be treated as a circuit failure (for
     * example, a {@link ConnectException}), so that successive occurrences can
     * trip the circuit breaker for a particular host. If this returns false
     * but no server response is present, the load balancer will also close the
     * circuit upon such an exception.
     */
    boolean isCircuitTrippingException(Throwable e);

    /** @return maximum number of retries to attempt on the same server */
    int getMaxRetriesOnSameServer();

    /** @return maximum number of different servers to try */
    int getMaxRetriesOnNextServer();
}
| 3,356 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/DefaultLoadBalancerRetryHandler.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client;
import com.google.common.collect.Lists;
import com.netflix.client.config.CommonClientConfigKey;
import com.netflix.client.config.IClientConfig;
import java.net.ConnectException;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * A default {@link RetryHandler} limited to well-known exceptions in
 * {@code java.net}. Protocol-specific clients should provide their own
 * {@link RetryHandler} implementations.
 *
 * @author awang
 */
public class DefaultLoadBalancerRetryHandler implements RetryHandler {

    // Plain stdlib list construction; the previous guava Lists.newArrayList
    // added a third-party dependency for no benefit. Fields are final: the
    // lists are only exposed through the protected getters below.
    /** Exceptions retriable on the same server: connect and read timeouts. */
    private final List<Class<? extends Throwable>> retriable =
            new ArrayList<>(Arrays.asList(ConnectException.class, SocketTimeoutException.class));

    /** Exceptions that should count against a server's circuit breaker. */
    private final List<Class<? extends Throwable>> circuitRelated =
            new ArrayList<>(Arrays.asList(SocketException.class, SocketTimeoutException.class));

    protected final int retrySameServer;
    protected final int retryNextServer;
    protected final boolean retryEnabled;

    /** Creates a handler that performs no retries at all. */
    public DefaultLoadBalancerRetryHandler() {
        this(0, 0, false);
    }

    /**
     * @param retrySameServer maximum number of retries on the same server
     * @param retryNextServer maximum number of different servers to try
     * @param retryEnabled whether retrying is enabled at all
     */
    public DefaultLoadBalancerRetryHandler(int retrySameServer, int retryNextServer, boolean retryEnabled) {
        this.retrySameServer = retrySameServer;
        this.retryNextServer = retryNextServer;
        this.retryEnabled = retryEnabled;
    }

    /**
     * Initializes the retry counts from the client configuration keys
     * {@code MaxAutoRetries}, {@code MaxAutoRetriesNextServer} and
     * {@code OkToRetryOnAllOperations}.
     */
    public DefaultLoadBalancerRetryHandler(IClientConfig clientConfig) {
        this.retrySameServer = clientConfig.getOrDefault(CommonClientConfigKey.MaxAutoRetries);
        this.retryNextServer = clientConfig.getOrDefault(CommonClientConfigKey.MaxAutoRetriesNextServer);
        this.retryEnabled = clientConfig.getOrDefault(CommonClientConfigKey.OkToRetryOnAllOperations);
    }

    @Override
    public boolean isRetriableException(Throwable e, boolean sameServer) {
        if (!retryEnabled) {
            return false;
        }
        // Retrying on the same server only makes sense for transient
        // connect/timeout failures; any failure may be retried on another server.
        return !sameServer || Utils.isPresentAsCause(e, getRetriableExceptions());
    }

    /**
     * @return true if {@link SocketException} or {@link SocketTimeoutException}
     *         is present in the cause chain of the Throwable.
     */
    @Override
    public boolean isCircuitTrippingException(Throwable e) {
        return Utils.isPresentAsCause(e, getCircuitRelatedExceptions());
    }

    @Override
    public int getMaxRetriesOnSameServer() {
        return retrySameServer;
    }

    @Override
    public int getMaxRetriesOnNextServer() {
        return retryNextServer;
    }

    /** @return exceptions considered retriable on the same server */
    protected List<Class<? extends Throwable>> getRetriableExceptions() {
        return retriable;
    }

    /** @return exceptions considered circuit-tripping failures */
    protected List<Class<? extends Throwable>> getCircuitRelatedExceptions() {
        return circuitRelated;
    }
}
| 3,357 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/ClientException.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.HashMap;
/**
 * Exception type used throughout the client framework, carrying an optional
 * numeric error code, an {@link ErrorType} category, and an arbitrary error
 * payload object.
 */
public class ClientException extends Exception {

    private static final long serialVersionUID = -7697654244064441234L;

    /**
     * Well-known error categories. The ordinal of each constant doubles as its
     * numeric error code, so the declaration order must not change.
     */
    public enum ErrorType {
        GENERAL,
        CONFIGURATION,
        NUMBEROF_RETRIES_EXEEDED,
        NUMBEROF_RETRIES_NEXTSERVER_EXCEEDED,
        SOCKET_TIMEOUT_EXCEPTION,
        READ_TIMEOUT_EXCEPTION,
        UNKNOWN_HOST_EXCEPTION,
        CONNECT_EXCEPTION,
        CLIENT_THROTTLED,
        SERVER_THROTTLED,
        NO_ROUTE_TO_HOST_EXCEPTION,
        CACHE_MISSING;

        // Cache values() once: each values() call allocates a fresh array.
        // https://www.gamlor.info/wordpress/2017/08/javas-enum-values-hidden-allocations/
        private static final ErrorType[] ERROR_TYPE_VALUES = values();

        /**
         * Maps a numeric error code to the matching constant name, or
         * "UNKNOWN ERROR CODE" when the code is out of range.
         *
         * Fix: the previous bounds check ({@code length >= errorCode}) let
         * {@code errorCode == length} and negative codes through, raising
         * ArrayIndexOutOfBoundsException instead of returning the fallback.
         */
        static String getName(int errorCode) {
            if (errorCode >= 0 && errorCode < ERROR_TYPE_VALUES.length) {
                return ERROR_TYPE_VALUES[errorCode].name();
            } else {
                return "UNKNOWN ERROR CODE";
            }
        }
    }

    protected int errorCode;
    protected String message;
    protected Object errorObject;
    protected ErrorType errorType = ErrorType.GENERAL;

    public ClientException(String message) {
        this(0, message, null);
    }

    public ClientException(int errorCode) {
        this(errorCode, null, null);
    }

    public ClientException(int errorCode, String message) {
        this(errorCode, message, null);
    }

    public ClientException(Throwable chainedException) {
        this(0, null, chainedException);
    }

    public ClientException(String message, Throwable chainedException) {
        this(0, message, chainedException);
    }

    /**
     * @param errorCode numeric error code; when no message is given and the
     *        code is non-zero, a message is synthesized from the code's name
     * @param message human-readable message, may be null
     * @param chainedException underlying cause, may be null
     */
    public ClientException(int errorCode, String message, Throwable chainedException) {
        super((message == null && errorCode != 0) ? ", code=" + errorCode + "->" + ErrorType.getName(errorCode) : message,
                chainedException);
        this.errorCode = errorCode;
        this.message = message;
    }

    public ClientException(ErrorType error) {
        this(error.ordinal(), null, null);
        this.errorType = error;
    }

    public ClientException(ErrorType error, String message) {
        this(error.ordinal(), message, null);
        this.errorType = error;
    }

    public ClientException(ErrorType error, String message, Throwable chainedException) {
        super((message == null && error.ordinal() != 0) ? ", code=" + error.ordinal() + "->" + error.name() : message,
                chainedException);
        this.errorCode = error.ordinal();
        this.message = message;
        this.errorType = error;
    }

    public ErrorType getErrorType() {
        return errorType;
    }

    public int getErrorCode() {
        return this.errorCode;
    }

    public void setErrorCode(int errorCode) {
        this.errorCode = errorCode;
    }

    /** @return the raw message passed to the constructor (may be null even when {@link #getMessage()} is not) */
    public String getErrorMessage() {
        return this.message;
    }

    public void setErrorMessage(String msg) {
        this.message = msg;
    }

    public Object getErrorObject() {
        return this.errorObject;
    }

    public void setErrorObject(Object errorObject) {
        this.errorObject = errorObject;
    }

    /**
     * Return the message associated with such an exception.
     *
     * @return a message associated with the current exception
     */
    public String getInternalMessage() {
        return "{no message: " + errorCode + "}";
    }

    /**
     * Return the codes that are defined on a subclass of our class.
     * Raw types are retained for backward compatibility with existing callers.
     *
     * @param clazz a class that is a subclass of us.
     * @return a hashmap of error code constants mapped to their field names.
     */
    public static HashMap getErrorCodes(Class clazz) {
        HashMap map = new HashMap(23);
        // Reflect over public static final fields to build the reverse mapping
        // of error code values to their symbolic names.
        for (Field fld : clazz.getDeclaredFields()) {
            int mods = fld.getModifiers();
            if (Modifier.isFinal(mods) && Modifier.isStatic(mods) && Modifier.isPublic(mods)) {
                try {
                    map.put(fld.get(null), fld.getName());
                } catch (Throwable t) { // NOPMD
                    // Inaccessible fields are deliberately skipped.
                }
            }
        }
        return map;
    }
}
| 3,358 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/IResponse.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client;
import java.io.Closeable;
import java.net.URI;
import java.util.Map;
/**
 * Protocol-agnostic response abstraction for the client framework.
 */
public interface IResponse extends Closeable {

    /** Returns the raw entity, if available, from the response. */
    Object getPayload() throws ClientException;

    /** "Peek" API: true when the service returned a response carrying an entity. */
    boolean hasPayload();

    /** @return true if the response is deemed a success, e.g. an HTTP 200. */
    boolean isSuccess();

    /** Returns the request URI that produced this response. */
    URI getRequestedURI();

    /** @return any headers present in the response */
    Map<String, ?> getHeaders();
}
| 3,359 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/IClient.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client;
import com.netflix.client.config.IClientConfig;
/**
 * A client capable of executing a single request. Implementations perform no
 * retries; all exceptions propagate directly to the caller.
 *
 * @author awang
 */
public interface IClient<S extends ClientRequest, T extends IResponse> {

    /**
     * Execute the request and return the response.
     */
    T execute(S request, IClientConfig requestConfig) throws Exception;
}
| 3,360 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/ClientRequest.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client;
import java.net.URI;
import com.netflix.client.config.IClientConfig;
/**
 * A common client request suitable for all communication protocols. Instances
 * are expected to be treated as immutable by callers.
 *
 * @author awang
 */
public class ClientRequest implements Cloneable {

    protected URI uri;
    protected Object loadBalancerKey = null;
    protected Boolean isRetriable = null;
    protected IClientConfig overrideConfig;

    public ClientRequest() {
    }

    public ClientRequest(URI uri) {
        this.uri = uri;
    }

    /**
     * Constructor to set all fields.
     * @deprecated request configuration should now be passed
     * as a method parameter to client's execution API
     *
     * @param uri URI to set
     * @param loadBalancerKey the object that is used by {@code com.netflix.loadbalancer.ILoadBalancer#chooseServer(Object)}, can be null
     * @param isRetriable if the operation is retriable on failures
     * @param overrideConfig client configuration that is used for this specific request. can be null.
     */
    @Deprecated
    public ClientRequest(URI uri, Object loadBalancerKey, boolean isRetriable, IClientConfig overrideConfig) {
        this(uri, loadBalancerKey, isRetriable);
        this.overrideConfig = overrideConfig;
    }

    public ClientRequest(URI uri, Object loadBalancerKey, boolean isRetriable) {
        this.uri = uri;
        this.loadBalancerKey = loadBalancerKey;
        this.isRetriable = isRetriable;
    }

    /** Copy constructor used as the fallback when cloning is unsupported. */
    public ClientRequest(ClientRequest request) {
        this.uri = request.uri;
        this.loadBalancerKey = request.loadBalancerKey;
        this.overrideConfig = request.overrideConfig;
        this.isRetriable = request.isRetriable;
    }

    public final URI getUri() {
        return uri;
    }

    protected final ClientRequest setUri(URI uri) {
        this.uri = uri;
        return this;
    }

    public final Object getLoadBalancerKey() {
        return loadBalancerKey;
    }

    protected final ClientRequest setLoadBalancerKey(Object loadBalancerKey) {
        this.loadBalancerKey = loadBalancerKey;
        return this;
    }

    /** @return true only when retriability was explicitly set to true */
    public boolean isRetriable() {
        return isRetriable != null && isRetriable.booleanValue();
    }

    protected final ClientRequest setRetriable(boolean isRetriable) {
        this.isRetriable = isRetriable;
        return this;
    }

    /**
     * @deprecated request configuration should now be passed
     * as a method parameter to client's execution API
     */
    @Deprecated
    public final IClientConfig getOverrideConfig() {
        return overrideConfig;
    }

    /**
     * @deprecated request configuration should now be passed
     * as a method parameter to client's execution API
     */
    @Deprecated
    protected final ClientRequest setOverrideConfig(IClientConfig overrideConfig) {
        this.overrideConfig = overrideConfig;
        return this;
    }

    /**
     * Create a client request using a new URI. This is used by {@code com.netflix.client.AbstractLoadBalancerAwareClient#computeFinalUriWithLoadBalancer(ClientRequest)}.
     * It first tries to clone the request; if that fails the copy constructor
     * {@link #ClientRequest(ClientRequest)} is used instead. Subclasses are
     * encouraged to override this method with a more efficient implementation.
     *
     * @param newURI the URI to carry in the returned copy
     */
    public ClientRequest replaceUri(URI newURI) {
        ClientRequest copy;
        try {
            copy = (ClientRequest) clone();
        } catch (CloneNotSupportedException e) {
            copy = new ClientRequest(this);
        }
        copy.uri = newURI;
        return copy;
    }
}
| 3,361 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/Utils.java | package com.netflix.client;
import java.util.Collection;
/** Shared helper routines for the client package. */
public class Utils {

    /**
     * Walks the cause chain of {@code throwableToSearchIn} (at most 10 links,
     * to guard against cyclic cause chains) and reports whether any link is an
     * instance of one of the classes in {@code throwableToSearchFor}.
     */
    public static boolean isPresentAsCause(Throwable throwableToSearchIn,
            Collection<Class<? extends Throwable>> throwableToSearchFor) {
        Throwable current = throwableToSearchIn;
        for (int depth = 0; current != null && depth < 10; depth++) {
            for (Class<? extends Throwable> candidate : throwableToSearchFor) {
                if (candidate.isInstance(current)) {
                    return true;
                }
            }
            current = current.getCause();
        }
        return false;
    }
}
| 3,362 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/RequestSpecificRetryHandler.java | package com.netflix.client;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.netflix.client.config.CommonClientConfigKey;
import com.netflix.client.config.IClientConfig;
import javax.annotation.Nullable;
import java.net.SocketException;
import java.util.List;
import java.util.Optional;
/**
 * Per-request {@link RetryHandler} that lets request-level configuration
 * override the retry counts of a base handler, to which all other decisions
 * are delegated.
 */
public class RequestSpecificRetryHandler implements RetryHandler {

    private final RetryHandler fallback;
    private int retrySameServer = -1;
    private int retryNextServer = -1;
    private final boolean okToRetryOnConnectErrors;
    private final boolean okToRetryOnAllErrors;

    protected List<Class<? extends Throwable>> connectionRelated =
            Lists.<Class<? extends Throwable>>newArrayList(SocketException.class);

    public RequestSpecificRetryHandler(boolean okToRetryOnConnectErrors, boolean okToRetryOnAllErrors) {
        this(okToRetryOnConnectErrors, okToRetryOnAllErrors, RetryHandler.DEFAULT, null);
    }

    /**
     * @param baseRetryHandler handler consulted for anything not overridden here; must not be null
     * @param requestConfig optional per-request config whose MaxAutoRetries /
     *        MaxAutoRetriesNextServer values, when set, override the base handler's counts
     */
    public RequestSpecificRetryHandler(boolean okToRetryOnConnectErrors, boolean okToRetryOnAllErrors, RetryHandler baseRetryHandler, @Nullable IClientConfig requestConfig) {
        Preconditions.checkNotNull(baseRetryHandler);
        this.okToRetryOnConnectErrors = okToRetryOnConnectErrors;
        this.okToRetryOnAllErrors = okToRetryOnAllErrors;
        this.fallback = baseRetryHandler;
        if (requestConfig != null) {
            Optional<Integer> sameServerOverride = requestConfig.getIfSet(CommonClientConfigKey.MaxAutoRetries);
            if (sameServerOverride.isPresent()) {
                retrySameServer = sameServerOverride.get();
            }
            Optional<Integer> nextServerOverride = requestConfig.getIfSet(CommonClientConfigKey.MaxAutoRetriesNextServer);
            if (nextServerOverride.isPresent()) {
                retryNextServer = nextServerOverride.get();
            }
        }
    }

    /** True when {@link SocketException} appears anywhere in the cause chain. */
    public boolean isConnectionException(Throwable e) {
        return Utils.isPresentAsCause(e, connectionRelated);
    }

    @Override
    public boolean isRetriableException(Throwable e, boolean sameServer) {
        if (okToRetryOnAllErrors) {
            return true;
        }
        if (e instanceof ClientException) {
            // Server throttling is only worth retrying against a different server.
            ClientException ce = (ClientException) e;
            return ce.getErrorType() == ClientException.ErrorType.SERVER_THROTTLED && !sameServer;
        }
        return okToRetryOnConnectErrors && isConnectionException(e);
    }

    @Override
    public boolean isCircuitTrippingException(Throwable e) {
        return fallback.isCircuitTrippingException(e);
    }

    @Override
    public int getMaxRetriesOnSameServer() {
        return retrySameServer >= 0 ? retrySameServer : fallback.getMaxRetriesOnSameServer();
    }

    @Override
    public int getMaxRetriesOnNextServer() {
        return retryNextServer >= 0 ? retryNextServer : fallback.getMaxRetriesOnNextServer();
    }
}
| 3,363 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/ssl/AbstractSslContextFactory.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client.ssl;
import java.security.KeyManagementException;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.UnrecoverableKeyException;
import javax.net.ssl.KeyManager;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 *
 * Abstract class to represent what we logically associate with the ssl context on the client side,
 * namely, the keystore and truststore.
 *
 * @author jzarfoss
 *
 */
public abstract class AbstractSslContextFactory {

    private final static Logger LOGGER = LoggerFactory.getLogger(AbstractSslContextFactory.class);

    /** The secure socket algorithm that is to be used. */
    // NOTE(review): "SSL" is a legacy JSSE algorithm name; "TLS" is generally
    // preferred today. This is a public constant, so changing its value could
    // affect callers -- confirm before modifying.
    public static final String SOCKET_ALGORITHM = "SSL";

    /** The keystore resulting from loading keystore URL */
    private KeyStore keyStore;

    /** The truststore resulting from loading the truststore URL */
    private KeyStore trustStore;

    /** The password for the keystore; kept because key manager initialization needs it. */
    private String keyStorePassword;

    // Only the *lengths* of the passwords are retained for reporting; the
    // trust store password itself is never stored on this object.
    private final int trustStorePasswordLength;

    private final int keyStorePasswordLength;

    /**
     * @param trustStore loaded trust store, may be null (JRE defaults are used)
     * @param trustStorePassword password of the trust store; only its length is retained
     * @param keyStore loaded key store, may be null (JRE defaults are used)
     * @param keyStorePassword password of the key store; retained for key manager creation
     */
    protected AbstractSslContextFactory(final KeyStore trustStore, final String trustStorePassword, final KeyStore keyStore, final String keyStorePassword){
        this.trustStore = trustStore;
        this.keyStorePassword = keyStorePassword;
        this.keyStore = keyStore;
        this.keyStorePasswordLength = keyStorePassword != null ? keyStorePassword.length() : -1;
        this.trustStorePasswordLength = trustStorePassword != null ? trustStorePassword.length() : -1;
    }

    public KeyStore getKeyStore(){
        return this.keyStore;
    }

    public KeyStore getTrustStore(){
        return this.trustStore;
    }

    /** @return key store password length, or -1 when no password was supplied */
    public int getKeyStorePasswordLength(){
        return this.keyStorePasswordLength;
    }

    /** @return trust store password length, or -1 when no password was supplied */
    public int getTrustStorePasswordLength(){
        return this.trustStorePasswordLength;
    }

    /**
     * Creates the SSL context needed to create the socket factory used by this factory. The key and
     * trust store parameters are optional. If they are null then the JRE defaults will be used.
     *
     * @return the newly created SSL context
     * @throws ClientSslSocketFactoryException if an error is detected loading the specified key or
     *         trust stores
     */
    private SSLContext createSSLContext() throws ClientSslSocketFactoryException {
        // Null managers make SSLContext.init fall back to the JRE defaults.
        final KeyManager[] keyManagers = this.keyStore != null ? createKeyManagers() : null;
        final TrustManager[] trustManagers = this.trustStore != null ? createTrustManagers() : null;

        try {
            final SSLContext sslcontext = SSLContext.getInstance(SOCKET_ALGORITHM);
            sslcontext.init(keyManagers, trustManagers, null);
            return sslcontext;
        } catch (NoSuchAlgorithmException e) {
            throw new ClientSslSocketFactoryException(String.format("Failed to create an SSL context that supports algorithm %s: %s", SOCKET_ALGORITHM, e.getMessage()), e);
        } catch (KeyManagementException e) {
            throw new ClientSslSocketFactoryException(String.format("Failed to initialize an SSL context: %s", e.getMessage()), e);
        }
    }

    /**
     * Creates the key managers to be used by the factory from the associated key store and password.
     *
     * @return the newly created array of key managers
     * @throws ClientSslSocketFactoryException if an exception is detected in loading the key store
     */
    private KeyManager[] createKeyManagers() throws ClientSslSocketFactoryException {
        final KeyManagerFactory factory;

        try {
            factory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
            // NOTE(review): assumes keyStorePassword is non-null whenever a
            // keyStore is supplied (otherwise this NPEs) -- confirm with callers.
            factory.init(this.keyStore, this.keyStorePassword.toCharArray());
        } catch (NoSuchAlgorithmException e) {
            throw new ClientSslSocketFactoryException(
                    String.format("Failed to create the key store because the algorithm %s is not supported. ",
                            KeyManagerFactory.getDefaultAlgorithm()), e);
        } catch (UnrecoverableKeyException e) {
            throw new ClientSslSocketFactoryException("Unrecoverable Key Exception initializing key manager factory; this is probably fatal", e);
        } catch (KeyStoreException e) {
            throw new ClientSslSocketFactoryException("KeyStore exception initializing key manager factory; this is probably fatal", e);
        }

        KeyManager[] managers = factory.getKeyManagers();

        LOGGER.debug("Key managers are initialized. Total {} managers. ", managers.length);

        return managers;
    }

    /**
     * Creates the trust managers to be used by the factory from the specified trust store file and
     * password.
     *
     * @return the newly created array of trust managers
     * @throws ClientSslSocketFactoryException if an error is detected in loading the trust store
     */
    private TrustManager[] createTrustManagers() throws ClientSslSocketFactoryException {
        final TrustManagerFactory factory;

        try {
            factory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
            factory.init(this.trustStore);
        } catch (NoSuchAlgorithmException e) {
            // NOTE(review): message quotes KeyManagerFactory's default algorithm
            // even though this path concerns the trust manager factory.
            throw new ClientSslSocketFactoryException(String.format("Failed to create the trust store because the algorithm %s is not supported. ",
                    KeyManagerFactory.getDefaultAlgorithm()), e);
        } catch (KeyStoreException e) {
            throw new ClientSslSocketFactoryException("KeyStore exception initializing trust manager factory; this is probably fatal", e);
        }

        final TrustManager[] managers = factory.getTrustManagers();

        LOGGER.debug("TrustManagers are initialized. Total {} managers: ", managers.length);

        return managers;
    }

    /** @return a freshly created and initialized SSL context (not cached) */
    public SSLContext getSSLContext() throws ClientSslSocketFactoryException{
        return createSSLContext();
    }
}
| 3,364 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/ssl/URLSslContextFactory.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client.ssl;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.cert.CertificateException;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
/**
* Secure socket factory that is used the NIWS code if a non-standard key store or trust store
* is specified.
*
* @author Danny Yuan
* @author Peter D. Stout
*/
public class URLSslContextFactory extends AbstractSslContextFactory {

    private static final Logger LOGGER = LoggerFactory.getLogger(URLSslContextFactory.class);

    /** Location of the key store, or {@code null} when no key store was configured. */
    private final URL keyStoreUrl;
    /** Location of the trust store, or {@code null} when no trust store was configured. */
    private final URL trustStoreUrl;

    /**
     * Creates a {@code ClientSSLSocketFactory} instance. This instance loads only the given trust
     * store file and key store file. Both trust store and key store must be protected by passwords,
     * even though it is not mandated by JSSE.
     *
     * @param trustStoreUrl A {@link URL} that points to a trust store file. If non-null, this URL
     *     must refer to a JKS key store file that contains trusted certificates.
     * @param trustStorePassword The password of the given trust store file. If a trust store is
     *     specified, then the password may not be empty.
     * @param keyStoreUrl A {@code URL} that points to a key store file that contains both client
     *     certificate and the client's private key. If non-null, this URL must be of JKS format.
     * @param keyStorePassword the password of the given key store file. If a key store is
     *     specified, then the password may not be empty.
     * @throws ClientSslSocketFactoryException thrown if creating this instance fails.
     */
    public URLSslContextFactory(final URL trustStoreUrl, final String trustStorePassword,
                                final URL keyStoreUrl, final String keyStorePassword)
            throws ClientSslSocketFactoryException {
        super(createKeyStore(trustStoreUrl, trustStorePassword), trustStorePassword,
                createKeyStore(keyStoreUrl, keyStorePassword), keyStorePassword);
        this.keyStoreUrl = keyStoreUrl;
        this.trustStoreUrl = trustStoreUrl;
        LOGGER.info("Loaded keyStore from: {}", keyStoreUrl);
        LOGGER.info("loaded trustStore from: {}", trustStoreUrl);
    }

    /**
     * Opens the specified key or trust store using the given password.
     *
     * In case of failure a {@link com.netflix.client.ssl.ClientSslSocketFactoryException} is thrown,
     * wrapping the underlying cause exception. That could be:
     * <ul>
     * <li>KeyStoreException if the JRE doesn't support the standard Java Keystore format, in other words: never</li>
     * <li>NoSuchAlgorithmException if the algorithm used to check the integrity of the keystore cannot be found</li>
     * <li>CertificateException if any of the certificates in the keystore could not be loaded</li>
     * <li>
     *     IOException if there is an I/O or format problem with the keystore data, if a
     *     password is required but not given, or if the given password was incorrect. If the
     *     error is due to a wrong password, the cause of the IOException should be an UnrecoverableKeyException.
     * </li>
     * </ul>
     *
     * @param storeFile the location of the store to load; {@code null} is allowed and yields a
     *     {@code null} key store (meaning "not configured")
     * @param password the password protecting the store; must be non-empty when a store is given
     * @return the newly loaded key store, or {@code null} if {@code storeFile} was {@code null}
     * @throws ClientSslSocketFactoryException a wrapper exception for any problems encountered during keystore creation.
     */
    private static KeyStore createKeyStore(final URL storeFile, final String password)
            throws ClientSslSocketFactoryException {
        if (storeFile == null) {
            return null;
        }
        Preconditions.checkArgument(StringUtils.isNotEmpty(password),
                "Null keystore should have empty password, defined keystore must have password");
        try {
            final KeyStore keyStore = KeyStore.getInstance("jks");
            // try-with-resources replaces the previous manual close() that silently swallowed
            // IOException; a failure to close now surfaces through the IOException handler below.
            try (InputStream is = storeFile.openStream()) {
                keyStore.load(is, password.toCharArray());
            } catch (NoSuchAlgorithmException e) {
                throw new ClientSslSocketFactoryException(String.format(
                        "Failed to create a keystore that supports algorithm %s: %s",
                        SOCKET_ALGORITHM, e.getMessage()), e);
            } catch (CertificateException e) {
                throw new ClientSslSocketFactoryException(String.format(
                        "Failed to create keystore with algorithm %s due to certificate exception: %s",
                        SOCKET_ALGORITHM, e.getMessage()), e);
            }
            return keyStore;
        } catch (KeyStoreException e) {
            throw new ClientSslSocketFactoryException(
                    String.format("KeyStore exception creating keystore: %s", e.getMessage()), e);
        } catch (IOException e) {
            // KeyStore.load() also throws IOException for bad passwords / corrupt store data.
            throw new ClientSslSocketFactoryException(
                    String.format("IO exception creating keystore: %s", e.getMessage()), e);
        }
    }

    /** Renders the configured store locations with passwords masked. */
    @Override
    public String toString() {
        final StringBuilder builder = new StringBuilder();
        builder.append("ClientSslSocketFactory [trustStoreUrl=").append(trustStoreUrl);
        if (trustStoreUrl != null) {
            builder.append(", trustStorePassword=");
            builder.append(Strings.repeat("*", this.getTrustStorePasswordLength()));
        }
        builder.append(", keyStoreUrl=").append(keyStoreUrl);
        if (keyStoreUrl != null) {
            builder.append(", keystorePassword = ");
            builder.append(Strings.repeat("*", this.getKeyStorePasswordLength()));
        }
        builder.append(']');
        return builder.toString();
    }
}
| 3,365 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/ssl/ClientSslSocketFactoryException.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client.ssl;
/**
* Reports problems detected by the ClientSslSocketFactory class.
*
* @author pstout@netflix.com (Peter D. Stout)
*/
public class ClientSslSocketFactoryException extends Exception {
    /** Serial version identifier for this class. */
    private static final long serialVersionUID = 1L;

    /**
     * Constructs a new instance with the specified detail message and no cause.
     * Convenience overload for failures that are not triggered by another exception.
     *
     * @param message description of the failure
     */
    public ClientSslSocketFactoryException(final String message) {
        super(message);
    }

    /**
     * Constructs a new instance with the specified message and cause.
     *
     * @param message description of the failure
     * @param cause the underlying exception that triggered this one
     */
    public ClientSslSocketFactoryException(final String message, final Throwable cause) {
        super(message, cause);
    }
}
| 3,366 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/util/Resources.java | package com.netflix.client.util;
import java.io.File;
import java.net.URL;
import java.net.URLDecoder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class Resources {

    private static final Logger logger = LoggerFactory.getLogger(Resources.class);

    /**
     * Locates a resource by name, trying in order: the current thread's context class loader,
     * the system class loader, and finally the name interpreted as a file-system path
     * (URL-decoded first). Returns {@code null} only if the file-path fallback itself fails.
     *
     * @param resourceName classpath resource name or file path
     * @return a URL for the resource, or {@code null} if it could not be resolved
     */
    public static URL getResource(String resourceName) {
        // 1) context class loader, when one is set on the current thread
        final ClassLoader contextLoader = Thread.currentThread().getContextClassLoader();
        URL located = (contextLoader != null) ? contextLoader.getResource(resourceName) : null;
        if (located != null) {
            return located;
        }
        // 2) system classpath
        located = ClassLoader.getSystemResource(resourceName);
        if (located != null) {
            return located;
        }
        // 3) interpret the (decoded) name as a file path
        try {
            final String decodedName = URLDecoder.decode(resourceName, "UTF-8");
            return new File(decodedName).toURI().toURL();
        } catch (Exception e) {
            logger.error("Problem loading resource", e);
            return null;
        }
    }
}
| 3,367 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/config/ReloadableClientConfig.java | package com.netflix.client.config;
import com.google.common.base.Preconditions;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Supplier;
import java.util.stream.Collectors;
/**
* Base implementation of an IClientConfig with configuration that can be reloaded at runtime from an underlying
* property source while optimizing access to property values.
*
* Properties can either be scoped to a specific client or default properties that span all clients. By default
* properties follow the name convention `{clientname}.{namespace}.{key}` and then fallback to `{namespace}.{key}`
* if not found
*
* Internally the config tracks two maps, one for dynamic properties and one for code settable default values to use
* when a property is not defined in the underlying property source.
*/
public abstract class ReloadableClientConfig implements IClientConfig {
    private static final Logger LOG = LoggerFactory.getLogger(ReloadableClientConfig.class);

    private static final String DEFAULT_CLIENT_NAME = "";
    private static final String DEFAULT_NAMESPACE = "ribbon";

    // Map of raw property names (without namespace or client name) to values. All values are non-null and properly
    // typed to match the key type
    private final Map<IClientConfigKey, Optional<?>> internalProperties = new ConcurrentHashMap<>();

    // Property views handed out by getOrCreateProperty(); retained so reload() can refresh them in place
    private final Map<IClientConfigKey, ReloadableProperty<?>> dynamicProperties = new ConcurrentHashMap<>();

    // List of actions to perform when configuration changes. This includes both updating the Property instances
    // as well as external consumers.
    private final Map<IClientConfigKey, Runnable> changeActions = new ConcurrentHashMap<>();

    // Total number of individual property refreshes; exposed via getRefreshCount() for diagnostics
    private final AtomicLong refreshCounter = new AtomicLong();

    private final PropertyResolver resolver;

    private String clientName = DEFAULT_CLIENT_NAME;

    private String namespace = DEFAULT_NAMESPACE;

    // Becomes true once loadProperties() has been called; from then on values are kept in sync
    // with the underlying property source
    private boolean isDynamic = false;

    // Lazily computed toString() representation; invalidated whenever a property changes
    private volatile String cachedToString = null;

    protected ReloadableClientConfig(PropertyResolver resolver) {
        this.resolver = resolver;
    }

    protected PropertyResolver getPropertyResolver() {
        return this.resolver;
    }

    /**
     * Refresh all seen properties from the underlying property storage
     */
    public final void reload() {
        changeActions.values().forEach(Runnable::run);
        dynamicProperties.values().forEach(ReloadableProperty::reload);
        cachedToString = null;
    }

    /**
     * @deprecated Use {@link #loadProperties(String)}
     */
    @Deprecated
    public void setClientName(String clientName){
        this.clientName = clientName;
    }

    @Override
    public final String getClientName() {
        return clientName;
    }

    @Override
    public String getNameSpace() {
        return namespace;
    }

    @Override
    public final void setNameSpace(String nameSpace) {
        this.namespace = nameSpace;
    }

    /**
     * Load client-scoped properties and switch this config into dynamic mode, where values
     * track the underlying property source and are refreshed on change notifications.
     */
    @Override
    public void loadProperties(String clientName) {
        LOG.info("[{}] loading config", clientName);
        this.clientName = clientName;
        this.isDynamic = true;
        loadDefaultValues();
        resolver.onChange(this::reload);

        internalProperties.forEach((key, value) -> LOG.info("[{}] {}={}", clientName, key, value.orElse(null)));
    }

    /**
     * @return use {@link #forEach(BiConsumer)}
     */
    @Override
    @Deprecated
    public final Map<String, Object> getProperties() {
        final Map<String, Object> result = new HashMap<>(internalProperties.size());
        forEach((key, value) -> result.put(key.key(), String.valueOf(value)));
        return result;
    }

    @Override
    public void forEach(BiConsumer<IClientConfigKey<?>, Object> consumer) {
        // Only report keys that actually resolved to a value
        internalProperties.forEach((key, value) -> {
            if (value.isPresent()) {
                consumer.accept(key, value.get());
            }
        });
    }

    /**
     * Register an action that will refresh the cached value for key. Uses the current value as a reference and will
     * update from the dynamic property source to either delete or set a new value.
     *
     * @param key - Property key without client name or namespace
     */
    private <T> void autoRefreshFromPropertyResolver(final IClientConfigKey<T> key) {
        changeActions.computeIfAbsent(key, ignore -> {
            final Supplier<Optional<T>> valueSupplier = () -> resolveFromPropertyResolver(key);

            // Seed the cache with the currently resolved value (if any)
            final Optional<T> current = valueSupplier.get();
            if (current.isPresent()) {
                internalProperties.put(key, current);
            }

            // The returned action re-resolves the key and only touches the cache on a real change
            final AtomicReference<Optional<T>> previous = new AtomicReference<>(current);
            return () -> {
                final Optional<T> next = valueSupplier.get();
                if (!next.equals(previous.get())) {
                    LOG.info("[{}] new value for {}: {} -> {}", clientName, key.key(), previous.get(), next);
                    previous.set(next);
                    internalProperties.put(key, next);
                }
            };
        });
    }

    /** A {@link Property} that can be told to re-read its value from the backing source. */
    interface ReloadableProperty<T> extends Property<T> {
        void reload();
    }

    /**
     * Return the cached {@link Property} for key, creating it on first access.
     *
     * @param key property key
     * @param valueSupplier resolves the current (possibly absent) value
     * @param defaultSupplier supplies the value returned by {@link Property#getOrDefault()} when absent
     */
    private synchronized <T> Property<T> getOrCreateProperty(final IClientConfigKey<T> key, final Supplier<Optional<T>> valueSupplier, final Supplier<T> defaultSupplier) {
        Preconditions.checkNotNull(valueSupplier, "defaultValueSupplier cannot be null");

        return (Property<T>)dynamicProperties.computeIfAbsent(key, ignore -> new ReloadableProperty<T>() {
            private volatile Optional<T> current = Optional.empty();
            private List<Consumer<T>> consumers = new CopyOnWriteArrayList<>();

            {
                reload();
            }

            @Override
            public void onChange(Consumer<T> consumer) {
                consumers.add(consumer);
            }

            @Override
            public Optional<T> get() {
                return current;
            }

            @Override
            public T getOrDefault() {
                return current.orElse(defaultSupplier.get());
            }

            @Override
            public void reload() {
                refreshCounter.incrementAndGet();

                Optional<T> next = valueSupplier.get();
                // Notify consumers only when the value actually changed
                if (!next.equals(current)) {
                    current = next;
                    consumers.forEach(consumer -> consumer.accept(next.orElseGet(defaultSupplier::get)));
                }
            }

            @Override
            public String toString() {
                return String.valueOf(get());
            }
        });
    }

    @Override
    public final <T> T get(IClientConfigKey<T> key) {
        Optional<T> value = (Optional<T>)internalProperties.get(key);
        if (value == null) {
            if (!isDynamic) {
                return null;
            } else {
                // In dynamic mode an unseen key is registered (via set) so future source
                // changes are tracked, then re-read from the cache
                set(key, null);
                value = (Optional<T>) internalProperties.get(key);
            }
        }

        return value.orElse(null);
    }

    @Override
    public final <T> Property<T> getGlobalProperty(IClientConfigKey<T> key) {
        LOG.debug("[{}] get global property '{}' with default '{}'", clientName, key.key(), key.defaultValue());

        // Global properties are looked up by raw key, without client name or namespace prefix
        return getOrCreateProperty(
                key,
                () -> resolver.get(key.key(), key.type()),
                key::defaultValue);
    }

    @Override
    public final <T> Property<T> getDynamicProperty(IClientConfigKey<T> key) {
        LOG.debug("[{}] get dynamic property key={} ns={}", clientName, key.key(), getNameSpace());

        // Ensure the key is registered for auto-refresh before exposing it as a Property
        get(key);

        return getOrCreateProperty(
                key,
                () -> (Optional<T>)internalProperties.getOrDefault(key, Optional.empty()),
                key::defaultValue);
    }

    @Override
    public <T> Property<T> getPrefixMappedProperty(IClientConfigKey<T> key) {
        // Fixed: the format string previously declared a fourth 'client={}' placeholder but
        // only three arguments were supplied
        LOG.debug("[{}] get prefix mapped property key={} ns={}", clientName, key.key(), getNameSpace());

        return getOrCreateProperty(
                key,
                getPrefixedMapPropertySupplier(key),
                key::defaultValue);
    }

    /**
     * Resolve a property's final value from the property value.
     *  - client scope
     *  - default scope
     */
    private <T> Optional<T> resolveFromPropertyResolver(IClientConfigKey<T> key) {
        Optional<T> value;
        if (!StringUtils.isEmpty(clientName)) {
            value = resolver.get(clientName + "." + getNameSpace() + "." + key.key(), key.type());
            if (value.isPresent()) {
                return value;
            }
        }

        return resolver.get(getNameSpace() + "." + key.key(), key.type());
    }

    @Override
    public <T> Optional<T> getIfSet(IClientConfigKey<T> key) {
        return (Optional<T>)internalProperties.getOrDefault(key, Optional.empty());
    }

    /**
     * Coerce a raw value to the key's declared type.
     *
     * @throws IllegalArgumentException if the value cannot be parsed/converted
     */
    private <T> T resolveValueToType(IClientConfigKey<T> key, Object value) {
        if (value == null) {
            return null;
        }

        final Class<T> type = key.type();
        // Unfortunately there's some legacy code setting string values for typed keys. Here are do our best to parse
        // and store the typed value
        if (!value.getClass().equals(type)) {
            try {
                if (type.equals(String.class)) {
                    return (T) value.toString();
                } else if (value.getClass().equals(String.class)) {
                    final String strValue = (String) value;
                    if (Integer.class.equals(type)) {
                        return (T) Integer.valueOf(strValue);
                    } else if (Boolean.class.equals(type)) {
                        return (T) Boolean.valueOf(strValue);
                    } else if (Float.class.equals(type)) {
                        return (T) Float.valueOf(strValue);
                    } else if (Long.class.equals(type)) {
                        return (T) Long.valueOf(strValue);
                    } else if (Double.class.equals(type)) {
                        return (T) Double.valueOf(strValue);
                    } else if (TimeUnit.class.equals(type)) {
                        return (T) TimeUnit.valueOf(strValue);
                    } else {
                        // Last resort: a static valueOf(String) factory on the target type
                        return PropertyUtils.resolveWithValueOf(type, strValue)
                                .orElseThrow(() -> new IllegalArgumentException("Unsupported value type `" + type + "'"));
                    }
                } else {
                    return PropertyUtils.resolveWithValueOf(type, value.toString())
                            .orElseThrow(() -> new IllegalArgumentException("Incompatible value type `" + value.getClass() + "` while expecting '" + type + "`"));
                }
            } catch (Exception e) {
                throw new IllegalArgumentException("Error parsing value '" + value + "' for '" + key.key() + "'", e);
            }
        } else {
            return (T)value;
        }
    }

    /**
     * Build a supplier that collects all properties prefixed by the key (client scope overriding
     * default scope) into a Map and converts it via the type's static valueOf(Map) factory.
     */
    private <T> Supplier<Optional<T>> getPrefixedMapPropertySupplier(IClientConfigKey<T> key) {
        final Method method;
        try {
            method = key.type().getDeclaredMethod("valueOf", Map.class);
        } catch (NoSuchMethodException e) {
            throw new UnsupportedOperationException("Class '" + key.type().getName() + "' must have static method valueOf(Map<String, String>)", e);
        }

        return () -> {
            final Map<String, String> values = new HashMap<>();
            // Client-scoped entries are applied second so they override default-scope entries
            resolver.forEach(getNameSpace() + "." + key.key(), values::put);

            if (!StringUtils.isEmpty(clientName)) {
                resolver.forEach(clientName + "." + getNameSpace() + "." + key.key(), values::put);
            }

            try {
                return Optional.ofNullable((T)method.invoke(null, values));
            } catch (Exception e) {
                LOG.warn("Unable to map value for '{}'", key.key(), e);
                return Optional.empty();
            }
        };
    }

    @Override
    public final <T> T get(IClientConfigKey<T> key, T defaultValue) {
        return Optional.ofNullable(get(key)).orElse(defaultValue);
    }

    /**
     * Store the implicit default value for key while giving precedence to default values in the property resolver
     */
    protected final <T> void setDefault(IClientConfigKey<T> key) {
        setDefault(key, key.defaultValue());
    }

    /**
     * Store the default value for key while giving precedence to default values in the property resolver
     */
    protected final <T> void setDefault(IClientConfigKey<T> key, T value) {
        Preconditions.checkArgument(key != null, "key cannot be null");

        value = resolveFromPropertyResolver(key).orElse(value);
        internalProperties.put(key, Optional.ofNullable(value));
        if (isDynamic) {
            autoRefreshFromPropertyResolver(key);
        }
        cachedToString = null;
    }

    @Override
    public <T> IClientConfig set(IClientConfigKey<T> key, T value) {
        Preconditions.checkArgument(key != null, "key cannot be null");

        // Make sure the value is property typed. TODO: Make this a configurable option
        value = resolveValueToType(key, value);
        if (isDynamic) {
            // Dynamic mode: the property source takes precedence over the code-set value
            internalProperties.put(key, Optional.ofNullable(resolveFromPropertyResolver(key).orElse(value)));
            autoRefreshFromPropertyResolver(key);
        } else {
            internalProperties.put(key, Optional.ofNullable(value));
        }
        cachedToString = null;

        return this;
    }

    @Override
    @Deprecated
    public void setProperty(IClientConfigKey key, Object value) {
        Preconditions.checkArgument(value != null, "Value may not be null");
        set(key, value);
    }

    @Override
    @Deprecated
    public Object getProperty(IClientConfigKey key) {
        return get(key);
    }

    @Override
    @Deprecated
    public Object getProperty(IClientConfigKey key, Object defaultVal) {
        return Optional.ofNullable(get(key)).orElse(defaultVal);
    }

    @Override
    @Deprecated
    public boolean containsProperty(IClientConfigKey key) {
        return internalProperties.containsKey(key);
    }

    @Override
    @Deprecated
    public int getPropertyAsInteger(IClientConfigKey key, int defaultValue) {
        return Optional.ofNullable(getProperty(key)).map(Integer.class::cast).orElse(defaultValue);
    }

    @Override
    @Deprecated
    public String getPropertyAsString(IClientConfigKey key, String defaultValue) {
        return Optional.ofNullable(getProperty(key)).map(Object::toString).orElse(defaultValue);
    }

    @Override
    @Deprecated
    public boolean getPropertyAsBoolean(IClientConfigKey key, boolean defaultValue) {
        return Optional.ofNullable(getProperty(key)).map(Boolean.class::cast).orElse(defaultValue);
    }

    /**
     * Copy every property from the override config into this one; a null override is a no-op.
     */
    public IClientConfig applyOverride(IClientConfig override) {
        if (override == null) {
            return this;
        }

        override.forEach((key, value) -> setProperty(key, value));

        return this;
    }

    @Override
    public String toString() {
        // Benign race: concurrent callers may each compute the string, but the result is identical
        if (cachedToString == null) {
            String newToString = generateToString();
            cachedToString = newToString;
            return newToString;
        }
        return cachedToString;
    }

    /**
     * @return Number of individual properties refreshed.  This can be used to identify patterns of excessive updates.
     */
    public long getRefreshCount() {
        return refreshCounter.get();
    }

    private String generateToString() {
        return "ClientConfig:" + internalProperties.entrySet().stream()
                .map(t -> {
                    // Never render password values in logs/diagnostics
                    if (t.getKey().key().endsWith("Password") && t.getValue().isPresent()) {
                        return t.getKey() + ":***";
                    }
                    return t.getKey() + ":" + t.getValue().orElse(null);
                })
                .collect(Collectors.joining(", "));
    }
}
| 3,368 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/config/PropertyResolver.java | package com.netflix.client.config;
import java.util.Optional;
import java.util.function.BiConsumer;
/**
* Internal abstraction to decouple the property source from Ribbon's internal configuration.
*/
public interface PropertyResolver {
    /**
     * Resolve a single property to the requested type.
     *
     * @param key full property name, including any client name/namespace prefix
     * @param type expected value type of the property
     * @return Get the value of a property or Optional.empty() if not set
     */
    <T> Optional<T> get(String key, Class<T> type);

    /**
     * Iterate through all properties with the specified prefix, invoking the consumer once per
     * matching property with its name and raw string value.
     * NOTE(review): whether the prefix is stripped from the reported name is implementation
     * dependent — confirm against the concrete resolver.
     */
    void forEach(String prefix, BiConsumer<String, String> consumer);

    /**
     * Provide action to invoke when config changes
     * @param action callback run on every change to the underlying property source
     */
    void onChange(Runnable action);
}
| 3,369 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/config/Property.java | package com.netflix.client.config;
import java.util.Optional;
import java.util.function.Consumer;
/**
* Ribbon specific encapsulation of a dynamic configuration property
* @param <T>
*/
public interface Property<T> {
    /**
     * Register a consumer to be invoked whenever the configuration value changes.
     *
     * @param consumer callback receiving the new effective value
     */
    void onChange(Consumer<T> consumer);

    /**
     * @return the current value, or Optional.empty() if not set
     */
    Optional<T> get();

    /**
     * @return the current value, or the default when no value is set
     */
    T getOrDefault();

    /**
     * Chain this property with a fallback that is consulted whenever this one has no value.
     */
    default Property<T> fallbackWith(Property<T> fallback) {
        return new FallbackProperty<>(this, fallback);
    }

    /**
     * Wrap a fixed (possibly null) value in a constant Property.
     */
    static <T> Property<T> of(T value) {
        return new Property<T>() {
            @Override
            public void onChange(Consumer<T> consumer) {
                // Constant value: nothing ever changes, so listeners are never invoked
            }

            @Override
            public Optional<T> get() {
                return Optional.ofNullable(value);
            }

            @Override
            public T getOrDefault() {
                return value;
            }

            @Override
            public String toString() {
                return String.valueOf(value);
            }
        };
    }
}
| 3,370 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/config/FallbackProperty.java | package com.netflix.client.config;
import java.util.Optional;
import java.util.function.Consumer;
public final class FallbackProperty<T> implements Property<T> {
    private final Property<T> primary;
    private final Property<T> fallback;

    public FallbackProperty(Property<T> primary, Property<T> fallback) {
        this.primary = primary;
        this.fallback = fallback;
    }

    @Override
    public void onChange(Consumer<T> consumer) {
        // A change to either source may alter the effective value, so re-evaluate
        // the combined result and notify on both.
        Consumer<T> notifyEffective = ignore -> consumer.accept(getOrDefault());
        primary.onChange(notifyEffective);
        fallback.onChange(notifyEffective);
    }

    @Override
    public Optional<T> get() {
        Optional<T> primaryValue = primary.get();
        return primaryValue.isPresent() ? primaryValue : fallback.get();
    }

    @Override
    public T getOrDefault() {
        return primary.get().orElseGet(fallback::getOrDefault);
    }

    @Override
    public String toString() {
        return String.valueOf(get());
    }
}
| 3,371 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/config/IClientConfig.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client.config;
import java.util.Map;
import java.util.Optional;
import java.util.function.BiConsumer;
/**
* Defines the client configuration used by various APIs to initialize clients or load balancers
* and for method execution.
*
* @author awang
*/
public interface IClientConfig {
String getClientName();
String getNameSpace();
void setNameSpace(String nameSpace);
/**
* Load the properties for a given client and/or load balancer.
* @param clientName
*/
void loadProperties(String clientName);
/**
* load default values for this configuration
*/
void loadDefaultValues();
Map<String, Object> getProperties();
/**
* Iterate all properties and report the final value. Can be null if a default value is not specified.
* @param consumer
*/
default void forEach(BiConsumer<IClientConfigKey<?>, Object> consumer) {
throw new UnsupportedOperationException();
}
/**
* @deprecated use {@link #set(IClientConfigKey, Object)}
*/
@Deprecated
void setProperty(IClientConfigKey key, Object value);
/**
* @deprecated use {@link #get(IClientConfigKey)}
*/
@Deprecated
Object getProperty(IClientConfigKey key);
/**
* @deprecated use {@link #get(IClientConfigKey, Object)}
*/
@Deprecated
Object getProperty(IClientConfigKey key, Object defaultVal);
/**
* @deprecated use {@link #getIfSet(IClientConfigKey)}
*/
@Deprecated
boolean containsProperty(IClientConfigKey key);
/**
* Returns the applicable virtual addresses ("vip") used by this client configuration.
*/
String resolveDeploymentContextbasedVipAddresses();
@Deprecated
int getPropertyAsInteger(IClientConfigKey key, int defaultValue);
@Deprecated
String getPropertyAsString(IClientConfigKey key, String defaultValue);
@Deprecated
boolean getPropertyAsBoolean(IClientConfigKey key, boolean defaultValue);
/**
* Returns a typed property. If the property of IClientConfigKey is not set, it returns null.
* <p>
* <ul>
* <li>Integer</li>
* <li>Boolean</li>
* <li>Float</li>
* <li>Long</li>
* <li>Double</li>
* </ul>
* <br><br>
*/
<T> T get(IClientConfigKey<T> key);
/**
* Returns a typed property. If the property of IClientConfigKey is not set, it returns the default value, which
* could be null.
* <p>
* <ul>
* <li>Integer</li>
* <li>Boolean</li>
* <li>Float</li>
* <li>Long</li>
* <li>Double</li>
* </ul>
* <br><br>
*/
default <T> T getOrDefault(IClientConfigKey<T> key) {
return get(key, key.defaultValue());
}
/**
* Return a typed property if and only if it was explicitly set, skipping configuration loading.
* @param key
* @param <T>
* @return
*/
default <T> Optional<T> getIfSet(IClientConfigKey<T> key) {
return Optional.ofNullable(get(key));
}
/**
* @return Return a global dynamic property not scoped to the specific client. The property will be looked up as is using the
* key without any client name or namespace prefix
*/
<T> Property<T> getGlobalProperty(IClientConfigKey<T> key);
/**
* @return Return a dynamic property scoped to the client name or namespace.
*/
<T> Property<T> getDynamicProperty(IClientConfigKey<T> key);
/**
* @return Return a dynamically updated property that is a mapping of all properties prefixed by the key name to an
* object with static method valueOf(Map{@literal <}String, String{@literal >})
*/
default <T> Property<T> getPrefixMappedProperty(IClientConfigKey<T> key) {
throw new UnsupportedOperationException();
}
/**
* Returns a typed property. If the property of IClientConfigKey is not set,
* it returns the default value passed in as the parameter.
*/
<T> T get(IClientConfigKey<T> key, T defaultValue);
/**
* Set the typed property with the given value.
*/
<T> IClientConfig set(IClientConfigKey<T> key, T value);
@Deprecated
class Builder {
private IClientConfig config;
Builder() {
}
/**
* Create a builder with no initial property and value for the configuration to be built.
*/
public static Builder newBuilder() {
Builder builder = new Builder();
builder.config = ClientConfigFactory.findDefaultConfigFactory().newConfig();
return builder;
}
/**
* Create a builder with properties for the specific client loaded. The default
* {@link IClientConfig} implementation loads properties from <a href="https://github.com/Netflix/archaius">Archaius</a>
*
* @param clientName Name of client. clientName.ribbon will be used as a prefix to find corresponding properties from
* <a href="https://github.com/Netflix/archaius">Archaius</a>
*/
public static Builder newBuilder(String clientName) {
Builder builder = new Builder();
builder.config = ClientConfigFactory.findDefaultConfigFactory().newConfig();
builder.config.loadProperties(clientName);
return builder;
}
/**
* Create a builder with properties for the specific client loaded. The default
* {@link IClientConfig} implementation loads properties from <a href="https://github.com/Netflix/archaius">Archaius</a>
*
* @param clientName Name of client. clientName.propertyNameSpace will be used as a prefix to find corresponding properties from
* <a href="https://github.com/Netflix/archaius">Archaius</a>
*/
public static Builder newBuilder(String clientName, String propertyNameSpace) {
Builder builder = new Builder();
builder.config = ClientConfigFactory.findDefaultConfigFactory().newConfig();
builder.config.setNameSpace(propertyNameSpace);
builder.config.loadProperties(clientName);
return builder;
}
/**
* Create a builder with properties for the specific client loaded.
*
* @param implClass the class of {@link IClientConfig} object to be built
*/
public static Builder newBuilder(Class<? extends IClientConfig> implClass, String clientName) {
Builder builder = new Builder();
try {
builder.config = implClass.newInstance();
builder.config.loadProperties(clientName);
} catch (Exception e) {
throw new IllegalArgumentException(e);
}
return builder;
}
/**
* Create a builder to build the configuration with no initial properties set
*
* @param implClass the class of {@link IClientConfig} object to be built
*/
public static Builder newBuilder(Class<? extends IClientConfig> implClass) {
Builder builder = new Builder();
try {
builder.config = implClass.newInstance();
} catch (Exception e) {
throw new IllegalArgumentException(e);
}
return builder;
}
public IClientConfig build() {
return config;
}
/**
* Load a set of default values for the configuration
*/
public Builder withDefaultValues() {
config.loadDefaultValues();
return this;
}
public Builder withDeploymentContextBasedVipAddresses(String vipAddress) {
config.set(CommonClientConfigKey.DeploymentContextBasedVipAddresses, vipAddress);
return this;
}
public Builder withForceClientPortConfiguration(boolean forceClientPortConfiguration) {
config.set(CommonClientConfigKey.ForceClientPortConfiguration, forceClientPortConfiguration);
return this;
}
public Builder withMaxAutoRetries(int value) {
config.set(CommonClientConfigKey.MaxAutoRetries, value);
return this;
}
public Builder withMaxAutoRetriesNextServer(int value) {
config.set(CommonClientConfigKey.MaxAutoRetriesNextServer, value);
return this;
}
public Builder withRetryOnAllOperations(boolean value) {
config.set(CommonClientConfigKey.OkToRetryOnAllOperations, value);
return this;
}
/** Allows retry behavior to be decided on a per-request basis instead of globally. */
public Builder withRequestSpecificRetryOn(boolean value) {
    config.set(CommonClientConfigKey.RequestSpecificRetryOn, value);
    return this;
}

/** Enables priming (pre-establishing) connections to servers at startup. */
public Builder withEnablePrimeConnections(boolean value) {
    config.set(CommonClientConfigKey.EnablePrimeConnections, value);
    return this;
}

/**
 * Caps concurrent connections per host. Sets both the deprecated
 * {@code MaxHttpConnectionsPerHost} key and its replacement
 * {@code MaxConnectionsPerHost} so old and new readers agree.
 */
public Builder withMaxConnectionsPerHost(int value) {
    config.set(CommonClientConfigKey.MaxHttpConnectionsPerHost, value);
    config.set(CommonClientConfigKey.MaxConnectionsPerHost, value);
    return this;
}

/**
 * Caps total concurrent connections across all hosts. Sets both the deprecated
 * {@code MaxTotalHttpConnections} key and its replacement {@code MaxTotalConnections}.
 */
public Builder withMaxTotalConnections(int value) {
    config.set(CommonClientConfigKey.MaxTotalHttpConnections, value);
    config.set(CommonClientConfigKey.MaxTotalConnections, value);
    return this;
}

/** Marks the client as secure (HTTPS). */
public Builder withSecure(boolean secure) {
    config.set(CommonClientConfigKey.IsSecure, secure);
    return this;
}

/** Socket connect timeout in milliseconds. */
public Builder withConnectTimeout(int value) {
    config.set(CommonClientConfigKey.ConnectTimeout, value);
    return this;
}

/** Socket read timeout in milliseconds. */
public Builder withReadTimeout(int value) {
    config.set(CommonClientConfigKey.ReadTimeout, value);
    return this;
}

/** Timeout in milliseconds for leasing a connection from the pool. */
public Builder withConnectionManagerTimeout(int value) {
    config.set(CommonClientConfigKey.ConnectionManagerTimeout, value);
    return this;
}

/** Whether HTTP redirects are followed automatically. */
public Builder withFollowRedirects(boolean value) {
    config.set(CommonClientConfigKey.FollowRedirects, value);
    return this;
}

/** Enables the background task that evicts idle pooled connections. */
public Builder withConnectionPoolCleanerTaskEnabled(boolean value) {
    config.set(CommonClientConfigKey.ConnectionPoolCleanerTaskEnabled, value);
    return this;
}

/** Idle time in milliseconds after which a pooled connection is evicted. */
public Builder withConnIdleEvictTimeMilliSeconds(int value) {
    config.set(CommonClientConfigKey.ConnIdleEvictTimeMilliSeconds, value);
    return this;
}

/**
 * Interval in milliseconds between connection-cleaner runs.
 * Note the method name says "Mills" but it maps to the
 * {@code ConnectionCleanerRepeatInterval} key (milliseconds).
 */
public Builder withConnectionCleanerRepeatIntervalMills(int value) {
    config.set(CommonClientConfigKey.ConnectionCleanerRepeatInterval, value);
    return this;
}

/** Enables the GZIP content-encoding filter on responses. */
public Builder withGZIPContentEncodingFilterEnabled(boolean value) {
    config.set(CommonClientConfigKey.EnableGZIPContentEncodingFilter, value);
    return this;
}

/** HTTP proxy host to route requests through. */
public Builder withProxyHost(String proxyHost) {
    config.set(CommonClientConfigKey.ProxyHost, proxyHost);
    return this;
}

/** HTTP proxy port. */
public Builder withProxyPort(int value) {
    config.set(CommonClientConfigKey.ProxyPort, value);
    return this;
}

/** Path to the key store used for client-side TLS. */
public Builder withKeyStore(String value) {
    config.set(CommonClientConfigKey.KeyStore, value);
    return this;
}

/** Password for the key store. */
public Builder withKeyStorePassword(String value) {
    config.set(CommonClientConfigKey.KeyStorePassword, value);
    return this;
}

/** Path to the trust store used to validate server certificates. */
public Builder withTrustStore(String value) {
    config.set(CommonClientConfigKey.TrustStore, value);
    return this;
}

/** Password for the trust store. */
public Builder withTrustStorePassword(String value) {
    config.set(CommonClientConfigKey.TrustStorePassword, value);
    return this;
}

/** Whether client certificate authentication is required for secure clients. */
public Builder withClientAuthRequired(boolean value) {
    config.set(CommonClientConfigKey.IsClientAuthRequired, value);
    return this;
}

/** Fully-qualified class name of a custom {@code SSLSocketFactory}. */
public Builder withCustomSSLSocketFactoryClassName(String value) {
    config.set(CommonClientConfigKey.CustomSSLSocketFactoryClassName, value);
    return this;
}

/** Whether the server hostname must match the name in its certificate. */
public Builder withHostnameValidationRequired(boolean value) {
    config.set(CommonClientConfigKey.IsHostnameValidationRequired, value);
    return this;
}

// see also http://hc.apache.org/httpcomponents-client-ga/tutorial/html/advanced.html
/** Ignores the per-user token when pooling connections for secure clients. */
public Builder ignoreUserTokenInConnectionPoolForSecureClient(boolean value) {
    config.set(CommonClientConfigKey.IgnoreUserTokenInConnectionPoolForSecureClient, value);
    return this;
}

/** Whether to initialize an NF load balancer for this client. */
public Builder withLoadBalancerEnabled(boolean value) {
    config.set(CommonClientConfigKey.InitializeNFLoadBalancer, value);
    return this;
}

/**
 * Server-list refresh interval in milliseconds; maps to the
 * {@code ServerListRefreshInterval} key.
 */
public Builder withServerListRefreshIntervalMills(int value) {
    config.set(CommonClientConfigKey.ServerListRefreshInterval, value);
    return this;
}

/** Prefers servers in the same availability zone as the client. */
public Builder withZoneAffinityEnabled(boolean value) {
    config.set(CommonClientConfigKey.EnableZoneAffinity, value);
    return this;
}

/** Restricts routing strictly to servers in the client's zone. */
public Builder withZoneExclusivityEnabled(boolean value) {
    config.set(CommonClientConfigKey.EnableZoneExclusivity, value);
    return this;
}

/** Prefers servers registered under the configured VIP address. */
public Builder prioritizeVipAddressBasedServers(boolean value) {
    config.set(CommonClientConfigKey.PrioritizeVipAddressBasedServers, value);
    return this;
}

/** Target region for cross-region routing. */
public Builder withTargetRegion(String value) {
    config.set(CommonClientConfigKey.TargetRegion, value);
    return this;
}
}
}
| 3,372 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/config/UnboxedIntProperty.java | package com.netflix.client.config;
/**
 * Presents an {@code Integer}-valued {@link Property} as an unboxed primitive,
 * so hot-path readers avoid autoboxing. The cached value is {@code volatile}
 * so updates published by the delegate's change callback are visible to all
 * reader threads.
 */
public class UnboxedIntProperty {
    private volatile int value;

    /** Wraps a fixed value that never changes. */
    public UnboxedIntProperty(int constantValue) {
        this.value = constantValue;
    }

    /**
     * Tracks {@code delegate}: seeds the cache from its current (or default)
     * value and refreshes it whenever the delegate reports a change.
     */
    public UnboxedIntProperty(Property<Integer> delegate) {
        this.value = delegate.getOrDefault();
        delegate.onChange(updated -> this.value = updated);
    }

    /** @return the most recently observed value, unboxed */
    public int get() {
        return value;
    }
}
| 3,373 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/config/ClientConfigFactory.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.client.config;
import java.util.Comparator;
import java.util.ServiceLoader;
import java.util.stream.StreamSupport;
/**
* Created by awang on 7/18/14.
*/
/**
 * Factory for creating {@code IClientConfig} instances. Concrete factories are
 * discovered through the JDK {@link ServiceLoader}; when several are present,
 * the one with the highest {@link #getPriority()} wins, with ties broken by
 * canonical class name.
 */
public interface ClientConfigFactory {
    /** @return a new, implementation-specific client configuration instance */
    IClientConfig newConfig();

    /** Highest-priority factory found on the classpath, resolved at class-load time. */
    ClientConfigFactory DEFAULT = findDefaultConfigFactory();

    /** Higher values take precedence when multiple factories are discoverable. */
    default int getPriority() { return 0; }

    /**
     * Picks the winning factory among all ServiceLoader-discoverable implementations.
     *
     * @return the factory with the highest (priority, class name) ranking
     * @throws IllegalStateException if no implementation is discoverable
     */
    static ClientConfigFactory findDefaultConfigFactory() {
        // max() selects the winner directly, instead of sorting the whole
        // candidate list with a reversed comparator and taking the head.
        return StreamSupport.stream(ServiceLoader.load(ClientConfigFactory.class).spliterator(), false)
                .max(Comparator
                        .comparingInt(ClientConfigFactory::getPriority)
                        .thenComparing(f -> f.getClass().getCanonicalName()))
                .orElseThrow(() -> new IllegalStateException(
                        "Expecting at least one implementation of ClientConfigFactory discoverable via the ServiceLoader"));
    }
}
| 3,374 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/config/AbstractDefaultClientConfigImpl.java | package com.netflix.client.config;
import com.netflix.client.VipAddressResolver;
import java.util.concurrent.TimeUnit;
/**
* This class only exists to support code that depends on these constants that are deprecated now that defaults have
* moved into the key.
*/
@Deprecated
public abstract class AbstractDefaultClientConfigImpl extends ReloadableClientConfig {
@Deprecated
public static final Boolean DEFAULT_PRIORITIZE_VIP_ADDRESS_BASED_SERVERS = CommonClientConfigKey.PrioritizeVipAddressBasedServers.defaultValue();
@Deprecated
public static final String DEFAULT_NFLOADBALANCER_PING_CLASSNAME = CommonClientConfigKey.NFLoadBalancerPingClassName.defaultValue();
@Deprecated
public static final String DEFAULT_NFLOADBALANCER_RULE_CLASSNAME = CommonClientConfigKey.NFLoadBalancerRuleClassName.defaultValue();
@Deprecated
public static final String DEFAULT_NFLOADBALANCER_CLASSNAME = CommonClientConfigKey.NFLoadBalancerClassName.defaultValue();
@Deprecated
public static final boolean DEFAULT_USEIPADDRESS_FOR_SERVER = CommonClientConfigKey.UseIPAddrForServer.defaultValue();
@Deprecated
public static final String DEFAULT_CLIENT_CLASSNAME = CommonClientConfigKey.ClientClassName.defaultValue();
@Deprecated
public static final String DEFAULT_VIPADDRESS_RESOLVER_CLASSNAME = CommonClientConfigKey.VipAddressResolverClassName.defaultValue();
@Deprecated
public static final String DEFAULT_PRIME_CONNECTIONS_URI = CommonClientConfigKey.PrimeConnectionsURI.defaultValue();
@Deprecated
public static final int DEFAULT_MAX_TOTAL_TIME_TO_PRIME_CONNECTIONS = CommonClientConfigKey.MaxTotalTimeToPrimeConnections.defaultValue();
@Deprecated
public static final int DEFAULT_MAX_RETRIES_PER_SERVER_PRIME_CONNECTION = CommonClientConfigKey.MaxRetriesPerServerPrimeConnection.defaultValue();
@Deprecated
public static final Boolean DEFAULT_ENABLE_PRIME_CONNECTIONS = CommonClientConfigKey.EnablePrimeConnections.defaultValue();
@Deprecated
public static final int DEFAULT_MAX_REQUESTS_ALLOWED_PER_WINDOW = Integer.MAX_VALUE;
@Deprecated
public static final int DEFAULT_REQUEST_THROTTLING_WINDOW_IN_MILLIS = 60000;
@Deprecated
public static final Boolean DEFAULT_ENABLE_REQUEST_THROTTLING = Boolean.FALSE;
@Deprecated
public static final Boolean DEFAULT_ENABLE_GZIP_CONTENT_ENCODING_FILTER = CommonClientConfigKey.EnableGZIPContentEncodingFilter.defaultValue();
@Deprecated
public static final Boolean DEFAULT_CONNECTION_POOL_CLEANER_TASK_ENABLED = CommonClientConfigKey.ConnectionPoolCleanerTaskEnabled.defaultValue();
@Deprecated
public static final Boolean DEFAULT_FOLLOW_REDIRECTS = CommonClientConfigKey.FollowRedirects.defaultValue();
@Deprecated
public static final float DEFAULT_PERCENTAGE_NIWS_EVENT_LOGGED = 0.0f;
@Deprecated
public static final int DEFAULT_MAX_AUTO_RETRIES_NEXT_SERVER = CommonClientConfigKey.MaxAutoRetriesNextServer.defaultValue();
@Deprecated
public static final int DEFAULT_MAX_AUTO_RETRIES = CommonClientConfigKey.MaxAutoRetries.defaultValue();
@Deprecated
public static final int DEFAULT_BACKOFF_INTERVAL = CommonClientConfigKey.BackoffInterval.defaultValue();
@Deprecated
public static final int DEFAULT_READ_TIMEOUT = CommonClientConfigKey.ReadTimeout.defaultValue();
@Deprecated
public static final int DEFAULT_CONNECTION_MANAGER_TIMEOUT = CommonClientConfigKey.ConnectionManagerTimeout.defaultValue();
@Deprecated
public static final int DEFAULT_CONNECT_TIMEOUT = CommonClientConfigKey.ConnectTimeout.defaultValue();
@Deprecated
public static final Boolean DEFAULT_ENABLE_CONNECTION_POOL = CommonClientConfigKey.EnableConnectionPool.defaultValue();
@Deprecated
public static final int DEFAULT_MAX_HTTP_CONNECTIONS_PER_HOST = CommonClientConfigKey.MaxHttpConnectionsPerHost.defaultValue();
@Deprecated
public static final int DEFAULT_MAX_TOTAL_HTTP_CONNECTIONS = CommonClientConfigKey.MaxTotalHttpConnections.defaultValue();
@Deprecated
public static final int DEFAULT_MAX_CONNECTIONS_PER_HOST = CommonClientConfigKey.MaxConnectionsPerHost.defaultValue();
@Deprecated
public static final int DEFAULT_MAX_TOTAL_CONNECTIONS = CommonClientConfigKey.MaxTotalConnections.defaultValue();
@Deprecated
public static final float DEFAULT_MIN_PRIME_CONNECTIONS_RATIO = CommonClientConfigKey.MinPrimeConnectionsRatio.defaultValue();
@Deprecated
public static final String DEFAULT_PRIME_CONNECTIONS_CLASS = CommonClientConfigKey.PrimeConnectionsClassName.defaultValue();
@Deprecated
public static final String DEFAULT_SEVER_LIST_CLASS = CommonClientConfigKey.NIWSServerListClassName.defaultValue();
@Deprecated
public static final String DEFAULT_SERVER_LIST_UPDATER_CLASS = CommonClientConfigKey.ServerListUpdaterClassName.defaultValue();
@Deprecated
public static final int DEFAULT_CONNECTION_IDLE_TIMERTASK_REPEAT_IN_MSECS = CommonClientConfigKey.ConnectionCleanerRepeatInterval.defaultValue(); // every half minute (30 secs)
@Deprecated
public static final int DEFAULT_CONNECTIONIDLE_TIME_IN_MSECS = CommonClientConfigKey.ConnIdleEvictTimeMilliSeconds.defaultValue(); // all connections idle for 30 secs
@Deprecated
public static final int DEFAULT_POOL_MAX_THREADS = CommonClientConfigKey.MaxTotalHttpConnections.defaultValue();
@Deprecated
public static final int DEFAULT_POOL_MIN_THREADS = CommonClientConfigKey.PoolMinThreads.defaultValue();
@Deprecated
public static final long DEFAULT_POOL_KEEP_ALIVE_TIME = CommonClientConfigKey.PoolKeepAliveTime.defaultValue();
@Deprecated
public static final TimeUnit DEFAULT_POOL_KEEP_ALIVE_TIME_UNITS = TimeUnit.valueOf(CommonClientConfigKey.PoolKeepAliveTimeUnits.defaultValue());
@Deprecated
public static final Boolean DEFAULT_ENABLE_ZONE_AFFINITY = CommonClientConfigKey.EnableZoneAffinity.defaultValue();
@Deprecated
public static final Boolean DEFAULT_ENABLE_ZONE_EXCLUSIVITY = CommonClientConfigKey.EnableZoneExclusivity.defaultValue();
@Deprecated
public static final int DEFAULT_PORT = CommonClientConfigKey.Port.defaultValue();
@Deprecated
public static final Boolean DEFAULT_ENABLE_LOADBALANCER = CommonClientConfigKey.InitializeNFLoadBalancer.defaultValue();
@Deprecated
public static final Boolean DEFAULT_OK_TO_RETRY_ON_ALL_OPERATIONS = CommonClientConfigKey.OkToRetryOnAllOperations.defaultValue();
@Deprecated
public static final Boolean DEFAULT_ENABLE_NIWS_EVENT_LOGGING = Boolean.TRUE;
@Deprecated
public static final Boolean DEFAULT_IS_CLIENT_AUTH_REQUIRED = Boolean.FALSE;
private volatile VipAddressResolver vipResolver = null;
protected AbstractDefaultClientConfigImpl(PropertyResolver resolver) {
super(resolver);
}
public void setVipAddressResolver(VipAddressResolver resolver) {
this.vipResolver = resolver;
}
public VipAddressResolver getResolver() {
return vipResolver;
}
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DC_DOUBLECHECK")
private VipAddressResolver getVipAddressResolver() {
if (vipResolver == null) {
synchronized (this) {
if (vipResolver == null) {
try {
vipResolver = (VipAddressResolver) Class
.forName(getOrDefault(CommonClientConfigKey.VipAddressResolverClassName))
.newInstance();
} catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
throw new RuntimeException("Cannot instantiate VipAddressResolver", e);
}
}
}
}
return vipResolver;
}
@Override
public String resolveDeploymentContextbasedVipAddresses(){
String deploymentContextBasedVipAddressesMacro = get(CommonClientConfigKey.DeploymentContextBasedVipAddresses);
if (deploymentContextBasedVipAddressesMacro == null) {
return null;
}
return getVipAddressResolver().resolve(deploymentContextBasedVipAddressesMacro, this);
}
@Deprecated
public String getAppName(){
return get(CommonClientConfigKey.AppName);
}
@Deprecated
public String getVersion(){
return get(CommonClientConfigKey.Version);
}
}
| 3,375 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/config/IClientConfigKey.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client.config;
/**
* Defines the key used in {@link IClientConfig}. See {@link CommonClientConfigKey}
* for the commonly defined client configuration keys.
*
* @author awang
*
*/
public interface IClientConfigKey<T> {

    /**
     * Legacy nested namespace kept for backwards compatibility; it simply
     * re-exposes {@link CommonClientConfigKey}'s constructor under the old
     * {@code IClientConfigKey.Keys} name.
     */
    @SuppressWarnings("rawtypes")
    final class Keys extends CommonClientConfigKey {
        private Keys(String configKey) {
            super(configKey);
        }
    }

    /**
     * @return string representation of the key used for hash purpose.
     */
    String key();

    /**
     * @return Data type for the key. For example, Integer.class.
     */
    Class<T> type();

    /** @return default value used when no explicit value is configured; null by default. */
    default T defaultValue() { return null; }

    /**
     * Derives a new key by treating this key's name as a
     * {@link String#format(String, Object...)} pattern applied to {@code args};
     * the type and default value are inherited from this key.
     */
    default IClientConfigKey<T> format(Object ... args) {
        return create(String.format(key(), args), type(), defaultValue());
    }

    /**
     * Creates an ad-hoc key instance with the given name, type and default.
     */
    default IClientConfigKey<T> create(String key, Class<T> type, T defaultValue) {
        return new IClientConfigKey<T>() {
            @Override
            public int hashCode() {
                return key().hashCode();
            }

            // NOTE(review): equality here is by key string alone, while
            // CommonClientConfigKey.equals also requires an identical class.
            // Comparisons mixing the two implementations are therefore
            // asymmetric -- confirm before relying on them in sets/maps.
            @Override
            public boolean equals(Object obj) {
                if (obj instanceof IClientConfigKey) {
                    return key().equals(((IClientConfigKey)obj).key());
                }
                return false;
            }

            @Override
            public String toString() {
                return key();
            }

            @Override
            public String key() {
                return key;
            }

            @Override
            public Class<T> type() {
                return type;
            }

            @Override
            public T defaultValue() { return defaultValue; }
        };
    }
}
| 3,376 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/config/CommonClientConfigKey.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client.config;
import com.google.common.reflect.TypeToken;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import static com.google.common.base.Preconditions.checkArgument;
/**
 * Registry of the commonly used Ribbon client configuration keys. Each key is
 * declared as an anonymous subclass ({@code new CommonClientConfigKey<T>(...){}})
 * so the constructor can recover the runtime type {@code T} from the generic
 * superclass signature.
 */
public abstract class CommonClientConfigKey<T> implements IClientConfigKey<T> {

    public static final String DEFAULT_NAME_SPACE = "ribbon";

    // --- client identity ---
    public static final IClientConfigKey<String> AppName = new CommonClientConfigKey<String>("AppName"){};

    public static final IClientConfigKey<String> Version = new CommonClientConfigKey<String>("Version"){};

    public static final IClientConfigKey<Integer> Port = new CommonClientConfigKey<Integer>("Port", 7001){};

    public static final IClientConfigKey<Integer> SecurePort = new CommonClientConfigKey<Integer>("SecurePort", 7001){};

    public static final IClientConfigKey<String> VipAddress = new CommonClientConfigKey<String>("VipAddress"){};

    public static final IClientConfigKey<Boolean> ForceClientPortConfiguration = new CommonClientConfigKey<Boolean>("ForceClientPortConfiguration"){}; // use client defined port regardless of server advert

    public static final IClientConfigKey<String> DeploymentContextBasedVipAddresses = new CommonClientConfigKey<String>("DeploymentContextBasedVipAddresses"){};

    // --- retry behavior ---
    public static final IClientConfigKey<Integer> MaxAutoRetries = new CommonClientConfigKey<Integer>("MaxAutoRetries", 0){};

    public static final IClientConfigKey<Integer> MaxAutoRetriesNextServer = new CommonClientConfigKey<Integer>("MaxAutoRetriesNextServer", 1){};

    public static final IClientConfigKey<Boolean> OkToRetryOnAllOperations = new CommonClientConfigKey<Boolean>("OkToRetryOnAllOperations", false){};

    public static final IClientConfigKey<Boolean> RequestSpecificRetryOn = new CommonClientConfigKey<Boolean>("RequestSpecificRetryOn"){};

    public static final IClientConfigKey<Integer> ReceiveBufferSize = new CommonClientConfigKey<Integer>("ReceiveBufferSize"){};

    // --- connection priming ---
    public static final IClientConfigKey<Boolean> EnablePrimeConnections = new CommonClientConfigKey<Boolean>("EnablePrimeConnections", false){};

    public static final IClientConfigKey<String> PrimeConnectionsClassName = new CommonClientConfigKey<String>("PrimeConnectionsClassName", "com.netflix.niws.client.http.HttpPrimeConnection"){};

    public static final IClientConfigKey<Integer> MaxRetriesPerServerPrimeConnection = new CommonClientConfigKey<Integer>("MaxRetriesPerServerPrimeConnection", 9){};

    public static final IClientConfigKey<Integer> MaxTotalTimeToPrimeConnections = new CommonClientConfigKey<Integer>("MaxTotalTimeToPrimeConnections", 30000){};

    public static final IClientConfigKey<Float> MinPrimeConnectionsRatio = new CommonClientConfigKey<Float>("MinPrimeConnectionsRatio", 1.0f){};

    public static final IClientConfigKey<String> PrimeConnectionsURI = new CommonClientConfigKey<String>("PrimeConnectionsURI", "/"){};

    // --- thread pool ---
    public static final IClientConfigKey<Integer> PoolMaxThreads = new CommonClientConfigKey<Integer>("PoolMaxThreads", 200){};

    public static final IClientConfigKey<Integer> PoolMinThreads = new CommonClientConfigKey<Integer>("PoolMinThreads", 1){};

    public static final IClientConfigKey<Integer> PoolKeepAliveTime = new CommonClientConfigKey<Integer>("PoolKeepAliveTime", 15 * 60){};

    public static final IClientConfigKey<String> PoolKeepAliveTimeUnits = new CommonClientConfigKey<String>("PoolKeepAliveTimeUnits", TimeUnit.SECONDS.toString()){};

    // --- connection pool ---
    public static final IClientConfigKey<Boolean> EnableConnectionPool = new CommonClientConfigKey<Boolean>("EnableConnectionPool", true) {};

    /**
     * Use {@link #MaxConnectionsPerHost}
     */
    @Deprecated
    public static final IClientConfigKey<Integer> MaxHttpConnectionsPerHost = new CommonClientConfigKey<Integer>("MaxHttpConnectionsPerHost", 50){};

    /**
     * Use {@link #MaxTotalConnections}
     */
    @Deprecated
    public static final IClientConfigKey<Integer> MaxTotalHttpConnections = new CommonClientConfigKey<Integer>("MaxTotalHttpConnections", 200){};

    public static final IClientConfigKey<Integer> MaxConnectionsPerHost = new CommonClientConfigKey<Integer>("MaxConnectionsPerHost", 50){};

    public static final IClientConfigKey<Integer> MaxTotalConnections = new CommonClientConfigKey<Integer>("MaxTotalConnections", 200){};

    public static final IClientConfigKey<Boolean> IsSecure = new CommonClientConfigKey<Boolean>("IsSecure"){};

    public static final IClientConfigKey<Boolean> GZipPayload = new CommonClientConfigKey<Boolean>("GZipPayload"){};

    // --- timeouts (milliseconds) ---
    public static final IClientConfigKey<Integer> ConnectTimeout = new CommonClientConfigKey<Integer>("ConnectTimeout", 2000){};

    // NOTE(review): key name is "BackoffTimeout" while the constant is named
    // BackoffInterval -- presumably historical; confirm before renaming either.
    public static final IClientConfigKey<Integer> BackoffInterval = new CommonClientConfigKey<Integer>("BackoffTimeout", 0){};

    public static final IClientConfigKey<Integer> ReadTimeout = new CommonClientConfigKey<Integer>("ReadTimeout", 5000){};

    public static final IClientConfigKey<Integer> SendBufferSize = new CommonClientConfigKey<Integer>("SendBufferSize"){};

    public static final IClientConfigKey<Boolean> StaleCheckingEnabled = new CommonClientConfigKey<Boolean>("StaleCheckingEnabled", false){};

    public static final IClientConfigKey<Integer> Linger = new CommonClientConfigKey<Integer>("Linger", 0){};

    public static final IClientConfigKey<Integer> ConnectionManagerTimeout = new CommonClientConfigKey<Integer>("ConnectionManagerTimeout", 2000){};

    public static final IClientConfigKey<Boolean> FollowRedirects = new CommonClientConfigKey<Boolean>("FollowRedirects", false){};

    public static final IClientConfigKey<Boolean> ConnectionPoolCleanerTaskEnabled = new CommonClientConfigKey<Boolean>("ConnectionPoolCleanerTaskEnabled", true){};

    public static final IClientConfigKey<Integer> ConnIdleEvictTimeMilliSeconds = new CommonClientConfigKey<Integer>("ConnIdleEvictTimeMilliSeconds", 30*1000){};

    public static final IClientConfigKey<Integer> ConnectionCleanerRepeatInterval = new CommonClientConfigKey<Integer>("ConnectionCleanerRepeatInterval", 30*1000){};

    public static final IClientConfigKey<Boolean> EnableGZIPContentEncodingFilter = new CommonClientConfigKey<Boolean>("EnableGZIPContentEncodingFilter", false){};

    // --- proxy ---
    public static final IClientConfigKey<String> ProxyHost = new CommonClientConfigKey<String>("ProxyHost"){};

    public static final IClientConfigKey<Integer> ProxyPort = new CommonClientConfigKey<Integer>("ProxyPort"){};

    // --- TLS / security ---
    public static final IClientConfigKey<String> KeyStore = new CommonClientConfigKey<String>("KeyStore"){};

    public static final IClientConfigKey<String> KeyStorePassword = new CommonClientConfigKey<String>("KeyStorePassword"){};

    public static final IClientConfigKey<String> TrustStore = new CommonClientConfigKey<String>("TrustStore"){};

    public static final IClientConfigKey<String> TrustStorePassword = new CommonClientConfigKey<String>("TrustStorePassword"){};

    // if this is a secure rest client, must we use client auth too?
    public static final IClientConfigKey<Boolean> IsClientAuthRequired = new CommonClientConfigKey<Boolean>("IsClientAuthRequired", false){};

    public static final IClientConfigKey<String> CustomSSLSocketFactoryClassName = new CommonClientConfigKey<String>("CustomSSLSocketFactoryClassName"){};

    // must host name match name in certificate?
    public static final IClientConfigKey<Boolean> IsHostnameValidationRequired = new CommonClientConfigKey<Boolean>("IsHostnameValidationRequired"){};

    // see also http://hc.apache.org/httpcomponents-client-ga/tutorial/html/advanced.html
    public static final IClientConfigKey<Boolean> IgnoreUserTokenInConnectionPoolForSecureClient = new CommonClientConfigKey<Boolean>("IgnoreUserTokenInConnectionPoolForSecureClient"){};

    // Client implementation
    public static final IClientConfigKey<String> ClientClassName = new CommonClientConfigKey<String>("ClientClassName", "com.netflix.niws.client.http.RestClient"){};

    //LoadBalancer Related
    public static final IClientConfigKey<Boolean> InitializeNFLoadBalancer = new CommonClientConfigKey<Boolean>("InitializeNFLoadBalancer", true){};

    public static final IClientConfigKey<String> NFLoadBalancerClassName = new CommonClientConfigKey<String>("NFLoadBalancerClassName", "com.netflix.loadbalancer.ZoneAwareLoadBalancer"){};

    public static final IClientConfigKey<String> NFLoadBalancerRuleClassName = new CommonClientConfigKey<String>("NFLoadBalancerRuleClassName", "com.netflix.loadbalancer.AvailabilityFilteringRule"){};

    public static final IClientConfigKey<String> NFLoadBalancerPingClassName = new CommonClientConfigKey<String>("NFLoadBalancerPingClassName", "com.netflix.loadbalancer.DummyPing"){};

    public static final IClientConfigKey<Integer> NFLoadBalancerPingInterval = new CommonClientConfigKey<Integer>("NFLoadBalancerPingInterval"){};

    public static final IClientConfigKey<Integer> NFLoadBalancerMaxTotalPingTime = new CommonClientConfigKey<Integer>("NFLoadBalancerMaxTotalPingTime"){};

    public static final IClientConfigKey<String> NFLoadBalancerStatsClassName = new CommonClientConfigKey<String>("NFLoadBalancerStatsClassName", "com.netflix.loadbalancer.LoadBalancerStats"){};

    public static final IClientConfigKey<String> NIWSServerListClassName = new CommonClientConfigKey<String>("NIWSServerListClassName", "com.netflix.loadbalancer.ConfigurationBasedServerList"){};

    public static final IClientConfigKey<String> ServerListUpdaterClassName = new CommonClientConfigKey<String>("ServerListUpdaterClassName", "com.netflix.loadbalancer.PollingServerListUpdater"){};

    public static final IClientConfigKey<String> NIWSServerListFilterClassName = new CommonClientConfigKey<String>("NIWSServerListFilterClassName"){};

    public static final IClientConfigKey<Integer> ServerListRefreshInterval = new CommonClientConfigKey<Integer>("ServerListRefreshInterval"){};

    public static final IClientConfigKey<Boolean> EnableMarkingServerDownOnReachingFailureLimit = new CommonClientConfigKey<Boolean>("EnableMarkingServerDownOnReachingFailureLimit"){};

    public static final IClientConfigKey<Integer> ServerDownFailureLimit = new CommonClientConfigKey<Integer>("ServerDownFailureLimit"){};

    public static final IClientConfigKey<Integer> ServerDownStatWindowInMillis = new CommonClientConfigKey<Integer>("ServerDownStatWindowInMillis"){};

    public static final IClientConfigKey<Boolean> EnableZoneAffinity = new CommonClientConfigKey<Boolean>("EnableZoneAffinity", false){};

    public static final IClientConfigKey<Boolean> EnableZoneExclusivity = new CommonClientConfigKey<Boolean>("EnableZoneExclusivity", false){};

    public static final IClientConfigKey<Boolean> PrioritizeVipAddressBasedServers = new CommonClientConfigKey<Boolean>("PrioritizeVipAddressBasedServers", true){};

    public static final IClientConfigKey<String> VipAddressResolverClassName = new CommonClientConfigKey<String>("VipAddressResolverClassName", "com.netflix.client.SimpleVipAddressResolver"){};

    public static final IClientConfigKey<String> TargetRegion = new CommonClientConfigKey<String>("TargetRegion"){};

    public static final IClientConfigKey<String> RulePredicateClasses = new CommonClientConfigKey<String>("RulePredicateClasses"){};

    public static final IClientConfigKey<String> RequestIdHeaderName = new CommonClientConfigKey<String>("RequestIdHeaderName") {};

    public static final IClientConfigKey<Boolean> UseIPAddrForServer = new CommonClientConfigKey<Boolean>("UseIPAddrForServer", false) {};

    public static final IClientConfigKey<String> ListOfServers = new CommonClientConfigKey<String>("listOfServers", "") {};

    // All IClientConfigKey-typed static fields of this class, gathered once by
    // the static initializer below (reflection on declared fields).
    private static final Set<IClientConfigKey> keys = new HashSet<IClientConfigKey>();

    static {
        // Collect every static field whose type is an IClientConfigKey. Access
        // works without setAccessible because this runs inside the same class.
        for (Field f: CommonClientConfigKey.class.getDeclaredFields()) {
            if (Modifier.isStatic(f.getModifiers()) //&& Modifier.isPublic(f.getModifiers())
                    && IClientConfigKey.class.isAssignableFrom(f.getType())) {
                try {
                    keys.add((IClientConfigKey) f.get(null));
                } catch (IllegalAccessException e) {
                    throw new RuntimeException(e);
                }
            }
        }
    }

    /**
     * @deprecated see {@link #keys()}
     */
    @edu.umd.cs.findbugs.annotations.SuppressWarnings
    @Deprecated
    public static IClientConfigKey[] values() {
        return keys().toArray(new IClientConfigKey[0]);
    }

    /**
     * return all the public static keys defined in this class
     */
    public static Set<IClientConfigKey> keys() {
        return keys;
    }

    /**
     * Looks up a declared key by name. Unknown names yield a fresh, uncached
     * ad-hoc key typed as String with no default.
     */
    public static IClientConfigKey valueOf(final String name) {
        for (IClientConfigKey key: keys()) {
            if (key.key().equals(name)) {
                return key;
            }
        }
        return new IClientConfigKey() {
            @Override
            public String key() {
                return name;
            }

            @Override
            public Class type() {
                return String.class;
            }
        };
    }

    // Key name as it appears in configuration sources.
    private final String configKey;
    // Runtime value type, recovered from the generic superclass of the
    // anonymous subclass in the constructor below.
    private final Class<T> type;
    // Default returned by defaultValue(); set once at construction.
    private T defaultValue;

    @SuppressWarnings("unchecked")
    protected CommonClientConfigKey(String configKey) {
        this(configKey, null);
    }

    /**
     * Must be invoked through an anonymous subclass with a concrete type
     * argument (e.g. {@code new CommonClientConfigKey<Integer>("Port", 7001){}})
     * so {@code T} is reifiable via the generic superclass signature.
     */
    protected CommonClientConfigKey(String configKey, T defaultValue) {
        this.configKey = configKey;
        Type superclass = getClass().getGenericSuperclass();
        checkArgument(superclass instanceof ParameterizedType,
                "%s isn't parameterized", superclass);
        Type runtimeType = ((ParameterizedType) superclass).getActualTypeArguments()[0];
        type = (Class<T>) TypeToken.of(runtimeType).getRawType();
        this.defaultValue = defaultValue;
    }

    @Override
    public Class<T> type() {
        return type;
    }

    /* (non-Javadoc)
     * @see com.netflix.niws.client.ClientConfig#key()
     */
    @Override
    public String key() {
        return configKey;
    }

    @Override
    public String toString() {
        return configKey;
    }

    @Override
    public T defaultValue() { return defaultValue; }

    // NOTE(review): because each key is its own anonymous subclass,
    // getClass() != o.getClass() means two independently created keys with the
    // same name are never equal here, while IClientConfigKey.create() compares
    // by key string only -- confirm this asymmetry is intended.
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        CommonClientConfigKey<?> that = (CommonClientConfigKey<?>) o;
        return Objects.equals(configKey, that.configKey);
    }

    @Override
    public int hashCode() {
        return Objects.hash(configKey);
    }
}
| 3,377 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/config/PropertyUtils.java | package com.netflix.client.config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.Method;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
/**
 * Helpers for converting raw string property values into typed values.
 */
public class PropertyUtils {
    private static final Logger LOG = LoggerFactory.getLogger(PropertyUtils.class);

    private PropertyUtils() {}

    // Cache of each type's static valueOf(String) method (empty when the type
    // declares none), so the reflective lookup happens at most once per type.
    private static final Map<Class<?>, Optional<Method>> valueOfMethods = new ConcurrentHashMap<>();

    /**
     * Converts {@code value} to the desired {@code type} through the type's
     * static {@code valueOf(String)} factory, if it declares one.
     *
     * @param type  target type expected to declare {@code static T valueOf(String)}
     * @param value raw string to convert
     * @return the converted value, or {@link Optional#empty()} when {@code type}
     *         has no {@code valueOf(String)} method
     * @throws RuntimeException if the {@code valueOf} invocation itself fails
     */
    @SuppressWarnings("unchecked")
    public static <T> Optional<T> resolveWithValueOf(Class<T> type, String value) {
        return valueOfMethods.computeIfAbsent(type, ignore -> {
            try {
                return Optional.of(type.getDeclaredMethod("valueOf", String.class));
            } catch (NoSuchMethodException e) {
                // Normal case: the type simply offers no valueOf(String) factory.
                return Optional.empty();
            } catch (Exception e) {
                LOG.warn("Unable to determine if type " + type + " has a valueOf() static method", e);
                return Optional.empty();
            }
        }).map(method -> {
            try {
                // Cast is safe by convention: T.valueOf(String) returns T.
                return (T) method.invoke(null, value);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
    }
}
| 3,378 |
0 | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-core/src/main/java/com/netflix/client/http/UnexpectedHttpResponseException.java | package com.netflix.client.http;
/**
 * Signals that an HTTP response carried a status the caller did not expect.
 * Captures the numeric status code and the raw status line; the status line
 * doubles as the exception message.
 */
public class UnexpectedHttpResponseException extends Exception {

    private static final long serialVersionUID = 1L;

    /** HTTP status code of the offending response. */
    private final int statusCode;
    /** Raw status line reported by the server. */
    private final String line;

    /**
     * @param statusCode HTTP status code of the response
     * @param statusLine status line text; becomes both {@link #getStatusLine()}
     *                   and {@link #getMessage()}
     */
    public UnexpectedHttpResponseException(int statusCode, String statusLine) {
        super(statusLine);
        this.line = statusLine;
        this.statusCode = statusCode;
    }

    /** @return the HTTP status code of the response */
    public int getStatusCode() {
        return this.statusCode;
    }

    /** @return the raw status line of the response */
    public String getStatusLine() {
        return line;
    }
}
| 3,379 |
0 | Create_ds/ribbon/ribbon-examples/src/test/java/com/netflix/ribbon/examples | Create_ds/ribbon/ribbon-examples/src/test/java/com/netflix/ribbon/examples/rx/RxMovieServerTest.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.examples.rx;
import com.netflix.ribbon.examples.rx.common.Movie;
import io.netty.buffer.ByteBuf;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.util.internal.ConcurrentSet;
import io.reactivex.netty.RxNetty;
import io.reactivex.netty.channel.StringTransformer;
import io.reactivex.netty.protocol.http.client.HttpClientResponse;
import io.reactivex.netty.protocol.http.server.HttpServer;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import rx.Observable;
import rx.functions.Func1;
import java.nio.charset.Charset;
import java.util.Random;
import java.util.Set;
import static com.netflix.ribbon.examples.rx.common.Movie.*;
import static junit.framework.Assert.*;
/**
* @author Tomasz Bak
*/
/**
 * Integration test for {@link RxMovieServer}. Starts the server on an
 * ephemeral port before each test and talks to it with a raw RxNetty HTTP
 * client.
 *
 * @author Tomasz Bak
 */
public class RxMovieServerTest {

    private static final String TEST_USER_ID = "user1";

    // Resolved in setUp() once the server has bound its ephemeral port.
    private int port;
    private String baseURL;

    private RxMovieServer movieServer;
    private HttpServer<ByteBuf, ByteBuf> httpServer;

    @Before
    public void setUp() throws Exception {
        // Bind to port 0 so the OS assigns a free port; the previous approach
        // of picking a random fixed port in [8000, 9000) could collide with
        // another process and make the suite flaky. This mirrors
        // RxMovieClientTestBase, which already uses port 0 + getServerPort().
        movieServer = new RxMovieServer(0);
        httpServer = movieServer.createServer().start();
        port = httpServer.getServerPort();
        baseURL = "http://localhost:" + port;
    }

    @After
    public void tearDown() throws Exception {
        httpServer.shutdown();
    }

    /** POSTing a formatted movie to /movies must return 201 and store it server-side. */
    @Test
    public void testMovieRegistration() {
        String movieFormatted = ORANGE_IS_THE_NEW_BLACK.toString();
        HttpResponseStatus statusCode = RxNetty.createHttpPost(baseURL + "/movies", Observable.just(movieFormatted), new StringTransformer())
                .flatMap(new Func1<HttpClientResponse<ByteBuf>, Observable<HttpResponseStatus>>() {
                    @Override
                    public Observable<HttpResponseStatus> call(HttpClientResponse<ByteBuf> httpClientResponse) {
                        return Observable.just(httpClientResponse.getStatus());
                    }
                }).toBlocking().first();
        assertEquals(HttpResponseStatus.CREATED, statusCode);
        assertEquals(ORANGE_IS_THE_NEW_BLACK, movieServer.movies.get(ORANGE_IS_THE_NEW_BLACK.getId()));
    }

    /**
     * POSTing a movie id to a user's recommendations must return 200 and record
     * the id. (Renamed from the original's misspelled "testUpateRecommendations".)
     */
    @Test
    public void testUpdateRecommendations() {
        movieServer.movies.put(ORANGE_IS_THE_NEW_BLACK.getId(), ORANGE_IS_THE_NEW_BLACK);
        HttpResponseStatus statusCode = RxNetty.createHttpPost(baseURL + "/users/" + TEST_USER_ID + "/recommendations", Observable.just(ORANGE_IS_THE_NEW_BLACK.getId()), new StringTransformer())
                .flatMap(new Func1<HttpClientResponse<ByteBuf>, Observable<HttpResponseStatus>>() {
                    @Override
                    public Observable<HttpResponseStatus> call(HttpClientResponse<ByteBuf> httpClientResponse) {
                        return Observable.just(httpClientResponse.getStatus());
                    }
                }).toBlocking().first();
        assertEquals(HttpResponseStatus.OK, statusCode);
        assertTrue(movieServer.userRecommendations.get(TEST_USER_ID).contains(ORANGE_IS_THE_NEW_BLACK.getId()));
    }

    /** GET /users/{id}/recommendations must return the two distinct pre-seeded movies. */
    @Test
    public void testRecommendationsByUserId() throws Exception {
        movieServer.movies.put(ORANGE_IS_THE_NEW_BLACK.getId(), ORANGE_IS_THE_NEW_BLACK);
        movieServer.movies.put(BREAKING_BAD.getId(), BREAKING_BAD);
        Set<String> userRecom = new ConcurrentSet<String>();
        userRecom.add(ORANGE_IS_THE_NEW_BLACK.getId());
        userRecom.add(BREAKING_BAD.getId());
        movieServer.userRecommendations.put(TEST_USER_ID, userRecom);
        Observable<HttpClientResponse<ByteBuf>> httpGet = RxNetty.createHttpGet(baseURL + "/users/" + TEST_USER_ID + "/recommendations");
        Movie[] movies = handleGetMoviesReply(httpGet);
        // The reply order is unspecified, so only check distinctness/membership.
        assertTrue(movies[0] != movies[1]);
        assertTrue(userRecom.contains(movies[0].getId()));
        assertTrue(userRecom.contains(movies[1].getId()));
    }

    /** GET /recommendations with category+ageGroup filters must return exactly the match. */
    @Test
    public void testRecommendationsByMultipleCriteria() throws Exception {
        movieServer.movies.put(ORANGE_IS_THE_NEW_BLACK.getId(), ORANGE_IS_THE_NEW_BLACK);
        movieServer.movies.put(BREAKING_BAD.getId(), BREAKING_BAD);
        movieServer.movies.put(HOUSE_OF_CARDS.getId(), HOUSE_OF_CARDS);
        String relativeURL = String.format("/recommendations?category=%s&ageGroup=%s", BREAKING_BAD.getCategory(), BREAKING_BAD.getAgeGroup());
        Movie[] movies = handleGetMoviesReply(RxNetty.createHttpGet(baseURL + relativeURL));
        assertEquals(1, movies.length);
        assertEquals(BREAKING_BAD, movies[0]);
    }

    /**
     * Executes the given GET, blocks for the first reply, and parses the
     * newline-separated body into an array of {@link Movie} objects.
     */
    private Movie[] handleGetMoviesReply(Observable<HttpClientResponse<ByteBuf>> httpGet) {
        return httpGet
                .flatMap(new Func1<HttpClientResponse<ByteBuf>, Observable<Movie[]>>() {
                    @Override
                    public Observable<Movie[]> call(HttpClientResponse<ByteBuf> httpClientResponse) {
                        return httpClientResponse.getContent().map(new Func1<ByteBuf, Movie[]>() {
                            @Override
                            public Movie[] call(ByteBuf byteBuf) {
                                // One movie per line in the response body.
                                String[] lines = byteBuf.toString(Charset.defaultCharset()).split("\n");
                                Movie[] movies = new Movie[lines.length];
                                for (int i = 0; i < movies.length; i++) {
                                    movies[i] = Movie.from(lines[i]);
                                }
                                return movies;
                            }
                        });
                    }
                }).toBlocking().first();
    }
}
| 3,380 |
0 | Create_ds/ribbon/ribbon-examples/src/test/java/com/netflix/ribbon/examples | Create_ds/ribbon/ribbon-examples/src/test/java/com/netflix/ribbon/examples/rx/RxMovieClientTestBase.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.examples.rx;
import io.netty.buffer.ByteBuf;
import io.reactivex.netty.protocol.http.server.HttpServer;
import org.junit.After;
import org.junit.Before;
/**
* @author Tomasz Bak
*/
/**
 * Base class for the client example tests: boots an {@link RxMovieServer} on an
 * ephemeral port before each test and shuts it down afterwards. Subclasses read
 * the bound port from the protected {@code port} field.
 */
public class RxMovieClientTestBase {
    // 0 asks the OS for a free ephemeral port; replaced with the real port in setUp().
    protected int port = 0;
    private RxMovieServer movieServer;
    private HttpServer<ByteBuf, ByteBuf> httpServer;
    @Before
    public void setUp() throws Exception {
        movieServer = new RxMovieServer(port);
        httpServer = movieServer.createServer().start();
        // Read back the port the server actually bound to.
        port = httpServer.getServerPort();
    }
    @After
    public void tearDown() throws Exception {
        httpServer.shutdown();
    }
}
| 3,381 |
0 | Create_ds/ribbon/ribbon-examples/src/test/java/com/netflix/ribbon/examples/rx | Create_ds/ribbon/ribbon-examples/src/test/java/com/netflix/ribbon/examples/rx/transport/RxMovieTransportExampleTest.java | package com.netflix.ribbon.examples.rx.transport;
import com.netflix.ribbon.examples.rx.RxMovieClientTestBase;
import org.junit.Test;
import static junit.framework.Assert.*;
/**
* @author Tomasz Bak
*/
public class RxMovieTransportExampleTest extends RxMovieClientTestBase {
@Test
public void testTemplateExample() throws Exception {
assertTrue(new RxMovieTransportExample(port).runExample());
}
} | 3,382 |
0 | Create_ds/ribbon/ribbon-examples/src/test/java/com/netflix/ribbon/examples/rx | Create_ds/ribbon/ribbon-examples/src/test/java/com/netflix/ribbon/examples/rx/proxy/RxMovieProxyExampleTest.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.examples.rx.proxy;
import com.netflix.ribbon.examples.rx.RxMovieClientTestBase;
import org.junit.Test;
import static junit.framework.Assert.*;
/**
* @author Tomasz Bak
*/
public class RxMovieProxyExampleTest extends RxMovieClientTestBase {
    // Runs the annotation-driven proxy example end to end against the local
    // test server (started by the base class) and expects it to succeed.
    @Test
    public void testProxyExample() throws Exception {
        assertTrue(new RxMovieProxyExample(port).runExample());
    }
}
0 | Create_ds/ribbon/ribbon-examples/src/test/java/com/netflix/ribbon/examples/rx | Create_ds/ribbon/ribbon-examples/src/test/java/com/netflix/ribbon/examples/rx/template/RxMovieTemplateExampleTest.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.examples.rx.template;
import com.netflix.ribbon.examples.rx.RxMovieClientTestBase;
import org.junit.Test;
import static junit.framework.Assert.*;
/**
* @author Tomasz Bak
*/
public class RxMovieTemplateExampleTest extends RxMovieClientTestBase {
    // Runs the HTTP resource-template example end to end against the local
    // test server (started by the base class) and expects it to succeed.
    @Test
    public void testTemplateExample() throws Exception {
        assertTrue(new RxMovieTemplateExample(port).runExample());
    }
}
0 | Create_ds/ribbon/ribbon-examples/src/test/java/com/netflix/ribbon/examples/rx | Create_ds/ribbon/ribbon-examples/src/test/java/com/netflix/ribbon/examples/rx/common/MovieTest.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.examples.rx.common;
import org.junit.Test;
import static com.netflix.ribbon.examples.rx.common.Movie.*;
import static org.junit.Assert.*;
/**
* @author Tomasz Bak
*/
public class MovieTest {
    // Round-trip check: formatting a movie with toString() and parsing it back
    // with Movie.from() must yield an equal instance.
    @Test
    public void testStringParsing() {
        Movie fromString = Movie.from(ORANGE_IS_THE_NEW_BLACK.toString());
        assertEquals(ORANGE_IS_THE_NEW_BLACK, fromString);
    }
}
| 3,385 |
0 | Create_ds/ribbon/ribbon-examples/src/test/java/com/netflix/ribbon/examples/rx | Create_ds/ribbon/ribbon-examples/src/test/java/com/netflix/ribbon/examples/rx/common/RecommendationsTest.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.examples.rx.common;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
import static junit.framework.Assert.*;
/**
* @author Tomasz Bak
*/
public class RecommendationsTest {
    // Round-trip check: a Recommendations list serialized via toString() and
    // parsed back with Recommendations.from() must compare equal.
    @Test
    public void testStringParsing() throws Exception {
        List<Movie> movies = new ArrayList<Movie>();
        movies.add(Movie.ORANGE_IS_THE_NEW_BLACK);
        movies.add(Movie.BREAKING_BAD);
        Recommendations recommendations = new Recommendations(movies);
        Recommendations fromString = Recommendations.from(recommendations.toString());
        assertEquals(recommendations, fromString);
    }
}
0 | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/ExampleAppWithLocalResource.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ribbon.examples;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import com.sun.jersey.api.container.httpserver.HttpServerFactory;
import com.sun.jersey.api.core.PackagesResourceConfig;
import com.sun.net.httpserver.HttpServer;
/**
* A base class for some sample applications that starts and stops a local server
*
* @author awang
*
*/
public abstract class ExampleAppWithLocalResource {
    // NOTE(review): a randomly chosen port in [4000, 5000) can collide with
    // another process and fail the bind — acceptable for a demo, but an
    // ephemeral port (0) would be safer. TODO confirm before hardening.
    public int port = (new Random()).nextInt(1000) + 4000;
    public String SERVICE_URI = "http://localhost:" + port + "/";
    HttpServer server = null;
    /** Client-side logic to run against the local server; implemented by subclasses. */
    public abstract void run() throws Exception;
    @edu.umd.cs.findbugs.annotations.SuppressWarnings
    public final void runApp() throws Exception {
        // Serve the JAX-RS resources found in the examples' server package.
        PackagesResourceConfig resourceConfig = new PackagesResourceConfig("com.netflix.ribbon.examples.server");
        ExecutorService service = Executors.newFixedThreadPool(50);
        try{
            server = HttpServerFactory.create(SERVICE_URI, resourceConfig);
            server.setExecutor(service);
            server.start();
            // Run the subclass's example against the now-live server.
            run();
        } finally {
            System.err.println("Shut down server ...");
            if (server != null) {
                // stop(1): allow up to 1 second for in-flight exchanges.
                server.stop(1);
            }
            service.shutdownNow();
        }
        // Force exit in case any non-daemon threads are still alive.
        System.exit(0);
    }
}
| 3,387 |
0 | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/loadbalancer/URLConnectionLoadBalancer.java | package com.netflix.ribbon.examples.loadbalancer;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.List;
import rx.Observable;
import com.google.common.collect.Lists;
import com.netflix.client.DefaultLoadBalancerRetryHandler;
import com.netflix.client.RetryHandler;
import com.netflix.loadbalancer.BaseLoadBalancer;
import com.netflix.loadbalancer.ILoadBalancer;
import com.netflix.loadbalancer.LoadBalancerBuilder;
import com.netflix.loadbalancer.LoadBalancerStats;
import com.netflix.loadbalancer.Server;
import com.netflix.loadbalancer.reactive.LoadBalancerCommand;
import com.netflix.loadbalancer.reactive.ServerOperation;
/**
*
* @author Allen Wang
*
*/
/**
 * Minimal example of driving plain {@link HttpURLConnection} calls through a
 * Ribbon load balancer. Each call is wrapped in a {@link LoadBalancerCommand}
 * so that server choice, statistics and retries are handled by Ribbon.
 *
 * @author Allen Wang
 */
public class URLConnectionLoadBalancer {

    private final ILoadBalancer loadBalancer;
    // retry handler that does not retry on same server, but on a different server
    private final RetryHandler retryHandler = new DefaultLoadBalancerRetryHandler(0, 1, true);

    public URLConnectionLoadBalancer(List<Server> serverList) {
        loadBalancer = LoadBalancerBuilder.newBuilder().buildFixedServerListLoadBalancer(serverList);
    }

    /**
     * Executes an HTTP GET for {@code path} against a server chosen by the
     * load balancer and returns the HTTP response message.
     *
     * @param path request path, e.g. {@code "/"}
     * @throws Exception if the request fails on all attempted servers
     */
    public String call(final String path) throws Exception {
        return LoadBalancerCommand.<String>builder()
                .withLoadBalancer(loadBalancer)
                // Fix: the retry handler above was declared but never wired into
                // the command, so the "retry next server" policy had no effect.
                .withRetryHandler(retryHandler)
                .build()
                .submit(new ServerOperation<String>() {
                    @Override
                    public Observable<String> call(Server server) {
                        URL url;
                        try {
                            url = new URL("http://" + server.getHost() + ":" + server.getPort() + path);
                            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
                            try {
                                return Observable.just(conn.getResponseMessage());
                            } finally {
                                // Release the connection's resources; the
                                // original leaked the connection.
                                conn.disconnect();
                            }
                        } catch (Exception e) {
                            return Observable.error(e);
                        }
                    }
                }).toBlocking().first();
    }

    /** @return per-server statistics accumulated by the underlying load balancer */
    public LoadBalancerStats getLoadBalancerStats() {
        return ((BaseLoadBalancer) loadBalancer).getLoadBalancerStats();
    }

    public static void main(String[] args) throws Exception {
        URLConnectionLoadBalancer urlLoadBalancer = new URLConnectionLoadBalancer(Lists.newArrayList(
                new Server("www.google.com", 80),
                new Server("www.linkedin.com", 80),
                new Server("www.yahoo.com", 80)));
        // Six calls over three servers: the balancer rotates through them.
        for (int i = 0; i < 6; i++) {
            System.out.println(urlLoadBalancer.call("/"));
        }
        System.out.println("=== Load balancer stats ===");
        System.out.println(urlLoadBalancer.getLoadBalancerStats());
    }
}
| 3,388 |
0 | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/server/ServerResources.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ribbon.examples.server;
import java.io.IOException;
import java.io.OutputStream;
import java.util.List;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.StreamingOutput;
import org.codehaus.jackson.map.ObjectMapper;
import com.google.common.collect.Lists;
import com.thoughtworks.xstream.XStream;
/**
 * JAX-RS resource backing the example applications' local test server.
 * Exposes JSON, XML and server-sent-event style endpoints over a small
 * in-memory data set populated once at class-load time.
 */
@Path("/testAsync")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public class ServerResources {
    /** Simple JSON-serializable value object used by all endpoints. */
    public static class Person {
        public String name;
        public int age;
        // No-arg constructor required for Jackson deserialization.
        public Person() {}
        public Person(String name, int age) {
            super();
            this.name = name;
            this.age = age;
        }
        @Override
        public String toString() {
            return "Person [name=" + name + ", age=" + age + "]";
        }
        // hashCode/equals are consistent: both consider exactly {name, age}.
        @Override
        public int hashCode() {
            final int prime = 31;
            int result = 1;
            result = prime * result + age;
            result = prime * result + ((name == null) ? 0 : name.hashCode());
            return result;
        }
        @Override
        public boolean equals(Object obj) {
            if (this == obj)
                return true;
            if (obj == null)
                return false;
            if (getClass() != obj.getClass())
                return false;
            Person other = (Person) obj;
            if (age != other.age)
                return false;
            if (name == null) {
                if (other.name != null)
                    return false;
            } else if (!name.equals(other.name))
                return false;
            return true;
        }
    }
    // Shared Jackson mapper; ObjectMapper is safe to reuse across requests.
    private static ObjectMapper mapper = new ObjectMapper();
    public static final Person defaultPerson = new Person("ribbon", 1);
    public static final List<Person> persons = Lists.newArrayList();
    public static final List<String> streamContent = Lists.newArrayList();
    // Populate the fixed test data once: 1000 stream lines, 100 persons.
    static {
        for (int i = 0; i < 1000; i++) {
            streamContent.add("data: line " + i);
        }
        for (int i = 0; i < 100; i++) {
            persons.add(new Person(String.valueOf(i), 10));
        }
    }
    /** Returns the default person as a JSON string. */
    @GET
    @Path("/person")
    public Response getPerson() throws IOException {
        String content = mapper.writeValueAsString(defaultPerson);
        return Response.ok(content).build();
    }
    /** Returns all 100 pre-built persons as a JSON array. */
    @GET
    @Path("/persons")
    public Response getPersons() throws IOException {
        String content = mapper.writeValueAsString(persons);
        return Response.ok(content).build();
    }
    /** Returns 200 with an empty body — used to test no-entity handling. */
    @GET
    @Path("/noEntity")
    public Response getNoEntity() {
        return Response.ok().build();
    }
    /** Sleeps 10s before replying — used to provoke client read timeouts. */
    @GET
    @Path("/readTimeout")
    public Response getReadTimeout() throws IOException, InterruptedException {
        Thread.sleep(10000);
        String content = mapper.writeValueAsString(defaultPerson);
        return Response.ok(content).build();
    }
    /** Parses the posted JSON into a Person and echoes it back. */
    @POST
    @Path("/person")
    public Response createPerson(String content) throws IOException {
        System.err.println("uploaded: " + content);
        Person person = mapper.readValue(content, Person.class);
        return Response.ok(mapper.writeValueAsString(person)).build();
    }
    /** Builds a Person from query parameters and returns it as JSON. */
    @GET
    @Path("/personQuery")
    public Response queryPerson(@QueryParam("name") String name, @QueryParam("age") int age) throws IOException {
        Person person = new Person(name, age);
        return Response.ok(mapper.writeValueAsString(person)).build();
    }
    /** Streams the 1000 fixed lines in SSE framing (blank line between events). */
    @GET
    @Path("/stream")
    @Produces("text/event-stream")
    public StreamingOutput getStream() {
        return new StreamingOutput() {
            @Override
            public void write(OutputStream output) throws IOException,
            WebApplicationException {
                for (String line: streamContent) {
                    String eventLine = line + "\n\n";
                    output.write(eventLine.getBytes("UTF-8"));
                }
                output.close();
            }
        };
    }
    /** Streams each person as a JSON "data:" SSE event. */
    @GET
    @Path("/personStream")
    @Produces("text/event-stream")
    public StreamingOutput getPersonStream() {
        return new StreamingOutput() {
            @Override
            public void write(OutputStream output) throws IOException,
            WebApplicationException {
                for (Person p: persons) {
                    String eventLine = "data: " + mapper.writeValueAsString(p)
                            + "\n\n";
                    output.write(eventLine.getBytes("UTF-8"));
                }
                output.close();
            }
        };
    }
    /** Streams the fixed lines with single-newline framing (non-SSE custom format). */
    @GET
    @Path("/customEvent")
    public StreamingOutput getCustomeEvents() {
        return new StreamingOutput() {
            @Override
            public void write(OutputStream output) throws IOException,
            WebApplicationException {
                for (String line: streamContent) {
                    String eventLine = line + "\n";
                    output.write(eventLine.getBytes("UTF-8"));
                }
                output.close();
            }
        };
    }
    /** Serializes a Person to XML with XStream — used to test XML handling. */
    @GET
    @Path("/getXml")
    @Produces("application/xml")
    public Response getXml() {
        XStream xstream = new XStream();
        String content = xstream.toXML(new Person("I am from XML", 1));
        return Response.ok(content).build();
    }
}
| 3,389 |
0 | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/netty | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/netty/http/LoadBalancingExample.java | package com.netflix.ribbon.examples.netty.http;
import io.netty.buffer.ByteBuf;
import io.reactivex.netty.protocol.http.client.HttpClientRequest;
import io.reactivex.netty.protocol.http.client.HttpClientResponse;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import rx.Observer;
import com.google.common.collect.Lists;
import com.netflix.ribbon.transport.netty.RibbonTransport;
import com.netflix.ribbon.transport.netty.http.LoadBalancingHttpClient;
import com.netflix.loadbalancer.BaseLoadBalancer;
import com.netflix.loadbalancer.LoadBalancerBuilder;
import com.netflix.loadbalancer.Server;
/**
 * Demonstrates a fixed-server-list load balancer in front of the RxNetty-based
 * HTTP client: issues one GET per configured server and prints the balancer's
 * accumulated statistics.
 */
public class LoadBalancingExample {
    public static void main(String[] args) throws Exception {
        // Three public hosts form the fixed server pool.
        List<Server> servers = Lists.newArrayList(new Server("www.google.com:80"), new Server("www.examples.com:80"), new Server("www.wikipedia.org:80"));
        BaseLoadBalancer lb = LoadBalancerBuilder.newBuilder()
                .buildFixedServerListLoadBalancer(servers);
        LoadBalancingHttpClient<ByteBuf, ByteBuf> client = RibbonTransport.newHttpClient(lb);
        // One latch count per request; released as each response arrives.
        final CountDownLatch latch = new CountDownLatch(servers.size());
        Observer<HttpClientResponse<ByteBuf>> observer = new Observer<HttpClientResponse<ByteBuf>>() {
            @Override
            public void onCompleted() {
            }
            @Override
            public void onError(Throwable e) {
                // NOTE(review): an error does not count down the latch, so a
                // failed request leaves main() waiting — acceptable for a demo.
                e.printStackTrace();
            }
            @Override
            public void onNext(HttpClientResponse<ByteBuf> args) {
                latch.countDown();
                System.out.println("Got response: " + args.getStatus());
            }
        };
        // Submit one GET per server; the balancer distributes them.
        for (int i = 0; i < servers.size(); i++) {
            HttpClientRequest<ByteBuf> request = HttpClientRequest.createGet("/");
            client.submit(request).subscribe(observer);
        }
        latch.await();
        System.out.println(lb.getLoadBalancerStats());
    }
}
| 3,390 |
0 | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/netty | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/netty/http/SimpleGet.java | package com.netflix.ribbon.examples.netty.http;
import com.netflix.ribbon.transport.netty.RibbonTransport;
import com.netflix.ribbon.transport.netty.http.LoadBalancingHttpClient;
import io.netty.buffer.ByteBuf;
import io.reactivex.netty.protocol.http.client.HttpClientRequest;
import io.reactivex.netty.protocol.http.client.HttpClientResponse;
import rx.functions.Action1;
import java.nio.charset.Charset;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
/**
 * Smallest possible RibbonTransport example: one HTTP GET with a full URI,
 * printing the status and response body.
 */
public class SimpleGet {
    @edu.umd.cs.findbugs.annotations.SuppressWarnings
    public static void main(String[] args) throws Exception {
        LoadBalancingHttpClient<ByteBuf, ByteBuf> client = RibbonTransport.newHttpClient();
        HttpClientRequest<ByteBuf> request = HttpClientRequest.createGet("http://www.google.com/");
        // Released once the response body has been printed.
        final CountDownLatch latch = new CountDownLatch(1);
        client.submit(request)
                .toBlocking()
                .forEach(new Action1<HttpClientResponse<ByteBuf>>() {
                    @Override
                    public void call(HttpClientResponse<ByteBuf> t1) {
                        System.out.println("Status code: " + t1.getStatus());
                        // The content stream is consumed asynchronously even
                        // though the response itself was awaited above.
                        t1.getContent().subscribe(new Action1<ByteBuf>() {
                            @Override
                            public void call(ByteBuf content) {
                                System.out.println("Response content: " + content.toString(Charset.defaultCharset()));
                                latch.countDown();
                            }
                        });
                    }
                });
        // Bounded wait so the example cannot hang if no content is delivered.
        latch.await(2, TimeUnit.SECONDS);
    }
}
| 3,391 |
0 | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/restclient/SampleApp.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ribbon.examples.restclient;
import java.net.URI;
import com.netflix.client.ClientFactory;
import com.netflix.client.http.HttpRequest;
import com.netflix.client.http.HttpResponse;
import com.netflix.config.ConfigurationManager;
import com.netflix.loadbalancer.ZoneAwareLoadBalancer;
import com.netflix.niws.client.http.RestClient;
/**
 * Sample application demonstrating the Ribbon {@link RestClient} with a
 * client-side load balancer configured from a properties file, including a
 * dynamic server-list update picked up at runtime.
 */
public class SampleApp {

    public static void main(String[] args) throws Exception {
        ConfigurationManager.loadPropertiesFromResources("sample-client.properties"); // 1
        System.out.println(ConfigurationManager.getConfigInstance().getProperty("sample-client.ribbon.listOfServers"));
        RestClient client = (RestClient) ClientFactory.getNamedClient("sample-client"); // 2
        HttpRequest request = HttpRequest.newBuilder().uri(new URI("/")).build(); // 3
        for (int i = 0; i < 20; i++) {
            HttpResponse response = null;
            try {
                response = client.executeWithLoadBalancer(request); // 4
                System.out.println("Status code for " + response.getRequestedURI() + " :" + response.getStatus());
            } finally {
                // Fix: the original leaked responses in this first loop while
                // closing them in the second; always release the connection.
                if (response != null) {
                    response.close();
                }
            }
        }
        @SuppressWarnings("rawtypes")
        ZoneAwareLoadBalancer lb = (ZoneAwareLoadBalancer) client.getLoadBalancer();
        System.out.println(lb.getLoadBalancerStats());
        // Change the server list in the live configuration ...
        ConfigurationManager.getConfigInstance().setProperty(
                "sample-client.ribbon.listOfServers", "www.linkedin.com:80,www.google.com:80"); // 5
        System.out.println("changing servers ...");
        // ... and give the load balancer time to refresh it.
        Thread.sleep(3000); // 6
        for (int i = 0; i < 20; i++) {
            HttpResponse response = null;
            try {
                response = client.executeWithLoadBalancer(request);
                System.out.println("Status code for " + response.getRequestedURI() + " : " + response.getStatus());
            } finally {
                if (response != null) {
                    response.close();
                }
            }
        }
        System.out.println(lb.getLoadBalancerStats()); // 7
    }
}
| 3,392 |
0 | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/rx/AbstractRxMovieClient.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.examples.rx;
import com.netflix.hystrix.util.HystrixTimer;
import com.netflix.ribbon.examples.rx.common.Movie;
import io.netty.buffer.ByteBuf;
import rx.Notification;
import rx.Observable;
import rx.functions.Func1;
import rx.functions.Func2;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.regex.Pattern;
import static java.lang.String.*;
/**
* Base class for the transport/template and proxy examples. It orchestrates application flow, and
* handles result(s) from the server. The request execution is implemented in the derived classes, in a way
* specific for each API abstration. This separation of concerns makes it is easy to compare complexity of different APIs.
*
* @author Tomasz Bak
*/
public abstract class AbstractRxMovieClient {
    protected static final String TEST_USER = "user1";
    protected static final Pattern NEW_LINE_SPLIT_RE = Pattern.compile("\n");
    /** Issues the movie-registration requests; one observable per movie. */
    protected abstract Observable<ByteBuf>[] triggerMoviesRegistration();
    /** Issues the recommendation-update requests; one observable per update. */
    protected abstract Observable<ByteBuf>[] triggerRecommendationsUpdate();
    /** Issues the catalog-search requests; one observable per query. */
    protected abstract Observable<ByteBuf>[] triggerRecommendationsSearch();
    // Runs all registration requests sequentially as one observable.
    protected Observable<ByteBuf> registerMovies() {
        return Observable.concat(Observable.from(triggerMoviesRegistration()));
    }
    // Runs all recommendation updates sequentially as one observable.
    protected Observable<ByteBuf> updateRecommendations() {
        return Observable.concat(Observable.from(triggerRecommendationsUpdate()));
    }
    /**
     * Runs the search queries, parses each newline-separated reply body into a
     * movie list, and zips each result with its human-readable query label for
     * printing. Completes with no emitted value (Void).
     */
    protected Observable<Void> searchCatalog() {
        // Labels for the three search queries, printed next to their results.
        List<String> searches = new ArrayList<String>(2);
        Collections.addAll(searches, "findById", "findRawMovieById", "findMovie(name, category)");
        return Observable
                .concat(Observable.from(triggerRecommendationsSearch()))
                .flatMap(new Func1<ByteBuf, Observable<List<Movie>>>() {
                    @Override
                    public Observable<List<Movie>> call(ByteBuf byteBuf) {
                        // Each reply body is one movie per line.
                        List<Movie> movies = new ArrayList<Movie>();
                        String lines = byteBuf.toString(Charset.defaultCharset());
                        for (String line : NEW_LINE_SPLIT_RE.split(lines)) {
                            movies.add(Movie.from(line));
                        }
                        return Observable.just(movies);
                    }
                })
                .zipWith(searches, new Func2<List<Movie>, String, Void>() {
                    @Override
                    public Void call(List<Movie> movies, String query) {
                        System.out.println(format("    %s=%s", query, movies));
                        return null;
                    }
                });
    }
    /**
     * Entry point used by the example tests: runs the whole register/update/
     * search flow and reports success/failure, always shutting down afterwards.
     *
     * @return true if every stage completed without error
     */
    public boolean runExample() {
        boolean allGood = true;
        try {
            System.out.println("Registering movies...");
            Notification<Void> result = executeServerCalls();
            allGood = !result.isOnError();
            if (allGood) {
                System.out.println("Application finished");
            } else {
                System.err.println("ERROR: execution failure");
                result.getThrowable().printStackTrace();
            }
        } catch (Exception e) {
            e.printStackTrace();
            allGood = false;
        } finally {
            shutdown();
        }
        return allGood;
    }
    // Chains the three stages; materialize() turns errors into Notifications so
    // each stage can be checked before the next one starts.
    Notification<Void> executeServerCalls() {
        Observable<Void> resultObservable = registerMovies().materialize().flatMap(
                new Func1<Notification<ByteBuf>, Observable<Void>>() {
                    @Override
                    public Observable<Void> call(Notification<ByteBuf> notif) {
                        if (!verifyStatus(notif)) {
                            return Observable.error(notif.getThrowable());
                        }
                        System.out.print("Updating user recommendations...");
                        return updateRecommendations().materialize().flatMap(
                                new Func1<Notification<ByteBuf>, Observable<Void>>() {
                                    @Override
                                    public Observable<Void> call(Notification<ByteBuf> notif) {
                                        if (!verifyStatus(notif)) {
                                            return Observable.error(notif.getThrowable());
                                        }
                                        System.out.println("Searching through the movie catalog...");
                                        return searchCatalog();
                                    }
                                });
                    }
                }
        );
        return resultObservable.materialize().toBlocking().last();
    }
    // Clears Hystrix's shared timer threads so the JVM can exit cleanly.
    protected void shutdown() {
        HystrixTimer.reset();
    }
    // Logs and reports whether a stage's terminal notification was an error.
    private static boolean verifyStatus(Notification<ByteBuf> notif) {
        if (notif.isOnError()) {
            System.out.println("ERROR");
            return false;
        }
        System.out.println("DONE");
        return true;
    }
}
| 3,393 |
0 | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/rx/RxMovieServer.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.examples.rx;
import java.nio.charset.Charset;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.netflix.ribbon.examples.rx.common.Movie;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.UnpooledByteBufAllocator;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.logging.LogLevel;
import io.netty.util.internal.ConcurrentSet;
import io.reactivex.netty.RxNetty;
import io.reactivex.netty.pipeline.PipelineConfigurators;
import io.reactivex.netty.protocol.http.server.HttpServer;
import io.reactivex.netty.protocol.http.server.HttpServerRequest;
import io.reactivex.netty.protocol.http.server.HttpServerResponse;
import io.reactivex.netty.protocol.http.server.RequestHandler;
import rx.Observable;
import rx.functions.Func1;
import static java.lang.String.*;
/**
* The client examples assume that the movie server runs on the default port 8080.
*
* @author Tomasz Bak
*/
public class RxMovieServer {
public static final int DEFAULT_PORT = 8080;
private static final Pattern USER_RECOMMENDATIONS_PATH_RE = Pattern.compile(".*/users/([^/]*)/recommendations");
private final int port;
final Map<String, Movie> movies = new ConcurrentHashMap<String, Movie>();
final Map<String, Set<String>> userRecommendations = new ConcurrentHashMap<String, Set<String>>();
public RxMovieServer(int port) {
this.port = port;
}
public HttpServer<ByteBuf, ByteBuf> createServer() {
HttpServer<ByteBuf, ByteBuf> server = RxNetty.newHttpServerBuilder(port, new RequestHandler<ByteBuf, ByteBuf>() {
@Override
public Observable<Void> handle(HttpServerRequest<ByteBuf> request, final HttpServerResponse<ByteBuf> response) {
if (request.getPath().contains("/users")) {
if (request.getHttpMethod().equals(HttpMethod.GET)) {
return handleRecommendationsByUserId(request, response);
} else {
return handleUpdateRecommendationsForUser(request, response);
}
}
if (request.getPath().contains("/recommendations")) {
return handleRecommendationsBy(request, response);
}
if (request.getPath().contains("/movies")) {
return handleRegisterMovie(request, response);
}
response.setStatus(HttpResponseStatus.NOT_FOUND);
return response.close();
}
}).pipelineConfigurator(PipelineConfigurators.<ByteBuf, ByteBuf>httpServerConfigurator()).enableWireLogging(LogLevel.ERROR).build();
System.out.println("RxMovie server started...");
return server;
}
private Observable<Void> handleRecommendationsByUserId(HttpServerRequest<ByteBuf> request, HttpServerResponse<ByteBuf> response) {
System.out.println("HTTP request -> recommendations by user id request: " + request.getPath());
final String userId = userIdFromPath(request.getPath());
if (userId == null) {
response.setStatus(HttpResponseStatus.BAD_REQUEST);
return response.close();
}
if (!userRecommendations.containsKey(userId)) {
response.setStatus(HttpResponseStatus.NOT_FOUND);
return response.close();
}
StringBuilder builder = new StringBuilder();
for (String movieId : userRecommendations.get(userId)) {
System.out.println(" returning: " + movies.get(movieId));
builder.append(movies.get(movieId)).append('\n');
}
ByteBuf byteBuf = UnpooledByteBufAllocator.DEFAULT.buffer();
byteBuf.writeBytes(builder.toString().getBytes(Charset.defaultCharset()));
response.write(byteBuf);
return response.close();
}
private Observable<Void> handleRecommendationsBy(HttpServerRequest<ByteBuf> request, HttpServerResponse<ByteBuf> response) {
System.out.println(format("HTTP request -> recommendations by multiple criteria: %s?%s", request.getPath(), request.getQueryString()));
List<String> category = request.getQueryParameters().get("category");
List<String> ageGroup = request.getQueryParameters().get("ageGroup");
if (category.isEmpty() || ageGroup.isEmpty()) {
response.setStatus(HttpResponseStatus.BAD_REQUEST);
return response.close();
}
boolean any = false;
StringBuilder builder = new StringBuilder();
for (Movie movie : movies.values()) {
if (movie.getCategory().equals(category.get(0)) && movie.getAgeGroup().equals(ageGroup.get(0))) {
System.out.println(" returning: " + movie);
builder.append(movie).append('\n');
any = true;
}
}
if (!any) {
System.out.println("No movie matched the given criteria:");
for (Movie movie : movies.values()) {
System.out.print(" ");
System.out.println(movie);
}
}
ByteBuf byteBuf = UnpooledByteBufAllocator.DEFAULT.buffer();
byteBuf.writeBytes(builder.toString().getBytes(Charset.defaultCharset()));
response.write(byteBuf);
return response.close();
}
private Observable<Void> handleUpdateRecommendationsForUser(HttpServerRequest<ByteBuf> request, final HttpServerResponse<ByteBuf> response) {
System.out.println("HTTP request -> update recommendations for user: " + request.getPath());
final String userId = userIdFromPath(request.getPath());
if (userId == null) {
response.setStatus(HttpResponseStatus.BAD_REQUEST);
return response.close();
}
return request.getContent().flatMap(new Func1<ByteBuf, Observable<Void>>() {
@Override
public Observable<Void> call(ByteBuf byteBuf) {
String movieId = byteBuf.toString(Charset.defaultCharset());
System.out.println(format(" updating: {user=%s, movie=%s}", userId, movieId));
synchronized (this) {
Set<String> recommendations;
if (userRecommendations.containsKey(userId)) {
recommendations = userRecommendations.get(userId);
} else {
recommendations = new ConcurrentSet<String>();
userRecommendations.put(userId, recommendations);
}
recommendations.add(movieId);
}
response.setStatus(HttpResponseStatus.OK);
return response.close();
}
});
}
private Observable<Void> handleRegisterMovie(HttpServerRequest<ByteBuf> request, final HttpServerResponse<ByteBuf> response) {
System.out.println("Http request -> register movie: " + request.getPath());
return request.getContent().flatMap(new Func1<ByteBuf, Observable<Void>>() {
@Override
public Observable<Void> call(ByteBuf byteBuf) {
String formatted = byteBuf.toString(Charset.defaultCharset());
System.out.println(" movie: " + formatted);
try {
Movie movie = Movie.from(formatted);
movies.put(movie.getId(), movie);
response.setStatus(HttpResponseStatus.CREATED);
} catch (Exception e) {
System.err.println("Invalid movie content");
e.printStackTrace();
response.setStatus(HttpResponseStatus.BAD_REQUEST);
}
return response.close();
}
});
}
private static String userIdFromPath(String path) {
Matcher matcher = USER_RECOMMENDATIONS_PATH_RE.matcher(path);
return matcher.matches() ? matcher.group(1) : null;
}
public static void main(final String[] args) {
new RxMovieServer(DEFAULT_PORT).createServer().startAndWait();
}
}
| 3,394 |
0 | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/rx | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/rx/transport/RxMovieTransportExample.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.examples.rx.transport;
import com.netflix.client.config.CommonClientConfigKey;
import com.netflix.client.config.IClientConfig;
import com.netflix.ribbon.transport.netty.RibbonTransport;
import com.netflix.ribbon.transport.netty.http.LoadBalancingHttpClient;
import com.netflix.ribbon.examples.rx.AbstractRxMovieClient;
import com.netflix.ribbon.examples.rx.RxMovieServer;
import com.netflix.ribbon.examples.rx.common.Movie;
import com.netflix.ribbon.examples.rx.common.RxMovieTransformer;
import io.netty.buffer.ByteBuf;
import io.reactivex.netty.channel.StringTransformer;
import io.reactivex.netty.protocol.http.client.HttpClientRequest;
import io.reactivex.netty.protocol.http.client.HttpClientResponse;
import rx.Observable;
import rx.functions.Func1;
import static java.lang.String.*;
/**
* Run {@link com.netflix.ribbon.examples.rx.RxMovieServer} prior to runnng this example!
*
* @author Tomasz Bak
*/
public class RxMovieTransportExample extends AbstractRxMovieClient {

    private final LoadBalancingHttpClient<ByteBuf, ByteBuf> client;

    // Shared response handler: completes empty on a 2xx status, errors otherwise.
    // Replaces four duplicated anonymous Func1 bodies in the original.
    private static final Func1<HttpClientResponse<ByteBuf>, Observable<Void>> EXPECT_SUCCESS =
            new Func1<HttpClientResponse<ByteBuf>, Observable<Void>>() {
                @Override
                public Observable<Void> call(HttpClientResponse<ByteBuf> httpClientResponse) {
                    if (!isSuccess(httpClientResponse)) {
                        return RxMovieTransportExample.<Void>httpError(httpClientResponse);
                    }
                    return Observable.empty();
                }
            };

    // Shared response handler: emits the response content on a 2xx status, errors otherwise.
    private static final Func1<HttpClientResponse<ByteBuf>, Observable<ByteBuf>> EXPECT_CONTENT =
            new Func1<HttpClientResponse<ByteBuf>, Observable<ByteBuf>>() {
                @Override
                public Observable<ByteBuf> call(HttpClientResponse<ByteBuf> httpClientResponse) {
                    if (!isSuccess(httpClientResponse)) {
                        return RxMovieTransportExample.<ByteBuf>httpError(httpClientResponse);
                    }
                    return httpClientResponse.getContent();
                }
            };

    public RxMovieTransportExample(int port) {
        IClientConfig clientConfig = IClientConfig.Builder.newBuilder("movieServiceClient").build();
        clientConfig.set(CommonClientConfigKey.ListOfServers, "localhost:" + port);
        client = RibbonTransport.newHttpClient(clientConfig);
    }

    // True for any 2xx HTTP status.
    private static boolean isSuccess(HttpClientResponse<ByteBuf> response) {
        return response.getStatus().code() / 100 == 2;
    }

    // Builds the common error signal for non-2xx responses, preserving the status
    // code in the exception message (message text identical to the original).
    private static <T> Observable<T> httpError(HttpClientResponse<ByteBuf> response) {
        return Observable.error(new RuntimeException(
                format("HTTP request failed (status code=%s)", response.getStatus())));
    }

    @SuppressWarnings("unchecked")
    @Override
    protected Observable<ByteBuf>[] triggerMoviesRegistration() {
        return new Observable[]{
                registerMovie(Movie.ORANGE_IS_THE_NEW_BLACK),
                registerMovie(Movie.BREAKING_BAD),
                registerMovie(Movie.HOUSE_OF_CARDS)
        };
    }

    // POST /movies with the movie serialized by RxMovieTransformer.
    private Observable<Void> registerMovie(Movie movie) {
        HttpClientRequest<ByteBuf> httpRequest = HttpClientRequest.createPost("/movies")
                .withHeader("X-Platform-Version", "xyz")
                .withHeader("X-Auth-Token", "abc")
                .withRawContentSource(Observable.just(movie), new RxMovieTransformer());
        return client.submit(httpRequest).flatMap(EXPECT_SUCCESS);
    }

    @SuppressWarnings("unchecked")
    @Override
    protected Observable<ByteBuf>[] triggerRecommendationsUpdate() {
        return new Observable[]{
                updateRecommendation(TEST_USER, Movie.ORANGE_IS_THE_NEW_BLACK),
                updateRecommendation(TEST_USER, Movie.BREAKING_BAD)
        };
    }

    // POST /users/{user}/recommendations with the movie id as a plain string body.
    private Observable<Void> updateRecommendation(String user, Movie movie) {
        HttpClientRequest<ByteBuf> httpRequest = HttpClientRequest.createPost(format("/users/%s/recommendations", user))
                .withHeader("X-Platform-Version", "xyz")
                .withHeader("X-Auth-Token", "abc")
                .withRawContentSource(Observable.just(movie.getId()), new StringTransformer());
        return client.submit(httpRequest).flatMap(EXPECT_SUCCESS);
    }

    // Searches recommendations both by user id and by category/age-group criteria.
    @SuppressWarnings("unchecked")
    @Override
    protected Observable<ByteBuf>[] triggerRecommendationsSearch() {
        HttpClientRequest<ByteBuf> byUserRequest = HttpClientRequest.createGet(format("/users/%s/recommendations", TEST_USER))
                .withHeader("X-Platform-Version", "xyz")
                .withHeader("X-Auth-Token", "abc");
        HttpClientRequest<ByteBuf> byCriteriaRequest = HttpClientRequest.createGet("/recommendations?category=Drama&ageGroup=Adults")
                .withHeader("X-Platform-Version", "xyz")
                .withHeader("X-Auth-Token", "abc");
        return new Observable[]{
                client.submit(byUserRequest).flatMap(EXPECT_CONTENT),
                client.submit(byCriteriaRequest).flatMap(EXPECT_CONTENT)
        };
    }

    public static void main(String[] args) {
        System.out.println("Starting transport based movie service...");
        new RxMovieTransportExample(RxMovieServer.DEFAULT_PORT).runExample();
    }
}
| 3,395 |
0 | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/rx | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/rx/proxy/MovieService.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.examples.rx.proxy;
import com.netflix.ribbon.RibbonRequest;
import com.netflix.ribbon.examples.rx.common.InMemoryCacheProviderFactory;
import com.netflix.ribbon.examples.rx.common.Movie;
import com.netflix.ribbon.examples.rx.common.RecommendationServiceFallbackHandler;
import com.netflix.ribbon.examples.rx.common.RecommendationServiceResponseValidator;
import com.netflix.ribbon.examples.rx.common.RxMovieTransformer;
import com.netflix.ribbon.proxy.annotation.CacheProvider;
import com.netflix.ribbon.proxy.annotation.ClientProperties;
import com.netflix.ribbon.proxy.annotation.ClientProperties.Property;
import com.netflix.ribbon.proxy.annotation.Content;
import com.netflix.ribbon.proxy.annotation.ContentTransformerClass;
import com.netflix.ribbon.proxy.annotation.Http;
import com.netflix.ribbon.proxy.annotation.Http.Header;
import com.netflix.ribbon.proxy.annotation.Http.HttpMethod;
import com.netflix.ribbon.proxy.annotation.Hystrix;
import com.netflix.ribbon.proxy.annotation.TemplateName;
import com.netflix.ribbon.proxy.annotation.Var;
import io.netty.buffer.ByteBuf;
/**
* @author Tomasz Bak
*/
@ClientProperties(properties = {
        @Property(name="ReadTimeout", value="2000"),
        @Property(name="ConnectTimeout", value="1000"),
        @Property(name="MaxAutoRetriesNextServer", value="2")
}, exportToArchaius = true)
public interface MovieService {

    /**
     * GET /users/{userId}/recommendations.
     * Responses are cached per user id via {@link InMemoryCacheProviderFactory}; the
     * declared Hystrix fallback handler supplies a response when the call fails or
     * does not pass validation.
     */
    @TemplateName("recommendationsByUserId")
    @Http(
            method = HttpMethod.GET,
            uri = "/users/{userId}/recommendations",
            headers = {
                    @Header(name = "X-Platform-Version", value = "xyz"),
                    @Header(name = "X-Auth-Token", value = "abc")
            })
    @Hystrix(
            validator = RecommendationServiceResponseValidator.class,
            fallbackHandler = RecommendationServiceFallbackHandler.class)
    @CacheProvider(key = "{userId}", provider = InMemoryCacheProviderFactory.class)
    RibbonRequest<ByteBuf> recommendationsByUserId(@Var("userId") String userId);

    /**
     * GET /recommendations filtered by category and age group; cached on the
     * combined (category, ageGroup) key, with the same validator/fallback pair.
     */
    @TemplateName("recommendationsBy")
    @Http(
            method = HttpMethod.GET,
            uri = "/recommendations?category={category}&ageGroup={ageGroup}",
            headers = {
                    @Header(name = "X-Platform-Version", value = "xyz"),
                    @Header(name = "X-Auth-Token", value = "abc")
            })
    @Hystrix(
            validator = RecommendationServiceResponseValidator.class,
            fallbackHandler = RecommendationServiceFallbackHandler.class)
    @CacheProvider(key = "{category},{ageGroup}", provider = InMemoryCacheProviderFactory.class)
    RibbonRequest<ByteBuf> recommendationsBy(@Var("category") String category, @Var("ageGroup") String ageGroup);

    /**
     * POST /movies; the {@link Movie} body is serialized by {@link RxMovieTransformer}.
     * No fallback handler is declared, so a failed registration surfaces as an error.
     */
    @TemplateName("registerMovie")
    @Http(
            method = HttpMethod.POST,
            uri = "/movies",
            headers = {
                    @Header(name = "X-Platform-Version", value = "xyz"),
                    @Header(name = "X-Auth-Token", value = "abc")
            })
    @Hystrix(validator = RecommendationServiceResponseValidator.class)
    @ContentTransformerClass(RxMovieTransformer.class)
    RibbonRequest<ByteBuf> registerMovie(@Content Movie movie);

    /**
     * POST /users/{userId}/recommendations with the raw movie id string as the body.
     */
    @TemplateName("updateRecommendations")
    @Http(
            method = HttpMethod.POST,
            uri = "/users/{userId}/recommendations",
            headers = {
                    @Header(name = "X-Platform-Version", value = "xyz"),
                    @Header(name = "X-Auth-Token", value = "abc")
            })
    @Hystrix(validator = RecommendationServiceResponseValidator.class)
    RibbonRequest<ByteBuf> updateRecommendations(@Var("userId") String userId, @Content String movieId);
}
| 3,396 |
0 | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/rx | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/rx/proxy/RxMovieProxyExample.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.examples.rx.proxy;
import com.netflix.client.config.CommonClientConfigKey;
import com.netflix.config.ConfigurationManager;
import com.netflix.ribbon.Ribbon;
import com.netflix.ribbon.examples.rx.AbstractRxMovieClient;
import com.netflix.ribbon.examples.rx.RxMovieServer;
import com.netflix.ribbon.examples.rx.common.Movie;
import com.netflix.ribbon.proxy.ProxyLifeCycle;
import io.netty.buffer.ByteBuf;
import rx.Observable;
/**
* Run {@link com.netflix.ribbon.examples.rx.RxMovieServer} prior to running this example!
*
* @author Tomasz Bak
*/
public class RxMovieProxyExample extends AbstractRxMovieClient {

    private final MovieService movieService;

    public RxMovieProxyExample(int port) {
        // Configure the named Ribbon client via Archaius before generating the proxy:
        // up to 3 retries on the next server, targeting the local movie server.
        ConfigurationManager.getConfigInstance().setProperty("MovieService.ribbon." + CommonClientConfigKey.MaxAutoRetriesNextServer, "3");
        ConfigurationManager.getConfigInstance().setProperty("MovieService.ribbon." + CommonClientConfigKey.ListOfServers, "localhost:" + port);
        movieService = Ribbon.from(MovieService.class);
    }

    @SuppressWarnings("unchecked")
    @Override
    protected Observable<ByteBuf>[] triggerMoviesRegistration() {
        // One registration request per movie in the example catalog.
        Observable<ByteBuf> orange = movieService.registerMovie(Movie.ORANGE_IS_THE_NEW_BLACK).toObservable();
        Observable<ByteBuf> breakingBad = movieService.registerMovie(Movie.BREAKING_BAD).toObservable();
        Observable<ByteBuf> houseOfCards = movieService.registerMovie(Movie.HOUSE_OF_CARDS).toObservable();
        return new Observable[]{orange, breakingBad, houseOfCards};
    }

    @SuppressWarnings("unchecked")
    @Override
    protected Observable<ByteBuf>[] triggerRecommendationsUpdate() {
        // Recommend two of the registered movies to the test user.
        Observable<ByteBuf> first = movieService.updateRecommendations(TEST_USER, Movie.ORANGE_IS_THE_NEW_BLACK.getId()).toObservable();
        Observable<ByteBuf> second = movieService.updateRecommendations(TEST_USER, Movie.BREAKING_BAD.getId()).toObservable();
        return new Observable[]{first, second};
    }

    @SuppressWarnings("unchecked")
    @Override
    protected Observable<ByteBuf>[] triggerRecommendationsSearch() {
        // Query recommendations both by user id and by category/age-group criteria.
        Observable<ByteBuf> byUser = movieService.recommendationsByUserId(TEST_USER).toObservable();
        Observable<ByteBuf> byCriteria = movieService.recommendationsBy("Drama", "Adults").toObservable();
        return new Observable[]{byUser, byCriteria};
    }

    @Override
    public void shutdown() {
        super.shutdown();
        // The generated proxy also implements ProxyLifeCycle; shut it down so its
        // underlying client resources are released.
        ((ProxyLifeCycle) movieService).shutdown();
    }

    public static void main(String[] args) {
        System.out.println("Starting proxy based movie service...");
        new RxMovieProxyExample(RxMovieServer.DEFAULT_PORT).runExample();
    }
}
| 3,397 |
0 | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/rx | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/rx/template/RxMovieTemplateExample.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.examples.rx.template;
import com.netflix.ribbon.ClientOptions;
import com.netflix.ribbon.Ribbon;
import com.netflix.ribbon.examples.rx.AbstractRxMovieClient;
import com.netflix.ribbon.examples.rx.RxMovieServer;
import com.netflix.ribbon.examples.rx.common.Movie;
import com.netflix.ribbon.examples.rx.common.RecommendationServiceFallbackHandler;
import com.netflix.ribbon.examples.rx.common.RecommendationServiceResponseValidator;
import com.netflix.ribbon.examples.rx.common.RxMovieTransformer;
import com.netflix.ribbon.http.HttpRequestTemplate;
import com.netflix.ribbon.http.HttpResourceGroup;
import io.netty.buffer.ByteBuf;
import io.reactivex.netty.channel.StringTransformer;
import rx.Observable;
/**
* Run {@link com.netflix.ribbon.examples.rx.RxMovieServer} prior to runnng this example!
*
* @author Tomasz Bak
*/
public class RxMovieTemplateExample extends AbstractRxMovieClient {

    private final HttpResourceGroup httpResourceGroup;
    // Reusable request templates, one per movie-service endpoint; concrete requests
    // are built from these in the trigger* methods below.
    private final HttpRequestTemplate<ByteBuf> registerMovieTemplate;
    private final HttpRequestTemplate<ByteBuf> updateRecommendationTemplate;
    private final HttpRequestTemplate<ByteBuf> recommendationsByUserIdTemplate;
    private final HttpRequestTemplate<ByteBuf> recommendationsByTemplate;

    public RxMovieTemplateExample(int port) {
        // Resource group targeting the local movie server, retrying up to 3 times on
        // the next server. All templates share the same platform/auth headers and
        // response validator; the two GET templates also declare a fallback provider.
        httpResourceGroup = Ribbon.createHttpResourceGroup("movieServiceClient",
                ClientOptions.create()
                        .withMaxAutoRetriesNextServer(3)
                        .withConfigurationBasedServerList("localhost:" + port));
        // POST /movies.
        registerMovieTemplate = httpResourceGroup.newTemplateBuilder("registerMovie", ByteBuf.class)
                .withMethod("POST")
                .withUriTemplate("/movies")
                .withHeader("X-Platform-Version", "xyz")
                .withHeader("X-Auth-Token", "abc")
                .withResponseValidator(new RecommendationServiceResponseValidator()).build();
        // POST /users/{userId}/recommendations.
        updateRecommendationTemplate = httpResourceGroup.newTemplateBuilder("updateRecommendation", ByteBuf.class)
                .withMethod("POST")
                .withUriTemplate("/users/{userId}/recommendations")
                .withHeader("X-Platform-Version", "xyz")
                .withHeader("X-Auth-Token", "abc")
                .withResponseValidator(new RecommendationServiceResponseValidator()).build();
        // GET /users/{userId}/recommendations.
        recommendationsByUserIdTemplate = httpResourceGroup.newTemplateBuilder("recommendationsByUserId", ByteBuf.class)
                .withMethod("GET")
                .withUriTemplate("/users/{userId}/recommendations")
                .withHeader("X-Platform-Version", "xyz")
                .withHeader("X-Auth-Token", "abc")
                .withFallbackProvider(new RecommendationServiceFallbackHandler())
                .withResponseValidator(new RecommendationServiceResponseValidator()).build();
        // GET /recommendations searched by category and age group.
        recommendationsByTemplate = httpResourceGroup.newTemplateBuilder("recommendationsBy", ByteBuf.class)
                .withMethod("GET")
                .withUriTemplate("/recommendations?category={category}&ageGroup={ageGroup}")
                .withHeader("X-Platform-Version", "xyz")
                .withHeader("X-Auth-Token", "abc")
                .withFallbackProvider(new RecommendationServiceFallbackHandler())
                .withResponseValidator(new RecommendationServiceResponseValidator()).build();
    }

    // Registers the three example movies; each request attaches its movie as raw
    // content serialized by RxMovieTransformer.
    @SuppressWarnings("unchecked")
    @Override
    protected Observable<ByteBuf>[] triggerMoviesRegistration() {
        return new Observable[]{
                registerMovieTemplate.requestBuilder()
                        .withRawContentSource(Observable.just(Movie.ORANGE_IS_THE_NEW_BLACK), new RxMovieTransformer())
                        .build().toObservable(),
                registerMovieTemplate.requestBuilder()
                        .withRawContentSource(Observable.just(Movie.BREAKING_BAD), new RxMovieTransformer())
                        .build().toObservable(),
                registerMovieTemplate.requestBuilder()
                        .withRawContentSource(Observable.just(Movie.HOUSE_OF_CARDS), new RxMovieTransformer())
                        .build().toObservable()
        };
    }

    // Adds two recommendations for the test user; the movie id is sent as a plain
    // string body and the {userId} template variable is filled per request.
    @SuppressWarnings("unchecked")
    @Override
    protected Observable<ByteBuf>[] triggerRecommendationsUpdate() {
        return new Observable[]{
                updateRecommendationTemplate.requestBuilder()
                        .withRawContentSource(Observable.just(Movie.ORANGE_IS_THE_NEW_BLACK.getId()), new StringTransformer())
                        .withRequestProperty("userId", TEST_USER)
                        .build().toObservable(),
                updateRecommendationTemplate.requestBuilder()
                        .withRawContentSource(Observable.just(Movie.BREAKING_BAD.getId()), new StringTransformer())
                        .withRequestProperty("userId", TEST_USER)
                        .build().toObservable()
        };
    }

    // Looks up recommendations both by user id and by category/age-group criteria.
    @SuppressWarnings("unchecked")
    @Override
    protected Observable<ByteBuf>[] triggerRecommendationsSearch() {
        return new Observable[]{
                recommendationsByUserIdTemplate.requestBuilder()
                        .withRequestProperty("userId", TEST_USER)
                        .build().toObservable(),
                recommendationsByTemplate.requestBuilder()
                        .withRequestProperty("category", "Drama")
                        .withRequestProperty("ageGroup", "Adults")
                        .build().toObservable()
        };
    }

    public static void main(String[] args) {
        System.out.println("Starting templates based movie service...");
        new RxMovieTemplateExample(RxMovieServer.DEFAULT_PORT).runExample();
    }
}
| 3,398 |
0 | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/rx | Create_ds/ribbon/ribbon-examples/src/main/java/com/netflix/ribbon/examples/rx/common/Recommendations.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.examples.rx.common;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* @author Tomasz Bak
*/
/**
 * Immutable list of movie recommendations with a textual representation of the form
 * {@code {movies=[{...}, {...}]}}, parseable back via {@link #from(String)}.
 */
public class Recommendations {

    // Validates the overall shape of a formatted recommendations string:
    // {movies=[{...}, {...}, ...]} where the movie list may be empty.
    private static final Pattern FORMAT_RE = Pattern.compile("\\{movies=\\[(\\{[^\\}]*\\})?(, \\{[^\\}]*\\})*\\]\\}");

    private final List<Movie> movies;

    public Recommendations(List<Movie> movies) {
        // Wrap defensively; getMovies() hands out an unmodifiable view.
        this.movies = Collections.unmodifiableList(movies);
    }

    public List<Movie> getMovies() {
        return movies;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        Recommendations that = (Recommendations) o;
        if (movies != null ? !movies.equals(that.movies) : that.movies != null) return false;
        return true;
    }

    @Override
    public int hashCode() {
        return movies != null ? movies.hashCode() : 0;
    }

    @Override
    public String toString() {
        return "{movies=" + movies + '}';
    }

    /**
     * Parses a string in the format produced by {@link #toString()}.
     *
     * @throws IllegalArgumentException when the input does not match the expected format
     */
    public static Recommendations from(String formatted) {
        Matcher matcher = FORMAT_RE.matcher(formatted);
        if (!matcher.matches()) {
            throw new IllegalArgumentException("Syntax error in recommendations string: " + formatted);
        }
        List<Movie> movies = new ArrayList<Movie>();
        // The regex above only validates the shape. Iterating over matcher groups (as
        // the original code did) is wrong: a Java repeated group captures only its
        // LAST occurrence, silently dropping middle entries for lists of 3+ movies,
        // and group(1) is null for an empty list, causing an NPE. Instead, split the
        // bracketed list body on the "}, " entry separator.
        String listBody = formatted.substring(formatted.indexOf('[') + 1, formatted.lastIndexOf(']'));
        if (!listBody.isEmpty()) {
            for (String movie : listBody.split("(?<=\\}), ")) {
                movies.add(Movie.from(movie));
            }
        }
        return new Recommendations(movies);
    }
}
| 3,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.