index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/linalg/JavaMatricesSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.linalg;
import java.util.Random;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
public class JavaMatricesSuite {

  /**
   * The random factories on {@link Matrices} must produce the same data as the
   * corresponding {@link DenseMatrix}/{@link SparseMatrix} factories when the
   * RNG is reseeded identically before each call.
   */
  @Test
  public void randMatrixConstruction() {
    Random rng = new Random(24);
    Matrix r = Matrices.rand(3, 4, rng);
    rng.setSeed(24); // reset so both factories consume an identical random stream
    DenseMatrix dr = DenseMatrix.rand(3, 4, rng);
    assertArrayEquals(r.toArray(), dr.toArray(), 0.0);

    rng.setSeed(24);
    Matrix rn = Matrices.randn(3, 4, rng);
    rng.setSeed(24);
    DenseMatrix drn = DenseMatrix.randn(3, 4, rng);
    assertArrayEquals(rn.toArray(), drn.toArray(), 0.0);

    rng.setSeed(24);
    Matrix s = Matrices.sprand(3, 4, 0.5, rng);
    rng.setSeed(24);
    SparseMatrix sr = SparseMatrix.sprand(3, 4, 0.5, rng);
    assertArrayEquals(s.toArray(), sr.toArray(), 0.0);

    rng.setSeed(24);
    Matrix sn = Matrices.sprandn(3, 4, 0.5, rng);
    rng.setSeed(24);
    SparseMatrix srn = SparseMatrix.sprandn(3, 4, 0.5, rng);
    assertArrayEquals(sn.toArray(), srn.toArray(), 0.0);
  }

  /** Dense and sparse identity factories agree and hold the expected values. */
  @Test
  public void identityMatrixConstruction() {
    Matrix r = Matrices.eye(2);
    DenseMatrix dr = DenseMatrix.eye(2);
    SparseMatrix sr = SparseMatrix.speye(2);
    assertArrayEquals(r.toArray(), dr.toArray(), 0.0);
    assertArrayEquals(sr.toArray(), dr.toArray(), 0.0);
    // Fixed: JUnit's assertArrayEquals takes the expected array FIRST.
    assertArrayEquals(new double[]{1.0, 0.0, 0.0, 1.0}, r.toArray(), 0.0);
  }

  /**
   * Diagonal matrices built from dense and sparse vectors must agree across all
   * four factory methods, and the sparse forms must store only the non-zeros.
   */
  @Test
  public void diagonalMatrixConstruction() {
    Vector v = Vectors.dense(1.0, 0.0, 2.0);
    Vector sv = Vectors.sparse(3, new int[]{0, 2}, new double[]{1.0, 2.0});
    Matrix m = Matrices.diag(v);
    Matrix sm = Matrices.diag(sv);
    DenseMatrix d = DenseMatrix.diag(v);
    DenseMatrix sd = DenseMatrix.diag(sv);
    SparseMatrix s = SparseMatrix.spdiag(v);
    SparseMatrix ss = SparseMatrix.spdiag(sv);
    assertArrayEquals(m.toArray(), sm.toArray(), 0.0);
    assertArrayEquals(d.toArray(), sm.toArray(), 0.0);
    assertArrayEquals(d.toArray(), sd.toArray(), 0.0);
    assertArrayEquals(sd.toArray(), s.toArray(), 0.0);
    assertArrayEquals(s.values(), ss.values(), 0.0);
    assertArrayEquals(s.toArray(), ss.toArray(), 0.0);
    // Only the two non-zero diagonal entries should be stored explicitly.
    assertEquals(2, s.values().length);
    assertEquals(2, ss.values().length);
    // CSC column pointers: numCols + 1 entries.
    assertEquals(4, s.colPtrs().length);
    assertEquals(4, ss.colPtrs().length);
  }

  /** zeros/ones factories on Matrices and DenseMatrix produce the expected data. */
  @Test
  public void zerosMatrixConstruction() {
    Matrix z = Matrices.zeros(2, 2);
    Matrix one = Matrices.ones(2, 2);
    DenseMatrix dz = DenseMatrix.zeros(2, 2);
    DenseMatrix done = DenseMatrix.ones(2, 2);
    // Fixed: expected array goes first in assertArrayEquals.
    assertArrayEquals(new double[]{0.0, 0.0, 0.0, 0.0}, z.toArray(), 0.0);
    assertArrayEquals(new double[]{0.0, 0.0, 0.0, 0.0}, dz.toArray(), 0.0);
    assertArrayEquals(new double[]{1.0, 1.0, 1.0, 1.0}, one.toArray(), 0.0);
    assertArrayEquals(new double[]{1.0, 1.0, 1.0, 1.0}, done.toArray(), 0.0);
  }

  /** toSparse()/toDense() round-trips preserve the matrix contents. */
  @Test
  public void sparseDenseConversion() {
    int m = 3;
    int n = 2;
    double[] values = new double[]{1.0, 2.0, 4.0, 5.0};
    double[] allValues = new double[]{1.0, 2.0, 0.0, 0.0, 4.0, 5.0};
    int[] colPtrs = new int[]{0, 2, 4};
    int[] rowIndices = new int[]{0, 1, 1, 2};
    SparseMatrix spMat1 = new SparseMatrix(m, n, colPtrs, rowIndices, values);
    DenseMatrix deMat1 = new DenseMatrix(m, n, allValues);
    SparseMatrix spMat2 = deMat1.toSparse();
    DenseMatrix deMat2 = spMat1.toDense();
    assertArrayEquals(spMat1.toArray(), spMat2.toArray(), 0.0);
    assertArrayEquals(deMat1.toArray(), deMat2.toArray(), 0.0);
  }

  /** horzcat/vertcat over all dense/sparse operand combinations give the right shapes. */
  @Test
  public void concatenateMatrices() {
    int m = 3;
    int n = 2;
    Random rng = new Random(42);
    SparseMatrix spMat1 = SparseMatrix.sprand(m, n, 0.5, rng);
    rng.setSeed(42);
    DenseMatrix deMat1 = DenseMatrix.rand(m, n, rng);
    Matrix deMat2 = Matrices.eye(3);
    Matrix spMat2 = Matrices.speye(3);
    Matrix deMat3 = Matrices.eye(2);
    Matrix spMat3 = Matrices.speye(2);

    // Horizontal: 3x2 next to 3x3 -> 3x5, regardless of storage format.
    Matrix spHorz = Matrices.horzcat(new Matrix[]{spMat1, spMat2});
    Matrix deHorz1 = Matrices.horzcat(new Matrix[]{deMat1, deMat2});
    Matrix deHorz2 = Matrices.horzcat(new Matrix[]{spMat1, deMat2});
    Matrix deHorz3 = Matrices.horzcat(new Matrix[]{deMat1, spMat2});
    assertEquals(3, deHorz1.numRows());
    assertEquals(3, deHorz2.numRows());
    assertEquals(3, deHorz3.numRows());
    assertEquals(3, spHorz.numRows());
    assertEquals(5, deHorz1.numCols());
    assertEquals(5, deHorz2.numCols());
    assertEquals(5, deHorz3.numCols());
    assertEquals(5, spHorz.numCols());

    // Vertical: 3x2 on top of 2x2 -> 5x2, regardless of storage format.
    Matrix spVert = Matrices.vertcat(new Matrix[]{spMat1, spMat3});
    Matrix deVert1 = Matrices.vertcat(new Matrix[]{deMat1, deMat3});
    Matrix deVert2 = Matrices.vertcat(new Matrix[]{spMat1, deMat3});
    Matrix deVert3 = Matrices.vertcat(new Matrix[]{deMat1, spMat3});
    assertEquals(5, deVert1.numRows());
    assertEquals(5, deVert2.numRows());
    assertEquals(5, deVert3.numRows());
    assertEquals(5, spVert.numRows());
    assertEquals(2, deVert1.numCols());
    assertEquals(2, deVert2.numCols());
    assertEquals(2, deVert3.numCols());
    assertEquals(2, spVert.numCols());
  }
}
| 9,700 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/linalg/JavaVectorsSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.linalg;
import java.util.Arrays;
import static org.junit.Assert.assertArrayEquals;
import scala.Tuple2;
import org.junit.Test;
public class JavaVectorsSuite {

  /** A dense vector built from varargs round-trips through toArray(). */
  @Test
  public void denseArrayConstruction() {
    Vector dense = Vectors.dense(1.0, 2.0, 3.0);
    double[] expected = {1.0, 2.0, 3.0};
    assertArrayEquals(expected, dense.toArray(), 0.0);
  }

  /** Sparse construction from (index, value) pairs fills unspecified slots with 0.0. */
  @Test
  public void sparseArrayConstruction() {
    @SuppressWarnings("unchecked")
    Vector sparse = Vectors.sparse(3, Arrays.asList(
        new Tuple2<>(0, 2.0),
        new Tuple2<>(2, 3.0)));
    double[] expected = {2.0, 0.0, 3.0};
    assertArrayEquals(expected, sparse.toArray(), 0.0);
  }
}
| 9,701 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/linalg | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/linalg/distributed/JavaRowMatrixSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.linalg.distributed;
import java.util.Arrays;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.linalg.Matrix;
import org.apache.spark.mllib.linalg.QRDecomposition;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
public class JavaRowMatrixSuite extends SharedSparkSession {
@Test
public void rowMatrixQRDecomposition() {
Vector v1 = Vectors.dense(1.0, 10.0, 100.0);
Vector v2 = Vectors.dense(2.0, 20.0, 200.0);
Vector v3 = Vectors.dense(3.0, 30.0, 300.0);
JavaRDD<Vector> rows = jsc.parallelize(Arrays.asList(v1, v2, v3), 1);
RowMatrix mat = new RowMatrix(rows.rdd());
QRDecomposition<RowMatrix, Matrix> result = mat.tallSkinnyQR(true);
}
}
| 9,702 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/util/JavaMLUtilsSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.util;
import java.util.Arrays;
import java.util.Collections;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.mllib.linalg.*;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
public class JavaMLUtilsSuite extends SharedSparkSession {

  /** Round-trips a Vector column between the mllib and ml linalg types. */
  @Test
  public void testConvertVectorColumnsToAndFromML() {
    Vector features = Vectors.dense(2.0);
    Dataset<Row> dataset = spark.createDataFrame(
      Collections.singletonList(new LabeledPoint(1.0, features)), LabeledPoint.class
    ).select("label", "features");

    // Converting every vector column and converting only the named column
    // must produce the same row.
    Dataset<Row> converted = MLUtils.convertVectorColumnsToML(dataset);
    Row firstConverted = converted.first();
    Assert.assertEquals(RowFactory.create(1.0, features.asML()), firstConverted);
    Row namedConverted = MLUtils.convertVectorColumnsToML(dataset, "features").first();
    Assert.assertEquals(firstConverted, namedConverted);

    // Converting back restores the original mllib vector.
    Row restored = MLUtils.convertVectorColumnsFromML(converted).first();
    Assert.assertEquals(RowFactory.create(1.0, features), restored);
  }

  /** Round-trips a Matrix column between the mllib and ml linalg types. */
  @Test
  public void testConvertMatrixColumnsToAndFromML() {
    Matrix features = Matrices.dense(2, 1, new double[]{1.0, 2.0});
    StructType schema = new StructType(new StructField[]{
      new StructField("label", DataTypes.DoubleType, false, Metadata.empty()),
      new StructField("features", new MatrixUDT(), false, Metadata.empty())
    });
    Dataset<Row> dataset = spark.createDataFrame(
      Arrays.asList(RowFactory.create(1.0, features)), schema);

    // Converting every matrix column and converting only the named column
    // must produce the same row.
    Dataset<Row> converted = MLUtils.convertMatrixColumnsToML(dataset);
    Row firstConverted = converted.first();
    Assert.assertEquals(RowFactory.create(1.0, features.asML()), firstConverted);
    Row namedConverted = MLUtils.convertMatrixColumnsToML(dataset, "features").first();
    Assert.assertEquals(firstConverted, namedConverted);

    // Converting back restores the original mllib matrix.
    Row restored = MLUtils.convertMatrixColumnsFromML(converted).first();
    Assert.assertEquals(RowFactory.create(1.0, features), restored);
  }
}
| 9,703 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaKMeansSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.clustering;
import java.util.Arrays;
import java.util.List;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
public class JavaKMeansSuite extends SharedSparkSession {

  /** Three points whose mean (the single-cluster center) is (1.0, 3.0, 4.0). */
  private static List<Vector> threePoints() {
    return Arrays.asList(
      Vectors.dense(1.0, 2.0, 6.0),
      Vectors.dense(1.0, 3.0, 0.0),
      Vectors.dense(1.0, 4.0, 6.0)
    );
  }

  /** Expected center of the single cluster over {@link #threePoints()}. */
  private static final Vector EXPECTED_CENTER = Vectors.dense(1.0, 3.0, 4.0);

  /** With k = 1 both initialization modes must recover the mean of the data. */
  @Test
  public void runKMeansUsingStaticMethods() {
    JavaRDD<Vector> data = jsc.parallelize(threePoints(), 2);
    KMeansModel model = KMeans.train(data.rdd(), 1, 1, 1, KMeans.K_MEANS_PARALLEL());
    assertEquals(1, model.clusterCenters().length);
    assertEquals(EXPECTED_CENTER, model.clusterCenters()[0]);
    model = KMeans.train(data.rdd(), 1, 1, 1, KMeans.RANDOM());
    assertEquals(EXPECTED_CENTER, model.clusterCenters()[0]);
  }

  /** Same check via the builder-style constructor API. */
  @Test
  public void runKMeansUsingConstructor() {
    JavaRDD<Vector> data = jsc.parallelize(threePoints(), 2);
    KMeansModel model = new KMeans().setK(1).setMaxIterations(5).run(data.rdd());
    assertEquals(1, model.clusterCenters().length);
    assertEquals(EXPECTED_CENTER, model.clusterCenters()[0]);
    model = new KMeans()
      .setK(1)
      .setMaxIterations(1)
      .setInitializationMode(KMeans.RANDOM())
      .run(data.rdd());
    assertEquals(EXPECTED_CENTER, model.clusterCenters()[0]);
  }

  /** The Java-friendly predict(JavaRDD) overload must execute end to end. */
  @Test
  public void testPredictJavaRDD() {
    JavaRDD<Vector> data = jsc.parallelize(threePoints(), 2);
    KMeansModel model = new KMeans().setK(1).setMaxIterations(5).run(data.rdd());
    JavaRDD<Integer> predictions = model.predict(data);
    // Should be able to get the first prediction.
    predictions.first();
  }
}
| 9,704 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.clustering;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import scala.Tuple2;
import scala.Tuple3;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.linalg.Matrix;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
public class JavaLDASuite extends SharedSparkSession {

  // Builds the shared (docId, termCounts) corpus from the Scala LDASuite
  // fixture before each test.
  @Override
  public void setUp() throws IOException {
    super.setUp();
    List<Tuple2<Long, Vector>> tinyCorpus = new ArrayList<>();
    for (int i = 0; i < LDASuite.tinyCorpus().length; i++) {
      // Repackage the Scala tuples into Java-friendly Tuple2<Long, Vector>.
      tinyCorpus.add(new Tuple2<>((Long) LDASuite.tinyCorpus()[i]._1(),
        LDASuite.tinyCorpus()[i]._2()));
    }
    JavaRDD<Tuple2<Long, Vector>> tmpCorpus = jsc.parallelize(tinyCorpus, 2);
    corpus = JavaPairRDD.fromJavaRDD(tmpCorpus);
  }

  /**
   * A LocalLDAModel built directly from the tiny fixture topics must expose
   * the fixture's parameters and topic descriptions through the Java API.
   */
  @Test
  public void localLDAModel() {
    Matrix topics = LDASuite.tinyTopics();
    // Uniform symmetric doc-concentration: 1/numTopics per entry.
    double[] topicConcentration = new double[topics.numRows()];
    Arrays.fill(topicConcentration, 1.0D / topics.numRows());
    LocalLDAModel model = new LocalLDAModel(topics, Vectors.dense(topicConcentration), 1.0, 100.0);

    // Check: basic parameters
    assertEquals(model.k(), tinyK);
    assertEquals(model.vocabSize(), tinyVocabSize);
    assertEquals(model.topicsMatrix(), tinyTopics);

    // Check: describeTopics() with all terms
    Tuple2<int[], double[]>[] fullTopicSummary = model.describeTopics();
    assertEquals(fullTopicSummary.length, tinyK);
    for (int i = 0; i < fullTopicSummary.length; i++) {
      assertArrayEquals(fullTopicSummary[i]._1(), tinyTopicDescription[i]._1());
      assertArrayEquals(fullTopicSummary[i]._2(), tinyTopicDescription[i]._2(), 1e-5);
    }
  }

  /**
   * End-to-end EM training: verifies basic parameters, topic summaries,
   * log probabilities, per-document topic distributions and topic assignments
   * exposed through the Java-friendly accessors.
   */
  @Test
  public void distributedLDAModel() {
    int k = 3;
    double topicSmoothing = 1.2;
    double termSmoothing = 1.2;

    // Train a model
    LDA lda = new LDA();
    lda.setK(k)
      .setDocConcentration(topicSmoothing)
      .setTopicConcentration(termSmoothing)
      .setMaxIterations(5)
      .setSeed(12345);
    DistributedLDAModel model = (DistributedLDAModel) lda.run(corpus);

    // Check: basic parameters
    LocalLDAModel localModel = model.toLocal();
    assertEquals(k, model.k());
    assertEquals(k, localModel.k());
    assertEquals(tinyVocabSize, model.vocabSize());
    assertEquals(tinyVocabSize, localModel.vocabSize());
    assertEquals(localModel.topicsMatrix(), model.topicsMatrix());

    // Check: topic summaries
    Tuple2<int[], double[]>[] roundedTopicSummary = model.describeTopics();
    assertEquals(k, roundedTopicSummary.length);
    Tuple2<int[], double[]>[] roundedLocalTopicSummary = localModel.describeTopics();
    assertEquals(k, roundedLocalTopicSummary.length);

    // Check: log probabilities (log of values in (0,1), so both must be negative)
    assertTrue(model.logLikelihood() < 0.0);
    assertTrue(model.logPrior() < 0.0);

    // Check: topic distributions
    JavaPairRDD<Long, Vector> topicDistributions = model.javaTopicDistributions();
    // SPARK-5562. since the topicDistribution returns the distribution of the non empty docs
    // over topics. Compare it against nonEmptyCorpus instead of corpus
    JavaPairRDD<Long, Vector> nonEmptyCorpus =
      corpus.filter(tuple2 -> Vectors.norm(tuple2._2(), 1.0) != 0.0);
    assertEquals(topicDistributions.count(), nonEmptyCorpus.count());

    // Check: javaTopTopicsPerDocuments
    Tuple3<Long, int[], double[]> topTopics = model.javaTopTopicsPerDocument(3).first();
    Long docId = topTopics._1(); // confirm doc ID type
    int[] topicIndices = topTopics._2();
    double[] topicWeights = topTopics._3();
    assertEquals(3, topicIndices.length);
    assertEquals(3, topicWeights.length);

    // Check: topTopicAssignments
    Tuple3<Long, int[], int[]> topicAssignment = model.javaTopicAssignments().first();
    Long docId2 = topicAssignment._1();
    int[] termIndices2 = topicAssignment._2();
    int[] topicIndices2 = topicAssignment._3();
    // One topic index per term occurrence.
    assertEquals(termIndices2.length, topicIndices2.length);
  }

  /**
   * Training with the online variational optimizer must go through the same
   * Java API and yield a model with the expected basic parameters.
   */
  @Test
  public void onlineOptimizerCompatibility() {
    int k = 3;
    double topicSmoothing = 1.2;
    double termSmoothing = 1.2;

    // Train a model
    OnlineLDAOptimizer op = new OnlineLDAOptimizer()
      .setTau0(1024)
      .setKappa(0.51)
      .setGammaShape(1e40)
      .setMiniBatchFraction(0.5);

    LDA lda = new LDA();
    lda.setK(k)
      .setDocConcentration(topicSmoothing)
      .setTopicConcentration(termSmoothing)
      .setMaxIterations(5)
      .setSeed(12345)
      .setOptimizer(op);
    LDAModel model = lda.run(corpus);

    // Check: basic parameters
    assertEquals(k, model.k());
    assertEquals(tinyVocabSize, model.vocabSize());

    // Check: topic summaries
    Tuple2<int[], double[]>[] roundedTopicSummary = model.describeTopics();
    assertEquals(k, roundedTopicSummary.length);
    Tuple2<int[], double[]>[] roundedLocalTopicSummary = model.describeTopics();
    assertEquals(k, roundedLocalTopicSummary.length);
  }

  /**
   * Smoke tests for LocalLDAModel's Java-friendly inference methods on the
   * toy model fixture; only checks counts and that the calls run cleanly.
   */
  @Test
  public void localLdaMethods() {
    JavaRDD<Tuple2<Long, Vector>> docs = jsc.parallelize(toyData, 2);
    JavaPairRDD<Long, Vector> pairedDocs = JavaPairRDD.fromJavaRDD(docs);

    // check: topicDistributions
    assertEquals(toyModel.topicDistributions(pairedDocs).count(), pairedDocs.count());

    // check: logPerplexity
    double logPerplexity = toyModel.logPerplexity(pairedDocs);

    // check: logLikelihood.
    List<Tuple2<Long, Vector>> docsSingleWord = new ArrayList<>();
    docsSingleWord.add(new Tuple2<>(0L, Vectors.dense(1.0, 0.0, 0.0)));
    JavaPairRDD<Long, Vector> single = JavaPairRDD.fromJavaRDD(jsc.parallelize(docsSingleWord));
    double logLikelihood = toyModel.logLikelihood(single);
  }

  // Fixtures mirrored from the Scala LDASuite companion object.
  private static int tinyK = LDASuite.tinyK();
  private static int tinyVocabSize = LDASuite.tinyVocabSize();
  private static Matrix tinyTopics = LDASuite.tinyTopics();
  private static Tuple2<int[], double[]>[] tinyTopicDescription =
    LDASuite.tinyTopicDescription();
  // Initialized in setUp() before every test.
  private JavaPairRDD<Long, Vector> corpus;
  private LocalLDAModel toyModel = LDASuite.toyModel();
  private List<Tuple2<Long, Vector>> toyData = LDASuite.javaToyData();
}
| 9,705 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaStreamingKMeansSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.clustering;
import java.util.Arrays;
import java.util.List;
import scala.Tuple2;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.apache.spark.SparkConf;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import static org.apache.spark.streaming.JavaTestUtils.*;
public class JavaStreamingKMeansSuite {

  protected transient JavaStreamingContext ssc;

  @Before
  public void setUp() {
    // A ManualClock lets the streaming test utilities advance batches
    // deterministically instead of waiting on wall-clock time.
    SparkConf conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("test")
      .set("spark.streaming.clock", "org.apache.spark.util.ManualClock");
    ssc = new JavaStreamingContext(conf, new Duration(1000));
    ssc.checkpoint("checkpoint");
  }

  @After
  public void tearDown() {
    ssc.stop();
    ssc = null;
  }

  /**
   * Smoke test for the Java streaming k-means API: trains on two batches of
   * 1-D vectors and predicts on keyed test data. Only verifies that the
   * pipeline runs end to end (two batches in, two batches out).
   */
  @Test
  @SuppressWarnings("unchecked")
  public void javaAPI() {
    List<Vector> trainingBatch = Arrays.asList(
      Vectors.dense(1.0),
      Vectors.dense(0.0));
    JavaDStream<Vector> training =
      attachTestInputStream(ssc, Arrays.asList(trainingBatch, trainingBatch), 2);
    List<Tuple2<Integer, Vector>> testBatch = Arrays.asList(
      new Tuple2<>(10, Vectors.dense(1.0)),
      new Tuple2<>(11, Vectors.dense(0.0)));
    JavaPairDStream<Integer, Vector> test = JavaPairDStream.fromJavaDStream(
      attachTestInputStream(ssc, Arrays.asList(testBatch, testBatch), 2));
    StreamingKMeans skmeans = new StreamingKMeans()
      .setK(1)
      .setDecayFactor(1.0)
      .setInitialCenters(new Vector[]{Vectors.dense(1.0)}, new double[]{0.0});
    skmeans.trainOn(training);
    JavaPairDStream<Integer, Integer> prediction = skmeans.predictOnValues(test);
    attachTestOutputStream(prediction.count());
    // Advance the manual clock through 2 batches and expect 2 output batches.
    runStreams(ssc, 2, 2);
  }
}
| 9,706 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaGaussianMixtureSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.clustering;
import java.util.Arrays;
import java.util.List;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
public class JavaGaussianMixtureSuite extends SharedSparkSession {

  /**
   * Trains a 2-component Gaussian mixture on three points and exercises the
   * Java-friendly prediction API.
   */
  @Test
  public void runGaussianMixture() {
    List<Vector> points = Arrays.asList(
      Vectors.dense(1.0, 2.0, 6.0),
      Vectors.dense(1.0, 3.0, 0.0),
      Vectors.dense(1.0, 4.0, 6.0)
    );
    JavaRDD<Vector> data = jsc.parallelize(points, 2);
    GaussianMixtureModel model = new GaussianMixture().setK(2).setMaxIterations(1).setSeed(1234)
      .run(data);
    // Fixed: JUnit's assertEquals takes the expected value FIRST.
    assertEquals(2, model.gaussians().length);
    JavaRDD<Integer> predictions = model.predict(data);
    // Materialize at least one prediction so the job actually executes.
    predictions.first();
  }
}
| 9,707 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaBisectingKMeansSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.clustering;
import java.util.Arrays;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
public class JavaBisectingKMeansSuite extends SharedSparkSession {

  /**
   * Runs bisecting k-means over three 2-D points. Although k = 4 is requested,
   * only 3 distinct points exist, so the model ends up with 3 leaf clusters
   * (asserted below), and the clustering tree splits into the two points near
   * x = 4 versus the single point near x = 1.
   */
  @Test
  public void twoDimensionalData() {
    JavaRDD<Vector> points = jsc.parallelize(Arrays.asList(
      Vectors.dense(4, -1),
      Vectors.dense(4, 1),
      Vectors.sparse(2, new int[]{0}, new double[]{1.0})
    ), 2);

    BisectingKMeans bkm = new BisectingKMeans()
      .setK(4)
      .setMaxIterations(2)
      .setSeed(1L);
    BisectingKMeansModel model = bkm.run(points);
    assertEquals(3, model.k());
    // Root center is the overall mean of the three points.
    assertArrayEquals(new double[]{3.0, 0.0}, model.root().center().toArray(), 1e-12);
    for (ClusteringTreeNode child : model.root().children()) {
      double[] center = child.center().toArray();
      if (center[0] > 2) {
        // Cluster of the two points at x = 4; their mean is (4, 0).
        assertEquals(2, child.size());
        assertArrayEquals(new double[]{4.0, 0.0}, center, 1e-12);
      } else {
        // Singleton cluster of the sparse point (1, 0).
        assertEquals(1, child.size());
        assertArrayEquals(new double[]{1.0, 0.0}, center, 1e-12);
      }
    }
  }
}
| 9,708 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/fpm/JavaFPGrowthSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.fpm;
import java.io.File;
import java.util.Arrays;
import java.util.List;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.util.Utils;
public class JavaFPGrowthSuite extends SharedSparkSession {

  /** Six small market-basket transactions shared by both tests. */
  @SuppressWarnings("unchecked")
  private JavaRDD<List<String>> transactions() {
    return jsc.parallelize(Arrays.asList(
      Arrays.asList("r z h k p".split(" ")),
      Arrays.asList("z y x w v u t s".split(" ")),
      Arrays.asList("s x o n r".split(" ")),
      Arrays.asList("x z y m t s q e".split(" ")),
      Arrays.asList("z".split(" ")),
      Arrays.asList("x z y r q t p".split(" "))), 2);
  }

  /**
   * Asserts the expected frequent-itemset count for {@link #transactions()}
   * at minSupport 0.5 and exercises the Java-friendly accessors.
   */
  private static void checkFreqItemsets(FPGrowthModel<String> model) {
    List<FPGrowth.FreqItemset<String>> freqItemsets = model.freqItemsets().toJavaRDD().collect();
    assertEquals(18, freqItemsets.size());
    for (FPGrowth.FreqItemset<String> itemset : freqItemsets) {
      // Test return types.
      List<String> items = itemset.javaItems();
      long freq = itemset.freq();
    }
  }

  @Test
  public void runFPGrowth() {
    FPGrowthModel<String> model = new FPGrowth()
      .setMinSupport(0.5)
      .setNumPartitions(2)
      .run(transactions());
    checkFreqItemsets(model);
  }

  /** The model must survive a save/load round trip with identical itemsets. */
  @Test
  public void runFPGrowthSaveLoad() {
    FPGrowthModel<String> model = new FPGrowth()
      .setMinSupport(0.5)
      .setNumPartitions(2)
      .run(transactions());

    File tempDir = Utils.createTempDir(
      System.getProperty("java.io.tmpdir"), "JavaFPGrowthSuite");
    String outputPath = tempDir.getPath();

    try {
      model.save(spark.sparkContext(), outputPath);
      @SuppressWarnings("unchecked")
      FPGrowthModel<String> newModel =
        (FPGrowthModel<String>) FPGrowthModel.load(spark.sparkContext(), outputPath);
      checkFreqItemsets(newModel);
    } finally {
      // Always clean up the temp dir, even if an assertion above fails.
      Utils.deleteRecursively(tempDir);
    }
  }
}
| 9,709 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/fpm/JavaAssociationRulesSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.fpm;
import java.util.Arrays;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.fpm.FPGrowth.FreqItemset;
public class JavaAssociationRulesSuite extends SharedSparkSession {
  @Test
  public void runAssociationRules() {
    // Minimal Java-compatibility check: build a tiny set of frequent itemsets
    // and make sure AssociationRules.run compiles and executes from Java.
    @SuppressWarnings("unchecked")
    JavaRDD<FPGrowth.FreqItemset<String>> freqItemsets = jsc.parallelize(Arrays.asList(
      new FreqItemset<>(new String[]{"a"}, 15L),
      new FreqItemset<>(new String[]{"b"}, 35L),
      new FreqItemset<>(new String[]{"a", "b"}, 12L)
    ));
    AssociationRules associationRules = new AssociationRules();
    JavaRDD<AssociationRules.Rule<String>> results = associationRules.run(freqItemsets);
  }
}
| 9,710 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/fpm/JavaPrefixSpanSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.fpm;
import java.io.File;
import java.util.Arrays;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.fpm.PrefixSpan.FreqSequence;
import org.apache.spark.util.Utils;
public class JavaPrefixSpanSuite extends SharedSparkSession {

  /**
   * Builds the shared four-sequence test corpus, partitioned into two slices.
   * Each sequence is a list of itemsets (lists of integers).
   */
  private JavaRDD<List<List<Integer>>> createTestSequences() {
    return jsc.parallelize(Arrays.asList(
      Arrays.asList(Arrays.asList(1, 2), Arrays.asList(3)),
      Arrays.asList(Arrays.asList(1), Arrays.asList(3, 2), Arrays.asList(1, 2)),
      Arrays.asList(Arrays.asList(1, 2), Arrays.asList(5)),
      Arrays.asList(Arrays.asList(6))
    ), 2);
  }

  // Asserts the expected number of frequent sequences for the shared corpus,
  // then checks that each frequent sequence can be materialized via the
  // Java-friendly accessors.
  private static void validateFreqSequences(PrefixSpanModel<Integer> model) {
    JavaRDD<FreqSequence<Integer>> freqSeqs = model.freqSequences().toJavaRDD();
    List<FreqSequence<Integer>> localFreqSeqs = freqSeqs.collect();
    Assert.assertEquals(5, localFreqSeqs.size());
    for (PrefixSpan.FreqSequence<Integer> freqSeq : localFreqSeqs) {
      List<List<Integer>> seq = freqSeq.javaSequence();
      long freq = freqSeq.freq();
    }
  }

  @Test
  public void runPrefixSpan() {
    PrefixSpan prefixSpan = new PrefixSpan()
      .setMinSupport(0.5)
      .setMaxPatternLength(5);
    validateFreqSequences(prefixSpan.run(createTestSequences()));
  }

  @Test
  public void runPrefixSpanSaveLoad() {
    PrefixSpan prefixSpan = new PrefixSpan()
      .setMinSupport(0.5)
      .setMaxPatternLength(5);
    PrefixSpanModel<Integer> model = prefixSpan.run(createTestSequences());
    // Round-trip the model through save/load and check that the reloaded
    // model produces the same frequent sequences.
    File tempDir = Utils.createTempDir(
      System.getProperty("java.io.tmpdir"), "JavaPrefixSpanSuite");
    String outputPath = tempDir.getPath();
    try {
      model.save(spark.sparkContext(), outputPath);
      @SuppressWarnings("unchecked")
      PrefixSpanModel<Integer> newModel =
        (PrefixSpanModel<Integer>) PrefixSpanModel.load(spark.sparkContext(), outputPath);
      validateFreqSequences(newModel);
    } finally {
      Utils.deleteRecursively(tempDir);
    }
  }
}
| 9,711 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/stat/JavaStatisticsSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.stat;
import java.util.Arrays;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaDoubleRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.stat.test.BinarySample;
import org.apache.spark.mllib.stat.test.ChiSqTestResult;
import org.apache.spark.mllib.stat.test.KolmogorovSmirnovTestResult;
import org.apache.spark.mllib.stat.test.StreamingTest;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import static org.apache.spark.streaming.JavaTestUtils.*;
public class JavaStatisticsSuite {
  private transient SparkSession spark;
  private transient JavaSparkContext jsc;
  private transient JavaStreamingContext ssc;

  @Before
  public void setUp() {
    // A manual clock lets streaming tests advance batches deterministically.
    SparkConf conf = new SparkConf()
      .set("spark.streaming.clock", "org.apache.spark.util.ManualClock");
    spark = SparkSession.builder()
      .master("local[2]")
      .appName("JavaStatistics")
      .config(conf)
      .getOrCreate();
    jsc = new JavaSparkContext(spark.sparkContext());
    ssc = new JavaStreamingContext(jsc, new Duration(1000));
    ssc.checkpoint("checkpoint");
  }

  @After
  public void tearDown() {
    // Stop the streaming context before the session: it runs on top of the
    // session's SparkContext, so stopping the session first would pull the
    // context out from under a potentially active streaming context.
    ssc.stop();
    ssc = null;
    spark.stop();
    spark = null;
  }

  @Test
  public void testCorr() {
    JavaRDD<Double> x = jsc.parallelize(Arrays.asList(1.0, 2.0, 3.0, 4.0));
    JavaRDD<Double> y = jsc.parallelize(Arrays.asList(1.1, 2.2, 3.1, 4.3));
    Double corr1 = Statistics.corr(x, y);
    Double corr2 = Statistics.corr(x, y, "pearson");
    // Check default method: corr(x, y) must agree with explicit "pearson".
    assertEquals(corr1, corr2, 1e-5);
  }

  @Test
  public void kolmogorovSmirnovTest() {
    // Java-compatibility check for both KS-test overloads.
    JavaDoubleRDD data = jsc.parallelizeDoubles(Arrays.asList(0.2, 1.0, -1.0, 2.0));
    KolmogorovSmirnovTestResult testResult1 = Statistics.kolmogorovSmirnovTest(data, "norm");
    KolmogorovSmirnovTestResult testResult2 = Statistics.kolmogorovSmirnovTest(
      data, "norm", 0.0, 1.0);
  }

  @Test
  public void chiSqTest() {
    // Java-compatibility check: chi-squared test over labeled points.
    JavaRDD<LabeledPoint> data = jsc.parallelize(Arrays.asList(
      new LabeledPoint(0.0, Vectors.dense(0.1, 2.3)),
      new LabeledPoint(1.0, Vectors.dense(1.5, 5.1)),
      new LabeledPoint(0.0, Vectors.dense(2.4, 8.1))));
    ChiSqTestResult[] testResults = Statistics.chiSqTest(data);
  }

  @Test
  public void streamingTest() {
    List<BinarySample> trainingBatch = Arrays.asList(
      new BinarySample(true, 1.0),
      new BinarySample(false, 2.0));
    JavaDStream<BinarySample> training =
      attachTestInputStream(ssc, Arrays.asList(trainingBatch, trainingBatch), 2);
    int numBatches = 2;
    StreamingTest model = new StreamingTest()
      .setWindowSize(0)
      .setPeacePeriod(0)
      .setTestMethod("welch");
    model.registerStream(training);
    attachTestOutputStream(training);
    runStreams(ssc, numBatches, numBatches);
  }
}
| 9,712 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/evaluation/JavaRankingMetricsSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.evaluation;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import scala.Tuple2;
import scala.Tuple2$;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
public class JavaRankingMetricsSuite extends SharedSparkSession {
  private transient JavaRDD<Tuple2<List<Integer>, List<Integer>>> predictionAndLabels;

  @Override
  public void setUp() throws IOException {
    super.setUp();
    // Three (predicted ranking, relevant documents) pairs; the last one has
    // no relevant documents at all.
    List<Tuple2<List<Integer>, List<Integer>>> pairs = Arrays.asList(
      Tuple2$.MODULE$.apply(
        Arrays.asList(1, 6, 2, 7, 8, 3, 9, 10, 4, 5), Arrays.asList(1, 2, 3, 4, 5)),
      Tuple2$.MODULE$.apply(
        Arrays.asList(4, 1, 5, 6, 2, 7, 3, 8, 9, 10), Arrays.asList(1, 2, 3)),
      Tuple2$.MODULE$.apply(
        Arrays.asList(1, 2, 3, 4, 5), Arrays.<Integer>asList()));
    predictionAndLabels = jsc.parallelize(pairs, 2);
  }

  @Test
  public void rankingMetrics() {
    // Check the Java-facing factory and a couple of metric values.
    @SuppressWarnings("unchecked")
    RankingMetrics<?> metrics = RankingMetrics.of(predictionAndLabels);
    Assert.assertEquals(0.355026, metrics.meanAveragePrecision(), 1e-5);
    Assert.assertEquals(0.75 / 3.0, metrics.precisionAt(4), 1e-5);
  }
}
| 9,713 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/feature/JavaTfIdfSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.feature;
import java.util.Arrays;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.linalg.Vector;
public class JavaTfIdfSuite extends SharedSparkSession {

  /** Builds the shared three-document corpus, split into words over two partitions. */
  @SuppressWarnings("unchecked")
  private JavaRDD<List<String>> createDocuments() {
    return jsc.parallelize(Arrays.asList(
      Arrays.asList("this is a sentence".split(" ")),
      Arrays.asList("this is another sentence".split(" ")),
      Arrays.asList("this is still a sentence".split(" "))), 2);
  }

  // Asserts that "this", which appears in every document, gets zero TF-IDF
  // weight (the IDF of a term present in all documents is 0).
  private static void assertZeroWeightForThis(HashingTF tf, JavaRDD<Vector> tfIdfs) {
    List<Vector> localTfIdfs = tfIdfs.collect();
    int indexOfThis = tf.indexOf("this");
    for (Vector v : localTfIdfs) {
      Assert.assertEquals(0.0, v.apply(indexOfThis), 1e-15);
    }
  }

  @Test
  public void tfIdf() {
    // The tests are to check Java compatibility.
    HashingTF tf = new HashingTF();
    JavaRDD<Vector> termFreqs = tf.transform(createDocuments());
    termFreqs.collect();
    IDF idf = new IDF();
    assertZeroWeightForThis(tf, idf.fit(termFreqs).transform(termFreqs));
  }

  @Test
  public void tfIdfMinimumDocumentFrequency() {
    // Same pipeline, but with a minimum document frequency of 2.
    HashingTF tf = new HashingTF();
    JavaRDD<Vector> termFreqs = tf.transform(createDocuments());
    termFreqs.collect();
    IDF idf = new IDF(2);
    assertZeroWeightForThis(tf, idf.fit(termFreqs).transform(termFreqs));
  }
}
| 9,714 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/feature/JavaWord2VecSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.feature;
import java.util.Arrays;
import java.util.List;
import com.google.common.base.Strings;
import scala.Tuple2;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
public class JavaWord2VecSuite extends SharedSparkSession {
  @Test
  @SuppressWarnings("unchecked")
  public void word2Vec() {
    // The tests are to check Java compatibility.
    String sentence = Strings.repeat("a b ", 100) + Strings.repeat("a c ", 10);
    List<String> tokens = Arrays.asList(sentence.split(" "));
    JavaRDD<List<String>> corpus = jsc.parallelize(Arrays.asList(tokens, tokens));
    Word2VecModel model = new Word2Vec()
      .setVectorSize(10)
      .setSeed(42L)
      .fit(corpus);
    // "b" co-occurs with "a" far more often than "c", so it ranks first.
    Tuple2<String, Object>[] synonyms = model.findSynonyms("a", 2);
    Assert.assertEquals(2, synonyms.length);
    Assert.assertEquals("b", synonyms[0]._1());
    Assert.assertEquals("c", synonyms[1]._1());
  }
}
| 9,715 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/random/JavaRandomRDDsSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.random;
import java.io.Serializable;
import java.util.Arrays;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaDoubleRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.linalg.Vector;
import static org.apache.spark.mllib.random.RandomRDDs.*;
public class JavaRandomRDDsSuite extends SharedSparkSession {

  // Asserts that every scalar RDD has the expected number of elements.
  private static void assertRDDSize(long expectedSize, JavaDoubleRDD... rdds) {
    for (JavaDoubleRDD rdd : rdds) {
      Assert.assertEquals(expectedSize, rdd.count());
    }
  }

  // Asserts that every vector RDD has the expected number of rows and that
  // each vector has the expected number of elements. @SafeVarargs suppresses
  // the unchecked generic-array-creation warning at the call sites; the array
  // is only iterated, never stored into.
  @SafeVarargs
  private static void assertVectorRDDSize(long expectedRows, int expectedCols,
      JavaRDD<Vector>... rdds) {
    for (JavaRDD<Vector> rdd : rdds) {
      Assert.assertEquals(expectedRows, rdd.count());
      Assert.assertEquals(expectedCols, rdd.first().size());
    }
  }

  @Test
  public void testUniformRDD() {
    long m = 1000L;
    int p = 2;
    long seed = 1L;
    assertRDDSize(m,
      uniformJavaRDD(jsc, m),
      uniformJavaRDD(jsc, m, p),
      uniformJavaRDD(jsc, m, p, seed));
  }

  @Test
  public void testNormalRDD() {
    long m = 1000L;
    int p = 2;
    long seed = 1L;
    assertRDDSize(m,
      normalJavaRDD(jsc, m),
      normalJavaRDD(jsc, m, p),
      normalJavaRDD(jsc, m, p, seed));
  }

  @Test
  public void testLNormalRDD() {
    double mean = 4.0;
    double std = 2.0;
    long m = 1000L;
    int p = 2;
    long seed = 1L;
    assertRDDSize(m,
      logNormalJavaRDD(jsc, mean, std, m),
      logNormalJavaRDD(jsc, mean, std, m, p),
      logNormalJavaRDD(jsc, mean, std, m, p, seed));
  }

  @Test
  public void testPoissonRDD() {
    double mean = 2.0;
    long m = 1000L;
    int p = 2;
    long seed = 1L;
    assertRDDSize(m,
      poissonJavaRDD(jsc, mean, m),
      poissonJavaRDD(jsc, mean, m, p),
      poissonJavaRDD(jsc, mean, m, p, seed));
  }

  @Test
  public void testExponentialRDD() {
    double mean = 2.0;
    long m = 1000L;
    int p = 2;
    long seed = 1L;
    assertRDDSize(m,
      exponentialJavaRDD(jsc, mean, m),
      exponentialJavaRDD(jsc, mean, m, p),
      exponentialJavaRDD(jsc, mean, m, p, seed));
  }

  @Test
  public void testGammaRDD() {
    double shape = 1.0;
    double jscale = 2.0;
    long m = 1000L;
    int p = 2;
    long seed = 1L;
    assertRDDSize(m,
      gammaJavaRDD(jsc, shape, jscale, m),
      gammaJavaRDD(jsc, shape, jscale, m, p),
      gammaJavaRDD(jsc, shape, jscale, m, p, seed));
  }

  @Test
  public void testUniformVectorRDD() {
    long m = 100L;
    int n = 10;
    int p = 2;
    long seed = 1L;
    assertVectorRDDSize(m, n,
      uniformJavaVectorRDD(jsc, m, n),
      uniformJavaVectorRDD(jsc, m, n, p),
      uniformJavaVectorRDD(jsc, m, n, p, seed));
  }

  @Test
  public void testNormalVectorRDD() {
    long m = 100L;
    int n = 10;
    int p = 2;
    long seed = 1L;
    assertVectorRDDSize(m, n,
      normalJavaVectorRDD(jsc, m, n),
      normalJavaVectorRDD(jsc, m, n, p),
      normalJavaVectorRDD(jsc, m, n, p, seed));
  }

  @Test
  public void testLogNormalVectorRDD() {
    double mean = 4.0;
    double std = 2.0;
    long m = 100L;
    int n = 10;
    int p = 2;
    long seed = 1L;
    assertVectorRDDSize(m, n,
      logNormalJavaVectorRDD(jsc, mean, std, m, n),
      logNormalJavaVectorRDD(jsc, mean, std, m, n, p),
      logNormalJavaVectorRDD(jsc, mean, std, m, n, p, seed));
  }

  @Test
  public void testPoissonVectorRDD() {
    double mean = 2.0;
    long m = 100L;
    int n = 10;
    int p = 2;
    long seed = 1L;
    assertVectorRDDSize(m, n,
      poissonJavaVectorRDD(jsc, mean, m, n),
      poissonJavaVectorRDD(jsc, mean, m, n, p),
      poissonJavaVectorRDD(jsc, mean, m, n, p, seed));
  }

  @Test
  public void testExponentialVectorRDD() {
    double mean = 2.0;
    long m = 100L;
    int n = 10;
    int p = 2;
    long seed = 1L;
    assertVectorRDDSize(m, n,
      exponentialJavaVectorRDD(jsc, mean, m, n),
      exponentialJavaVectorRDD(jsc, mean, m, n, p),
      exponentialJavaVectorRDD(jsc, mean, m, n, p, seed));
  }

  @Test
  public void testGammaVectorRDD() {
    double shape = 1.0;
    double jscale = 2.0;
    long m = 100L;
    int n = 10;
    int p = 2;
    long seed = 1L;
    assertVectorRDDSize(m, n,
      gammaJavaVectorRDD(jsc, shape, jscale, m, n),
      gammaJavaVectorRDD(jsc, shape, jscale, m, n, p),
      gammaJavaVectorRDD(jsc, shape, jscale, m, n, p, seed));
  }

  @Test
  public void testArbitrary() {
    // Exercise randomJavaRDD with a custom generator; the generator always
    // returns "42", so each element is a two-character string.
    long size = 10;
    long seed = 1L;
    int numPartitions = 0;
    StringGenerator gen = new StringGenerator();
    JavaRDD<String> rdd1 = randomJavaRDD(jsc, gen, size);
    JavaRDD<String> rdd2 = randomJavaRDD(jsc, gen, size, numPartitions);
    JavaRDD<String> rdd3 = randomJavaRDD(jsc, gen, size, numPartitions, seed);
    for (JavaRDD<String> rdd : Arrays.asList(rdd1, rdd2, rdd3)) {
      Assert.assertEquals(size, rdd.count());
      Assert.assertEquals(2, rdd.first().length());
    }
  }

  @Test
  public void testRandomVectorRDD() {
    UniformGenerator generator = new UniformGenerator();
    long m = 100L;
    int n = 10;
    int p = 2;
    long seed = 1L;
    assertVectorRDDSize(m, n,
      randomJavaVectorRDD(jsc, generator, m, n),
      randomJavaVectorRDD(jsc, generator, m, n, p),
      randomJavaVectorRDD(jsc, generator, m, n, p, seed));
  }
}
// Trivial test generator: ignores the seed and always yields the string "42".
class StringGenerator implements RandomDataGenerator<String>, Serializable {
  private static final String VALUE = "42";

  @Override
  public String nextValue() {
    return VALUE;
  }

  @Override
  public StringGenerator copy() {
    return new StringGenerator();
  }

  @Override
  public void setSeed(long seed) {
    // Deterministic generator: the seed has no effect.
  }
}
| 9,716 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/regression/JavaRidgeRegressionSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.regression;
import java.util.List;
import java.util.Random;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.util.LinearDataGenerator;
public class JavaRidgeRegressionSuite extends SharedSparkSession {

  /** Returns the mean squared prediction error of {@code model} over the validation set. */
  private static double predictionError(List<LabeledPoint> validationData,
                                        RidgeRegressionModel model) {
    double errorSum = 0;
    for (LabeledPoint point : validationData) {
      // predict returns a primitive double; keep it primitive to avoid boxing.
      double prediction = model.predict(point.features());
      errorSum += (prediction - point.label()) * (prediction - point.label());
    }
    return errorSum / validationData.size();
  }

  /**
   * Generates a noisy linear dataset whose weights are drawn uniformly
   * from [-0.5, 0.5] with a fixed seed, so tests are deterministic.
   */
  private static List<LabeledPoint> generateRidgeData(int numPoints, int numFeatures, double std) {
    // Pick weights as random values distributed uniformly in [-0.5, 0.5]
    Random random = new Random(42);
    double[] w = new double[numFeatures];
    for (int i = 0; i < w.length; i++) {
      w[i] = random.nextDouble() - 0.5;
    }
    return LinearDataGenerator.generateLinearInputAsList(0.0, w, numPoints, 42, std);
  }

  @Test
  public void runRidgeRegressionUsingConstructor() {
    int numExamples = 50;
    int numFeatures = 20;
    List<LabeledPoint> data = generateRidgeData(2 * numExamples, numFeatures, 10.0);
    JavaRDD<LabeledPoint> testRDD = jsc.parallelize(data.subList(0, numExamples));
    List<LabeledPoint> validationData = data.subList(numExamples, 2 * numExamples);
    RidgeRegressionWithSGD ridgeSGDImpl = new RidgeRegressionWithSGD();
    ridgeSGDImpl.optimizer()
      .setStepSize(1.0)
      .setRegParam(0.0)
      .setNumIterations(200);
    RidgeRegressionModel model = ridgeSGDImpl.run(testRDD.rdd());
    double unRegularizedErr = predictionError(validationData, model);
    // With more features than examples, regularization should reduce the
    // validation error relative to the unregularized fit.
    ridgeSGDImpl.optimizer().setRegParam(0.1);
    model = ridgeSGDImpl.run(testRDD.rdd());
    double regularizedErr = predictionError(validationData, model);
    Assert.assertTrue(regularizedErr < unRegularizedErr);
  }

  @Test
  public void runRidgeRegressionUsingStaticMethods() {
    int numExamples = 50;
    int numFeatures = 20;
    List<LabeledPoint> data = generateRidgeData(2 * numExamples, numFeatures, 10.0);
    JavaRDD<LabeledPoint> testRDD = jsc.parallelize(data.subList(0, numExamples));
    List<LabeledPoint> validationData = data.subList(numExamples, 2 * numExamples);
    RidgeRegressionModel model = RidgeRegressionWithSGD.train(testRDD.rdd(), 200, 1.0, 0.0);
    double unRegularizedErr = predictionError(validationData, model);
    model = RidgeRegressionWithSGD.train(testRDD.rdd(), 200, 1.0, 0.1);
    double regularizedErr = predictionError(validationData, model);
    Assert.assertTrue(regularizedErr < unRegularizedErr);
  }
}
| 9,717 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/regression/JavaStreamingLinearRegressionSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.regression;
import java.util.Arrays;
import java.util.List;
import scala.Tuple2;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.apache.spark.SparkConf;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import static org.apache.spark.streaming.JavaTestUtils.*;
public class JavaStreamingLinearRegressionSuite {
  protected transient JavaStreamingContext ssc;

  @Before
  public void setUp() {
    // A manual clock lets the test advance streaming batches deterministically.
    SparkConf conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("test")
      .set("spark.streaming.clock", "org.apache.spark.util.ManualClock");
    ssc = new JavaStreamingContext(conf, new Duration(1000));
    ssc.checkpoint("checkpoint");
  }

  @After
  public void tearDown() {
    ssc.stop();
    ssc = null;
  }

  @Test
  @SuppressWarnings("unchecked")
  public void javaAPI() {
    // Exercise the Java-facing streaming linear regression API end to end.
    List<LabeledPoint> batch = Arrays.asList(
      new LabeledPoint(1.0, Vectors.dense(1.0)),
      new LabeledPoint(0.0, Vectors.dense(0.0)));
    JavaDStream<LabeledPoint> trainingStream =
      attachTestInputStream(ssc, Arrays.asList(batch, batch), 2);
    List<Tuple2<Integer, Vector>> queries = Arrays.asList(
      new Tuple2<>(10, Vectors.dense(1.0)),
      new Tuple2<>(11, Vectors.dense(0.0)));
    JavaPairDStream<Integer, Vector> queryStream = JavaPairDStream.fromJavaDStream(
      attachTestInputStream(ssc, Arrays.asList(queries, queries), 2));
    StreamingLinearRegressionWithSGD model = new StreamingLinearRegressionWithSGD()
      .setNumIterations(2)
      .setInitialWeights(Vectors.dense(0.0));
    model.trainOn(trainingStream);
    JavaPairDStream<Integer, Double> predictions = model.predictOnValues(queryStream);
    attachTestOutputStream(predictions.count());
    runStreams(ssc, 2, 2);
  }
}
| 9,718 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/regression/JavaLassoSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.regression;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.util.LinearDataGenerator;
public class JavaLassoSuite extends SharedSparkSession {

  /**
   * Counts how many validation points are predicted within 0.5 of their label.
   * A prediction is off if it is more than 0.5 away from the expected value.
   */
  int validatePrediction(List<LabeledPoint> validationData, LassoModel model) {
    int numAccurate = 0;
    for (LabeledPoint point : validationData) {
      // predict returns a primitive double; keep it primitive to avoid boxing.
      double prediction = model.predict(point.features());
      if (Math.abs(prediction - point.label()) <= 0.5) {
        numAccurate++;
      }
    }
    return numAccurate;
  }

  @Test
  public void runLassoUsingConstructor() {
    int nPoints = 10000;
    double A = 0.0;
    double[] weights = {-1.5, 1.0e-2};
    JavaRDD<LabeledPoint> testRDD = jsc.parallelize(LinearDataGenerator.generateLinearInputAsList(A,
      weights, nPoints, 42, 0.1), 2).cache();
    List<LabeledPoint> validationData =
      LinearDataGenerator.generateLinearInputAsList(A, weights, nPoints, 17, 0.1);
    LassoWithSGD lassoSGDImpl = new LassoWithSGD();
    lassoSGDImpl.optimizer().setStepSize(1.0)
      .setRegParam(0.01)
      .setNumIterations(20);
    LassoModel model = lassoSGDImpl.run(testRDD.rdd());
    int numAccurate = validatePrediction(validationData, model);
    // Expect at least 80% of the validation points to be predicted accurately.
    Assert.assertTrue(numAccurate > nPoints * 4.0 / 5.0);
  }

  @Test
  public void runLassoUsingStaticMethods() {
    int nPoints = 10000;
    double A = 0.0;
    double[] weights = {-1.5, 1.0e-2};
    JavaRDD<LabeledPoint> testRDD = jsc.parallelize(LinearDataGenerator.generateLinearInputAsList(A,
      weights, nPoints, 42, 0.1), 2).cache();
    List<LabeledPoint> validationData =
      LinearDataGenerator.generateLinearInputAsList(A, weights, nPoints, 17, 0.1);
    LassoModel model = LassoWithSGD.train(testRDD.rdd(), 100, 1.0, 0.01, 1.0);
    int numAccurate = validatePrediction(validationData, model);
    // Expect at least 80% of the validation points to be predicted accurately.
    Assert.assertTrue(numAccurate > nPoints * 4.0 / 5.0);
  }
}
| 9,719 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/regression/JavaLinearRegressionSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.regression;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.util.LinearDataGenerator;
public class JavaLinearRegressionSuite extends SharedSparkSession {

  /**
   * Counts how many model predictions fall within 0.5 of the expected label.
   *
   * @param validationData held-out labeled points to score
   * @param model trained linear regression model
   * @return the number of points predicted within the 0.5 tolerance
   */
  private static int validatePrediction(
      List<LabeledPoint> validationData, LinearRegressionModel model) {
    int numAccurate = 0;
    for (LabeledPoint point : validationData) {
      // Use the primitive type to avoid boxing every prediction.
      double prediction = model.predict(point.features());
      // A prediction is off if it is more than 0.5 away from the expected value.
      if (Math.abs(prediction - point.label()) <= 0.5) {
        numAccurate++;
      }
    }
    return numAccurate;
  }

  @Test
  public void runLinearRegressionUsingConstructor() {
    int nPoints = 100;
    double A = 3.0;
    double[] weights = {10, 10};

    JavaRDD<LabeledPoint> testRDD = jsc.parallelize(
      LinearDataGenerator.generateLinearInputAsList(A, weights, nPoints, 42, 0.1), 2).cache();
    List<LabeledPoint> validationData =
      LinearDataGenerator.generateLinearInputAsList(A, weights, nPoints, 17, 0.1);

    // Non-zero intercept (A = 3.0), so fitting must be done with setIntercept(true).
    LinearRegressionWithSGD linSGDImpl = new LinearRegressionWithSGD();
    linSGDImpl.setIntercept(true);
    LinearRegressionModel model = linSGDImpl.run(testRDD.rdd());

    // Expect at least 80% of the validation points to be predicted accurately.
    int numAccurate = validatePrediction(validationData, model);
    Assert.assertTrue(numAccurate > nPoints * 4.0 / 5.0);
  }

  @Test
  public void runLinearRegressionUsingStaticMethods() {
    int nPoints = 100;
    double A = 0.0;
    double[] weights = {10, 10};

    JavaRDD<LabeledPoint> testRDD = jsc.parallelize(
      LinearDataGenerator.generateLinearInputAsList(A, weights, nPoints, 42, 0.1), 2).cache();
    List<LabeledPoint> validationData =
      LinearDataGenerator.generateLinearInputAsList(A, weights, nPoints, 17, 0.1);

    LinearRegressionModel model = LinearRegressionWithSGD.train(testRDD.rdd(), 100);

    // Expect at least 80% of the validation points to be predicted accurately.
    int numAccurate = validatePrediction(validationData, model);
    Assert.assertTrue(numAccurate > nPoints * 4.0 / 5.0);
  }

  @Test
  public void testPredictJavaRDD() {
    int nPoints = 100;
    double A = 0.0;
    double[] weights = {10, 10};

    JavaRDD<LabeledPoint> testRDD = jsc.parallelize(
      LinearDataGenerator.generateLinearInputAsList(A, weights, nPoints, 42, 0.1), 2).cache();
    LinearRegressionWithSGD linSGDImpl = new LinearRegressionWithSGD();
    LinearRegressionModel model = linSGDImpl.run(testRDD.rdd());

    // Exercise the JavaRDD-based predict() overload.
    JavaRDD<Vector> vectors = testRDD.map(LabeledPoint::features);
    JavaRDD<Double> predictions = model.predict(vectors);
    // Should be able to get the first prediction.
    predictions.first();
  }
}
| 9,720 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib | Create_ds/spark/mllib/src/test/java/org/apache/spark/mllib/regression/JavaIsotonicRegressionSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.regression;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import scala.Tuple3;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaDoubleRDD;
import org.apache.spark.api.java.JavaRDD;
public class JavaIsotonicRegressionSuite extends SharedSparkSession {

  /** Turns each label into a (label, feature, weight) triple with features 1..n and weight 1. */
  private static List<Tuple3<Double, Double, Double>> generateIsotonicInput(double[] labels) {
    List<Tuple3<Double, Double, Double>> triples = new ArrayList<>(labels.length);
    for (int idx = 0; idx < labels.length; idx++) {
      triples.add(new Tuple3<>(labels[idx], (double) (idx + 1), 1.0));
    }
    return triples;
  }

  /** Trains an isotonic regression model on the given label sequence. */
  private IsotonicRegressionModel runIsotonicRegression(double[] labels) {
    JavaRDD<Tuple3<Double, Double, Double>> trainingData =
      jsc.parallelize(generateIsotonicInput(labels), 2).cache();
    return new IsotonicRegression().run(trainingData);
  }

  @Test
  public void testIsotonicRegressionJavaRDD() {
    double[] labels = {1, 2, 3, 3, 1, 6, 7, 8, 11, 9, 10, 12};
    IsotonicRegressionModel model = runIsotonicRegression(labels);

    // Adjacent violators get pooled: {3, 3, 1} -> 7/3 and {11, 9, 10} -> 10.
    double[] expected = {1, 2, 7.0 / 3, 7.0 / 3, 6, 7, 8, 10, 10, 12};
    Assert.assertArrayEquals(expected, model.predictions(), 1.0e-14);
  }

  @Test
  public void testIsotonicRegressionPredictionsJavaRDD() {
    IsotonicRegressionModel model =
      runIsotonicRegression(new double[]{1, 2, 3, 3, 1, 6, 7, 8, 11, 9, 10, 12});

    JavaDoubleRDD testPoints = jsc.parallelizeDoubles(Arrays.asList(0.0, 1.0, 9.5, 12.0, 13.0));
    List<Double> predictions = model.predict(testPoints).collect();

    // Points outside the training range clamp to the boundary predictions.
    double[] expected = {1.0, 1.0, 10.0, 12.0, 12.0};
    for (int i = 0; i < expected.length; i++) {
      Assert.assertEquals(expected[i], predictions.get(i).doubleValue(), 1.0e-14);
    }
  }
}
| 9,721 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/JavaPipelineSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml;
import java.io.IOException;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.ml.classification.LogisticRegression;
import static org.apache.spark.ml.classification.LogisticRegressionSuite.generateLogisticInputAsList;
import org.apache.spark.ml.feature.LabeledPoint;
import org.apache.spark.ml.feature.StandardScaler;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
/**
* Test Pipeline construction and fitting in Java.
*/
public class JavaPipelineSuite extends SharedSparkSession {

  private transient Dataset<Row> dataset;

  @Override
  public void setUp() throws IOException {
    super.setUp();
    // Generate a small, deterministic logistic-regression dataset.
    JavaRDD<LabeledPoint> input =
      jsc.parallelize(generateLogisticInputAsList(1.0, 1.0, 100, 42), 2);
    dataset = spark.createDataFrame(input, LabeledPoint.class);
  }

  @Test
  public void pipeline() {
    // Chain a feature scaler into a logistic regression classifier.
    StandardScaler standardScaler = new StandardScaler()
      .setInputCol("features")
      .setOutputCol("scaledFeatures");
    LogisticRegression logReg = new LogisticRegression()
      .setFeaturesCol("scaledFeatures");
    PipelineStage[] stages = {standardScaler, logReg};
    Pipeline pipeline = new Pipeline().setStages(stages);

    // Fit the whole pipeline, then query the transformed output through SQL.
    PipelineModel fitted = pipeline.fit(dataset);
    fitted.transform(dataset).createOrReplaceTempView("prediction");
    Dataset<Row> predictions = spark.sql("SELECT label, probability, prediction FROM prediction");
    predictions.collectAsList();
  }
}
| 9,722 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.ml.feature.LabeledPoint;
import org.apache.spark.ml.tree.impl.TreeTests;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
public class JavaGBTClassifierSuite extends SharedSparkSession {

  /**
   * Exercises every GBTClassifier setter from Java; exhaustive training options
   * are covered by the Scala suite.
   */
  @Test
  public void runDT() {
    int nPoints = 20;
    double A = 2.0;
    double B = -1.5;

    JavaRDD<LabeledPoint> data = jsc.parallelize(
      LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
    Dataset<Row> dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2);

    // This tests setters. Training with various options is tested in Scala.
    // (Renamed from "rf" — a copy-paste from the random forest suite — to match the type.)
    GBTClassifier gbt = new GBTClassifier()
      .setMaxDepth(2)
      .setMaxBins(10)
      .setMinInstancesPerNode(5)
      .setMinInfoGain(0.0)
      .setMaxMemoryInMB(256)
      .setCacheNodeIds(false)
      .setCheckpointInterval(10)
      .setSubsamplingRate(1.0)
      .setSeed(1234)
      .setMaxIter(3)
      .setStepSize(0.1)
      .setMaxDepth(2); // duplicate setMaxDepth to check builder pattern
    for (String lossType : GBTClassifier.supportedLossTypes()) {
      gbt.setLossType(lossType);
    }

    GBTClassificationModel model = gbt.fit(dataFrame);
    // Exercise the model API surface from Java.
    model.transform(dataFrame);
    model.totalNumNodes();
    model.toDebugString();
    model.trees();
    model.treeWeights();

    // TODO: Add a save/load round-trip test once implemented. SPARK-6725
  }
}
| 9,723 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/classification/JavaLogisticRegressionSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification;
import java.io.IOException;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import static org.apache.spark.ml.classification.LogisticRegressionSuite.generateLogisticInputAsList;
import org.apache.spark.ml.feature.LabeledPoint;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
public class JavaLogisticRegressionSuite extends SharedSparkSession {

  private transient Dataset<Row> dataset;
  private transient JavaRDD<LabeledPoint> datasetRDD;
  // Tolerance for floating-point comparisons.
  private final double eps = 1e-5;

  @Override
  public void setUp() throws IOException {
    super.setUp();
    List<LabeledPoint> points = generateLogisticInputAsList(1.0, 1.0, 100, 42);
    datasetRDD = jsc.parallelize(points, 2);
    dataset = spark.createDataFrame(datasetRDD, LabeledPoint.class);
    dataset.createOrReplaceTempView("dataset");
  }

  @Test
  public void logisticRegressionDefaultParams() {
    LogisticRegression lr = new LogisticRegression();
    // JUnit's assertEquals takes (expected, actual); the original had them reversed here.
    Assert.assertEquals("label", lr.getLabelCol());
    LogisticRegressionModel model = lr.fit(dataset);
    model.transform(dataset).createOrReplaceTempView("prediction");
    Dataset<Row> predictions = spark.sql("SELECT label, probability, prediction FROM prediction");
    predictions.collectAsList();
    // Check defaults
    Assert.assertEquals(0.5, model.getThreshold(), eps);
    Assert.assertEquals("features", model.getFeaturesCol());
    Assert.assertEquals("prediction", model.getPredictionCol());
    Assert.assertEquals("probability", model.getProbabilityCol());
  }

  @Test
  public void logisticRegressionWithSetters() {
    // Set params, train, and check as many params as we can.
    LogisticRegression lr = new LogisticRegression()
      .setMaxIter(10)
      .setRegParam(1.0)
      .setThreshold(0.6)
      .setProbabilityCol("myProbability");
    LogisticRegressionModel model = lr.fit(dataset);
    LogisticRegression parent = (LogisticRegression) model.parent();
    Assert.assertEquals(10, parent.getMaxIter());
    Assert.assertEquals(1.0, parent.getRegParam(), eps);
    // setThreshold(0.6) is equivalent to binary thresholds [0.4, 0.6].
    Assert.assertEquals(0.4, parent.getThresholds()[0], eps);
    Assert.assertEquals(0.6, parent.getThresholds()[1], eps);
    Assert.assertEquals(0.6, parent.getThreshold(), eps);
    Assert.assertEquals(0.6, model.getThreshold(), eps);

    // Modify model params, and check that the params worked.
    model.setThreshold(1.0);
    model.transform(dataset).createOrReplaceTempView("predAllZero");
    Dataset<Row> predAllZero = spark.sql("SELECT prediction, myProbability FROM predAllZero");
    for (Row r : predAllZero.collectAsList()) {
      Assert.assertEquals(0.0, r.getDouble(0), eps);
    }
    // Call transform with params, and check that the params worked.
    model.transform(dataset, model.threshold().w(0.0), model.probabilityCol().w("myProb"))
      .createOrReplaceTempView("predNotAllZero");
    Dataset<Row> predNotAllZero = spark.sql("SELECT prediction, myProb FROM predNotAllZero");
    boolean foundNonZero = false;
    for (Row r : predNotAllZero.collectAsList()) {
      if (r.getDouble(0) != 0.0) {
        foundNonZero = true;
      }
    }
    Assert.assertTrue(foundNonZero);

    // Call fit() with new params, and check as many params as we can.
    LogisticRegressionModel model2 = lr.fit(dataset, lr.maxIter().w(5), lr.regParam().w(0.1),
      lr.threshold().w(0.4), lr.probabilityCol().w("theProb"));
    LogisticRegression parent2 = (LogisticRegression) model2.parent();
    Assert.assertEquals(5, parent2.getMaxIter());
    Assert.assertEquals(0.1, parent2.getRegParam(), eps);
    Assert.assertEquals(0.4, parent2.getThreshold(), eps);
    Assert.assertEquals(0.4, model2.getThreshold(), eps);
    Assert.assertEquals("theProb", model2.getProbabilityCol());
  }

  @SuppressWarnings("unchecked")
  @Test
  public void logisticRegressionPredictorClassifierMethods() {
    LogisticRegression lr = new LogisticRegression();
    LogisticRegressionModel model = lr.fit(dataset);
    Assert.assertEquals(2, model.numClasses());

    model.transform(dataset).createOrReplaceTempView("transformed");
    Dataset<Row> trans1 = spark.sql("SELECT rawPrediction, probability FROM transformed");
    for (Row row : trans1.collectAsList()) {
      Vector raw = (Vector) row.get(0);
      Vector prob = (Vector) row.get(1);
      // Expected value goes first in assertEquals (the original had them reversed).
      Assert.assertEquals(2, raw.size());
      Assert.assertEquals(2, prob.size());
      // Probability must be the logistic transform of the raw margin.
      double probFromRaw1 = 1.0 / (1.0 + Math.exp(-raw.apply(1)));
      Assert.assertEquals(0, Math.abs(prob.apply(1) - probFromRaw1), eps);
      Assert.assertEquals(0, Math.abs(prob.apply(0) - (1.0 - probFromRaw1)), eps);
    }

    Dataset<Row> trans2 = spark.sql("SELECT prediction, probability FROM transformed");
    for (Row row : trans2.collectAsList()) {
      double pred = row.getDouble(0);
      Vector prob = (Vector) row.get(1);
      // The predicted class must carry the maximum probability.
      double probOfPred = prob.apply((int) pred);
      for (int i = 0; i < prob.size(); ++i) {
        Assert.assertTrue(probOfPred >= prob.apply(i));
      }
    }
  }

  @Test
  public void logisticRegressionTrainingSummary() {
    LogisticRegression lr = new LogisticRegression();
    LogisticRegressionModel model = lr.fit(dataset);
    LogisticRegressionTrainingSummary summary = model.summary();
    // One objective-history entry is recorded per iteration.
    Assert.assertEquals(summary.totalIterations(), summary.objectiveHistory().length);
  }
}
| 9,724 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.ml.feature.LabeledPoint;
import org.apache.spark.ml.tree.impl.TreeTests;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
public class JavaDecisionTreeClassifierSuite extends SharedSparkSession {

  @Test
  public void runDT() {
    int nPoints = 20;
    double A = 2.0;
    double B = -1.5;

    JavaRDD<LabeledPoint> trainingData = jsc.parallelize(
      LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
    Dataset<Row> trainingFrame = TreeTests.setMetadata(trainingData, categoricalFeatures, 2);

    // Exercise every setter from Java; exhaustive training options are covered in Scala.
    DecisionTreeClassifier classifier = new DecisionTreeClassifier()
      .setMaxDepth(2)
      .setMaxBins(10)
      .setMinInstancesPerNode(5)
      .setMinInfoGain(0.0)
      .setMaxMemoryInMB(256)
      .setCacheNodeIds(false)
      .setCheckpointInterval(10)
      .setMaxDepth(2); // duplicate setMaxDepth to check builder pattern
    for (String impurity : DecisionTreeClassifier.supportedImpurities()) {
      classifier.setImpurity(impurity);
    }

    DecisionTreeClassificationModel fitted = classifier.fit(trainingFrame);
    // Touch the model API surface to make sure it is callable from Java.
    fitted.transform(trainingFrame);
    fitted.numNodes();
    fitted.depth();
    fitted.toDebugString();

    // TODO: Add a save/load round-trip test once implemented. SPARK-6725
  }
}
| 9,725 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/classification/JavaNaiveBayesSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification;
import java.util.Arrays;
import java.util.List;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.ml.linalg.VectorUDT;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
public class JavaNaiveBayesSuite extends SharedSparkSession {

  /** Asserts that each row's prediction (column 0) matches its label (column 1). */
  public void validatePrediction(Dataset<Row> predictionAndLabels) {
    for (Row row : predictionAndLabels.collectAsList()) {
      double predicted = row.getAs(0);
      double expected = row.getAs(1);
      assertEquals(expected, predicted, 1E-5);
    }
  }

  @Test
  public void naiveBayesDefaultParams() {
    NaiveBayes nb = new NaiveBayes();
    // Verify the documented defaults.
    assertEquals("label", nb.getLabelCol());
    assertEquals("features", nb.getFeaturesCol());
    assertEquals("prediction", nb.getPredictionCol());
    assertEquals(1.0, nb.getSmoothing(), 1E-5);
    assertEquals("multinomial", nb.getModelType());
  }

  @Test
  public void testNaiveBayes() {
    StructType schema = new StructType(new StructField[]{
      new StructField("label", DataTypes.DoubleType, false, Metadata.empty()),
      new StructField("features", new VectorUDT(), false, Metadata.empty())
    });
    // Three well-separated classes: each class puts all of its mass on one feature.
    List<Row> rows = Arrays.asList(
      RowFactory.create(0.0, Vectors.dense(1.0, 0.0, 0.0)),
      RowFactory.create(0.0, Vectors.dense(2.0, 0.0, 0.0)),
      RowFactory.create(1.0, Vectors.dense(0.0, 1.0, 0.0)),
      RowFactory.create(1.0, Vectors.dense(0.0, 2.0, 0.0)),
      RowFactory.create(2.0, Vectors.dense(0.0, 0.0, 1.0)),
      RowFactory.create(2.0, Vectors.dense(0.0, 0.0, 2.0)));
    Dataset<Row> trainingSet = spark.createDataFrame(rows, schema);

    NaiveBayes trainer = new NaiveBayes().setSmoothing(0.5).setModelType("multinomial");
    NaiveBayesModel model = trainer.fit(trainingSet);
    // On such separable data the model should reproduce its own training labels.
    validatePrediction(model.transform(trainingSet).select("prediction", "label"));
  }
}
| 9,726 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/classification/JavaOneVsRestSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification;
import java.io.IOException;
import java.util.List;
import scala.collection.JavaConverters;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.ml.feature.LabeledPoint;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import static org.apache.spark.ml.classification.LogisticRegressionSuite.generateMultinomialLogisticInput;
public class JavaOneVsRestSuite extends SharedSparkSession {

  private transient Dataset<Row> dataset;
  private transient JavaRDD<LabeledPoint> datasetRDD;

  @Override
  public void setUp() throws IOException {
    super.setUp();
    int nPoints = 3;
    // The following coefficients and xMean/xVariance are computed from iris dataset with
    // lambda=0.2.
    // As a result, we are drawing samples from probability distribution of an actual model.
    double[] coefficients = {
      -0.57997, 0.912083, -0.371077, -0.819866, 2.688191,
      -0.16624, -0.84355, -0.048509, -0.301789, 4.170682};
    double[] xMean = {5.843, 3.057, 3.758, 1.199};
    double[] xVariance = {0.6856, 0.1899, 3.116, 0.581};
    List<LabeledPoint> points = JavaConverters.seqAsJavaListConverter(
      generateMultinomialLogisticInput(coefficients, xMean, xVariance, true, nPoints, 42)
    ).asJava();
    datasetRDD = jsc.parallelize(points, 2);
    dataset = spark.createDataFrame(datasetRDD, LabeledPoint.class);
  }

  @Test
  public void oneVsRestDefaultParams() {
    OneVsRest ova = new OneVsRest();
    ova.setClassifier(new LogisticRegression());
    // JUnit's assertEquals takes (expected, actual); the original had them reversed,
    // which yields misleading failure messages.
    Assert.assertEquals("label", ova.getLabelCol());
    Assert.assertEquals("prediction", ova.getPredictionCol());
    OneVsRestModel ovaModel = ova.fit(dataset);
    Dataset<Row> predictions = ovaModel.transform(dataset).select("label", "prediction");
    predictions.collectAsList();
    // Column names must propagate from the estimator to the fitted model.
    Assert.assertEquals("label", ovaModel.getLabelCol());
    Assert.assertEquals("prediction", ovaModel.getPredictionCol());
  }
}
| 9,727 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/classification/JavaMultilayerPerceptronClassifierSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification;
import java.util.Arrays;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.ml.feature.LabeledPoint;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
public class JavaMultilayerPerceptronClassifierSuite extends SharedSparkSession {

  @Test
  public void testMLPC() {
    // XOR truth table: not linearly separable, so a hidden layer is required.
    List<LabeledPoint> xorData = Arrays.asList(
      new LabeledPoint(0.0, Vectors.dense(0.0, 0.0)),
      new LabeledPoint(1.0, Vectors.dense(0.0, 1.0)),
      new LabeledPoint(1.0, Vectors.dense(1.0, 0.0)),
      new LabeledPoint(0.0, Vectors.dense(1.0, 1.0))
    );
    Dataset<Row> trainingSet = spark.createDataFrame(xorData, LabeledPoint.class);

    // 2 inputs -> 5 hidden units -> 2 output classes; fixed seed for determinism.
    MultilayerPerceptronClassifier trainer = new MultilayerPerceptronClassifier()
      .setLayers(new int[]{2, 5, 2})
      .setBlockSize(1)
      .setSeed(123L)
      .setMaxIter(100);
    MultilayerPerceptronClassificationModel model = trainer.fit(trainingSet);

    Dataset<Row> transformed = model.transform(trainingSet);
    for (Row row : transformed.select("prediction", "label").collectAsList()) {
      // Compare as integers since both columns hold whole-number class labels.
      Assert.assertEquals((int) row.getDouble(0), (int) row.getDouble(1));
    }
  }
}
| 9,728 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification;
import java.util.HashMap;
import java.util.Map;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.ml.feature.LabeledPoint;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.ml.tree.impl.TreeTests;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
public class JavaRandomForestClassifierSuite extends SharedSparkSession {

  /**
   * Exercises every RandomForestClassifier setter from Java, including validation of
   * featureSubsetStrategy; exhaustive training options are covered by the Scala suite.
   */
  @Test
  public void runDT() {
    int nPoints = 20;
    double A = 2.0;
    double B = -1.5;

    JavaRDD<LabeledPoint> data = jsc.parallelize(
      LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
    Dataset<Row> dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2);

    // This tests setters. Training with various options is tested in Scala.
    RandomForestClassifier rf = new RandomForestClassifier()
      .setMaxDepth(2)
      .setMaxBins(10)
      .setMinInstancesPerNode(5)
      .setMinInfoGain(0.0)
      .setMaxMemoryInMB(256)
      .setCacheNodeIds(false)
      .setCheckpointInterval(10)
      .setSubsamplingRate(1.0)
      .setSeed(1234)
      .setNumTrees(3)
      .setMaxDepth(2); // duplicate setMaxDepth to check builder pattern
    for (String impurity : RandomForestClassifier.supportedImpurities()) {
      rf.setImpurity(impurity);
    }
    for (String featureSubsetStrategy : RandomForestClassifier.supportedFeatureSubsetStrategies()) {
      rf.setFeatureSubsetStrategy(featureSubsetStrategy);
    }
    // Fractional strategies in (0, 1] are accepted in string form.
    String[] realStrategies = {".1", ".10", "0.10", "0.1", "0.9", "1.0"};
    for (String strategy : realStrategies) {
      rf.setFeatureSubsetStrategy(strategy);
    }
    // Positive integer strategies are accepted in string form.
    String[] integerStrategies = {"1", "10", "100", "1000", "10000"};
    for (String strategy : integerStrategies) {
      rf.setFeatureSubsetStrategy(strategy);
    }
    // Out-of-range values must be rejected with IllegalArgumentException.
    // Catch the expected type directly instead of catching Exception and asserting
    // instanceof, so an unexpected exception type surfaces with its own stack trace.
    String[] invalidStrategies = {"-.1", "-.10", "-0.10", ".0", "0.0", "1.1", "0"};
    for (String strategy : invalidStrategies) {
      try {
        rf.setFeatureSubsetStrategy(strategy);
        Assert.fail("Expected IllegalArgumentException for invalid strategy: " + strategy);
      } catch (IllegalArgumentException e) {
        // expected
      }
    }

    RandomForestClassificationModel model = rf.fit(dataFrame);
    // Exercise the model API surface from Java.
    model.transform(dataFrame);
    model.totalNumNodes();
    model.toDebugString();
    model.trees();
    model.treeWeights();
    // Exercise featureImportances (its value is verified in the Scala tests).
    model.featureImportances();

    // TODO: Add a save/load round-trip test once implemented. SPARK-6725
  }
}
| 9,729 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/linalg/JavaSQLDataTypesSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.linalg;
import org.junit.Assert;
import org.junit.Test;
import static org.apache.spark.ml.linalg.SQLDataTypes.*;
public class JavaSQLDataTypesSuite {
  @Test
  public void testSQLDataTypes() {
    // The SQL type accessors must return values equal to freshly built UDTs.
    VectorUDT expectedVectorType = new VectorUDT();
    MatrixUDT expectedMatrixType = new MatrixUDT();
    Assert.assertEquals(expectedVectorType, VectorType());
    Assert.assertEquals(expectedMatrixType, MatrixType());
  }
}
| 9,730 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/attribute/JavaAttributeGroupSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.attribute;
import org.junit.Assert;
import org.junit.Test;
public class JavaAttributeGroupSuite {

  @Test
  public void testAttributeGroup() {
    // A heterogeneous set covering numeric, nominal and binary attributes.
    Attribute[] members = {
      NumericAttribute.defaultAttr(),
      NominalAttribute.defaultAttr(),
      BinaryAttribute.defaultAttr().withIndex(0),
      NumericAttribute.defaultAttr().withName("age").withSparsity(0.8),
      NominalAttribute.defaultAttr().withName("size").withValues("small", "medium", "large"),
      BinaryAttribute.defaultAttr().withName("clicked").withValues("no", "yes"),
      NumericAttribute.defaultAttr(),
      NumericAttribute.defaultAttr()
    };
    AttributeGroup userGroup = new AttributeGroup("user", members);
    Assert.assertEquals(8, userGroup.size());
    Assert.assertEquals("user", userGroup.name());
    Assert.assertEquals(NumericAttribute.defaultAttr().withIndex(0), userGroup.getAttr(0));
    Assert.assertEquals(3, userGroup.indexOf("age"));
    Assert.assertFalse(userGroup.hasAttr("abc"));
    // Round-tripping through StructField metadata must preserve the group.
    Assert.assertEquals(userGroup, AttributeGroup.fromStructField(userGroup.toStructField()));
  }
}
| 9,731 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/attribute/JavaAttributeSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.attribute;
import org.junit.Assert;
import org.junit.Test;
public class JavaAttributeSuite {

  @Test
  public void testAttributeType() {
    // Each concrete attribute class reports its matching AttributeType.
    Assert.assertEquals(AttributeType.Numeric(), NumericAttribute.defaultAttr().attrType());
    Assert.assertEquals(AttributeType.Nominal(), NominalAttribute.defaultAttr().attrType());
    Assert.assertEquals(AttributeType.Binary(), BinaryAttribute.defaultAttr().attrType());
  }

  @Test
  public void testNumericAttribute() {
    // StructField round-trip keeps all properties except the index.
    NumericAttribute original = NumericAttribute.defaultAttr()
      .withName("age").withIndex(0).withMin(0.0).withMax(1.0).withStd(0.5).withSparsity(0.4);
    Attribute restored = Attribute.fromStructField(original.toStructField());
    Assert.assertEquals(original.withoutIndex(), restored);
  }

  @Test
  public void testNominalAttribute() {
    NominalAttribute original = NominalAttribute.defaultAttr()
      .withName("size").withIndex(1).withValues("small", "medium", "large");
    Attribute restored = Attribute.fromStructField(original.toStructField());
    Assert.assertEquals(original.withoutIndex(), restored);
  }

  @Test
  public void testBinaryAttribute() {
    BinaryAttribute original = BinaryAttribute.defaultAttr()
      .withName("clicked").withIndex(2).withValues("no", "yes");
    Attribute restored = Attribute.fromStructField(original.toStructField());
    Assert.assertEquals(original.withoutIndex(), restored);
  }
}
| 9,732 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/util/JavaDefaultReadWriteSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.util;
import java.io.File;
import java.io.IOException;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.util.Utils;
public class JavaDefaultReadWriteSuite extends SharedSparkSession {

  File tempDir = null;

  @Override
  public void setUp() throws IOException {
    super.setUp();
    // Scratch directory for the save/load round trip; removed in tearDown.
    tempDir = Utils.createTempDir(
      System.getProperty("java.io.tmpdir"), "JavaDefaultReadWriteSuite");
  }

  @Override
  public void tearDown() {
    super.tearDown();
    Utils.deleteRecursively(tempDir);
  }

  @Test
  public void testDefaultReadWrite() throws IOException {
    String uid = "my_params";
    MyParams instance = new MyParams(uid);
    instance.set(instance.intParam(), 2);
    String outputPath = new File(tempDir, uid).getPath();
    instance.save(outputPath);
    // A second save to the same path must fail unless overwrite is enabled.
    try {
      instance.save(outputPath);
      Assert.fail(
        "Write without overwrite enabled should fail if the output directory already exists.");
    } catch (IOException e) {
      // expected: destination already exists
    }
    instance.write().session(spark).overwrite().save(outputPath);
    MyParams reloaded = MyParams.load(outputPath);
    Assert.assertEquals("UID should match.", instance.uid(), reloaded.uid());
    Assert.assertEquals("Params should be preserved.",
      2, reloaded.getOrDefault(reloaded.intParam()));
  }
}
| 9,733 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/source | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/source/libsvm/JavaLibSVMRelationSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.source.libsvm;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import com.google.common.io.Files;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.ml.linalg.DenseVector;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.util.Utils;
/**
* Test LibSVMRelation in Java.
*/
public class JavaLibSVMRelationSuite extends SharedSparkSession {

  private File tempDir;
  private String path;

  @Override
  public void setUp() throws IOException {
    super.setUp();
    // Write a tiny 3-row LibSVM file the data source will read back.
    tempDir = Utils.createTempDir(System.getProperty("java.io.tmpdir"), "datasource");
    File dataFile = new File(tempDir, "part-00000");
    String contents = "1 1:1.0 3:2.0 5:3.0\n0\n0 2:4.0 4:5.0 6:6.0";
    Files.write(contents, dataFile, StandardCharsets.UTF_8);
    path = tempDir.toURI().toString();
  }

  @Override
  public void tearDown() {
    super.tearDown();
    Utils.deleteRecursively(tempDir);
  }

  @Test
  public void verifyLibSVMDF() {
    Dataset<Row> df = spark.read().format("libsvm").option("vectorType", "dense")
      .load(path);
    String[] cols = df.columns();
    Assert.assertEquals("label", cols[0]);
    Assert.assertEquals("features", cols[1]);
    Row firstRow = df.first();
    Assert.assertEquals(1.0, firstRow.getDouble(0), 1e-15);
    DenseVector features = firstRow.getAs(1);
    Assert.assertEquals(Vectors.dense(1.0, 0.0, 2.0, 0.0, 3.0, 0.0), features);
  }
}
| 9,734 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/clustering/JavaKMeansSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.clustering;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
public class JavaKMeansSuite extends SharedSparkSession {

  private transient int k = 5;
  private transient Dataset<Row> dataset;

  @Override
  public void setUp() throws IOException {
    super.setUp();
    dataset = KMeansSuite.generateKMeansData(spark, 50, 3, k);
  }

  @Test
  public void fitAndTransform() {
    // Fit should produce exactly k cluster centers.
    KMeansModel model = new KMeans().setK(k).setSeed(1).fit(dataset);
    assertEquals(k, model.clusterCenters().length);
    // The transformed output must carry both input and prediction columns.
    Dataset<Row> predictions = model.transform(dataset);
    List<String> outputColumns = Arrays.asList(predictions.columns());
    for (String expected : Arrays.asList("features", "prediction")) {
      assertTrue(outputColumns.contains(expected));
    }
  }
}
| 9,735 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/param/JavaParamsSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.param;
import java.util.Arrays;
import org.junit.Assert;
import org.junit.Test;
/**
* Test Param and related classes in Java
*/
public class JavaParamsSuite {

  @Test
  public void testParams() {
    // Check the default, then chained setters, then read-back of each kind.
    JavaTestParams params = new JavaTestParams();
    Assert.assertEquals(params.getMyIntParam(), 1);
    params.setMyIntParam(2).setMyDoubleParam(0.4).setMyStringParam("a");
    Assert.assertEquals(params.getMyDoubleParam(), 0.4, 0.0);
    Assert.assertEquals(params.getMyStringParam(), "a");
    Assert.assertArrayEquals(params.getMyDoubleArrayParam(), new double[]{1.0, 2.0}, 0.0);
  }

  @Test
  public void testParamValidate() {
    // Exercise every validator factory from Java; construction must not throw.
    ParamValidators.gt(1.0);
    ParamValidators.gtEq(1.0);
    ParamValidators.lt(1.0);
    ParamValidators.ltEq(1.0);
    ParamValidators.inRange(0, 1, true, false);
    ParamValidators.inRange(0, 1);
    ParamValidators.inArray(Arrays.asList(0, 1, 3));
    ParamValidators.inArray(Arrays.asList("a", "b"));
  }
}
| 9,736 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/param/JavaTestParams.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.param;
import java.util.Arrays;
import java.util.List;
import org.apache.spark.ml.util.Identifiable$;
/**
* A subclass of Params for testing.
*/
public class JavaTestParams extends JavaParams {

  /** Creates a test Params instance with a randomly generated UID. */
  public JavaTestParams() {
    this.uid_ = Identifiable$.MODULE$.randomUID("javaTestParams");
    init();
  }

  /** Creates a test Params instance with an explicit UID. */
  public JavaTestParams(String uid) {
    this.uid_ = uid;
    init();
  }

  // Unique identifier of this Params instance; returned by uid().
  private String uid_;

  @Override
  public String uid() {
    return uid_;
  }

  // Int param restricted to strictly positive values (default: 1, set in init()).
  private IntParam myIntParam_;

  public IntParam myIntParam() {
    return myIntParam_;
  }

  public int getMyIntParam() {
    return (Integer) getOrDefault(myIntParam_);
  }

  /** Setter returns {@code this} to allow chaining. */
  public JavaTestParams setMyIntParam(int value) {
    set(myIntParam_, value);
    return this;
  }

  // Double param restricted to [0.0, 1.0] (default: 0.5, set in init()).
  private DoubleParam myDoubleParam_;

  public DoubleParam myDoubleParam() {
    return myDoubleParam_;
  }

  public double getMyDoubleParam() {
    return (Double) getOrDefault(myDoubleParam_);
  }

  /** Setter returns {@code this} to allow chaining. */
  public JavaTestParams setMyDoubleParam(double value) {
    set(myDoubleParam_, value);
    return this;
  }

  // String param restricted to the values {"a", "b"}; no default.
  private Param<String> myStringParam_;

  public Param<String> myStringParam() {
    return myStringParam_;
  }

  public String getMyStringParam() {
    return getOrDefault(myStringParam_);
  }

  /** Setter returns {@code this} to allow chaining. */
  public JavaTestParams setMyStringParam(String value) {
    set(myStringParam_, value);
    return this;
  }

  // Unvalidated double-array param (default: {1.0, 2.0}, set in init()).
  private DoubleArrayParam myDoubleArrayParam_;

  public DoubleArrayParam myDoubleArrayParam() {
    return myDoubleArrayParam_;
  }

  public double[] getMyDoubleArrayParam() {
    return getOrDefault(myDoubleArrayParam_);
  }

  /** Setter returns {@code this} to allow chaining. */
  public JavaTestParams setMyDoubleArrayParam(double[] value) {
    set(myDoubleArrayParam_, value);
    return this;
  }

  /**
   * Registers all params with their validators, then installs defaults.
   * Called from both constructors after uid_ is assigned, because the Param
   * constructors capture {@code this} as the parent.
   */
  private void init() {
    myIntParam_ = new IntParam(this, "myIntParam", "this is an int param", ParamValidators.gt(0));
    myDoubleParam_ = new DoubleParam(this, "myDoubleParam", "this is a double param",
      ParamValidators.inRange(0.0, 1.0));
    List<String> validStrings = Arrays.asList("a", "b");
    myStringParam_ = new Param<>(this, "myStringParam", "this is a string param",
      ParamValidators.inArray(validStrings));
    myDoubleArrayParam_ =
      new DoubleArrayParam(this, "myDoubleArrayParam", "this is a double param");
    setDefault(myIntParam(), 1);
    setDefault(myDoubleParam(), 0.5);
    setDefault(myDoubleArrayParam(), new double[]{1.0, 2.0});
  }

  @Override
  public JavaTestParams copy(ParamMap extra) {
    return defaultCopy(extra);
  }
}
| 9,737 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/stat/JavaSummarizerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.stat;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertArrayEquals;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.Dataset;
import static org.apache.spark.sql.functions.col;
import org.apache.spark.ml.feature.LabeledPoint;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.ml.linalg.Vectors;
public class JavaSummarizerSuite extends SharedSparkSession {

  private transient Dataset<Row> dataset;

  @Override
  public void setUp() throws IOException {
    super.setUp();
    // Two labeled points are enough to exercise mean/max/count summaries.
    List<LabeledPoint> points = new ArrayList<>();
    points.add(new LabeledPoint(0.0, Vectors.dense(1.0, 2.0)));
    points.add(new LabeledPoint(0.0, Vectors.dense(3.0, 4.0)));
    dataset = spark.createDataFrame(jsc.parallelize(points, 2), LabeledPoint.class);
  }

  @Test
  public void testSummarizer() {
    dataset.select(col("features"));
    Row stats = dataset
      .select(Summarizer.metrics("mean", "max", "count").summary(col("features")))
      .first().getStruct(0);
    long rowCount = stats.getAs("count");
    Vector mean = stats.getAs("mean");
    Vector max = stats.getAs("max");
    assertEquals(2L, rowCount);
    assertArrayEquals(new double[]{2.0, 3.0}, mean.toArray(), 0.0);
    assertArrayEquals(new double[]{3.0, 4.0}, max.toArray(), 0.0);
  }
}
| 9,738 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/stat/JavaKolmogorovSmirnovTestSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.stat;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.math3.distribution.NormalDistribution;
import org.apache.spark.sql.Encoders;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
public class JavaKolmogorovSmirnovTestSuite extends SharedSparkSession {
private transient Dataset<Row> dataset;
@Override
public void setUp() throws IOException {
super.setUp();
List<java.lang.Double> points = Arrays.asList(0.1, 1.1, 10.1, -1.1);
dataset = spark.createDataset(points, Encoders.DOUBLE()).toDF("sample");
}
@Test
public void testKSTestCDF() {
// Create theoretical distributions
NormalDistribution stdNormalDist = new NormalDistribution(0, 1);
// set seeds
Long seed = 10L;
stdNormalDist.reseedRandomGenerator(seed);
Function<Double, Double> stdNormalCDF = (x) -> stdNormalDist.cumulativeProbability(x);
double pThreshold = 0.05;
// Comparing a standard normal sample to a standard normal distribution
Row results = KolmogorovSmirnovTest
.test(dataset, "sample", stdNormalCDF).head();
double pValue1 = results.getDouble(0);
// Cannot reject null hypothesis
assert(pValue1 > pThreshold);
}
@Test
public void testKSTestNamedDistribution() {
double pThreshold = 0.05;
// Comparing a standard normal sample to a standard normal distribution
Row results = KolmogorovSmirnovTest
.test(dataset, "sample", "norm", 0.0, 1.0).head();
double pValue1 = results.getDouble(0);
// Cannot reject null hypothesis
assert(pValue1 > pThreshold);
}
}
| 9,739 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/tuning/JavaCrossValidatorSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.tuning;
import java.io.IOException;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.ml.classification.LogisticRegression;
import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator;
import org.apache.spark.ml.feature.LabeledPoint;
import org.apache.spark.ml.param.ParamMap;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import static org.apache.spark.ml.classification.LogisticRegressionSuite.generateLogisticInputAsList;
public class JavaCrossValidatorSuite extends SharedSparkSession {

  private transient Dataset<Row> dataset;

  @Override
  public void setUp() throws IOException {
    super.setUp();
    List<LabeledPoint> points = generateLogisticInputAsList(1.0, 1.0, 100, 42);
    dataset = spark.createDataFrame(jsc.parallelize(points, 2), LabeledPoint.class);
  }

  @Test
  public void crossValidationWithLogisticRegression() {
    LogisticRegression lr = new LogisticRegression();
    // 2x2 grid of candidate models over {regParam} x {maxIter}.
    ParamMap[] grid = new ParamGridBuilder()
      .addGrid(lr.regParam(), new double[]{0.001, 1000.0})
      .addGrid(lr.maxIter(), new int[]{0, 10})
      .build();
    CrossValidator cv = new CrossValidator()
      .setEstimator(lr)
      .setEstimatorParamMaps(grid)
      .setEvaluator(new BinaryClassificationEvaluator())
      .setNumFolds(3);
    CrossValidatorModel cvModel = cv.fit(dataset);
    // The small regParam with 10 iterations should win the grid search.
    LogisticRegression bestParent = (LogisticRegression) cvModel.bestModel().parent();
    Assert.assertEquals(0.001, bestParent.getRegParam(), 0.0);
    Assert.assertEquals(10, bestParent.getMaxIter());
  }
}
| 9,740 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/feature/JavaStandardScalerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature;
import java.util.Arrays;
import java.util.List;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
public class JavaStandardScalerSuite extends SharedSparkSession {

  @Test
  public void standardScaler() {
    // The tests are to check Java compatibility.
    List<VectorIndexerSuite.FeatureData> input = Arrays.asList(
      new VectorIndexerSuite.FeatureData(Vectors.dense(0.0, -2.0)),
      new VectorIndexerSuite.FeatureData(Vectors.dense(1.0, 3.0)),
      new VectorIndexerSuite.FeatureData(Vectors.dense(1.0, 4.0))
    );
    Dataset<Row> df = spark.createDataFrame(jsc.parallelize(input, 2),
      VectorIndexerSuite.FeatureData.class);
    // Fit computes per-feature statistics; transform scales to unit std dev.
    StandardScalerModel model = new StandardScaler()
      .setInputCol("features")
      .setOutputCol("scaledFeatures")
      .setWithStd(true)
      .setWithMean(false)
      .fit(df);
    // count() materializes the result, forcing the transformation to run.
    model.transform(df).count();
  }
}
| 9,741 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/feature/JavaNormalizerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature;
import java.util.Arrays;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
public class JavaNormalizerSuite extends SharedSparkSession {

  @Test
  public void normalizer() {
    // The tests are to check Java compatibility.
    JavaRDD<VectorIndexerSuite.FeatureData> input = jsc.parallelize(Arrays.asList(
      new VectorIndexerSuite.FeatureData(Vectors.dense(0.0, -2.0)),
      new VectorIndexerSuite.FeatureData(Vectors.dense(1.0, 3.0)),
      new VectorIndexerSuite.FeatureData(Vectors.dense(1.0, 4.0))
    ));
    Dataset<Row> df = spark.createDataFrame(input, VectorIndexerSuite.FeatureData.class);
    Normalizer normalizer = new Normalizer()
      .setInputCol("features")
      .setOutputCol("normFeatures");
    // Normalize each Vector using the $L^2$ norm; count() forces execution.
    normalizer.transform(df, normalizer.p().w(2)).count();
    // Normalize each Vector using the $L^\infty$ norm.
    normalizer.transform(df, normalizer.p().w(Double.POSITIVE_INFINITY)).count();
  }
}
| 9,742 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/feature/JavaPolynomialExpansionSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature;
import java.util.Arrays;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.ml.linalg.VectorUDT;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
public class JavaPolynomialExpansionSuite extends SharedSparkSession {
  @Test
  public void polynomialExpansionTest() {
    // Expand 2-element vectors into all monomials up to degree 3 and compare
    // against precomputed expected expansions.
    PolynomialExpansion polyExpansion = new PolynomialExpansion()
      .setInputCol("features")
      .setOutputCol("polyFeatures")
      .setDegree(3);
    List<Row> data = Arrays.asList(
      RowFactory.create(
        Vectors.dense(-2.0, 2.3),
        Vectors.dense(-2.0, 4.0, -8.0, 2.3, -4.6, 9.2, 5.29, -10.58, 12.17)
      ),
      RowFactory.create(Vectors.dense(0.0, 0.0), Vectors.dense(new double[9])),
      RowFactory.create(
        Vectors.dense(0.6, -1.1),
        Vectors.dense(0.6, 0.36, 0.216, -1.1, -0.66, -0.396, 1.21, 0.726, -1.331)
      )
    );
    StructType schema = new StructType(new StructField[]{
      new StructField("features", new VectorUDT(), false, Metadata.empty()),
      new StructField("expected", new VectorUDT(), false, Metadata.empty())
    });
    Dataset<Row> dataset = spark.createDataFrame(data, schema);
    List<Row> pairs = polyExpansion.transform(dataset)
      .select("polyFeatures", "expected")
      .collectAsList();
    for (Row r : pairs) {
      double[] polyFeatures = ((Vector) r.get(0)).toArray();
      double[] expected = ((Vector) r.get(1)).toArray();
      // JUnit convention: expected value first, actual second, so failure
      // messages label the values correctly.
      Assert.assertArrayEquals(expected, polyFeatures, 1e-1);
    }
  }
}
| 9,743 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/feature/JavaHashingTFSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature;
import java.util.Arrays;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
public class JavaHashingTFSuite extends SharedSparkSession {
  @Test
  public void hashingTF() {
    List<Row> data = Arrays.asList(
      RowFactory.create(0.0, "Hi I heard about Spark"),
      RowFactory.create(0.0, "I wish Java could use case classes"),
      RowFactory.create(1.0, "Logistic regression models are neat")
    );
    StructType schema = new StructType(new StructField[]{
      new StructField("label", DataTypes.DoubleType, false, Metadata.empty()),
      new StructField("sentence", DataTypes.StringType, false, Metadata.empty())
    });
    Dataset<Row> sentenceData = spark.createDataFrame(data, schema);
    // Tokenize the sentences, hash tokens into fixed-size term-frequency
    // vectors, then rescale them by inverse document frequency.
    Tokenizer tokenizer = new Tokenizer()
      .setInputCol("sentence")
      .setOutputCol("words");
    Dataset<Row> wordsData = tokenizer.transform(sentenceData);
    int numFeatures = 20;
    HashingTF hashingTF = new HashingTF()
      .setInputCol("words")
      .setOutputCol("rawFeatures")
      .setNumFeatures(numFeatures);
    Dataset<Row> featurizedData = hashingTF.transform(wordsData);
    IDF idf = new IDF().setInputCol("rawFeatures").setOutputCol("features");
    IDFModel idfModel = idf.fit(featurizedData);
    Dataset<Row> rescaledData = idfModel.transform(featurizedData);
    for (Row r : rescaledData.select("features", "label").takeAsList(3)) {
      Vector features = r.getAs(0);
      // Every output vector must have the configured dimensionality.
      // JUnit convention: expected value first, actual second.
      Assert.assertEquals(numFeatures, features.size());
    }
  }
}
| 9,744 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/feature/JavaPCASuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature;
import java.io.Serializable;
import java.util.Arrays;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.mllib.linalg.DenseVector;
import org.apache.spark.mllib.linalg.Matrix;
import org.apache.spark.mllib.linalg.distributed.RowMatrix;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
public class JavaPCASuite extends SharedSparkSession {
  /** Simple bean pairing an input vector with its expected PCA projection. */
  public static class VectorPair implements Serializable {
    private Vector features = Vectors.dense(0.0);
    private Vector expected = Vectors.dense(0.0);

    public void setFeatures(Vector features) {
      this.features = features;
    }

    public Vector getFeatures() {
      return this.features;
    }

    public void setExpected(Vector expected) {
      this.expected = expected;
    }

    public Vector getExpected() {
      return this.expected;
    }
  }

  @Test
  public void testPCA() {
    List<Vector> points = Arrays.asList(
      Vectors.sparse(5, new int[]{1, 3}, new double[]{1.0, 7.0}),
      Vectors.dense(2.0, 0.0, 3.0, 4.0, 5.0),
      Vectors.dense(4.0, 0.0, 0.0, 6.0, 7.0)
    );
    JavaRDD<Vector> dataRDD = jsc.parallelize(points, 2);

    // Reference projection computed with the legacy mllib RowMatrix API.
    RowMatrix mat = new RowMatrix(dataRDD.map(
      (Vector vector) -> (org.apache.spark.mllib.linalg.Vector) new DenseVector(vector.toArray())
    ).rdd());
    Matrix pc = mat.computePrincipalComponents(3);
    JavaRDD<Vector> expected = mat.multiply(pc).rows().toJavaRDD()
      .map(org.apache.spark.mllib.linalg.Vector::asML);

    JavaRDD<VectorPair> featuresExpected = dataRDD.zip(expected).map(pair -> {
      VectorPair featuresExpected1 = new VectorPair();
      featuresExpected1.setFeatures(pair._1());
      featuresExpected1.setExpected(pair._2());
      return featuresExpected1;
    });

    Dataset<Row> df = spark.createDataFrame(featuresExpected, VectorPair.class);
    PCAModel pca = new PCA()
      .setInputCol("features")
      .setOutputCol("pca_features")
      .setK(3)
      .fit(df);
    List<Row> result = pca.transform(df).select("pca_features", "expected").toJavaRDD().collect();
    for (Row r : result) {
      Vector calculatedVector = (Vector) r.get(0);
      Vector expectedVector = (Vector) r.get(1);
      for (int i = 0; i < calculatedVector.size(); i++) {
        // JUnit convention: expected value first, actual second.
        Assert.assertEquals(expectedVector.apply(i), calculatedVector.apply(i), 1.0e-8);
      }
    }
  }
}
| 9,745 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/feature/JavaTokenizerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature;
import java.util.Arrays;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
public class JavaTokenizerSuite extends SharedSparkSession {
  @Test
  public void regexTokenizer() {
    // Gap-based tokenizer that preserves case and discards tokens shorter
    // than three characters.
    RegexTokenizer tokenizer = new RegexTokenizer()
      .setInputCol("rawText")
      .setOutputCol("tokens")
      .setPattern("\\s")
      .setGaps(true)
      .setToLowercase(false)
      .setMinTokenLength(3);
    JavaRDD<TokenizerTestData> testData = jsc.parallelize(Arrays.asList(
      new TokenizerTestData("Test of tok.", new String[]{"Test", "tok."}),
      new TokenizerTestData("Te,st. punct", new String[]{"Te,st.", "punct"})
    ));
    Dataset<Row> dataset = spark.createDataFrame(testData, TokenizerTestData.class);
    // Each row pairs the produced tokens with the tokens we expect.
    for (Row row : tokenizer.transform(dataset)
        .select("tokens", "wantedTokens")
        .collectAsList()) {
      Assert.assertEquals(row.get(0), row.get(1));
    }
  }
}
| 9,746 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/feature/JavaVectorSlicerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature;
import java.util.Arrays;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.ml.attribute.Attribute;
import org.apache.spark.ml.attribute.AttributeGroup;
import org.apache.spark.ml.attribute.NumericAttribute;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.StructType;
public class JavaVectorSlicerSuite extends SharedSparkSession {
  @Test
  public void vectorSlice() {
    // Attach attribute metadata so the slicer can resolve the name "f3".
    Attribute[] attrs = new Attribute[]{
      NumericAttribute.defaultAttr().withName("f1"),
      NumericAttribute.defaultAttr().withName("f2"),
      NumericAttribute.defaultAttr().withName("f3")
    };
    AttributeGroup group = new AttributeGroup("userFeatures", attrs);
    List<Row> data = Arrays.asList(
      RowFactory.create(Vectors.sparse(3, new int[]{0, 1}, new double[]{-2.0, 2.3})),
      RowFactory.create(Vectors.dense(-2.0, 2.3, 0.0))
    );
    Dataset<Row> dataset =
      spark.createDataFrame(data, (new StructType()).add(group.toStructField()));
    VectorSlicer vectorSlicer = new VectorSlicer()
      .setInputCol("userFeatures").setOutputCol("features");
    // Select index 1 plus the feature named "f3" -> 2 output features per row.
    vectorSlicer.setIndices(new int[]{1}).setNames(new String[]{"f3"});
    Dataset<Row> output = vectorSlicer.transform(dataset);
    for (Row r : output.select("userFeatures", "features").takeAsList(2)) {
      Vector features = r.getAs(1);
      // JUnit convention: expected value first, actual second.
      Assert.assertEquals(2, features.size());
    }
  }
}
| 9,747 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/feature/JavaDCTSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature;
import java.util.Arrays;
import java.util.List;
import edu.emory.mathcs.jtransforms.dct.DoubleDCT_1D;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.ml.linalg.VectorUDT;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
public class JavaDCTSuite extends SharedSparkSession {
  @Test
  public void javaCompatibilityTest() {
    // Check the DCT transformer's Java API against the JTransforms reference.
    double[] signal = new double[]{1D, 2D, 3D, 4D};
    StructType schema = new StructType(new StructField[]{
      new StructField("vec", (new VectorUDT()), false, Metadata.empty())
    });
    Dataset<Row> dataset = spark.createDataFrame(
      Arrays.asList(RowFactory.create(Vectors.dense(signal))), schema);
    // Reference: forward DCT (with scaling) computed directly on a copy.
    double[] reference = signal.clone();
    (new DoubleDCT_1D(signal.length)).forward(reference, true);
    DCT dct = new DCT()
      .setInputCol("vec")
      .setOutputCol("resultVec");
    List<Row> rows = dct.transform(dataset).select("resultVec").collectAsList();
    Vector transformed = rows.get(0).getAs("resultVec");
    Assert.assertArrayEquals(reference, transformed.toArray(), 1e-6);
  }
}
| 9,748 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/feature/JavaVectorIndexerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.ml.feature.VectorIndexerSuite.FeatureData;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
public class JavaVectorIndexerSuite extends SharedSparkSession {
  @Test
  public void vectorIndexerAPI() {
    // The tests are to check Java compatibility.
    List<FeatureData> points = Arrays.asList(
      new FeatureData(Vectors.dense(0.0, -2.0)),
      new FeatureData(Vectors.dense(1.0, 3.0)),
      new FeatureData(Vectors.dense(1.0, 4.0))
    );
    Dataset<Row> data = spark.createDataFrame(jsc.parallelize(points, 2), FeatureData.class);
    VectorIndexer indexer = new VectorIndexer()
      .setInputCol("features")
      .setOutputCol("indexed")
      .setMaxCategories(2);
    VectorIndexerModel model = indexer.fit(data);
    // JUnit convention: expected value first, actual second.
    Assert.assertEquals(2, model.numFeatures());
    Map<Integer, Map<Double, Integer>> categoryMaps = model.javaCategoryMaps();
    Assert.assertEquals(1, categoryMaps.size());
    // Exercise transform() from Java; the result is intentionally unused.
    model.transform(data);
  }
}
| 9,749 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/feature/JavaStringIndexerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature;
import java.util.Arrays;
import java.util.List;
import static org.apache.spark.sql.types.DataTypes.*;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
public class JavaStringIndexerSuite extends SharedSparkSession {
  @Test
  public void testStringIndexer() {
    StructType schema = createStructType(new StructField[]{
      createStructField("id", IntegerType, false),
      createStructField("label", StringType, false)
    });
    // Label frequencies: "a" appears 3x -> 0.0, "c" 2x -> 1.0, "b" 1x -> 2.0.
    List<Row> data = Arrays.asList(
      cr(0, "a"), cr(1, "b"), cr(2, "c"), cr(3, "a"), cr(4, "a"), cr(5, "c"));
    Dataset<Row> dataset = spark.createDataFrame(data, schema);
    StringIndexer indexer = new StringIndexer()
      .setInputCol("label")
      .setOutputCol("labelIndex");
    Dataset<Row> indexed = indexer.fit(dataset).transform(dataset);
    List<Row> expected =
      Arrays.asList(cr(0, 0.0), cr(1, 2.0), cr(2, 1.0), cr(3, 0.0), cr(4, 0.0), cr(5, 1.0));
    Assert.assertEquals(expected,
      indexed.orderBy("id").select("id", "labelIndex").collectAsList());
  }

  /**
   * An alias for RowFactory.create.
   */
  private Row cr(Object... values) {
    return RowFactory.create(values);
  }
}
| 9,750 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/feature/JavaStopWordsRemoverSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature;
import java.util.Arrays;
import java.util.List;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
public class JavaStopWordsRemoverSuite extends SharedSparkSession {
  @Test
  public void javaCompatibilityTest() {
    // End-to-end smoke test of the StopWordsRemover Java API.
    StructType schema = new StructType(new StructField[]{
      new StructField("raw", DataTypes.createArrayType(DataTypes.StringType), false,
        Metadata.empty())
    });
    Dataset<Row> dataset = spark.createDataFrame(Arrays.asList(
      RowFactory.create(Arrays.asList("I", "saw", "the", "red", "baloon")),
      RowFactory.create(Arrays.asList("Mary", "had", "a", "little", "lamb"))
    ), schema);
    StopWordsRemover remover = new StopWordsRemover()
      .setInputCol("raw")
      .setOutputCol("filtered");
    remover.transform(dataset).collect();
  }
}
| 9,751 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/feature/JavaWord2VecSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature;
import java.util.Arrays;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.*;
public class JavaWord2VecSuite extends SharedSparkSession {
  @Test
  public void testJavaWord2Vec() {
    StructType schema = new StructType(new StructField[]{
      new StructField("text", new ArrayType(DataTypes.StringType, true), false, Metadata.empty())
    });
    Dataset<Row> documentDF = spark.createDataFrame(
      Arrays.asList(
        RowFactory.create(Arrays.asList("Hi I heard about Spark".split(" "))),
        RowFactory.create(Arrays.asList("I wish Java could use case classes".split(" "))),
        RowFactory.create(Arrays.asList("Logistic regression models are neat".split(" ")))),
      schema);
    // Train tiny 3-dimensional word vectors over the toy corpus.
    Word2Vec word2Vec = new Word2Vec()
      .setInputCol("text")
      .setOutputCol("result")
      .setVectorSize(3)
      .setMinCount(0);
    Word2VecModel model = word2Vec.fit(documentDF);
    Dataset<Row> result = model.transform(documentDF);
    for (Row r : result.select("result").collectAsList()) {
      double[] docVector = ((Vector) r.get(0)).toArray();
      // Each document vector must match the configured vector size.
      // JUnit convention: expected value first, actual second.
      Assert.assertEquals(3, docVector.length);
    }
  }
}
| 9,752 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/feature/JavaVectorAssemblerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature;
import java.util.Arrays;
import static org.apache.spark.sql.types.DataTypes.*;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.ml.linalg.VectorUDT;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
public class JavaVectorAssemblerSuite extends SharedSparkSession {
  @Test
  public void testVectorAssembler() {
    StructType schema = createStructType(new StructField[]{
      createStructField("id", IntegerType, false),
      createStructField("x", DoubleType, false),
      createStructField("y", new VectorUDT(), false),
      createStructField("name", StringType, false),
      createStructField("z", new VectorUDT(), false),
      createStructField("n", LongType, false)
    });
    // Single input row mixing scalars, a dense vector, and a sparse vector.
    Dataset<Row> dataset = spark.createDataFrame(
      Arrays.asList(RowFactory.create(
        0, 0.0, Vectors.dense(1.0, 2.0), "a",
        Vectors.sparse(2, new int[]{1}, new double[]{3.0}), 10L)),
      schema);
    VectorAssembler assembler = new VectorAssembler()
      .setInputCols(new String[]{"x", "y", "z", "n"})
      .setOutputCol("features");
    Vector assembled =
      assembler.transform(dataset).select("features").first().getAs(0);
    Assert.assertEquals(
      Vectors.sparse(6, new int[]{1, 2, 4, 5}, new double[]{1.0, 2.0, 3.0, 10.0}),
      assembled);
  }
}
| 9,753 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/feature/JavaBucketizerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature;
import java.util.Arrays;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
public class JavaBucketizerSuite extends SharedSparkSession {
  @Test
  public void bucketizerTest() {
    // Two buckets: [-0.5, 0.0) and [0.0, 0.5].
    double[] splits = {-0.5, 0.0, 0.5};
    StructType schema = new StructType(new StructField[]{
      new StructField("feature", DataTypes.DoubleType, false, Metadata.empty())
    });
    Dataset<Row> dataset = spark.createDataFrame(
      Arrays.asList(
        RowFactory.create(-0.5),
        RowFactory.create(-0.3),
        RowFactory.create(0.0),
        RowFactory.create(0.2)),
      schema);
    Bucketizer bucketizer = new Bucketizer()
      .setInputCol("feature")
      .setOutputCol("result")
      .setSplits(splits);
    for (Row row : bucketizer.transform(dataset).select("result").collectAsList()) {
      double bucket = row.getDouble(0);
      Assert.assertTrue((bucket >= 0) && (bucket <= 1));
    }
  }

  @Test
  public void bucketizerMultipleColumnsTest() {
    // Per-column splits: two buckets for feature1, three for feature2.
    double[][] splitsArray = {
      {-0.5, 0.0, 0.5},
      {-0.5, 0.0, 0.2, 0.5}
    };
    StructType schema = new StructType(new StructField[]{
      new StructField("feature1", DataTypes.DoubleType, false, Metadata.empty()),
      new StructField("feature2", DataTypes.DoubleType, false, Metadata.empty()),
    });
    Dataset<Row> dataset = spark.createDataFrame(
      Arrays.asList(
        RowFactory.create(-0.5, -0.5),
        RowFactory.create(-0.3, -0.3),
        RowFactory.create(0.0, 0.0),
        RowFactory.create(0.2, 0.3)),
      schema);
    Bucketizer bucketizer = new Bucketizer()
      .setInputCols(new String[] {"feature1", "feature2"})
      .setOutputCols(new String[] {"result1", "result2"})
      .setSplitsArray(splitsArray);
    for (Row row : bucketizer.transform(dataset).select("result1", "result2").collectAsList()) {
      double bucket1 = row.getDouble(0);
      Assert.assertTrue((bucket1 >= 0) && (bucket1 <= 1));
      double bucket2 = row.getDouble(1);
      Assert.assertTrue((bucket2 >= 0) && (bucket2 <= 2));
    }
  }
}
| 9,754 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.regression;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.ml.classification.LogisticRegressionSuite;
import org.apache.spark.ml.feature.LabeledPoint;
import org.apache.spark.ml.tree.impl.TreeTests;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
public class JavaDecisionTreeRegressorSuite extends SharedSparkSession {
  @Test
  public void runDT() {
    // Small synthetic logistic dataset, cached across fit/transform.
    int nPoints = 20;
    double A = 2.0;
    double B = -1.5;
    JavaRDD<LabeledPoint> input = jsc.parallelize(
      LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
    Dataset<Row> trainingData = TreeTests.setMetadata(input, categoricalFeatures, 0);
    // This tests setters. Training with various options is tested in Scala.
    DecisionTreeRegressor dt = new DecisionTreeRegressor()
      .setMaxDepth(2)
      .setMaxBins(10)
      .setMinInstancesPerNode(5)
      .setMinInfoGain(0.0)
      .setMaxMemoryInMB(256)
      .setCacheNodeIds(false)
      .setCheckpointInterval(10)
      .setMaxDepth(2); // duplicate setMaxDepth to check builder pattern
    for (String impurity : DecisionTreeRegressor.supportedImpurities()) {
      dt.setImpurity(impurity);
    }
    DecisionTreeRegressionModel model = dt.fit(trainingData);
    // Exercise the main model accessors from Java.
    model.transform(trainingData);
    model.numNodes();
    model.depth();
    model.toDebugString();
    /*
    // TODO: Add test once save/load are implemented. SPARK-6725
    File tempDir = Utils.createTempDir(System.getProperty("java.io.tmpdir"), "spark");
    String path = tempDir.toURI().toString();
    try {
      model2.save(sc.sc(), path);
      DecisionTreeRegressionModel sameModel = DecisionTreeRegressionModel.load(sc.sc(), path);
      TreeTests.checkEqual(model2, sameModel);
    } finally {
      Utils.deleteRecursively(tempDir);
    }
    */
  }
}
| 9,755 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/regression/JavaLinearRegressionSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.regression;
import java.io.IOException;
import java.util.List;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import static org.apache.spark.ml.classification.LogisticRegressionSuite.generateLogisticInputAsList;
import org.apache.spark.ml.feature.LabeledPoint;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
public class JavaLinearRegressionSuite extends SharedSparkSession {

  private transient Dataset<Row> dataset;
  private transient JavaRDD<LabeledPoint> datasetRDD;

  @Override
  public void setUp() throws IOException {
    super.setUp();
    // Build a small synthetic dataset and expose it as an RDD, a DataFrame,
    // and a temp view named "dataset".
    List<LabeledPoint> samples = generateLogisticInputAsList(1.0, 1.0, 100, 42);
    datasetRDD = jsc.parallelize(samples, 2);
    dataset = spark.createDataFrame(datasetRDD, LabeledPoint.class);
    dataset.createOrReplaceTempView("dataset");
  }

  @Test
  public void linearRegressionDefaultParams() {
    // Defaults: label column "label", solver "auto".
    LinearRegression estimator = new LinearRegression();
    assertEquals("label", estimator.getLabelCol());
    assertEquals("auto", estimator.getSolver());
    LinearRegressionModel fitted = estimator.fit(dataset);
    fitted.transform(dataset).createOrReplaceTempView("prediction");
    Dataset<Row> predictionFrame = spark.sql("SELECT label, prediction FROM prediction");
    predictionFrame.collect();
    // The model inherits the default input/output column names.
    assertEquals("features", fitted.getFeaturesCol());
    assertEquals("prediction", fitted.getPredictionCol());
  }

  @Test
  public void linearRegressionWithSetters() {
    // Configure via setters, train, and read the params back from the model's parent.
    LinearRegression estimator = new LinearRegression()
        .setMaxIter(10)
        .setRegParam(1.0).setSolver("l-bfgs");
    LinearRegressionModel fitted = estimator.fit(dataset);
    LinearRegression fittedParent = (LinearRegression) fitted.parent();
    assertEquals(10, fittedParent.getMaxIter());
    assertEquals(1.0, fittedParent.getRegParam(), 0.0);
    // Params passed at fit() time must override the setter values.
    LinearRegressionModel refitted = estimator.fit(dataset,
        estimator.maxIter().w(5), estimator.regParam().w(0.1),
        estimator.predictionCol().w("thePred"));
    LinearRegression refittedParent = (LinearRegression) refitted.parent();
    assertEquals(5, refittedParent.getMaxIter());
    assertEquals(0.1, refittedParent.getRegParam(), 0.0);
    assertEquals("thePred", refitted.getPredictionCol());
  }
}
| 9,756 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.regression;
import java.util.HashMap;
import java.util.Map;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.ml.classification.LogisticRegressionSuite;
import org.apache.spark.ml.feature.LabeledPoint;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.ml.tree.impl.TreeTests;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
public class JavaRandomForestRegressorSuite extends SharedSparkSession {

  /**
   * Smoke test for the Java API of RandomForestRegressor: exercises every setter,
   * all supported impurities and feature-subset strategies, and verifies that
   * invalid strategies are rejected. Training with various options is tested in Scala.
   */
  @Test
  public void runDT() {
    int nPoints = 20;
    double A = 2.0;
    double B = -1.5;
    JavaRDD<LabeledPoint> data = jsc.parallelize(
      LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
    Dataset<Row> dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0);
    // This tests setters. Training with various options is tested in Scala.
    RandomForestRegressor rf = new RandomForestRegressor()
      .setMaxDepth(2)
      .setMaxBins(10)
      .setMinInstancesPerNode(5)
      .setMinInfoGain(0.0)
      .setMaxMemoryInMB(256)
      .setCacheNodeIds(false)
      .setCheckpointInterval(10)
      .setSubsamplingRate(1.0)
      .setSeed(1234)
      .setNumTrees(3)
      .setMaxDepth(2); // duplicate setMaxDepth to check builder pattern
    for (String impurity : RandomForestRegressor.supportedImpurities()) {
      rf.setImpurity(impurity);
    }
    for (String featureSubsetStrategy : RandomForestRegressor.supportedFeatureSubsetStrategies()) {
      rf.setFeatureSubsetStrategy(featureSubsetStrategy);
    }
    // Fractional strategies in (0, 1] are accepted.
    String[] realStrategies = {".1", ".10", "0.10", "0.1", "0.9", "1.0"};
    for (String strategy : realStrategies) {
      rf.setFeatureSubsetStrategy(strategy);
    }
    // Positive integer strategies are accepted.
    String[] integerStrategies = {"1", "10", "100", "1000", "10000"};
    for (String strategy : integerStrategies) {
      rf.setFeatureSubsetStrategy(strategy);
    }
    // Non-positive or out-of-range values must be rejected with IllegalArgumentException.
    // Catch the specific exception type (instead of catching Exception and asserting
    // instanceof) so any unexpected exception propagates with its full stack trace.
    String[] invalidStrategies = {"-.1", "-.10", "-0.10", ".0", "0.0", "1.1", "0"};
    for (String strategy : invalidStrategies) {
      try {
        rf.setFeatureSubsetStrategy(strategy);
        Assert.fail("Expected IllegalArgumentException for invalid strategy: " + strategy);
      } catch (IllegalArgumentException e) {
        // expected
      }
    }
    RandomForestRegressionModel model = rf.fit(dataFrame);
    model.transform(dataFrame);
    model.totalNumNodes();
    model.toDebugString();
    model.trees();
    model.treeWeights();
    Vector importances = model.featureImportances();
    /*
    // TODO: Add test once save/load are implemented. SPARK-6725
    File tempDir = Utils.createTempDir(System.getProperty("java.io.tmpdir"), "spark");
    String path = tempDir.toURI().toString();
    try {
      model2.save(sc.sc(), path);
      RandomForestRegressionModel sameModel = RandomForestRegressionModel.load(sc.sc(), path);
      TreeTests.checkEqual(model2, sameModel);
    } finally {
      Utils.deleteRecursively(tempDir);
    }
    */
  }
}
| 9,757 |
0 | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml | Create_ds/spark/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.regression;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
import org.apache.spark.SharedSparkSession;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.ml.classification.LogisticRegressionSuite;
import org.apache.spark.ml.feature.LabeledPoint;
import org.apache.spark.ml.tree.impl.TreeTests;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
public class JavaGBTRegressorSuite extends SharedSparkSession {

  /**
   * Smoke test for the Java API of GBTRegressor: exercises every setter and all
   * supported loss types, then fits and inspects the resulting model.
   * Training with various options is tested in Scala.
   */
  @Test
  public void runDT() {
    int nPoints = 20;
    double A = 2.0;
    double B = -1.5;
    JavaRDD<LabeledPoint> data = jsc.parallelize(
      LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
    Dataset<Row> dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0);
    // This tests setters. Training with various options is tested in Scala.
    // (Local renamed from "rf": the estimator is a GBT, not a random forest.)
    GBTRegressor gbt = new GBTRegressor()
      .setMaxDepth(2)
      .setMaxBins(10)
      .setMinInstancesPerNode(5)
      .setMinInfoGain(0.0)
      .setMaxMemoryInMB(256)
      .setCacheNodeIds(false)
      .setCheckpointInterval(10)
      .setSubsamplingRate(1.0)
      .setSeed(1234)
      .setMaxIter(3)
      .setStepSize(0.1)
      .setMaxDepth(2); // duplicate setMaxDepth to check builder pattern
    for (String lossType : GBTRegressor.supportedLossTypes()) {
      gbt.setLossType(lossType);
    }
    GBTRegressionModel model = gbt.fit(dataFrame);
    model.transform(dataFrame);
    model.totalNumNodes();
    model.toDebugString();
    model.trees();
    model.treeWeights();
    /*
    // TODO: Add test once save/load are implemented. SPARK-6725
    File tempDir = Utils.createTempDir(System.getProperty("java.io.tmpdir"), "spark");
    String path = tempDir.toURI().toString();
    try {
      model2.save(sc.sc(), path);
      GBTRegressionModel sameModel = GBTRegressionModel.load(sc.sc(), path);
      TreeTests.checkEqual(model2, sameModel);
    } finally {
      Utils.deleteRecursively(tempDir);
    }
    */
  }
}
| 9,758 |
0 | Create_ds/spark/mllib/src/main/scala/org/apache/spark | Create_ds/spark/mllib/src/main/scala/org/apache/spark/mllib/JavaPackage.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib;
import org.apache.spark.annotation.AlphaComponent;
/**
* A dummy class as a workaround to show the package doc of <code>spark.mllib</code> in generated
* Java API docs.
* @see <a href="http://bugs.java.com/bugdatabase/view_bug.do?bug_id=4492654" target="_blank">
* JDK-4492654</a>
*/
@AlphaComponent
public class JavaPackage {
  // Not instantiable: the class exists solely so Javadoc renders the package docs.
  private JavaPackage() {}
}
| 9,759 |
0 | Create_ds/spark/mllib/src/main/scala/org/apache/spark | Create_ds/spark/mllib/src/main/scala/org/apache/spark/mllib/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* RDD-based machine learning APIs (in maintenance mode).
*
* The <code>spark.mllib</code> package is in maintenance mode as of the Spark 2.0.0 release to
* encourage migration to the DataFrame-based APIs under the <code>spark.ml</code> package.
* While in maintenance mode,
* <ul>
* <li>
* no new features in the RDD-based <code>spark.mllib</code> package will be accepted, unless
* they block implementing new features in the DataFrame-based <code>spark.ml</code> package;
* </li>
* <li>
* bug fixes in the RDD-based APIs will still be accepted.
* </li>
* </ul>
*
* The developers will continue adding more features to the DataFrame-based APIs in the 2.x series
* to reach feature parity with the RDD-based APIs.
* And once we reach feature parity, this package will be deprecated.
*
* @see <a href="https://issues.apache.org/jira/browse/SPARK-4591" target="_blank">SPARK-4591</a> to
* track the progress of feature parity
*/
package org.apache.spark.mllib;
| 9,760 |
0 | Create_ds/spark/mllib/src/main/scala/org/apache/spark | Create_ds/spark/mllib/src/main/scala/org/apache/spark/ml/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* DataFrame-based machine learning APIs to let users quickly assemble and configure practical
* machine learning pipelines.
*/
package org.apache.spark.ml;
| 9,761 |
0 | Create_ds/spark/mllib/src/main/scala/org/apache/spark/ml | Create_ds/spark/mllib/src/main/scala/org/apache/spark/ml/attribute/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The content here should be in sync with `package.scala`.
/**
* <h2>ML attributes</h2>
*
* The ML pipeline API uses {@link org.apache.spark.sql.Dataset}s as ML datasets.
* Each dataset consists of typed columns, e.g., string, double, vector, etc.
* However, knowing only the column type may not be sufficient to handle the data properly.
* For instance, a double column with values 0.0, 1.0, 2.0, ... may represent some label indices,
* which cannot be treated as numeric values in ML algorithms, and, for another instance, we may
* want to know the names and types of features stored in a vector column.
* ML attributes are used to provide additional information to describe columns in a dataset.
*
* <h3>ML columns</h3>
*
* A column with ML attributes attached is called an ML column.
* The data in ML columns are stored as double values, i.e., an ML column is either a scalar column
* of double values or a vector column.
* Columns of other types must be encoded into ML columns using transformers.
* We use {@link org.apache.spark.ml.attribute.Attribute} to describe a scalar ML column, and
* {@link org.apache.spark.ml.attribute.AttributeGroup} to describe a vector ML column.
* ML attributes are stored in the metadata field of the column schema.
*/
package org.apache.spark.ml.attribute;
| 9,762 |
0 | Create_ds/spark/mllib/src/main/scala/org/apache/spark/ml | Create_ds/spark/mllib/src/main/scala/org/apache/spark/ml/feature/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Feature transformers
*
* The `ml.feature` package provides common feature transformers that help convert raw data or
* features into more suitable forms for model fitting.
* Most feature transformers are implemented as {@link org.apache.spark.ml.Transformer}s, which
* transforms one {@link org.apache.spark.sql.Dataset} into another, e.g.,
* {@link org.apache.spark.ml.feature.HashingTF}.
 * Some feature transformers are implemented as {@link org.apache.spark.ml.Estimator}s, because the
* transformation requires some aggregated information of the dataset, e.g., document
* frequencies in {@link org.apache.spark.ml.feature.IDF}.
* For those feature transformers, calling {@link org.apache.spark.ml.Estimator#fit} is required to
* obtain the model first, e.g., {@link org.apache.spark.ml.feature.IDFModel}, in order to apply
* transformation.
* The transformation is usually done by appending new columns to the input
* {@link org.apache.spark.sql.Dataset}, so all input columns are carried over.
*
* We try to make each transformer minimal, so it becomes flexible to assemble feature
* transformation pipelines.
* {@link org.apache.spark.ml.Pipeline} can be used to chain feature transformers, and
* {@link org.apache.spark.ml.feature.VectorAssembler} can be used to combine multiple feature
* transformations, for example:
*
* <pre>
* <code>
* import java.util.Arrays;
*
* import org.apache.spark.api.java.JavaRDD;
* import static org.apache.spark.sql.types.DataTypes.*;
* import org.apache.spark.sql.types.StructType;
* import org.apache.spark.sql.Dataset;
* import org.apache.spark.sql.RowFactory;
* import org.apache.spark.sql.Row;
*
* import org.apache.spark.ml.feature.*;
* import org.apache.spark.ml.Pipeline;
* import org.apache.spark.ml.PipelineStage;
* import org.apache.spark.ml.PipelineModel;
*
* // a DataFrame with three columns: id (integer), text (string), and rating (double).
* StructType schema = createStructType(
* Arrays.asList(
* createStructField("id", IntegerType, false),
* createStructField("text", StringType, false),
* createStructField("rating", DoubleType, false)));
* JavaRDD<Row> rowRDD = jsc.parallelize(
* Arrays.asList(
* RowFactory.create(0, "Hi I heard about Spark", 3.0),
* RowFactory.create(1, "I wish Java could use case classes", 4.0),
* RowFactory.create(2, "Logistic regression models are neat", 4.0)));
* Dataset<Row> dataset = jsql.createDataFrame(rowRDD, schema);
* // define feature transformers
* RegexTokenizer tok = new RegexTokenizer()
* .setInputCol("text")
* .setOutputCol("words");
* StopWordsRemover sw = new StopWordsRemover()
* .setInputCol("words")
* .setOutputCol("filtered_words");
* HashingTF tf = new HashingTF()
* .setInputCol("filtered_words")
* .setOutputCol("tf")
* .setNumFeatures(10000);
* IDF idf = new IDF()
* .setInputCol("tf")
* .setOutputCol("tf_idf");
* VectorAssembler assembler = new VectorAssembler()
* .setInputCols(new String[] {"tf_idf", "rating"})
* .setOutputCol("features");
*
* // assemble and fit the feature transformation pipeline
* Pipeline pipeline = new Pipeline()
* .setStages(new PipelineStage[] {tok, sw, tf, idf, assembler});
* PipelineModel model = pipeline.fit(dataset);
*
* // save transformed features with raw data
* model.transform(dataset)
* .select("id", "text", "rating", "features")
* .write().format("parquet").save("/output/path");
* </code>
* </pre>
*
* Some feature transformers implemented in MLlib are inspired by those implemented in scikit-learn.
* The major difference is that most scikit-learn feature transformers operate eagerly on the entire
* input dataset, while MLlib's feature transformers operate lazily on individual columns,
* which is more efficient and flexible to handle large and complex datasets.
*
* @see <a href="http://scikit-learn.org/stable/modules/preprocessing.html" target="_blank">
* scikit-learn.preprocessing</a>
*/
package org.apache.spark.ml.feature;
| 9,763 |
0 | Create_ds/spark/streaming/src/test/java/test/org/apache/spark | Create_ds/spark/streaming/src/test/java/test/org/apache/spark/streaming/JavaAPISuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package test.org.apache.spark.streaming;
import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.JavaCheckpointTestUtils;
import org.apache.spark.streaming.JavaTestUtils;
import org.apache.spark.streaming.LocalJavaStreamingContext;
import org.apache.spark.streaming.Seconds;
import org.apache.spark.streaming.StreamingContextState;
import org.apache.spark.streaming.StreamingContextSuite;
import scala.Tuple2;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.junit.Assert;
import org.junit.Test;
import com.google.common.io.Files;
import com.google.common.collect.Sets;
import org.apache.spark.HashPartitioner;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.Optional;
import org.apache.spark.api.java.function.*;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.api.java.*;
import org.apache.spark.util.LongAccumulator;
import org.apache.spark.util.Utils;
// The test suite itself is Serializable so that anonymous Function implementations can be
// serialized, as an alternative to converting these anonymous classes to static inner classes;
// see http://stackoverflow.com/questions/758570/.
public class JavaAPISuite extends LocalJavaStreamingContext implements Serializable {
public static void equalIterator(Iterator<?> a, Iterator<?> b) {
while (a.hasNext() && b.hasNext()) {
Assert.assertEquals(a.next(), b.next());
}
Assert.assertEquals(a.hasNext(), b.hasNext());
}
public static void equalIterable(Iterable<?> a, Iterable<?> b) {
equalIterator(a.iterator(), b.iterator());
}
@Test
public void testInitialization() {
Assert.assertNotNull(ssc.sparkContext());
}
@SuppressWarnings("unchecked")
@Test
public void testContextState() {
List<List<Integer>> inputData = Arrays.asList(Arrays.asList(1, 2, 3, 4));
Assert.assertEquals(StreamingContextState.INITIALIZED, ssc.getState());
JavaDStream<Integer> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaTestUtils.attachTestOutputStream(stream);
Assert.assertEquals(StreamingContextState.INITIALIZED, ssc.getState());
ssc.start();
Assert.assertEquals(StreamingContextState.ACTIVE, ssc.getState());
ssc.stop();
Assert.assertEquals(StreamingContextState.STOPPED, ssc.getState());
}
@SuppressWarnings("unchecked")
@Test
public void testCount() {
List<List<Integer>> inputData = Arrays.asList(
Arrays.asList(1,2,3,4),
Arrays.asList(3,4,5),
Arrays.asList(3));
List<List<Long>> expected = Arrays.asList(
Arrays.asList(4L),
Arrays.asList(3L),
Arrays.asList(1L));
JavaDStream<Integer> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaDStream<Long> count = stream.count();
JavaTestUtils.attachTestOutputStream(count);
List<List<Long>> result = JavaTestUtils.runStreams(ssc, 3, 3);
assertOrderInvariantEquals(expected, result);
}
@SuppressWarnings("unchecked")
@Test
public void testMap() {
List<List<String>> inputData = Arrays.asList(
Arrays.asList("hello", "world"),
Arrays.asList("goodnight", "moon"));
List<List<Integer>> expected = Arrays.asList(
Arrays.asList(5,5),
Arrays.asList(9,4));
JavaDStream<String> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaDStream<Integer> letterCount = stream.map(String::length);
JavaTestUtils.attachTestOutputStream(letterCount);
List<List<Integer>> result = JavaTestUtils.runStreams(ssc, 2, 2);
assertOrderInvariantEquals(expected, result);
}
@SuppressWarnings("unchecked")
@Test
public void testWindow() {
List<List<Integer>> inputData = Arrays.asList(
Arrays.asList(1,2,3),
Arrays.asList(4,5,6),
Arrays.asList(7,8,9));
List<List<Integer>> expected = Arrays.asList(
Arrays.asList(1,2,3),
Arrays.asList(4,5,6,1,2,3),
Arrays.asList(7,8,9,4,5,6),
Arrays.asList(7,8,9));
JavaDStream<Integer> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaDStream<Integer> windowed = stream.window(new Duration(2000));
JavaTestUtils.attachTestOutputStream(windowed);
List<List<Integer>> result = JavaTestUtils.runStreams(ssc, 4, 4);
assertOrderInvariantEquals(expected, result);
}
@SuppressWarnings("unchecked")
@Test
public void testWindowWithSlideDuration() {
List<List<Integer>> inputData = Arrays.asList(
Arrays.asList(1,2,3),
Arrays.asList(4,5,6),
Arrays.asList(7,8,9),
Arrays.asList(10,11,12),
Arrays.asList(13,14,15),
Arrays.asList(16,17,18));
List<List<Integer>> expected = Arrays.asList(
Arrays.asList(1,2,3,4,5,6),
Arrays.asList(1,2,3,4,5,6,7,8,9,10,11,12),
Arrays.asList(7,8,9,10,11,12,13,14,15,16,17,18),
Arrays.asList(13,14,15,16,17,18));
JavaDStream<Integer> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaDStream<Integer> windowed = stream.window(new Duration(4000), new Duration(2000));
JavaTestUtils.attachTestOutputStream(windowed);
List<List<Integer>> result = JavaTestUtils.runStreams(ssc, 8, 4);
assertOrderInvariantEquals(expected, result);
}
@SuppressWarnings("unchecked")
@Test
public void testFilter() {
List<List<String>> inputData = Arrays.asList(
Arrays.asList("giants", "dodgers"),
Arrays.asList("yankees", "red sox"));
List<List<String>> expected = Arrays.asList(
Arrays.asList("giants"),
Arrays.asList("yankees"));
JavaDStream<String> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaDStream<String> filtered = stream.filter(s -> s.contains("a"));
JavaTestUtils.attachTestOutputStream(filtered);
List<List<String>> result = JavaTestUtils.runStreams(ssc, 2, 2);
assertOrderInvariantEquals(expected, result);
}
@SuppressWarnings("unchecked")
@Test
public void testRepartitionMorePartitions() {
List<List<Integer>> inputData = Arrays.asList(
Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
JavaDStream<Integer> stream =
JavaTestUtils.attachTestInputStream(ssc, inputData, 2);
JavaDStreamLike<Integer,JavaDStream<Integer>,JavaRDD<Integer>> repartitioned =
stream.repartition(4);
JavaTestUtils.attachTestOutputStream(repartitioned);
List<List<List<Integer>>> result = JavaTestUtils.runStreamsWithPartitions(ssc, 2, 2);
Assert.assertEquals(2, result.size());
for (List<List<Integer>> rdd : result) {
Assert.assertEquals(4, rdd.size());
Assert.assertEquals(
10, rdd.get(0).size() + rdd.get(1).size() + rdd.get(2).size() + rdd.get(3).size());
}
}
@SuppressWarnings("unchecked")
@Test
public void testRepartitionFewerPartitions() {
List<List<Integer>> inputData = Arrays.asList(
Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
JavaDStream<Integer> stream =
JavaTestUtils.attachTestInputStream(ssc, inputData, 4);
JavaDStreamLike<Integer,JavaDStream<Integer>,JavaRDD<Integer>> repartitioned =
stream.repartition(2);
JavaTestUtils.attachTestOutputStream(repartitioned);
List<List<List<Integer>>> result = JavaTestUtils.runStreamsWithPartitions(ssc, 2, 2);
Assert.assertEquals(2, result.size());
for (List<List<Integer>> rdd : result) {
Assert.assertEquals(2, rdd.size());
Assert.assertEquals(10, rdd.get(0).size() + rdd.get(1).size());
}
}
@SuppressWarnings("unchecked")
@Test
public void testGlom() {
List<List<String>> inputData = Arrays.asList(
Arrays.asList("giants", "dodgers"),
Arrays.asList("yankees", "red sox"));
List<List<List<String>>> expected = Arrays.asList(
Arrays.asList(Arrays.asList("giants", "dodgers")),
Arrays.asList(Arrays.asList("yankees", "red sox")));
JavaDStream<String> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaDStream<List<String>> glommed = stream.glom();
JavaTestUtils.attachTestOutputStream(glommed);
List<List<List<String>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
Assert.assertEquals(expected, result);
}
@SuppressWarnings("unchecked")
@Test
public void testMapPartitions() {
List<List<String>> inputData = Arrays.asList(
Arrays.asList("giants", "dodgers"),
Arrays.asList("yankees", "red sox"));
List<List<String>> expected = Arrays.asList(
Arrays.asList("GIANTSDODGERS"),
Arrays.asList("YANKEESRED SOX"));
JavaDStream<String> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaDStream<String> mapped = stream.mapPartitions(in -> {
StringBuilder out = new StringBuilder();
while (in.hasNext()) {
out.append(in.next().toUpperCase(Locale.ROOT));
}
return Arrays.asList(out.toString()).iterator();
});
JavaTestUtils.attachTestOutputStream(mapped);
List<List<String>> result = JavaTestUtils.runStreams(ssc, 2, 2);
Assert.assertEquals(expected, result);
}
  // Named (serializable) sum function, shared by the reduce and window tests below.
  private static class IntegerSum implements Function2<Integer, Integer, Integer> {
    @Override
    public Integer call(Integer i1, Integer i2) {
      return i1 + i2;
    }
  }
  // Inverse of IntegerSum, used as the "inverse reduce" function in windowed reductions.
  private static class IntegerDifference implements Function2<Integer, Integer, Integer> {
    @Override
    public Integer call(Integer i1, Integer i2) {
      return i1 - i2;
    }
  }
@SuppressWarnings("unchecked")
@Test
public void testReduce() {
List<List<Integer>> inputData = Arrays.asList(
Arrays.asList(1,2,3),
Arrays.asList(4,5,6),
Arrays.asList(7,8,9));
List<List<Integer>> expected = Arrays.asList(
Arrays.asList(6),
Arrays.asList(15),
Arrays.asList(24));
JavaDStream<Integer> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaDStream<Integer> reduced = stream.reduce(new IntegerSum());
JavaTestUtils.attachTestOutputStream(reduced);
List<List<Integer>> result = JavaTestUtils.runStreams(ssc, 3, 3);
Assert.assertEquals(expected, result);
}
  @SuppressWarnings("unchecked")
  @Test
  public void testReduceByWindowWithInverse() {
    // Exercises the reduceByWindow overload that uses an inverse function to
    // incrementally subtract old values as the window slides.
    testReduceByWindow(true);
  }
  @SuppressWarnings("unchecked")
  @Test
  public void testReduceByWindowWithoutInverse() {
    // Exercises the reduceByWindow overload that recomputes each window from scratch.
    testReduceByWindow(false);
  }
@SuppressWarnings("unchecked")
private void testReduceByWindow(boolean withInverse) {
List<List<Integer>> inputData = Arrays.asList(
Arrays.asList(1,2,3),
Arrays.asList(4,5,6),
Arrays.asList(7,8,9));
List<List<Integer>> expected = Arrays.asList(
Arrays.asList(6),
Arrays.asList(21),
Arrays.asList(39),
Arrays.asList(24));
JavaDStream<Integer> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaDStream<Integer> reducedWindowed;
if (withInverse) {
reducedWindowed = stream.reduceByWindow(new IntegerSum(),
new IntegerDifference(),
new Duration(2000),
new Duration(1000));
} else {
reducedWindowed = stream.reduceByWindow(new IntegerSum(),
new Duration(2000), new Duration(1000));
}
JavaTestUtils.attachTestOutputStream(reducedWindowed);
List<List<Integer>> result = JavaTestUtils.runStreams(ssc, 4, 4);
Assert.assertEquals(expected, result);
}
// Verifies JavaStreamingContext.queueStream(): each queued RDD is emitted as one batch.
@SuppressWarnings("unchecked")
@Test
public void testQueueStream() {
// Replace the suite's context: queueStream requires a context without checkpointing.
ssc.stop();
// Create a new JavaStreamingContext without checkpointing
SparkConf conf = new SparkConf()
.setMaster("local[2]")
.setAppName("test")
// ManualClock lets JavaTestUtils advance batch time deterministically.
.set("spark.streaming.clock", "org.apache.spark.util.ManualClock");
ssc = new JavaStreamingContext(conf, new Duration(1000));
List<List<Integer>> expected = Arrays.asList(
Arrays.asList(1,2,3),
Arrays.asList(4,5,6),
Arrays.asList(7,8,9));
JavaSparkContext jsc = new JavaSparkContext(ssc.ssc().sc());
JavaRDD<Integer> rdd1 = jsc.parallelize(Arrays.asList(1, 2, 3));
JavaRDD<Integer> rdd2 = jsc.parallelize(Arrays.asList(4, 5, 6));
JavaRDD<Integer> rdd3 = jsc.parallelize(Arrays.asList(7,8,9));
Queue<JavaRDD<Integer>> rdds = new LinkedList<>();
rdds.add(rdd1);
rdds.add(rdd2);
rdds.add(rdd3);
JavaDStream<Integer> stream = ssc.queueStream(rdds);
JavaTestUtils.attachTestOutputStream(stream);
List<List<Integer>> result = JavaTestUtils.runStreams(ssc, 3, 3);
Assert.assertEquals(expected, result);
}
// Verifies DStream.transform(): an arbitrary RDD-to-RDD function (here, add 2 to
// every element) is applied to each batch.
@SuppressWarnings("unchecked")
@Test
public void testTransform() {
List<List<Integer>> inputData = Arrays.asList(
Arrays.asList(1,2,3),
Arrays.asList(4,5,6),
Arrays.asList(7,8,9));
List<List<Integer>> expected = Arrays.asList(
Arrays.asList(3,4,5),
Arrays.asList(6,7,8),
Arrays.asList(9,10,11));
JavaDStream<Integer> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaDStream<Integer> transformed = stream.transform(in -> in.map(i -> i + 2));
JavaTestUtils.attachTestOutputStream(transformed);
List<List<Integer>> result = JavaTestUtils.runStreams(ssc, 3, 3);
assertOrderInvariantEquals(expected, result);
}
// Compile-coverage test: exercises every overload of transform/transformToPair from
// Java. No output is asserted; the streams are never run.
@SuppressWarnings("unchecked")
@Test
public void testVariousTransform() {
// tests whether all variations of transform can be called from Java
List<List<Integer>> inputData = Arrays.asList(Arrays.asList(1));
JavaDStream<Integer> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
List<List<Tuple2<String, Integer>>> pairInputData =
Arrays.asList(Arrays.asList(new Tuple2<>("x", 1)));
JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(
JavaTestUtils.attachTestInputStream(ssc, pairInputData, 1));
stream.transform(in -> null);
stream.transform((in, time) -> null);
stream.transformToPair(in -> null);
stream.transformToPair((in, time) -> null);
pairStream.transform(in -> null);
pairStream.transform((in, time) -> null);
pairStream.transformToPair(in -> null);
pairStream.transformToPair((in, time) -> null);
}
// Verifies transformWithToPair(): joins two pair streams batch-by-batch via an
// arbitrary binary RDD function. Results are compared as sets because join output
// order is not deterministic.
@SuppressWarnings("unchecked")
@Test
public void testTransformWith() {
List<List<Tuple2<String, String>>> stringStringKVStream1 = Arrays.asList(
Arrays.asList(
new Tuple2<>("california", "dodgers"),
new Tuple2<>("new york", "yankees")),
Arrays.asList(
new Tuple2<>("california", "sharks"),
new Tuple2<>("new york", "rangers")));
List<List<Tuple2<String, String>>> stringStringKVStream2 = Arrays.asList(
Arrays.asList(
new Tuple2<>("california", "giants"),
new Tuple2<>("new york", "mets")),
Arrays.asList(
new Tuple2<>("california", "ducks"),
new Tuple2<>("new york", "islanders")));
List<HashSet<Tuple2<String, Tuple2<String, String>>>> expected = Arrays.asList(
Sets.newHashSet(
new Tuple2<>("california",
new Tuple2<>("dodgers", "giants")),
new Tuple2<>("new york",
new Tuple2<>("yankees", "mets"))),
Sets.newHashSet(
new Tuple2<>("california",
new Tuple2<>("sharks", "ducks")),
new Tuple2<>("new york",
new Tuple2<>("rangers", "islanders"))));
JavaDStream<Tuple2<String, String>> stream1 = JavaTestUtils.attachTestInputStream(
ssc, stringStringKVStream1, 1);
JavaPairDStream<String, String> pairStream1 = JavaPairDStream.fromJavaDStream(stream1);
JavaDStream<Tuple2<String, String>> stream2 = JavaTestUtils.attachTestInputStream(
ssc, stringStringKVStream2, 1);
JavaPairDStream<String, String> pairStream2 = JavaPairDStream.fromJavaDStream(stream2);
JavaPairDStream<String, Tuple2<String, String>> joined = pairStream1.transformWithToPair(
pairStream2,
(rdd1, rdd2, time) -> rdd1.join(rdd2)
);
JavaTestUtils.attachTestOutputStream(joined);
List<List<Tuple2<String, Tuple2<String, String>>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
// Drop intra-batch ordering before comparing.
List<HashSet<Tuple2<String, Tuple2<String, String>>>> unorderedResult = new ArrayList<>();
for (List<Tuple2<String, Tuple2<String, String>>> res: result) {
unorderedResult.add(Sets.newHashSet(res));
}
Assert.assertEquals(expected, unorderedResult);
}
// Compile-coverage test: exercises every overload of transformWith/transformWithToPair
// from Java. No output is asserted; the streams are never run.
@SuppressWarnings("unchecked")
@Test
public void testVariousTransformWith() {
// tests whether all variations of transformWith can be called from Java
List<List<Integer>> inputData1 = Arrays.asList(Arrays.asList(1));
List<List<String>> inputData2 = Arrays.asList(Arrays.asList("x"));
JavaDStream<Integer> stream1 = JavaTestUtils.attachTestInputStream(ssc, inputData1, 1);
JavaDStream<String> stream2 = JavaTestUtils.attachTestInputStream(ssc, inputData2, 1);
List<List<Tuple2<String, Integer>>> pairInputData1 =
Arrays.asList(Arrays.asList(new Tuple2<>("x", 1)));
List<List<Tuple2<Double, Character>>> pairInputData2 =
Arrays.asList(Arrays.asList(new Tuple2<>(1.0, 'x')));
JavaPairDStream<String, Integer> pairStream1 = JavaPairDStream.fromJavaDStream(
JavaTestUtils.attachTestInputStream(ssc, pairInputData1, 1));
JavaPairDStream<Double, Character> pairStream2 = JavaPairDStream.fromJavaDStream(
JavaTestUtils.attachTestInputStream(ssc, pairInputData2, 1));
stream1.transformWith(stream2, (rdd1, rdd2, time) -> null);
stream1.transformWith(pairStream1, (rdd1, rdd2, time) -> null);
stream1.transformWithToPair(stream2, (rdd1, rdd2, time) -> null);
stream1.transformWithToPair(pairStream1, (rdd1, rdd2, time) -> null);
pairStream1.transformWith(stream2, (rdd1, rdd2, time) -> null);
pairStream1.transformWith(pairStream1, (rdd1, rdd2, time) -> null);
pairStream1.transformWithToPair(stream2, (rdd1, rdd2, time) -> null);
pairStream1.transformWithToPair(pairStream2, (rdd1, rdd2, time) -> null);
}
// Verifies JavaStreamingContext.transform/transformToPair over a list of DStreams:
// the supplied function sees all the per-batch RDDs at once and can combine them.
@SuppressWarnings("unchecked")
@Test
public void testStreamingContextTransform(){
List<List<Integer>> stream1input = Arrays.asList(
Arrays.asList(1),
Arrays.asList(2)
);
List<List<Integer>> stream2input = Arrays.asList(
Arrays.asList(3),
Arrays.asList(4)
);
List<List<Tuple2<Integer, String>>> pairStream1input = Arrays.asList(
Arrays.asList(new Tuple2<>(1, "x")),
Arrays.asList(new Tuple2<>(2, "y"))
);
// Join of (stream1 union stream2) keyed on itself against pairStream1: only the
// keys present in pairStream1 (1 then 2) survive.
List<List<Tuple2<Integer, Tuple2<Integer, String>>>> expected = Arrays.asList(
Arrays.asList(new Tuple2<>(1, new Tuple2<>(1, "x"))),
Arrays.asList(new Tuple2<>(2, new Tuple2<>(2, "y")))
);
JavaDStream<Integer> stream1 = JavaTestUtils.attachTestInputStream(ssc, stream1input, 1);
JavaDStream<Integer> stream2 = JavaTestUtils.attachTestInputStream(ssc, stream2input, 1);
JavaPairDStream<Integer, String> pairStream1 = JavaPairDStream.fromJavaDStream(
JavaTestUtils.attachTestInputStream(ssc, pairStream1input, 1));
List<JavaDStream<?>> listOfDStreams1 = Arrays.asList(stream1, stream2);
// This is just to test whether this transform to JavaStream compiles
ssc.transform(
listOfDStreams1,
(listOfRDDs, time) -> {
Assert.assertEquals(2, listOfRDDs.size());
return null;
}
);
List<JavaDStream<?>> listOfDStreams2 =
Arrays.asList(stream1, stream2, pairStream1.toJavaDStream());
JavaPairDStream<Integer, Tuple2<Integer, String>> transformed2 = ssc.transformToPair(
listOfDStreams2,
(listOfRDDs, time) -> {
Assert.assertEquals(3, listOfRDDs.size());
// The RDD list is untyped; cast each entry back to its known element type.
JavaRDD<Integer> rdd1 = (JavaRDD<Integer>)listOfRDDs.get(0);
JavaRDD<Integer> rdd2 = (JavaRDD<Integer>)listOfRDDs.get(1);
JavaRDD<Tuple2<Integer, String>> rdd3 =
(JavaRDD<Tuple2<Integer, String>>)listOfRDDs.get(2);
JavaPairRDD<Integer, String> prdd3 = JavaPairRDD.fromJavaRDD(rdd3);
PairFunction<Integer, Integer, Integer> mapToTuple =
(PairFunction<Integer, Integer, Integer>) i -> new Tuple2<>(i, i);
return rdd1.union(rdd2).mapToPair(mapToTuple).join(prdd3);
}
);
JavaTestUtils.attachTestOutputStream(transformed2);
List<List<Tuple2<Integer, Tuple2<Integer, String>>>> result =
JavaTestUtils.runStreams(ssc, 2, 2);
Assert.assertEquals(expected, result);
}
// Verifies DStream.flatMap(): each word is exploded into its individual letters.
@SuppressWarnings("unchecked")
@Test
public void testFlatMap() {
List<List<String>> inputData = Arrays.asList(
Arrays.asList("go", "giants"),
Arrays.asList("boo", "dodgers"),
Arrays.asList("athletics"));
List<List<String>> expected = Arrays.asList(
Arrays.asList("g","o","g","i","a","n","t","s"),
Arrays.asList("b", "o", "o", "d","o","d","g","e","r","s"),
Arrays.asList("a","t","h","l","e","t","i","c","s"));
JavaDStream<String> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
// split("(?!^)") splits between every character without producing a leading "".
JavaDStream<String> flatMapped =
stream.flatMap(x -> Arrays.asList(x.split("(?!^)")).iterator());
JavaTestUtils.attachTestOutputStream(flatMapped);
List<List<String>> result = JavaTestUtils.runStreams(ssc, 3, 3);
assertOrderInvariantEquals(expected, result);
}
// Verifies foreachRDD(): counts batches and elements via accumulators
// (2 batches of 3 ones each -> 2 RDDs, 6 elements).
@SuppressWarnings("unchecked")
@Test
public void testForeachRDD() {
final LongAccumulator accumRdd = ssc.sparkContext().sc().longAccumulator();
final LongAccumulator accumEle = ssc.sparkContext().sc().longAccumulator();
List<List<Integer>> inputData = Arrays.asList(
Arrays.asList(1,1,1),
Arrays.asList(1,1,1));
JavaDStream<Integer> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaTestUtils.attachTestOutputStream(stream.count()); // dummy output
stream.foreachRDD(rdd -> {
accumRdd.add(1);
rdd.foreach(i -> accumEle.add(1));
});
// This is a test to make sure foreachRDD(VoidFunction2) can be called from Java
stream.foreachRDD((rdd, time) -> {});
JavaTestUtils.runStreams(ssc, 2, 2);
Assert.assertEquals(2, accumRdd.value().intValue());
Assert.assertEquals(6, accumEle.value().intValue());
}
// Verifies flatMapToPair(): each word produces one (word-length, letter) pair per letter.
@SuppressWarnings("unchecked")
@Test
public void testPairFlatMap() {
List<List<String>> inputData = Arrays.asList(
Arrays.asList("giants"),
Arrays.asList("dodgers"),
Arrays.asList("athletics"));
List<List<Tuple2<Integer, String>>> expected = Arrays.asList(
Arrays.asList(
new Tuple2<>(6, "g"),
new Tuple2<>(6, "i"),
new Tuple2<>(6, "a"),
new Tuple2<>(6, "n"),
new Tuple2<>(6, "t"),
new Tuple2<>(6, "s")),
Arrays.asList(
new Tuple2<>(7, "d"),
new Tuple2<>(7, "o"),
new Tuple2<>(7, "d"),
new Tuple2<>(7, "g"),
new Tuple2<>(7, "e"),
new Tuple2<>(7, "r"),
new Tuple2<>(7, "s")),
Arrays.asList(
new Tuple2<>(9, "a"),
new Tuple2<>(9, "t"),
new Tuple2<>(9, "h"),
new Tuple2<>(9, "l"),
new Tuple2<>(9, "e"),
new Tuple2<>(9, "t"),
new Tuple2<>(9, "i"),
new Tuple2<>(9, "c"),
new Tuple2<>(9, "s")));
JavaDStream<String> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaPairDStream<Integer, String> flatMapped = stream.flatMapToPair(in -> {
List<Tuple2<Integer, String>> out = new ArrayList<>();
// split("(?!^)") yields the individual characters without a leading "".
for (String letter : in.split("(?!^)")) {
out.add(new Tuple2<>(in.length(), letter));
}
return out.iterator();
});
JavaTestUtils.attachTestOutputStream(flatMapped);
List<List<Tuple2<Integer, String>>> result = JavaTestUtils.runStreams(ssc, 3, 3);
Assert.assertEquals(expected, result);
}
// Verifies DStream.union(): corresponding batches of the two streams are concatenated.
@SuppressWarnings("unchecked")
@Test
public void testUnion() {
List<List<Integer>> inputData1 = Arrays.asList(
Arrays.asList(1,1),
Arrays.asList(2,2),
Arrays.asList(3,3));
List<List<Integer>> inputData2 = Arrays.asList(
Arrays.asList(4,4),
Arrays.asList(5,5),
Arrays.asList(6,6));
List<List<Integer>> expected = Arrays.asList(
Arrays.asList(1,1,4,4),
Arrays.asList(2,2,5,5),
Arrays.asList(3,3,6,6));
JavaDStream<Integer> stream1 = JavaTestUtils.attachTestInputStream(ssc, inputData1, 2);
JavaDStream<Integer> stream2 = JavaTestUtils.attachTestInputStream(ssc, inputData2, 2);
JavaDStream<Integer> unioned = stream1.union(stream2);
JavaTestUtils.attachTestOutputStream(unioned);
List<List<Integer>> result = JavaTestUtils.runStreams(ssc, 3, 3);
assertOrderInvariantEquals(expected, result);
}
/*
 * Performs an order-invariant comparison of lists representing two RDD streams. This allows
 * us to account for ordering variation within individual RDD's which occurs during windowing.
 */
public static <T> void assertOrderInvariantEquals(
    List<List<T>> expected, List<List<T>> actual) {
  Assert.assertEquals(asBatchSets(expected), asBatchSets(actual));
}

// Turns each batch into an immutable set so intra-batch ordering is ignored.
private static <T> List<Set<T>> asBatchSets(List<List<T>> batches) {
  List<Set<T>> sets = new ArrayList<>();
  for (List<T> batch : batches) {
    sets.add(Collections.unmodifiableSet(new HashSet<>(batch)));
  }
  return sets;
}
// PairDStream Functions
// Verifies JavaPairDStream.filter(): keeps only pairs whose key contains the letter 'a'.
@SuppressWarnings("unchecked")
@Test
public void testPairFilter() {
List<List<String>> inputData = Arrays.asList(
Arrays.asList("giants", "dodgers"),
Arrays.asList("yankees", "red sox"));
List<List<Tuple2<String, Integer>>> expected = Arrays.asList(
Arrays.asList(new Tuple2<>("giants", 6)),
Arrays.asList(new Tuple2<>("yankees", 7)));
JavaDStream<String> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaPairDStream<String, Integer> pairStream =
stream.mapToPair(in -> new Tuple2<>(in, in.length()));
JavaPairDStream<String, Integer> filtered = pairStream.filter(in -> in._1().contains("a"));
JavaTestUtils.attachTestOutputStream(filtered);
List<List<Tuple2<String, Integer>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
Assert.assertEquals(expected, result);
}
// Shared fixture: two batches of (state, team-name) pairs used by the pair-stream tests.
@SuppressWarnings("unchecked")
private final List<List<Tuple2<String, String>>> stringStringKVStream = Arrays.asList(
Arrays.asList(new Tuple2<>("california", "dodgers"),
new Tuple2<>("california", "giants"),
new Tuple2<>("new york", "yankees"),
new Tuple2<>("new york", "mets")),
Arrays.asList(new Tuple2<>("california", "sharks"),
new Tuple2<>("california", "ducks"),
new Tuple2<>("new york", "rangers"),
new Tuple2<>("new york", "islanders")));
// Shared fixture: two batches of (state, count) pairs used by the aggregation tests.
@SuppressWarnings("unchecked")
private final List<List<Tuple2<String, Integer>>> stringIntKVStream = Arrays.asList(
Arrays.asList(
new Tuple2<>("california", 1),
new Tuple2<>("california", 3),
new Tuple2<>("new york", 4),
new Tuple2<>("new york", 1)),
Arrays.asList(
new Tuple2<>("california", 5),
new Tuple2<>("california", 5),
new Tuple2<>("new york", 3),
new Tuple2<>("new york", 1)));
// Verifies mapToPair(): every (k, v) pair is swapped to (v, k).
@SuppressWarnings("unchecked")
@Test
public void testPairMap() { // Maps pair -> pair of different type
List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
List<List<Tuple2<Integer, String>>> expected = Arrays.asList(
Arrays.asList(
new Tuple2<>(1, "california"),
new Tuple2<>(3, "california"),
new Tuple2<>(4, "new york"),
new Tuple2<>(1, "new york")),
Arrays.asList(
new Tuple2<>(5, "california"),
new Tuple2<>(5, "california"),
new Tuple2<>(3, "new york"),
new Tuple2<>(1, "new york")));
JavaDStream<Tuple2<String, Integer>> stream =
JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
JavaPairDStream<Integer, String> reversed = pairStream.mapToPair(Tuple2::swap);
JavaTestUtils.attachTestOutputStream(reversed);
List<List<Tuple2<Integer, String>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
Assert.assertEquals(expected, result);
}
// Verifies mapPartitionsToPair(): each partition's (k, v) pairs are swapped to (v, k).
@SuppressWarnings("unchecked")
@Test
public void testPairMapPartitions() { // Maps pair -> pair of different type
  List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
  List<List<Tuple2<Integer, String>>> expected = Arrays.asList(
      Arrays.asList(
          new Tuple2<>(1, "california"),
          new Tuple2<>(3, "california"),
          new Tuple2<>(4, "new york"),
          new Tuple2<>(1, "new york")),
      Arrays.asList(
          new Tuple2<>(5, "california"),
          new Tuple2<>(5, "california"),
          new Tuple2<>(3, "new york"),
          new Tuple2<>(1, "new york")));
  JavaDStream<Tuple2<String, Integer>> stream =
      JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
  JavaPairDStream<Integer, String> reversed = pairStream.mapPartitionsToPair(in -> {
    // ArrayList over LinkedList: append-only accumulation gains nothing from a linked list.
    List<Tuple2<Integer, String>> out = new ArrayList<>();
    while (in.hasNext()) {
      out.add(in.next().swap());
    }
    return out.iterator();
  });
  JavaTestUtils.attachTestOutputStream(reversed);
  List<List<Tuple2<Integer, String>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
// Verifies JavaPairDStream.map() projecting each pair down to its value.
@SuppressWarnings("unchecked")
@Test
public void testPairMap2() { // Maps pair -> single
  List<List<Tuple2<String, Integer>>> input = stringIntKVStream;
  List<List<Integer>> expected = Arrays.asList(
      Arrays.asList(1, 3, 4, 1),
      Arrays.asList(5, 5, 3, 1));
  JavaDStream<Tuple2<String, Integer>> stream =
      JavaTestUtils.attachTestInputStream(ssc, input, 1);
  JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
  JavaDStream<Integer> values = pairStream.map(Tuple2::_2);
  JavaTestUtils.attachTestOutputStream(values);
  List<List<Integer>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
// Verifies flatMapToPair() with a type change: ("hi", 1) explodes into (1, "h"), (1, "i").
@SuppressWarnings("unchecked")
@Test
public void testPairToPairFlatMapWithChangingTypes() { // Maps pair -> pair
  List<List<Tuple2<String, Integer>>> inputData = Arrays.asList(
      Arrays.asList(
          new Tuple2<>("hi", 1),
          new Tuple2<>("ho", 2)),
      Arrays.asList(
          new Tuple2<>("hi", 1),
          new Tuple2<>("ho", 2)));
  List<List<Tuple2<Integer, String>>> expected = Arrays.asList(
      Arrays.asList(
          new Tuple2<>(1, "h"),
          new Tuple2<>(1, "i"),
          new Tuple2<>(2, "h"),
          new Tuple2<>(2, "o")),
      Arrays.asList(
          new Tuple2<>(1, "h"),
          new Tuple2<>(1, "i"),
          new Tuple2<>(2, "h"),
          new Tuple2<>(2, "o")));
  JavaDStream<Tuple2<String, Integer>> stream =
      JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
  JavaPairDStream<Integer, String> flatMapped = pairStream.flatMapToPair(in -> {
    // Presized ArrayList over LinkedList; primitive char loop avoids boxing each letter.
    List<Tuple2<Integer, String>> out = new ArrayList<>(in._1().length());
    for (char c : in._1().toCharArray()) {
      out.add(new Tuple2<>(in._2(), String.valueOf(c)));
    }
    return out.iterator();
  });
  JavaTestUtils.attachTestOutputStream(flatMapped);
  List<List<Tuple2<Integer, String>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
// Verifies groupByKey(): all values for a key within a batch are grouped together.
// Compares element-by-element because the grouped values come back as Iterable.
@SuppressWarnings("unchecked")
@Test
public void testPairGroupByKey() {
  List<List<Tuple2<String, String>>> inputData = stringStringKVStream;
  List<List<Tuple2<String, List<String>>>> expected = Arrays.asList(
      Arrays.asList(
          new Tuple2<>("california", Arrays.asList("dodgers", "giants")),
          new Tuple2<>("new york", Arrays.asList("yankees", "mets"))),
      Arrays.asList(
          new Tuple2<>("california", Arrays.asList("sharks", "ducks")),
          new Tuple2<>("new york", Arrays.asList("rangers", "islanders"))));
  JavaDStream<Tuple2<String, String>> stream =
      JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaPairDStream<String, String> pairStream = JavaPairDStream.fromJavaDStream(stream);
  JavaPairDStream<String, Iterable<String>> grouped = pairStream.groupByKey();
  JavaTestUtils.attachTestOutputStream(grouped);
  List<List<Tuple2<String, Iterable<String>>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected.size(), result.size());
  Iterator<List<Tuple2<String, Iterable<String>>>> resultItr = result.iterator();
  Iterator<List<Tuple2<String, List<String>>>> expectedItr = expected.iterator();
  while (resultItr.hasNext() && expectedItr.hasNext()) {
    Iterator<Tuple2<String, Iterable<String>>> resultElements = resultItr.next().iterator();
    Iterator<Tuple2<String, List<String>>> expectedElements = expectedItr.next().iterator();
    while (resultElements.hasNext() && expectedElements.hasNext()) {
      Tuple2<String, Iterable<String>> resultElement = resultElements.next();
      Tuple2<String, List<String>> expectedElement = expectedElements.next();
      Assert.assertEquals(expectedElement._1(), resultElement._1());
      equalIterable(expectedElement._2(), resultElement._2());
    }
    // Fixed argument order: JUnit's assertEquals takes (expected, actual); the original
    // passed (actual, expected), which misreports the roles on failure.
    Assert.assertEquals(expectedElements.hasNext(), resultElements.hasNext());
  }
}
// Verifies reduceByKey(): per-key sums within each batch (1+3=4, 4+1=5, etc.).
@SuppressWarnings("unchecked")
@Test
public void testPairReduceByKey() {
List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
List<List<Tuple2<String, Integer>>> expected = Arrays.asList(
Arrays.asList(
new Tuple2<>("california", 4),
new Tuple2<>("new york", 5)),
Arrays.asList(
new Tuple2<>("california", 10),
new Tuple2<>("new york", 4)));
JavaDStream<Tuple2<String, Integer>> stream = JavaTestUtils.attachTestInputStream(
ssc, inputData, 1);
JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
JavaPairDStream<String, Integer> reduced = pairStream.reduceByKey(new IntegerSum());
JavaTestUtils.attachTestOutputStream(reduced);
List<List<Tuple2<String, Integer>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
Assert.assertEquals(expected, result);
}
// Verifies combineByKey() with identity createCombiner and sum for both merge
// functions; results match reduceByKey with IntegerSum.
@SuppressWarnings("unchecked")
@Test
public void testCombineByKey() {
List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
List<List<Tuple2<String, Integer>>> expected = Arrays.asList(
Arrays.asList(
new Tuple2<>("california", 4),
new Tuple2<>("new york", 5)),
Arrays.asList(
new Tuple2<>("california", 10),
new Tuple2<>("new york", 4)));
JavaDStream<Tuple2<String, Integer>> stream = JavaTestUtils.attachTestInputStream(
ssc, inputData, 1);
JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
JavaPairDStream<String, Integer> combined = pairStream.combineByKey(
i -> i, new IntegerSum(), new IntegerSum(), new HashPartitioner(2));
JavaTestUtils.attachTestOutputStream(combined);
List<List<Tuple2<String, Integer>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
Assert.assertEquals(expected, result);
}
// Verifies countByValue(): per-batch occurrence counts for each distinct element.
@SuppressWarnings("unchecked")
@Test
public void testCountByValue() {
List<List<String>> inputData = Arrays.asList(
Arrays.asList("hello", "world"),
Arrays.asList("hello", "moon"),
Arrays.asList("hello"));
List<List<Tuple2<String, Long>>> expected = Arrays.asList(
Arrays.asList(
new Tuple2<>("hello", 1L),
new Tuple2<>("world", 1L)),
Arrays.asList(
new Tuple2<>("hello", 1L),
new Tuple2<>("moon", 1L)),
Arrays.asList(
new Tuple2<>("hello", 1L)));
JavaDStream<String> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaPairDStream<String, Long> counted = stream.countByValue();
JavaTestUtils.attachTestOutputStream(counted);
List<List<Tuple2<String, Long>>> result = JavaTestUtils.runStreams(ssc, 3, 3);
Assert.assertEquals(expected, result);
}
// Verifies groupByKeyAndWindow() over a 2000ms window sliding every 1000ms.
// Batches are compared via convert(), which drops batch order and per-key value order.
@SuppressWarnings("unchecked")
@Test
public void testGroupByKeyAndWindow() {
List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
List<List<Tuple2<String, List<Integer>>>> expected = Arrays.asList(
Arrays.asList(
new Tuple2<>("california", Arrays.asList(1, 3)),
new Tuple2<>("new york", Arrays.asList(1, 4))
),
Arrays.asList(
new Tuple2<>("california", Arrays.asList(1, 3, 5, 5)),
new Tuple2<>("new york", Arrays.asList(1, 1, 3, 4))
),
Arrays.asList(
new Tuple2<>("california", Arrays.asList(5, 5)),
new Tuple2<>("new york", Arrays.asList(1, 3))
)
);
JavaDStream<Tuple2<String, Integer>> stream =
JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
JavaPairDStream<String, Iterable<Integer>> groupWindowed =
pairStream.groupByKeyAndWindow(new Duration(2000), new Duration(1000));
JavaTestUtils.attachTestOutputStream(groupWindowed);
List<List<Tuple2<String, List<Integer>>>> result = JavaTestUtils.runStreams(ssc, 3, 3);
Assert.assertEquals(expected.size(), result.size());
for (int i = 0; i < result.size(); i++) {
Assert.assertEquals(convert(expected.get(i)), convert(result.get(i)));
}
}
// Converts a batch of (key, value-list) tuples into a set of (key, value-set) tuples,
// so comparisons ignore both tuple order and per-key value order.
private static Set<Tuple2<String, HashSet<Integer>>>
    convert(List<Tuple2<String, List<Integer>>> listOfTuples) {
  Set<Tuple2<String, HashSet<Integer>>> converted = new HashSet<>();
  for (Tuple2<String, List<Integer>> tuple : listOfTuples) {
    converted.add(convert(tuple));
  }
  return converted;
}
// Replaces a tuple's value list with a HashSet so value ordering is ignored.
private static Tuple2<String, HashSet<Integer>> convert(Tuple2<String, List<Integer>> tuple) {
return new Tuple2<>(tuple._1(), new HashSet<>(tuple._2()));
}
// Verifies reduceByKeyAndWindow() without an inverse function: per-key sums over
// a 2000ms window sliding every 1000ms.
@SuppressWarnings("unchecked")
@Test
public void testReduceByKeyAndWindow() {
List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
List<List<Tuple2<String, Integer>>> expected = Arrays.asList(
Arrays.asList(new Tuple2<>("california", 4),
new Tuple2<>("new york", 5)),
Arrays.asList(new Tuple2<>("california", 14),
new Tuple2<>("new york", 9)),
Arrays.asList(new Tuple2<>("california", 10),
new Tuple2<>("new york", 4)));
JavaDStream<Tuple2<String, Integer>> stream =
JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
JavaPairDStream<String, Integer> reduceWindowed =
pairStream.reduceByKeyAndWindow(new IntegerSum(), new Duration(2000), new Duration(1000));
JavaTestUtils.attachTestOutputStream(reduceWindowed);
List<List<Tuple2<String, Integer>>> result = JavaTestUtils.runStreams(ssc, 3, 3);
Assert.assertEquals(expected, result);
}
// Verifies updateStateByKey(): per-key running totals carried across batches
// (state persists even in the third batch, which has no new input).
@SuppressWarnings("unchecked")
@Test
public void testUpdateStateByKey() {
List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
List<List<Tuple2<String, Integer>>> expected = Arrays.asList(
Arrays.asList(new Tuple2<>("california", 4),
new Tuple2<>("new york", 5)),
Arrays.asList(new Tuple2<>("california", 14),
new Tuple2<>("new york", 9)),
Arrays.asList(new Tuple2<>("california", 14),
new Tuple2<>("new york", 9)));
JavaDStream<Tuple2<String, Integer>> stream =
JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
// New state = previous state (if any) + sum of this batch's values for the key.
JavaPairDStream<String, Integer> updated = pairStream.updateStateByKey((values, state) -> {
int out = 0;
if (state.isPresent()) {
out += state.get();
}
for (Integer v : values) {
out += v;
}
return Optional.of(out);
});
JavaTestUtils.attachTestOutputStream(updated);
List<List<Tuple2<String, Integer>>> result = JavaTestUtils.runStreams(ssc, 3, 3);
Assert.assertEquals(expected, result);
}
// Verifies updateStateByKey() with an initial state RDD: the seeds (1, 2) are
// folded into the first batch's totals and carried forward.
@SuppressWarnings("unchecked")
@Test
public void testUpdateStateByKeyWithInitial() {
List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
List<Tuple2<String, Integer>> initial = Arrays.asList(
new Tuple2<>("california", 1),
new Tuple2<>("new york", 2));
JavaRDD<Tuple2<String, Integer>> tmpRDD = ssc.sparkContext().parallelize(initial);
JavaPairRDD<String, Integer> initialRDD = JavaPairRDD.fromJavaRDD(tmpRDD);
List<List<Tuple2<String, Integer>>> expected = Arrays.asList(
Arrays.asList(new Tuple2<>("california", 5),
new Tuple2<>("new york", 7)),
Arrays.asList(new Tuple2<>("california", 15),
new Tuple2<>("new york", 11)),
Arrays.asList(new Tuple2<>("california", 15),
new Tuple2<>("new york", 11)));
JavaDStream<Tuple2<String, Integer>> stream =
JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
// Same running-total update function as testUpdateStateByKey.
JavaPairDStream<String, Integer> updated = pairStream.updateStateByKey((values, state) -> {
int out = 0;
if (state.isPresent()) {
out += state.get();
}
for (Integer v : values) {
out += v;
}
return Optional.of(out);
}, new HashPartitioner(1), initialRDD);
JavaTestUtils.attachTestOutputStream(updated);
List<List<Tuple2<String, Integer>>> result = JavaTestUtils.runStreams(ssc, 3, 3);
assertOrderInvariantEquals(expected, result);
}
// Verifies reduceByKeyAndWindow() with an inverse function: the window sum is
// maintained incrementally (add new batch, subtract the batch leaving the window).
@SuppressWarnings("unchecked")
@Test
public void testReduceByKeyAndWindowWithInverse() {
List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
List<List<Tuple2<String, Integer>>> expected = Arrays.asList(
Arrays.asList(new Tuple2<>("california", 4),
new Tuple2<>("new york", 5)),
Arrays.asList(new Tuple2<>("california", 14),
new Tuple2<>("new york", 9)),
Arrays.asList(new Tuple2<>("california", 10),
new Tuple2<>("new york", 4)));
JavaDStream<Tuple2<String, Integer>> stream =
JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
JavaPairDStream<String, Integer> reduceWindowed =
pairStream.reduceByKeyAndWindow(new IntegerSum(), new IntegerDifference(),
new Duration(2000), new Duration(1000));
JavaTestUtils.attachTestOutputStream(reduceWindowed);
List<List<Tuple2<String, Integer>>> result = JavaTestUtils.runStreams(ssc, 3, 3);
Assert.assertEquals(expected, result);
}
// Verifies countByValueAndWindow() over a 2000ms window sliding every 1000ms.
// Results are compared as sets since intra-batch order is not deterministic.
@SuppressWarnings("unchecked")
@Test
public void testCountByValueAndWindow() {
List<List<String>> inputData = Arrays.asList(
Arrays.asList("hello", "world"),
Arrays.asList("hello", "moon"),
Arrays.asList("hello"));
List<HashSet<Tuple2<String, Long>>> expected = Arrays.asList(
Sets.newHashSet(
new Tuple2<>("hello", 1L),
new Tuple2<>("world", 1L)),
Sets.newHashSet(
new Tuple2<>("hello", 2L),
new Tuple2<>("world", 1L),
new Tuple2<>("moon", 1L)),
Sets.newHashSet(
new Tuple2<>("hello", 2L),
new Tuple2<>("moon", 1L)));
JavaDStream<String> stream = JavaTestUtils.attachTestInputStream(
ssc, inputData, 1);
JavaPairDStream<String, Long> counted =
stream.countByValueAndWindow(new Duration(2000), new Duration(1000));
JavaTestUtils.attachTestOutputStream(counted);
List<List<Tuple2<String, Long>>> result = JavaTestUtils.runStreams(ssc, 3, 3);
List<Set<Tuple2<String, Long>>> unorderedResult = new ArrayList<>();
for (List<Tuple2<String, Long>> res: result) {
unorderedResult.add(Sets.newHashSet(res));
}
Assert.assertEquals(expected, unorderedResult);
}
// Verifies JavaPairDStream.transformToPair(): sorts each batch by key.
@SuppressWarnings("unchecked")
@Test
public void testPairTransform() {
List<List<Tuple2<Integer, Integer>>> inputData = Arrays.asList(
Arrays.asList(
new Tuple2<>(3, 5),
new Tuple2<>(1, 5),
new Tuple2<>(4, 5),
new Tuple2<>(2, 5)),
Arrays.asList(
new Tuple2<>(2, 5),
new Tuple2<>(3, 5),
new Tuple2<>(4, 5),
new Tuple2<>(1, 5)));
List<List<Tuple2<Integer, Integer>>> expected = Arrays.asList(
Arrays.asList(
new Tuple2<>(1, 5),
new Tuple2<>(2, 5),
new Tuple2<>(3, 5),
new Tuple2<>(4, 5)),
Arrays.asList(
new Tuple2<>(1, 5),
new Tuple2<>(2, 5),
new Tuple2<>(3, 5),
new Tuple2<>(4, 5)));
JavaDStream<Tuple2<Integer, Integer>> stream = JavaTestUtils.attachTestInputStream(
ssc, inputData, 1);
JavaPairDStream<Integer, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
JavaPairDStream<Integer, Integer> sorted = pairStream.transformToPair(in -> in.sortByKey());
JavaTestUtils.attachTestOutputStream(sorted);
List<List<Tuple2<Integer, Integer>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
Assert.assertEquals(expected, result);
}
// Verifies that transform() on a pair stream can return a plain (non-pair) DStream:
// each batch is mapped to just its keys, preserving input order.
@SuppressWarnings("unchecked")
@Test
public void testPairToNormalRDDTransform() {
List<List<Tuple2<Integer, Integer>>> inputData = Arrays.asList(
Arrays.asList(
new Tuple2<>(3, 5),
new Tuple2<>(1, 5),
new Tuple2<>(4, 5),
new Tuple2<>(2, 5)),
Arrays.asList(
new Tuple2<>(2, 5),
new Tuple2<>(3, 5),
new Tuple2<>(4, 5),
new Tuple2<>(1, 5)));
List<List<Integer>> expected = Arrays.asList(
Arrays.asList(3,1,4,2),
Arrays.asList(2,3,4,1));
JavaDStream<Tuple2<Integer, Integer>> stream = JavaTestUtils.attachTestInputStream(
ssc, inputData, 1);
JavaPairDStream<Integer, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
JavaDStream<Integer> firstParts = pairStream.transform(in -> in.map(in2 -> in2._1()));
JavaTestUtils.attachTestOutputStream(firstParts);
List<List<Integer>> result = JavaTestUtils.runStreams(ssc, 2, 2);
Assert.assertEquals(expected, result);
}
// Verifies mapValues(): values are upper-cased while keys are left untouched.
@SuppressWarnings("unchecked")
@Test
public void testMapValues() {
List<List<Tuple2<String, String>>> inputData = stringStringKVStream;
List<List<Tuple2<String, String>>> expected = Arrays.asList(
Arrays.asList(new Tuple2<>("california", "DODGERS"),
new Tuple2<>("california", "GIANTS"),
new Tuple2<>("new york", "YANKEES"),
new Tuple2<>("new york", "METS")),
Arrays.asList(new Tuple2<>("california", "SHARKS"),
new Tuple2<>("california", "DUCKS"),
new Tuple2<>("new york", "RANGERS"),
new Tuple2<>("new york", "ISLANDERS")));
JavaDStream<Tuple2<String, String>> stream = JavaTestUtils.attachTestInputStream(
ssc, inputData, 1);
JavaPairDStream<String, String> pairStream = JavaPairDStream.fromJavaDStream(stream);
// Locale.ROOT keeps the case conversion independent of the JVM's default locale.
JavaPairDStream<String, String> mapped =
pairStream.mapValues(s -> s.toUpperCase(Locale.ROOT));
JavaTestUtils.attachTestOutputStream(mapped);
List<List<Tuple2<String, String>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
Assert.assertEquals(expected, result);
}
// Verifies flatMapValues(): each value expands to two values ("x" -> "x1", "x2"),
// each paired with the original key.
@SuppressWarnings("unchecked")
@Test
public void testFlatMapValues() {
List<List<Tuple2<String, String>>> inputData = stringStringKVStream;
List<List<Tuple2<String, String>>> expected = Arrays.asList(
Arrays.asList(new Tuple2<>("california", "dodgers1"),
new Tuple2<>("california", "dodgers2"),
new Tuple2<>("california", "giants1"),
new Tuple2<>("california", "giants2"),
new Tuple2<>("new york", "yankees1"),
new Tuple2<>("new york", "yankees2"),
new Tuple2<>("new york", "mets1"),
new Tuple2<>("new york", "mets2")),
Arrays.asList(new Tuple2<>("california", "sharks1"),
new Tuple2<>("california", "sharks2"),
new Tuple2<>("california", "ducks1"),
new Tuple2<>("california", "ducks2"),
new Tuple2<>("new york", "rangers1"),
new Tuple2<>("new york", "rangers2"),
new Tuple2<>("new york", "islanders1"),
new Tuple2<>("new york", "islanders2")));
JavaDStream<Tuple2<String, String>> stream = JavaTestUtils.attachTestInputStream(
ssc, inputData, 1);
JavaPairDStream<String, String> pairStream = JavaPairDStream.fromJavaDStream(stream);
JavaPairDStream<String, String> flatMapped = pairStream.flatMapValues(in -> {
List<String> out = new ArrayList<>();
out.add(in + "1");
out.add(in + "2");
return out;
});
JavaTestUtils.attachTestOutputStream(flatMapped);
List<List<Tuple2<String, String>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
Assert.assertEquals(expected, result);
}
@SuppressWarnings("unchecked")
@Test
public void testCoGroup() {
  // cogroup returns Iterable value groups whose concrete type is unspecified,
  // so the comparison below walks both structures element-by-element instead of
  // relying on List.equals.
  List<List<Tuple2<String, String>>> stringStringKVStream1 = Arrays.asList(
      Arrays.asList(new Tuple2<>("california", "dodgers"),
          new Tuple2<>("new york", "yankees")),
      Arrays.asList(new Tuple2<>("california", "sharks"),
          new Tuple2<>("new york", "rangers")));
  List<List<Tuple2<String, String>>> stringStringKVStream2 = Arrays.asList(
      Arrays.asList(new Tuple2<>("california", "giants"),
          new Tuple2<>("new york", "mets")),
      Arrays.asList(new Tuple2<>("california", "ducks"),
          new Tuple2<>("new york", "islanders")));
  List<List<Tuple2<String, Tuple2<List<String>, List<String>>>>> expected = Arrays.asList(
      Arrays.asList(
          new Tuple2<>("california",
              new Tuple2<>(Arrays.asList("dodgers"), Arrays.asList("giants"))),
          new Tuple2<>("new york",
              new Tuple2<>(Arrays.asList("yankees"), Arrays.asList("mets")))),
      Arrays.asList(
          new Tuple2<>("california",
              new Tuple2<>(Arrays.asList("sharks"), Arrays.asList("ducks"))),
          new Tuple2<>("new york",
              new Tuple2<>(Arrays.asList("rangers"), Arrays.asList("islanders")))));
  JavaDStream<Tuple2<String, String>> stream1 = JavaTestUtils.attachTestInputStream(
      ssc, stringStringKVStream1, 1);
  JavaPairDStream<String, String> pairStream1 = JavaPairDStream.fromJavaDStream(stream1);
  JavaDStream<Tuple2<String, String>> stream2 = JavaTestUtils.attachTestInputStream(
      ssc, stringStringKVStream2, 1);
  JavaPairDStream<String, String> pairStream2 = JavaPairDStream.fromJavaDStream(stream2);
  JavaPairDStream<String, Tuple2<Iterable<String>, Iterable<String>>> grouped =
      pairStream1.cogroup(pairStream2);
  JavaTestUtils.attachTestOutputStream(grouped);
  List<List<Tuple2<String, Tuple2<Iterable<String>, Iterable<String>>>>> result =
      JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected.size(), result.size());
  Iterator<List<Tuple2<String, Tuple2<Iterable<String>, Iterable<String>>>>> resultItr =
      result.iterator();
  Iterator<List<Tuple2<String, Tuple2<List<String>, List<String>>>>> expectedItr =
      expected.iterator();
  while (resultItr.hasNext() && expectedItr.hasNext()) {
    Iterator<Tuple2<String, Tuple2<Iterable<String>, Iterable<String>>>> resultElements =
        resultItr.next().iterator();
    Iterator<Tuple2<String, Tuple2<List<String>, List<String>>>> expectedElements =
        expectedItr.next().iterator();
    while (resultElements.hasNext() && expectedElements.hasNext()) {
      Tuple2<String, Tuple2<Iterable<String>, Iterable<String>>> resultElement =
          resultElements.next();
      Tuple2<String, Tuple2<List<String>, List<String>>> expectedElement =
          expectedElements.next();
      Assert.assertEquals(expectedElement._1(), resultElement._1());
      // equalIterable (defined elsewhere in this suite) compares contents
      // without requiring a particular Iterable implementation.
      equalIterable(expectedElement._2()._1(), resultElement._2()._1());
      equalIterable(expectedElement._2()._2(), resultElement._2()._2());
    }
    // NOTE(review): arguments are (actual, expected), swapped relative to the
    // JUnit convention; this only affects the failure message, not correctness.
    Assert.assertEquals(resultElements.hasNext(), expectedElements.hasNext());
  }
}
@SuppressWarnings("unchecked")
@Test
public void testJoin() {
  // Inner join of two pair streams per batch: every key present in both sides
  // yields one (key, (left, right)) pair.
  List<List<Tuple2<String, String>>> stringStringKVStream1 = Arrays.asList(
      Arrays.asList(new Tuple2<>("california", "dodgers"),
          new Tuple2<>("new york", "yankees")),
      Arrays.asList(new Tuple2<>("california", "sharks"),
          new Tuple2<>("new york", "rangers")));
  List<List<Tuple2<String, String>>> stringStringKVStream2 = Arrays.asList(
      Arrays.asList(new Tuple2<>("california", "giants"),
          new Tuple2<>("new york", "mets")),
      Arrays.asList(new Tuple2<>("california", "ducks"),
          new Tuple2<>("new york", "islanders")));
  List<List<Tuple2<String, Tuple2<String, String>>>> expected = Arrays.asList(
      Arrays.asList(
          new Tuple2<>("california",
              new Tuple2<>("dodgers", "giants")),
          new Tuple2<>("new york",
              new Tuple2<>("yankees", "mets"))),
      Arrays.asList(
          new Tuple2<>("california",
              new Tuple2<>("sharks", "ducks")),
          new Tuple2<>("new york",
              new Tuple2<>("rangers", "islanders"))));
  JavaDStream<Tuple2<String, String>> stream1 = JavaTestUtils.attachTestInputStream(
      ssc, stringStringKVStream1, 1);
  JavaPairDStream<String, String> pairStream1 = JavaPairDStream.fromJavaDStream(stream1);
  JavaDStream<Tuple2<String, String>> stream2 = JavaTestUtils.attachTestInputStream(
      ssc, stringStringKVStream2, 1);
  JavaPairDStream<String, String> pairStream2 = JavaPairDStream.fromJavaDStream(stream2);
  JavaPairDStream<String, Tuple2<String, String>> joined = pairStream1.join(pairStream2);
  JavaTestUtils.attachTestOutputStream(joined);
  List<List<Tuple2<String, Tuple2<String, String>>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
@SuppressWarnings("unchecked")
@Test
public void testLeftOuterJoin() {
  // Left outer join: every left-side key survives; only the row count is
  // asserted. Batch 1 keeps both left keys (count 2), batch 2 keeps only
  // "california" from the left side (count 1).
  List<List<Tuple2<String, String>>> stringStringKVStream1 = Arrays.asList(
      Arrays.asList(new Tuple2<>("california", "dodgers"),
          new Tuple2<>("new york", "yankees")),
      Arrays.asList(new Tuple2<>("california", "sharks") ));
  List<List<Tuple2<String, String>>> stringStringKVStream2 = Arrays.asList(
      Arrays.asList(new Tuple2<>("california", "giants") ),
      Arrays.asList(new Tuple2<>("new york", "islanders") )
  );
  List<List<Long>> expected = Arrays.asList(Arrays.asList(2L), Arrays.asList(1L));
  JavaDStream<Tuple2<String, String>> stream1 = JavaTestUtils.attachTestInputStream(
      ssc, stringStringKVStream1, 1);
  JavaPairDStream<String, String> pairStream1 = JavaPairDStream.fromJavaDStream(stream1);
  JavaDStream<Tuple2<String, String>> stream2 = JavaTestUtils.attachTestInputStream(
      ssc, stringStringKVStream2, 1);
  JavaPairDStream<String, String> pairStream2 = JavaPairDStream.fromJavaDStream(stream2);
  // Right side is Optional because a left key may have no match.
  JavaPairDStream<String, Tuple2<String, Optional<String>>> joined =
      pairStream1.leftOuterJoin(pairStream2);
  JavaDStream<Long> counted = joined.count();
  JavaTestUtils.attachTestOutputStream(counted);
  List<List<Long>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
@SuppressWarnings("unchecked")
@Test
public void testCheckpointMasterRecovery() throws InterruptedException {
  // End-to-end checkpoint/recovery: run one batch, stop the context, rebuild
  // it from the checkpoint directory, then run the remaining batches.
  List<List<String>> inputData = Arrays.asList(
      Arrays.asList("this", "is"),
      Arrays.asList("a", "test"),
      Arrays.asList("counting", "letters"));
  List<List<Integer>> expectedInitial = Arrays.asList(
      Arrays.asList(4,2));
  List<List<Integer>> expectedFinal = Arrays.asList(
      Arrays.asList(1,4),
      Arrays.asList(8,7));
  File tempDir = Files.createTempDir();
  tempDir.deleteOnExit();
  ssc.checkpoint(tempDir.getAbsolutePath());
  JavaDStream<String> stream = JavaCheckpointTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaDStream<Integer> letterCount = stream.map(String::length);
  JavaCheckpointTestUtils.attachTestOutputStream(letterCount);
  List<List<Integer>> initialResult = JavaTestUtils.runStreams(ssc, 1, 1);
  assertOrderInvariantEquals(expectedInitial, initialResult);
  // Give the checkpoint a moment to be written out before stopping.
  Thread.sleep(1000);
  ssc.stop();
  // Recover the context purely from the checkpoint directory.
  ssc = new JavaStreamingContext(tempDir.getAbsolutePath());
  // Tweak to take into consideration that the last batch before failure
  // will be re-processed after recovery
  List<List<Integer>> finalResult = JavaCheckpointTestUtils.runStreams(ssc, 2, 3);
  assertOrderInvariantEquals(expectedFinal, finalResult.subList(1, 3));
  ssc.stop();
  Utils.deleteRecursively(tempDir);
}
@SuppressWarnings("unchecked")
@Test
public void testContextGetOrCreate() throws InterruptedException {
  // Exercises JavaStreamingContext.getOrCreate against three checkpoint
  // states: empty dir (new context), corrupted checkpoint with
  // createOnError=true (new context), and a valid checkpoint (recovered).
  ssc.stop();
  SparkConf conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("test")
      .set("newContext", "true");
  File emptyDir = Files.createTempDir();
  emptyDir.deleteOnExit();
  StreamingContextSuite contextSuite = new StreamingContextSuite();
  String corruptedCheckpointDir = contextSuite.createCorruptedCheckpoint();
  String checkpointDir = contextSuite.createValidCheckpoint();
  // Function to create JavaStreamingContext without any output operations
  // (used to detect the new context)
  AtomicBoolean newContextCreated = new AtomicBoolean(false);
  Function0<JavaStreamingContext> creatingFunc = () -> {
    newContextCreated.set(true);
    return new JavaStreamingContext(conf, Seconds.apply(1));
  };
  // Empty checkpoint dir: a brand-new context must be created.
  newContextCreated.set(false);
  ssc = JavaStreamingContext.getOrCreate(emptyDir.getAbsolutePath(), creatingFunc);
  Assert.assertTrue("new context not created", newContextCreated.get());
  ssc.stop();
  // Corrupted checkpoint + createOnError=true: fall back to a new context.
  newContextCreated.set(false);
  ssc = JavaStreamingContext.getOrCreate(corruptedCheckpointDir, creatingFunc,
      new Configuration(), true);
  Assert.assertTrue("new context not created", newContextCreated.get());
  ssc.stop();
  // Valid checkpoint: the old context must be recovered, not re-created.
  newContextCreated.set(false);
  ssc = JavaStreamingContext.getOrCreate(checkpointDir, creatingFunc,
      new Configuration());
  Assert.assertTrue("old context not recovered", !newContextCreated.get());
  ssc.stop();
  // Same recovery check with a manually created JavaSparkContext around.
  newContextCreated.set(false);
  JavaSparkContext sc = new JavaSparkContext(conf);
  ssc = JavaStreamingContext.getOrCreate(checkpointDir, creatingFunc,
      new Configuration());
  Assert.assertTrue("old context not recovered", !newContextCreated.get());
  ssc.stop();
  // Fix: the manually created SparkContext was previously never stopped and
  // could leak into subsequent tests. stop() is idempotent, so this is safe
  // even if ssc.stop() already shut the underlying context down.
  sc.stop();
}
/* TEST DISABLED: Pending a discussion about checkpoint() semantics with TD
@SuppressWarnings("unchecked")
@Test
public void testCheckpointofIndividualStream() throws InterruptedException {
List<List<String>> inputData = Arrays.asList(
Arrays.asList("this", "is"),
Arrays.asList("a", "test"),
Arrays.asList("counting", "letters"));
List<List<Integer>> expected = Arrays.asList(
Arrays.asList(4,2),
Arrays.asList(1,4),
Arrays.asList(8,7));
JavaDStream stream = JavaCheckpointTestUtils.attachTestInputStream(ssc, inputData, 1);
JavaDStream letterCount = stream.map(new Function<String, Integer>() {
@Override
public Integer call(String s) {
return s.length();
}
});
JavaCheckpointTestUtils.attachTestOutputStream(letterCount);
letterCount.checkpoint(new Duration(1000));
List<List<Integer>> result1 = JavaCheckpointTestUtils.runStreams(ssc, 3, 3);
assertOrderInvariantEquals(expected, result1);
}
*/
// Input stream tests. These mostly just test that we can instantiate a given InputStream with
// Java arguments and assign it to a JavaDStream without producing type errors. Testing of the
// InputStream functionality is deferred to the existing Scala tests.
@Test
public void testSocketTextStream() {
  // Instantiation-only check: verifies the Java API accepts these arguments.
  // No connection is made because the context is never started here.
  ssc.socketTextStream("localhost", 12345);
}
@Test
public void testSocketString() {
  // Instantiation-only check for socketStream with a custom converter that
  // reads the raw InputStream as UTF-8 lines.
  ssc.socketStream(
      "localhost",
      12345,
      in -> {
        List<String> out = new ArrayList<>();
        // try-with-resources closes the reader (and socket stream) even if
        // readLine throws.
        try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(in, StandardCharsets.UTF_8))) {
          for (String line; (line = reader.readLine()) != null;) {
            out.add(line);
          }
        }
        return out;
      },
      StorageLevel.MEMORY_ONLY());
}
@SuppressWarnings("unchecked")
@Test
public void testTextFileStream() throws IOException {
  // textFileStream should pick up the file seeded by fileTestPrepare and emit
  // its single line "0" in the first batch.
  File testDir = Utils.createTempDir(System.getProperty("java.io.tmpdir"), "spark");
  List<List<String>> expected = fileTestPrepare(testDir);
  JavaDStream<String> input = ssc.textFileStream(testDir.toString());
  JavaTestUtils.attachTestOutputStream(input);
  List<List<String>> result = JavaTestUtils.runStreams(ssc, 1, 1);
  assertOrderInvariantEquals(expected, result);
}
@SuppressWarnings("unchecked")
@Test
public void testFileStream() throws IOException {
  // Same scenario as testTextFileStream but through the typed fileStream API
  // with an explicit key/value/InputFormat, an accept-everything path filter,
  // and the boolean flag (presumably newFilesOnly — TODO confirm against the
  // JavaStreamingContext.fileStream signature).
  File testDir = Utils.createTempDir(System.getProperty("java.io.tmpdir"), "spark");
  List<List<String>> expected = fileTestPrepare(testDir);
  JavaPairInputDStream<LongWritable, Text> inputStream = ssc.fileStream(
      testDir.toString(),
      LongWritable.class,
      Text.class,
      TextInputFormat.class,
      v1 -> Boolean.TRUE,
      true);
  // Keep only the text value; the LongWritable key is the byte offset from
  // TextInputFormat and is not interesting here.
  JavaDStream<String> test = inputStream.map(v1 -> v1._2().toString());
  JavaTestUtils.attachTestOutputStream(test);
  List<List<String>> result = JavaTestUtils.runStreams(ssc, 1, 1);
  assertOrderInvariantEquals(expected, result);
}
@Test
public void testRawSocketStream() {
  // Instantiation-only check; no connection is attempted because the context
  // is never started.
  ssc.rawSocketStream("localhost", 12345);
}
/**
 * Seeds {@code testDir} with one file named "0" containing the single line
 * "0", pins its modification time to a known value, and returns the batch
 * output the file-stream tests expect to observe.
 */
private static List<List<String>> fileTestPrepare(File testDir) throws IOException {
  File seedFile = new File(testDir, "0");
  Files.write("0\n", seedFile, StandardCharsets.UTF_8);
  // Force a deterministic, old mtime and verify the filesystem honored it.
  Assert.assertTrue(seedFile.setLastModified(1000));
  Assert.assertEquals(1000, seedFile.lastModified());
  return Arrays.asList(Arrays.asList("0"));
}
@SuppressWarnings("unchecked")
// SPARK-5795: no logic assertions, just testing that intended API invocations compile
private void compileSaveAsJavaAPI(JavaPairDStream<LongWritable,Text> pds) {
  // This method is never invoked at runtime; it exists purely so the compiler
  // checks these generic signatures.
  pds.saveAsNewAPIHadoopFiles(
      "", "", LongWritable.class, Text.class,
      org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat.class);
  pds.saveAsHadoopFiles(
      "", "", LongWritable.class, Text.class,
      org.apache.hadoop.mapred.SequenceFileOutputFormat.class);
  // Checks that a previous common workaround for this API still compiles
  pds.saveAsNewAPIHadoopFiles(
      "", "", LongWritable.class, Text.class,
      (Class) org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat.class);
  pds.saveAsHadoopFiles(
      "", "", LongWritable.class, Text.class,
      (Class) org.apache.hadoop.mapred.SequenceFileOutputFormat.class);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package test.org.apache.spark.streaming;
import java.io.Serializable;
import java.util.*;
import org.apache.spark.api.java.function.Function3;
import org.apache.spark.api.java.function.Function4;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.JavaTestUtils;
import org.apache.spark.streaming.LocalJavaStreamingContext;
import org.apache.spark.streaming.State;
import org.apache.spark.streaming.StateSpec;
import org.apache.spark.streaming.Time;
import scala.Tuple2;
import com.google.common.collect.Sets;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.HashPartitioner;
import org.apache.spark.api.java.Optional;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaMapWithStateDStream;
/**
 * Most of these tests replicate org.apache.spark.streaming.JavaAPISuite using Java 8
 * lambda syntax.
 */
@SuppressWarnings("unchecked")
public class Java8APISuite extends LocalJavaStreamingContext implements Serializable {
@Test
public void testMap() {
  // Every string should be replaced by its length; ordering within a batch
  // is not guaranteed, hence the order-invariant comparison.
  List<List<String>> inputData = Arrays.asList(
      Arrays.asList("hello", "world"),
      Arrays.asList("goodnight", "moon"));
  List<List<Integer>> expected = Arrays.asList(
      Arrays.asList(5, 5),
      Arrays.asList(9, 4));
  JavaDStream<String> words = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaDStream<Integer> lengths = words.map(s -> s.length());
  JavaTestUtils.attachTestOutputStream(lengths);
  assertOrderInvariantEquals(expected, JavaTestUtils.runStreams(ssc, 2, 2));
}
@Test
public void testFilter() {
  // Keeps only the team names containing the letter 'a'.
  List<List<String>> inputData = Arrays.asList(
      Arrays.asList("giants", "dodgers"),
      Arrays.asList("yankees", "red sox"));
  List<List<String>> expected = Arrays.asList(
      Arrays.asList("giants"),
      Arrays.asList("yankees"));
  JavaDStream<String> teams = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaDStream<String> teamsWithA = teams.filter(team -> team.contains("a"));
  JavaTestUtils.attachTestOutputStream(teamsWithA);
  assertOrderInvariantEquals(expected, JavaTestUtils.runStreams(ssc, 2, 2));
}
@Test
public void testMapPartitions() {
  // mapPartitions sees one whole partition per call: here all strings in the
  // partition are upper-cased and concatenated into a single output string.
  List<List<String>> inputData = Arrays.asList(
      Arrays.asList("giants", "dodgers"),
      Arrays.asList("yankees", "red sox"));
  List<List<String>> expected = Arrays.asList(
      Arrays.asList("GIANTSDODGERS"),
      Arrays.asList("YANKEESRED SOX"));
  JavaDStream<String> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaDStream<String> mapped = stream.mapPartitions(in -> {
    // Fix: accumulate with StringBuilder instead of repeated String
    // concatenation in a loop (quadratic copying on large partitions).
    StringBuilder out = new StringBuilder();
    while (in.hasNext()) {
      out.append(in.next().toUpperCase(Locale.ROOT));
    }
    return Arrays.asList(out.toString()).iterator();
  });
  JavaTestUtils.attachTestOutputStream(mapped);
  List<List<String>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
@Test
public void testReduce() {
  // Per-batch reduction: each RDD collapses to the sum of its elements.
  List<List<Integer>> inputData = Arrays.asList(
      Arrays.asList(1, 2, 3),
      Arrays.asList(4, 5, 6),
      Arrays.asList(7, 8, 9));
  List<List<Integer>> expected = Arrays.asList(
      Arrays.asList(6),
      Arrays.asList(15),
      Arrays.asList(24));
  JavaDStream<Integer> numbers = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaDStream<Integer> sums = numbers.reduce(Integer::sum);
  JavaTestUtils.attachTestOutputStream(sums);
  Assert.assertEquals(expected, JavaTestUtils.runStreams(ssc, 3, 3));
}
@Test
public void testReduceByWindow() {
  // Sliding-window sum (window 2000 ms, slide 1000 ms) with an inverse
  // function, so old batches are subtracted instead of recomputing the window:
  // 6, 6+15=21, 15+24=39, then 24 once the second batch slides out.
  List<List<Integer>> inputData = Arrays.asList(
      Arrays.asList(1, 2, 3),
      Arrays.asList(4, 5, 6),
      Arrays.asList(7, 8, 9));
  List<List<Integer>> expected = Arrays.asList(
      Arrays.asList(6),
      Arrays.asList(21),
      Arrays.asList(39),
      Arrays.asList(24));
  JavaDStream<Integer> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaDStream<Integer> reducedWindowed = stream.reduceByWindow(
      (x, y) -> x + y, (x, y) -> x - y, new Duration(2000), new Duration(1000));
  JavaTestUtils.attachTestOutputStream(reducedWindowed);
  List<List<Integer>> result = JavaTestUtils.runStreams(ssc, 4, 4);
  Assert.assertEquals(expected, result);
}
@Test
public void testTransform() {
  // transform applies an arbitrary RDD operation per batch; here every
  // element is shifted up by two.
  List<List<Integer>> inputData = Arrays.asList(
      Arrays.asList(1, 2, 3),
      Arrays.asList(4, 5, 6),
      Arrays.asList(7, 8, 9));
  List<List<Integer>> expected = Arrays.asList(
      Arrays.asList(3, 4, 5),
      Arrays.asList(6, 7, 8),
      Arrays.asList(9, 10, 11));
  JavaDStream<Integer> numbers = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaDStream<Integer> shifted = numbers.transform(rdd -> rdd.map(n -> n + 2));
  JavaTestUtils.attachTestOutputStream(shifted);
  assertOrderInvariantEquals(expected, JavaTestUtils.runStreams(ssc, 3, 3));
}
@Test
public void testVariousTransform() {
  // tests whether all variations of transform can be called from Java
  // Compile-time check only: the lambdas deliberately return null and the
  // streams are never run, so only the generic signatures matter.
  List<List<Integer>> inputData = Arrays.asList(Arrays.asList(1));
  JavaDStream<Integer> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  List<List<Tuple2<String, Integer>>> pairInputData =
      Arrays.asList(Arrays.asList(new Tuple2<>("x", 1)));
  JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(
      JavaTestUtils.attachTestInputStream(ssc, pairInputData, 1));
  JavaDStream<Integer> transformed1 = stream.transform(in -> null);
  JavaDStream<Integer> transformed2 = stream.transform((x, time) -> null);
  JavaPairDStream<String, Integer> transformed3 = stream.transformToPair(x -> null);
  JavaPairDStream<String, Integer> transformed4 = stream.transformToPair((x, time) -> null);
  JavaDStream<Integer> pairTransformed1 = pairStream.transform(x -> null);
  JavaDStream<Integer> pairTransformed2 = pairStream.transform((x, time) -> null);
  JavaPairDStream<String, String> pairTransformed3 = pairStream.transformToPair(x -> null);
  JavaPairDStream<String, String> pairTransformed4 =
      pairStream.transformToPair((x, time) -> null);
}
@Test
public void testTransformWith() {
  // Joins two pair streams via transformWithToPair. Join output order is not
  // deterministic, so each batch is compared as a Set rather than a List.
  List<List<Tuple2<String, String>>> stringStringKVStream1 = Arrays.asList(
      Arrays.asList(
          new Tuple2<>("california", "dodgers"),
          new Tuple2<>("new york", "yankees")),
      Arrays.asList(
          new Tuple2<>("california", "sharks"),
          new Tuple2<>("new york", "rangers")));
  List<List<Tuple2<String, String>>> stringStringKVStream2 = Arrays.asList(
      Arrays.asList(
          new Tuple2<>("california", "giants"),
          new Tuple2<>("new york", "mets")),
      Arrays.asList(
          new Tuple2<>("california", "ducks"),
          new Tuple2<>("new york", "islanders")));
  List<Set<Tuple2<String, Tuple2<String, String>>>> expected = Arrays.asList(
      Sets.newHashSet(
          new Tuple2<>("california",
              new Tuple2<>("dodgers", "giants")),
          new Tuple2<>("new york",
              new Tuple2<>("yankees", "mets"))),
      Sets.newHashSet(
          new Tuple2<>("california",
              new Tuple2<>("sharks", "ducks")),
          new Tuple2<>("new york",
              new Tuple2<>("rangers", "islanders"))));
  JavaDStream<Tuple2<String, String>> stream1 = JavaTestUtils.attachTestInputStream(
      ssc, stringStringKVStream1, 1);
  JavaPairDStream<String, String> pairStream1 = JavaPairDStream.fromJavaDStream(stream1);
  JavaDStream<Tuple2<String, String>> stream2 = JavaTestUtils.attachTestInputStream(
      ssc, stringStringKVStream2, 1);
  JavaPairDStream<String, String> pairStream2 = JavaPairDStream.fromJavaDStream(stream2);
  // (x, y, z) = (this batch's RDD, other stream's RDD, batch time).
  JavaPairDStream<String, Tuple2<String, String>> joined =
      pairStream1.transformWithToPair(pairStream2,(x, y, z) -> x.join(y));
  JavaTestUtils.attachTestOutputStream(joined);
  List<List<Tuple2<String, Tuple2<String, String>>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  List<Set<Tuple2<String, Tuple2<String, String>>>> unorderedResult = new ArrayList<>();
  for (List<Tuple2<String, Tuple2<String, String>>> res : result) {
    unorderedResult.add(Sets.newHashSet(res));
  }
  Assert.assertEquals(expected, unorderedResult);
}
@Test
public void testVariousTransformWith() {
  // tests whether all variations of transformWith can be called from Java
  // Compile-time check only: the lambdas deliberately return null and the
  // streams are never run; only the generic signatures are exercised.
  List<List<Integer>> inputData1 = Arrays.asList(Arrays.asList(1));
  List<List<String>> inputData2 = Arrays.asList(Arrays.asList("x"));
  JavaDStream<Integer> stream1 = JavaTestUtils.attachTestInputStream(ssc, inputData1, 1);
  JavaDStream<String> stream2 = JavaTestUtils.attachTestInputStream(ssc, inputData2, 1);
  List<List<Tuple2<String, Integer>>> pairInputData1 =
      Arrays.asList(Arrays.asList(new Tuple2<>("x", 1)));
  List<List<Tuple2<Double, Character>>> pairInputData2 =
      Arrays.asList(Arrays.asList(new Tuple2<>(1.0, 'x')));
  JavaPairDStream<String, Integer> pairStream1 = JavaPairDStream.fromJavaDStream(
      JavaTestUtils.attachTestInputStream(ssc, pairInputData1, 1));
  JavaPairDStream<Double, Character> pairStream2 = JavaPairDStream.fromJavaDStream(
      JavaTestUtils.attachTestInputStream(ssc, pairInputData2, 1));
  JavaDStream<Double> transformed1 = stream1.transformWith(stream2, (x, y, z) -> null);
  JavaDStream<Double> transformed2 = stream1.transformWith(pairStream1,(x, y, z) -> null);
  JavaPairDStream<Double, Double> transformed3 =
      stream1.transformWithToPair(stream2,(x, y, z) -> null);
  JavaPairDStream<Double, Double> transformed4 =
      stream1.transformWithToPair(pairStream1,(x, y, z) -> null);
  JavaDStream<Double> pairTransformed1 = pairStream1.transformWith(stream2,(x, y, z) -> null);
  JavaDStream<Double> pairTransformed2_ =
      pairStream1.transformWith(pairStream1,(x, y, z) -> null);
  JavaPairDStream<Double, Double> pairTransformed3 =
      pairStream1.transformWithToPair(stream2,(x, y, z) -> null);
  JavaPairDStream<Double, Double> pairTransformed4 =
      pairStream1.transformWithToPair(pairStream2,(x, y, z) -> null);
}
@Test
public void testStreamingContextTransform() {
  // Exercises StreamingContext-level transform over a list of DStreams: the
  // first call is a compile check only; the second unions two int streams,
  // pairs each element with itself, and joins against the pair stream.
  List<List<Integer>> stream1input = Arrays.asList(
      Arrays.asList(1),
      Arrays.asList(2)
  );
  List<List<Integer>> stream2input = Arrays.asList(
      Arrays.asList(3),
      Arrays.asList(4)
  );
  List<List<Tuple2<Integer, String>>> pairStream1input = Arrays.asList(
      Arrays.asList(new Tuple2<>(1, "x")),
      Arrays.asList(new Tuple2<>(2, "y"))
  );
  List<List<Tuple2<Integer, Tuple2<Integer, String>>>> expected = Arrays.asList(
      Arrays.asList(new Tuple2<>(1, new Tuple2<>(1, "x"))),
      Arrays.asList(new Tuple2<>(2, new Tuple2<>(2, "y")))
  );
  JavaDStream<Integer> stream1 = JavaTestUtils.attachTestInputStream(ssc, stream1input, 1);
  JavaDStream<Integer> stream2 = JavaTestUtils.attachTestInputStream(ssc, stream2input, 1);
  JavaPairDStream<Integer, String> pairStream1 = JavaPairDStream.fromJavaDStream(
      JavaTestUtils.attachTestInputStream(ssc, pairStream1input, 1));
  List<JavaDStream<?>> listOfDStreams1 = Arrays.asList(stream1, stream2);
  // This is just to test whether this transform to JavaStream compiles
  JavaDStream<Long> transformed1 = ssc.transform(
      listOfDStreams1, (List<JavaRDD<?>> listOfRDDs, Time time) -> {
        Assert.assertEquals(2, listOfRDDs.size());
        return null;
      });
  List<JavaDStream<?>> listOfDStreams2 =
      Arrays.asList(stream1, stream2, pairStream1.toJavaDStream());
  JavaPairDStream<Integer, Tuple2<Integer, String>> transformed2 = ssc.transformToPair(
      listOfDStreams2, (List<JavaRDD<?>> listOfRDDs, Time time) -> {
        Assert.assertEquals(3, listOfRDDs.size());
        // The wildcard RDDs must be cast back to their concrete element types.
        JavaRDD<Integer> rdd1 = (JavaRDD<Integer>) listOfRDDs.get(0);
        JavaRDD<Integer> rdd2 = (JavaRDD<Integer>) listOfRDDs.get(1);
        JavaRDD<Tuple2<Integer, String>> rdd3 = (JavaRDD<Tuple2<Integer, String>>) listOfRDDs.get(2);
        JavaPairRDD<Integer, String> prdd3 = JavaPairRDD.fromJavaRDD(rdd3);
        PairFunction<Integer, Integer, Integer> mapToTuple =
            (Integer i) -> new Tuple2<>(i, i);
        return rdd1.union(rdd2).mapToPair(mapToTuple).join(prdd3);
      });
  JavaTestUtils.attachTestOutputStream(transformed2);
  List<List<Tuple2<Integer, Tuple2<Integer, String>>>> result =
      JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
@Test
public void testFlatMap() {
  // Splits every word into its individual characters; the regex "(?!^)"
  // matches between every pair of characters without consuming anything.
  List<List<String>> inputData = Arrays.asList(
      Arrays.asList("go", "giants"),
      Arrays.asList("boo", "dodgers"),
      Arrays.asList("athletics"));
  List<List<String>> expected = Arrays.asList(
      Arrays.asList("g", "o", "g", "i", "a", "n", "t", "s"),
      Arrays.asList("b", "o", "o", "d", "o", "d", "g", "e", "r", "s"),
      Arrays.asList("a", "t", "h", "l", "e", "t", "i", "c", "s"));
  JavaDStream<String> words = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaDStream<String> characters = words.flatMap(
      word -> Arrays.asList(word.split("(?!^)")).iterator());
  JavaTestUtils.attachTestOutputStream(characters);
  assertOrderInvariantEquals(expected, JavaTestUtils.runStreams(ssc, 3, 3));
}
@Test
public void testPairFlatMap() {
  // flatMapToPair: each word expands to one (length, character) pair per
  // character of the word.
  List<List<String>> inputData = Arrays.asList(
      Arrays.asList("giants"),
      Arrays.asList("dodgers"),
      Arrays.asList("athletics"));
  List<List<Tuple2<Integer, String>>> expected = Arrays.asList(
      Arrays.asList(
          new Tuple2<>(6, "g"),
          new Tuple2<>(6, "i"),
          new Tuple2<>(6, "a"),
          new Tuple2<>(6, "n"),
          new Tuple2<>(6, "t"),
          new Tuple2<>(6, "s")),
      Arrays.asList(
          new Tuple2<>(7, "d"),
          new Tuple2<>(7, "o"),
          new Tuple2<>(7, "d"),
          new Tuple2<>(7, "g"),
          new Tuple2<>(7, "e"),
          new Tuple2<>(7, "r"),
          new Tuple2<>(7, "s")),
      Arrays.asList(
          new Tuple2<>(9, "a"),
          new Tuple2<>(9, "t"),
          new Tuple2<>(9, "h"),
          new Tuple2<>(9, "l"),
          new Tuple2<>(9, "e"),
          new Tuple2<>(9, "t"),
          new Tuple2<>(9, "i"),
          new Tuple2<>(9, "c"),
          new Tuple2<>(9, "s")));
  JavaDStream<String> stream = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaPairDStream<Integer, String> flatMapped = stream.flatMapToPair(s -> {
    List<Tuple2<Integer, String>> out = new ArrayList<>();
    // "(?!^)" splits between every pair of characters.
    for (String letter : s.split("(?!^)")) {
      out.add(new Tuple2<>(s.length(), letter));
    }
    return out.iterator();
  });
  JavaTestUtils.attachTestOutputStream(flatMapped);
  List<List<Tuple2<Integer, String>>> result = JavaTestUtils.runStreams(ssc, 3, 3);
  Assert.assertEquals(expected, result);
}
/*
* Performs an order-invariant comparison of lists representing two RDD streams. This allows
* us to account for ordering variation within individual RDD's which occurs during windowing.
*/
public static <T extends Comparable<T>> void assertOrderInvariantEquals(
    List<List<T>> expected, List<List<T>> actual) {
  // Fix: the previous implementation sorted `expected` in place
  // (expected.forEach(Collections::sort)), silently mutating the caller's
  // data. Sort defensive copies of BOTH sides instead.
  List<List<T>> sortedExpected = new ArrayList<>();
  expected.forEach(list -> {
    List<T> sortedList = new ArrayList<>(list);
    Collections.sort(sortedList);
    sortedExpected.add(sortedList);
  });
  List<List<T>> sortedActual = new ArrayList<>();
  actual.forEach(list -> {
    List<T> sortedList = new ArrayList<>(list);
    Collections.sort(sortedList);
    sortedActual.add(sortedList);
  });
  Assert.assertEquals(sortedExpected, sortedActual);
}
@Test
public void testPairFilter() {
  // Builds (word, length) pairs and keeps only those whose key contains 'a'.
  List<List<String>> inputData = Arrays.asList(
      Arrays.asList("giants", "dodgers"),
      Arrays.asList("yankees", "red sox"));
  List<List<Tuple2<String, Integer>>> expected = Arrays.asList(
      Arrays.asList(new Tuple2<>("giants", 6)),
      Arrays.asList(new Tuple2<>("yankees", 7)));
  JavaDStream<String> words = JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaPairDStream<String, Integer> wordLengths =
      words.mapToPair(word -> new Tuple2<>(word, word.length()));
  JavaPairDStream<String, Integer> keptPairs =
      wordLengths.filter(pair -> pair._1().contains("a"));
  JavaTestUtils.attachTestOutputStream(keptPairs);
  Assert.assertEquals(expected, JavaTestUtils.runStreams(ssc, 2, 2));
}
// Shared fixture: two batches of (city, team-name) pairs, reused by the
// pair-DStream tests below.
List<List<Tuple2<String, String>>> stringStringKVStream = Arrays.asList(
    Arrays.asList(new Tuple2<>("california", "dodgers"),
        new Tuple2<>("california", "giants"),
        new Tuple2<>("new york", "yankees"),
        new Tuple2<>("new york", "mets")),
    Arrays.asList(new Tuple2<>("california", "sharks"),
        new Tuple2<>("california", "ducks"),
        new Tuple2<>("new york", "rangers"),
        new Tuple2<>("new york", "islanders")));
// Shared fixture: two batches of (city, int) pairs, reused by the
// pair-DStream tests below.
List<List<Tuple2<String, Integer>>> stringIntKVStream = Arrays.asList(
    Arrays.asList(
        new Tuple2<>("california", 1),
        new Tuple2<>("california", 3),
        new Tuple2<>("new york", 4),
        new Tuple2<>("new york", 1)),
    Arrays.asList(
        new Tuple2<>("california", 5),
        new Tuple2<>("california", 5),
        new Tuple2<>("new york", 3),
        new Tuple2<>("new york", 1)));
@Test
public void testPairMap() { // Maps pair -> pair of different type
  // Tuple2::swap flips each (String, Integer) into (Integer, String).
  List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
  List<List<Tuple2<Integer, String>>> expected = Arrays.asList(
      Arrays.asList(
          new Tuple2<>(1, "california"),
          new Tuple2<>(3, "california"),
          new Tuple2<>(4, "new york"),
          new Tuple2<>(1, "new york")),
      Arrays.asList(
          new Tuple2<>(5, "california"),
          new Tuple2<>(5, "california"),
          new Tuple2<>(3, "new york"),
          new Tuple2<>(1, "new york")));
  JavaDStream<Tuple2<String, Integer>> stream =
      JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
  JavaPairDStream<Integer, String> reversed = pairStream.mapToPair(Tuple2::swap);
  JavaTestUtils.attachTestOutputStream(reversed);
  List<List<Tuple2<Integer, String>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
@Test
public void testPairMapPartitions() { // Maps pair -> pair of different type
  // Same swap as testPairMap, but via mapPartitionsToPair: the function sees
  // a whole partition's iterator and emits the swapped tuples.
  List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
  List<List<Tuple2<Integer, String>>> expected = Arrays.asList(
      Arrays.asList(
          new Tuple2<>(1, "california"),
          new Tuple2<>(3, "california"),
          new Tuple2<>(4, "new york"),
          new Tuple2<>(1, "new york")),
      Arrays.asList(
          new Tuple2<>(5, "california"),
          new Tuple2<>(5, "california"),
          new Tuple2<>(3, "new york"),
          new Tuple2<>(1, "new york")));
  JavaDStream<Tuple2<String, Integer>> stream =
      JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
  JavaPairDStream<Integer, String> reversed = pairStream.mapPartitionsToPair(in -> {
    // ArrayList instead of LinkedList: append-then-iterate needs no
    // linked-list semantics and ArrayList is the idiomatic default.
    List<Tuple2<Integer, String>> out = new ArrayList<>();
    while (in.hasNext()) {
      Tuple2<String, Integer> next = in.next();
      out.add(next.swap());
    }
    return out.iterator();
  });
  JavaTestUtils.attachTestOutputStream(reversed);
  List<List<Tuple2<Integer, String>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
@Test
public void testPairMap2() { // Maps pair -> single
  List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
  List<List<Integer>> expected = Arrays.asList(
      Arrays.asList(1, 3, 4, 1),
      Arrays.asList(5, 5, 3, 1));

  JavaDStream<Tuple2<String, Integer>> stream =
      JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
  // Project each pair down to its value component.
  JavaDStream<Integer> reversed = pairStream.map(Tuple2::_2);
  JavaTestUtils.attachTestOutputStream(reversed);
  // BUG FIX: the collected batches hold Integers, not Tuple2s; `result` was
  // previously mis-declared as List<List<Tuple2<Integer, String>>>.
  List<List<Integer>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
@Test
public void testPairToPairFlatMapWithChangingTypes() { // Maps pair -> pair
  List<List<Tuple2<String, Integer>>> inputData = Arrays.asList(
      Arrays.asList(
          new Tuple2<>("hi", 1),
          new Tuple2<>("ho", 2)),
      Arrays.asList(
          new Tuple2<>("hi", 1),
          new Tuple2<>("ho", 2)));
  // Each ("word", n) pair expands to one (n, "letter") pair per character.
  List<List<Tuple2<Integer, String>>> expected = Arrays.asList(
      Arrays.asList(
          new Tuple2<>(1, "h"),
          new Tuple2<>(1, "i"),
          new Tuple2<>(2, "h"),
          new Tuple2<>(2, "o")),
      Arrays.asList(
          new Tuple2<>(1, "h"),
          new Tuple2<>(1, "i"),
          new Tuple2<>(2, "h"),
          new Tuple2<>(2, "o")));

  JavaDStream<Tuple2<String, Integer>> stream =
      JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
  JavaPairDStream<Integer, String> flatMapped = pairStream.flatMapToPair(in -> {
    List<Tuple2<Integer, String>> out = new LinkedList<>();
    for (Character s : in._1().toCharArray()) {
      out.add(new Tuple2<>(in._2(), s.toString()));
    }
    return out.iterator();
  });

  JavaTestUtils.attachTestOutputStream(flatMapped);
  // BUG FIX: the output stream carries (Integer, String) pairs; `result` was
  // previously mis-declared as List<List<Tuple2<String, Integer>>>.
  List<List<Tuple2<Integer, String>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
@Test
public void testPairReduceByKey() {
  List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
  List<List<Tuple2<String, Integer>>> expected = Arrays.asList(
      Arrays.asList(
          new Tuple2<>("california", 4),
          new Tuple2<>("new york", 5)),
      Arrays.asList(
          new Tuple2<>("california", 10),
          new Tuple2<>("new york", 4)));

  JavaDStream<Tuple2<String, Integer>> input = JavaTestUtils.attachTestInputStream(
      ssc, inputData, 1);
  JavaPairDStream<String, Integer> pairs = JavaPairDStream.fromJavaDStream(input);
  // Sum the values of each key within a batch.
  JavaPairDStream<String, Integer> summed = pairs.reduceByKey(Integer::sum);

  JavaTestUtils.attachTestOutputStream(summed);
  List<List<Tuple2<String, Integer>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
@Test
public void testCombineByKey() {
  List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
  List<List<Tuple2<String, Integer>>> expected = Arrays.asList(
      Arrays.asList(
          new Tuple2<>("california", 4),
          new Tuple2<>("new york", 5)),
      Arrays.asList(
          new Tuple2<>("california", 10),
          new Tuple2<>("new york", 4)));

  JavaDStream<Tuple2<String, Integer>> input = JavaTestUtils.attachTestInputStream(
      ssc, inputData, 1);
  JavaPairDStream<String, Integer> pairs = JavaPairDStream.fromJavaDStream(input);
  // Identity "create combiner"; addition for both the in-partition and
  // cross-partition merge steps.
  JavaPairDStream<String, Integer> combined = pairs.combineByKey(value -> value,
      Integer::sum, Integer::sum, new HashPartitioner(2));

  JavaTestUtils.attachTestOutputStream(combined);
  List<List<Tuple2<String, Integer>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
@Test
public void testReduceByKeyAndWindow() {
  List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
  // 2s window sliding every 1s over 1s batches: batch 2 sees both inputs,
  // batch 3 sees only the second one again.
  List<List<Tuple2<String, Integer>>> expected = Arrays.asList(
      Arrays.asList(new Tuple2<>("california", 4),
          new Tuple2<>("new york", 5)),
      Arrays.asList(new Tuple2<>("california", 14),
          new Tuple2<>("new york", 9)),
      Arrays.asList(new Tuple2<>("california", 10),
          new Tuple2<>("new york", 4)));

  JavaDStream<Tuple2<String, Integer>> input =
      JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaPairDStream<String, Integer> pairs = JavaPairDStream.fromJavaDStream(input);
  JavaPairDStream<String, Integer> windowedSums =
      pairs.reduceByKeyAndWindow(Integer::sum, new Duration(2000), new Duration(1000));

  JavaTestUtils.attachTestOutputStream(windowedSums);
  List<List<Tuple2<String, Integer>>> result = JavaTestUtils.runStreams(ssc, 3, 3);
  Assert.assertEquals(expected, result);
}
@Test
public void testUpdateStateByKey() {
  List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
  // Running totals per key; the third batch has no input, so state is unchanged.
  List<List<Tuple2<String, Integer>>> expected = Arrays.asList(
      Arrays.asList(new Tuple2<>("california", 4),
          new Tuple2<>("new york", 5)),
      Arrays.asList(new Tuple2<>("california", 14),
          new Tuple2<>("new york", 9)),
      Arrays.asList(new Tuple2<>("california", 14),
          new Tuple2<>("new york", 9)));

  JavaDStream<Tuple2<String, Integer>> input =
      JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaPairDStream<String, Integer> pairs = JavaPairDStream.fromJavaDStream(input);
  // Add this batch's values for the key on top of any previously kept total.
  JavaPairDStream<String, Integer> totals = pairs.updateStateByKey((values, state) -> {
    int sum = state.isPresent() ? state.get() : 0;
    for (Integer value : values) {
      sum += value;
    }
    return Optional.of(sum);
  });

  JavaTestUtils.attachTestOutputStream(totals);
  List<List<Tuple2<String, Integer>>> result = JavaTestUtils.runStreams(ssc, 3, 3);
  Assert.assertEquals(expected, result);
}
@Test
public void testReduceByKeyAndWindowWithInverse() {
  List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
  List<List<Tuple2<String, Integer>>> expected = Arrays.asList(
      Arrays.asList(new Tuple2<>("california", 4),
          new Tuple2<>("new york", 5)),
      Arrays.asList(new Tuple2<>("california", 14),
          new Tuple2<>("new york", 9)),
      Arrays.asList(new Tuple2<>("california", 10),
          new Tuple2<>("new york", 4)));

  JavaDStream<Tuple2<String, Integer>> input =
      JavaTestUtils.attachTestInputStream(ssc, inputData, 1);
  JavaPairDStream<String, Integer> pairs = JavaPairDStream.fromJavaDStream(input);
  // The inverse function subtracts values that slide out of the window, so the
  // window sum can be maintained incrementally.
  JavaPairDStream<String, Integer> windowedSums =
      pairs.reduceByKeyAndWindow(Integer::sum, (sum, old) -> sum - old, new Duration(2000),
          new Duration(1000));

  JavaTestUtils.attachTestOutputStream(windowedSums);
  List<List<Tuple2<String, Integer>>> result = JavaTestUtils.runStreams(ssc, 3, 3);
  Assert.assertEquals(expected, result);
}
@Test
public void testPairTransform() {
  List<List<Tuple2<Integer, Integer>>> inputData = Arrays.asList(
      Arrays.asList(
          new Tuple2<>(3, 5),
          new Tuple2<>(1, 5),
          new Tuple2<>(4, 5),
          new Tuple2<>(2, 5)),
      Arrays.asList(
          new Tuple2<>(2, 5),
          new Tuple2<>(3, 5),
          new Tuple2<>(4, 5),
          new Tuple2<>(1, 5)));
  // Same pairs, sorted by key within each batch.
  List<List<Tuple2<Integer, Integer>>> expected = Arrays.asList(
      Arrays.asList(
          new Tuple2<>(1, 5),
          new Tuple2<>(2, 5),
          new Tuple2<>(3, 5),
          new Tuple2<>(4, 5)),
      Arrays.asList(
          new Tuple2<>(1, 5),
          new Tuple2<>(2, 5),
          new Tuple2<>(3, 5),
          new Tuple2<>(4, 5)));

  JavaDStream<Tuple2<Integer, Integer>> stream = JavaTestUtils.attachTestInputStream(
      ssc, inputData, 1);
  JavaPairDStream<Integer, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);
  JavaPairDStream<Integer, Integer> sorted = pairStream.transformToPair(in -> in.sortByKey());

  JavaTestUtils.attachTestOutputStream(sorted);
  // BUG FIX: the stream carries (Integer, Integer) pairs; `result` was
  // previously mis-declared as List<List<Tuple2<String, String>>>.
  List<List<Tuple2<Integer, Integer>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
@Test
public void testPairToNormalRDDTransform() {
  List<List<Tuple2<Integer, Integer>>> inputData = Arrays.asList(
      Arrays.asList(
          new Tuple2<>(3, 5),
          new Tuple2<>(1, 5),
          new Tuple2<>(4, 5),
          new Tuple2<>(2, 5)),
      Arrays.asList(
          new Tuple2<>(2, 5),
          new Tuple2<>(3, 5),
          new Tuple2<>(4, 5),
          new Tuple2<>(1, 5)));
  List<List<Integer>> expected = Arrays.asList(
      Arrays.asList(3, 1, 4, 2),
      Arrays.asList(2, 3, 4, 1));

  JavaDStream<Tuple2<Integer, Integer>> input = JavaTestUtils.attachTestInputStream(
      ssc, inputData, 1);
  JavaPairDStream<Integer, Integer> pairs = JavaPairDStream.fromJavaDStream(input);
  // transform() lets a pair stream produce a plain (non-pair) stream of keys.
  JavaDStream<Integer> keysOnly = pairs.transform(rdd -> rdd.map(Tuple2::_1));

  JavaTestUtils.attachTestOutputStream(keysOnly);
  List<List<Integer>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
@Test
public void testMapValues() {
  List<List<Tuple2<String, String>>> inputData = stringStringKVStream;
  List<List<Tuple2<String, String>>> expected = Arrays.asList(
      Arrays.asList(new Tuple2<>("california", "DODGERS"),
          new Tuple2<>("california", "GIANTS"),
          new Tuple2<>("new york", "YANKEES"),
          new Tuple2<>("new york", "METS")),
      Arrays.asList(new Tuple2<>("california", "SHARKS"),
          new Tuple2<>("california", "DUCKS"),
          new Tuple2<>("new york", "RANGERS"),
          new Tuple2<>("new york", "ISLANDERS")));

  JavaDStream<Tuple2<String, String>> input = JavaTestUtils.attachTestInputStream(
      ssc, inputData, 1);
  JavaPairDStream<String, String> pairs = JavaPairDStream.fromJavaDStream(input);
  // Locale.ROOT keeps the upper-casing locale-independent.
  JavaPairDStream<String, String> upperCased =
      pairs.mapValues(value -> value.toUpperCase(Locale.ROOT));

  JavaTestUtils.attachTestOutputStream(upperCased);
  List<List<Tuple2<String, String>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
@Test
public void testFlatMapValues() {
  List<List<Tuple2<String, String>>> inputData = stringStringKVStream;
  // Each value fans out into two values carrying "1" and "2" suffixes.
  List<List<Tuple2<String, String>>> expected = Arrays.asList(
      Arrays.asList(new Tuple2<>("california", "dodgers1"),
          new Tuple2<>("california", "dodgers2"),
          new Tuple2<>("california", "giants1"),
          new Tuple2<>("california", "giants2"),
          new Tuple2<>("new york", "yankees1"),
          new Tuple2<>("new york", "yankees2"),
          new Tuple2<>("new york", "mets1"),
          new Tuple2<>("new york", "mets2")),
      Arrays.asList(new Tuple2<>("california", "sharks1"),
          new Tuple2<>("california", "sharks2"),
          new Tuple2<>("california", "ducks1"),
          new Tuple2<>("california", "ducks2"),
          new Tuple2<>("new york", "rangers1"),
          new Tuple2<>("new york", "rangers2"),
          new Tuple2<>("new york", "islanders1"),
          new Tuple2<>("new york", "islanders2")));

  JavaDStream<Tuple2<String, String>> input = JavaTestUtils.attachTestInputStream(
      ssc, inputData, 1);
  JavaPairDStream<String, String> pairs = JavaPairDStream.fromJavaDStream(input);
  JavaPairDStream<String, String> expanded =
      pairs.flatMapValues(value -> Arrays.asList(value + "1", value + "2"));

  JavaTestUtils.attachTestOutputStream(expanded);
  List<List<Tuple2<String, String>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
  Assert.assertEquals(expected, result);
}
/**
 * This test is only for testing the APIs. It's not necessary to run it.
 */
public void testMapWithStateAPI() {
  // Placeholders only: this method must compile, not execute.
  JavaPairRDD<String, Boolean> initialRDD = null;
  JavaPairDStream<String, Integer> wordsDstream = null;

  // Four-argument mapping function (including the batch Time) that returns an
  // Optional mapped value.
  Function4<Time, String, Optional<Integer>, State<Boolean>, Optional<Double>> mapFn =
      (time, key, value, state) -> {
        // Use all State's methods here
        state.exists();
        state.get();
        state.isTimingOut();
        state.remove();
        state.update(true);
        return Optional.of(2.0);
      };

  JavaMapWithStateDStream<String, Integer, Boolean, Double> stateDstream =
      wordsDstream.mapWithState(
          StateSpec.function(mapFn)
              .initialState(initialRDD)
              .numPartitions(10)
              .partitioner(new HashPartitioner(10))
              .timeout(Durations.seconds(10)));

  // Assigned only so the compiler checks stateSnapshots()'s return type.
  JavaPairDStream<String, Boolean> emittedRecords = stateDstream.stateSnapshots();

  // Three-argument variant (no Time) that returns the mapped value directly.
  Function3<String, Optional<Integer>, State<Boolean>, Double> mapFn2 =
      (key, value, state) -> {
        state.exists();
        state.get();
        state.isTimingOut();
        state.remove();
        state.update(true);
        return 2.0;
      };

  JavaMapWithStateDStream<String, Integer, Boolean, Double> stateDstream2 =
      wordsDstream.mapWithState(
          StateSpec.function(mapFn2)
              .initialState(initialRDD)
              .numPartitions(10)
              .partitioner(new HashPartitioner(10))
              .timeout(Durations.seconds(10)));

  // Likewise a compile-time type check only.
  JavaPairDStream<String, Boolean> mappedDStream = stateDstream2.stateSnapshots();
}
}
| 9,765 |
0 | Create_ds/spark/streaming/src/test/java/org/apache/spark | Create_ds/spark/streaming/src/test/java/org/apache/spark/streaming/LocalJavaStreamingContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming;
import org.apache.spark.SparkConf;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.junit.After;
import org.junit.Before;
/**
 * Base class for Java streaming tests. Creates a fresh local
 * {@link JavaStreamingContext} (two local threads, manual clock, 1 second
 * batches) before each test and stops it afterwards.
 */
public abstract class LocalJavaStreamingContext {

  // transient: subclasses may be Serializable, and the context must not be
  // captured in serialized closures.
  protected transient JavaStreamingContext ssc;

  @Before
  public void setUp() {
    SparkConf conf = new SparkConf()
        .setMaster("local[2]")
        .setAppName("test")
        // Manual clock so tests can advance batch time deterministically.
        .set("spark.streaming.clock", "org.apache.spark.util.ManualClock");
    ssc = new JavaStreamingContext(conf, new Duration(1000));
    ssc.checkpoint("checkpoint");
  }

  @After
  public void tearDown() {
    ssc.stop();
    ssc = null;
  }
}
| 9,766 |
0 | Create_ds/spark/streaming/src/test/java/org/apache/spark | Create_ds/spark/streaming/src/test/java/org/apache/spark/streaming/JavaMapWithStateSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import scala.Tuple2;
import com.google.common.collect.Sets;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.util.ManualClock;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.HashPartitioner;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.Optional;
import org.apache.spark.api.java.function.Function3;
import org.apache.spark.api.java.function.Function4;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaMapWithStateDStream;
/**
 * Java API tests for {@code mapWithState}: an API compile-check plus a
 * functional word-count-with-state test driven by the manual clock.
 */
public class JavaMapWithStateSuite extends LocalJavaStreamingContext implements Serializable {

  /**
   * This test is only for testing the APIs. It's not necessary to run it.
   */
  public void testAPI() {
    // Placeholders only: this method must compile, not execute.
    JavaPairRDD<String, Boolean> initialRDD = null;
    JavaPairDStream<String, Integer> wordsDstream = null;

    // Four-argument mapping function (including the batch Time) that returns
    // an Optional mapped value.
    Function4<Time, String, Optional<Integer>, State<Boolean>, Optional<Double>> mappingFunc =
        (time, word, one, state) -> {
          // Use all State's methods here
          state.exists();
          state.get();
          state.isTimingOut();
          state.remove();
          state.update(true);
          return Optional.of(2.0);
        };

    JavaMapWithStateDStream<String, Integer, Boolean, Double> stateDstream =
        wordsDstream.mapWithState(
            StateSpec.function(mappingFunc)
                .initialState(initialRDD)
                .numPartitions(10)
                .partitioner(new HashPartitioner(10))
                .timeout(Durations.seconds(10)));

    stateDstream.stateSnapshots();

    // Three-argument variant (no Time) that returns the mapped value directly.
    Function3<String, Optional<Integer>, State<Boolean>, Double> mappingFunc2 =
        (key, one, state) -> {
          // Use all State's methods here
          state.exists();
          state.get();
          state.isTimingOut();
          state.remove();
          state.update(true);
          return 2.0;
        };

    JavaMapWithStateDStream<String, Integer, Boolean, Double> stateDstream2 =
        wordsDstream.mapWithState(
            StateSpec.function(mappingFunc2)
                .initialState(initialRDD)
                .numPartitions(10)
                .partitioner(new HashPartitioner(10))
                .timeout(Durations.seconds(10)));

    stateDstream2.stateSnapshots();
  }

  @Test
  public void testBasicFunction() {
    // Seven input batches of words.
    List<List<String>> inputData = Arrays.asList(
        Collections.<String>emptyList(),
        Arrays.asList("a"),
        Arrays.asList("a", "b"),
        Arrays.asList("a", "b", "c"),
        Arrays.asList("a", "b"),
        Arrays.asList("a"),
        Collections.<String>emptyList()
    );

    // Expected mapped output per batch: the running count of each word that
    // appeared in that batch.
    List<Set<Integer>> outputData = Arrays.asList(
        Collections.<Integer>emptySet(),
        Sets.newHashSet(1),
        Sets.newHashSet(2, 1),
        Sets.newHashSet(3, 2, 1),
        Sets.newHashSet(4, 3),
        Sets.newHashSet(5),
        Collections.<Integer>emptySet()
    );

    // Expected state snapshot per batch: cumulative (word, count) pairs.
    @SuppressWarnings("unchecked")
    List<Set<Tuple2<String, Integer>>> stateData = Arrays.asList(
        Collections.<Tuple2<String, Integer>>emptySet(),
        Sets.newHashSet(new Tuple2<>("a", 1)),
        Sets.newHashSet(new Tuple2<>("a", 2), new Tuple2<>("b", 1)),
        Sets.newHashSet(new Tuple2<>("a", 3), new Tuple2<>("b", 2), new Tuple2<>("c", 1)),
        Sets.newHashSet(new Tuple2<>("a", 4), new Tuple2<>("b", 3), new Tuple2<>("c", 1)),
        Sets.newHashSet(new Tuple2<>("a", 5), new Tuple2<>("b", 3), new Tuple2<>("c", 1)),
        Sets.newHashSet(new Tuple2<>("a", 5), new Tuple2<>("b", 3), new Tuple2<>("c", 1))
    );

    // Adds this batch's value to the count kept in state and emits the total.
    Function3<String, Optional<Integer>, State<Integer>, Integer> mappingFunc =
        (key, value, state) -> {
          int sum = value.orElse(0) + (state.exists() ? state.get() : 0);
          state.update(sum);
          return sum;
        };
    testOperation(
        inputData,
        StateSpec.function(mappingFunc),
        outputData,
        stateData);
  }

  /**
   * Runs {@code mapWithState} with the given spec over the input batches,
   * advances the manual clock past {@code numBatches} batches, and checks both
   * the mapped outputs and the state snapshots against the expectations.
   */
  private <K, S, T> void testOperation(
      List<List<K>> input,
      StateSpec<K, Integer, S, T> mapWithStateSpec,
      List<Set<T>> expectedOutputs,
      List<Set<Tuple2<K, S>>> expectedStateSnapshots) {
    int numBatches = expectedOutputs.size();
    JavaDStream<K> inputStream = JavaTestUtils.attachTestInputStream(ssc, input, 2);
    // Pair every element with the count 1 before applying mapWithState.
    JavaMapWithStateDStream<K, Integer, S, T> mapWithStateDStream = JavaPairDStream.fromJavaDStream(
        inputStream.map(x -> new Tuple2<>(x, 1))).mapWithState(mapWithStateSpec);

    // Synchronized lists: foreachRDD callbacks run on the streaming threads.
    List<Set<T>> collectedOutputs =
        Collections.synchronizedList(new ArrayList<Set<T>>());
    mapWithStateDStream.foreachRDD(rdd -> collectedOutputs.add(Sets.newHashSet(rdd.collect())));
    List<Set<Tuple2<K, S>>> collectedStateSnapshots =
        Collections.synchronizedList(new ArrayList<Set<Tuple2<K, S>>>());
    mapWithStateDStream.stateSnapshots().foreachRDD(rdd ->
        collectedStateSnapshots.add(Sets.newHashSet(rdd.collect())));
    BatchCounter batchCounter = new BatchCounter(ssc.ssc());
    ssc.start();
    // Jump the manual clock far enough to trigger all batches, then wait for
    // them to actually finish (up to 10s) before asserting.
    ((ManualClock) ssc.ssc().scheduler().clock())
        .advance(ssc.ssc().progressListener().batchDuration() * numBatches + 1);
    batchCounter.waitUntilBatchesCompleted(numBatches, 10000);

    Assert.assertEquals(expectedOutputs, collectedOutputs);
    Assert.assertEquals(expectedStateSnapshots, collectedStateSnapshots);
  }
}
| 9,767 |
0 | Create_ds/spark/streaming/src/test/java/org/apache/spark | Create_ds/spark/streaming/src/test/java/org/apache/spark/streaming/JavaStreamingListenerAPISuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming;
import org.apache.spark.streaming.api.java.*;
/**
 * Compile-time check that every {@link JavaStreamingListener} callback can be
 * overridden from Java and that {@link JavaReceiverInfo} exposes all of its
 * accessors. The three receiver callbacks previously repeated the same eight
 * accessor calls verbatim; they now share a single private helper.
 */
public class JavaStreamingListenerAPISuite extends JavaStreamingListener {

  /** Touches every accessor of {@link JavaReceiverInfo}. */
  private static void exerciseReceiverInfo(JavaReceiverInfo receiverInfo) {
    receiverInfo.streamId();
    receiverInfo.name();
    receiverInfo.active();
    receiverInfo.location();
    receiverInfo.executorId();
    receiverInfo.lastErrorMessage();
    receiverInfo.lastError();
    receiverInfo.lastErrorTime();
  }

  @Override
  public void onStreamingStarted(JavaStreamingListenerStreamingStarted streamingStarted) {
    super.onStreamingStarted(streamingStarted);
  }

  @Override
  public void onReceiverStarted(JavaStreamingListenerReceiverStarted receiverStarted) {
    exerciseReceiverInfo(receiverStarted.receiverInfo());
  }

  @Override
  public void onReceiverError(JavaStreamingListenerReceiverError receiverError) {
    exerciseReceiverInfo(receiverError.receiverInfo());
  }

  @Override
  public void onReceiverStopped(JavaStreamingListenerReceiverStopped receiverStopped) {
    exerciseReceiverInfo(receiverStopped.receiverInfo());
  }

  @Override
  public void onBatchSubmitted(JavaStreamingListenerBatchSubmitted batchSubmitted) {
    super.onBatchSubmitted(batchSubmitted);
  }

  @Override
  public void onBatchStarted(JavaStreamingListenerBatchStarted batchStarted) {
    super.onBatchStarted(batchStarted);
  }

  @Override
  public void onBatchCompleted(JavaStreamingListenerBatchCompleted batchCompleted) {
    super.onBatchCompleted(batchCompleted);
  }

  @Override
  public void onOutputOperationStarted(
      JavaStreamingListenerOutputOperationStarted outputOperationStarted) {
    super.onOutputOperationStarted(outputOperationStarted);
  }

  @Override
  public void onOutputOperationCompleted(
      JavaStreamingListenerOutputOperationCompleted outputOperationCompleted) {
    super.onOutputOperationCompleted(outputOperationCompleted);
  }
}
| 9,768 |
0 | Create_ds/spark/streaming/src/test/java/org/apache/spark | Create_ds/spark/streaming/src/test/java/org/apache/spark/streaming/JavaDurationSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming;
import org.junit.Assert;
import org.junit.Test;
/**
 * Exercises the {@code Duration} operations that are specially exposed for
 * Java callers.
 */
public class JavaDurationSuite {

  // Just testing the methods that are specially exposed for Java.
  // This does not repeat all tests found in the Scala suite.

  @Test
  public void testLess() {
    Duration shorter = new Duration(999);
    Duration longer = new Duration(1000);
    Assert.assertTrue(shorter.less(longer));
  }

  @Test
  public void testLessEq() {
    Duration oneSecond = new Duration(1000);
    Assert.assertTrue(oneSecond.lessEq(new Duration(1000)));
  }

  @Test
  public void testGreater() {
    Duration longer = new Duration(1000);
    Duration shorter = new Duration(999);
    Assert.assertTrue(longer.greater(shorter));
  }

  @Test
  public void testGreaterEq() {
    Duration oneSecond = new Duration(1000);
    Assert.assertTrue(oneSecond.greaterEq(new Duration(1000)));
  }

  @Test
  public void testPlus() {
    Duration sum = new Duration(1000).plus(new Duration(100));
    Assert.assertEquals(new Duration(1100), sum);
  }

  @Test
  public void testMinus() {
    Duration difference = new Duration(1000).minus(new Duration(100));
    Assert.assertEquals(new Duration(900), difference);
  }

  @Test
  public void testTimes() {
    Duration doubled = new Duration(100).times(2);
    Assert.assertEquals(new Duration(200), doubled);
  }

  @Test
  public void testDiv() {
    double ratio = new Duration(1000).div(new Duration(5));
    Assert.assertEquals(200.0, ratio, 1.0e-12);
  }

  @Test
  public void testMilliseconds() {
    Assert.assertEquals(new Duration(100), Durations.milliseconds(100));
  }

  @Test
  public void testSeconds() {
    Assert.assertEquals(new Duration(30 * 1000), Durations.seconds(30));
  }

  @Test
  public void testMinutes() {
    Assert.assertEquals(new Duration(2 * 60 * 1000), Durations.minutes(2));
  }
}
| 9,769 |
0 | Create_ds/spark/streaming/src/test/java/org/apache/spark | Create_ds/spark/streaming/src/test/java/org/apache/spark/streaming/JavaTimeSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming;
import org.junit.Assert;
import org.junit.Test;
/**
 * Exercises the {@code Time} operations that are specially exposed for Java
 * callers.
 */
public class JavaTimeSuite {

  // Just testing the methods that are specially exposed for Java.
  // This does not repeat all tests found in the Scala suite.

  @Test
  public void testLess() {
    Time earlier = new Time(999);
    Time later = new Time(1000);
    Assert.assertTrue(earlier.less(later));
  }

  @Test
  public void testLessEq() {
    Time t = new Time(1000);
    Assert.assertTrue(t.lessEq(new Time(1000)));
  }

  @Test
  public void testGreater() {
    Time later = new Time(1000);
    Time earlier = new Time(999);
    Assert.assertTrue(later.greater(earlier));
  }

  @Test
  public void testGreaterEq() {
    Time t = new Time(1000);
    Assert.assertTrue(t.greaterEq(new Time(1000)));
  }

  @Test
  public void testPlus() {
    Time shifted = new Time(1000).plus(new Duration(100));
    Assert.assertEquals(new Time(1100), shifted);
  }

  @Test
  public void testMinusTime() {
    // Time - Time yields a Duration.
    Duration gap = new Time(1000).minus(new Time(100));
    Assert.assertEquals(new Duration(900), gap);
  }

  @Test
  public void testMinusDuration() {
    // Time - Duration yields an earlier Time.
    Time earlier = new Time(1000).minus(new Duration(100));
    Assert.assertEquals(new Time(900), earlier);
  }
}
| 9,770 |
0 | Create_ds/spark/streaming/src/test/java/org/apache/spark | Create_ds/spark/streaming/src/test/java/org/apache/spark/streaming/JavaWriteAheadLogSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming;
import java.util.ArrayList;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import com.google.common.collect.Iterators;
import org.apache.spark.SparkConf;
import org.apache.spark.network.util.JavaUtils;
import org.apache.spark.streaming.util.WriteAheadLog;
import org.apache.spark.streaming.util.WriteAheadLogRecordHandle;
import org.apache.spark.streaming.util.WriteAheadLogUtils;
import org.junit.Test;
import org.junit.Assert;
/**
 * A simple in-memory {@link WriteAheadLog}, used to verify that a custom WAL
 * implementation written in Java can be plugged in via configuration.
 */
public class JavaWriteAheadLogSuite extends WriteAheadLog {

  /** Handle that identifies a written record by its insertion index. */
  static class JavaWriteAheadLogSuiteHandle extends WriteAheadLogRecordHandle {
    int index = -1;
    JavaWriteAheadLogSuiteHandle(int idx) {
      index = idx;
    }
  }

  /** One logged entry: the payload plus the time and index it was written with. */
  static class Record {
    long time;
    int index;
    ByteBuffer buffer;

    Record(long tym, int idx, ByteBuffer buf) {
      index = idx;
      time = tym;
      buffer = buf;
    }
  }

  // Index of the most recently written record; -1 before the first write.
  private int index = -1;
  private final List<Record> records = new ArrayList<>();

  // Methods for WriteAheadLog

  @Override
  public WriteAheadLogRecordHandle write(ByteBuffer record, long time) {
    index += 1;
    records.add(new Record(time, index, record));
    return new JavaWriteAheadLogSuiteHandle(index);
  }

  @Override
  public ByteBuffer read(WriteAheadLogRecordHandle handle) {
    if (handle instanceof JavaWriteAheadLogSuiteHandle) {
      int reqdIndex = ((JavaWriteAheadLogSuiteHandle) handle).index;
      for (Record record: records) {
        if (record.index == reqdIndex) {
          return record.buffer;
        }
      }
    }
    // Unknown handle type, or the record was already cleaned.
    return null;
  }

  @Override
  public Iterator<ByteBuffer> readAll() {
    return Iterators.transform(records.iterator(), input -> input.buffer);
  }

  @Override
  public void clean(long threshTime, boolean waitForCompletion) {
    // Drop every record written strictly before the threshold. removeIf
    // replaces the previous manual remove-and-decrement-index loop.
    records.removeIf(record -> record.time < threshTime);
  }

  @Override
  public void close() {
    records.clear();
  }

  @Test
  public void testCustomWAL() {
    SparkConf conf = new SparkConf();
    conf.set("spark.streaming.driver.writeAheadLog.class", JavaWriteAheadLogSuite.class.getName());
    conf.set("spark.streaming.driver.writeAheadLog.allowBatching", "false");
    WriteAheadLog wal = WriteAheadLogUtils.createLogForDriver(conf, null, null);

    String data1 = "data1";
    WriteAheadLogRecordHandle handle = wal.write(JavaUtils.stringToBytes(data1), 1234);
    Assert.assertTrue(handle instanceof JavaWriteAheadLogSuiteHandle);
    Assert.assertEquals(data1, JavaUtils.bytesToString(wal.read(handle)));

    wal.write(JavaUtils.stringToBytes("data2"), 1235);
    wal.write(JavaUtils.stringToBytes("data3"), 1236);
    wal.write(JavaUtils.stringToBytes("data4"), 1237);
    // Cleaning at 1236 must drop data1 (t=1234) and data2 (t=1235) only.
    wal.clean(1236, false);

    Iterator<ByteBuffer> dataIterator = wal.readAll();
    List<String> readData = new ArrayList<>();
    while (dataIterator.hasNext()) {
      readData.add(JavaUtils.bytesToString(dataIterator.next()));
    }
    // BUG FIX: JUnit's assertEquals takes (expected, actual); the arguments
    // were reversed, which produces misleading failure messages.
    Assert.assertEquals(Arrays.asList("data3", "data4"), readData);
  }
}
| 9,771 |
0 | Create_ds/spark/streaming/src/test/java/org/apache/spark | Create_ds/spark/streaming/src/test/java/org/apache/spark/streaming/JavaReceiverAPISuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import com.google.common.io.Closeables;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.receiver.Receiver;
import org.apache.spark.api.java.function.Function;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.Serializable;
import java.net.ConnectException;
import java.net.Socket;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Integration test for the Java custom-receiver API: starts a local TCP server,
 * attaches a socket-reading {@link Receiver} to a streaming context, and verifies
 * that data sent by the server is observed by the stream.
 */
public class JavaReceiverAPISuite implements Serializable {

  @Before
  public void setUp() {
    // Ensure a manual-clock setting from another suite does not leak in.
    System.clearProperty("spark.streaming.clock");
  }

  @After
  public void tearDown() {
    System.clearProperty("spark.streaming.clock");
  }

  @Test
  public void testReceiver() throws InterruptedException {
    TestServer server = new TestServer(0);
    server.start();

    AtomicLong dataCounter = new AtomicLong(0);

    try {
      JavaStreamingContext ssc = new JavaStreamingContext("local[2]", "test", new Duration(200));
      try {
        JavaReceiverInputDStream<String> input =
          ssc.receiverStream(new JavaSocketReceiver("localhost", server.port()));
        JavaDStream<String> mapped = input.map((Function<String, String>) v1 -> v1 + ".");
        mapped.foreachRDD((VoidFunction<JavaRDD<String>>) rdd ->
          dataCounter.addAndGet(rdd.count()));

        ssc.start();
        long startTime = System.currentTimeMillis();
        long timeout = 10000;

        Thread.sleep(200);
        for (int i = 0; i < 6; i++) {
          server.send(i + "\n"); // \n to make sure these are separate lines
          Thread.sleep(100);
        }
        // Poll until the receiver has delivered at least one record or we time out.
        while (dataCounter.get() == 0 && System.currentTimeMillis() - startTime < timeout) {
          Thread.sleep(100);
        }
      } finally {
        // Previously ssc was only stopped on the success path; an exception
        // thrown above would leak the streaming context (and its local cluster).
        ssc.stop();
      }
      Assert.assertTrue(dataCounter.get() > 0);
    } finally {
      server.stop();
    }
  }

  /**
   * A minimal receiver that connects to a host:port socket and stores each line
   * it reads. Restarts itself on connection failure or any other error.
   */
  private static class JavaSocketReceiver extends Receiver<String> {

    private String host = null;
    private int port = -1;

    JavaSocketReceiver(String host_ , int port_) {
      super(StorageLevel.MEMORY_AND_DISK());
      host = host_;
      port = port_;
    }

    @Override
    public void onStart() {
      // Reading happens on a dedicated thread so onStart() returns immediately,
      // as the Receiver contract requires.
      new Thread(this::receive).start();
    }

    @Override
    public void onStop() {
      // Nothing to do: the read loop exits when the socket is closed/EOF.
    }

    /** Blocking read loop: stores every line until EOF, then the thread exits. */
    private void receive() {
      try {
        Socket socket = null;
        BufferedReader in = null;
        try {
          socket = new Socket(host, port);
          in = new BufferedReader(
            new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
          String userInput;
          while ((userInput = in.readLine()) != null) {
            store(userInput);
          }
        } finally {
          Closeables.close(in, /* swallowIOException = */ true);
          Closeables.close(socket, /* swallowIOException = */ true);
        }
      } catch(ConnectException ce) {
        ce.printStackTrace();
        restart("Could not connect", ce);
      } catch(Throwable t) {
        t.printStackTrace();
        restart("Error receiving data", t);
      }
    }
  }
}
| 9,772 |
0 | Create_ds/spark/streaming/src/main/java/org/apache/spark/status/api/v1 | Create_ds/spark/streaming/src/main/java/org/apache/spark/status/api/v1/streaming/BatchStatus.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.status.api.v1.streaming;
import org.apache.spark.util.EnumUtil;
/**
 * Status of a streaming batch as exposed through the streaming REST API.
 * Note: constant order is part of the public contract (ordinal/values()), so
 * new states must be appended rather than inserted.
 */
public enum BatchStatus {
COMPLETED,
QUEUED,
PROCESSING;
/**
 * Parses a batch status from a string, ignoring case.
 * Delegates to {@link EnumUtil} so unknown values raise a descriptive error.
 */
public static BatchStatus fromString(String str) {
return EnumUtil.parseIgnoreCase(BatchStatus.class, str);
}
}
| 9,773 |
0 | Create_ds/spark/streaming/src/main/java/org/apache/spark | Create_ds/spark/streaming/src/main/java/org/apache/spark/streaming/StreamingContextState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming;
import org.apache.spark.annotation.DeveloperApi;
/**
 * :: DeveloperApi ::
 *
 * Represents the state of a StreamingContext.
 */
@DeveloperApi
public enum StreamingContextState {
/**
 * The context has been created, but not been started yet.
 * Input DStreams, transformations and output operations can be created on the context.
 */
INITIALIZED,
/**
 * The context has been started and not yet stopped.
 * Input DStreams, transformations and output operations cannot be created on the context.
 */
ACTIVE,
/**
 * The context has been stopped and cannot be used any more.
 */
STOPPED
}
| 9,774 |
0 | Create_ds/spark/streaming/src/main/java/org/apache/spark/streaming | Create_ds/spark/streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLog.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.util;
import java.nio.ByteBuffer;
import java.util.Iterator;
/**
 * :: DeveloperApi ::
 *
 * This abstract class represents a write ahead log (aka journal) that is used by Spark Streaming
 * to save the received data (by receivers) and associated metadata to a reliable storage, so that
 * they can be recovered after driver failures. See the Spark documentation for more information
 * on how to plug in your own custom implementation of a write ahead log.
 */
@org.apache.spark.annotation.DeveloperApi
public abstract class WriteAheadLog {
/**
 * Write the record to the log and return a record handle, which contains all the information
 * necessary to read back the written record. The time is used to index the record,
 * such that it can be cleaned later. Note that implementations of this abstract class must
 * ensure that the written data is durable and readable (using the record handle) by the
 * time this function returns.
 */
public abstract WriteAheadLogRecordHandle write(ByteBuffer record, long time);
/**
 * Read a written record based on the given record handle.
 */
public abstract ByteBuffer read(WriteAheadLogRecordHandle handle);
/**
 * Read and return an iterator of all the records that have been written but not yet cleaned up.
 */
public abstract Iterator<ByteBuffer> readAll();
/**
 * Clean all the records that are older than the threshold time. If
 * {@code waitForCompletion} is true, this call blocks until the deletion completes.
 */
public abstract void clean(long threshTime, boolean waitForCompletion);
/**
 * Close this log and release any resources. It must be idempotent.
 */
public abstract void close();
}
| 9,775 |
0 | Create_ds/spark/streaming/src/main/java/org/apache/spark/streaming | Create_ds/spark/streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLogRecordHandle.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.util;
/**
 * :: DeveloperApi ::
 *
 * This abstract class represents a handle that refers to a record written in a
 * {@link org.apache.spark.streaming.util.WriteAheadLog WriteAheadLog}.
 * It must contain all the information necessary for the record to be read and returned by
 * an implementation of the WriteAheadLog class.
 *
 * Handles are Serializable because they may be shipped with tasks / checkpoints;
 * concrete subclasses should therefore keep their fields serializable as well.
 *
 * @see org.apache.spark.streaming.util.WriteAheadLog
 */
@org.apache.spark.annotation.DeveloperApi
public abstract class WriteAheadLogRecordHandle implements java.io.Serializable {
}
| 9,776 |
0 | Create_ds/spark/streaming/src/main/scala/org/apache/spark/streaming | Create_ds/spark/streaming/src/main/scala/org/apache/spark/streaming/dstream/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Various implementations of DStreams (discretized streams), the core streaming
 * abstraction built on top of RDDs.
 */
package org.apache.spark.streaming.dstream;
| 9,777 |
0 | Create_ds/spark/streaming/src/main/scala/org/apache/spark/streaming/api | Create_ds/spark/streaming/src/main/scala/org/apache/spark/streaming/api/java/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Java APIs for Spark Streaming.
 */
package org.apache.spark.streaming.api.java;
| 9,778 |
0 | Create_ds/spark/common/network-yarn/src/main/java/org/apache/spark/network | Create_ds/spark/common/network-yarn/src/main/java/org/apache/spark/network/yarn/YarnShuffleService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.yarn;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.api.*;
import org.apache.spark.network.util.LevelDBProvider;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBIterator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.TransportContext;
import org.apache.spark.network.crypto.AuthServerBootstrap;
import org.apache.spark.network.sasl.ShuffleSecretManager;
import org.apache.spark.network.server.TransportServer;
import org.apache.spark.network.server.TransportServerBootstrap;
import org.apache.spark.network.shuffle.ExternalShuffleBlockHandler;
import org.apache.spark.network.util.TransportConf;
import org.apache.spark.network.yarn.util.HadoopConfigProvider;
/**
* An external shuffle service used by Spark on Yarn.
*
* This is intended to be a long-running auxiliary service that runs in the NodeManager process.
* A Spark application may connect to this service by setting `spark.shuffle.service.enabled`.
* The application also automatically derives the service port through `spark.shuffle.service.port`
* specified in the Yarn configuration. This is so that both the clients and the server agree on
* the same port to communicate on.
*
* The service also optionally supports authentication. This ensures that executors from one
* application cannot read the shuffle files written by those from another. This feature can be
* enabled by setting `spark.authenticate` in the Yarn configuration before starting the NM.
* Note that the Spark application must also set `spark.authenticate` manually and, unlike in
* the case of the service port, will not inherit this setting from the Yarn configuration. This
* is because an application running on the same Yarn cluster may choose to not use the external
* shuffle service, in which case its setting of `spark.authenticate` should be independent of
* the service's.
*/
public class YarnShuffleService extends AuxiliaryService {
  private static final Logger logger = LoggerFactory.getLogger(YarnShuffleService.class);

  // Port on which the shuffle server listens for fetch requests
  private static final String SPARK_SHUFFLE_SERVICE_PORT_KEY = "spark.shuffle.service.port";
  private static final int DEFAULT_SPARK_SHUFFLE_SERVICE_PORT = 7337;

  // Whether the shuffle server should authenticate fetch requests
  private static final String SPARK_AUTHENTICATE_KEY = "spark.authenticate";
  private static final boolean DEFAULT_SPARK_AUTHENTICATE = false;

  private static final String RECOVERY_FILE_NAME = "registeredExecutors.ldb";
  private static final String SECRETS_RECOVERY_FILE_NAME = "sparkShuffleRecovery.ldb";

  // Whether failure during service initialization should stop the NM.
  @VisibleForTesting
  static final String STOP_ON_FAILURE_KEY = "spark.yarn.shuffle.stopOnFailure";
  private static final boolean DEFAULT_STOP_ON_FAILURE = false;

  // just for testing when you want to find an open port
  @VisibleForTesting
  static int boundPort = -1;
  private static final ObjectMapper mapper = new ObjectMapper();
  private static final String APP_CREDS_KEY_PREFIX = "AppCreds";
  private static final LevelDBProvider.StoreVersion CURRENT_VERSION = new LevelDBProvider
      .StoreVersion(1, 0);

  // just for integration tests that want to look at this file -- in general not sensible as
  // a static
  @VisibleForTesting
  static YarnShuffleService instance;

  // An entity that manages the shuffle secret per application
  // This is used only if authentication is enabled
  @VisibleForTesting
  ShuffleSecretManager secretManager;

  // The actual server that serves shuffle files
  private TransportServer shuffleServer = null;

  private Configuration _conf = null;

  // The recovery path used to shuffle service recovery
  @VisibleForTesting
  Path _recoveryPath = null;

  // Handles registering executors and opening shuffle blocks
  @VisibleForTesting
  ExternalShuffleBlockHandler blockHandler;

  // Where to store & reload executor info for recovering state after an NM restart
  @VisibleForTesting
  File registeredExecutorFile;

  // Where to store & reload application secrets for recovering state after an NM restart
  @VisibleForTesting
  File secretsFile;

  private DB db;

  public YarnShuffleService() {
    super("spark_shuffle");
    logger.info("Initializing YARN shuffle service for Spark");
    instance = this;
  }

  /**
   * Return whether authentication is enabled as specified by the configuration.
   * If so, fetch requests will fail unless the appropriate authentication secret
   * for the application is provided.
   */
  private boolean isAuthenticationEnabled() {
    return secretManager != null;
  }

  /**
   * Start the shuffle server with the given configuration.
   */
  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    _conf = conf;

    boolean stopOnFailure = conf.getBoolean(STOP_ON_FAILURE_KEY, DEFAULT_STOP_ON_FAILURE);

    try {
      // In case this NM was killed while there were running spark applications, we need to restore
      // lost state for the existing executors. We look for an existing file in the NM's local dirs.
      // If we don't find one, then we choose a file to use to save the state next time. Even if
      // an application was stopped while the NM was down, we expect yarn to call stopApplication()
      // when it comes back
      if (_recoveryPath != null) {
        registeredExecutorFile = initRecoveryDb(RECOVERY_FILE_NAME);
      }

      TransportConf transportConf = new TransportConf("shuffle", new HadoopConfigProvider(conf));
      blockHandler = new ExternalShuffleBlockHandler(transportConf, registeredExecutorFile);

      // If authentication is enabled, set up the shuffle server to use a
      // special RPC handler that filters out unauthenticated fetch requests
      List<TransportServerBootstrap> bootstraps = Lists.newArrayList();
      boolean authEnabled = conf.getBoolean(SPARK_AUTHENTICATE_KEY, DEFAULT_SPARK_AUTHENTICATE);
      if (authEnabled) {
        secretManager = new ShuffleSecretManager();
        if (_recoveryPath != null) {
          loadSecretsFromDb();
        }
        bootstraps.add(new AuthServerBootstrap(transportConf, secretManager));
      }

      int port = conf.getInt(
        SPARK_SHUFFLE_SERVICE_PORT_KEY, DEFAULT_SPARK_SHUFFLE_SERVICE_PORT);
      TransportContext transportContext = new TransportContext(transportConf, blockHandler);
      shuffleServer = transportContext.createServer(port, bootstraps);
      // the port should normally be fixed, but for tests its useful to find an open port
      port = shuffleServer.getPort();
      boundPort = port;
      String authEnabledString = authEnabled ? "enabled" : "not enabled";
      logger.info("Started YARN shuffle service for Spark on port {}. " +
        "Authentication is {}. Registered executor file is {}", port, authEnabledString,
        registeredExecutorFile);
    } catch (Exception e) {
      if (stopOnFailure) {
        throw e;
      } else {
        noteFailure(e);
      }
    }
  }

  /**
   * Reload application secrets persisted before an NM restart, registering each
   * one with the {@link ShuffleSecretManager}.
   */
  private void loadSecretsFromDb() throws IOException {
    secretsFile = initRecoveryDb(SECRETS_RECOVERY_FILE_NAME);

    // Make sure this is protected in case its not in the NM recovery dir
    FileSystem fs = FileSystem.getLocal(_conf);
    fs.mkdirs(new Path(secretsFile.getPath()), new FsPermission((short) 0700));

    db = LevelDBProvider.initLevelDB(secretsFile, CURRENT_VERSION, mapper);
    logger.info("Recovery location is: " + secretsFile.getPath());
    if (db != null) {
      logger.info("Going to reload spark shuffle data");
      // DBIterator is Closeable and holds native LevelDB resources; the original
      // code never closed it, leaking the iterator on every recovery.
      try (DBIterator itr = db.iterator()) {
        itr.seek(APP_CREDS_KEY_PREFIX.getBytes(StandardCharsets.UTF_8));
        while (itr.hasNext()) {
          Map.Entry<byte[], byte[]> e = itr.next();
          String key = new String(e.getKey(), StandardCharsets.UTF_8);
          if (!key.startsWith(APP_CREDS_KEY_PREFIX)) {
            break;
          }
          String id = parseDbAppKey(key);
          ByteBuffer secret = mapper.readValue(e.getValue(), ByteBuffer.class);
          logger.info("Reloading tokens for app: " + id);
          secretManager.registerApp(id, secret);
        }
      }
    }
  }

  /** Extracts the application id from a DB key of the form "AppCreds;{json}". */
  private static String parseDbAppKey(String s) throws IOException {
    if (!s.startsWith(APP_CREDS_KEY_PREFIX)) {
      throw new IllegalArgumentException("expected a string starting with " + APP_CREDS_KEY_PREFIX);
    }
    String json = s.substring(APP_CREDS_KEY_PREFIX.length() + 1);
    AppId parsed = mapper.readValue(json, AppId.class);
    return parsed.appId;
  }

  private static byte[] dbAppKey(AppId appExecId) throws IOException {
    // we stick a common prefix on all the keys so we can find them in the DB
    String appExecJson = mapper.writeValueAsString(appExecId);
    String key = (APP_CREDS_KEY_PREFIX + ";" + appExecJson);
    return key.getBytes(StandardCharsets.UTF_8);
  }

  @Override
  public void initializeApplication(ApplicationInitializationContext context) {
    String appId = context.getApplicationId().toString();
    try {
      ByteBuffer shuffleSecret = context.getApplicationDataForService();
      if (isAuthenticationEnabled()) {
        AppId fullId = new AppId(appId);
        if (db != null) {
          // Persist the secret first so it survives an NM restart.
          byte[] key = dbAppKey(fullId);
          byte[] value = mapper.writeValueAsString(shuffleSecret).getBytes(StandardCharsets.UTF_8);
          db.put(key, value);
        }
        secretManager.registerApp(appId, shuffleSecret);
      }
    } catch (Exception e) {
      logger.error("Exception when initializing application {}", appId, e);
    }
  }

  @Override
  public void stopApplication(ApplicationTerminationContext context) {
    String appId = context.getApplicationId().toString();
    try {
      if (isAuthenticationEnabled()) {
        AppId fullId = new AppId(appId);
        if (db != null) {
          try {
            db.delete(dbAppKey(fullId));
          } catch (IOException e) {
            logger.error("Error deleting {} from executor state db", appId, e);
          }
        }
        secretManager.unregisterApp(appId);
      }
      blockHandler.applicationRemoved(appId, false /* clean up local dirs */);
    } catch (Exception e) {
      logger.error("Exception when stopping application {}", appId, e);
    }
  }

  @Override
  public void initializeContainer(ContainerInitializationContext context) {
    ContainerId containerId = context.getContainerId();
    logger.info("Initializing container {}", containerId);
  }

  @Override
  public void stopContainer(ContainerTerminationContext context) {
    ContainerId containerId = context.getContainerId();
    logger.info("Stopping container {}", containerId);
  }

  /**
   * Close the shuffle server to clean up any associated state.
   */
  @Override
  protected void serviceStop() {
    try {
      if (shuffleServer != null) {
        shuffleServer.close();
      }
      if (blockHandler != null) {
        blockHandler.close();
      }
      if (db != null) {
        db.close();
      }
    } catch (Exception e) {
      logger.error("Exception when stopping service", e);
    }
  }

  // Not currently used
  @Override
  public ByteBuffer getMetaData() {
    return ByteBuffer.allocate(0);
  }

  /**
   * Set the recovery path for shuffle service recovery when NM is restarted. This will be call
   * by NM if NM recovery is enabled.
   */
  @Override
  public void setRecoveryPath(Path recoveryPath) {
    _recoveryPath = recoveryPath;
  }

  /**
   * Get the path specific to this auxiliary service to use for recovery.
   * Note: {@code fileName} is currently unused; all recovery DBs share {@code _recoveryPath}.
   */
  protected Path getRecoveryPath(String fileName) {
    return _recoveryPath;
  }

  /**
   * Figure out the recovery path and handle moving the DB if YARN NM recovery gets enabled
   * and DB exists in the local dir of NM by old version of shuffle service.
   */
  protected File initRecoveryDb(String dbName) {
    Preconditions.checkNotNull(_recoveryPath,
      "recovery path should not be null if NM recovery is enabled");

    File recoveryFile = new File(_recoveryPath.toUri().getPath(), dbName);
    if (recoveryFile.exists()) {
      return recoveryFile;
    }

    // db doesn't exist in recovery path go check local dirs for it
    String[] localDirs = _conf.getTrimmedStrings("yarn.nodemanager.local-dirs");
    for (String dir : localDirs) {
      File f = new File(new Path(dir).toUri().getPath(), dbName);
      if (f.exists()) {
        // If the recovery path is set then either NM recovery is enabled or another recovery
        // DB has been initialized. If NM recovery is enabled and had set the recovery path
        // make sure to move all DBs to the recovery path from the old NM local dirs.
        // If another DB was initialized first just make sure all the DBs are in the same
        // location.
        Path newLoc = new Path(_recoveryPath, dbName);
        Path copyFrom = new Path(f.toURI());
        if (!newLoc.equals(copyFrom)) {
          logger.info("Moving " + copyFrom + " to: " + newLoc);
          try {
            // The move here needs to handle moving non-empty directories across NFS mounts
            FileSystem fs = FileSystem.getLocal(_conf);
            fs.rename(copyFrom, newLoc);
          } catch (Exception e) {
            // Fail to move recovery file to new path, just continue on with new DB location
            logger.error("Failed to move recovery file {} to the path {}",
              dbName, _recoveryPath.toString(), e);
          }
        }
        return new File(newLoc.toUri().getPath());
      }
    }

    return new File(_recoveryPath.toUri().getPath(), dbName);
  }

  /**
   * Simply encodes an application ID.
   */
  public static class AppId {
    public final String appId;

    @JsonCreator
    public AppId(@JsonProperty("appId") String appId) {
      this.appId = appId;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) return true;
      if (o == null || getClass() != o.getClass()) return false;

      AppId appExecId = (AppId) o;
      return Objects.equal(appId, appExecId.appId);
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(appId);
    }

    @Override
    public String toString() {
      return Objects.toStringHelper(this)
        .add("appId", appId)
        .toString();
    }
  }
}
| 9,779 |
0 | Create_ds/spark/common/network-yarn/src/main/java/org/apache/spark/network/yarn | Create_ds/spark/common/network-yarn/src/main/java/org/apache/spark/network/yarn/util/HadoopConfigProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.yarn.util;
import java.util.Map;
import java.util.NoSuchElementException;
import org.apache.hadoop.conf.Configuration;
import org.apache.spark.network.util.ConfigProvider;
/** Use the Hadoop configuration to obtain config values. */
/** A {@link ConfigProvider} backed by a Hadoop {@link Configuration}. */
public class HadoopConfigProvider extends ConfigProvider {
  private final Configuration conf;

  public HadoopConfigProvider(Configuration conf) {
    this.conf = conf;
  }

  /**
   * Returns the value for {@code name}.
   *
   * @throws NoSuchElementException if the configuration has no entry for {@code name}.
   */
  @Override
  public String get(String name) {
    String value = conf.get(name);
    if (value != null) {
      return value;
    }
    throw new NoSuchElementException(name);
  }

  /** Returns the value for {@code name}, or {@code defaultValue} if absent. */
  @Override
  public String get(String name, String defaultValue) {
    String value = conf.get(name);
    if (value == null) {
      return defaultValue;
    }
    return value;
  }

  /** Exposes every configuration entry; Configuration is itself an Iterable of entries. */
  @Override
  public Iterable<Map.Entry<String, String>> getAll() {
    return conf;
  }
}
| 9,780 |
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/TestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network;
import java.net.InetAddress;
/** Small helpers shared by the network test suites. */
public class TestUtils {

  /**
   * Returns the textual IP address of the local host.
   * Any resolution failure is surfaced as an unchecked exception so test
   * setup code does not need a throws clause.
   */
  public static String getLocalHost() {
    try {
      InetAddress localHost = InetAddress.getLocalHost();
      return localHost.getHostAddress();
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
}
| 9,781 |
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/TestManagedBuffer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import com.google.common.base.Preconditions;
import io.netty.buffer.Unpooled;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NettyManagedBuffer;
/**
* A ManagedBuffer implementation that contains 0, 1, 2, 3, ..., (len-1).
*
* Used for testing.
*/
/**
 * A ManagedBuffer implementation that contains 0, 1, 2, 3, ..., (len-1).
 *
 * Used for testing. Two instances with the same {@code len} are equal, and
 * equality also holds against any other ManagedBuffer carrying the same bytes.
 */
public class TestManagedBuffer extends ManagedBuffer {

  private final int len;
  private NettyManagedBuffer underlying;

  public TestManagedBuffer(int len) {
    // Content bytes are cast to byte, so cap len to keep values unambiguous.
    Preconditions.checkArgument(len <= Byte.MAX_VALUE);
    this.len = len;
    byte[] byteArray = new byte[len];
    for (int i = 0; i < len; i ++) {
      byteArray[i] = (byte) i;
    }
    this.underlying = new NettyManagedBuffer(Unpooled.wrappedBuffer(byteArray));
  }

  @Override
  public long size() {
    return underlying.size();
  }

  @Override
  public ByteBuffer nioByteBuffer() throws IOException {
    return underlying.nioByteBuffer();
  }

  @Override
  public InputStream createInputStream() throws IOException {
    return underlying.createInputStream();
  }

  @Override
  public ManagedBuffer retain() {
    underlying.retain();
    return this;
  }

  @Override
  public ManagedBuffer release() {
    underlying.release();
    return this;
  }

  @Override
  public Object convertToNetty() throws IOException {
    return underlying.convertToNetty();
  }

  @Override
  public int hashCode() {
    // equals() below is content-based, and the content is fully determined by
    // `len`, so hash on `len` to keep the equals/hashCode contract. Previously
    // this delegated to the underlying buffer, whose hash is not guaranteed to
    // be content-based, so two equal TestManagedBuffers could hash differently.
    return len;
  }

  @Override
  public boolean equals(Object other) {
    if (other instanceof ManagedBuffer) {
      try {
        ByteBuffer nioBuf = ((ManagedBuffer) other).nioByteBuffer();
        if (nioBuf.remaining() != len) {
          return false;
        } else {
          // Verify the other buffer holds exactly the bytes 0..len-1.
          for (int i = 0; i < len; i ++) {
            if (nioBuf.get() != i) {
              return false;
            }
          }
          return true;
        }
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
    return false;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network;
import java.io.File;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import com.google.common.collect.Sets;
import com.google.common.io.Closeables;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.spark.network.buffer.FileSegmentManagedBuffer;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NioManagedBuffer;
import org.apache.spark.network.client.ChunkReceivedCallback;
import org.apache.spark.network.client.RpcResponseCallback;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.client.TransportClientFactory;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.server.TransportServer;
import org.apache.spark.network.server.StreamManager;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.TransportConf;
/**
 * End-to-end test of chunk fetching: starts a real TransportServer whose StreamManager serves
 * two chunks on stream {@link #STREAM_ID} — one backed by an in-memory NIO buffer and one backed
 * by a segment of a temp file — then fetches them through a TransportClient and verifies the
 * received bytes match byte-for-byte.
 */
public class ChunkFetchIntegrationSuite {
  // The single stream id the test StreamManager recognizes.
  static final long STREAM_ID = 1;
  // Chunk index served from the in-memory ByteBuffer.
  static final int BUFFER_CHUNK_INDEX = 0;
  // Chunk index served from a FileSegmentManagedBuffer over testFile.
  static final int FILE_CHUNK_INDEX = 1;
  static TransportServer server;
  static TransportClientFactory clientFactory;
  static StreamManager streamManager;
  static File testFile;
  // Expected contents for the two chunks, used by the assertions below.
  static ManagedBuffer bufferChunk;
  static ManagedBuffer fileChunk;
  /**
   * Builds the buffer chunk (100000 bytes of i mod 256), writes 1024 random bytes to a temp
   * file for the file chunk (skipping 10 leading and 15 trailing bytes via the segment offsets),
   * and starts a server whose RpcHandler only supports chunk fetches.
   */
  @BeforeClass
  public static void setUp() throws Exception {
    int bufSize = 100000;
    final ByteBuffer buf = ByteBuffer.allocate(bufSize);
    for (int i = 0; i < bufSize; i ++) {
      buf.put((byte) i);
    }
    buf.flip();
    bufferChunk = new NioManagedBuffer(buf);
    testFile = File.createTempFile("shuffle-test-file", "txt");
    testFile.deleteOnExit();
    RandomAccessFile fp = new RandomAccessFile(testFile, "rw");
    // Guava Closeables idiom: suppress close() exceptions only if the write itself failed,
    // so the original failure is not masked.
    boolean shouldSuppressIOException = true;
    try {
      byte[] fileContent = new byte[1024];
      new Random().nextBytes(fileContent);
      fp.write(fileContent);
      shouldSuppressIOException = false;
    } finally {
      Closeables.close(fp, shouldSuppressIOException);
    }
    final TransportConf conf = new TransportConf("shuffle", MapConfigProvider.EMPTY);
    // Segment covers bytes [10, fileLength - 15); mirrored by getChunk() below.
    fileChunk = new FileSegmentManagedBuffer(conf, testFile, 10, testFile.length() - 25);
    streamManager = new StreamManager() {
      @Override
      public ManagedBuffer getChunk(long streamId, int chunkIndex) {
        assertEquals(STREAM_ID, streamId);
        if (chunkIndex == BUFFER_CHUNK_INDEX) {
          return new NioManagedBuffer(buf);
        } else if (chunkIndex == FILE_CHUNK_INDEX) {
          return new FileSegmentManagedBuffer(conf, testFile, 10, testFile.length() - 25);
        } else {
          throw new IllegalArgumentException("Invalid chunk index: " + chunkIndex);
        }
      }
    };
    RpcHandler handler = new RpcHandler() {
      @Override
      public void receive(
          TransportClient client,
          ByteBuffer message,
          RpcResponseCallback callback) {
        // This suite only exercises chunk fetches; plain RPCs are unsupported.
        throw new UnsupportedOperationException();
      }
      @Override
      public StreamManager getStreamManager() {
        return streamManager;
      }
    };
    TransportContext context = new TransportContext(conf, handler);
    server = context.createServer();
    clientFactory = context.createClientFactory();
  }
  @AfterClass
  public static void tearDown() {
    bufferChunk.release();
    server.close();
    clientFactory.close();
    testFile.delete();
  }
  /** Aggregated outcome of one fetchChunks() call; all collections are synchronized. */
  static class FetchResult {
    public Set<Integer> successChunks;
    public Set<Integer> failedChunks;
    public List<ManagedBuffer> buffers;
    public void releaseBuffers() {
      for (ManagedBuffer buffer : buffers) {
        buffer.release();
      }
    }
  }
  /**
   * Fetches the given chunk indices on STREAM_ID and blocks (up to 5s) until one callback has
   * fired per requested chunk. Successful buffers are retained so they outlive the client.
   */
  private FetchResult fetchChunks(List<Integer> chunkIndices) throws Exception {
    TransportClient client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
    final Semaphore sem = new Semaphore(0);
    final FetchResult res = new FetchResult();
    res.successChunks = Collections.synchronizedSet(new HashSet<Integer>());
    res.failedChunks = Collections.synchronizedSet(new HashSet<Integer>());
    res.buffers = Collections.synchronizedList(new LinkedList<ManagedBuffer>());
    ChunkReceivedCallback callback = new ChunkReceivedCallback() {
      @Override
      public void onSuccess(int chunkIndex, ManagedBuffer buffer) {
        // Retain: the transport may release the buffer once the callback returns.
        buffer.retain();
        res.successChunks.add(chunkIndex);
        res.buffers.add(buffer);
        sem.release();
      }
      @Override
      public void onFailure(int chunkIndex, Throwable e) {
        res.failedChunks.add(chunkIndex);
        sem.release();
      }
    };
    for (int chunkIndex : chunkIndices) {
      client.fetchChunk(STREAM_ID, chunkIndex, callback);
    }
    if (!sem.tryAcquire(chunkIndices.size(), 5, TimeUnit.SECONDS)) {
      fail("Timeout getting response from the server");
    }
    client.close();
    return res;
  }
  @Test
  public void fetchBufferChunk() throws Exception {
    FetchResult res = fetchChunks(Arrays.asList(BUFFER_CHUNK_INDEX));
    assertEquals(Sets.newHashSet(BUFFER_CHUNK_INDEX), res.successChunks);
    assertTrue(res.failedChunks.isEmpty());
    assertBufferListsEqual(Arrays.asList(bufferChunk), res.buffers);
    res.releaseBuffers();
  }
  @Test
  public void fetchFileChunk() throws Exception {
    FetchResult res = fetchChunks(Arrays.asList(FILE_CHUNK_INDEX));
    assertEquals(Sets.newHashSet(FILE_CHUNK_INDEX), res.successChunks);
    assertTrue(res.failedChunks.isEmpty());
    assertBufferListsEqual(Arrays.asList(fileChunk), res.buffers);
    res.releaseBuffers();
  }
  @Test
  public void fetchNonExistentChunk() throws Exception {
    // Index 12345 is not served by the StreamManager, so the fetch must fail cleanly.
    FetchResult res = fetchChunks(Arrays.asList(12345));
    assertTrue(res.successChunks.isEmpty());
    assertEquals(Sets.newHashSet(12345), res.failedChunks);
    assertTrue(res.buffers.isEmpty());
  }
  @Test
  public void fetchBothChunks() throws Exception {
    FetchResult res = fetchChunks(Arrays.asList(BUFFER_CHUNK_INDEX, FILE_CHUNK_INDEX));
    assertEquals(Sets.newHashSet(BUFFER_CHUNK_INDEX, FILE_CHUNK_INDEX), res.successChunks);
    assertTrue(res.failedChunks.isEmpty());
    assertBufferListsEqual(Arrays.asList(bufferChunk, fileChunk), res.buffers);
    res.releaseBuffers();
  }
  @Test
  public void fetchChunkAndNonExistent() throws Exception {
    // One valid and one invalid index in the same batch: each gets its own outcome.
    FetchResult res = fetchChunks(Arrays.asList(BUFFER_CHUNK_INDEX, 12345));
    assertEquals(Sets.newHashSet(BUFFER_CHUNK_INDEX), res.successChunks);
    assertEquals(Sets.newHashSet(12345), res.failedChunks);
    assertBufferListsEqual(Arrays.asList(bufferChunk), res.buffers);
    res.releaseBuffers();
  }
  /** Asserts the two buffer lists are the same length and pairwise byte-equal. */
  private static void assertBufferListsEqual(List<ManagedBuffer> list0, List<ManagedBuffer> list1)
      throws Exception {
    assertEquals(list0.size(), list1.size());
    for (int i = 0; i < list0.size(); i ++) {
      assertBuffersEqual(list0.get(i), list1.get(i));
    }
  }
  /** Byte-for-byte comparison of two ManagedBuffers via their NIO views. */
  private static void assertBuffersEqual(ManagedBuffer buffer0, ManagedBuffer buffer1)
      throws Exception {
    ByteBuffer nio0 = buffer0.nioByteBuffer();
    ByteBuffer nio1 = buffer1.nioByteBuffer();
    int len = nio0.remaining();
    assertEquals(nio0.remaining(), nio1.remaining());
    for (int i = 0; i < len; i ++) {
      assertEquals(nio0.get(), nio1.get());
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NioManagedBuffer;
import org.apache.spark.network.client.*;
import org.apache.spark.network.server.*;
import org.apache.spark.network.util.JavaUtils;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.TransportConf;
/**
 * End-to-end RPC tests: a real server whose RpcHandler interprets "command/arg" strings
 * ("hello", "return error", "throw error"), accepts one-way messages, and receives uploaded
 * streams (with configurable failure injection via "fail/..." headers). Clients send RPCs and
 * streams and the tests assert on the aggregated success/error messages.
 */
public class RpcIntegrationSuite {
  static TransportConf conf;
  static TransportServer server;
  static TransportClientFactory clientFactory;
  static RpcHandler rpcHandler;
  // One-way (no-reply) messages observed by the server, in arrival order.
  static List<String> oneWayMsgs;
  // Provides the source streams ("file", buffers, ...) that clients upload.
  static StreamTestHelper testData;

  // Stream callbacks created server-side for successful uploads, keyed by the stream header,
  // so tests can verify the received bytes after the upload completes.
  static ConcurrentHashMap<String, VerifyingStreamCallback> streamCallbacks =
      new ConcurrentHashMap<>();

  @BeforeClass
  public static void setUp() throws Exception {
    conf = new TransportConf("shuffle", MapConfigProvider.EMPTY);
    testData = new StreamTestHelper();
    rpcHandler = new RpcHandler() {
      @Override
      public void receive(
          TransportClient client,
          ByteBuffer message,
          RpcResponseCallback callback) {
        // Messages are "command/argument"; unknown commands are silently ignored.
        String msg = JavaUtils.bytesToString(message);
        String[] parts = msg.split("/");
        if (parts[0].equals("hello")) {
          callback.onSuccess(JavaUtils.stringToBytes("Hello, " + parts[1] + "!"));
        } else if (parts[0].equals("return error")) {
          callback.onFailure(new RuntimeException("Returned: " + parts[1]));
        } else if (parts[0].equals("throw error")) {
          throw new RuntimeException("Thrown: " + parts[1]);
        }
      }
      @Override
      public StreamCallbackWithID receiveStream(
          TransportClient client,
          ByteBuffer messageHeader,
          RpcResponseCallback callback) {
        return receiveStreamHelper(JavaUtils.bytesToString(messageHeader));
      }
      @Override
      public void receive(TransportClient client, ByteBuffer message) {
        oneWayMsgs.add(JavaUtils.bytesToString(message));
      }
      @Override
      public StreamManager getStreamManager() { return new OneForOneStreamManager(); }
    };
    TransportContext context = new TransportContext(conf, rpcHandler);
    server = context.createServer();
    clientFactory = context.createClientFactory();
    oneWayMsgs = new ArrayList<>();
  }

  /**
   * Returns the server-side callback for an uploaded stream. Headers of the form
   * "fail/&lt;mode&gt;/..." inject failures (exception in onData, exception in onComplete,
   * or a null handler); anything else gets a VerifyingStreamCallback that records the bytes.
   */
  private static StreamCallbackWithID receiveStreamHelper(String msg) {
    try {
      if (msg.startsWith("fail/")) {
        String[] parts = msg.split("/");
        switch (parts[1]) {
          case "exception-ondata":
            return new StreamCallbackWithID() {
              @Override
              public void onData(String streamId, ByteBuffer buf) throws IOException {
                throw new IOException("failed to read stream data!");
              }
              @Override
              public void onComplete(String streamId) throws IOException {
              }
              @Override
              public void onFailure(String streamId, Throwable cause) throws IOException {
              }
              @Override
              public String getID() {
                return msg;
              }
            };
          case "exception-oncomplete":
            return new StreamCallbackWithID() {
              @Override
              public void onData(String streamId, ByteBuffer buf) throws IOException {
              }
              @Override
              public void onComplete(String streamId) throws IOException {
                throw new IOException("exception in onComplete");
              }
              @Override
              public void onFailure(String streamId, Throwable cause) throws IOException {
              }
              @Override
              public String getID() {
                return msg;
              }
            };
          case "null":
            return null;
          default:
            throw new IllegalArgumentException("unexpected msg: " + msg);
        }
      } else {
        VerifyingStreamCallback streamCallback = new VerifyingStreamCallback(msg);
        streamCallbacks.put(msg, streamCallback);
        return streamCallback;
      }
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  @AfterClass
  public static void tearDown() {
    server.close();
    clientFactory.close();
    testData.cleanup();
  }

  /** Aggregated outcome of a batch of RPCs; both sets are synchronized. */
  static class RpcResult {
    public Set<String> successMessages;
    public Set<String> errorMessages;
  }

  /**
   * Sends each command as an RPC on one client and blocks (up to 5s) until every callback
   * has fired, collecting success responses and error messages.
   */
  private RpcResult sendRPC(String ... commands) throws Exception {
    TransportClient client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
    final Semaphore sem = new Semaphore(0);
    final RpcResult res = new RpcResult();
    res.successMessages = Collections.synchronizedSet(new HashSet<String>());
    res.errorMessages = Collections.synchronizedSet(new HashSet<String>());
    RpcResponseCallback callback = new RpcResponseCallback() {
      @Override
      public void onSuccess(ByteBuffer message) {
        String response = JavaUtils.bytesToString(message);
        res.successMessages.add(response);
        sem.release();
      }
      @Override
      public void onFailure(Throwable e) {
        res.errorMessages.add(e.getMessage());
        sem.release();
      }
    };
    for (String command : commands) {
      client.sendRpc(JavaUtils.stringToBytes(command), callback);
    }
    if (!sem.tryAcquire(commands.length, 5, TimeUnit.SECONDS)) {
      fail("Timeout getting response from the server");
    }
    client.close();
    return res;
  }

  /**
   * Uploads each named stream (the segment after the last '/' picks the test data; the full
   * string is sent as the header so "fail/..." prefixes reach the server) and waits up to 5s
   * for all callbacks, then verifies the bytes received by any successful stream callbacks.
   */
  private RpcResult sendRpcWithStream(String... streams) throws Exception {
    TransportClient client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
    final Semaphore sem = new Semaphore(0);
    RpcResult res = new RpcResult();
    res.successMessages = Collections.synchronizedSet(new HashSet<String>());
    res.errorMessages = Collections.synchronizedSet(new HashSet<String>());
    for (String stream : streams) {
      int idx = stream.lastIndexOf('/');
      ManagedBuffer meta = new NioManagedBuffer(JavaUtils.stringToBytes(stream));
      String streamName = (idx == -1) ? stream : stream.substring(idx + 1);
      ManagedBuffer data = testData.openStream(conf, streamName);
      client.uploadStream(meta, data, new RpcStreamCallback(stream, res, sem));
    }
    if (!sem.tryAcquire(streams.length, 5, TimeUnit.SECONDS)) {
      fail("Timeout getting response from the server");
    }
    streamCallbacks.values().forEach(streamCallback -> {
      try {
        streamCallback.verify();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    });
    client.close();
    return res;
  }

  /** Records a stream upload's outcome (by stream header) into an RpcResult. */
  private static class RpcStreamCallback implements RpcResponseCallback {
    final String streamId;
    final RpcResult res;
    final Semaphore sem;

    RpcStreamCallback(String streamId, RpcResult res, Semaphore sem) {
      this.streamId = streamId;
      this.res = res;
      this.sem = sem;
    }

    @Override
    public void onSuccess(ByteBuffer message) {
      res.successMessages.add(streamId);
      sem.release();
    }

    @Override
    public void onFailure(Throwable e) {
      res.errorMessages.add(e.getMessage());
      sem.release();
    }
  }

  @Test
  public void singleRPC() throws Exception {
    RpcResult res = sendRPC("hello/Aaron");
    assertEquals(res.successMessages, Sets.newHashSet("Hello, Aaron!"));
    assertTrue(res.errorMessages.isEmpty());
  }

  @Test
  public void doubleRPC() throws Exception {
    RpcResult res = sendRPC("hello/Aaron", "hello/Reynold");
    assertEquals(res.successMessages, Sets.newHashSet("Hello, Aaron!", "Hello, Reynold!"));
    assertTrue(res.errorMessages.isEmpty());
  }

  @Test
  public void returnErrorRPC() throws Exception {
    RpcResult res = sendRPC("return error/OK");
    assertTrue(res.successMessages.isEmpty());
    assertErrorsContain(res.errorMessages, Sets.newHashSet("Returned: OK"));
  }

  @Test
  public void throwErrorRPC() throws Exception {
    RpcResult res = sendRPC("throw error/uh-oh");
    assertTrue(res.successMessages.isEmpty());
    assertErrorsContain(res.errorMessages, Sets.newHashSet("Thrown: uh-oh"));
  }

  @Test
  public void doubleTrouble() throws Exception {
    RpcResult res = sendRPC("return error/OK", "throw error/uh-oh");
    assertTrue(res.successMessages.isEmpty());
    assertErrorsContain(res.errorMessages, Sets.newHashSet("Returned: OK", "Thrown: uh-oh"));
  }

  @Test
  public void sendSuccessAndFailure() throws Exception {
    RpcResult res = sendRPC("hello/Bob", "throw error/the", "hello/Builder", "return error/!");
    assertEquals(res.successMessages, Sets.newHashSet("Hello, Bob!", "Hello, Builder!"));
    assertErrorsContain(res.errorMessages, Sets.newHashSet("Thrown: the", "Returned: !"));
  }

  @Test
  public void sendOneWayMessage() throws Exception {
    final String message = "no reply";
    TransportClient client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
    try {
      client.send(JavaUtils.stringToBytes(message));
      assertEquals(0, client.getHandler().numOutstandingRequests());
      // Make sure the message arrives.
      long deadline = System.nanoTime() + TimeUnit.NANOSECONDS.convert(10, TimeUnit.SECONDS);
      while (System.nanoTime() < deadline && oneWayMsgs.size() == 0) {
        TimeUnit.MILLISECONDS.sleep(10);
      }
      assertEquals(1, oneWayMsgs.size());
      assertEquals(message, oneWayMsgs.get(0));
    } finally {
      client.close();
    }
  }

  @Test
  public void sendRpcWithStreamOneAtATime() throws Exception {
    for (String stream : StreamTestHelper.STREAMS) {
      RpcResult res = sendRpcWithStream(stream);
      assertTrue("there were error messages!" + res.errorMessages, res.errorMessages.isEmpty());
      assertEquals(Sets.newHashSet(stream), res.successMessages);
    }
  }

  @Test
  public void sendRpcWithStreamConcurrently() throws Exception {
    String[] streams = new String[10];
    for (int i = 0; i < 10; i++) {
      streams[i] = StreamTestHelper.STREAMS[i % StreamTestHelper.STREAMS.length];
    }
    RpcResult res = sendRpcWithStream(streams);
    assertEquals(Sets.newHashSet(StreamTestHelper.STREAMS), res.successMessages);
    assertTrue(res.errorMessages.isEmpty());
  }

  @Test
  public void sendRpcWithStreamFailures() throws Exception {
    // when there is a failure reading stream data, we don't try to keep the channel usable,
    // just send back a decent error msg.
    RpcResult exceptionInCallbackResult =
        sendRpcWithStream("fail/exception-ondata/smallBuffer", "smallBuffer");
    assertErrorAndClosed(exceptionInCallbackResult, "Destination failed while reading stream");

    RpcResult nullStreamHandler =
        sendRpcWithStream("fail/null/smallBuffer", "smallBuffer");
    // Fixed copy-paste bug: this previously re-asserted exceptionInCallbackResult, leaving
    // the null-handler result completely unchecked.
    assertErrorAndClosed(nullStreamHandler, "Destination failed while reading stream");

    // OTOH, if there is a failure during onComplete, the channel should still be fine
    RpcResult exceptionInOnComplete =
        sendRpcWithStream("fail/exception-oncomplete/smallBuffer", "smallBuffer");
    assertErrorsContain(exceptionInOnComplete.errorMessages,
        Sets.newHashSet("Failure post-processing"));
    assertEquals(Sets.newHashSet("smallBuffer"), exceptionInOnComplete.successMessages);
  }

  /** Asserts every expected substring matches a distinct error, and counts line up exactly. */
  private void assertErrorsContain(Set<String> errors, Set<String> contains) {
    assertEquals("Expected " + contains.size() + " errors, got " + errors.size() + " errors: " +
        errors, contains.size(), errors.size());

    Pair<Set<String>, Set<String>> r = checkErrorsContain(errors, contains);
    assertTrue("Could not find error containing " + r.getRight() + "; errors: " + errors,
        r.getRight().isEmpty());

    assertTrue(r.getLeft().isEmpty());
  }

  /**
   * Asserts the result has no successes and exactly two errors: the expected one plus one
   * connection-closed error (whose exact wording varies by platform/JVM).
   */
  private void assertErrorAndClosed(RpcResult result, String expectedError) {
    assertTrue("unexpected success: " + result.successMessages, result.successMessages.isEmpty());
    Set<String> errors = result.errorMessages;
    assertEquals("Expected 2 errors, got " + errors.size() + " errors: " +
        errors, 2, errors.size());

    // We expect 1 additional error due to closed connection and here are possible keywords in the
    // error message.
    Set<String> possibleClosedErrors = Sets.newHashSet(
        "closed",
        "Connection reset",
        "java.nio.channels.ClosedChannelException",
        "java.io.IOException: Broken pipe"
    );
    Set<String> containsAndClosed = Sets.newHashSet(expectedError);
    containsAndClosed.addAll(possibleClosedErrors);

    Pair<Set<String>, Set<String>> r = checkErrorsContain(errors, containsAndClosed);

    assertTrue("Got a non-empty set " + r.getLeft(), r.getLeft().isEmpty());

    Set<String> errorsNotFound = r.getRight();
    assertEquals(
        "The size of " + errorsNotFound + " was not " + (possibleClosedErrors.size() - 1),
        possibleClosedErrors.size() - 1,
        errorsNotFound.size());
    for (String err: errorsNotFound) {
      assertTrue("Found a wrong error " + err, containsAndClosed.contains(err));
    }
  }

  /**
   * Greedily matches each expected substring against the errors; returns (unmatched errors,
   * expected substrings with no match).
   */
  private Pair<Set<String>, Set<String>> checkErrorsContain(
      Set<String> errors,
      Set<String> contains) {
    Set<String> remainingErrors = Sets.newHashSet(errors);
    Set<String> notFound = Sets.newHashSet();
    for (String contain : contains) {
      Iterator<String> it = remainingErrors.iterator();
      boolean foundMatch = false;
      while (it.hasNext()) {
        if (it.next().contains(contain)) {
          it.remove();
          foundMatch = true;
          break;
        }
      }
      if (!foundMatch) {
        notFound.add(contain);
      }
    }
    return new ImmutablePair<>(remainingErrors, notFound);
  }

  /**
   * Server-side stream callback that writes received bytes to a temp file (for the "file"
   * stream) or an in-memory buffer, and can verify them against the original source data.
   */
  private static class VerifyingStreamCallback implements StreamCallbackWithID {
    final String streamId;
    final StreamSuite.TestCallback helper;
    final OutputStream out;
    final File outFile;

    VerifyingStreamCallback(String streamId) throws IOException {
      if (streamId.equals("file")) {
        outFile = File.createTempFile("data", ".tmp", testData.tempDir);
        out = new FileOutputStream(outFile);
      } else {
        out = new ByteArrayOutputStream();
        outFile = null;
      }
      this.streamId = streamId;
      helper = new StreamSuite.TestCallback(out);
    }

    void verify() throws IOException {
      if (streamId.equals("file")) {
        assertTrue("File stream did not match.", Files.equal(testData.testFile, outFile));
      } else {
        byte[] result = ((ByteArrayOutputStream)out).toByteArray();
        ByteBuffer srcBuffer = testData.srcBuffer(streamId);
        ByteBuffer base;
        // Duplicate under the source's lock so concurrent uploads don't race on its position.
        synchronized (srcBuffer) {
          base = srcBuffer.duplicate();
        }
        byte[] expected = new byte[base.remaining()];
        base.get(expected);
        assertEquals(expected.length, result.length);
        assertTrue("buffers don't match", Arrays.equals(expected, result));
      }
    }

    @Override
    public void onData(String streamId, ByteBuffer buf) throws IOException {
      helper.onData(streamId, buf);
    }

    @Override
    public void onComplete(String streamId) throws IOException {
      helper.onComplete(streamId);
    }

    @Override
    public void onFailure(String streamId, Throwable cause) throws IOException {
      helper.onFailure(streamId, cause);
    }

    @Override
    public String getID() {
      return streamId;
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network;
import com.google.common.util.concurrent.Uninterruptibles;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NioManagedBuffer;
import org.apache.spark.network.client.ChunkReceivedCallback;
import org.apache.spark.network.client.RpcResponseCallback;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.client.TransportClientFactory;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.server.StreamManager;
import org.apache.spark.network.server.TransportServer;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.TransportConf;
import org.junit.*;
import static org.junit.Assert.*;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
/**
* Suite which ensures that requests that go without a response for the network timeout period are
* failed, and the connection closed.
*
* In this suite, we use 10 seconds as the connection timeout, with some slack given in the tests,
* to ensure stability in different test environments.
*/
public class RequestTimeoutIntegrationSuite {
  private TransportServer server;
  private TransportClientFactory clientFactory;
  // Fallback StreamManager used by RPC-only tests; chunk fetches are unsupported on it.
  private StreamManager defaultManager;
  private TransportConf conf;
  // A large timeout that "shouldn't happen", for the sake of faulty tests not hanging forever.
  private static final int FOREVER = 60 * 1000;
  /** Configures a 10-second connection timeout; each test builds its own server/handler. */
  @Before
  public void setUp() throws Exception {
    Map<String, String> configMap = new HashMap<>();
    configMap.put("spark.shuffle.io.connectionTimeout", "10s");
    conf = new TransportConf("shuffle", new MapConfigProvider(configMap));
    defaultManager = new StreamManager() {
      @Override
      public ManagedBuffer getChunk(long streamId, int chunkIndex) {
        throw new UnsupportedOperationException();
      }
    };
  }
  @After
  public void tearDown() {
    if (server != null) {
      server.close();
    }
    if (clientFactory != null) {
      clientFactory.close();
    }
  }
  // Basic suite: First request completes quickly, and second waits for longer than network timeout.
  @Test
  public void timeoutInactiveRequests() throws Exception {
    // Semaphore starts at 1: the first receive() proceeds immediately; the second blocks
    // forever (no further release before the timeout), forcing the client-side timeout.
    final Semaphore semaphore = new Semaphore(1);
    final int responseSize = 16;
    RpcHandler handler = new RpcHandler() {
      @Override
      public void receive(
          TransportClient client,
          ByteBuffer message,
          RpcResponseCallback callback) {
        try {
          semaphore.acquire();
          callback.onSuccess(ByteBuffer.allocate(responseSize));
        } catch (InterruptedException e) {
          // do nothing
        }
      }
      @Override
      public StreamManager getStreamManager() {
        return defaultManager;
      }
    };
    TransportContext context = new TransportContext(conf, handler);
    server = context.createServer();
    clientFactory = context.createClientFactory();
    TransportClient client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
    // First completes quickly (semaphore starts at 1).
    TestCallback callback0 = new TestCallback();
    client.sendRpc(ByteBuffer.allocate(0), callback0);
    callback0.latch.await();
    assertEquals(responseSize, callback0.successLength);
    // Second times out after 10 seconds, with slack. Must be IOException.
    TestCallback callback1 = new TestCallback();
    client.sendRpc(ByteBuffer.allocate(0), callback1);
    callback1.latch.await(60, TimeUnit.SECONDS);
    assertNotNull(callback1.failure);
    assertTrue(callback1.failure instanceof IOException);
    semaphore.release();
  }
  // A timeout will cause the connection to be closed, invalidating the current TransportClient.
  // It should be the case that requesting a client from the factory produces a new, valid one.
  @Test
  public void timeoutCleanlyClosesClient() throws Exception {
    // Semaphore starts at 0: the first request blocks server-side until it times out.
    final Semaphore semaphore = new Semaphore(0);
    final int responseSize = 16;
    RpcHandler handler = new RpcHandler() {
      @Override
      public void receive(
          TransportClient client,
          ByteBuffer message,
          RpcResponseCallback callback) {
        try {
          semaphore.acquire();
          callback.onSuccess(ByteBuffer.allocate(responseSize));
        } catch (InterruptedException e) {
          // do nothing
        }
      }
      @Override
      public StreamManager getStreamManager() {
        return defaultManager;
      }
    };
    TransportContext context = new TransportContext(conf, handler);
    server = context.createServer();
    clientFactory = context.createClientFactory();
    // First request should eventually fail.
    TransportClient client0 =
        clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
    TestCallback callback0 = new TestCallback();
    client0.sendRpc(ByteBuffer.allocate(0), callback0);
    callback0.latch.await();
    assertTrue(callback0.failure instanceof IOException);
    assertFalse(client0.isActive());
    // Increment the semaphore and the second request should succeed quickly.
    // (Released twice: once for the still-blocked first handler thread, once for the new request.)
    semaphore.release(2);
    TransportClient client1 =
        clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
    TestCallback callback1 = new TestCallback();
    client1.sendRpc(ByteBuffer.allocate(0), callback1);
    callback1.latch.await();
    assertEquals(responseSize, callback1.successLength);
    assertNull(callback1.failure);
  }
  // The timeout is relative to the LAST request sent, which is kinda weird, but still.
  // This test also makes sure the timeout works for Fetch requests as well as RPCs.
  @Test
  public void furtherRequestsDelay() throws Exception {
    final byte[] response = new byte[16];
    final StreamManager manager = new StreamManager() {
      @Override
      public ManagedBuffer getChunk(long streamId, int chunkIndex) {
        // Sleep far past the connection timeout so every fetch eventually fails.
        Uninterruptibles.sleepUninterruptibly(FOREVER, TimeUnit.MILLISECONDS);
        return new NioManagedBuffer(ByteBuffer.wrap(response));
      }
    };
    RpcHandler handler = new RpcHandler() {
      @Override
      public void receive(
          TransportClient client,
          ByteBuffer message,
          RpcResponseCallback callback) {
        throw new UnsupportedOperationException();
      }
      @Override
      public StreamManager getStreamManager() {
        return manager;
      }
    };
    TransportContext context = new TransportContext(conf, handler);
    server = context.createServer();
    clientFactory = context.createClientFactory();
    TransportClient client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
    // Send one request, which will eventually fail.
    TestCallback callback0 = new TestCallback();
    client.fetchChunk(0, 0, callback0);
    Uninterruptibles.sleepUninterruptibly(1200, TimeUnit.MILLISECONDS);
    // Send a second request before the first has failed.
    TestCallback callback1 = new TestCallback();
    client.fetchChunk(0, 1, callback1);
    Uninterruptibles.sleepUninterruptibly(1200, TimeUnit.MILLISECONDS);
    // not complete yet, but should complete soon
    assertEquals(-1, callback0.successLength);
    assertNull(callback0.failure);
    callback0.latch.await(60, TimeUnit.SECONDS);
    assertTrue(callback0.failure instanceof IOException);
    // make sure callback1 is called.
    callback1.latch.await(60, TimeUnit.SECONDS);
    // failed at same time as previous
    assertTrue(callback1.failure instanceof IOException);
  }
  /**
   * Callback which sets 'success' or 'failure' on completion.
   * Additionally notifies all waiters on this callback when invoked.
   */
  static class TestCallback implements RpcResponseCallback, ChunkReceivedCallback {
    // Length of the successful response, or -1 if no success has been recorded yet.
    int successLength = -1;
    Throwable failure;
    final CountDownLatch latch = new CountDownLatch(1);
    @Override
    public void onSuccess(ByteBuffer response) {
      successLength = response.remaining();
      latch.countDown();
    }
    @Override
    public void onFailure(Throwable e) {
      failure = e;
      latch.countDown();
    }
    @Override
    public void onSuccess(int chunkIndex, ManagedBuffer buffer) {
      try {
        successLength = buffer.nioByteBuffer().remaining();
      } catch (IOException e) {
        // weird
      } finally {
        latch.countDown();
      }
    }
    @Override
    public void onFailure(int chunkIndex, Throwable e) {
      failure = e;
      latch.countDown();
    }
  }
}
| 9,785 |
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/ProtocolSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network;
import java.util.List;
import com.google.common.primitives.Ints;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.FileRegion;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.MessageToMessageEncoder;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import org.apache.spark.network.protocol.ChunkFetchFailure;
import org.apache.spark.network.protocol.ChunkFetchRequest;
import org.apache.spark.network.protocol.ChunkFetchSuccess;
import org.apache.spark.network.protocol.Message;
import org.apache.spark.network.protocol.MessageDecoder;
import org.apache.spark.network.protocol.MessageEncoder;
import org.apache.spark.network.protocol.OneWayMessage;
import org.apache.spark.network.protocol.RpcFailure;
import org.apache.spark.network.protocol.RpcRequest;
import org.apache.spark.network.protocol.RpcResponse;
import org.apache.spark.network.protocol.StreamChunkId;
import org.apache.spark.network.protocol.StreamFailure;
import org.apache.spark.network.protocol.StreamRequest;
import org.apache.spark.network.protocol.StreamResponse;
import org.apache.spark.network.util.ByteArrayWritableChannel;
import org.apache.spark.network.util.NettyUtils;
public class ProtocolSuite {

  /**
   * Encodes {@code msg} through the server-side pipeline (message encoder plus a
   * FileRegion-flattening stage), replays every resulting frame into a client-side
   * decoding pipeline, and asserts that exactly one message is decoded and that it
   * equals the original.
   */
  private void testServerToClient(Message msg) {
    EmbeddedChannel serverChannel =
      new EmbeddedChannel(new FileRegionEncoder(), MessageEncoder.INSTANCE);
    serverChannel.writeOutbound(msg);

    EmbeddedChannel clientChannel =
      new EmbeddedChannel(NettyUtils.createFrameDecoder(), MessageDecoder.INSTANCE);

    // Drain every encoded frame from the server channel into the client channel.
    Object frame;
    while ((frame = serverChannel.readOutbound()) != null) {
      clientChannel.writeOneInbound(frame);
    }

    assertEquals(1, clientChannel.inboundMessages().size());
    assertEquals(msg, clientChannel.readInbound());
  }

  /** Mirror of {@link #testServerToClient} for the client-to-server direction. */
  private void testClientToServer(Message msg) {
    EmbeddedChannel clientChannel =
      new EmbeddedChannel(new FileRegionEncoder(), MessageEncoder.INSTANCE);
    clientChannel.writeOutbound(msg);

    EmbeddedChannel serverChannel =
      new EmbeddedChannel(NettyUtils.createFrameDecoder(), MessageDecoder.INSTANCE);

    Object frame;
    while ((frame = clientChannel.readOutbound()) != null) {
      serverChannel.writeOneInbound(frame);
    }

    assertEquals(1, serverChannel.inboundMessages().size());
    assertEquals(msg, serverChannel.readInbound());
  }

  /** Round-trips every request message type, covering empty and non-empty bodies. */
  @Test
  public void requests() {
    testClientToServer(new ChunkFetchRequest(new StreamChunkId(1, 2)));
    testClientToServer(new RpcRequest(12345, new TestManagedBuffer(0)));
    testClientToServer(new RpcRequest(12345, new TestManagedBuffer(10)));
    testClientToServer(new StreamRequest("abcde"));
    testClientToServer(new OneWayMessage(new TestManagedBuffer(10)));
  }

  /** Round-trips every response message type, covering empty and non-empty payloads. */
  @Test
  public void responses() {
    testServerToClient(new ChunkFetchSuccess(new StreamChunkId(1, 2), new TestManagedBuffer(10)));
    testServerToClient(new ChunkFetchSuccess(new StreamChunkId(1, 2), new TestManagedBuffer(0)));
    testServerToClient(new ChunkFetchFailure(new StreamChunkId(1, 2), "this is an error"));
    testServerToClient(new ChunkFetchFailure(new StreamChunkId(1, 2), ""));
    testServerToClient(new RpcResponse(12345, new TestManagedBuffer(0)));
    testServerToClient(new RpcResponse(12345, new TestManagedBuffer(100)));
    testServerToClient(new RpcFailure(0, "this is an error"));
    testServerToClient(new RpcFailure(0, ""));
    // Note: buffer size must be "0" since StreamResponse's buffer is written differently to the
    // channel and cannot be tested like this.
    testServerToClient(new StreamResponse("anId", 12345L, new TestManagedBuffer(0)));
    testServerToClient(new StreamFailure("anId", "this is an error"));
  }

  /**
   * Flattens a FileRegion into a plain byte buffer. EmbeddedChannel transfers messages
   * rather than bytes, so without this stage the frame decoder on the receiving side
   * could not interpret the contents of a MessageWithHeader.
   */
  private static class FileRegionEncoder extends MessageToMessageEncoder<FileRegion> {

    @Override
    public void encode(ChannelHandlerContext ctx, FileRegion in, List<Object> out)
        throws Exception {
      int totalBytes = Ints.checkedCast(in.count());
      ByteArrayWritableChannel sink = new ByteArrayWritableChannel(totalBytes);
      while (in.transferred() < in.count()) {
        in.transferTo(sink, in.transferred());
      }
      out.add(Unpooled.wrappedBuffer(sink.getData()));
    }
  }
}
| 9,786 |
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/TransportClientFactorySuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertTrue;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.client.TransportClientFactory;
import org.apache.spark.network.server.NoOpRpcHandler;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.server.TransportServer;
import org.apache.spark.network.util.ConfigProvider;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.JavaUtils;
import org.apache.spark.network.util.TransportConf;
/**
 * Tests for {@link TransportClientFactory}: per-peer connection caching and its
 * numConnectionsPerPeer cap, client invalidation after close, and idle-connection
 * teardown on request timeout.
 */
public class TransportClientFactorySuite {
  private TransportConf conf;
  private TransportContext context;
  private TransportServer server1;
  private TransportServer server2;

  // Two servers over the same no-op handler, so tests can compare clients created for
  // different remote addresses.
  @Before
  public void setUp() {
    conf = new TransportConf("shuffle", MapConfigProvider.EMPTY);
    RpcHandler rpcHandler = new NoOpRpcHandler();
    context = new TransportContext(conf, rpcHandler);
    server1 = context.createServer();
    server2 = context.createServer();
  }

  @After
  public void tearDown() {
    JavaUtils.closeQuietly(server1);
    JavaUtils.closeQuietly(server2);
  }

  /**
   * Request a bunch of clients to a single server to test
   * we create up to maxConnections of clients.
   *
   * If concurrent is true, create multiple threads to create clients in parallel.
   */
  private void testClientReuse(int maxConnections, boolean concurrent)
      throws IOException, InterruptedException {

    Map<String, String> configMap = new HashMap<>();
    configMap.put("spark.shuffle.io.numConnectionsPerPeer", Integer.toString(maxConnections));
    TransportConf conf = new TransportConf("shuffle", new MapConfigProvider(configMap));

    RpcHandler rpcHandler = new NoOpRpcHandler();
    TransportContext context = new TransportContext(conf, rpcHandler);
    TransportClientFactory factory = context.createClientFactory();
    Set<TransportClient> clients = Collections.synchronizedSet(
      new HashSet<TransportClient>());

    AtomicInteger failed = new AtomicInteger();
    // Request 10x more clients than the cap; the number of DISTINCT clients handed out
    // (set semantics) must still be <= maxConnections.
    Thread[] attempts = new Thread[maxConnections * 10];

    // Launch a bunch of threads to create new clients.
    for (int i = 0; i < attempts.length; i++) {
      attempts[i] = new Thread(() -> {
        try {
          TransportClient client =
            factory.createClient(TestUtils.getLocalHost(), server1.getPort());
          assertTrue(client.isActive());
          clients.add(client);
        } catch (IOException e) {
          failed.incrementAndGet();
        } catch (InterruptedException e) {
          throw new RuntimeException(e);
        }
      });

      if (concurrent) {
        attempts[i].start();  // parallel: exercises thread-safety of the factory's cache
      } else {
        attempts[i].run();    // sequential: executes the task on this thread
      }
    }

    // Wait until all the threads complete.
    for (Thread attempt : attempts) {
      attempt.join();
    }

    Assert.assertEquals(0, failed.get());
    Assert.assertTrue(clients.size() <= maxConnections);

    for (TransportClient client : clients) {
      client.close();
    }

    factory.close();
  }

  @Test
  public void reuseClientsUpToConfigVariable() throws Exception {
    testClientReuse(1, false);
    testClientReuse(2, false);
    testClientReuse(3, false);
    testClientReuse(4, false);
  }

  @Test
  public void reuseClientsUpToConfigVariableConcurrent() throws Exception {
    testClientReuse(1, true);
    testClientReuse(2, true);
    testClientReuse(3, true);
    testClientReuse(4, true);
  }

  // Clients are cached per remote endpoint; two different servers must yield two
  // distinct client instances.
  @Test
  public void returnDifferentClientsForDifferentServers() throws IOException, InterruptedException {
    TransportClientFactory factory = context.createClientFactory();
    TransportClient c1 = factory.createClient(TestUtils.getLocalHost(), server1.getPort());
    TransportClient c2 = factory.createClient(TestUtils.getLocalHost(), server2.getPort());
    assertTrue(c1.isActive());
    assertTrue(c2.isActive());
    assertNotSame(c1, c2);
    factory.close();
  }

  // After a client is closed, the factory must hand out a fresh active client rather
  // than the dead cached one.
  @Test
  public void neverReturnInactiveClients() throws IOException, InterruptedException {
    TransportClientFactory factory = context.createClientFactory();
    TransportClient c1 = factory.createClient(TestUtils.getLocalHost(), server1.getPort());
    c1.close();

    // isActive() may remain true briefly after close(), so poll for up to 3 seconds.
    long start = System.currentTimeMillis();
    while (c1.isActive() && (System.currentTimeMillis() - start) < 3000) {
      Thread.sleep(10);
    }
    assertFalse(c1.isActive());

    TransportClient c2 = factory.createClient(TestUtils.getLocalHost(), server1.getPort());
    assertNotSame(c1, c2);
    assertTrue(c2.isActive());
    factory.close();
  }

  // Closing the factory must deactivate every client it created.
  @Test
  public void closeBlockClientsWithFactory() throws IOException, InterruptedException {
    TransportClientFactory factory = context.createClientFactory();
    TransportClient c1 = factory.createClient(TestUtils.getLocalHost(), server1.getPort());
    TransportClient c2 = factory.createClient(TestUtils.getLocalHost(), server2.getPort());
    assertTrue(c1.isActive());
    assertTrue(c2.isActive());
    factory.close();
    assertFalse(c1.isActive());
    assertFalse(c2.isActive());
  }

  @Test
  public void closeIdleConnectionForRequestTimeOut() throws IOException, InterruptedException {
    // Config provider that forces a short (1s) connection timeout and falls back to
    // system properties for every other key.
    TransportConf conf = new TransportConf("shuffle", new ConfigProvider() {

      @Override
      public String get(String name) {
        if ("spark.shuffle.io.connectionTimeout".equals(name)) {
          // We should make sure there is enough time for us to observe the channel is active
          return "1s";
        }
        String value = System.getProperty(name);
        if (value == null) {
          throw new NoSuchElementException(name);
        }
        return value;
      }

      @Override
      public Iterable<Map.Entry<String, String>> getAll() {
        throw new UnsupportedOperationException();
      }
    });
    // NOTE(review): the third constructor arg presumably enables closing of idle
    // connections — confirm against TransportContext.
    TransportContext context = new TransportContext(conf, new NoOpRpcHandler(), true);
    try (TransportClientFactory factory = context.createClientFactory()) {
      TransportClient c1 = factory.createClient(TestUtils.getLocalHost(), server1.getPort());
      assertTrue(c1.isActive());
      long expiredTime = System.currentTimeMillis() + 10000; // 10 seconds
      // Poll until the idle connection is torn down (or the 10s budget is exhausted).
      while (c1.isActive() && System.currentTimeMillis() < expiredTime) {
        Thread.sleep(10);
      }
      assertFalse(c1.isActive());
    }
  }
}
| 9,787 |
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/TransportResponseHandlerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network;
import java.io.IOException;
import java.nio.ByteBuffer;
import io.netty.channel.Channel;
import io.netty.channel.local.LocalChannel;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.*;
import org.apache.spark.network.buffer.NioManagedBuffer;
import org.apache.spark.network.client.ChunkReceivedCallback;
import org.apache.spark.network.client.RpcResponseCallback;
import org.apache.spark.network.client.StreamCallback;
import org.apache.spark.network.client.TransportResponseHandler;
import org.apache.spark.network.protocol.ChunkFetchFailure;
import org.apache.spark.network.protocol.ChunkFetchSuccess;
import org.apache.spark.network.protocol.RpcFailure;
import org.apache.spark.network.protocol.RpcResponse;
import org.apache.spark.network.protocol.StreamChunkId;
import org.apache.spark.network.protocol.StreamFailure;
import org.apache.spark.network.protocol.StreamResponse;
import org.apache.spark.network.util.TransportFrameDecoder;
/**
 * Tests for {@link TransportResponseHandler}: matching responses to outstanding
 * requests by id, ignoring responses with unknown ids, and failing all pending
 * callbacks when the channel errors out or closes.
 */
public class TransportResponseHandlerSuite {

  @Test
  public void handleSuccessfulFetch() throws Exception {
    StreamChunkId streamChunkId = new StreamChunkId(1, 0);
    TransportResponseHandler handler = new TransportResponseHandler(new LocalChannel());
    ChunkReceivedCallback callback = mock(ChunkReceivedCallback.class);
    handler.addFetchRequest(streamChunkId, callback);
    assertEquals(1, handler.numOutstandingRequests());

    // A matching success invokes the callback and clears the outstanding request.
    handler.handle(new ChunkFetchSuccess(streamChunkId, new TestManagedBuffer(123)));
    verify(callback, times(1)).onSuccess(eq(0), any());
    assertEquals(0, handler.numOutstandingRequests());
  }

  @Test
  public void handleFailedFetch() throws Exception {
    StreamChunkId streamChunkId = new StreamChunkId(1, 0);
    TransportResponseHandler handler = new TransportResponseHandler(new LocalChannel());
    ChunkReceivedCallback callback = mock(ChunkReceivedCallback.class);
    handler.addFetchRequest(streamChunkId, callback);
    assertEquals(1, handler.numOutstandingRequests());

    // A matching failure invokes onFailure and clears the outstanding request.
    handler.handle(new ChunkFetchFailure(streamChunkId, "some error msg"));
    verify(callback, times(1)).onFailure(eq(0), any());
    assertEquals(0, handler.numOutstandingRequests());
  }

  @Test
  public void clearAllOutstandingRequests() throws Exception {
    TransportResponseHandler handler = new TransportResponseHandler(new LocalChannel());
    ChunkReceivedCallback callback = mock(ChunkReceivedCallback.class);
    handler.addFetchRequest(new StreamChunkId(1, 0), callback);
    handler.addFetchRequest(new StreamChunkId(1, 1), callback);
    handler.addFetchRequest(new StreamChunkId(1, 2), callback);
    assertEquals(3, handler.numOutstandingRequests());

    // Chunk 0 completes normally; the channel exception must then fail chunks 1 and 2.
    handler.handle(new ChunkFetchSuccess(new StreamChunkId(1, 0), new TestManagedBuffer(12)));
    handler.exceptionCaught(new Exception("duh duh duhhhh"));

    // should fail both b2 and b3
    verify(callback, times(1)).onSuccess(eq(0), any());
    verify(callback, times(1)).onFailure(eq(1), any());
    verify(callback, times(1)).onFailure(eq(2), any());

    assertEquals(0, handler.numOutstandingRequests());
  }

  @Test
  public void handleSuccessfulRPC() throws Exception {
    TransportResponseHandler handler = new TransportResponseHandler(new LocalChannel());
    RpcResponseCallback callback = mock(RpcResponseCallback.class);
    handler.addRpcRequest(12345, callback);
    assertEquals(1, handler.numOutstandingRequests());

    // This response should be ignored.
    handler.handle(new RpcResponse(54321, new NioManagedBuffer(ByteBuffer.allocate(7))));
    assertEquals(1, handler.numOutstandingRequests());

    ByteBuffer resp = ByteBuffer.allocate(10);
    handler.handle(new RpcResponse(12345, new NioManagedBuffer(resp)));
    // The callback must receive a buffer equal to the 10-byte response payload.
    verify(callback, times(1)).onSuccess(eq(ByteBuffer.allocate(10)));
    assertEquals(0, handler.numOutstandingRequests());
  }

  @Test
  public void handleFailedRPC() throws Exception {
    TransportResponseHandler handler = new TransportResponseHandler(new LocalChannel());
    RpcResponseCallback callback = mock(RpcResponseCallback.class);
    handler.addRpcRequest(12345, callback);
    assertEquals(1, handler.numOutstandingRequests());

    handler.handle(new RpcFailure(54321, "uh-oh!")); // should be ignored
    assertEquals(1, handler.numOutstandingRequests());

    handler.handle(new RpcFailure(12345, "oh no"));
    verify(callback, times(1)).onFailure(any());
    assertEquals(0, handler.numOutstandingRequests());
  }

  @Test
  public void testActiveStreams() throws Exception {
    Channel c = new LocalChannel();
    c.pipeline().addLast(TransportFrameDecoder.HANDLER_NAME, new TransportFrameDecoder());
    TransportResponseHandler handler = new TransportResponseHandler(c);

    StreamResponse response = new StreamResponse("stream", 1234L, null);
    StreamCallback cb = mock(StreamCallback.class);
    handler.addStreamCallback("stream", cb);
    assertEquals(1, handler.numOutstandingRequests());
    // A StreamResponse keeps the stream outstanding until it is explicitly deactivated.
    handler.handle(response);
    assertEquals(1, handler.numOutstandingRequests());
    handler.deactivateStream();
    assertEquals(0, handler.numOutstandingRequests());

    // A StreamFailure clears the outstanding stream immediately.
    StreamFailure failure = new StreamFailure("stream", "uh-oh");
    handler.addStreamCallback("stream", cb);
    assertEquals(1, handler.numOutstandingRequests());
    handler.handle(failure);
    assertEquals(0, handler.numOutstandingRequests());
  }

  @Test
  public void failOutstandingStreamCallbackOnClose() throws Exception {
    Channel c = new LocalChannel();
    c.pipeline().addLast(TransportFrameDecoder.HANDLER_NAME, new TransportFrameDecoder());
    TransportResponseHandler handler = new TransportResponseHandler(c);

    StreamCallback cb = mock(StreamCallback.class);
    handler.addStreamCallback("stream-1", cb);
    // Channel going inactive must fail the pending stream callback with an IOException.
    handler.channelInactive();

    verify(cb).onFailure(eq("stream-1"), isA(IOException.class));
  }

  @Test
  public void failOutstandingStreamCallbackOnException() throws Exception {
    Channel c = new LocalChannel();
    c.pipeline().addLast(TransportFrameDecoder.HANDLER_NAME, new TransportFrameDecoder());
    TransportResponseHandler handler = new TransportResponseHandler(c);

    StreamCallback cb = mock(StreamCallback.class);
    handler.addStreamCallback("stream-1", cb);
    // A pipeline exception must likewise fail the pending stream callback.
    handler.exceptionCaught(new IOException("Oops!"));

    verify(cb).onFailure(eq("stream-1"), isA(IOException.class));
  }
}
| 9,788 |
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/StreamTestHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Random;
import com.google.common.io.Files;
import org.apache.spark.network.buffer.FileSegmentManagedBuffer;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NioManagedBuffer;
import org.apache.spark.network.util.JavaUtils;
import org.apache.spark.network.util.TransportConf;
/**
 * Shared fixture for stream tests: pre-built in-memory buffers of three sizes plus a
 * temp file filled with 512 KiB of random bytes, exposed as ManagedBuffers via
 * {@link #openStream(TransportConf, String)}.
 */
class StreamTestHelper {
  // Valid stream ids accepted by srcBuffer()/openStream().
  static final String[] STREAMS = { "largeBuffer", "smallBuffer", "emptyBuffer", "file" };

  final File testFile;
  final File tempDir;

  final ByteBuffer emptyBuffer;
  final ByteBuffer smallBuffer;
  final ByteBuffer largeBuffer;

  /** Builds a read-ready buffer of {@code bufSize} sequential byte values (0, 1, 2, ...). */
  private static ByteBuffer createBuffer(int bufSize) {
    ByteBuffer buf = ByteBuffer.allocate(bufSize);
    for (int i = 0; i < bufSize; i ++) {
      buf.put((byte) i);
    }
    buf.flip();
    return buf;
  }

  StreamTestHelper() throws Exception {
    tempDir = Files.createTempDir();
    emptyBuffer = createBuffer(0);
    smallBuffer = createBuffer(100);
    largeBuffer = createBuffer(100000);

    // Fill the test file with 512 x 1024 random bytes. try-with-resources replaces the
    // original manual try/finally close.
    testFile = File.createTempFile("stream-test-file", "txt", tempDir);
    try (FileOutputStream fp = new FileOutputStream(testFile)) {
      Random rnd = new Random();
      // One reusable chunk: nextBytes() overwrites the whole array on every iteration,
      // so there is no need to reallocate it inside the loop.
      byte[] fileContent = new byte[1024];
      for (int i = 0; i < 512; i++) {
        rnd.nextBytes(fileContent);
        fp.write(fileContent);
      }
    }
  }

  /**
   * Returns the source buffer backing the named in-memory stream.
   *
   * @throws IllegalArgumentException for unknown names (including "file").
   */
  public ByteBuffer srcBuffer(String name) {
    switch (name) {
      case "largeBuffer":
        return largeBuffer;
      case "smallBuffer":
        return smallBuffer;
      case "emptyBuffer":
        return emptyBuffer;
      default:
        throw new IllegalArgumentException("Invalid stream: " + name);
    }
  }

  /** Opens a stream: the temp file for "file", otherwise a wrapped in-memory buffer. */
  public ManagedBuffer openStream(TransportConf conf, String streamId) {
    switch (streamId) {
      case "file":
        return new FileSegmentManagedBuffer(conf, testFile, 0, testFile.length());
      default:
        return new NioManagedBuffer(srcBuffer(streamId));
    }
  }

  /** Deletes the temp directory and everything in it; wraps I/O failures unchecked. */
  void cleanup() {
    if (tempDir != null) {
      try {
        JavaUtils.deleteRecursively(tempDir);
      } catch (IOException io) {
        throw new RuntimeException(io);
      }
    }
  }
}
| 9,789 |
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/TransportRequestHandlerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network;
import java.util.ArrayList;
import java.util.List;

import io.netty.channel.Channel;
import io.netty.channel.ChannelPromise;
import io.netty.channel.DefaultChannelPromise;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.GenericFutureListener;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.Assert;
import org.junit.Test;

import static org.mockito.Mockito.*;

import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.protocol.*;
import org.apache.spark.network.server.NoOpRpcHandler;
import org.apache.spark.network.server.OneForOneStreamManager;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.server.TransportRequestHandler;
/**
 * Tests for {@link TransportRequestHandler}: serving chunk and stream requests from a
 * registered stream, and closing the channel when the number of chunks concurrently
 * being transferred exceeds the configured maximum.
 */
public class TransportRequestHandlerSuite {

  @Test
  public void handleFetchRequestAndStreamRequest() throws Exception {
    RpcHandler rpcHandler = new NoOpRpcHandler();
    OneForOneStreamManager streamManager =
      (OneForOneStreamManager) (rpcHandler.getStreamManager());
    Channel channel = mock(Channel.class);

    // Capture every response written to the channel together with a promise the test
    // completes manually, so the test controls when each "flush" finishes.
    List<Pair<Object, ExtendedChannelPromise>> responseAndPromisePairs =
      new ArrayList<>();
    when(channel.writeAndFlush(any()))
      .thenAnswer(invocationOnMock0 -> {
        Object response = invocationOnMock0.getArguments()[0];
        ExtendedChannelPromise channelFuture = new ExtendedChannelPromise(channel);
        responseAndPromisePairs.add(ImmutablePair.of(response, channelFuture));
        return channelFuture;
      });

    // Prepare the stream.
    List<ManagedBuffer> managedBuffers = new ArrayList<>();
    managedBuffers.add(new TestManagedBuffer(10));
    managedBuffers.add(new TestManagedBuffer(20));
    managedBuffers.add(new TestManagedBuffer(30));
    managedBuffers.add(new TestManagedBuffer(40));
    long streamId = streamManager.registerStream("test-app", managedBuffers.iterator(), channel);
    // Use JUnit assertions rather than bare `assert`, which is a no-op unless the JVM
    // runs with -ea.
    Assert.assertEquals(1, streamManager.numStreamStates());

    TransportClient reverseClient = mock(TransportClient.class);
    // maxChunksBeingTransferred is 2, so a third in-flight transfer must close the channel.
    TransportRequestHandler requestHandler = new TransportRequestHandler(channel, reverseClient,
      rpcHandler, 2L);

    RequestMessage request0 = new ChunkFetchRequest(new StreamChunkId(streamId, 0));
    requestHandler.handle(request0);
    Assert.assertEquals(1, responseAndPromisePairs.size());
    Assert.assertTrue(responseAndPromisePairs.get(0).getLeft() instanceof ChunkFetchSuccess);
    Assert.assertSame(managedBuffers.get(0),
      ((ChunkFetchSuccess) (responseAndPromisePairs.get(0).getLeft())).body());

    RequestMessage request1 = new ChunkFetchRequest(new StreamChunkId(streamId, 1));
    requestHandler.handle(request1);
    Assert.assertEquals(2, responseAndPromisePairs.size());
    Assert.assertTrue(responseAndPromisePairs.get(1).getLeft() instanceof ChunkFetchSuccess);
    Assert.assertSame(managedBuffers.get(1),
      ((ChunkFetchSuccess) (responseAndPromisePairs.get(1).getLeft())).body());

    // Finish flushing the response for request0.
    responseAndPromisePairs.get(0).getRight().finish(true);

    RequestMessage request2 = new StreamRequest(String.format("%d_%d", streamId, 2));
    requestHandler.handle(request2);
    Assert.assertEquals(3, responseAndPromisePairs.size());
    Assert.assertTrue(responseAndPromisePairs.get(2).getLeft() instanceof StreamResponse);
    Assert.assertSame(managedBuffers.get(2),
      ((StreamResponse) (responseAndPromisePairs.get(2).getLeft())).body());

    // Request3 will trigger the close of channel, because the number of max chunks being
    // transferred is 2;
    RequestMessage request3 = new StreamRequest(String.format("%d_%d", streamId, 3));
    requestHandler.handle(request3);
    verify(channel, times(1)).close();
    // The channel was closed instead of a fourth response being written.
    Assert.assertEquals(3, responseAndPromisePairs.size());

    streamManager.connectionTerminated(channel);
    Assert.assertEquals(0, streamManager.numStreamStates());
  }

  /**
   * ChannelPromise whose completion is driven by the test: added listeners are recorded
   * and only notified once {@link #finish(boolean)} is called. Declared static so
   * instances do not retain a hidden reference to the enclosing test instance.
   */
  private static class ExtendedChannelPromise extends DefaultChannelPromise {

    private final List<GenericFutureListener<Future<Void>>> listeners = new ArrayList<>();
    private boolean success;

    ExtendedChannelPromise(Channel channel) {
      super(channel);
      success = false;
    }

    @Override
    public ChannelPromise addListener(
        GenericFutureListener<? extends Future<? super Void>> listener) {
      @SuppressWarnings("unchecked")
      GenericFutureListener<Future<Void>> gfListener =
          (GenericFutureListener<Future<Void>>) listener;
      listeners.add(gfListener);
      return super.addListener(listener);
    }

    @Override
    public boolean isSuccess() {
      return success;
    }

    /** Marks the promise finished with the given outcome and notifies all listeners. */
    public void finish(boolean success) {
      this.success = success;
      listeners.forEach(listener -> {
        try {
          listener.operationComplete(this);
        } catch (Exception e) {
          // A listener failure must not prevent the remaining listeners from running.
        }
      });
    }
  }
}
| 9,790 |
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/StreamSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import com.google.common.io.Files;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.client.RpcResponseCallback;
import org.apache.spark.network.client.StreamCallback;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.client.TransportClientFactory;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.server.StreamManager;
import org.apache.spark.network.server.TransportServer;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.TransportConf;
public class StreamSuite {
private static final String[] STREAMS = StreamTestHelper.STREAMS;
private static StreamTestHelper testData;
private static TransportServer server;
private static TransportClientFactory clientFactory;
/**
 * Builds a read-ready buffer of {@code bufSize} sequential byte values (0, 1, 2, ...).
 * NOTE(review): appears unused in the visible portion of this file — StreamTestHelper
 * supplies the test buffers; confirm before removing.
 */
private static ByteBuffer createBuffer(int bufSize) {
  byte[] bytes = new byte[bufSize];
  for (int i = 0; i < bufSize; i++) {
    bytes[i] = (byte) i;
  }
  return ByteBuffer.wrap(bytes);
}
// One server + client factory shared by the whole suite. The server exposes only the
// stream API: chunk fetches and RPCs are unsupported, so any such request made by these
// tests fails loudly.
@BeforeClass
public static void setUp() throws Exception {
  testData = new StreamTestHelper();
  final TransportConf conf = new TransportConf("shuffle", MapConfigProvider.EMPTY);
  final StreamManager streamManager = new StreamManager() {
    @Override
    public ManagedBuffer getChunk(long streamId, int chunkIndex) {
      // Chunk fetching is not part of this suite.
      throw new UnsupportedOperationException();
    }

    @Override
    public ManagedBuffer openStream(String streamId) {
      // Serve the fixture buffer/file named by streamId.
      return testData.openStream(conf, streamId);
    }
  };
  RpcHandler handler = new RpcHandler() {
    @Override
    public void receive(
        TransportClient client,
        ByteBuffer message,
        RpcResponseCallback callback) {
      // RPCs are not part of this suite.
      throw new UnsupportedOperationException();
    }

    @Override
    public StreamManager getStreamManager() {
      return streamManager;
    }
  };
  TransportContext context = new TransportContext(conf, handler);
  server = context.createServer();
  clientFactory = context.createClientFactory();
}
/**
 * Releases the suite-wide server, client factory, and temp-file fixture. Null-guarded
 * so a partial setUp() failure (which leaves later fields null) does not mask the
 * original error with a NullPointerException here.
 */
@AfterClass
public static void tearDown() {
  if (server != null) {
    server.close();
  }
  if (clientFactory != null) {
    clientFactory.close();
  }
  if (testData != null) {
    testData.cleanup();
  }
}
/** Streaming the empty buffer must complete cleanly within the timeout. */
@Test
public void testZeroLengthStream() throws Throwable {
  TransportClient client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
  try {
    StreamTask emptyTask = new StreamTask(client, "emptyBuffer", TimeUnit.SECONDS.toMillis(5));
    emptyTask.run();
    emptyTask.check();
  } finally {
    client.close();
  }
}
/** A single fetch of the large in-memory buffer over one connection. */
@Test
public void testSingleStream() throws Throwable {
  TransportClient client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
  try {
    StreamTask largeTask = new StreamTask(client, "largeBuffer", TimeUnit.SECONDS.toMillis(5));
    largeTask.run();
    largeTask.check();
  } finally {
    client.close();
  }
}
/** Sequentially cycles through every stream type several times on one connection. */
@Test
public void testMultipleStreams() throws Throwable {
  TransportClient client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
  try {
    for (int iteration = 0; iteration < 20; iteration++) {
      String streamId = STREAMS[iteration % STREAMS.length];
      StreamTask task = new StreamTask(client, streamId, TimeUnit.SECONDS.toMillis(5));
      task.run();
      task.check();
    }
  } finally {
    client.close();
  }
}
@Test
public void testConcurrentStreams() throws Throwable {
ExecutorService executor = Executors.newFixedThreadPool(20);
TransportClient client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
try {
List<StreamTask> tasks = new ArrayList<>();
for (int i = 0; i < 20; i++) {
StreamTask task = new StreamTask(client, STREAMS[i % STREAMS.length],
TimeUnit.SECONDS.toMillis(20));
tasks.add(task);
executor.submit(task);
}
executor.shutdown();
assertTrue("Timed out waiting for tasks.", executor.awaitTermination(30, TimeUnit.SECONDS));
for (StreamTask task : tasks) {
task.check();
}
} finally {
executor.shutdownNow();
client.close();
}
}
private static class StreamTask implements Runnable {
private final TransportClient client;
private final String streamId;
private final long timeoutMs;
private Throwable error;
StreamTask(TransportClient client, String streamId, long timeoutMs) {
this.client = client;
this.streamId = streamId;
this.timeoutMs = timeoutMs;
}
@Override
public void run() {
ByteBuffer srcBuffer = null;
OutputStream out = null;
File outFile = null;
try {
ByteArrayOutputStream baos = null;
switch (streamId) {
case "largeBuffer":
baos = new ByteArrayOutputStream();
out = baos;
srcBuffer = testData.largeBuffer;
break;
case "smallBuffer":
baos = new ByteArrayOutputStream();
out = baos;
srcBuffer = testData.smallBuffer;
break;
case "file":
outFile = File.createTempFile("data", ".tmp", testData.tempDir);
out = new FileOutputStream(outFile);
break;
case "emptyBuffer":
baos = new ByteArrayOutputStream();
out = baos;
srcBuffer = testData.emptyBuffer;
break;
default:
throw new IllegalArgumentException(streamId);
}
TestCallback callback = new TestCallback(out);
client.stream(streamId, callback);
callback.waitForCompletion(timeoutMs);
if (srcBuffer == null) {
assertTrue("File stream did not match.", Files.equal(testData.testFile, outFile));
} else {
ByteBuffer base;
synchronized (srcBuffer) {
base = srcBuffer.duplicate();
}
byte[] result = baos.toByteArray();
byte[] expected = new byte[base.remaining()];
base.get(expected);
assertEquals(expected.length, result.length);
assertTrue("buffers don't match", Arrays.equals(expected, result));
}
} catch (Throwable t) {
error = t;
} finally {
if (out != null) {
try {
out.close();
} catch (Exception e) {
// ignore.
}
}
if (outFile != null) {
outFile.delete();
}
}
}
public void check() throws Throwable {
if (error != null) {
throw error;
}
}
}
static class TestCallback implements StreamCallback {
private final OutputStream out;
public volatile boolean completed;
public volatile Throwable error;
TestCallback(OutputStream out) {
this.out = out;
this.completed = false;
}
@Override
public void onData(String streamId, ByteBuffer buf) throws IOException {
byte[] tmp = new byte[buf.remaining()];
buf.get(tmp);
out.write(tmp);
}
@Override
public void onComplete(String streamId) throws IOException {
out.close();
synchronized (this) {
completed = true;
notifyAll();
}
}
@Override
public void onFailure(String streamId, Throwable cause) {
error = cause;
synchronized (this) {
completed = true;
notifyAll();
}
}
void waitForCompletion(long timeoutMs) {
long now = System.currentTimeMillis();
long deadline = now + timeoutMs;
synchronized (this) {
while (!completed && now < deadline) {
try {
wait(deadline - now);
} catch (InterruptedException ie) {
throw new RuntimeException(ie);
}
now = System.currentTimeMillis();
}
}
assertTrue("Timed out waiting for stream.", completed);
assertNull(error);
}
}
}
| 9,791 |
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/crypto/AuthIntegrationSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.crypto;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import io.netty.channel.Channel;
import org.junit.After;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import org.apache.spark.network.TestUtils;
import org.apache.spark.network.TransportContext;
import org.apache.spark.network.client.RpcResponseCallback;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.client.TransportClientBootstrap;
import org.apache.spark.network.sasl.SaslRpcHandler;
import org.apache.spark.network.sasl.SaslServerBootstrap;
import org.apache.spark.network.sasl.SecretKeyHolder;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.server.StreamManager;
import org.apache.spark.network.server.TransportServer;
import org.apache.spark.network.server.TransportServerBootstrap;
import org.apache.spark.network.util.JavaUtils;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.TransportConf;
public class AuthIntegrationSuite {

  // Per-test client/server context; torn down after each test.
  private AuthTestCtx ctx;

  @After
  public void cleanUp() throws Exception {
    if (ctx != null) {
      ctx.close();
    }
    ctx = null;
  }

  // Matching secrets: the AES-based auth protocol should succeed and the server
  // should NOT have fallen back to the SASL handler.
  @Test
  public void testNewAuth() throws Exception {
    ctx = new AuthTestCtx();
    ctx.createServer("secret");
    ctx.createClient("secret");

    ByteBuffer reply = ctx.client.sendRpcSync(JavaUtils.stringToBytes("Ping"), 5000);
    assertEquals("Pong", JavaUtils.bytesToString(reply));
    assertTrue(ctx.authRpcHandler.doDelegate);
    assertFalse(ctx.authRpcHandler.delegate instanceof SaslRpcHandler);
  }

  // Mismatched secrets: client creation fails and the server closes the channel.
  @Test
  public void testAuthFailure() throws Exception {
    ctx = new AuthTestCtx();
    ctx.createServer("server");

    try {
      ctx.createClient("client");
      fail("Should have failed to create client.");
    } catch (Exception e) {
      assertFalse(ctx.authRpcHandler.doDelegate);
      assertFalse(ctx.serverChannel.isActive());
    }
  }

  // AES server, SASL-only client: server should fall back to SASL and still work.
  @Test
  public void testSaslServerFallback() throws Exception {
    ctx = new AuthTestCtx();
    ctx.createServer("secret", true);
    ctx.createClient("secret", false);

    ByteBuffer reply = ctx.client.sendRpcSync(JavaUtils.stringToBytes("Ping"), 5000);
    assertEquals("Pong", JavaUtils.bytesToString(reply));
  }

  // SASL-only server, AES-capable client: client should fall back to SASL.
  @Test
  public void testSaslClientFallback() throws Exception {
    ctx = new AuthTestCtx();
    ctx.createServer("secret", false);
    ctx.createClient("secret", true);

    ByteBuffer reply = ctx.client.sendRpcSync(JavaUtils.stringToBytes("Ping"), 5000);
    assertEquals("Pong", JavaUtils.bytesToString(reply));
  }

  @Test
  public void testAuthReplay() throws Exception {
    // This test covers the case where an attacker replays a challenge message sniffed from the
    // network, but doesn't know the actual secret. The server should close the connection as
    // soon as a message is sent after authentication is performed. This is emulated by removing
    // the client encryption handler after authentication.
    ctx = new AuthTestCtx();
    ctx.createServer("secret");
    ctx.createClient("secret");

    assertNotNull(ctx.client.getChannel().pipeline()
      .remove(TransportCipher.ENCRYPTION_HANDLER_NAME));

    try {
      ctx.client.sendRpcSync(JavaUtils.stringToBytes("Ping"), 5000);
      fail("Should have failed unencrypted RPC.");
    } catch (Exception e) {
      assertTrue(ctx.authRpcHandler.doDelegate);
    }
  }

  // Error responses larger than one encryption buffer must be delivered whole.
  @Test
  public void testLargeMessageEncryption() throws Exception {
    // Use a big length to create a message that cannot be put into the encryption buffer completely
    final int testErrorMessageLength = TransportCipher.STREAM_BUFFER_SIZE;
    ctx = new AuthTestCtx(new RpcHandler() {
      @Override
      public void receive(
          TransportClient client,
          ByteBuffer message,
          RpcResponseCallback callback) {
        // Fail every RPC with an error message exactly STREAM_BUFFER_SIZE chars long.
        char[] longMessage = new char[testErrorMessageLength];
        Arrays.fill(longMessage, 'D');
        callback.onFailure(new RuntimeException(new String(longMessage)));
      }

      @Override
      public StreamManager getStreamManager() {
        return null;
      }
    });
    ctx.createServer("secret");
    ctx.createClient("secret");

    try {
      ctx.client.sendRpcSync(JavaUtils.stringToBytes("Ping"), 5000);
      fail("Should have failed unencrypted RPC.");
    } catch (Exception e) {
      assertTrue(ctx.authRpcHandler.doDelegate);
      assertTrue(e.getMessage() + " is not an expected error", e.getMessage().contains("DDDDD"));
      // Verify we receive the complete error message
      int messageStart = e.getMessage().indexOf("DDDDD");
      int messageEnd = e.getMessage().lastIndexOf("DDDDD") + 5;
      assertEquals(testErrorMessageLength, messageEnd - messageStart);
    }
  }

  /**
   * Helper that wires up a server and client with configurable auth bootstraps
   * (AES-based AuthServerBootstrap or legacy SaslServerBootstrap) and captures the
   * server channel and AuthRpcHandler for assertions.
   */
  private class AuthTestCtx {

    private final String appId = "testAppId";
    private final TransportConf conf;
    private final TransportContext ctx;

    TransportClient client;
    TransportServer server;
    // Captured by the introspector bootstrap when the server accepts a connection.
    volatile Channel serverChannel;
    volatile AuthRpcHandler authRpcHandler;

    AuthTestCtx() throws Exception {
      // Default handler: replies "Pong" to a "Ping" RPC.
      this(new RpcHandler() {
        @Override
        public void receive(
            TransportClient client,
            ByteBuffer message,
            RpcResponseCallback callback) {
          assertEquals("Ping", JavaUtils.bytesToString(message));
          callback.onSuccess(JavaUtils.stringToBytes("Pong"));
        }

        @Override
        public StreamManager getStreamManager() {
          return null;
        }
      });
    }

    AuthTestCtx(RpcHandler rpcHandler) throws Exception {
      Map<String, String> testConf = ImmutableMap.of("spark.network.crypto.enabled", "true");
      this.conf = new TransportConf("rpc", new MapConfigProvider(testConf));
      this.ctx = new TransportContext(conf, rpcHandler);
    }

    void createServer(String secret) throws Exception {
      createServer(secret, true);
    }

    void createServer(String secret, boolean enableAes) throws Exception {
      // Second bootstrap only captures the channel/handler for later assertions.
      TransportServerBootstrap introspector = (channel, rpcHandler) -> {
        this.serverChannel = channel;
        if (rpcHandler instanceof AuthRpcHandler) {
          this.authRpcHandler = (AuthRpcHandler) rpcHandler;
        }
        return rpcHandler;
      };
      SecretKeyHolder keyHolder = createKeyHolder(secret);
      TransportServerBootstrap auth = enableAes ? new AuthServerBootstrap(conf, keyHolder)
        : new SaslServerBootstrap(conf, keyHolder);
      this.server = ctx.createServer(Arrays.asList(auth, introspector));
    }

    void createClient(String secret) throws Exception {
      createClient(secret, true);
    }

    void createClient(String secret, boolean enableAes) throws Exception {
      // With AES disabled, use an empty conf so AuthClientBootstrap falls back to SASL.
      TransportConf clientConf = enableAes ? conf
        : new TransportConf("rpc", MapConfigProvider.EMPTY);
      List<TransportClientBootstrap> bootstraps = Arrays.asList(
        new AuthClientBootstrap(clientConf, appId, createKeyHolder(secret)));
      this.client = ctx.createClientFactory(bootstraps)
        .createClient(TestUtils.getLocalHost(), server.getPort());
    }

    void close() {
      if (client != null) {
        client.close();
      }
      if (server != null) {
        server.close();
      }
    }

    // Mock key holder that returns the given secret for any app id.
    private SecretKeyHolder createKeyHolder(String secret) {
      SecretKeyHolder keyHolder = mock(SecretKeyHolder.class);
      when(keyHolder.getSaslUser(anyString())).thenReturn(appId);
      when(keyHolder.getSecretKey(anyString())).thenReturn(secret);
      return keyHolder;
    }
  }
}
| 9,792 |
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/crypto/AuthMessagesSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.crypto;
import java.nio.ByteBuffer;
import java.util.Arrays;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.spark.network.protocol.Encodable;
public class AuthMessagesSuite {

  /** Source of distinct values for the generator helpers below. */
  private static int COUNTER = 0;

  /** Produces a fresh, distinct string on every call. */
  private static String string() {
    return Integer.toString(COUNTER++);
  }

  /** Produces a byte array whose length differs per call, filled with the counter value. */
  private static byte[] byteArray() {
    byte[] result = new byte[COUNTER++];
    Arrays.fill(result, (byte) COUNTER);
    return result;
  }

  /** Produces a fresh, distinct integer on every call. */
  private static int integer() {
    return COUNTER++;
  }

  // Round-trips a ClientChallenge through encode/decode and checks every field survives.
  @Test
  public void testClientChallenge() {
    ClientChallenge original = new ClientChallenge(string(), string(), integer(), string(),
      integer(), byteArray(), byteArray());
    ClientChallenge roundTripped = ClientChallenge.decodeMessage(encode(original));

    assertEquals(original.appId, roundTripped.appId);
    assertEquals(original.kdf, roundTripped.kdf);
    assertEquals(original.iterations, roundTripped.iterations);
    assertEquals(original.cipher, roundTripped.cipher);
    assertEquals(original.keyLength, roundTripped.keyLength);
    assertTrue(Arrays.equals(original.nonce, roundTripped.nonce));
    assertTrue(Arrays.equals(original.challenge, roundTripped.challenge));
  }

  // Round-trips a ServerResponse through encode/decode and checks every field survives.
  @Test
  public void testServerResponse() {
    ServerResponse original = new ServerResponse(byteArray(), byteArray(), byteArray(),
      byteArray());
    ServerResponse roundTripped = ServerResponse.decodeMessage(encode(original));

    assertTrue(Arrays.equals(original.response, roundTripped.response));
    assertTrue(Arrays.equals(original.nonce, roundTripped.nonce));
    assertTrue(Arrays.equals(original.inputIv, roundTripped.inputIv));
    assertTrue(Arrays.equals(original.outputIv, roundTripped.outputIv));
  }

  /** Serializes an Encodable message into a ByteBuffer for round-trip decoding. */
  private ByteBuffer encode(Encodable msg) {
    ByteBuf buf = Unpooled.buffer();
    msg.encode(buf);
    return buf.nioBuffer();
  }
}
| 9,793 |
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/crypto/AuthEngineSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.crypto;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;
import java.util.Arrays;
import java.util.Map;
import java.security.InvalidKeyException;
import java.util.Random;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.common.collect.ImmutableMap;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.FileRegion;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import org.apache.spark.network.util.ByteArrayWritableChannel;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.TransportConf;
public class AuthEngineSuite {

  private static TransportConf conf;

  @BeforeClass
  public static void setUp() {
    conf = new TransportConf("rpc", MapConfigProvider.EMPTY);
  }

  // Full handshake with matching secrets: the two session ciphers must agree,
  // with each side's input IV matching the other side's output IV.
  @Test
  public void testAuthEngine() throws Exception {
    AuthEngine client = new AuthEngine("appId", "secret", conf);
    AuthEngine server = new AuthEngine("appId", "secret", conf);

    try {
      ClientChallenge clientChallenge = client.challenge();
      ServerResponse serverResponse = server.respond(clientChallenge);
      client.validate(serverResponse);

      TransportCipher serverCipher = server.sessionCipher();
      TransportCipher clientCipher = client.sessionCipher();

      assertTrue(Arrays.equals(serverCipher.getInputIv(), clientCipher.getOutputIv()));
      assertTrue(Arrays.equals(serverCipher.getOutputIv(), clientCipher.getInputIv()));
      assertEquals(serverCipher.getKey(), clientCipher.getKey());
    } finally {
      client.close();
      server.close();
    }
  }

  // Different secrets: the server must reject the client's challenge.
  @Test
  public void testMismatchedSecret() throws Exception {
    AuthEngine client = new AuthEngine("appId", "secret", conf);
    AuthEngine server = new AuthEngine("appId", "different_secret", conf);

    ClientChallenge clientChallenge = client.challenge();
    try {
      server.respond(clientChallenge);
      fail("Should have failed to validate response.");
    } catch (IllegalArgumentException e) {
      // Expected.
    }
  }

  // A challenge computed over the wrong app id must be rejected.
  @Test(expected = IllegalArgumentException.class)
  public void testWrongAppId() throws Exception {
    AuthEngine engine = new AuthEngine("appId", "secret", conf);
    ClientChallenge challenge = engine.challenge();

    byte[] badChallenge = engine.challenge(new byte[] { 0x00 }, challenge.nonce,
      engine.rawResponse(engine.challenge));
    engine.respond(new ClientChallenge(challenge.appId, challenge.kdf, challenge.iterations,
      challenge.cipher, challenge.keyLength, challenge.nonce, badChallenge));
  }

  // A challenge computed over the wrong nonce must be rejected.
  @Test(expected = IllegalArgumentException.class)
  public void testWrongNonce() throws Exception {
    AuthEngine engine = new AuthEngine("appId", "secret", conf);
    ClientChallenge challenge = engine.challenge();

    byte[] badChallenge = engine.challenge(challenge.appId.getBytes(UTF_8), new byte[] { 0x00 },
      engine.rawResponse(engine.challenge));
    engine.respond(new ClientChallenge(challenge.appId, challenge.kdf, challenge.iterations,
      challenge.cipher, challenge.keyLength, challenge.nonce, badChallenge));
  }

  // An all-zeros challenge payload of the right length must still be rejected.
  @Test(expected = IllegalArgumentException.class)
  public void testBadChallenge() throws Exception {
    AuthEngine engine = new AuthEngine("appId", "secret", conf);
    ClientChallenge challenge = engine.challenge();

    byte[] badChallenge = new byte[challenge.challenge.length];
    engine.respond(new ClientChallenge(challenge.appId, challenge.kdf, challenge.iterations,
      challenge.cipher, challenge.keyLength, challenge.nonce, badChallenge));
  }

  // Encrypts a payload one byte larger than the stream buffer, so the message must
  // span more than one encryption buffer, and checks all bytes are transferred.
  @Test
  public void testEncryptedMessage() throws Exception {
    AuthEngine client = new AuthEngine("appId", "secret", conf);
    AuthEngine server = new AuthEngine("appId", "secret", conf);
    try {
      ClientChallenge clientChallenge = client.challenge();
      ServerResponse serverResponse = server.respond(clientChallenge);
      client.validate(serverResponse);

      TransportCipher cipher = server.sessionCipher();
      TransportCipher.EncryptionHandler handler = new TransportCipher.EncryptionHandler(cipher);

      byte[] data = new byte[TransportCipher.STREAM_BUFFER_SIZE + 1];
      new Random().nextBytes(data);
      ByteBuf buf = Unpooled.wrappedBuffer(data);

      ByteArrayWritableChannel channel = new ByteArrayWritableChannel(data.length);
      TransportCipher.EncryptedMessage emsg = handler.createEncryptedMessage(buf);
      // Drain the message into the channel (transfered() is Netty's legacy spelling).
      while (emsg.transfered() < emsg.count()) {
        emsg.transferTo(channel, emsg.transfered());
      }
      assertEquals(data.length, channel.length());
    } finally {
      client.close();
      server.close();
    }
  }

  // Regression test: the encrypted message must cope with an underlying FileRegion
  // that transfers 0 bytes on a call before delivering data on a later call.
  @Test
  public void testEncryptedMessageWhenTransferringZeroBytes() throws Exception {
    AuthEngine client = new AuthEngine("appId", "secret", conf);
    AuthEngine server = new AuthEngine("appId", "secret", conf);
    try {
      ClientChallenge clientChallenge = client.challenge();
      ServerResponse serverResponse = server.respond(clientChallenge);
      client.validate(serverResponse);

      TransportCipher cipher = server.sessionCipher();
      TransportCipher.EncryptionHandler handler = new TransportCipher.EncryptionHandler(cipher);

      int testDataLength = 4;
      FileRegion region = mock(FileRegion.class);
      when(region.count()).thenReturn((long) testDataLength);
      // Make `region.transferTo` do nothing in first call and transfer 4 bytes in the second one.
      when(region.transferTo(any(), anyLong())).thenAnswer(new Answer<Long>() {

        private boolean firstTime = true;

        @Override
        public Long answer(InvocationOnMock invocationOnMock) throws Throwable {
          if (firstTime) {
            firstTime = false;
            return 0L;
          } else {
            WritableByteChannel channel =
              invocationOnMock.getArgumentAt(0, WritableByteChannel.class);
            channel.write(ByteBuffer.wrap(new byte[testDataLength]));
            return (long) testDataLength;
          }
        }
      });

      TransportCipher.EncryptedMessage emsg = handler.createEncryptedMessage(region);
      ByteArrayWritableChannel channel = new ByteArrayWritableChannel(testDataLength);

      // "transferTo" should act correctly when the underlying FileRegion transfers 0 bytes.
      assertEquals(0L, emsg.transferTo(channel, emsg.transfered()));
      assertEquals(testDataLength, emsg.transferTo(channel, emsg.transfered()));
      assertEquals(emsg.transfered(), emsg.count());
      assertEquals(4, channel.length());
    } finally {
      client.close();
      server.close();
    }
  }

  // An unsupported key length in the conf should fail challenge creation.
  @Test(expected = InvalidKeyException.class)
  public void testBadKeySize() throws Exception {
    Map<String, String> mconf = ImmutableMap.of("spark.network.crypto.keyLength", "42");
    TransportConf conf = new TransportConf("rpc", new MapConfigProvider(mconf));

    try (AuthEngine engine = new AuthEngine("appId", "secret", conf)) {
      engine.challenge();
      fail("Should have failed to create challenge message.");

      // Call close explicitly to make sure it's idempotent.
      engine.close();
    }
  }
}
| 9,794 |
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/util/TransportFrameDecoderSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import org.junit.AfterClass;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
public class TransportFrameDecoderSuite {

  // Shared RNG for random frame sizes; nulled out after the class runs.
  private static Random RND = new Random();

  @AfterClass
  public static void cleanup() {
    RND = null;
  }

  // Basic decoding: random frames fed in random-sized slices all come out whole.
  @Test
  public void testFrameDecoding() throws Exception {
    TransportFrameDecoder decoder = new TransportFrameDecoder();
    ChannelHandlerContext ctx = mockChannelHandlerContext();
    ByteBuf data = createAndFeedFrames(100, decoder, ctx);
    verifyAndCloseDecoder(decoder, ctx, data);
  }

  // While an interceptor is active it consumes all reads; once it gives up
  // (returns false), normal frame decoding resumes.
  @Test
  public void testInterception() throws Exception {
    int interceptedReads = 3;
    TransportFrameDecoder decoder = new TransportFrameDecoder();
    TransportFrameDecoder.Interceptor interceptor = spy(new MockInterceptor(interceptedReads));
    ChannelHandlerContext ctx = mockChannelHandlerContext();

    byte[] data = new byte[8];
    // Frame length field is a long that includes its own 8 bytes.
    ByteBuf len = Unpooled.copyLong(8 + data.length);
    ByteBuf dataBuf = Unpooled.wrappedBuffer(data);

    try {
      decoder.setInterceptor(interceptor);
      for (int i = 0; i < interceptedReads; i++) {
        decoder.channelRead(ctx, dataBuf);
        // The decoder should have released the intercepted buffer.
        assertEquals(0, dataBuf.refCnt());
        dataBuf = Unpooled.wrappedBuffer(data);
      }
      decoder.channelRead(ctx, len);
      decoder.channelRead(ctx, dataBuf);
      verify(interceptor, times(interceptedReads)).handle(any(ByteBuf.class));
      verify(ctx).fireChannelRead(any(ByteBuf.class));
      assertEquals(0, len.refCnt());
      assertEquals(0, dataBuf.refCnt());
    } finally {
      release(len);
      release(dataBuf);
    }
  }

  // Frames retained by a downstream handler must stay readable even after the
  // decoder is closed.
  @Test
  public void testRetainedFrames() throws Exception {
    TransportFrameDecoder decoder = new TransportFrameDecoder();

    AtomicInteger count = new AtomicInteger();
    List<ByteBuf> retained = new ArrayList<>();

    ChannelHandlerContext ctx = mock(ChannelHandlerContext.class);
    when(ctx.fireChannelRead(any())).thenAnswer(in -> {
      // Retain a few frames but not others.
      ByteBuf buf = (ByteBuf) in.getArguments()[0];
      if (count.incrementAndGet() % 2 == 0) {
        retained.add(buf);
      } else {
        buf.release();
      }
      return null;
    });

    ByteBuf data = createAndFeedFrames(100, decoder, ctx);
    try {
      // Verify all retained buffers are readable.
      for (ByteBuf b : retained) {
        byte[] tmp = new byte[b.readableBytes()];
        b.readBytes(tmp);
        b.release();
      }
      verifyAndCloseDecoder(decoder, ctx, data);
    } finally {
      for (ByteBuf b : retained) {
        release(b);
      }
    }
  }

  // The decoder must handle the 8-byte length field arriving split across reads.
  @Test
  public void testSplitLengthField() throws Exception {
    byte[] frame = new byte[1024 * (RND.nextInt(31) + 1)];
    ByteBuf buf = Unpooled.buffer(frame.length + 8);
    buf.writeLong(frame.length + 8);
    buf.writeBytes(frame);

    TransportFrameDecoder decoder = new TransportFrameDecoder();
    ChannelHandlerContext ctx = mockChannelHandlerContext();
    try {
      // First feed fewer than 8 bytes, so only part of the length field arrives.
      decoder.channelRead(ctx, buf.readSlice(RND.nextInt(7)).retain());
      verify(ctx, never()).fireChannelRead(any(ByteBuf.class));
      decoder.channelRead(ctx, buf);
      verify(ctx).fireChannelRead(any(ByteBuf.class));
      assertEquals(0, buf.refCnt());
    } finally {
      decoder.channelInactive(ctx);
      release(buf);
    }
  }

  @Test(expected = IllegalArgumentException.class)
  public void testNegativeFrameSize() throws Exception {
    testInvalidFrame(-1);
  }

  @Test(expected = IllegalArgumentException.class)
  public void testEmptyFrame() throws Exception {
    // 8 because frame size includes the frame length.
    testInvalidFrame(8);
  }

  /**
   * Creates a number of randomly sized frames and feed them to the given decoder, verifying
   * that the frames were read.
   */
  private ByteBuf createAndFeedFrames(
      int frameCount,
      TransportFrameDecoder decoder,
      ChannelHandlerContext ctx) throws Exception {
    ByteBuf data = Unpooled.buffer();
    for (int i = 0; i < frameCount; i++) {
      byte[] frame = new byte[1024 * (RND.nextInt(31) + 1)];
      // Length prefix counts itself (8 bytes) plus the payload.
      data.writeLong(frame.length + 8);
      data.writeBytes(frame);
    }

    try {
      // Feed the stream in random-sized slices to exercise partial-frame handling.
      while (data.isReadable()) {
        int size = RND.nextInt(4 * 1024) + 256;
        decoder.channelRead(ctx, data.readSlice(Math.min(data.readableBytes(), size)).retain());
      }

      verify(ctx, times(frameCount)).fireChannelRead(any(ByteBuf.class));
    } catch (Exception e) {
      release(data);
      throw e;
    }
    return data;
  }

  // Closes the decoder and checks no dangling references to the fed data remain.
  private void verifyAndCloseDecoder(
      TransportFrameDecoder decoder,
      ChannelHandlerContext ctx,
      ByteBuf data) throws Exception {
    try {
      decoder.channelInactive(ctx);
      assertTrue("There shouldn't be dangling references to the data.", data.release());
    } finally {
      release(data);
    }
  }

  // Feeds a single bogus length field; callers assert on the expected exception.
  private void testInvalidFrame(long size) throws Exception {
    TransportFrameDecoder decoder = new TransportFrameDecoder();
    ChannelHandlerContext ctx = mock(ChannelHandlerContext.class);
    ByteBuf frame = Unpooled.copyLong(size);
    try {
      decoder.channelRead(ctx, frame);
    } finally {
      release(frame);
    }
  }

  // Mocks a context whose fireChannelRead releases every frame it receives.
  private ChannelHandlerContext mockChannelHandlerContext() {
    ChannelHandlerContext ctx = mock(ChannelHandlerContext.class);
    when(ctx.fireChannelRead(any())).thenAnswer(in -> {
      ByteBuf buf = (ByteBuf) in.getArguments()[0];
      buf.release();
      return null;
    });
    return ctx;
  }

  // Drops all remaining references on a buffer, if any.
  private void release(ByteBuf buf) {
    if (buf.refCnt() > 0) {
      buf.release(buf.refCnt());
    }
  }

  /**
   * Interceptor that consumes a fixed number of reads, fully draining each buffer,
   * then deactivates itself by returning false from handle().
   */
  private static class MockInterceptor implements TransportFrameDecoder.Interceptor {

    private int remainingReads;

    MockInterceptor(int readCount) {
      this.remainingReads = readCount;
    }

    @Override
    public boolean handle(ByteBuf data) throws Exception {
      data.readerIndex(data.readerIndex() + data.readableBytes());
      assertFalse(data.isReadable());
      remainingReads -= 1;
      return remainingReads != 0;
    }

    @Override
    public void exceptionCaught(Throwable cause) throws Exception {

    }

    @Override
    public void channelInactive() throws Exception {

    }

  }
}
| 9,795 |
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/util/CryptoUtilsSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
import java.util.Map;
import java.util.Properties;
import com.google.common.collect.ImmutableMap;
import org.junit.Test;
import static org.junit.Assert.*;
public class CryptoUtilsSuite {

  /**
   * Verifies that only configuration entries carrying the full prefix are translated
   * into commons-crypto properties (with the prefix swapped for the commons-crypto
   * one), while near-miss keys are dropped.
   */
  @Test
  public void testConfConversion() {
    String prefix = "my.prefix.commons.config.";

    // Entry whose key carries the full prefix: should be converted.
    String matchingKey = prefix + "a.b.c";
    String matchingValue = "val1";
    String expectedCryptoKey = CryptoUtils.COMMONS_CRYPTO_CONFIG_PREFIX + "a.b.c";

    // Entry whose key is missing the trailing '.' of the prefix: should be ignored.
    String nonMatchingKey = prefix.substring(0, prefix.length() - 1) + "A.b.c";
    String nonMatchingValue = "val2";
    String unexpectedCryptoKey = CryptoUtils.COMMONS_CRYPTO_CONFIG_PREFIX + "A.b.c";

    Map<String, String> conf = ImmutableMap.of(
      matchingKey, matchingValue,
      nonMatchingKey, nonMatchingValue);

    Properties cryptoConf = CryptoUtils.toCryptoConf(prefix, conf.entrySet());
    assertEquals(matchingValue, cryptoConf.getProperty(expectedCryptoKey));
    assertFalse(cryptoConf.containsKey(unexpectedCryptoKey));
  }
}
| 9,796 |
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/util/NettyMemoryMetricsSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.util;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Metric;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.MetricSet;
import org.apache.spark.network.TestUtils;
import org.apache.spark.network.client.TransportClient;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.network.TransportContext;
import org.apache.spark.network.client.TransportClientFactory;
import org.apache.spark.network.server.NoOpRpcHandler;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.server.TransportServer;
public class NettyMemoryMetricsSuite {

  private TransportConf conf;
  private TransportContext context;
  private TransportServer server;
  private TransportClientFactory clientFactory;

  /**
   * Builds a transport server and client factory whose Netty allocators expose
   * memory metrics.
   *
   * @param enableVerboseMetrics whether per-arena verbose metrics are registered in
   *     addition to the general usedHeapMemory/usedDirectMemory gauges
   */
  private void setUp(boolean enableVerboseMetrics) {
    HashMap<String, String> configMap = new HashMap<>();
    configMap.put("spark.shuffle.io.enableVerboseMetrics", String.valueOf(enableVerboseMetrics));
    conf = new TransportConf("shuffle", new MapConfigProvider(configMap));
    RpcHandler rpcHandler = new NoOpRpcHandler();
    context = new TransportContext(conf, rpcHandler);
    server = context.createServer();
    clientFactory = context.createClientFactory();
  }

  /** Closes the client factory and server created by {@link #setUp(boolean)}. */
  @After
  public void tearDown() {
    if (clientFactory != null) {
      JavaUtils.closeQuietly(clientFactory);
      clientFactory = null;
    }
    if (server != null) {
      JavaUtils.closeQuietly(server);
      server = null;
    }
  }

  /**
   * With verbose metrics disabled, only the general used-memory gauges should be
   * registered, each scoped under "shuffle-server" / "shuffle-client".
   */
  @Test
  @SuppressWarnings("unchecked")
  public void testGeneralNettyMemoryMetrics() throws IOException, InterruptedException {
    setUp(false);

    MetricSet serverMetrics = server.getAllMetrics();
    Assert.assertNotNull(serverMetrics);
    Assert.assertNotNull(serverMetrics.getMetrics());
    // assertNotEquals takes (unexpected, actual); the original call had the arguments
    // swapped, which doesn't change pass/fail here but garbles failure messages.
    Assert.assertNotEquals(0, serverMetrics.getMetrics().size());

    Map<String, Metric> serverMetricMap = serverMetrics.getMetrics();
    serverMetricMap.forEach((name, metric) ->
      Assert.assertTrue(name.startsWith("shuffle-server"))
    );

    MetricSet clientMetrics = clientFactory.getAllMetrics();
    Assert.assertNotNull(clientMetrics);
    Assert.assertNotNull(clientMetrics.getMetrics());
    Assert.assertNotEquals(0, clientMetrics.getMetrics().size());

    Map<String, Metric> clientMetricMap = clientMetrics.getMetrics();
    clientMetricMap.forEach((name, metric) ->
      Assert.assertTrue(name.startsWith("shuffle-client"))
    );

    // Make sure general metrics existed.
    String heapMemoryMetric = "usedHeapMemory";
    String directMemoryMetric = "usedDirectMemory";
    Assert.assertNotNull(serverMetricMap.get(
      MetricRegistry.name("shuffle-server", heapMemoryMetric)));
    Assert.assertNotNull(serverMetricMap.get(
      MetricRegistry.name("shuffle-server", directMemoryMetric)));

    Assert.assertNotNull(clientMetricMap.get(
      MetricRegistry.name("shuffle-client", heapMemoryMetric)));
    Assert.assertNotNull(clientMetricMap.get(
      MetricRegistry.name("shuffle-client", directMemoryMetric)));

    TransportClient client = null;
    try {
      client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
      Assert.assertTrue(client.isActive());

      // Once a live connection exists the gauges must report non-negative usage.
      Assert.assertTrue(((Gauge<Long>)serverMetricMap.get(
        MetricRegistry.name("shuffle-server", heapMemoryMetric))).getValue() >= 0L);
      Assert.assertTrue(((Gauge<Long>)serverMetricMap.get(
        MetricRegistry.name("shuffle-server", directMemoryMetric))).getValue() >= 0L);
      Assert.assertTrue(((Gauge<Long>)clientMetricMap.get(
        MetricRegistry.name("shuffle-client", heapMemoryMetric))).getValue() >= 0L);
      Assert.assertTrue(((Gauge<Long>)clientMetricMap.get(
        MetricRegistry.name("shuffle-client", directMemoryMetric))).getValue() >= 0L);
    } finally {
      if (client != null) {
        client.close();
      }
    }
  }

  /**
   * With verbose metrics enabled, every registered metric must be either one of the
   * general used-memory gauges or a known per-arena verbose metric, and arena-level
   * gauges (e.g. directArena0.numActiveBytes) must be readable.
   */
  @Test
  @SuppressWarnings("unchecked")
  public void testAdditionalMetrics() throws IOException, InterruptedException {
    setUp(true);

    // Make sure additional metrics are added.
    Map<String, Metric> serverMetricMap = server.getAllMetrics().getMetrics();
    serverMetricMap.forEach((name, metric) -> {
      Assert.assertTrue(name.startsWith("shuffle-server"));
      String metricName = name.substring(name.lastIndexOf(".") + 1);
      Assert.assertTrue(metricName.equals("usedDirectMemory")
        || metricName.equals("usedHeapMemory")
        || NettyMemoryMetrics.VERBOSE_METRICS.contains(metricName));
    });

    Map<String, Metric> clientMetricMap = clientFactory.getAllMetrics().getMetrics();
    clientMetricMap.forEach((name, metric) -> {
      Assert.assertTrue(name.startsWith("shuffle-client"));
      String metricName = name.substring(name.lastIndexOf(".") + 1);
      Assert.assertTrue(metricName.equals("usedDirectMemory")
        || metricName.equals("usedHeapMemory")
        || NettyMemoryMetrics.VERBOSE_METRICS.contains(metricName));
    });

    TransportClient client = null;
    try {
      client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
      Assert.assertTrue(client.isActive());

      String activeBytesMetric = "numActiveBytes";
      Assert.assertTrue(((Gauge<Long>) serverMetricMap.get(MetricRegistry.name("shuffle-server",
        "directArena0", activeBytesMetric))).getValue() >= 0L);
      Assert.assertTrue(((Gauge<Long>) clientMetricMap.get(MetricRegistry.name("shuffle-client",
        "directArena0", activeBytesMetric))).getValue() >= 0L);
    } finally {
      if (client != null) {
        client.close();
      }
    }
  }
}
| 9,797 |
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/sasl/SparkSaslSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.sasl;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import java.io.File;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import javax.security.sasl.SaslException;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.ByteStreams;
import com.google.common.io.Files;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOutboundHandlerAdapter;
import io.netty.channel.ChannelPromise;
import org.junit.Test;
import org.apache.spark.network.TestUtils;
import org.apache.spark.network.TransportContext;
import org.apache.spark.network.buffer.FileSegmentManagedBuffer;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.client.ChunkReceivedCallback;
import org.apache.spark.network.client.RpcResponseCallback;
import org.apache.spark.network.client.TransportClient;
import org.apache.spark.network.client.TransportClientBootstrap;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.server.StreamManager;
import org.apache.spark.network.server.TransportServer;
import org.apache.spark.network.server.TransportServerBootstrap;
import org.apache.spark.network.util.ByteArrayWritableChannel;
import org.apache.spark.network.util.JavaUtils;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.TransportConf;
/**
 * Jointly tests SparkSaslClient and SparkSaslServer, as both are black boxes.
 */
public class SparkSaslSuite {

  /** Provides a secret key holder which returns secret key == appId */
  private SecretKeyHolder secretKeyHolder = new SecretKeyHolder() {
    @Override
    public String getSaslUser(String appId) {
      return "user";
    }

    @Override
    public String getSecretKey(String appId) {
      return appId;
    }
  };

  /** A client/server pair sharing the same secret must complete the handshake. */
  @Test
  public void testMatching() {
    SparkSaslClient client = new SparkSaslClient("shared-secret", secretKeyHolder, false);
    SparkSaslServer server = new SparkSaslServer("shared-secret", secretKeyHolder, false);

    assertFalse(client.isComplete());
    assertFalse(server.isComplete());

    byte[] clientMessage = client.firstToken();

    while (!client.isComplete()) {
      clientMessage = client.response(server.response(clientMessage));
    }
    assertTrue(server.isComplete());

    // Disposal should invalidate
    server.dispose();
    assertFalse(server.isComplete());
    client.dispose();
    assertFalse(client.isComplete());
  }

  /** Mismatched secrets must abort the handshake without either side completing. */
  @Test
  public void testNonMatching() {
    SparkSaslClient client = new SparkSaslClient("my-secret", secretKeyHolder, false);
    SparkSaslServer server = new SparkSaslServer("your-secret", secretKeyHolder, false);

    assertFalse(client.isComplete());
    assertFalse(server.isComplete());

    byte[] clientMessage = client.firstToken();

    try {
      while (!client.isComplete()) {
        clientMessage = client.response(server.response(clientMessage));
      }
      fail("Should not have completed");
    } catch (Exception e) {
      assertTrue(e.getMessage().contains("Mismatched response"));
      assertFalse(client.isComplete());
      assertFalse(server.isComplete());
    }
  }

  @Test
  public void testSaslAuthentication() throws Throwable {
    testBasicSasl(false);
  }

  @Test
  public void testSaslEncryption() throws Throwable {
    testBasicSasl(true);
  }

  /**
   * Round-trips a Ping/Pong RPC over a SASL-authenticated connection, then verifies
   * that both endpoints eventually observe channelInactive after teardown.
   *
   * @param encrypt whether SASL encryption is negotiated on top of authentication
   */
  private static void testBasicSasl(boolean encrypt) throws Throwable {
    RpcHandler rpcHandler = mock(RpcHandler.class);
    doAnswer(invocation -> {
      ByteBuffer message = (ByteBuffer) invocation.getArguments()[1];
      RpcResponseCallback cb = (RpcResponseCallback) invocation.getArguments()[2];
      assertEquals("Ping", JavaUtils.bytesToString(message));
      cb.onSuccess(JavaUtils.stringToBytes("Pong"));
      return null;
    })
      .when(rpcHandler)
      .receive(any(TransportClient.class), any(ByteBuffer.class), any(RpcResponseCallback.class));

    SaslTestCtx ctx = new SaslTestCtx(rpcHandler, encrypt, false);
    try {
      ByteBuffer response = ctx.client.sendRpcSync(JavaUtils.stringToBytes("Ping"),
        TimeUnit.SECONDS.toMillis(10));
      assertEquals("Pong", JavaUtils.bytesToString(response));
    } finally {
      ctx.close();
      // There should be 2 terminated events; one for the client, one for the server.
      // Channel teardown is asynchronous, so poll until the deadline instead of
      // asserting immediately.
      Throwable error = null;
      long deadline = System.nanoTime() + TimeUnit.NANOSECONDS.convert(10, TimeUnit.SECONDS);
      while (deadline > System.nanoTime()) {
        try {
          verify(rpcHandler, times(2)).channelInactive(any(TransportClient.class));
          error = null;
          break;
        } catch (Throwable t) {
          error = t;
          TimeUnit.MILLISECONDS.sleep(10);
        }
      }
      if (error != null) {
        throw error;
      }
    }
  }

  /**
   * Drives an EncryptedMessage through a channel whose buffer is far smaller than the
   * payload, exercising the "dummy count" behavior that keeps the Netty channel alive
   * when a write cannot complete.
   */
  @Test
  public void testEncryptedMessage() throws Exception {
    SaslEncryptionBackend backend = mock(SaslEncryptionBackend.class);
    byte[] data = new byte[1024];
    new Random().nextBytes(data);
    when(backend.wrap(any(byte[].class), anyInt(), anyInt())).thenReturn(data);

    ByteBuf msg = Unpooled.buffer();
    try {
      msg.writeBytes(data);

      // Create a channel with a really small buffer compared to the data. This means that on each
      // call, the outbound data will not be fully written, so the write() method should return a
      // dummy count to keep the channel alive when possible.
      ByteArrayWritableChannel channel = new ByteArrayWritableChannel(32);

      SaslEncryption.EncryptedMessage emsg =
        new SaslEncryption.EncryptedMessage(backend, msg, 1024);
      long count = emsg.transferTo(channel, emsg.transfered());
      assertTrue(count < data.length);
      assertTrue(count > 0);

      // Here, the output buffer is full so nothing should be transferred.
      assertEquals(0, emsg.transferTo(channel, emsg.transfered()));

      // Now there's room in the buffer, but not enough to transfer all the remaining data,
      // so the dummy count should be returned.
      channel.reset();
      assertEquals(1, emsg.transferTo(channel, emsg.transfered()));

      // Eventually, the whole message should be transferred.
      for (int i = 0; i < data.length / 32 - 2; i++) {
        channel.reset();
        assertEquals(1, emsg.transferTo(channel, emsg.transfered()));
      }

      channel.reset();
      count = emsg.transferTo(channel, emsg.transfered());
      assertTrue("Unexpected count: " + count, count > 1 && count < data.length);
      assertEquals(data.length, emsg.transfered());
    } finally {
      msg.release();
    }
  }

  /** An encrypted file region should call the wrap backend once per configured chunk. */
  @Test
  public void testEncryptedMessageChunking() throws Exception {
    File file = File.createTempFile("sasltest", ".txt");
    try {
      TransportConf conf = new TransportConf("shuffle", MapConfigProvider.EMPTY);

      byte[] data = new byte[8 * 1024];
      new Random().nextBytes(data);
      Files.write(data, file);

      SaslEncryptionBackend backend = mock(SaslEncryptionBackend.class);
      // It doesn't really matter what we return here, as long as it's not null.
      when(backend.wrap(any(byte[].class), anyInt(), anyInt())).thenReturn(data);

      FileSegmentManagedBuffer msg = new FileSegmentManagedBuffer(conf, file, 0, file.length());
      SaslEncryption.EncryptedMessage emsg =
        new SaslEncryption.EncryptedMessage(backend, msg.convertToNetty(), data.length / 8);

      ByteArrayWritableChannel channel = new ByteArrayWritableChannel(data.length);
      while (emsg.transfered() < emsg.count()) {
        channel.reset();
        emsg.transferTo(channel, emsg.transfered());
      }

      // Chunk size is 1/8th of the data, so the backend must wrap exactly 8 times.
      verify(backend, times(8)).wrap(any(byte[].class), anyInt(), anyInt());
    } finally {
      file.delete();
    }
  }

  /** Fetches an encrypted chunk end-to-end and verifies the decrypted bytes match. */
  @Test
  public void testFileRegionEncryption() throws Exception {
    Map<String, String> testConf = ImmutableMap.of(
      "spark.network.sasl.maxEncryptedBlockSize", "1k");

    AtomicReference<ManagedBuffer> response = new AtomicReference<>();
    File file = File.createTempFile("sasltest", ".txt");
    SaslTestCtx ctx = null;
    try {
      TransportConf conf = new TransportConf("shuffle", new MapConfigProvider(testConf));
      StreamManager sm = mock(StreamManager.class);
      when(sm.getChunk(anyLong(), anyInt())).thenAnswer(invocation ->
        new FileSegmentManagedBuffer(conf, file, 0, file.length()));

      RpcHandler rpcHandler = mock(RpcHandler.class);
      when(rpcHandler.getStreamManager()).thenReturn(sm);

      byte[] data = new byte[8 * 1024];
      new Random().nextBytes(data);
      Files.write(data, file);

      ctx = new SaslTestCtx(rpcHandler, true, false, testConf);

      CountDownLatch lock = new CountDownLatch(1);

      ChunkReceivedCallback callback = mock(ChunkReceivedCallback.class);
      doAnswer(invocation -> {
        response.set((ManagedBuffer) invocation.getArguments()[1]);
        response.get().retain();
        lock.countDown();
        return null;
      }).when(callback).onSuccess(anyInt(), any(ManagedBuffer.class));

      ctx.client.fetchChunk(0, 0, callback);
      // Fail the test clearly if the fetch doesn't complete in time; the original code
      // ignored await()'s return value, so a timeout fell through to confusing
      // mock-verification failures below.
      assertTrue(lock.await(10, TimeUnit.SECONDS));

      verify(callback, times(1)).onSuccess(anyInt(), any(ManagedBuffer.class));
      verify(callback, never()).onFailure(anyInt(), any(Throwable.class));

      byte[] received = ByteStreams.toByteArray(response.get().createInputStream());
      // assertArrayEquals reports the first mismatching index on failure, unlike
      // assertTrue(Arrays.equals(...)).
      assertArrayEquals(data, received);
    } finally {
      file.delete();
      if (ctx != null) {
        ctx.close();
      }
      if (response.get() != null) {
        response.get().release();
      }
    }
  }

  /** A plaintext client must be rejected when the server mandates encryption. */
  @Test
  public void testServerAlwaysEncrypt() throws Exception {
    SaslTestCtx ctx = null;
    try {
      ctx = new SaslTestCtx(mock(RpcHandler.class), false, false,
        ImmutableMap.of("spark.network.sasl.serverAlwaysEncrypt", "true"));
      fail("Should have failed to connect without encryption.");
    } catch (Exception e) {
      assertTrue(e.getCause() instanceof SaslException);
    } finally {
      if (ctx != null) {
        ctx.close();
      }
    }
  }

  @Test
  public void testDataEncryptionIsActuallyEnabled() throws Exception {
    // This test sets up an encrypted connection but then, using a client bootstrap, removes
    // the encryption handler from the client side. This should cause the server to not be
    // able to understand RPCs sent to it and thus close the connection.
    SaslTestCtx ctx = null;
    try {
      ctx = new SaslTestCtx(mock(RpcHandler.class), true, true);
      ctx.client.sendRpcSync(JavaUtils.stringToBytes("Ping"),
        TimeUnit.SECONDS.toMillis(10));
      fail("Should have failed to send RPC to server.");
    } catch (Exception e) {
      assertFalse(e.getCause() instanceof TimeoutException);
    } finally {
      if (ctx != null) {
        ctx.close();
      }
    }
  }

  @Test
  public void testRpcHandlerDelegate() throws Exception {
    // Tests all delegates exception for receive(), which is more complicated and already handled
    // by all other tests.
    RpcHandler handler = mock(RpcHandler.class);
    RpcHandler saslHandler = new SaslRpcHandler(null, null, handler, null);

    saslHandler.getStreamManager();
    verify(handler).getStreamManager();

    saslHandler.channelInactive(null);
    verify(handler).channelInactive(any(TransportClient.class));

    saslHandler.exceptionCaught(null, null);
    verify(handler).exceptionCaught(any(Throwable.class), any(TransportClient.class));
  }

  /** SaslRpcHandler must override every RpcHandler method so delegation stays complete. */
  @Test
  public void testDelegates() throws Exception {
    Method[] rpcHandlerMethods = RpcHandler.class.getDeclaredMethods();
    for (Method m : rpcHandlerMethods) {
      SaslRpcHandler.class.getDeclaredMethod(m.getName(), m.getParameterTypes());
    }
  }

  /** Small harness that wires a SASL-bootstrapped client/server pair for a test. */
  private static class SaslTestCtx {

    final TransportClient client;
    final TransportServer server;

    private final boolean encrypt;
    private final boolean disableClientEncryption;
    private final EncryptionCheckerBootstrap checker;

    SaslTestCtx(
        RpcHandler rpcHandler,
        boolean encrypt,
        boolean disableClientEncryption)
      throws Exception {

      this(rpcHandler, encrypt, disableClientEncryption, Collections.emptyMap());
    }

    SaslTestCtx(
        RpcHandler rpcHandler,
        boolean encrypt,
        boolean disableClientEncryption,
        Map<String, String> extraConf)
      throws Exception {

      Map<String, String> testConf = ImmutableMap.<String, String>builder()
        .putAll(extraConf)
        .put("spark.authenticate.enableSaslEncryption", String.valueOf(encrypt))
        .build();
      TransportConf conf = new TransportConf("shuffle", new MapConfigProvider(testConf));

      SecretKeyHolder keyHolder = mock(SecretKeyHolder.class);
      when(keyHolder.getSaslUser(anyString())).thenReturn("user");
      when(keyHolder.getSecretKey(anyString())).thenReturn("secret");

      TransportContext ctx = new TransportContext(conf, rpcHandler);

      this.checker = new EncryptionCheckerBootstrap(SaslEncryption.ENCRYPTION_HANDLER_NAME);

      this.server = ctx.createServer(Arrays.asList(new SaslServerBootstrap(conf, keyHolder),
        checker));

      try {
        List<TransportClientBootstrap> clientBootstraps = new ArrayList<>();
        clientBootstraps.add(new SaslClientBootstrap(conf, "user", keyHolder));
        if (disableClientEncryption) {
          clientBootstraps.add(new EncryptionDisablerBootstrap());
        }

        this.client = ctx.createClientFactory(clientBootstraps)
          .createClient(TestUtils.getLocalHost(), server.getPort());
      } catch (Exception e) {
        close();
        throw e;
      }

      this.encrypt = encrypt;
      this.disableClientEncryption = disableClientEncryption;
    }

    void close() {
      if (!disableClientEncryption) {
        // The checker only sees the encryption handler when encryption was negotiated.
        assertEquals(encrypt, checker.foundEncryptionHandler);
      }
      if (client != null) {
        client.close();
      }
      if (server != null) {
        server.close();
      }
    }
  }

  /**
   * Server bootstrap that records whether the SASL encryption handler was ever present
   * in the channel pipeline during a write.
   */
  private static class EncryptionCheckerBootstrap extends ChannelOutboundHandlerAdapter
    implements TransportServerBootstrap {

    boolean foundEncryptionHandler;
    String encryptHandlerName;

    EncryptionCheckerBootstrap(String encryptHandlerName) {
      this.encryptHandlerName = encryptHandlerName;
    }

    @Override
    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise)
      throws Exception {
      if (!foundEncryptionHandler) {
        foundEncryptionHandler =
          ctx.channel().pipeline().get(encryptHandlerName) != null;
      }
      ctx.write(msg, promise);
    }

    @Override
    public RpcHandler doBootstrap(Channel channel, RpcHandler rpcHandler) {
      channel.pipeline().addFirst("encryptionChecker", this);
      return rpcHandler;
    }
  }

  /** Client bootstrap that strips the encryption handler after SASL negotiation. */
  private static class EncryptionDisablerBootstrap implements TransportClientBootstrap {

    @Override
    public void doBootstrap(TransportClient client, Channel channel) {
      channel.pipeline().remove(SaslEncryption.ENCRYPTION_HANDLER_NAME);
    }
  }

}
| 9,798 |
0 | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network | Create_ds/spark/common/network-common/src/test/java/org/apache/spark/network/server/OneForOneStreamManagerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.server;
import java.util.ArrayList;
import java.util.List;

import io.netty.channel.Channel;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;

import org.apache.spark.network.TestManagedBuffer;
import org.apache.spark.network.buffer.ManagedBuffer;
public class OneForOneStreamManagerSuite {

  /**
   * Registering a stream ties its buffers to a channel; when that channel terminates,
   * every remaining buffer must be released and the stream state removed.
   * (The method name's "Feed" is a long-standing typo for "Freed"; kept unchanged so
   * external test filters keep matching.)
   */
  @Test
  public void managedBuffersAreFeedWhenConnectionIsClosed() throws Exception {
    OneForOneStreamManager manager = new OneForOneStreamManager();
    List<ManagedBuffer> buffers = new ArrayList<>();
    TestManagedBuffer buffer1 = Mockito.spy(new TestManagedBuffer(10));
    TestManagedBuffer buffer2 = Mockito.spy(new TestManagedBuffer(20));
    buffers.add(buffer1);
    buffers.add(buffer2);

    Channel dummyChannel = Mockito.mock(Channel.class, Mockito.RETURNS_SMART_NULLS);
    manager.registerStream("appId", buffers.iterator(), dummyChannel);

    // Use JUnit assertions rather than the Java `assert` keyword: `assert` is a no-op
    // unless the JVM runs with -ea, so the original checks could silently pass.
    Assert.assertEquals(1, manager.numStreamStates());

    manager.connectionTerminated(dummyChannel);

    Mockito.verify(buffer1, Mockito.times(1)).release();
    Mockito.verify(buffer2, Mockito.times(1)).release();
    Assert.assertEquals(0, manager.numStreamStates());
  }
}
| 9,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.