repo_name: santhoshkumarvs/spark
path: mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.regression
import scala.collection.JavaConverters._
import scala.util.Random
import org.dmg.pmml.{OpType, PMML}
import org.dmg.pmml.regression.{RegressionModel => PMMLRegressionModel}
import org.apache.spark.ml.feature.Instance
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.{DenseVector, Vector, Vectors}
import org.apache.spark.ml.param.{ParamMap, ParamsSuite}
import org.apache.spark.ml.util._
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.util.LinearDataGenerator
import org.apache.spark.sql.{DataFrame, Row}
class LinearRegressionSuite extends MLTest with DefaultReadWriteTest with PMMLReadWriteTest {
import testImplicits._
private val seed: Int = 42
@transient var datasetWithDenseFeature: DataFrame = _
@transient var datasetWithStrongNoise: DataFrame = _
@transient var datasetWithDenseFeatureWithoutIntercept: DataFrame = _
@transient var datasetWithSparseFeature: DataFrame = _
@transient var datasetWithWeight: DataFrame = _
@transient var datasetWithWeightConstantLabel: DataFrame = _
@transient var datasetWithWeightZeroLabel: DataFrame = _
@transient var datasetWithOutlier: DataFrame = _
override def beforeAll(): Unit = {
super.beforeAll()
datasetWithDenseFeature = sc.parallelize(LinearDataGenerator.generateLinearInput(
intercept = 6.3, weights = Array(4.7, 7.2), xMean = Array(0.9, -1.3),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, eps = 0.1), 2).map(_.asML).toDF()
datasetWithStrongNoise = sc.parallelize(LinearDataGenerator.generateLinearInput(
intercept = 6.3, weights = Array(4.7, 7.2), xMean = Array(0.9, -1.3),
xVariance = Array(0.7, 1.2), nPoints = 100, seed, eps = 5.0), 2).map(_.asML).toDF()
/*
datasetWithDenseFeatureWithoutIntercept is not needed for correctness testing
but is useful for illustrating training a model without an intercept
*/
datasetWithDenseFeatureWithoutIntercept = sc.parallelize(
LinearDataGenerator.generateLinearInput(
intercept = 0.0, weights = Array(4.7, 7.2), xMean = Array(0.9, -1.3),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, eps = 0.1), 2).map(_.asML).toDF()
val r = new Random(seed)
// When the feature size is larger than 4096, the L-BFGS optimizer (rather than the
// normal-equation solver) is chosen as the solver of linear regression in "auto" mode.
val featureSize = 4100
datasetWithSparseFeature = sc.parallelize(LinearDataGenerator.generateLinearInput(
intercept = 0.0, weights = Seq.fill(featureSize)(r.nextDouble()).toArray,
xMean = Seq.fill(featureSize)(r.nextDouble()).toArray,
xVariance = Seq.fill(featureSize)(r.nextDouble()).toArray, nPoints = 200,
seed, eps = 0.1, sparsity = 0.7), 2).map(_.asML).toDF()
/*
R code:
A <- matrix(c(0, 1, 2, 3, 5, 7, 11, 13), 4, 2)
b <- c(17, 19, 23, 29)
w <- c(1, 2, 3, 4)
df <- as.data.frame(cbind(A, b))
*/
datasetWithWeight = sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(19.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(23.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(29.0, 4.0, Vectors.dense(3.0, 13.0))
), 2).toDF()
/*
R code:
A <- matrix(c(0, 1, 2, 3, 5, 7, 11, 13), 4, 2)
b.const <- c(17, 17, 17, 17)
w <- c(1, 2, 3, 4)
df.const.label <- as.data.frame(cbind(A, b.const))
*/
datasetWithWeightConstantLabel = sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(17.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(17.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(17.0, 4.0, Vectors.dense(3.0, 13.0))
), 2).toDF()
datasetWithWeightZeroLabel = sc.parallelize(Seq(
Instance(0.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(0.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(0.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(0.0, 4.0, Vectors.dense(3.0, 13.0))
), 2).toDF()
datasetWithOutlier = {
val inlierData = LinearDataGenerator.generateLinearInput(
intercept = 6.3, weights = Array(4.7, 7.2), xMean = Array(0.9, -1.3),
xVariance = Array(0.7, 1.2), nPoints = 900, seed, eps = 0.1)
val outlierData = LinearDataGenerator.generateLinearInput(
intercept = -2.1, weights = Array(0.6, -1.2), xMean = Array(0.9, -1.3),
xVariance = Array(1.5, 0.8), nPoints = 100, seed, eps = 0.1)
sc.parallelize(inlierData ++ outlierData, 2).map(_.asML).toDF()
}
}
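// For reference: LinearDataGenerator.generateLinearInput draws each label from a linear model
// with additive Gaussian noise. A sketch of the generating process used throughout this suite:
//   label = intercept + weights.zip(features).map { case (w, x) => w * x }.sum +
//     eps * Random.nextGaussian()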
/**
* Enable the ignored test to export the dataset into CSV format,
* so we can validate the training accuracy against R's glmnet package.
*/
ignore("export test data into CSV format") {
datasetWithDenseFeature.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile("target/tmp/LinearRegressionSuite/datasetWithDenseFeature")
datasetWithDenseFeatureWithoutIntercept.rdd.map {
case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile(
"target/tmp/LinearRegressionSuite/datasetWithDenseFeatureWithoutIntercept")
datasetWithSparseFeature.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile("target/tmp/LinearRegressionSuite/datasetWithSparseFeature")
datasetWithOutlier.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile("target/tmp/LinearRegressionSuite/datasetWithOutlier")
}
test("params") {
ParamsSuite.checkParams(new LinearRegression)
val model = new LinearRegressionModel("linearReg", Vectors.dense(0.0), 0.0)
ParamsSuite.checkParams(model)
}
test("linear regression: default params") {
val lir = new LinearRegression
assert(lir.getLabelCol === "label")
assert(lir.getFeaturesCol === "features")
assert(lir.getPredictionCol === "prediction")
assert(lir.getRegParam === 0.0)
assert(lir.getElasticNetParam === 0.0)
assert(lir.getFitIntercept)
assert(lir.getStandardization)
assert(lir.getSolver === "auto")
assert(lir.getLoss === "squaredError")
assert(lir.getEpsilon === 1.35)
val model = lir.fit(datasetWithDenseFeature)
MLTestingUtils.checkCopyAndUids(lir, model)
assert(model.hasSummary)
val copiedModel = model.copy(ParamMap.empty)
assert(copiedModel.hasSummary)
model.setSummary(None)
assert(!model.hasSummary)
model.transform(datasetWithDenseFeature)
.select("label", "prediction")
.collect()
assert(model.getFeaturesCol === "features")
assert(model.getPredictionCol === "prediction")
assert(model.intercept !== 0.0)
assert(model.scale === 1.0)
assert(model.hasParent)
val numFeatures = datasetWithDenseFeature.select("features").first().getAs[Vector](0).size
assert(model.numFeatures === numFeatures)
}
test("linear regression: illegal params") {
withClue("LinearRegression with huber loss only supports L2 regularization") {
intercept[IllegalArgumentException] {
new LinearRegression().setLoss("huber").setElasticNetParam(0.5)
.fit(datasetWithDenseFeature)
}
}
withClue("LinearRegression with huber loss doesn't support normal solver") {
intercept[IllegalArgumentException] {
new LinearRegression().setLoss("huber").setSolver("normal").fit(datasetWithDenseFeature)
}
}
}
test("linear regression handles singular matrices") {
// Check both constant columns with intercept (zero std) and collinear features.
val singularDataConstantColumn = sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(1.0, 5.0).toSparse),
Instance(19.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(23.0, 3.0, Vectors.dense(1.0, 11.0)),
Instance(29.0, 4.0, Vectors.dense(1.0, 13.0))
), 2).toDF()
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer = new LinearRegression().setSolver(solver).setFitIntercept(true)
val model = trainer.fit(singularDataConstantColumn)
// Make it clear that WLS did not solve analytically (so no standard errors are available)
intercept[UnsupportedOperationException] {
model.summary.coefficientStandardErrors
}
assert(model.summary.objectiveHistory !== Array(0.0))
}
val singularDataCollinearFeatures = sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(10.0, 5.0).toSparse),
Instance(19.0, 2.0, Vectors.dense(14.0, 7.0)),
Instance(23.0, 3.0, Vectors.dense(22.0, 11.0)),
Instance(29.0, 4.0, Vectors.dense(26.0, 13.0))
), 2).toDF()
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer = new LinearRegression().setSolver(solver).setFitIntercept(true)
val model = trainer.fit(singularDataCollinearFeatures)
intercept[UnsupportedOperationException] {
model.summary.coefficientStandardErrors
}
assert(model.summary.objectiveHistory !== Array(0.0))
}
}
test("linear regression with intercept without regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = new LinearRegression().setSolver(solver)
// Without regularization, the result should be the same regardless of standardization
val trainer2 = (new LinearRegression).setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE, stringsAsFactors=FALSE)
features <- as.matrix(data.frame(as.numeric(data$V2), as.numeric(data$V3)))
label <- as.numeric(data$V1)
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0, lambda = 0))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 6.298698
as.numeric.data.V2. 4.700706
as.numeric.data.V3. 7.199082
*/
val interceptR = 6.298698
val coefficientsR = Vectors.dense(4.700706, 7.199082)
assert(model1.intercept ~== interceptR relTol 1E-3)
assert(model1.coefficients ~= coefficientsR relTol 1E-3)
assert(model2.intercept ~== interceptR relTol 1E-3)
assert(model2.coefficients ~= coefficientsR relTol 1E-3)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression without intercept without regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setFitIntercept(false).setSolver(solver)
// Without regularization the results should be the same
val trainer2 = (new LinearRegression).setFitIntercept(false).setStandardization(false)
.setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val modelWithoutIntercept1 = trainer1.fit(datasetWithDenseFeatureWithoutIntercept)
val model2 = trainer2.fit(datasetWithDenseFeature)
val modelWithoutIntercept2 = trainer2.fit(datasetWithDenseFeatureWithoutIntercept)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0, lambda = 0,
intercept = FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.data.V2. 6.973403
as.numeric.data.V3. 5.284370
*/
val coefficientsR = Vectors.dense(6.973403, 5.284370)
assert(model1.intercept ~== 0 absTol 1E-2)
assert(model1.coefficients ~= coefficientsR relTol 1E-2)
assert(model2.intercept ~== 0 absTol 1E-2)
assert(model2.coefficients ~= coefficientsR relTol 1E-2)
/*
Then again with the data with no intercept:
> coefficientsWithoutIntercept
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.data3.V2. 4.70011
as.numeric.data3.V3. 7.19943
*/
val coefficientsWithoutInterceptR = Vectors.dense(4.70011, 7.19943)
assert(modelWithoutIntercept1.intercept ~== 0 absTol 1E-3)
assert(modelWithoutIntercept1.coefficients ~= coefficientsWithoutInterceptR relTol 1E-3)
assert(modelWithoutIntercept2.intercept ~== 0 absTol 1E-3)
assert(modelWithoutIntercept2.coefficients ~= coefficientsWithoutInterceptR relTol 1E-3)
}
}
test("linear regression with intercept with L1 regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(1.0).setRegParam(0.57)
.setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(1.0).setRegParam(0.57)
.setSolver(solver).setStandardization(false)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian",
alpha = 1.0, lambda = 0.57 ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 6.242284
as.numeric.d1.V2. 4.019605
as.numeric.d1.V3. 6.679538
*/
val interceptR1 = 6.242284
val coefficientsR1 = Vectors.dense(4.019605, 6.679538)
assert(model1.intercept ~== interceptR1 relTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 1.0,
lambda = 0.57, standardize=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 6.416948
as.numeric.data.V2. 3.893869
as.numeric.data.V3. 6.724286
*/
val interceptR2 = 6.416948
val coefficientsR2 = Vectors.dense(3.893869, 6.724286)
assert(model2.intercept ~== interceptR2 relTol 1E-3)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-3)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression without intercept with L1 regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(1.0).setRegParam(0.57)
.setFitIntercept(false).setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(1.0).setRegParam(0.57)
.setFitIntercept(false).setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 1.0,
lambda = 0.57, intercept=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.data.V2. 6.272927
as.numeric.data.V3. 4.782604
*/
val interceptR1 = 0.0
val coefficientsR1 = Vectors.dense(6.272927, 4.782604)
assert(model1.intercept ~== interceptR1 absTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 1.0,
lambda = 0.57, intercept=FALSE, standardize=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.data.V2. 6.207817
as.numeric.data.V3. 4.775780
*/
val interceptR2 = 0.0
val coefficientsR2 = Vectors.dense(6.207817, 4.775780)
assert(model2.intercept ~== interceptR2 absTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression with intercept with L2 regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(0.0).setRegParam(2.3)
.setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(0.0).setRegParam(2.3)
.setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.0, lambda = 2.3))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 5.260103
as.numeric.d1.V2. 3.725522
as.numeric.d1.V3. 5.711203
*/
val interceptR1 = 5.260103
val coefficientsR1 = Vectors.dense(3.725522, 5.711203)
assert(model1.intercept ~== interceptR1 relTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.0, lambda = 2.3,
standardize=FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 5.790885
as.numeric.d1.V2. 3.432373
as.numeric.d1.V3. 5.919196
*/
val interceptR2 = 5.790885
val coefficientsR2 = Vectors.dense(3.432373, 5.919196)
assert(model2.intercept ~== interceptR2 relTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression without intercept with L2 regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(0.0).setRegParam(2.3)
.setFitIntercept(false).setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(0.0).setRegParam(2.3)
.setFitIntercept(false).setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.0, lambda = 2.3,
intercept = FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.d1.V2. 5.493430
as.numeric.d1.V3. 4.223082
*/
val interceptR1 = 0.0
val coefficientsR1 = Vectors.dense(5.493430, 4.223082)
assert(model1.intercept ~== interceptR1 absTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.0, lambda = 2.3,
intercept = FALSE, standardize=FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.d1.V2. 5.244324
as.numeric.d1.V3. 4.203106
*/
val interceptR2 = 0.0
val coefficientsR2 = Vectors.dense(5.244324, 4.203106)
assert(model2.intercept ~== interceptR2 absTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression with intercept with ElasticNet regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(0.3).setRegParam(1.6)
.setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(0.3).setRegParam(1.6)
.setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.3,
lambda = 1.6 ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 5.689855
as.numeric.d1.V2. 3.661181
as.numeric.d1.V3. 6.000274
*/
val interceptR1 = 5.689855
val coefficientsR1 = Vectors.dense(3.661181, 6.000274)
assert(model1.intercept ~== interceptR1 relTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.3, lambda = 1.6
standardize=FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 6.113890
as.numeric.d1.V2. 3.407021
as.numeric.d1.V3. 6.152512
*/
val interceptR2 = 6.113890
val coefficientsR2 = Vectors.dense(3.407021, 6.152512)
assert(model2.intercept ~== interceptR2 relTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression without intercept with ElasticNet regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(0.3).setRegParam(1.6)
.setFitIntercept(false).setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(0.3).setRegParam(1.6)
.setFitIntercept(false).setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.3,
lambda = 1.6, intercept=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.d1.V2. 5.643748
as.numeric.d1.V3. 4.331519
*/
val interceptR1 = 0.0
val coefficientsR1 = Vectors.dense(5.643748, 4.331519)
assert(model1.intercept ~== interceptR1 absTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.3,
lambda = 1.6, intercept=FALSE, standardize=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.d1.V2. 5.455902
as.numeric.d1.V3. 4.312266
*/
val interceptR2 = 0.0
val coefficientsR2 = Vectors.dense(5.455902, 4.312266)
assert(model2.intercept ~== interceptR2 absTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("prediction on single instance") {
val trainer = new LinearRegression
val model = trainer.fit(datasetWithDenseFeature)
testPredictionModelSinglePrediction(model, datasetWithDenseFeature)
}
test("linear regression model with constant label") {
/*
R code:
for (formula in c(b.const ~ . -1, b.const ~ .)) {
model <- lm(formula, data=df.const.label, weights=w)
print(as.vector(coef(model)))
}
[1] -9.221298 3.394343
[1] 17 0 0
*/
val expected = Seq(
Vectors.dense(0.0, -9.221298, 3.394343),
Vectors.dense(17.0, 0.0, 0.0))
Seq("auto", "l-bfgs", "normal").foreach { solver =>
var idx = 0
for (fitIntercept <- Seq(false, true)) {
val model1 = new LinearRegression()
.setFitIntercept(fitIntercept)
.setWeightCol("weight")
.setPredictionCol("myPrediction")
.setSolver(solver)
.fit(datasetWithWeightConstantLabel)
val actual1 = Vectors.dense(model1.intercept, model1.coefficients(0),
model1.coefficients(1))
assert(actual1 ~== expected(idx) absTol 1e-4)
// Schema of summary.predictions should be a superset of the input dataset
assert((datasetWithWeightConstantLabel.schema.fieldNames.toSet + model1.getPredictionCol)
.subsetOf(model1.summary.predictions.schema.fieldNames.toSet))
val model2 = new LinearRegression()
.setFitIntercept(fitIntercept)
.setWeightCol("weight")
.setPredictionCol("myPrediction")
.setSolver(solver)
.fit(datasetWithWeightZeroLabel)
val actual2 = Vectors.dense(model2.intercept, model2.coefficients(0),
model2.coefficients(1))
assert(actual2 ~== Vectors.dense(0.0, 0.0, 0.0) absTol 1e-4)
// Schema of summary.predictions should be a superset of the input dataset
assert((datasetWithWeightZeroLabel.schema.fieldNames.toSet + model2.getPredictionCol)
.subsetOf(model2.summary.predictions.schema.fieldNames.toSet))
idx += 1
}
}
}
test("regularized linear regression through origin with constant label") {
// The problem is ill-defined if fitIntercept is false and regParam is non-zero.
// An exception is thrown in this case.
Seq("auto", "l-bfgs", "normal").foreach { solver =>
for (standardization <- Seq(false, true)) {
val model = new LinearRegression().setFitIntercept(false)
.setRegParam(0.1).setStandardization(standardization).setSolver(solver)
intercept[IllegalArgumentException] {
model.fit(datasetWithWeightConstantLabel)
}
}
}
}
test("linear regression with l-bfgs when training is not needed") {
// When the label is constant, the l-bfgs solver returns results without training.
// There are two possibilities: if the label is non-zero but constant
// and fitIntercept is true, then the model returns yMean as the intercept without training.
// If the label is all zeros, then all coefficients are zero regardless of fitIntercept, so
// no training is needed.
for (fitIntercept <- Seq(false, true)) {
for (standardization <- Seq(false, true)) {
val model1 = new LinearRegression()
.setFitIntercept(fitIntercept)
.setStandardization(standardization)
.setWeightCol("weight")
.setSolver("l-bfgs")
.fit(datasetWithWeightConstantLabel)
if (fitIntercept) {
assert(model1.summary.objectiveHistory(0) ~== 0.0 absTol 1e-4)
}
val model2 = new LinearRegression()
.setFitIntercept(fitIntercept)
.setWeightCol("weight")
.setSolver("l-bfgs")
.fit(datasetWithWeightZeroLabel)
assert(model2.summary.objectiveHistory(0) ~== 0.0 absTol 1e-4)
}
}
}
test("linear regression model training summary") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer = new LinearRegression().setSolver(solver).setPredictionCol("myPrediction")
val model = trainer.fit(datasetWithDenseFeature)
val trainerNoPredictionCol = trainer.setPredictionCol("")
val modelNoPredictionCol = trainerNoPredictionCol.fit(datasetWithDenseFeature)
// Training results for the model should be available
assert(model.hasSummary)
assert(modelNoPredictionCol.hasSummary)
// Schema should be a superset of the input dataset
assert((datasetWithDenseFeature.schema.fieldNames.toSet + model.getPredictionCol).subsetOf(
model.summary.predictions.schema.fieldNames.toSet))
// Validate that we re-insert a prediction column for evaluation
val modelNoPredictionColFieldNames
= modelNoPredictionCol.summary.predictions.schema.fieldNames
assert(datasetWithDenseFeature.schema.fieldNames.toSet.subsetOf(
modelNoPredictionColFieldNames.toSet))
assert(modelNoPredictionColFieldNames.exists(s => s.startsWith("prediction_")))
// Residuals in [[LinearRegressionSummary]] should equal those manually computed
datasetWithDenseFeature.select("features", "label")
.rdd
.map { case Row(features: DenseVector, label: Double) =>
val prediction =
features(0) * model.coefficients(0) + features(1) * model.coefficients(1) +
model.intercept
label - prediction
}
.zip(model.summary.residuals.rdd.map(_.getDouble(0)))
.collect()
.foreach { case (manualResidual: Double, resultResidual: Double) =>
assert(manualResidual ~== resultResidual relTol 1E-5)
}
/*
# Use the following R code to generate model training results.
# path/part-00000 is the file generated by running LinearDataGenerator.generateLinearInput
# as described before the beforeAll() method.
d1 <- read.csv("path/part-00000", header=FALSE, stringsAsFactors=FALSE)
fit <- glm(V1 ~ V2 + V3, data = d1, family = "gaussian")
f1 <- data.frame(as.numeric(d1$V2), as.numeric(d1$V3))
names(f1)[1] = c("V2")
names(f1)[2] = c("V3")
predictions <- predict(fit, newdata=f1)
l1 <- as.numeric(d1$V1)
residuals <- l1 - predictions
> mean(residuals^2) # MSE
[1] 0.00985449
> mean(abs(residuals)) # MAD
[1] 0.07961668
> cor(predictions, l1)^2 # r^2
[1] 0.9998737
> summary(fit)
Call:
glm(formula = V1 ~ V2 + V3, family = "gaussian", data = d1)
Deviance Residuals:
Min 1Q Median 3Q Max
-0.47082 -0.06797 0.00002 0.06725 0.34635
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 6.3022157 0.0018600 3388 <2e-16 ***
V2 4.6982442 0.0011805 3980 <2e-16 ***
V3 7.1994344 0.0009044 7961 <2e-16 ***
# R code for r2adj
lm_fit <- lm(V1 ~ V2 + V3, data = d1)
summary(lm_fit)$adj.r.squared
[1] 0.9998736
---
....
*/
assert(model.summary.meanSquaredError ~== 0.00985449 relTol 1E-4)
assert(model.summary.meanAbsoluteError ~== 0.07961668 relTol 1E-4)
assert(model.summary.r2 ~== 0.9998737 relTol 1E-4)
assert(model.summary.r2adj ~== 0.9998736 relTol 1E-4)
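// For reference, r2adj follows the usual adjusted R^2 definition (sketch):
//   r2adj = 1 - (1 - r2) * (n - 1) / (n - p - 1)
// with n = 10000 observations and p = 2 features for this dataset.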
// Normal solver uses "WeightedLeastSquares". If no regularization is applied or only L2
// regularization is applied, this algorithm uses a direct solver and does not generate an
// objective history because it does not run through iterations.
if (solver == "l-bfgs") {
// Objective function should be monotonically decreasing for linear regression
assert(
model.summary
.objectiveHistory
.sliding(2)
.forall(x => x(0) >= x(1)))
} else {
// Confirm that the normal solver was used here: the objective history has a single zero entry.
assert(model.summary.objectiveHistory.length == 1)
assert(model.summary.objectiveHistory(0) == 0.0)
val devianceResidualsR = Array(-0.47082, 0.34635)
val seCoefR = Array(0.0011805, 0.0009044, 0.0018600)
val tValsR = Array(3980, 7961, 3388)
val pValsR = Array(0, 0, 0)
model.summary.devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-4) }
model.summary.coefficientStandardErrors.zip(seCoefR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-4) }
model.summary.tValues.map(_.round).zip(tValsR).foreach{ x => assert(x._1 === x._2) }
model.summary.pValues.map(_.round).zip(pValsR).foreach{ x => assert(x._1 === x._2) }
}
}
}
test("linear regression model testset evaluation summary") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer = new LinearRegression().setSolver(solver)
val model = trainer.fit(datasetWithDenseFeature)
// Evaluating on training dataset should yield results summary equal to training summary
val testSummary = model.evaluate(datasetWithDenseFeature)
assert(model.summary.meanSquaredError ~== testSummary.meanSquaredError relTol 1E-5)
assert(model.summary.r2 ~== testSummary.r2 relTol 1E-5)
assert(model.summary.residuals.select("residuals").collect()
.zip(testSummary.residuals.select("residuals").collect())
.forall { case (Row(r1: Double), Row(r2: Double)) => r1 ~== r2 relTol 1E-5 })
}
}
test("linear regression with weighted samples") {
val sqlContext = spark.sqlContext
import sqlContext.implicits._
val numClasses = 0
def modelEquals(m1: LinearRegressionModel, m2: LinearRegressionModel): Unit = {
assert(m1.coefficients ~== m2.coefficients relTol 0.01)
assert(m1.intercept ~== m2.intercept relTol 0.01)
}
val testParams = Seq(
// (elasticNetParam, regParam, fitIntercept, standardization)
(0.0, 0.21, true, true),
(0.0, 0.21, true, false),
(0.0, 0.21, false, false),
(1.0, 0.21, true, true)
)
// For squaredError loss
for (solver <- Seq("auto", "l-bfgs", "normal");
(elasticNetParam, regParam, fitIntercept, standardization) <- testParams) {
val estimator = new LinearRegression()
.setFitIntercept(fitIntercept)
.setStandardization(standardization)
.setRegParam(regParam)
.setElasticNetParam(elasticNetParam)
.setSolver(solver)
MLTestingUtils.testArbitrarilyScaledWeights[LinearRegressionModel, LinearRegression](
datasetWithStrongNoise.as[LabeledPoint], estimator, modelEquals)
MLTestingUtils.testOutliersWithSmallWeights[LinearRegressionModel, LinearRegression](
datasetWithStrongNoise.as[LabeledPoint], estimator, numClasses, modelEquals,
outlierRatio = 3)
MLTestingUtils.testOversamplingVsWeighting[LinearRegressionModel, LinearRegression](
datasetWithStrongNoise.as[LabeledPoint], estimator, modelEquals, seed)
}
// For huber loss
for ((_, regParam, fitIntercept, standardization) <- testParams) {
val estimator = new LinearRegression()
.setLoss("huber")
.setFitIntercept(fitIntercept)
.setStandardization(standardization)
.setRegParam(regParam)
MLTestingUtils.testArbitrarilyScaledWeights[LinearRegressionModel, LinearRegression](
datasetWithOutlier.as[LabeledPoint], estimator, modelEquals)
MLTestingUtils.testOutliersWithSmallWeights[LinearRegressionModel, LinearRegression](
datasetWithOutlier.as[LabeledPoint], estimator, numClasses, modelEquals,
outlierRatio = 3)
MLTestingUtils.testOversamplingVsWeighting[LinearRegressionModel, LinearRegression](
datasetWithOutlier.as[LabeledPoint], estimator, modelEquals, seed)
}
}
test("linear regression model with l-bfgs with big feature datasets") {
val trainer = new LinearRegression().setSolver("auto")
val model = trainer.fit(datasetWithSparseFeature)
// Training results for the model should be available
assert(model.hasSummary)
// When L-BFGS is used as the optimizer ("auto" falls back to it for > 4096 features),
// an objective history is available.
assert(
model.summary
.objectiveHistory
.sliding(2)
.forall(x => x(0) >= x(1)))
}
test("linear regression summary with weighted samples and intercept by normal solver") {
/*
R code:
model <- glm(formula = "b ~ .", data = df, weights = w)
summary(model)
Call:
glm(formula = "b ~ .", data = df, weights = w)
Deviance Residuals:
1 2 3 4
1.920 -1.358 -1.109 0.960
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 18.080 9.608 1.882 0.311
V1 6.080 5.556 1.094 0.471
V2 -0.600 1.960 -0.306 0.811
(Dispersion parameter for gaussian family taken to be 7.68)
Null deviance: 202.00 on 3 degrees of freedom
Residual deviance: 7.68 on 1 degrees of freedom
AIC: 18.783
Number of Fisher Scoring iterations: 2
*/
val model = new LinearRegression()
.setWeightCol("weight")
.setSolver("normal")
.fit(datasetWithWeight)
val coefficientsR = Vectors.dense(Array(6.080, -0.600))
val interceptR = 18.080
val devianceResidualsR = Array(-1.358, 1.920)
val seCoefR = Array(5.556, 1.960, 9.608)
val tValsR = Array(1.094, -0.306, 1.882)
val pValsR = Array(0.471, 0.811, 0.311)
assert(model.coefficients ~== coefficientsR absTol 1E-3)
assert(model.intercept ~== interceptR absTol 1E-3)
model.summary.devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
val modelWithL1 = new LinearRegression()
.setWeightCol("weight")
.setSolver("normal")
.setRegParam(0.5)
.setElasticNetParam(1.0)
.fit(datasetWithWeight)
assert(modelWithL1.summary.objectiveHistory !== Array(0.0))
assert(
modelWithL1.summary
.objectiveHistory
.sliding(2)
.forall(x => x(0) >= x(1)))
}
test("linear regression summary with weighted samples and w/o intercept by normal solver") {
/*
R code:
model <- glm(formula = "b ~ . -1", data = df, weights = w)
summary(model)
Call:
glm(formula = "b ~ . -1", data = df, weights = w)
Deviance Residuals:
1 2 3 4
1.950 2.344 -4.600 2.103
Coefficients:
Estimate Std. Error t value Pr(>|t|)
V1 -3.7271 2.9032 -1.284 0.3279
V2 3.0100 0.6022 4.998 0.0378 *
---
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
(Dispersion parameter for gaussian family taken to be 17.4376)
Null deviance: 5962.000 on 4 degrees of freedom
Residual deviance: 34.875 on 2 degrees of freedom
AIC: 22.835
Number of Fisher Scoring iterations: 2
*/
val model = new LinearRegression()
.setWeightCol("weight")
.setSolver("normal")
.setFitIntercept(false)
.fit(datasetWithWeight)
val coefficientsR = Vectors.dense(Array(-3.7271, 3.0100))
val interceptR = 0.0
val devianceResidualsR = Array(-4.600, 2.344)
val seCoefR = Array(2.9032, 0.6022)
val tValsR = Array(-1.284, 4.998)
val pValsR = Array(0.3279, 0.0378)
assert(model.coefficients ~== coefficientsR absTol 1E-3)
assert(model.intercept === interceptR)
model.summary.devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
}
test("read/write") {
def checkModelData(model: LinearRegressionModel, model2: LinearRegressionModel): Unit = {
assert(model.intercept === model2.intercept)
assert(model.coefficients === model2.coefficients)
}
val lr = new LinearRegression()
testEstimatorAndModelReadWrite(lr, datasetWithWeight, LinearRegressionSuite.allParamSettings,
LinearRegressionSuite.allParamSettings, checkModelData)
}
test("pmml export") {
val lr = new LinearRegression()
val model = lr.fit(datasetWithWeight)
def checkModel(pmml: PMML): Unit = {
val dd = pmml.getDataDictionary
assert(dd.getNumberOfFields === 3)
val fields = dd.getDataFields.asScala
assert(fields(0).getName().toString === "field_0")
assert(fields(0).getOpType() == OpType.CONTINUOUS)
val pmmlRegressionModel = pmml.getModels().get(0).asInstanceOf[PMMLRegressionModel]
val pmmlPredictors = pmmlRegressionModel.getRegressionTables.get(0).getNumericPredictors
val pmmlWeights = pmmlPredictors.asScala.map(_.getCoefficient()).toList
assert(pmmlWeights(0) ~== model.coefficients(0) relTol 1E-3)
assert(pmmlWeights(1) ~== model.coefficients(1) relTol 1E-3)
}
testPMMLWrite(sc, model, checkModel)
}
test("should support all NumericType labels and weights, and not support other types") {
for (solver <- Seq("auto", "l-bfgs", "normal")) {
val lr = new LinearRegression().setMaxIter(1).setSolver(solver)
MLTestingUtils.checkNumericTypes[LinearRegressionModel, LinearRegression](
lr, spark, isClassification = false) { (expected, actual) =>
assert(expected.intercept === actual.intercept)
assert(expected.coefficients === actual.coefficients)
}
}
}
test("linear regression (huber loss) with intercept without regularization") {
val trainer1 = (new LinearRegression).setLoss("huber")
.setFitIntercept(true).setStandardization(true)
val trainer2 = (new LinearRegression).setLoss("huber")
.setFitIntercept(true).setStandardization(false)
val model1 = trainer1.fit(datasetWithOutlier)
val model2 = trainer2.fit(datasetWithOutlier)
/*
Using the following Python code to load the data and train the model using
scikit-learn package.
import pandas as pd
import numpy as np
from sklearn.linear_model import HuberRegressor
df = pd.read_csv("path", header = None)
X = df[df.columns[1:3]]
y = np.array(df[df.columns[0]])
huber = HuberRegressor(fit_intercept=True, alpha=0.0, max_iter=100, epsilon=1.35)
huber.fit(X, y)
>>> huber.coef_
array([ 4.68998007, 7.19429011])
>>> huber.intercept_
6.3002404351083037
>>> huber.scale_
0.077810159205220747
*/
val coefficientsPy = Vectors.dense(4.68998007, 7.19429011)
val interceptPy = 6.30024044
val scalePy = 0.07781016
assert(model1.coefficients ~= coefficientsPy relTol 1E-3)
assert(model1.intercept ~== interceptPy relTol 1E-3)
assert(model1.scale ~== scalePy relTol 1E-3)
// Without regularization, training with or without standardization converges to the same solution.
assert(model2.coefficients ~= coefficientsPy relTol 1E-3)
assert(model2.intercept ~== interceptPy relTol 1E-3)
assert(model2.scale ~== scalePy relTol 1E-3)
}
test("linear regression (huber loss) without intercept without regularization") {
val trainer1 = (new LinearRegression).setLoss("huber")
.setFitIntercept(false).setStandardization(true)
val trainer2 = (new LinearRegression).setLoss("huber")
.setFitIntercept(false).setStandardization(false)
val model1 = trainer1.fit(datasetWithOutlier)
val model2 = trainer2.fit(datasetWithOutlier)
/*
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100, epsilon=1.35)
huber.fit(X, y)
>>> huber.coef_
array([ 6.71756703, 5.08873222])
>>> huber.intercept_
0.0
>>> huber.scale_
2.5560209922722317
*/
val coefficientsPy = Vectors.dense(6.71756703, 5.08873222)
val interceptPy = 0.0
val scalePy = 2.55602099
assert(model1.coefficients ~= coefficientsPy relTol 1E-3)
assert(model1.intercept === interceptPy)
assert(model1.scale ~== scalePy relTol 1E-3)
// Without regularization, training with or without standardization converges to the same solution.
assert(model2.coefficients ~= coefficientsPy relTol 1E-3)
assert(model2.intercept === interceptPy)
assert(model2.scale ~== scalePy relTol 1E-3)
}
test("linear regression (huber loss) with intercept with L2 regularization") {
val trainer1 = (new LinearRegression).setLoss("huber")
.setFitIntercept(true).setRegParam(0.21).setStandardization(true)
val trainer2 = (new LinearRegression).setLoss("huber")
.setFitIntercept(true).setRegParam(0.21).setStandardization(false)
val model1 = trainer1.fit(datasetWithOutlier)
val model2 = trainer2.fit(datasetWithOutlier)
/*
Since scikit-learn HuberRegressor does not support standardization,
we do it manually outside the estimator.
xStd = np.std(X, axis=0)
scaledX = X / xStd
huber = HuberRegressor(fit_intercept=True, alpha=210, max_iter=100, epsilon=1.35)
huber.fit(scaledX, y)
>>> np.array(huber.coef_ / xStd)
array([ 1.97732633, 3.38816722])
>>> huber.intercept_
3.7527581430531227
>>> huber.scale_
3.787363673371801
*/
val coefficientsPy1 = Vectors.dense(1.97732633, 3.38816722)
val interceptPy1 = 3.75275814
val scalePy1 = 3.78736367
assert(model1.coefficients ~= coefficientsPy1 relTol 1E-2)
assert(model1.intercept ~== interceptPy1 relTol 1E-2)
assert(model1.scale ~== scalePy1 relTol 1E-2)
/*
huber = HuberRegressor(fit_intercept=True, alpha=210, max_iter=100, epsilon=1.35)
huber.fit(X, y)
>>> huber.coef_
array([ 1.73346444, 3.63746999])
>>> huber.intercept_
4.3017134790781739
>>> huber.scale_
3.6472742809286793
*/
val coefficientsPy2 = Vectors.dense(1.73346444, 3.63746999)
val interceptPy2 = 4.30171347
val scalePy2 = 3.64727428
assert(model2.coefficients ~= coefficientsPy2 relTol 1E-3)
assert(model2.intercept ~== interceptPy2 relTol 1E-3)
assert(model2.scale ~== scalePy2 relTol 1E-3)
}
test("linear regression (huber loss) without intercept with L2 regularization") {
val trainer1 = (new LinearRegression).setLoss("huber")
.setFitIntercept(false).setRegParam(0.21).setStandardization(true)
val trainer2 = (new LinearRegression).setLoss("huber")
.setFitIntercept(false).setRegParam(0.21).setStandardization(false)
val model1 = trainer1.fit(datasetWithOutlier)
val model2 = trainer2.fit(datasetWithOutlier)
/*
Since scikit-learn HuberRegressor does not support standardization,
we do it manually outside the estimator.
xStd = np.std(X, axis=0)
scaledX = X / xStd
huber = HuberRegressor(fit_intercept=False, alpha=210, max_iter=100, epsilon=1.35)
huber.fit(scaledX, y)
>>> np.array(huber.coef_ / xStd)
array([ 2.59679008, 2.26973102])
>>> huber.intercept_
0.0
>>> huber.scale_
4.5766311924091791
*/
val coefficientsPy1 = Vectors.dense(2.59679008, 2.26973102)
val interceptPy1 = 0.0
val scalePy1 = 4.57663119
assert(model1.coefficients ~= coefficientsPy1 relTol 1E-2)
assert(model1.intercept === interceptPy1)
assert(model1.scale ~== scalePy1 relTol 1E-2)
/*
huber = HuberRegressor(fit_intercept=False, alpha=210, max_iter=100, epsilon=1.35)
huber.fit(X, y)
>>> huber.coef_
array([ 2.28423908, 2.25196887])
>>> huber.intercept_
0.0
>>> huber.scale_
4.5979643506051753
*/
val coefficientsPy2 = Vectors.dense(2.28423908, 2.25196887)
val interceptPy2 = 0.0
val scalePy2 = 4.59796435
assert(model2.coefficients ~= coefficientsPy2 relTol 1E-3)
assert(model2.intercept === interceptPy2)
assert(model2.scale ~== scalePy2 relTol 1E-3)
}
test("huber loss model match squared error for large epsilon") {
val trainer1 = new LinearRegression().setLoss("huber").setEpsilon(1E5)
val model1 = trainer1.fit(datasetWithOutlier)
val trainer2 = new LinearRegression()
val model2 = trainer2.fit(datasetWithOutlier)
assert(model1.coefficients ~== model2.coefficients relTol 1E-3)
assert(model1.intercept ~== model2.intercept relTol 1E-3)
}
}
object LinearRegressionSuite {
/**
* Mapping from all Params to valid settings which differ from the defaults.
* This is useful for tests which need to exercise all Params, such as save/load.
* This excludes input columns to simplify some tests.
*/
val allParamSettings: Map[String, Any] = Map(
"predictionCol" -> "myPrediction",
"regParam" -> 0.01,
"elasticNetParam" -> 0.1,
"maxIter" -> 2, // intentionally small
"fitIntercept" -> true,
"tol" -> 0.8,
"standardization" -> false,
"solver" -> "l-bfgs"
)
}
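// Illustrative sketch (not part of the original suite): the non-default settings above could be
// folded into a ParamMap and passed to fit(), assuming a DataFrame `dataset` of (label, features):
//   val lr = new LinearRegression()
//   val paramMap = LinearRegressionSuite.allParamSettings.foldLeft(ParamMap.empty) {
//     case (pm, (name, value)) => pm.put(lr.getParam(name) -> value)
//   }
//   val model = lr.fit(dataset, paramMap)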
repo_name: santhoshkumarvs/spark
path: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileDataSourceV2.scala
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.sources.DataSourceRegister
import org.apache.spark.sql.sources.v2.TableProvider
import org.apache.spark.sql.util.CaseInsensitiveStringMap
/**
* A base interface for data source v2 implementations of the built-in file-based data sources.
*/
trait FileDataSourceV2 extends TableProvider with DataSourceRegister {
/**
* Returns a V1 [[FileFormat]] class of the same file data source.
* This is a solution for the following cases:
* 1. File data source V2 implementations cause regressions. Users can disable the problematic data
* source via SQL configuration and fall back to FileFormat.
* 2. Catalog support is required, which is still under development for data source V2.
*/
def fallbackFileFormat: Class[_ <: FileFormat]
lazy val sparkSession = SparkSession.active
protected def getPaths(map: CaseInsensitiveStringMap): Seq[String] = {
val objectMapper = new ObjectMapper()
Option(map.get("paths")).map { pathStr =>
objectMapper.readValue(pathStr, classOf[Array[String]]).toSeq
}.getOrElse {
Option(map.get("path")).toSeq
}
}
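// Illustrative note (assumption, not part of the original trait): multiple locations are expected
// as a JSON-encoded array under the "paths" option, e.g. (hypothetical call)
//   spark.read.format("parquet").option("paths", """["/data/a", "/data/b"]""").load()
// while a single location can be passed through the plain "path" option.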
protected def getTableName(paths: Seq[String]): String = {
shortName() + ":" + paths.mkString(";")
}
}
repo_name: santhoshkumarvs/spark
path: sql/core/src/main/scala/org/apache/spark/sql/execution/arrow/ArrowConverters.scala
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.arrow
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, FileInputStream, OutputStream}
import java.nio.channels.{Channels, ReadableByteChannel}
import scala.collection.JavaConverters._
import org.apache.arrow.flatbuf.MessageHeader
import org.apache.arrow.memory.BufferAllocator
import org.apache.arrow.vector._
import org.apache.arrow.vector.ipc.{ArrowStreamWriter, ReadChannel, WriteChannel}
import org.apache.arrow.vector.ipc.message.{ArrowRecordBatch, MessageSerializer}
import org.apache.spark.TaskContext
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.types._
import org.apache.spark.sql.vectorized.{ArrowColumnVector, ColumnarBatch, ColumnVector}
import org.apache.spark.util.{ByteBufferOutputStream, Utils}
/**
* Writes serialized ArrowRecordBatches to an OutputStream in the Arrow stream format.
*/
private[sql] class ArrowBatchStreamWriter(
schema: StructType,
out: OutputStream,
timeZoneId: String) {
val arrowSchema = ArrowUtils.toArrowSchema(schema, timeZoneId)
val writeChannel = new WriteChannel(Channels.newChannel(out))
// Write the Arrow schema first, before batches
MessageSerializer.serialize(writeChannel, arrowSchema)
/**
* Consume iterator to write each serialized ArrowRecordBatch to the stream.
*/
def writeBatches(arrowBatchIter: Iterator[Array[Byte]]): Unit = {
arrowBatchIter.foreach(writeChannel.write)
}
/**
* Ends the Arrow stream; does not close the underlying output stream.
*/
def end(): Unit = {
ArrowStreamWriter.writeEndOfStream(writeChannel)
}
}
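// Illustrative usage sketch (assumed inputs: a StructType `schema`, an OutputStream `out`, a
// session time zone `timeZoneId`, and an iterator of serialized batches, e.g. produced by
// ArrowConverters.toBatchIterator):
//   val writer = new ArrowBatchStreamWriter(schema, out, timeZoneId)
//   writer.writeBatches(batchIter)
//   writer.end()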
private[sql] object ArrowConverters {
/**
* Maps an iterator of InternalRow to an iterator of serialized ArrowRecordBatches. Limit the
* number of records per ArrowRecordBatch by setting maxRecordsPerBatch, or use 0 to fully
* consume rowIter into a single batch.
*/
private[sql] def toBatchIterator(
rowIter: Iterator[InternalRow],
schema: StructType,
maxRecordsPerBatch: Int,
timeZoneId: String,
context: TaskContext): Iterator[Array[Byte]] = {
val arrowSchema = ArrowUtils.toArrowSchema(schema, timeZoneId)
val allocator =
ArrowUtils.rootAllocator.newChildAllocator("toBatchIterator", 0, Long.MaxValue)
val root = VectorSchemaRoot.create(arrowSchema, allocator)
val unloader = new VectorUnloader(root)
val arrowWriter = ArrowWriter.create(root)
context.addTaskCompletionListener[Unit] { _ =>
root.close()
allocator.close()
}
new Iterator[Array[Byte]] {
override def hasNext: Boolean = rowIter.hasNext || {
root.close()
allocator.close()
false
}
override def next(): Array[Byte] = {
val out = new ByteArrayOutputStream()
val writeChannel = new WriteChannel(Channels.newChannel(out))
Utils.tryWithSafeFinally {
var rowCount = 0
while (rowIter.hasNext && (maxRecordsPerBatch <= 0 || rowCount < maxRecordsPerBatch)) {
val row = rowIter.next()
arrowWriter.write(row)
rowCount += 1
}
arrowWriter.finish()
val batch = unloader.getRecordBatch()
MessageSerializer.serialize(writeChannel, batch)
batch.close()
} {
arrowWriter.reset()
}
out.toByteArray
}
}
}
/**
* Maps iterator from serialized ArrowRecordBatches to InternalRows.
*/
private[sql] def fromBatchIterator(
arrowBatchIter: Iterator[Array[Byte]],
schema: StructType,
timeZoneId: String,
context: TaskContext): Iterator[InternalRow] = {
val allocator =
ArrowUtils.rootAllocator.newChildAllocator("fromBatchIterator", 0, Long.MaxValue)
val arrowSchema = ArrowUtils.toArrowSchema(schema, timeZoneId)
val root = VectorSchemaRoot.create(arrowSchema, allocator)
new Iterator[InternalRow] {
private var rowIter = if (arrowBatchIter.hasNext) nextBatch() else Iterator.empty
context.addTaskCompletionListener[Unit] { _ =>
root.close()
allocator.close()
}
override def hasNext: Boolean = rowIter.hasNext || {
if (arrowBatchIter.hasNext) {
rowIter = nextBatch()
true
} else {
root.close()
allocator.close()
false
}
}
override def next(): InternalRow = rowIter.next()
private def nextBatch(): Iterator[InternalRow] = {
val arrowRecordBatch = ArrowConverters.loadBatch(arrowBatchIter.next(), allocator)
val vectorLoader = new VectorLoader(root)
vectorLoader.load(arrowRecordBatch)
arrowRecordBatch.close()
val columns = root.getFieldVectors.asScala.map { vector =>
new ArrowColumnVector(vector).asInstanceOf[ColumnVector]
}.toArray
val batch = new ColumnarBatch(columns)
batch.setNumRows(root.getRowCount)
batch.rowIterator().asScala
}
}
}
/**
* Load a serialized ArrowRecordBatch.
*/
private[arrow] def loadBatch(
batchBytes: Array[Byte],
allocator: BufferAllocator): ArrowRecordBatch = {
val in = new ByteArrayInputStream(batchBytes)
MessageSerializer.deserializeRecordBatch(
new ReadChannel(Channels.newChannel(in)), allocator) // throws IOException
}
/**
* Create a DataFrame from an RDD of serialized ArrowRecordBatches.
*/
private[sql] def toDataFrame(
arrowBatchRDD: JavaRDD[Array[Byte]],
schemaString: String,
sqlContext: SQLContext): DataFrame = {
val schema = DataType.fromJson(schemaString).asInstanceOf[StructType]
val timeZoneId = sqlContext.sessionState.conf.sessionLocalTimeZone
val rdd = arrowBatchRDD.rdd.mapPartitions { iter =>
val context = TaskContext.get()
ArrowConverters.fromBatchIterator(iter, schema, timeZoneId, context)
}
sqlContext.internalCreateDataFrame(rdd.setName("arrow"), schema)
}
/**
* Read a file as an Arrow stream and parallelize as an RDD of serialized ArrowRecordBatches.
*/
private[sql] def readArrowStreamFromFile(
sqlContext: SQLContext,
filename: String): JavaRDD[Array[Byte]] = {
Utils.tryWithResource(new FileInputStream(filename)) { fileStream =>
// Create array to consume iterator so that we can safely close the file
val batches = getBatchesFromStream(fileStream.getChannel).toArray
// Parallelize the record batches to create an RDD
JavaRDD.fromRDD(sqlContext.sparkContext.parallelize(batches, batches.length))
}
}
/**
* Read an Arrow stream input and return an iterator of serialized ArrowRecordBatches.
*/
private[sql] def getBatchesFromStream(in: ReadableByteChannel): Iterator[Array[Byte]] = {
// Iterate over the serialized Arrow RecordBatch messages from a stream
new Iterator[Array[Byte]] {
var batch: Array[Byte] = readNextBatch()
override def hasNext: Boolean = batch != null
override def next(): Array[Byte] = {
val prevBatch = batch
batch = readNextBatch()
prevBatch
}
// This gets the next serialized ArrowRecordBatch by reading the message metadata to check if it
// is a RecordBatch message, and then returns the complete serialized message, which consists
// of an int32 length, the serialized message metadata and a serialized RecordBatch message body.
def readNextBatch(): Array[Byte] = {
val msgMetadata = MessageSerializer.readMessage(new ReadChannel(in))
if (msgMetadata == null) {
return null
}
// Get the length of the body, which has not been read at this point
val bodyLength = msgMetadata.getMessageBodyLength.toInt
// Only care about RecordBatch messages, skip Schema and unsupported Dictionary messages
if (msgMetadata.getMessage.headerType() == MessageHeader.RecordBatch) {
// Buffer backed output large enough to hold the complete serialized message
val bbout = new ByteBufferOutputStream(4 + msgMetadata.getMessageLength + bodyLength)
// Write message metadata to ByteBuffer output stream
MessageSerializer.writeMessageBuffer(
new WriteChannel(Channels.newChannel(bbout)),
msgMetadata.getMessageLength,
msgMetadata.getMessageBuffer)
// Get a zero-copy ByteBuffer that already contains the message metadata; the stream must be closed first
bbout.close()
val bb = bbout.toByteBuffer
bb.position(bbout.getCount())
// Read the message body directly into the ByteBuffer to avoid a copy, and return the backing byte array
bb.limit(bb.capacity())
JavaUtils.readFully(in, bb)
bb.array()
} else {
if (bodyLength > 0) {
// Skip message body if not a RecordBatch
Channels.newInputStream(in).skip(bodyLength)
}
// Proceed to next message
readNextBatch()
}
}
}
}
}
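// Illustrative round-trip sketch (assumptions: code living under org.apache.spark.sql, an active
// SQLContext `sqlContext`, a JSON schema string `schemaJson`, and a hypothetical file
// "batches.arrow" written in the Arrow stream format):
//   val batchRdd = ArrowConverters.readArrowStreamFromFile(sqlContext, "batches.arrow")
//   val df = ArrowConverters.toDataFrame(batchRdd, schemaJson, sqlContext)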
repo_name: santhoshkumarvs/spark
path: core/src/test/scala/org/apache/spark/scheduler/CoarseGrainedSchedulerBackendSuite.scala
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.concurrent.atomic.AtomicBoolean
import scala.concurrent.duration._
import org.scalatest.concurrent.Eventually
import org.scalatest.mockito.MockitoSugar._
import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkException, SparkFunSuite}
import org.apache.spark.internal.config.{CPUS_PER_TASK, UI}
import org.apache.spark.internal.config.Network.RPC_MESSAGE_MAX_SIZE
import org.apache.spark.rdd.RDD
import org.apache.spark.rpc.{RpcAddress, RpcEndpointRef}
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.RegisterExecutor
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend
import org.apache.spark.util.{RpcUtils, SerializableBuffer}
class CoarseGrainedSchedulerBackendSuite extends SparkFunSuite with LocalSparkContext
with Eventually {
private val executorUpTimeout = 60.seconds
test("serialized task larger than max RPC message size") {
val conf = new SparkConf
conf.set(RPC_MESSAGE_MAX_SIZE, 1)
conf.set("spark.default.parallelism", "1")
sc = new SparkContext("local-cluster[2, 1, 1024]", "test", conf)
val frameSize = RpcUtils.maxMessageSizeBytes(sc.conf)
val buffer = new SerializableBuffer(java.nio.ByteBuffer.allocate(2 * frameSize))
val larger = sc.parallelize(Seq(buffer))
val thrown = intercept[SparkException] {
larger.collect()
}
assert(thrown.getMessage.contains("using broadcast variables for large values"))
val smaller = sc.parallelize(1 to 4).collect()
assert(smaller.size === 4)
}
test("compute max number of concurrent tasks can be launched") {
val conf = new SparkConf()
.setMaster("local-cluster[4, 3, 1024]")
.setAppName("test")
sc = new SparkContext(conf)
eventually(timeout(executorUpTimeout)) {
// Ensure all executors have been launched.
assert(sc.getExecutorIds().length == 4)
}
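    // 4 executors * 3 cores each, with the default of 1 CPU per task, gives 12 concurrent tasks.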
assert(sc.maxNumConcurrentTasks() == 12)
}
test("compute max number of concurrent tasks can be launched when spark.task.cpus > 1") {
val conf = new SparkConf()
.set(CPUS_PER_TASK, 2)
.setMaster("local-cluster[4, 3, 1024]")
.setAppName("test")
sc = new SparkContext(conf)
eventually(timeout(executorUpTimeout)) {
// Ensure all executors have been launched.
assert(sc.getExecutorIds().length == 4)
}
// Each executor can only launch one task since `spark.task.cpus` is 2.
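    // i.e. 4 executors * floor(3 cores / 2 CPUs per task) = 4 concurrent tasks.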
assert(sc.maxNumConcurrentTasks() == 4)
}
test("compute max number of concurrent tasks can be launched when some executors are busy") {
val conf = new SparkConf()
.set(CPUS_PER_TASK, 2)
.setMaster("local-cluster[4, 3, 1024]")
.setAppName("test")
sc = new SparkContext(conf)
val rdd = sc.parallelize(1 to 10, 4).mapPartitions { iter =>
Thread.sleep(5000)
iter
}
    val taskStarted = new AtomicBoolean(false)
    val taskEnded = new AtomicBoolean(false)
val listener = new SparkListener() {
override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = {
taskStarted.set(true)
}
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
taskEnded.set(true)
}
}
try {
sc.addSparkListener(listener)
eventually(timeout(executorUpTimeout)) {
// Ensure all executors have been launched.
assert(sc.getExecutorIds().length == 4)
}
// Submit a job to trigger some tasks on active executors.
testSubmitJob(sc, rdd)
eventually(timeout(10.seconds)) {
// Ensure some tasks have started and no task finished, so some executors must be busy.
assert(taskStarted.get())
      assert(!taskEnded.get())
      // Assert that we count slots on both busy and free executors.
assert(sc.maxNumConcurrentTasks() == 4)
}
} finally {
sc.removeSparkListener(listener)
}
}
  // Here we just test one happy case instead of all cases; the other cases are covered in
  // FsHistoryProviderSuite.
test("custom log url for Spark UI is applied") {
val customExecutorLogUrl = "http://newhost:9999/logs/clusters/{{CLUSTER_ID}}/users/{{USER}}" +
"/containers/{{CONTAINER_ID}}/{{FILE_NAME}}"
val conf = new SparkConf()
.set(UI.CUSTOM_EXECUTOR_LOG_URL, customExecutorLogUrl)
.setMaster("local-cluster[0, 3, 1024]")
.setAppName("test")
sc = new SparkContext(conf)
val backend = sc.schedulerBackend.asInstanceOf[CoarseGrainedSchedulerBackend]
val mockEndpointRef = mock[RpcEndpointRef]
val mockAddress = mock[RpcAddress]
val logUrls = Map(
"stdout" -> "http://oldhost:8888/logs/dummy/stdout",
"stderr" -> "http://oldhost:8888/logs/dummy/stderr")
val attributes = Map(
"CLUSTER_ID" -> "cl1",
"USER" -> "dummy",
"CONTAINER_ID" -> "container1",
"LOG_FILES" -> "stdout,stderr")
val baseUrl = s"http://newhost:9999/logs/clusters/${attributes("CLUSTER_ID")}" +
s"/users/${attributes("USER")}/containers/${attributes("CONTAINER_ID")}"
var executorAddedCount: Int = 0
val listener = new SparkListener() {
override def onExecutorAdded(executorAdded: SparkListenerExecutorAdded): Unit = {
executorAddedCount += 1
assert(executorAdded.executorInfo.logUrlMap === Seq("stdout", "stderr").map { file =>
file -> (baseUrl + s"/$file")
}.toMap)
}
}
sc.addSparkListener(listener)
backend.driverEndpoint.askSync[Boolean](
RegisterExecutor("1", mockEndpointRef, mockAddress.host, 1, logUrls, attributes))
backend.driverEndpoint.askSync[Boolean](
RegisterExecutor("2", mockEndpointRef, mockAddress.host, 1, logUrls, attributes))
backend.driverEndpoint.askSync[Boolean](
RegisterExecutor("3", mockEndpointRef, mockAddress.host, 1, logUrls, attributes))
sc.listenerBus.waitUntilEmpty(executorUpTimeout.toMillis)
assert(executorAddedCount === 3)
}
private def testSubmitJob(sc: SparkContext, rdd: RDD[Int]): Unit = {
sc.submitJob(
rdd,
(iter: Iterator[Int]) => iter.toArray,
0 until rdd.partitions.length,
{ case (_, _) => return }: (Int, Array[Int]) => Unit,
{ return }
)
}
}
|
santhoshkumarvs/spark
|
resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/BasicDriverFeatureStep.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.features
import scala.collection.JavaConverters._
import scala.collection.mutable
import io.fabric8.kubernetes.api.model._
import org.apache.spark.SparkException
import org.apache.spark.deploy.k8s._
import org.apache.spark.deploy.k8s.Config._
import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.deploy.k8s.submit._
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.UI._
import org.apache.spark.ui.SparkUI
import org.apache.spark.util.Utils
private[spark] class BasicDriverFeatureStep(conf: KubernetesDriverConf)
extends KubernetesFeatureConfigStep {
private val driverPodName = conf
.get(KUBERNETES_DRIVER_POD_NAME)
.getOrElse(s"${conf.resourceNamePrefix}-driver")
private val driverContainerImage = conf
.get(DRIVER_CONTAINER_IMAGE)
.getOrElse(throw new SparkException("Must specify the driver container image"))
// CPU settings
private val driverCpuCores = conf.get(DRIVER_CORES.key, "1")
private val driverLimitCores = conf.get(KUBERNETES_DRIVER_LIMIT_CORES)
// Memory settings
private val driverMemoryMiB = conf.get(DRIVER_MEMORY)
// The memory overhead factor to use. If the user has not set it, then use a different
// value for non-JVM apps. This value is propagated to executors.
private val overheadFactor =
if (conf.mainAppResource.isInstanceOf[NonJVMResource]) {
if (conf.contains(MEMORY_OVERHEAD_FACTOR)) {
conf.get(MEMORY_OVERHEAD_FACTOR)
} else {
NON_JVM_MEMORY_OVERHEAD_FACTOR
}
} else {
conf.get(MEMORY_OVERHEAD_FACTOR)
}
private val memoryOverheadMiB = conf
.get(DRIVER_MEMORY_OVERHEAD)
.getOrElse(math.max((overheadFactor * driverMemoryMiB).toInt, MEMORY_OVERHEAD_MIN_MIB))
private val driverMemoryWithOverheadMiB = driverMemoryMiB + memoryOverheadMiB
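  // Worked example (illustrative values, not defaults taken from this file): with
  // driverMemoryMiB = 1024 and overheadFactor = 0.1, memoryOverheadMiB becomes
  // max(102, MEMORY_OVERHEAD_MIN_MIB), and the pod requests
  // driverMemoryMiB + memoryOverheadMiB ("...Mi") as its memory quantity below.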
override def configurePod(pod: SparkPod): SparkPod = {
val driverCustomEnvs = conf.environment.toSeq
.map { env =>
new EnvVarBuilder()
.withName(env._1)
.withValue(env._2)
.build()
}
val driverCpuQuantity = new QuantityBuilder(false)
.withAmount(driverCpuCores)
.build()
val driverMemoryQuantity = new QuantityBuilder(false)
.withAmount(s"${driverMemoryWithOverheadMiB}Mi")
.build()
val maybeCpuLimitQuantity = driverLimitCores.map { limitCores =>
("cpu", new QuantityBuilder(false).withAmount(limitCores).build())
}
val driverPort = conf.sparkConf.getInt(DRIVER_PORT.key, DEFAULT_DRIVER_PORT)
val driverBlockManagerPort = conf.sparkConf.getInt(
DRIVER_BLOCK_MANAGER_PORT.key,
DEFAULT_BLOCKMANAGER_PORT
)
val driverUIPort = SparkUI.getUIPort(conf.sparkConf)
val driverContainer = new ContainerBuilder(pod.container)
.withName(Option(pod.container.getName).getOrElse(DEFAULT_DRIVER_CONTAINER_NAME))
.withImage(driverContainerImage)
.withImagePullPolicy(conf.imagePullPolicy)
.addNewPort()
.withName(DRIVER_PORT_NAME)
.withContainerPort(driverPort)
.withProtocol("TCP")
.endPort()
.addNewPort()
.withName(BLOCK_MANAGER_PORT_NAME)
.withContainerPort(driverBlockManagerPort)
.withProtocol("TCP")
.endPort()
.addNewPort()
.withName(UI_PORT_NAME)
.withContainerPort(driverUIPort)
.withProtocol("TCP")
.endPort()
.addNewEnv()
.withName(ENV_SPARK_USER)
.withValue(Utils.getCurrentUserName())
.endEnv()
.addAllToEnv(driverCustomEnvs.asJava)
.addNewEnv()
.withName(ENV_DRIVER_BIND_ADDRESS)
.withValueFrom(new EnvVarSourceBuilder()
.withNewFieldRef("v1", "status.podIP")
.build())
.endEnv()
.editOrNewResources()
.addToRequests("cpu", driverCpuQuantity)
.addToLimits(maybeCpuLimitQuantity.toMap.asJava)
.addToRequests("memory", driverMemoryQuantity)
.addToLimits("memory", driverMemoryQuantity)
.endResources()
.build()
val driverPod = new PodBuilder(pod.pod)
.editOrNewMetadata()
.withName(driverPodName)
.addToLabels(conf.labels.asJava)
.addToAnnotations(conf.annotations.asJava)
.endMetadata()
.editOrNewSpec()
.withRestartPolicy("Never")
.addToNodeSelector(conf.nodeSelector.asJava)
.addToImagePullSecrets(conf.imagePullSecrets: _*)
.endSpec()
.build()
SparkPod(driverPod, driverContainer)
}
override def getAdditionalPodSystemProperties(): Map[String, String] = {
val additionalProps = mutable.Map(
KUBERNETES_DRIVER_POD_NAME.key -> driverPodName,
"spark.app.id" -> conf.appId,
KUBERNETES_EXECUTOR_POD_NAME_PREFIX.key -> conf.resourceNamePrefix,
KUBERNETES_DRIVER_SUBMIT_CHECK.key -> "true",
MEMORY_OVERHEAD_FACTOR.key -> overheadFactor.toString)
additionalProps.toMap
}
}
|
santhoshkumarvs/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/TextSocketSourceProvider.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.sources
import java.text.SimpleDateFormat
import java.util
import java.util.{Collections, Locale}
import scala.util.{Failure, Success, Try}
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.execution.streaming.continuous.TextSocketContinuousStream
import org.apache.spark.sql.sources.DataSourceRegister
import org.apache.spark.sql.sources.v2._
import org.apache.spark.sql.sources.v2.reader.{Scan, ScanBuilder}
import org.apache.spark.sql.sources.v2.reader.streaming.{ContinuousStream, MicroBatchStream}
import org.apache.spark.sql.types.{StringType, StructField, StructType, TimestampType}
import org.apache.spark.sql.util.CaseInsensitiveStringMap
class TextSocketSourceProvider extends TableProvider with DataSourceRegister with Logging {
private def checkParameters(params: CaseInsensitiveStringMap): Unit = {
logWarning("The socket source should not be used for production applications! " +
"It does not support recovery.")
if (!params.containsKey("host")) {
throw new AnalysisException("Set a host to read from with option(\"host\", ...).")
}
if (!params.containsKey("port")) {
throw new AnalysisException("Set a port to read from with option(\"port\", ...).")
}
Try {
params.getBoolean("includeTimestamp", false)
} match {
case Success(_) =>
case Failure(_) =>
throw new AnalysisException("includeTimestamp must be set to either \"true\" or \"false\"")
}
}
override def getTable(options: CaseInsensitiveStringMap): Table = {
checkParameters(options)
new TextSocketTable(
options.get("host"),
options.getInt("port", -1),
options.getInt("numPartitions", SparkSession.active.sparkContext.defaultParallelism),
options.getBoolean("includeTimestamp", false))
}
/** String that represents the format that this data source provider uses. */
override def shortName(): String = "socket"
}
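// Usage sketch (standard DataStreamReader API; host and port values are illustrative):
//   spark.readStream.format("socket")
//     .option("host", "localhost")
//     .option("port", "9999")
//     .load()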
class TextSocketTable(host: String, port: Int, numPartitions: Int, includeTimestamp: Boolean)
extends Table with SupportsMicroBatchRead with SupportsContinuousRead {
override def name(): String = s"Socket[$host:$port]"
override def schema(): StructType = {
if (includeTimestamp) {
TextSocketReader.SCHEMA_TIMESTAMP
} else {
TextSocketReader.SCHEMA_REGULAR
}
}
override def capabilities(): util.Set[TableCapability] = Collections.emptySet()
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = () => new Scan {
override def readSchema(): StructType = schema()
override def toMicroBatchStream(checkpointLocation: String): MicroBatchStream = {
new TextSocketMicroBatchStream(host, port, numPartitions)
}
override def toContinuousStream(checkpointLocation: String): ContinuousStream = {
new TextSocketContinuousStream(host, port, numPartitions, options)
}
}
}
object TextSocketReader {
val SCHEMA_REGULAR = StructType(StructField("value", StringType) :: Nil)
val SCHEMA_TIMESTAMP = StructType(StructField("value", StringType) ::
StructField("timestamp", TimestampType) :: Nil)
val DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.US)
}
|
santhoshkumarvs/spark
|
streaming/src/test/scala/org/apache/spark/streaming/UISeleniumSuite.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import scala.collection.mutable.Queue
import org.openqa.selenium.WebDriver
import org.openqa.selenium.htmlunit.HtmlUnitDriver
import org.scalatest._
import org.scalatest.concurrent.Eventually._
import org.scalatest.selenium.WebBrowser
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.internal.config.UI.UI_ENABLED
import org.apache.spark.ui.SparkUICssErrorHandler
/**
* Selenium tests for the Spark Streaming Web UI.
*/
class UISeleniumSuite
extends SparkFunSuite with WebBrowser with Matchers with BeforeAndAfterAll with TestSuiteBase {
implicit var webDriver: WebDriver = _
override def beforeAll(): Unit = {
super.beforeAll()
webDriver = new HtmlUnitDriver {
getWebClient.setCssErrorHandler(new SparkUICssErrorHandler)
}
}
override def afterAll(): Unit = {
try {
if (webDriver != null) {
webDriver.quit()
}
} finally {
super.afterAll()
}
}
/**
* Create a test SparkStreamingContext with the SparkUI enabled.
*/
private def newSparkStreamingContext(): StreamingContext = {
val conf = new SparkConf()
.setMaster("local")
.setAppName("test")
.set(UI_ENABLED, true)
val ssc = new StreamingContext(conf, Seconds(1))
assert(ssc.sc.ui.isDefined, "Spark UI is not started!")
ssc
}
private def setupStreams(ssc: StreamingContext): Unit = {
val rdds = Queue(ssc.sc.parallelize(1 to 4, 4))
val inputStream = ssc.queueStream(rdds)
inputStream.foreachRDD { rdd =>
rdd.foreach(_ => {})
rdd.foreach(_ => {})
}
inputStream.foreachRDD { rdd =>
rdd.foreach(_ => {})
try {
rdd.foreach { _ =>
          // Fail only the task whose attempt id is a multiple of 15, to ensure only one task fails
if (TaskContext.get.taskAttemptId() % 15 == 0) {
throw new RuntimeException("Oops")
}
}
} catch {
case e: SparkException if e.getMessage.contains("Oops") =>
}
}
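    // Net effect: two output ops, each running two Spark jobs per batch (job ids 0-3), which is
    // what the batch-page assertions below rely on.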
}
test("attaching and detaching a Streaming tab") {
withStreamingContext(newSparkStreamingContext()) { ssc =>
setupStreams(ssc)
ssc.start()
val sparkUI = ssc.sparkContext.ui.get
eventually(timeout(10 seconds), interval(50 milliseconds)) {
go to (sparkUI.webUrl.stripSuffix("/"))
find(cssSelector( """ul li a[href*="streaming"]""")) should not be (None)
}
eventually(timeout(10 seconds), interval(50 milliseconds)) {
// check whether streaming page exists
go to (sparkUI.webUrl.stripSuffix("/") + "/streaming")
val h3Text = findAll(cssSelector("h3")).map(_.text).toSeq
h3Text should contain("Streaming Statistics")
// Check stat table
val statTableHeaders = findAll(cssSelector("#stat-table th")).map(_.text).toSeq
statTableHeaders.exists(
_.matches("Timelines \\(Last \\d+ batches, \\d+ active, \\d+ completed\\)")
) should be (true)
statTableHeaders should contain ("Histograms")
val statTableCells = findAll(cssSelector("#stat-table td")).map(_.text).toSeq
statTableCells.exists(_.contains("Input Rate")) should be (true)
statTableCells.exists(_.contains("Scheduling Delay")) should be (true)
statTableCells.exists(_.contains("Processing Time")) should be (true)
statTableCells.exists(_.contains("Total Delay")) should be (true)
// Check batch tables
val h4Text = findAll(cssSelector("h4")).map(_.text).toSeq
h4Text.exists(_.matches("Active Batches \\(\\d+\\)")) should be (true)
h4Text.exists(_.matches("Completed Batches \\(last \\d+ out of \\d+\\)")) should be (true)
findAll(cssSelector("""#active-batches-table th""")).map(_.text).toSeq should be {
List("Batch Time", "Records", "Scheduling Delay (?)", "Processing Time (?)",
"Output Ops: Succeeded/Total", "Status")
}
findAll(cssSelector("""#completed-batches-table th""")).map(_.text).toSeq should be {
List("Batch Time", "Records", "Scheduling Delay (?)", "Processing Time (?)",
"Total Delay (?)", "Output Ops: Succeeded/Total")
}
val batchLinks =
findAll(cssSelector("""#completed-batches-table a""")).flatMap(_.attribute("href")).toSeq
batchLinks.size should be >= 1
// Check a normal batch page
go to (batchLinks.last) // Last should be the first batch, so it will have some jobs
val summaryText = findAll(cssSelector("li strong")).map(_.text).toSeq
summaryText should contain ("Batch Duration:")
summaryText should contain ("Input data size:")
summaryText should contain ("Scheduling delay:")
summaryText should contain ("Processing time:")
summaryText should contain ("Total delay:")
findAll(cssSelector("""#batch-job-table th""")).map(_.text).toSeq should be {
List("Output Op Id", "Description", "Output Op Duration", "Status", "Job Id",
"Job Duration", "Stages: Succeeded/Total", "Tasks (for all stages): Succeeded/Total",
"Error")
}
// Check we have 2 output op ids
val outputOpIds = findAll(cssSelector(".output-op-id-cell")).toSeq
outputOpIds.map(_.attribute("rowspan")) should be (List(Some("2"), Some("2")))
outputOpIds.map(_.text) should be (List("0", "1"))
// Check job ids
val jobIdCells = findAll(cssSelector( """#batch-job-table a""")).toSeq
jobIdCells.map(_.text) should be (List("0", "1", "2", "3"))
val jobLinks = jobIdCells.flatMap(_.attribute("href"))
jobLinks.size should be (4)
// Check stage progress
findAll(cssSelector(""".stage-progress-cell""")).map(_.text).toList should be (
List("1/1", "1/1", "1/1", "0/1 (1 failed)"))
// Check job progress
findAll(cssSelector(""".progress-cell""")).map(_.text).toList should be (
List("4/4", "4/4", "4/4", "3/4 (1 failed)"))
// Check stacktrace
val errorCells = findAll(cssSelector(""".stacktrace-details""")).map(_.underlying).toSeq
errorCells should have size 1
// Can't get the inner (invisible) text without running JS
// Check the job link in the batch page is right
go to (jobLinks(0))
val jobDetails = findAll(cssSelector("li strong")).map(_.text).toSeq
jobDetails should contain("Status:")
jobDetails should contain("Completed Stages:")
// Check a batch page without id
go to (sparkUI.webUrl.stripSuffix("/") + "/streaming/batch/")
webDriver.getPageSource should include ("Missing id parameter")
// Check a non-exist batch
go to (sparkUI.webUrl.stripSuffix("/") + "/streaming/batch/?id=12345")
webDriver.getPageSource should include ("does not exist")
}
ssc.stop(false)
eventually(timeout(10 seconds), interval(50 milliseconds)) {
go to (sparkUI.webUrl.stripSuffix("/"))
find(cssSelector( """ul li a[href*="streaming"]""")) should be(None)
}
eventually(timeout(10 seconds), interval(50 milliseconds)) {
go to (sparkUI.webUrl.stripSuffix("/") + "/streaming")
val h3Text = findAll(cssSelector("h3")).map(_.text).toSeq
h3Text should not contain("Streaming Statistics")
}
}
}
}
|
santhoshkumarvs/spark
|
resource-managers/yarn/src/test/scala/org/apache/spark/network/yarn/YarnShuffleServiceSuite.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.yarn
import java.io.{DataOutputStream, File, FileOutputStream, IOException}
import java.nio.ByteBuffer
import java.nio.file.Files
import java.nio.file.attribute.PosixFilePermission._
import java.util.EnumSet
import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.language.postfixOps
import org.apache.hadoop.fs.Path
import org.apache.hadoop.service.ServiceStateException
import org.apache.hadoop.yarn.api.records.ApplicationId
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.hadoop.yarn.server.api.{ApplicationInitializationContext, ApplicationTerminationContext}
import org.scalatest.{BeforeAndAfterEach, Matchers}
import org.scalatest.concurrent.Eventually._
import org.apache.spark.SecurityManager
import org.apache.spark.SparkFunSuite
import org.apache.spark.internal.config._
import org.apache.spark.network.shuffle.ShuffleTestAccessor
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo
import org.apache.spark.util.Utils
class YarnShuffleServiceSuite extends SparkFunSuite with Matchers with BeforeAndAfterEach {
private[yarn] var yarnConfig: YarnConfiguration = null
private[yarn] val SORT_MANAGER = "org.apache.spark.shuffle.sort.SortShuffleManager"
private var recoveryLocalDir: File = _
override def beforeEach(): Unit = {
super.beforeEach()
yarnConfig = new YarnConfiguration()
yarnConfig.set(YarnConfiguration.NM_AUX_SERVICES, "spark_shuffle")
yarnConfig.set(YarnConfiguration.NM_AUX_SERVICE_FMT.format("spark_shuffle"),
classOf[YarnShuffleService].getCanonicalName)
yarnConfig.setInt(SHUFFLE_SERVICE_PORT.key, 0)
yarnConfig.setBoolean(YarnShuffleService.STOP_ON_FAILURE_KEY, true)
val localDir = Utils.createTempDir()
yarnConfig.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath)
recoveryLocalDir = Utils.createTempDir()
}
var s1: YarnShuffleService = null
var s2: YarnShuffleService = null
var s3: YarnShuffleService = null
override def afterEach(): Unit = {
try {
if (s1 != null) {
s1.stop()
s1 = null
}
if (s2 != null) {
s2.stop()
s2 = null
}
if (s3 != null) {
s3.stop()
s3 = null
}
} finally {
super.afterEach()
}
}
test("executor state kept across NM restart") {
s1 = new YarnShuffleService
s1.setRecoveryPath(new Path(recoveryLocalDir.toURI))
// set auth to true to test the secrets recovery
yarnConfig.setBoolean(SecurityManager.SPARK_AUTH_CONF, true)
s1.init(yarnConfig)
val app1Id = ApplicationId.newInstance(0, 1)
val app1Data = makeAppInfo("user", app1Id)
s1.initializeApplication(app1Data)
val app2Id = ApplicationId.newInstance(0, 2)
val app2Data = makeAppInfo("user", app2Id)
s1.initializeApplication(app2Data)
val execStateFile = s1.registeredExecutorFile
execStateFile should not be (null)
val secretsFile = s1.secretsFile
secretsFile should not be (null)
val shuffleInfo1 = new ExecutorShuffleInfo(Array("/foo", "/bar"), 3, SORT_MANAGER)
val shuffleInfo2 = new ExecutorShuffleInfo(Array("/bippy"), 5, SORT_MANAGER)
val blockHandler = s1.blockHandler
val blockResolver = ShuffleTestAccessor.getBlockResolver(blockHandler)
ShuffleTestAccessor.registeredExecutorFile(blockResolver) should be (execStateFile)
blockResolver.registerExecutor(app1Id.toString, "exec-1", shuffleInfo1)
blockResolver.registerExecutor(app2Id.toString, "exec-2", shuffleInfo2)
ShuffleTestAccessor.getExecutorInfo(app1Id, "exec-1", blockResolver) should
be (Some(shuffleInfo1))
ShuffleTestAccessor.getExecutorInfo(app2Id, "exec-2", blockResolver) should
be (Some(shuffleInfo2))
if (!execStateFile.exists()) {
@tailrec def findExistingParent(file: File): File = {
if (file == null) file
else if (file.exists()) file
else findExistingParent(file.getParentFile())
}
val existingParent = findExistingParent(execStateFile)
assert(false, s"$execStateFile does not exist -- closest existing parent is $existingParent")
}
assert(execStateFile.exists(), s"$execStateFile did not exist")
// now we pretend the shuffle service goes down, and comes back up
s1.stop()
s2 = new YarnShuffleService
s2.setRecoveryPath(new Path(recoveryLocalDir.toURI))
s2.init(yarnConfig)
s2.secretsFile should be (secretsFile)
s2.registeredExecutorFile should be (execStateFile)
val handler2 = s2.blockHandler
val resolver2 = ShuffleTestAccessor.getBlockResolver(handler2)
// now we reinitialize only one of the apps, and expect yarn to tell us that app2 was stopped
// during the restart
s2.initializeApplication(app1Data)
s2.stopApplication(new ApplicationTerminationContext(app2Id))
ShuffleTestAccessor.getExecutorInfo(app1Id, "exec-1", resolver2) should be (Some(shuffleInfo1))
ShuffleTestAccessor.getExecutorInfo(app2Id, "exec-2", resolver2) should be (None)
// Act like the NM restarts one more time
s2.stop()
s3 = new YarnShuffleService
s3.setRecoveryPath(new Path(recoveryLocalDir.toURI))
s3.init(yarnConfig)
s3.registeredExecutorFile should be (execStateFile)
s3.secretsFile should be (secretsFile)
val handler3 = s3.blockHandler
val resolver3 = ShuffleTestAccessor.getBlockResolver(handler3)
// app1 is still running
s3.initializeApplication(app1Data)
ShuffleTestAccessor.getExecutorInfo(app1Id, "exec-1", resolver3) should be (Some(shuffleInfo1))
ShuffleTestAccessor.getExecutorInfo(app2Id, "exec-2", resolver3) should be (None)
s3.stop()
}
test("removed applications should not be in registered executor file") {
s1 = new YarnShuffleService
s1.setRecoveryPath(new Path(recoveryLocalDir.toURI))
yarnConfig.setBoolean(SecurityManager.SPARK_AUTH_CONF, false)
s1.init(yarnConfig)
val secretsFile = s1.secretsFile
secretsFile should be (null)
val app1Id = ApplicationId.newInstance(0, 1)
val app1Data = makeAppInfo("user", app1Id)
s1.initializeApplication(app1Data)
val app2Id = ApplicationId.newInstance(0, 2)
val app2Data = makeAppInfo("user", app2Id)
s1.initializeApplication(app2Data)
val execStateFile = s1.registeredExecutorFile
execStateFile should not be (null)
val shuffleInfo1 = new ExecutorShuffleInfo(Array("/foo", "/bar"), 3, SORT_MANAGER)
val shuffleInfo2 = new ExecutorShuffleInfo(Array("/bippy"), 5, SORT_MANAGER)
val blockHandler = s1.blockHandler
val blockResolver = ShuffleTestAccessor.getBlockResolver(blockHandler)
ShuffleTestAccessor.registeredExecutorFile(blockResolver) should be (execStateFile)
blockResolver.registerExecutor(app1Id.toString, "exec-1", shuffleInfo1)
blockResolver.registerExecutor(app2Id.toString, "exec-2", shuffleInfo2)
val db = ShuffleTestAccessor.shuffleServiceLevelDB(blockResolver)
ShuffleTestAccessor.reloadRegisteredExecutors(db) should not be empty
s1.stopApplication(new ApplicationTerminationContext(app1Id))
ShuffleTestAccessor.reloadRegisteredExecutors(db) should not be empty
s1.stopApplication(new ApplicationTerminationContext(app2Id))
ShuffleTestAccessor.reloadRegisteredExecutors(db) shouldBe empty
}
test("shuffle service should be robust to corrupt registered executor file") {
s1 = new YarnShuffleService
s1.setRecoveryPath(new Path(recoveryLocalDir.toURI))
s1.init(yarnConfig)
val app1Id = ApplicationId.newInstance(0, 1)
val app1Data = makeAppInfo("user", app1Id)
s1.initializeApplication(app1Data)
val execStateFile = s1.registeredExecutorFile
val shuffleInfo1 = new ExecutorShuffleInfo(Array("/foo", "/bar"), 3, SORT_MANAGER)
val blockHandler = s1.blockHandler
val blockResolver = ShuffleTestAccessor.getBlockResolver(blockHandler)
ShuffleTestAccessor.registeredExecutorFile(blockResolver) should be (execStateFile)
blockResolver.registerExecutor(app1Id.toString, "exec-1", shuffleInfo1)
    // now we pretend the shuffle service goes down, and comes back up. But we'll also
    // corrupt the registeredExecutor file
    s1.stop()
    execStateFile.listFiles().foreach { _.delete() }
val out = new DataOutputStream(new FileOutputStream(execStateFile + "/CURRENT"))
out.writeInt(42)
out.close()
s2 = new YarnShuffleService
s2.setRecoveryPath(new Path(recoveryLocalDir.toURI))
s2.init(yarnConfig)
s2.registeredExecutorFile should be (execStateFile)
val handler2 = s2.blockHandler
val resolver2 = ShuffleTestAccessor.getBlockResolver(handler2)
// we re-initialize app1, but since the file was corrupt there is nothing we can do about it ...
s2.initializeApplication(app1Data)
// however, when we initialize a totally new app2, everything is still happy
val app2Id = ApplicationId.newInstance(0, 2)
val app2Data = makeAppInfo("user", app2Id)
s2.initializeApplication(app2Data)
val shuffleInfo2 = new ExecutorShuffleInfo(Array("/bippy"), 5, SORT_MANAGER)
resolver2.registerExecutor(app2Id.toString, "exec-2", shuffleInfo2)
ShuffleTestAccessor.getExecutorInfo(app2Id, "exec-2", resolver2) should be (Some(shuffleInfo2))
s2.stop()
    // another stop & restart should be fine though (e.g., we recover from the previous corruption)
s3 = new YarnShuffleService
s3.setRecoveryPath(new Path(recoveryLocalDir.toURI))
s3.init(yarnConfig)
s3.registeredExecutorFile should be (execStateFile)
val handler3 = s3.blockHandler
val resolver3 = ShuffleTestAccessor.getBlockResolver(handler3)
s3.initializeApplication(app2Data)
ShuffleTestAccessor.getExecutorInfo(app2Id, "exec-2", resolver3) should be (Some(shuffleInfo2))
s3.stop()
}
test("get correct recovery path") {
// Test recovery path is set outside the shuffle service, this is to simulate NM recovery
// enabled scenario, where recovery path will be set by yarn.
s1 = new YarnShuffleService
val recoveryPath = new Path(Utils.createTempDir().toURI)
s1.setRecoveryPath(recoveryPath)
s1.init(yarnConfig)
s1._recoveryPath should be (recoveryPath)
s1.stop()
}
test("moving recovery file from NM local dir to recovery path") {
    // This tests that, when Hadoop is upgraded to 2.5+ and NM recovery is enabled, we move the
    // old recovery file to the new path to keep compatibility.
    // Simulate s1 running on an old version of Hadoop, in which the recovery file lives in the
    // NM local dir.
s1 = new YarnShuffleService
s1.setRecoveryPath(new Path(yarnConfig.getTrimmedStrings(YarnConfiguration.NM_LOCAL_DIRS)(0)))
// set auth to true to test the secrets recovery
yarnConfig.setBoolean(SecurityManager.SPARK_AUTH_CONF, true)
s1.init(yarnConfig)
val app1Id = ApplicationId.newInstance(0, 1)
val app1Data = makeAppInfo("user", app1Id)
s1.initializeApplication(app1Data)
val app2Id = ApplicationId.newInstance(0, 2)
val app2Data = makeAppInfo("user", app2Id)
s1.initializeApplication(app2Data)
assert(s1.secretManager.getSecretKey(app1Id.toString()) != null)
assert(s1.secretManager.getSecretKey(app2Id.toString()) != null)
val execStateFile = s1.registeredExecutorFile
execStateFile should not be (null)
val secretsFile = s1.secretsFile
secretsFile should not be (null)
val shuffleInfo1 = new ExecutorShuffleInfo(Array("/foo", "/bar"), 3, SORT_MANAGER)
val shuffleInfo2 = new ExecutorShuffleInfo(Array("/bippy"), 5, SORT_MANAGER)
val blockHandler = s1.blockHandler
val blockResolver = ShuffleTestAccessor.getBlockResolver(blockHandler)
ShuffleTestAccessor.registeredExecutorFile(blockResolver) should be (execStateFile)
blockResolver.registerExecutor(app1Id.toString, "exec-1", shuffleInfo1)
blockResolver.registerExecutor(app2Id.toString, "exec-2", shuffleInfo2)
ShuffleTestAccessor.getExecutorInfo(app1Id, "exec-1", blockResolver) should
be (Some(shuffleInfo1))
ShuffleTestAccessor.getExecutorInfo(app2Id, "exec-2", blockResolver) should
be (Some(shuffleInfo2))
assert(execStateFile.exists(), s"$execStateFile did not exist")
s1.stop()
    // Simulate s2 running on Hadoop 2.5+ with NM recovery enabled.
assert(execStateFile.exists())
val recoveryPath = new Path(recoveryLocalDir.toURI)
s2 = new YarnShuffleService
s2.setRecoveryPath(recoveryPath)
s2.init(yarnConfig)
// Ensure that s2 has loaded known apps from the secrets db.
assert(s2.secretManager.getSecretKey(app1Id.toString()) != null)
assert(s2.secretManager.getSecretKey(app2Id.toString()) != null)
val execStateFile2 = s2.registeredExecutorFile
val secretsFile2 = s2.secretsFile
recoveryPath.toString should be (new Path(execStateFile2.getParentFile.toURI).toString)
recoveryPath.toString should be (new Path(secretsFile2.getParentFile.toURI).toString)
eventually(timeout(10 seconds), interval(5 millis)) {
assert(!execStateFile.exists())
}
eventually(timeout(10 seconds), interval(5 millis)) {
assert(!secretsFile.exists())
}
val handler2 = s2.blockHandler
val resolver2 = ShuffleTestAccessor.getBlockResolver(handler2)
// now we reinitialize only one of the apps, and expect yarn to tell us that app2 was stopped
// during the restart
    // Since the recovery file is picked up from the old path, the previous state should be
    // preserved.
s2.initializeApplication(app1Data)
s2.stopApplication(new ApplicationTerminationContext(app2Id))
ShuffleTestAccessor.getExecutorInfo(app1Id, "exec-1", resolver2) should be (Some(shuffleInfo1))
ShuffleTestAccessor.getExecutorInfo(app2Id, "exec-2", resolver2) should be (None)
s2.stop()
}
test("service throws error if cannot start") {
// Set up a read-only local dir.
val roDir = Utils.createTempDir()
Files.setPosixFilePermissions(roDir.toPath(), EnumSet.of(OWNER_READ, OWNER_EXECUTE))
// Try to start the shuffle service, it should fail.
val service = new YarnShuffleService()
service.setRecoveryPath(new Path(roDir.toURI))
try {
val error = intercept[ServiceStateException] {
service.init(yarnConfig)
}
assert(error.getCause().isInstanceOf[IOException])
} finally {
service.stop()
Files.setPosixFilePermissions(roDir.toPath(),
EnumSet.of(OWNER_READ, OWNER_WRITE, OWNER_EXECUTE))
}
}
private def makeAppInfo(user: String, appId: ApplicationId): ApplicationInitializationContext = {
val secret = ByteBuffer.wrap(new Array[Byte](0))
new ApplicationInitializationContext(user, appId, secret)
}
test("recovery db should not be created if NM recovery is not enabled") {
s1 = new YarnShuffleService
s1.init(yarnConfig)
s1._recoveryPath should be (null)
s1.registeredExecutorFile should be (null)
s1.secretsFile should be (null)
}
}
|
santhoshkumarvs/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/ProcessingTimeExecutorSuite.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.util.concurrent.ConcurrentHashMap
import org.scalatest.concurrent.{Eventually, Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.time.SpanSugar._
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.streaming.ProcessingTime
import org.apache.spark.sql.streaming.util.StreamManualClock
class ProcessingTimeExecutorSuite extends SparkFunSuite with TimeLimits {
// Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
implicit val defaultSignaler: Signaler = ThreadSignaler
val timeout = 10.seconds
test("nextBatchTime") {
val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(100))
assert(processingTimeExecutor.nextBatchTime(0) === 100)
assert(processingTimeExecutor.nextBatchTime(1) === 100)
assert(processingTimeExecutor.nextBatchTime(99) === 100)
assert(processingTimeExecutor.nextBatchTime(100) === 200)
assert(processingTimeExecutor.nextBatchTime(101) === 200)
assert(processingTimeExecutor.nextBatchTime(150) === 200)
}
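  // A sketch of the arithmetic the asserts above imply (not the production implementation):
  //   nextBatchTime(now) == (now / intervalMs + 1) * intervalMs, with intervalMs = 100.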
test("trigger timing") {
val triggerTimes = ConcurrentHashMap.newKeySet[Int]()
val clock = new StreamManualClock()
@volatile var continueExecuting = true
@volatile var clockIncrementInTrigger = 0L
val executor = ProcessingTimeExecutor(ProcessingTime("1000 milliseconds"), clock)
val executorThread = new Thread() {
override def run(): Unit = {
executor.execute(() => {
          // Record the trigger time, advance the clock if needed, and report whether to
          // continue executing
triggerTimes.add(clock.getTimeMillis.toInt)
clock.advance(clockIncrementInTrigger)
clockIncrementInTrigger = 0 // reset this so that there are no runaway triggers
continueExecuting
})
}
}
executorThread.start()
    // First batch should execute immediately, then the executor should wait for the next one
eventually {
assert(triggerTimes.contains(0))
assert(clock.isStreamWaitingAt(0))
assert(clock.isStreamWaitingFor(1000))
}
    // Second batch should execute when the clock reaches the next trigger time.
    // If the trigger takes less than the trigger interval, the executor should wait for the
    // next one
clockIncrementInTrigger = 500
clock.setTime(1000)
eventually {
assert(triggerTimes.contains(1000))
assert(clock.isStreamWaitingAt(1500))
assert(clock.isStreamWaitingFor(2000))
}
    // If the trigger takes longer than the trigger interval, the executor should immediately
    // execute another one
clockIncrementInTrigger = 1500
clock.setTime(2000) // allow another trigger by setting clock to 2000
eventually {
      // Since the next trigger will take 1500 ms (more than the trigger interval of 1000 ms),
      // the executor will immediately execute another trigger
assert(triggerTimes.contains(2000) && triggerTimes.contains(3500))
assert(clock.isStreamWaitingAt(3500))
assert(clock.isStreamWaitingFor(4000))
}
continueExecuting = false
clock.advance(1000)
waitForThreadJoin(executorThread)
}
test("calling nextBatchTime with the result of a previous call should return the next interval") {
val intervalMS = 100
val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(intervalMS))
val ITERATION = 10
var nextBatchTime: Long = 0
for (it <- 1 to ITERATION) {
nextBatchTime = processingTimeExecutor.nextBatchTime(nextBatchTime)
}
// nextBatchTime should be 1000
assert(nextBatchTime === intervalMS * ITERATION)
}
private def testBatchTermination(intervalMs: Long): Unit = {
var batchCounts = 0
val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(intervalMs))
processingTimeExecutor.execute(() => {
batchCounts += 1
// If the batch termination works correctly, batchCounts should be 3 after `execute`
batchCounts < 3
})
assert(batchCounts === 3)
}
test("batch termination") {
testBatchTermination(0)
testBatchTermination(10)
}
test("notifyBatchFallingBehind") {
val clock = new StreamManualClock()
@volatile var batchFallingBehindCalled = false
val t = new Thread() {
override def run(): Unit = {
val processingTimeExecutor = new ProcessingTimeExecutor(ProcessingTime(100), clock) {
override def notifyBatchFallingBehind(realElapsedTimeMs: Long): Unit = {
batchFallingBehindCalled = true
}
}
processingTimeExecutor.execute(() => {
clock.waitTillTime(200)
false
})
}
}
t.start()
// Wait until the batch is running so that we don't call `advance` too early
eventually { assert(clock.isStreamWaitingFor(200)) }
clock.advance(200)
waitForThreadJoin(t)
assert(batchFallingBehindCalled)
}
private def eventually(body: => Unit): Unit = {
Eventually.eventually(Timeout(timeout)) { body }
}
private def waitForThreadJoin(thread: Thread): Unit = {
failAfter(timeout) { thread.join() }
}
}
|
santhoshkumarvs/spark
|
sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetTablesOperation.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import java.util.{List => JList}
import scala.collection.JavaConverters.seqAsJavaListConverter
import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType
import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObjectUtils
import org.apache.hive.service.cli._
import org.apache.hive.service.cli.operation.GetTablesOperation
import org.apache.hive.service.cli.session.HiveSession
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.catalyst.catalog.CatalogTableType
import org.apache.spark.sql.catalyst.catalog.CatalogTableType._
/**
* Spark's own GetTablesOperation
*
* @param sqlContext SQLContext to use
* @param parentSession a HiveSession from SessionManager
* @param catalogName catalog name. null if not applicable
* @param schemaName database name, null or a concrete database name
* @param tableName table name pattern
* @param tableTypes list of allowed table types, e.g. "TABLE", "VIEW"
*/
private[hive] class SparkGetTablesOperation(
sqlContext: SQLContext,
parentSession: HiveSession,
catalogName: String,
schemaName: String,
tableName: String,
tableTypes: JList[String])
extends GetTablesOperation(parentSession, catalogName, schemaName, tableName, tableTypes) {
if (tableTypes != null) {
this.tableTypes.addAll(tableTypes)
}
override def runInternal(): Unit = {
setState(OperationState.RUNNING)
// Always use the latest class loader provided by executionHive's state.
val executionHiveClassLoader = sqlContext.sharedState.jarClassLoader
Thread.currentThread().setContextClassLoader(executionHiveClassLoader)
val catalog = sqlContext.sessionState.catalog
val schemaPattern = convertSchemaPattern(schemaName)
val matchingDbs = catalog.listDatabases(schemaPattern)
if (isAuthV2Enabled) {
val privObjs =
HivePrivilegeObjectUtils.getHivePrivDbObjects(seqAsJavaListConverter(matchingDbs).asJava)
val cmdStr = s"catalog : $catalogName, schemaPattern : $schemaName"
authorizeMetaGets(HiveOperationType.GET_TABLES, privObjs, cmdStr)
}
val tablePattern = convertIdentifierPattern(tableName, true)
matchingDbs.foreach { dbName =>
catalog.listTables(dbName, tablePattern).foreach { tableIdentifier =>
val catalogTable = catalog.getTableMetadata(tableIdentifier)
val tableType = tableTypeString(catalogTable.tableType)
if (tableTypes == null || tableTypes.isEmpty || tableTypes.contains(tableType)) {
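          // Row layout assumed to match the first five columns of JDBC
          // DatabaseMetaData.getTables: TABLE_CAT, TABLE_SCHEM, TABLE_NAME, TABLE_TYPE, REMARKS.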
val rowData = Array[AnyRef](
"",
catalogTable.database,
catalogTable.identifier.table,
tableType,
catalogTable.comment.getOrElse(""))
rowSet.addRow(rowData)
}
}
}
setState(OperationState.FINISHED)
}
private def tableTypeString(tableType: CatalogTableType): String = tableType match {
case EXTERNAL | MANAGED => "TABLE"
case VIEW => "VIEW"
case t =>
throw new IllegalArgumentException(s"Unknown table type is found at showCreateHiveTable: $t")
}
}
|
santhoshkumarvs/spark
|
core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.io.NotSerializableException
import scala.language.reflectiveCalls
import org.apache.spark.{SparkContext, SparkException, SparkFunSuite, TaskContext}
import org.apache.spark.LocalSparkContext._
import org.apache.spark.partial.CountEvaluator
import org.apache.spark.rdd.RDD
class ClosureCleanerSuite extends SparkFunSuite {
test("closures inside an object") {
assert(TestObject.run() === 30) // 6 + 7 + 8 + 9
}
test("closures inside a class") {
val obj = new TestClass
assert(obj.run() === 30) // 6 + 7 + 8 + 9
}
test("closures inside a class with no default constructor") {
val obj = new TestClassWithoutDefaultConstructor(5)
assert(obj.run() === 30) // 6 + 7 + 8 + 9
}
test("closures that don't use fields of the outer class") {
val obj = new TestClassWithoutFieldAccess
assert(obj.run() === 30) // 6 + 7 + 8 + 9
}
test("nested closures inside an object") {
assert(TestObjectWithNesting.run() === 96) // 4 * (1+2+3+4) + 4 * (1+2+3+4) + 16 * 1
}
test("nested closures inside a class") {
val obj = new TestClassWithNesting(1)
assert(obj.run() === 96) // 4 * (1+2+3+4) + 4 * (1+2+3+4) + 16 * 1
}
test("toplevel return statements in closures are identified at cleaning time") {
intercept[ReturnStatementInClosureException] {
TestObjectWithBogusReturns.run()
}
}
test("return statements from named functions nested in closures don't raise exceptions") {
val result = TestObjectWithNestedReturns.run()
assert(result === 1)
}
test("user provided closures are actually cleaned") {
// We use return statements as an indication that a closure is actually being cleaned
// We expect closure cleaner to find the return statements in the user provided closures
def expectCorrectException(body: => Unit): Unit = {
try {
body
} catch {
case _: ReturnStatementInClosureException => // Success!
case e @ (_: NotSerializableException | _: SparkException) =>
fail(s"Expected ReturnStatementInClosureException, but got $e.\n" +
"This means the closure provided by user is not actually cleaned.")
}
}
withSpark(new SparkContext("local", "test")) { sc =>
val rdd = sc.parallelize(1 to 10)
val pairRdd = rdd.map { i => (i, i) }
expectCorrectException { TestUserClosuresActuallyCleaned.testMap(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testFlatMap(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testFilter(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testSortBy(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testGroupBy(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testKeyBy(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testMapPartitions(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testMapPartitionsWithIndex(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testZipPartitions2(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testZipPartitions3(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testZipPartitions4(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testForeach(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testForeachPartition(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testReduce(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testTreeReduce(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testFold(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testAggregate(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testTreeAggregate(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testCombineByKey(pairRdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testAggregateByKey(pairRdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testFoldByKey(pairRdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testReduceByKey(pairRdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testReduceByKeyLocally(pairRdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testMapValues(pairRdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testFlatMapValues(pairRdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testForeachAsync(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testForeachPartitionAsync(rdd) }
expectCorrectException { TestUserClosuresActuallyCleaned.testRunJob1(sc) }
expectCorrectException { TestUserClosuresActuallyCleaned.testRunJob2(sc) }
expectCorrectException { TestUserClosuresActuallyCleaned.testRunApproximateJob(sc) }
expectCorrectException { TestUserClosuresActuallyCleaned.testSubmitJob(sc) }
}
}
test("createNullValue") {
new TestCreateNullValue().run()
}
}
// A non-serializable class we create in closures to make sure that we aren't
// keeping references to unneeded variables from our outer closures.
class NonSerializable(val id: Int = -1) {
override def hashCode(): Int = id
override def equals(other: Any): Boolean = {
other match {
case o: NonSerializable => id == o.id
case _ => false
}
}
}
object TestObject {
def run(): Int = {
var nonSer = new NonSerializable
val x = 5
withSpark(new SparkContext("local", "test")) { sc =>
val nums = sc.parallelize(Array(1, 2, 3, 4))
nums.map(_ + x).reduce(_ + _)
}
}
}
class TestClass extends Serializable {
var x = 5
def getX: Int = x
def run(): Int = {
var nonSer = new NonSerializable
withSpark(new SparkContext("local", "test")) { sc =>
val nums = sc.parallelize(Array(1, 2, 3, 4))
nums.map(_ + getX).reduce(_ + _)
}
}
}
class TestClassWithoutDefaultConstructor(x: Int) extends Serializable {
def getX: Int = x
def run(): Int = {
var nonSer = new NonSerializable
withSpark(new SparkContext("local", "test")) { sc =>
val nums = sc.parallelize(Array(1, 2, 3, 4))
nums.map(_ + getX).reduce(_ + _)
}
}
}
// This class is not serializable, but we aren't using any of its fields in our
// closures, so they won't have a $outer pointing to it and should still work.
class TestClassWithoutFieldAccess {
var nonSer = new NonSerializable
def run(): Int = {
var nonSer2 = new NonSerializable
var x = 5
withSpark(new SparkContext("local", "test")) { sc =>
val nums = sc.parallelize(Array(1, 2, 3, 4))
nums.map(_ + x).reduce(_ + _)
}
}
}
object TestObjectWithBogusReturns {
def run(): Int = {
withSpark(new SparkContext("local", "test")) { sc =>
val nums = sc.parallelize(Array(1, 2, 3, 4))
// this return is invalid since it will transfer control outside the closure
nums.map {x => return 1 ; x * 2}
1
}
}
}
object TestObjectWithNestedReturns {
def run(): Int = {
withSpark(new SparkContext("local", "test")) { sc =>
val nums = sc.parallelize(Array(1, 2, 3, 4))
nums.map {x =>
// this return is fine since it will not transfer control outside the closure
def foo(): Int = { return 5; 1 }
foo()
}
1
}
}
}
object TestObjectWithNesting {
def run(): Int = {
var nonSer = new NonSerializable
var answer = 0
withSpark(new SparkContext("local", "test")) { sc =>
val nums = sc.parallelize(Array(1, 2, 3, 4))
var y = 1
for (i <- 1 to 4) {
var nonSer2 = new NonSerializable
var x = i
answer += nums.map(_ + x + y).reduce(_ + _)
}
answer
}
}
}
class TestClassWithNesting(val y: Int) extends Serializable {
def getY: Int = y
def run(): Int = {
var nonSer = new NonSerializable
var answer = 0
withSpark(new SparkContext("local", "test")) { sc =>
val nums = sc.parallelize(Array(1, 2, 3, 4))
for (i <- 1 to 4) {
var nonSer2 = new NonSerializable
var x = i
answer += nums.map(_ + x + getY).reduce(_ + _)
}
answer
}
}
}
/**
* Test whether closures passed in through public APIs are actually cleaned.
*
* We put a return statement in each of these closures as a mechanism to detect whether the
* ClosureCleaner actually cleaned our closure. If it did, then it would throw an appropriate
* exception explicitly complaining about the return statement. Otherwise, we know the
* ClosureCleaner did not actually clean our closure, in which case we should fail the test.
*/
private object TestUserClosuresActuallyCleaned {
def testMap(rdd: RDD[Int]): Unit = { rdd.map { _ => return; 0 }.count() }
def testFlatMap(rdd: RDD[Int]): Unit = { rdd.flatMap { _ => return; Seq() }.count() }
def testFilter(rdd: RDD[Int]): Unit = { rdd.filter { _ => return; true }.count() }
def testSortBy(rdd: RDD[Int]): Unit = { rdd.sortBy { _ => return; 1 }.count() }
def testKeyBy(rdd: RDD[Int]): Unit = { rdd.keyBy { _ => return; 1 }.count() }
def testGroupBy(rdd: RDD[Int]): Unit = { rdd.groupBy { _ => return; 1 }.count() }
def testMapPartitions(rdd: RDD[Int]): Unit = { rdd.mapPartitions { it => return; it }.count() }
def testMapPartitionsWithIndex(rdd: RDD[Int]): Unit = {
rdd.mapPartitionsWithIndex { (_, it) => return; it }.count()
}
def testZipPartitions2(rdd: RDD[Int]): Unit = {
rdd.zipPartitions(rdd) { case (it1, _) => return; it1 }.count()
}
def testZipPartitions3(rdd: RDD[Int]): Unit = {
rdd.zipPartitions(rdd, rdd) { case (it1, _, _) => return; it1 }.count()
}
def testZipPartitions4(rdd: RDD[Int]): Unit = {
rdd.zipPartitions(rdd, rdd, rdd) { case (it1, _, _, _) => return; it1 }.count()
}
def testForeach(rdd: RDD[Int]): Unit = { rdd.foreach { _ => return } }
def testForeachPartition(rdd: RDD[Int]): Unit = { rdd.foreachPartition { _ => return } }
def testReduce(rdd: RDD[Int]): Unit = { rdd.reduce { case (_, _) => return; 1 } }
def testTreeReduce(rdd: RDD[Int]): Unit = { rdd.treeReduce { case (_, _) => return; 1 } }
def testFold(rdd: RDD[Int]): Unit = { rdd.fold(0) { case (_, _) => return; 1 } }
def testAggregate(rdd: RDD[Int]): Unit = {
rdd.aggregate(0)({ case (_, _) => return; 1 }, { case (_, _) => return; 1 })
}
def testTreeAggregate(rdd: RDD[Int]): Unit = {
rdd.treeAggregate(0)({ case (_, _) => return; 1 }, { case (_, _) => return; 1 })
}
// Test pair RDD functions
def testCombineByKey(rdd: RDD[(Int, Int)]): Unit = {
rdd.combineByKey(
{ _ => return; 1 }: Int => Int,
{ case (_, _) => return; 1 }: (Int, Int) => Int,
{ case (_, _) => return; 1 }: (Int, Int) => Int
).count()
}
def testAggregateByKey(rdd: RDD[(Int, Int)]): Unit = {
rdd.aggregateByKey(0)({ case (_, _) => return; 1 }, { case (_, _) => return; 1 }).count()
}
def testFoldByKey(rdd: RDD[(Int, Int)]): Unit = { rdd.foldByKey(0) { case (_, _) => return; 1 } }
def testReduceByKey(rdd: RDD[(Int, Int)]): Unit = { rdd.reduceByKey { case (_, _) => return; 1 } }
def testReduceByKeyLocally(rdd: RDD[(Int, Int)]): Unit = {
rdd.reduceByKeyLocally { case (_, _) => return; 1 }
}
def testMapValues(rdd: RDD[(Int, Int)]): Unit = { rdd.mapValues { _ => return; 1 } }
def testFlatMapValues(rdd: RDD[(Int, Int)]): Unit = { rdd.flatMapValues { _ => return; Seq() } }
// Test async RDD actions
def testForeachAsync(rdd: RDD[Int]): Unit = { rdd.foreachAsync { _ => return } }
def testForeachPartitionAsync(rdd: RDD[Int]): Unit = { rdd.foreachPartitionAsync { _ => return } }
// Test SparkContext runJob
def testRunJob1(sc: SparkContext): Unit = {
val rdd = sc.parallelize(1 to 10, 10)
sc.runJob(rdd, { (_: TaskContext, _: Iterator[Int]) => return; 1 } )
}
def testRunJob2(sc: SparkContext): Unit = {
val rdd = sc.parallelize(1 to 10, 10)
sc.runJob(rdd, { _: Iterator[Int] => return; 1 } )
}
def testRunApproximateJob(sc: SparkContext): Unit = {
val rdd = sc.parallelize(1 to 10, 10)
val evaluator = new CountEvaluator(1, 0.5)
sc.runApproximateJob(
rdd, { (_: TaskContext, _: Iterator[Int]) => return; 1L }, evaluator, 1000)
}
def testSubmitJob(sc: SparkContext): Unit = {
val rdd = sc.parallelize(1 to 10, 10)
sc.submitJob(
rdd,
{ _ => return; 1 }: Iterator[Int] => Int,
Seq.empty,
{ case (_, _) => return }: (Int, Int) => Unit,
{ return }
)
}
}
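// Illustrative sketch (editor addition, not part of the original suite): each helper above is
// meant to be driven by a test that expects the ClosureCleaner to reject the embedded `return`
// statement when the closure is cleaned. The exact exception type asserted by the real suite is
// an assumption here; a hypothetical driver could look like:
//
// withSpark(new SparkContext("local", "test")) { sc =>
// intercept[Exception] {
// TestUserClosuresActuallyCleaned.testMap(sc.parallelize(1 to 10, 2))
// }
// }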
class TestCreateNullValue {
var x = 5
def getX: Int = x
def run(): Unit = {
val bo: Boolean = true
val c: Char = '1'
val b: Byte = 1
val s: Short = 1
val i: Int = 1
val l: Long = 1
val f: Float = 1
val d: Double = 1
// Bring in all primitive types into the closure such that they become
// parameters of the closure constructor. This allows us to test whether
// null values are created correctly for each type.
val nestedClosure = () => {
// scalastyle:off println
if (s.toString == "123") { // Don't actually print them, to avoid noisy output
println(bo)
println(c)
println(b)
println(s)
println(i)
println(l)
println(f)
println(d)
}
val closure = () => {
println(getX)
}
// scalastyle:on println
ClosureCleaner.clean(closure)
}
nestedClosure()
}
}
abstract class TestAbstractClass extends Serializable {
val n1 = 111
val s1 = "aaa"
protected val d1 = 1.0d
def run(): Seq[(Int, Int, String, String, Double, Double)]
def body(rdd: RDD[Int]): Seq[(Int, Int, String, String, Double, Double)]
}
abstract class TestAbstractClass2 extends Serializable {
val n1 = 111
val s1 = "aaa"
protected val d1 = 1.0d
}
|
santhoshkumarvs/spark
|
external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/MsSqlServerIntegrationSuite.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.jdbc
import java.math.BigDecimal
import java.sql.{Connection, Date, Timestamp}
import java.util.Properties
import org.apache.spark.tags.DockerTest
@DockerTest
class MsSqlServerIntegrationSuite extends DockerJDBCIntegrationSuite {
override val db = new DatabaseOnDocker {
override val imageName = "mcr.microsoft.com/mssql/server:2017-GA-ubuntu"
override val env = Map(
"SA_PASSWORD" -> "<PASSWORD>",
"ACCEPT_EULA" -> "Y"
)
override val usesIpc = false
override val jdbcPort: Int = 1433
override def getJdbcUrl(ip: String, port: Int): String =
s"jdbc:sqlserver://$ip:$port;user=sa;password=<PASSWORD>;"
override def getStartupProcessName: Option[String] = None
}
override def dataPreparation(conn: Connection): Unit = {
conn.prepareStatement("CREATE TABLE tbl (x INT, y VARCHAR (50))").executeUpdate()
conn.prepareStatement("INSERT INTO tbl VALUES (42,'fred')").executeUpdate()
conn.prepareStatement("INSERT INTO tbl VALUES (17,'dave')").executeUpdate()
conn.prepareStatement(
"""
|CREATE TABLE numbers (
|a BIT,
|b TINYINT, c SMALLINT, d INT, e BIGINT,
|f FLOAT, f1 FLOAT(24),
|g REAL,
|h DECIMAL(5,2), i NUMERIC(10,5),
|j MONEY, k SMALLMONEY)
""".stripMargin).executeUpdate()
conn.prepareStatement(
"""
|INSERT INTO numbers VALUES (
|0,
|255, 32767, 2147483647, 9223372036854775807,
|123456789012345.123456789012345, 123456789012345.123456789012345,
|123456789012345.123456789012345,
|123, 12345.12,
|922337203685477.58, 214748.3647)
""".stripMargin).executeUpdate()
conn.prepareStatement(
"""
|CREATE TABLE dates (
|a DATE, b DATETIME, c DATETIME2,
|d DATETIMEOFFSET, e SMALLDATETIME,
|f TIME)
""".stripMargin).executeUpdate()
conn.prepareStatement(
"""
|INSERT INTO dates VALUES (
|'1991-11-09', '1999-01-01 13:23:35', '9999-12-31 23:59:59',
|'1901-05-09 23:59:59 +14:00', '1996-01-01 23:23:45',
|'13:31:24')
""".stripMargin).executeUpdate()
conn.prepareStatement(
"""
|CREATE TABLE strings (
|a CHAR(10), b VARCHAR(10),
|c NCHAR(10), d NVARCHAR(10),
|e BINARY(4), f VARBINARY(4),
|g TEXT, h NTEXT,
|i IMAGE)
""".stripMargin).executeUpdate()
conn.prepareStatement(
"""
|INSERT INTO strings VALUES (
|'the', 'quick',
|'brown', 'fox',
|123456, 123456,
|'the', 'lazy',
|'dog')
""".stripMargin).executeUpdate()
}
test("Basic test") {
val df = spark.read.jdbc(jdbcUrl, "tbl", new Properties)
val rows = df.collect()
assert(rows.length == 2)
val types = rows(0).toSeq.map(x => x.getClass.toString)
assert(types.length == 2)
assert(types(0).equals("class java.lang.Integer"))
assert(types(1).equals("class java.lang.String"))
}
test("Numeric types") {
val df = spark.read.jdbc(jdbcUrl, "numbers", new Properties)
val rows = df.collect()
assert(rows.length == 1)
val row = rows(0)
val types = row.toSeq.map(x => x.getClass.toString)
assert(types.length == 12)
assert(types(0).equals("class java.lang.Boolean"))
assert(types(1).equals("class java.lang.Integer"))
assert(types(2).equals("class java.lang.Integer"))
assert(types(3).equals("class java.lang.Integer"))
assert(types(4).equals("class java.lang.Long"))
assert(types(5).equals("class java.lang.Double"))
assert(types(6).equals("class java.lang.Double"))
assert(types(7).equals("class java.lang.Double"))
assert(types(8).equals("class java.math.BigDecimal"))
assert(types(9).equals("class java.math.BigDecimal"))
assert(types(10).equals("class java.math.BigDecimal"))
assert(types(11).equals("class java.math.BigDecimal"))
assert(row.getBoolean(0) == false)
assert(row.getInt(1) == 255)
assert(row.getInt(2) == 32767)
assert(row.getInt(3) == 2147483647)
assert(row.getLong(4) == 9223372036854775807L)
assert(row.getDouble(5) == 1.2345678901234512E14) // float = float(53) has 15 digits of precision
assert(row.getDouble(6) == 1.23456788103168E14) // float(24) has 7 digits of precision
assert(row.getDouble(7) == 1.23456788103168E14) // real = float(24)
assert(row.getAs[BigDecimal](8).equals(new BigDecimal("123.00")))
assert(row.getAs[BigDecimal](9).equals(new BigDecimal("12345.12000")))
assert(row.getAs[BigDecimal](10).equals(new BigDecimal("922337203685477.5800")))
assert(row.getAs[BigDecimal](11).equals(new BigDecimal("214748.3647")))
}
test("Date types") {
val df = spark.read.jdbc(jdbcUrl, "dates", new Properties)
val rows = df.collect()
assert(rows.length == 1)
val row = rows(0)
val types = row.toSeq.map(x => x.getClass.toString)
assert(types.length == 6)
assert(types(0).equals("class java.sql.Date"))
assert(types(1).equals("class java.sql.Timestamp"))
assert(types(2).equals("class java.sql.Timestamp"))
assert(types(3).equals("class java.lang.String"))
assert(types(4).equals("class java.sql.Timestamp"))
assert(types(5).equals("class java.sql.Timestamp"))
assert(row.getAs[Date](0).equals(Date.valueOf("1991-11-09")))
assert(row.getAs[Timestamp](1).equals(Timestamp.valueOf("1999-01-01 13:23:35.0")))
assert(row.getAs[Timestamp](2).equals(Timestamp.valueOf("9999-12-31 23:59:59.0")))
assert(row.getString(3).equals("1901-05-09 23:59:59.0000000 +14:00"))
assert(row.getAs[Timestamp](4).equals(Timestamp.valueOf("1996-01-01 23:24:00.0")))
assert(row.getAs[Timestamp](5).equals(Timestamp.valueOf("1900-01-01 13:31:24.0")))
}
test("String types") {
val df = spark.read.jdbc(jdbcUrl, "strings", new Properties)
val rows = df.collect()
assert(rows.length == 1)
val row = rows(0)
val types = row.toSeq.map(x => x.getClass.toString)
assert(types.length == 9)
assert(types(0).equals("class java.lang.String"))
assert(types(1).equals("class java.lang.String"))
assert(types(2).equals("class java.lang.String"))
assert(types(3).equals("class java.lang.String"))
assert(types(4).equals("class [B"))
assert(types(5).equals("class [B"))
assert(types(6).equals("class java.lang.String"))
assert(types(7).equals("class java.lang.String"))
assert(types(8).equals("class [B"))
assert(row.getString(0).length == 10)
assert(row.getString(0).trim.equals("the"))
assert(row.getString(1).equals("quick"))
assert(row.getString(2).length == 10)
assert(row.getString(2).trim.equals("brown"))
assert(row.getString(3).equals("fox"))
assert(java.util.Arrays.equals(row.getAs[Array[Byte]](4), Array[Byte](0, 1, -30, 64)))
assert(java.util.Arrays.equals(row.getAs[Array[Byte]](5), Array[Byte](0, 1, -30, 64)))
assert(row.getString(6).equals("the"))
assert(row.getString(7).equals("lazy"))
assert(java.util.Arrays.equals(row.getAs[Array[Byte]](8), Array[Byte](100, 111, 103)))
}
test("Basic write test") {
val df1 = spark.read.jdbc(jdbcUrl, "numbers", new Properties)
val df2 = spark.read.jdbc(jdbcUrl, "dates", new Properties)
val df3 = spark.read.jdbc(jdbcUrl, "strings", new Properties)
df1.write.jdbc(jdbcUrl, "numberscopy", new Properties)
df2.write.jdbc(jdbcUrl, "datescopy", new Properties)
df3.write.jdbc(jdbcUrl, "stringscopy", new Properties)
}
}
|
santhoshkumarvs/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/hash.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.math.{BigDecimal, RoundingMode}
import java.security.{MessageDigest, NoSuchAlgorithmException}
import java.util.concurrent.TimeUnit._
import java.util.zip.CRC32
import scala.annotation.tailrec
import org.apache.commons.codec.digest.DigestUtils
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.expressions.codegen.Block._
import org.apache.spark.sql.catalyst.util.{ArrayData, MapData}
import org.apache.spark.sql.catalyst.util.DateTimeUtils._
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.Platform
import org.apache.spark.unsafe.hash.Murmur3_x86_32
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
////////////////////////////////////////////////////////////////////////////////////////////////////
// This file defines all the expressions for hashing.
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* A function that calculates an MD5 128-bit checksum and returns it as a hex string,
* for input of type [[BinaryType]].
*/
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns an MD5 128-bit checksum as a hex string of `expr`.",
examples = """
Examples:
> SELECT _FUNC_('Spark');
8cde774d6f7333752ed72cacddb05126
""")
case class Md5(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def dataType: DataType = StringType
override def inputTypes: Seq[DataType] = Seq(BinaryType)
protected override def nullSafeEval(input: Any): Any =
UTF8String.fromString(DigestUtils.md5Hex(input.asInstanceOf[Array[Byte]]))
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, c =>
s"UTF8String.fromString(org.apache.commons.codec.digest.DigestUtils.md5Hex($c))")
}
}
/**
* A function that calculates a SHA-2 family hash (SHA-224, SHA-256, SHA-384, or SHA-512) of its
* input and returns it as a hex string. The first argument is the string or binary to be hashed.
* The second argument indicates the desired bit length of the result, which must be 224, 256,
* 384, 512, or 0 (which is equivalent to 256). SHA-224 is supported starting from Java 8. If
* either argument is NULL, or the bit length is not one of the permitted values, the return
* value is NULL.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = """
_FUNC_(expr, bitLength) - Returns a checksum of SHA-2 family as a hex string of `expr`.
SHA-224, SHA-256, SHA-384, and SHA-512 are supported. Bit length of 0 is equivalent to 256.
""",
examples = """
Examples:
> SELECT _FUNC_('Spark', 256);
529bc3b07127ecb7e53a4dcf1991d9152c24537d919178022b2c42657f79a26b
""")
// scalastyle:on line.size.limit
case class Sha2(left: Expression, right: Expression)
extends BinaryExpression with Serializable with ImplicitCastInputTypes {
override def dataType: DataType = StringType
override def nullable: Boolean = true
override def inputTypes: Seq[DataType] = Seq(BinaryType, IntegerType)
protected override def nullSafeEval(input1: Any, input2: Any): Any = {
val bitLength = input2.asInstanceOf[Int]
val input = input1.asInstanceOf[Array[Byte]]
bitLength match {
case 224 =>
// DigestUtils does not currently support SHA-224, so use MessageDigest directly
try {
val md = MessageDigest.getInstance("SHA-224")
md.update(input)
UTF8String.fromBytes(md.digest())
} catch {
// SHA-224 is not supported on the system, return null
case noa: NoSuchAlgorithmException => null
}
case 256 | 0 =>
UTF8String.fromString(DigestUtils.sha256Hex(input))
case 384 =>
UTF8String.fromString(DigestUtils.sha384Hex(input))
case 512 =>
UTF8String.fromString(DigestUtils.sha512Hex(input))
case _ => null
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val digestUtils = "org.apache.commons.codec.digest.DigestUtils"
nullSafeCodeGen(ctx, ev, (eval1, eval2) => {
s"""
if ($eval2 == 224) {
try {
java.security.MessageDigest md = java.security.MessageDigest.getInstance("SHA-224");
md.update($eval1);
${ev.value} = UTF8String.fromBytes(md.digest());
} catch (java.security.NoSuchAlgorithmException e) {
${ev.isNull} = true;
}
} else if ($eval2 == 256 || $eval2 == 0) {
${ev.value} =
UTF8String.fromString($digestUtils.sha256Hex($eval1));
} else if ($eval2 == 384) {
${ev.value} =
UTF8String.fromString($digestUtils.sha384Hex($eval1));
} else if ($eval2 == 512) {
${ev.value} =
UTF8String.fromString($digestUtils.sha512Hex($eval1));
} else {
${ev.isNull} = true;
}
"""
})
}
}
/**
* A function that calculates a SHA-1 hash value and returns it as a hex string,
* for input of type [[BinaryType]] or [[StringType]].
*/
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns a sha1 hash value as a hex string of the `expr`.",
examples = """
Examples:
> SELECT _FUNC_('Spark');
85f5955f4b27a9a4c2aab6ffe5d7189fc298b92c
""")
case class Sha1(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def dataType: DataType = StringType
override def inputTypes: Seq[DataType] = Seq(BinaryType)
protected override def nullSafeEval(input: Any): Any =
UTF8String.fromString(DigestUtils.sha1Hex(input.asInstanceOf[Array[Byte]]))
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, c =>
s"UTF8String.fromString(org.apache.commons.codec.digest.DigestUtils.sha1Hex($c))"
)
}
}
/**
* A function that computes a cyclic redundancy check value and returns it as a bigint,
* for input of type [[BinaryType]].
*/
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns a cyclic redundancy check value of the `expr` as a bigint.",
examples = """
Examples:
> SELECT _FUNC_('Spark');
1557323817
""")
case class Crc32(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def dataType: DataType = LongType
override def inputTypes: Seq[DataType] = Seq(BinaryType)
protected override def nullSafeEval(input: Any): Any = {
val checksum = new CRC32
checksum.update(input.asInstanceOf[Array[Byte]], 0, input.asInstanceOf[Array[Byte]].length)
checksum.getValue
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val CRC32 = "java.util.zip.CRC32"
val checksum = ctx.freshName("checksum")
nullSafeCodeGen(ctx, ev, value => {
s"""
$CRC32 $checksum = new $CRC32();
$checksum.update($value, 0, $value.length);
${ev.value} = $checksum.getValue();
"""
})
}
}
/**
* A function that calculates a hash value for a group of expressions. Note that the `seed`
* argument is not exposed to users and should only be set inside Spark SQL.
*
* The hash value for an expression depends on its type and seed:
* - null: seed
* - boolean: turn boolean into int, 1 for true, 0 for false, and then use murmur3 to
* hash this int with seed.
* - byte, short, int: use murmur3 to hash the input as int with seed.
* - long: use murmur3 to hash the long input with seed.
* - float: turn it into int: java.lang.Float.floatToIntBits(input), and hash it.
* - double: turn it into long: java.lang.Double.doubleToLongBits(input), and hash it.
* - decimal: if it's a small decimal, i.e. precision <= 18, turn it into long and hash
* it. Else, turn it into bytes and hash it.
* - calendar interval: hash `microseconds` first, and use the result as seed to hash `months`.
* - binary: use murmur3 to hash the bytes with seed.
* - string: get the bytes of string and hash it.
* - array: The `result` starts with seed, then use `result` as seed, recursively
* calculate hash value for each element, and assign the element hash value
* to `result`.
* - map: The `result` starts with seed, then use `result` as seed, recursively
* calculate hash value for each key-value, and assign the key-value hash
* value to `result`.
* - struct: The `result` starts with seed, then use `result` as seed, recursively
* calculate hash value for each field, and assign the field hash value to
* `result`.
*
* Finally, we aggregate the hash values of all the expressions in the same way as for a struct.
*/
abstract class HashExpression[E] extends Expression {
/** Seed of the HashExpression. */
val seed: E
override def foldable: Boolean = children.forall(_.foldable)
override def nullable: Boolean = false
override def checkInputDataTypes(): TypeCheckResult = {
if (children.length < 1) {
TypeCheckResult.TypeCheckFailure(
s"input to function $prettyName requires at least one argument")
} else {
TypeCheckResult.TypeCheckSuccess
}
}
override def eval(input: InternalRow = null): Any = {
var hash = seed
var i = 0
val len = children.length
while (i < len) {
hash = computeHash(children(i).eval(input), children(i).dataType, hash)
i += 1
}
hash
}
protected def computeHash(value: Any, dataType: DataType, seed: E): E
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
ev.isNull = FalseLiteral
val childrenHash = children.map { child =>
val childGen = child.genCode(ctx)
childGen.code + ctx.nullSafeExec(child.nullable, childGen.isNull) {
computeHash(childGen.value, child.dataType, ev.value, ctx)
}
}
val hashResultType = CodeGenerator.javaType(dataType)
val codes = ctx.splitExpressionsWithCurrentInputs(
expressions = childrenHash,
funcName = "computeHash",
extraArguments = Seq(hashResultType -> ev.value),
returnType = hashResultType,
makeSplitFunction = body =>
s"""
|$body
|return ${ev.value};
""".stripMargin,
foldFunctions = _.map(funcCall => s"${ev.value} = $funcCall;").mkString("\n"))
ev.copy(code =
code"""
|$hashResultType ${ev.value} = $seed;
|$codes
""".stripMargin)
}
protected def nullSafeElementHash(
input: String,
index: String,
nullable: Boolean,
elementType: DataType,
result: String,
ctx: CodegenContext): String = {
val element = ctx.freshName("element")
val jt = CodeGenerator.javaType(elementType)
ctx.nullSafeExec(nullable, s"$input.isNullAt($index)") {
s"""
final $jt $element = ${CodeGenerator.getValue(input, elementType, index)};
${computeHash(element, elementType, result, ctx)}
"""
}
}
protected def genHashInt(i: String, result: String): String =
s"$result = $hasherClassName.hashInt($i, $result);"
protected def genHashLong(l: String, result: String): String =
s"$result = $hasherClassName.hashLong($l, $result);"
protected def genHashBytes(b: String, result: String): String = {
val offset = "Platform.BYTE_ARRAY_OFFSET"
s"$result = $hasherClassName.hashUnsafeBytes($b, $offset, $b.length, $result);"
}
protected def genHashBoolean(input: String, result: String): String =
genHashInt(s"$input ? 1 : 0", result)
protected def genHashFloat(input: String, result: String): String =
genHashInt(s"Float.floatToIntBits($input)", result)
protected def genHashDouble(input: String, result: String): String =
genHashLong(s"Double.doubleToLongBits($input)", result)
protected def genHashDecimal(
ctx: CodegenContext,
d: DecimalType,
input: String,
result: String): String = {
if (d.precision <= Decimal.MAX_LONG_DIGITS) {
genHashLong(s"$input.toUnscaledLong()", result)
} else {
val bytes = ctx.freshName("bytes")
s"""
|final byte[] $bytes = $input.toJavaBigDecimal().unscaledValue().toByteArray();
|${genHashBytes(bytes, result)}
""".stripMargin
}
}
protected def genHashTimestamp(t: String, result: String): String = genHashLong(t, result)
protected def genHashCalendarInterval(input: String, result: String): String = {
val microsecondsHash = s"$hasherClassName.hashLong($input.microseconds, $result)"
s"$result = $hasherClassName.hashInt($input.months, $microsecondsHash);"
}
protected def genHashString(input: String, result: String): String = {
val baseObject = s"$input.getBaseObject()"
val baseOffset = s"$input.getBaseOffset()"
val numBytes = s"$input.numBytes()"
s"$result = $hasherClassName.hashUnsafeBytes($baseObject, $baseOffset, $numBytes, $result);"
}
protected def genHashForMap(
ctx: CodegenContext,
input: String,
result: String,
keyType: DataType,
valueType: DataType,
valueContainsNull: Boolean): String = {
val index = ctx.freshName("index")
val keys = ctx.freshName("keys")
val values = ctx.freshName("values")
s"""
final ArrayData $keys = $input.keyArray();
final ArrayData $values = $input.valueArray();
for (int $index = 0; $index < $input.numElements(); $index++) {
${nullSafeElementHash(keys, index, false, keyType, result, ctx)}
${nullSafeElementHash(values, index, valueContainsNull, valueType, result, ctx)}
}
"""
}
protected def genHashForArray(
ctx: CodegenContext,
input: String,
result: String,
elementType: DataType,
containsNull: Boolean): String = {
val index = ctx.freshName("index")
s"""
for (int $index = 0; $index < $input.numElements(); $index++) {
${nullSafeElementHash(input, index, containsNull, elementType, result, ctx)}
}
"""
}
protected def genHashForStruct(
ctx: CodegenContext,
input: String,
result: String,
fields: Array[StructField]): String = {
val tmpInput = ctx.freshName("input")
val fieldsHash = fields.zipWithIndex.map { case (field, index) =>
nullSafeElementHash(tmpInput, index.toString, field.nullable, field.dataType, result, ctx)
}
val hashResultType = CodeGenerator.javaType(dataType)
val code = ctx.splitExpressions(
expressions = fieldsHash,
funcName = "computeHashForStruct",
arguments = Seq("InternalRow" -> tmpInput, hashResultType -> result),
returnType = hashResultType,
makeSplitFunction = body =>
s"""
|$body
|return $result;
""".stripMargin,
foldFunctions = _.map(funcCall => s"$result = $funcCall;").mkString("\n"))
s"""
|final InternalRow $tmpInput = $input;
|$code
""".stripMargin
}
@tailrec
private def computeHashWithTailRec(
input: String,
dataType: DataType,
result: String,
ctx: CodegenContext): String = dataType match {
case NullType => ""
case BooleanType => genHashBoolean(input, result)
case ByteType | ShortType | IntegerType | DateType => genHashInt(input, result)
case LongType => genHashLong(input, result)
case TimestampType => genHashTimestamp(input, result)
case FloatType => genHashFloat(input, result)
case DoubleType => genHashDouble(input, result)
case d: DecimalType => genHashDecimal(ctx, d, input, result)
case CalendarIntervalType => genHashCalendarInterval(input, result)
case BinaryType => genHashBytes(input, result)
case StringType => genHashString(input, result)
case ArrayType(et, containsNull) => genHashForArray(ctx, input, result, et, containsNull)
case MapType(kt, vt, valueContainsNull) =>
genHashForMap(ctx, input, result, kt, vt, valueContainsNull)
case StructType(fields) => genHashForStruct(ctx, input, result, fields)
case udt: UserDefinedType[_] => computeHashWithTailRec(input, udt.sqlType, result, ctx)
}
protected def computeHash(
input: String,
dataType: DataType,
result: String,
ctx: CodegenContext): String = computeHashWithTailRec(input, dataType, result, ctx)
protected def hasherClassName: String
}
/**
* Base class for interpreted hash functions.
*/
abstract class InterpretedHashFunction {
protected def hashInt(i: Int, seed: Long): Long
protected def hashLong(l: Long, seed: Long): Long
protected def hashUnsafeBytes(base: AnyRef, offset: Long, length: Int, seed: Long): Long
/**
* Computes hash of a given `value` of type `dataType`. The caller needs to check the validity
* of input `value`.
*/
def hash(value: Any, dataType: DataType, seed: Long): Long = {
value match {
case null => seed
case b: Boolean => hashInt(if (b) 1 else 0, seed)
case b: Byte => hashInt(b, seed)
case s: Short => hashInt(s, seed)
case i: Int => hashInt(i, seed)
case l: Long => hashLong(l, seed)
case f: Float => hashInt(java.lang.Float.floatToIntBits(f), seed)
case d: Double => hashLong(java.lang.Double.doubleToLongBits(d), seed)
case d: Decimal =>
val precision = dataType.asInstanceOf[DecimalType].precision
if (precision <= Decimal.MAX_LONG_DIGITS) {
hashLong(d.toUnscaledLong, seed)
} else {
val bytes = d.toJavaBigDecimal.unscaledValue().toByteArray
hashUnsafeBytes(bytes, Platform.BYTE_ARRAY_OFFSET, bytes.length, seed)
}
case c: CalendarInterval => hashInt(c.months, hashLong(c.microseconds, seed))
case a: Array[Byte] =>
hashUnsafeBytes(a, Platform.BYTE_ARRAY_OFFSET, a.length, seed)
case s: UTF8String =>
hashUnsafeBytes(s.getBaseObject, s.getBaseOffset, s.numBytes(), seed)
case array: ArrayData =>
val elementType = dataType match {
case udt: UserDefinedType[_] => udt.sqlType.asInstanceOf[ArrayType].elementType
case ArrayType(et, _) => et
}
var result = seed
var i = 0
while (i < array.numElements()) {
result = hash(array.get(i, elementType), elementType, result)
i += 1
}
result
case map: MapData =>
val (kt, vt) = dataType match {
case udt: UserDefinedType[_] =>
val mapType = udt.sqlType.asInstanceOf[MapType]
mapType.keyType -> mapType.valueType
case MapType(kt, vt, _) => kt -> vt
}
val keys = map.keyArray()
val values = map.valueArray()
var result = seed
var i = 0
while (i < map.numElements()) {
result = hash(keys.get(i, kt), kt, result)
result = hash(values.get(i, vt), vt, result)
i += 1
}
result
case struct: InternalRow =>
val types: Array[DataType] = dataType match {
case udt: UserDefinedType[_] =>
udt.sqlType.asInstanceOf[StructType].map(_.dataType).toArray
case StructType(fields) => fields.map(_.dataType)
}
var result = seed
var i = 0
val len = struct.numFields
while (i < len) {
result = hash(struct.get(i, types(i)), types(i), result)
i += 1
}
result
}
}
}
/**
* A MurMur3 Hash expression.
*
* We should use this hash function for both shuffling and bucketing, so that we can guarantee
* that shuffling and bucketing produce the same data distribution.
*/
@ExpressionDescription(
usage = "_FUNC_(expr1, expr2, ...) - Returns a hash value of the arguments.",
examples = """
Examples:
> SELECT _FUNC_('Spark', array(123), 2);
-1321691492
""")
case class Murmur3Hash(children: Seq[Expression], seed: Int) extends HashExpression[Int] {
def this(arguments: Seq[Expression]) = this(arguments, 42)
override def dataType: DataType = IntegerType
override def prettyName: String = "hash"
override protected def hasherClassName: String = classOf[Murmur3_x86_32].getName
override protected def computeHash(value: Any, dataType: DataType, seed: Int): Int = {
Murmur3HashFunction.hash(value, dataType, seed).toInt
}
}
object Murmur3HashFunction extends InterpretedHashFunction {
override protected def hashInt(i: Int, seed: Long): Long = {
Murmur3_x86_32.hashInt(i, seed.toInt)
}
override protected def hashLong(l: Long, seed: Long): Long = {
Murmur3_x86_32.hashLong(l, seed.toInt)
}
override protected def hashUnsafeBytes(base: AnyRef, offset: Long, len: Int, seed: Long): Long = {
Murmur3_x86_32.hashUnsafeBytes(base, offset, len, seed.toInt)
}
}
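// Illustrative sketch (editor addition, not part of the original file): shows how the interpreted
// Murmur3 path above folds per-child hashes, mirroring HashExpression.eval. Only APIs defined in
// this file and types already imported here are used; the seed 42 matches the default seed of the
// `hash` expression.
private[expressions] object Murmur3HashExample {
def demo(): Int = {
// Hash an Int with the default seed, then feed the result back in as the seed for a String,
// exactly as eval() does when folding over multiple children.
val afterInt = Murmur3HashFunction.hash(1, IntegerType, 42L).toInt
Murmur3HashFunction.hash(UTF8String.fromString("Spark"), StringType, afterInt).toInt
}
}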
/**
* A xxHash64 64-bit hash expression.
*/
@ExpressionDescription(
usage = "_FUNC_(expr1, expr2, ...) - Returns a 64-bit hash value of the arguments.",
examples = """
Examples:
> SELECT _FUNC_('Spark', array(123), 2);
5602566077635097486
""")
case class XxHash64(children: Seq[Expression], seed: Long) extends HashExpression[Long] {
def this(arguments: Seq[Expression]) = this(arguments, 42L)
override def dataType: DataType = LongType
override def prettyName: String = "xxhash64"
override protected def hasherClassName: String = classOf[XXH64].getName
override protected def computeHash(value: Any, dataType: DataType, seed: Long): Long = {
XxHash64Function.hash(value, dataType, seed)
}
}
object XxHash64Function extends InterpretedHashFunction {
override protected def hashInt(i: Int, seed: Long): Long = XXH64.hashInt(i, seed)
override protected def hashLong(l: Long, seed: Long): Long = XXH64.hashLong(l, seed)
override protected def hashUnsafeBytes(base: AnyRef, offset: Long, len: Int, seed: Long): Long = {
XXH64.hashUnsafeBytes(base, offset, len, seed)
}
}
/**
* Simulates Hive's hashing function from Hive v1.2.1 at
* org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils#hashcode()
*
* We should use this hash function for both shuffling and bucketing of Hive tables, so that
* we can guarantee that shuffling and bucketing produce the same data distribution.
*/
@ExpressionDescription(
usage = "_FUNC_(expr1, expr2, ...) - Returns a hash value of the arguments.")
case class HiveHash(children: Seq[Expression]) extends HashExpression[Int] {
override val seed = 0
override def dataType: DataType = IntegerType
override def prettyName: String = "hive-hash"
override protected def hasherClassName: String = classOf[HiveHasher].getName
override protected def computeHash(value: Any, dataType: DataType, seed: Int): Int = {
HiveHashFunction.hash(value, dataType, this.seed).toInt
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
ev.isNull = FalseLiteral
val childHash = ctx.freshName("childHash")
val childrenHash = children.map { child =>
val childGen = child.genCode(ctx)
val codeToComputeHash = ctx.nullSafeExec(child.nullable, childGen.isNull) {
computeHash(childGen.value, child.dataType, childHash, ctx)
}
s"""
|${childGen.code}
|$childHash = 0;
|$codeToComputeHash
|${ev.value} = (31 * ${ev.value}) + $childHash;
""".stripMargin
}
val codes = ctx.splitExpressionsWithCurrentInputs(
expressions = childrenHash,
funcName = "computeHash",
extraArguments = Seq(CodeGenerator.JAVA_INT -> ev.value),
returnType = CodeGenerator.JAVA_INT,
makeSplitFunction = body =>
s"""
|${CodeGenerator.JAVA_INT} $childHash = 0;
|$body
|return ${ev.value};
""".stripMargin,
foldFunctions = _.map(funcCall => s"${ev.value} = $funcCall;").mkString("\n"))
ev.copy(code =
code"""
|${CodeGenerator.JAVA_INT} ${ev.value} = $seed;
|${CodeGenerator.JAVA_INT} $childHash = 0;
|$codes
""".stripMargin)
}
override def eval(input: InternalRow = null): Int = {
var hash = seed
var i = 0
val len = children.length
while (i < len) {
hash = (31 * hash) + computeHash(children(i).eval(input), children(i).dataType, hash)
i += 1
}
hash
}
override protected def genHashInt(i: String, result: String): String =
s"$result = $hasherClassName.hashInt($i);"
override protected def genHashLong(l: String, result: String): String =
s"$result = $hasherClassName.hashLong($l);"
override protected def genHashBytes(b: String, result: String): String =
s"$result = $hasherClassName.hashUnsafeBytes($b, Platform.BYTE_ARRAY_OFFSET, $b.length);"
override protected def genHashDecimal(
ctx: CodegenContext,
d: DecimalType,
input: String,
result: String): String = {
s"""
$result = ${HiveHashFunction.getClass.getName.stripSuffix("$")}.normalizeDecimal(
$input.toJavaBigDecimal()).hashCode();"""
}
override protected def genHashCalendarInterval(input: String, result: String): String = {
s"""
$result = (int)
${HiveHashFunction.getClass.getName.stripSuffix("$")}.hashCalendarInterval($input);
"""
}
override protected def genHashTimestamp(input: String, result: String): String =
s"""
$result = (int) ${HiveHashFunction.getClass.getName.stripSuffix("$")}.hashTimestamp($input);
"""
override protected def genHashString(input: String, result: String): String = {
val baseObject = s"$input.getBaseObject()"
val baseOffset = s"$input.getBaseOffset()"
val numBytes = s"$input.numBytes()"
s"$result = $hasherClassName.hashUnsafeBytes($baseObject, $baseOffset, $numBytes);"
}
override protected def genHashForArray(
ctx: CodegenContext,
input: String,
result: String,
elementType: DataType,
containsNull: Boolean): String = {
val index = ctx.freshName("index")
val childResult = ctx.freshName("childResult")
s"""
int $childResult = 0;
for (int $index = 0; $index < $input.numElements(); $index++) {
$childResult = 0;
${nullSafeElementHash(input, index, containsNull, elementType, childResult, ctx)};
$result = (31 * $result) + $childResult;
}
"""
}
override protected def genHashForMap(
ctx: CodegenContext,
input: String,
result: String,
keyType: DataType,
valueType: DataType,
valueContainsNull: Boolean): String = {
val index = ctx.freshName("index")
val keys = ctx.freshName("keys")
val values = ctx.freshName("values")
val keyResult = ctx.freshName("keyResult")
val valueResult = ctx.freshName("valueResult")
s"""
final ArrayData $keys = $input.keyArray();
final ArrayData $values = $input.valueArray();
int $keyResult = 0;
int $valueResult = 0;
for (int $index = 0; $index < $input.numElements(); $index++) {
$keyResult = 0;
${nullSafeElementHash(keys, index, false, keyType, keyResult, ctx)}
$valueResult = 0;
${nullSafeElementHash(values, index, valueContainsNull, valueType, valueResult, ctx)}
$result += $keyResult ^ $valueResult;
}
"""
}
override protected def genHashForStruct(
ctx: CodegenContext,
input: String,
result: String,
fields: Array[StructField]): String = {
val tmpInput = ctx.freshName("input")
val childResult = ctx.freshName("childResult")
val fieldsHash = fields.zipWithIndex.map { case (field, index) =>
val computeFieldHash = nullSafeElementHash(
tmpInput, index.toString, field.nullable, field.dataType, childResult, ctx)
s"""
|$childResult = 0;
|$computeFieldHash
|$result = (31 * $result) + $childResult;
""".stripMargin
}
val code = ctx.splitExpressions(
expressions = fieldsHash,
funcName = "computeHashForStruct",
arguments = Seq("InternalRow" -> tmpInput, CodeGenerator.JAVA_INT -> result),
returnType = CodeGenerator.JAVA_INT,
makeSplitFunction = body =>
s"""
|${CodeGenerator.JAVA_INT} $childResult = 0;
|$body
|return $result;
""".stripMargin,
foldFunctions = _.map(funcCall => s"$result = $funcCall;").mkString("\n"))
s"""
|final InternalRow $tmpInput = $input;
|${CodeGenerator.JAVA_INT} $childResult = 0;
|$code
""".stripMargin
}
}
object HiveHashFunction extends InterpretedHashFunction {
override protected def hashInt(i: Int, seed: Long): Long = {
HiveHasher.hashInt(i)
}
override protected def hashLong(l: Long, seed: Long): Long = {
HiveHasher.hashLong(l)
}
override protected def hashUnsafeBytes(base: AnyRef, offset: Long, len: Int, seed: Long): Long = {
HiveHasher.hashUnsafeBytes(base, offset, len)
}
private val HIVE_DECIMAL_MAX_PRECISION = 38
private val HIVE_DECIMAL_MAX_SCALE = 38
// Mimics normalization done for decimals in Hive at HiveDecimalV1.normalize()
def normalizeDecimal(input: BigDecimal): BigDecimal = {
if (input == null) return null
def trimDecimal(input: BigDecimal) = {
var result = input
if (result.compareTo(BigDecimal.ZERO) == 0) {
// Special case for 0, because Java does not strip trailing zeros correctly for that number.
result = BigDecimal.ZERO
} else {
result = result.stripTrailingZeros
if (result.scale < 0) {
// no negative scale decimals
result = result.setScale(0)
}
}
result
}
var result = trimDecimal(input)
val intDigits = result.precision - result.scale
if (intDigits > HIVE_DECIMAL_MAX_PRECISION) {
return null
}
val maxScale = Math.min(HIVE_DECIMAL_MAX_SCALE,
Math.min(HIVE_DECIMAL_MAX_PRECISION - intDigits, result.scale))
if (result.scale > maxScale) {
result = result.setScale(maxScale, RoundingMode.HALF_UP)
// Trimming is again necessary, because rounding may introduce new trailing 0's.
result = trimDecimal(result)
}
result
}
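// Worked example (editor addition, not part of the original file), following the logic above:
// normalizeDecimal(new BigDecimal("1.2300")) yields 1.23 (trailing zeros stripped), while
// normalizeDecimal(new BigDecimal("100")) yields 100, because the negative scale produced by
// stripTrailingZeros (1E+2) is reset to scale 0.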
/**
* Mimics TimestampWritable.hashCode() in Hive
*/
def hashTimestamp(timestamp: Long): Long = {
val timestampInSeconds = MICROSECONDS.toSeconds(timestamp)
val nanoSecondsPortion = (timestamp % MICROS_PER_SECOND) * NANOS_PER_MICROS
var result = timestampInSeconds
result <<= 30 // the nanosecond part fits in 30 bits
result |= nanoSecondsPortion
((result >>> 32) ^ result).toInt
}
/**
* Hive allows input intervals to be defined using units below but the intervals
* have to be from the same category:
* - year, month (stored as HiveIntervalYearMonth)
* - day, hour, minute, second, nanosecond (stored as HiveIntervalDayTime)
*
* e.g. (INTERVAL '30' YEAR + INTERVAL '-23' DAY) fails in Hive
*
* This method mimics HiveIntervalDayTime.hashCode() in Hive.
*
* There are two differences w.r.t. Hive, due to how intervals are stored in Spark vs. Hive:
*
* - If the `INTERVAL` is backed by HiveIntervalYearMonth in Hive, then this method will not
* produce a Hive-compatible result, because Spark's calendar interval representation is
* unified and does not have such interval-based categories.
*
* - Spark's [[CalendarInterval]] has precision up to microseconds, but Hive's
* HiveIntervalDayTime can store data with precision up to nanoseconds. So any input interval
* with a nanosecond component will lead to a hash that differs from Hive's output.
*/
def hashCalendarInterval(calendarInterval: CalendarInterval): Long = {
val totalSeconds = calendarInterval.microseconds / CalendarInterval.MICROS_PER_SECOND.toInt
val result: Int = (17 * 37) + (totalSeconds ^ totalSeconds >> 32).toInt
val nanoSeconds =
(calendarInterval.microseconds -
(totalSeconds * CalendarInterval.MICROS_PER_SECOND.toInt)).toInt * 1000
(result * 37) + nanoSeconds
}
override def hash(value: Any, dataType: DataType, seed: Long): Long = {
value match {
case null => 0
case array: ArrayData =>
val elementType = dataType match {
case udt: UserDefinedType[_] => udt.sqlType.asInstanceOf[ArrayType].elementType
case ArrayType(et, _) => et
}
var result = 0
var i = 0
val length = array.numElements()
while (i < length) {
result = (31 * result) + hash(array.get(i, elementType), elementType, 0).toInt
i += 1
}
result
case map: MapData =>
val (kt, vt) = dataType match {
case udt: UserDefinedType[_] =>
val mapType = udt.sqlType.asInstanceOf[MapType]
mapType.keyType -> mapType.valueType
case MapType(_kt, _vt, _) => _kt -> _vt
}
val keys = map.keyArray()
val values = map.valueArray()
var result = 0
var i = 0
val length = map.numElements()
while (i < length) {
result += hash(keys.get(i, kt), kt, 0).toInt ^ hash(values.get(i, vt), vt, 0).toInt
i += 1
}
result
case struct: InternalRow =>
val types: Array[DataType] = dataType match {
case udt: UserDefinedType[_] =>
udt.sqlType.asInstanceOf[StructType].map(_.dataType).toArray
case StructType(fields) => fields.map(_.dataType)
}
var result = 0
var i = 0
val length = struct.numFields
while (i < length) {
result = (31 * result) + hash(struct.get(i, types(i)), types(i), 0).toInt
i += 1
}
result
case d: Decimal => normalizeDecimal(d.toJavaBigDecimal).hashCode()
case timestamp: Long if dataType.isInstanceOf[TimestampType] => hashTimestamp(timestamp)
case calendarInterval: CalendarInterval => hashCalendarInterval(calendarInterval)
case _ => super.hash(value, dataType, 0)
}
}
}
|
santhoshkumarvs/spark
|
external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaRelation.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.kafka010
import java.util.UUID
import org.apache.kafka.common.TopicPartition
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.sources.{BaseRelation, TableScan}
import org.apache.spark.sql.types.StructType
import org.apache.spark.unsafe.types.UTF8String
private[kafka010] class KafkaRelation(
override val sqlContext: SQLContext,
strategy: ConsumerStrategy,
sourceOptions: Map[String, String],
specifiedKafkaParams: Map[String, String],
failOnDataLoss: Boolean,
startingOffsets: KafkaOffsetRangeLimit,
endingOffsets: KafkaOffsetRangeLimit)
extends BaseRelation with TableScan with Logging {
assert(startingOffsets != LatestOffsetRangeLimit,
"Starting offset not allowed to be set to latest offsets.")
assert(endingOffsets != EarliestOffsetRangeLimit,
"Ending offset not allowed to be set to earliest offsets.")
private val pollTimeoutMs = sourceOptions.getOrElse(
"kafkaConsumer.pollTimeoutMs",
(sqlContext.sparkContext.conf.getTimeAsSeconds(
"spark.network.timeout",
"120s") * 1000L).toString
).toLong
override def schema: StructType = KafkaOffsetReader.kafkaSchema
override def buildScan(): RDD[Row] = {
// Each running query should use its own group id. Otherwise, the query may only be assigned
// partial data, since Kafka assigns partitions across all consumers that share a group id.
// Hence, we generate a unique group id for each query.
val uniqueGroupId = s"spark-kafka-relation-${UUID.randomUUID}"
val kafkaOffsetReader = new KafkaOffsetReader(
strategy,
KafkaSourceProvider.kafkaParamsForDriver(specifiedKafkaParams),
sourceOptions,
driverGroupIdPrefix = s"$uniqueGroupId-driver")
// Leverage the KafkaReader to obtain the relevant partition offsets
val (fromPartitionOffsets, untilPartitionOffsets) = {
try {
(getPartitionOffsets(kafkaOffsetReader, startingOffsets),
getPartitionOffsets(kafkaOffsetReader, endingOffsets))
} finally {
kafkaOffsetReader.close()
}
}
// Verify that the starting and ending offsets cover the same set of topic partitions; topic
// partitions that were added and/or deleted between the two calls above are not supported.
if (fromPartitionOffsets.keySet != untilPartitionOffsets.keySet) {
implicit val topicOrdering: Ordering[TopicPartition] = Ordering.by(t => t.topic())
val fromTopics = fromPartitionOffsets.keySet.toList.sorted.mkString(",")
val untilTopics = untilPartitionOffsets.keySet.toList.sorted.mkString(",")
throw new IllegalStateException("different topic partitions " +
s"for starting offsets topics[${fromTopics}] and " +
s"ending offsets topics[${untilTopics}]")
}
// Calculate offset ranges
val offsetRanges = untilPartitionOffsets.keySet.map { tp =>
val fromOffset = fromPartitionOffsets.getOrElse(tp,
// This should not happen, since we already verified above that the starting and ending
// offsets cover the same set of topic partitions
throw new IllegalStateException(s"$tp doesn't have a from offset"))
val untilOffset = untilPartitionOffsets(tp)
KafkaSourceRDDOffsetRange(tp, fromOffset, untilOffset, None)
}.toArray
logInfo("GetBatch generating RDD of offset range: " +
offsetRanges.sortBy(_.topicPartition.toString).mkString(", "))
// Create an RDD that reads from Kafka and get the (key, value) pair as byte arrays.
val executorKafkaParams =
KafkaSourceProvider.kafkaParamsForExecutors(specifiedKafkaParams, uniqueGroupId)
val rdd = new KafkaSourceRDD(
sqlContext.sparkContext, executorKafkaParams, offsetRanges,
pollTimeoutMs, failOnDataLoss, reuseKafkaConsumer = false).map { cr =>
InternalRow(
cr.key,
cr.value,
UTF8String.fromString(cr.topic),
cr.partition,
cr.offset,
DateTimeUtils.fromJavaTimestamp(new java.sql.Timestamp(cr.timestamp)),
cr.timestampType.id)
}
sqlContext.internalCreateDataFrame(rdd.setName("kafka"), schema).rdd
}
private def getPartitionOffsets(
kafkaReader: KafkaOffsetReader,
kafkaOffsets: KafkaOffsetRangeLimit): Map[TopicPartition, Long] = {
def validateTopicPartitions(partitions: Set[TopicPartition],
partitionOffsets: Map[TopicPartition, Long]): Map[TopicPartition, Long] = {
assert(partitions == partitionOffsets.keySet,
"If startingOffsets contains specific offsets, you must specify all TopicPartitions.\n" +
"Use -1 for latest, -2 for earliest, if you don't care.\n" +
s"Specified: ${partitionOffsets.keySet} Assigned: ${partitions}")
logDebug(s"Partitions assigned to consumer: $partitions. Seeking to $partitionOffsets")
partitionOffsets
}
val partitions = kafkaReader.fetchTopicPartitions()
// Obtain TopicPartition offsets with late binding support
kafkaOffsets match {
case EarliestOffsetRangeLimit => partitions.map {
case tp => tp -> KafkaOffsetRangeLimit.EARLIEST
}.toMap
case LatestOffsetRangeLimit => partitions.map {
case tp => tp -> KafkaOffsetRangeLimit.LATEST
}.toMap
case SpecificOffsetRangeLimit(partitionOffsets) =>
validateTopicPartitions(partitions, partitionOffsets)
}
}
override def toString: String =
s"KafkaRelation(strategy=$strategy, start=$startingOffsets, end=$endingOffsets)"
}
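// Illustrative usage sketch (editor addition, not part of the original file): KafkaRelation is
// not constructed directly by users; it is produced by the "kafka" data source for batch reads.
// The broker address and topic below are placeholders; the option names are the documented
// batch-read options that map onto the startingOffsets / endingOffsets handled above.
private[kafka010] object KafkaRelationUsageExample {
def batchRead(spark: org.apache.spark.sql.SparkSession): org.apache.spark.sql.DataFrame = {
spark.read
.format("kafka")
.option("kafka.bootstrap.servers", "host1:9092") // placeholder broker list
.option("subscribe", "topic1") // placeholder topic
.option("startingOffsets", "earliest") // lower bound of the scan
.option("endingOffsets", "latest") // upper bound of the scan
.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
}
}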
|
santhoshkumarvs/spark
|
sql/core/src/test/scala/org/apache/spark/sql/internal/SQLConfSuite.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.internal
import org.apache.hadoop.fs.Path
import org.apache.spark.sql._
import org.apache.spark.sql.internal.StaticSQLConf._
import org.apache.spark.sql.test.{SharedSQLContext, TestSQLContext}
import org.apache.spark.util.Utils
class SQLConfSuite extends QueryTest with SharedSQLContext {
import testImplicits._
private val testKey = "test.key.0"
private val testVal = "test.val.0"
test("propagate from spark conf") {
// We create a new context here to avoid order dependence with other tests that might call
// clear().
val newContext = new SQLContext(SparkSession.builder().sparkContext(sparkContext).getOrCreate())
assert(newContext.getConf("spark.sql.testkey", "false") === "true")
}
test("programmatic ways of basic setting and getting") {
// Set a conf first.
spark.conf.set(testKey, testVal)
// Clear the conf.
spark.sessionState.conf.clear()
// After clear, only the overrideConfs used by the unit tests should remain in the SQLConf.
assert(spark.conf.getAll === TestSQLContext.overrideConfs)
spark.conf.set(testKey, testVal)
assert(spark.conf.get(testKey) === testVal)
assert(spark.conf.get(testKey, testVal + "_") === testVal)
assert(spark.conf.getAll.contains(testKey))
// Tests that SQLConf, as accessed from a SQLContext, is mutable after
// the latter is initialized, unlike SparkConf inside a SparkContext.
assert(spark.conf.get(testKey) === testVal)
assert(spark.conf.get(testKey, testVal + "_") === testVal)
assert(spark.conf.getAll.contains(testKey))
spark.sessionState.conf.clear()
}
test("parse SQL set commands") {
spark.sessionState.conf.clear()
sql(s"set $testKey=$testVal")
assert(spark.conf.get(testKey, testVal + "_") === testVal)
assert(spark.conf.get(testKey, testVal + "_") === testVal)
sql("set some.property=20")
assert(spark.conf.get("some.property", "0") === "20")
sql("set some.property = 40")
assert(spark.conf.get("some.property", "0") === "40")
val key = "spark.sql.key"
val vs = "val0,val_1,val2.3,my_table"
sql(s"set $key=$vs")
assert(spark.conf.get(key, "0") === vs)
sql(s"set $key=")
assert(spark.conf.get(key, "0") === "")
spark.sessionState.conf.clear()
}
test("set command for display") {
spark.sessionState.conf.clear()
checkAnswer(
sql("SET").where("key = 'spark.sql.groupByOrdinal'").select("key", "value"),
Nil)
checkAnswer(
sql("SET -v").where("key = 'spark.sql.groupByOrdinal'").select("key", "value"),
Row("spark.sql.groupByOrdinal", "true"))
sql("SET spark.sql.groupByOrdinal=false")
checkAnswer(
sql("SET").where("key = 'spark.sql.groupByOrdinal'").select("key", "value"),
Row("spark.sql.groupByOrdinal", "false"))
checkAnswer(
sql("SET -v").where("key = 'spark.sql.groupByOrdinal'").select("key", "value"),
Row("spark.sql.groupByOrdinal", "false"))
}
test("deprecated property") {
spark.sessionState.conf.clear()
val original = spark.conf.get(SQLConf.SHUFFLE_PARTITIONS)
try {
sql(s"set ${SQLConf.Deprecated.MAPRED_REDUCE_TASKS}=10")
assert(spark.conf.get(SQLConf.SHUFFLE_PARTITIONS) === 10)
} finally {
sql(s"set ${SQLConf.SHUFFLE_PARTITIONS}=$original")
}
}
test("reset - public conf") {
spark.sessionState.conf.clear()
val original = spark.conf.get(SQLConf.GROUP_BY_ORDINAL)
try {
assert(spark.conf.get(SQLConf.GROUP_BY_ORDINAL))
sql(s"set ${SQLConf.GROUP_BY_ORDINAL.key}=false")
assert(spark.conf.get(SQLConf.GROUP_BY_ORDINAL) === false)
assert(sql(s"set").where(s"key = '${SQLConf.GROUP_BY_ORDINAL.key}'").count() == 1)
sql(s"reset")
assert(spark.conf.get(SQLConf.GROUP_BY_ORDINAL))
assert(sql(s"set").where(s"key = '${SQLConf.GROUP_BY_ORDINAL.key}'").count() == 0)
} finally {
sql(s"set ${SQLConf.GROUP_BY_ORDINAL}=$original")
}
}
test("reset - internal conf") {
spark.sessionState.conf.clear()
val original = spark.conf.get(SQLConf.OPTIMIZER_MAX_ITERATIONS)
try {
assert(spark.conf.get(SQLConf.OPTIMIZER_MAX_ITERATIONS) === 100)
sql(s"set ${SQLConf.OPTIMIZER_MAX_ITERATIONS.key}=10")
assert(spark.conf.get(SQLConf.OPTIMIZER_MAX_ITERATIONS) === 10)
assert(sql(s"set").where(s"key = '${SQLConf.OPTIMIZER_MAX_ITERATIONS.key}'").count() == 1)
sql(s"reset")
assert(spark.conf.get(SQLConf.OPTIMIZER_MAX_ITERATIONS) === 100)
assert(sql(s"set").where(s"key = '${SQLConf.OPTIMIZER_MAX_ITERATIONS.key}'").count() == 0)
} finally {
sql(s"set ${SQLConf.OPTIMIZER_MAX_ITERATIONS}=$original")
}
}
test("reset - user-defined conf") {
spark.sessionState.conf.clear()
val userDefinedConf = "x.y.z.reset"
try {
assert(spark.conf.getOption(userDefinedConf).isEmpty)
sql(s"set $userDefinedConf=false")
assert(spark.conf.get(userDefinedConf) === "false")
assert(sql(s"set").where(s"key = '$userDefinedConf'").count() == 1)
sql(s"reset")
assert(spark.conf.getOption(userDefinedConf).isEmpty)
} finally {
spark.conf.unset(userDefinedConf)
}
}
test("invalid conf value") {
spark.sessionState.conf.clear()
val e = intercept[IllegalArgumentException] {
sql(s"set ${SQLConf.CASE_SENSITIVE.key}=10")
}
assert(e.getMessage === s"${SQLConf.CASE_SENSITIVE.key} should be boolean, but was 10")
}
test("Test SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE's method") {
spark.sessionState.conf.clear()
spark.conf.set(SQLConf.SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE.key, "100")
assert(spark.conf.get(SQLConf.SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE) === 100)
spark.conf.set(SQLConf.SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE.key, "1k")
assert(spark.conf.get(SQLConf.SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE) === 1024)
spark.conf.set(SQLConf.SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE.key, "1M")
assert(spark.conf.get(SQLConf.SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE) === 1048576)
spark.conf.set(SQLConf.SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE.key, "1g")
assert(spark.conf.get(SQLConf.SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE) === 1073741824)
spark.conf.set(SQLConf.SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE.key, "-1")
assert(spark.conf.get(SQLConf.SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE) === -1)
// Test overflow exception
intercept[IllegalArgumentException] {
// This value exceeds Long.MaxValue
spark.conf.set(SQLConf.SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE.key, "90000000000g")
}
intercept[IllegalArgumentException] {
// This value is less than Long.MinValue
spark.conf.set(SQLConf.SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE.key, "-90000000000g")
}
spark.sessionState.conf.clear()
}
test("SparkSession can access configs set in SparkConf") {
try {
sparkContext.conf.set("spark.to.be.or.not.to.be", "my love")
sparkContext.conf.set("spark.sql.with.or.without.you", "my love")
val spark = new SparkSession(sparkContext)
assert(spark.conf.get("spark.to.be.or.not.to.be") == "my love")
assert(spark.conf.get("spark.sql.with.or.without.you") == "my love")
} finally {
sparkContext.conf.remove("spark.to.be.or.not.to.be")
sparkContext.conf.remove("spark.sql.with.or.without.you")
}
}
test("default value of WAREHOUSE_PATH") {
// The JVM adds a trailing slash if the directory exists, and leaves it as-is if it doesn't.
// In our comparison, strip the trailing slash off both sides to account for such cases.
assert(new Path(Utils.resolveURI("spark-warehouse")).toString.stripSuffix("/") === spark
.sessionState.conf.warehousePath.stripSuffix("/"))
}
test("static SQL conf comes from SparkConf") {
val previousValue = sparkContext.conf.get(SCHEMA_STRING_LENGTH_THRESHOLD)
try {
sparkContext.conf.set(SCHEMA_STRING_LENGTH_THRESHOLD, 2000)
val newSession = new SparkSession(sparkContext)
assert(newSession.conf.get(SCHEMA_STRING_LENGTH_THRESHOLD) == 2000)
checkAnswer(
newSession.sql(s"SET ${SCHEMA_STRING_LENGTH_THRESHOLD.key}"),
Row(SCHEMA_STRING_LENGTH_THRESHOLD.key, "2000"))
} finally {
sparkContext.conf.set(SCHEMA_STRING_LENGTH_THRESHOLD, previousValue)
}
}
test("cannot set/unset static SQL conf") {
val e1 = intercept[AnalysisException](sql(s"SET ${SCHEMA_STRING_LENGTH_THRESHOLD.key}=10"))
assert(e1.message.contains("Cannot modify the value of a static config"))
val e2 = intercept[AnalysisException](spark.conf.unset(SCHEMA_STRING_LENGTH_THRESHOLD.key))
assert(e2.message.contains("Cannot modify the value of a static config"))
}
test("SPARK-21588 SQLContext.getConf(key, null) should return null") {
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
assert("1" == spark.conf.get(SQLConf.SHUFFLE_PARTITIONS.key, null))
assert("1" == spark.conf.get(SQLConf.SHUFFLE_PARTITIONS.key, "<undefined>"))
}
assert(spark.conf.getOption("spark.sql.nonexistent").isEmpty)
assert(null == spark.conf.get("spark.sql.nonexistent", null))
assert("<undefined>" == spark.conf.get("spark.sql.nonexistent", "<undefined>"))
}
test("SPARK-10365: PARQUET_OUTPUT_TIMESTAMP_TYPE") {
spark.sessionState.conf.clear()
// check default value
assert(spark.sessionState.conf.parquetOutputTimestampType ==
SQLConf.ParquetOutputTimestampType.INT96)
// PARQUET_INT64_AS_TIMESTAMP_MILLIS should be respected.
spark.sessionState.conf.setConf(SQLConf.PARQUET_INT64_AS_TIMESTAMP_MILLIS, true)
assert(spark.sessionState.conf.parquetOutputTimestampType ==
SQLConf.ParquetOutputTimestampType.TIMESTAMP_MILLIS)
// PARQUET_OUTPUT_TIMESTAMP_TYPE has higher priority over PARQUET_INT64_AS_TIMESTAMP_MILLIS
spark.sessionState.conf.setConf(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE, "timestamp_micros")
assert(spark.sessionState.conf.parquetOutputTimestampType ==
SQLConf.ParquetOutputTimestampType.TIMESTAMP_MICROS)
spark.sessionState.conf.setConf(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE, "int96")
assert(spark.sessionState.conf.parquetOutputTimestampType ==
SQLConf.ParquetOutputTimestampType.INT96)
// test invalid conf value
intercept[IllegalArgumentException] {
spark.conf.set(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key, "invalid")
}
spark.sessionState.conf.clear()
}
test("SPARK-22779: correctly compute default value for fallback configs") {
val fallback = SQLConf.buildConf("spark.sql.__test__.spark_22779")
.fallbackConf(SQLConf.PARQUET_COMPRESSION)
assert(spark.sessionState.conf.getConfString(fallback.key) ===
SQLConf.PARQUET_COMPRESSION.defaultValue.get)
assert(spark.sessionState.conf.getConfString(fallback.key, "lzo") === "lzo")
val displayValue = spark.sessionState.conf.getAllDefinedConfs
.find { case (key, _, _) => key == fallback.key }
.map { case (_, v, _) => v }
.get
assert(displayValue === fallback.defaultValueString)
spark.sessionState.conf.setConf(SQLConf.PARQUET_COMPRESSION, "gzip")
assert(spark.sessionState.conf.getConfString(fallback.key) === "gzip")
spark.sessionState.conf.setConf(fallback, "lzo")
assert(spark.sessionState.conf.getConfString(fallback.key) === "lzo")
val newDisplayValue = spark.sessionState.conf.getAllDefinedConfs
.find { case (key, _, _) => key == fallback.key }
.map { case (_, v, _) => v }
.get
assert(newDisplayValue === "lzo")
SQLConf.unregister(fallback)
}
test("SPARK-24783: spark.sql.shuffle.partitions=0 should throw exception ") {
val e = intercept[IllegalArgumentException] {
spark.conf.set(SQLConf.SHUFFLE_PARTITIONS.key, 0)
}
assert(e.getMessage.contains("spark.sql.shuffle.partitions"))
val e2 = intercept[IllegalArgumentException] {
spark.conf.set(SQLConf.SHUFFLE_PARTITIONS.key, -1)
}
assert(e2.getMessage.contains("spark.sql.shuffle.partitions"))
}
}
|
santhoshkumarvs/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/UpdateAttributeNullability.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.Rule
/**
* Updates nullability of Attributes in a resolved LogicalPlan by using the nullability of
* corresponding Attributes of its children output Attributes. This step is needed because
* users can use a resolved AttributeReference in the Dataset API and outer joins
 * can change the nullability of an AttributeReference. Without this rule, a nullable column's
 * nullable field can actually be set as non-nullable, which causes illegal optimizations
* (e.g., NULL propagation) and wrong answers.
* See SPARK-13484 and SPARK-13801 for the concrete queries of this case.
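 *
 * For example (an illustrative sketch): in `t1 LEFT OUTER JOIN t2`, attributes coming from `t2`
 * become nullable in the join output. An AttributeReference for a `t2` column captured before
 * the join (e.g., through the Dataset API) may still claim nullable = false; this rule rewrites
 * its nullability to true based on the corresponding output Attribute of the children.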
*/
object UpdateAttributeNullability extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperatorsUp {
// Skip unresolved nodes.
case p if !p.resolved => p
// Skip leaf node, as it has no child and no need to update nullability.
case p: LeafNode => p
case p: LogicalPlan =>
val nullabilities = p.children.flatMap(c => c.output).groupBy(_.exprId).map {
// If there are multiple Attributes having the same ExprId, we need to resolve
// the conflict of nullable field. We do not really expect this to happen.
case (exprId, attributes) => exprId -> attributes.exists(_.nullable)
}
// For an Attribute used by the current LogicalPlan, if it is from its children,
// we fix the nullable field by using the nullability setting of the corresponding
// output Attribute from the children.
p.transformExpressions {
case attr: Attribute if nullabilities.contains(attr.exprId) =>
attr.withNullability(nullabilities(attr.exprId))
}
}
}
|
santhoshkumarvs/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/PushDownLeftSemiAntiJoin.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.Rule
/**
* This rule is a variant of [[PushDownPredicate]] which can handle
* pushing down Left semi and Left Anti joins below the following operators.
* 1) Project
* 2) Window
* 3) Union
* 4) Aggregate
 * 5) Other permissible unary operators. Please see [[PushDownPredicate.canPushThrough]].
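 *
 * For example (an illustrative sketch, not taken from the actual optimizer tests), a left-semi
 * join over a Project such as
 * {{{
 *   Join(Project(a, b, child), right, LeftSemi, Some(a === right.x))
 * }}}
 * can be rewritten to
 * {{{
 *   Project(a, b, Join(child, right, LeftSemi, Some(a === right.x)))
 * }}}
 * so that the join is evaluated below the projection.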
*/
object PushDownLeftSemiAntiJoin extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// LeftSemi/LeftAnti over Project
case Join(p @ Project(pList, gChild), rightOp, LeftSemiOrAnti(joinType), joinCond, hint)
if pList.forall(_.deterministic) &&
!pList.exists(ScalarSubquery.hasCorrelatedScalarSubquery) &&
canPushThroughCondition(Seq(gChild), joinCond, rightOp) =>
if (joinCond.isEmpty) {
// No join condition, just push down the Join below Project
p.copy(child = Join(gChild, rightOp, joinType, joinCond, hint))
} else {
val aliasMap = PushDownPredicate.getAliasMap(p)
val newJoinCond = if (aliasMap.nonEmpty) {
Option(replaceAlias(joinCond.get, aliasMap))
} else {
joinCond
}
p.copy(child = Join(gChild, rightOp, joinType, newJoinCond, hint))
}
// LeftSemi/LeftAnti over Aggregate
case join @ Join(agg: Aggregate, rightOp, LeftSemiOrAnti(_), _, _)
if agg.aggregateExpressions.forall(_.deterministic) && agg.groupingExpressions.nonEmpty &&
!agg.aggregateExpressions.exists(ScalarSubquery.hasCorrelatedScalarSubquery) =>
val aliasMap = PushDownPredicate.getAliasMap(agg)
val canPushDownPredicate = (predicate: Expression) => {
val replaced = replaceAlias(predicate, aliasMap)
predicate.references.nonEmpty &&
replaced.references.subsetOf(agg.child.outputSet ++ rightOp.outputSet)
}
val makeJoinCondition = (predicates: Seq[Expression]) => {
replaceAlias(predicates.reduce(And), aliasMap)
}
pushDownJoin(join, canPushDownPredicate, makeJoinCondition)
// LeftSemi/LeftAnti over Window
case join @ Join(w: Window, rightOp, LeftSemiOrAnti(_), _, _)
if w.partitionSpec.forall(_.isInstanceOf[AttributeReference]) =>
val partitionAttrs = AttributeSet(w.partitionSpec.flatMap(_.references)) ++ rightOp.outputSet
pushDownJoin(join, _.references.subsetOf(partitionAttrs), _.reduce(And))
// LeftSemi/LeftAnti over Union
case Join(union: Union, rightOp, LeftSemiOrAnti(joinType), joinCond, hint)
if canPushThroughCondition(union.children, joinCond, rightOp) =>
if (joinCond.isEmpty) {
// Push down the Join below Union
val newGrandChildren = union.children.map { Join(_, rightOp, joinType, joinCond, hint) }
union.withNewChildren(newGrandChildren)
} else {
val output = union.output
val newGrandChildren = union.children.map { grandchild =>
val newCond = joinCond.get transform {
case e if output.exists(_.semanticEquals(e)) =>
grandchild.output(output.indexWhere(_.semanticEquals(e)))
}
assert(newCond.references.subsetOf(grandchild.outputSet ++ rightOp.outputSet))
Join(grandchild, rightOp, joinType, Option(newCond), hint)
}
union.withNewChildren(newGrandChildren)
}
// LeftSemi/LeftAnti over UnaryNode
case join @ Join(u: UnaryNode, rightOp, LeftSemiOrAnti(_), _, _)
if PushDownPredicate.canPushThrough(u) && u.expressions.forall(_.deterministic) =>
val validAttrs = u.child.outputSet ++ rightOp.outputSet
pushDownJoin(join, _.references.subsetOf(validAttrs), _.reduce(And))
}
/**
   * Check if we can safely push a join through a project or union by making sure that the
   * attributes referred to in the join condition do not overlap with the output of the plan they
   * are moved into. Such overlap can happen when both sides of the join refer to the same source
   * (a self join). This function makes sure that the join condition does not refer to ambiguous
   * attributes (i.e., attributes present in both legs of the join), or else the resulting plan
   * would be invalid.
*/
private def canPushThroughCondition(
plans: Seq[LogicalPlan],
condition: Option[Expression],
rightOp: LogicalPlan): Boolean = {
val attributes = AttributeSet(plans.flatMap(_.output))
if (condition.isDefined) {
val matched = condition.get.references.intersect(rightOp.outputSet).intersect(attributes)
matched.isEmpty
} else {
true
}
}
private def pushDownJoin(
join: Join,
canPushDownPredicate: Expression => Boolean,
makeJoinCondition: Seq[Expression] => Expression): LogicalPlan = {
assert(join.left.children.length == 1)
if (join.condition.isEmpty) {
join.left.withNewChildren(Seq(join.copy(left = join.left.children.head)))
} else {
val (pushDown, stayUp) = splitConjunctivePredicates(join.condition.get)
.partition(canPushDownPredicate)
      // Check that the remaining predicates do not reference columns from the right-hand side of
      // the join. The remaining predicates are kept as a Filter over the operator that was
      // originally below the join, so this check is necessary once the left-semi/anti join has
      // been pushed down: these join types only output the left leg, so the Filter cannot
      // reference right-side columns.
val referRightSideCols = AttributeSet(stayUp.toSet).intersect(join.right.outputSet).nonEmpty
if (pushDown.isEmpty || referRightSideCols) {
join
} else {
val newPlan = join.left.withNewChildren(Seq(join.copy(
left = join.left.children.head, condition = Some(makeJoinCondition(pushDown)))))
// If there is no more filter to stay up, return the new plan that has join pushed down.
if (stayUp.isEmpty) {
newPlan
} else {
join.joinType match {
            // In case of a left-semi join, the part of the join condition that does not refer
            // to attributes of the grandchild is kept as a Filter above.
case LeftSemi => Filter(stayUp.reduce(And), newPlan)
// In case of left-anti join, the join is pushed down only when the entire join
// condition is eligible to be pushed down to preserve the semantics of left-anti join.
case _ => join
}
}
}
}
}
}
|
santhoshkumarvs/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/v2/ResolveMultipartIdentifierSuite.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.catalog.v2
import org.scalatest.Matchers._
import org.apache.spark.sql.catalog.v2.{CatalogNotFoundException, CatalogPlugin}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, Analyzer}
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.util.CaseInsensitiveStringMap
private class TestCatalogPlugin(override val name: String) extends CatalogPlugin {
  override def initialize(name: String, options: CaseInsensitiveStringMap): Unit = {}
}
class ResolveMultipartIdentifierSuite extends AnalysisTest {
import CatalystSqlParser._
private val analyzer = makeAnalyzer(caseSensitive = false)
private val catalogs = Seq("prod", "test").map(name => name -> new TestCatalogPlugin(name)).toMap
private def lookupCatalog(catalog: String): CatalogPlugin =
catalogs.getOrElse(catalog, throw new CatalogNotFoundException("Not found"))
private def makeAnalyzer(caseSensitive: Boolean) = {
val conf = new SQLConf().copy(SQLConf.CASE_SENSITIVE -> caseSensitive)
new Analyzer(Some(lookupCatalog _), null, conf)
}
override protected def getAnalyzer(caseSensitive: Boolean) = analyzer
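  // Parses a multipart identifier and checks how it is split into an optional catalog plugin,
  // a namespace (possibly empty), and the final object name.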
private def checkResolution(sqlText: String, expectedCatalog: Option[CatalogPlugin],
expectedNamespace: Array[String], expectedName: String): Unit = {
import analyzer.CatalogObjectIdentifier
val CatalogObjectIdentifier(catalog, ident) = parseMultipartIdentifier(sqlText)
catalog shouldEqual expectedCatalog
ident.namespace shouldEqual expectedNamespace
ident.name shouldEqual expectedName
}
private def checkTableResolution(sqlText: String,
expectedIdent: Option[TableIdentifier]): Unit = {
import analyzer.AsTableIdentifier
parseMultipartIdentifier(sqlText) match {
case AsTableIdentifier(ident) =>
assert(Some(ident) === expectedIdent)
case _ =>
assert(None === expectedIdent)
}
}
test("resolve multipart identifier") {
checkResolution("tbl", None, Array.empty, "tbl")
checkResolution("db.tbl", None, Array("db"), "tbl")
checkResolution("prod.func", catalogs.get("prod"), Array.empty, "func")
checkResolution("ns1.ns2.tbl", None, Array("ns1", "ns2"), "tbl")
checkResolution("prod.db.tbl", catalogs.get("prod"), Array("db"), "tbl")
checkResolution("test.db.tbl", catalogs.get("test"), Array("db"), "tbl")
checkResolution("test.ns1.ns2.ns3.tbl",
catalogs.get("test"), Array("ns1", "ns2", "ns3"), "tbl")
checkResolution("`db.tbl`", None, Array.empty, "db.tbl")
checkResolution("parquet.`file:/tmp/db.tbl`", None, Array("parquet"), "file:/tmp/db.tbl")
checkResolution("`org.apache.spark.sql.json`.`s3://buck/tmp/abc.json`", None,
Array("org.apache.spark.sql.json"), "s3://buck/tmp/abc.json")
}
test("resolve table identifier") {
checkTableResolution("tbl", Some(TableIdentifier("tbl")))
checkTableResolution("db.tbl", Some(TableIdentifier("tbl", Some("db"))))
checkTableResolution("prod.func", None)
checkTableResolution("ns1.ns2.tbl", None)
checkTableResolution("prod.db.tbl", None)
checkTableResolution("`db.tbl`", Some(TableIdentifier("db.tbl")))
checkTableResolution("parquet.`file:/tmp/db.tbl`",
Some(TableIdentifier("file:/tmp/db.tbl", Some("parquet"))))
checkTableResolution("`org.apache.spark.sql.json`.`s3://buck/tmp/abc.json`",
Some(TableIdentifier("s3://buck/tmp/abc.json", Some("org.apache.spark.sql.json"))))
}
}
|
santhoshkumarvs/spark
|
resource-managers/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtilsSuite.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.mesos
import java.io.{File, FileNotFoundException}
import scala.collection.JavaConverters._
import scala.language.reflectiveCalls
import com.google.common.io.Files
import org.apache.mesos.Protos.{FrameworkInfo, Resource, Value}
import org.mockito.Mockito._
import org.scalatest._
import org.scalatest.mockito.MockitoSugar
import org.apache.spark.{SparkConf, SparkContext, SparkException, SparkFunSuite}
import org.apache.spark.deploy.mesos.{config => mesosConfig}
import org.apache.spark.internal.config._
import org.apache.spark.util.SparkConfWithEnv
class MesosSchedulerUtilsSuite extends SparkFunSuite with Matchers with MockitoSugar {
// scalastyle:off structural.type
// this is the documented way of generating fixtures in scalatest
def fixture: Object {val sc: SparkContext; val sparkConf: SparkConf} = new {
val sparkConf = new SparkConf
val sc = mock[SparkContext]
when(sc.conf).thenReturn(sparkConf)
}
private def createTestPortResource(range: (Long, Long), role: Option[String] = None): Resource = {
val rangeValue = Value.Range.newBuilder()
rangeValue.setBegin(range._1)
rangeValue.setEnd(range._2)
val builder = Resource.newBuilder()
.setName("ports")
.setType(Value.Type.RANGES)
.setRanges(Value.Ranges.newBuilder().addRange(rangeValue))
role.foreach { r => builder.setRole(r) }
builder.build()
}
private def rangesResourcesToTuple(resources: List[Resource]): List[(Long, Long)] = {
resources.flatMap{resource => resource.getRanges.getRangeList
.asScala.map(range => (range.getBegin, range.getEnd))}
}
def arePortsEqual(array1: Array[(Long, Long)], array2: Array[(Long, Long)])
: Boolean = {
array1.sortBy(identity).deep == array2.sortBy(identity).deep
}
def arePortsEqual(array1: Array[Long], array2: Array[Long])
: Boolean = {
array1.sortBy(identity).deep == array2.sortBy(identity).deep
}
def getRangesFromResources(resources: List[Resource]): List[(Long, Long)] = {
resources.flatMap{ resource =>
resource.getRanges.getRangeList.asScala.toList.map{
range => (range.getBegin, range.getEnd)}}
}
val utils = new MesosSchedulerUtils { }
// scalastyle:on structural.type
test("use at-least minimum overhead") {
val f = fixture
when(f.sc.executorMemory).thenReturn(512)
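    // 896 MB = 512 MB of executor memory plus the 384 MB minimum overhead (10% of 512 MB is
    // below the minimum, so the floor applies).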
utils.executorMemory(f.sc) shouldBe 896
}
test("use overhead if it is greater than minimum value") {
val f = fixture
when(f.sc.executorMemory).thenReturn(4096)
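    // 4505 MB = 4096 MB of executor memory plus a 10% overhead (409 MB), which now exceeds the
    // 384 MB minimum.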
utils.executorMemory(f.sc) shouldBe 4505
}
test("use spark.mesos.executor.memoryOverhead (if set)") {
val f = fixture
when(f.sc.executorMemory).thenReturn(1024)
f.sparkConf.set(mesosConfig.EXECUTOR_MEMORY_OVERHEAD, 512)
utils.executorMemory(f.sc) shouldBe 1536
}
test("parse a non-empty constraint string correctly") {
val expectedMap = Map(
"os" -> Set("centos7"),
"zone" -> Set("us-east-1a", "us-east-1b")
)
utils.parseConstraintString("os:centos7;zone:us-east-1a,us-east-1b") should be (expectedMap)
}
test("parse an empty constraint string correctly") {
utils.parseConstraintString("") shouldBe Map()
}
test("throw an exception when the input is malformed") {
an[IllegalArgumentException] should be thrownBy
utils.parseConstraintString("os;zone:us-east")
}
test("empty values for attributes' constraints matches all values") {
val constraintsStr = "os:"
val parsedConstraints = utils.parseConstraintString(constraintsStr)
parsedConstraints shouldBe Map("os" -> Set())
val zoneSet = Value.Set.newBuilder().addItem("us-east-1a").addItem("us-east-1b").build()
val noOsOffer = Map("zone" -> zoneSet)
val centosOffer = Map("os" -> Value.Text.newBuilder().setValue("centos").build())
val ubuntuOffer = Map("os" -> Value.Text.newBuilder().setValue("ubuntu").build())
utils.matchesAttributeRequirements(parsedConstraints, noOsOffer) shouldBe false
utils.matchesAttributeRequirements(parsedConstraints, centosOffer) shouldBe true
utils.matchesAttributeRequirements(parsedConstraints, ubuntuOffer) shouldBe true
}
test("subset match is performed for set attributes") {
val supersetConstraint = Map(
"os" -> Value.Text.newBuilder().setValue("ubuntu").build(),
"zone" -> Value.Set.newBuilder()
.addItem("us-east-1a")
.addItem("us-east-1b")
.addItem("us-east-1c")
.build())
val zoneConstraintStr = "os:;zone:us-east-1a,us-east-1c"
val parsedConstraints = utils.parseConstraintString(zoneConstraintStr)
utils.matchesAttributeRequirements(parsedConstraints, supersetConstraint) shouldBe true
}
test("less than equal match is performed on scalar attributes") {
val offerAttribs = Map("gpus" -> Value.Scalar.newBuilder().setValue(3).build())
val ltConstraint = utils.parseConstraintString("gpus:2")
val eqConstraint = utils.parseConstraintString("gpus:3")
val gtConstraint = utils.parseConstraintString("gpus:4")
utils.matchesAttributeRequirements(ltConstraint, offerAttribs) shouldBe true
utils.matchesAttributeRequirements(eqConstraint, offerAttribs) shouldBe true
utils.matchesAttributeRequirements(gtConstraint, offerAttribs) shouldBe false
}
test("contains match is performed for range attributes") {
val offerAttribs = Map("ports" -> Value.Range.newBuilder().setBegin(7000).setEnd(8000).build())
val ltConstraint = utils.parseConstraintString("ports:6000")
val eqConstraint = utils.parseConstraintString("ports:7500")
val gtConstraint = utils.parseConstraintString("ports:8002")
val multiConstraint = utils.parseConstraintString("ports:5000,7500,8300")
utils.matchesAttributeRequirements(ltConstraint, offerAttribs) shouldBe false
utils.matchesAttributeRequirements(eqConstraint, offerAttribs) shouldBe true
utils.matchesAttributeRequirements(gtConstraint, offerAttribs) shouldBe false
utils.matchesAttributeRequirements(multiConstraint, offerAttribs) shouldBe true
}
test("equality match is performed for text attributes") {
val offerAttribs = Map("os" -> Value.Text.newBuilder().setValue("centos7").build())
val trueConstraint = utils.parseConstraintString("os:centos7")
val falseConstraint = utils.parseConstraintString("os:ubuntu")
utils.matchesAttributeRequirements(trueConstraint, offerAttribs) shouldBe true
utils.matchesAttributeRequirements(falseConstraint, offerAttribs) shouldBe false
}
test("Port reservation is done correctly with user specified ports only") {
val conf = new SparkConf()
conf.set(BLOCK_MANAGER_PORT, 4000)
val portResource = createTestPortResource((3000, 5000), Some("my_role"))
val (resourcesLeft, resourcesToBeUsed) = utils
.partitionPortResources(List(4000), List(portResource))
resourcesToBeUsed.length shouldBe 1
val portsToUse = getRangesFromResources(resourcesToBeUsed).map{r => r._1}.toArray
portsToUse.length shouldBe 1
arePortsEqual(portsToUse, Array(4000L)) shouldBe true
val portRangesToBeUsed = rangesResourcesToTuple(resourcesToBeUsed)
    val expectedUsed = Array((4000L, 4000L))
    arePortsEqual(portRangesToBeUsed.toArray, expectedUsed) shouldBe true
}
test("Port reservation is done correctly with all random ports") {
val conf = new SparkConf()
val portResource = createTestPortResource((3000L, 5000L), Some("my_role"))
val (resourcesLeft, resourcesToBeUsed) = utils
.partitionPortResources(List(), List(portResource))
val portsToUse = getRangesFromResources(resourcesToBeUsed).map{r => r._1}
portsToUse.isEmpty shouldBe true
}
test("Port reservation is done correctly with user specified ports only - multiple ranges") {
val conf = new SparkConf()
conf.set(BLOCK_MANAGER_PORT, 4000)
val portResourceList = List(createTestPortResource((3000, 5000), Some("my_role")),
createTestPortResource((2000, 2500), Some("other_role")))
val (resourcesLeft, resourcesToBeUsed) = utils
.partitionPortResources(List(4000), portResourceList)
val portsToUse = getRangesFromResources(resourcesToBeUsed).map{r => r._1}
portsToUse.length shouldBe 1
val portsRangesLeft = rangesResourcesToTuple(resourcesLeft)
val portRangesToBeUsed = rangesResourcesToTuple(resourcesToBeUsed)
val expectedUsed = Array((4000L, 4000L))
arePortsEqual(portsToUse.toArray, Array(4000L)) shouldBe true
arePortsEqual(portRangesToBeUsed.toArray, expectedUsed) shouldBe true
}
test("Port reservation is done correctly with all random ports - multiple ranges") {
val conf = new SparkConf()
val portResourceList = List(createTestPortResource((3000, 5000), Some("my_role")),
createTestPortResource((2000, 2500), Some("other_role")))
val (resourcesLeft, resourcesToBeUsed) = utils
.partitionPortResources(List(), portResourceList)
val portsToUse = getRangesFromResources(resourcesToBeUsed).map{r => r._1}
portsToUse.isEmpty shouldBe true
}
test("Principal specified via spark.mesos.principal") {
val conf = new SparkConf()
conf.set(mesosConfig.CREDENTIAL_PRINCIPAL, "test-principal")
val credBuilder = utils.buildCredentials(conf, FrameworkInfo.newBuilder())
credBuilder.hasPrincipal shouldBe true
credBuilder.getPrincipal shouldBe "test-principal"
}
test("Principal specified via spark.mesos.principal.file") {
    val pFile = File.createTempFile("MesosSchedulerUtilsSuite", ".txt")
    pFile.deleteOnExit()
    Files.write("test-principal".getBytes("UTF-8"), pFile)
val conf = new SparkConf()
conf.set(mesosConfig.CREDENTIAL_PRINCIPAL_FILE, pFile.getAbsolutePath())
val credBuilder = utils.buildCredentials(conf, FrameworkInfo.newBuilder())
credBuilder.hasPrincipal shouldBe true
credBuilder.getPrincipal shouldBe "test-principal"
}
test("Principal specified via spark.mesos.principal.file that does not exist") {
val conf = new SparkConf()
conf.set(mesosConfig.CREDENTIAL_PRINCIPAL_FILE, "/tmp/does-not-exist")
intercept[FileNotFoundException] {
utils.buildCredentials(conf, FrameworkInfo.newBuilder())
}
}
test("Principal specified via SPARK_MESOS_PRINCIPAL") {
val conf = new SparkConfWithEnv(Map("SPARK_MESOS_PRINCIPAL" -> "test-principal"))
val credBuilder = utils.buildCredentials(conf, FrameworkInfo.newBuilder())
credBuilder.hasPrincipal shouldBe true
credBuilder.getPrincipal shouldBe "test-principal"
}
test("Principal specified via SPARK_MESOS_PRINCIPAL_FILE") {
    val pFile = File.createTempFile("MesosSchedulerUtilsSuite", ".txt")
    pFile.deleteOnExit()
    Files.write("test-principal".getBytes("UTF-8"), pFile)
val conf = new SparkConfWithEnv(Map("SPARK_MESOS_PRINCIPAL_FILE" -> pFile.getAbsolutePath()))
val credBuilder = utils.buildCredentials(conf, FrameworkInfo.newBuilder())
credBuilder.hasPrincipal shouldBe true
credBuilder.getPrincipal shouldBe "test-principal"
}
test("Principal specified via SPARK_MESOS_PRINCIPAL_FILE that does not exist") {
val conf = new SparkConfWithEnv(Map("SPARK_MESOS_PRINCIPAL_FILE" -> "/tmp/does-not-exist"))
intercept[FileNotFoundException] {
utils.buildCredentials(conf, FrameworkInfo.newBuilder())
}
}
test("Secret specified via spark.mesos.secret") {
val conf = new SparkConf()
conf.set(mesosConfig.CREDENTIAL_PRINCIPAL, "test-principal")
conf.set(mesosConfig.CREDENTIAL_SECRET, "my-secret")
val credBuilder = utils.buildCredentials(conf, FrameworkInfo.newBuilder())
credBuilder.hasPrincipal shouldBe true
credBuilder.getPrincipal shouldBe "test-principal"
credBuilder.hasSecret shouldBe true
credBuilder.getSecret shouldBe "my-secret"
}
test("Principal specified via spark.mesos.secret.file") {
val sFile = File.createTempFile("MesosSchedulerUtilsSuite", ".txt");
sFile.deleteOnExit()
Files.write("my-secret".getBytes("UTF-8"), sFile);
val conf = new SparkConf()
conf.set(mesosConfig.CREDENTIAL_PRINCIPAL, "test-principal")
conf.set(mesosConfig.CREDENTIAL_SECRET_FILE, sFile.getAbsolutePath())
val credBuilder = utils.buildCredentials(conf, FrameworkInfo.newBuilder())
credBuilder.hasPrincipal shouldBe true
credBuilder.getPrincipal shouldBe "test-principal"
credBuilder.hasSecret shouldBe true
credBuilder.getSecret shouldBe "my-secret"
}
test("Principal specified via spark.mesos.secret.file that does not exist") {
val conf = new SparkConf()
conf.set(mesosConfig.CREDENTIAL_PRINCIPAL, "test-principal")
conf.set(mesosConfig.CREDENTIAL_SECRET_FILE, "/tmp/does-not-exist")
intercept[FileNotFoundException] {
utils.buildCredentials(conf, FrameworkInfo.newBuilder())
}
}
test("Principal specified via SPARK_MESOS_SECRET") {
val env = Map("SPARK_MESOS_SECRET" -> "my-secret")
val conf = new SparkConfWithEnv(env)
conf.set(mesosConfig.CREDENTIAL_PRINCIPAL, "test-principal")
val credBuilder = utils.buildCredentials(conf, FrameworkInfo.newBuilder())
credBuilder.hasPrincipal shouldBe true
credBuilder.getPrincipal shouldBe "test-principal"
credBuilder.hasSecret shouldBe true
credBuilder.getSecret shouldBe "my-secret"
}
test("Principal specified via SPARK_MESOS_SECRET_FILE") {
val sFile = File.createTempFile("MesosSchedulerUtilsSuite", ".txt");
sFile.deleteOnExit()
Files.write("my-secret".getBytes("UTF-8"), sFile);
val sFilePath = sFile.getAbsolutePath()
val env = Map("SPARK_MESOS_SECRET_FILE" -> sFilePath)
val conf = new SparkConfWithEnv(env)
conf.set(mesosConfig.CREDENTIAL_PRINCIPAL, "test-principal")
val credBuilder = utils.buildCredentials(conf, FrameworkInfo.newBuilder())
credBuilder.hasPrincipal shouldBe true
credBuilder.getPrincipal shouldBe "test-principal"
credBuilder.hasSecret shouldBe true
credBuilder.getSecret shouldBe "my-secret"
}
test("Secret specified with no principal") {
val conf = new SparkConf()
conf.set(mesosConfig.CREDENTIAL_SECRET, "my-secret")
intercept[SparkException] {
utils.buildCredentials(conf, FrameworkInfo.newBuilder())
}
}
test("Principal specification preference") {
val conf = new SparkConfWithEnv(Map("SPARK_MESOS_PRINCIPAL" -> "other-principal"))
conf.set(mesosConfig.CREDENTIAL_PRINCIPAL, "test-principal")
val credBuilder = utils.buildCredentials(conf, FrameworkInfo.newBuilder())
credBuilder.hasPrincipal shouldBe true
credBuilder.getPrincipal shouldBe "test-principal"
}
test("Secret specification preference") {
val conf = new SparkConfWithEnv(Map("SPARK_MESOS_SECRET" -> "other-secret"))
conf.set(mesosConfig.CREDENTIAL_PRINCIPAL, "test-principal")
conf.set(mesosConfig.CREDENTIAL_SECRET, "my-secret")
val credBuilder = utils.buildCredentials(conf, FrameworkInfo.newBuilder())
credBuilder.hasPrincipal shouldBe true
credBuilder.getPrincipal shouldBe "test-principal"
credBuilder.hasSecret shouldBe true
credBuilder.getSecret shouldBe "my-secret"
}
}
|
santhoshkumarvs/spark
|
resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/Utils.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.integrationtest
import java.io.Closeable
import java.net.URI
import org.apache.commons.io.output.ByteArrayOutputStream
import org.apache.spark.internal.Logging
object Utils extends Logging {
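  /**
   * Loan-pattern helper: builds the resource, hands it to `f`, and always closes it afterwards.
   * A hypothetical usage sketch (the file path is a placeholder, not part of this suite):
   * {{{
   *   tryWithResource(new java.io.FileInputStream("/tmp/some-file")) { in => in.read() }
   * }}}
   */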
def tryWithResource[R <: Closeable, T](createResource: => R)(f: R => T): T = {
val resource = createResource
try f.apply(resource) finally resource.close()
}
def executeCommand(cmd: String*)(
implicit podName: String,
kubernetesTestComponents: KubernetesTestComponents): String = {
val out = new ByteArrayOutputStream()
val watch = kubernetesTestComponents
.kubernetesClient
.pods()
.withName(podName)
.readingInput(System.in)
.writingOutput(out)
.writingError(System.err)
.withTTY()
.exec(cmd.toArray: _*)
// wait to get some result back
Thread.sleep(1000)
watch.close()
out.flush()
out.toString()
}
}
|
santhoshkumarvs/spark
|
resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/KubernetesFeaturesTestUtils.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.features
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
import io.fabric8.kubernetes.api.model.{Container, HasMetadata, PodBuilder, SecretBuilder}
import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito.{mock, when}
import org.mockito.invocation.InvocationOnMock
import org.apache.spark.deploy.k8s.SparkPod
object KubernetesFeaturesTestUtils {
def getMockConfigStepForStepType[T <: KubernetesFeatureConfigStep](
stepType: String, stepClass: Class[T]): T = {
val mockStep = mock(stepClass)
when(mockStep.getAdditionalKubernetesResources()).thenReturn(
getSecretsForStepType(stepType))
when(mockStep.getAdditionalPodSystemProperties())
.thenReturn(Map(stepType -> stepType))
when(mockStep.configurePod(any(classOf[SparkPod])))
.thenAnswer((invocation: InvocationOnMock) => {
val originalPod: SparkPod = invocation.getArgument(0)
val configuredPod = new PodBuilder(originalPod.pod)
.editOrNewMetadata()
.addToLabels(stepType, stepType)
.endMetadata()
.build()
SparkPod(configuredPod, originalPod.container)
})
mockStep
}
def getSecretsForStepType[T <: KubernetesFeatureConfigStep](stepType: String)
: Seq[HasMetadata] = {
Seq(new SecretBuilder()
.withNewMetadata()
.withName(stepType)
.endMetadata()
.build())
}
def containerHasEnvVar(container: Container, envVarName: String): Boolean = {
container.getEnv.asScala.exists(envVar => envVar.getName == envVarName)
}
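  // Keeps only the elements whose runtime class is exactly T (subclasses are filtered out)
  // and casts them to T.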
def filter[T: ClassTag](list: Seq[HasMetadata]): Seq[T] = {
val desired = implicitly[ClassTag[T]].runtimeClass
list.filter(_.getClass() == desired).map(_.asInstanceOf[T])
}
}
|
santhoshkumarvs/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SQLAppStatusListener.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.ui
import java.util.{Date, NoSuchElementException}
import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConverters._
import org.apache.spark.{JobExecutionStatus, SparkConf}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.Status._
import org.apache.spark.scheduler._
import org.apache.spark.sql.execution.SQLExecution
import org.apache.spark.sql.execution.metric._
import org.apache.spark.sql.internal.StaticSQLConf._
import org.apache.spark.status.{ElementTrackingStore, KVUtils, LiveEntity}
class SQLAppStatusListener(
conf: SparkConf,
kvstore: ElementTrackingStore,
live: Boolean) extends SparkListener with Logging {
// How often to flush intermediate state of a live execution to the store. When replaying logs,
// never flush (only do the very last write).
private val liveUpdatePeriodNs = if (live) conf.get(LIVE_ENTITY_UPDATE_PERIOD) else -1L
// Live tracked data is needed by the SQL status store to calculate metrics for in-flight
// executions; that means arbitrary threads may be querying these maps, so they need to be
// thread-safe.
private val liveExecutions = new ConcurrentHashMap[Long, LiveExecutionData]()
private val stageMetrics = new ConcurrentHashMap[Int, LiveStageMetrics]()
// Returns true if this listener has no live data. Exposed for tests only.
private[sql] def noLiveData(): Boolean = {
liveExecutions.isEmpty && stageMetrics.isEmpty
}
kvstore.addTrigger(classOf[SQLExecutionUIData], conf.get(UI_RETAINED_EXECUTIONS)) { count =>
cleanupExecutions(count)
}
kvstore.onFlush {
if (!live) {
val now = System.nanoTime()
liveExecutions.values.asScala.foreach { exec =>
// This saves the partial aggregated metrics to the store; this works currently because
// when the SHS sees an updated event log, all old data for the application is thrown
// away.
exec.metricsValues = aggregateMetrics(exec)
exec.write(kvstore, now)
}
}
}
override def onJobStart(event: SparkListenerJobStart): Unit = {
val executionIdString = event.properties.getProperty(SQLExecution.EXECUTION_ID_KEY)
if (executionIdString == null) {
// This is not a job created by SQL
return
}
val executionId = executionIdString.toLong
val jobId = event.jobId
val exec = Option(liveExecutions.get(executionId))
.orElse {
try {
          // Should not overwrite the kvstore with a new entry if it already has the SQLExecution
          // data corresponding to the execId.
val sqlStoreData = kvstore.read(classOf[SQLExecutionUIData], executionId)
val executionData = new LiveExecutionData(executionId)
executionData.description = sqlStoreData.description
executionData.details = sqlStoreData.details
executionData.physicalPlanDescription = sqlStoreData.physicalPlanDescription
executionData.metrics = sqlStoreData.metrics
executionData.submissionTime = sqlStoreData.submissionTime
executionData.completionTime = sqlStoreData.completionTime
executionData.jobs = sqlStoreData.jobs
executionData.stages = sqlStoreData.stages
executionData.metricsValues = sqlStoreData.metricValues
executionData.endEvents = sqlStoreData.jobs.size + 1
liveExecutions.put(executionId, executionData)
Some(executionData)
} catch {
case _: NoSuchElementException => None
}
}.getOrElse(getOrCreateExecution(executionId))
// Record the accumulator IDs for the stages of this job, so that the code that keeps
// track of the metrics knows which accumulators to look at.
val accumIds = exec.metrics.map(_.accumulatorId).toSet
event.stageIds.foreach { id =>
stageMetrics.put(id, new LiveStageMetrics(id, 0, accumIds, new ConcurrentHashMap()))
}
exec.jobs = exec.jobs + (jobId -> JobExecutionStatus.RUNNING)
exec.stages ++= event.stageIds.toSet
update(exec, force = true)
}
override def onStageSubmitted(event: SparkListenerStageSubmitted): Unit = {
if (!isSQLStage(event.stageInfo.stageId)) {
return
}
// Reset the metrics tracking object for the new attempt.
Option(stageMetrics.get(event.stageInfo.stageId)).foreach { metrics =>
metrics.taskMetrics.clear()
metrics.attemptId = event.stageInfo.attemptNumber
}
}
override def onJobEnd(event: SparkListenerJobEnd): Unit = {
liveExecutions.values().asScala.foreach { exec =>
if (exec.jobs.contains(event.jobId)) {
val result = event.jobResult match {
case JobSucceeded => JobExecutionStatus.SUCCEEDED
case _ => JobExecutionStatus.FAILED
}
exec.jobs = exec.jobs + (event.jobId -> result)
exec.endEvents += 1
update(exec)
}
}
}
override def onExecutorMetricsUpdate(event: SparkListenerExecutorMetricsUpdate): Unit = {
event.accumUpdates.foreach { case (taskId, stageId, attemptId, accumUpdates) =>
updateStageMetrics(stageId, attemptId, taskId, accumUpdates, false)
}
}
override def onTaskEnd(event: SparkListenerTaskEnd): Unit = {
if (!isSQLStage(event.stageId)) {
return
}
val info = event.taskInfo
// SPARK-20342. If processing events from a live application, use the task metrics info to
// work around a race in the DAGScheduler. The metrics info does not contain accumulator info
// when reading event logs in the SHS, so we have to rely on the accumulator in that case.
val accums = if (live && event.taskMetrics != null) {
event.taskMetrics.externalAccums.flatMap { a =>
// This call may fail if the accumulator is gc'ed, so account for that.
try {
Some(a.toInfo(Some(a.value), None))
} catch {
case _: IllegalAccessError => None
}
}
} else {
info.accumulables
}
updateStageMetrics(event.stageId, event.stageAttemptId, info.taskId, accums,
info.successful)
}
def liveExecutionMetrics(executionId: Long): Option[Map[Long, String]] = {
Option(liveExecutions.get(executionId)).map { exec =>
if (exec.metricsValues != null) {
exec.metricsValues
} else {
aggregateMetrics(exec)
}
}
}
private def aggregateMetrics(exec: LiveExecutionData): Map[Long, String] = {
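    // Combine the per-task accumulator values recorded for every tracked stage with the
    // driver-side accumulator updates, then format each metric according to its metric type.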
val metricTypes = exec.metrics.map { m => (m.accumulatorId, m.metricType) }.toMap
val metrics = exec.stages.toSeq
.flatMap { stageId => Option(stageMetrics.get(stageId)) }
.flatMap(_.taskMetrics.values().asScala)
.flatMap { metrics => metrics.ids.zip(metrics.values) }
val aggregatedMetrics = (metrics ++ exec.driverAccumUpdates.toSeq)
.filter { case (id, _) => metricTypes.contains(id) }
.groupBy(_._1)
.map { case (id, values) =>
id -> SQLMetrics.stringValue(metricTypes(id), values.map(_._2))
}
// Check the execution again for whether the aggregated metrics data has been calculated.
// This can happen if the UI is requesting this data, and the onExecutionEnd handler is
// running at the same time. The metrics calculated for the UI can be inaccurate in that
// case, since the onExecutionEnd handler will clean up tracked stage metrics.
if (exec.metricsValues != null) {
exec.metricsValues
} else {
aggregatedMetrics
}
}
private def updateStageMetrics(
stageId: Int,
attemptId: Int,
taskId: Long,
accumUpdates: Seq[AccumulableInfo],
succeeded: Boolean): Unit = {
Option(stageMetrics.get(stageId)).foreach { metrics =>
if (metrics.attemptId != attemptId || metrics.accumulatorIds.isEmpty) {
return
}
val oldTaskMetrics = metrics.taskMetrics.get(taskId)
if (oldTaskMetrics != null && oldTaskMetrics.succeeded) {
return
}
val updates = accumUpdates
.filter { acc => acc.update.isDefined && metrics.accumulatorIds.contains(acc.id) }
.sortBy(_.id)
if (updates.isEmpty) {
return
}
val ids = new Array[Long](updates.size)
val values = new Array[Long](updates.size)
updates.zipWithIndex.foreach { case (acc, idx) =>
ids(idx) = acc.id
// In a live application, accumulators have Long values, but when reading from event
        // logs, they have String values. For now, assume all accumulators are Long and convert
// accordingly.
values(idx) = acc.update.get match {
case s: String => s.toLong
case l: Long => l
case o => throw new IllegalArgumentException(s"Unexpected: $o")
}
}
// TODO: storing metrics by task ID can cause metrics for the same task index to be
// counted multiple times, for example due to speculation or re-attempts.
metrics.taskMetrics.put(taskId, new LiveTaskMetrics(ids, values, succeeded))
}
}
private def onExecutionStart(event: SparkListenerSQLExecutionStart): Unit = {
val SparkListenerSQLExecutionStart(executionId, description, details,
physicalPlanDescription, sparkPlanInfo, time) = event
def toStoredNodes(nodes: Seq[SparkPlanGraphNode]): Seq[SparkPlanGraphNodeWrapper] = {
nodes.map {
case cluster: SparkPlanGraphCluster =>
val storedCluster = new SparkPlanGraphClusterWrapper(
cluster.id,
cluster.name,
cluster.desc,
toStoredNodes(cluster.nodes),
cluster.metrics)
new SparkPlanGraphNodeWrapper(null, storedCluster)
case node =>
new SparkPlanGraphNodeWrapper(node, null)
}
}
val planGraph = SparkPlanGraph(sparkPlanInfo)
val sqlPlanMetrics = planGraph.allNodes.flatMap { node =>
node.metrics.map { metric => (metric.accumulatorId, metric) }
}.toMap.values.toList
val graphToStore = new SparkPlanGraphWrapper(
executionId,
toStoredNodes(planGraph.nodes),
planGraph.edges)
kvstore.write(graphToStore)
val exec = getOrCreateExecution(executionId)
exec.description = description
exec.details = details
exec.physicalPlanDescription = physicalPlanDescription
exec.metrics = sqlPlanMetrics
exec.submissionTime = time
update(exec)
}
private def onExecutionEnd(event: SparkListenerSQLExecutionEnd): Unit = {
val SparkListenerSQLExecutionEnd(executionId, time) = event
Option(liveExecutions.get(executionId)).foreach { exec =>
exec.metricsValues = aggregateMetrics(exec)
exec.completionTime = Some(new Date(time))
exec.endEvents += 1
update(exec)
removeStaleMetricsData(exec)
}
}
private def removeStaleMetricsData(exec: LiveExecutionData): Unit = {
// Remove stale LiveStageMetrics objects for stages that are not active anymore.
val activeStages = liveExecutions.values().asScala.flatMap { other =>
if (other != exec) other.stages else Nil
}.toSet
stageMetrics.keySet().asScala
.filter(!activeStages.contains(_))
.foreach(stageMetrics.remove)
}
private def onDriverAccumUpdates(event: SparkListenerDriverAccumUpdates): Unit = {
val SparkListenerDriverAccumUpdates(executionId, accumUpdates) = event
Option(liveExecutions.get(executionId)).foreach { exec =>
exec.driverAccumUpdates = exec.driverAccumUpdates ++ accumUpdates
update(exec)
}
}
override def onOtherEvent(event: SparkListenerEvent): Unit = event match {
case e: SparkListenerSQLExecutionStart => onExecutionStart(e)
case e: SparkListenerSQLExecutionEnd => onExecutionEnd(e)
case e: SparkListenerDriverAccumUpdates => onDriverAccumUpdates(e)
case _ => // Ignore
}
private def getOrCreateExecution(executionId: Long): LiveExecutionData = {
liveExecutions.computeIfAbsent(executionId,
(_: Long) => new LiveExecutionData(executionId))
}
private def update(exec: LiveExecutionData, force: Boolean = false): Unit = {
val now = System.nanoTime()
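    // An execution is finished once it has seen one end event per job plus the execution end
    // event itself; at that point its data is persisted and removed from the live map.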
if (exec.endEvents >= exec.jobs.size + 1) {
exec.write(kvstore, now)
removeStaleMetricsData(exec)
liveExecutions.remove(exec.executionId)
} else if (force) {
exec.write(kvstore, now)
} else if (liveUpdatePeriodNs >= 0) {
if (now - exec.lastWriteTime > liveUpdatePeriodNs) {
exec.write(kvstore, now)
}
}
}
private def isSQLStage(stageId: Int): Boolean = {
liveExecutions.values().asScala.exists { exec =>
exec.stages.contains(stageId)
}
}
private def cleanupExecutions(count: Long): Unit = {
val countToDelete = count - conf.get(UI_RETAINED_EXECUTIONS)
if (countToDelete <= 0) {
return
}
val view = kvstore.view(classOf[SQLExecutionUIData]).index("completionTime").first(0L)
val toDelete = KVUtils.viewToSeq(view, countToDelete.toInt)(_.completionTime.isDefined)
toDelete.foreach { e =>
kvstore.delete(e.getClass(), e.executionId)
kvstore.delete(classOf[SparkPlanGraphWrapper], e.executionId)
}
}
}
private class LiveExecutionData(val executionId: Long) extends LiveEntity {
var description: String = null
var details: String = null
var physicalPlanDescription: String = null
var metrics = Seq[SQLPlanMetric]()
var submissionTime = -1L
var completionTime: Option[Date] = None
var jobs = Map[Int, JobExecutionStatus]()
var stages = Set[Int]()
var driverAccumUpdates = Map[Long, Long]()
@volatile var metricsValues: Map[Long, String] = null
// Just in case job end and execution end arrive out of order, keep track of how many
// end events arrived so that the listener can stop tracking the execution.
var endEvents = 0
override protected def doUpdate(): Any = {
new SQLExecutionUIData(
executionId,
description,
details,
physicalPlanDescription,
metrics,
submissionTime,
completionTime,
jobs,
stages,
metricsValues)
}
}
private class LiveStageMetrics(
val stageId: Int,
var attemptId: Int,
val accumulatorIds: Set[Long],
val taskMetrics: ConcurrentHashMap[Long, LiveTaskMetrics])
private class LiveTaskMetrics(
val ids: Array[Long],
val values: Array[Long],
val succeeded: Boolean)
|
santhoshkumarvs/spark
|
mllib/src/main/scala/org/apache/spark/ml/util/Instrumentation.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.util
import java.util.UUID
import scala.util.{Failure, Success, Try}
import scala.util.control.NonFatal
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.internal.Logging
import org.apache.spark.ml.{MLEvents, PipelineStage}
import org.apache.spark.ml.param.{Param, Params}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Dataset
import org.apache.spark.util.Utils
/**
* A small wrapper that defines a training session for an estimator, some methods to log
* useful information during this session, and some methods to send
* [[org.apache.spark.ml.MLEvent]].
*/
private[spark] class Instrumentation private () extends Logging with MLEvents {
private val id = UUID.randomUUID()
private val shortId = id.toString.take(8)
private[util] val prefix = s"[$shortId] "
/**
* Log some info about the pipeline stage being fit.
*/
def logPipelineStage(stage: PipelineStage): Unit = {
// estimator.getClass.getSimpleName can cause Malformed class name error,
// call safer `Utils.getSimpleName` instead
val className = Utils.getSimpleName(stage.getClass)
logInfo(s"Stage class: $className")
logInfo(s"Stage uid: ${stage.uid}")
}
/**
* Log some data about the dataset being fit.
*/
def logDataset(dataset: Dataset[_]): Unit = logDataset(dataset.rdd)
/**
* Log some data about the dataset being fit.
*/
def logDataset(dataset: RDD[_]): Unit = {
logInfo(s"training: numPartitions=${dataset.partitions.length}" +
s" storageLevel=${dataset.getStorageLevel}")
}
/**
* Logs a debug message with a prefix that uniquely identifies the training session.
*/
override def logDebug(msg: => String): Unit = {
super.logDebug(prefix + msg)
}
/**
* Logs a warning message with a prefix that uniquely identifies the training session.
*/
override def logWarning(msg: => String): Unit = {
super.logWarning(prefix + msg)
}
/**
   * Logs an error message with a prefix that uniquely identifies the training session.
*/
override def logError(msg: => String): Unit = {
super.logError(prefix + msg)
}
/**
* Logs an info message with a prefix that uniquely identifies the training session.
*/
override def logInfo(msg: => String): Unit = {
super.logInfo(prefix + msg)
}
/**
* Logs the value of the given parameters for the estimator being used in this session.
*/
def logParams(hasParams: Params, params: Param[_]*): Unit = {
val pairs: Seq[(String, JValue)] = for {
p <- params
value <- hasParams.get(p)
} yield {
val cast = p.asInstanceOf[Param[Any]]
p.name -> parse(cast.jsonEncode(value))
}
logInfo(compact(render(map2jvalue(pairs.toMap))))
}
def logNumFeatures(num: Long): Unit = {
logNamedValue(Instrumentation.loggerTags.numFeatures, num)
}
def logNumClasses(num: Long): Unit = {
logNamedValue(Instrumentation.loggerTags.numClasses, num)
}
def logNumExamples(num: Long): Unit = {
logNamedValue(Instrumentation.loggerTags.numExamples, num)
}
/**
* Logs the value with customized name field.
*/
def logNamedValue(name: String, value: String): Unit = {
logInfo(compact(render(name -> value)))
}
def logNamedValue(name: String, value: Long): Unit = {
logInfo(compact(render(name -> value)))
}
def logNamedValue(name: String, value: Double): Unit = {
logInfo(compact(render(name -> value)))
}
def logNamedValue(name: String, value: Array[String]): Unit = {
logInfo(compact(render(name -> compact(render(value.toSeq)))))
}
def logNamedValue(name: String, value: Array[Long]): Unit = {
logInfo(compact(render(name -> compact(render(value.toSeq)))))
}
def logNamedValue(name: String, value: Array[Double]): Unit = {
logInfo(compact(render(name -> compact(render(value.toSeq)))))
}
/**
* Logs the successful completion of the training session.
*/
def logSuccess(): Unit = {
logInfo("training finished")
}
/**
* Logs an exception raised during a training session.
*/
def logFailure(e: Throwable): Unit = {
val msg = e.getStackTrace.mkString("\n")
super.logError(msg)
}
}
/**
* Some common methods for logging information about a training session.
*/
private[spark] object Instrumentation {
object loggerTags {
val numFeatures = "numFeatures"
val numClasses = "numClasses"
val numExamples = "numExamples"
val meanOfLabels = "meanOfLabels"
val varianceOfLabels = "varianceOfLabels"
}
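  /**
   * Runs `body` in a fresh training session and logs its success or failure.
   * A hypothetical usage sketch (`estimator` and `dataset` are placeholder names, not part of
   * this API):
   * {{{
   *   instrumented { instr =>
   *     instr.logPipelineStage(estimator)
   *     instr.logDataset(dataset)
   *     estimator.fit(dataset)
   *   }
   * }}}
   */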
def instrumented[T](body: (Instrumentation => T)): T = {
val instr = new Instrumentation()
Try(body(instr)) match {
case Failure(NonFatal(e)) =>
instr.logFailure(e)
throw e
case Success(result) =>
instr.logSuccess()
result
}
}
}
/**
* A small wrapper that contains an optional `Instrumentation` object.
 * It provides some log methods: if the contained `Instrumentation` object is defined,
 * logging is delegated to it; otherwise messages go to the common logger.
*/
private[spark] class OptionalInstrumentation private(
val instrumentation: Option[Instrumentation],
val className: String) extends Logging {
protected override def logName: String = className
override def logInfo(msg: => String) {
instrumentation match {
case Some(instr) => instr.logInfo(msg)
case None => super.logInfo(msg)
}
}
override def logWarning(msg: => String) {
instrumentation match {
case Some(instr) => instr.logWarning(msg)
case None => super.logWarning(msg)
}
}
override def logError(msg: => String) {
instrumentation match {
case Some(instr) => instr.logError(msg)
case None => super.logError(msg)
}
}
}
private[spark] object OptionalInstrumentation {
/**
* Creates an `OptionalInstrumentation` object from an existing `Instrumentation` object.
*/
def create(instr: Instrumentation): OptionalInstrumentation = {
new OptionalInstrumentation(Some(instr), instr.prefix)
}
/**
* Creates an `OptionalInstrumentation` object from a `Class` object.
 * The created `OptionalInstrumentation` object will log messages via the common logger and use
 * the specified class name as the logger name.
*/
def create(clazz: Class[_]): OptionalInstrumentation = {
new OptionalInstrumentation(None, clazz.getName.stripSuffix("$"))
}
}
|
santhoshkumarvs/spark
|
mllib/src/test/scala/org/apache/spark/ml/clustering/GaussianMixtureSuite.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.clustering
import scala.language.existentials
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.linalg.{DenseMatrix, Matrices, Vector, Vectors}
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.stat.distribution.MultivariateGaussian
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTest, MLTestingUtils}
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.sql.{Dataset, Row}
class GaussianMixtureSuite extends MLTest with DefaultReadWriteTest {
import GaussianMixtureSuite._
import testImplicits._
final val k = 5
private val seed = 538009335
@transient var dataset: Dataset[_] = _
@transient var denseDataset: Dataset[_] = _
@transient var sparseDataset: Dataset[_] = _
@transient var decompositionDataset: Dataset[_] = _
@transient var rDataset: Dataset[_] = _
override def beforeAll(): Unit = {
super.beforeAll()
dataset = KMeansSuite.generateKMeansData(spark, 50, 3, k)
denseDataset = denseData.map(FeatureData).toDF()
sparseDataset = denseData.map { point =>
FeatureData(point.toSparse)
}.toDF()
decompositionDataset = decompositionData.map(FeatureData).toDF()
rDataset = rData.map(FeatureData).toDF()
}
test("gmm fails on high dimensional data") {
val df = Seq(
Vectors.sparse(GaussianMixture.MAX_NUM_FEATURES + 1, Array(0, 4), Array(3.0, 8.0)),
Vectors.sparse(GaussianMixture.MAX_NUM_FEATURES + 1, Array(1, 5), Array(4.0, 9.0)))
.map(Tuple1.apply).toDF("features")
val gm = new GaussianMixture()
withClue(s"GMM should restrict the maximum number of features to be < " +
s"${GaussianMixture.MAX_NUM_FEATURES}") {
intercept[IllegalArgumentException] {
gm.fit(df)
}
}
}
test("default parameters") {
val gm = new GaussianMixture()
assert(gm.getK === 2)
assert(gm.getFeaturesCol === "features")
assert(gm.getPredictionCol === "prediction")
assert(gm.getMaxIter === 100)
assert(gm.getTol === 0.01)
val model = gm.setMaxIter(1).fit(dataset)
MLTestingUtils.checkCopyAndUids(gm, model)
assert(model.hasSummary)
val copiedModel = model.copy(ParamMap.empty)
assert(copiedModel.hasSummary)
}
test("set parameters") {
val gm = new GaussianMixture()
.setK(9)
.setFeaturesCol("test_feature")
.setPredictionCol("test_prediction")
.setProbabilityCol("test_probability")
.setMaxIter(33)
.setSeed(123)
.setTol(1e-3)
assert(gm.getK === 9)
assert(gm.getFeaturesCol === "test_feature")
assert(gm.getPredictionCol === "test_prediction")
assert(gm.getProbabilityCol === "test_probability")
assert(gm.getMaxIter === 33)
assert(gm.getSeed === 123)
assert(gm.getTol === 1e-3)
}
test("parameters validation") {
intercept[IllegalArgumentException] {
new GaussianMixture().setK(1)
}
}
test("fit, transform and summary") {
val predictionColName = "gm_prediction"
val probabilityColName = "gm_probability"
val gm = new GaussianMixture().setK(k).setMaxIter(2).setPredictionCol(predictionColName)
.setProbabilityCol(probabilityColName).setSeed(1)
val model = gm.fit(dataset)
assert(model.hasParent)
assert(model.weights.length === k)
assert(model.gaussians.length === k)
// Check prediction matches the highest probability, and probabilities sum to one.
testTransformer[Tuple1[Vector]](dataset.toDF(), model,
"features", predictionColName, probabilityColName) {
case Row(_, pred: Int, prob: Vector) =>
val probArray = prob.toArray
val predFromProb = probArray.zipWithIndex.maxBy(_._1)._2
assert(pred === predFromProb)
assert(probArray.sum ~== 1.0 absTol 1E-5)
}
// Check validity of model summary
val numRows = dataset.count()
assert(model.hasSummary)
val summary: GaussianMixtureSummary = model.summary
assert(summary.predictionCol === predictionColName)
assert(summary.probabilityCol === probabilityColName)
assert(summary.featuresCol === "features")
assert(summary.predictions.count() === numRows)
for (c <- Array(predictionColName, probabilityColName, "features")) {
assert(summary.predictions.columns.contains(c))
}
assert(summary.cluster.columns === Array(predictionColName))
assert(summary.probability.columns === Array(probabilityColName))
val clusterSizes = summary.clusterSizes
assert(clusterSizes.length === k)
assert(clusterSizes.sum === numRows)
assert(clusterSizes.forall(_ >= 0))
assert(summary.numIter == 2)
model.setSummary(None)
assert(!model.hasSummary)
}
test("read/write") {
def checkModelData(model: GaussianMixtureModel, model2: GaussianMixtureModel): Unit = {
assert(model.weights === model2.weights)
assert(model.gaussians.map(_.mean) === model2.gaussians.map(_.mean))
assert(model.gaussians.map(_.cov) === model2.gaussians.map(_.cov))
}
val gm = new GaussianMixture()
testEstimatorAndModelReadWrite(gm, dataset, GaussianMixtureSuite.allParamSettings,
GaussianMixtureSuite.allParamSettings, checkModelData)
}
test("univariate dense/sparse data with two clusters") {
val weights = Array(2.0 / 3.0, 1.0 / 3.0)
val means = Array(Vectors.dense(5.1604), Vectors.dense(-4.3673))
val covs = Array(Matrices.dense(1, 1, Array(0.86644)), Matrices.dense(1, 1, Array(1.1098)))
val gaussians = means.zip(covs).map { case (mean, cov) =>
new MultivariateGaussian(mean, cov)
}
val expected = new GaussianMixtureModel("dummy", weights, gaussians)
Seq(denseDataset, sparseDataset).foreach { dataset =>
val actual = new GaussianMixture().setK(2).setSeed(seed).fit(dataset)
modelEquals(expected, actual)
}
}
test("check distributed decomposition") {
val k = 5
val d = decompositionData.head.size
assert(GaussianMixture.shouldDistributeGaussians(k, d))
val gmm = new GaussianMixture().setK(k).setSeed(seed).fit(decompositionDataset)
assert(gmm.getK === k)
}
test("multivariate data and check againt R mvnormalmixEM") {
/*
Using the following R code to generate data and train the model using mixtools package.
library(mvtnorm)
library(mixtools)
set.seed(1)
a <- rmvnorm(7, c(0, 0))
b <- rmvnorm(8, c(10, 10))
data <- rbind(a, b)
model <- mvnormalmixEM(data, k = 2)
model$lambda
[1] 0.4666667 0.5333333
model$mu
[1] 0.11731091 -0.06192351
[1] 10.363673 9.897081
model$sigma
[[1]]
[,1] [,2]
[1,] 0.62049934 0.06880802
[2,] 0.06880802 1.27431874
[[2]]
[,1] [,2]
[1,] 0.2961543 0.160783
[2,] 0.1607830 1.008878
model$loglik
[1] -46.89499
*/
val weights = Array(0.5333333, 0.4666667)
val means = Array(Vectors.dense(10.363673, 9.897081), Vectors.dense(0.11731091, -0.06192351))
val covs = Array(Matrices.dense(2, 2, Array(0.2961543, 0.1607830, 0.160783, 1.008878)),
Matrices.dense(2, 2, Array(0.62049934, 0.06880802, 0.06880802, 1.27431874)))
val gaussians = means.zip(covs).map { case (mean, cov) =>
new MultivariateGaussian(mean, cov)
}
val expected = new GaussianMixtureModel("dummy", weights, gaussians)
val actual = new GaussianMixture().setK(2).setSeed(seed).fit(rDataset)
modelEquals(expected, actual)
val llk = actual.summary.logLikelihood
assert(llk ~== -46.89499 absTol 1E-5)
}
test("upper triangular matrix unpacking") {
/*
The full symmetric matrix is as follows:
1.0 2.5 3.8 0.9
2.5 2.0 7.2 3.8
3.8 7.2 3.0 1.0
0.9 3.8 1.0 4.0
triangularValues below stores the upper triangle of this matrix column by
column: (1.0), (2.5, 2.0), (3.8, 7.2, 3.0), (0.9, 3.8, 1.0, 4.0).
*/
val triangularValues = Array(1.0, 2.5, 2.0, 3.8, 7.2, 3.0, 0.9, 3.8, 1.0, 4.0)
val symmetricValues = Array(1.0, 2.5, 3.8, 0.9, 2.5, 2.0, 7.2, 3.8,
3.8, 7.2, 3.0, 1.0, 0.9, 3.8, 1.0, 4.0)
val symmetricMatrix = new DenseMatrix(4, 4, symmetricValues)
val expectedMatrix = GaussianMixture.unpackUpperTriangularMatrix(4, triangularValues)
assert(symmetricMatrix === expectedMatrix)
}
test("GaussianMixture with Array input") {
def trainAndComputeLogLikelihood(dataset: Dataset[_]): Double = {
val model = new GaussianMixture().setK(k).setMaxIter(1).setSeed(1).fit(dataset)
model.summary.logLikelihood
}
val (newDataset, newDatasetD, newDatasetF) = MLTestingUtils.generateArrayFeatureDataset(dataset)
val trueLikelihood = trainAndComputeLogLikelihood(newDataset)
val doubleLikelihood = trainAndComputeLogLikelihood(newDatasetD)
val floatLikelihood = trainAndComputeLogLikelihood(newDatasetF)
// comparing the log-likelihoods is a sufficient sanity check
assert(trueLikelihood ~== doubleLikelihood absTol 1e-6)
assert(trueLikelihood ~== floatLikelihood absTol 1e-6)
}
test("prediction on single instance") {
val gmm = new GaussianMixture().setSeed(123L)
val model = gmm.fit(dataset)
testClusteringModelSinglePrediction(model, model.predict, dataset,
model.getFeaturesCol, model.getPredictionCol)
testClusteringModelSingleProbabilisticPrediction(model, model.predictProbability, dataset,
model.getFeaturesCol, model.getProbabilityCol)
}
}
object GaussianMixtureSuite extends SparkFunSuite {
/**
* Mapping from all Params to valid settings which differ from the defaults.
* This is useful for tests which need to exercise all Params, such as save/load.
* This excludes input columns to simplify some tests.
*/
val allParamSettings: Map[String, Any] = Map(
"predictionCol" -> "myPrediction",
"probabilityCol" -> "myProbability",
"k" -> 3,
"maxIter" -> 2,
"tol" -> 0.01
)
val denseData = Seq(
Vectors.dense(-5.1971), Vectors.dense(-2.5359), Vectors.dense(-3.8220),
Vectors.dense(-5.2211), Vectors.dense(-5.0602), Vectors.dense( 4.7118),
Vectors.dense( 6.8989), Vectors.dense( 3.4592), Vectors.dense( 4.6322),
Vectors.dense( 5.7048), Vectors.dense( 4.6567), Vectors.dense( 5.5026),
Vectors.dense( 4.5605), Vectors.dense( 5.2043), Vectors.dense( 6.2734)
)
val decompositionData: Seq[Vector] = Seq.tabulate(25) { i: Int =>
Vectors.dense(Array.tabulate(50)(i + _.toDouble))
}
val rData = Seq(
Vectors.dense(-0.6264538, 0.1836433), Vectors.dense(-0.8356286, 1.5952808),
Vectors.dense(0.3295078, -0.8204684), Vectors.dense(0.4874291, 0.7383247),
Vectors.dense(0.5757814, -0.3053884), Vectors.dense(1.5117812, 0.3898432),
Vectors.dense(-0.6212406, -2.2146999), Vectors.dense(11.1249309, 9.9550664),
Vectors.dense(9.9838097, 10.9438362), Vectors.dense(10.8212212, 10.5939013),
Vectors.dense(10.9189774, 10.7821363), Vectors.dense(10.0745650, 8.0106483),
Vectors.dense(10.6198257, 9.9438713), Vectors.dense(9.8442045, 8.5292476),
Vectors.dense(9.5218499, 10.4179416)
)
case class FeatureData(features: Vector)
def modelEquals(m1: GaussianMixtureModel, m2: GaussianMixtureModel): Unit = {
assert(m1.weights.length === m2.weights.length)
for (i <- m1.weights.indices) {
assert(m1.weights(i) ~== m2.weights(i) absTol 1E-3)
assert(m1.gaussians(i).mean ~== m2.gaussians(i).mean absTol 1E-3)
assert(m1.gaussians(i).cov ~== m2.gaussians(i).cov absTol 1E-3)
}
}
}
|
santhoshkumarvs/spark
|
mllib/src/test/scala/org/apache/spark/ml/MLEventsSuite.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml
import scala.collection.mutable
import scala.concurrent.duration._
import scala.language.postfixOps
import org.apache.hadoop.fs.Path
import org.mockito.ArgumentMatchers.{any, eq => meq}
import org.mockito.Mockito.when
import org.scalatest.BeforeAndAfterEach
import org.scalatest.concurrent.Eventually
import org.scalatest.mockito.MockitoSugar.mock
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.util.{DefaultParamsReader, DefaultParamsWriter, MLWriter}
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.scheduler.{SparkListener, SparkListenerEvent}
import org.apache.spark.sql._
import org.apache.spark.util.JsonProtocol
class MLEventsSuite
extends SparkFunSuite with BeforeAndAfterEach with MLlibTestSparkContext with Eventually {
private val events = mutable.ArrayBuffer.empty[MLEvent]
private val listener: SparkListener = new SparkListener {
override def onOtherEvent(event: SparkListenerEvent): Unit = event match {
case e: MLEvent => events.append(e)
case _ =>
}
}
override def beforeAll(): Unit = {
super.beforeAll()
spark.sparkContext.addSparkListener(listener)
}
override def afterEach(): Unit = {
try {
events.clear()
} finally {
super.afterEach()
}
}
override def afterAll(): Unit = {
try {
if (spark != null) {
spark.sparkContext.removeSparkListener(listener)
}
} finally {
super.afterAll()
}
}
abstract class MyModel extends Model[MyModel]
test("pipeline fit events") {
val estimator1 = mock[Estimator[MyModel]]
val model1 = mock[MyModel]
val transformer1 = mock[Transformer]
val estimator2 = mock[Estimator[MyModel]]
val model2 = mock[MyModel]
when(estimator1.copy(any[ParamMap])).thenReturn(estimator1)
when(model1.copy(any[ParamMap])).thenReturn(model1)
when(transformer1.copy(any[ParamMap])).thenReturn(transformer1)
when(estimator2.copy(any[ParamMap])).thenReturn(estimator2)
when(model2.copy(any[ParamMap])).thenReturn(model2)
val dataset1 = mock[DataFrame]
val dataset2 = mock[DataFrame]
val dataset3 = mock[DataFrame]
val dataset4 = mock[DataFrame]
val dataset5 = mock[DataFrame]
when(dataset1.toDF).thenReturn(dataset1)
when(dataset2.toDF).thenReturn(dataset2)
when(dataset3.toDF).thenReturn(dataset3)
when(dataset4.toDF).thenReturn(dataset4)
when(dataset5.toDF).thenReturn(dataset5)
when(estimator1.fit(meq(dataset1))).thenReturn(model1)
when(model1.transform(meq(dataset1))).thenReturn(dataset2)
when(model1.parent).thenReturn(estimator1)
when(transformer1.transform(meq(dataset2))).thenReturn(dataset3)
when(estimator2.fit(meq(dataset3))).thenReturn(model2)
val pipeline = new Pipeline()
.setStages(Array(estimator1, transformer1, estimator2))
assert(events.isEmpty)
val pipelineModel = pipeline.fit(dataset1)
val event0 = FitStart[PipelineModel]()
event0.estimator = pipeline
event0.dataset = dataset1
val event1 = FitStart[MyModel]()
event1.estimator = estimator1
event1.dataset = dataset1
val event2 = FitEnd[MyModel]()
event2.estimator = estimator1
event2.model = model1
val event3 = TransformStart()
event3.transformer = model1
event3.input = dataset1
val event4 = TransformEnd()
event4.transformer = model1
event4.output = dataset2
val event5 = TransformStart()
event5.transformer = transformer1
event5.input = dataset2
val event6 = TransformEnd()
event6.transformer = transformer1
event6.output = dataset3
val event7 = FitStart[MyModel]()
event7.estimator = estimator2
event7.dataset = dataset3
val event8 = FitEnd[MyModel]()
event8.estimator = estimator2
event8.model = model2
val event9 = FitEnd[PipelineModel]()
event9.estimator = pipeline
event9.model = pipelineModel
val expected = Seq(
event0, event1, event2, event3, event4, event5, event6, event7, event8, event9)
eventually(timeout(10 seconds), interval(1 second)) {
assert(events === expected)
}
// Test if they can be ser/de via JSON protocol.
assert(events.nonEmpty)
events.map(JsonProtocol.sparkEventToJson).foreach { event =>
assert(JsonProtocol.sparkEventFromJson(event).isInstanceOf[MLEvent])
}
}
test("pipeline model transform events") {
val dataset1 = mock[DataFrame]
val dataset2 = mock[DataFrame]
val dataset3 = mock[DataFrame]
val dataset4 = mock[DataFrame]
when(dataset1.toDF).thenReturn(dataset1)
when(dataset2.toDF).thenReturn(dataset2)
when(dataset3.toDF).thenReturn(dataset3)
when(dataset4.toDF).thenReturn(dataset4)
val transformer1 = mock[Transformer]
val model = mock[MyModel]
val transformer2 = mock[Transformer]
when(transformer1.transform(meq(dataset1))).thenReturn(dataset2)
when(model.transform(meq(dataset2))).thenReturn(dataset3)
when(transformer2.transform(meq(dataset3))).thenReturn(dataset4)
val newPipelineModel = new PipelineModel(
"pipeline0", Array(transformer1, model, transformer2))
assert(events.isEmpty)
val output = newPipelineModel.transform(dataset1)
val event0 = TransformStart()
event0.transformer = newPipelineModel
event0.input = dataset1
val event1 = TransformStart()
event1.transformer = transformer1
event1.input = dataset1
val event2 = TransformEnd()
event2.transformer = transformer1
event2.output = dataset2
val event3 = TransformStart()
event3.transformer = model
event3.input = dataset2
val event4 = TransformEnd()
event4.transformer = model
event4.output = dataset3
val event5 = TransformStart()
event5.transformer = transformer2
event5.input = dataset3
val event6 = TransformEnd()
event6.transformer = transformer2
event6.output = dataset4
val event7 = TransformEnd()
event7.transformer = newPipelineModel
event7.output = output
val expected = Seq(event0, event1, event2, event3, event4, event5, event6, event7)
eventually(timeout(10 seconds), interval(1 second)) {
assert(events === expected)
}
// Test if they can be ser/de via JSON protocol.
assert(events.nonEmpty)
events.map(JsonProtocol.sparkEventToJson).foreach { event =>
assert(JsonProtocol.sparkEventFromJson(event).isInstanceOf[MLEvent])
}
}
test("pipeline read/write events") {
def getInstance(w: MLWriter): AnyRef =
w.getClass.getDeclaredMethod("instance").invoke(w)
withTempDir { dir =>
val path = new Path(dir.getCanonicalPath, "pipeline").toUri.toString
val writableStage = new WritableStage("writableStage")
val newPipeline = new Pipeline().setStages(Array(writableStage))
val pipelineWriter = newPipeline.write
assert(events.isEmpty)
pipelineWriter.save(path)
eventually(timeout(10 seconds), interval(1 second)) {
events.foreach {
case e: SaveInstanceStart if e.writer.isInstanceOf[DefaultParamsWriter] =>
assert(e.path.endsWith("writableStage"))
case e: SaveInstanceEnd if e.writer.isInstanceOf[DefaultParamsWriter] =>
assert(e.path.endsWith("writableStage"))
case e: SaveInstanceStart if getInstance(e.writer).isInstanceOf[Pipeline] =>
assert(getInstance(e.writer).asInstanceOf[Pipeline].uid === newPipeline.uid)
case e: SaveInstanceEnd if getInstance(e.writer).isInstanceOf[Pipeline] =>
assert(getInstance(e.writer).asInstanceOf[Pipeline].uid === newPipeline.uid)
case e => fail(s"Unexpected event thrown: $e")
}
}
// Test if they can be ser/de via JSON protocol.
assert(events.nonEmpty)
events.map(JsonProtocol.sparkEventToJson).foreach { event =>
assert(JsonProtocol.sparkEventFromJson(event).isInstanceOf[MLEvent])
}
sc.listenerBus.waitUntilEmpty(timeoutMillis = 10000)
events.clear()
val pipelineReader = Pipeline.read
assert(events.isEmpty)
pipelineReader.load(path)
eventually(timeout(10 seconds), interval(1 second)) {
events.foreach {
case e: LoadInstanceStart[PipelineStage]
if e.reader.isInstanceOf[DefaultParamsReader[PipelineStage]] =>
assert(e.path.endsWith("writableStage"))
case e: LoadInstanceEnd[PipelineStage]
if e.reader.isInstanceOf[DefaultParamsReader[PipelineStage]] =>
assert(e.instance.isInstanceOf[PipelineStage])
case e: LoadInstanceStart[Pipeline] =>
assert(e.reader === pipelineReader)
case e: LoadInstanceEnd[Pipeline] =>
assert(e.instance.uid === newPipeline.uid)
case e => fail(s"Unexpected event thrown: $e")
}
}
// Test if they can be ser/de via JSON protocol.
assert(events.nonEmpty)
events.map(JsonProtocol.sparkEventToJson).foreach { event =>
assert(JsonProtocol.sparkEventFromJson(event).isInstanceOf[MLEvent])
}
}
}
test("pipeline model read/write events") {
def getInstance(w: MLWriter): AnyRef =
w.getClass.getDeclaredMethod("instance").invoke(w)
withTempDir { dir =>
val path = new Path(dir.getCanonicalPath, "pipeline").toUri.toString
val writableStage = new WritableStage("writableStage")
val pipelineModel =
new PipelineModel("pipeline_89329329", Array(writableStage.asInstanceOf[Transformer]))
val pipelineWriter = pipelineModel.write
assert(events.isEmpty)
pipelineWriter.save(path)
eventually(timeout(10 seconds), interval(1 second)) {
events.foreach {
case e: SaveInstanceStart if e.writer.isInstanceOf[DefaultParamsWriter] =>
assert(e.path.endsWith("writableStage"))
case e: SaveInstanceEnd if e.writer.isInstanceOf[DefaultParamsWriter] =>
assert(e.path.endsWith("writableStage"))
case e: SaveInstanceStart if getInstance(e.writer).isInstanceOf[PipelineModel] =>
assert(getInstance(e.writer).asInstanceOf[PipelineModel].uid === pipelineModel.uid)
case e: SaveInstanceEnd if getInstance(e.writer).isInstanceOf[PipelineModel] =>
assert(getInstance(e.writer).asInstanceOf[PipelineModel].uid === pipelineModel.uid)
case e => fail(s"Unexpected event thrown: $e")
}
}
// Test if they can be ser/de via JSON protocol.
assert(events.nonEmpty)
events.map(JsonProtocol.sparkEventToJson).foreach { event =>
assert(JsonProtocol.sparkEventFromJson(event).isInstanceOf[MLEvent])
}
sc.listenerBus.waitUntilEmpty(timeoutMillis = 10000)
events.clear()
val pipelineModelReader = PipelineModel.read
assert(events.isEmpty)
pipelineModelReader.load(path)
eventually(timeout(10 seconds), interval(1 second)) {
events.foreach {
case e: LoadInstanceStart[PipelineStage]
if e.reader.isInstanceOf[DefaultParamsReader[PipelineStage]] =>
assert(e.path.endsWith("writableStage"))
case e: LoadInstanceEnd[PipelineStage]
if e.reader.isInstanceOf[DefaultParamsReader[PipelineStage]] =>
assert(e.instance.isInstanceOf[PipelineStage])
case e: LoadInstanceStart[PipelineModel] =>
assert(e.reader === pipelineModelReader)
case e: LoadInstanceEnd[PipelineModel] =>
assert(e.instance.uid === pipelineModel.uid)
case e => fail(s"Unexpected event thrown: $e")
}
}
// Test if they can be ser/de via JSON protocol.
assert(events.nonEmpty)
events.map(JsonProtocol.sparkEventToJson).foreach { event =>
assert(JsonProtocol.sparkEventFromJson(event).isInstanceOf[MLEvent])
}
}
}
}
|
santhoshkumarvs/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/NestedSchemaPruningBenchmark.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.benchmark
import org.apache.spark.benchmark.Benchmark
import org.apache.spark.sql.internal.SQLConf
/**
* The base class for synthetic benchmark for nested schema pruning performance.
*/
abstract class NestedSchemaPruningBenchmark extends SqlBasedBenchmark {
import spark.implicits._
val dataSourceName: String
val benchmarkName: String
protected val N = 1000000
protected val numIters = 10
// We use `col1 BIGINT, col2 STRUCT<_1: BIGINT, _2: STRING>` as a test schema.
// col1 and col2._1 are used for comparison. col2._2 mimics the burden of the other columns.
private val df = spark
.range(N * 10)
.sample(false, 0.1)
.map(x => (x, (x, s"$x" * 100)))
.toDF("col1", "col2")
private def addCase(benchmark: Benchmark, name: String, sql: String): Unit = {
benchmark.addCase(name) { _ =>
spark.sql(sql).write.format("noop").save()
}
}
protected def selectBenchmark(numRows: Int, numIters: Int): Unit = {
withTempPath { dir =>
val path = dir.getCanonicalPath
Seq(1, 2).foreach { i =>
df.write.format(dataSourceName).save(path + s"/$i")
spark.read.format(dataSourceName).load(path + s"/$i").createOrReplaceTempView(s"t$i")
}
val benchmark = new Benchmark(s"Selection", numRows, numIters, output = output)
addCase(benchmark, "Top-level column", "SELECT col1 FROM (SELECT col1 FROM t1)")
addCase(benchmark, "Nested column", "SELECT col2._1 FROM (SELECT col2 FROM t2)")
benchmark.run()
}
}
protected def limitBenchmark(numRows: Int, numIters: Int): Unit = {
withTempPath { dir =>
val path = dir.getCanonicalPath
Seq(1, 2).foreach { i =>
df.write.format(dataSourceName).save(path + s"/$i")
spark.read.format(dataSourceName).load(path + s"/$i").createOrReplaceTempView(s"t$i")
}
val benchmark = new Benchmark(s"Limiting", numRows, numIters, output = output)
addCase(benchmark, "Top-level column",
s"SELECT col1 FROM (SELECT col1 FROM t1 LIMIT ${Int.MaxValue})")
addCase(benchmark, "Nested column",
s"SELECT col2._1 FROM (SELECT col2 FROM t2 LIMIT ${Int.MaxValue})")
benchmark.run()
}
}
protected def repartitionBenchmark(numRows: Int, numIters: Int): Unit = {
withTempPath { dir =>
val path = dir.getCanonicalPath
Seq(1, 2).foreach { i =>
df.write.format(dataSourceName).save(path + s"/$i")
spark.read.format(dataSourceName).load(path + s"/$i").createOrReplaceTempView(s"t$i")
}
val benchmark = new Benchmark(s"Repartitioning", numRows, numIters, output = output)
addCase(benchmark, "Top-level column",
s"SELECT col1 FROM (SELECT /*+ REPARTITION(1) */ col1 FROM t1)")
addCase(benchmark, "Nested column",
s"SELECT col2._1 FROM (SELECT /*+ REPARTITION(1) */ col2 FROM t2)")
benchmark.run()
}
}
protected def repartitionByExprBenchmark(numRows: Int, numIters: Int): Unit = {
withTempPath { dir =>
val path = dir.getCanonicalPath
Seq(1, 2).foreach { i =>
df.write.format(dataSourceName).save(path + s"/$i")
spark.read.format(dataSourceName).load(path + s"/$i").createOrReplaceTempView(s"t$i")
}
val benchmark = new Benchmark(s"Repartitioning by exprs", numRows, numIters, output = output)
addCase(benchmark, "Top-level column",
s"SELECT col1 FROM (SELECT col1 FROM t1 DISTRIBUTE BY col1)")
addCase(benchmark, "Nested column",
s"SELECT col2._1 FROM (SELECT col2 FROM t2 DISTRIBUTE BY col2._1)")
benchmark.run()
}
}
protected def sampleBenchmark(numRows: Int, numIters: Int): Unit = {
withTempPath { dir =>
val path = dir.getCanonicalPath
Seq(1, 2).foreach { i =>
df.write.format(dataSourceName).save(path + s"/$i")
spark.read.format(dataSourceName).load(path + s"/$i").createOrReplaceTempView(s"t$i")
}
val benchmark = new Benchmark(s"Sample", numRows, numIters, output = output)
addCase(benchmark, "Top-level column",
s"SELECT col1 FROM (SELECT col1 FROM t1 TABLESAMPLE(100 percent))")
addCase(benchmark, "Nested column",
s"SELECT col2._1 FROM (SELECT col2 FROM t2 TABLESAMPLE(100 percent))")
benchmark.run()
}
}
protected def sortBenchmark(numRows: Int, numIters: Int): Unit = {
withTempPath { dir =>
val path = dir.getCanonicalPath
Seq(1, 2).foreach { i =>
df.write.format(dataSourceName).save(path + s"/$i")
spark.read.format(dataSourceName).load(path + s"/$i").createOrReplaceTempView(s"t$i")
}
val benchmark = new Benchmark(s"Sorting", numRows, numIters, output = output)
addCase(benchmark, "Top-level column", "SELECT col1 FROM t1 ORDER BY col1")
addCase(benchmark, "Nested column", "SELECT col2._1 FROM t2 ORDER BY col2._1")
benchmark.run()
}
}
override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
runBenchmark(benchmarkName) {
withSQLConf(SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key -> "true") {
selectBenchmark(N, numIters)
limitBenchmark(N, numIters)
repartitionBenchmark(N, numIters)
repartitionByExprBenchmark(N, numIters)
sampleBenchmark(N, numIters)
sortBenchmark(N, numIters)
}
}
}
}
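// A concrete benchmark only needs to choose a data source and a name. A
// hypothetical sketch (the object name and strings are illustrative, not part
// of this file):
//
//   object ParquetNestedSchemaPruningBenchmark extends NestedSchemaPruningBenchmark {
//     override val dataSourceName: String = "parquet"
//     override val benchmarkName: String = "Nested Schema Pruning Benchmark For Parquet"
//   }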
|
santhoshkumarvs/spark
|
core/src/main/scala/org/apache/spark/SecurityManager.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io.File
import java.net.{Authenticator, PasswordAuthentication}
import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.Files
import java.util.Base64
import org.apache.hadoop.io.Text
import org.apache.hadoop.security.{Credentials, UserGroupInformation}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.UI._
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.network.sasl.SecretKeyHolder
import org.apache.spark.util.Utils
/**
* Spark class responsible for security.
*
* In general this class should be instantiated by the SparkEnv and most components
* should access it from that. There are some cases where the SparkEnv hasn't been
* initialized yet and this class must be instantiated directly.
*
* This class implements all of the configuration related to security features described
* in the "Security" document. Please refer to that document for specific features implemented
* here.
*/
private[spark] class SecurityManager(
sparkConf: SparkConf,
val ioEncryptionKey: Option[Array[Byte]] = None,
authSecretFileConf: ConfigEntry[Option[String]] = AUTH_SECRET_FILE)
extends Logging with SecretKeyHolder {
import SecurityManager._
// allow all users/groups to have view/modify permissions
private val WILDCARD_ACL = "*"
private val authOn = sparkConf.get(NETWORK_AUTH_ENABLED)
private var aclsOn = sparkConf.get(ACLS_ENABLE)
// admin acls should be set before view or modify acls
private var adminAcls: Set[String] = sparkConf.get(ADMIN_ACLS).toSet
// admin group acls should be set before view or modify group acls
private var adminAclsGroups: Set[String] = sparkConf.get(ADMIN_ACLS_GROUPS).toSet
private var viewAcls: Set[String] = _
private var viewAclsGroups: Set[String] = _
// list of users who have permission to modify the application. This should
// apply to both UI and CLI for things like killing the application.
private var modifyAcls: Set[String] = _
private var modifyAclsGroups: Set[String] = _
// always add the current user and SPARK_USER to the viewAcls
private val defaultAclUsers = Set[String](System.getProperty("user.name", ""),
Utils.getCurrentUserName())
setViewAcls(defaultAclUsers, sparkConf.get(UI_VIEW_ACLS))
setModifyAcls(defaultAclUsers, sparkConf.get(MODIFY_ACLS))
setViewAclsGroups(sparkConf.get(UI_VIEW_ACLS_GROUPS))
setModifyAclsGroups(sparkConf.get(MODIFY_ACLS_GROUPS))
private var secretKey: String = _
logInfo("SecurityManager: authentication " + (if (authOn) "enabled" else "disabled") +
"; ui acls " + (if (aclsOn) "enabled" else "disabled") +
"; users with view permissions: " + viewAcls.toString() +
"; groups with view permissions: " + viewAclsGroups.toString() +
"; users with modify permissions: " + modifyAcls.toString() +
"; groups with modify permissions: " + modifyAclsGroups.toString())
private val hadoopConf = SparkHadoopUtil.get.newConfiguration(sparkConf)
// the default SSL configuration - it will be used by all communication layers unless overwritten
private val defaultSSLOptions =
SSLOptions.parse(sparkConf, hadoopConf, "spark.ssl", defaults = None)
def getSSLOptions(module: String): SSLOptions = {
val opts =
SSLOptions.parse(sparkConf, hadoopConf, s"spark.ssl.$module", Some(defaultSSLOptions))
logDebug(s"Created SSL options for $module: $opts")
opts
}
/**
* Admin acls should be set before the view or modify acls. If you modify the admin
* acls you should also set the view and modify acls again to pick up the changes.
*/
def setViewAcls(defaultUsers: Set[String], allowedUsers: Seq[String]) {
viewAcls = adminAcls ++ defaultUsers ++ allowedUsers
logInfo("Changing view acls to: " + viewAcls.mkString(","))
}
def setViewAcls(defaultUser: String, allowedUsers: Seq[String]) {
setViewAcls(Set[String](defaultUser), allowedUsers)
}
/**
* Admin acls groups should be set before the view or modify acls groups. If you modify the admin
* acls groups you should also set the view and modify acls groups again to pick up the changes.
*/
def setViewAclsGroups(allowedUserGroups: Seq[String]) {
viewAclsGroups = adminAclsGroups ++ allowedUserGroups
logInfo("Changing view acls groups to: " + viewAclsGroups.mkString(","))
}
/**
* Checking the existence of "*" is necessary as YARN can't recognize the "*" in "defaultuser,*"
*/
def getViewAcls: String = {
if (viewAcls.contains(WILDCARD_ACL)) {
WILDCARD_ACL
} else {
viewAcls.mkString(",")
}
}
def getViewAclsGroups: String = {
if (viewAclsGroups.contains(WILDCARD_ACL)) {
WILDCARD_ACL
} else {
viewAclsGroups.mkString(",")
}
}
/**
* Admin acls should be set before the view or modify acls. If you modify the admin
* acls you should also set the view and modify acls again to pick up the changes.
*/
def setModifyAcls(defaultUsers: Set[String], allowedUsers: Seq[String]) {
modifyAcls = adminAcls ++ defaultUsers ++ allowedUsers
logInfo("Changing modify acls to: " + modifyAcls.mkString(","))
}
/**
* Admin acls groups should be set before the view or modify acls groups. If you modify the admin
* acls groups you should also set the view and modify acls groups again to pick up the changes.
*/
def setModifyAclsGroups(allowedUserGroups: Seq[String]) {
modifyAclsGroups = adminAclsGroups ++ allowedUserGroups
logInfo("Changing modify acls groups to: " + modifyAclsGroups.mkString(","))
}
/**
* Checking the existence of "*" is necessary as YARN can't recognize the "*" in "defaultuser,*"
*/
def getModifyAcls: String = {
if (modifyAcls.contains(WILDCARD_ACL)) {
WILDCARD_ACL
} else {
modifyAcls.mkString(",")
}
}
def getModifyAclsGroups: String = {
if (modifyAclsGroups.contains(WILDCARD_ACL)) {
WILDCARD_ACL
} else {
modifyAclsGroups.mkString(",")
}
}
/**
* Admin acls should be set before the view or modify acls. If you modify the admin
* acls you should also set the view and modify acls again to pick up the changes.
*/
def setAdminAcls(adminUsers: Seq[String]) {
adminAcls = adminUsers.toSet
logInfo("Changing admin acls to: " + adminAcls.mkString(","))
}
/**
* Admin acls groups should be set before the view or modify acls groups. If you modify the admin
* acls groups you should also set the view and modify acls groups again to pick up the changes.
*/
def setAdminAclsGroups(adminUserGroups: Seq[String]) {
adminAclsGroups = adminUserGroups.toSet
logInfo("Changing admin acls groups to: " + adminAclsGroups.mkString(","))
}
def setAcls(aclSetting: Boolean) {
aclsOn = aclSetting
logInfo("Changing acls enabled to: " + aclsOn)
}
def getIOEncryptionKey(): Option[Array[Byte]] = ioEncryptionKey
/**
* Check to see if Acls for the UI are enabled
* @return true if UI authentication is enabled, otherwise false
*/
def aclsEnabled(): Boolean = aclsOn
/**
* Checks the given user against the view acl and groups list to see if they have
* authorization to view the UI. If the UI acls are disabled
* via spark.acls.enable, all users have view access. If the user is null
* it is assumed authentication is off and all users have access. Also, if any one of the
* UI acls or groups specifies the WILDCARD ("*"), then all users have view access.
*
* @param user the user to check for authorization
* @return true if the user has permission, otherwise false
*/
def checkUIViewPermissions(user: String): Boolean = {
logDebug("user=" + user + " aclsEnabled=" + aclsEnabled() + " viewAcls=" +
viewAcls.mkString(",") + " viewAclsGroups=" + viewAclsGroups.mkString(","))
if (!aclsEnabled || user == null || viewAcls.contains(user) ||
viewAcls.contains(WILDCARD_ACL) || viewAclsGroups.contains(WILDCARD_ACL)) {
return true
}
val currentUserGroups = Utils.getCurrentUserGroups(sparkConf, user)
logDebug("userGroups=" + currentUserGroups.mkString(","))
viewAclsGroups.exists(currentUserGroups.contains(_))
}
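// Worked example (illustrative values): with acls enabled, viewAcls =
// Set("alice") and viewAclsGroups = Set("admins"), "alice" always has view
// access, while "bob" has it only if he belongs to the "admins" group. If
// acls are disabled, the user is null, or either list contains "*", every
// user has view access.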
/**
* Checks the given user against the modify acl and groups list to see if they have
* authorization to modify the application. If the modify acls are disabled
* via spark.acls.enable, all users have modify access. If the user is null
* it is assumed authentication isn't turned on and all users have access. Also, if any one
* of the modify acls or groups specifies the WILDCARD ("*"), then all users have modify access.
*
* @param user the user to check for authorization
* @return true if the user has permission, otherwise false
*/
def checkModifyPermissions(user: String): Boolean = {
logDebug("user=" + user + " aclsEnabled=" + aclsEnabled() + " modifyAcls=" +
modifyAcls.mkString(",") + " modifyAclsGroups=" + modifyAclsGroups.mkString(","))
if (!aclsEnabled || user == null || modifyAcls.contains(user) ||
modifyAcls.contains(WILDCARD_ACL) || modifyAclsGroups.contains(WILDCARD_ACL)) {
return true
}
val currentUserGroups = Utils.getCurrentUserGroups(sparkConf, user)
logDebug("userGroups=" + currentUserGroups)
modifyAclsGroups.exists(currentUserGroups.contains(_))
}
/**
* Check to see if authentication for the Spark communication protocols is enabled
* @return true if authentication is enabled, otherwise false
*/
def isAuthenticationEnabled(): Boolean = authOn
/**
* Checks whether network encryption should be enabled.
* @return Whether to enable encryption when connecting to services that support it.
*/
def isEncryptionEnabled(): Boolean = {
sparkConf.get(Network.NETWORK_CRYPTO_ENABLED) || sparkConf.get(SASL_ENCRYPTION_ENABLED)
}
/**
* Gets the user used for authenticating SASL connections.
* For now use a single hardcoded user.
* @return the SASL user as a String
*/
def getSaslUser(): String = "sparkSaslUser"
/**
* Gets the secret key.
* @return the secret key as a String if authentication is enabled, otherwise returns null
*/
def getSecretKey(): String = {
if (isAuthenticationEnabled) {
val creds = UserGroupInformation.getCurrentUser().getCredentials()
Option(creds.getSecretKey(SECRET_LOOKUP_KEY))
.map { bytes => new String(bytes, UTF_8) }
// Secret key may not be found in the current UGI's credentials. This happens
// when the UGI is refreshed on the driver side by UGI's loginFromKeytab, which
// does not copy the secret key from the original UGI to the new one; this
// shows up in ThriftServer's Hive logic. As a workaround, the secret key is
// also stored in a local variable so that it remains visible across contexts.
.orElse(Option(secretKey))
.orElse(Option(sparkConf.getenv(ENV_AUTH_SECRET)))
.orElse(sparkConf.getOption(SPARK_AUTH_SECRET_CONF))
.orElse(secretKeyFromFile())
.getOrElse {
throw new IllegalArgumentException(
s"A secret key must be specified via the $SPARK_AUTH_SECRET_CONF config")
}
} else {
null
}
}
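// Resolution order for the secret, as implemented above: the current UGI's
// credentials, then the locally cached `secretKey`, then the ENV_AUTH_SECRET
// environment variable, then the SPARK_AUTH_SECRET_CONF config entry, and
// finally the configured secret file; if none of these yields a value, an
// IllegalArgumentException is thrown.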
/**
* Initialize the authentication secret.
*
* If authentication is disabled, do nothing.
*
* In YARN and local mode, generate a new secret and store it in the current user's credentials.
*
* In other modes, assert that the auth secret is set in the configuration.
*/
def initializeAuth(): Unit = {
import SparkMasterRegex._
if (!sparkConf.get(NETWORK_AUTH_ENABLED)) {
return
}
// TODO: this really should be abstracted somewhere else.
val master = sparkConf.get(SparkLauncher.SPARK_MASTER, "")
val storeInUgi = master match {
case "yarn" | "local" | LOCAL_N_REGEX(_) | LOCAL_N_FAILURES_REGEX(_, _) =>
true
case k8sRegex() =>
// Don't propagate the secret through the user's credentials in kubernetes. That conflicts
// with the way k8s handles propagation of delegation tokens.
false
case _ =>
require(sparkConf.contains(SPARK_AUTH_SECRET_CONF),
s"A secret key must be specified via the $SPARK_AUTH_SECRET_CONF config.")
return
}
if (sparkConf.get(AUTH_SECRET_FILE_DRIVER).isDefined !=
sparkConf.get(AUTH_SECRET_FILE_EXECUTOR).isDefined) {
throw new IllegalArgumentException(
"Invalid secret configuration: Secret files must be specified for both the driver and the" +
" executors, not only one or the other.")
}
secretKey = secretKeyFromFile().getOrElse(Utils.createSecret(sparkConf))
if (storeInUgi) {
val creds = new Credentials()
creds.addSecretKey(SECRET_LOOKUP_KEY, secretKey.getBytes(UTF_8))
UserGroupInformation.getCurrentUser().addCredentials(creds)
}
}
private def secretKeyFromFile(): Option[String] = {
sparkConf.get(authSecretFileConf).flatMap { secretFilePath =>
sparkConf.getOption(SparkLauncher.SPARK_MASTER).map {
case k8sRegex() =>
val secretFile = new File(secretFilePath)
require(secretFile.isFile, s"No file found containing the secret key at $secretFilePath.")
val base64Key = Base64.getEncoder.encodeToString(Files.readAllBytes(secretFile.toPath))
require(!base64Key.isEmpty, s"Secret key from file located at $secretFilePath is empty.")
base64Key
case _ =>
throw new IllegalArgumentException(
"Secret keys provided via files is only allowed in Kubernetes mode.")
}
}
}
// Default SecurityManager only has a single secret key, so ignore appId.
override def getSaslUser(appId: String): String = getSaslUser()
override def getSecretKey(appId: String): String = getSecretKey()
}
private[spark] object SecurityManager {
val k8sRegex = "k8s.*".r
val SPARK_AUTH_CONF = NETWORK_AUTH_ENABLED.key
val SPARK_AUTH_SECRET_CONF = AUTH_SECRET.key
// This is used to set auth secret to an executor's env variable. It should have the same
// value as SPARK_AUTH_SECRET_CONF set in SparkConf
val ENV_AUTH_SECRET = "_SPARK_AUTH_SECRET"
// key used to store the spark secret in the Hadoop UGI
val SECRET_LOOKUP_KEY = new Text("sparkCookie")
}
|
santhoshkumarvs/spark
|
repl/src/test/scala/org/apache/spark/repl/ReplSuite.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.repl
import java.io._
import scala.tools.nsc.interpreter.SimpleReader
import org.apache.log4j.{Level, LogManager}
import org.apache.spark.{SparkContext, SparkFunSuite}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATION
class ReplSuite extends SparkFunSuite {
def runInterpreter(master: String, input: String): String = {
val CONF_EXECUTOR_CLASSPATH = "spark.executor.extraClassPath"
val oldExecutorClasspath = System.getProperty(CONF_EXECUTOR_CLASSPATH)
val classpath = System.getProperty("java.class.path")
System.setProperty(CONF_EXECUTOR_CLASSPATH, classpath)
Main.sparkContext = null
Main.sparkSession = null // causes recreation of SparkContext for each test.
Main.conf.set("spark.master", master)
val in = new BufferedReader(new StringReader(input + "\n"))
val out = new StringWriter()
Main.doMain(Array("-classpath", classpath), new SparkILoop(in, new PrintWriter(out)))
if (oldExecutorClasspath != null) {
System.setProperty(CONF_EXECUTOR_CLASSPATH, oldExecutorClasspath)
} else {
System.clearProperty(CONF_EXECUTOR_CLASSPATH)
}
out.toString
}
// Simulate the paste mode in Scala REPL.
def runInterpreterInPasteMode(master: String, input: String): String =
runInterpreter(master, ":paste\n" + input + 4.toChar) // 4 is the ascii code of CTRL + D
def assertContains(message: String, output: String) {
val isContain = output.contains(message)
assert(isContain,
"Interpreter output did not contain '" + message + "':\n" + output)
}
def assertDoesNotContain(message: String, output: String) {
val isContain = output.contains(message)
assert(!isContain,
"Interpreter output contained '" + message + "':\n" + output)
}
test("propagation of local properties") {
// A mock ILoop that doesn't install the SIGINT handler.
class ILoop(out: PrintWriter) extends SparkILoop(None, out) {
settings = new scala.tools.nsc.Settings
settings.usejavacp.value = true
org.apache.spark.repl.Main.interp = this
in = SimpleReader()
}
val out = new StringWriter()
Main.interp = new ILoop(new PrintWriter(out))
Main.sparkContext = new SparkContext("local", "repl-test")
Main.interp.createInterpreter()
Main.sparkContext.setLocalProperty("someKey", "someValue")
// Make sure the value we set in the caller to interpret is propagated in the thread that
// interprets the command.
Main.interp.interpret("org.apache.spark.repl.Main.sparkContext.getLocalProperty(\"someKey\")")
assert(out.toString.contains("someValue"))
Main.sparkContext.stop()
System.clearProperty("spark.driver.port")
}
test("SPARK-15236: use Hive catalog") {
// turn on INFO logging so that the code can emit the INFO
// entries that mention "HiveMetaStore"
val rootLogger = LogManager.getRootLogger()
val logLevel = rootLogger.getLevel
rootLogger.setLevel(Level.INFO)
try {
Main.conf.set(CATALOG_IMPLEMENTATION.key, "hive")
val output = runInterpreter("local",
"""
|spark.sql("drop table if exists t_15236")
""".stripMargin)
assertDoesNotContain("error:", output)
assertDoesNotContain("Exception", output)
// the Hive catalog is used only when the config is set to "hive" and
// the Hive classes are built; in that case the INFO log will contain
// entries mentioning HiveMetaStore
if (SparkSession.hiveClassesArePresent) {
assertContains("HiveMetaStore", output)
} else {
// If hive classes are not built, in-memory catalog will be used
assertDoesNotContain("HiveMetaStore", output)
}
} finally {
rootLogger.setLevel(logLevel)
}
}
test("SPARK-15236: use in-memory catalog") {
val rootLogger = LogManager.getRootLogger()
val logLevel = rootLogger.getLevel
rootLogger.setLevel(Level.INFO)
try {
Main.conf.set(CATALOG_IMPLEMENTATION.key, "in-memory")
val output = runInterpreter("local",
"""
|spark.sql("drop table if exists t_16236")
""".stripMargin)
assertDoesNotContain("error:", output)
assertDoesNotContain("Exception", output)
assertDoesNotContain("HiveMetaStore", output)
} finally {
rootLogger.setLevel(logLevel)
}
}
test("broadcast vars") {
// Test that the value that a broadcast var had when it was created is used,
// even if that variable is then modified in the driver program
// TODO: This doesn't actually work for arrays when we run in local mode!
val output = runInterpreter("local",
"""
|var array = new Array[Int](5)
|val broadcastArray = sc.broadcast(array)
|sc.parallelize(0 to 4).map(x => broadcastArray.value(x)).collect()
|array(0) = 5
|sc.parallelize(0 to 4).map(x => broadcastArray.value(x)).collect()
""".stripMargin)
assertDoesNotContain("error:", output)
assertDoesNotContain("Exception", output)
assertContains("res0: Array[Int] = Array(0, 0, 0, 0, 0)", output)
assertContains("res2: Array[Int] = Array(5, 0, 0, 0, 0)", output)
}
if (System.getenv("MESOS_NATIVE_JAVA_LIBRARY") != null) {
test("running on Mesos") {
val output = runInterpreter("localquiet",
"""
|var v = 7
|def getV() = v
|sc.parallelize(1 to 10).map(x => getV()).collect().reduceLeft(_+_)
|v = 10
|sc.parallelize(1 to 10).map(x => getV()).collect().reduceLeft(_+_)
|var array = new Array[Int](5)
|val broadcastArray = sc.broadcast(array)
|sc.parallelize(0 to 4).map(x => broadcastArray.value(x)).collect()
|array(0) = 5
|sc.parallelize(0 to 4).map(x => broadcastArray.value(x)).collect()
""".stripMargin)
assertDoesNotContain("error:", output)
assertDoesNotContain("Exception", output)
assertContains("res0: Int = 70", output)
assertContains("res1: Int = 100", output)
assertContains("res2: Array[Int] = Array(0, 0, 0, 0, 0)", output)
assertContains("res4: Array[Int] = Array(0, 0, 0, 0, 0)", output)
}
}
test("line wrapper only initialized once when used as encoder outer scope") {
val output = runInterpreter("local",
"""
|val fileName = "repl-test-" + System.currentTimeMillis
|val tmpDir = System.getProperty("java.io.tmpdir")
|val file = new java.io.File(tmpDir, fileName)
|def createFile(): Unit = file.createNewFile()
|
|createFile();case class TestCaseClass(value: Int)
|sc.parallelize(1 to 10).map(x => TestCaseClass(x)).collect()
|
|file.delete()
""".stripMargin)
assertDoesNotContain("error:", output)
assertDoesNotContain("Exception", output)
}
test("define case class and create Dataset together with paste mode") {
val output = runInterpreterInPasteMode("local-cluster[1,1,1024]",
"""
|import spark.implicits._
|case class TestClass(value: Int)
|Seq(TestClass(1)).toDS()
""".stripMargin)
assertDoesNotContain("error:", output)
assertDoesNotContain("Exception", output)
}
test(":replay should work correctly") {
val output = runInterpreter("local",
"""
|sc
|:replay
""".stripMargin)
assertDoesNotContain("error: not found: value sc", output)
}
test("spark-shell should find imported types in class constructors and extends clause") {
val output = runInterpreter("local",
"""
|import org.apache.spark.Partition
|class P(p: Partition)
|class P(val index: Int) extends Partition
""".stripMargin)
assertDoesNotContain("error: not found: type Partition", output)
}
test("spark-shell should shadow val/def definitions correctly") {
val output1 = runInterpreter("local",
"""
|def myMethod() = "first definition"
|val tmp = myMethod(); val out = tmp
|def myMethod() = "second definition"
|val tmp = myMethod(); val out = s"$tmp aabbcc"
""".stripMargin)
assertContains("second definition aabbcc", output1)
val output2 = runInterpreter("local",
"""
|val a = 1
|val b = a; val c = b;
|val a = 2
|val b = a; val c = b;
|s"!!$b!!"
""".stripMargin)
assertContains("!!2!!", output2)
}
test("SPARK-26633: ExecutorClassLoader.getResourceAsStream find REPL classes") {
val output = runInterpreterInPasteMode("local-cluster[1,1,1024]",
"""
|case class TestClass(value: Int)
|
|sc.parallelize(1 to 1).map { _ =>
| val clz = classOf[TestClass]
| val name = clz.getName.replace('.', '/') + ".class";
| val stream = clz.getClassLoader.getResourceAsStream(name)
| if (stream == null) {
| "failed: stream is null"
| } else {
| val magic = new Array[Byte](4)
| try {
| stream.read(magic)
| // the magic number of a Java Class file
| val expected = Array[Byte](0xCA.toByte, 0xFE.toByte, 0xBA.toByte, 0xBE.toByte)
| if (magic sameElements expected) {
| "successful"
| } else {
| "failed: unexpected contents from stream"
| }
| } finally {
| stream.close()
| }
| }
|}.collect()
""".stripMargin)
assertDoesNotContain("failed", output)
assertContains("successful", output)
}
}
|
tarangbhalodia/scala-lagom-user-crud
|
user-impl/src/main/scala/com/lagom/user/infrastructure/impl/UserServiceImpl.scala
|
package com.lagom.user.infrastructure.impl
import java.util.UUID
import akka.{Done, NotUsed}
import com.lagom.user.api
import com.lagom.user.api._
import com.lagom.user.infrastructure.ElasticsearchClient
import com.lagom.user.infrastructure.util.Converters._
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.api.transport.{BadRequest, NotFound}
import com.sksamuel.elastic4s.analyzers.{AsciiFoldingTokenFilter, LowercaseTokenFilter}
import com.sksamuel.elastic4s.http.ElasticDsl._
import com.sksamuel.elastic4s.http.{ElasticDsl, RequestFailure, RequestSuccess}
import com.sksamuel.elastic4s.playjson._
import com.sksamuel.elastic4s.searches.queries.BoolQuery
import play.api.Logger
import scala.concurrent.{ExecutionContext, Future}
class UserServiceImpl(elasticsearch: ElasticsearchClient)(implicit executionContext: ExecutionContext) extends UserService {
private lazy val logger = Logger(classOf[UserServiceImpl])
private val INDEX_NAME = "user"
private val TYPE_NAME = "entry"
private val NORMALIZED_FIELD = "normalized-field"
createIndex()
private def createIndex() = {
for {
_ <- elasticsearch
.execute {
createIndexTemplate("generic-template", "*")
.settings(Map("number_of_shards" -> 5))
.normalizers(customNormalizer("lowercase-normalizer", LowercaseTokenFilter, AsciiFoldingTokenFilter))
.mappings(
mapping("entry") templates (
dynamicTemplate("longs")
.mapping(dynamicLongField().fields(longField(NORMALIZED_FIELD))) matchMappingType "long",
dynamicTemplate("doubles")
.mapping(dynamicDoubleField().fields(doubleField(NORMALIZED_FIELD))) matchMappingType "double",
dynamicTemplate("dates")
.mapping(dynamicDateField().fields(dateField(NORMALIZED_FIELD))) matchMappingType "date",
dynamicTemplate("booleans")
.mapping(dynamicBooleanField().fields(booleanField(NORMALIZED_FIELD))) matchMappingType "boolean",
dynamicTemplate("strings")
.mapping(dynamicTextField().fields(keywordField(NORMALIZED_FIELD).normalizer("lowercase-normalizer"))) matchMappingType "string"
)
)
}
_ <- elasticsearch.execute(ElasticDsl.createIndex(INDEX_NAME))
} yield ()
}
override def getUsers(pageNumber: Int, pageSize: Int): ServiceCall[SearchRequest, SearchResponse] = ServiceCall { request =>
val searchQuery =
request.keyword.fold[BoolQuery](boolQuery().must(matchAllQuery())) { searchTerm =>
boolQuery()
.should {
Seq(
matchQuery("firstName", searchTerm),
matchQuery(s"firstName.$NORMALIZED_FIELD", searchTerm),
matchQuery("lastName", searchTerm),
matchQuery(s"lastName.$NORMALIZED_FIELD", searchTerm),
matchQuery("email", searchTerm),
matchQuery(s"email.$NORMALIZED_FIELD", searchTerm)
)
}
.minimumShouldMatch(1)
}
if (pageNumber < 1) throw BadRequest(s"PageNumber: $pageNumber must be greater than or equal to 1")
if (pageSize < 1) throw BadRequest(s"PageSize: $pageSize must be greater than or equal to 1")
executeRequest(searchQuery, pageSize, pageNumber)
}
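// Query semantics: with no keyword the search matches all users; with a
// keyword, at least one of the six `should` clauses (firstName, lastName and
// email, each in raw and normalized form) must match, which is what
// minimumShouldMatch(1) enforces.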
private def findOptionalBy(id: UUID): Future[Option[User]] = {
val booleanIdsQuery = boolQuery().must(idsQuery(id))
executeRequest(booleanIdsQuery).map { result =>
result.result.headOption
}
}
override def findBy(id: UUID): ServiceCall[NotUsed, User] = ServiceCall { _ =>
findOptionalBy(id).map {
case None => throw NotFound(s"User with id: $id not found")
case Some(user) => user
}
}
private def index(user: User): Future[User] = {
val indexRequest = ElasticDsl.indexInto(INDEX_NAME / TYPE_NAME).doc(user).id(user.id.toString)
elasticsearch.execute(indexRequest).map {
case RequestFailure(_, _, _, error) =>
logger.error(s"Error while indexing $user: $user - ${error.reason}")
throw new RuntimeException(s"Error while index $user: $user - ${error.reason}")
case RequestSuccess(_, _, _, _) => user
}
}
private def findOptionalByEmail(email: String): Future[Option[User]] = {
val emailQuery = boolQuery().must(matchQuery(s"email.$NORMALIZED_FIELD", email))
for {
result <- executeRequest(emailQuery)
} yield result.result.headOption
}
override def create: ServiceCall[CreateUser, User] = ServiceCall { createUser =>
for {
existingUser <- findOptionalByEmail(createUser.email)
result <- existingUser match {
case None => index(createUser.getUser)
case _ => throw BadRequest(s"User with email: ${createUser.email} already exists")
}
} yield result
}
override def update(userId: UUID): ServiceCall[User, User] = ServiceCall { user =>
for {
_ <- findBy(userId).invoke()
result <- index(user.copy(id = userId))
} yield result
}
override def delete(userId: UUID): ServiceCall[NotUsed, Done] = ServiceCall { _ =>
for {
_ <- findBy(userId).invoke()
_ <- elasticsearch.execute(deleteById(INDEX_NAME, TYPE_NAME, userId.toString))
} yield Done
}
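// Pagination below uses zero-based offset arithmetic:
// from = (pageNumber - 1) * pageSize. For example, pageNumber = 3 with
// pageSize = 10 skips the first 20 hits and returns hits 21 through 30.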
private def executeRequest(query: BoolQuery, pageSize: Int = 10, pageNumber: Int = 1): Future[SearchResponse] = {
val request = search(INDEX_NAME)
.query(query)
.from((pageNumber - 1) * pageSize)
.size(pageSize)
if (logger.isDebugEnabled) logger.debug(s"Search request is: $request")
elasticsearch
.execute(request)
.map {
case RequestFailure(_, _, _, error) =>
logger.error(s"""Error while performing search request: ${request.show}:
|Error: ${error.reason}
|""".stripMargin)
throw new RuntimeException(s"Failed to execute search request: ${error.reason}")
case RequestSuccess(_, _, _, result) => api.SearchResponse(result.hits.toUsers, pageNumber, pageSize, result.totalHits)
}
}
}
|
tarangbhalodia/scala-lagom-user-crud
|
user-impl/src/main/scala/com/lagom/user/infrastructure/impl/LagomUserLoader.scala
|
package com.lagom.user.infrastructure.impl
import com.lagom.user.api.UserService
import com.lagom.user.infrastructure.ElasticsearchClient
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.lightbend.lagom.scaladsl.api.ServiceLocator.NoServiceLocator
import com.lightbend.lagom.scaladsl.devmode.LagomDevModeComponents
import com.lightbend.lagom.scaladsl.server._
import com.softwaremill.macwire._
import play.api.Environment
import play.api.libs.ws.ahc.AhcWSComponents
import scala.concurrent.ExecutionContext
trait LoginComponent extends LagomServerComponents {
implicit def executionContext: ExecutionContext
def environment: Environment
}
class LagomUserLoader extends LagomApplicationLoader {
override def load(context: LagomApplicationContext): LagomApplication =
new LagomloginApplication(context) {
override def serviceLocator: ServiceLocator = NoServiceLocator
}
override def loadDevMode(context: LagomApplicationContext): LagomApplication =
new LagomloginApplication(context) with LagomDevModeComponents
override def describeService = Some(readDescriptor[UserService])
}
abstract class LagomloginApplication(context: LagomApplicationContext) extends LagomApplication(context) with LoginComponent with AhcWSComponents {
lazy val elasticSearch: ElasticsearchClient = wire[ElasticsearchClient]
override lazy val lagomServer: LagomServer = serverFor[UserService](wire[UserServiceImpl])
}
|
tarangbhalodia/scala-lagom-user-crud
|
user-impl/src/main/scala/com/lagom/user/infrastructure/ElasticsearchClient.scala
|
package com.lagom.user.infrastructure
import java.net.URL
import com.sksamuel.elastic4s.http.{ElasticClient, ElasticNodeEndpoint, ElasticProperties, HttpClient}
import javax.inject.Singleton
import org.apache.http.auth.{AuthScope, UsernamePasswordCredentials}
import org.apache.http.client.config.RequestConfig
import org.apache.http.impl.client.BasicCredentialsProvider
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder
import play.api.Configuration
@Singleton
private[infrastructure] class ElasticsearchClient(configuration: Configuration) extends ElasticClient {
private val clientUrl = new URL(configuration.get[String]("elasticsearch.url"))
private val httpClient = {
ElasticClient(
ElasticProperties(Seq(ElasticNodeEndpoint(clientUrl.getProtocol, clientUrl.getHost, clientUrl.getPort, clientUrl.getPath match {
case "" | "/" => None
case prefix => Some(prefix)
}))),
(requestConfigBuilder: RequestConfig.Builder) => {
requestConfigBuilder.setConnectionRequestTimeout(6000)
},
(httpClientBuilder: HttpAsyncClientBuilder) => {
if (clientUrl.getUserInfo != null) {
val Array(user, password) = clientUrl.getUserInfo.split(':')
val provider = {
val provider = new BasicCredentialsProvider
val credentials = new UsernamePasswordCredentials(user, password)
provider.setCredentials(AuthScope.ANY, credentials)
provider
}
httpClientBuilder.setDefaultCredentialsProvider(provider)
} else httpClientBuilder
}
)
}
override def close(): Unit = httpClient.close()
override def client: HttpClient = httpClient.client
}
|
tarangbhalodia/scala-lagom-user-crud
|
user-impl/src/main/scala/com/lagom/user/infrastructure/util/Converters.scala
|
package com.lagom.user.infrastructure.util
import com.lagom.user.api.User
import com.sksamuel.elastic4s.http.search.SearchHits
import com.sksamuel.elastic4s.playjson._
object Converters {
implicit class RichSearchHits(val searchHits: SearchHits) extends AnyVal {
def toUsers: Seq[User] = {
searchHits.hits.toSeq.map(_.to[User])
}
}
}
|
tarangbhalodia/scala-lagom-user-crud
|
user-api/src/main/scala/com/lagom/user/api/UserService.scala
|
package com.lagom.user.api
import java.util.UUID
import akka.{Done, NotUsed}
import com.lightbend.lagom.scaladsl.api.{Descriptor, Service, ServiceCall}
import play.api.libs.json._
trait UserService extends Service {
def getUsers(pageNumber: Int, pageSize: Int): ServiceCall[SearchRequest, SearchResponse]
def findBy(userId: UUID): ServiceCall[NotUsed, User]
def create: ServiceCall[CreateUser, User]
def update(userId: UUID): ServiceCall[User, User]
def delete(userId: UUID): ServiceCall[NotUsed, Done]
override final def descriptor: Descriptor = {
import Service._
named("lagom-login")
.withCalls(
pathCall("/api/users?pageNumber&pageSize", getUsers _),
pathCall("/api/users/:userId", findBy _),
pathCall("/api/user/create", create _),
pathCall("/api/user/:userId/update", update _),
pathCall("/api/user/:userId/delete", delete _)
)
.withAutoAcl(true)
}
}
case class CreateUser(firstName: String, lastName: String, email: String) {
def getUser = User(UUID.randomUUID(), firstName, lastName, email)
}
object CreateUser {
implicit val format: Format[CreateUser] = Json.format
}
case class User(id: UUID = UUID.randomUUID(), firstName: String, lastName: String, email: String)
object User {
implicit val format: Format[User] = Json.format
}
case class SearchRequest(keyword: Option[String])
object SearchRequest {
implicit val format: Format[SearchRequest] = Json.format
}
case class SearchResponse(result: Seq[User], pageNumber: Int, pageSize: Int, totalRecords: Long)
object SearchResponse {
implicit val format: Format[SearchResponse] = Json.format
}
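// A minimal, hypothetical sketch (not part of the original file) exercising the derived
// play-json Format for User; the sample values below are illustrative assumptions.
object UserJsonExample extends App {
  private val sampleUser = User(UUID.randomUUID(), "Ada", "Lovelace", "ada@example.com")
  private val json: JsValue = Json.toJson(sampleUser) // serialise via User.format
  private val parsed: User = json.as[User]            // parse back into a User
  assert(parsed == sampleUser)
  println(json)
}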
|
jyoo980/maybe
|
src/test/scala/NothingSpec.scala
|
import java.util.NoSuchElementException
import org.scalatest.matchers._
import org.scalatest.flatspec.AnyFlatSpec
class NothingSpec extends AnyFlatSpec with should.Matchers with MaybeFixture {
"Nothing" should "not be defined" in {
val none = toNothing
none.isDefined shouldBe false
}
it should "throw a NoSuchElementException when .get is called" in {
val none = toNothing
try {
none.get
fail("calling .get on Nothing failed to throw a NoSuchElementException")
} catch {
case _: NoSuchElementException => succeed
case e: Throwable => fail(s"calling .get on Nothing yielded: $e")
}
}
it should "not evaluate to true for .exists" in {
val none = toNothing
none.exists(_ == 1) shouldBe false
}
it should "have no effect when .filter is called" in {
val none = toNothing[Int]
none.filter(_ < 1) shouldBe Nothing()
}
it should "have no effect when .filterNot is called" in {
val none = toNothing[String]
none.filterNot(_ == "apple") shouldBe Nothing()
}
it should "evaluate to the alternate when .orElse is called" in {
val alternate = toJust(22)
val none = toNothing[Int]
none.orElse(alternate) match {
case Just(num) => num shouldBe 22
case _ => fail("calling .orElse on Nothing should not have yielded a Nothing given a Just[T]")
}
}
it should "evaluate to Nothing when .flatMap is called" in {
def f(x: String): Maybe[String] =
Just(x * 3)
val none = toNothing[String]
none.flatMap(f) match {
case Just(_) => fail("calling .flatMap on Nothing should not have yielded Just[T]")
case Nothing() => succeed
}
}
it should "be able to be converted to an empty List[T] instance" in {
val none = toNothing[Int]
none.toList match {
case Nil => succeed
case _ => fail(".toList on a Nothing should have been converted to an empty List[T]")
}
}
}
|
jyoo980/maybe
|
project/plugin.sbt
|
// Adding sbt formatting tool
// See docs here: https://scalameta.org/scalafmt/docs/installation.html#task-keys
addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.4.0")
|
jyoo980/maybe
|
src/main/scala/Just.scala
|
final case class Just[T](x: T) extends Maybe[T] {
override def isDefined: Boolean = true
override def get: T = x
override def exists(p: T => Boolean): Boolean = p(x)
override def filter(p: T => Boolean): Maybe[T] =
if (p(x)) Just(x)
else Nothing[T]()
override def filterNot(p: T => Boolean): Maybe[T] =
if (!p(x)) Just(x)
else Nothing[T]()
override def map[U](f: T => U): Maybe[U] = Just(f(x))
override def fold[U](ifEmpty: => U)(f: T => U): U = f(x)
override def orElse[U >: T](alternate: => Maybe[U]): Maybe[U] = Just(x)
override def getOrElse[U](ifEmpty: => U): T = x
override def flatMap[U](f: T => Maybe[U]): Maybe[U] = f(x)
override def toList: List[T] = List(x)
}
|
jyoo980/maybe
|
build.sbt
|
name := "maybe"
version := "0.1"
scalaVersion := "2.13.3"
libraryDependencies += "org.scalatest" %% "scalatest" % "3.2.0" % "test"
|
jyoo980/maybe
|
src/main/scala/Nothing.scala
|
final case class Nothing[T]() extends Maybe[T] {
override def isDefined: Boolean = false
override def get: T =
throw new NoSuchElementException
override def exists(p: T => Boolean): Boolean = false
override def filter(p: T => Boolean): Maybe[T] = Nothing[T]()
override def filterNot(p: T => Boolean): Maybe[T] = Nothing[T]()
override def map[U](f: T => U): Maybe[U] = Nothing[U]()
override def fold[U](ifEmpty: => U)(f: T => U): U = ifEmpty
override def orElse[U >: T](alternate: => Maybe[U]): Maybe[U] = alternate
override def getOrElse[U <: T](ifEmpty: => U): U = ifEmpty
override def flatMap[U](f: T => Maybe[U]): Maybe[U] = Nothing[U]()
override def toList: List[T] = List.empty
}
|
jyoo980/maybe
|
src/main/scala/Maybe.scala
|
abstract class Maybe[T] {
/** Existential check for a Maybe[T] type
*
* @return true for Just[T], false otherwise
*/
def isDefined: Boolean
/** Evaluates to the unwrapped instance of T
*
* @return unwrapped instance of T for Just[T]
* @throws NoSuchElementException if invoked on Nothing[T]
*/
def get: T
/** Existential check for a Maybe[T] type
*
* Applies predicate function p against wrapped instance of T
*
* @param p predicate function with type T => Boolean
* @return true if the wrapped instance of T exists and fulfills p
*/
def exists(p: T => Boolean): Boolean
/** Filter function for a Maybe[T] type
*
* Evaluates to a Maybe[T] type where the wrapped instance of T fulfills
* the predicate function p
*
* @param p predicate function with type T => Boolean
* @return Just[T] if the wrapped instance of T fulfills p, else Nothing[T]
*/
def filter(p: T => Boolean): Maybe[T]
/** Analogue to .filter for a Maybe[T] type
*
* Evaluates to a Maybe[T] type where the wrapped instance of T does NOT
* fulfill the predicate function p
*
* @param p predicate function with type T => Boolean
* @return Just[T] if the wrapped instance of T does NOT fulfill p, else Nothing[T]
*/
def filterNot(p: T => Boolean): Maybe[T]
/** Mapping function from Maybe[T] => Maybe[U]
*
* @param f mapping function with type T => U
* @tparam U return type of function f
* @return Maybe[U]
*/
def map[U](f: T => U): Maybe[U]
/** Fold function for Maybe[T] => U
*
* Note, this is analogous to a composition of .map and .getOrElse and
* can also be expressed as an explicit pattern match on Just and Nothing
* e.g.
* Just(2).map(_ + 2).getOrElse(-1)
* Just(2).fold(-1)(_ + 2)
* maybeNum match {
* case Just(n) => n + 2
* case _ => -1
* }
*
* @param ifEmpty evaluates to this if the Maybe[T] instance is a Nothing[T]
* @param f mapping function from T => U
* @tparam U return type of this fold operation
* @return some instance of type U
*/
def fold[U](ifEmpty: => U)(f: T => U): U
/** Offers an alternate instance of Maybe[U] in case this is an instance of Nothing[T]
*
* @param alternate evaluate to this in the case that this is an instance of Nothing[T]
* @tparam U supertype of T
* @return this Maybe[T] if it is an instance of Just[T], else offer alternate
*/
def orElse[U >: T](alternate: => Maybe[U]): Maybe[U]
/** Unwrapping function for this Maybe[T]
*
* @param ifEmpty evaluates to this unwrapped instance of U if this is an instance of Nothing[T]
* @tparam U subtype of T
* @return the unwrapped T if this is an instance of Just[T], else offer ifEmpty
*/
def getOrElse[U <: T](ifEmpty: => U): T
/** Identical to .map, except the mapping function produces the type Maybe[U]
*
* @param f mapping function from T => Maybe[U]
* @tparam U type parameter of the mapping function f
* @return an instance of Maybe[U], where the U is a result of a transformation from
* T => Maybe[U]
*/
def flatMap[U](f: T => Maybe[U]): Maybe[U]
/** Transformation from a Maybe[T] to a List[T]
*
* Recall that a Maybe[T] can be thought of as a unary collection
* This conversion function makes that connection explicit
*
* @return a unary List[T] for a Just[T], an empty List[T] for a Nothing[T]
*/
def toList: List[T]
}
|
jyoo980/maybe
|
src/test/scala/JustSpec.scala
|
import org.scalatest.matchers._
import org.scalatest.flatspec.AnyFlatSpec
class JustSpec extends AnyFlatSpec with should.Matchers with MaybeFixture {
"a Just[T]" should "be defined" in {
val maybeNum = toJust(1)
maybeNum.isDefined shouldBe true
}
it should "be defined in a pattern-match" in {
val maybeStr = toJust("apple")
maybeStr match {
case Just(str) => str shouldBe "apple"
case Nothing() => fail()
}
}
it should "be map-able" in {
val maybeStr = toJust("a")
maybeStr.map(_ * 3) shouldBe Just("aaa")
}
it should "be fold-able" in {
val maybeNum = toJust(123)
maybeNum.fold("was empty")(_.toString) shouldBe "123"
}
it should "not throw a NoSuchElement exception when .get is called" in {
val maybeStr = toJust("a")
maybeStr.get match {
case s: String => s shouldBe "a"
case _ => fail("Calling .get on a Just should not throw an exception")
}
}
it should "filter correctly for a given predicate that its inner value fulfills" in {
val maybeString = toJust("a")
maybeString.filter(_ == "a") shouldBe Just("a")
}
it should "filter correctly for a given predicate that its inner value does not fulfill" in {
val maybeNum = toJust(1)
maybeNum.filter(_ != 1) shouldBe Nothing()
}
it should "filterNot correctly for a given predicate that its inner value fulfills" in {
val maybeNum = toJust(11)
maybeNum.filterNot(_ == 11) shouldBe Nothing()
}
it should "filterNot correctly for a given predicate that its inner value does not fulfill" in {
val maybeNum = toJust("a")
maybeNum.filterNot(_ != "a") shouldBe Just("a")
}
it should "exist for a given predicate that its inner value fulfills" in {
val maybeStr = toJust("a")
maybeStr.exists(_ == "a") shouldBe true
}
it should "evaluate to a Just[T] value when .orElse is called on it" in {
val alternate = toJust(-1)
val maybeNum = toJust(1)
maybeNum.orElse(alternate) match {
case Just(-1) => fail(".orElse should not evaluate to its alternate value on a Just[T]")
case Just(num) => num shouldBe 1
}
}
it should "evaluate to a Just[T] value when .flatMap is called on it" in {
def f(x: Int): Maybe[Int] =
Just(x + 1)
val maybeNum = toJust(1)
maybeNum.flatMap(f) match {
case Just(n) => n shouldBe 2
case _ => fail(".flatMap failed to produce a Just[T] value")
}
}
it should "not fail when .getOrElse is called" in {
val maybeNum = toJust(1)
maybeNum.getOrElse(-1) shouldBe 1
}
it should "be able to chain calls" in {
val maybeNum = toJust(123)
maybeNum.map(_.toString).getOrElse("-1") shouldBe "123"
}
it should "be able to be converted to a unary List[T]" in {
val maybeStr = toJust("hello")
maybeStr.toList match {
case h :: Nil => h shouldBe "hello"
case _ => fail(".toList on a Just[T] should have evaluated to a unary List[T]")
}
}
}
|
jyoo980/maybe
|
src/test/scala/MaybeFixture.scala
|
trait MaybeFixture {
def toJust[T](t: T): Maybe[T] = Just[T](t)
def toNothing[T]: Maybe[T] = Nothing[T]()
}
|
super-wj-0820/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/api/TableImpl.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api
import org.apache.flink.table.expressions.Expression
import org.apache.flink.table.functions.TemporalTableFunction
import org.apache.flink.table.operations.QueryOperation
/**
* The implementation of the [[Table]].
*
* NOTE: Currently, [[TableImpl]] is just a wrapper for RelNode
* and all the methods in the class are not implemented. This is
* used to support end-to-end tests for Blink planner. It will be
* implemented when we support full stack Table API for Blink planner.
*
* @param tableEnv The [[TableEnvironment]] to which the table is bound.
* @param operationTree logical representation
*/
class TableImpl(val tableEnv: TableEnvironment, operationTree: QueryOperation) extends Table {
private lazy val tableSchema: TableSchema = operationTree.getTableSchema
override def getQueryOperation: QueryOperation = operationTree
override def getSchema: TableSchema = tableSchema
override def printSchema(): Unit = ???
override def select(fields: String): Table = ???
override def select(fields: Expression*): Table = ???
override def createTemporalTableFunction(
timeAttribute: String,
primaryKey: String): TemporalTableFunction = ???
override def createTemporalTableFunction(
timeAttribute: Expression,
primaryKey: Expression): TemporalTableFunction = ???
override def as(fields: String): Table = ???
override def as(fields: Expression*): Table = ???
override def filter(predicate: String): Table = ???
override def filter(predicate: Expression): Table = ???
override def where(predicate: String): Table = ???
override def where(predicate: Expression): Table = ???
override def groupBy(fields: String): GroupedTable = ???
override def groupBy(fields: Expression*): GroupedTable = ???
override def distinct(): Table = ???
override def join(right: Table): Table = ???
override def join(
right: Table,
joinPredicate: String): Table = ???
override def join(
right: Table,
joinPredicate: Expression): Table = ???
override def leftOuterJoin(right: Table): Table = ???
override def leftOuterJoin(
right: Table,
joinPredicate: String): Table = ???
override def leftOuterJoin(
right: Table,
joinPredicate: Expression): Table = ???
override def rightOuterJoin(
right: Table,
joinPredicate: String): Table = ???
override def rightOuterJoin(
right: Table,
joinPredicate: Expression): Table = ???
override def fullOuterJoin(
right: Table,
joinPredicate: String): Table = ???
override def fullOuterJoin(
right: Table,
joinPredicate: Expression): Table = ???
override def joinLateral(tableFunctionCall: String): Table = ???
override def joinLateral(tableFunctionCall: Expression): Table = ???
override def joinLateral(
tableFunctionCall: String,
joinPredicate: String): Table = ???
override def joinLateral(
tableFunctionCall: Expression,
joinPredicate: Expression): Table = ???
override def leftOuterJoinLateral(tableFunctionCall: String): Table = ???
override def leftOuterJoinLateral(tableFunctionCall: Expression): Table = ???
override def leftOuterJoinLateral(
tableFunctionCall: String,
joinPredicate: String): Table = ???
override def leftOuterJoinLateral(
tableFunctionCall: Expression,
joinPredicate: Expression): Table = ???
override def minus(right: Table): Table = ???
override def minusAll(right: Table): Table = ???
override def union(right: Table): Table = ???
override def unionAll(right: Table): Table = ???
override def intersect(right: Table): Table = ???
override def intersectAll(right: Table): Table = ???
override def orderBy(fields: String): Table = ???
override def orderBy(fields: Expression*): Table = ???
override def offset(offset: Int): Table = ???
override def fetch(fetch: Int): Table = ???
override def insertInto(tablePath: String, tablePathContinued: String*): Unit = ???
override def insertInto(
conf: QueryConfig,
tablePath: String,
tablePathContinued: String*): Unit = ???
override def insertInto(
tableName: String,
conf: QueryConfig): Unit = ???
override def window(groupWindow: GroupWindow): GroupWindowedTable = ???
override def window(overWindows: OverWindow*): OverWindowedTable = ???
override def addColumns(fields: String): Table = ???
override def addColumns(fields: Expression*): Table = ???
override def addOrReplaceColumns(fields: String): Table = ???
override def addOrReplaceColumns(fields: Expression*): Table = ???
override def renameColumns(fields: String): Table = ???
override def renameColumns(fields: Expression*): Table = ???
override def dropColumns(fields: String): Table = ???
override def dropColumns(fields: Expression*): Table = ???
override def map(mapFunction: String): Table = ???
override def map(mapFunction: Expression): Table = ???
override def flatMap(tableFunction: String): Table = ???
override def flatMap(tableFunction: Expression): Table = ???
override def aggregate(aggregateFunction: String): AggregatedTable = ???
override def aggregate(aggregateFunction: Expression): AggregatedTable = ???
override def flatAggregate(tableAggregateFunction: String): FlatAggregateTable = ???
override def flatAggregate(tableAggregateFunction: Expression): FlatAggregateTable = ???
}
|
super-wj-0820/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/api/StreamTableEnvironment.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api
import org.apache.flink.annotation.VisibleForTesting
import org.apache.flink.api.common.JobExecutionResult
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.configuration.Configuration
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.runtime.state.memory.MemoryStateBackend
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.datastream.DataStream
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment
import org.apache.flink.streaming.api.graph.{StreamGraph, StreamGraphGenerator}
import org.apache.flink.streaming.api.transformations.StreamTransformation
import org.apache.flink.table.catalog.CatalogManager
import org.apache.flink.table.dataformat.BaseRow
import org.apache.flink.table.operations.DataStreamQueryOperation
import org.apache.flink.table.plan.`trait`.{AccModeTraitDef, FlinkRelDistributionTraitDef, MiniBatchIntervalTraitDef, UpdateAsRetractionTraitDef}
import org.apache.flink.table.plan.nodes.calcite.LogicalSink
import org.apache.flink.table.plan.nodes.exec.{ExecNode, StreamExecNode}
import org.apache.flink.table.plan.nodes.process.DAGProcessContext
import org.apache.flink.table.plan.nodes.resource.parallelism.ParallelismProcessor
import org.apache.flink.table.plan.optimize.{Optimizer, StreamCommonSubGraphBasedOptimizer}
import org.apache.flink.table.plan.stats.FlinkStatistic
import org.apache.flink.table.plan.util.{ExecNodePlanDumper, FlinkRelOptUtil}
import org.apache.flink.table.sinks.DataStreamTableSink
import org.apache.flink.table.sources.{LookupableTableSource, StreamTableSource, TableSource}
import org.apache.flink.table.types.logical.{LogicalType, RowType}
import org.apache.flink.table.types.utils.TypeConversions.fromLegacyInfoToDataType
import org.apache.flink.table.types.{DataType, LogicalTypeDataTypeConverter}
import org.apache.flink.table.typeutils.{TimeIndicatorTypeInfo, TypeCheckUtils}
import org.apache.flink.table.util.PlanUtil
import org.apache.calcite.plan.{ConventionTraitDef, RelTrait, RelTraitDef}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.sql.SqlExplainLevel
import _root_.scala.collection.JavaConversions._
/**
* The base class for stream TableEnvironments.
*
* A TableEnvironment can be used to:
* - convert [[DataStream]] to a [[Table]]
* - register a [[DataStream]] as a table in the catalog
* - register a [[Table]] in the catalog
* - scan a registered table to obtain a [[Table]]
* - specify a SQL query on registered tables to obtain a [[Table]]
* - convert a [[Table]] into a [[DataStream]]
*
* @param execEnv The [[StreamExecutionEnvironment]] which is wrapped in this
* [[StreamTableEnvironment]].
* @param config The [[TableConfig]] of this [[StreamTableEnvironment]].
*/
abstract class StreamTableEnvironment(
private[flink] val execEnv: StreamExecutionEnvironment,
config: TableConfig,
catalogManager: CatalogManager)
extends TableEnvironment(execEnv, config, catalogManager) {
// prefix for unique table names.
override private[flink] val tableNamePrefix = "_DataStreamTable_"
// the naming pattern for internally registered tables.
private val internalNamePattern = "^_DataStreamTable_[0-9]+$".r
private var isConfigMerged: Boolean = false
override def queryConfig: StreamQueryConfig = new StreamQueryConfig
override protected def getTraitDefs: Array[RelTraitDef[_ <: RelTrait]] = {
Array(
ConventionTraitDef.INSTANCE,
FlinkRelDistributionTraitDef.INSTANCE,
MiniBatchIntervalTraitDef.INSTANCE,
UpdateAsRetractionTraitDef.INSTANCE,
AccModeTraitDef.INSTANCE)
}
override protected def getOptimizer: Optimizer = new StreamCommonSubGraphBasedOptimizer(this)
override private[flink] def isBatch = false
/**
* Checks if the chosen table name is valid.
*
* @param name The table name to check.
*/
override protected def checkValidTableName(name: String): Unit = {
val m = internalNamePattern.findFirstIn(name)
m match {
case Some(_) =>
throw new TableException(s"Illegal Table name. " +
s"Please choose a name that does not contain the pattern $internalNamePattern")
case None =>
}
}
override protected def validateTableSource(tableSource: TableSource[_]): Unit = {
// TODO TableSourceUtil.validateTableSource(tableSource)
tableSource match {
// check for proper stream table source
case streamTableSource: StreamTableSource[_] if !streamTableSource.isBounded => // ok
// TODO `TableSourceUtil.hasRowtimeAttribute` depends on [Expression]
// check that event-time is enabled if table source includes rowtime attributes
// if (TableSourceUtil.hasRowtimeAttribute(streamTableSource) &&
// execEnv.getStreamTimeCharacteristic != TimeCharacteristic.EventTime) {
// throw new TableException(
// s"A rowtime attribute requires an EventTime time characteristic in stream " +
// s"environment. But is: ${execEnv.getStreamTimeCharacteristic}")
// }
// a lookupable table source can also be registered in the env
case _: LookupableTableSource[_] =>
// not a stream table source
case _ =>
throw new TableException("Only LookupableTableSource and unbounded StreamTableSource " +
"can be registered in StreamTableEnvironment")
}
}
override def execute(jobName: String): JobExecutionResult = {
generateStreamGraph(jobName)
// TODO supports execEnv.execute(streamGraph)
execEnv.execute(jobName)
}
protected override def translateStreamGraph(
streamingTransformations: Seq[StreamTransformation[_]],
jobName: Option[String] = None): StreamGraph = {
mergeParameters()
new StreamGraphGenerator(
streamingTransformations.toList, execEnv.getConfig, execEnv.getCheckpointConfig)
.setChaining(execEnv.isChainingEnabled)
.setDefaultBufferTimeout(execEnv.getBufferTimeout)
.setStateBackend(execEnv.getStateBackend)
.setTimeCharacteristic(execEnv.getStreamTimeCharacteristic)
.setUserArtifacts(execEnv.getCachedFiles)
.setJobName(jobName.getOrElse(DEFAULT_JOB_NAME))
.generate()
}
/**
* Merge global job parameters and table config parameters,
* and set the merged result to GlobalJobParameters
*/
private def mergeParameters(): Unit = {
if (!isConfigMerged && execEnv != null && execEnv.getConfig != null) {
val parameters = new Configuration()
if (config != null && config.getConf != null) {
parameters.addAll(config.getConf)
}
if (execEnv.getConfig.getGlobalJobParameters != null) {
execEnv.getConfig.getGlobalJobParameters.toMap.foreach {
kv => parameters.setString(kv._1, kv._2)
}
}
val isHeapState = Option(execEnv.getStateBackend) match {
case Some(backend) if backend.isInstanceOf[MemoryStateBackend] ||
backend.isInstanceOf[FsStateBackend] => true
case None => true
case _ => false
}
parameters.setBoolean(TableConfigOptions.SQL_EXEC_STATE_BACKEND_ON_HEAP, isHeapState)
execEnv.getConfig.setGlobalJobParameters(parameters)
isConfigMerged = true
}
}
override private[flink] def translateToExecNodeDag(rels: Seq[RelNode]): Seq[ExecNode[_, _]] = {
val nodeDag = super.translateToExecNodeDag(rels)
val context = new DAGProcessContext(this)
new ParallelismProcessor().process(nodeDag, context)
}
/**
* Translates a [[Table]] into a [[DataStream]].
*
* The transformation involves optimizing the relational expression tree as defined by
* Table API calls and / or SQL queries and generating corresponding [[DataStream]] operators.
*
* @param table The root node of the relational expression tree.
* @param updatesAsRetraction Set to true to encode updates as retraction messages.
* @param withChangeFlag Set to true to emit records with change flags.
* @param resultType The [[org.apache.flink.api.common.typeinfo.TypeInformation]] of
* the resulting [[DataStream]].
* @tparam T The type of the resulting [[DataStream]].
* @return The [[DataStream]] that corresponds to the translated [[Table]].
*/
protected def translateToDataStream[T](
table: Table,
updatesAsRetraction: Boolean,
withChangeFlag: Boolean,
resultType: TypeInformation[T]): DataStream[T] = {
val sink = new DataStreamTableSink[T](table, resultType, updatesAsRetraction, withChangeFlag)
val sinkName = createUniqueTableName()
val input = getRelBuilder.queryOperation(table.getQueryOperation).build()
val sinkNode = LogicalSink.create(input, sink, sinkName)
val transformation = translateSink(sinkNode)
new DataStream(execEnv, transformation).asInstanceOf[DataStream[T]]
}
private def translateSink(sink: LogicalSink): StreamTransformation[_] = {
mergeParameters()
val optimizedPlan = optimize(sink)
val optimizedNodes = translateToExecNodeDag(Seq(optimizedPlan))
require(optimizedNodes.size() == 1)
translateToPlan(optimizedNodes.head)
}
override protected def translateToPlan(
sinks: Seq[ExecNode[_, _]]): Seq[StreamTransformation[_]] = sinks.map(translateToPlan)
/**
* Translates a [[StreamExecNode]] plan into a [[StreamTransformation]].
*
* @param node The plan to translate.
* @return The [[StreamTransformation]] of type [[BaseRow]].
*/
private def translateToPlan(node: ExecNode[_, _]): StreamTransformation[_] = {
node match {
case node: StreamExecNode[_] => node.translateToPlan(this)
case _ =>
throw new TableException("Cannot generate DataStream due to an invalid logical plan. " +
"This is a bug and should not happen. Please file an issue.")
}
}
/**
* Returns the AST of the specified Table API and SQL queries and the execution plan to compute
* the result of the given [[Table]].
*
* @param table The table for which the AST and execution plan will be returned.
*/
def explain(table: Table): String = explain(table, extended = false)
/**
* Returns the AST of the specified Table API and SQL queries and the execution plan to compute
* the result of the given [[Table]].
*
* @param table The table for which the AST and execution plan will be returned.
* @param extended Flag to include detailed optimizer estimates.
*/
def explain(table: Table, extended: Boolean): String = {
val ast = getRelBuilder.queryOperation(table.getQueryOperation).build()
val execNodeDag = compileToExecNodePlan(ast)
val transformations = translateToPlan(execNodeDag)
val streamGraph = translateStreamGraph(transformations)
val executionPlan = PlanUtil.explainStreamGraph(streamGraph)
val (explainLevel, withRetractTraits) = if (extended) {
(SqlExplainLevel.ALL_ATTRIBUTES, true)
} else {
(SqlExplainLevel.EXPPLAN_ATTRIBUTES, false)
}
s"== Abstract Syntax Tree ==" +
System.lineSeparator +
s"${FlinkRelOptUtil.toString(ast)}" +
System.lineSeparator +
s"== Optimized Logical Plan ==" +
System.lineSeparator +
s"${
ExecNodePlanDumper.dagToString(
execNodeDag,
explainLevel,
withRetractTraits = withRetractTraits)
}" +
System.lineSeparator +
s"== Physical Execution Plan ==" +
System.lineSeparator +
s"$executionPlan"
}
/**
* Explains the whole plan and returns the AST(s) of the specified Table API and SQL queries
* and the execution plan.
*/
def explain(): String = explain(extended = false)
/**
* Explains the whole plan and returns the AST(s) of the specified Table API and SQL queries
* and the execution plan.
*
* @param extended Flag to include detailed optimizer estimates.
*/
def explain(extended: Boolean): String = {
val sinkExecNodes = compileToExecNodePlan(sinkNodes: _*)
// translate relNodes to StreamTransformations
val sinkTransformations = translateToPlan(sinkExecNodes)
val streamGraph = translateStreamGraph(sinkTransformations)
val sqlPlan = PlanUtil.explainStreamGraph(streamGraph)
val sb = new StringBuilder
sb.append("== Abstract Syntax Tree ==")
sb.append(System.lineSeparator)
sinkNodes.foreach { sink =>
sb.append(FlinkRelOptUtil.toString(sink))
sb.append(System.lineSeparator)
}
sb.append("== Optimized Logical Plan ==")
sb.append(System.lineSeparator)
val (explainLevel, withRetractTraits) = if (extended) {
(SqlExplainLevel.ALL_ATTRIBUTES, true)
} else {
(SqlExplainLevel.EXPPLAN_ATTRIBUTES, false)
}
sb.append(ExecNodePlanDumper.dagToString(
sinkExecNodes,
explainLevel,
withRetractTraits = withRetractTraits))
sb.append(System.lineSeparator)
sb.append("== Physical Execution Plan ==")
sb.append(System.lineSeparator)
sb.append(sqlPlan)
sb.toString()
}
@VisibleForTesting
private[flink] def asQueryOperation[T](
dataStream: DataStream[T],
fields: Option[Array[String]],
fieldNullables: Option[Array[Boolean]] = None,
statistic: Option[FlinkStatistic] = None): DataStreamQueryOperation[T] = {
val streamType = dataStream.getType
val streamDataType = fromLegacyInfoToDataType(streamType)
// get field names and types for all non-replaced fields
val (indices, names) = fields match {
case Some(f) =>
// validate and extract time attributes
// TODO should use FieldInfoUtils#getFieldsInfo instead of getFieldInfo
// TODO: validate and extract time attributes after we introduce [Expression],
// return None currently
val (rowtime, proctime) = validateAndExtractTimeAttributes(streamDataType, f)
val (fieldNames, fieldIndexes) = getFieldInfo(streamDataType, f)
// check if event-time is enabled
if (rowtime.isDefined &&
execEnv.getStreamTimeCharacteristic != TimeCharacteristic.EventTime) {
throw new TableException(
s"A rowtime attribute requires an EventTime time characteristic in stream environment" +
s". But is: ${execEnv.getStreamTimeCharacteristic}")
}
// adjust field indexes and field names
val indexesWithIndicatorFields = adjustFieldIndexes(fieldIndexes, rowtime, proctime)
val namesWithIndicatorFields = adjustFieldNames(fieldNames, rowtime, proctime)
(indexesWithIndicatorFields, namesWithIndicatorFields)
case None =>
val (fieldNames, fieldIndexes) = getFieldInfo[T](streamDataType)
(fieldIndexes, fieldNames)
}
val dataStreamTable = new DataStreamQueryOperation(
dataStream,
indices,
TableEnvironment.calculateTableSchema(streamType, indices, names),
fieldNullables.getOrElse(Array.fill(indices.length)(true)),
false,
false,
statistic.getOrElse(FlinkStatistic.UNKNOWN))
dataStreamTable
}
/**
* Checks for at most one rowtime and proctime attribute.
* Returns the time attributes.
*
* @return rowtime attribute and proctime attribute
*/
// TODO: we should support Expression fields after we introduce [Expression]
private[flink] def validateAndExtractTimeAttributes(
streamType: DataType,
fields: Array[String]): (Option[(Int, String)], Option[(Int, String)]) = {
val streamLogicalType = LogicalTypeDataTypeConverter.fromDataTypeToLogicalType(streamType)
val (isRefByPos, fieldTypes) = streamLogicalType match {
case c: RowType =>
// determine schema definition mode (by position or by name)
(isReferenceByPosition(c, fields),
(0 until c.getFieldCount).map(i => c.getTypeAt(i)).toArray)
case t =>
(false, Array(t))
}
var fieldNames: List[String] = Nil
var rowtime: Option[(Int, String)] = None
var proctime: Option[(Int, String)] = None
def checkRowtimeType(t: LogicalType): Unit = {
if (!(TypeCheckUtils.isLong(t) || TypeCheckUtils.isTimePoint(t))) {
throw new TableException(
s"The rowtime attribute can only replace a field with a valid time type, " +
s"such as Timestamp or Long. But was: $t")
}
}
def extractRowtime(idx: Int, name: String, origName: Option[String]): Unit = {
if (rowtime.isDefined) {
throw new TableException(
"The rowtime attribute can only be defined once in a table schema.")
} else {
// if the fields are referenced by position,
// it is possible to replace an existing field or append the time attribute at the end
if (isRefByPos) {
// aliases are not permitted
if (origName.isDefined) {
throw new TableException(
s"Invalid alias '${origName.get}' because fields are referenced by position.")
}
// check type of field that is replaced
if (idx < fieldTypes.length) {
checkRowtimeType(fieldTypes(idx))
}
}
// check reference-by-name
else {
val aliasOrName = origName.getOrElse(name)
streamLogicalType match {
// both alias and reference must have a valid type if they replace a field
case ct: RowType if ct.getFieldIndex(aliasOrName) != -1 =>
val t = ct.getTypeAt(ct.getFieldIndex(aliasOrName))
checkRowtimeType(t)
// alias could not be found
case _ if origName.isDefined =>
throw new TableException(s"Alias '${origName.get}' must reference an existing field.")
case _ => // ok
}
}
rowtime = Some(idx, name)
}
}
def extractProctime(idx: Int, name: String): Unit = {
if (proctime.isDefined) {
throw new TableException(
"The proctime attribute can only be defined once in a table schema.")
} else {
// if the fields are referenced by position,
// it is only possible to append the time attribute at the end
if (isRefByPos) {
// check that proctime is only appended
if (idx < fieldTypes.length) {
throw new TableException(
"The proctime attribute can only be appended to the table schema and not replace " +
s"an existing field. Please move '$name' to the end of the schema.")
}
}
// check reference-by-name
else {
streamLogicalType match {
// proctime attribute must not replace a field
case ct: RowType if ct.getFieldIndex(name) != -1 =>
throw new TableException(
s"The proctime attribute '$name' must not replace an existing field.")
case _ => // ok
}
}
proctime = Some(idx, name)
}
}
fields.zipWithIndex.foreach {
case ("rowtime", idx) =>
extractRowtime(idx, "rowtime", None)
case ("proctime", idx) =>
extractProctime(idx, "proctime")
case (name, _) => fieldNames = name :: fieldNames
}
if (rowtime.isDefined && fieldNames.contains(rowtime.get._2)) {
throw new TableException(
"The rowtime attribute may not have the same name as an another field.")
}
if (proctime.isDefined && fieldNames.contains(proctime.get._2)) {
throw new TableException(
"The proctime attribute may not have the same name as an another field.")
}
(rowtime, proctime)
}
/**
* Injects markers for time indicator fields into the field indexes.
*
* @param fieldIndexes The field indexes into which the time indicators markers are injected.
* @param rowtime An optional rowtime indicator
* @param proctime An optional proctime indicator
* @return An adjusted array of field indexes.
*/
private def adjustFieldIndexes(
fieldIndexes: Array[Int],
rowtime: Option[(Int, String)],
proctime: Option[(Int, String)]): Array[Int] = {
// inject rowtime field
val withRowtime = rowtime match {
case Some(rt) =>
fieldIndexes.patch(rt._1, Seq(TimeIndicatorTypeInfo.ROWTIME_STREAM_MARKER), 0)
case _ =>
fieldIndexes
}
// inject proctime field
val withProctime = proctime match {
case Some(pt) =>
withRowtime.patch(pt._1, Seq(TimeIndicatorTypeInfo.PROCTIME_STREAM_MARKER), 0)
case _ =>
withRowtime
}
withProctime
}
/**
* Injects names of time indicator fields into the list of field names.
*
* @param fieldNames The array of field names into which the time indicator field names are
* injected.
* @param rowtime An optional rowtime indicator
* @param proctime An optional proctime indicator
* @return An adjusted array of field names.
*/
private def adjustFieldNames(
fieldNames: Array[String],
rowtime: Option[(Int, String)],
proctime: Option[(Int, String)]): Array[String] = {
// inject rowtime field
val withRowtime = rowtime match {
case Some(rt) => fieldNames.patch(rt._1, Seq(rowtime.get._2), 0)
case _ => fieldNames
}
// inject proctime field
val withProctime = proctime match {
case Some(pt) => withRowtime.patch(pt._1, Seq(proctime.get._2), 0)
case _ => withRowtime
}
withProctime
}
}
|
jgagnon1/cats-andra
|
src/main/scala/com/jgagnon/cats/andra/Application.scala
|
/*
* Copyright 2017 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.jgagnon.cats.andra
import com.jgagnon.cats.andra.interpreters.ScalaMapInterpreter
/**
* Sample application showing the use of Operations using the Free Monad backend by multiple Interpreters
*/
object Application extends App {
import Operations._
def program: Storable[Option[Int]] =
for {
_ <- put("alpha", 2)
_ <- put("beta", 5)
_ <- update[Int]("alpha", _ + 1)
_ <- delete("beta")
u <- get[Int]("alpha")
} yield u
val interpreter = ScalaMapInterpreter.pureCompiler
val finalValue = program.foldMap(interpreter).run(Map.empty).value
println(finalValue)
}
|
jgagnon1/cats-andra
|
src/main/scala/com/jgagnon/cats/andra/interpreters/ScalaMapInterpreter.scala
|
/*
* Copyright 2017 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.jgagnon.cats.andra.interpreters
import cats._
import data.State
import com.jgagnon.cats.andra.Operations.StorableA
object ScalaMapInterpreter {
import StorableA._
// Scala Map backed interpreter
type KVStoreState[A] = State[Map[String, Any], A]
val pureCompiler: StorableA ~> KVStoreState =
new (StorableA ~> KVStoreState) {
def apply[A](fa: StorableA[A]): KVStoreState[A] =
fa match {
case Put(key, value) =>
State.modify(_.updated(key, value))
case Get(key) =>
State.inspect(_.get(key).map(_.asInstanceOf[A]))
case Delete(key) =>
State.modify(_ - key)
}
}
}
|
jgagnon1/cats-andra
|
src/main/scala/com/jgagnon/cats/andra/Operations.scala
|
/*
* Copyright 2017 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.jgagnon.cats.andra
import cats._
import free._
import Free._
object Operations {
// Operation ADT for StorableA
sealed trait StorableA[A]
object StorableA {
final case class Put[T](key: String, value: T) extends StorableA[Unit]
final case class Get[T](key: String) extends StorableA[Option[T]]
final case class Delete(key: String) extends StorableA[Unit]
}
import StorableA._
// Lift ADT Operations StorableA[_] into Storable[_] free monad
type Storable[A] = Free[StorableA, A]
def put[T](key: String, value: T): Storable[Unit] =
liftF[StorableA, Unit](Put(key, value))
def get[T](key: String): Storable[Option[T]] =
liftF[StorableA, Option[T]](Get(key))
def delete[T](key: String): Storable[Unit] =
liftF[StorableA, Unit](Delete(key))
def update[T](key: String, f: T => T): Storable[Unit] =
for {
mV <- get[T](key)
_ <- mV.map(v => put(key, f(v))).getOrElse(Free.pure(()))
} yield ()
}
|
tianhuil/spark-types
|
Injection.scala
|
class Bag
trait AComponent { this: BComponent =>
val a: A
class A {
def aFunc(i: Int): Int = {
if (i == 0) {
0
} else {
b.bFunc(i - 1) + 1
}
}
}
}
trait BComponent { this: AComponent =>
val b: B
class B {
def bFunc(i: Int): Int = {
if (i == 0) {
0
} else {
a.aFunc(i - 1) + 1
}
}
}
}
object ComponentRegistry extends AComponent with BComponent {
val a = new A
val b = new B
}
object Foo {
def main(args: Array[String]): Unit = {
println(ComponentRegistry.a.aFunc(5))
println(ComponentRegistry.b.bFunc(5))
}
}
|
tianhuil/spark-types
|
Inherit.scala
|
<reponame>tianhuil/spark-types
trait T {
val str: String
val int: Int
}
class C1 extends T {
val str: String = "foo"
val int: Int = 2
}
class C2 {
val str: String = "foo"
val int: Int = 2
}
object D {
def foo[S <: T](s: S): String = {
s.int.toString + s.str
}
def foo1(s: C1) = foo(s)
//
// def foo2(s: C2) = foo(s)
}
|
Lance0312/spark-cassandra-connection-factory
|
src/main/scala/co/verdigris/spark/connector/cql/AwsS3USWest1ConnectionFactory.scala
|
package co.verdigris.spark.connector.cql
import com.amazonaws.regions.Regions
object AwsS3USWest1ConnectionFactory extends S3ConnectionFactory {
this.s3Region = Some(Regions.US_WEST_1.getName)
}
|
Lance0312/spark-cassandra-connection-factory
|
project/plugins.sbt
|
resolvers += "Era7 maven releases" at "https://s3-eu-west-1.amazonaws.com/releases.era7.com"
addSbtPlugin("ohnosequences" % "sbt-s3-resolver" % "0.15.0")
addSbtPlugin("org.scoverage" % "sbt-scoverage" % "1.5.0")
|
Lance0312/spark-cassandra-connection-factory
|
src/main/scala/co/verdigris/spark/connector/cql/AwsS3APSouth1ConnectionFactory.scala
|
package co.verdigris.spark.connector.cql
import com.amazonaws.regions.Regions
object AwsS3APSouth1ConnectionFactory extends S3ConnectionFactory {
this.s3Region = Some(Regions.AP_SOUTH_1.getName)
}
|
Lance0312/spark-cassandra-connection-factory
|
src/main/scala/co/verdigris/spark/connector/cql/AwsS3EUCentral1ConnectionFactory.scala
|
package co.verdigris.spark.connector.cql
import com.amazonaws.regions.Regions
object AwsS3EUCentral1ConnectionFactory extends S3ConnectionFactory {
this.s3Region = Some(Regions.EU_CENTRAL_1.getName)
}
|
Lance0312/spark-cassandra-connection-factory
|
src/main/scala/co/verdigris/spark/connector/cql/S3ConnectionFactory.scala
|
package co.verdigris.spark.connector.cql
import co.verdigris.spark.connector.ssl.AwsS3SSLOptions
import com.datastax.driver.core.policies.ExponentialReconnectionPolicy
import com.datastax.driver.core.{Cluster, QueryOptions, SSLOptions, SocketOptions}
import com.datastax.spark.connector.cql.CassandraConnectorConf.CassandraSSLConf
import com.datastax.spark.connector.cql._
import org.apache.spark.{SparkConf, SparkContext}
trait S3ConnectionFactory extends CassandraConnectionFactory {
protected val sparkConf: SparkConf = SparkContext.getOrCreate().getConf
protected var s3Region: Option[String] = sparkConf.getOption("spark.connection.ssl.s3AwsRegion")
/** Returns the Cluster.Builder object used to setup Cluster instance. */
def clusterBuilder(conf: CassandraConnectorConf): Cluster.Builder = {
val options = new SocketOptions()
.setConnectTimeoutMillis(conf.connectTimeoutMillis)
.setReadTimeoutMillis(conf.readTimeoutMillis)
val builder = Cluster.builder()
.addContactPoints(conf.hosts.toSeq: _*)
.withPort(conf.port)
.withRetryPolicy(
new MultipleRetryPolicy(conf.queryRetryCount))
.withReconnectionPolicy(
new ExponentialReconnectionPolicy(conf.minReconnectionDelayMillis, conf.maxReconnectionDelayMillis))
.withLoadBalancingPolicy(
new LocalNodeFirstLoadBalancingPolicy(conf.hosts, conf.localDC))
.withAuthProvider(conf.authConf.authProvider)
.withSocketOptions(options)
.withCompression(conf.compression)
.withQueryOptions(
new QueryOptions()
.setRefreshNodeIntervalMillis(0)
.setRefreshNodeListIntervalMillis(0)
.setRefreshSchemaIntervalMillis(0))
if (conf.cassandraSSLConf.enabled) {
maybeCreateSSLOptions(conf.cassandraSSLConf) match {
case Some(sslOptions) ⇒ builder.withSSL(sslOptions)
case None ⇒ builder.withSSL()
}
} else {
builder
}
}
protected def maybeCreateSSLOptions(conf: CassandraSSLConf): Option[SSLOptions] = {
if (conf.enabled) {
Some(
AwsS3SSLOptions.builder()
.withSSLConf(conf)
.withAwsRegion(this.s3Region)
.build()
)
} else {
None
}
}
override def createCluster(conf: CassandraConnectorConf): Cluster = clusterBuilder(conf).build()
}
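// A minimal, hypothetical sketch (not part of the original file) showing how one of the
// region-specific factories above might be selected through SparkConf. The key
// "spark.cassandra.connection.factory" is the Spark Cassandra Connector setting for a
// custom CassandraConnectionFactory; the host name and SSL keys below are illustrative
// assumptions, while "spark.connection.ssl.s3AwsRegion" matches the option read above.
object S3ConnectionFactoryUsageExample {
  val conf: SparkConf = new SparkConf()
    .setAppName("cassandra-over-s3-truststore")
    .set("spark.cassandra.connection.host", "cassandra.example.com")
    .set("spark.cassandra.connection.factory",
      "co.verdigris.spark.connector.cql.AwsS3USWest1ConnectionFactory")
    .set("spark.cassandra.connection.ssl.enabled", "true")
    .set("spark.connection.ssl.s3AwsRegion", "us-west-1")
}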
|
Lance0312/spark-cassandra-connection-factory
|
src/main/scala/co/verdigris/spark/connector/cql/AwsS3EUWest1ConnectionFactory.scala
|
package co.verdigris.spark.connector.cql
import com.amazonaws.regions.Regions
object AwsS3EUWest1ConnectionFactory extends S3ConnectionFactory {
this.s3Region = Some(Regions.EU_WEST_1.getName)
}
|
Lance0312/spark-cassandra-connection-factory
|
build.sbt
|
import com.amazonaws.services.s3.model.Region
name := "spark-cassandra-connection-factory"
organization := "co.verdigris.spark"
version := "0.3.5"
scalaVersion := "2.11.10"
crossScalaVersions := Seq("2.10.6", "2.11.10")
lazy val scalaTest = "org.scalatest" %% "scalatest" % "3.0.0"
libraryDependencies += "org.apache.spark" %% "spark-core" % "2.0.2" % "provided"
libraryDependencies += "com.datastax.spark" %% "spark-cassandra-connector" % "2.0.0-M3" % "provided"
libraryDependencies += "co.verdigris.ssl" %% "ssllib" % "1.1.2"
libraryDependencies += scalaTest % Test
s3region := Region.US_Standard
s3overwrite := true
publishTo := Some(s3resolver.value("Verdigris Scala Libs", s3("scala-jars")))
resolvers += "Verdigris Scala Lib Repository" at "https://s3.amazonaws.com/scala-jars"
|
Lance0312/spark-cassandra-connection-factory
|
src/main/scala/co/verdigris/spark/connector/cql/AwsS3EUWest2ConnectionFactory.scala
|
package co.verdigris.spark.connector.cql
import com.amazonaws.regions.Regions
object AwsS3EUWest2ConnectionFactory extends S3ConnectionFactory {
this.s3Region = Some(Regions.EU_WEST_2.getName)
}
|
Lance0312/spark-cassandra-connection-factory
|
src/main/scala/co/verdigris/spark/connector/cql/AwsS3APSoutheast1ConnectionFactory.scala
|
package co.verdigris.spark.connector.cql
import com.amazonaws.regions.Regions
object AwsS3APSoutheast1ConnectionFactory extends S3ConnectionFactory {
this.s3Region = Some(Regions.AP_SOUTHEAST_1.getName)
}
|
Lance0312/spark-cassandra-connection-factory
|
src/test/scala/co/verdigris/spark/connector/cql/SparkContextUnitSpec.scala
|
package co.verdigris.spark.connector.cql
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterAll, FunSpec}
trait SparkContextUnitSpec extends FunSpec with BeforeAndAfterAll {
var master: Option[String] = None
var appName: Option[String] = None
var sparkConf: SparkConf = _
var sc: SparkContext = _
override def beforeAll {
sparkConf = new SparkConf()
.setMaster(master.getOrElse("local[*]"))
.setAppName(appName.getOrElse("Spark Context Tests"))
sc = SparkContext.getOrCreate(sparkConf)
}
override def afterAll: Unit = {
sc.cancelAllJobs()
}
}
|
Lance0312/spark-cassandra-connection-factory
|
src/main/scala/co/verdigris/spark/connector/cql/AwsS3SAEast1ConnectionFactory.scala
|
package co.verdigris.spark.connector.cql
import com.amazonaws.regions.Regions
object AwsS3SAEast1ConnectionFactory extends S3ConnectionFactory {
this.s3Region = Some(Regions.SA_EAST_1.getName)
}
|
Lance0312/spark-cassandra-connection-factory
|
src/main/scala/co/verdigris/spark/connector/cql/AwsS3APNortheast2ConnectionFactory.scala
|
package co.verdigris.spark.connector.cql
import com.amazonaws.regions.Regions
object AwsS3APNortheast2ConnectionFactory extends S3ConnectionFactory {
this.s3Region = Some(Regions.AP_NORTHEAST_2.getName)
}
|
Lance0312/spark-cassandra-connection-factory
|
src/test/scala/co/verdigris/spark/connector/ssl/AwsS3SSLOptionsTest.scala
|
package co.verdigris.spark.connector.ssl
import com.amazonaws.regions.{Region, Regions}
import com.datastax.spark.connector.cql.CassandraConnectorConf.CassandraSSLConf
import org.scalatest.{BeforeAndAfter, FunSpec, Matchers}
class AwsS3SSLOptionsTest extends FunSpec with Matchers with BeforeAndAfter {
var builder: AwsS3SSLOptions.Builder = _
before {
this.builder = AwsS3SSLOptions.builder()
}
describe("Builder") {
describe("#withAwsRegion") {
it("should set the correct region") {
builder.withAwsRegion(Some("us-east-1"))
.build()
.awsRegion shouldBe Some("us-east-1")
builder.withAwsRegion("us-east-1")
.build()
.awsRegion shouldBe Some("us-east-1")
builder.withAwsRegion(Region.getRegion(Regions.US_EAST_1))
.build()
.awsRegion shouldBe Some("us-east-1")
}
it("should return this instance") {
builder.withAwsRegion(Some("us-east-1")) shouldBe builder
builder.withAwsRegion("us-east-1") shouldBe builder
builder.withAwsRegion(Region.getRegion(Regions.US_EAST_1)) shouldBe builder
}
}
describe("#withCiphterSuites") {
it("should set the cipher suites") {
builder.withCipherSuites(Set("TLS_RSA_WITH_AES_256_CBC_SHA256", "TLS_RSA_WITH_AES_256_CBC_SHA"))
.build()
.cipherSuites should contain allOf("TLS_RSA_WITH_AES_256_CBC_SHA256", "TLS_RSA_WITH_AES_256_CBC_SHA")
}
it("should override cipher suites from CassandraSSLConf instance") {
val cipherSuites = builder.withSSLConf(
CassandraSSLConf(
enabled = true,
Some("s3://bucket/key.jks"),
Some("pwd"),
"JKS",
"TLS",
Set("TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384")))
.withCipherSuites(Set("TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_DHE_DSS_WITH_AES_256_CBC_SHA"))
.build()
.cipherSuites
cipherSuites should contain noneOf(
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384",
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384")
cipherSuites should contain allOf("TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_DHE_DSS_WITH_AES_256_CBC_SHA")
}
it("should return this instance") {
builder.withCipherSuites(Set("TLS_RSA_WITH_AES_256_CBC_SHA")) shouldBe builder
}
}
describe("#withSSLConf") {
it("should set the SSL configurations") {
val subject = builder.withSSLConf(
CassandraSSLConf(
enabled = true,
Some("s3://bucket/key.jks"),
Some("<PASSWORD>"),
"JKS",
"TLS",
Set("TLS_RSA_WITH_AES_256_CBC_SHA256")))
.build()
subject.s3TrustStoreUrl shouldBe Some("s3://bucket/key.jks")
subject.trustStoreType shouldBe Some("JKS")
subject.cipherSuites should contain("TLS_RSA_WITH_AES_256_CBC_SHA256")
}
}
describe("#build") {
it("should return a new instance of AwsS3SSLOptions") {
builder.build() shouldBe a[AwsS3SSLOptions]
}
}
}
describe(".builder") {
it("should return a new AwsS3SSLOptions.Builder instance") {
AwsS3SSLOptions.builder() shouldBe a[AwsS3SSLOptions.Builder]
}
}
}
|
Lance0312/spark-cassandra-connection-factory
|
src/main/scala/co/verdigris/spark/connector/cql/AwsS3APSoutheast2ConnectionFactory.scala
|
package co.verdigris.spark.connector.cql
import com.amazonaws.regions.Regions
object AwsS3APSoutheast2ConnectionFactory extends S3ConnectionFactory {
this.s3Region = Some(Regions.AP_SOUTHEAST_2.getName)
}
|
Lance0312/spark-cassandra-connection-factory
|
src/main/scala/co/verdigris/spark/connector/cql/AwsS3GovCloudConnectionFactory.scala
|
package co.verdigris.spark.connector.cql
import com.amazonaws.regions.Regions
object AwsS3GovCloudConnectionFactory extends S3ConnectionFactory {
this.s3Region = Some(Regions.GovCloud.getName)
}
|
Lance0312/spark-cassandra-connection-factory
|
src/main/scala/co/verdigris/spark/connector/ssl/AwsS3SSLOptions.scala
|
package co.verdigris.spark.connector.ssl
import co.verdigris.ssl.S3JKSSSLOptions
import com.amazonaws.regions.Region
import com.datastax.spark.connector.cql.CassandraConnectorConf.CassandraSSLConf
class AwsS3SSLOptions(
sslConf: CassandraSSLConf,
awsRegion: Option[String] = None)
extends S3JKSSSLOptions(
sslConf.trustStorePath,
sslConf.trustStorePassword,
Some(sslConf.trustStoreType),
sslConf.enabledAlgorithms,
awsRegion)
/**
* AwsS3SSLOptions object contains the builder function to facilitate instantiating the AwsS3SSLOptions class.
*/
object AwsS3SSLOptions {
class Builder {
protected var cassandraSSLConf: Option[CassandraSSLConf] = None
protected var awsRegion: Option[String] = None
/** Set the AWS S3 region.
*
* @param awsRegion S3 bucket region.
* @return this builder.
*/
def withAwsRegion(awsRegion: Option[String]): Builder = {
this.awsRegion = awsRegion
this
}
/** Set the AWS S3 region.
*
* @param awsRegion S3 bucket region.
* @return this builder.
*/
def withAwsRegion(awsRegion: String): Builder = {
this.withAwsRegion(Some(awsRegion))
}
/** Set the AWS S3 region.
*
* @param awsRegion S3 bucket region.
* @return this builder.
*/
def withAwsRegion(awsRegion: Region): Builder = {
this.awsRegion = Some(awsRegion.getName)
this
}
/** Set the cipher suites to use.
* <p/>
* If this method isn't called, the default is to present all the eligible client ciphers to the server.
*
* @param cipherSuites set of cipher suites to use.
* @return this builder.
*/
def withCipherSuites(cipherSuites: Set[String]): Builder = {
val oldConf = this.cassandraSSLConf
.getOrElse(CassandraSSLConf(enabledAlgorithms = cipherSuites))
this.cassandraSSLConf = Some(
CassandraSSLConf(
oldConf.enabled,
oldConf.trustStorePath,
oldConf.trustStorePassword,
oldConf.trustStoreType,
oldConf.protocol,
cipherSuites)
)
this
}
/** Set the Cassandra SSL configuration.
* <p/>
* If this method isn't called, a Cassandra SSL configuration object with default values is used.
*
* @param conf Cassandra SSL configuration object.
* @return this builder.
*/
def withSSLConf(conf: CassandraSSLConf): Builder = {
this.cassandraSSLConf = Some(conf)
this
}
/** Instantiates an [[AwsS3SSLOptions]] from the AWS region, Cassandra SSL configuration, and cipher
* suites previously set on this builder.
*
* @return new instance of AwsS3SSLOptions
*/
def build(): AwsS3SSLOptions =
new AwsS3SSLOptions(
this.cassandraSSLConf.getOrElse(CassandraSSLConf()),
this.awsRegion)
}
def builder(): Builder = new Builder
}
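// A hypothetical usage sketch, not part of the original source: building an AwsS3SSLOptions with the
// builder above. The object name, bucket URL, password and region below are illustrative placeholders.
object AwsS3SSLOptionsUsageSketch {
  val sslOptions: AwsS3SSLOptions = AwsS3SSLOptions.builder()
    .withSSLConf(CassandraSSLConf(
      enabled = true,
      trustStorePath = Some("s3://example-bucket/truststore.jks"),
      trustStorePassword = Some("changeit"),
      trustStoreType = "JKS"))
    .withAwsRegion("us-west-2")
    .build()
}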
|
Lance0312/spark-cassandra-connection-factory
|
src/main/scala/co/verdigris/spark/connector/cql/AwsS3CNNorth1ConnectionFactory.scala
|
package co.verdigris.spark.connector.cql
import com.amazonaws.regions.Regions
object AwsS3CNNorth1ConnectionFactory extends S3ConnectionFactory {
this.s3Region = Some(Regions.CN_NORTH_1.getName)
}
|
Lance0312/spark-cassandra-connection-factory
|
src/main/scala/co/verdigris/spark/connector/cql/AwsS3USWest2ConnectionFactory.scala
|
package co.verdigris.spark.connector.cql
import com.amazonaws.regions.Regions
object AwsS3USWest2ConnectionFactory extends S3ConnectionFactory {
this.s3Region = Some(Regions.US_WEST_2.getName)
}
|
Lance0312/spark-cassandra-connection-factory
|
src/main/scala/co/verdigris/spark/connector/cql/AwsS3APNortheast1ConnectionFactory.scala
|
package co.verdigris.spark.connector.cql
import com.amazonaws.regions.Regions
object AwsS3APNortheast1ConnectionFactory extends S3ConnectionFactory {
this.s3Region = Some(Regions.AP_NORTHEAST_1.getName)
}
|
Lance0312/spark-cassandra-connection-factory
|
src/main/scala/co/verdigris/spark/connector/cql/AwsS3CACentral1ConnectionFactory.scala
|
package co.verdigris.spark.connector.cql
import com.amazonaws.regions.Regions
object AwsS3CACentral1ConnectionFactory extends S3ConnectionFactory {
this.s3Region = Some(Regions.CA_CENTRAL_1.getName)
}
|
Lance0312/spark-cassandra-connection-factory
|
src/test/scala/co/verdigris/spark/connector/cql/AwsS3EUWest1ConnectionFactoryTest.scala
|
package co.verdigris.spark.connector.cql
import com.datastax.driver.core.Cluster
class AwsS3EUWest1ConnectionFactoryTest extends ConnectionFactorySpec {
override def beforeAll {
super.beforeAll
factory = AwsS3EUWest1ConnectionFactory
}
describe(".clusterBuilder") {
it("should return a new Cluster.Builder instance") {
factory.clusterBuilder(cassandraConf) shouldBe a [Cluster.Builder]
}
}
describe(".createCluster") {
it("should return a new Cluster instance") {
factory.createCluster(cassandraConf) shouldBe a [Cluster]
}
}
}
|
sridhar-sid/almaren-framework
|
src/main/scala/com/github/music/of/the/ainur/almaren/state/core/Source.scala
|
package com.github.music.of.the.ainur.almaren.state.core
import com.github.music.of.the.ainur.almaren.State
import org.apache.spark.sql.DataFrame
private[ainur] abstract class Source() extends State {
override def executor(df: DataFrame): DataFrame = source(df)
def source(df: DataFrame): DataFrame
}
case class SourceSql(sql: String) extends Source {
override def source(df: DataFrame): DataFrame = {
logger.info(s"sql:{$sql}")
val sqlDf = df.sparkSession.sql(sql)
sqlDf
}
}
case class SourceJdbc(url: String, driver: String, query: String, user: Option[String], password: Option[String], params: Map[String, String]) extends Source {
override def source(df: DataFrame): DataFrame = {
logger.info(s"url:{$url}, driver:{$driver}, query:{$query}, user:{$user}, params:{$params}")
val options = (user, password) match {
case (Some(user), None) => params + ("user" -> user)
case (Some(user), Some(password)) => params + ("user" -> user, "password" -> password)
case (_, _) => params
}
df.sparkSession.read.format("jdbc")
.option("url", url)
.option("driver", driver)
.option("dbtable", s"(${query}) MY_TABLE")
.options(options)
.load()
}
}
case class SourceFile(format: String, path: String, params: Map[String, String]) extends Source {
override def source(df: DataFrame): DataFrame = {
logger.info(s"format:{$format}, path:{$path}, params:{$params}")
df.sparkSession.read.format(format)
.options(params)
.load(path)
}
}
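// A hypothetical sketch, not part of the original source: exercising the sources above directly via
// their public source(df) method. The object name, master, app name and file path are placeholders;
// in practice these states run inside the Almaren DSL rather than standalone.
object SourceUsageSketch {
  import org.apache.spark.sql.SparkSession

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("source-sketch").getOrCreate()
    val seed = spark.emptyDataFrame // a source only needs a DataFrame to reach the SparkSession
    val fromFile = SourceFile("parquet", "/tmp/data.parquet", Map()).source(seed)
    fromFile.createOrReplaceTempView("data")
    SourceSql("SELECT COUNT(*) AS n FROM data").source(seed).show()
    spark.stop()
  }
}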
|
sridhar-sid/almaren-framework
|
src/main/scala/com/github/music/of/the/ainur/almaren/builder/core/Source.scala
|
package com.github.music.of.the.ainur.almaren.builder.core
import com.github.music.of.the.ainur.almaren.Tree
import com.github.music.of.the.ainur.almaren.builder.Core
import com.github.music.of.the.ainur.almaren.state.core.{SourceJdbc, SourceSql, SourceFile}
private[almaren] trait Source extends Core {
def sourceSql(sql: String): Option[Tree] =
SourceSql(sql)
def sourceJdbc(url: String, driver: String, query: String, user: Option[String] = None, password: Option[String] = None, params: Map[String, String] = Map()): Option[Tree] =
SourceJdbc(url, driver, query, user, password, params)
def sourceFile(format: String, path: String, params: Map[String, String] = Map()): Option[Tree] =
SourceFile(format,path,params)
}
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/config/http/BasicAuthentication.scala
|
package org.el.documento.config.http
import java.util.UUID
import akka.http.scaladsl.server._
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.server.Directives._
import org.el.documento.config.ApplicationConfig
import org.el.documento.model.{Public, SuperAdmin, UserClaim}
import scala.concurrent.ExecutionContextExecutor
trait BasicAuthentication extends ApplicationConfig {
implicit val jwtAuth: JWTAuthenticationServices
implicit val ec: ExecutionContextExecutor
private def getUserId(userClaim: UserClaim): UUID = userClaim.userId
private def getUserWithRoleId(userClaim: UserClaim): (UUID, String) = (userClaim.userId, userClaim.roleTitle)
def authenticate(token: String): Directive1[UserClaim] = {
jwtAuth.verifyJwtToken(token) match {
case Some(user) => provide(user)
case None =>
logger.error("Invalid authorization credential type")
reject(AuthorizationFailedRejection)
}
}
def authenticatedWithHeader: Directive1[UserClaim] = {
extractCredentials.flatMap {
case Some(OAuth2BearerToken(token)) => authenticate(token)
case _ =>
logger.error("Invalid authorization credential type")
reject(AuthorizationFailedRejection)
}
}
def withPublicAuthentication: Directive1[UUID] = {
extraToken.flatMap {
case userClaim: UserClaim if isPublic(userClaim) => provide(userClaim.userId)
case _ =>
logger.error("User does not have access to this route")
reject(AuthorizationFailedRejection)
}
}
def withSuperAdminAuthentication: Directive1[UUID] = {
extraToken.flatMap {
case userClaim: UserClaim if isSuperAdmin(userClaim) => provide(userClaim.userId)
case _ =>
logger.error("User does not have access to this route")
reject(AuthorizationFailedRejection)
}
}
private def isPublic(userClaim: UserClaim): Boolean = Public.name.toLowerCase().contains(userClaim.roleTitle)
private def isSuperAdmin(userClaim: UserClaim): Boolean = SuperAdmin.name.toLowerCase().contains(userClaim.roleTitle)
private def extraToken: Directive1[UserClaim] = {
extractCredentials.flatMap {
case Some(OAuth2BearerToken(token)) => authenticate(token)
case _ =>
logger.error("Invalid authorization credential type")
reject(AuthorizationFailedRejection)
}
}
}
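// A hypothetical sketch, not part of the original source: a route class mixing in the trait above,
// mirroring how DocumentoRoute wires its implicits. DemoAuthRoutes and the "whoami" path are
// illustrative only.
class DemoAuthRoutes(implicit val jwtAuth: JWTAuthenticationServices,
                     val ec: ExecutionContextExecutor) extends BasicAuthentication {
  // GET /whoami answers with the caller's UUID when the bearer token carries the public role
  val routes: Route =
    path("whoami") {
      get {
        withPublicAuthentication { userId =>
          complete(userId.toString)
        }
      }
    }
}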
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/ServiceMain.scala
|
package org.el.documento
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import de.heikoseeberger.akkahttpplayjson.PlayJsonSupport
import org.el.documento.config.ApplicationConfig
import org.el.documento.config.http.{JWTAuthentication, JWTAuthenticationServices, RouteHandlerConfig}
import org.el.documento.controller.{DocumentoController, DocumentoControllerImpl}
import org.el.documento.database.ElDocumentoDAO
import org.el.documento.route.DocumentoRoute
import scala.concurrent.ExecutionContextExecutor
import scala.util.{Failure, Success}
object ServiceMain extends PlayJsonSupport with ApplicationConfig with App with RouteHandlerConfig {
implicit val system: ActorSystem = ActorSystem("el-documento")
implicit val materializer: ActorMaterializer = ActorMaterializer()
implicit val executionContext: ExecutionContextExecutor = system.dispatcher
implicit val jwt: JWTAuthenticationServices = new JWTAuthentication
lazy val controller: DocumentoController = {
val db = new ElDocumentoDAO
new DocumentoControllerImpl(db)
}
lazy val documentoRoute = new DocumentoRoute(controller)
lazy val routeWithHandler: Route = {
handleExceptions(myExceptionHandler) {
handleRejections(rejectionHandler) {
documentoRoute.routes
}
}
}
val binding = Http().bindAndHandle(routeWithHandler, httpInterface, httpPort)
binding.onComplete {
case Success(serverBinding) =>
logger.info(s"==> Server bound to http:/${serverBinding.localAddress}")
case Failure(ex) =>
logger.error(s"Failed to bind to $httpInterface:$httpPort !", ex)
system.terminate()
}
}
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/messages/CreateRoleRequest.scala
|
package org.el.documento.messages
import play.api.libs.json.{Format, Json}
case class CreateRoleRequest(title: String, roleType: Option[String])
object CreateRoleRequest {
implicit val format: Format[CreateRoleRequest] = Json.format
}
|
oyinda-subair/el-documento
|
src/test/scala/org/el/documento/DocumentoRouteTestkit.scala
|
package org.el.documento
import akka.actor.ActorSystem
import akka.http.scaladsl.model.{HttpEntity, MediaTypes}
import akka.http.scaladsl.server.Directives.{handleExceptions, handleRejections}
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.stream.ActorMaterializer
import de.heikoseeberger.akkahttpplayjson.PlayJsonSupport
import org.el.documento.config.ApplicationConfig
import org.el.documento.config.http.{JWTAuthentication, JWTAuthenticationServices, RouteHandlerConfig}
import org.el.documento.controller.{DocumentoController, DocumentoControllerImpl}
import org.el.documento.database.ElDocumentoDAO
import org.el.documento.route.DocumentoRoute
import org.scalatest.WordSpec
import play.api.libs.json.{Json, Reads, Writes}
trait DocumentoRouteTestkit extends WordSpec with ScalatestRouteTest with ApplicationConfig with RouteHandlerConfig with PlayJsonSupport {
val db = new ElDocumentoDAO
val version = "v1"
val userPath = "user"
val rolePath = "role"
val api = "api"
implicit val jwt: JWTAuthenticationServices = new JWTAuthentication
def toEntity[T: Reads: Writes](body: T): HttpEntity.Strict = {
val message = Json.toJson(body).toString()
HttpEntity(MediaTypes.`application/json`, message)
}
lazy val controller: DocumentoController = {
new DocumentoControllerImpl(db)
}
lazy val documentoRoute = new DocumentoRoute(controller)
val route: Route = {
handleExceptions(myExceptionHandler) {
handleRejections(rejectionHandler){
documentoRoute.routes
}
}
}
}
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/controller/DocumentoController.scala
|
package org.el.documento.controller
import java.util.UUID
import akka.Done
import akka.actor.ActorSystem
import org.el.documento.config.base.SecureHelper.confirmPassword
import org.el.documento.config.ApplicationConfig
import org.el.documento.config.exceptions.{ResourceNotFoundException, UnauthorizedUserException}
import org.el.documento.config.http.JWTAuthenticationServices
import org.el.documento.database.ElDocumentoDAO
import org.el.documento.messages.{CreateRoleRequest, CreateUserRequest, LoginByEmail}
import org.el.documento.model._
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
trait DocumentoController {
def createUser(request: CreateUserRequest): Future[UserToken]
def createRole(request: CreateRoleRequest): Future[Done]
def loginByEmail(request: LoginByEmail): Future[UserToken]
}
class DocumentoControllerImpl(documentoDb: ElDocumentoDAO)(implicit val system: ActorSystem, jwt: JWTAuthenticationServices) extends ApplicationConfig with DocumentoController {
// User commands
def createUser(request: CreateUserRequest): Future[UserToken] = {
for {
role <- getRoleByTitle(Public.name)
entity <- documentoDb.UserRepo.create(request, role.roleId)
role <- getRoleById(entity._2)
} yield {
val userClaim = UserClaim(entity._1, role.title)
val token = jwt.generateToken(userClaim)
UserToken(token).bearerToken
}
}
// Role commands
def createRole(request: CreateRoleRequest): Future[Done] = {
for {
_ <- documentoDb.RoleRepo.create(request)
} yield Done
}
// Login Commands
override def loginByEmail(request: LoginByEmail): Future[UserToken] = {
for {
user <- getUserByEmail(request.email)
role <- getRoleById(user.roleId)
} yield {
if(confirmPassword(request.password, user.password)) {
val token = jwt.generateToken(UserClaim(user.userId, role.title))
UserToken(token).bearerToken
} else {
logger.error("Incorrect Password")
throw UnauthorizedUserException("Incorrect password")
}
}
}
private def getUserByEmail(email: String): Future[UserEntity] = {
documentoDb.UserRepo.getUserByEmail(email).map {
case Some(user) => user
case None =>
logger.error("Email does not exist")
throw ResourceNotFoundException("Email does not exist")
}
}
private def getUserById(userId: UUID): Future[UserEntity] = {
documentoDb.UserRepo.getUserById(userId).map {
case Some(user) => user
case None =>
logger.error("Oops! sorry, UserId does not exist")
throw ResourceNotFoundException("Oops! sorry, UserId does not exist")
}
}
private def getRoleById(roleId: Int): Future[RoleEntity] = {
documentoDb.RoleRepo.getRoleById(roleId).map {
case Some(role) => role
case None =>
logger.error("Oops! sorry, RoleId does not exist")
throw ResourceNotFoundException("Oops! sorry, RoleId does not exist")
}
}
private def getRoleByTitle(title: String): Future[RoleEntity] = {
documentoDb.RoleRepo.getRoleByTitle(title).map {
case Some(role) => role
case None =>
logger.error("Oops! sorry, Role title does not exist")
throw ResourceNotFoundException("Oops! sorry, Role title does not exist")
}
}
}
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/config/ApplicationConfig.scala
|
package org.el.documento.config
import com.typesafe.config.ConfigFactory
import org.slf4j.Logger
import org.slf4j.LoggerFactory
trait ApplicationConfig {
lazy val className: String = if(this.getClass.getCanonicalName != null)
this.getClass.getCanonicalName else "none"
private val config = ConfigFactory.load()
private val databaseConfig = config.getConfig("db")
private val httpConfig = config.getConfig("app")
val pgDriver: String = databaseConfig.getString("driver")
// val pgUrl: String = databaseConfig.getString("url")
val pgPassword: String = databaseConfig.getString("password")
val pgUser: String = databaseConfig.getString("user")
val pgHost: String = databaseConfig.getString("host")
val pgDBName: String = databaseConfig.getString("name")
val secret: String = System.getenv("JWT_SECRET")
val logger: Logger = LoggerFactory.getLogger(className)
val httpInterface: String = httpConfig.getString("host")
val httpPort: Int = httpConfig.getInt("port")
val env: String = httpConfig.getString("env")
}
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/model/UserId.scala
|
package org.el.documento.model
import java.util.UUID
import play.api.libs.json.{Format, Json}
case class UserId(userId: UUID)
object UserId {
implicit val format: Format[UserId] = Json.format
}
case class UserClaim(userId: UUID, roleTitle: String)
object UserClaim {
implicit val format: Format[UserClaim] = Json.format
}
case class UserToken(token: String) {
def bearerToken = UserToken(s"Bearer $token")
}
object UserToken {
implicit val format: Format[UserToken] = Json.format
}
|
oyinda-subair/el-documento
|
src/test/scala/org/el/documento/route/UserRouteSpec.scala
|
package org.el.documento.route
import akka.Done
import akka.http.scaladsl.model._
import org.el.documento.DocumentoRouteTestkit
import org.el.documento.messages.{CreateRoleRequest, CreateUserRequest, LoginByEmail}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpec}
import org.el.documento.Util._
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
class UserRouteSpec extends WordSpec with Matchers with DocumentoRouteTestkit with BeforeAndAfterAll with ScalaFutures {
override protected def beforeAll(): Unit = {
super.beforeAll()
val role = CreateRoleRequest("public", Some("public access only"))
val inserted = for {
_ <- controller.createRole(role)
} yield Done
Await.result(inserted, 50 seconds)
}
override protected def afterAll(): Unit = {
super.afterAll()
val deleted = for {
_ <- db.UserRepo.deleteAll()
_ <- db.RoleRepo.deleteAll()
} yield Done
Await.result(deleted, 10 seconds)
}
"El Documento user Route" when {
"User Endpoints" should {
val userEntity = CreateUserRequest("test user", "usertest", s"<EMAIL>-<EMAIL>", s"$string10")
"create user " in {
Post(s"/$api/$version/users").withEntity(toEntity(userEntity)) ~> route ~> check {
status shouldEqual StatusCodes.Created
}
}
"Login user" in {
Post(s"/$api/$version/users").withEntity(toEntity(userEntity)) ~> route ~> check {
status shouldEqual StatusCodes.Created
val login = LoginByEmail(userEntity.email, userEntity.password)
Post(s"/$api/$version/login").withEntity(toEntity(login)) ~> route ~> check {
status shouldEqual StatusCodes.OK
}
}
}
// wrong email
"Login with wrong email address" in {
val login = LoginByEmail(s"user-login-$string1<EMAIL>", s"<PASSWORD>")
Post(s"/$api/$version/login").withEntity(toEntity(login)) ~> route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
//wrong password
"Login with wrong password" in {
val login = LoginByEmail(userEntity.email, s"<PASSWORD>0")
Post(s"/$api/$version/login").withEntity(toEntity(login)) ~> route ~> check {
status shouldEqual StatusCodes.Unauthorized
}
}
}
"Role Endpoint" should {
"create role" in {
val roleRequest = CreateRoleRequest("sample role", Some("sample role"))
Post(s"/$api/$version/roles").withEntity(toEntity(roleRequest)) ~> route ~> check {
status shouldEqual StatusCodes.Unauthorized
}
}
}
}
}
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/config/exceptions/DocumentoRejections.scala
|
package org.el.documento.config.exceptions
import akka.http.javadsl.server.Rejection
case class UnauthorizedUser(message: String) extends Rejection
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/model/UserEntity.scala
|
package org.el.documento.model
import java.util.UUID
import org.el.documento.config.base.FormatEntity
import org.joda.time.DateTime
import play.api.libs.json.{Format, Json}
case class UserEntity(
userId: UUID,
name: String,
username: String,
email: String,
password: String,
roleId: Int,
timestampCreated: DateTime,
timestampUpdated: Option[DateTime]
)
object UserEntity extends FormatEntity[UserEntity]{
implicit val format: Format[UserEntity] = Json.format
}
|
oyinda-subair/el-documento
|
src/test/scala/org/el/documento/Util.scala
|
package org.el.documento
import scala.util.Random
object Util {
private val random = new Random(System.currentTimeMillis)
def string10 = new String(Array.fill(10)((random.nextInt(26) + 65).toByte))
}
|
oyinda-subair/el-documento
|
build.sbt
|
val akkaHttpVersion = "10.1.9"
val akkaStreamVersion = "2.5.23"
val akkaSlickVersion = "1.1.1"
val playJsonVersion = "2.7.3"
val flywayVersion = "5.0.2"
val scalaTestVersion = "3.0.8"
val akkaHttpSessionVersion = "0.5.10"
val pauldijouVersion = "4.0.0"
val log4jVersion = "2.10.0"
val sentryVersion = "1.7.27"
val slickJodaMapperVersion = "2.4.0"
val qosVersion = "1.2.3"
val akkaHttp = "com.typesafe.akka" %% "akka-http" % akkaHttpVersion
val akkaStream = "com.typesafe.akka" %% "akka-stream" % akkaStreamVersion
val playJson = "com.typesafe.play" %% "play-json" % playJsonVersion
val playJsonSupport = "de.heikoseeberger" %% "akka-http-play-json" % "1.27.0"
val bcrypt = "org.mindrot" % "jbcrypt" % "0.3m"
val akkaSlick = "com.lightbend.akka" %% "akka-stream-alpakka-slick" % akkaSlickVersion
val postgres = "org.postgresql" % "postgresql" % "42.2.6"
val flywayCore = "org.flywaydb" % "flyway-core" % flywayVersion
val scalaTest = "org.scalatest" %% "scalatest" % scalaTestVersion % "test"
val akkaStreamTest = "com.typesafe.akka" %% "akka-stream-testkit" % akkaStreamVersion % Test
val akkaTest = "com.typesafe.akka" %% "akka-testkit" % akkaStreamVersion % Test
val akkaHttpTest = "com.typesafe.akka" %% "akka-http-testkit" % akkaHttpVersion % Test
//val log4jApi = "org.apache.logging.log4j" % "log4j-api" % log4jVersion
//val log4jCore = "org.apache.logging.log4j" % "log4j-core" % log4jVersion
val slf4j = "org.slf4j" % "slf4j-api" % "1.7.28"
val qos = "ch.qos.logback" % "logback-classic" % qosVersion
val qosCore = "ch.qos.logback" % "logback-core" % qosVersion
//val scalaLogging = "com.typesafe.scala-logging" %% "scala-logging" % "3.9.2"
val sentryIO = "io.sentry" % "sentry" % sentryVersion
val sentryIOLogback = "io.sentry" % "sentry-logback" % sentryVersion
//val playLogback = "com.typesafe.play" %% "play-logback" % "2.7.3"
val akkaHttpSessionCore = "com.softwaremill.akka-http-session" %% "core" % akkaHttpSessionVersion
val akkaHttpSessionJwt = "com.softwaremill.akka-http-session" %% "jwt" % akkaHttpSessionVersion
val jodaTime = "joda-time" % "joda-time" % "2.10.3"
val jodaConvert = "org.joda" % "joda-convert" % "2.2.1"
val jodaTimeFormatter = "com.github.tminglei" %% "slick-pg_joda-time" % "0.18.0"
val tototoshi = "com.github.tototoshi" %% "slick-joda-mapper" % slickJodaMapperVersion
val pauldijouJwt = "com.pauldijou" %% "jwt-core" % pauldijouVersion
lazy val commonSettings = Seq(
version := "1.0",
scalaVersion := "2.12.8",
scalacOptions ++= Seq(
"-feature",
"-deprecation",
"-Xfatal-warnings"
),
resolvers ++= Seq(
"typesafe" at "https://repo.typesafe.com/typesafe/releases/",
Resolver.jcenterRepo,
"Artima Maven Repository" at "https://repo.artima.com/releases"
)
)
lazy val root = (project in file("."))
.enablePlugins(FlywayPlugin)
.settings(
name := "el-documento",
version := "0.1",
commonSettings,
libraryDependencies ++= Seq(
akkaHttp,
akkaStream,
akkaSlick,
akkaStreamTest,
akkaHttpTest,
bcrypt,
flywayCore,
postgres,
playJson,
playJsonSupport,
pauldijouJwt,
slf4j,
qos,
qosCore,
scalaTest,
sentryIO,
sentryIOLogback,
jodaTime,
jodaConvert,
tototoshi
)
)
flywayUrl := "jdbc:postgresql://localhost:5432/el_documento"
flywayUser := "postgres"
flywayPassword := ""
flywayLocations += "db/migration"
flywayUrl in Test := "jdbc:postgresql://localhost:5432/el_documento_test"
flywayUser in Test := "postgres"
flywayPassword in Test := ""
flywayBaselineOnMigrate := true
envFileName in ThisBuild := "dotenv"
fork in run := true
cancelable in Global := true
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/database/ElDocumentoDAO.scala
|
package org.el.documento.database
import org.el.documento.repo.{RoleEntities, UserEntities}
import scala.concurrent.ExecutionContext
class ElDocumentoDAO(implicit ec: ExecutionContext) extends UserEntities with RoleEntities with DatabaseConnector {
object UserRepo extends UserRepository
object RoleRepo extends RoleRepository
}
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/config/exceptions/DocumentoExceptions.scala
|
package org.el.documento.config.exceptions
case class ResourceNotFoundException(message: String, cause: Option[Throwable] = None) extends Exception(message, cause.orNull)
case class UnauthorizedUserException(message: String, cause: Option[Throwable] = None) extends Exception(message, cause.orNull)
|
oyinda-subair/el-documento
|
project/plugins.sbt
|
addSbtPlugin("io.github.davidmweber" % "flyway-sbt" % "6.0.0")
addSbtPlugin("org.scoverage" % "sbt-scoverage" % "1.5.1")
addSbtPlugin("au.com.onegeek" %% "sbt-dotenv" % "2.0.117")
resolvers += "Flyway" at "https://flywaydb.org/repo"
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/config/base/SecureHelper.scala
|
package org.el.documento.config.base
import org.mindrot.jbcrypt.BCrypt
object SecureHelper {
def hashPassword(password: String): String = {
BCrypt.hashpw(password, BCrypt.gensalt())
}
def confirmPassword(password: String, hashPassword: String): Boolean = {
BCrypt.checkpw(password, hashPassword)
}
}
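// A hypothetical sketch, not part of the original source: the bcrypt round trip provided by the
// helpers above. The object name and password literal are placeholders.
object SecureHelperSketch {
  def main(args: Array[String]): Unit = {
    val hashed = SecureHelper.hashPassword("s3cret")        // new salt (and hash) on every call
    assert(SecureHelper.confirmPassword("s3cret", hashed))  // checkpw re-derives the salt from the hash
    assert(!SecureHelper.confirmPassword("wrong", hashed))
  }
}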
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/repo/UserEntities.scala
|
package org.el.documento.repo
import java.util.UUID
import org.el.documento.config.base.SecureHelper._
import org.el.documento.database.DatabaseConnector
import org.el.documento.messages.CreateUserRequest
import org.el.documento.model.{UserClaim, UserEntity}
import org.joda.time.DateTime
import slick.ast.ColumnOption.AutoInc
import scala.concurrent.{ExecutionContext, Future}
trait UserEntities extends RoleEntities with DatabaseConnector {
import driver.api._
import com.github.tototoshi.slick.PostgresJodaSupport._
val roleRepo: RoleRepository = {
import scala.concurrent.ExecutionContext.Implicits.global
new RoleRepository
}
val roles = roleRepo.roles
class UserTable(tag: Tag) extends Table[UserEntity](tag, "user_by_id") {
def id: Rep[UUID] = column[UUID]("user_id", O.PrimaryKey, AutoInc)
def name: Rep[String] = column[String]("name")
val index1 = index("index_name", name)
def username: Rep[String] = column[String]("username", O.Unique)
val index2 = index("index_username", username)
def email: Rep[String] = column[String]("email", O.Unique)
val index3 = index("index_email", email)
def password: Rep[String] = column[String]("password")
def roleId: Rep[Int] = column[Int]("role_id")
def timestampCreated: Rep[DateTime] = column[DateTime]("timestamp_created")
def timestampUpdated: Rep[Option[DateTime]] = column[Option[DateTime]]("timestamp_updated")
def * = (id, name, username, email, password, roleId, timestampCreated, timestampUpdated) <> ((UserEntity.apply _).tupled, UserEntity.unapply)
def role = foreignKey("role_by_id_fk", roleId, roles)(_.id, onDelete = ForeignKeyAction.Cascade)
}
class UserRepository(implicit ec: ExecutionContext) {
val users = TableQuery[UserTable]
def create(entity: CreateUserRequest, roleId: Int): Future[(UUID, Int)] = {
val now = DateTime.now
val user_id = UUID.randomUUID()
val password = hashPassword(entity.password)
val user = UserEntity(user_id, entity.name, entity.username, entity.email, password, roleId, now, None)
db.run(users returning users.map(u => (u.id, u.roleId)) += user).map(user => (user._1, user._2))
}
def getAllUsers: Future[Seq[UserEntity]] = db.run (users.result)
def getUserByEmail(email: String): Future[Option[UserEntity]] = db.run {
users.filter(_.email === email).result.headOption
}
def getUserById(userId: UUID): Future[Option[UserEntity]] = db.run {
users.filter(_.id === userId).result.headOption
}
def deleteAll(): Future[Int] = db.run(users.delete)
}
}
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/repo/RoleEntities.scala
|
package org.el.documento.repo
import org.el.documento.database.DatabaseConnector
import org.el.documento.messages.CreateRoleRequest
import org.el.documento.model.RoleEntity
import org.joda.time.DateTime
import slick.ast.ColumnOption.AutoInc
import scala.concurrent.{ExecutionContext, Future}
trait RoleEntities extends DatabaseConnector {
import driver.api._
import com.github.tototoshi.slick.PostgresJodaSupport._
class RoleTable(tag: Tag) extends Table[RoleEntity](tag, "role_by_id") {
def id: Rep[Int] = column[Int]("role_id", O.PrimaryKey, AutoInc)
def title: Rep[String] = column[String]("title", O.Unique)
val index1 = index("index_title", title)
def roleType: Rep[Option[String]] = column[Option[String]]("role_type")
def timestampCreated: Rep[DateTime] = column[DateTime]("timestamp_created")
def timestampUpdated: Rep[Option[DateTime]] = column[Option[DateTime]]("timestamp_updated")
def * = (id, title, roleType, timestampCreated, timestampUpdated) <> ((RoleEntity.apply _).tupled, RoleEntity.unapply)
}
class RoleRepository(implicit ec: ExecutionContext) {
val roles = TableQuery[RoleTable]
def create(entity: CreateRoleRequest): Future[RoleEntity] = {
val now = DateTime.now
db.run {
(roles.map(r ⇒ (r.title, r.roleType, r.timestampCreated))
returning roles.map(_.id)
into ((role, roleId) ⇒ RoleEntity(roleId, role._1, role._2, role._3, None))
) += (entity.title, entity.roleType, now)
}
}
def getRoleById(id: Int): Future[Option[RoleEntity]] = db.run {
roles.filter(_.id === id).result.headOption
}
def getRoleByTitle(title: String): Future[Option[RoleEntity]] = db.run{
roles.filter(_.title === title).result.headOption
}
def deleteAll(): Future[Int] = db.run(roles.delete)
}
}
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/messages/LoginByEmail.scala
|
package org.el.documento.messages
import play.api.libs.json.{Format, Json}
case class LoginByEmail(email: String, password: String)
object LoginByEmail {
implicit val format: Format[LoginByEmail] = Json.format
}
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/config/http/RouteHandlerConfig.scala
|
package org.el.documento.config.http
import akka.http.scaladsl.model.StatusCode
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, HttpResponse}
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import org.el.documento.config.exceptions.{ResourceNotFoundException, UnauthorizedUserException}
import org.el.documento.config.{ApplicationConfig, ErrorResponse}
trait RouteHandlerConfig extends ApplicationConfig{
def rejectionHandler: RejectionHandler =
RejectionHandler.newBuilder()
.handle { case MissingQueryParamRejection(param) =>
val errorResponse = ErrorResponse(BadRequest.intValue, "Missing Parameter", s"The required $param was not found.").toStrEntity
complete(HttpResponse(BadRequest, entity = HttpEntity(ContentTypes.`application/json`, errorResponse)))
}
.handle { case AuthorizationFailedRejection =>
val errorResponse = ErrorResponse(Unauthorized.intValue, "Authorization", "The authorization check failed for you. Access Denied.").toStrEntity
complete(HttpResponse(Unauthorized, entity = HttpEntity(ContentTypes.`application/json`, errorResponse)))
}
.handleAll[MethodRejection] { methodRejections =>
val names = methodRejections.map(_.supported.name)
val errorResponse = ErrorResponse(MethodNotAllowed.intValue, "Not Allowed", s"Access to $names is not allowed.").toStrEntity
complete(HttpResponse(MethodNotAllowed, entity = HttpEntity(ContentTypes.`application/json`, errorResponse)))
}
.handleNotFound {
val errorResponse = ErrorResponse(NotFound.intValue, "NotFound", "The requested resource could not be found.").toStrEntity
complete(HttpResponse(NotFound, entity = HttpEntity(ContentTypes.`application/json`, errorResponse)))
}
.result()
def myExceptionHandler: ExceptionHandler =
ExceptionHandler {
case e: ResourceNotFoundException => logException(NotFound, e.message, "Not Found Error")
case e: UnauthorizedUserException => logException(Unauthorized, e.message, "Unauthorized Error")
case e: Exception =>
extractUri { uri =>
val errorResponse = ErrorResponse(InternalServerError.intValue, "Internal Server Error", e.getLocalizedMessage).toStrEntity
logger.error(s"Request to $uri could not be handled normally")
e.printStackTrace()
complete(HttpResponse(InternalServerError, entity = errorResponse))
}
}
private def logException(code: StatusCode, message: String, errorType: String): StandardRoute = {
val errorResponse = ErrorResponse(code.intValue(), errorType, message)
val response = HttpResponse(code, entity = errorResponse.toStrEntity)
logger.error(s"Error processing user request: $message")
complete(response)
}
}
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/config/base/DateTimeFormatter.scala
|
package org.el.documento.config.base
import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat
import play.api.libs.json.{JsNull, JsString, JsValue, Reads, Writes}
object DateTimeFormatter {
val dateFormat = "yyyy-MM-dd'T'HH:mm:ss.SSSZ"
val jodaDateReads: Reads[DateTime] = Reads[DateTime](js =>
js.validate[String].map[DateTime](dtString =>
DateTime.parse(dtString, DateTimeFormat.forPattern(dateFormat))
)
)
val jodaDateWrites: Writes[DateTime] = new Writes[DateTime] {
def writes(d: DateTime): JsValue = JsString(d.toString())
}
val jodaDateOptWrites: Writes[Option[DateTime]] = new Writes[Option[DateTime]] {
def writes(d: Option[DateTime]): JsValue = d match {
case Some(dateTime) ⇒ JsString(dateTime.toString())
case None ⇒ JsNull
}
}
}
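// A hypothetical sketch, not part of the original source: combining the Reads and Writes above into
// a single play-json Format for DateTime fields. The object and val names are illustrative.
object JodaDateTimeFormatSketch {
  implicit val jodaDateTimeFormat: play.api.libs.json.Format[DateTime] =
    play.api.libs.json.Format(DateTimeFormatter.jodaDateReads, DateTimeFormatter.jodaDateWrites)
}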
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/model/RoleEntity.scala
|
package org.el.documento.model
import org.el.documento.config.base.FormatEntity
import org.joda.time.DateTime
import play.api.libs.json.{Format, Json}
case class RoleEntity(
roleId: Int,
title: String,
roleType: Option[String],
timestampCreated: DateTime,
timestampUpdated: Option[DateTime]
)
object RoleEntity extends FormatEntity[RoleEntity]{
implicit val format: Format[RoleEntity] = Json.format
}
sealed abstract class RoleType(val name: String)
case object Public extends RoleType("public")
case object Admin extends RoleType("admin")
case object SuperAdmin extends RoleType("super-admin")
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/database/DatabaseConnector.scala
|
package org.el.documento.database
import org.el.documento.config.ApplicationConfig
import slick.basic.DatabaseConfig
import slick.jdbc.JdbcProfile
trait DatabaseConnector extends ApplicationConfig {
val driver: JdbcProfile = slick.jdbc.PostgresProfile
import driver.api._
private val databaseConfig = DatabaseConfig.forConfig[JdbcProfile]("slick-postgres")
val db = databaseConfig.db
implicit val session: Session = db.createSession()
}
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/route/DocumentoRoute.scala
|
package org.el.documento.route
import akka.actor.ActorSystem
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import de.heikoseeberger.akkahttpplayjson.PlayJsonSupport
import org.el.documento.config.http.{BasicAuthentication, JWTAuthenticationServices}
import org.el.documento.controller.DocumentoController
import org.el.documento.messages.{CreateRoleRequest, CreateUserRequest, LoginByEmail}
import scala.concurrent.ExecutionContextExecutor
class DocumentoRoute(controller: DocumentoController)(implicit val jwtAuth: JWTAuthenticationServices, val ec: ExecutionContextExecutor) extends PlayJsonSupport with BasicAuthentication {
val version = "v1"
val user = "user"
val role = "role"
val api = "api"
protected val createUser: Route =
path(api / version / "users") {
post {
entity(as[CreateUserRequest]) { request =>
complete((StatusCodes.Created, controller.createUser(request)))
}
}
}
protected val createRole: Route =
path(api / version / "roles") {
post {
withSuperAdminAuthentication { _ => // the directive yields the admin's UUID, unused here
entity(as[CreateRoleRequest]) { request =>
complete((StatusCodes.Created, controller.createRole(request)))
}
}
}
}
protected val loginByEmail: Route =
path(api / version / "login") {
post {
entity(as[LoginByEmail]) { request =>
complete((StatusCodes.OK, controller.loginByEmail(request)))
}
}
}
val routes: Route =
createUser ~
createRole ~
loginByEmail
}
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/config/http/JWTAuthentication.scala
|
package org.el.documento.config.http
import org.el.documento.config.ApplicationConfig
import org.el.documento.model.UserClaim
import pdi.jwt.{Jwt, JwtAlgorithm}
import play.api.libs.json.Json
import scala.util.{Failure, Success, Try}
trait JWTAuthenticationServices {
def generateToken(userClaim: UserClaim) : String
def verifyToken(token : String) : Try[(String,String,String)]
def verifyJwtToken(token : String) : Option[UserClaim]
}
class JWTAuthentication extends ApplicationConfig with JWTAuthenticationServices {
override def generateToken(userClaim: UserClaim): String = {
try {
Jwt.encode(Json.toJson(userClaim).toString(), secret, JwtAlgorithm.HS256)
}catch {
case e: Exception =>
logger.error("Error encoding token")
throw e
}
}
override def verifyToken(token : String): Try[(String,String,String)] = Jwt.decodeRawAll(token, secret, Seq(JwtAlgorithm.HS256))
override def verifyJwtToken(token: String): Option[UserClaim] = {
val jwt = Jwt.decode(token, secret, Seq(JwtAlgorithm.HS256))
jwt match {
case Success(x) => Json.parse(x.content).asOpt[UserClaim]
case Failure(exception) =>
logger.error("Error decoding token")
None
}
}
}
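// A hypothetical sketch, not part of the original source: an encode/decode round trip with the
// service above. It assumes the JWT_SECRET environment variable is set, since ApplicationConfig
// reads the signing secret from it; the UUID and role title are arbitrary.
object JWTAuthenticationSketch {
  def main(args: Array[String]): Unit = {
    val auth: JWTAuthenticationServices = new JWTAuthentication
    val claim = UserClaim(java.util.UUID.randomUUID(), "public")
    val token = auth.generateToken(claim)
    auth.verifyJwtToken(token) match {
      case Some(decoded) => assert(decoded == claim) // payload survives the round trip
      case None          => sys.error("token did not verify")
    }
  }
}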
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/config/ErrorResponse.scala
|
package org.el.documento.config
import play.api.libs.json.{Format, Json}
case class ErrorResponse(code: Int, `type`: String, message: String) {
def toStrEntity = Json.toJson(this).toString()
}
object ErrorResponse {
implicit val format: Format[ErrorResponse] = Json.format
}
|
oyinda-subair/el-documento
|
src/main/scala/org/el/documento/messages/CreateUserRequest.scala
|
package org.el.documento.messages
import org.el.documento.config.base.FormatEntity
import play.api.libs.json.{Format, Json}
case class CreateUserRequest(
name: String,
username: String,
email: String,
password: String,
)
object CreateUserRequest extends FormatEntity[CreateUserRequest]{
implicit val format: Format[CreateUserRequest] = Json.format
}
|
Nboaram/shapeless-guide-code
|
build.sbt
|
scalaVersion in ThisBuild := "2.13.5"
scalacOptions in Global ++= Seq(
"-deprecation",
"-encoding",
"UTF-8",
"-unchecked",
"-feature",
// Linter configuration (replaces -Xfatal-warnings, -Xlint, etc). More info here:
// https://www.scala-lang.org/2021/01/12/configuring-and-suppressing-warnings.html
List(
"-Wconf",
List(
// We need to disable the byname-implicit warning in Scala 2.13.3+
"cat=lint-byname-implicit:silent",
// Anything else can be an error, though
"any:error",
).mkString(",")
).mkString(":")
)
libraryDependencies in Global ++= Seq(
"com.chuusai" %% "shapeless" % "2.3.3",
"org.typelevel" %% "cats-core" % "2.2.0",
"io.circe" %% "circe-core" % "0.13.0",
"io.circe" %% "circe-generic" % "0.13.0",
"io.circe" %% "circe-parser" % "0.13.0",
"org.scalactic" %% "scalactic" % "3.2.5" % Test,
"org.scalatest" %% "scalatest" % "3.2.5" % Test
)
lazy val common =
project.in(file("common"))
lazy val representations =
project.in(file("representations")).dependsOn(common)
lazy val csv =
project.in(file("csv")).dependsOn(common)
lazy val literaltypes =
project.in(file("literaltypes")).dependsOn(common)
lazy val json =
project.in(file("json")).dependsOn(common)
lazy val numfields =
project.in(file("numfields")).dependsOn(common)
lazy val random =
project.in(file("random")).dependsOn(common)
lazy val migrations =
project.in(file("migrations")).dependsOn(common)
lazy val mapping =
project.in(file("mapping")).dependsOn(common)
lazy val root = project.in(file(".")).aggregate(
representations,
csv,
literaltypes,
json,
numfields,
random,
migrations,
mapping
)
|
frozenspider/fs-common-utils
|
src/main/scala/org/fs/utility/RichGeneralImplicits.scala
|
package org.fs.utility
import java.io.StringWriter
import java.io.PrintWriter
/**
* Some general implicit helpers
*
* @author FS
*/
trait RichGeneralImplicits {
/** Throwable enriched with some of most general support methods */
implicit class RichThrowable[Th <: Throwable](th: Th) {
/** @return stack trace printed to a string */
def stackTraceString: String = {
val writer = new StringWriter
th.printStackTrace(new PrintWriter(writer, true))
writer.toString
}
}
implicit class RichLong(l: Long) {
/**
* Pretty-prints this value, interpreted as elapsed milliseconds between two events, in the form
* {@code HH:mm:ss}, e.g. 112:35:16 (112 hours, 35 minutes and 16 seconds). The value is rounded
* to the closest whole second.
* <p>
* Mostly useful with {@link System#currentTimeMillis} and {@link StopWatch}, e.g. on
* {@code System.currentTimeMillis - oldMark}.
*
* @return {@code HH:mm:ss} string
*/
def hhMmSsString: String = {
val totalSeconds = Math.round(l.toDouble / 1000)
val hours = totalSeconds / 3600
val remainingSeconds = totalSeconds % 3600
val minutes = remainingSeconds / 60
val seconds = remainingSeconds % 60
String.format("%d:%02d:%02d", hours: java.lang.Long, minutes: java.lang.Long, seconds: java.lang.Long)
}
}
}
object RichGeneralImplicits extends RichGeneralImplicits
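// A hypothetical usage sketch, not part of the original source, exercising the enrichments above;
// the object name and the millisecond value are arbitrary.
object RichGeneralImplicitsSketch {
  import org.fs.utility.RichGeneralImplicits._

  def main(args: Array[String]): Unit = {
    val elapsedMillis = (112L * 3600 + 35 * 60 + 16) * 1000
    println(elapsedMillis.hhMmSsString)                           // prints "112:35:16"
    println(new RuntimeException("boom").stackTraceString.length) // full stack trace as a String
  }
}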
|