| repo_name (string, 6-97 chars) | path (string, 3-341 chars) | text (string, 8-1.02M chars) |
|---|---|---|
ekasitk/spark
|
mllib/src/main/scala/org/apache/spark/ml/feature/CountVectorizer.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import org.apache.hadoop.fs.Path
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.ml.{Estimator, Model}
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared.{HasInputCol, HasOutputCol}
import org.apache.spark.ml.util._
import org.apache.spark.mllib.linalg.{Vectors, VectorUDT}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.util.collection.OpenHashMap
/**
* Params for [[CountVectorizer]] and [[CountVectorizerModel]].
*/
private[feature] trait CountVectorizerParams extends Params with HasInputCol with HasOutputCol {
/**
* Max size of the vocabulary.
* CountVectorizer will build a vocabulary that only considers the top
* vocabSize terms ordered by term frequency across the corpus.
*
* Default: 2^18^
* @group param
*/
val vocabSize: IntParam =
new IntParam(this, "vocabSize", "max size of the vocabulary", ParamValidators.gt(0))
/** @group getParam */
def getVocabSize: Int = $(vocabSize)
/**
* Specifies the minimum number of different documents a term must appear in to be included
* in the vocabulary.
* If this is an integer >= 1, this specifies the number of documents the term must appear in;
* if this is a double in [0,1), then this specifies the fraction of documents.
*
* Default: 1
* @group param
*/
val minDF: DoubleParam = new DoubleParam(this, "minDF", "Specifies the minimum number of" +
" different documents a term must appear in to be included in the vocabulary." +
" If this is an integer >= 1, this specifies the number of documents the term must" +
" appear in; if this is a double in [0,1), then this specifies the fraction of documents.",
ParamValidators.gtEq(0.0))
/** @group getParam */
def getMinDF: Double = $(minDF)
/** Validates and transforms the input schema. */
protected def validateAndTransformSchema(schema: StructType): StructType = {
validateParams()
SchemaUtils.checkColumnType(schema, $(inputCol), new ArrayType(StringType, true))
SchemaUtils.appendColumn(schema, $(outputCol), new VectorUDT)
}
/**
* Filter to ignore rare words in a document. For each document, terms with
* frequency/count less than the given threshold are ignored.
* If this is an integer >= 1, then this specifies a count (of times the term must appear
* in the document);
* if this is a double in [0,1), then this specifies a fraction (out of the document's token
* count).
*
* Note that the parameter is only used in transform of [[CountVectorizerModel]] and does not
* affect fitting.
*
* Default: 1
* @group param
*/
val minTF: DoubleParam = new DoubleParam(this, "minTF", "Filter to ignore rare words in" +
" a document. For each document, terms with frequency/count less than the given threshold are" +
" ignored. If this is an integer >= 1, then this specifies a count (of times the term must" +
" appear in the document); if this is a double in [0,1), then this specifies a fraction (out" +
" of the document's token count). Note that the parameter is only used in transform of" +
" CountVectorizerModel and does not affect fitting.", ParamValidators.gtEq(0.0))
setDefault(minTF -> 1)
/** @group getParam */
def getMinTF: Double = $(minTF)
}
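// A worked illustration of the minDF / minTF semantics described above (values are examples
// only): minDF = 2 keeps a term only if it occurs in at least 2 documents, while minDF = 0.5
// requires it to occur in at least half of the documents; similarly, minTF = 2 counts a term
// for a document only if it occurs there at least twice, while minTF = 0.1 requires it to
// account for at least 10% of that document's tokens.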
/**
* :: Experimental ::
* Extracts a vocabulary from document collections and generates a [[CountVectorizerModel]].
*/
@Experimental
class CountVectorizer(override val uid: String)
extends Estimator[CountVectorizerModel] with CountVectorizerParams with DefaultParamsWritable {
def this() = this(Identifiable.randomUID("cntVec"))
/** @group setParam */
def setInputCol(value: String): this.type = set(inputCol, value)
/** @group setParam */
def setOutputCol(value: String): this.type = set(outputCol, value)
/** @group setParam */
def setVocabSize(value: Int): this.type = set(vocabSize, value)
/** @group setParam */
def setMinDF(value: Double): this.type = set(minDF, value)
/** @group setParam */
def setMinTF(value: Double): this.type = set(minTF, value)
setDefault(vocabSize -> (1 << 18), minDF -> 1)
override def fit(dataset: DataFrame): CountVectorizerModel = {
transformSchema(dataset.schema, logging = true)
val vocSize = $(vocabSize)
val input = dataset.select($(inputCol)).map(_.getAs[Seq[String]](0))
val minDf = if ($(minDF) >= 1.0) {
$(minDF)
} else {
$(minDF) * input.cache().count()
}
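// For each document, count term occurrences locally, then emit (term, (count, 1)) so that
// reduceByKey accumulates both the corpus-wide term count and the document frequency; terms
// whose document frequency falls below the minDF threshold are dropped before the vocabulary
// is selected.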
val wordCounts: RDD[(String, Long)] = input.flatMap { case (tokens) =>
val wc = new OpenHashMap[String, Long]
tokens.foreach { w =>
wc.changeValue(w, 1L, _ + 1L)
}
wc.map { case (word, count) => (word, (count, 1)) }
}.reduceByKey { case ((wc1, df1), (wc2, df2)) =>
(wc1 + wc2, df1 + df2)
}.filter { case (word, (wc, df)) =>
df >= minDf
}.map { case (word, (count, dfCount)) =>
(word, count)
}.cache()
val fullVocabSize = wordCounts.count()
val vocab: Array[String] = {
val tmpSortedWC: Array[(String, Long)] = if (fullVocabSize <= vocSize) {
// Use all terms
wordCounts.collect().sortBy(-_._2)
} else {
// Sort terms to select vocab
wordCounts.sortBy(_._2, ascending = false).take(vocSize)
}
tmpSortedWC.map(_._1)
}
require(vocab.length > 0, "The vocabulary size should be > 0. Lower minDF as necessary.")
copyValues(new CountVectorizerModel(uid, vocab).setParent(this))
}
override def transformSchema(schema: StructType): StructType = {
validateAndTransformSchema(schema)
}
override def copy(extra: ParamMap): CountVectorizer = defaultCopy(extra)
}
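// A minimal usage sketch (the column names and data are illustrative, not part of this file):
//
//   val df = sqlContext.createDataFrame(Seq(
//     (0, Seq("a", "b", "c")),
//     (1, Seq("a", "b", "b", "c", "a"))
//   )).toDF("id", "words")
//   val cvModel: CountVectorizerModel = new CountVectorizer()
//     .setInputCol("words")
//     .setOutputCol("features")
//     .setVocabSize(3)
//     .setMinDF(2)
//     .fit(df)
//   cvModel.transform(df).select("features").show()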
@Since("1.6.0")
object CountVectorizer extends DefaultParamsReadable[CountVectorizer] {
@Since("1.6.0")
override def load(path: String): CountVectorizer = super.load(path)
}
/**
* :: Experimental ::
* Converts a text document to a sparse vector of token counts.
* @param vocabulary An array of terms. Only the terms in the vocabulary will be counted.
*/
@Experimental
class CountVectorizerModel(override val uid: String, val vocabulary: Array[String])
extends Model[CountVectorizerModel] with CountVectorizerParams with MLWritable {
import CountVectorizerModel._
def this(vocabulary: Array[String]) = {
this(Identifiable.randomUID("cntVecModel"), vocabulary)
set(vocabSize, vocabulary.length)
}
/** @group setParam */
def setInputCol(value: String): this.type = set(inputCol, value)
/** @group setParam */
def setOutputCol(value: String): this.type = set(outputCol, value)
/** @group setParam */
def setMinTF(value: Double): this.type = set(minTF, value)
/** Dictionary created from [[vocabulary]] and its indices, broadcast once for [[transform()]] */
private var broadcastDict: Option[Broadcast[Map[String, Int]]] = None
override def transform(dataset: DataFrame): DataFrame = {
if (broadcastDict.isEmpty) {
val dict = vocabulary.zipWithIndex.toMap
broadcastDict = Some(dataset.sqlContext.sparkContext.broadcast(dict))
}
val dictBr = broadcastDict.get
val minTf = $(minTF)
val vectorizer = udf { (document: Seq[String]) =>
val termCounts = new OpenHashMap[Int, Double]
var tokenCount = 0L
document.foreach { term =>
dictBr.value.get(term) match {
case Some(index) => termCounts.changeValue(index, 1.0, _ + 1.0)
case None => // ignore terms not in the vocabulary
}
tokenCount += 1
}
val effectiveMinTF = if (minTf >= 1.0) {
minTf
} else {
tokenCount * minTf
}
Vectors.sparse(dictBr.value.size, termCounts.filter(_._2 >= effectiveMinTF).toSeq)
}
dataset.withColumn($(outputCol), vectorizer(col($(inputCol))))
}
override def transformSchema(schema: StructType): StructType = {
validateAndTransformSchema(schema)
}
override def copy(extra: ParamMap): CountVectorizerModel = {
val copied = new CountVectorizerModel(uid, vocabulary).setParent(parent)
copyValues(copied, extra)
}
@Since("1.6.0")
override def write: MLWriter = new CountVectorizerModelWriter(this)
}
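// A minimal sketch of transforming with an a-priori vocabulary instead of a fitted one
// (illustrative only; `df` is a DataFrame with a "words" array column as in the sketch above):
//
//   val cvm = new CountVectorizerModel(Array("a", "b", "c"))
//     .setInputCol("words")
//     .setOutputCol("features")
//   cvm.transform(df).show()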
@Since("1.6.0")
object CountVectorizerModel extends MLReadable[CountVectorizerModel] {
private[CountVectorizerModel]
class CountVectorizerModelWriter(instance: CountVectorizerModel) extends MLWriter {
private case class Data(vocabulary: Seq[String])
override protected def saveImpl(path: String): Unit = {
DefaultParamsWriter.saveMetadata(instance, path, sc)
val data = Data(instance.vocabulary)
val dataPath = new Path(path, "data").toString
sqlContext.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath)
}
}
private class CountVectorizerModelReader extends MLReader[CountVectorizerModel] {
private val className = classOf[CountVectorizerModel].getName
override def load(path: String): CountVectorizerModel = {
val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
val dataPath = new Path(path, "data").toString
val data = sqlContext.read.parquet(dataPath)
.select("vocabulary")
.head()
val vocabulary = data.getAs[Seq[String]](0).toArray
val model = new CountVectorizerModel(metadata.uid, vocabulary)
DefaultParamsReader.getAndSetParams(model, metadata)
model
}
}
@Since("1.6.0")
override def read: MLReader[CountVectorizerModel] = new CountVectorizerModelReader
@Since("1.6.0")
override def load(path: String): CountVectorizerModel = super.load(path)
}
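// Persisting and reloading a fitted model (the path is illustrative):
//
//   cvModel.write.overwrite().save("/tmp/cvModel")
//   val restored = CountVectorizerModel.load("/tmp/cvModel")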
|
ekasitk/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.lang.Thread.UncaughtExceptionHandler
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.Logging
import org.apache.spark.sql.{ContinuousQuery, DataFrame, SQLContext}
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeMap}
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan}
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.execution.QueryExecution
/**
* Manages the execution of a streaming Spark SQL query that is occurring in a separate thread.
* Unlike a standard query, a streaming query executes repeatedly each time new data arrives at any
* [[Source]] present in the query plan. Whenever new data arrives, a [[QueryExecution]] is created
* and the results are committed transactionally to the given [[Sink]].
*/
class StreamExecution(
sqlContext: SQLContext,
private[sql] val logicalPlan: LogicalPlan,
val sink: Sink) extends ContinuousQuery with Logging {
/** A monitor used to wait/notify when batches complete. */
private val awaitBatchLock = new Object
@volatile
private var batchRun = false
/** Minimum amount of time between the starts of consecutive batches. */
private val minBatchTime = 10
/** Tracks how much data we have processed from each input source. */
private[sql] val streamProgress = new StreamProgress
/** All stream sources present in the query plan. */
private val sources =
logicalPlan.collect { case s: StreamingRelation => s.source }
// Start the execution at the current offsets stored in the sink (i.e. avoid reprocessing data
// that we have already processed).
{
sink.currentOffset match {
case Some(c: CompositeOffset) =>
val storedProgress = c.offsets
val sources = logicalPlan collect {
case StreamingRelation(source, _) => source
}
assert(sources.size == storedProgress.size)
sources.zip(storedProgress).foreach { case (source, offset) =>
offset.foreach(streamProgress.update(source, _))
}
case None => // We are starting this stream for the first time.
case _ => throw new IllegalArgumentException("Expected composite offset from sink")
}
}
logInfo(s"Stream running at $streamProgress")
/** When false, signals to the microBatchThread that it should stop running. */
@volatile private var shouldRun = true
/** The thread that runs the micro-batches of this stream. */
private[sql] val microBatchThread = new Thread("stream execution thread") {
override def run(): Unit = {
SQLContext.setActive(sqlContext)
while (shouldRun) {
attemptBatch()
Thread.sleep(minBatchTime) // TODO: Could be tighter
}
}
}
microBatchThread.setDaemon(true)
microBatchThread.setUncaughtExceptionHandler(
new UncaughtExceptionHandler {
override def uncaughtException(t: Thread, e: Throwable): Unit = {
streamDeathCause = e
}
})
microBatchThread.start()
@volatile
private[sql] var lastExecution: QueryExecution = null
@volatile
private[sql] var streamDeathCause: Throwable = null
/**
* Checks to see if any new data is present in any of the sources. When new data is available,
* a batch is executed and passed to the sink, updating the currentOffsets.
*/
private def attemptBatch(): Unit = {
val startTime = System.nanoTime()
// A list of offsets that need to be updated if this batch is successful.
// Populated while walking the tree.
val newOffsets = new ArrayBuffer[(Source, Offset)]
// A list of attributes that will need to be updated.
var replacements = new ArrayBuffer[(Attribute, Attribute)]
// Replace sources in the logical plan with data that has arrived since the last batch.
val withNewSources = logicalPlan transform {
case StreamingRelation(source, output) =>
val prevOffset = streamProgress.get(source)
val newBatch = source.getNextBatch(prevOffset)
newBatch.map { batch =>
newOffsets += ((source, batch.end))
val newPlan = batch.data.logicalPlan
assert(output.size == newPlan.output.size)
replacements ++= output.zip(newPlan.output)
newPlan
}.getOrElse {
LocalRelation(output)
}
}
// Rewire the plan to use the new attributes that were returned by the source.
val replacementMap = AttributeMap(replacements)
val newPlan = withNewSources transformAllExpressions {
case a: Attribute if replacementMap.contains(a) => replacementMap(a)
}
if (newOffsets.nonEmpty) {
val optimizerStart = System.nanoTime()
lastExecution = new QueryExecution(sqlContext, newPlan)
val executedPlan = lastExecution.executedPlan
val optimizerTime = (System.nanoTime() - optimizerStart).toDouble / 1000000
logDebug(s"Optimized batch in ${optimizerTime}ms")
streamProgress.synchronized {
// Update the offsets and calculate a new composite offset
newOffsets.foreach(streamProgress.update)
val newStreamProgress = logicalPlan.collect {
case StreamingRelation(source, _) => streamProgress.get(source)
}
val batchOffset = CompositeOffset(newStreamProgress)
// Construct the batch and send it to the sink.
val nextBatch = new Batch(batchOffset, new DataFrame(sqlContext, newPlan))
sink.addBatch(nextBatch)
}
batchRun = true
awaitBatchLock.synchronized {
// Wake up any threads that are waiting for the stream to progress.
awaitBatchLock.notifyAll()
}
val batchTime = (System.nanoTime() - startTime).toDouble / 1000000
logInfo(s"Compete up to $newOffsets in ${batchTime}ms")
}
logDebug(s"Waiting for data, current: $streamProgress")
}
/**
* Signals to the thread executing micro-batches that it should stop running after the next
* batch. This method blocks until the thread stops running.
*/
def stop(): Unit = {
shouldRun = false
if (microBatchThread.isAlive) { microBatchThread.join() }
}
/**
* Blocks the current thread until processing for data from the given `source` has reached at
* least the given `Offset`. This method is intended for use primarily when writing tests.
*/
def awaitOffset(source: Source, newOffset: Offset): Unit = {
def notDone = streamProgress.synchronized {
!streamProgress.contains(source) || streamProgress(source) < newOffset
}
while (notDone) {
logInfo(s"Waiting until $newOffset at $source")
awaitBatchLock.synchronized { awaitBatchLock.wait(100) }
}
logDebug(s"Unblocked at $newOffset for $source")
}
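// A hypothetical test-side sketch (the `query`, `source`, and `expectedOffset` names are
// illustrative): after feeding new data into a source, a test can block until this query has
// consumed it, inspect the sink, and then shut the query down:
//
//   query.awaitOffset(source, expectedOffset)
//   // ... assert on the sink's contents ...
//   query.stop()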
override def toString: String =
s"""
|=== Streaming Query ===
|CurrentOffsets: $streamProgress
|Thread State: ${microBatchThread.getState}
|${if (streamDeathCause != null) stackTraceToString(streamDeathCause) else ""}
|
|$logicalPlan
""".stripMargin
}
|
ekasitk/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/BenchmarkWholeStageCodegen.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.memory.{StaticMemoryManager, TaskMemoryManager}
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.functions._
import org.apache.spark.unsafe.Platform
import org.apache.spark.unsafe.hash.Murmur3_x86_32
import org.apache.spark.unsafe.map.BytesToBytesMap
import org.apache.spark.util.Benchmark
/**
* Benchmark to measure whole stage codegen performance.
* To run this:
* build/sbt "sql/test-only *BenchmarkWholeStageCodegen"
*/
class BenchmarkWholeStageCodegen extends SparkFunSuite {
lazy val conf = new SparkConf().setMaster("local[1]").setAppName("benchmark")
.set("spark.sql.shuffle.partitions", "1")
lazy val sc = SparkContext.getOrCreate(conf)
lazy val sqlContext = SQLContext.getOrCreate(sc)
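// Runs `f` once with whole-stage codegen disabled and once enabled by flipping
// spark.sql.codegen.wholeStage, so the two timings in the output can be compared directly.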
def runBenchmark(name: String, values: Int)(f: => Unit): Unit = {
val benchmark = new Benchmark(name, values)
Seq(false, true).foreach { enabled =>
benchmark.addCase(s"$name codegen=$enabled") { iter =>
sqlContext.setConf("spark.sql.codegen.wholeStage", enabled.toString)
f
}
}
benchmark.run()
}
// These benchmarks are skipped in normal builds
ignore("range/filter/sum") {
val N = 500 << 20
runBenchmark("rang/filter/sum", N) {
sqlContext.range(N).filter("(id & 1) = 1").groupBy().sum().collect()
}
/*
Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
range/filter/sum: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
range/filter/sum codegen=false 14332 / 16646 36.0 27.8 1.0X
range/filter/sum codegen=true 845 / 940 620.0 1.6 17.0X
*/
}
ignore("stat functions") {
val N = 100 << 20
runBenchmark("stddev", N) {
sqlContext.range(N).groupBy().agg("id" -> "stddev").collect()
}
runBenchmark("kurtosis", N) {
sqlContext.range(N).groupBy().agg("id" -> "kurtosis").collect()
}
/**
Using ImperativeAggregate (as implemented in Spark 1.6):
Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
stddev: Avg Time(ms) Avg Rate(M/s) Relative Rate
-------------------------------------------------------------------------------
stddev w/o codegen 2019.04 10.39 1.00 X
stddev w codegen 2097.29 10.00 0.96 X
kurtosis w/o codegen 2108.99 9.94 0.96 X
kurtosis w codegen 2090.69 10.03 0.97 X
Using DeclarativeAggregate:
Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
stddev: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
stddev codegen=false 5630 / 5776 18.0 55.6 1.0X
stddev codegen=true 1259 / 1314 83.0 12.0 4.5X
Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
kurtosis: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
kurtosis codegen=false 14847 / 15084 7.0 142.9 1.0X
kurtosis codegen=true 1652 / 2124 63.0 15.9 9.0X
*/
}
ignore("aggregate with keys") {
val N = 20 << 20
runBenchmark("Aggregate w keys", N) {
sqlContext.range(N).selectExpr("(id & 65535) as k").groupBy("k").sum().collect()
}
/*
Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
Aggregate w keys: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
Aggregate w keys codegen=false 2402 / 2551 8.0 125.0 1.0X
Aggregate w keys codegen=true 1620 / 1670 12.0 83.3 1.5X
*/
}
ignore("broadcast hash join") {
val N = 20 << 20
val dim = broadcast(sqlContext.range(1 << 16).selectExpr("id as k", "cast(id as string) as v"))
runBenchmark("BroadcastHashJoin", N) {
sqlContext.range(N).join(dim, (col("id") % 60000) === col("k")).count()
}
/*
Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
BroadcastHashJoin: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
BroadcastHashJoin codegen=false 4405 / 6147 4.0 250.0 1.0X
BroadcastHashJoin codegen=true 1857 / 1878 11.0 90.9 2.4X
*/
}
ignore("hash and BytesToBytesMap") {
val N = 50 << 20
val benchmark = new Benchmark("BytesToBytesMap", N)
benchmark.addCase("hash") { iter =>
var i = 0
val keyBytes = new Array[Byte](16)
val valueBytes = new Array[Byte](16)
val key = new UnsafeRow(1)
key.pointTo(keyBytes, Platform.BYTE_ARRAY_OFFSET, 16)
val value = new UnsafeRow(2)
value.pointTo(valueBytes, Platform.BYTE_ARRAY_OFFSET, 16)
var s = 0
while (i < N) {
key.setInt(0, i % 1000)
val h = Murmur3_x86_32.hashUnsafeWords(
key.getBaseObject, key.getBaseOffset, key.getSizeInBytes, 0)
s += h
i += 1
}
}
Seq("off", "on").foreach { heap =>
benchmark.addCase(s"BytesToBytesMap ($heap Heap)") { iter =>
val taskMemoryManager = new TaskMemoryManager(
new StaticMemoryManager(
new SparkConf().set("spark.memory.offHeap.enabled", s"${heap == "off"}")
.set("spark.memory.offHeap.size", "102400000"),
Long.MaxValue,
Long.MaxValue,
1),
0)
val map = new BytesToBytesMap(taskMemoryManager, 1024, 64L<<20)
val keyBytes = new Array[Byte](16)
val valueBytes = new Array[Byte](16)
val key = new UnsafeRow(1)
key.pointTo(keyBytes, Platform.BYTE_ARRAY_OFFSET, 16)
val value = new UnsafeRow(2)
value.pointTo(valueBytes, Platform.BYTE_ARRAY_OFFSET, 16)
var i = 0
while (i < N) {
key.setInt(0, i % 65536)
val loc = map.lookup(key.getBaseObject, key.getBaseOffset, key.getSizeInBytes)
if (loc.isDefined) {
value.pointTo(loc.getValueAddress.getBaseObject, loc.getValueAddress.getBaseOffset,
loc.getValueLength)
value.setInt(0, value.getInt(0) + 1)
i += 1
} else {
loc.putNewKey(key.getBaseObject, key.getBaseOffset, key.getSizeInBytes,
value.getBaseObject, value.getBaseOffset, value.getSizeInBytes)
}
}
}
}
/**
Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
BytesToBytesMap: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
hash 628 / 661 83.0 12.0 1.0X
BytesToBytesMap (off Heap) 3292 / 3408 15.0 66.7 0.2X
BytesToBytesMap (on Heap) 3349 / 4267 15.0 66.7 0.2X
*/
benchmark.run()
}
}
|
ekasitk/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/HashBenchmark.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions.{Murmur3Hash, UnsafeProjection}
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateSafeProjection
import org.apache.spark.sql.types._
import org.apache.spark.util.Benchmark
/**
* Benchmark for the previous interpreted hash function (InternalRow.hashCode) vs. the new
* codegen hash expression (Murmur3Hash).
*/
object HashBenchmark {
def test(name: String, schema: StructType, iters: Int): Unit = {
val numRows = 1024 * 8
val generator = RandomDataGenerator.forType(schema, nullable = false).get
val encoder = RowEncoder(schema)
val attrs = schema.toAttributes
val safeProjection = GenerateSafeProjection.generate(attrs, attrs)
val rows = (1 to numRows).map(_ =>
// The output of the encoder is an UnsafeRow; use safeProjection to turn it into a safe format.
safeProjection(encoder.toRow(generator().asInstanceOf[Row])).copy()
).toArray
val benchmark = new Benchmark("Hash For " + name, iters * numRows)
benchmark.addCase("interpreted version") { _: Int =>
for (_ <- 0L until iters) {
var sum = 0
var i = 0
while (i < numRows) {
sum += rows(i).hashCode()
i += 1
}
}
}
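// Build a projection that evaluates the code-generated Murmur3Hash expression over all columns;
// the resulting row carries the hash as its single int field, read back via getInt(0) below.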
val getHashCode = UnsafeProjection.create(new Murmur3Hash(attrs) :: Nil, attrs)
benchmark.addCase("codegen version") { _: Int =>
for (_ <- 0L until iters) {
var sum = 0
var i = 0
while (i < numRows) {
sum += getHashCode(rows(i)).getInt(0)
i += 1
}
}
}
benchmark.run()
}
def main(args: Array[String]): Unit = {
val simple = new StructType().add("i", IntegerType)
test("simple", simple, 1024)
val normal = new StructType()
.add("null", NullType)
.add("boolean", BooleanType)
.add("byte", ByteType)
.add("short", ShortType)
.add("int", IntegerType)
.add("long", LongType)
.add("float", FloatType)
.add("double", DoubleType)
.add("bigDecimal", DecimalType.SYSTEM_DEFAULT)
.add("smallDecimal", DecimalType.USER_DEFAULT)
.add("string", StringType)
.add("binary", BinaryType)
.add("date", DateType)
.add("timestamp", TimestampType)
test("normal", normal, 128)
val arrayOfInt = ArrayType(IntegerType)
val array = new StructType()
.add("array", arrayOfInt)
.add("arrayOfArray", ArrayType(arrayOfInt))
test("array", array, 64)
val mapOfInt = MapType(IntegerType, IntegerType)
val map = new StructType()
.add("map", mapOfInt)
.add("mapOfMap", MapType(IntegerType, mapOfInt))
test("map", map, 64)
}
}
|
ekasitk/spark
|
core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackend.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.mesos
import java.io.File
import java.util.{ArrayList => JArrayList, Collections, List => JList}
import scala.collection.JavaConverters._
import scala.collection.mutable.{HashMap, HashSet}
import org.apache.mesos.{Scheduler => MScheduler, _}
import org.apache.mesos.Protos.{ExecutorInfo => MesosExecutorInfo, TaskInfo => MesosTaskInfo, _}
import org.apache.mesos.protobuf.ByteString
import org.apache.spark.{SparkContext, SparkException, TaskState}
import org.apache.spark.executor.MesosExecutorBackend
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.ExecutorInfo
import org.apache.spark.util.Utils
/**
* A SchedulerBackend for running fine-grained tasks on Mesos. Each Spark task is mapped to a
* separate Mesos task, allowing multiple applications to share cluster nodes both in space (tasks
* from multiple apps can run on different cores) and in time (a core can switch ownership).
*/
private[spark] class MesosSchedulerBackend(
scheduler: TaskSchedulerImpl,
sc: SparkContext,
master: String)
extends SchedulerBackend
with MScheduler
with MesosSchedulerUtils {
// Stores the slave ids that have launched a Mesos executor.
val slaveIdToExecutorInfo = new HashMap[String, MesosExecutorInfo]
val taskIdToSlaveId = new HashMap[Long, String]
// An ExecutorInfo for our tasks
var execArgs: Array[Byte] = null
var classLoader: ClassLoader = null
// The listener bus to publish executor added/removed events.
val listenerBus = sc.listenerBus
private[mesos] val mesosExecutorCores = sc.conf.getDouble("spark.mesos.mesosExecutor.cores", 1)
// Offer constraints
private[this] val slaveOfferConstraints =
parseConstraintString(sc.conf.get("spark.mesos.constraints", ""))
// Duration (in seconds) for which to reject offers with unmet constraints
private val rejectOfferDurationForUnmetConstraints =
getRejectOfferDurationForUnmetConstraints(sc)
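// Example settings for the configuration keys read in this backend (values are illustrative;
// the exact constraint-string format is defined by parseConstraintString in
// MesosSchedulerUtils):
//
//   spark.mesos.mesosExecutor.cores     1
//   spark.mesos.constraints             os:centos7;zone:us-east-1a
//   spark.mesos.executor.home           /opt/spark
//   spark.mesos.executor.docker.image   example/spark:latest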
@volatile var appId: String = _
override def start() {
classLoader = Thread.currentThread.getContextClassLoader
val driver = createSchedulerDriver(
master,
MesosSchedulerBackend.this,
sc.sparkUser,
sc.appName,
sc.conf,
sc.ui.map(_.appUIAddress))
startScheduler(driver)
}
/**
* Creates a MesosExecutorInfo that is used to launch a Mesos executor.
* @param availableResources Available resources that are offered by Mesos
* @param execId The executor id to assign to this new executor.
* @return A tuple of the new Mesos executor info and the remaining available resources.
*/
def createExecutorInfo(
availableResources: JList[Resource],
execId: String): (MesosExecutorInfo, JList[Resource]) = {
val executorSparkHome = sc.conf.getOption("spark.mesos.executor.home")
.orElse(sc.getSparkHome()) // Fall back to driver Spark home for backward compatibility
.getOrElse {
throw new SparkException("Executor Spark home `spark.mesos.executor.home` is not set!")
}
val environment = Environment.newBuilder()
sc.conf.getOption("spark.executor.extraClassPath").foreach { cp =>
environment.addVariables(
Environment.Variable.newBuilder().setName("SPARK_CLASSPATH").setValue(cp).build())
}
val extraJavaOpts = sc.conf.getOption("spark.executor.extraJavaOptions").getOrElse("")
val prefixEnv = sc.conf.getOption("spark.executor.extraLibraryPath").map { p =>
Utils.libraryPathEnvPrefix(Seq(p))
}.getOrElse("")
environment.addVariables(
Environment.Variable.newBuilder()
.setName("SPARK_EXECUTOR_OPTS")
.setValue(extraJavaOpts)
.build())
sc.executorEnvs.foreach { case (key, value) =>
environment.addVariables(Environment.Variable.newBuilder()
.setName(key)
.setValue(value)
.build())
}
val command = CommandInfo.newBuilder()
.setEnvironment(environment)
val uri = sc.conf.getOption("spark.executor.uri")
.orElse(Option(System.getenv("SPARK_EXECUTOR_URI")))
val executorBackendName = classOf[MesosExecutorBackend].getName
if (uri.isEmpty) {
val executorPath = new File(executorSparkHome, "/bin/spark-class").getPath
command.setValue(s"$prefixEnv $executorPath $executorBackendName")
} else {
// Grab everything to the first '.'. We'll use that and '*' to
// glob the directory "correctly".
val basename = uri.get.split('/').last.split('.').head
command.setValue(s"cd ${basename}*; $prefixEnv ./bin/spark-class $executorBackendName")
command.addUris(CommandInfo.URI.newBuilder().setValue(uri.get))
}
val builder = MesosExecutorInfo.newBuilder()
val (resourcesAfterCpu, usedCpuResources) =
partitionResources(availableResources, "cpus", mesosExecutorCores)
val (resourcesAfterMem, usedMemResources) =
partitionResources(resourcesAfterCpu.asJava, "mem", calculateTotalMemory(sc))
builder.addAllResources(usedCpuResources.asJava)
builder.addAllResources(usedMemResources.asJava)
sc.conf.getOption("spark.mesos.uris").foreach(setupUris(_, command))
val executorInfo = builder
.setExecutorId(ExecutorID.newBuilder().setValue(execId).build())
.setCommand(command)
.setData(ByteString.copyFrom(createExecArg()))
sc.conf.getOption("spark.mesos.executor.docker.image").foreach { image =>
MesosSchedulerBackendUtil
.setupContainerBuilderDockerInfo(image, sc.conf, executorInfo.getContainerBuilder())
}
(executorInfo.build(), resourcesAfterMem.asJava)
}
/**
* Create and serialize the executor argument to pass to Mesos. Our executor arg is an array
* containing all the spark.* system properties in the form of (String, String) pairs.
*/
private def createExecArg(): Array[Byte] = {
if (execArgs == null) {
val props = new HashMap[String, String]
for ((key, value) <- sc.conf.getAll) {
props(key) = value
}
// Serialize the map as an array of (String, String) pairs
execArgs = Utils.serialize(props.toArray)
}
execArgs
}
override def offerRescinded(d: SchedulerDriver, o: OfferID) {}
override def registered(d: SchedulerDriver, frameworkId: FrameworkID, masterInfo: MasterInfo) {
inClassLoader() {
appId = frameworkId.getValue
logInfo("Registered as framework ID " + appId)
markRegistered()
}
}
private def inClassLoader()(fun: => Unit) = {
val oldClassLoader = Thread.currentThread.getContextClassLoader
Thread.currentThread.setContextClassLoader(classLoader)
try {
fun
} finally {
Thread.currentThread.setContextClassLoader(oldClassLoader)
}
}
override def disconnected(d: SchedulerDriver) {}
override def reregistered(d: SchedulerDriver, masterInfo: MasterInfo) {}
private def getTasksSummary(tasks: JArrayList[MesosTaskInfo]): String = {
val builder = new StringBuilder
tasks.asScala.foreach { t =>
builder.append("Task id: ").append(t.getTaskId.getValue).append("\n")
.append("Slave id: ").append(t.getSlaveId.getValue).append("\n")
.append("Task resources: ").append(t.getResourcesList).append("\n")
.append("Executor resources: ").append(t.getExecutor.getResourcesList)
.append("---------------------------------------------\n")
}
builder.toString()
}
/**
* Method called by Mesos to offer resources on slaves. We respond by asking our active task sets
* for tasks in order of priority. We fill each node with tasks in a round-robin manner so that
* tasks are balanced across the cluster.
*/
override def resourceOffers(d: SchedulerDriver, offers: JList[Offer]) {
inClassLoader() {
// Fail first on offers with unmet constraints
val (offersMatchingConstraints, offersNotMatchingConstraints) =
offers.asScala.partition { o =>
val offerAttributes = toAttributeMap(o.getAttributesList)
val meetsConstraints =
matchesAttributeRequirements(slaveOfferConstraints, offerAttributes)
// add some debug messaging
if (!meetsConstraints) {
val id = o.getId.getValue
logDebug(s"Declining offer: $id with attributes: $offerAttributes")
}
meetsConstraints
}
// These offers do not meet constraints. We don't need to see them again.
// Decline the offer for a long period of time.
offersNotMatchingConstraints.foreach { o =>
d.declineOffer(o.getId, Filters.newBuilder()
.setRefuseSeconds(rejectOfferDurationForUnmetConstraints).build())
}
// Of the matching constraints, see which ones give us enough memory and cores
val (usableOffers, unUsableOffers) = offersMatchingConstraints.partition { o =>
val mem = getResource(o.getResourcesList, "mem")
val cpus = getResource(o.getResourcesList, "cpus")
val slaveId = o.getSlaveId.getValue
val offerAttributes = toAttributeMap(o.getAttributesList)
// check offers for
// 1. Memory requirements
// 2. CPU requirements - need at least 1 for executor, 1 for task
val meetsMemoryRequirements = mem >= calculateTotalMemory(sc)
val meetsCPURequirements = cpus >= (mesosExecutorCores + scheduler.CPUS_PER_TASK)
val meetsRequirements =
(meetsMemoryRequirements && meetsCPURequirements) ||
(slaveIdToExecutorInfo.contains(slaveId) && cpus >= scheduler.CPUS_PER_TASK)
val debugstr = if (meetsRequirements) "Accepting" else "Declining"
logDebug(s"$debugstr offer: ${o.getId.getValue} with attributes: "
+ s"$offerAttributes mem: $mem cpu: $cpus")
meetsRequirements
}
// Decline offers we ruled out immediately
unUsableOffers.foreach(o => d.declineOffer(o.getId))
val workerOffers = usableOffers.map { o =>
val cpus = if (slaveIdToExecutorInfo.contains(o.getSlaveId.getValue)) {
getResource(o.getResourcesList, "cpus").toInt
} else {
// If the Mesos executor has not been started on this slave yet, set aside a few
// cores for the Mesos executor by offering fewer cores to the Spark executor
(getResource(o.getResourcesList, "cpus") - mesosExecutorCores).toInt
}
new WorkerOffer(
o.getSlaveId.getValue,
o.getHostname,
cpus)
}
val slaveIdToOffer = usableOffers.map(o => o.getSlaveId.getValue -> o).toMap
val slaveIdToWorkerOffer = workerOffers.map(o => o.executorId -> o).toMap
val slaveIdToResources = new HashMap[String, JList[Resource]]()
usableOffers.foreach { o =>
slaveIdToResources(o.getSlaveId.getValue) = o.getResourcesList
}
val mesosTasks = new HashMap[String, JArrayList[MesosTaskInfo]]
val slavesIdsOfAcceptedOffers = HashSet[String]()
// Call into the TaskSchedulerImpl
val acceptedOffers = scheduler.resourceOffers(workerOffers).filter(!_.isEmpty)
acceptedOffers
.foreach { offer =>
offer.foreach { taskDesc =>
val slaveId = taskDesc.executorId
slavesIdsOfAcceptedOffers += slaveId
taskIdToSlaveId(taskDesc.taskId) = slaveId
val (mesosTask, remainingResources) = createMesosTask(
taskDesc,
slaveIdToResources(slaveId),
slaveId)
mesosTasks.getOrElseUpdate(slaveId, new JArrayList[MesosTaskInfo])
.add(mesosTask)
slaveIdToResources(slaveId) = remainingResources
}
}
// Reply to the offers
val filters = Filters.newBuilder().setRefuseSeconds(1).build() // TODO: lower timeout?
mesosTasks.foreach { case (slaveId, tasks) =>
slaveIdToWorkerOffer.get(slaveId).foreach(o =>
listenerBus.post(SparkListenerExecutorAdded(System.currentTimeMillis(), slaveId,
// TODO: Add support for log urls for Mesos
new ExecutorInfo(o.host, o.cores, Map.empty)))
)
logTrace(s"Launching Mesos tasks on slave '$slaveId', tasks:\n${getTasksSummary(tasks)}")
d.launchTasks(Collections.singleton(slaveIdToOffer(slaveId).getId), tasks, filters)
}
// Decline offers that weren't used
// NOTE: This logic assumes that we only get a single offer for each host in a given batch
for (o <- usableOffers if !slavesIdsOfAcceptedOffers.contains(o.getSlaveId.getValue)) {
d.declineOffer(o.getId)
}
}
}
/** Turn a Spark TaskDescription into a Mesos task, and also return the resources unused by it */
def createMesosTask(
task: TaskDescription,
resources: JList[Resource],
slaveId: String): (MesosTaskInfo, JList[Resource]) = {
val taskId = TaskID.newBuilder().setValue(task.taskId.toString).build()
val (executorInfo, remainingResources) = if (slaveIdToExecutorInfo.contains(slaveId)) {
(slaveIdToExecutorInfo(slaveId), resources)
} else {
createExecutorInfo(resources, slaveId)
}
slaveIdToExecutorInfo(slaveId) = executorInfo
val (finalResources, cpuResources) =
partitionResources(remainingResources, "cpus", scheduler.CPUS_PER_TASK)
val taskInfo = MesosTaskInfo.newBuilder()
.setTaskId(taskId)
.setSlaveId(SlaveID.newBuilder().setValue(slaveId).build())
.setExecutor(executorInfo)
.setName(task.name)
.addAllResources(cpuResources.asJava)
.setData(MesosTaskLaunchData(task.serializedTask, task.attemptNumber).toByteString)
.build()
(taskInfo, finalResources.asJava)
}
override def statusUpdate(d: SchedulerDriver, status: TaskStatus) {
inClassLoader() {
val tid = status.getTaskId.getValue.toLong
val state = TaskState.fromMesos(status.getState)
synchronized {
if (TaskState.isFailed(TaskState.fromMesos(status.getState))
&& taskIdToSlaveId.contains(tid)) {
// We lost the executor on this slave, so remember that it's gone
removeExecutor(taskIdToSlaveId(tid), "Lost executor")
}
if (TaskState.isFinished(state)) {
taskIdToSlaveId.remove(tid)
}
}
scheduler.statusUpdate(tid, state, status.getData.asReadOnlyByteBuffer)
}
}
override def error(d: SchedulerDriver, message: String) {
inClassLoader() {
logError("Mesos error: " + message)
markErr()
scheduler.error(message)
}
}
override def stop() {
if (mesosDriver != null) {
mesosDriver.stop()
}
}
override def reviveOffers() {
mesosDriver.reviveOffers()
}
override def frameworkMessage(d: SchedulerDriver, e: ExecutorID, s: SlaveID, b: Array[Byte]) {}
/**
* Remove executor associated with slaveId in a thread safe manner.
*/
private def removeExecutor(slaveId: String, reason: String) = {
synchronized {
listenerBus.post(SparkListenerExecutorRemoved(System.currentTimeMillis(), slaveId, reason))
slaveIdToExecutorInfo -= slaveId
}
}
private def recordSlaveLost(d: SchedulerDriver, slaveId: SlaveID, reason: ExecutorLossReason) {
inClassLoader() {
logInfo("Mesos slave lost: " + slaveId.getValue)
removeExecutor(slaveId.getValue, reason.toString)
scheduler.executorLost(slaveId.getValue, reason)
}
}
override def slaveLost(d: SchedulerDriver, slaveId: SlaveID) {
recordSlaveLost(d, slaveId, SlaveLost())
}
override def executorLost(d: SchedulerDriver, executorId: ExecutorID,
slaveId: SlaveID, status: Int) {
logInfo("Executor lost: %s, marking slave %s as lost".format(executorId.getValue,
slaveId.getValue))
recordSlaveLost(d, slaveId, ExecutorExited(status, exitCausedByApp = true))
}
override def killTask(taskId: Long, executorId: String, interruptThread: Boolean): Unit = {
mesosDriver.killTask(
TaskID.newBuilder()
.setValue(taskId.toString).build()
)
}
// TODO: query Mesos for number of cores
override def defaultParallelism(): Int = sc.conf.getInt("spark.default.parallelism", 8)
override def applicationId(): String =
Option(appId).getOrElse {
logWarning("Application ID is not initialized yet.")
super.applicationId
}
}
|
ekasitk/spark
|
core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import java.io.{BufferedInputStream, FileNotFoundException, InputStream, IOException, OutputStream}
import java.util.UUID
import java.util.concurrent.{Executors, ExecutorService, TimeUnit}
import java.util.zip.{ZipEntry, ZipOutputStream}
import scala.collection.mutable
import com.google.common.io.ByteStreams
import com.google.common.util.concurrent.{MoreExecutors, ThreadFactoryBuilder}
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
import org.apache.hadoop.hdfs.DistributedFileSystem
import org.apache.hadoop.hdfs.protocol.HdfsConstants
import org.apache.hadoop.security.AccessControlException
import org.apache.spark.{Logging, SecurityManager, SparkConf, SparkException}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.io.CompressionCodec
import org.apache.spark.scheduler._
import org.apache.spark.ui.SparkUI
import org.apache.spark.util.{Clock, SystemClock, ThreadUtils, Utils}
/**
* A class that provides application history from event logs stored in the file system.
* This provider checks for new finished applications in the background periodically and
* renders the history application UI by parsing the associated event logs.
*/
private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock)
extends ApplicationHistoryProvider with Logging {
def this(conf: SparkConf) = {
this(conf, new SystemClock())
}
import FsHistoryProvider._
private val NOT_STARTED = "<Not Started>"
// Interval between safemode checks.
private val SAFEMODE_CHECK_INTERVAL_S = conf.getTimeAsSeconds(
"spark.history.fs.safemodeCheck.interval", "5s")
// Interval between each check for event log updates
private val UPDATE_INTERVAL_S = conf.getTimeAsSeconds("spark.history.fs.update.interval", "10s")
// Interval between each cleaner checks for event logs to delete
private val CLEAN_INTERVAL_S = conf.getTimeAsSeconds("spark.history.fs.cleaner.interval", "1d")
private val logDir = conf.getOption("spark.history.fs.logDirectory")
.map { d => Utils.resolveURI(d).toString }
.getOrElse(DEFAULT_LOG_DIR)
private val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf)
private val fs = Utils.getHadoopFileSystem(logDir, hadoopConf)
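// Example settings for the configuration keys read by this provider (values are illustrative):
//
//   spark.history.fs.logDirectory      hdfs://namenode/shared/spark-logs
//   spark.history.fs.update.interval   10s
//   spark.history.fs.cleaner.enabled   true
//   spark.history.fs.cleaner.interval  1d
//   spark.history.fs.cleaner.maxAge    7d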
// Used by the event-check thread and the log-clean thread.
// The scheduled thread pool size must be one; otherwise the check task and the clean task
// could concurrently modify `fs` and `applications`.
private val pool = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
.setNameFormat("spark-history-task-%d").setDaemon(true).build())
// The modification time of the newest log detected during the last scan. This is used
// to ignore logs that are older during subsequent scans, to avoid processing data that
// is already known.
private var lastScanTime = -1L
// Mapping of application IDs to their metadata, in descending end time order. Apps are inserted
// into the map in order, so the LinkedHashMap maintains the correct ordering.
@volatile private var applications: mutable.LinkedHashMap[String, FsApplicationHistoryInfo]
= new mutable.LinkedHashMap()
// List of application logs to be deleted by event log cleaner.
private var attemptsToClean = new mutable.ListBuffer[FsApplicationAttemptInfo]
/**
* Return a runnable that performs the given operation on the event logs.
* This operation is expected to be executed periodically.
*/
private def getRunner(operateFun: () => Unit): Runnable = {
new Runnable() {
override def run(): Unit = Utils.tryOrExit {
operateFun()
}
}
}
/**
* An Executor to fetch and parse log files.
*/
private val replayExecutor: ExecutorService = {
if (!conf.contains("spark.testing")) {
ThreadUtils.newDaemonSingleThreadExecutor("log-replay-executor")
} else {
MoreExecutors.sameThreadExecutor()
}
}
// Conf option used for testing the initialization code.
val initThread = initialize()
private[history] def initialize(): Thread = {
if (!isFsInSafeMode()) {
startPolling()
null
} else {
startSafeModeCheckThread(None)
}
}
private[history] def startSafeModeCheckThread(
errorHandler: Option[Thread.UncaughtExceptionHandler]): Thread = {
// Cannot probe anything while the FS is in safe mode, so spawn a new thread that will wait
// for the FS to leave safe mode before enabling polling. This allows the main history server
// UI to be shown (so that the user can see the HDFS status).
val initThread = new Thread(new Runnable() {
override def run(): Unit = {
try {
while (isFsInSafeMode()) {
logInfo("HDFS is still in safe mode. Waiting...")
val deadline = clock.getTimeMillis() +
TimeUnit.SECONDS.toMillis(SAFEMODE_CHECK_INTERVAL_S)
clock.waitTillTime(deadline)
}
startPolling()
} catch {
case _: InterruptedException =>
}
}
})
initThread.setDaemon(true)
initThread.setName(s"${getClass().getSimpleName()}-init")
initThread.setUncaughtExceptionHandler(errorHandler.getOrElse(
new Thread.UncaughtExceptionHandler() {
override def uncaughtException(t: Thread, e: Throwable): Unit = {
logError("Error initializing FsHistoryProvider.", e)
System.exit(1)
}
}))
initThread.start()
initThread
}
private def startPolling(): Unit = {
// Validate the log directory.
val path = new Path(logDir)
if (!fs.exists(path)) {
var msg = s"Log directory specified does not exist: $logDir."
if (logDir == DEFAULT_LOG_DIR) {
msg += " Did you configure the correct one through spark.history.fs.logDirectory?"
}
throw new IllegalArgumentException(msg)
}
if (!fs.getFileStatus(path).isDirectory) {
throw new IllegalArgumentException(
"Logging directory specified is not a directory: %s".format(logDir))
}
// Disable the background thread during tests.
if (!conf.contains("spark.testing")) {
// A task that periodically checks for event log updates on disk.
pool.scheduleWithFixedDelay(getRunner(checkForLogs), 0, UPDATE_INTERVAL_S, TimeUnit.SECONDS)
if (conf.getBoolean("spark.history.fs.cleaner.enabled", false)) {
// A task that periodically cleans event logs on disk.
pool.scheduleWithFixedDelay(getRunner(cleanLogs), 0, CLEAN_INTERVAL_S, TimeUnit.SECONDS)
}
}
}
override def getListing(): Iterable[FsApplicationHistoryInfo] = applications.values
override def getAppUI(appId: String, attemptId: Option[String]): Option[SparkUI] = {
try {
applications.get(appId).flatMap { appInfo =>
appInfo.attempts.find(_.attemptId == attemptId).flatMap { attempt =>
val replayBus = new ReplayListenerBus()
val ui = {
val conf = this.conf.clone()
val appSecManager = new SecurityManager(conf)
SparkUI.createHistoryUI(conf, replayBus, appSecManager, appInfo.name,
HistoryServer.getAttemptURI(appId, attempt.attemptId), attempt.startTime)
// Do not call ui.bind() to avoid creating a new server for each application
}
val appListener = new ApplicationEventListener()
replayBus.addListener(appListener)
val appAttemptInfo = replay(fs.getFileStatus(new Path(logDir, attempt.logPath)),
replayBus)
appAttemptInfo.map { info =>
val uiAclsEnabled = conf.getBoolean("spark.history.ui.acls.enable", false)
ui.getSecurityManager.setAcls(uiAclsEnabled)
// make sure to set admin acls before view acls so they are properly picked up
ui.getSecurityManager.setAdminAcls(appListener.adminAcls.getOrElse(""))
ui.getSecurityManager.setViewAcls(attempt.sparkUser,
appListener.viewAcls.getOrElse(""))
ui
}
}
}
} catch {
case e: FileNotFoundException => None
}
}
override def getConfig(): Map[String, String] = {
val safeMode = if (isFsInSafeMode()) {
Map("HDFS State" -> "In safe mode, application logs not available.")
} else {
Map()
}
Map("Event log directory" -> logDir.toString) ++ safeMode
}
override def stop(): Unit = {
if (initThread != null && initThread.isAlive()) {
initThread.interrupt()
initThread.join()
}
}
/**
* Builds the application list based on the current contents of the log directory.
* Tries to reuse as much of the data already in memory as possible, by not reading
* applications that haven't been updated since last time the logs were checked.
*/
private[history] def checkForLogs(): Unit = {
try {
val newLastScanTime = getNewLastScanTime()
val statusList = Option(fs.listStatus(new Path(logDir))).map(_.toSeq)
.getOrElse(Seq[FileStatus]())
val logInfos: Seq[FileStatus] = statusList
.filter { entry =>
try {
!entry.isDirectory() && (entry.getModificationTime() >= lastScanTime)
} catch {
case e: AccessControlException =>
// Do not use "logInfo" since these messages can get pretty noisy if printed on
// every poll.
logDebug(s"No permission to read $entry, ignoring.")
false
}
}
.flatMap { entry => Some(entry) }
.sortWith { case (entry1, entry2) =>
entry1.getModificationTime() >= entry2.getModificationTime()
}
logInfos.grouped(20)
.map { batch =>
replayExecutor.submit(new Runnable {
override def run(): Unit = mergeApplicationListing(batch)
})
}
.foreach { task =>
try {
// Wait for all tasks to finish. This makes sure that checkForLogs
// is not scheduled again while some tasks are already running in
// the replayExecutor.
task.get()
} catch {
case e: InterruptedException =>
throw e
case e: Exception =>
logError("Exception while merging application listings", e)
}
}
lastScanTime = newLastScanTime
} catch {
case e: Exception => logError("Exception in checking for event log updates", e)
}
}
private def getNewLastScanTime(): Long = {
val fileName = "." + UUID.randomUUID().toString
val path = new Path(logDir, fileName)
val fos = fs.create(path)
try {
fos.close()
fs.getFileStatus(path).getModificationTime
} catch {
case e: Exception =>
logError("Exception encountered when attempting to update last scan time", e)
lastScanTime
} finally {
if (!fs.delete(path, true)) {
logWarning(s"Error deleting ${path}")
}
}
}
override def writeEventLogs(
appId: String,
attemptId: Option[String],
zipStream: ZipOutputStream): Unit = {
/**
* This method compresses the files passed in, and writes the compressed data out into the
* [[OutputStream]] passed in. Each file is written as a new [[ZipEntry]] with its name being
* the name of the file being compressed.
*/
def zipFileToStream(file: Path, entryName: String, outputStream: ZipOutputStream): Unit = {
val fs = FileSystem.get(hadoopConf)
val inputStream = fs.open(file, 1 * 1024 * 1024) // 1MB Buffer
try {
outputStream.putNextEntry(new ZipEntry(entryName))
ByteStreams.copy(inputStream, outputStream)
outputStream.closeEntry()
} finally {
inputStream.close()
}
}
applications.get(appId) match {
case Some(appInfo) =>
try {
// If no attempt is specified, or there is no attemptId for attempts, return all attempts
appInfo.attempts.filter { attempt =>
attempt.attemptId.isEmpty || attemptId.isEmpty || attempt.attemptId.get == attemptId.get
}.foreach { attempt =>
val logPath = new Path(logDir, attempt.logPath)
zipFileToStream(new Path(logDir, attempt.logPath), attempt.logPath, zipStream)
}
} finally {
zipStream.close()
}
case None => throw new SparkException(s"Logs for $appId not found.")
}
}
/**
* Replay the log files in the list and merge the list of old applications with new ones
*/
private def mergeApplicationListing(logs: Seq[FileStatus]): Unit = {
val newAttempts = logs.flatMap { fileStatus =>
try {
val bus = new ReplayListenerBus()
val res = replay(fileStatus, bus)
res match {
case Some(r) => logDebug(s"Application log ${r.logPath} loaded successfully.")
case None => logWarning(s"Failed to load application log ${fileStatus.getPath}. " +
"The application may have not started.")
}
res
} catch {
case e: Exception =>
logError(
s"Exception encountered when attempting to load application log ${fileStatus.getPath}",
e)
None
}
}
if (newAttempts.isEmpty) {
return
}
// Build a map containing all apps that contain new attempts. The app information in this map
// contains both the new app attempt, and those that were already loaded in the existing apps
// map. If an attempt has been updated, it replaces the old attempt in the list.
val newAppMap = new mutable.HashMap[String, FsApplicationHistoryInfo]()
newAttempts.foreach { attempt =>
val appInfo = newAppMap.get(attempt.appId)
.orElse(applications.get(attempt.appId))
.map { app =>
val attempts =
app.attempts.filter(_.attemptId != attempt.attemptId).toList ++ List(attempt)
new FsApplicationHistoryInfo(attempt.appId, attempt.name,
attempts.sortWith(compareAttemptInfo))
}
.getOrElse(new FsApplicationHistoryInfo(attempt.appId, attempt.name, List(attempt)))
newAppMap(attempt.appId) = appInfo
}
// Merge the new app list with the existing one, maintaining the expected ordering (descending
// end time). Maintaining the order is important to avoid having to sort the list every time
// there is a request for the log list.
val newApps = newAppMap.values.toSeq.sortWith(compareAppInfo)
val mergedApps = new mutable.LinkedHashMap[String, FsApplicationHistoryInfo]()
def addIfAbsent(info: FsApplicationHistoryInfo): Unit = {
if (!mergedApps.contains(info.id)) {
mergedApps += (info.id -> info)
}
}
val newIterator = newApps.iterator.buffered
val oldIterator = applications.values.iterator.buffered
while (newIterator.hasNext && oldIterator.hasNext) {
if (newAppMap.contains(oldIterator.head.id)) {
oldIterator.next()
} else if (compareAppInfo(newIterator.head, oldIterator.head)) {
addIfAbsent(newIterator.next())
} else {
addIfAbsent(oldIterator.next())
}
}
newIterator.foreach(addIfAbsent)
oldIterator.foreach(addIfAbsent)
applications = mergedApps
}
/**
* Delete event logs from the log directory according to the clean policy defined by the user.
*/
private[history] def cleanLogs(): Unit = {
try {
val maxAge = conf.getTimeAsSeconds("spark.history.fs.cleaner.maxAge", "7d") * 1000
val now = clock.getTimeMillis()
val appsToRetain = new mutable.LinkedHashMap[String, FsApplicationHistoryInfo]()
def shouldClean(attempt: FsApplicationAttemptInfo): Boolean = {
now - attempt.lastUpdated > maxAge && attempt.completed
}
// Scan all logs from the log directory.
// Only completed applications older than the specified max age will be deleted.
applications.values.foreach { app =>
val (toClean, toRetain) = app.attempts.partition(shouldClean)
attemptsToClean ++= toClean
if (toClean.isEmpty) {
appsToRetain += (app.id -> app)
} else if (toRetain.nonEmpty) {
appsToRetain += (app.id ->
new FsApplicationHistoryInfo(app.id, app.name, toRetain.toList))
}
}
applications = appsToRetain
val leftToClean = new mutable.ListBuffer[FsApplicationAttemptInfo]
attemptsToClean.foreach { attempt =>
try {
val path = new Path(logDir, attempt.logPath)
if (fs.exists(path)) {
if (!fs.delete(path, true)) {
logWarning(s"Error deleting ${path}")
}
}
} catch {
case e: AccessControlException =>
logInfo(s"No permission to delete ${attempt.logPath}, ignoring.")
case t: IOException =>
logError(s"IOException in cleaning ${attempt.logPath}", t)
leftToClean += attempt
}
}
attemptsToClean = leftToClean
} catch {
case t: Exception => logError("Exception in cleaning logs", t)
}
}
/**
* Comparison function that defines the sort order for the application listing.
*
* @return Whether `i1` should precede `i2`.
*/
private def compareAppInfo(
i1: FsApplicationHistoryInfo,
i2: FsApplicationHistoryInfo): Boolean = {
val a1 = i1.attempts.head
val a2 = i2.attempts.head
if (a1.endTime != a2.endTime) a1.endTime >= a2.endTime else a1.startTime >= a2.startTime
}
/**
* Comparison function that defines the sort order for application attempts within the same
* application. Order is: attempts are sorted by descending start time.
* The most recent attempt's state matches the current state of the app.
*
* Normally applications should have a single running attempt; but failure to call sc.stop()
* may cause multiple running attempts to show up.
*
* @return Whether `a1` should precede `a2`.
*/
private def compareAttemptInfo(
a1: FsApplicationAttemptInfo,
a2: FsApplicationAttemptInfo): Boolean = {
a1.startTime >= a2.startTime
}
/**
* Replays the events in the specified log file and returns information about the associated
* application. Return `None` if the application ID cannot be located.
*/
private def replay(
eventLog: FileStatus,
bus: ReplayListenerBus): Option[FsApplicationAttemptInfo] = {
val logPath = eventLog.getPath()
logInfo(s"Replaying log path: $logPath")
val logInput = EventLoggingListener.openEventLog(logPath, fs)
try {
val appListener = new ApplicationEventListener
val appCompleted = isApplicationCompleted(eventLog)
bus.addListener(appListener)
bus.replay(logInput, logPath.toString, !appCompleted)
// Without an app ID, new logs will render incorrectly in the listing page, so do not list or
// try to show their UI.
if (appListener.appId.isDefined) {
Some(new FsApplicationAttemptInfo(
logPath.getName(),
appListener.appName.getOrElse(NOT_STARTED),
appListener.appId.getOrElse(logPath.getName()),
appListener.appAttemptId,
appListener.startTime.getOrElse(-1L),
appListener.endTime.getOrElse(-1L),
eventLog.getModificationTime(),
appListener.sparkUser.getOrElse(NOT_STARTED),
appCompleted))
} else {
None
}
} finally {
logInput.close()
}
}
/**
* Return true when the application has completed.
*/
private def isApplicationCompleted(entry: FileStatus): Boolean = {
!entry.getPath().getName().endsWith(EventLoggingListener.IN_PROGRESS)
}
/**
* Checks whether HDFS is in safe mode.
*
* Note that DistributedFileSystem is a `@LimitedPrivate` class, which for all practical reasons
* makes it more public than not.
*/
private[history] def isFsInSafeMode(): Boolean = fs match {
case dfs: DistributedFileSystem =>
isFsInSafeMode(dfs)
case _ =>
false
}
// For testing.
private[history] def isFsInSafeMode(dfs: DistributedFileSystem): Boolean = {
dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET)
}
}
private[history] object FsHistoryProvider {
val DEFAULT_LOG_DIR = "file:/tmp/spark-events"
}
private class FsApplicationAttemptInfo(
val logPath: String,
val name: String,
val appId: String,
attemptId: Option[String],
startTime: Long,
endTime: Long,
lastUpdated: Long,
sparkUser: String,
completed: Boolean = true)
extends ApplicationAttemptInfo(
attemptId, startTime, endTime, lastUpdated, sparkUser, completed)
private class FsApplicationHistoryInfo(
id: String,
override val name: String,
override val attempts: List[FsApplicationAttemptInfo])
extends ApplicationHistoryInfo(id, name, attempts)
|
ekasitk/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
|
<reponame>ekasitk/spark
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.metric
import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import scala.collection.mutable
import org.apache.xbean.asm5._
import org.apache.xbean.asm5.Opcodes._
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql._
import org.apache.spark.sql.execution.SparkPlanInfo
import org.apache.spark.sql.execution.ui.SparkPlanGraph
import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.util.{JsonProtocol, Utils}
class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
import testImplicits._
test("LongSQLMetric should not box Long") {
val l = SQLMetrics.createLongMetric(sparkContext, "long")
val f = () => {
l += 1L
l.add(1L)
}
val cl = BoxingFinder.getClassReader(f.getClass)
val boxingFinder = new BoxingFinder()
cl.accept(boxingFinder, 0)
assert(boxingFinder.boxingInvokes.isEmpty, s"Found boxing: ${boxingFinder.boxingInvokes}")
}
test("Normal accumulator should do boxing") {
// We need this test to make sure BoxingFinder works.
val l = sparkContext.accumulator(0L)
val f = () => { l += 1L }
val cl = BoxingFinder.getClassReader(f.getClass)
val boxingFinder = new BoxingFinder()
cl.accept(boxingFinder, 0)
assert(boxingFinder.boxingInvokes.nonEmpty, "Found find boxing in this test")
}
/**
* Call `df.collect()` and verify that the collected metrics are the same as `expectedMetrics`.
*
* @param df `DataFrame` to run
* @param expectedNumOfJobs number of jobs that will run
* @param expectedMetrics the expected metrics. The format is
* `nodeId -> (operatorName, metric name -> metric value)`.
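*
* For example (mirroring the Filter test below):
* {{{
*   testSparkPlanMetrics(df, 1, Map(
*     0L -> ("Filter", Map(
*       "number of input rows" -> 2L,
*       "number of output rows" -> 1L))))
* }}}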
*/
private def testSparkPlanMetrics(
df: DataFrame,
expectedNumOfJobs: Int,
expectedMetrics: Map[Long, (String, Map[String, Any])]): Unit = {
val previousExecutionIds = sqlContext.listener.executionIdToData.keySet
withSQLConf("spark.sql.codegen.wholeStage" -> "false") {
df.collect()
}
sparkContext.listenerBus.waitUntilEmpty(10000)
val executionIds = sqlContext.listener.executionIdToData.keySet.diff(previousExecutionIds)
assert(executionIds.size === 1)
val executionId = executionIds.head
val jobs = sqlContext.listener.getExecution(executionId).get.jobs
// Use "<=" because there is a race condition that we may miss some jobs
// TODO Change it to "=" once we fix the race condition that missing the JobStarted event.
assert(jobs.size <= expectedNumOfJobs)
if (jobs.size == expectedNumOfJobs) {
// If we can track all jobs, check the metric values
val metricValues = sqlContext.listener.getExecutionMetrics(executionId)
val actualMetrics = SparkPlanGraph(SparkPlanInfo.fromSparkPlan(
df.queryExecution.executedPlan)).allNodes.filter { node =>
expectedMetrics.contains(node.id)
}.map { node =>
val nodeMetrics = node.metrics.map { metric =>
val metricValue = metricValues(metric.accumulatorId)
(metric.name, metricValue)
}.toMap
(node.id, node.name -> nodeMetrics)
}.toMap
assert(expectedMetrics.keySet === actualMetrics.keySet)
for (nodeId <- expectedMetrics.keySet) {
val (expectedNodeName, expectedMetricsMap) = expectedMetrics(nodeId)
val (actualNodeName, actualMetricsMap) = actualMetrics(nodeId)
assert(expectedNodeName === actualNodeName)
for (metricName <- expectedMetricsMap.keySet) {
assert(expectedMetricsMap(metricName).toString === actualMetricsMap(metricName))
}
}
} else {
// TODO Remove this "else" once we fix the race condition that missing the JobStarted event.
// Since we cannot track all jobs, the metric values could be wrong and we should not check
// them.
logWarning("Due to a race condition, we miss some jobs and cannot verify the metric values")
}
}
test("Project metrics") {
// Assume the execution plan is
// PhysicalRDD(nodeId = 1) -> Project(nodeId = 0)
val df = person.select('name)
testSparkPlanMetrics(df, 1, Map(
0L -> ("Project", Map(
"number of rows" -> 2L)))
)
}
test("Filter metrics") {
// Assume the execution plan is
// PhysicalRDD(nodeId = 1) -> Filter(nodeId = 0)
val df = person.filter('age < 25)
testSparkPlanMetrics(df, 1, Map(
0L -> ("Filter", Map(
"number of input rows" -> 2L,
"number of output rows" -> 1L)))
)
}
test("WholeStageCodegen metrics") {
// Assume the execution plan is
// WholeStageCodegen(nodeId = 0, Range(nodeId = 2) -> Filter(nodeId = 1))
// TODO: update metrics in generated operators
val df = sqlContext.range(10).filter('id < 5)
testSparkPlanMetrics(df, 1, Map.empty)
}
test("TungstenAggregate metrics") {
// Assume the execution plan is
// ... -> TungstenAggregate(nodeId = 2) -> Exchange(nodeId = 1)
// -> TungstenAggregate(nodeId = 0)
val df = testData2.groupBy().count() // 2 partitions
testSparkPlanMetrics(df, 1, Map(
2L -> ("TungstenAggregate", Map(
"number of input rows" -> 6L,
"number of output rows" -> 2L)),
0L -> ("TungstenAggregate", Map(
"number of input rows" -> 2L,
"number of output rows" -> 1L)))
)
// 2 partitions and each partition contains 2 keys
val df2 = testData2.groupBy('a).count()
testSparkPlanMetrics(df2, 1, Map(
2L -> ("TungstenAggregate", Map(
"number of input rows" -> 6L,
"number of output rows" -> 4L)),
0L -> ("TungstenAggregate", Map(
"number of input rows" -> 4L,
"number of output rows" -> 3L)))
)
}
test("SortMergeJoin metrics") {
// Because SortMergeJoin may skip different rows if the number of partitions is different, this
// test should use a deterministic number of partitions.
val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
testDataForJoin.registerTempTable("testDataForJoin")
withTempTable("testDataForJoin") {
// Assume the execution plan is
// ... -> SortMergeJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
val df = sqlContext.sql(
"SELECT * FROM testData2 JOIN testDataForJoin ON testData2.a = testDataForJoin.a")
testSparkPlanMetrics(df, 1, Map(
1L -> ("SortMergeJoin", Map(
// It's 4 because we only read 3 rows in the first partition and 1 row in the second one
"number of left rows" -> 4L,
"number of right rows" -> 2L,
"number of output rows" -> 4L)))
)
}
}
test("SortMergeOuterJoin metrics") {
// Because SortMergeOuterJoin may skip different rows if the number of partitions is different,
// this test should use a deterministic number of partitions.
val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
testDataForJoin.registerTempTable("testDataForJoin")
withTempTable("testDataForJoin") {
// Assume the execution plan is
// ... -> SortMergeOuterJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
val df = sqlContext.sql(
"SELECT * FROM testData2 left JOIN testDataForJoin ON testData2.a = testDataForJoin.a")
testSparkPlanMetrics(df, 1, Map(
1L -> ("SortMergeOuterJoin", Map(
// The left side is the full testData2 (6 rows); the right side (testDataForJoin) has 2 rows
"number of left rows" -> 6L,
"number of right rows" -> 2L,
"number of output rows" -> 8L)))
)
val df2 = sqlContext.sql(
"SELECT * FROM testDataForJoin right JOIN testData2 ON testData2.a = testDataForJoin.a")
testSparkPlanMetrics(df2, 1, Map(
1L -> ("SortMergeOuterJoin", Map(
// The left side (testDataForJoin) has 2 rows; the right side is the full testData2 (6 rows)
"number of left rows" -> 2L,
"number of right rows" -> 6L,
"number of output rows" -> 8L)))
)
}
}
test("BroadcastHashJoin metrics") {
val df1 = Seq((1, "1"), (2, "2")).toDF("key", "value")
val df2 = Seq((1, "1"), (2, "2"), (3, "3"), (4, "4")).toDF("key", "value")
// Assume the execution plan is
// ... -> BroadcastHashJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
val df = df1.join(broadcast(df2), "key")
testSparkPlanMetrics(df, 2, Map(
1L -> ("BroadcastHashJoin", Map(
"number of left rows" -> 2L,
"number of right rows" -> 4L,
"number of output rows" -> 2L)))
)
}
test("BroadcastHashOuterJoin metrics") {
val df1 = Seq((1, "a"), (1, "b"), (4, "c")).toDF("key", "value")
val df2 = Seq((1, "a"), (1, "b"), (2, "c"), (3, "d")).toDF("key2", "value")
// Assume the execution plan is
// ... -> BroadcastHashOuterJoin(nodeId = 0)
val df = df1.join(broadcast(df2), $"key" === $"key2", "left_outer")
testSparkPlanMetrics(df, 2, Map(
0L -> ("BroadcastHashOuterJoin", Map(
"number of left rows" -> 3L,
"number of right rows" -> 4L,
"number of output rows" -> 5L)))
)
val df3 = df1.join(broadcast(df2), $"key" === $"key2", "right_outer")
testSparkPlanMetrics(df3, 2, Map(
0L -> ("BroadcastHashOuterJoin", Map(
"number of left rows" -> 3L,
"number of right rows" -> 4L,
"number of output rows" -> 6L)))
)
}
test("BroadcastNestedLoopJoin metrics") {
val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
testDataForJoin.registerTempTable("testDataForJoin")
withTempTable("testDataForJoin") {
// Assume the execution plan is
// ... -> BroadcastNestedLoopJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
val df = sqlContext.sql(
"SELECT * FROM testData2 left JOIN testDataForJoin ON " +
"testData2.a * testDataForJoin.a != testData2.a + testDataForJoin.a")
testSparkPlanMetrics(df, 3, Map(
1L -> ("BroadcastNestedLoopJoin", Map(
"number of left rows" -> 12L, // left needs to be scanned twice
"number of right rows" -> 2L,
"number of output rows" -> 12L)))
)
}
}
test("BroadcastLeftSemiJoinHash metrics") {
val df1 = Seq((1, "1"), (2, "2")).toDF("key", "value")
val df2 = Seq((1, "1"), (2, "2"), (3, "3"), (4, "4")).toDF("key2", "value")
// Assume the execution plan is
// ... -> BroadcastLeftSemiJoinHash(nodeId = 0)
val df = df1.join(broadcast(df2), $"key" === $"key2", "leftsemi")
testSparkPlanMetrics(df, 2, Map(
0L -> ("BroadcastLeftSemiJoinHash", Map(
"number of left rows" -> 2L,
"number of right rows" -> 4L,
"number of output rows" -> 2L)))
)
}
test("LeftSemiJoinHash metrics") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "0") {
val df1 = Seq((1, "1"), (2, "2")).toDF("key", "value")
val df2 = Seq((1, "1"), (2, "2"), (3, "3"), (4, "4")).toDF("key2", "value")
// Assume the execution plan is
// ... -> LeftSemiJoinHash(nodeId = 0)
val df = df1.join(df2, $"key" === $"key2", "leftsemi")
testSparkPlanMetrics(df, 1, Map(
0L -> ("LeftSemiJoinHash", Map(
"number of left rows" -> 2L,
"number of right rows" -> 4L,
"number of output rows" -> 2L)))
)
}
}
test("LeftSemiJoinBNL metrics") {
val df1 = Seq((1, "1"), (2, "2")).toDF("key", "value")
val df2 = Seq((1, "1"), (2, "2"), (3, "3"), (4, "4")).toDF("key2", "value")
// Assume the execution plan is
// ... -> LeftSemiJoinBNL(nodeId = 0)
val df = df1.join(df2, $"key" < $"key2", "leftsemi")
testSparkPlanMetrics(df, 2, Map(
0L -> ("LeftSemiJoinBNL", Map(
"number of left rows" -> 2L,
"number of right rows" -> 4L,
"number of output rows" -> 2L)))
)
}
test("CartesianProduct metrics") {
val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
testDataForJoin.registerTempTable("testDataForJoin")
withTempTable("testDataForJoin") {
// Assume the execution plan is
// ... -> CartesianProduct(nodeId = 1) -> TungstenProject(nodeId = 0)
val df = sqlContext.sql(
"SELECT * FROM testData2 JOIN testDataForJoin")
testSparkPlanMetrics(df, 1, Map(
1L -> ("CartesianProduct", Map(
"number of left rows" -> 12L, // left needs to be scanned twice
"number of right rows" -> 4L, // right is read twice
"number of output rows" -> 12L)))
)
}
}
test("save metrics") {
withTempPath { file =>
withSQLConf("spark.sql.codegen.wholeStage" -> "false") {
val previousExecutionIds = sqlContext.listener.executionIdToData.keySet
// Assume the execution plan is
// PhysicalRDD(nodeId = 0)
person.select('name).write.format("json").save(file.getAbsolutePath)
sparkContext.listenerBus.waitUntilEmpty(10000)
val executionIds = sqlContext.listener.executionIdToData.keySet.diff(previousExecutionIds)
assert(executionIds.size === 1)
val executionId = executionIds.head
val jobs = sqlContext.listener.getExecution(executionId).get.jobs
// Use "<=" because there is a race condition that we may miss some jobs
// TODO Change "<=" to "=" once we fix the race condition that missing the JobStarted event.
assert(jobs.size <= 1)
val metricValues = sqlContext.listener.getExecutionMetrics(executionId)
// Because "save" will create a new DataFrame internally, we cannot get the real metric id.
// However, we still can check the value.
assert(metricValues.values.toSeq === Seq("2"))
}
}
}
test("metrics can be loaded by history server") {
val metric = new LongSQLMetric("zanzibar", LongSQLMetricParam)
metric += 10L
val metricInfo = metric.toInfo(Some(metric.localValue), None)
metricInfo.update match {
case Some(v: LongSQLMetricValue) => assert(v.value === 10L)
case Some(v) => fail(s"metric value was not a LongSQLMetricValue: ${v.getClass.getName}")
case _ => fail("metric update is missing")
}
assert(metricInfo.metadata === Some(SQLMetrics.ACCUM_IDENTIFIER))
// After serializing to JSON, the original value type is lost, but we can still
// identify that it's a SQL metric from the metadata
val metricInfoJson = JsonProtocol.accumulableInfoToJson(metricInfo)
val metricInfoDeser = JsonProtocol.accumulableInfoFromJson(metricInfoJson)
metricInfoDeser.update match {
case Some(v: String) => assert(v.toLong === 10L)
case Some(v) => fail(s"deserialized metric value was not a string: ${v.getClass.getName}")
case _ => fail("deserialized metric update is missing")
}
assert(metricInfoDeser.metadata === Some(SQLMetrics.ACCUM_IDENTIFIER))
}
}
private case class MethodIdentifier[T](cls: Class[T], name: String, desc: String)
/**
* If `method` is null, recursively searches all methods of this class to find any that do boxing.
* If `method` is specified, only searches that method to speed up the search.
*
* Methods already in `visitedMethods` are skipped to avoid potential infinite cycles.
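*
* A typical use, mirroring the tests above:
* {{{
*   val cl = BoxingFinder.getClassReader(f.getClass)
*   val finder = new BoxingFinder()
*   cl.accept(finder, 0)
*   // finder.boxingInvokes now contains any boxing call sites that were found
* }}}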
*/
private class BoxingFinder(
method: MethodIdentifier[_] = null,
val boxingInvokes: mutable.Set[String] = mutable.Set.empty,
visitedMethods: mutable.Set[MethodIdentifier[_]] = mutable.Set.empty)
extends ClassVisitor(ASM5) {
private val primitiveBoxingClassName =
Set("java/lang/Long",
"java/lang/Double",
"java/lang/Integer",
"java/lang/Float",
"java/lang/Short",
"java/lang/Character",
"java/lang/Byte",
"java/lang/Boolean")
override def visitMethod(
access: Int, name: String, desc: String, sig: String, exceptions: Array[String]):
MethodVisitor = {
if (method != null && (method.name != name || method.desc != desc)) {
// If method is specified, skip other methods.
return new MethodVisitor(ASM5) {}
}
new MethodVisitor(ASM5) {
override def visitMethodInsn(
op: Int, owner: String, name: String, desc: String, itf: Boolean) {
if (op == INVOKESPECIAL && name == "<init>" || op == INVOKESTATIC && name == "valueOf") {
if (primitiveBoxingClassName.contains(owner)) {
// Find boxing methods, e.g, new java.lang.Long(l) or java.lang.Long.valueOf(l)
boxingInvokes.add(s"$owner.$name")
}
} else {
// scalastyle:off classforname
val classOfMethodOwner = Class.forName(owner.replace('/', '.'), false,
Thread.currentThread.getContextClassLoader)
// scalastyle:on classforname
val m = MethodIdentifier(classOfMethodOwner, name, desc)
if (!visitedMethods.contains(m)) {
// Keep track of visited methods to avoid potential infinite cycles
visitedMethods += m
val cl = BoxingFinder.getClassReader(classOfMethodOwner)
cl.accept(new BoxingFinder(m, boxingInvokes, visitedMethods), 0)
}
}
}
}
}
}
private object BoxingFinder {
def getClassReader(cls: Class[_]): ClassReader = {
val className = cls.getName.replaceFirst("^.*\\.", "") + ".class"
val resourceStream = cls.getResourceAsStream(className)
val baos = new ByteArrayOutputStream(128)
// Copy data over, before delegating to ClassReader -
// else we can run out of open file handles.
Utils.copyStream(resourceStream, baos, true)
new ClassReader(new ByteArrayInputStream(baos.toByteArray))
}
}
|
ekasitk/spark
|
sql/core/src/test/scala/org/apache/spark/sql/StreamTest.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.lang.Thread.UncaughtExceptionHandler
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.util.Random
import org.scalatest.concurrent.Timeouts
import org.scalatest.time.SpanSugar._
import org.apache.spark.sql.catalyst.encoders.{encoderFor, ExpressionEncoder, RowEncoder}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.execution.streaming._
/**
* A framework for implementing tests for streaming queries and sources.
*
* A test consists of a set of steps (expressed as a `StreamAction`) that are executed in order,
* blocking as necessary to let the stream catch up. For example, the following adds some data to
* a stream, blocking until it can verify that the correct values are eventually produced.
*
* {{{
* val inputData = MemoryStream[Int]
* val mapped = inputData.toDS().map(_ + 1)
* testStream(mapped)(
*   AddData(inputData, 1, 2, 3),
*   CheckAnswer(2, 3, 4))
* }}}
*
* Note that while we do sleep to allow the other thread to progress without spinning,
* `StreamAction` checks should not depend on the amount of time spent sleeping. Instead they
* should check the actual progress of the stream before verifying the required test condition.
*
* Currently it is assumed that all streaming queries will eventually complete in 10 seconds to
* avoid hanging forever in the case of failures. However, individual suites can change this
* by overriding `streamingTimeout`.
*/
trait StreamTest extends QueryTest with Timeouts {
implicit class RichSource(s: Source) {
def toDF(): DataFrame = new DataFrame(sqlContext, StreamingRelation(s))
}
/** How long to wait for an active stream to catch up when checking a result. */
val streamingTimeout = 10.seconds
/** A trait for actions that can be performed while testing a streaming DataFrame. */
trait StreamAction
/** A trait to mark actions that require the stream to be actively running. */
trait StreamMustBeRunning
/**
* Adds the given data to the stream. Subsequent check answers will block until this data has
* been processed.
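*
* For example: `AddData(inputData, 1, 2, 3)`, as in the class-level example above.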
*/
object AddData {
def apply[A](source: MemoryStream[A], data: A*): AddDataMemory[A] =
AddDataMemory(source, data)
}
/** A trait that can be extended when testing other sources. */
trait AddData extends StreamAction {
def source: Source
/**
* Called to trigger adding the data. Should return the offset that will denote when this
* new data has been processed.
*/
def addData(): Offset
}
case class AddDataMemory[A](source: MemoryStream[A], data: Seq[A]) extends AddData {
override def toString: String = s"AddData to $source: ${data.mkString(",")}"
override def addData(): Offset = {
source.addData(data)
}
}
/**
* Checks to make sure that the current data stored in the sink matches the `expectedAnswer`.
* This operation automatically blocks until all added data has been processed.
*/
object CheckAnswer {
def apply[A : Encoder](data: A*): CheckAnswerRows = {
val encoder = encoderFor[A]
val toExternalRow = RowEncoder(encoder.schema)
CheckAnswerRows(data.map(d => toExternalRow.fromRow(encoder.toRow(d))))
}
def apply(rows: Row*): CheckAnswerRows = CheckAnswerRows(rows)
}
case class CheckAnswerRows(expectedAnswer: Seq[Row])
extends StreamAction with StreamMustBeRunning {
override def toString: String = s"CheckAnswer: ${expectedAnswer.mkString(",")}"
}
case class DropBatches(num: Int) extends StreamAction
/** Stops the stream. It must currently be running. */
case object StopStream extends StreamAction with StreamMustBeRunning
/** Starts the stream, resuming if data has already been processed. It must not be running. */
case object StartStream extends StreamAction
/** Signals that a failure is expected and should not kill the test. */
case object ExpectFailure extends StreamAction
/** A helper for running actions on a Streaming Dataset. See `checkAnswer(DataFrame)`. */
def testStream(stream: Dataset[_])(actions: StreamAction*): Unit =
testStream(stream.toDF())(actions: _*)
/**
* Executes the specified actions on the given streaming DataFrame and provides helpful
* error messages in the case of failures or incorrect answers.
*
* Note that if the stream is not explicitly started before an action that requires it to be
* running then it will be automatically started before performing any other actions.
*/
def testStream(stream: DataFrame)(actions: StreamAction*): Unit = {
var pos = 0
var currentPlan: LogicalPlan = stream.logicalPlan
var currentStream: StreamExecution = null
val awaiting = new mutable.HashMap[Source, Offset]()
val sink = new MemorySink(stream.schema)
@volatile
var streamDeathCause: Throwable = null
// If the test doesn't manually start the stream, we do it automatically at the beginning.
val startedManually =
actions.takeWhile(!_.isInstanceOf[StreamMustBeRunning]).contains(StartStream)
val startedTest = if (startedManually) actions else StartStream +: actions
def testActions = actions.zipWithIndex.map {
case (a, i) =>
if ((pos == i && startedManually) || (pos == (i + 1) && !startedManually)) {
"=> " + a.toString
} else {
" " + a.toString
}
}.mkString("\n")
def currentOffsets =
if (currentStream != null) currentStream.streamProgress.toString else "not started"
def threadState =
if (currentStream != null && currentStream.microBatchThread.isAlive) "alive" else "dead"
def testState =
s"""
|== Progress ==
|$testActions
|
|== Stream ==
|Stream state: $currentOffsets
|Thread state: $threadState
|${if (streamDeathCause != null) stackTraceToString(streamDeathCause) else ""}
|
|== Sink ==
|$sink
|
|== Plan ==
|${if (currentStream != null) currentStream.lastExecution else ""}
"""
def checkState(check: Boolean, error: String) = if (!check) {
fail(
s"""
|Invalid State: $error
|$testState
""".stripMargin)
}
val testThread = Thread.currentThread()
try {
startedTest.foreach { action =>
action match {
case StartStream =>
checkState(currentStream == null, "stream already running")
currentStream = new StreamExecution(sqlContext, stream.logicalPlan, sink)
currentStream.microBatchThread.setUncaughtExceptionHandler(
new UncaughtExceptionHandler {
override def uncaughtException(t: Thread, e: Throwable): Unit = {
streamDeathCause = e
testThread.interrupt()
}
})
case StopStream =>
checkState(currentStream != null, "can not stop a stream that is not running")
currentStream.stop()
currentStream = null
case DropBatches(num) =>
checkState(currentStream == null, "dropping batches while running leads to corruption")
sink.dropBatches(num)
case ExpectFailure =>
try failAfter(streamingTimeout) {
while (streamDeathCause == null) {
Thread.sleep(100)
}
} catch {
case _: InterruptedException =>
case _: org.scalatest.exceptions.TestFailedDueToTimeoutException =>
fail(
s"""
|Timed out while waiting for failure.
|$testState
""".stripMargin)
}
currentStream = null
streamDeathCause = null
case a: AddData =>
awaiting.put(a.source, a.addData())
case CheckAnswerRows(expectedAnswer) =>
checkState(currentStream != null, "stream not running")
// Block until all data added has been processed
awaiting.foreach { case (source, offset) =>
failAfter(streamingTimeout) {
currentStream.awaitOffset(source, offset)
}
}
val allData = try sink.allData catch {
case e: Exception =>
fail(
s"""
|Exception while getting data from sink $e
|$testState
""".stripMargin)
}
QueryTest.sameRows(expectedAnswer, allData).foreach {
error => fail(
s"""
|$error
|$testState
""".stripMargin)
}
}
pos += 1
}
} catch {
case _: InterruptedException if streamDeathCause != null =>
fail(
s"""
|Stream Thread Died
|$testState
""".stripMargin)
case _: org.scalatest.exceptions.TestFailedDueToTimeoutException =>
fail(
s"""
|Timed out waiting for stream
|$testState
""".stripMargin)
} finally {
if (currentStream != null && currentStream.microBatchThread.isAlive) {
currentStream.stop()
}
}
}
/**
* Creates a stress test that randomly starts/stops/adds data/checks the result.
*
* @param ds a Dataset that executes + 1 on a stream of integers, returning the result
* @param addData an add data action that adds the given numbers to the stream, encoding them
* as needed
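*
* For example (an illustrative sketch, reusing `inputData` and `mapped` from the class-level
* example above):
* {{{
*   runStressTest(mapped, data => AddData(inputData, data: _*))
* }}}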
*/
def runStressTest(
ds: Dataset[Int],
addData: Seq[Int] => StreamAction,
iterations: Int = 100): Unit = {
implicit val intEncoder = ExpressionEncoder[Int]()
var dataPos = 0
var running = true
val actions = new ArrayBuffer[StreamAction]()
def addCheck() = { actions += CheckAnswer(1 to dataPos: _*) }
def addRandomData() = {
val numItems = Random.nextInt(10)
val data = dataPos until (dataPos + numItems)
dataPos += numItems
actions += addData(data)
}
(1 to iterations).foreach { i =>
val rand = Random.nextDouble()
if (!running) {
rand match {
case r if r < 0.7 => // AddData
addRandomData()
case _ => // StartStream
actions += StartStream
running = true
}
} else {
rand match {
case r if r < 0.1 =>
addCheck()
case r if r < 0.7 => // AddData
addRandomData()
case _ => // StartStream
actions += StopStream
running = false
}
}
}
if (!running) { actions += StartStream }
addCheck()
testStream(ds)(actions: _*)
}
}
|
ekasitk/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoin.scala
|
<reponame>ekasitk/spark
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import java.util.NoSuchElementException
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.metric.LongSQLMetric
trait HashJoin {
self: SparkPlan =>
val leftKeys: Seq[Expression]
val rightKeys: Seq[Expression]
val buildSide: BuildSide
val condition: Option[Expression]
val left: SparkPlan
val right: SparkPlan
protected lazy val (buildPlan, streamedPlan) = buildSide match {
case BuildLeft => (left, right)
case BuildRight => (right, left)
}
protected lazy val (buildKeys, streamedKeys) = buildSide match {
case BuildLeft => (leftKeys, rightKeys)
case BuildRight => (rightKeys, leftKeys)
}
override def output: Seq[Attribute] = left.output ++ right.output
protected def buildSideKeyGenerator: Projection =
UnsafeProjection.create(buildKeys, buildPlan.output)
protected def streamSideKeyGenerator: Projection =
UnsafeProjection.create(streamedKeys, streamedPlan.output)
@transient private[this] lazy val boundCondition = if (condition.isDefined) {
newPredicate(condition.getOrElse(Literal(true)), left.output ++ right.output)
} else {
(r: InternalRow) => true
}
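/**
* Joins the streamed rows against the pre-built `hashedRelation`, returning an iterator of
* joined rows that satisfy the optional join condition. The two metric accumulators are
* updated as streamed rows are consumed and output rows are produced.
*/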
protected def hashJoin(
streamIter: Iterator[InternalRow],
numStreamRows: LongSQLMetric,
hashedRelation: HashedRelation,
numOutputRows: LongSQLMetric): Iterator[InternalRow] =
{
new Iterator[InternalRow] {
private[this] var currentStreamedRow: InternalRow = _
private[this] var currentHashMatches: Seq[InternalRow] = _
private[this] var currentMatchPosition: Int = -1
// Mutable per row objects.
private[this] val joinRow = new JoinedRow
private[this] val resultProjection: (InternalRow) => InternalRow =
UnsafeProjection.create(self.schema)
private[this] val joinKeys = streamSideKeyGenerator
override final def hasNext: Boolean = {
while (true) {
// check if it's end of current matches
if (currentHashMatches != null && currentMatchPosition == currentHashMatches.length) {
currentHashMatches = null
currentMatchPosition = -1
}
// find the next match
while (currentHashMatches == null && streamIter.hasNext) {
currentStreamedRow = streamIter.next()
numStreamRows += 1
val key = joinKeys(currentStreamedRow)
if (!key.anyNull) {
currentHashMatches = hashedRelation.get(key)
if (currentHashMatches != null) {
currentMatchPosition = 0
}
}
}
if (currentHashMatches == null) {
return false
}
// found some matches
buildSide match {
case BuildRight => joinRow(currentStreamedRow, currentHashMatches(currentMatchPosition))
case BuildLeft => joinRow(currentHashMatches(currentMatchPosition), currentStreamedRow)
}
if (boundCondition(joinRow)) {
return true
} else {
currentMatchPosition += 1
}
}
false // unreachable
}
override final def next(): InternalRow = {
// next() could be called without calling hasNext()
if (hasNext) {
currentMatchPosition += 1
numOutputRows += 1
resultProjection(joinRow)
} else {
throw new NoSuchElementException
}
}
}
}
}
|
ekasitk/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
|
<filename>sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala<gh_stars>1-10
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import java.util.Random
import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.serializer.Serializer
import org.apache.spark.shuffle.hash.HashShuffleManager
import org.apache.spark.shuffle.sort.SortShuffleManager
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.errors.attachTree
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.util.MutablePair
/**
* Performs a shuffle that will result in the desired `newPartitioning`.
*/
case class Exchange(
var newPartitioning: Partitioning,
child: SparkPlan,
@transient coordinator: Option[ExchangeCoordinator]) extends UnaryNode {
override def nodeName: String = {
val extraInfo = coordinator match {
case Some(exchangeCoordinator) =>
s"(coordinator id: ${System.identityHashCode(coordinator)})"
case None => ""
}
val simpleNodeName = "Exchange"
s"$simpleNodeName$extraInfo"
}
override def outputPartitioning: Partitioning = newPartitioning
override def output: Seq[Attribute] = child.output
/**
* Determines whether records must be defensively copied before being sent to the shuffle.
* Several of Spark's shuffle components will buffer deserialized Java objects in memory. The
* shuffle code assumes that objects are immutable and hence does not perform its own defensive
* copying. In Spark SQL, however, operators' iterators return the same mutable `Row` object. In
* order to properly shuffle the output of these operators, we need to perform our own copying
* prior to sending records to the shuffle. This copying is expensive, so we try to avoid it
* whenever possible. This method encapsulates the logic for choosing when to copy.
*
* In the long run, we might want to push this logic into core's shuffle APIs so that we don't
* have to rely on knowledge of core internals here in SQL.
*
* See SPARK-2967, SPARK-4479, and SPARK-7375 for more discussion of this issue.
*
* @param partitioner the partitioner for the shuffle
* @param serializer the serializer that will be used to write rows
* @return true if rows should be copied before being shuffled, false otherwise
*/
private def needToCopyObjectsBeforeShuffle(
partitioner: Partitioner,
serializer: Serializer): Boolean = {
// Note: even though we only use the partitioner's `numPartitions` field, we require it to be
// passed instead of directly passing the number of partitions in order to guard against
// corner-cases where a partitioner constructed with `numPartitions` partitions may output
// fewer partitions (like RangePartitioner, for example).
val conf = child.sqlContext.sparkContext.conf
val shuffleManager = SparkEnv.get.shuffleManager
val sortBasedShuffleOn = shuffleManager.isInstanceOf[SortShuffleManager]
val bypassMergeThreshold = conf.getInt("spark.shuffle.sort.bypassMergeThreshold", 200)
if (sortBasedShuffleOn) {
val bypassIsSupported = SparkEnv.get.shuffleManager.isInstanceOf[SortShuffleManager]
if (bypassIsSupported && partitioner.numPartitions <= bypassMergeThreshold) {
// If we're using the original SortShuffleManager and the number of output partitions is
// sufficiently small, then Spark will fall back to the hash-based shuffle write path, which
// doesn't buffer deserialized records.
// Note that we'll have to remove this case if we fix SPARK-6026 and remove this bypass.
false
} else if (serializer.supportsRelocationOfSerializedObjects) {
// SPARK-4550 and SPARK-7081 extended sort-based shuffle to serialize individual records
// prior to sorting them. This optimization is only applied in cases where shuffle
// dependency does not specify an aggregator or ordering and the record serializer has
// certain properties. If this optimization is enabled, we can safely avoid the copy.
//
// Exchange never configures its ShuffledRDDs with aggregators or key orderings, so we only
// need to check whether the optimization is enabled and supported by our serializer.
false
} else {
// Spark's SortShuffleManager uses `ExternalSorter` to buffer records in memory, so we must
// copy.
true
}
} else if (shuffleManager.isInstanceOf[HashShuffleManager]) {
// We're using hash-based shuffle, so we don't need to copy.
false
} else {
// Catch-all case to safely handle any future ShuffleManager implementations.
true
}
}
private val serializer: Serializer = new UnsafeRowSerializer(child.output.size)
override protected def doPrepare(): Unit = {
// If an ExchangeCoordinator is needed, we register this Exchange operator
// to the coordinator when we do prepare. It is important to make sure
// we register this operator right before execution instead of registering it
// in the constructor because it is possible that we create new instances of
// Exchange operators when we transform the physical plan
// (then the ExchangeCoordinator would hold references to unneeded Exchanges).
// So, we should only call registerExchange just before we start to execute
// the plan.
coordinator match {
case Some(exchangeCoordinator) => exchangeCoordinator.registerExchange(this)
case None =>
}
}
/**
* Returns a [[ShuffleDependency]] that will partition rows of its child based on
* the partitioning scheme defined in `newPartitioning`. Those partitions of
* the returned ShuffleDependency will be the input of shuffle.
*/
private[sql] def prepareShuffleDependency(): ShuffleDependency[Int, InternalRow, InternalRow] = {
val rdd = child.execute()
val part: Partitioner = newPartitioning match {
case RoundRobinPartitioning(numPartitions) => new HashPartitioner(numPartitions)
case HashPartitioning(_, n) =>
new Partitioner {
override def numPartitions: Int = n
// For HashPartitioning, the partitioning key is already a valid partition ID, as we use
// `HashPartitioning.partitionIdExpression` to produce partitioning key.
override def getPartition(key: Any): Int = key.asInstanceOf[Int]
}
case RangePartitioning(sortingExpressions, numPartitions) =>
// Internally, RangePartitioner runs a job on the RDD that samples keys to compute
// partition bounds. To get accurate samples, we need to copy the mutable keys.
val rddForSampling = rdd.mapPartitionsInternal { iter =>
val mutablePair = new MutablePair[InternalRow, Null]()
iter.map(row => mutablePair.update(row.copy(), null))
}
// We need to use an interpreted ordering here because generated orderings cannot be
// serialized and this ordering needs to be created on the driver in order to be passed into
// Spark core code.
implicit val ordering = new InterpretedOrdering(sortingExpressions, child.output)
new RangePartitioner(numPartitions, rddForSampling, ascending = true)
case SinglePartition =>
new Partitioner {
override def numPartitions: Int = 1
override def getPartition(key: Any): Int = 0
}
case _ => sys.error(s"Exchange not implemented for $newPartitioning")
// TODO: Handle BroadcastPartitioning.
}
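// Returns a function that extracts the partitioning key for a row, matching the partitioner
// created above: a counter (later reduced modulo the number of partitions) for round-robin,
// the precomputed partition id for hash partitioning, and the row itself (identity) for
// range partitioning and single partition.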
def getPartitionKeyExtractor(): InternalRow => Any = newPartitioning match {
case RoundRobinPartitioning(numPartitions) =>
// Distributes elements evenly across output partitions, starting from a random partition.
var position = new Random(TaskContext.get().partitionId()).nextInt(numPartitions)
(row: InternalRow) => {
// The HashPartitioner will handle the `mod` by the number of partitions
position += 1
position
}
case h: HashPartitioning =>
val projection = UnsafeProjection.create(h.partitionIdExpression :: Nil, child.output)
row => projection(row).getInt(0)
case RangePartitioning(_, _) | SinglePartition => identity
case _ => sys.error(s"Exchange not implemented for $newPartitioning")
}
val rddWithPartitionIds: RDD[Product2[Int, InternalRow]] = {
if (needToCopyObjectsBeforeShuffle(part, serializer)) {
rdd.mapPartitionsInternal { iter =>
val getPartitionKey = getPartitionKeyExtractor()
iter.map { row => (part.getPartition(getPartitionKey(row)), row.copy()) }
}
} else {
rdd.mapPartitionsInternal { iter =>
val getPartitionKey = getPartitionKeyExtractor()
val mutablePair = new MutablePair[Int, InternalRow]()
iter.map { row => mutablePair.update(part.getPartition(getPartitionKey(row)), row) }
}
}
}
// Now, we manually create a ShuffleDependency. Because the pairs in rddWithPartitionIds
// are already in the form of (partitionId, row) and every partitionId is in the expected range
// [0, part.numPartitions - 1], the partitioner of this RDD is a PartitionIdPassthrough.
val dependency =
new ShuffleDependency[Int, InternalRow, InternalRow](
rddWithPartitionIds,
new PartitionIdPassthrough(part.numPartitions),
Some(serializer))
dependency
}
/**
* Returns a [[ShuffledRowRDD]] that represents the post-shuffle dataset.
* This [[ShuffledRowRDD]] is created based on a given [[ShuffleDependency]] and an optional
* partition start indices array. If this optional array is defined, the returned
* [[ShuffledRowRDD]] will fetch pre-shuffle partitions based on indices of this array.
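*
* For example (an illustrative sketch): with five pre-shuffle partitions, passing
* `Some(Array(0, 3))` yields two post-shuffle partitions, the first reading pre-shuffle
* partitions [0, 3) and the second reading [3, 5).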
*/
private[sql] def preparePostShuffleRDD(
shuffleDependency: ShuffleDependency[Int, InternalRow, InternalRow],
specifiedPartitionStartIndices: Option[Array[Int]] = None): ShuffledRowRDD = {
// If an array of partition start indices is provided, we need to use this array
// to create the ShuffledRowRDD. Also, we need to update newPartitioning to reflect
// the new number of post-shuffle partitions.
specifiedPartitionStartIndices.foreach { indices =>
assert(newPartitioning.isInstanceOf[HashPartitioning])
newPartitioning = UnknownPartitioning(indices.length)
}
new ShuffledRowRDD(shuffleDependency, specifiedPartitionStartIndices)
}
protected override def doExecute(): RDD[InternalRow] = attachTree(this, "execute") {
coordinator match {
case Some(exchangeCoordinator) =>
val shuffleRDD = exchangeCoordinator.postShuffleRDD(this)
assert(shuffleRDD.partitions.length == newPartitioning.numPartitions)
shuffleRDD
case None =>
val shuffleDependency = prepareShuffleDependency()
preparePostShuffleRDD(shuffleDependency)
}
}
}
object Exchange {
def apply(newPartitioning: Partitioning, child: SparkPlan): Exchange = {
Exchange(newPartitioning, child, coordinator = None: Option[ExchangeCoordinator])
}
}
/**
* Ensures that the [[org.apache.spark.sql.catalyst.plans.physical.Partitioning Partitioning]]
* of input data meets the
* [[org.apache.spark.sql.catalyst.plans.physical.Distribution Distribution]] requirements for
* each operator by inserting [[Exchange]] Operators where required. Also ensure that the
* input partition ordering requirements are met.
*/
private[sql] case class EnsureRequirements(sqlContext: SQLContext) extends Rule[SparkPlan] {
private def defaultNumPreShufflePartitions: Int = sqlContext.conf.numShufflePartitions
private def targetPostShuffleInputSize: Long = sqlContext.conf.targetPostShuffleInputSize
private def adaptiveExecutionEnabled: Boolean = sqlContext.conf.adaptiveExecutionEnabled
private def minNumPostShufflePartitions: Option[Int] = {
val minNumPostShufflePartitions = sqlContext.conf.minNumPostShufflePartitions
if (minNumPostShufflePartitions > 0) Some(minNumPostShufflePartitions) else None
}
/**
* Given a required distribution, returns a partitioning that satisfies that distribution.
*/
private def createPartitioning(
requiredDistribution: Distribution,
numPartitions: Int): Partitioning = {
requiredDistribution match {
case AllTuples => SinglePartition
case ClusteredDistribution(clustering) => HashPartitioning(clustering, numPartitions)
case OrderedDistribution(ordering) => RangePartitioning(ordering, numPartitions)
case dist => sys.error(s"Do not know how to satisfy distribution $dist")
}
}
/**
* Adds [[ExchangeCoordinator]] to [[Exchange]]s if adaptive query execution is enabled
* and partitioning schemes of these [[Exchange]]s support [[ExchangeCoordinator]].
*/
private def withExchangeCoordinator(
children: Seq[SparkPlan],
requiredChildDistributions: Seq[Distribution]): Seq[SparkPlan] = {
val supportsCoordinator =
if (children.exists(_.isInstanceOf[Exchange])) {
// Right now, ExchangeCoordinator only supports HashPartitioning.
children.forall {
case Exchange(_: HashPartitioning, _, _) => true
case child =>
child.outputPartitioning match {
case hash: HashPartitioning => true
case collection: PartitioningCollection =>
collection.partitionings.forall(_.isInstanceOf[HashPartitioning])
case _ => false
}
}
} else {
// In this case, although we do not have Exchange operators, we may still need to
// shuffle data when we have more than one child because data generated by
// these children may not be partitioned in the same way.
// Please see the comment in withCoordinator for more details.
val supportsDistribution =
requiredChildDistributions.forall(_.isInstanceOf[ClusteredDistribution])
children.length > 1 && supportsDistribution
}
val withCoordinator =
if (adaptiveExecutionEnabled && supportsCoordinator) {
val coordinator =
new ExchangeCoordinator(
children.length,
targetPostShuffleInputSize,
minNumPostShufflePartitions)
children.zip(requiredChildDistributions).map {
case (e: Exchange, _) =>
// This child is an Exchange, we need to add the coordinator.
e.copy(coordinator = Some(coordinator))
case (child, distribution) =>
// If this child is not an Exchange, we need to add an Exchange for now.
// Ideally, we can try to avoid this Exchange. However, when we reach here,
// there are at least two child operators (because if there is a single child
// and we can avoid the Exchange, supportsCoordinator will be false and we
// will not reach here). Although we can make the two children have the same number of
// post-shuffle partitions, their numbers of pre-shuffle partitions may be different.
// For example, let's say we have the following plan
// Join
// / \
// Agg Exchange
// / \
// Exchange t2
// /
// t1
// In this case, because a post-shuffle partition can include multiple pre-shuffle
// partitions, a HashPartitioning will not be strictly partitioned by the hashcodes
// after shuffle. So, even if we can use the child Exchange operator of the Join to
// have a number of post-shuffle partitions that matches the number of partitions of
// Agg, we cannot say these two children are partitioned in the same way.
// Here is another case
// Join
// / \
// Agg1 Agg2
// / \
// Exchange1 Exchange2
// / \
// t1 t2
// In this case, two Aggs shuffle data with the same column of the join condition.
// After we use ExchangeCoordinator, these two Aggs may not be partitioned in the same
// way. Let's say that Agg1 and Agg2 both have 5 pre-shuffle partitions and 2
// post-shuffle partitions. It is possible that Agg1 fetches those pre-shuffle
// partitions by using a partitionStartIndices [0, 3]. However, Agg2 may fetch its
// pre-shuffle partitions by using another partitionStartIndices [0, 4].
// So, Agg1 and Agg2 are actually not co-partitioned.
//
// It would be great to introduce a new Partitioning to represent the post-shuffle
// partitions when one post-shuffle partition includes multiple pre-shuffle partitions.
val targetPartitioning =
createPartitioning(distribution, defaultNumPreShufflePartitions)
assert(targetPartitioning.isInstanceOf[HashPartitioning])
Exchange(targetPartitioning, child, Some(coordinator))
}
} else {
// If we do not need ExchangeCoordinator, the original children are returned.
children
}
withCoordinator
}
private def ensureDistributionAndOrdering(operator: SparkPlan): SparkPlan = {
val requiredChildDistributions: Seq[Distribution] = operator.requiredChildDistribution
val requiredChildOrderings: Seq[Seq[SortOrder]] = operator.requiredChildOrdering
var children: Seq[SparkPlan] = operator.children
assert(requiredChildDistributions.length == children.length)
assert(requiredChildOrderings.length == children.length)
// Ensure that the operator's children satisfy their output distribution requirements:
children = children.zip(requiredChildDistributions).map { case (child, distribution) =>
if (child.outputPartitioning.satisfies(distribution)) {
child
} else {
Exchange(createPartitioning(distribution, defaultNumPreShufflePartitions), child)
}
}
// If the operator has multiple children and specifies child output distributions (e.g. join),
// then the children's output partitionings must be compatible:
if (children.length > 1
&& requiredChildDistributions.toSet != Set(UnspecifiedDistribution)
&& !Partitioning.allCompatible(children.map(_.outputPartitioning))) {
// First check if the existing partitions of the children all match. This means they are
// partitioned by the same partitioning into the same number of partitions. In that case,
// don't try to make them match `defaultNumPreShufflePartitions`, just use the existing partitioning.
val maxChildrenNumPartitions = children.map(_.outputPartitioning.numPartitions).max
val useExistingPartitioning = children.zip(requiredChildDistributions).forall {
case (child, distribution) => {
child.outputPartitioning.guarantees(
createPartitioning(distribution, maxChildrenNumPartitions))
}
}
children = if (useExistingPartitioning) {
// We do not need to shuffle any child's output.
children
} else {
// We need to shuffle at least one child's output.
// Now, we will determine the number of partitions that will be used by created
// partitioning schemes.
val numPartitions = {
// Let's see if we need to shuffle all children's outputs when we use
// maxChildrenNumPartitions.
val shufflesAllChildren = children.zip(requiredChildDistributions).forall {
case (child, distribution) => {
!child.outputPartitioning.guarantees(
createPartitioning(distribution, maxChildrenNumPartitions))
}
}
// If we need to shuffle all children, we use defaultNumPreShufflePartitions as the
// number of partitions. Otherwise, we use maxChildrenNumPartitions.
if (shufflesAllChildren) defaultNumPreShufflePartitions else maxChildrenNumPartitions
}
children.zip(requiredChildDistributions).map {
case (child, distribution) => {
val targetPartitioning =
createPartitioning(distribution, numPartitions)
if (child.outputPartitioning.guarantees(targetPartitioning)) {
child
} else {
child match {
// If child is an exchange, we replace it with
// a new one having targetPartitioning.
case Exchange(_, c, _) => Exchange(targetPartitioning, c)
case _ => Exchange(targetPartitioning, child)
}
}
}
}
}
}
// Now, we need to add ExchangeCoordinator if necessary.
// Actually, it is not a good idea to add ExchangeCoordinators while we are adding Exchanges.
// However, with the way that we plan the query, we do not have a place where we have a
// global picture of all shuffle dependencies of a post-shuffle stage. So, we add the coordinator
// here for now.
// Once we finish https://issues.apache.org/jira/browse/SPARK-10665,
// we can first add Exchanges and then add coordinator once we have a DAG of query fragments.
children = withExchangeCoordinator(children, requiredChildDistributions)
// Now that we've performed any necessary shuffles, add sorts to guarantee output orderings:
children = children.zip(requiredChildOrderings).map { case (child, requiredOrdering) =>
if (requiredOrdering.nonEmpty) {
// If child.outputOrdering is [a, b] and requiredOrdering is [a], we do not need to sort.
if (requiredOrdering != child.outputOrdering.take(requiredOrdering.length)) {
Sort(requiredOrdering, global = false, child = child)
} else {
child
}
} else {
child
}
}
operator.withNewChildren(children)
}
def apply(plan: SparkPlan): SparkPlan = plan.transformUp {
case operator @ Exchange(partitioning, child, _) =>
child.children match {
case Exchange(childPartitioning, baseChild, _)::Nil =>
if (childPartitioning.guarantees(partitioning)) child else operator
case _ => operator
}
case operator: SparkPlan => ensureDistributionAndOrdering(operator)
}
}
|
ekasitk/spark
|
sql/hive/src/test/scala/org/apache/spark/sql/hive/SQLBuilderTest.scala
|
<reponame>ekasitk/spark
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import org.apache.spark.sql.{DataFrame, QueryTest}
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.hive.test.TestHiveSingleton
abstract class SQLBuilderTest extends QueryTest with TestHiveSingleton {
protected def checkSQL(e: Expression, expectedSQL: String): Unit = {
val actualSQL = e.sql
try {
assert(actualSQL === expectedSQL)
} catch {
case cause: Throwable =>
fail(
s"""Wrong SQL generated for the following expression:
|
|${e.prettyName}
|
|$cause
""".stripMargin)
}
}
protected def checkSQL(plan: LogicalPlan, expectedSQL: String): Unit = {
val maybeSQL = new SQLBuilder(plan, hiveContext).toSQL
if (maybeSQL.isEmpty) {
fail(
s"""Cannot convert the following logical query plan to SQL:
|
|${plan.treeString}
""".stripMargin)
}
val actualSQL = maybeSQL.get
try {
assert(actualSQL === expectedSQL)
} catch {
case cause: Throwable =>
fail(
s"""Wrong SQL generated for the following logical query plan:
|
|${plan.treeString}
|
|$cause
""".stripMargin)
}
checkAnswer(sqlContext.sql(actualSQL), new DataFrame(sqlContext, plan))
}
protected def checkSQL(df: DataFrame, expectedSQL: String): Unit = {
checkSQL(df.queryExecution.analyzed, expectedSQL)
}
}
|
ekasitk/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/joins/CartesianProduct.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import org.apache.spark._
import org.apache.spark.rdd.{CartesianPartition, CartesianRDD, RDD}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, UnsafeRow}
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeRowJoiner
import org.apache.spark.sql.execution.{BinaryNode, SparkPlan}
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.util.CompletionIterator
import org.apache.spark.util.collection.unsafe.sort.UnsafeExternalSorter
/**
 * An optimized CartesianRDD for UnsafeRow, which caches the rows from the second child RDD.
 * This is much faster than rebuilding the right partition for every row in the left RDD, and it
 * also materializes the right RDD (in case the right RDD is nondeterministic).
 */
private[spark]
class UnsafeCartesianRDD(left : RDD[UnsafeRow], right : RDD[UnsafeRow], numFieldsOfRight: Int)
extends CartesianRDD[UnsafeRow, UnsafeRow](left.sparkContext, left, right) {
override def compute(split: Partition, context: TaskContext): Iterator[(UnsafeRow, UnsafeRow)] = {
// We will not sort the rows, so prefixComparator and recordComparator are null.
val sorter = UnsafeExternalSorter.create(
context.taskMemoryManager(),
SparkEnv.get.blockManager,
context,
null,
null,
1024,
SparkEnv.get.memoryManager.pageSizeBytes)
val partition = split.asInstanceOf[CartesianPartition]
for (y <- rdd2.iterator(partition.s2, context)) {
sorter.insertRecord(y.getBaseObject, y.getBaseOffset, y.getSizeInBytes, 0)
}
// Create an iterator from the sorter and wrap it as an Iterator[UnsafeRow]
def createIter(): Iterator[UnsafeRow] = {
val iter = sorter.getIterator
val unsafeRow = new UnsafeRow(numFieldsOfRight)
new Iterator[UnsafeRow] {
override def hasNext: Boolean = {
iter.hasNext
}
override def next(): UnsafeRow = {
iter.loadNext()
unsafeRow.pointTo(iter.getBaseObject, iter.getBaseOffset, iter.getRecordLength)
unsafeRow
}
}
}
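// createIter() is invoked once per row of the left partition, replaying the cached right-side
// rows from the sorter rather than recomputing the right RDD partition each time.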
val resultIter =
for (x <- rdd1.iterator(partition.s1, context);
y <- createIter()) yield (x, y)
CompletionIterator[(UnsafeRow, UnsafeRow), Iterator[(UnsafeRow, UnsafeRow)]](
resultIter, sorter.cleanupResources)
}
}
case class CartesianProduct(left: SparkPlan, right: SparkPlan) extends BinaryNode {
override def output: Seq[Attribute] = left.output ++ right.output
override private[sql] lazy val metrics = Map(
"numLeftRows" -> SQLMetrics.createLongMetric(sparkContext, "number of left rows"),
"numRightRows" -> SQLMetrics.createLongMetric(sparkContext, "number of right rows"),
"numOutputRows" -> SQLMetrics.createLongMetric(sparkContext, "number of output rows"))
protected override def doExecute(): RDD[InternalRow] = {
val numLeftRows = longMetric("numLeftRows")
val numRightRows = longMetric("numRightRows")
val numOutputRows = longMetric("numOutputRows")
val leftResults = left.execute().map { row =>
numLeftRows += 1
row.asInstanceOf[UnsafeRow]
}
val rightResults = right.execute().map { row =>
numRightRows += 1
row.asInstanceOf[UnsafeRow]
}
val pair = new UnsafeCartesianRDD(leftResults, rightResults, right.output.size)
pair.mapPartitionsInternal { iter =>
val joiner = GenerateUnsafeRowJoiner.create(left.schema, right.schema)
iter.map { r =>
numOutputRows += 1
joiner.join(r._1, r._2)
}
}
}
}
|
ciads-ut/poisson-factorization
|
GPPF/src/main/scala/pfmodel/samplers.scala
|
package sample
import java.math._
import scala.util._
import org.bytedeco.javacpp._
import org.bytedeco.javacpp.gsl._
object samplers {
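// Samples the table count of a Chinese Restaurant Table (CRT) distribution: the number of
// occupied tables after seating m customers with concentration parameter gammazero, i.e. the
// sum of Bernoulli(gammazero / (gammazero + i)) draws for i = 0 .. m-1.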
def sampleCRT(rng: gsl.gsl_rng, m: Double, gammazero: Double): Double = {
var(sum, bparam) = (0.0, 0.0)
for(i <- 0 to (m.toInt - 1)) {
bparam = gammazero / (gammazero + i)
if(gsl.gsl_rng_uniform(rng) <= bparam)
sum += 1
}
return sum
}
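// Samples from a zero-truncated Poisson distribution with rate lam: for lam >= 1 it rejection
// samples until a positive draw; for smaller rates it walks the truncated pmf directly,
// clamping very small rates to 1e-6 for numerical stability.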
def TruncPoisson(rng: gsl.gsl_rng, lam: Double): Double = {
var lambda = lam
var (m, pmf, prob) = (0, 1.0, 0.0)
if (lambda >= 1) {
while (m <= 0) {
m = gsl.gsl_ran_poisson(rng, lambda)
}
}
else {
m = 1
if (lambda <= 0.000001) {
lambda = 0.000001
}
prob = math.pow(lambda,m)*math.exp(-lambda)/(m*(1-math.exp(-lambda)))
while(prob/pmf <= gsl.gsl_rng_uniform(rng)) {
pmf = pmf-prob
m += 1
prob = math.pow(lambda,m)*math.exp(-lambda)/(m*(1-math.exp(-lambda)))
}
}
return m
}
}
|
ciads-ut/poisson-factorization
|
GPPF/src/main/scala/pfmodel/CGPPFmodel.scala
|
package cgppf
import scala.io.Source
import data._
import scala.collection.mutable.ListBuffer
import java.io._
import mathutilities._
import org.bytedeco.javacpp._
import org.bytedeco.javacpp.gsl._
import sample._
import scala.collection.mutable.ArrayBuffer
import scala.math.sqrt
import scala.math.pow
import printutilities._
// --------------------------------------------------
// -------------------- C-GPPF --------------------
// --------------------------------------------------
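// Gibbs sampler for the corpus-only Gamma-Poisson factorization model: the D x V document-word
// count matrix Y is modeled as Poisson with rate sum_k rk * thetadk(d)(k) * betawk(w)(k), where
// thetadk are document loadings, betawk are word loadings, and rk are topic weights.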
class CorpusModel(rng: gsl.gsl_rng, data: PoissonData, numTopics: Integer, outDir: String) {
var D = data.D
var V = data.V
var K = numTopics
var outDirectory = outDir
// initialize hyperparameters
var(azero, bzero, czero, dzero, ezero) = (1.0, 1.0, 1.0, 1.0, 1.0)
var(fzero, gzero, hzero, c) = (1.0, sqrt(D.toDouble), 1.0, 1.0)
var(mzero, nzero, szero, tzero) = (1.0, 1.0, 1.0, 1.0)
// initialize data structures
var thetadk = Array.fill[Double](D,K)(1.0/K)
var betawk = Array.fill[Double](V,K)(1.0/K)
var thetakss = Array.fill[Double](K)(0.0)
var betakss = Array.fill[Double](K)(0.0)
var thetadss = Array.fill[Double](D)(0.0)
var betawss = Array.fill[Double](V)(0.0)
var gammak = Array.fill[Double](K)(1.0*azero/bzero)
var lk = Array.fill[Double](K)(0.0)
var rk = Array.fill[Double](K)(1.0*gammak(0)/c)
var uk = Array.fill[Double](K)(0.0)
var cd = Array.fill[Double](D)(1.0*gzero/hzero)
var ad = Array.fill[Double](D)(1.0*ezero/fzero)
var lddot = Array.fill[Double](D)(0.0)
var sw = Array.fill[Double](V)(1.0*szero/tzero)
var bw = Array.fill[Double](V)(1.0*mzero/nzero)
var lwdot = Array.fill[Double](V)(0.0)
var xdotwk = Array.fill[Double](V,K)(0.0)
var xddotk = Array.fill[Double](D,K)(0.0)
var xk = Array.fill[Double](K)(0.0)
var pdsum = Array.fill[Double](D)(0.0)
var qwsum = Array.fill[Double](V)(0.0)
// for debugging and display
var thetadkss = Array.fill[Double](D,K)(0.0)
var betawkss = Array.fill[Double](V,K)(0.0)
var rkss = Array.fill[Double](K)(0.0)
def construct() {
for (k <- 0 to (K-1)) {
for (d <- 0 to (D-1)) {
thetadss(d) += thetadk(d)(k) // sum over topics for each different document
thetakss(k) += thetadk(d)(k) // sum over documents for each different topic
}
for (w <- 0 to (V-1)) {
betawss(w) += betawk(w)(k) // sum over topics for each different word
betakss(k) += betawk(w)(k) // sum over words for each different topic
}
}
}
construct
def train(BurninITER: Int, CollectionITER: Int, data: PoissonData, samplePeriod: Int) {
var(rsum, gammasum, param1, param2) = (0.0, 0.0, 0.0, 0.0)
var(w, value, training_betakss_k, training_thetakss_k) = (0, 0, 0.0, 0.0)
// temporary structures for getting word ids and their counts
for (i <- 0 to (BurninITER + CollectionITER - 1)) {
if (i==0 || (i+1)%200 == 0) {
print("Iteration: " + (i+1) + "\n");
}
if (i % samplePeriod == 0) {
printSamples(i)
}
// reset statistics
rsum = 0.0
gammasum = 0.0
for (k <- 0 to (K-1)) {
xk(k) = 0.0
for (d <- 0 to (D-1)) {
xddotk(d)(k) = 0.0
}
for (w <- 0 to (V-1)) {
xdotwk(w)(k) = 0.0
}
}
// sampling starts
// sampling of latent counts; O(SK)
for (d <- 0 to (D-1)) {
for (nz <- 0 to (data.Y.row_ids(d).size-1)) {
w = data.Y.row_ids(d)(nz)
value = data.Y.row_counts(d)(nz)
var countsample = Array.fill[Int](K)(0)
var pmf = Array.fill[Double](K)(0.0)
for (k <- 0 to (K-1)) {
pmf(k) = mathutilities.mathutils.minguard(rk(k)*thetadk(d)(k)*betawk(w)(k))
}
// normalization
var normsum = pmf.sum
for (k <- 0 to (K-1)) {
pmf(k) = pmf(k) / normsum
}
gsl.gsl_ran_multinomial(rng, K.toLong, value, pmf, countsample);
// update sufficient statistics of x
for (k <- 0 to K-1) {
xddotk(d)(k) += countsample(k)
xdotwk(w)(k) += countsample(k)
xk(k) += countsample(k)
}
}
}
// sampling of rk, lk, gammak, thetadk and betawk; O(DK+VK+K)
for (k <- 0 to (K-1)) {
thetakss(k) = 0.0 // reset the old statistics about theta
for (d <- 0 to D-1) {
training_betakss_k = betakss(k)
if (data.mSY > 0) {
// subtract the missing Y links
for (nz <- 0 to (data.mY.row_ids(d).size-1)) {
training_betakss_k -= betawk(data.mY.row_ids(d)(nz))(k)
}
}
param1 = ad(d) + xddotk(d)(k)
param2 = 1.0 / (cd(d) + rk(k) * training_betakss_k)
pdsum(d) += mathutilities.mathutils.logguard(cd(d) * param2)
// sample thetadk
thetadk(d)(k) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
if (i >= BurninITER) {
thetadkss(d)(k) += thetadk(d)(k) / CollectionITER
}
// sample ldk
lddot(d) += sample.samplers.sampleCRT(rng, xddotk(d)(k), ad(d))
// update sufficient statistics for theta
thetakss(k) += thetadk(d)(k)
thetadss(d) += thetadk(d)(k)
}
betakss(k) = 0.0 // reset the old statistics about beta
uk(k) = 0.0 // reset the sufficient statistics to be used for updating rk's
for (w <- 0 to V - 1) {
training_thetakss_k = thetakss(k)
if (data.mSY > 0) {
// subtract the missing Y links
for (nz <- 0 to (data.mYt.row_ids(w).size-1)) {
training_thetakss_k -= thetadk(data.mYt.row_ids(w)(nz))(k)
}
}
param1 = bw(w) + xdotwk(w)(k)
param2 = 1.0 / (sw(w) + rk(k) * training_thetakss_k)
qwsum(w) += mathutilities.mathutils.logguard(sw(w) * param2)
// sample betawk
betawk(w)(k) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
if (i >= BurninITER) {
betawkss(w)(k) += betawk(w)(k) / CollectionITER
}
// sample lwk
lwdot(w) += sample.samplers.sampleCRT(rng, xdotwk(w)(k), bw(w))
// update sufficient statistics for beta
betakss(k) += betawk(w)(k)
betawss(w) += betawk(w)(k)
// update sufficient statistics to be used for updating rk's
uk(k) += betawk(w)(k) * training_thetakss_k
}
// sample rk
param1 = gammak(k) + xk(k)
param2 = 1.0 / (c + uk(k))
rk(k) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
if (i >= BurninITER) {
rkss(k) += rk(k) / CollectionITER
}
rsum += rk(k)
// sample lk
lk(k) = sample.samplers.sampleCRT(rng, xk(k), gammak(k))
// sample gammak
param1 = azero + lk(k)
param2 = 1.0 / (bzero - mathutilities.mathutils.logguard(c/(c+uk(k))))
gammak(k) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
gammasum += gammak(k)
}
// sample ad, cd; O(D)
for (d <- 0 to D -1 ) {
// sample cd
param1 = gzero + K*(ad(d))
param2 = 1.0 / (hzero + thetadss(d))
cd(d) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
// sample ad (this was commented out in the c++ version)
//param1 = ezero + lddot(d)
//param2 = 1.0/(fzero - pdsum(d))
// *(ad+d) = minguard(gsl_ran_gamma(rng,param1,param2));
thetadss(d) = 0.0 // reset thetadss for next iteration
lddot(d) = 0.0 // reset lddot for next iteration
pdsum(d) = 0.0
}
// sample bw, sw; O(V)
for (w <- 0 to V - 1) {
// sample sw
param1 = szero + K*(bw(w))
param2 = 1.0 / (tzero + betawss(w))
sw(w) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
// sample bw (this was commented out in the c++ version)
//param1 = mzero + lwdot(w)
//param2 = 1.0/(nzero - qwsum(w))
// *(bw+w) = minguard(gsl_ran_gamma(rng,param1,param2))
betawss(w) = 0.0 // reset betawss for next iteration
lwdot(w) = 0.0 // reset lwdot for next iteration
qwsum(w) = 0.0
}
// sample global variable (commenting this out to be consistent with N-GPPF)
//c = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, (czero + gammasum), 1.0/(dzero + rsum)))
}
// end of Gibbs sampling loop
}
def printResults() {
// saves the final expected values
// first create a new directory
val resultDirectoryName = outDirectory + "/CGPPF/expectedValues"
val resultDirectory = new File(resultDirectoryName)
resultDirectory.mkdirs()
// save rkY
printutilities.printutils.printVec(rkss, resultDirectoryName + "/rkY.txt", K)
// save thetadk
printutilities.printutils.printMat(thetadkss, resultDirectoryName + "/thetadk.txt", K, D)
// save betawk
printutilities.printutils.printMat(betawkss, resultDirectoryName + "/betawk.txt", K, V)
}
def printSamples(iter: Int) {
// saves interim samples
// first create a directory, if not there
val iterDirectoryName = outDirectory + "/CGPPF/iterations"
val iterDirectory = new File(iterDirectoryName)
iterDirectory.mkdirs()
// save rkY
printutilities.printutils.printVec(rk, iterDirectoryName + "/rkY-itr%04d.txt".format(iter), K)
// save thetadk
printutilities.printutils.printMat(thetadk, iterDirectoryName + "/thetadk-itr%04d.txt".format(iter), K, D)
// save betawk
printutilities.printutils.printMat(betawk, iterDirectoryName + "/betawk-itr%04d.txt".format(iter), K, V)
}
def generateSample() {
// generates random Y sample from the trained matrices, and saves to file
// first create a directory, if not there
val genDirectoryName = outDirectory + "/CGPPF/generatedSamples"
val genDirectory = new File(genDirectoryName)
genDirectory.mkdirs()
// generate Y
var Ynew = Array.fill[Integer](D,V)(0)
var(numEntries, y) = (0, 0)
var lambda = 0.0
for (d <- 0 to (D-1)) {
for (w <- 0 to (V-1)) {
lambda = 0.0
for (k <- 0 to (K-1)) {
lambda += rkss(k) * thetadkss(d)(k) * betawkss(w)(k)
}
y = gsl.gsl_ran_poisson(rng, lambda)
if (y > 0) {
numEntries += 1
Ynew(d)(w) = y
}
}
}
// save generated Y
printutilities.printutils.printTrFile(Ynew, genDirectoryName + "/genY.txt", D, V, D + "\t" + V + "\t" + numEntries)
}
}
|
ciads-ut/poisson-factorization
|
GPPF/src/main/scala/pfmodel/printutils.scala
|
package printutilities
import java.io._
object printutils {
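// Helpers for writing results as tab-separated text files. printMat writes the transpose of its
// L x K input (one output row per factor k, containing mat(0)(k) .. mat(L-1)(k)); printTrFile
// writes a sparse triplet file ("row<TAB>col<TAB>count") containing only the non-zero entries.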
def printVec(vec: Array[Double], outpath: String, K: Int) {
val fout = new PrintWriter(new File(outpath))
for (k <- 0 to (K-1)) {
fout.write(vec(k) + "\n")
}
fout.close
}
def printMat(mat: Array[Array[Double]], outpath: String, K: Int, L: Int) {
val fout = new PrintWriter(new File(outpath))
for (k <- 0 to (K-1)) {
fout.write(mat(0)(k).toString)
for (l <- 1 to (L-1)) {
fout.write("\t" + mat(l)(k))
}
fout.write("\n")
}
fout.close
}
def printTrFile(mat: Array[Array[Integer]], outpath: String, Xlen: Int, Ylen: Int, header: String) {
val fout = new PrintWriter(new File(outpath))
fout.write(header + "\n")
for (n <- 0 to (Xlen-1)) {
for (m <- 0 to (Ylen-1)) {
if (mat(n)(m) > 0) {
fout.write(n + "\t" + m + "\t" + mat(n)(m) + "\n")
}
}
}
fout.close
}
}
|
ciads-ut/poisson-factorization
|
GPPF/src/main/scala/pfmodel/NGPPFmodel.scala
|
package ngppf
import scala.io.Source
import data._
import scala.collection.mutable.ListBuffer
import java.io._
import mathutilities._
import org.bytedeco.javacpp._
import org.bytedeco.javacpp.gsl._
import sample._
import scala.collection.mutable.ArrayBuffer
import scala.math.sqrt
import scala.math.pow
import printutilities._
// --------------------------------------------------
// -------------------- N-GPPF --------------------
// --------------------------------------------------
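// Gibbs sampler for the network-only Gamma-Poisson factorization model: the symmetric N x N
// author-network count matrix B is modeled as Poisson with rate sum_k rk * phink(n)(k) *
// phink(m)(k); binary networks (netOption == 0) are handled via truncated Poisson sampling.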
class NetworkModel(rng: gsl.gsl_rng, data: PoissonData, numTopics: Integer, outDir: String) {
var N = data.N
var K = numTopics
var outDirectory = outDir
// initialize hyperparameters
var(azero, bzero, czero, dzero, ezero) = (1.0, 1.0, 1.0, 1.0, 1.0)
var(fzero, gzero, hzero, c) = (1.0, sqrt(N.toDouble), 1.0, 1.0)
// initialize data structures
var phink = Array.fill[Double](N,K)(1.0/K)
var lnk = Array.fill[Double](N,K)(0.0)
var phinss = Array.fill[Double](N)(0.0)
var phikss = Array.fill[Double](K)(0.0)
var phikss2 = Array.fill[Double](K)(0.0)
var lk = Array.fill[Double](K)(0.0)
var gammak = Array.fill[Double](K)(1.0*azero/bzero)
var rk = Array.fill[Double](K)(1.0*gammak(0)/c)
var an = Array.fill[Double](N)(1.0*ezero/fzero)
var cn = Array.fill[Double](N)(1.0*gzero/hzero)
var xndotk = Array.fill[Double](N,K)(0.0)
var xk = Array.fill[Double](K)(0.0)
// for debugging and display
var phinkss = Array.fill[Double](N,K)(0.0)
var rkss = Array.fill[Double](K)(0.0)
def construct() {
for (k <- 0 to (K-1)) {
for (n <- 0 to (N-1)) {
phikss(k) += phink(n)(k)
phikss2(k) += pow(phink(n)(k),2)
}
}
}
construct
def train(BurninITER: Int, CollectionITER: Int, data: PoissonData, netOption: Int, samplePeriod: Int) {
var(param1, param2, training_phikss_k) = (0.0, 0.0, 0.0)
var(gammasum, rsum, lnsum, pnsum) = (0.0, 0.0, 0.0, 0.0)
var(value, m, c, sk) = (0, 0, 0.0, 0.0)
// Gibbs sampling iteration starts
for (i <- 0 to (BurninITER+CollectionITER-1)) {
if (i==0 || (i+1)%200 == 0) {
print("Iteration: " + (i+1) + "\n")
}
if (i % samplePeriod == 0) {
printSamples(i)
}
// reset a few statistics first; O(NK)
rsum = 0.0
gammasum = 0.0
for (k <- 0 to (K-1)) {
xk(k) = 0.0
for (n <- 0 to (N-1)) {
xndotk(n)(k) = 0.0
}
}
// sampling starts
// sampling of latent counts; O(SK)
for (n <- 0 to (N-2)) {
for (nz <- 0 to (data.B.row_ids(n).size-1)) {
m = data.B.row_ids(n)(nz)
value = data.B.row_counts(n)(nz)
var countsample = Array.fill[Int](K)(0)
var pmf = Array.fill[Double](K)(0.0)
for (k <- 0 to (K-1)) {
pmf(k) = mathutilities.mathutils.minguard(rk(k)*phink(n)(k)*phink(m)(k))
}
// normalization
var normsum = pmf.sum
for (k <- 0 to (K-1)) {
pmf(k) = pmf(k) / normsum
}
if (netOption == 0) {
// binary network, use truncated poisson to estimate "value"
value = sample.samplers.TruncPoisson(rng, normsum.toInt).toInt
}
gsl.gsl_ran_multinomial(rng, K.toLong, value, pmf, countsample)
// update sufficient statistics of x
for (k <- 0 to (K-1)) {
xndotk(n)(k) += countsample(k)
xndotk(m)(k) += countsample(k)
xk(k) += countsample(k)
}
}
}
// sampling of rk, lk, gammak; O(3*K)
for (k <- 0 to (K-1)) {
sk = (pow(phikss(k),2) - phikss2(k))/2
if (data.mSN > 0) {
// subtract the missing B links
for (n <- 0 to (N-1)) {
for (nz <- 0 to (data.mB.row_ids(n).size-1)) {
sk -= phink(n)(k) * phink(data.mB.row_ids(n)(nz))(k)
}
}
}
// sample rk
param1 = gammak(k) + xk(k)
param2 = 1.0 / (c + sk)
rk(k) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
if (i >= BurninITER) {
rkss(k) += rk(k) / CollectionITER
}
rsum += rk(k)
// sample lk
lk(k) = sample.samplers.sampleCRT(rng, xk(k), gammak(k))
// sample gammak
param1 = azero + lk(k)
param2 = 1.0 / (bzero - mathutilities.mathutils.logguard(c/(c+sk)))
gammak(k) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
gammasum += gammak(k)
}
// sampling of phink and cn; O(NK+N)
for (n <- 0 to (N-1)) {
// reset the statistics about phi
phinss(n) = 0.0
lnsum = 0.0
pnsum = 0.0
for (k <- 0 to (K-1)) {
phikss(k) -= phink(n)(k) // avoids recomputing sum_n(phink)
phikss2(k) -= pow(phink(n)(k),2) // avoids recomputing sum_n(phink^2)
training_phikss_k = phikss(k) // remove heldout links from this (if applicable)
if (data.mSN > 0) {
// subtract the missing B links
for (nz <- 0 to (data.mB.row_ids(n).size-1)) {
training_phikss_k -= phink(data.mB.row_ids(n)(nz))(k)
}
}
param1 = an(n) + xndotk(n)(k)
param2 = 1.0 / (cn(n) + rk(k) * training_phikss_k) // this is 1/(c_n + r_k*s_nk)
pnsum += mathutilities.mathutils.logguard(cn(n) * param2)
// sample phink
phink(n)(k) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
if (i >= BurninITER) {
phinkss(n)(k) += phink(n)(k) / CollectionITER
}
// sample lnk
lnk(n)(k) = sample.samplers.sampleCRT(rng, xndotk(n)(k), an(n))
lnsum += lnk(n)(k)
// update sufficient statistics for phi
phinss(n) += phink(n)(k)
phikss(k) += phink(n)(k)
phikss2(k) += pow(phink(n)(k),2)
}
// sample an (this was commented out in the c++ version)
//param1 = ezero + lnsum
//param2 = 1.0 / (fzero - pnsum)
//an(n) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2)
// sample cn
param1 = gzero + K*an(n)
param2 = 1.0 / (hzero + phinss(n))
cn(n) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
}
// sample global variable (this is also commented out in the c++ version, and is listed as a hyperparameter)
//c = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, czero+gammasum, 1.0/(dzero+rsum)))
}
// end of Gibbs sampling loop
}
def printResults() {
// saves the final expected values
// first create a new directory
val resultDirectoryName = outDirectory + "/NGPPF/expectedValues"
val resultDirectory = new File(resultDirectoryName)
resultDirectory.mkdirs()
// save rkB
printutilities.printutils.printVec(rkss, resultDirectoryName + "/rkB.txt", K)
// save phink
printutilities.printutils.printMat(phinkss, resultDirectoryName + "/phink.txt", K, N)
}
def printSamples(iter: Int) {
// saves interim samples
// first create a directory, if not there
val iterDirectoryName = outDirectory + "/NGPPF/iterations"
val iterDirectory = new File(iterDirectoryName)
iterDirectory.mkdirs()
// save rkB
printutilities.printutils.printVec(rk, iterDirectoryName + "/rkB-itr%04d.txt".format(iter), K)
// save phink
printutilities.printutils.printMat(phink, iterDirectoryName + "/phink-itr%04d.txt".format(iter), K, N)
}
def generateSample(netOption: Int) {
// generates random B sample from the trained matrices, and saves to file
// first create a directory, if not there
val genDirectoryName = outDirectory + "/NGPPF/generatedSamples"
val genDirectory = new File(genDirectoryName)
genDirectory.mkdirs()
// generate B
var Bnew = Array.fill[Integer](N,N)(0)
var(numEntries, x) = (0, 0)
var lambda = 0.0
for (n <- 0 to (N-1)) {
for (m <- (n+1) to (N-1)) {
lambda = 0.0
for (k <- 0 to (K-1)) {
lambda += rkss(k) * phinkss(n)(k) * phinkss(m)(k)
}
x = gsl.gsl_ran_poisson(rng, lambda)
if (netOption == 0) {
// binary network, truncate x
if (x > 1) {x = 1}
}
if (x > 0) {
numEntries += 1
Bnew(n)(m) = x
}
}
}
// save generated B
printutilities.printutils.printTrFile(Bnew, genDirectoryName + "/genB.txt", N, N, N + "\t" + numEntries)
}
}
|
ciads-ut/poisson-factorization
|
GPPF/src/main/scala/pfmodel/mathutils.scala
|
package mathutilities
import java.math._
object mathutils {
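// Numerical guards shared by the samplers: both functions clamp their argument to LOWLIMIT so
// that gamma/multinomial parameters stay strictly positive and log(0) is never taken.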
val LOWLIMIT = 1e-15
def logguard(m : Double) : Double = {
// provides guard against log(0)
if (m < LOWLIMIT) {
return math.log(LOWLIMIT)
}
else {
return math.log(m)
}
}
def minguard(m : Double) : Double = {
// provides guard against number lower than LOWLIMIT
if (m < LOWLIMIT) {
return (LOWLIMIT)
}
else {
return m
}
}
}
|
ciads-ut/poisson-factorization
|
GPPF/src/main/scala/pfmodel/JGPPFmodel.scala
|
package jgppf
import scala.io.Source
import data._
import scala.collection.mutable.ListBuffer
import java.io._
import mathutilities._
import org.bytedeco.javacpp._
import org.bytedeco.javacpp.gsl._
import sample._
import scala.collection.mutable.ArrayBuffer
import scala.math.sqrt
import scala.math.pow
import printutilities._
// --------------------------------------------------
// -------------------- J-GPPF --------------------
// --------------------------------------------------
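// Gibbs sampler for the joint Gamma-Poisson factorization model (J-GPPF): the author network B
// (KB network factors) and the document-word corpus Y (KY corpus factors) are factorized
// jointly. When Option == 1 the two sides are coupled through the authorship matrix Z and the
// mixing weight epsilon, letting the network factors also explain part of the word counts.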
class JointModel(rng: gsl.gsl_rng, data: PoissonData, epsilonlc: Double, kB: Integer, kY: Integer, outDir: String) {
var KB = kB
var KY = kY
var N = data.N
var D = data.D
var V = data.V
var epsilon = epsilonlc
var outDirectory = outDir
// initialize hyperparameters
// for network
var(azeroB, bzeroB, czeroB, dzeroB) = (1.0, 1.0, sqrt(N.toDouble), 1.0)
var(ezeroB, fzeroB, gzeroB) = (1.0, 1.0, 1.0)
var(hzeroB, mzeroB, nzeroB, szeroB) = (1.0, 1.0, 1.0, 1.0)
var(tzeroB, cB, gammaB, xiB) = (1.0, 1.0, 1.0, 0.1)
// for corpus
var(azeroY, bzeroY, czeroY, dzeroY) = (1.0, 1.0, sqrt(D.toDouble), 1.0)
var(ezeroY, fzeroY, gzeroY) = (1.0, 1.0, 1.0)
var(hzeroY, mzeroY, nzeroY, szeroY) = (1.0, 1.0, 1.0, 1.0)
var(tzeroY, cY, gammaY, xiY) = (1.0, 1.0, 1.0, 0.1)
// initialize matrices and arrays
// for network
var phink = Array.fill[Double](N,KB)(1.0/KB)
var phinss = Array.fill[Double](N)(1.0) // sum(phink,1)
var phikss = Array.fill[Double](KB)(1.0*N/KB) // sum(phink,0)
var phikss2 = Array.fill[Double](KB)(1.0*N/(KB*KB)) // sum(phink^2,0)
var rkB = Array.fill[Double](KB)(1.0*gammaB/KB)
var akB = Array.fill[Double](KB)(1.0)
var cn = Array.fill[Double](N)(1.0)
var psiwk = Array.fill[Double](V,KB)(1.0/V)
var xndotk = Array.fill[Double](N,KB)(0.0)
var ydotndotk = Array.fill[Double](N,KB)(0.0)
var xk = Array.fill[Double](KB)(0.0)
// for corpus
var thetadk = Array.fill[Double](D,KY)(1.0/KY)
var thetadss = Array.fill[Double](D)(1.0) // sum(thetadk,1)
var thetakss = Array.fill[Double](KY)(1.0*D/KY) // sum(thetadk,0)
var rkY = Array.fill[Double](KY)(1.0*gammaY/KY)
var akY = Array.fill[Double](KY)(1.0)
var cd = Array.fill[Double](D)(1.0)
var betawk = Array.fill[Double](V,KY)(1.0/V)
var ydotwk = Array.fill[Double](V,(KY+KB))(0.0)
var yddotk = Array.fill[Double](D,KY)(0.0)
var yk = Array.fill[Double](KY+KB)(0.0)
var uk = Array.fill[Double](KY)(0.0)
// for Z
var Zphikss = Array.fill[Double](KB)(0.0)
// for debugging and display
var phinkss = Array.fill[Double](N,KB)(0.0)
var psiwkss = Array.fill[Double](V,KB)(0.0)
var rkBss = Array.fill[Double](KB)(0.0)
var thetadkss = Array.fill[Double](D,KY)(0.0)
var betawkss = Array.fill[Double](V,KY)(0.0)
var rkYss = Array.fill[Double](KY)(0.0)
def construct() {
// Initialize Zphikss
for (n <- 0 to (N-1)) {
for (k <- 0 to (KB-1)) {
var training_Ndsz_n = data.Ndsz(n) // remove heldout Y links from this (if applicable)
if (data.mSY > 0) {
// subtract the missing Y links
var d = 0
for (zbin <- 0 to (data.Z.row_ids(n).size-1)) {
d = data.Z.row_ids(n)(zbin)
for (nz <- 0 to (data.mY.row_ids(d).size-1)) {
training_Ndsz_n -= psiwk(data.mY.row_ids(d)(nz))(k)
}
}
}
Zphikss(k) += training_Ndsz_n * phink(n)(k)
}
}
}
construct
def train(BurninITER: Int, CollectionITER: Int, data: PoissonData, Option: Int, netOption: Int, samplePeriod: Int) {
var(param1, param2, xiBparam1, xiBparam2, xiYparam1, xiYparam2) = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
var(rkBsum, rkYsum, lsum1, logpsum1, lsum2, logpsum2) = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
var(sk, akBsum, akYsum, epsilonparam1, epsilonparam2) = (0.0, 0.0, 0.0, 0.0, 0.0)
var(training_phikss_k, training_Ndsz_n, training_betakss_k) = (0.0, 0.0, 0.0)
var(kBcount, m, n, w, d, value) = (0, 0, 0, 0, 0, 0)
akBsum = akB.sum
akYsum = akY.sum
// Gibbs sampling iterations start
for (i <- 0 to (BurninITER+CollectionITER-1)) {
// interim output
if (i==0 || (i+1)%100==0) {
print("Iteration: " + (i+1) + "\n")
}
if (i % samplePeriod == 0) {
printSamples(i)
}
// reset a few statistics first; O(NK)
xndotk = Array.fill[Double](N,KB)(0.0)
xk = Array.fill[Double](KB)(0.0)
ydotndotk = Array.fill[Double](N,KB)(0.0)
yddotk = Array.fill[Double](D,KY)(0.0)
ydotwk = Array.fill[Double](V,(KY+KB))(0.0)
yk = Array.fill[Double](KY+KB)(0.0)
epsilonparam1 = 0.0
epsilonparam2 = 0.0
// sampling starts
// network sampling of latent counts; O(SK)
for (n <- 0 to (N-1)) {
for (nz <- 0 to (data.B.row_ids(n).size-1)) {
m = data.B.row_ids(n)(nz)
value = data.B.row_counts(n)(nz)
var countsample = Array.fill[Int](KB)(0)
var pmf = Array.fill[Double](KB)(0.0)
for (k <- 0 to (KB-1)) {
pmf(k) = mathutilities.mathutils.minguard(rkB(k)*phink(n)(k)*phink(m)(k))
}
// normalization
var normsum = pmf.sum
for (k <- 0 to (KB-1)) {
pmf(k) = pmf(k) / normsum
}
if (netOption == 0) {
// binary network, use truncated poisson to estimate "value"
value = sample.samplers.TruncPoisson(rng, normsum.toInt).toInt
}
gsl.gsl_ran_multinomial(rng, KB.toLong, value, pmf, countsample)
// update sufficient statistics of x
for (k <- 0 to (KB-1)) {
xndotk(n)(k) += countsample(k)
xndotk(m)(k) += countsample(k)
xk(k) += countsample(k)
}
}
}
// corpus sampling of latent counts; O(SK)
for (d <- 0 to (D-1)) {
var KBcount = KB * data.Dnsz(d).toInt
for (nz <- 0 to (data.Y.row_ids(d).size-1)) {
w = data.Y.row_ids(d)(nz)
value = data.Y.row_counts(d)(nz)
var countsample = Array.fill[Int](KBcount+KY)(0)
var pmf = Array.fill[Double](KBcount+KY)(0)
kBcount = 0
if (Option == 1) {
for (k <- 0 to (KB-1)) {
// for network groups
for (zbin <- 0 to (data.Zt.row_ids(d).size-1)) {
n = data.Zt.row_ids(d)(zbin)
pmf(kBcount) = mathutilities.mathutils.minguard(epsilon*rkB(k)*phink(n)(k)*psiwk(w)(k))
kBcount += 1
}
}
}
for (k <- 0 to (KY-1)) {
// for count data related groups
pmf(KBcount+k) = mathutilities.mathutils.minguard(rkY(k)*thetadk(d)(k)*betawk(w)(k))
}
// normalization
var normsum = pmf.sum
for (k <- 0 to (KBcount+KY-1)) {
pmf(k) = pmf(k) / normsum
}
gsl.gsl_ran_multinomial(rng, (KBcount+KY).toLong, value, pmf, countsample)
// update sufficient statistics of Y
kBcount = 0
for (k <- 0 to (KB-1)) {
// for network groups
for (zbin <- 0 to (data.Zt.row_ids(d).size-1)) {
n = data.Zt.row_ids(d)(zbin)
ydotndotk(n)(k) += countsample(kBcount)
ydotwk(w)(k) += countsample(kBcount)
yk(k) += countsample(kBcount)
epsilonparam1 += countsample(kBcount)
kBcount += 1
}
}
for (k <- 0 to (KY-1)) {
yddotk(d)(k) += countsample(KBcount+k)
ydotwk(w)(KB+k) += countsample(KBcount+k)
yk(KB+k) += countsample(KBcount+k)
}
}
}
// network sampling of phink and cn; O(NK+N)
for (n <- 0 to (N-1)) {
// reset the statistics about phi
phinss(n) = 0.0
for (k <- 0 to (KB-1)) {
// sampling of phink
phikss(k) -= phink(n)(k) // avoids recomputing sum_n(phink)
phikss2(k) -= pow(phink(n)(k),2) // avoids recomputing sum_n(phink^2)
training_phikss_k = phikss(k) // remove heldout B links from this (if applicable)
training_Ndsz_n = data.Ndsz(n) // remove heldout Y links from this (if applicable)
if (data.mSN > 0) {
// subtract the missing B links
for (nz <- 0 to (data.mB.row_ids(n).size-1)) {
training_phikss_k -= phink(data.mB.row_ids(n)(nz))(k)
}
}
if (data.mSY > 0) {
// subtract the missing Y links
for (zbin <- 0 to (data.Z.row_ids(n).size-1)) {
d = data.Z.row_ids(n)(zbin)
for (nz <- 0 to (data.mY.row_ids(d).size-1)) {
training_Ndsz_n -= psiwk(data.mY.row_ids(d)(nz))(k)
}
}
}
Zphikss(k) -= training_Ndsz_n * phink(n)(k)
param1 = azeroB + xndotk(n)(k) + ydotndotk(n)(k)
if (Option == 1) {
param2 = 1.0 / (cn(n) + rkB(k) * (training_phikss_k + epsilon*training_Ndsz_n))
}
else {
param2 = 1.0 / (cn(n) + rkB(k) * training_phikss_k)
}
phink(n)(k) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
if (i >= BurninITER) {
phinkss(n)(k) += phink(n)(k) / CollectionITER
}
// update sufficient statistics for phi
phinss(n) += phink(n)(k)
phikss(k) += phink(n)(k)
phikss2(k) += pow(phink(n)(k), 2)
Zphikss(k) += training_Ndsz_n * phink(n)(k)
}
// sampling of cn
param1 = czeroB + KB*azeroB
param2 = 1.0 / (dzeroB + phinss(n))
cn(n) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
}
// network sampling of rk, lk, and gammak; O(3*K)
rkBsum = 0.0
lsum1 = 0.0
logpsum1 = 0.0
xiBparam1 = 0.0
xiBparam2 = 0.0
for (k <- 0 to (KB-1)) {
// sample rkB
sk = (pow(phikss(k), 2) - phikss2(k)) / 2.0
if (data.mSN > 0) {
// subtract the missing B links
for (n <- 0 to (N-1)) {
for (nz <- 0 to (data.mB.row_ids(n).size-1)) {
sk -= phink(n)(k) * phink(data.mB.row_ids(n)(nz))(k)
}
}
}
// note: Zphikss has already been compensated for missing Y links, no need to subtract Z*phi*psi terms
param1 = 1.0*gammaB/KB + xk(k) + yk(k)
if (Option == 1) {
param2 = 1.0 / (cB + sk + epsilon*Zphikss(k))
}
else {
param2 = 1.0 / (cB + sk)
}
rkB(k) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
rkBsum += rkB(k)
if (i >= BurninITER) {
rkBss(k) += rkB(k) / CollectionITER
}
// sample lk's for the updates of gammaB
lsum1 += sample.samplers.sampleCRT(rng, yk(k), 1.0*gammaB/KB)
if (Option == 1) {
logpsum1 += mathutilities.mathutils.logguard(1.0 + (sk + epsilon*Zphikss(k))/cB)
}
else {
logpsum1 += mathutilities.mathutils.logguard(1.0 + 1.0*sk/cB)
}
epsilonparam2 += rkB(k) * Zphikss(k)
// sample psiwk only for the joint model, no need to sample for the disjoint model
if (Option == 1) {
// sample psiwk
var pmf = Array.fill[Double](V)(0.0)
var alpha_k = Array.fill[Double](V)(0.0)
for (w <- 0 to (V-1)) {
// get the parameters for the Dirichlet distribution
alpha_k(w) = 1.0*xiB + ydotwk(w)(k)
}
gsl.gsl_ran_dirichlet(rng, V, alpha_k, pmf)
for (w <- 0 to (V-1)) {
psiwk(w)(k) = mathutilities.mathutils.minguard(pmf(w))
if (i >= BurninITER) {
psiwkss(w)(k) += psiwk(w)(k) / CollectionITER
}
// sample the CRT random variables for the updates of the Dirichlet hyperparameter
xiBparam1 += sample.samplers.sampleCRT(rng, ydotwk(w)(k), xiB)
}
// sample the beta random variables for the updates of the Dirichlet hyperparameter
xiBparam2 += mathutilities.mathutils.logguard(1.0 - gsl.gsl_ran_beta(rng, yk(k), V*xiB))
}
}
// sample gammaB
param1 = ezeroB + lsum1
param2 = 1.0 / (fzeroB + 1.0*logpsum1/KB)
gammaB = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
// corpus sampling of rk, lk, gammak, thetadk, and betawk; O(DK+VK+K)
lsum2 = 0.0
logpsum2 = 0.0
rkYsum = 0.0
xiYparam1 = 0.0
xiYparam2 = 0.0
thetadss = Array.fill[Double](D)(0.0)
for (k <- 0 to (KY-1)) {
// sample thetadk
thetakss(k) = 0.0
lsum1 = 0.0
logpsum1 = 0.0
for (d <- 0 to (D-1)) {
param1 = azeroY + yddotk(d)(k)
training_betakss_k = 1.0 // remove missing Y links from this (if applicable)
if (data.mSY > 0) {
// subtract the missing Y links
for (nz <- 0 to (data.mY.row_ids(d).size-1)) {
training_betakss_k -= betawk(data.mY.row_ids(d)(nz))(k)
}
}
param2 = 1.0 / (cd(d) + rkY(k)*training_betakss_k)
thetadk(d)(k) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
thetakss(k) += thetadk(d)(k)
thetadss(d) += thetadk(d)(k)
if (i >= BurninITER) {
thetadkss(d)(k) += thetadk(d)(k) / CollectionITER
}
// sample ldky's for the updates of akY's
lsum1 += sample.samplers.sampleCRT(rng, yddotk(d)(k), akY(k))
logpsum1 += mathutilities.mathutils.logguard(1.0 + rkY(k)/cd(d))
}
// sample betawk
var pmf = Array.fill[Double](V)(0.0)
var alpha_k = Array.fill[Double](V)(0.0)
for (w <- 0 to (V-1)) {
// get the parameters for the Dirichlet distribution
alpha_k(w) = 1.0*xiY + ydotwk(w)(k+KB)
}
gsl.gsl_ran_dirichlet(rng, V, alpha_k, pmf)
for (w <- 0 to (V-1)) {
betawk(w)(k) = mathutilities.mathutils.minguard(pmf(w))
if (i >= BurninITER) {
betawkss(w)(k) += betawk(w)(k) / CollectionITER
}
// sample the CRT random variables for the updates of the Dirichlet hyperparameter
xiYparam1 += sample.samplers.sampleCRT(rng, ydotwk(w)(k+KB), xiY)
}
// prepare sufficient statistics for updating rkY, and compensate for missing Y links
uk(k) = thetakss(k)
if (data.mSY > 0) {
for (d <- 0 to (D-1)) {
for (nz <- 0 to (data.mY.row_ids(d).size-1)) {
uk(k) -= thetadk(d)(k) * betawk(data.mY.row_ids(d)(nz))(k)
}
}
}
// sample the beta random variables for the updates of the Dirichlet hyperparameter
xiYparam2 += mathutilities.mathutils.logguard(1.0 - gsl.gsl_ran_beta(rng, yk(k+KB), V*xiY))
// sample rkY
param1 = 1.0*gammaY/KY + yk(k+KB)
param2 = 1.0 / (cY + uk(k))
rkY(k) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
rkYsum += rkY(k)
if (i >= BurninITER) {
rkYss(k) += rkY(k) / CollectionITER
}
// sample lk's for the updates of gammaY
lsum2 += sample.samplers.sampleCRT(rng, yk(k), 1.0*gammaY/KY)
logpsum2 += mathutilities.mathutils.logguard(1.0 + uk(k)/cY)
}
// sample gammaY
param1 = ezeroY + lsum2
param2 = 1.0 / (fzeroY + 1.0*logpsum2/KY)
gammaY = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
// corpus sampling of cd; O(D)
for (d <- 0 to (D-1)) {
param1 = czeroY + KY*azeroY
param2 = 1.0 / (dzeroY + thetadss(d))
cd(d) = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
}
// sample global variable
param1 = gzeroB + gammaB
param2 = 1.0 / (hzeroB + rkBsum)
cB = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
param1 = gzeroY + gammaY
param2 = 1.0 / (hzeroY + rkYsum)
cY = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
// sample xiB
//param1 = szeroB + xiBparam1
//param2 = 1.0 / (tzeroB - V*xiBparam2)
//xiB = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
// sample xiY
//param1 = szeroY + xiYparam1
//param2 = 1.0 / (tzeroY - V*xiYparam2)
//xiY = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
// sample epsilon
//param1 = szeroY + epsilonparam1
//param2 = 1.0 / (tzeroY + epsilonparam2)
//epsilon = mathutilities.mathutils.minguard(gsl.gsl_ran_gamma(rng, param1, param2))
}
// end of Gibbs sampling iteration loop
}
def printResults() {
// saves the final expected values
// first create a new directory
val resultDirectoryName = outDirectory + "/JGPPF/expectedValues"
val resultDirectory = new File(resultDirectoryName)
resultDirectory.mkdirs()
// save rkY
printutilities.printutils.printVec(rkYss, resultDirectoryName + "/rkY.txt", KY)
// save thetadk
printutilities.printutils.printMat(thetadkss, resultDirectoryName + "/thetadk.txt", KY, D)
// save betawk
printutilities.printutils.printMat(betawkss, resultDirectoryName + "/betawk.txt", KY, V)
// save psiwk
printutilities.printutils.printMat(psiwkss, resultDirectoryName + "/psiwk.txt", KB, V)
// save rkB
printutilities.printutils.printVec(rkBss, resultDirectoryName + "/rkB.txt", KB)
// save phink
printutilities.printutils.printMat(phinkss, resultDirectoryName + "/phink.txt", KB, N)
}
def printSamples(iter: Int) {
// saves interim samples
// first create a directory, if not there
val iterDirectoryName = outDirectory + "/JGPPF/iterations"
val iterDirectory = new File(iterDirectoryName)
iterDirectory.mkdirs()
// save rkY
printutilities.printutils.printVec(rkY, iterDirectoryName + "/rkY-itr%04d.txt".format(iter), KY)
// save thetadk
printutilities.printutils.printMat(thetadk, iterDirectoryName + "/thetadk-itr%04d.txt".format(iter), KY, D)
// save betawk
printutilities.printutils.printMat(betawk, iterDirectoryName + "/betawk-itr%04d.txt".format(iter), KY, V)
// save psiwk
printutilities.printutils.printMat(psiwk, iterDirectoryName + "/psiwk-itr%04d.txt".format(iter), KB, V)
// save rkB
printutilities.printutils.printVec(rkB, iterDirectoryName + "/rkB-itr%04d.txt".format(iter), KB)
// save phink
printutilities.printutils.printMat(phink, iterDirectoryName + "/phink-itr%04d.txt".format(iter), KB, N)
}
def generateSample(Option: Int, netOption: Int) {
// generates random B,Y samples from the trained matrices, and saves to file
// first create a directory, if not there
val genDirectoryName = outDirectory + "/JGPPF/generatedSamples"
val genDirectory = new File(genDirectoryName)
genDirectory.mkdirs()
// generate B
var Bnew = Array.fill[Integer](N,N)(0)
var(numEntries, x, y) = (0, 0, 0)
var lambda = 0.0
for (n <- 0 to (N-1)) {
for (m <- (n+1) to (N-1)) {
lambda = 0.0
for (k <- 0 to (KB-1)) {
lambda += rkBss(k) * phinkss(n)(k) * phinkss(m)(k)
}
x = gsl.gsl_ran_poisson(rng, lambda)
if (netOption == 0) {
// binary network, truncate x
if (x > 1) {x = 1}
}
if (x > 0) {
numEntries += 1
Bnew(n)(m) = x
}
}
}
// save generated B
printutilities.printutils.printTrFile(Bnew, genDirectoryName + "/genB.txt", N, N, N + "\t" + numEntries)
// generate Y
var Ynew = Array.fill[Integer](D,V)(0)
numEntries = 0
for (d <- 0 to (D-1)) {
for (w <- 0 to (V-1)) {
lambda = 0.0
for (k <- 0 to (KY-1)) {
lambda += rkYss(k) * thetadkss(d)(k) * betawkss(w)(k)
}
if (Option == 1) {
// sampling from joint model
for (k <- 0 to (KB-1)) {
for (zbin <- 0 to (data.Zt.row_ids(d).size-1)) {
lambda += epsilon * rkBss(k) * phinkss(data.Zt.row_ids(d)(zbin))(k) * psiwkss(w)(k)
}
}
}
y = gsl.gsl_ran_poisson(rng, lambda)
if (y > 0) {
numEntries += 1
Ynew(d)(w) = y
}
}
}
// save generated Y
printutilities.printutils.printTrFile(Ynew, genDirectoryName + "/genY.txt", D, V, D + "\t" + V + "\t" + numEntries)
}
}
|
ciads-ut/poisson-factorization
|
GPPF/src/main/scala/pfmodel/gppf.scala
|
import scala.util.Random._
import data._
import org.bytedeco.javacpp._
import org.bytedeco.javacpp.gsl._
import jgppf._
import ngppf._
import cgppf._
object gppf {
def main(args: Array[String]) {
// start of timer
var t0 = System.currentTimeMillis
var Temp = gsl.gsl_rng_default
var rng = gsl.gsl_rng_alloc(Temp)
var aRand = scala.util.Random
// the seed for the rng
var random = aRand.nextInt(32767) % 10 + 1
gsl.gsl_rng_set(rng, random)
// load the config file
var configFileName = "poisson.config" // default config file
if (args.size > 0) {
configFileName = args(0) // user-defined config file
}
println("Loading parameters: " + configFileName)
var config = new Config(configFileName)
// filenames
var trFile1 = config.variables("NETWORK_TRAIN") // training filename (network)
var trFile2 = config.variables("CORPUS_TRAIN") // training filename (corpus)
var trZFile = config.variables("AUTHORS_TRAIN") // training filename (authorship)
var predFile1 = config.variables("NETWORK_HELDOUT") // prediction filename (network)
var predFile2 = config.variables("CORPUS_HELDOUT") // prediction filename (corpus)
// output directory
var outDir = config.variables("OUT_DIR")
// number of topics
var KB = config.variables("NETWORK_TOPICS").toInt // number of latent factors for B
var KY = config.variables("CORPUS_TOPICS").toInt // number of latent factors for Y
// iterations
var burnin = config.variables("BURNIN_ITER").toInt // number of burnin iterations
var collection = config.variables("COLLECT_ITER").toInt // number of collection iterations
// options
var jointOption = 1 // 0: disjoint model, 1: joint model
var epsilon = config.variables("EPSILON").toDouble // mixing parameter between network and corpus
var netOption = config.variables("COUNT_FLAG").toInt // 0: binary network, 1: count network
var samplePeriod = config.variables("OUTPUT_ITER").toInt // save interim samples every 'samplePeriod' iterations
// commands
var modelSelection = config.variables("RUN_MODEL")
var generateSamples = config.variables("GENERATE_SAMPLES").toInt
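// For reference, a minimal poisson.config might look like the sketch below. The key names are
// exactly those looked up above; the paths and values themselves are illustrative only:
//
//   NETWORK_TRAIN    = "data/networkTrain.txt"
//   CORPUS_TRAIN     = "data/corpusTrain.txt"
//   AUTHORS_TRAIN    = "data/authorsTrain.txt"
//   NETWORK_HELDOUT  = "data/networkHeldout.txt"
//   CORPUS_HELDOUT   = "data/corpusHeldout.txt"
//   OUT_DIR          = "results"
//   NETWORK_TOPICS   = 50
//   CORPUS_TOPICS    = 50
//   BURNIN_ITER      = 1000
//   COLLECT_ITER     = 500
//   EPSILON          = 0.1
//   COUNT_FLAG       = 0          # 0: binary network, 1: count network
//   OUTPUT_ITER      = 100
//   RUN_MODEL        = "jgppf"    # jgppf, ngppf, or cgppf
//   GENERATE_SAMPLES = 1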
// get data and train model
if (modelSelection == "jgppf") {
var dat = new PoissonData(trFile1, trFile2, trZFile, predFile1, predFile2)
println("Running J-GPPF")
var mod = new JointModel(rng, dat, epsilon, KB, KY, outDir)
mod.train(burnin, collection, dat, jointOption, netOption, samplePeriod)
mod.printResults()
if (generateSamples == 1) {
mod.generateSample(jointOption, netOption)
}
}
else if (modelSelection == "ngppf") {
var dat = new PoissonData(trFile1, "", "", predFile1, "")
println("Running N-GPPF")
var mod = new NetworkModel(rng, dat, KB, outDir)
mod.train(burnin, collection, dat, netOption, samplePeriod)
mod.printResults()
if (generateSamples == 1) {
mod.generateSample(netOption)
}
}
else if (modelSelection == "cgppf") {
var dat = new PoissonData("", trFile2, "", "", predFile2)
println("Running C-GPPF")
var mod = new CorpusModel(rng, dat, KY, outDir)
mod.train(burnin, collection, dat, samplePeriod)
mod.printResults()
if (generateSamples == 1) {
mod.generateSample()
}
}
else {
println("Error: unknown model selection \"RUN_MODEL\"")
}
//end timer and print elapsed time
var elapsed_time = System.currentTimeMillis - t0
var elapsed_time_minutes = (elapsed_time / (1000*60)).toInt
var elapsed_time_seconds = (elapsed_time / (1000)).toInt
if (elapsed_time_minutes > 5) {
println("Elapsed time: " + elapsed_time_minutes + " minutes")
}
else {
println("Elapsed time: " + elapsed_time_seconds + " seconds")
}
}
}
|
ciads-ut/poisson-factorization
|
GPPF/src/main/scala/pfmodel/GPPFdata.scala
|
package data
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.ListBuffer
import scala.collection.mutable.Map
import scala.io.Source
// base class for the matrix entries of B, Y, and Z
class Input() {
/*
row_ids = list of lists of y indices (author ids for B, word ids for Y, document ids for Z)
row_counts = list of lists of counts
Access as:
inputObj.row_ids(x_id)
or
inputObj.row_counts(x_id)
where x_id corresponds to author ids for B, document ids for Y, author ids for Z
*/
var row_ids = new ArrayBuffer[ArrayBuffer[Int]]()
var row_counts = new ArrayBuffer[ArrayBuffer[Int]]()
}
// class for loading and accessing B, Y, and Z
class PoissonData(trFileName1: String, trFileName2: String, trZFileName: String, predFileName1: String, predFileName2: String) {
// Main variables
var B = new Input() // network matrix (N x N)
var Y = new Input() // corpus matrix (D x V)
var Z = new Input() // authorship matrix (N x D)
var Zt = new Input() // authorship matrix (D x N)
var mB = new Input() // missing data for network
var mY = new Input() // missing data for corpus
var mYt = new Input() // missing data for corpus (transposed)
var N = 0 // number of authors in network (B)
var D = 0 // number of documents in corpus (Y)
var V = 0 // number of words in vocabulary (Y)
var SN = 0 // number of entries in B
var mSN = 0 // number of missing entries in B
var SY = 0 // number of entries in Y
var mSY = 0 // number of missing entries in Y
// Counters for Z matrix
var Ndsz = new ArrayBuffer[Double]()
var Dnsz = new ArrayBuffer[Double]()
def construct() {
if (trFileName1.length() > 0) {
// Load the B matrix
println("Loading network: " + trFileName1)
var trLines1 = Source.fromFile(trFileName1).getLines.filter(!_.isEmpty())
var header = (trLines1.next().split("\t").map(_.toInt)).toList
N = header(0)
SN = header(1)
fill_matrix(B, trLines1, N)
}
if (trFileName2.length() > 0) {
// Load the Y matrix
println("Loading corpus: " + trFileName2)
var trLines2 = Source.fromFile(trFileName2).getLines.filter(!_.isEmpty())
var header = (trLines2.next().split("\t").map(_.toInt)).toList
D = header(0)
V = header(1)
SY = header(2)
fill_matrix(Y, trLines2, D)
}
if (predFileName1.length() > 0) {
// Load the missing entries for B
println("Loading network heldout links: " + predFileName1)
var predLines1 = Source.fromFile(predFileName1).getLines
var header = (predLines1.next().split("\t").map(_.toInt)).toList
mSN = header(1)
fill_matrix(mB, predLines1, N)
}
if (predFileName2.length() > 0) {
// Load the missing entries for Y
println("Loading corpus heldout links: " + predFileName2)
var predLines2 = Source.fromFile(predFileName2).getLines
var header = (predLines2.next().split("\t").map(_.toInt)).toList
mSY = header(2)
fill_matrix(mY, predLines2, D)
// Transpose missing Y entries (more efficient for CGPPF)
for (w <- 0 to (V-1)) {
mYt.row_ids += new ArrayBuffer[Int]()
mYt.row_counts += new ArrayBuffer[Int]()
}
var w = 0
for (d <- 0 to (D-1)) {
for (nz <- 0 to (mY.row_ids(d).size-1)) {
w = mY.row_ids(d)(nz)
mYt.row_ids(w) += d
mYt.row_counts(w) += mY.row_counts(d)(nz)
}
}
}
if (trZFileName.length() > 0) {
// Load the Z matrix (trzfile has no header)
println("Loading authors: " + trZFileName)
var trZLines = Source.fromFile(trZFileName).getLines
fill_matrix(Z, trZLines, N)
// Transpose Z (improves Gibbs sampling efficiency), and initialize Z counters
for (d <- 0 to (D-1)) {
Dnsz += 0.0
Zt.row_ids += new ArrayBuffer[Int]()
Zt.row_counts += new ArrayBuffer[Int]()
}
var d = 0
for (n <- 0 to (N-1)) {
Ndsz += 0.0
for (nz <- 0 to (Z.row_ids(n).size-1)) {
d = Z.row_ids(n)(nz)
Zt.row_ids(d) += n
Zt.row_counts(d) += 1
}
}
// Fill Z counters
for (d <- 0 to (D-1)) {
for (i <- 0 to (Zt.row_ids(d).size-1)) {
Ndsz(Zt.row_ids(d)(i).toInt) += 1.0
Dnsz(d) += 1.0
}
}
}
}
// fills member variables for the Input object
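// Each non-empty data line is either "row<TAB>col<TAB>count" or "row<TAB>col" (count taken as 1);
// row/col indices are assumed to be zero-based, since they index row_ids/row_counts directly.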
def fill_matrix(mat: Input, readFile: Iterator[String], L: Integer) {
var(n, m, value) = (0, 0, 0)
// first create empty rows
for (l <- 0 to (L-1)) {
mat.row_ids += new ArrayBuffer[Int]()
mat.row_counts += new ArrayBuffer[Int]()
}
// then load the data entries
while(readFile.hasNext) {
// load the line, remove right whitespace, split by tabs, and map to a list of integers
var line = readFile.next().replaceAll("\\s+$", "")
if (!line.isEmpty) {
var line_map = (line.split("\t").map(_.toInt)).toList
if (line_map.size == 3) {
n = line_map(0)
m = line_map(1)
value = line_map(2)
mat.row_ids(n) += m
mat.row_counts(n) += value
}
else if (line_map.size == 2) {
n = line_map(0)
m = line_map(1)
mat.row_ids(n) += m
mat.row_counts(n) += 1
}
}
}
}
// make call to start building the data object
construct
}
// class for poisson.config
class Config(configFileName: String) {
// config variable mapping
var variables = scala.collection.mutable.Map[String, String]()
def construct() {
var configLines = Source.fromFile(configFileName).getLines.filter(!_.isEmpty()).filter(!_.startsWith("#")).filter(_.contains("="))
while(configLines.hasNext) {
var line = configLines.next()
// remove comments
if (line.contains("#")) {
line = line.substring(0, line.indexOf("#"))
}
if (line.contains("=")) {
// save the variable
var line_split = (line.split("=")).toList
var key = line_split(0)
var value = line_split(1)
if (value.contains("\"")) {
// isolate quoted contents
value = value.substring(value.indexOf("\"")+1, value.indexOf("\"", value.indexOf("\"")+1))
}
else {
// trim left and right whitespace
value = value.replaceAll("^\\s+", "")
value = value.replaceAll("\\s+$", "")
}
variables += (key -> value)
}
}
}
construct
}
|
scleradb/sclera-install-pathgen
|
src/main/scala/PathGen.scala
|
/**
* Sclera - Path Generator
* Copyright 2012 - 2020 Sclera, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.scleradb.pathgen
class Main extends xsbti.AppMain {
override def run(config: xsbti.AppConfiguration): xsbti.MainResult = {
val provider: xsbti.AppProvider = config.provider()
provider.scalaProvider.jars().foreach { f =>
if( f.getName() == "scala-library.jar" )
println(f.getCanonicalPath)
}
provider.mainClasspath().foreach { f =>
println(f.getCanonicalPath)
}
new xsbti.Exit { override val code: Int = 0 }
}
}
|
richardclose/svggraph
|
src/main/scala/org/phasanix/svggraph/Chart.scala
|
package org.phasanix.svggraph
import java.awt.geom.Point2D
/**
*/
object Chart {
import Helper._
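// Builds the path-data ("d") string for an SVG <path>: a single 'M' command for the first point
// followed by an 'L' command covering the remaining points; an empty string is returned when
// fewer than two points are supplied.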
def svgPath(points: Seq[(Int, Int)]): String = {
val sb = new StringBuilder()
if (points.length > 1) {
sb.fmt('M').fmt(' ')
.fmt(points.head)
.append('L')
points.tail.foreach(p => sb.append(' ').fmt(p))
}
sb.toString()
}
def svgPathPts(points: Seq[Point2D.Float]): String = {
val sb = new StringBuilder()
if (points.length > 1) {
sb.fmt('M').fmt(' ')
.fmt(points.head)
.append('L')
points.tail.foreach(p => sb.append(' ').fmt(p))
}
sb.toString()
}
}
|
richardclose/svggraph
|
src/main/scala/org/phasanix/svggraph/Tick.scala
|
package org.phasanix.svggraph
import java.text.SimpleDateFormat
import java.util.{Calendar, Date}
import org.phasanix.svggraph.Axis.ScaleAxis
/**
* Definitions for ticks and tick boundaries
*/
object Tick {
import Helper._
type Fmt = Double => String
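// Tick formatter for date axes that remembers the year of the previously formatted tick: ticks
// falling in the same year use fmt1 (typically a pattern without the year), while the first tick
// of a new year uses fmt2 (typically a pattern that includes the year).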
class DateTickFmt(fmt1: String, fmt2: String) extends Function1[Double, String] {
val dfmt1 = new SimpleDateFormat(fmt1)
val dfmt2 = new SimpleDateFormat(fmt2)
private[this] var prevYear = -1
private[this] val cal = Calendar.getInstance() // TODO: java.time or datemath alternative
def apply(d: Double): String = {
cal.setTimeInMillis(d.toLong)
val yr = cal.get(Calendar.YEAR)
val ret = if (yr == prevYear && prevYear != -1) {
dfmt1.format(new Date(d.toLong))
} else {
dfmt2.format(new Date(d.toLong))
}
prevYear = yr
ret
}
}
/** Date axis formatter built from a single pattern; use DateTickFmt above to display
 * the year only when a year boundary is crossed. */
def dateFmt(fmtStr: String): Fmt = {
val fmt = new SimpleDateFormat(fmtStr);
{ d: Double => fmt.format(new Date(d.toLong)) }
}
val dateFmt_MY = dateFmt("MMM-yy")
val dateFmt_DM = dateFmt("dd MMM")
val dateFmt_DMY = dateFmt("dd MMM yy")
val defaultDateFmt = new DateTickFmt("dd MMM", "dd MMM yy")
val numFmt_0dp = { v: Double => fmt(v, 0) }
val numFmt_2dp = { v: Double => fmt(v, 2) }
def defaultFmt(isDate: Boolean): Double => String = if (isDate) defaultDateFmt else numFmt_0dp
/**
* Generate a series of tick-boundary values
*/
def values(opts: Options, axis: ScaleAxis): Seq[Double] = {
// estimate number of ticks to fit given range, round down to nearest 5,
// but with a minimum of 3.
val rawTickCount = {
val x = ((axis.length.toInt / opts.draw.pixelsPerTick) / 5) * 5
if (x == 0) 3 else x
}
axis.tickIntervals(rawTickCount).toSeq
}
}
|
richardclose/svggraph
|
src/main/scala/org/phasanix/svggraph/Helper.scala
|
package org.phasanix.svggraph
import java.awt.geom.{Rectangle2D, Point2D}
import RichColor.color2richcolor
/**
* helper methods
*/
object Helper {
private def pow10(x: Int): Int = {
x match {
case 1 => 10
case 2 => 100
case 3 => 1000
case 4 => 10000
case _ =>
var count = x
var p = 1
while (count > 0) {
p *= 10
count -= 1
}
p
}
}
/**
 * Fast append of a double value to a string builder with a fixed number of decimal places (dp).
 * Thread-safe alternative to java.text.NumberFormat.
 */
def append(sb: StringBuilder, value: Double, dp: Int): Unit = {
val intV = value.toInt
sb.append(intV)
if (dp > 0) {
sb.append('.')
val rem = Math.abs(value) - Math.abs(intV)
val remInt = (pow10(dp) * rem).toInt
// zero-pad the fractional digits so values like 1.05 keep their leading zero instead of
// rendering as "1.5"
val remStr = remInt.toString
var pad = dp - remStr.length
while (pad > 0) { sb.append('0'); pad -= 1 }
sb.append(remStr)
}
}
/** shorthand formatting methods for StringBuilder */
implicit class StringBuilderOps(val sb: StringBuilder) extends AnyVal {
def fmt(value: Double, dp: Int): StringBuilder = {
append(sb, value, dp)
sb
}
def fmt(value: Int): StringBuilder = {
sb.append(value)
}
def fmt(value: String): StringBuilder = {
sb.append(value)
}
def fmt(value: Char): StringBuilder = {
sb.append(value)
}
def fmt(point: Point2D.Float): StringBuilder = {
fmt(point.x.toDouble, 2)
sb.append(',')
fmt(point.y.toDouble, 2)
}
def fmt(point: (Int, Int)): StringBuilder = {
sb.append(point._1)
.append(',')
.append(point._2)
}
}
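// Usage sketch (illustrative): the implicit ops allow chained, allocation-light formatting, e.g.
//   val sb = new StringBuilder()
//   sb.fmt(12.345, 2).fmt(',').fmt(56.789, 2)   // yields "12.34,56.78" (digits are truncated)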
def fmt(value: Double, dp: Int): String = {
val sb = new StringBuilder()
sb.fmt(value, dp)
sb.toString()
}
def fmt(value: Int): String = value.toString
def fmt(value: Point2D.Float): String = {
val sb = new StringBuilder()
sb.fmt(value)
sb.toString()
}
implicit class Rectangle2DOps(val rect: Rectangle2D.Float) extends AnyVal {
def center: Point2D.Float = new Point2D.Float(rect.x + (rect.width / 2), rect.y + (rect.height / 2))
}
/** Draw a text element */
def drawText(value: String, pos: Point2D.Float, attrs: Seq[Constants.AttrProvider] = Seq.empty)(implicit opts: Options): xml.Elem = {
attrs.foldLeft(<text x={fmt(pos.x, 1)} y={fmt(pos.y, 1)}>{value}</text>) { (elt, attr) => attr(elt) }
}
/**
* Draw grid on the chart area defined by opts, at the given positions
*/
def drawGrid(vertical: Boolean, positions: Seq[Float])(implicit opts: Options) = {
val area = opts.plotArea
if (vertical) {
val x1 = fmt(area.x, 1)
val x2 = fmt(area.x + area.width, 1)
positions.map { pos =>
<line x1={x1} x2={x2} y1={fmt(pos, 1)} y2={fmt(pos, 1)} stroke={opts.draw.gridColor.asRgb}/>
}
} else {
val y1 = fmt(area.y, 1)
val y2 = fmt(area.height - area.y, 1)
positions.map { pos =>
<line x1={fmt(pos, 1)} x2={fmt(pos, 1)} y1={y1} y2={y2} stroke={opts.draw.gridColor.asRgb}/>
}
}
}
}
|
richardclose/svggraph
|
src/main/scala/org/phasanix/svggraph/Options.scala
|
package org.phasanix.svggraph
import java.awt.Color
import java.awt.geom.{Point2D, Rectangle2D}
import RichColor.color2richcolor
/**
* Configuration options.
*
* Measurements are all in px.
*/
case class Options (
layout: Options.Layout,
font: Options.Font,
draw: Options.Draw
) {
def plotArea: Rectangle2D.Float = layout.plotArea
def strokeWidth: Float = draw.strokeWidth
}
object Options {
/** Basic set of options */
def basic: Options = Options (
Layout.basic(chartWidth = 500, chartHeight = 300),
Font.basic(),
Draw.basic()
)
/**
* Chart layout. Chart regions are expressed as Rectangle2D, with measurements
* in px, and the origin of the coordinate system is at the bottom left of the
* chart area.
*
* @param chartWidth width of graph, in px.
* @param chartHeight height of graph, in px.
* @param xTickMargin height of x axis margin.
* @param yTickMarginLeft width of left y axis margin.
* @param yTickMarginRight width of right y axis margin (if any).
* @param plotMargin reserved space around plot area to accommodate
* points that fall on the x or y axis.
*/
case class Layout (
chartWidth: Int,
chartHeight: Int,
xTickMargin: Int,
yTickMarginLeft: Int,
yTickMarginRight: Int,
plotMargin: Int
) {
/** Area of whole chart */
def chartArea: Rectangle2D.Float = new Rectangle2D.Float(0f, 0f, chartWidth, chartHeight)
/** Area where chart contents are plotted */
def plotArea: Rectangle2D.Float = {
val x = yTickMarginLeft + plotMargin
val y = xTickMargin + plotMargin
new Rectangle2D.Float(x, y, chartWidth - x - plotMargin - yTickMarginRight, chartHeight - y)
}
/** Area of x axis title and tick marks */
def xTickArea: Rectangle2D.Float =
new Rectangle2D.Float(yTickMarginLeft + plotMargin, 0, chartWidth - yTickMarginLeft - yTickMarginRight - (2*plotMargin), xTickMargin)
/** Area of left y axis title and tick marks */
def yTickAreaLeft: Rectangle2D.Float =
new Rectangle2D.Float(0, xTickMargin + plotMargin, yTickMarginLeft, chartHeight - xTickMargin - plotMargin)
    /** Area of right y axis title and tick marks */
def yTickAreaRight: Rectangle2D.Float =
new Rectangle2D.Float(chartWidth - yTickMarginRight, xTickMargin + plotMargin, yTickMarginRight, chartHeight - xTickMargin - plotMargin)
def origin: Point2D.Float = new Point2D.Float(yTickMarginLeft + plotMargin, xTickMargin + plotMargin)
}
object Layout {
private def orDefault(value: Int, defaultValue: Int): Int = if (value == -1) defaultValue else value
/** Basic set of layout options */
def basic(chartWidth: Int, chartHeight: Int, xTickMargin: Int = -1, yTickMarginLeft: Int = -1, yTickMarginRight: Int = -1, plotMargin: Int = -1): Layout = {
val xtm = orDefault(xTickMargin, Math.min(chartHeight/5, 50))
val ytml = orDefault(yTickMarginLeft, Math.min(chartWidth/5, 50))
val ytmr = orDefault(yTickMarginRight, 0)
val pm = orDefault(plotMargin, Math.min(chartWidth/10, 5))
Layout(chartWidth, chartHeight, xtm, ytml, ytmr, pm)
}
}
case class Font (
family: String,
baseSize: Float,
sizeIncrement: Float
)
object Font {
def basic(family: String = "Arial", baseSize: Float = 14f, sizeIncrement: Float = 1.5f): Font = {
Font(family, baseSize, sizeIncrement)
}
}
case class Draw (
strokeWidth: Float,
lineSpacing: Float,
pixelsPerTick: Int,
lineColor: Color,
frameColor: Color,
gridColor: Color
)
object Draw {
def basic (
strokeWidth: Float = 1.0f,
lineSpacing: Float = 12.0f,
pixelsPerTick: Int = 50,
lineColor: Color = Color.BLACK,
frameColor: Color = RichColor.Nil,
gridColor: Color = RichColor.Nil): Draw =
Draw(strokeWidth, lineSpacing, pixelsPerTick, lineColor, frameColor.or(lineColor), gridColor.or(lineColor))
}
}
|
richardclose/svggraph
|
src/main/scala/org/phasanix/svggraph/RichColor.scala
|
<gh_stars>0
package org.phasanix.svggraph
import java.awt.Color
import scala.language.implicitConversions
/**
* Utility methods for handling Color.
*/
class RichColorOps(val value: Color) extends AnyVal {
def setAlpha(alpha: Double): Color =
new Color(value.getRed, value.getGreen, value.getBlue, (255 * alpha).toInt)
def opaque: Color = if (value.getAlpha == 255) value else setAlpha(1.0)
def asRgb: String = RichColor.colorAsRgb(value)
def asHex: String = RichColor.colorAsHex(value)
def or(color: Color): Color =
if (value == RichColor.Nil) color else value
}
object RichColor extends X11Colors {
implicit def color2richcolor(color: Color): RichColorOps = apply(color)
val Transparent: Color = new Color(0, 0, 0, 0)
val Nil: Color = new Color(12, 34, 56, 0)
def apply(color: Color): RichColorOps = new RichColorOps(color)
private val rxShortHex = """#([0-9A-F])([0-9A-F])([0-9A-F])""".r
private val rxLongHex = """#([0-9A-F]{2})([0-9A-F]{2})([0-9A-F]{2})""".r
private val rxRgb = """rgb\(\s*(\d+),\s*(\d+),\s*(\d+)\)""".r
private val rxRgba = """rgba\(\s*(\d+),\s*(\d+),\s*(\d+),\s*(0.\d{1,3})\)""".r
/**
* Parse a string in one of short hex (e.g. #DCB), long hex (e.g. #DDCCBB),
   * rgb (e.g. rgb(100,120,130)) or rgba (e.g. rgba(100,120,130,0.5)) formats.
*/
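  // Illustrative examples (a sketch, not from the original source). Note that the
  // hex patterns above only accept uppercase digits:
  //   RichColor.parse("#FA0")                  // short hex  -> Some(255,170,0)
  //   RichColor.parse("#FFAA00")               // long hex   -> Some(255,170,0)
  //   RichColor.parse("rgb(255, 170, 0)")      // rgb        -> Some(255,170,0)
  //   RichColor.parse("rgba(255,170,0,0.5)")   // rgba       -> Some, with alpha 127
  //   RichColor.parse("#fa0")                  // lowercase  -> None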
def parse(s: String): Option[Color] = {
import java.lang.Integer.{ parseInt => pi }
import java.lang.Float.{ parseFloat => pf }
val c = s match {
case rxShortHex(r, g, b) => new Color(pi(r + r, 16), pi(g + g, 16), pi(b + b, 16), 255)
case rxLongHex(r, g, b) => new Color(pi(r, 16), pi(g, 16), pi(b, 16), 255)
case rxRgb(r, g, b) => new Color(pi(r), pi(g), pi(b), 255)
case rxRgba(r, g, b, a) =>
val alpha = pf(a)
val alphaInt = if (alpha == 0.0) 0 else (256f * alpha).toInt - 1
new Color(pi(r), pi(g), pi(b), alphaInt)
case _ => null
}
Option(c)
}
/**
* Convenience method, supplies default colour if parse fails
*/
def parseOrElse(s: String, defaultColor: Color): Color = {
parse(s).getOrElse(defaultColor)
}
/**
* represent color as rgb string, or rgba if alpha is present
*/
def encodeAsRgb(sb: StringBuilder, color: Color) {
val alpha = color.getAlpha
val hasAlpha = alpha < 255
sb.append(if (hasAlpha) "rgba(" else "rgb(")
.append(color.getRed).append(',')
.append(color.getGreen).append(',')
.append(color.getBlue)
if (hasAlpha) {
val s = "%.6f".format((alpha+1)/256.0).reverse.dropWhile(_ == '0').reverse // Remove trailing zeros
sb.append(',').append(s)
}
sb.append(')')
}
/** represent color as hex string, ignoring alpha */
  def encodeAsHex(sb: StringBuilder, color: Color) {
    sb.append('#')
    // zero-pad all 8 ARGB hex digits, then drop the alpha byte; "%6X" would
    // space-pad small values and yield a malformed hex string
    val s = "%08X".format(color.getRGB)
    sb.append(s.substring(2))
  }
  /** Color as HTML hex string (e.g. "#12BCA3") */
def colorAsHex(color: Color) = {
val sb = new StringBuilder(7)
encodeAsHex(sb, color)
sb.toString()
}
/** Color as rgb or rgba value */
def colorAsRgb(color: Color) = {
val sb = new StringBuilder(20)
encodeAsRgb(sb, color)
sb.toString()
}
/**
* Generate colours by interpolating the requested number of colours between
* the given colours. Return range including original colours, or range(WHITE,BLACK,count)
* if the strings don't parse.
*/
def range(from: String, to: String, count: Int): Seq[Color] = {
val maybeRange = for (c1 <- parse(from); c2 <- parse(to)) yield range(c1, c2, count)
maybeRange.getOrElse(range(Color.WHITE, Color.BLACK, count))
}
/**
* Generate colours by interpolating the requested number of colours between
* the given colours. Return range includes original colours.
*/
def range(from: Color, to: Color, count: Int): Seq[Color] = {
def arr(c: Color) = Seq(c.getRed, c.getGreen, c.getBlue, c.getAlpha)
val arr1 = arr(from)
val arr2 = arr(to)
val steps = arr1.zip(arr2).map(e => (e._1 - e._2) / (count + 1))
val colours = 1 to count map { i =>
val xs = arr1.zip(steps).map(e => e._1 - i * e._2)
new Color(xs(0), xs(1), xs(2), xs(3))
}
from +: colours :+ to
}
/**
* Generate a Function1[Double, Color] for converting a value to a mapped
* color from the given range of colors.
*/
def rangeMapperLinear(colors: Seq[Color], values: Seq[Double]): Double => Color = {
val (min, max) = (values.min, values.max)
{ value: Double =>
val mapped = (colors.length * (value-min)/(max-min)).toInt
val index = if (mapped < 0) 0 else if (mapped >= colors.length) colors.length - 1 else mapped
colors(index)
}
}
private def _interp(from: Int, to: Int, dist: Double) = {
(from + (to - from) * dist).toInt
}
/**
* Linearly interpolate between two colours
*/
def interp(from: Color, to: Color, distance: Double) = {
val dist = Math.max(0.0, Math.min(1.0, distance))
new Color(
_interp(from.getRed, to.getRed, dist),
_interp(from.getGreen, to.getGreen, dist),
_interp(from.getBlue, to.getBlue, dist),
_interp(from.getAlpha, to.getAlpha, dist))
}
}
/**
* Converted from /etc/X11/rgb.txt
*/
trait X11Colors {
val AliceBlue = new Color(240, 248, 255)
val AntiqueWhite = new Color(250, 235, 215)
val AntiqueWhite1 = new Color(255, 239, 219)
val AntiqueWhite2 = new Color(238, 223, 204)
val AntiqueWhite3 = new Color(205, 192, 176)
val AntiqueWhite4 = new Color(139, 131, 120)
val Aquamarine = new Color(127, 255, 212)
val Aquamarine1 = new Color(127, 255, 212)
val Aquamarine2 = new Color(118, 238, 198)
val Aquamarine3 = new Color(102, 205, 170)
val Aquamarine4 = new Color(69, 139, 116)
val Azure = new Color(240, 255, 255)
val Azure1 = new Color(240, 255, 255)
val Azure2 = new Color(224, 238, 238)
val Azure3 = new Color(193, 205, 205)
val Azure4 = new Color(131, 139, 139)
val Beige = new Color(245, 245, 220)
val Bisque = new Color(255, 228, 196)
val Bisque1 = new Color(255, 228, 196)
val Bisque2 = new Color(238, 213, 183)
val Bisque3 = new Color(205, 183, 158)
val Bisque4 = new Color(139, 125, 107)
val Black = new Color(0, 0, 0)
val BlanchedAlmond = new Color(255, 235, 205)
val Blue = new Color(0, 0, 255)
val Blue1 = new Color(0, 0, 255)
val Blue2 = new Color(0, 0, 238)
val Blue3 = new Color(0, 0, 205)
val Blue4 = new Color(0, 0, 139)
val BlueViolet = new Color(138, 43, 226)
val Brown = new Color(165, 42, 42)
val Brown1 = new Color(255, 64, 64)
val Brown2 = new Color(238, 59, 59)
val Brown3 = new Color(205, 51, 51)
val Brown4 = new Color(139, 35, 35)
val Burlywood = new Color(222, 184, 135)
val Burlywood1 = new Color(255, 211, 155)
val Burlywood2 = new Color(238, 197, 145)
val Burlywood3 = new Color(205, 170, 125)
val Burlywood4 = new Color(139, 115, 85)
val CadetBlue = new Color(95, 158, 160)
val CadetBlue1 = new Color(152, 245, 255)
val CadetBlue2 = new Color(142, 229, 238)
val CadetBlue3 = new Color(122, 197, 205)
val CadetBlue4 = new Color(83, 134, 139)
val Chartreuse = new Color(127, 255, 0)
val Chartreuse1 = new Color(127, 255, 0)
val Chartreuse2 = new Color(118, 238, 0)
val Chartreuse3 = new Color(102, 205, 0)
val Chartreuse4 = new Color(69, 139, 0)
val Chocolate = new Color(210, 105, 30)
val Chocolate1 = new Color(255, 127, 36)
val Chocolate2 = new Color(238, 118, 33)
val Chocolate3 = new Color(205, 102, 29)
val Chocolate4 = new Color(139, 69, 19)
val Coral = new Color(255, 127, 80)
val Coral1 = new Color(255, 114, 86)
val Coral2 = new Color(238, 106, 80)
val Coral3 = new Color(205, 91, 69)
val Coral4 = new Color(139, 62, 47)
val CornflowerBlue = new Color(100, 149, 237)
val Cornsilk = new Color(255, 248, 220)
val Cornsilk1 = new Color(255, 248, 220)
val Cornsilk2 = new Color(238, 232, 205)
val Cornsilk3 = new Color(205, 200, 177)
val Cornsilk4 = new Color(139, 136, 120)
val Cyan = new Color(0, 255, 255)
val Cyan1 = new Color(0, 255, 255)
val Cyan2 = new Color(0, 238, 238)
val Cyan3 = new Color(0, 205, 205)
val Cyan4 = new Color(0, 139, 139)
val DarkBlue = new Color(0, 0, 139)
val DarkCyan = new Color(0, 139, 139)
val DarkGoldenrod = new Color(184, 134, 11)
val DarkGoldenrod1 = new Color(255, 185, 15)
val DarkGoldenrod2 = new Color(238, 173, 14)
val DarkGoldenrod3 = new Color(205, 149, 12)
val DarkGoldenrod4 = new Color(139, 101, 8)
val DarkGray = new Color(169, 169, 169)
val DarkGreen = new Color(0, 100, 0)
val DarkKhaki = new Color(189, 183, 107)
val DarkMagenta = new Color(139, 0, 139)
val DarkOliveGreen = new Color(85, 107, 47)
val DarkOliveGreen1 = new Color(202, 255, 112)
val DarkOliveGreen2 = new Color(188, 238, 104)
val DarkOliveGreen3 = new Color(162, 205, 90)
val DarkOliveGreen4 = new Color(110, 139, 61)
val DarkOrange = new Color(255, 140, 0)
val DarkOrange1 = new Color(255, 127, 0)
val DarkOrange2 = new Color(238, 118, 0)
val DarkOrange3 = new Color(205, 102, 0)
val DarkOrange4 = new Color(139, 69, 0)
val DarkOrchid = new Color(153, 50, 204)
val DarkOrchid1 = new Color(191, 62, 255)
val DarkOrchid2 = new Color(178, 58, 238)
val DarkOrchid3 = new Color(154, 50, 205)
val DarkOrchid4 = new Color(104, 34, 139)
val DarkRed = new Color(139, 0, 0)
val DarkSalmon = new Color(233, 150, 122)
val DarkSeaGreen = new Color(143, 188, 143)
val DarkSeaGreen1 = new Color(193, 255, 193)
val DarkSeaGreen2 = new Color(180, 238, 180)
val DarkSeaGreen3 = new Color(155, 205, 155)
val DarkSeaGreen4 = new Color(105, 139, 105)
val DarkSlateBlue = new Color(72, 61, 139)
val DarkSlateGray = new Color(47, 79, 79)
val DarkSlateGray1 = new Color(151, 255, 255)
val DarkSlateGray2 = new Color(141, 238, 238)
val DarkSlateGray3 = new Color(121, 205, 205)
val DarkSlateGray4 = new Color(82, 139, 139)
val DarkTurquoise = new Color(0, 206, 209)
val DarkViolet = new Color(148, 0, 211)
val DebianRed = new Color(215, 7, 81)
val DeepPink = new Color(255, 20, 147)
val DeepPink1 = new Color(255, 20, 147)
val DeepPink2 = new Color(238, 18, 137)
val DeepPink3 = new Color(205, 16, 118)
val DeepPink4 = new Color(139, 10, 80)
val DeepSkyBlue = new Color(0, 191, 255)
val DeepSkyBlue1 = new Color(0, 191, 255)
val DeepSkyBlue2 = new Color(0, 178, 238)
val DeepSkyBlue3 = new Color(0, 154, 205)
val DeepSkyBlue4 = new Color(0, 104, 139)
val DimGray = new Color(105, 105, 105)
val DodgerBlue = new Color(30, 144, 255)
val DodgerBlue1 = new Color(30, 144, 255)
val DodgerBlue2 = new Color(28, 134, 238)
val DodgerBlue3 = new Color(24, 116, 205)
val DodgerBlue4 = new Color(16, 78, 139)
val Firebrick = new Color(178, 34, 34)
val Firebrick1 = new Color(255, 48, 48)
val Firebrick2 = new Color(238, 44, 44)
val Firebrick3 = new Color(205, 38, 38)
val Firebrick4 = new Color(139, 26, 26)
val FloralWhite = new Color(255, 250, 240)
val ForestGreen = new Color(34, 139, 34)
val Gainsboro = new Color(220, 220, 220)
val GhostWhite = new Color(248, 248, 255)
val Gold = new Color(255, 215, 0)
val Gold1 = new Color(255, 215, 0)
val Gold2 = new Color(238, 201, 0)
val Gold3 = new Color(205, 173, 0)
val Gold4 = new Color(139, 117, 0)
val Goldenrod = new Color(218, 165, 32)
val Goldenrod1 = new Color(255, 193, 37)
val Goldenrod2 = new Color(238, 180, 34)
val Goldenrod3 = new Color(205, 155, 29)
val Goldenrod4 = new Color(139, 105, 20)
val Gray = new Color(190, 190, 190)
val Gray0 = new Color(0, 0, 0)
val Gray1 = new Color(3, 3, 3)
val Gray10 = new Color(26, 26, 26)
val Gray100 = new Color(255, 255, 255)
val Gray11 = new Color(28, 28, 28)
val Gray12 = new Color(31, 31, 31)
val Gray13 = new Color(33, 33, 33)
val Gray14 = new Color(36, 36, 36)
val Gray15 = new Color(38, 38, 38)
val Gray16 = new Color(41, 41, 41)
val Gray17 = new Color(43, 43, 43)
val Gray18 = new Color(46, 46, 46)
val Gray19 = new Color(48, 48, 48)
val Gray2 = new Color(5, 5, 5)
val Gray20 = new Color(51, 51, 51)
val Gray21 = new Color(54, 54, 54)
val Gray22 = new Color(56, 56, 56)
val Gray23 = new Color(59, 59, 59)
val Gray24 = new Color(61, 61, 61)
val Gray25 = new Color(64, 64, 64)
val Gray26 = new Color(66, 66, 66)
val Gray27 = new Color(69, 69, 69)
val Gray28 = new Color(71, 71, 71)
val Gray29 = new Color(74, 74, 74)
val Gray3 = new Color(8, 8, 8)
val Gray30 = new Color(77, 77, 77)
val Gray31 = new Color(79, 79, 79)
val Gray32 = new Color(82, 82, 82)
val Gray33 = new Color(84, 84, 84)
val Gray34 = new Color(87, 87, 87)
val Gray35 = new Color(89, 89, 89)
val Gray36 = new Color(92, 92, 92)
val Gray37 = new Color(94, 94, 94)
val Gray38 = new Color(97, 97, 97)
val Gray39 = new Color(99, 99, 99)
val Gray4 = new Color(10, 10, 10)
val Gray40 = new Color(102, 102, 102)
val Gray41 = new Color(105, 105, 105)
val Gray42 = new Color(107, 107, 107)
val Gray43 = new Color(110, 110, 110)
val Gray44 = new Color(112, 112, 112)
val Gray45 = new Color(115, 115, 115)
val Gray46 = new Color(117, 117, 117)
val Gray47 = new Color(120, 120, 120)
val Gray48 = new Color(122, 122, 122)
val Gray49 = new Color(125, 125, 125)
val Gray5 = new Color(13, 13, 13)
val Gray50 = new Color(127, 127, 127)
val Gray51 = new Color(130, 130, 130)
val Gray52 = new Color(133, 133, 133)
val Gray53 = new Color(135, 135, 135)
val Gray54 = new Color(138, 138, 138)
val Gray55 = new Color(140, 140, 140)
val Gray56 = new Color(143, 143, 143)
val Gray57 = new Color(145, 145, 145)
val Gray58 = new Color(148, 148, 148)
val Gray59 = new Color(150, 150, 150)
val Gray6 = new Color(15, 15, 15)
val Gray60 = new Color(153, 153, 153)
val Gray61 = new Color(156, 156, 156)
val Gray62 = new Color(158, 158, 158)
val Gray63 = new Color(161, 161, 161)
val Gray64 = new Color(163, 163, 163)
val Gray65 = new Color(166, 166, 166)
val Gray66 = new Color(168, 168, 168)
val Gray67 = new Color(171, 171, 171)
val Gray68 = new Color(173, 173, 173)
val Gray69 = new Color(176, 176, 176)
val Gray7 = new Color(18, 18, 18)
val Gray70 = new Color(179, 179, 179)
val Gray71 = new Color(181, 181, 181)
val Gray72 = new Color(184, 184, 184)
val Gray73 = new Color(186, 186, 186)
val Gray74 = new Color(189, 189, 189)
val Gray75 = new Color(191, 191, 191)
val Gray76 = new Color(194, 194, 194)
val Gray77 = new Color(196, 196, 196)
val Gray78 = new Color(199, 199, 199)
val Gray79 = new Color(201, 201, 201)
val Gray8 = new Color(20, 20, 20)
val Gray80 = new Color(204, 204, 204)
val Gray81 = new Color(207, 207, 207)
val Gray82 = new Color(209, 209, 209)
val Gray83 = new Color(212, 212, 212)
val Gray84 = new Color(214, 214, 214)
val Gray85 = new Color(217, 217, 217)
val Gray86 = new Color(219, 219, 219)
val Gray87 = new Color(222, 222, 222)
val Gray88 = new Color(224, 224, 224)
val Gray89 = new Color(227, 227, 227)
val Gray9 = new Color(23, 23, 23)
val Gray90 = new Color(229, 229, 229)
val Gray91 = new Color(232, 232, 232)
val Gray92 = new Color(235, 235, 235)
val Gray93 = new Color(237, 237, 237)
val Gray94 = new Color(240, 240, 240)
val Gray95 = new Color(242, 242, 242)
val Gray96 = new Color(245, 245, 245)
val Gray97 = new Color(247, 247, 247)
val Gray98 = new Color(250, 250, 250)
val Gray99 = new Color(252, 252, 252)
val Green = new Color(0, 255, 0)
val Green1 = new Color(0, 255, 0)
val Green2 = new Color(0, 238, 0)
val Green3 = new Color(0, 205, 0)
val Green4 = new Color(0, 139, 0)
val GreenYellow = new Color(173, 255, 47)
val Honeydew = new Color(240, 255, 240)
val Honeydew1 = new Color(240, 255, 240)
val Honeydew2 = new Color(224, 238, 224)
val Honeydew3 = new Color(193, 205, 193)
val Honeydew4 = new Color(131, 139, 131)
val HotPink = new Color(255, 105, 180)
val HotPink1 = new Color(255, 110, 180)
val HotPink2 = new Color(238, 106, 167)
val HotPink3 = new Color(205, 96, 144)
val HotPink4 = new Color(139, 58, 98)
val IndianRed = new Color(205, 92, 92)
val IndianRed1 = new Color(255, 106, 106)
val IndianRed2 = new Color(238, 99, 99)
val IndianRed3 = new Color(205, 85, 85)
val IndianRed4 = new Color(139, 58, 58)
val Ivory = new Color(255, 255, 240)
val Ivory1 = new Color(255, 255, 240)
val Ivory2 = new Color(238, 238, 224)
val Ivory3 = new Color(205, 205, 193)
val Ivory4 = new Color(139, 139, 131)
val Khaki = new Color(240, 230, 140)
val Khaki1 = new Color(255, 246, 143)
val Khaki2 = new Color(238, 230, 133)
val Khaki3 = new Color(205, 198, 115)
val Khaki4 = new Color(139, 134, 78)
val Lavender = new Color(230, 230, 250)
val LavenderBlush = new Color(255, 240, 245)
val LavenderBlush1 = new Color(255, 240, 245)
val LavenderBlush2 = new Color(238, 224, 229)
val LavenderBlush3 = new Color(205, 193, 197)
val LavenderBlush4 = new Color(139, 131, 134)
val LawnGreen = new Color(124, 252, 0)
val LemonChiffon = new Color(255, 250, 205)
val LemonChiffon1 = new Color(255, 250, 205)
val LemonChiffon2 = new Color(238, 233, 191)
val LemonChiffon3 = new Color(205, 201, 165)
val LemonChiffon4 = new Color(139, 137, 112)
val LightBlue = new Color(173, 216, 230)
val LightBlue1 = new Color(191, 239, 255)
val LightBlue2 = new Color(178, 223, 238)
val LightBlue3 = new Color(154, 192, 205)
val LightBlue4 = new Color(104, 131, 139)
val LightCoral = new Color(240, 128, 128)
val LightCyan = new Color(224, 255, 255)
val LightCyan1 = new Color(224, 255, 255)
val LightCyan2 = new Color(209, 238, 238)
val LightCyan3 = new Color(180, 205, 205)
val LightCyan4 = new Color(122, 139, 139)
val LightGoldenrod = new Color(238, 221, 130)
val LightGoldenrod1 = new Color(255, 236, 139)
val LightGoldenrod2 = new Color(238, 220, 130)
val LightGoldenrod3 = new Color(205, 190, 112)
val LightGoldenrod4 = new Color(139, 129, 76)
val LightGoldenrodYellow = new Color(250, 250, 210)
val LightGray = new Color(211, 211, 211)
val LightGreen = new Color(144, 238, 144)
val LightPink = new Color(255, 182, 193)
val LightPink1 = new Color(255, 174, 185)
val LightPink2 = new Color(238, 162, 173)
val LightPink3 = new Color(205, 140, 149)
val LightPink4 = new Color(139, 95, 101)
val LightSalmon = new Color(255, 160, 122)
val LightSalmon1 = new Color(255, 160, 122)
val LightSalmon2 = new Color(238, 149, 114)
val LightSalmon3 = new Color(205, 129, 98)
val LightSalmon4 = new Color(139, 87, 66)
val LightSeaGreen = new Color(32, 178, 170)
val LightSkyBlue = new Color(135, 206, 250)
val LightSkyBlue1 = new Color(176, 226, 255)
val LightSkyBlue2 = new Color(164, 211, 238)
val LightSkyBlue3 = new Color(141, 182, 205)
val LightSkyBlue4 = new Color(96, 123, 139)
val LightSlateBlue = new Color(132, 112, 255)
val LightSlateGray = new Color(119, 136, 153)
val LightSteelBlue = new Color(176, 196, 222)
val LightSteelBlue1 = new Color(202, 225, 255)
val LightSteelBlue2 = new Color(188, 210, 238)
val LightSteelBlue3 = new Color(162, 181, 205)
val LightSteelBlue4 = new Color(110, 123, 139)
val LightYellow = new Color(255, 255, 224)
val LightYellow1 = new Color(255, 255, 224)
val LightYellow2 = new Color(238, 238, 209)
val LightYellow3 = new Color(205, 205, 180)
val LightYellow4 = new Color(139, 139, 122)
val LimeGreen = new Color(50, 205, 50)
val Linen = new Color(250, 240, 230)
val Magenta = new Color(255, 0, 255)
val Magenta1 = new Color(255, 0, 255)
val Magenta2 = new Color(238, 0, 238)
val Magenta3 = new Color(205, 0, 205)
val Magenta4 = new Color(139, 0, 139)
val Maroon = new Color(176, 48, 96)
val Maroon1 = new Color(255, 52, 179)
val Maroon2 = new Color(238, 48, 167)
val Maroon3 = new Color(205, 41, 144)
val Maroon4 = new Color(139, 28, 98)
val MediumAquamarine = new Color(102, 205, 170)
val MediumBlue = new Color(0, 0, 205)
val MediumOrchid = new Color(186, 85, 211)
val MediumOrchid1 = new Color(224, 102, 255)
val MediumOrchid2 = new Color(209, 95, 238)
val MediumOrchid3 = new Color(180, 82, 205)
val MediumOrchid4 = new Color(122, 55, 139)
val MediumPurple = new Color(147, 112, 219)
val MediumPurple1 = new Color(171, 130, 255)
val MediumPurple2 = new Color(159, 121, 238)
val MediumPurple3 = new Color(137, 104, 205)
val MediumPurple4 = new Color(93, 71, 139)
val MediumSeaGreen = new Color(60, 179, 113)
val MediumSlateBlue = new Color(123, 104, 238)
val MediumSpringGreen = new Color(0, 250, 154)
val MediumTurquoise = new Color(72, 209, 204)
val MediumVioletRed = new Color(199, 21, 133)
val MidnightBlue = new Color(25, 25, 112)
val MintCream = new Color(245, 255, 250)
val MistyRose = new Color(255, 228, 225)
val MistyRose1 = new Color(255, 228, 225)
val MistyRose2 = new Color(238, 213, 210)
val MistyRose3 = new Color(205, 183, 181)
val MistyRose4 = new Color(139, 125, 123)
val Moccasin = new Color(255, 228, 181)
val NavajoWhite = new Color(255, 222, 173)
val NavajoWhite1 = new Color(255, 222, 173)
val NavajoWhite2 = new Color(238, 207, 161)
val NavajoWhite3 = new Color(205, 179, 139)
val NavajoWhite4 = new Color(139, 121, 94)
val Navy = new Color(0, 0, 128)
val NavyBlue = new Color(0, 0, 128)
val OldLace = new Color(253, 245, 230)
val OliveDrab = new Color(107, 142, 35)
val OliveDrab1 = new Color(192, 255, 62)
val OliveDrab2 = new Color(179, 238, 58)
val OliveDrab3 = new Color(154, 205, 50)
val OliveDrab4 = new Color(105, 139, 34)
val Orange = new Color(255, 165, 0)
val Orange1 = new Color(255, 165, 0)
val Orange2 = new Color(238, 154, 0)
val Orange3 = new Color(205, 133, 0)
val Orange4 = new Color(139, 90, 0)
val OrangeRed = new Color(255, 69, 0)
val OrangeRed1 = new Color(255, 69, 0)
val OrangeRed2 = new Color(238, 64, 0)
val OrangeRed3 = new Color(205, 55, 0)
val OrangeRed4 = new Color(139, 37, 0)
val Orchid = new Color(218, 112, 214)
val Orchid1 = new Color(255, 131, 250)
val Orchid2 = new Color(238, 122, 233)
val Orchid3 = new Color(205, 105, 201)
val Orchid4 = new Color(139, 71, 137)
val PaleGoldenrod = new Color(238, 232, 170)
val PaleGreen = new Color(152, 251, 152)
val PaleGreen1 = new Color(154, 255, 154)
val PaleGreen2 = new Color(144, 238, 144)
val PaleGreen3 = new Color(124, 205, 124)
val PaleGreen4 = new Color(84, 139, 84)
val PaleTurquoise = new Color(175, 238, 238)
val PaleTurquoise1 = new Color(187, 255, 255)
val PaleTurquoise2 = new Color(174, 238, 238)
val PaleTurquoise3 = new Color(150, 205, 205)
val PaleTurquoise4 = new Color(102, 139, 139)
val PaleVioletRed = new Color(219, 112, 147)
val PaleVioletRed1 = new Color(255, 130, 171)
val PaleVioletRed2 = new Color(238, 121, 159)
val PaleVioletRed3 = new Color(205, 104, 137)
val PaleVioletRed4 = new Color(139, 71, 93)
val PapayaWhip = new Color(255, 239, 213)
val PeachPuff = new Color(255, 218, 185)
val PeachPuff1 = new Color(255, 218, 185)
val PeachPuff2 = new Color(238, 203, 173)
val PeachPuff3 = new Color(205, 175, 149)
val PeachPuff4 = new Color(139, 119, 101)
val Peru = new Color(205, 133, 63)
val Pink = new Color(255, 192, 203)
val Pink1 = new Color(255, 181, 197)
val Pink2 = new Color(238, 169, 184)
val Pink3 = new Color(205, 145, 158)
val Pink4 = new Color(139, 99, 108)
val Plum = new Color(221, 160, 221)
val Plum1 = new Color(255, 187, 255)
val Plum2 = new Color(238, 174, 238)
val Plum3 = new Color(205, 150, 205)
val Plum4 = new Color(139, 102, 139)
val PowderBlue = new Color(176, 224, 230)
val Purple = new Color(160, 32, 240)
val Purple1 = new Color(155, 48, 255)
val Purple2 = new Color(145, 44, 238)
val Purple3 = new Color(125, 38, 205)
val Purple4 = new Color(85, 26, 139)
val Red = new Color(255, 0, 0)
val Red1 = new Color(255, 0, 0)
val Red2 = new Color(238, 0, 0)
val Red3 = new Color(205, 0, 0)
val Red4 = new Color(139, 0, 0)
val RosyBrown = new Color(188, 143, 143)
val RosyBrown1 = new Color(255, 193, 193)
val RosyBrown2 = new Color(238, 180, 180)
val RosyBrown3 = new Color(205, 155, 155)
val RosyBrown4 = new Color(139, 105, 105)
val RoyalBlue = new Color(65, 105, 225)
val RoyalBlue1 = new Color(72, 118, 255)
val RoyalBlue2 = new Color(67, 110, 238)
val RoyalBlue3 = new Color(58, 95, 205)
val RoyalBlue4 = new Color(39, 64, 139)
val SaddleBrown = new Color(139, 69, 19)
val Salmon = new Color(250, 128, 114)
val Salmon1 = new Color(255, 140, 105)
val Salmon2 = new Color(238, 130, 98)
val Salmon3 = new Color(205, 112, 84)
val Salmon4 = new Color(139, 76, 57)
val SandyBrown = new Color(244, 164, 96)
val SeaGreen = new Color(46, 139, 87)
val SeaGreen1 = new Color(84, 255, 159)
val SeaGreen2 = new Color(78, 238, 148)
val SeaGreen3 = new Color(67, 205, 128)
val SeaGreen4 = new Color(46, 139, 87)
val Seashell = new Color(255, 245, 238)
val Seashell1 = new Color(255, 245, 238)
val Seashell2 = new Color(238, 229, 222)
val Seashell3 = new Color(205, 197, 191)
val Seashell4 = new Color(139, 134, 130)
val Sienna = new Color(160, 82, 45)
val Sienna1 = new Color(255, 130, 71)
val Sienna2 = new Color(238, 121, 66)
val Sienna3 = new Color(205, 104, 57)
val Sienna4 = new Color(139, 71, 38)
val SkyBlue = new Color(135, 206, 235)
val SkyBlue1 = new Color(135, 206, 255)
val SkyBlue2 = new Color(126, 192, 238)
val SkyBlue3 = new Color(108, 166, 205)
val SkyBlue4 = new Color(74, 112, 139)
val SlateBlue = new Color(106, 90, 205)
val SlateBlue1 = new Color(131, 111, 255)
val SlateBlue2 = new Color(122, 103, 238)
val SlateBlue3 = new Color(105, 89, 205)
val SlateBlue4 = new Color(71, 60, 139)
val SlateGray = new Color(112, 128, 144)
val SlateGray1 = new Color(198, 226, 255)
val SlateGray2 = new Color(185, 211, 238)
val SlateGray3 = new Color(159, 182, 205)
val SlateGray4 = new Color(108, 123, 139)
val Snow = new Color(255, 250, 250)
val Snow1 = new Color(255, 250, 250)
val Snow2 = new Color(238, 233, 233)
val Snow3 = new Color(205, 201, 201)
val Snow4 = new Color(139, 137, 137)
val SpringGreen = new Color(0, 255, 127)
val SpringGreen1 = new Color(0, 255, 127)
val SpringGreen2 = new Color(0, 238, 118)
val SpringGreen3 = new Color(0, 205, 102)
val SpringGreen4 = new Color(0, 139, 69)
val SteelBlue = new Color(70, 130, 180)
val SteelBlue1 = new Color(99, 184, 255)
val SteelBlue2 = new Color(92, 172, 238)
val SteelBlue3 = new Color(79, 148, 205)
val SteelBlue4 = new Color(54, 100, 139)
val Tan = new Color(210, 180, 140)
val Tan1 = new Color(255, 165, 79)
val Tan2 = new Color(238, 154, 73)
val Tan3 = new Color(205, 133, 63)
val Tan4 = new Color(139, 90, 43)
val Thistle = new Color(216, 191, 216)
val Thistle1 = new Color(255, 225, 255)
val Thistle2 = new Color(238, 210, 238)
val Thistle3 = new Color(205, 181, 205)
val Thistle4 = new Color(139, 123, 139)
val Tomato = new Color(255, 99, 71)
val Tomato1 = new Color(255, 99, 71)
val Tomato2 = new Color(238, 92, 66)
val Tomato3 = new Color(205, 79, 57)
val Tomato4 = new Color(139, 54, 38)
val Turquoise = new Color(64, 224, 208)
val Turquoise1 = new Color(0, 245, 255)
val Turquoise2 = new Color(0, 229, 238)
val Turquoise3 = new Color(0, 197, 205)
val Turquoise4 = new Color(0, 134, 139)
val Violet = new Color(238, 130, 238)
val VioletRed = new Color(208, 32, 144)
val VioletRed1 = new Color(255, 62, 150)
val VioletRed2 = new Color(238, 58, 140)
val VioletRed3 = new Color(205, 50, 120)
val VioletRed4 = new Color(139, 34, 82)
val Wheat = new Color(245, 222, 179)
val Wheat1 = new Color(255, 231, 186)
val Wheat2 = new Color(238, 216, 174)
val Wheat3 = new Color(205, 186, 150)
val Wheat4 = new Color(139, 126, 102)
val White = new Color(255, 255, 255)
val WhiteSmoke = new Color(245, 245, 245)
val Yellow = new Color(255, 255, 0)
val Yellow1 = new Color(255, 255, 0)
val Yellow2 = new Color(238, 238, 0)
val Yellow3 = new Color(205, 205, 0)
val Yellow4 = new Color(139, 139, 0)
val YellowGreen = new Color(154, 205, 50)
}
|
richardclose/svggraph
|
src/main/scala/org/phasanix/svggraph/PathBuilder.scala
|
package org.phasanix.svggraph
import java.awt.geom.Point2D
/**
* Convenience interface for constructing SVG path strings,
* absolute positions only
* TODO: split into absolute and relative path implementations
*/
class PathBuilder(start: Point2D.Float) {
import Helper._
private val points = collection.mutable.ArrayBuffer.empty[Either[Point2D.Float, String]]
private var current = start
points += Left(start)
/** Move to absolute position */
def moveAbs(point: Point2D.Float): PathBuilder = {
current = point
points += Left(point)
this
}
/** Insert path op string before current point */
def op(s: String): PathBuilder = {
points += Right(s)
this
}
/** Move relative to current position */
def moveRel(dx: Float, dy: Float): PathBuilder = {
val p = new Point2D.Float(current.x, current.y)
p.x += dx
p.y += dy
moveAbs(p)
this
}
/**Return to start */
def toStart: PathBuilder = {
moveAbs(start)
this
}
/** path string */
def path: String = {
val sb = new StringBuilder
sb.fmt('M')
for (x <- points) {
sb.fmt(' ')
x match {
case Left(pt) => sb.fmt(pt)
case Right(s) => sb.fmt(s)
}
}
sb.toString()
}
}
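// Illustrative usage (sketch, not part of the original source):
//   val pb = new PathBuilder(new Point2D.Float(0f, 0f))
//   pb.moveRel(10f, 0f).moveRel(0f, 10f).toStart
//   pb.path  // an SVG path string of the form "M x0,y0 x1,y1 x2,y2 x0,y0"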
|
richardclose/svggraph
|
src/test/scala/RichColorTest.scala
|
<reponame>richardclose/svggraph
import org.phasanix.svggraph.RichColor
import org.scalatest.{Matchers, FlatSpec}
import org.phasanix.svggraph.RichColor._
import java.awt.Color
/**
* Test suite for RichColor
*/
class RichColorTest extends FlatSpec with Matchers {
def isBetween(color1: Color, color2: Color, mid: Color): Boolean = {
def between(c1: Int, c2: Int, m: Int) = Seq(c1, m, c2).sorted.apply(1) == m
between(color1.getRed, color2.getRed, mid.getRed) &&
between(color1.getGreen, color2.getGreen, mid.getGreen) &&
between(color1.getBlue, color2.getBlue, mid.getBlue)
}
"RichColor string encoding" should "encode correctly to long form hex" in {
val hex = Color.ORANGE.asHex
hex shouldEqual "#FFC800"
RichColor.parseOrElse(hex, Color.MAGENTA) shouldEqual Color.ORANGE
}
it should "encode solid color correctly to rgb" in {
val rgb = Color.ORANGE.asRgb
rgb shouldEqual "rgb(255,200,0)"
RichColor.parseOrElse(rgb, Color.MAGENTA) shouldEqual Color.ORANGE
}
it should "encode transparent color correctly to rgb" in {
val transparentOrange = new Color(Color.ORANGE.getRed, Color.ORANGE.getGreen, Color.ORANGE.getBlue, 127)
val rgb = transparentOrange.asRgb
rgb shouldEqual "rgba(255,200,0,0.5)"
val parsed = RichColor.parseOrElse(rgb, Color.MAGENTA)
parsed shouldEqual transparentOrange
}
"Color interpolation" should "generate expected value" in {
val (from, to) = (Color.CYAN, Color.ORANGE)
val mid = RichColor.interp(from, to, 0.5)
def interp(a: Int, b: Int) = Math.min(a,b) + (Math.abs(a-b) * 0.5).toInt
interp(from.getRed, to.getRed) shouldEqual mid.getRed
interp(from.getGreen, to.getGreen) shouldEqual mid.getGreen
interp(from.getBlue, to.getBlue) shouldEqual mid.getBlue
}
it should "generate color range correctly" in {
val (from, to) = (Color.CYAN, Color.ORANGE)
val range = RichColor.range(from, to, 4)
range.head shouldEqual from
range.last shouldEqual to
range.length shouldEqual 6
range.sliding(3).forall(xs => isBetween(xs(0), xs(2), xs(1)))
}
}
|
richardclose/svggraph
|
src/main/scala/org/phasanix/svggraph/Constants.scala
|
package org.phasanix.svggraph
/**
* Constants for charting
*/
trait Constants {
trait Named {
override def toString: String = this.getClass.getSimpleName.replace("$", "")
}
/**
* Fill styles
*/
sealed trait FillStyle extends Named
object Solid extends FillStyle
object Radial extends FillStyle
object Vertical extends FillStyle
object Horizontal extends FillStyle
/**
* Position (e.g. of graph legend).
*/
sealed trait Corner extends Named
object TopLeft extends Corner
object TopRight extends Corner
object BottomLeft extends Corner
object BottomRight extends Corner
/**
* Adds particular attributes to an element
*/
class AttrProvider(val name: String, val value: String) {
def apply(elt: xml.Elem): xml.Elem =
if (value == null)
elt
else
elt % new xml.UnprefixedAttribute(name, value, xml.Null)
}
/**
* Text anchor, for horizontal alignment
*/
class TextAnchor(value: String) extends AttrProvider("text-anchor", value)
object TextAnchor {
val Nil = new TextAnchor(null: String)
val Start = new TextAnchor("start")
val Middle = new TextAnchor("middle")
val End = new TextAnchor("end")
}
sealed class BaselineAlignment(value: String) extends AttrProvider("alignment-baseline", value)
object BaselineAlignment {
val Nil = new BaselineAlignment(null: String)
val Middle = new BaselineAlignment("middle")
}
}
object Constants extends Constants
|
richardclose/svggraph
|
src/main/scala/org/phasanix/svggraph/PointStyle.scala
|
<filename>src/main/scala/org/phasanix/svggraph/PointStyle.scala
package org.phasanix.svggraph
import java.awt.Color
import java.awt.geom.Point2D
import RichColor.color2richcolor
/**
* Shape of point on graph
*/
sealed abstract class PointStyle(val name: String) {
def render(color: Color, radius: Float, x: Int, y: Int, fillColor: Option[Color]): xml.Elem
def render(color: Color, radius: Float, x: Int, y: Int): xml.Elem = render(color, radius, x, y, None)
def render(color: Color, radius: Float, x: Int, y: Int, fillColor: Color): xml.Elem = render(color, radius, x, y, Some(fillColor))
def render(color: Color, radius: Float, pos: Point2D.Float): xml.Elem = render(color, radius, pos.x.toInt, pos.y.toInt, None)
def render(color: Color, radius: Float, pos: Point2D.Float, fillColor: Color): xml.Elem = render(color, radius, pos.x.toInt, pos.y.toInt, Some(fillColor))
def render(color: Color, radius: Float, pos: Point2D.Float, fillColor: Option[Color]): xml.Elem = render(color, radius, pos.x.toInt, pos.y.toInt, fillColor)
protected def _c(color: Option[Color]) =
color.map(_.asHex).getOrElse("none")
}
object PointStyle {
import Helper._
object NoPoint extends PointStyle("none") {
def render(color: Color, radius: Float, x: Int, y: Int, fillColor: Option[Color]): xml.Elem =
<g/>
}
object Circle extends PointStyle("circle") {
def render(color: Color, radius: Float, x: Int, y: Int, fillColor: Option[Color]): xml.Elem =
<circle fill={_c(fillColor)} stroke={color.asRgb} r={fmt(radius, 1)} cx={fmt(x)} cy={fmt(y)}/>
}
object Square extends PointStyle("square") {
def render(color: Color, radius: Float, x: Int, y: Int, fillColor: Option[Color]): xml.Elem = {
val x1 = fmt(x - radius / 2, 1)
val y1 = fmt(y - radius / 2, 1)
val h = fmt(radius, 1)
<rect fill={_c(fillColor)} stroke={color.asRgb} x={x1} y={y1} height={h} width={h}/>
}
}
object Triangle extends PointStyle("triangle") {
def render(color: Color, radius: Float, x: Int, y: Int, fillColor: Option[Color]): xml.Elem = {
      val rx = radius * 0.866 // cos(30 degrees): horizontal offset of the two base vertices
      val ry = radius * 0.5   // sin(30 degrees): vertical offset of the two base vertices
val start = (x, (y - radius).toInt)
val points = Seq(start, ((x - rx).toInt, (y + ry).toInt), ((x + rx).toInt, (y + ry).toInt), start)
<path fill={_c(fillColor)} stroke={color.asRgb} d={Chart.svgPath(points)}/>
}
}
// Tall downward pointed triangle, where the point is at the given coordinates.
object PointyTriangle extends PointStyle("pointy") {
def render(color: Color, radius: Float, x: Int, y: Int, fillColor: Option[Color]): xml.Elem = {
val pb = new PathBuilder(new Point2D.Float(x, y))
pb.moveRel(-radius / 2, -2 * radius)
.moveRel(radius, 0f)
.toStart
<path fill={_c(fillColor)} stroke={color.asRgb} d={pb.path}/>
}
}
object VerticalLine extends PointStyle("vline") {
def render(color: Color, radius: Float, x: Int, y: Int, fillColor: Option[Color]): xml.Elem =
<line x1={fmt(x)} x2={fmt(x)} y1={fmt(y - radius, 1)} y2={fmt(y + radius, 1)} stroke={color.asRgb}/>
}
object Diamond extends PointStyle("diamond") {
def render(color: Color, radius: Float, x: Int, y: Int, fillColor: Option[Color]): xml.Elem = {
val start = new Point2D.Float(x - radius, y)
val pb = new PathBuilder(start)
pb.moveRel(radius, -radius)
.moveRel(radius, radius)
.moveRel(-radius, radius)
.moveAbs(start)
<path fill={_c(fillColor)} stroke={color.asRgb} d={pb.path}/>
}
}
object Cross extends PointStyle("cross") {
def render(color: Color, radius: Float, x: Int, y: Int, fillColor: Option[Color]): xml.Elem = {
val r = radius / 2
val sw = fmt(r, 1)
val (x1, x2, y1, y2) = (fmt(x - r, 1), fmt(x + r, 1), fmt(y - r, 1), fmt(y + r, 1))
<g>
<line x1={x1} y1={y1} x2={x2} y2={y2} stroke={color.asRgb} stroke-width={sw}/>
<line x1={x1} y1={y2} x2={x2} y2={y1} stroke={color.asRgb} stroke-width={sw}/>
</g>
}
}
/** Look up style with the given name */
  def get(name: String): Option[PointStyle] = styleMap.get(name) // getOrElse(name, None) would mix PointStyle and None into an Any-typed result
private val styleMap = Seq(Circle, Square, Triangle, Cross, Diamond, VerticalLine, PointyTriangle).map(ps => (ps.name, ps)).toMap
}
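// Illustrative usage (sketch, not part of the original source):
//   PointStyle.Circle.render(java.awt.Color.RED, 3f, new java.awt.geom.Point2D.Float(10f, 20f))
//   PointStyle.get("square")   // Some(Square); unknown names yield None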
|
richardclose/svggraph
|
src/main/scala/org/phasanix/svggraph/Axis.scala
|
<reponame>richardclose/svggraph<filename>src/main/scala/org/phasanix/svggraph/Axis.scala
package org.phasanix.svggraph
import java.awt.geom.{Rectangle2D, Point2D}
import java.util.{Calendar => Cal, Date, GregorianCalendar}
import Helper._
import RichColor.color2richcolor
import Constants._
abstract class Axis (
val opts: Options,
val title: String,
val isVertical: Boolean,
val isRight: Boolean) {
val origin: Point2D.Float = new Point2D.Float(opts.plotArea.x, opts.plotArea.y + opts.plotArea.height)
val length: Float = if (isVertical) opts.plotArea.height else opts.plotArea.width
val tickArea: Rectangle2D.Float =
if (isVertical) {
if (isRight) opts.layout.yTickAreaRight else opts.layout.yTickAreaLeft
}
else {
opts.layout.xTickArea
}
val endpoint: Point2D.Float = if (isVertical)
new Point2D.Float(origin.x, opts.plotArea.y)
else
new Point2D.Float(origin.x + length, origin.y)
val minPos: Float = if (isVertical) origin.y else origin.x
val maxPos: Float = if (isVertical) minPos - length else minPos + length
/**
* Draw baseline
*/
def drawBaseline: xml.Elem =
<line
x1={fmt(origin.x, 1)}
y1={fmt(origin.y, 1)}
x2={fmt(endpoint.x, 1)}
y2={fmt(endpoint.y, 1)}
stroke-width={fmt(opts.strokeWidth, 1)}
stroke={opts.draw.frameColor.asHex}/>
/**
* Draw axis title
*/
def drawTitle: xml.Elem = {
val pos = tickArea.center
val xform = if (isVertical) "rotate(270,%3.0f,%3.0f)".format(pos.x, pos.y) else null
val attrs = Seq(new AttrProvider("transform", xform), TextAnchor.Middle, BaselineAlignment.Middle)
attrs.foldLeft(<text x={fmt(pos.x, 1)} y={fmt(pos.y, 1)}>{title}</text>) { (elt, attr) => attr(elt) }
}
/**
* Generate evenly-spaced positions along the axis
*/
def spaceEvenly(count: Int): Seq[Float] = {
val spacing = (maxPos - minPos) / (count + 1)
(0 until count).map { i =>
minPos + (0.5f + i) * spacing
}
}
/**
* Draw a series of tick labels for the given axis
*/
def drawLabels: Seq[xml.Elem]
/**
* Draw all decorations for this axis.
*/
def decorate: Seq[xml.Elem]
}
object Axis {
/**
* Axis for which datapoints are positioned according to their value (e.g. x/y scatterplot)
* @param opts Options
* @param title Title of axis
* @param isVertical true if y axis, else false
* @param isRight true if right-hand y axis, else false
* @param isDate do values represent dates? (XXX: necessary?)
* @param isIntegral constrain tick intervals to be multiples of 1
* @param tickFmt tick formatter
*/
class ScaleAxis (
opts: Options,
title: String,
isVertical: Boolean,
isRight: Boolean,
val minValue: Double,
val maxValue: Double,
val isDate: Boolean,
val isIntegral: Boolean,
val tickFmt: Double => String) extends Axis(opts, title, isVertical, isRight) {
implicit val implicitOpts = opts
private[this] val factor = (maxPos - minPos) / (maxValue - minValue)
/**
* Translate the given value into a position on this axis.
*/
def mapValue(value: Double): Float = {
if (value < minValue)
minPos
else if (value > maxValue)
maxPos
else
minPos + ((value - minValue) * factor).toFloat
}
/**
* Translate the given value to a point on the axis
*/
def mapValueToPosition(value: Double): Point2D.Float = {
val pos = mapValue(value)
val ret = new Point2D.Float()
if (isVertical)
ret.setLocation(origin.x, pos)
else
ret.setLocation(pos, origin.y)
ret
}
/**
* Draw a series of tick labels for the given axis
*/
def drawLabels(vals: Seq[Double]): Seq[xml.Elem] = {
for (v <- vals) yield {
val p = this.mapValueToPosition(v)
val text = tickFmt(v)
val anchor = if (isVertical) TextAnchor.End else TextAnchor.Middle
// Tweak exact position
if (isVertical) {
p.x -= 2
} else {
p.y += opts.draw.lineSpacing
}
drawText(text, p, Seq(anchor))
}
}
def decorate: Seq[xml.Elem] = {
val values = Tick.values(opts, this)
val positions = values.map(this.mapValue)
drawLabels(values) ++
drawGrid(isVertical, positions) :+
drawTitle
}
/**
* Draw a series of tick labels for the given axis
*/
def drawLabels: Seq[xml.Elem] = drawLabels(Tick.values(opts, this))
/**
* Draw tick marks and labels
*/
def drawTickmarksAndLabels: Seq[xml.Elem] = {
val values = Tick.values(opts, this)
val labels = drawLabels(values)
val marks = for (v <- values) yield {
val ticklen = 4
val pos = mapValueToPosition(v)
val pos1 = new Point2D.Float(pos.x, pos.y)
if (isVertical) pos1.x -= ticklen else pos1.y += ticklen
<line x1={fmt(pos.x, 1)} y1={fmt(pos.y, 1)} x2={fmt(pos1.x, 1)} y2={fmt(pos1.y, 1)} stroke={RichColor.Gray.asRgb} stroke-width="1.0" />
}
marks ++ labels
}
/** Draw labels, tick marks and baseline */
def drawAll: Seq[xml.Elem] = drawTickmarksAndLabels :+ drawBaseline
/**
* Tick interval positions
*/
def tickIntervals(rawCount: Int): Iterator[Double] =
if (isDate) dateTickIntervals(rawCount) else numericTickIntervals(rawCount, isIntegral)
/**
* Generate appropriately sized tick intervals
*
* @param rawCount approximate number of ticks required.
*/
private def numericTickIntervals(rawCount: Int, isIntegral: Boolean) = new Iterator[Double] {
import java.lang.Math.{ pow, log10 }
// Interval, such that about rawCount intervals will fit into
// the range
private[this] val interval = {
val rawSize = (maxValue - minValue) / rawCount
val scale = pow(10, log10(rawSize).toInt - 1)
val s2 = rawSize / scale
val m = scaleBreaks
.find(e => s2 > e._1 && s2 < e._2)
.fold(50.0)(_._1)
// println("rawSize=" + rawSize + " scale=" + scale + " s2=" + s2 + " m=" + m)
val x = m * scale
if (isIntegral && x < 1.0) 1.0 else x
}
private[this] var nextValue = {
((minValue / interval).toInt + 1) * interval
}
def hasNext = nextValue < maxValue
def next() = {
val ret = nextValue
nextValue += interval
ret
}
}
/**
* Tick interval generator for date values (i.e. Date.getTime() -> Long: Double)
* @param rawCount approximate number of ticks required.
*/
def dateTickIntervals(rawCount: Int) = new Iterator[Double] {
private[this] val interval = {
val rawInterval = (maxValue - minValue) / rawCount
val int1 = dateScaleBreaks
.find(e => rawInterval > e._2._1 && rawInterval < e._2._2)
          .map(_._1).getOrElse((Cal.YEAR, 1)) // pass the default as an explicit tuple rather than relying on argument auto-tupling
if (int1._1 == Cal.YEAR) {
          val x = (rawInterval / (365.0 * 24 * 60 * 60 * 1000)).toInt // rawInterval is in milliseconds, so convert to whole years
(Cal.YEAR, if (x == 0) 1 else x)
} else {
int1
}
}
private[this] val cal = new GregorianCalendar()
private var _passCount = 0
private[this] var nextValue: Double = {
cal.setTime(new Date(minValue.toLong))
interval._1 match {
case Cal.YEAR =>
            cal.set(Cal.MONTH, Cal.JANUARY) // Calendar months are zero-based; 1 would be February
cal.set(Cal.DAY_OF_MONTH, 1)
case Cal.MONTH =>
cal.set(Cal.DAY_OF_MONTH, 1)
case Cal.WEEK_OF_YEAR =>
cal.set(Cal.DAY_OF_WEEK, 1)
case _ =>
}
while (cal.getTimeInMillis < minValue.toLong)
cal.add(interval._1, interval._2)
cal.getTimeInMillis
}
def hasNext = nextValue < maxValue && _passCount < 100
def next() = {
_passCount += 1
val ret = nextValue
cal.add(interval._1, interval._2)
nextValue = cal.getTimeInMillis
ret
}
}
override def toString =
"<Axis factor=" + factor + " minValue=" + minValue + " maxValue=" +
maxValue + " minPos=" + minPos + " maxPos=" + maxPos + " />"
}
/**
* Multipliers to get integral tick intervals for a numeric range
*/
val scaleBreaks = Seq(0.0, 1.0, 2.0, 2.5, 5.0, 10.0, 20.0, 25.0, 50.0)
.sliding(2)
.map(e => (e(0), e(1)))
.toSeq
/**
* Date interval boundaries to get reasonable tick intervals for a date range
*/
val dateScaleBreaks = {
val xs = Seq (
Cal.MINUTE -> 1,
Cal.MINUTE -> 60,
Cal.HOUR -> 2,
Cal.DAY_OF_MONTH -> 1,
Cal.WEEK_OF_YEAR -> 1,
Cal.WEEK_OF_YEAR -> 2,
Cal.MONTH -> 1,
Cal.MONTH -> 2,
Cal.MONTH -> 3,
Cal.MONTH -> 6,
Cal.YEAR -> 1).map { e =>
val c = new GregorianCalendar()
c.setTimeInMillis(0L)
c.add(e._1, e._2)
(e, c.getTimeInMillis.toDouble)
}
xs.sliding(2)
.map { e => e(1)._1 ->(e(0)._2, e(1)._2) }
.toSeq
}
/**
* Translate given values to chart coordinates
*/
def locate(xAxis: ScaleAxis, yAxis: ScaleAxis, xValue: Float, yValue: Float): Point2D.Float = {
val xPos = xAxis.mapValue(xValue)
val yPos = yAxis.mapValue(yValue)
    new Point2D.Float(xPos, yPos) // mapValue already returns absolute chart coordinates, so don't offset by minPos again
}
def xAxis(title: String, minValue: Double, maxValue: Double, isDate: Boolean, fmtOpt: Option[Tick.Fmt])(implicit opts: Options): ScaleAxis =
makeAxis(opts, title, isVertical = false, isRight = false, minValue, maxValue, isDate = isDate, isIntegral = true, fmtOpt)
private def makeAxis(
opts: Options,
title: String,
isVertical: Boolean,
isRight: Boolean,
minValue: Double,
maxValue: Double,
isDate: Boolean,
isIntegral: Boolean,
fmtOpt: Option[Tick.Fmt]) = {
val fmt = fmtOpt.getOrElse(Tick.defaultFmt(isDate))
new ScaleAxis(opts, title, isVertical, isRight, minValue, maxValue, isDate, isIntegral, fmt)
}
}
|
richardclose/svggraph
|
build.sbt
|
<reponame>richardclose/svggraph
name := "svggraph"
version := "1.0"
organization := "org.phasanix"
scalaVersion := "2.11.6"
libraryDependencies ++= Seq (
"org.scalatest" %% "scalatest" % "2.1.6" % "test",
"org.scala-lang.modules" %% "scala-xml" % "1.0.3"
)
|
richardclose/svggraph
|
src/test/scala/OptionsTest.scala
|
<reponame>richardclose/svggraph
import org.phasanix.svggraph.Options
import org.scalatest.{Matchers, FlatSpec}
/**
*/
class OptionsTest extends FlatSpec with Matchers {
"Options.Layout" should "have correct total dimensions" in {
val layout = Options.Layout.basic(chartWidth = 500, chartHeight = 300, xTickMargin = 44, yTickMarginLeft = 55)
val w = layout.yTickAreaLeft.width + (2*layout.plotMargin) + layout.plotArea.width + layout.yTickMarginRight
val h = layout.xTickArea.height + layout.plotMargin + layout.plotArea.height
w shouldEqual layout.chartArea.width
h shouldEqual layout.chartArea.height
}
it should "position plot area correctly" in {
val layout = Options.Layout.basic(chartWidth = 500, chartHeight = 300, xTickMargin = 44, yTickMarginLeft = 55)
layout.chartArea.height shouldEqual layout.plotArea.y + layout.plotArea.height
layout.chartArea.width shouldEqual layout.yTickAreaRight.x + layout.yTickAreaRight.width
}
}
|
richardclose/svggraph
|
src/test/scala/HelperTest.scala
|
import org.scalatest.{Matchers, FlatSpec}
import org.phasanix.svggraph.Helper._
/**
*
*/
class HelperTest extends FlatSpec with Matchers {
"Helper formatting" should "correctly encode float" in {
val sb = new StringBuilder()
sb.append("[").fmt(123.45, 3).append("]")
sb.toString shouldEqual "[123.450]"
sb.clear()
sb.append('[').fmt(123.456789, 5)
sb.toString shouldEqual "[123.45678"
}
it should "be faster than NumberFormat" in {
val sb = new StringBuilder()
val numFmt = java.text.NumberFormat.getInstance()
numFmt.setMinimumFractionDigits(3)
numFmt.setMaximumFractionDigits(3)
numFmt.setGroupingUsed(false)
val count = 500000
var i: Int = 0
var start: Long = 0L
// warm up loop for Helper and NumberFormat
i = 10000
while (i > 0) {
sb.append(numFmt.format(123.45))
sb.fmt(123.45, 3)
sb.setLength(0)
i -= 1
}
// Measure Helper.fmt
i = count
start = System.nanoTime()
while (i > 0) {
sb.fmt(123.45, 3)
sb.setLength(0)
i -= 1
}
val t1 = (System.nanoTime().toDouble - start)/count
// Measure NumberFormat
i = count
start = System.nanoTime()
while (i > 0) {
sb.append(numFmt.format(123.45))
sb.setLength(0)
i -= 1
}
val t2 = (System.nanoTime().toDouble - start)/count
// println(s"call to Helper.append(): $t1 ns call to NumberFormat.format(): $t2")
(t2 - t1 > 0) shouldBe true
}
}
|
antonkw/Edmonds-Karp
|
src/main/scala/kov/BipartiteMatches.scala
|
package kov
import kov.graph.FlowGraph
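/**
 * Maximum bipartite matching via max flow: node 0 is the source, nodes
 * 1..firstClassCount are the left class, nodes firstClassCount+1..firstClassCount+secondClassCount
 * are the right class, and the last node is the sink (see BipartiteMatchesMain.graph).
 * findMatches returns a map from left node index to matched right node index, both 1-based.
 */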
class BipartiteMatches {
def findMatches(graph: FlowGraph, firstClassCount: Int, secondClassCount: Int): Map[Int, Int] = {
val flows = new EdmondsKarp().maxFlowWithGraph(graph)
val edges = flows
._2.edges
.filter(e => e.from != 0 && e.to != firstClassCount + secondClassCount + 1 && e.flow > 0 && e.from < e.to)
val correspondences = edges.map(e => e.from -> (e.to - firstClassCount)).toMap
correspondences
}
}
|
antonkw/Edmonds-Karp
|
src/main/scala/kov/graph/Edge.scala
|
<filename>src/main/scala/kov/graph/Edge.scala
package kov.graph
case class Edge(from: Int, to: Int, capacity: Int, flow: Int)
|
antonkw/Edmonds-Karp
|
src/main/scala/kov/util/GraphConsoleReader.scala
|
package kov.util
import java.io.{BufferedReader, IOException, InputStreamReader}
import java.util.StringTokenizer
import kov.graph.FlowGraph
class GraphConsoleReader {
val in: BufferedReader = new BufferedReader(new InputStreamReader(System.in))
var tok = new StringTokenizer("")
@throws[IOException]
def nextInt: Int = next.toInt
@throws[IOException]
def next: String = {
    while (!tok.hasMoreElements) tok = new StringTokenizer(in.readLine)
tok.nextToken
}
def readGraph: FlowGraph = {
    // read through this instance; wrapping System.in in a second BufferedReader could drop buffered input
    val vertexCount = nextInt
    val edgeCount = nextInt
    val graph = new FlowBuilder(vertexCount)
    for (_ <- 0 until edgeCount) {
      val from = nextInt - 1
      val to = nextInt - 1
      val capacity = nextInt
graph.addEdge(from, to, capacity)
}
graph.build
}
}
|
antonkw/Edmonds-Karp
|
src/main/scala/kov/graph/FlowGraph.scala
|
package kov.graph
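/**
 * Immutable flow network. `edges` stores forward/backward pairs (ids 2k and 2k+1, as
 * built by FlowBuilder), and `graph(v)` lists the ids of edges leaving vertex v.
 * addFlow pushes flow along an edge and withdraws the same amount from its paired residual edge.
 */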
case class FlowGraph(edges: Vector[Edge], graph: Vector[Vector[Int]]) {
def size: Int = graph.length
def getIds(from: Int): Vector[Int] = graph(from)
def getEdge(id: Int): Edge = edges(id)
def addFlow(id: Int, flow: Int): FlowGraph = {
val edge = edges(id)
val backwardId = if (id % 2 == 0) id + 1 else id - 1
val backwardEdge = edges(backwardId)
val edgesUpdated = edges
.updated(id, edge.copy(flow = edge.flow + flow))
.updated(backwardId, backwardEdge.copy(flow = backwardEdge.flow - flow))
FlowGraph(edgesUpdated, graph)
}
}
|
antonkw/Edmonds-Karp
|
src/main/scala/kov/bfs/BFS.scala
|
package kov.bfs
import kov.graph.FlowGraph
import kov.util.Logger
import scala.annotation.tailrec
import scala.collection.immutable.{BitSet, Queue}
object BFS {
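  /**
   * Breadth-first search that only follows edges with remaining capacity (flow < capacity).
   * Returns the shortest augmenting path from root to target as a list of node ids
   * (root first, target last), or None when the target is unreachable.
   */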
def bfs(root: Int, target: Int, graph: FlowGraph): Option[List[Int]] = {
@tailrec
def iteration(queue: Queue[Int], visited: BitSet, trace: Map[Int, Int]): Option[List[Int]] = {
Logger.debug(s"Queue $queue visited $visited trace $trace")
if (queue.nonEmpty) {
val (node, remained) = queue.dequeue
if (node != target) {
val edges = graph.getIds(node).map(graph.getEdge).filter(e => e.flow < e.capacity && !visited.contains(e.to))
val (visitedUpd, traceUpd, remainedUpd) = edges.foldLeft((visited, trace, remained)) {
case ((visited, trace, queue), edge) => (visited + edge.to, trace + (edge.to -> node), queue.enqueue(edge.to))
}
iteration(remainedUpd, visitedUpd, traceUpd)
} else {
Some(tracePath(root, target, List(), trace))
}
} else {
None
}
}
iteration(Queue(root), BitSet(), Map())
}
def bfsMaxFromMin(graph: FlowGraph): Option[List[Int]] = bfs(0, graph.size - 1, graph)
@tailrec
private def tracePath(root: Int, toTrace: Int, path: List[Int], traceMap: Map[Int, Int]): List[Int] = {
if (toTrace == root) root :: path
else tracePath(root, traceMap(toTrace), toTrace :: path, traceMap)
}
}
|
antonkw/Edmonds-Karp
|
src/main/scala/kov/BipartiteMatchesMain.scala
|
package kov
import kov.graph.FlowGraph
import kov.util.{FlowBuilder, GraphConsoleReader, Logger}
object BipartiteMatchesMain {
def main(args: Array[String]): Unit = {
val reader = new GraphConsoleReader()
val adjMatrix = read(reader)
    // use a distinct name: "val graph = graph(adjMatrix)" would be a recursive value definition
    val flowGraph = graph(adjMatrix)
    val correspondences = new BipartiteMatches().findMatches(flowGraph, adjMatrix.size, adjMatrix(0).size)
val answer = for {
i <- adjMatrix.indices
index = i + 1
} yield correspondences.getOrElse(index, -1)
Logger.info(answer.mkString(" "))
}
/**
   * Reads the left and right class sizes, then the bipartite pairs, as an adjacency matrix.
   * @param read console reader supplying the input
   * @return adjacency matrix with one row per left-class node
*/
def read(read: GraphConsoleReader): Vector[Vector[Boolean]] = {
val numLeft: Int = read.nextInt
val numRight: Int = read.nextInt
val adjMatrix = Array.ofDim[Boolean](numLeft, numRight)
for {
i <- 0 until numLeft
j <- 0 until numRight
} adjMatrix(i)(j) = read.nextInt == 1
adjMatrix.map(_.toVector).toVector
}
def graph(adj: Vector[Vector[Boolean]]): FlowGraph = {
val firstClassCount = adj.size
val secondClassCount = adj(0).size
val nodeCount = firstClassCount + secondClassCount
val source = 0
val sink = nodeCount + 1
val g = new FlowBuilder(nodeCount + 2)
for {
i <- adj.indices
j <- adj(0).indices
hasEdge = adj(i)(j)
if hasEdge
} {
g.addEdge(i + 1, j + firstClassCount + 1, 1)
}
for (i <- 1 to firstClassCount) g.addEdge(source, i, 1)
for (i <- firstClassCount + 1 to firstClassCount + secondClassCount) g.addEdge(i, sink, 1)
g.build
}
}
|
antonkw/Edmonds-Karp
|
src/main/scala/kov/util/Logger.scala
|
<gh_stars>0
package kov.util
object Logger {
def debug(s: => String) = {
println(s)
}
def info(s: String) = println(s)
}
|
antonkw/Edmonds-Karp
|
src/main/scala/kov/EdmondsKarpMain.scala
|
<reponame>antonkw/Edmonds-Karp<filename>src/main/scala/kov/EdmondsKarpMain.scala<gh_stars>0
package kov
import kov.util.{GraphConsoleReader, Logger}
object EdmondsKarpMain extends App {
val graph = new GraphConsoleReader().readGraph
val maxFlow = new EdmondsKarp().maxFlow(graph)
Logger.info(s"Max flow is $maxFlow")
}
|
antonkw/Edmonds-Karp
|
build.sbt
|
<reponame>antonkw/Edmonds-Karp<gh_stars>0
ThisBuild / scalaVersion := "2.11.12"
ThisBuild / organization := "kov"
lazy val hello = (project in file("."))
.settings(
name := "Hello"
)
|
antonkw/Edmonds-Karp
|
src/main/scala/kov/util/FlowBuilder.scala
|
<reponame>antonkw/Edmonds-Karp
package kov.util
import kov.graph.{Edge, FlowGraph}
import scala.collection.mutable.ArrayBuffer
class FlowBuilder {
private val edges: ArrayBuffer[Edge] = ArrayBuffer();
private var graph: Array[ArrayBuffer[Int]] = _
def this(n: Int) = {
this()
this.graph = (for (_ <- 0 until n) yield ArrayBuffer[Int]()).toArray;
}
def addEdge(from: Int, to: Int, capacity: Int) {
val forwardEdge = Edge(from, to, capacity, 0)
    // the backward (residual) edge starts saturated (flow == capacity), so its
    // residual capacity is zero until the forward edge carries flow
val backwardEdge = Edge(to, from, capacity, capacity)
graph(from) += edges.size
edges += forwardEdge
graph(to) += edges.size
edges += backwardEdge
}
def build = FlowGraph(edges.toVector, graph.map(_.toVector).toVector)
}
|
antonkw/Edmonds-Karp
|
src/main/scala/kov/EdmondsKarp.scala
|
<reponame>antonkw/Edmonds-Karp
package kov
import kov.bfs.BFS
import kov.graph.FlowGraph
import kov.util.Logger
import scala.annotation.tailrec
class EdmondsKarp {
  /** Max flow from node 0 to the last node; same algorithm as maxFlowWithGraph, discarding the final graph. */
  def maxFlow(graph: FlowGraph): Int = maxFlowWithGraph(graph)._1
def maxFlowWithGraph(graph: FlowGraph) = {
@tailrec
def maxFlowIteration(graph: FlowGraph, sum: Int = 0, bfs: FlowGraph => Option[List[Int]] = BFS.bfsMaxFromMin): (Int, FlowGraph) = {
bfs(graph) match {
case Some(p) =>
val e = p.sliding(2)
.map { case List(a, b) => (a, b) }.toList
.flatMap {
case (from, to) =>
graph.getIds(from)
.map(id => (id, graph.getEdge(id)))
.find(e => e._2.to == to && e._2.flow < e._2.capacity)
}
val min = e.map { case (_, e) => e.capacity - e.flow }.min
val updatedGraph = e.map(_._1).foldLeft(graph) { case (g, id) => g.addFlow(id, min) }
Logger.debug(s"Path ${p.mkString("->")} is enriched with $min")
maxFlowIteration(updatedGraph, sum + min)
case None => (sum, graph)
}
}
maxFlowIteration(graph)
}
}
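// Hypothetical usage sketch (not in the original repo), assuming BFS.bfsMaxFromMin searches for an
// augmenting path from the lowest-numbered node to the highest-numbered one: a 4-node chain of
// unit-capacity edges should yield a max flow of 1.
object EdmondsKarpUsageSketch {
import kov.util.FlowBuilder
def demo(): Int = {
val builder = new FlowBuilder(4)
builder.addEdge(0, 1, 1)
builder.addEdge(1, 2, 1)
builder.addEdge(2, 3, 1)
new EdmondsKarp().maxFlow(builder.build)
}
}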
|
stressGC/Spark_Distributed_Computation
|
src/main/fights/Spell.scala
|
/**
* @author <NAME>
*/
package fights
/**
* class used to represent the spell of an entity
* @param name name of the spell
* @param range range of the spell
* @param min min damage dealt by the spell
* @param max max damage dealt by the spell
*/
class Spell(val name: String, val range: Int, val min: Int, val max: Int) extends Serializable {
/* accessors */
def getRange() : Int = {
return this.range
}
def getName() : String = {
return this.name
}
def getDamages(): Int = {
return new MathHelper().getRandom(this.min, this.max)
}
/* methods */
override def toString: String = {
return name + ": " + min + "=>" + max + " @ " + range
}
}
|
stressGC/Spark_Distributed_Computation
|
src/main/fights/Generator.scala
|
/**
* @author <NAME>
*/
package fights
import org.apache.spark.SparkContext
import org.apache.spark.graphx.{Edge, Graph}
import scala.collection.mutable.ArrayBuffer
/**
* utility class used to generate various objects
*/
class Generator {
// some helpers
val fightGraph = new GraphHelper()
val mathHelper = new MathHelper()
/**
* returns a graph representing the fight #1
* @param sc SparkContext
* @return
*/
def generateFightOne(sc: SparkContext): Graph[Entity, Int] = {
// lets get our vertices
val vertices = this.getFightOneEntities()
// convert them as tuples
val verticesAsTuples = fightGraph.getVerticesAsTuple(vertices)
// lets get our edges
val edges = fightGraph.generateEdges(verticesAsTuples)
// finally return the Graph made from our vertices and edges
return Graph(sc.makeRDD(verticesAsTuples), sc.makeRDD(edges))
}
/**
* generates all the entities of the first fight
* @return ArrayBuffer of Entities
*/
def getFightOneEntities() : ArrayBuffer[Entity] = {
/*
1 Solar : https://pathfinderwiki.com/wiki/Solar
vs
9 Worgs Rider
4 Barbarian Orcs
1 Warlord
*/
// will contain our entities
var entities = new ArrayBuffer[Entity]()
/* 1x Solar */
val lumenSword = new Spell(name = "Lumen Sword", range = 30, min = 21, max = 24)
val solar = new Entity(name = "Solar", health = 364, armor = 44, regen = 15, speed = 50, spell = lumenSword, coordX = mathHelper.getRandom(0, 500), coordY = mathHelper.getRandom(0, 500))
entities += solar
/* 9x Worgs Rider */
val battleAxe = new Spell(name = "Battle Axe", range = 15 , min = 3, max = 11)
(1 to 9) foreach (x => {
entities += new Entity(name = "WorgRider #" + x, health = 13, armor = 18, regen = 0, speed = 50, spell = battleAxe, coordX = mathHelper.getRandom(0, 500), coordY = mathHelper.getRandom(0, 500))
})
/* 4x Barbarian Orcs */
val bigAxe = new Spell(name = "Big Axe", range = 10 , min = 11, max = 23)
(1 to 4) foreach (x => {
entities += new Entity(name = "Barbares Orcs #" + x, health = 42, armor = 15, regen = 0, speed = 30, spell = bigAxe, coordX = mathHelper.getRandom(0, 500), coordY = mathHelper.getRandom(0, 500))
})
/* 1x Warlord */
val viciousFail = new Spell(name = "Vicious Fail", range = 10, min = 9, max = 18)
val warlord = new Entity(name = "Warlord", health = 141, armor = 27, regen = 0, speed = 30, spell = viciousFail, coordX = mathHelper.getRandom(0, 500), coordY = mathHelper.getRandom(0, 500))
entities += warlord
// return all those entities !
return entities
}
}
|
stressGC/Spark_Distributed_Computation
|
src/main/fights/App.scala
|
/**
* @author <NAME>
*/
package fights
import org.apache.spark.graphx.{Graph, TripletFields}
import org.apache.spark.{SparkConf, SparkContext}
object App {
// some imports
val generator = new Generator()
val mathHelper = new MathHelper()
val graphHelper = new GraphHelper()
/**
* main function of our project
* @param args
*/
def main(args: Array[String]): Unit = {
// Spark options
val options = new SparkConf().setAppName("fight").setMaster("local[*]")
val sparkContext = new SparkContext(options)
sparkContext.setLogLevel("ERROR")
// lets use our generator to get the graph
val graph = generator.generateFightOne(sparkContext)
// and launch the fight !!
this.launchComputation(graph, sparkContext)
}
/**
* launches the computations of the fight
* @param graph
* @param context
*/
def launchComputation(graph: Graph[Entity, Int], context: SparkContext): Unit = {
// lets keep track of our iteration number
var iterationNumber = 0
/**
* this is our loop; it's the core of the computation
* @return
*/
def fightIteration: Boolean = {
var localGraph = graph // let's copy the base graph so we can modify it
// while we have computation to do, checks stop condition later
while (true) {
iterationNumber += 1
println("\nITERATION " + iterationNumber)
/* https://spark.apache.org/docs/1.2.1/graphx-programming-guide.html#aggregate-messages-aggregatemessages */
// lets grab all our messages !
val allMessages = localGraph.aggregateMessages[(Entity, Entity, Long)](
graphHelper.sendPosition,
mathHelper.closestEntityLogic
)
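// each aggregated message is a (receiving entity, closest living opponent, distance) tuple,
// as produced by GraphHelper.sendPosition and merged by MathHelper.closestEntityLogic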
// stop condition, if nothing has been done last iteration
if (allMessages.isEmpty()) {
println("\n*** FIGHT IS FINISHED ***")
return false
}
/* https://spark.apache.org/docs/latest/graphx-programming-guide.html#join_operators */
localGraph = localGraph.joinVertices(allMessages) {
(id, _, currentMessage) => {
val src = currentMessage._1 // source
val dest = currentMessage._2 // destination (= closest entity)
val distance = currentMessage._3 // distance between them
// debug purpose
println("=====================")
println("*** " + src.getName().toUpperCase() + " ***")
/* apply regeneration */
src.applyRegen()
// lets get our maximum range
val maxAttackRange = src.getSpell().getRange()
// if we can reach the opponent
if (distance < maxAttackRange) {
// then attack
println(">>ATTACK {" + dest.getName() + "} WITH {" + src.getSpell().getName() + "} // RANGE{" + maxAttackRange + "}, DISTANCE{" + distance + "}")
src.attack(dest)
} else {
// else move
println(">>MOVE")
src.moveInDirectionOf(dest)
}
src
}
}
/* lets aggregate damage messages */
val messageDamage = localGraph.aggregateMessages[(Entity, Int)](
graphHelper.sendDamagesToDest,
(acc, item) => {
(acc._1, acc._2 + item._2)
}
)
/* and join the vertices so we can apply the damages etc */
localGraph = localGraph.joinVertices(messageDamage) {
(id, src, message) => {
val opponent = message._1
val healthDifference = - message._2
opponent.modifyHealth(healthDifference)
opponent
}
}
}
return true
}
fightIteration
}
}
|
stressGC/Spark_Distributed_Computation
|
src/main/inversedIndex/Computing.scala
|
<gh_stars>1-10
/**
* @author <NAME>
*/
package formatCrawl
import org.apache.spark.rdd.RDD
object Computing
{
/**
* swaps the indexes and the content of an RDD
* @param RDD
* @return
*/
def SwapIndexesAndContent(RDD: RDD[String]): RDD[(String, String)] = {
return RDD.map(ConvertToTuple).map(SwapTuple)
}
/**
* converts an entry into a Tuple
* @param entry
* @return
*/
def ConvertToTuple(entry: String): (String, String) = {
val splittedEntry = entry.split(",")
val index = splittedEntry(0)
val spell = splittedEntry(1)
return (index, spell)
}
/**
* swaps a tuple
* @param entry
* @return
*/
def SwapTuple(entry: (String, String)): (String, String) = {
return (entry._2, entry._1)
}
/**
* reduces an RDD
* @param RDD
* @return
*/
def reduce(RDD: RDD[(String, String)]) : RDD[(String, String)] = {
return RDD.reduceByKey((acc, entry) => acc + ", " + entry)
}
}
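// Usage sketch (hypothetical, assumes an existing SparkContext named sc): an input line such as
// "1,fireball" becomes ("fireball", "1") after SwapIndexesAndContent, and reduce then concatenates
// every index seen for the same spell, e.g. ("fireball", "1, 7").
// val swapped = Computing.SwapIndexesAndContent(sc.parallelize(Seq("1,fireball", "7,fireball")))
// val inverted = Computing.reduce(swapped)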
|
stressGC/Spark_Distributed_Computation
|
src/main/fights/MathHelper.scala
|
<gh_stars>1-10
/**
* @author <NAME>
*/
package fights
import java.util.concurrent.ThreadLocalRandom
import scala.collection.mutable.ArrayBuffer
import scala.math.sqrt
import scala.math.pow
/**
* utility class used as a helper for math computation etc
*/
class MathHelper extends Serializable {
/**
* returns a random int between two numbers
* @param min
* @param max
* @return
*/
def getRandom(min: Int, max: Int) : Int = {
val random: ThreadLocalRandom = ThreadLocalRandom.current()
return random.nextInt(min, max + 1)
}
/**
* computes the distance between two entities
* @param entity1
* @param entity2
* @return
*/
def distanceBetween(entity1: Entity, entity2: Entity) : Float = {
// compute the difference on both axis
val xDiff = entity1.getX() - entity2.getX()
val yDiff = entity1.getY() - entity2.getY()
// euclidean distance
return sqrt(
pow(xDiff, 2) + pow(yDiff, 2)
).toFloat
}
/**
* utility function used by the aggregateMessage to determine which entity is the closest
* @param entity
* @param entity2
* @return
*/
def closestEntityLogic(entity: (Entity, Entity, Long), entity2: (Entity, Entity, Long)): (Entity, Entity, Long) = {
// the third element of each tuple is the distance; keep the message describing the closer entity
// e.g. merging a candidate at distance 12 with one at distance 40 keeps the distance-12 tuple
if (entity._3 < entity2._3) return entity
return entity2
}
}
|
stressGC/Spark_Distributed_Computation
|
src/main/inversedIndex/FSHelper.scala
|
<filename>src/main/inversedIndex/FSHelper.scala
/**
* @author <NAME>
*/
package formatCrawl
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions.explode
object FSHelper
{
// const
val PROJECT_ROOT = System.getProperty("user.dir") + "\\"
/**
* reads a JSON file from the filesystem
* @param path
* @param sc
* @return
*/
def ReadJSONFromFile(path: String, sc: SparkSession): RDD[String] = {
import sc.implicits._
val file = PROJECT_ROOT + path
/* https://code.dblock.org/2017/03/21/whats-the-simplest-way-to-parse-json-in-scala.html */
val fileAsDF = sc.read.format("json").json(file)
val fileAsRDD = fileAsDF.withColumn("spells", explode($"spells")).rdd.map(_.mkString(","))
return fileAsRDD
}
/**
* reads a CSV file from the filesystem
* @param path
* @param sc
* @return
*/
def ReadCSVFromFile(path: String, sc: SparkSession): RDD[(String, String)] = {
val file : String = PROJECT_ROOT + path
val df : DataFrame = sc.read.format("csv").csv(file)
// split on ','
val rdd : RDD[String] = df.rdd.map(_.mkString(","))
// some formatting
val finalRdd : RDD[(String, String)] = rdd.map(line => (line.split(",")(0).toLowerCase(), line.split(",")(1)) )
return finalRdd
}
/**
* writes an RDD to the filesystem
* @param rdd
* @param path
*/
def RDDToFileSystem(rdd : RDD[(String, String)], path: String): Unit = {
println(">> WRITTING TO FILE")
rdd.map(x => x._1 + "," + x._2).coalesce(1).saveAsTextFile(PROJECT_ROOT + "out/" + path)
}
}
|
stressGC/Spark_Distributed_Computation
|
src/main/inversedIndex/App.scala
|
<gh_stars>1-10
/**
* @author <NAME>
*/
package formatCrawl
import org.apache.spark.sql.{SparkSession}
object App
{
val spark: SparkSession = SparkSession.builder.master("local").getOrCreate
/**
* entry of our program, does the RDD manipulation for the second part of the first exercise
* @param args
*/
def main(args:Array[String]){
// lets read our file
val RDDFromJSON = FSHelper.ReadJSONFromFile("monsters.json", spark)
// lets swap indexes
val swappedRDDFromJSON = Computing.SwapIndexesAndContent(RDDFromJSON)
// lets reduce our RDD
val finalRDD = Computing.reduce(swappedRDDFromJSON)
// and write it to filesystem
FSHelper.RDDToFileSystem(finalRDD, "swappedRDD")
}
}
|
stressGC/Spark_Distributed_Computation
|
src/main/fights/Entity.scala
|
/**
* @author <NAME>
*/
package fights
/**
* class representing an entity
* @param name
* @param armor
* @param regen
* @param health
* @param coordX
* @param coordY
* @param speed
* @param spell
*/
class Entity(val name: String, val armor: Int, val regen: Int, var health: Int, var coordX: Float, var coordY: Float, val speed: Int, val spell: Spell) extends Serializable {
/* attributes */
private val helper = new MathHelper()
private val maxHealth = this.health
/* accessors */
def getSpell() : Spell = {
return this.spell
}
def getName() : String = {
return this.name
}
def getX(): Float = {
return this.coordX
}
def getY(): Float = {
return this.coordY
}
def getHealth() : Int = {
return this.health
}
def getArmor() : Int = {
return this.armor
}
/**
* modifies the instance health
* @param difference
*/
def modifyHealth(difference: Int) : Unit = {
// debug purpose
if(difference < 0) {
println(">>" + this.name + " LOST {" + difference + "} HP, FROM {" + this.health + "} TO {" + (this.health + difference) + "}")
}
// debug purpose
if(this.health + difference <= 0) {
println(">>" + this.name + " IS DEAD")
}
// lets add the difference
this.health += difference
}
/**
* function called each iteration to apply the regeneration to the entity
*/
def applyRegen() : Unit = {
// debug purpose
var msg = ""
// check if max health is reached
if(this.getHealth() + this.regen > this.maxHealth) {
msg = (this.maxHealth - this.getHealth()).toString
this.health = this.maxHealth
} else {
// else apply the regen
this.modifyHealth(this.regen)
msg = this.regen.toString
}
println(">>REGEN {+" + msg + "}, NOW {" + this.health + "}")
}
/**
* function called when the entity attacks an opponent
* @param opponent
*/
def attack(opponent: Entity) : Unit = {
// lets get the damage of our spell
val attackPower = this.spell.getDamages();
// lets get opponent armor
val opponentArmor = opponent.getArmor();
// if we deal more than opponent has armor
if (attackPower > opponentArmor) {
println(">>(OK) : DMG{" + attackPower + "}, ARMOR{" + opponentArmor + "}")
// apply damages
opponent.modifyHealth(-attackPower)
} else {
println(">>(FAIL) : DMG{" + attackPower + "}, ARMOR{" + opponentArmor + "}")
}
}
/**
* moves the entity towards an opponent entity
* @param entity
*/
def moveInDirectionOf(entity: Entity) : Unit = {
// computes the difference between them
val distanceBetweenEntities: Float = helper.distanceBetween(this, entity)
// threshold to round the coordinates
val minDistanceToHit = 10
// if about to collide, then we round the coordinates
if (distanceBetweenEntities - this.speed < minDistanceToHit) {
println(">>ABOUT TO COLLIDE: SAME COORDINATES : {" + this.getName() + "," + entity.getName() + "}")
// we set the same coordinates
this.coordX = entity.getX()
this.coordY = entity.getY()
} else {
/*
Pythagorean theorem
*/
// compute the difference on both axis
val diffX: Float = entity.getX() - this.coordX
val diffY: Float = entity.getY() - this.coordY
// compute the distance we can go on both axis
val moveX: Float = this.speed * (diffX / distanceBetweenEntities) // cosine of theta
val moveY: Float = this.speed * (diffY / distanceBetweenEntities) // sine of theta
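// worked example (hypothetical numbers): with speed 25 and a target 30 units right and 40 units up
// (distance 50), moveX = 25 * 30 / 50 = 15 and moveY = 25 * 40 / 50 = 20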
// and move
this.coordX += moveX
this.coordY += moveY
// debug
println(">>" + this.getName() + " MOVING TOWARDS " + entity.getName() + " BY (" + moveX + "," + moveY + ") => (" + this.getX() + "," + this.getY() + ")")
println(">>DISTANCE IS NOW : " + helper.distanceBetween(this, entity))
}
}
/**
* prints the summary of an iteration, debug purpose
*/
def printSummary() : Unit = {
println(this.getName() + ", PV = " + this.getHealth() + " @ (" + this.getX() + ", " + this.getY() + ")")
}
/**
* returns a string representing the entity, debug purpose
* @return
*/
override def toString: String = {
return name + ": (" + coordX + "," + coordY + ") : armor " + armor + ", regen " + regen + ", health " + health + ", speed " + speed
}
}
|
stressGC/Spark_Distributed_Computation
|
src/main/fights/GraphHelper.scala
|
<filename>src/main/fights/GraphHelper.scala
/**
* @author <NAME>
*/
package fights
import org.apache.spark.graphx.{Edge, EdgeContext}
import scala.collection.mutable.ArrayBuffer
/**
* utility class used as a helper for graph transformations and actions
*/
class GraphHelper {
val mathHelper = new MathHelper()
/**
* returns the entities as vertices
* @param entities
* @return
*/
def getVerticesAsTuple(entities: ArrayBuffer[Entity]): ArrayBuffer[(Long, Entity)] = {
val verticesWithIndex = entities.zipWithIndex
// we swap the key/value for easier computation
return verticesWithIndex.map(v => (v._2.toLong, v._1))
}
/**
* we generate the edges, between the solar and each other entity
* @param vertices
* @param solarIndex index of the solar in the vertices array
* @return
*/
def generateEdges(vertices: ArrayBuffer[(Long, Entity)], solarIndex: Int = 0): ArrayBuffer[Edge[Int]] = {
// will contain our edges
val edges = new ArrayBuffer[Edge[Int]]()
// for each vertice
for(i <- 1 to vertices.length - 1) {
val newEdges = new ArrayBuffer[Edge[Int]]()
// append an edge between the vertice and the solar
newEdges.append(Edge(vertices(solarIndex)._1.toLong, vertices(i)._1.toLong))
// and between the solar and the vertice
newEdges.append(Edge(vertices(i)._1.toLong, vertices(solarIndex)._1.toLong))
edges.appendAll(newEdges)
}
return edges
}
/**
* sends the distance between two entities to the dest
* @param edgeC
*/
def sendPosition(edgeC: EdgeContext[Entity, Int, (Entity, Entity, Long)]): Unit = {
// get source & destination
val src = edgeC.srcAttr
val dest = edgeC.dstAttr
/* if both are alive */
val bothAlive = (dest.getHealth() > 0) && (src.getHealth() > 0)
if (bothAlive) {
// compute the distance between them
val distance = mathHelper.distanceBetween(src, dest).toLong
// send it
edgeC.sendToDst((dest, src, distance))
}
}
/**
* sends the damages information to the destination
* @param edgeC
*/
def sendDamagesToDest(edgeC: EdgeContext[Entity, Int, (Entity, Int)]): Unit = {
// get source & destination
val src = edgeC.srcAttr
val dest = edgeC.dstAttr
/* if both still alive */
val bothAlive = (src.getHealth() > 0) && (dest.getHealth() > 0)
if (bothAlive){
// get the damages the source deals
val damages = src.getSpell().getDamages()
// send it to the destination
edgeC.sendToDst((edgeC.dstAttr, damages))
}
}
}
|
TradeshiftCN/scala-datapipeline-dsl
|
src/main/scala/datapipeline/compiler/AwsDataPipelineCompiler.scala
|
/*
* Copyright 2018 Shazam Entertainment Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the License
*/
package datapipeline.compiler
import java.io.{File, FileOutputStream, FilenameFilter}
import java.net.URLClassLoader
import java.nio.file.Paths
import datapipeline.dsl.PipelineBuilder
import scala.tools.nsc._
object AwsDataPipelineCompiler extends App {
import AwsDataPipelineCompilerHelpers._
if (args.length < 2) fail(
"""Usage: datapipeline-compiler <fqcn> <source> [source...]
|
|Where:
| - fqcn is the fully-qualified class name of the pipeline definition singleton. E.g. myorg.DataPipeline
| - source is either the Scala source file for your pipeline or a directory containing Scala source files
""".stripMargin
)
val (className :: sourcePaths) = args.toList
compileSources(sourcePaths)
val pipelineBuilder: PipelineBuilder = reflectivelyLoadPipelineBuilder(className)
val filename = s"$CurrentWorkingDir${pipelineBuilder.name}.json"
writePipelineJsonToFile(pipelineBuilder, filename)
}
object AwsDataPipelineCompilerHelpers {
def compileSources(sourcePaths: List[String]): Unit = {
val sourceFiles = sourcePaths.map(new File(_)).flatMap {
case directory if directory.isDirectory => directory.listFiles(ScalaFilenameFilter)
case file if file.isFile => file :: Nil
case other => sys.error(s"Unexpected input file/directory: $other")
}
val compiler = {
val settings = new Settings()
settings.usejavacp.value = true
val global = new Global(settings)
new global.Run
}
compiler.compile(sourceFiles.map(_.getAbsolutePath))
}
def reflectivelyLoadPipelineBuilder(className: String): PipelineBuilder = {
val classLoader = {
val environmentalClasspath = Option(System.getenv("CLASSPATH")).toList.flatMap(_.split(":"))
val classPath = (CurrentWorkingDir :: environmentalClasspath).map(new File(_).toURI.toURL)
new URLClassLoader(classPath.toArray, this.getClass.getClassLoader)
}
val clazz = classLoader.loadClass(className + "$")
if (!clazz.getDeclaredFields.map(_.getName).contains(PipelineField)) fail(
s"""Error: The class $className does not have a field named '$PipelineField'.
|Your pipeline definition singleton should include a field named '$PipelineField' of type datapipeline.dsl.PipelineBuilder,
|e.g.:
|
|object MyDataPipeline {
|
| import datapipeline.dsl._
|
| val $PipelineField = AwsDataPipeline(name = "MyDataPipeline", ...)
|
|}
""".stripMargin
)
val pipelineBuilderField = clazz.getDeclaredField(PipelineField)
pipelineBuilderField.setAccessible(true)
val obj = clazz.getField("MODULE$").get(null) // retrieve the Scala singleton instance
val pipelineBuilder = pipelineBuilderField.get(obj).asInstanceOf[PipelineBuilder]
if (pipelineBuilder == null && clazz.getDeclaredMethods.exists(_.getName == "delayedInit")) fail(
s"Error: Class $className cannot be loaded because it extends either DelayedInit or App."
)
pipelineBuilder
}
def writePipelineJsonToFile(pipelineBuilder: PipelineBuilder, filename: String): Unit = {
println(s"Writing pipeline definition to: $filename")
val os = new FileOutputStream(filename)
try {
os.write {
import org.json4s.native.JsonMethods._
pretty(render(pipelineBuilder.json)).getBytes("UTF-8")
}
} finally {
os.close()
}
}
def fail(message: String): Unit = {
System.err.println(message)
System.exit(1)
}
val PipelineField = "pipeline"
lazy val CurrentWorkingDir = s"${Paths.get("").toAbsolutePath}${File.separator}"
lazy val ScalaFilenameFilter: FilenameFilter = (_: File, name: String) => name.toLowerCase.endsWith(".scala")
}
|
tripl-ai/arc-elasticsearch-pipeline-plugin
|
src/it/scala/ai/tripl/arc/load/ElasticsearchLoadSuite.scala
|
package ai.tripl.arc
import java.net.URI
import java.util.UUID
import java.util.Properties
import scala.util.Random
import org.apache.http.client.methods.{HttpDelete, HttpGet}
import org.apache.http.impl.client.HttpClientBuilder
import com.fasterxml.jackson.databind.ObjectMapper
import org.scalatest.FunSuite
import org.scalatest.BeforeAndAfter
import scala.collection.JavaConverters._
import scala.io.Source
import org.apache.commons.io.FileUtils
import org.apache.commons.io.IOUtils
import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import ai.tripl.arc.api._
import ai.tripl.arc.api.API._
import ai.tripl.arc.util._
import ai.tripl.arc.config.ArcPipeline
import org.elasticsearch.spark.sql._
class ElasticsearchLoadSuite extends FunSuite with BeforeAndAfter {
val alpha = "abcdefghijklmnopqrstuvwxyz"
val size = alpha.size
def randStr(n:Int) = (1 to n).map(x => alpha(Random.nextInt.abs % size)).mkString
var session: SparkSession = _
val testData = getClass.getResource("/akc_breed_info.csv").toString
val inputView = "expected"
val index = "dogs"
val esURL = "elasticsearch"
val port = "9200"
val wanOnly = "true"
val ssl = "false"
val streamingIndex = "streaming"
val outputView = "outputView"
val checkpointLocation = "/tmp/checkpointLocation"
before {
implicit val spark = SparkSession
.builder()
.master("local[*]")
.config("spark.ui.port", "9999")
.config("es.index.auto.create", "true")
.config("spark.sql.streaming.checkpointLocation", checkpointLocation)
.appName("Spark ETL Test")
.getOrCreate()
spark.sparkContext.setLogLevel("INFO")
implicit val logger = TestUtils.getLogger()
// set for deterministic timezone
spark.conf.set("spark.sql.session.timeZone", "UTC")
session = spark
}
after {
session.stop
}
test("ElasticsearchLoad") {
implicit val spark = session
import spark.implicits._
implicit val logger = TestUtils.getLogger()
implicit val arcContext = TestUtils.getARCContext(isStreaming=false)
val df0 = spark.read.option("header","true").csv(testData)
df0.createOrReplaceTempView(inputView)
val client = HttpClientBuilder.create.build
val delete = new HttpDelete(s"http://${esURL}:9200/${index}")
val response = client.execute(delete)
response.close
load.ElasticsearchLoadStage.execute(
load.ElasticsearchLoadStage(
plugin=new load.ElasticsearchLoad,
name="df",
description=None,
inputView=inputView,
output=index,
numPartitions=None,
params=Map("es.nodes.wan.only" -> wanOnly, "es.port" -> port, "es.net.ssl" -> ssl, "es.nodes" -> esURL),
saveMode=SaveMode.Overwrite,
outputMode=OutputModeTypeAppend,
partitionBy=Nil
)
)
val df1 = spark.read
.format("org.elasticsearch.spark.sql")
.option("es.nodes.wan.only",wanOnly)
.option("es.port", port)
.option("es.net.ssl", ssl)
.option("es.nodes", esURL)
.load(index)
df1.createOrReplaceTempView("actual")
// reselect fields to ensure correct order
val expected = spark.sql(s"""
SELECT Breed, height_high_inches, height_low_inches, weight_high_lbs, weight_low_lbs FROM ${inputView}
""")
// reselect fields to ensure correct order
val actual = spark.sql(s"""
SELECT Breed, height_high_inches, height_low_inches, weight_high_lbs, weight_low_lbs FROM actual
""")
val actualExceptExpectedCount = actual.except(expected).count
val expectedExceptActualCount = expected.except(actual).count
if (actualExceptExpectedCount != 0 || expectedExceptActualCount != 0) {
println("actual")
actual.show(100000, false)
println("expected")
expected.show(100000, false)
}
assert(actualExceptExpectedCount === 0)
assert(expectedExceptActualCount === 0)
}
test("ElasticsearchLoad end-to-end") {
implicit val spark = session
import spark.implicits._
implicit val logger = TestUtils.getLogger()
implicit val arcContext = TestUtils.getARCContext(isStreaming=false)
val df = spark.read.option("header","true").csv(testData)
df.createOrReplaceTempView(inputView)
val conf = s"""{
"stages": [
{
"type": "ElasticsearchLoad",
"name": "write person",
"environments": [
"production",
"test"
],
"output": "person",
"inputView": "${inputView}",
"saveMode": "Overwrite",
"params": {
"es.nodes": "${esURL}",
"es.port": "${port}",
"es.nodes.wan.only": "${wanOnly}",
"es.net.ssl": "${ssl}"
}
}
]
}"""
val pipelineEither = ArcPipeline.parseConfig(Left(conf), arcContext)
pipelineEither match {
case Left(_) => {
println(pipelineEither)
assert(false)
}
case Right((pipeline, _)) => ARC.run(pipeline)(spark, logger, arcContext)
}
}
test("ElasticsearchLoad: Structured Streaming") {
implicit val spark = session
import spark.implicits._
implicit val logger = TestUtils.getLogger()
implicit val arcContext = TestUtils.getARCContext(isStreaming=true)
FileUtils.deleteQuietly(new java.io.File(checkpointLocation))
val indexName = randStr(10)
val readStream = spark
.readStream
.format("rate")
.option("rowsPerSecond", "1")
.load
readStream.createOrReplaceTempView(inputView)
load.ElasticsearchLoadStage.execute(
load.ElasticsearchLoadStage(
plugin=new load.ElasticsearchLoad,
name="df",
description=None,
inputView=inputView,
output=indexName,
numPartitions=None,
params=Map("es.nodes.wan.only" -> wanOnly, "es.port" -> port, "es.net.ssl" -> ssl, "es.nodes" -> esURL),
saveMode=SaveMode.Overwrite,
outputMode=OutputModeTypeAppend,
partitionBy=Nil
)
)
Thread.sleep(2000)
spark.streams.active.foreach(streamingQuery => streamingQuery.stop)
// call _search rest api to get all documents for new index
val client = HttpClientBuilder.create.build
val get = new HttpGet(s"http://${esURL}:${port}/${indexName}/_search")
val response = client.execute(get)
val body = Source.fromInputStream(response.getEntity.getContent).mkString
response.close
// assert that the documents array returned in the search is not empty
// if no documents then hits.hits will fail anyway
spark.read.json(spark.sparkContext.parallelize(Seq(body)).toDF.as[String]).createOrReplaceTempView("response")
val hitsSize = spark.sql("""
SELECT SIZE(hits.hits) FROM response
""")
assert(hitsSize.first.getInt(0) != 0)
}
}
|
tripl-ai/arc-elasticsearch-pipeline-plugin
|
version.sbt
|
<reponame>tripl-ai/arc-elasticsearch-pipeline-plugin
version := "1.3.0"
|
tripl-ai/arc-elasticsearch-pipeline-plugin
|
src/main/scala/ai/tripl/arc/extract/ElasticsearchExtract.scala
|
package ai.tripl.arc.extract
import java.io._
import java.net.URI
import java.util.Properties
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.TaskContext
import org.elasticsearch.spark.sql._
import ai.tripl.arc.api._
import ai.tripl.arc.api.API._
import ai.tripl.arc.config._
import ai.tripl.arc.config.Error._
import ai.tripl.arc.plugins.PipelineStagePlugin
import ai.tripl.arc.util.CloudUtils
import ai.tripl.arc.util.DetailException
import ai.tripl.arc.util.EitherUtils._
import ai.tripl.arc.util.ExtractUtils
import ai.tripl.arc.util.MetadataUtils
import ai.tripl.arc.util.Utils
class ElasticsearchExtract extends PipelineStagePlugin {
val version = ai.tripl.arc.elasticsearch.BuildInfo.version
def instantiate(index: Int, config: com.typesafe.config.Config)(implicit spark: SparkSession, logger: ai.tripl.arc.util.log.logger.Logger, arcContext: ARCContext): Either[List[ai.tripl.arc.config.Error.StageError], PipelineStage] = {
import ai.tripl.arc.config.ConfigReader._
import ai.tripl.arc.config.ConfigUtils._
implicit val c = config
val expectedKeys = "type" :: "name" :: "description" :: "environments" :: "input" :: "outputView" :: "numPartitions" :: "partitionBy" :: "persist" :: "params" :: Nil
val name = getValue[String]("name")
val description = getOptionalValue[String]("description")
val input = getValue[String]("input")
val outputView = getValue[String]("outputView")
val persist = getValue[java.lang.Boolean]("persist", default = Some(false))
val numPartitions = getOptionalValue[Int]("numPartitions")
val partitionBy = getValue[StringList]("partitionBy", default = Some(Nil))
val params = readMap("params", c)
val invalidKeys = checkValidKeys(c)(expectedKeys)
(name, description, input, outputView, persist, numPartitions, partitionBy, invalidKeys) match {
case (Right(name), Right(description), Right(input), Right(outputView), Right(persist), Right(numPartitions), Right(partitionBy), Right(invalidKeys)) =>
val stage = ElasticsearchExtractStage(
plugin=this,
name=name,
description=description,
input=input,
outputView=outputView,
params=params,
persist=persist,
numPartitions=numPartitions,
partitionBy=partitionBy
)
stage.stageDetail.put("input", input)
stage.stageDetail.put("outputView", outputView)
stage.stageDetail.put("params", params.asJava)
Right(stage)
case _ =>
val allErrors: Errors = List(name, description, input, outputView, persist, numPartitions, partitionBy, invalidKeys).collect{ case Left(errs) => errs }.flatten
val stageName = stringOrDefault(name, "unnamed stage")
val err = StageError(index, stageName, c.origin.lineNumber, allErrors)
Left(err :: Nil)
}
}
}
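// Example stage configuration (sketch only; the key names mirror expectedKeys above, the values are hypothetical):
// {
//   "type": "ElasticsearchExtract",
//   "name": "read dogs index",
//   "environments": ["production", "test"],
//   "input": "dogs",
//   "outputView": "dogs_view",
//   "params": { "es.nodes": "elasticsearch", "es.port": "9200" }
// }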
case class ElasticsearchExtractStage(
plugin: ElasticsearchExtract,
name: String,
description: Option[String],
input: String,
outputView: String,
params: Map[String, String],
persist: Boolean,
numPartitions: Option[Int],
partitionBy: List[String]
) extends PipelineStage {
override def execute()(implicit spark: SparkSession, logger: ai.tripl.arc.util.log.logger.Logger, arcContext: ARCContext): Option[DataFrame] = {
ElasticsearchExtractStage.execute(this)
}
}
object ElasticsearchExtractStage {
def execute(stage: ElasticsearchExtractStage)(implicit spark: SparkSession, logger: ai.tripl.arc.util.log.logger.Logger, arcContext: ARCContext): Option[DataFrame] = {
import spark.implicits._
// if incoming dataset is empty create empty dataset with a known schema
val df = try {
if (arcContext.isStreaming) {
spark.emptyDataFrame
} else {
spark.read.format("org.elasticsearch.spark.sql").options(stage.params).load(stage.input)
}
} catch {
case e: Exception => throw new Exception(e) with DetailException {
override val detail = stage.stageDetail
}
}
// repartition to distribute rows evenly
val repartitionedDF = stage.partitionBy match {
case Nil => {
stage.numPartitions match {
case Some(numPartitions) => df.repartition(numPartitions)
case None => df
}
}
case partitionBy => {
// create a column array for repartitioning
val partitionCols = partitionBy.map(col => df(col))
stage.numPartitions match {
case Some(numPartitions) => df.repartition(numPartitions, partitionCols:_*)
case None => df.repartition(partitionCols:_*)
}
}
}
if (arcContext.immutableViews) repartitionedDF.createTempView(stage.outputView) else repartitionedDF.createOrReplaceTempView(stage.outputView)
if (!repartitionedDF.isStreaming) {
stage.stageDetail.put("inputFiles", java.lang.Integer.valueOf(repartitionedDF.inputFiles.length))
stage.stageDetail.put("outputColumns", java.lang.Integer.valueOf(repartitionedDF.schema.length))
stage.stageDetail.put("numPartitions", java.lang.Integer.valueOf(repartitionedDF.rdd.partitions.length))
if (stage.persist) {
repartitionedDF.persist(arcContext.storageLevel)
stage.stageDetail.put("records", java.lang.Long.valueOf(repartitionedDF.count))
}
}
Option(repartitionedDF)
}
}
|
tripl-ai/arc-elasticsearch-pipeline-plugin
|
src/it/scala/ai/tripl/arc/extract/ElasticsearchExtractSuite.scala
|
package ai.tripl.arc
import java.net.URI
import org.scalatest.FunSuite
import org.scalatest.BeforeAndAfter
import collection.JavaConverters._
import org.apache.http.client.methods.HttpDelete
import org.apache.http.impl.client.HttpClientBuilder
import org.apache.commons.io.FileUtils
import org.apache.commons.io.IOUtils
import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import ai.tripl.arc.api._
import ai.tripl.arc.api.API._
import ai.tripl.arc.util._
import ai.tripl.arc.util.ControlUtils._
import org.elasticsearch.spark.sql._
class ElasticsearchExtractSuite extends FunSuite with BeforeAndAfter {
var session: SparkSession = _
val testData = getClass.getResource("/akc_breed_info.csv").toString
val outputView = "actual"
val index = "dogs"
val esURL = "elasticsearch"
val port = "9200"
val wanOnly = "true"
val ssl = "false"
before {
implicit val spark = SparkSession
.builder()
.master("local[*]")
.config("spark.ui.port", "9999")
.appName("Spark ETL Test")
.getOrCreate()
spark.sparkContext.setLogLevel("INFO")
implicit val logger = TestUtils.getLogger()
// set for deterministic timezone
spark.conf.set("spark.sql.session.timeZone", "UTC")
session = spark
}
after {
session.stop
}
test("ElasticsearchExtract") {
implicit val spark = session
import spark.implicits._
implicit val logger = TestUtils.getLogger()
implicit val arcContext = TestUtils.getARCContext(isStreaming=false)
val df0 = spark.read.option("header","true").csv(testData)
df0.createOrReplaceTempView("expected")
val client = HttpClientBuilder.create.build
val delete = new HttpDelete(s"http://${esURL}:9200/${index}")
val response = client.execute(delete)
response.close
df0.write
.format("org.elasticsearch.spark.sql")
.option("es.nodes.wan.only",wanOnly)
.option("es.port",port)
.option("es.net.ssl",ssl)
.option("es.nodes", esURL)
.mode("overwrite")
.save(index)
extract.ElasticsearchExtractStage.execute(
extract.ElasticsearchExtractStage(
plugin=new extract.ElasticsearchExtract,
name="df",
description=None,
input=index,
outputView=outputView,
numPartitions=None,
params=Map("es.nodes.wan.only" -> wanOnly, "es.port" -> port, "es.net.ssl" -> ssl, "es.nodes" -> esURL),
partitionBy=Nil,
persist=true
)
)
// reselect fields to ensure correct order
val expected = spark.sql(s"""
SELECT Breed, height_high_inches, height_low_inches, weight_high_lbs, weight_low_lbs FROM expected
""")
// reselect fields to ensure correct order
val actual = spark.sql(s"""
SELECT Breed, height_high_inches, height_low_inches, weight_high_lbs, weight_low_lbs FROM ${outputView}
""")
val actualExceptExpectedCount = actual.except(expected).count
val expectedExceptActualCount = expected.except(actual).count
if (actualExceptExpectedCount != 0 || expectedExceptActualCount != 0) {
println("actual")
actual.show(100000, false)
println("expected")
expected.show(100000, false)
}
assert(actualExceptExpectedCount === 0)
assert(expectedExceptActualCount === 0)
}
}
|
tripl-ai/arc-elasticsearch-pipeline-plugin
|
src/main/scala/ai/tripl/arc/load/ElasticsearchLoad.scala
|
package ai.tripl.arc.load
import java.net.URI
import java.util.Properties
import scala.collection.JavaConverters._
import org.apache.spark.sql._
import org.apache.spark.sql.types._
import com.typesafe.config._
import org.elasticsearch.spark.sql._
import ai.tripl.arc.api._
import ai.tripl.arc.api.API._
import ai.tripl.arc.config._
import ai.tripl.arc.config.Error._
import ai.tripl.arc.plugins.PipelineStagePlugin
import ai.tripl.arc.util.CloudUtils
import ai.tripl.arc.util.DetailException
import ai.tripl.arc.util.EitherUtils._
import ai.tripl.arc.util.ExtractUtils
import ai.tripl.arc.util.MetadataUtils
import ai.tripl.arc.util.ListenerUtils
import ai.tripl.arc.util.Utils
import org.apache.spark.sql.streaming.OutputMode
class ElasticsearchLoad extends PipelineStagePlugin {
val version = ai.tripl.arc.elasticsearch.BuildInfo.version
def instantiate(index: Int, config: com.typesafe.config.Config)(implicit spark: SparkSession, logger: ai.tripl.arc.util.log.logger.Logger, arcContext: ARCContext): Either[List[ai.tripl.arc.config.Error.StageError], PipelineStage] = {
import ai.tripl.arc.config.ConfigReader._
import ai.tripl.arc.config.ConfigUtils._
implicit val c = config
val expectedKeys = "type" :: "name" :: "description" :: "environments" :: "inputView" :: "output" :: "numPartitions" :: "partitionBy" :: "saveMode" :: "params" :: "outputMode" :: Nil
val name = getValue[String]("name")
val description = getOptionalValue[String]("description")
val inputView = getValue[String]("inputView")
val output = getValue[String]("output")
val numPartitions = getOptionalValue[Int]("numPartitions")
val partitionBy = getValue[StringList]("partitionBy", default = Some(Nil))
val saveMode = getValue[String]("saveMode", default = Some("Overwrite"), validValues = "Append" :: "ErrorIfExists" :: "Ignore" :: "Overwrite" :: Nil) |> parseSaveMode("saveMode") _
val outputMode = getValue[String]("outputMode", default = Some("Append"), validValues = "Append" :: "Complete" :: "Update" :: Nil) |> parseOutputModeType("outputMode") _
val params = readMap("params", c)
val invalidKeys = checkValidKeys(c)(expectedKeys)
(name, description, inputView, output, numPartitions, partitionBy, saveMode, invalidKeys, outputMode) match {
case (Right(name), Right(description), Right(inputView), Right(output), Right(numPartitions), Right(partitionBy), Right(saveMode), Right(invalidKeys), Right(outputMode)) =>
val stage = ElasticsearchLoadStage(
plugin=this,
name=name,
description=description,
inputView=inputView,
output=output,
params=params,
numPartitions=numPartitions,
partitionBy=partitionBy,
saveMode=saveMode,
outputMode=outputMode
)
stage.stageDetail.put("inputView", inputView)
stage.stageDetail.put("output", output)
stage.stageDetail.put("params", params.asJava)
stage.stageDetail.put("partitionBy", partitionBy.asJava)
stage.stageDetail.put("saveMode", saveMode.toString.toLowerCase)
stage.stageDetail.put("outputMode", outputMode.sparkString)
Right(stage)
case _ =>
val allErrors: Errors = List(name, description, inputView, output, numPartitions, partitionBy, saveMode, invalidKeys, outputMode).collect{ case Left(errs) => errs }.flatten
val stageName = stringOrDefault(name, "unnamed stage")
val err = StageError(index, stageName, c.origin.lineNumber, allErrors)
Left(err :: Nil)
}
}
}
case class ElasticsearchLoadStage(
plugin: ElasticsearchLoad,
name: String,
description: Option[String],
inputView: String,
output: String,
partitionBy: List[String],
numPartitions: Option[Int],
saveMode: SaveMode,
outputMode: OutputModeType,
params: Map[String, String]
) extends PipelineStage {
override def execute()(implicit spark: SparkSession, logger: ai.tripl.arc.util.log.logger.Logger, arcContext: ARCContext): Option[DataFrame] = {
ElasticsearchLoadStage.execute(this)
}
}
object ElasticsearchLoadStage {
def execute(stage: ElasticsearchLoadStage)(implicit spark: SparkSession, logger: ai.tripl.arc.util.log.logger.Logger, arcContext: ARCContext): Option[DataFrame] = {
val df = spark.table(stage.inputView)
if (!df.isStreaming) {
stage.numPartitions match {
case Some(partitions) => stage.stageDetail.put("numPartitions", Integer.valueOf(partitions))
case None => stage.stageDetail.put("numPartitions", Integer.valueOf(df.rdd.getNumPartitions))
}
}
val dropMap = new java.util.HashMap[String, Object]()
// Elasticsearch does not support a column named _index, so it is dropped below
val unsupported = df.schema.filter(_.name == "_index").map(_.name)
if (!unsupported.isEmpty) {
dropMap.put("Unsupported", unsupported.asJava)
}
stage.stageDetail.put("drop", dropMap)
val nonNullDF = df.drop(unsupported:_*)
val listener = ListenerUtils.addStageCompletedListener(stage.stageDetail)
// Elasticsearch will convert date and times to epoch milliseconds
val outputDF = try {
if (arcContext.isStreaming) {
nonNullDF.writeStream.options(stage.params).outputMode(stage.outputMode.sparkString).format("es").start(stage.output)
nonNullDF
} else {
stage.partitionBy match {
case Nil =>
val dfToWrite = stage.numPartitions.map(nonNullDF.repartition(_)).getOrElse(nonNullDF)
dfToWrite.write.options(stage.params).mode(stage.saveMode).format("org.elasticsearch.spark.sql").save(stage.output)
dfToWrite
case partitionBy => {
// create a column array for repartitioning
val partitionCols = partitionBy.map(col => nonNullDF(col))
stage.numPartitions match {
case Some(n) =>
val dfToWrite = nonNullDF.repartition(n, partitionCols:_*)
dfToWrite.write.options(stage.params).partitionBy(partitionBy:_*).mode(stage.saveMode).format("org.elasticsearch.spark.sql").save(stage.output)
dfToWrite
case None =>
val dfToWrite = nonNullDF.repartition(partitionCols:_*)
dfToWrite.write.options(stage.params).partitionBy(partitionBy:_*).mode(stage.saveMode).format("org.elasticsearch.spark.sql").save(stage.output)
dfToWrite
}
}
}
}
} catch {
case e: Exception => throw new Exception(e) with DetailException {
override val detail = stage.stageDetail
}
}
spark.sparkContext.removeSparkListener(listener)
Option(outputDF)
}
}
|
tripl-ai/arc-elasticsearch-pipeline-plugin
|
project/Dependencies.scala
|
import sbt._
object Dependencies {
// versions
lazy val sparkVersion = "2.4.5"
// testing
val scalaTest = "org.scalatest" %% "scalatest" % "3.0.7" % "test,it"
// arc
val arc = "ai.tripl" %% "arc" % "2.8.0" % "provided"
val typesafeConfig = "com.typesafe" % "config" % "1.3.1" intransitive()
// spark
val sparkSql = "org.apache.spark" %% "spark-sql" % sparkVersion % "provided"
val sparkHive = "org.apache.spark" %% "spark-hive" % sparkVersion % "provided"
// elasticsearch
val elasticsearch = "org.elasticsearch" % "elasticsearch-hadoop" % "7.6.1"
// Project
val etlDeps = Seq(
scalaTest,
arc,
typesafeConfig,
sparkSql,
sparkHive,
elasticsearch
)
}
|
AlexandraGYG/snowplow
|
3-enrich/stream-enrich/integration-tests/src/test/scala/com.snowplowanalytics.snowplow.enrich.stream/BeforeAfterAll.scala
|
/*
* Copyright (c) 2013-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0, and
* you may not use this file except in compliance with the Apache License
* Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the Apache License Version 2.0 is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.stream
import org.specs2.mutable.SpecificationLike
import org.specs2.specification.Fragments
import org.specs2.specification.Step
/**
* The content of `beforeAll` is executed before a spec and the content of `afterAll` is executed
* once the spec is done.
* TODO: To remove once specs2 has been updated.
*/
trait BeforeAfterAll extends SpecificationLike {
override def map(fragments: => Fragments) =
Step(beforeAll) ^ fragments ^ Step(afterAll)
def beforeAll(): Unit
def afterAll(): Unit
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/enrichments/SchemaEnrichmentTest.scala
|
<reponame>AlexandraGYG/snowplow
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich
package common
package enrichments
// Iglu
import com.snowplowanalytics.iglu.client.SchemaKey
// Common
import outputs.EnrichedEvent
import enrichments.SchemaEnrichment._
// Specs2
import org.specs2.Specification
import org.specs2.matcher.DataTables
import org.specs2.scalaz.ValidationMatchers
// Scalaz
import scalaz._
import Scalaz._
class SchemaEnrichmentTest extends Specification with DataTables with ValidationMatchers {
implicit val resolver = SpecHelpers.IgluResolver
val signupFormSubmitted =
"""{"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0","data":{"schema":"iglu:com.snowplowanalytics.snowplow-website/signup_form_submitted/jsonschema/1-0-0","data":{"name":"Χαριτίνη NEW Unicode test","email":"<EMAIL>","company":"SP","eventsPerMonth":"< 1 million","serviceType":"unsure"}}}"""
val invalidPayload =
"""{"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0","data":{"schema":"iglu:com.snowplowanalytics.snowplow-website/signup_form_submitted/jsonschema/1-0-0","data":{"serviceType":"unsure"}}}"""
def is = s2"""
Extracting SchemaKeys from valid events should work $e1
Invalid events should fail when extracting SchemaKeys $e2
"""
def e1 =
"SPEC NAME" || "EVENT" | "EXPECTED SCHEMA" |
"page view" !! event("page_view") ! SchemaKey("com.snowplowanalytics.snowplow",
"page_view",
"jsonschema",
"1-0-0") |
"ping ping" !! event("page_ping") ! SchemaKey("com.snowplowanalytics.snowplow",
"page_ping",
"jsonschema",
"1-0-0") |
"transaction" !! event("transaction") ! SchemaKey("com.snowplowanalytics.snowplow",
"transaction",
"jsonschema",
"1-0-0") |
"transaction item" !! event("transaction_item") ! SchemaKey("com.snowplowanalytics.snowplow",
"transaction_item",
"jsonschema",
"1-0-0") |
"struct event" !! event("struct") ! SchemaKey("com.google.analytics", "event", "jsonschema", "1-0-0") |
"invalid unstruct event" !! unstructEvent(invalidPayload) ! SchemaKey("com.snowplowanalytics.snowplow-website",
"signup_form_submitted",
"jsonschema",
"1-0-0") |
"unstruct event" !! unstructEvent(signupFormSubmitted) ! SchemaKey("com.snowplowanalytics.snowplow-website",
"signup_form_submitted",
"jsonschema",
"1-0-0") |> { (_, event, expected) =>
{
val schema = SchemaEnrichment.extractSchema(event)
schema must beSuccessful(expected)
}
}
val nonSchemedPayload =
"""{"name":"Χαριτίνη NEW Unicode test","email":"<EMAIL>","company":"SP","eventsPerMonth":"< 1 million","serviceType":"unsure"}"""
val invalidKeyPayload =
"""{"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0","data":{"schema":"iglu:com.snowplowanalytics.snowplow-website/signup_form_submitted/jsonschema","data":{"name":"<NAME>","email":"<EMAIL>","company":"SP","eventsPerMonth":"< 1 million","serviceType":"unsure"}}}"""
def e2 =
"SPEC NAME" || "EVENT" |
"unknown event" !! event("unknown") |
"missing event" !! event(null) |
"not schemed" !! unstructEvent(nonSchemedPayload) |
"invalid key" !! unstructEvent(invalidKeyPayload) |> { (_, event) =>
{
val schema = SchemaEnrichment.extractSchema(event)
schema must beFailing
}
}
def event(eventType: String) = {
val event: EnrichedEvent = new EnrichedEvent()
event.setEvent(eventType)
event
}
def unstructEvent(unstruct: String) = {
val event: EnrichedEvent = new EnrichedEvent()
event.setEvent("unstruct")
event.setUnstruct_event(unstruct)
event
}
}
|
AlexandraGYG/snowplow
|
3-enrich/beam-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.beam/SpecHelpers.scala
|
<reponame>AlexandraGYG/snowplow<gh_stars>1-10
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and
* limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package beam
import scalaz._
import Scalaz._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import common.enrichments.EnrichmentRegistry
import common.utils.JsonUtils
import iglu.client.Resolver
object SpecHelpers {
val resolverConfig = """
{
"schema": "iglu:com.snowplowanalytics.iglu/resolver-config/jsonschema/1-0-2",
"data": {
"cacheSize": 500,
"repositories": [
{
"name": "Iglu Central",
"priority": 0,
"vendorPrefixes": [ "com.snowplowanalytics" ],
"connection": { "http": { "uri": "http://iglucentral.com" } }
}
]
}
}
"""
implicit val resolver = (for {
json <- JsonUtils.extractJson("", resolverConfig)
resolver <- Resolver.parse(json).leftMap(_.toString)
} yield resolver).fold(
e => throw new RuntimeException(e),
r => r
)
val enrichmentConfig = """
{
"schema": "iglu:com.snowplowanalytics.snowplow/anon_ip/jsonschema/1-0-0",
"data": {
"name": "anon_ip",
"vendor": "com.snowplowanalytics.snowplow",
"enabled": true,
"parameters": { "anonOctets": 1 }
}
}
"""
val ipLookupsEnrichmentConfig = """
{
"schema": "iglu:com.snowplowanalytics.snowplow/ip_lookups/jsonschema/2-0-0",
"data": {
"name": "ip_lookups",
"vendor": "com.snowplowanalytics.snowplow",
"enabled": true,
"parameters": {
"geo": {
"database": "GeoLite2-City.mmdb",
"uri": "http://acme.com"
}
}
}
}
"""
val enrichmentRegistry = (for {
combinedJson <-
(("schema" -> "iglu:com.snowplowanalytics.snowplow/enrichments/jsonschema/1-0-0") ~
("data" -> List(parse(enrichmentConfig)))).success
registry <- EnrichmentRegistry.parse(combinedJson, false).leftMap(_.toList.mkString("\n"))
} yield registry).fold(
e => throw new RuntimeException(e),
r => r
)
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/adapters/registry/OlarkAdapter.scala
|
/*
* Copyright (c) 2016-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package common
package adapters
package registry
// Java
import java.net.URI
import org.apache.http.client.utils.URLEncodedUtils
import org.joda.time.DateTime
// Scala
import scala.util.matching.Regex
import scala.util.control.NonFatal
import scala.collection.JavaConversions._
import scala.util.{Try, Success => TS, Failure => TF}
// Scalaz
import scalaz._
import Scalaz._
// Jackson
import com.fasterxml.jackson.core.JsonParseException
// json4s
import org.json4s._
import org.json4s.jackson.JsonMethods._
// Iglu
import iglu.client.{Resolver, SchemaKey}
// This project
import loaders.CollectorPayload
import utils.{JsonUtils => JU}
/**
* Transforms a collector payload which conforms to
* a known version of the Olark Tracking webhook
* into raw events.
*/
object OlarkAdapter extends Adapter {
// Vendor name for Failure Message
private val VendorName = "Olark"
// Tracker version for an Olark Tracking webhook
private val TrackerVersion = "com.olark-v1"
// Expected content type for a request body
private val ContentType = "application/x-www-form-urlencoded"
// Schemas for reverse-engineering a Snowplow unstructured event
private val EventSchemaMap = Map(
"transcript" -> SchemaKey("com.olark", "transcript", "jsonschema", "1-0-0").toSchemaUri,
"offline_message" -> SchemaKey("com.olark", "offline_message", "jsonschema", "1-0-0").toSchemaUri
)
/**
* Converts a CollectorPayload instance into raw events.
*
* An Olark Tracking payload contains a single event
* in the body of the payload, stored within an HTTP
* form-encoded string.
*
* @param payload The CollectorPayload containing one or more
* raw events as collected by a Snowplow collector
* @param resolver (implicit) The Iglu resolver used for
* schema lookup and validation. Not used
* @return a Validation boxing either a NEL of RawEvents on
* Success, or a NEL of Failure Strings
*/
def toRawEvents(payload: CollectorPayload)(implicit resolver: Resolver): ValidatedRawEvents =
(payload.body, payload.contentType) match {
case (None, _) => s"Request body is empty: no ${VendorName} events to process".failureNel
case (_, None) =>
s"Request body provided but content type empty, expected ${ContentType} for ${VendorName}".failureNel
case (_, Some(ct)) if ct != ContentType =>
s"Content type of ${ct} provided, expected ${ContentType} for ${VendorName}".failureNel
case (Some(body), _) if (body.isEmpty) => s"${VendorName} event body is empty: nothing to process".failureNel
case (Some(body), _) => {
val qsParams = toMap(payload.querystring)
Try { toMap(URLEncodedUtils.parse(URI.create("http://localhost/?" + body), "UTF-8").toList) } match {
case TF(e) => s"${VendorName} could not parse body: [${JU.stripInstanceEtc(e.getMessage).orNull}]".failureNel
case TS(bodyMap) =>
payloadBodyToEvent(bodyMap).flatMap {
case event => {
val eventType = (event \ "operators") match {
case (JNothing) => Some("offline_message")
case (_) => Some("transcript")
}
lookupSchema(eventType, VendorName, EventSchemaMap).flatMap {
case schema =>
transformTimestamps(event).flatMap {
case transformedEvent =>
NonEmptyList(
RawEvent(
api = payload.api,
parameters = toUnstructEventParams(TrackerVersion,
qsParams,
schema,
camelize(transformedEvent),
"srv"),
contentType = payload.contentType,
source = payload.source,
context = payload.context
)).success
}
}
}
}
}
}
}
/**
* Converts all Olark timestamps in a parsed transcript or offline_message JSON object to ISO 8601 strings
*
* @param json a parsed event
* @return JObject the event with timestamps replaced
*/
private def transformTimestamps(json: JValue): Validated[JValue] = {
def toMsec(oTs: String): Long =
(oTs.split('.') match {
case Array(sec) => s"${sec}000"
case Array(sec, msec) => s"${sec}${msec.take(3).padTo(3, '0')}"
}).toLong
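// e.g. (illustration only) toMsec("1307116657.1") == 1307116657100L and toMsec("1307116657") == 1307116657000L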
Try {
json.transformField {
case JField("items", jArray) =>
("items", jArray.transform {
case jo: JObject =>
jo.transformField {
case JField("timestamp", JString(value)) =>
("timestamp", JString(JsonSchemaDateTimeFormat.print(new DateTime(toMsec(value)))))
}
})
}.successNel
} match {
case TF(e) =>
s"${VendorName} could not convert timestamps: [${JU.stripInstanceEtc(e.getMessage).orNull}]".failureNel
case TS(s) => s
}
}
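  // Illustrative behaviour of the toMsec helper inside transformTimestamps above
  // (timestamp values are made-up examples):
  //   "1473774819"        -> 1473774819000L  (second precision, padded to milliseconds)
  //   "1473774819.2"      -> 1473774819200L  (fractional part padded to 3 digits)
  //   "1473774819.263083" -> 1473774819263L  (fractional part truncated to 3 digits)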
  /**
   * Converts a querystring payload into an event.
   * @param bodyMap The converted map from the querystring
   * @return a Validation boxing either the event JObject on Success, or a NEL of Failure Strings
   */
private def payloadBodyToEvent(bodyMap: Map[String, String]): Validated[JObject] =
bodyMap.get("data") match {
case None => s"${VendorName} event data does not have 'data' as a key".failureNel
case Some("") => s"${VendorName} event data is empty: nothing to process".failureNel
case Some(json) => {
try {
val event = parse(json)
event match {
case obj: JObject => obj.successNel
case _ => s"${VendorName} event wrong type: [%s]".format(event.getClass).failureNel
}
} catch {
case e: JsonParseException =>
s"${VendorName} event string failed to parse into JSON: [${JU.stripInstanceEtc(e.getMessage).orNull}]".failureNel
case NonFatal(e) =>
s"${VendorName} incorrect event string : [${JU.stripInstanceEtc(e.getMessage).orNull}]".failureNel
}
}
}
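  // Sketch of the body map payloadBodyToEvent above expects (illustrative only): a single
  // form field named "data" whose value is the raw Olark JSON, e.g.
  //   Map("data" -> """{"operators": { ... }, "items": [ ... ]}""")
  // "operators" and "items" are the fields inspected by toRawEvents and transformTimestamps above.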
}
|
AlexandraGYG/snowplow
|
3-enrich/spark-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.spark/SparkSpec.scala
|
<reponame>AlexandraGYG/snowplow<gh_stars>1-10
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and
* limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.spark
// Spark
import org.apache.spark.SparkConf
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.sql.SparkSession
// Specs2
import org.specs2.specification.BeforeAfterAll
/**
 * Trait to mix into every spec which has to run a Spark job.
 * Creates a Spark session before the spec and stops it afterwards.
 */
trait SparkSpec extends BeforeAfterAll {
def appName: String
// local[1] means the tests will run locally on one thread
val conf = new SparkConf()
.setMaster("local[1]")
.setAppName(appName)
.set("spark.serializer", classOf[KryoSerializer].getName())
.set("spark.kryo.registrationRequired", "true")
.registerKryoClasses(EnrichJob.classesToRegister)
var spark: SparkSession =
SparkSession
.builder()
.config(conf)
.getOrCreate()
val hadoopConfig = spark.sparkContext.hadoopConfiguration
hadoopConfig.set("io.compression.codecs", classOf[com.hadoop.compression.lzo.LzopCodec].getName())
hadoopConfig.set(
"io.compression.codec.lzo.class",
classOf[com.hadoop.compression.lzo.LzoCodec].getName())
  override def beforeAll(): Unit =
    spark = SparkSession
      .builder()
      .config(conf)
      .getOrCreate()
override def afterAll(): Unit = {
if (spark != null) spark.stop()
spark = null
}
}
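// Illustrative usage of the trait above (spec and app names are hypothetical):
//   class MyEnrichJobSpec extends Specification with SparkSpec {
//     override def appName = "my-enrich-job-spec"
//     // `spark` is available to the examples between beforeAll() and afterAll()
//   }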
|
AlexandraGYG/snowplow
|
3-enrich/spark-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich.spark/EnrichJobConfig.scala
|
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and
* limitations there under.
*/
package com.snowplowanalytics
package snowplow.enrich
package spark
// Java
import java.net.URI
// Joda
import org.joda.time.DateTime
// Scalaz
import scalaz._
import Scalaz._
// Scopt
import scopt._
// Snowplow
import common.ValidatedNelMessage
import common.enrichments.EnrichmentRegistry
import common.loaders.Loader
import iglu.client.validation.ProcessingMessageMethods._
sealed trait EnrichJobConfig {
def inFolder: String
def inFormat: String
def outFolder: String
def badFolder: String
def enrichments: String
def igluConfig: String
def local: Boolean
}
private case class RawEnrichJobConfig(
override val inFolder: String = "",
override val inFormat: String = "",
override val outFolder: String = "",
override val badFolder: String = "",
override val enrichments: String = "",
override val igluConfig: String = "",
override val local: Boolean = false,
etlTstamp: Long = 0L
) extends EnrichJobConfig
/**
* Case class representing the configuration for the enrich job.
* @param inFolder Folder where the input events are located
* @param inFormat Collector format in which the data is coming in
* @param outFolder Output folder where the enriched events will be stored
* @param badFolder Output folder where the malformed events will be stored
* @param enrichments JSON representing the enrichments that need performing
* @param igluConfig JSON representing the Iglu configuration
* @param local Whether to build a registry from local data
* @param etlTstamp Timestamp at which the job was launched
*/
case class ParsedEnrichJobConfig(
override val inFolder: String,
override val inFormat: String,
override val outFolder: String,
override val badFolder: String,
override val enrichments: String,
override val igluConfig: String,
override val local: Boolean,
etlTstamp: DateTime,
filesToCache: List[(URI, String)]
) extends EnrichJobConfig
object EnrichJobConfig {
private val parser = new scopt.OptionParser[RawEnrichJobConfig]("EnrichJob") {
head("EnrichJob")
opt[String]("input-folder")
.required()
.valueName("<input folder>")
.action((f, c) => c.copy(inFolder = f))
.text("Folder where the input events are located")
opt[String]("input-format")
.required()
.valueName("<input format>")
.action((f, c) => c.copy(inFormat = f))
.text("The format in which the collector is saving data")
opt[String]("output-folder")
.required()
.valueName("<output folder>")
.action((f, c) => c.copy(outFolder = f))
.text("Output folder where the enriched events will be stored")
opt[String]("bad-folder")
.required()
.valueName("<bad folder>")
.action((f, c) => c.copy(badFolder = f))
.text("Output folder where the malformed events will be stored")
opt[String]("enrichments")
.required()
.valueName("<enrichments>")
.action((e, c) => c.copy(enrichments = e))
.text("Directory where the JSONs describing the enrichments are stored")
opt[String]("iglu-config")
.required()
.valueName("<iglu config>")
.action((i, c) => c.copy(igluConfig = i))
.text("Iglu resolver configuration")
opt[Long]("etl-timestamp")
.required()
.valueName("<ETL timestamp>")
.action((t, c) => c.copy(etlTstamp = t))
.text("Timestamp at which the job was launched, in milliseconds")
opt[Unit]("local")
.hidden()
.action((_, c) => c.copy(local = true))
.text("Whether to build a local enrichment registry")
help("help").text("Prints this usage text")
}
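  // Illustrative invocation matching the options declared above (all values are
  // placeholders, not real paths, formats or timestamps):
  //   EnrichJob --input-folder <raw events dir> --input-format <collector format> \
  //             --output-folder <enriched dir> --bad-folder <bad rows dir> \
  //             --enrichments <enrichments JSON dir> --iglu-config <resolver configuration> \
  //             --etl-timestamp 1562008983000 [--local]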
/** Turn a RawEnrichJobConfig into a ParsedEnrichJobConfig */
private def transform(
c: RawEnrichJobConfig
): ValidatedNelMessage[ParsedEnrichJobConfig] = {
// We try to build all the components early to detect failures before starting the job
import singleton._
val resolver = ResolverSingleton.getIgluResolver(c.igluConfig)
val registry = resolver
.flatMap(RegistrySingleton.getEnrichmentRegistry(c.enrichments, c.local)(_))
val loader = Loader
.getLoader(c.inFormat)
.fold(_.toProcessingMessage.failureNel, _.successNel)
(resolver |@| registry |@| loader) { (_, reg, _) =>
ParsedEnrichJobConfig(
c.inFolder,
c.inFormat,
c.outFolder,
c.badFolder,
c.enrichments,
c.igluConfig,
c.local,
new DateTime(c.etlTstamp),
filesToCache(reg))
}
}
/**
   * Load an EnrichJobConfig from command line arguments.
* @param args The command line arguments
* @return The job config or one or more error messages boxed in a Scalaz ValidationNel
*/
def loadConfigFrom(
args: Array[String]
): ValidatedNelMessage[ParsedEnrichJobConfig] =
parser.parse(args, RawEnrichJobConfig()).map(transform) match {
case Some(c) => c
case _ => "Parsing of the configuration failed".toProcessingMessage.failureNel
}
/**
* Build the list of enrichment files to cache.
* @param registry EnrichmentRegistry used to find the files that need caching
* @return A list of URIs representing the files that need caching
*/
private def filesToCache(registry: EnrichmentRegistry): List[(URI, String)] =
registry.filesToCache
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/enrichments/registry/pii/PiiPseudonymizerEnrichment.scala
|
<reponame>AlexandraGYG/snowplow
/*
* Copyright (c) 2017-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow.enrich
package common.enrichments.registry
package pii
// Scala
import scala.collection.JavaConverters._
import scala.collection.mutable.MutableList
// Scala libraries
import org.json4s
import org.json4s.{DefaultFormats, Diff, JValue}
import org.json4s.JsonAST._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods
import org.json4s.jackson.JsonMethods.{compact, parse, render}
import org.json4s.jackson.Serialization.write
import org.json4s.Extraction.decompose
// Java
import org.apache.commons.codec.digest.DigestUtils
// Java libraries
import com.fasterxml.jackson.databind.JsonNode
import com.fasterxml.jackson.databind.node.{ArrayNode, ObjectNode, TextNode}
import com.jayway.jsonpath.{Configuration, JsonPath => JJsonPath}
import com.jayway.jsonpath.MapFunction
// Scalaz
import scalaz._
import Scalaz._
// Iglu
import iglu.client.validation.ProcessingMessageMethods._
import iglu.client.{SchemaCriterion, SchemaKey}
// This project
import common.ValidatedNelMessage
import common.utils.ScalazJson4sUtils.{extract, fieldExists}
import common.outputs.EnrichedEvent
/**
* Companion object. Lets us create a PiiPseudonymizerEnrichment
* from a JValue.
*/
object PiiPseudonymizerEnrichment extends ParseableEnrichment {
implicit val formats = DefaultFormats + new PiiStrategyPseudonymizeSerializer
override val supportedSchema =
SchemaCriterion("com.snowplowanalytics.snowplow.enrichments", "pii_enrichment_config", "jsonschema", 2, 0, 0)
def parse(config: JValue, schemaKey: SchemaKey): ValidatedNelMessage[PiiPseudonymizerEnrichment] = {
for {
conf <- matchesSchema(config, schemaKey)
emitIdentificationEvent = extract[Boolean](conf, "emitEvent").toOption
.getOrElse(false)
piiFields <- extract[List[JObject]](conf, "parameters", "pii").leftMap(_.getMessage)
piiStrategy <- extractStrategy(config)
piiFieldList <- extractFields(piiFields)
} yield PiiPseudonymizerEnrichment(piiFieldList, emitIdentificationEvent, piiStrategy)
}.leftMap(_.toProcessingMessageNel)
private[pii] def getHashFunction(strategyFunction: String): Validation[String, DigestFunction] =
strategyFunction match {
case "MD2" => { DigestUtils.md2Hex(_: Array[Byte]) }.success
case "MD5" => { DigestUtils.md5Hex(_: Array[Byte]) }.success
case "SHA-1" => { DigestUtils.sha1Hex(_: Array[Byte]) }.success
case "SHA-256" => { DigestUtils.sha256Hex(_: Array[Byte]) }.success
case "SHA-384" => { DigestUtils.sha384Hex(_: Array[Byte]) }.success
case "SHA-512" => { DigestUtils.sha512Hex(_: Array[Byte]) }.success
case fName => s"Unknown function $fName".failure
}
private def extractFields(piiFields: List[JObject]): Validation[String, List[PiiField]] =
piiFields.map {
case field: JObject =>
if (fieldExists(field, "pojo"))
extractString(field, "pojo", "field").flatMap(extractPiiScalarField)
else if (fieldExists(field, "json")) extractPiiJsonField(field \ "json")
else
s"PII Configuration: pii field does not include 'pojo' nor 'json' fields. Got: [${compact(field)}]"
.failure[PiiField]
case json => s"PII Configuration: pii field does not contain an object. Got: [${compact(json)}]".failure[PiiField]
}.sequenceU
private def extractPiiScalarField(fieldName: String): Validation[String, PiiScalar] =
ScalarMutators
.get(fieldName)
.map(PiiScalar(_).success)
.getOrElse(s"The specified pojo field $fieldName is not supported".failure)
private def extractPiiJsonField(jsonField: JValue): Validation[String, PiiJson] = {
val schemaCriterion = extractString(jsonField, "schemaCriterion")
.flatMap(sc => SchemaCriterion.parse(sc).leftMap(_.getMessage))
.toValidationNel
val jsonPath = extractString(jsonField, "jsonPath").toValidationNel
val mutator = extractString(jsonField, "field")
.flatMap(getJsonMutator)
.toValidationNel
val validatedNel = (mutator |@| schemaCriterion |@| jsonPath)(PiiJson.apply)
validatedNel.leftMap(x => s"Unable to extract PII JSON: ${x.list.mkString(",")}")
}
private def getJsonMutator(fieldName: String): Validation[String, Mutator] =
JsonMutators
.get(fieldName)
.map(_.success)
.getOrElse(s"The specified json field $fieldName is not supported".failure)
private def extractString(jValue: JValue, field: String, tail: String*): Validation[String, String] =
extract[String](jValue, field, tail: _*).leftMap(_.getMessage)
private def extractStrategy(config: JValue): Validation[String, PiiStrategyPseudonymize] =
extract[PiiStrategyPseudonymize](config, "parameters", "strategy")
.leftMap(_.getMessage)
private def matchesSchema(config: JValue, schemaKey: SchemaKey): Validation[String, JValue] =
if (supportedSchema.matches(schemaKey))
config.success
else
s"Schema key $schemaKey is not supported. A '${supportedSchema.name}' enrichment must have schema '$supportedSchema'.".failure
}
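// Sketch of the configuration object the parser above reads. Field names are taken from the
// extraction code (emitEvent, parameters.pii, pojo.field, json.field/schemaCriterion/jsonPath,
// parameters.strategy); the concrete values and the exact shape of "strategy" (handled by
// PiiStrategyPseudonymizeSerializer) are illustrative assumptions only:
//   {
//     "emitEvent": true,
//     "parameters": {
//       "pii": [
//         { "pojo": { "field": "user_id" } },
//         { "json": { "field": "unstruct_event",
//                     "schemaCriterion": "iglu:com.acme/checkout/jsonschema/1-*-*",
//                     "jsonPath": "$.email" } }
//       ],
//       "strategy": { "pseudonymize": { "hashFunction": "SHA-256", "salt": "..." } }
//     }
//   }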
/**
* Implements a pseudonymization strategy using any algorithm known to DigestFunction
* @param functionName string representation of the function
* @param hashFunction the DigestFunction to apply
* @param salt salt added to the plain string before hashing
*/
final case class PiiStrategyPseudonymize(functionName: String, hashFunction: DigestFunction, salt: String)
extends PiiStrategy {
val TextEncoding = "UTF-8"
override def scramble(clearText: String): String = hash(clearText + salt)
def hash(text: String): String = hashFunction(text.getBytes(TextEncoding))
}
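// For example (illustrative values): with a SHA-256 hash function and salt "pepper",
// scramble("jane@acme.com") hashes the UTF-8 bytes of the string "jane@acme.compepper".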
/**
 * The PiiPseudonymizerEnrichment runs after all other enrichments to find fields that are configured as PII (personally
 * identifiable information) and apply some anonymization (currently only pseudonymization) to them. Currently a single
 * strategy for all the fields is supported due to the configuration format, and there is only one implemented strategy;
 * internally, however, the enrichment supports a strategy per field.
 *
 * The user may specify two types of fields in the config: `pojo` or `json`. A `pojo` field is effectively a scalar field in the
 * EnrichedEvent, whereas a `json` field is a "context"-formatted field and can either contain a single value in the case of
 * unstruct_event, or an array in the case of derived_events and contexts.
*
* @param fieldList a list of configured PiiFields
* @param emitIdentificationEvent whether to emit an identification event
* @param strategy the pseudonymization strategy to use
*/
case class PiiPseudonymizerEnrichment(fieldList: List[PiiField],
emitIdentificationEvent: Boolean,
strategy: PiiStrategy)
extends Enrichment {
implicit val json4sFormats = DefaultFormats +
new PiiModifiedFieldsSerializer +
new PiiStrategyPseudonymizeSerializer
private val UnstructEventSchema =
SchemaKey("com.snowplowanalytics.snowplow", "unstruct_event", "jsonschema", "1-0-0").toSchemaUri
def transformer(event: EnrichedEvent): Unit = {
val modifiedFields: ModifiedFields = fieldList.flatMap(_.transform(event, strategy))
event.pii =
if (emitIdentificationEvent && modifiedFields.nonEmpty)
write(
("schema" -> UnstructEventSchema) ~ ("data" -> decompose(PiiModifiedFields(modifiedFields, strategy)))
)
else null
}
}
/**
* Specifies a scalar field in POJO and the strategy that should be applied to it.
* @param fieldMutator the field mutator where the strategy will be applied
*/
final case class PiiScalar(fieldMutator: Mutator) extends PiiField {
override def applyStrategy(fieldValue: String, strategy: PiiStrategy): (String, ModifiedFields) =
if (fieldValue != null) {
val modifiedValue = strategy.scramble(fieldValue)
(modifiedValue, List(ScalarModifiedField(fieldMutator.fieldName, fieldValue, modifiedValue)))
} else (null, List())
}
/**
* Specifies a strategy to use, a field mutator where the JSON can be found in the EnrichedEvent POJO, a schema criterion to
* discriminate which contexts to apply this strategy to, and a JSON path within the contexts where this strategy will
* be applied (the path may correspond to multiple fields).
*
* @param fieldMutator the field mutator for the JSON field
* @param schemaCriterion the schema for which the strategy will be applied
* @param jsonPath the path where the strategy will be applied
*/
final case class PiiJson(fieldMutator: Mutator, schemaCriterion: SchemaCriterion, jsonPath: String) extends PiiField {
implicit val json4sFormats = DefaultFormats
override def applyStrategy(fieldValue: String, strategy: PiiStrategy): (String, ModifiedFields) =
if (fieldValue != null) {
      val (parsedAndSubstituted: JValue, modifiedFields: List[JsonModifiedField]) = parse(fieldValue) match {
case JObject(jObject) => {
val jObjectMap: Map[String, JValue] = jObject.toMap
val contextMapped: Map[String, (JValue, List[JsonModifiedField])] =
jObjectMap.map(mapContextTopFields(_, strategy))
(JObject(contextMapped.mapValues(_._1).toList), contextMapped.values.map(_._2).flatten)
}
case x => (x, List.empty[JsonModifiedField])
}
      val compacted = compact(render(parsedAndSubstituted))
(compacted, modifiedFields)
} else (null, List.empty[JsonModifiedField])
/**
* Map context top fields with strategy if they match.
*/
private def mapContextTopFields(tuple: (String, json4s.JValue),
strategy: PiiStrategy): (String, (JValue, List[JsonModifiedField])) = tuple match {
case (k: String, contexts: JValue) if k == "data" =>
(k, contexts match {
case JArray(contexts) =>
val updatedAndModified: List[(JValue, List[JsonModifiedField])] =
contexts.map(getModifiedContext(_, strategy))
(JArray(updatedAndModified.map(_._1)), updatedAndModified.map(_._2).flatten)
case x => getModifiedContext(x, strategy)
})
case (k: String, x: JValue) => (k, (x, List.empty[JsonModifiedField]))
}
/**
* Returns a modified context or unstruct event along with a list of modified fields.
*/
private def getModifiedContext(jv: JValue, strategy: PiiStrategy): (JValue, List[JsonModifiedField]) = jv match {
case JObject(context) => modifyObjectIfSchemaMatches(context, strategy)
case x => (x, List.empty[JsonModifiedField])
}
/**
* Tests whether the schema for this event matches the schema criterion and if it does modifies it.
*/
private def modifyObjectIfSchemaMatches(context: List[(String, json4s.JValue)],
strategy: PiiStrategy): (JObject, List[JsonModifiedField]) = {
val fieldsObj = context.toMap
(for {
schema <- fieldsObj.get("schema")
schemaStr <- schema.extractOpt[String]
parsedSchemaMatches <- SchemaKey.parse(schemaStr).map(schemaCriterion.matches).toOption
data <- fieldsObj.get("data")
if parsedSchemaMatches
updated = jsonPathReplace(data, strategy, schemaStr)
} yield (JObject(fieldsObj.updated("schema", schema).updated("data", updated._1).toList), updated._2))
.getOrElse((JObject(context), List()))
}
/**
   * Replaces a value in the given context data with the result of applying the strategy to that value.
*/
private def jsonPathReplace(jValue: JValue,
strategy: PiiStrategy,
schema: String): (JValue, List[JsonModifiedField]) = {
val objectNode = JsonMethods.mapper.valueToTree[ObjectNode](jValue)
val documentContext = JJsonPath.using(JsonPathConf).parse(objectNode)
val modifiedFields = MutableList[JsonModifiedField]()
val documentContext2 = documentContext.map(
jsonPath,
new ScrambleMapFunction(strategy, modifiedFields, fieldMutator.fieldName, jsonPath, schema))
// make sure it is a structure preserving method, see #3636
    val transformedJValue = JsonMethods.fromJsonNode(documentContext2.json[JsonNode]())
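    // The Jackson round-trip above is not guaranteed to be structure preserving: the first
    // diff below collects anything present in the transformed JSON but absent from the
    // original (the cruft), and the second diff keeps only the parts of the transformed
    // JSON that are not in that cruft.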
val Diff(_, erroneouslyAdded, _) = jValue diff transformedJValue
val Diff(_, withoutCruft, _) = erroneouslyAdded diff transformedJValue
(withoutCruft, modifiedFields.toList)
}
}
private final class ScrambleMapFunction(strategy: PiiStrategy,
modifiedFields: MutableList[JsonModifiedField],
fieldName: String,
jsonPath: String,
schema: String)
extends MapFunction {
override def map(currentValue: AnyRef, configuration: Configuration): AnyRef = currentValue match {
case s: String =>
val newValue = strategy.scramble(s)
val _ = modifiedFields += JsonModifiedField(fieldName, s, newValue, jsonPath, schema)
newValue
case a: ArrayNode =>
a.elements.asScala.map {
case t: TextNode =>
val originalValue = t.asText()
val newValue = strategy.scramble(originalValue)
modifiedFields += JsonModifiedField(fieldName, originalValue, newValue, jsonPath, schema)
newValue
case default: AnyRef => default
}
case default: AnyRef => default
}
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/utils/HttpClient.scala
|
<filename>3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/utils/HttpClient.scala
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich
package common
package utils
import scala.util.control.NonFatal
// Scalaz
import scalaz._
import Scalaz._
// Scalaj
import scalaj.http._
object HttpClient {
// The defaults are from scalaj library
val DEFAULT_CONNECTION_TIMEOUT_MS = 1000
val DEFAULT_READ_TIMEOUT_MS = 5000
/**
* Blocking method to get body of HTTP response
*
* @param request assembled request object
* @return validated body of HTTP request
*/
def getBody(request: HttpRequest): Validation[Throwable, String] =
try {
val res = request.asString
if (res.isSuccess) res.body.success
else new Exception(s"Request failed with status ${res.code} and body ${res.body}").failure
} catch {
case NonFatal(e) => e.failure
}
/**
* Build HTTP request object
*
* @param uri full URI to request
* @param authUser optional username for basic auth
* @param authPassword optional password for basic auth
* @param body optional request body
* @param method HTTP method
* @param connectionTimeout connection timeout, if not set default is 1000ms
* @param readTimeout read timeout, if not set default is 5000ms
* @return HTTP request
*/
def buildRequest(
uri: String,
authUser: Option[String],
authPassword: Option[String],
body: Option[String],
method: String = "GET",
connectionTimeout: Option[Long],
readTimeout: Option[Long]
): HttpRequest = {
val req: HttpRequest = Http(uri).method(method).maybeTimeout(connectionTimeout, readTimeout)
req.maybeAuth(authUser, authPassword).maybePostData(body)
}
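  // Illustrative usage (URI, body and timeouts are made-up values):
  //   val req = buildRequest(
  //     "https://api.example.com/lookup",
  //     authUser = None,
  //     authPassword = None,
  //     body = Some("""{"q": "test"}"""),
  //     method = "POST",
  //     connectionTimeout = Some(2000L),
  //     readTimeout = Some(5000L))
  //   val body: Validation[Throwable, String] = getBody(req)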
implicit class RichHttpRequest(request: HttpRequest) {
def maybeAuth(user: Option[String], password: Option[String]): HttpRequest =
if (user.isDefined || password.isDefined) request.auth(user.getOrElse(""), password.getOrElse(""))
else request
def maybeTimeout(connectionTimeout: Option[Long], readTimeout: Option[Long]): HttpRequest =
(connectionTimeout, readTimeout) match {
case (Some(ct), Some(rt)) => request.timeout(ct.toInt, rt.toInt)
case (Some(ct), None) => request.timeout(ct.toInt, DEFAULT_READ_TIMEOUT_MS)
case (None, Some(rt)) => request.timeout(DEFAULT_CONNECTION_TIMEOUT_MS, rt.toInt)
case _ => request.timeout(DEFAULT_CONNECTION_TIMEOUT_MS, DEFAULT_READ_TIMEOUT_MS)
}
def maybePostData(body: Option[String]): HttpRequest =
body.map(data => request.postData(data).header("content-type", "application/json")).getOrElse(request)
}
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/enrichments/web/ParseCrossDomainSpec.scala
|
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common.enrichments.web
// Java
import java.net.URI
// Specs2 & Scalaz-Specs2
import org.specs2.Specification
import org.specs2.matcher.DataTables
import org.specs2.scalaz.ValidationMatchers
// Scalaz
import scalaz._
import Scalaz._
class ParseCrossDomainSpec extends Specification with DataTables with ValidationMatchers {
def is = s2"""
This is a specification to test the parseCrossDomain function
parseCrossDomain should return None when the querystring contains no _sp parameter $e1
parseCrossDomain should return a failure when the _sp timestamp is unparseable $e2
parseCrossDomain should successfully extract the domain user ID when available $e3
parseCrossDomain should successfully extract the domain user ID and timestamp when available $e4
parseCrossDomain should extract neither field from an empty _sp parameter $e5
"""
def e1 =
PageEnrichments.parseCrossDomain(Map()) must beSuccessful((None, None))
def e2 = {
val expected = "Field [sp_dtm]: [not-a-timestamp] is not in the expected format (ms since epoch)"
PageEnrichments.parseCrossDomain(Map("_sp" -> "abc.not-a-timestamp")) must beFailing(expected)
}
def e3 =
PageEnrichments.parseCrossDomain(Map("_sp" -> "abc")) must beSuccessful(("abc".some, None))
def e4 =
PageEnrichments.parseCrossDomain(Map("_sp" -> "abc.1426245561368")) must beSuccessful(
("abc".some, "2015-03-13 11:19:21.368".some))
def e5 =
PageEnrichments.parseCrossDomain(Map("_sp" -> "")) must beSuccessful(None -> None)
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/adapters/registry/RemoteAdapterSpec.scala
|
/*
* Copyright (c) 2019-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package adapters
package registry
import java.io.InputStream
import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit
import com.snowplowanalytics.snowplow.enrich.common.loaders.{
CollectorApi,
CollectorContext,
CollectorPayload,
CollectorSource
}
import com.sun.net.httpserver.{HttpExchange, HttpHandler, HttpServer}
import org.joda.time.DateTime
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JArray
import org.json4s.jackson.JsonMethods.parse
import org.json4s.JsonDSL._
import org.json4s.jackson.Serialization.write
import org.specs2.Specification
import org.specs2.scalaz.ValidationMatchers
import org.specs2.specification.BeforeAfter
import scalaz.Scalaz._
import scalaz.{Failure, NonEmptyList, Success}
import scala.concurrent.duration.Duration
import scala.util.control.NonFatal
class RemoteAdapterSpec extends Specification with ValidationMatchers {
override def is =
sequential ^
s2"""
This is a specification to test the RemoteAdapter functionality.
RemoteAdapter must return any events parsed by this local test adapter ${testWrapperLocal(e1)}
This local enricher (well, any remote enricher) must treat an empty list as an error ${testWrapperLocal(e2)}
RemoteAdapter must also return any other errors issued by this local adapter ${testWrapperLocal(e3)}
  HTTP response contains a string that is not valid JSON, should fail                     ${testWrapperLocal(e4)}
  HTTP response contains an empty string, should fail                                     ${testWrapperLocal(e5)}
  HTTP response contains well-formed JSON but without events and error fields, should fail ${testWrapperLocal(e6)}
  HTTP response contains well-formed JSON whose events are not List[Map[String, String]], should fail ${testWrapperLocal(e7)}
  HTTP response contains well-formed JSON whose events field is an empty list, should fail ${testWrapperLocal(e8)}
  HTTP response has a status code other than 200, should fail                             ${testWrapperLocal(e9)}
"""
implicit val resolver = SpecHelpers.IgluResolver
val actionTimeout = Duration(5, TimeUnit.SECONDS)
val mockTracker = "testTracker-v0.1"
val mockPlatform = "srv"
val mockSchemaKey = "moodReport"
val mockSchemaVendor = "org.remoteEnricherTest"
val mockSchemaName = "moodChange"
val mockSchemaFormat = "jsonschema"
val mockSchemaVersion = "1-0-0"
private def localHttpServer(tcpPort: Int, basePath: String): HttpServer = {
val httpServer = HttpServer.create(new InetSocketAddress(tcpPort), 0)
httpServer.createContext(
s"/$basePath",
new HttpHandler {
def handle(exchange: HttpExchange): Unit = {
val response = MockRemoteAdapter.handle(getBodyAsString(exchange.getRequestBody))
if (response != "\"server error\"") {
exchange.sendResponseHeaders(200, 0)
} else {
exchange.sendResponseHeaders(500, 0)
}
exchange.getResponseBody.write(response.getBytes)
exchange.getResponseBody.close()
}
}
)
httpServer
}
private def getBodyAsString(body: InputStream): String = {
val s = new java.util.Scanner(body).useDelimiter("\\A")
if (s.hasNext) s.next() else ""
}
object MockRemoteAdapter {
val sampleTracker = "testTracker-v0.1"
val samplePlatform = "srv"
val sampleSchemaKey = "moodReport"
val sampleSchemaVendor = "org.remoteEnricherTest"
val sampleSchemaName = "moodChange"
implicit val formats = DefaultFormats
sealed case class Payload(
queryString: Map[String, String],
headers: List[String],
body: Option[String],
contentType: Option[String]
)
sealed case class Response(
events: List[Map[String, String]],
error: String
)
def handle(body: String) =
try {
parse(body).extract[Payload] match {
case payload: Payload =>
parse(payload.body.get) \ "mood" match {
case JArray(list) =>
val output = list.map { item =>
val json =
("schema" -> "iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0") ~
("data" -> (("schema" -> s"iglu:$mockSchemaVendor/$mockSchemaName/$mockSchemaFormat/$mockSchemaVersion") ~
("data" -> item)))
Map(("tv" -> sampleTracker),
("e" -> "ue"),
("p" -> payload.queryString.getOrElse("p", samplePlatform)),
("ue_pr" -> write(json))) ++ payload.queryString
}
write(Response(output, null))
case _ => write("server error") // an example case for non 200 response
}
case anythingElse =>
write(Response(null, s"expecting a payload json but got a ${anythingElse.getClass}"))
}
} catch {
case NonFatal(e) => write(Response(null, s"aack, sampleAdapter exception $e"))
}
}
object Shared {
val api = CollectorApi("org.remoteEnricherTest", "v1")
val cljSource = CollectorSource("clj-tomcat", "UTF-8", None)
val context = CollectorContext(DateTime.parse("2013-08-29T00:18:48.000+00:00").some,
"192.168.127.12".some,
None,
None,
List("testHeader: testValue"),
None)
}
var testAdapter: RemoteAdapter = _
object testWrapperLocal extends BeforeAfter {
val mockServerPort = 8091
val mockServerPath = "myEnrichment"
var httpServer: HttpServer = _
def before = {
httpServer = localHttpServer(mockServerPort, mockServerPath)
httpServer.start()
testAdapter = new RemoteAdapter(s"http://localhost:$mockServerPort/$mockServerPath", Some(1000L), Some(5000L))
}
def after =
httpServer.stop(0)
}
def e1 = {
val eventData = List(("anonymous", -0.3), ("subscribers", 0.6))
val eventsAsJson = eventData.map(evt => s"""{"${evt._1}":${evt._2}}""")
val payloadBody = s""" {"mood": [${eventsAsJson.mkString(",")}]} """
val payload = CollectorPayload(Shared.api, Nil, None, payloadBody.some, Shared.cljSource, Shared.context)
val expected = eventsAsJson
.map(
evtJson =>
RawEvent(
Shared.api,
Map(
"tv" -> mockTracker,
"e" -> "ue",
"p" -> mockPlatform,
"ue_pr" -> s"""{"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0","data":{"schema":"iglu:$mockSchemaVendor/$mockSchemaName/$mockSchemaFormat/$mockSchemaVersion","data":$evtJson}}"""
),
None,
Shared.cljSource,
Shared.context
))
.toNel
.get
val they = testAdapter.toRawEvents(payload)
they must beSuccessful(expected)
}
def e2 = {
val emptyListPayload =
CollectorPayload(Shared.api, Nil, None, "".some, Shared.cljSource, Shared.context)
val expected = NonEmptyList("Missing payload body")
testAdapter.toRawEvents(emptyListPayload) must beFailing(expected)
}
def e3 = {
val bodylessPayload = CollectorPayload(Shared.api, Nil, None, None, Shared.cljSource, Shared.context)
val expected = NonEmptyList("Missing payload body")
testAdapter.toRawEvents(bodylessPayload) must beFailing(expected)
}
def e4 = {
val bodylessPayload = CollectorPayload(Shared.api, Nil, None, None, Shared.cljSource, Shared.context)
val invalidJsonResponse = Success("{invalid json")
val result = testAdapter.processResponse(bodylessPayload, invalidJsonResponse)
val expected = NonEmptyList(
"Json is not parsable, error: com.fasterxml.jackson.core.JsonParseException: Unexpected character ('i' (code 105)): was expecting double-quote to start field name\n at [Source: (String)\"{invalid json\"; line: 1, column: 3] - response: {invalid json")
result must beFailing(expected)
}
def e5 = {
val bodylessPayload = CollectorPayload(Shared.api, Nil, None, None, Shared.cljSource, Shared.context)
val emptyJsonResponse = Success("")
val expected = NonEmptyList("Empty response")
val result = testAdapter.processResponse(bodylessPayload, emptyJsonResponse)
result must beFailing(expected)
}
def e6 = {
val bodylessPayload = CollectorPayload(Shared.api, Nil, None, None, Shared.cljSource, Shared.context)
val unexpectedJsonResponse = Success("{\"invalid\":\"response\"}")
val result = testAdapter.processResponse(bodylessPayload, unexpectedJsonResponse)
val expected = NonEmptyList("Incompatible response, missing error and events fields")
result must beFailing(expected)
}
def e7 = {
val bodylessPayload = CollectorPayload(Shared.api, Nil, None, None, Shared.cljSource, Shared.context)
    val badStructuredJsonResponse = Success("{\"events\":\"response\"}")
    val result = testAdapter.processResponse(bodylessPayload, badStructuredJsonResponse)
val expected = NonEmptyList(
"The events field should be List[Map[String, String]], error: org.json4s.package$MappingException: Expected collection but got JString(response) for root JString(response) and mapping List[Map[String, String]] - response: {\"events\":\"response\"}")
result must beFailing(expected)
}
def e8 = {
val bodylessPayload = CollectorPayload(Shared.api, Nil, None, None, Shared.cljSource, Shared.context)
val emptyJsonResponse = Success("{\"error\":\"\", \"events\":[]}")
val result = testAdapter.processResponse(bodylessPayload, emptyJsonResponse)
val expected = NonEmptyList("Unable to parse response: {\"error\":\"\", \"events\":[]}")
result must beFailing(expected)
}
def e9 = {
val payloadBody = s""" {"non-mood": []} """
val bodylessPayload = CollectorPayload(Shared.api, Nil, None, payloadBody.some, Shared.cljSource, Shared.context)
val result = testAdapter.toRawEvents(bodylessPayload)
val expected = NonEmptyList("Request failed with status 500 and body \"server error\"")
result must beFailing(expected)
}
}
|
AlexandraGYG/snowplow
|
3-enrich/beam-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.beam/ConfigSpec.scala
|
<filename>3-enrich/beam-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.beam/ConfigSpec.scala<gh_stars>1-10
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and
* limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.beam
import java.nio.file.Files
import com.spotify.scio.Args
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.scalatest._
import Matchers._
import scalaz._
import config._
import SpecHelpers._
class ConfigSpec extends FreeSpec {
"the config object should" - {
"make an EnrichConfig smart ctor available" - {
"which fails if --job-name is not present" in {
EnrichConfig(Args(Array.empty)) shouldEqual Failure(
"Missing `job-name` argument\n" +
"Missing `raw` argument\n" +
"Missing `enriched` argument\n" +
"Missing `bad` argument\n" +
"Missing `resolver` argument"
)
}
"which fails if --raw is not present" in {
EnrichConfig(Args(Array("--job-name=j"))) shouldEqual Failure(
"Missing `raw` argument\n" +
"Missing `enriched` argument\n" +
"Missing `bad` argument\n" +
"Missing `resolver` argument"
)
}
"which fails if --enriched is not present" in {
EnrichConfig(Args(Array("--job-name=j", "--raw=i"))) shouldEqual Failure(
"Missing `enriched` argument\n" +
"Missing `bad` argument\n" +
"Missing `resolver` argument"
)
}
"which fails if --bad is not present" in {
EnrichConfig(Args(Array("--job-name=j", "--raw=i", "--enriched=o"))) shouldEqual Failure(
"Missing `bad` argument\n" +
"Missing `resolver` argument"
)
}
"which fails if --resolver is not present" in {
EnrichConfig(Args(Array("--job-name=j", "--raw=i", "--enriched=o", "--bad=b"))) shouldEqual
Failure("Missing `resolver` argument")
}
"which succeeds otherwise" in {
EnrichConfig(Args(
Array("--job-name=j", "--raw=i", "--enriched=o", "--bad=b", "--resolver=r"))) shouldEqual
Success(EnrichConfig("j", "i", "o", "b", None, "r", None))
}
"which succeeds if --enrichments is present" in {
val args = Args(Array(
"--job-name=j", "--raw=i", "--enriched=o", "--bad=b", "--resolver=r", "--enrichments=e"))
EnrichConfig(args) shouldEqual Success(EnrichConfig("j", "i", "o", "b", None, "r", Some("e")))
}
"which succeeds if --pii is present" in {
val args = Args(Array(
"--job-name=j", "--raw=i", "--enriched=o", "--bad=b", "--pii=p", "--resolver=r"))
EnrichConfig(args) shouldEqual Success(EnrichConfig("j", "i", "o", "b", Some("p"), "r", None))
}
}
"make a parseResolver function available" - {
"which fails if there is no resolver file" in {
parseResolver("doesnt-exist") shouldEqual
Failure("Iglu resolver configuration file `doesnt-exist` does not exist")
}
"which fails if the resolver file is not json" in {
val path = writeToFile("not-json", "not-json")
parseResolver(path) match {
case Failure(e) => e should startWith("Field []: invalid JSON [not-json]")
case _ => fail()
}
}
"which fails if it's not a resolver" in {
val path = writeToFile("json", """{"a":2}""")
parseResolver(path) match {
case Failure(e) =>
e should startWith("error: Resolver configuration failed JSON Schema validation")
case _ => fail()
}
}
"which succeeds if it's a resolver" in {
val path = writeToFile("resolver", resolverConfig)
parseResolver(path) match {
case Success(_) => succeed
case _ => fail()
}
}
}
"make a parseEnrichmentRegistry function available" - {
"which fails if there is no enrichments dir" in {
parseEnrichmentRegistry(Some("doesnt-exist")) shouldEqual
Failure("Enrichment directory `doesnt-exist` does not exist")
}
"which fails if the contents of the enrichment dir are not json" in {
val path = writeToFile("not-json", "not-json", "not-json")
parseEnrichmentRegistry(Some(path)) match {
case Failure(e) => e should startWith("Field []: invalid JSON [not-json]")
case _ => fail()
}
}
"which fails if the contents of the enrichment dir are not enrichments" in {
val path = writeToFile("json", "json", """{"a":2}""")
parseEnrichmentRegistry(Some(path)) match {
case Failure(e) =>
e should startWith("error: NonEmptyList(error: object instance has properties")
case _ => fail()
}
}
"which succeeds if the contents of the enrichment dir are enrichments" in {
val path = writeToFile("enrichments", "enrichments", enrichmentConfig)
parseEnrichmentRegistry(Some(path)) shouldEqual Success(
("schema" -> "iglu:com.snowplowanalytics.snowplow/enrichments/jsonschema/1-0-0") ~
("data" -> List(parse(enrichmentConfig)))
)
}
"which succeeds if no enrichments dir is given" in {
parseEnrichmentRegistry(None) shouldEqual Success(
("schema" -> "iglu:com.snowplowanalytics.snowplow/enrichments/jsonschema/1-0-0") ~
("data" -> List.empty[String])
)
}
}
}
private def writeToFile(dir: String, name: String, content: String): String = {
val d = Files.createTempDirectory(dir)
Files.write(Files.createTempFile(d.toAbsolutePath, name, ".json"), content.getBytes).toFile
.deleteOnExit()
val f = d.toFile()
f.deleteOnExit()
f.getAbsolutePath
}
private def writeToFile(name: String, content: String): String = {
val f = Files.write(Files.createTempFile(name, ".json"), content.getBytes).toFile
f.deleteOnExit()
f.getAbsolutePath
}
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/enrichments/registry/RefererParserEnrichment.scala
|
<gh_stars>1-10
/*
* Copyright (c) 2014-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package common
package enrichments
package registry
// Java
import java.net.URI
// Maven Artifact
import org.apache.maven.artifact.versioning.DefaultArtifactVersion
// Scalaz
import scalaz._
import Scalaz._
// json4s
import org.json4s.{DefaultFormats, JValue}
// Iglu
import iglu.client.{SchemaCriterion, SchemaKey}
import iglu.client.validation.ProcessingMessageMethods._
// Snowplow referer-parser
import com.snowplowanalytics.refererparser.scala.{Parser => RefererParser}
import com.snowplowanalytics.refererparser.scala.Referer
// This project
import utils.{ConversionUtils => CU}
import utils.MapTransformer
import utils.MapTransformer._
import utils.ScalazJson4sUtils
/**
* Companion object. Lets us create a
* RefererParserEnrichment from a JValue
*/
object RefererParserEnrichment extends ParseableEnrichment {
implicit val formats = DefaultFormats
val supportedSchema = SchemaCriterion("com.snowplowanalytics.snowplow", "referer_parser", "jsonschema", 1, 0)
/**
* Creates a RefererParserEnrichment instance from a JValue.
*
* @param config The referer_parser enrichment JSON
* @param schemaKey The SchemaKey provided for the enrichment
* Must be a supported SchemaKey for this enrichment
* @return a configured RefererParserEnrichment instance
*/
def parse(config: JValue, schemaKey: SchemaKey): ValidatedNelMessage[RefererParserEnrichment] =
isParseable(config, schemaKey).flatMap(conf => {
(for {
param <- ScalazJson4sUtils.extract[List[String]](config, "parameters", "internalDomains")
enrich = RefererParserEnrichment(param)
} yield enrich).toValidationNel
})
}
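// Sketch of the parameters object the parse method above reads (domain values are
// illustrative only):
//   { "parameters": { "internalDomains": [ "www.example.com", "blog.example.com" ] } }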
/**
* Config for a referer_parser enrichment
*
* @param domains List of internal domains
*/
case class RefererParserEnrichment(
domains: List[String]
) extends Enrichment {
/**
* A Scalaz Lens to update the term within
* a Referer object.
*/
private val termLens: Lens[Referer, MaybeString] = Lens.lensu((r, newTerm) => r.copy(term = newTerm), _.term)
/**
* Extract details about the referer (sic).
*
* Uses the referer-parser library.
*
* @param uri The referer URI to extract
* referer details from
* @param pageHost The host of the current
* page (used to determine
* if this is an internal
* referer)
* @return a Tuple3 containing referer medium,
* source and term, all Strings
*/
def extractRefererDetails(uri: URI, pageHost: String): Option[Referer] =
for {
r <- RefererParser.parse(uri, pageHost, domains)
t = r.term.flatMap(t => CU.fixTabsNewlines(t))
} yield termLens.set(r, t)
}
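// Illustrative call (values are made up; the Referer returned is determined by the
// referer-parser library's database and the configured internal domains):
//   extractRefererDetails(new URI("http://www.example-search.com/?q=snowplow"), "snowplowanalytics.com")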
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/adapters/registry/HubSpotAdapterSpec.scala
|
<filename>3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/adapters/registry/HubSpotAdapterSpec.scala
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package adapters
package registry
// Joda-Time
import org.joda.time.DateTime
// Scalaz
import scalaz._
import Scalaz._
// json4s
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.json4s.scalaz.JsonScalaz._
// Snowplow
import loaders.{CollectorApi, CollectorContext, CollectorPayload, CollectorSource}
import utils.ConversionUtils
import SpecHelpers._
// Specs2
import org.specs2.{ScalaCheck, Specification}
import org.specs2.matcher.DataTables
import org.specs2.scalaz.ValidationMatchers
class HubSpotAdapterSpec extends Specification with DataTables with ValidationMatchers with ScalaCheck {
def is = s2"""
This is a specification to test the HubSpotAdapter functionality
payloadBodyToEvents must return a Success list of event JSON's from a valid payload body $e1
payloadBodyToEvents must return a Failure Nel for an invalid payload body being passed $e2
toRawEvents must return a Success Nel if all events are successful $e3
  toRawEvents must return a Failure Nel if any of the events were not successes           $e4
toRawEvents must return a Nel Failure if the request body is missing $e5
toRawEvents must return a Nel Failure if the content type is missing $e6
toRawEvents must return a Nel Failure if the content type is incorrect $e7
"""
implicit val resolver = SpecHelpers.IgluResolver
object Shared {
val api = CollectorApi("com.hubspot", "v1")
val cljSource = CollectorSource("clj-tomcat", "UTF-8", None)
val context = CollectorContext(DateTime.parse("2013-08-29T00:18:48.000+00:00").some,
"172.16.17.32".some,
None,
None,
Nil,
None)
}
val ContentType = "application/json"
def e1 = {
val bodyStr = """[{"subscriptionType":"company.change","eventId":16}]"""
val expected = List(JObject(List(("subscriptionType", JString("company.change")), ("eventId", JInt(16)))))
HubSpotAdapter.payloadBodyToEvents(bodyStr) must beSuccessful(expected)
}
def e2 =
"SPEC NAME" || "INPUT" | "EXPECTED OUTPUT" |
"Failure, parse exception" !! """{"something:"some"}""" ! """HubSpot payload failed to parse into JSON: [com.fasterxml.jackson.core.JsonParseException: Unexpected character ('s' (code 115)): was expecting a colon to separate field name and value at [Source: (String)"{"something:"some"}"; line: 1, column: 15]]""" |> {
(_, input, expected) =>
HubSpotAdapter.payloadBodyToEvents(input) must beFailing(expected)
}
def e3 = {
val bodyStr =
"""[{"eventId":1,"subscriptionId":25458,"portalId":4737818,"occurredAt":1539145399845,"subscriptionType":"contact.creation","attemptNumber":0,"objectId":123,"changeSource":"CRM","changeFlag":"NEW","appId":177698}]"""
val payload = CollectorPayload(Shared.api, Nil, ContentType.some, bodyStr.some, Shared.cljSource, Shared.context)
val expected = NonEmptyList(
RawEvent(
Shared.api,
Map(
"tv" -> "com.hubspot-v1",
"e" -> "ue",
"p" -> "srv",
"ue_pr" -> """{"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0","data":{"schema":"iglu:com.hubspot/contact_creation/jsonschema/1-0-0","data":{"eventId":1,"subscriptionId":25458,"portalId":4737818,"occurredAt":"2018-10-10T04:23:19.845Z","attemptNumber":0,"objectId":123,"changeSource":"CRM","changeFlag":"NEW","appId":177698}}}"""
),
ContentType.some,
Shared.cljSource,
Shared.context
))
HubSpotAdapter.toRawEvents(payload) must beSuccessful(expected)
}
def e4 = {
val bodyStr =
"""[{"eventId":1,"subscriptionId":25458,"portalId":4737818,"occurredAt":1539145399845,"subscriptionType":"contact","attemptNumber":0,"objectId":123,"changeSource":"CRM","changeFlag":"NEW","appId":177698}]"""
val payload = CollectorPayload(Shared.api, Nil, ContentType.some, bodyStr.some, Shared.cljSource, Shared.context)
val expected = "HubSpot event at index [0] failed: type parameter [contact] not recognized"
HubSpotAdapter.toRawEvents(payload) must beFailing(NonEmptyList(expected))
}
def e5 = {
val payload = CollectorPayload(Shared.api, Nil, ContentType.some, None, Shared.cljSource, Shared.context)
HubSpotAdapter.toRawEvents(payload) must beFailing(
NonEmptyList("Request body is empty: no HubSpot events to process"))
}
def e6 = {
val payload = CollectorPayload(Shared.api, Nil, None, "stub".some, Shared.cljSource, Shared.context)
HubSpotAdapter.toRawEvents(payload) must beFailing(
NonEmptyList("Request body provided but content type empty, expected application/json for HubSpot"))
}
def e7 = {
val payload = CollectorPayload(Shared.api,
Nil,
"application/x-www-form-urlencoded".some,
"stub".some,
Shared.cljSource,
Shared.context)
HubSpotAdapter.toRawEvents(payload) must beFailing(
NonEmptyList("Content type of application/x-www-form-urlencoded provided, expected application/json for HubSpot"))
}
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/adapters/registry/CloudfrontAccessLogAdapterSpec.scala
|
<filename>3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/adapters/registry/CloudfrontAccessLogAdapterSpec.scala
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package adapters
package registry
// Joda-Time
import org.joda.time.DateTime
// Scalaz
import scalaz._
import Scalaz._
// Snowplow
import loaders.{CollectorApi, CollectorContext, CollectorPayload, CollectorSource, TsvLoader}
import utils.ConversionUtils
import SpecHelpers._
// Specs2
import org.specs2.{ScalaCheck, Specification}
import org.specs2.matcher.DataTables
import org.specs2.scalaz.ValidationMatchers
class CloudfrontAccessLogAdapterSpec extends Specification with DataTables with ValidationMatchers with ScalaCheck {
def is = s2"""
This is a specification to test the CloudfrontAccessLogAdapter functionality
toRawEvents should return a NEL containing one RawEvent if the line contains 12 fields $e1
toRawEvents should return a NEL containing one RawEvent if the line contains 15 fields $e2
toRawEvents should return a NEL containing one RawEvent if the line contains 18 fields $e3
toRawEvents should return a NEL containing one RawEvent if the line contains 19 fields $e4
toRawEvents should return a NEL containing one RawEvent if the line contains 23 fields $e5
toRawEvents should return a NEL containing one RawEvent if the line contains 24 fields $e6
toRawEvents should return a NEL containing one RawEvent if the line contains 26 fields $e7
toRawEvents should return a Validation Failure if the line is the wrong length $e8
toRawEvents should return a Validation Failure if the line contains an unparseable field $e9
"""
implicit val resolver = SpecHelpers.IgluResolver
val loader = new TsvLoader("com.amazon.aws.cloudfront/wd_access_log")
val doubleEncodedUa =
"Mozilla/5.0%2520(Macintosh;%2520Intel%2520Mac%2520OS%2520X%252010_9_2)%2520AppleWebKit/537.36%2520(KHTML,%2520like%2520Gecko)%2520Chrome/34.0.1847.131%2520Safari/537.36"
val singleEncodedUa =
"Mozilla/5.0%20(Macintosh;%20Intel%20Mac%20OS%20X%2010_9_2)%20AppleWebKit/537.36%20(KHTML,%20like%20Gecko)%20Chrome/34.0.1847.131%20Safari/537.36"
val unEncodedUa =
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36"
val doubleEncodedQs = "a=b%2520c"
val singleEncodedQs = "a=b%20c"
val url = "http://snowplowanalytics.com/analytics/index.html"
object Shared {
val api = CollectorApi("com.amazon.aws.cloudfront", "wd_access_log")
val source = CollectorSource("tsv", "UTF-8", None)
val context =
CollectorContext(DateTime.parse("2013-10-07T23:35:30.000Z").some,
"255.255.255.255".some,
singleEncodedUa.some,
None,
Nil,
None)
}
object Expected {
val staticNoPlatform = Map(
"tv" -> "com.amazon.aws.cloudfront/wd_access_log",
"e" -> "ue",
"url" -> url
)
val static = staticNoPlatform ++ Map(
"p" -> "srv"
)
}
def e1 = {
val input = s"2013-10-07\t23:35:30\tc\t100\t255.255.255.255\tf\tg\th\ti\t$url\t$doubleEncodedUa\t$doubleEncodedQs"
val payload = loader.toCollectorPayload(input)
val actual = payload.map(_.map(CloudfrontAccessLogAdapter.WebDistribution.toRawEvents(_)))
val expectedJson =
s"""|{
|"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
|"data":{
|"schema":"iglu:com.amazon.aws.cloudfront/wd_access_log/jsonschema/1-0-0",
|"data":{
|"dateTime":"2013-10-07T23:35:30Z",
|"xEdgeLocation":"c",
|"scBytes":100,
|"cIp":"255.255.255.255",
|"csMethod":"f",
|"csHost":"g",
|"csUriStem":"h",
|"scStatus":"i",
|"csReferer":"$url",
|"csUserAgent":"$unEncodedUa",
|"csUriQuery":"$singleEncodedQs"
|}
|}
|}""".stripMargin.replaceAll("[\n\r]", "")
actual must beSuccessful(
Some(Success(NonEmptyList(
RawEvent(Shared.api, Expected.static ++ Map("ue_pr" -> expectedJson), None, Shared.source, Shared.context)))))
}
def e2 = {
val input =
s"2013-10-07\t23:35:30\tc\t100\t255.255.255.255\tf\tg\th\ti\t$url\t$doubleEncodedUa\t$doubleEncodedQs\tm\tn\to"
val payload = loader.toCollectorPayload(input)
val actual = payload.map(_.map(CloudfrontAccessLogAdapter.WebDistribution.toRawEvents(_)))
val expectedJson =
s"""|{
|"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
|"data":{
|"schema":"iglu:com.amazon.aws.cloudfront/wd_access_log/jsonschema/1-0-1",
|"data":{
|"dateTime":"2013-10-07T23:35:30Z",
|"xEdgeLocation":"c",
|"scBytes":100,
|"cIp":"255.255.255.255",
|"csMethod":"f",
|"csHost":"g",
|"csUriStem":"h",
|"scStatus":"i",
|"csReferer":"$url",
|"csUserAgent":"$unEncodedUa",
|"csUriQuery":"$singleEncodedQs",
|"csCookie":"m",
|"xEdgeResultType":"n",
|"xEdgeRequestId":"o"
|}
|}
|}""".stripMargin.replaceAll("[\n\r]", "")
actual must beSuccessful(
Some(Success(NonEmptyList(
RawEvent(Shared.api, Expected.static ++ Map("ue_pr" -> expectedJson), None, Shared.source, Shared.context)))))
}
def e3 = {
val input =
s"2013-10-07\t23:35:30\tc\t100\t255.255.255.255\tf\tg\th\ti\t$url\t$doubleEncodedUa\t$doubleEncodedQs\tm\tn\to\tp\tq\t90"
val payload = loader.toCollectorPayload(input)
val actual = payload.map(_.map(CloudfrontAccessLogAdapter.WebDistribution.toRawEvents(_)))
val expectedJson =
s"""|{
|"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
|"data":{
|"schema":"iglu:com.amazon.aws.cloudfront/wd_access_log/jsonschema/1-0-2",
|"data":{
|"dateTime":"2013-10-07T23:35:30Z",
|"xEdgeLocation":"c",
|"scBytes":100,
|"cIp":"255.255.255.255",
|"csMethod":"f",
|"csHost":"g",
|"csUriStem":"h",
|"scStatus":"i",
|"csReferer":"$url",
|"csUserAgent":"$unEncodedUa",
|"csUriQuery":"$singleEncodedQs",
|"csCookie":"m",
|"xEdgeResultType":"n",
|"xEdgeRequestId":"o",
|"xHostHeader":"p",
|"csProtocol":"q",
|"csBytes":90
|}
|}
|}""".stripMargin.replaceAll("[\n\r]", "")
actual must beSuccessful(
Some(Success(NonEmptyList(
RawEvent(Shared.api, Expected.static ++ Map("ue_pr" -> expectedJson), None, Shared.source, Shared.context)))))
}
def e4 = {
val input =
s"2013-10-07\t23:35:30\tc\t100\t255.255.255.255\tf\tg\th\ti\t$url\t$doubleEncodedUa\t$doubleEncodedQs\tm\tn\to\tp\tq\t90\t0.001"
val payload = loader.toCollectorPayload(input)
val actual = payload.map(_.map(CloudfrontAccessLogAdapter.WebDistribution.toRawEvents(_)))
val expectedJson =
s"""|{
|"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
|"data":{
|"schema":"iglu:com.amazon.aws.cloudfront/wd_access_log/jsonschema/1-0-3",
|"data":{
|"dateTime":"2013-10-07T23:35:30Z",
|"xEdgeLocation":"c",
|"scBytes":100,
|"cIp":"255.255.255.255",
|"csMethod":"f",
|"csHost":"g",
|"csUriStem":"h",
|"scStatus":"i",
|"csReferer":"$url",
|"csUserAgent":"$unEncodedUa",
|"csUriQuery":"$singleEncodedQs",
|"csCookie":"m",
|"xEdgeResultType":"n",
|"xEdgeRequestId":"o",
|"xHostHeader":"p",
|"csProtocol":"q",
|"csBytes":90,
|"timeTaken":0.001
|}
|}
|}""".stripMargin.replaceAll("[\n\r]", "")
actual must beSuccessful(
Some(Success(NonEmptyList(
RawEvent(Shared.api, Expected.static ++ Map("ue_pr" -> expectedJson), None, Shared.source, Shared.context)))))
}
def e5 = {
val input =
s"2013-10-07\t23:35:30\tc\t100\t255.255.255.255\tf\tg\th\ti\t$url\t$doubleEncodedUa\t$doubleEncodedQs\tm\tn\to\tp\tq\t90\t0.001\tr\ts\tt\tu"
val payload = loader.toCollectorPayload(input)
val actual = payload.map(_.map(CloudfrontAccessLogAdapter.WebDistribution.toRawEvents(_)))
val expectedJson =
s"""|{
|"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
|"data":{
|"schema":"iglu:com.amazon.aws.cloudfront/wd_access_log/jsonschema/1-0-4",
|"data":{
|"dateTime":"2013-10-07T23:35:30Z",
|"xEdgeLocation":"c",
|"scBytes":100,
|"cIp":"255.255.255.255",
|"csMethod":"f",
|"csHost":"g",
|"csUriStem":"h",
|"scStatus":"i",
|"csReferer":"$url",
|"csUserAgent":"$unEncodedUa",
|"csUriQuery":"$singleEncodedQs",
|"csCookie":"m",
|"xEdgeResultType":"n",
|"xEdgeRequestId":"o",
|"xHostHeader":"p",
|"csProtocol":"q",
|"csBytes":90,
|"timeTaken":0.001,
|"xForwardedFor":"r",
|"sslProtocol":"s",
|"sslCipher":"t",
|"xEdgeResponseResultType":"u"
|}
|}
|}""".stripMargin.replaceAll("[\n\r]", "")
actual must beSuccessful(
Some(Success(NonEmptyList(
RawEvent(Shared.api, Expected.static ++ Map("ue_pr" -> expectedJson), None, Shared.source, Shared.context)))))
}
def e6 = {
val input =
s"2013-10-07\t23:35:30\tc\t100\t255.255.255.255\tf\tg\th\ti\t$url\t$doubleEncodedUa\t$doubleEncodedQs\tm\tn\to\tp\tq\t90\t0.001\tr\ts\tt\tu\tHTTP/2.0"
val payload = loader.toCollectorPayload(input)
val actual = payload.map(_.map(CloudfrontAccessLogAdapter.WebDistribution.toRawEvents(_)))
val expectedJson =
s"""|{
|"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
|"data":{
|"schema":"iglu:com.amazon.aws.cloudfront/wd_access_log/jsonschema/1-0-5",
|"data":{
|"dateTime":"2013-10-07T23:35:30Z",
|"xEdgeLocation":"c",
|"scBytes":100,
|"cIp":"255.255.255.255",
|"csMethod":"f",
|"csHost":"g",
|"csUriStem":"h",
|"scStatus":"i",
|"csReferer":"$url",
|"csUserAgent":"$unEncodedUa",
|"csUriQuery":"$singleEncodedQs",
|"csCookie":"m",
|"xEdgeResultType":"n",
|"xEdgeRequestId":"o",
|"xHostHeader":"p",
|"csProtocol":"q",
|"csBytes":90,
|"timeTaken":0.001,
|"xForwardedFor":"r",
|"sslProtocol":"s",
|"sslCipher":"t",
|"xEdgeResponseResultType":"u",
|"csProtocolVersion":"HTTP/2.0"
|}
|}
|}""".stripMargin.replaceAll("[\n\r]", "")
actual must beSuccessful(
Some(Success(NonEmptyList(
RawEvent(Shared.api, Expected.static ++ Map("ue_pr" -> expectedJson), None, Shared.source, Shared.context)))))
}
def e7 = {
val input =
s"2013-10-07\t23:35:30\tc\t100\t255.255.255.255\tf\tg\th\ti\t$url\t$doubleEncodedUa\t$doubleEncodedQs\tm\tn\to\tp\tq\t90\t0.001\tr\ts\tt\tu\tHTTP/2.0\tProcessed\t12"
val payload = loader.toCollectorPayload(input)
val actual = payload.map(_.map(CloudfrontAccessLogAdapter.WebDistribution.toRawEvents(_)))
val expectedJson =
s"""|{
|"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
|"data":{
|"schema":"iglu:com.amazon.aws.cloudfront/wd_access_log/jsonschema/1-0-6",
|"data":{
|"dateTime":"2013-10-07T23:35:30Z",
|"xEdgeLocation":"c",
|"scBytes":100,
|"cIp":"255.255.255.255",
|"csMethod":"f",
|"csHost":"g",
|"csUriStem":"h",
|"scStatus":"i",
|"csReferer":"$url",
|"csUserAgent":"$unEncodedUa",
|"csUriQuery":"$singleEncodedQs",
|"csCookie":"m",
|"xEdgeResultType":"n",
|"xEdgeRequestId":"o",
|"xHostHeader":"p",
|"csProtocol":"q",
|"csBytes":90,
|"timeTaken":0.001,
|"xForwardedFor":"r",
|"sslProtocol":"s",
|"sslCipher":"t",
|"xEdgeResponseResultType":"u",
|"csProtocolVersion":"HTTP/2.0",
|"fleStatus":"Processed",
|"fleEncryptedFields":"12"
|}
|}
|}""".stripMargin.replaceAll("[\n\r]", "")
actual must beSuccessful(
Some(Success(NonEmptyList(
RawEvent(Shared.api, Expected.static ++ Map("ue_pr" -> expectedJson), None, Shared.source, Shared.context)))))
}
def e8 = {
val params = toNameValuePairs()
val payload =
CollectorPayload(Shared.api, params, None, "2013-10-07\t23:35:30\tc\t\t".some, Shared.source, Shared.context)
val actual = CloudfrontAccessLogAdapter.WebDistribution.toRawEvents(payload)
actual must beFailing(NonEmptyList("Access log TSV line contained 5 fields, expected 12, 15, 18, 19, 23, 24 or 26"))
}
def e9 = {
val params = toNameValuePairs()
val payload =
CollectorPayload(Shared.api,
params,
None,
"a\tb\tc\td\te\tf\tg\th\ti\t$url\tk\t$doubleEncodedQs".some,
Shared.source,
Shared.context)
val actual = CloudfrontAccessLogAdapter.WebDistribution.toRawEvents(payload)
actual must beFailing(
NonEmptyList(
"Unexpected exception converting Cloudfront web distribution access log date [a] and time [b] to timestamp: [Invalid format: \"aTb+00:00\"]",
"Field [scBytes]: cannot convert [d] to Int"
))
}
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/adapters/registry/HubSpotAdapter.scala
|
<gh_stars>1-10
/*
* Copyright (c) 2014-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package common
package adapters
package registry
// Jackson
import com.fasterxml.jackson.databind.JsonNode
import com.fasterxml.jackson.core.JsonParseException
// Scalaz
import scalaz._
import Scalaz._
// json4s
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.json4s.scalaz.JsonScalaz._
// Iglu
import iglu.client.{Resolver, SchemaKey}
import iglu.client.validation.ValidatableJsonMethods._
// Joda Time
import org.joda.time.DateTime
// This project
import loaders.CollectorPayload
import utils.{JsonUtils => JU}
/**
* Transforms a collector payload which conforms to
* a known version of the HubSpot webhook subscription
* into raw events.
*/
object HubSpotAdapter extends Adapter {
// Vendor name for Failure Message
private val VendorName = "HubSpot"
// Tracker version for a HubSpot webhook
private val TrackerVersion = "com.hubspot-v1"
// Expected content type for a request body
private val ContentType = "application/json"
// Event-Schema Map for reverse-engineering a Snowplow unstructured event
private val EventSchemaMap = Map(
"contact.creation" -> SchemaKey("com.hubspot", "contact_creation", "jsonschema", "1-0-0").toSchemaUri,
"contact.deletion" -> SchemaKey("com.hubspot", "contact_deletion", "jsonschema", "1-0-0").toSchemaUri,
"contact.propertyChange" -> SchemaKey("com.hubspot", "contact_change", "jsonschema", "1-0-0").toSchemaUri,
"company.creation" -> SchemaKey("com.hubspot", "company_creation", "jsonschema", "1-0-0").toSchemaUri,
"company.deletion" -> SchemaKey("com.hubspot", "company_deletion", "jsonschema", "1-0-0").toSchemaUri,
"company.propertyChange" -> SchemaKey("com.hubspot", "company_change", "jsonschema", "1-0-0").toSchemaUri,
"deal.creation" -> SchemaKey("com.hubspot", "deal_creation", "jsonschema", "1-0-0").toSchemaUri,
"deal.deletion" -> SchemaKey("com.hubspot", "deal_deletion", "jsonschema", "1-0-0").toSchemaUri,
"deal.propertyChange" -> SchemaKey("com.hubspot", "deal_change", "jsonschema", "1-0-0").toSchemaUri
)
/**
* Converts a CollectorPayload instance into raw events.
* A HubSpot Tracking payload can contain many events in one.
   * We expect the type parameter to be 1 of 9 options, otherwise
   * we have an unsupported event type.
*
   * @param payload The CollectorPayload containing one or more
* raw events as collected by a Snowplow collector
* @param resolver (implicit) The Iglu resolver used for
* schema lookup and validation. Not used
* @return a Validation boxing either a NEL of RawEvents on
* Success, or a NEL of Failure Strings
*/
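  // A minimal sketch of the kind of body this adapter accepts; the field values below are
  // hypothetical and "objectId" is only illustrative:
  //   [{"subscriptionType": "contact.creation", "occurredAt": 1396714422000, "objectId": 123}]
  // Each array element must carry a "subscriptionType" matching one of the EventSchemaMap keys above.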
def toRawEvents(payload: CollectorPayload)(implicit resolver: Resolver): ValidatedRawEvents =
(payload.body, payload.contentType) match {
case (None, _) => s"Request body is empty: no ${VendorName} events to process".failNel
case (_, None) =>
s"Request body provided but content type empty, expected ${ContentType} for ${VendorName}".failNel
case (_, Some(ct)) if ct != ContentType =>
s"Content type of ${ct} provided, expected ${ContentType} for ${VendorName}".failNel
case (Some(body), _) => {
payloadBodyToEvents(body) match {
case Failure(str) => str.failNel
case Success(list) => {
// Create our list of Validated RawEvents
val rawEventsList: List[Validated[RawEvent]] =
for {
(event, index) <- list.zipWithIndex
} yield {
val eventType: Option[String] = (event \ "subscriptionType").extractOpt[String]
for {
schema <- lookupSchema(eventType, VendorName, index, EventSchemaMap)
} yield {
val formattedEvent = reformatParameters(event)
val qsParams = toMap(payload.querystring)
RawEvent(
api = payload.api,
parameters = toUnstructEventParams(TrackerVersion, qsParams, schema, formattedEvent, "srv"),
contentType = payload.contentType,
source = payload.source,
context = payload.context
)
}
}
// Processes the List for Failures and Successes and returns ValidatedRawEvents
rawEventsListProcessor(rawEventsList)
}
}
}
}
/**
* Returns a list of JValue events from the
* HubSpot payload
*
* @param body The payload body from the HubSpot
* event
* @return either a Successful List of JValue JSONs
* or a Failure String
*/
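  // Illustrative only: a body of "[{}, {}]" parses into a two-element list of JValues, while a
  // non-array body such as "{}" fails with the "Could not resolve ..." message below.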
private[registry] def payloadBodyToEvents(body: String): Validation[String, List[JValue]] =
try {
val parsed = parse(body)
parsed match {
case JArray(list) => list.success
case _ => s"Could not resolve ${VendorName} payload into a JSON array of events".fail
}
} catch {
case e: JsonParseException => {
val exception = JU.stripInstanceEtc(e.toString).orNull
s"${VendorName} payload failed to parse into JSON: [${exception}]".fail
}
}
/**
* Returns an updated HubSpot event JSON where
* the "subscriptionType" field is removed
* and "occurredAt" fields' values have been converted
*
* @param json The event JSON which we need to
* update values for
* @return the updated JSON with updated fields and values
*/
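  // Illustrative only (hypothetical input): {"subscriptionType":"contact.creation","occurredAt":1396714422000}
  // has "subscriptionType" removed and its "occurredAt" epoch millis rewritten as a
  // JsonSchemaDateTimeFormat string.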
def reformatParameters(json: JValue): JValue = {
def toStringField(value: Long): JString = {
val dt: DateTime = new DateTime(value)
JString(JsonSchemaDateTimeFormat.print(dt))
}
json removeField {
case ("subscriptionType", JString(s)) => true
case _ => false
} transformField {
case ("occurredAt", JInt(value)) => ("occurredAt", toStringField(value.toLong))
}
}
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/enrichments/registry/apirequest/ApiRequestEnrichmentSpec.scala
|
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common.enrichments.registry.apirequest
// Scalaz
import org.json4s.jackson.JsonMethods
import scalaz.Scalaz._
// json4s
import org.json4s._
import org.json4s.jackson.parseJson
// specs2
import org.specs2.Specification
import org.specs2.mock.Mockito
import org.specs2.scalaz.ValidationMatchers
// Iglu
import com.snowplowanalytics.iglu.client.JsonSchemaPair
import com.snowplowanalytics.iglu.client.SchemaKey
// Snowplow
import com.snowplowanalytics.snowplow.enrich.common.outputs.EnrichedEvent
class ApiRequestEnrichmentSpec extends Specification with ValidationMatchers with Mockito {
def is = s2"""
This is a specification to test the ApiRequestEnrichment configuration
Extract correct configuration for GET request and perform the request $e1
  Skip incorrect input (neither json nor pojo) in configuration $e2
Skip incorrect input (both json and pojo) in configuration $e3
Extract correct configuration for POST request and perform the request $e4
""""
val SCHEMA_KEY =
SchemaKey("com.snowplowanalytics.snowplow.enrichments", "api_request_enrichment_config", "jsonschema", "1-0-0")
def e1 = {
val inputs = List(
Input("user", pojo = Some(PojoInput("user_id")), json = None),
Input(
"userSession",
pojo = None,
json = Some(
JsonInput("contexts", "iglu:com.snowplowanalytics.snowplow/client_session/jsonschema/1-*-*", "$.userId"))),
Input("client", pojo = Some(PojoInput("app_id")), json = None)
)
val api =
HttpApi("GET",
"http://api.acme.com/users/{{client}}/{{user}}?format=json",
1000,
Authentication(Some(HttpBasic(Some("xxx"), None))))
val apiSpy = spy(api)
val output = Output("iglu:com.acme/user/jsonschema/1-0-0", Some(JsonOutput("$.record")))
val cache = Cache(3000, 60)
val config = ApiRequestEnrichment(inputs, apiSpy, List(output), cache)
val fakeEnrichedEvent = new EnrichedEvent {
app_id = "some-fancy-app-id"
user_id = "some-fancy-user-id"
}
val clientSession: JsonSchemaPair = (
SchemaKey(vendor = "com.snowplowanalytics.snowplow",
name = "client_session",
format = "jsonschema",
version = "1-0-1"),
JsonMethods.asJsonNode(parseJson("""|{
| "data": {
| "userId": "some-fancy-user-session-id",
| "sessionId": "42c8a55b-c0c2-4749-b9ac-09bb0d17d000",
| "sessionIndex": 1,
| "previousSessionId": null,
| "storageMechanism": "COOKIE_1"
| }
|}""".stripMargin))
)
val configuration = parseJson(
"""|{
| "vendor": "com.snowplowanalytics.snowplow.enrichments",
| "name": "api_request_enrichment_config",
| "enabled": true,
| "parameters": {
| "inputs": [
| {
| "key": "user",
| "pojo": {
| "field": "user_id"
| }
| },
| {
| "key": "userSession",
| "json": {
| "field": "contexts",
| "schemaCriterion": "iglu:com.snowplowanalytics.snowplow/client_session/jsonschema/1-*-*",
| "jsonPath": "$.userId"
| }
| },
| {
| "key": "client",
| "pojo": {
| "field": "app_id"
| }
| }
| ],
| "api": {
| "http": {
| "method": "GET",
| "uri": "http://api.acme.com/users/{{client}}/{{user}}?format=json",
| "timeout": 1000,
| "authentication": {
| "httpBasic": {
| "username": "xxx",
| "password": <PASSWORD>
| }
| }
| }
| },
| "outputs": [{
| "schema": "iglu:com.acme/user/jsonschema/1-0-0",
| "json": {
| "jsonPath": "$.record"
| }
| }],
| "cache": {
| "size": 3000,
| "ttl": 60
| }
| }
| }""".stripMargin)
ApiRequestEnrichmentConfig.parse(configuration, SCHEMA_KEY) must beSuccessful(config)
val user = parseJson("""|{
| "schema": "iglu:com.acme/user/jsonschema/1-0-0",
| "data": {
| "name": "<NAME>",
| "company": "Acme"
| }
|}
""".stripMargin)
apiSpy.perform(
url = "http://api.acme.com/users/some-fancy-app-id/some-fancy-user-id?format=json",
body = None
) returns """{"record": {"name": "<NAME>", "company": "Acme"}}""".success
val enrichedContextResult = config.lookup(
event = fakeEnrichedEvent,
derivedContexts = List.empty,
customContexts = List(clientSession),
unstructEvent = List.empty
)
enrichedContextResult must beSuccessful(List(user))
}
def e2 = {
val configuration = parseJson("""|{
| "vendor": "com.snowplowanalytics.snowplow.enrichments",
| "name": "api_request_enrichment_config",
| "enabled": true,
| "parameters": {
| "inputs": [
| {
| "key": "user",
| "pojo": {
| "field": "user_id"
| }
| },
| {
| "key": "user"
| },
| {
| "key": "client",
| "pojo": {
| "field": "app_id"
| }
| }
| ],
| "api": {
| "http": {
| "method": "GET",
| "uri": "http://api.acme.com/users/{{client}}/{{user}}?format=json",
| "timeout": 1000,
| "authentication": {
| "httpBasic": {
| "username": "xxx",
| "password": "<PASSWORD>"
| }
| }
| }
| },
| "outputs": [{
| "schema": "iglu:com.acme/user/jsonschema/1-0-0",
| "json": {
| "jsonPath": "$.record"
| }
| }],
| "cache": {
| "size": 3000,
| "ttl": 60
| }
| }
| }""".stripMargin)
ApiRequestEnrichmentConfig.parse(configuration, SCHEMA_KEY) must beFailing
}
def e3 = {
val configuration = parseJson(
"""|{
| "vendor": "com.snowplowanalytics.snowplow.enrichments",
| "name": "api_request_enrichment_config",
| "enabled": true,
| "parameters": {
| "inputs": [
| {
| "key": "user",
| "pojo": {
| "field": "user_id"
| }
| },
| {
| "key": "client",
| "pojo": {
| "field": "app_id"
| },
| "json": {
| "field": "contexts",
| "schemaCriterion": "iglu:com.snowplowanalytics.snowplow/client_session/jsonschema/1-*-*",
| "jsonPath": "$.userId"
| }
| }
| ],
| "api": {
| "http": {
| "method": "GET",
| "uri": "http://api.acme.com/users/{{client}}/{{user}}?format=json",
| "timeout": 1000,
| "authentication": {
| "httpBasic": {
| "username": "xxx",
| "password": "<PASSWORD>"
| }
| }
| }
| },
| "outputs": [{
| "schema": "iglu:com.acme/user/jsonschema/1-0-0",
| "json": {
| "jsonPath": "$.record"
| }
| }],
| "cache": {
| "size": 3000,
| "ttl": 60
| }
| }
| }""".stripMargin)
ApiRequestEnrichmentConfig.parse(configuration, SCHEMA_KEY) must beFailing
}
def e4 = {
val inputs = List(
Input(key = "user", pojo = Some(PojoInput("user_id")), json = None),
Input(
key = "userSession",
pojo = None,
json = Some(
JsonInput("contexts", "iglu:com.snowplowanalytics.snowplow/client_session/jsonschema/1-*-*", "$.userId"))),
Input(key = "client", pojo = Some(PojoInput("app_id")), json = None)
)
val api = HttpApi(method = "POST",
uri = "http://api.acme.com/users?format=json",
timeout = 1000,
authentication = Authentication(Some(HttpBasic(Some("xxx"), None))))
val apiSpy = spy(api)
val output = Output(schema = "iglu:com.acme/user/jsonschema/1-0-0", json = Some(JsonOutput("$.record")))
val cache = Cache(size = 3000, ttl = 60)
val config = ApiRequestEnrichment(inputs, apiSpy, List(output), cache)
val fakeEnrichedEvent = new EnrichedEvent {
app_id = "some-fancy-app-id"
user_id = "some-fancy-user-id"
}
val clientSession: JsonSchemaPair = (
SchemaKey(vendor = "com.snowplowanalytics.snowplow",
name = "client_session",
format = "jsonschema",
version = "1-0-1"),
JsonMethods.asJsonNode(parseJson("""|{
| "data": {
| "userId": "some-fancy-user-session-id",
| "sessionId": "42c8a55b-c0c2-4749-b9ac-09bb0d17d000",
| "sessionIndex": 1,
| "previousSessionId": null,
| "storageMechanism": "COOKIE_1"
| }
|}""".stripMargin))
)
val configuration = parseJson(
"""|{
| "vendor": "com.snowplowanalytics.snowplow.enrichments",
| "name": "api_request_enrichment_config",
| "enabled": true,
| "parameters": {
| "inputs": [
| {
| "key": "user",
| "pojo": {
| "field": "user_id"
| }
| },
| {
| "key": "userSession",
| "json": {
| "field": "contexts",
| "schemaCriterion": "iglu:com.snowplowanalytics.snowplow/client_session/jsonschema/1-*-*",
| "jsonPath": "$.userId"
| }
| },
| {
| "key": "client",
| "pojo": {
| "field": "app_id"
| }
| }
| ],
| "api": {
| "http": {
| "method": "POST",
| "uri": "http://api.acme.com/users?format=json",
| "timeout": 1000,
| "authentication": {
| "httpBasic": {
| "username": "xxx",
| "password": <PASSWORD>
| }
| }
| }
| },
| "outputs": [{
| "schema": "iglu:com.acme/user/jsonschema/1-0-0",
| "json": {
| "jsonPath": "$.record"
| }
| }],
| "cache": {
| "size": 3000,
| "ttl": 60
| }
| }
| }""".stripMargin)
ApiRequestEnrichmentConfig.parse(configuration, SCHEMA_KEY) must beSuccessful(config)
val user = parseJson("""|{
| "schema": "iglu:com.acme/user/jsonschema/1-0-0",
| "data": {
| "name": "<NAME>",
| "company": "Acme"
| }
|}
""".stripMargin)
apiSpy.perform(
url = "http://api.acme.com/users?format=json",
body = Some(
"""{"client":"some-fancy-app-id","user":"some-fancy-user-id","userSession":"some-fancy-user-session-id"}""")
) returns """{"record": {"name": "<NAME>", "company": "Acme"}}""".success
val enrichedContextResult = config.lookup(
event = fakeEnrichedEvent,
derivedContexts = List.empty,
customContexts = List(clientSession),
unstructEvent = List.empty
)
enrichedContextResult must beSuccessful(List(user))
}
}
|
AlexandraGYG/snowplow
|
2-collectors/scala-stream-collector/core/src/main/scala/com.snowplowanalytics.snowplow.collectors.scalastream/metrics/MetricsRoute.scala
|
<filename>2-collectors/scala-stream-collector/core/src/main/scala/com.snowplowanalytics.snowplow.collectors.scalastream/metrics/MetricsRoute.scala
/*
* Copyright (c) 2013-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0, and
* you may not use this file except in compliance with the Apache License
* Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the Apache License Version 2.0 is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.collectors.scalastream.metrics
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.util.ByteString
trait MetricsRoute {
def metricsService: MetricsService
def metricsRoute: Route =
(path("metrics") & get) {
complete(HttpResponse(
StatusCodes.OK,
entity = HttpEntity.Strict(MetricsRoute.`text/plain(UTF-8) v0.0.4`, ByteString(metricsService.report()))
))
}
}
object MetricsRoute {
val `text/plain(UTF-8) v0.0.4`: ContentType.WithCharset =
MediaTypes.`text/plain` withParams Map("version" -> "0.0.4") withCharset HttpCharsets.`UTF-8`
}
|
AlexandraGYG/snowplow
|
2-collectors/scala-stream-collector/core/src/main/scala/com.snowplowanalytics.snowplow.collectors.scalastream/metrics/MetricsService.scala
|
/*
* Copyright (c) 2013-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0, and
* you may not use this file except in compliance with the Apache License
* Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the Apache License Version 2.0 is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.collectors.scalastream
package metrics
import java.time.Duration
import akka.http.scaladsl.model.{HttpMethod, StatusCode, Uri}
import io.prometheus.client.exporter.common.TextFormat
import io.prometheus.client.{CollectorRegistry, Counter, Gauge, Histogram}
import org.apache.commons.io.output.StringBuilderWriter
import generated.BuildInfo
import PrometheusMetricsService.Metrics._
import PrometheusMetricsService.NanosecondsInSecond
import model.PrometheusMetricsConfig
/**
 * Service used to keep track of processed HTTP requests
 * and to generate reports based on the collected statistics
*/
trait MetricsService {
def observeRequest(method: HttpMethod, uri: Uri, status: StatusCode, duration: Duration): Unit
def report(): String
}
/**
* Implementation of [[com.snowplowanalytics.snowplow.collectors.scalastream.metrics.MetricsService]]
* which uses [[https://prometheus.io/]] data format for storing metrics and report generation
* @param metricsConfig Configuration of metrics format
*/
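// A sketch of what report() emits (Prometheus text exposition format 0.0.4); sample values are hypothetical:
//   http_requests_total{endpoint="/i",method="GET",code="200"} 42.0
//   http_request_duration_seconds_count{endpoint="/i",method="GET",code="200"} 42.0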
class PrometheusMetricsService(metricsConfig: PrometheusMetricsConfig) extends MetricsService {
private val registry = new CollectorRegistry
private val requestCounter: Counter =
Counter.build(HttpRequestCount, HttpRequestCountHelp).labelNames(Labels: _*).register(registry)
private val requestDurationHistogramBuilder = Histogram.build(HttpRequestDuration, HttpRequestDurationHelp).labelNames(Labels: _*)
private val requestDurationHistogram: Histogram =
metricsConfig.durationBucketsInSeconds
.map(buckets => requestDurationHistogramBuilder.buckets(buckets: _*).register(registry))
.getOrElse(requestDurationHistogramBuilder.register(registry))
private val applicationVersionGauge = Gauge.build("service_version", "Java, scala versions and collector version")
.labelNames("java_version", "scala_version", "version")
.register(registry)
applicationVersionGauge.labels(System.getProperty("java.version"), BuildInfo.scalaVersion, BuildInfo.version).set(1)
override def observeRequest(method: HttpMethod, uri: Uri, status: StatusCode, duration: Duration): Unit = {
val path = uri.path.toString
val methodValue = method.value
val code = status.intValue().toString
requestDurationHistogram.labels(path, methodValue, code).observe(duration.toNanos / NanosecondsInSecond)
requestCounter.labels(path, methodValue, code).inc()
}
override def report(): String = {
val writer = new StringBuilderWriter()
TextFormat.write004(writer, registry.metricFamilySamples())
writer.getBuilder.toString
}
}
object PrometheusMetricsService {
final val NanosecondsInSecond: Double = Math.pow(10, 9)
object Metrics {
final val HttpRequestDuration = "http_request_duration_seconds"
final val HttpRequestDurationHelp = "Latency per endpoint"
final val HttpRequestCount = "http_requests_total"
final val HttpRequestCountHelp = "Total count of requests to http endpoint"
final val Labels = Seq("endpoint", "method", "code")
}
}
|
AlexandraGYG/snowplow
|
3-enrich/stream-enrich/integration-tests/src/test/scala/com.snowplowanalytics.snowplow.enrich.stream/PiiEmitSpec.scala
|
/*
* Copyright (c) 2013-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0, and
* you may not use this file except in compliance with the Apache License
* Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the Apache License Version 2.0 is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package stream
// Scala
import scala.util.{Try, Success, Failure}
import scala.collection.JavaConversions._
import scala.concurrent.duration.FiniteDuration
import scala.io.Source
// Scala libraries
import pureconfig._
import com.typesafe.config.ConfigFactory
import org.specs2.mutable.Specification
import org.specs2.matcher.{FutureMatchers, Matcher}
import org.specs2.specification.BeforeAfterAll
// Java
import java.util.regex.Pattern
import java.util.concurrent.TimeUnit
// Java libraries
import org.apache.commons.codec.binary.Base64
import com.hubspot.jinjava.Jinjava
// This project
import good._
import model.{StreamsConfig, SourceSinkConfig}
class PiiEmitSpec
extends Specification with FutureMatchers with KafkaIntegrationSpec with BeforeAfterAll {
var ktu: KafkaTestUtils = _
override def beforeAll(): Unit = {
ktu = new KafkaTestUtils
ktu.setup()
ktu.createTopics(kafkaTopics.toList: _*)
}
override def afterAll(): Unit = {
if (ktu != null) {
ktu = null
}
}
import KafkaIntegrationSpecValues._
def configValues = Map(
"sinkType" -> "kafka",
"streamsInRaw" -> s"$testGoodIn",
"outEnriched" -> s"$testGood",
"outPii" -> s"$testPii",
"outBad" -> s"$testBad",
"partitionKeyName" -> "\"\"",
"kafkaBrokers" -> ktu.brokerAddress,
"bufferTimeThreshold" -> "1",
"bufferRecordThreshold" -> "1",
"bufferByteThreshold" -> "100000",
"enrichAppName" -> "Jim",
"enrichStreamsOutMaxBackoff" -> "1000",
"enrichStreamsOutMinBackoff" -> "1000",
"appName" -> "jim"
)
def config: String = Try {
val configRes = getClass.getResourceAsStream("/config.hocon.sample")
Source.fromInputStream(configRes).getLines.mkString("\n")
} match {
case Failure(t) => {
println(s"Unable to get config.hocon.sample: $t"); throw new Exception(t)
}
case Success(s) => s
}
def configInstance: String = {
val jinJava = new Jinjava()
jinJava.render(config, configValues)
}
private def decode(s: String): Array[Byte] = Base64.decodeBase64(s)
// Input
override val inputGood = List(
decode(PagePingWithContextSpec.raw),
decode(PageViewWithContextSpec.raw),
decode(StructEventSpec.raw),
decode(StructEventWithContextSpec.raw),
decode(TransactionItemSpec.raw),
decode(TransactionSpec.raw)
)
// Expected output counts
override val (expectedGood, expectedBad, expectedPii) = (inputGood.size, 0, inputGood.size)
// Timeout for the producer
override val producerTimeoutSec = 5
// Timeout for all the consumers (good, bad, and pii) (running in parallel)
// You may want to adjust this if you are doing lots of slow work in the app
// Ordinarily the consumers return in less than 1 sec
override val consumerExecutionTimeoutSec = 15
"Pii" should {
"emit all events" in {
implicit def hint[T]: ProductHint[T] =
ProductHint[T](ConfigFieldMapping(CamelCase, CamelCase))
implicit val sourceSinkConfigHint = new FieldCoproductHint[SourceSinkConfig]("enabled")
val parsedConfig = ConfigFactory.parseString(configInstance).resolve()
val configObject = Try {
loadConfigOrThrow[StreamsConfig](parsedConfig.getConfig("enrich.streams"))
}
configObject aka "enrichment config loading" must not beAFailedTry
val app = getMainApplicationFuture(
configObject.get,
SpecHelpers.resolver,
SpecHelpers.adapterRegistry,
SpecHelpers.enrichmentRegistry,
None)
inputProduced(ktu.brokerAddress) aka "sending input" must beSuccessfulTry
def spaceJoinResult(expected: List[StringOrRegex]) =
expected
.flatMap({
case JustRegex(r) => Some(r.toString)
case JustString(s) if s.nonEmpty => Some(Pattern.quote(s))
case _ => None
})
.mkString("\\s*")
val expectedMatcher: Matcher[(List[String], List[String], List[String])] = beLike {
case (good: List[String], bad: List[String], pii: List[String]) => {
(bad aka "bad result list" must have size (expectedBad)) and
(pii aka "pii result list" must have size (expectedPii)) and
(good aka "good result list" must have size (expectedGood)) and
(good aka "good result list" must containMatch(
spaceJoinResult(PagePingWithContextSpec.expected))) and
(pii aka "pii result list" must containMatch(spaceJoinResult(PagePingWithContextSpec.pii))) and
(good aka "good result list" must containMatch(
spaceJoinResult(PageViewWithContextSpec.expected))) and
(pii aka "pii result list" must containMatch(spaceJoinResult(PageViewWithContextSpec.pii))) and
(good aka "good result list" must containMatch(
spaceJoinResult(StructEventSpec.expected))) and
(pii aka "pii result list" must containMatch(spaceJoinResult(StructEventSpec.pii))) and
(good aka "good result list" must containMatch(
spaceJoinResult(StructEventWithContextSpec.expected))) and
(pii aka "pii result list" must containMatch(spaceJoinResult(StructEventWithContextSpec.pii))) and
(good aka "good result list" must containMatch(
spaceJoinResult(TransactionItemSpec.expected))) and
(pii aka "pii result list" must containMatch(spaceJoinResult(TransactionItemSpec.pii))) and
(good aka "good result list" must containMatch(
spaceJoinResult(TransactionSpec.expected))) and
(pii aka "pii result list" must containMatch(spaceJoinResult(TransactionSpec.pii)))
}
}
allResults(ktu.brokerAddress) must expectedMatcher.await(
retries = 0,
timeout = FiniteDuration(consumerExecutionTimeoutSec.toLong, TimeUnit.SECONDS))
}
}
}
|
AlexandraGYG/snowplow
|
3-enrich/stream-enrich/core/src/main/scala/com.snowplowanalytics.snowplow.enrich.stream/Enrich.scala
|
<gh_stars>1-10
/*
* Copyright (c) 2013-2019 Snowplow Analytics Ltd.
* All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache
* License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied.
*
* See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package stream
import java.io.File
import java.net.URI
import scala.io.Source
import scala.util.Try
import scala.sys.process._
import com.typesafe.config.ConfigFactory
import org.json4s.jackson.JsonMethods._
import org.json4s.JsonDSL._
import org.slf4j.LoggerFactory
import pureconfig._
import scalaz.{Sink => _, Source => _, _}
import Scalaz._
import common.adapters.AdapterRegistry
import common.adapters.registry.RemoteAdapter
import common.enrichments.EnrichmentRegistry
import common.utils.JsonUtils
import config._
import iglu.client.Resolver
import model._
import scalatracker.Tracker
/** Interface for the entry point for Stream Enrich. */
trait Enrich {
lazy val log = LoggerFactory.getLogger(getClass())
val FilepathRegex = "^file:(.+)$".r
private val regexMsg = "'file:[filename]'"
implicit val creds: Credentials = NoCredentials
def run(args: Array[String]): Unit = {
val trackerSource = for {
config <- parseConfig(args).validation
(enrichConfig, resolverArg, enrichmentsArg, forceDownload) = config
resolver <- parseResolver(resolverArg)
enrichmentRegistry <- parseEnrichmentRegistry(enrichmentsArg)(resolver, implicitly)
_ <- cacheFiles(enrichmentRegistry, forceDownload)
adapterRegistry = new AdapterRegistry(prepareRemoteAdapters(enrichConfig.remoteAdapters))
tracker = enrichConfig.monitoring.map(c => SnowplowTracking.initializeTracker(c.snowplow))
source <- getSource(enrichConfig.streams, resolver, adapterRegistry, enrichmentRegistry, tracker)
} yield (tracker, source)
trackerSource match {
case Failure(e) =>
System.err.println(s"An error occured: $e")
System.exit(1)
case Success((tracker, source)) =>
tracker.foreach(SnowplowTracking.initializeSnowplowTracking)
source.run()
}
}
/**
* Source of events
* @param streamsConfig configuration for the streams
* @param resolver iglu resolver
   * @param adapterRegistry registry of adapters used to convert payloads into raw events
   * @param enrichmentRegistry registry of enrichments
* @param tracker optional tracker
* @return a validated source, ready to be read from
*/
def getSource(
streamsConfig: StreamsConfig,
resolver: Resolver,
adapterRegistry: AdapterRegistry,
enrichmentRegistry: EnrichmentRegistry,
tracker: Option[Tracker]
): Validation[String, sources.Source]
/**
* Parses the configuration from cli arguments
* @param args cli arguments
* @return a validated tuple containing the parsed enrich configuration, the resolver argument,
* the optional enrichments argument and the force download flag
*/
def parseConfig(
args: Array[String]): \/[String, (EnrichConfig, String, Option[String], Boolean)] = {
implicit def hint[T] = ProductHint[T](ConfigFieldMapping(CamelCase, CamelCase))
implicit val sourceSinkConfigHint = new FieldCoproductHint[SourceSinkConfig]("enabled")
for {
parsedCliArgs <- \/.fromEither(
parser.parse(args, FileConfig()).toRight("Error while parsing command line arguments"))
unparsedConfig = utils.fold(Try(ConfigFactory.parseFile(parsedCliArgs.config).resolve()))(
t => t.getMessage.left,
c =>
(c, parsedCliArgs.resolver, parsedCliArgs.enrichmentsDir, parsedCliArgs.forceDownload).right
)
validatedConfig <- utils.filterOrElse(unparsedConfig)(
t => t._1.hasPath("enrich"),
"No top-level \"enrich\" could be found in the configuration")
(config, resolverArg, enrichmentsArg, forceDownload) = validatedConfig
parsedConfig <- utils
.toEither(Try(loadConfigOrThrow[EnrichConfig](config.getConfig("enrich"))))
.map(ec => (ec, resolverArg, enrichmentsArg, forceDownload))
.leftMap(_.getMessage)
} yield parsedConfig
}
/** Cli arguments parser */
def parser: scopt.OptionParser[FileConfig]
val localParser =
new scopt.OptionParser[FileConfig](generated.BuildInfo.name) with FileConfigOptions {
head(generated.BuildInfo.name, generated.BuildInfo.version)
help("help")
version("version")
configOption()
localResolverOption()
localEnrichmentsOption()
forceCachedFilesDownloadOption()
}
/**
* Retrieve and parse an iglu resolver from the corresponding cli argument value
* @param resolverArg location of the resolver as a cli argument
   * @param creds optionally necessary credentials to download the resolver
* @return a validated iglu resolver
*/
def parseResolver(resolverArg: String)(
implicit creds: Credentials): Validation[String, Resolver] =
for {
parsedResolver <- extractResolver(resolverArg)
json <- JsonUtils.extractJson("", parsedResolver)
resolver <- Resolver.parse(json).leftMap(_.toString)
} yield resolver
/**
* Return a JSON string based on the resolver argument
* @param resolverArg location of the resolver
* @param creds optionally necessary credentials to download the resolver
* @return JSON from a local file or stored in DynamoDB
*/
def extractResolver(resolverArg: String)(implicit creds: Credentials): Validation[String, String]
val localResolverExtractor = (resolverArgument: String) =>
resolverArgument match {
case FilepathRegex(filepath) =>
val file = new File(filepath)
if (file.exists) Source.fromFile(file).mkString.success
else "Iglu resolver configuration file \"%s\" does not exist".format(filepath).failure
case _ => s"Resolver argument [$resolverArgument] must match $regexMsg".failure
}
/**
* Retrieve and parse an enrichment registry from the corresponding cli argument value
* @param enrichmentsDirArg location of the enrichments directory as a cli argument
* @param resolver iglu resolver
* @param creds optionally necessary credentials to download the enrichments
* @return a validated enrichment registry
*/
def parseEnrichmentRegistry(enrichmentsDirArg: Option[String])(
implicit resolver: Resolver,
creds: Credentials): Validation[String, EnrichmentRegistry] =
for {
enrichmentConfig <- extractEnrichmentConfigs(enrichmentsDirArg)
registryConfig <- JsonUtils.extractJson("", enrichmentConfig)
reg <- EnrichmentRegistry.parse(fromJsonNode(registryConfig), false).leftMap(_.toString)
} yield reg
/**
* Return an enrichment configuration JSON based on the enrichments argument
* @param enrichmentArgument location of the enrichments directory
* @param creds optionally necessary credentials to download the enrichments
* @return JSON containing configuration for all enrichments
*/
def extractEnrichmentConfigs(enrichmentArgument: Option[String])(
implicit creds: Credentials): Validation[String, String]
val localEnrichmentConfigsExtractor = (enrichmentArgument: Option[String]) => {
val jsons: Validation[String, List[String]] = enrichmentArgument
.map {
case FilepathRegex(path) =>
new File(path).listFiles
.filter(_.getName.endsWith(".json"))
.map(scala.io.Source.fromFile(_).mkString)
.toList
.success
case other => s"Enrichments argument [$other] must match $regexMsg".failure
}
.getOrElse(Nil.success)
jsons.map { js =>
val combinedJson =
("schema" -> "iglu:com.snowplowanalytics.snowplow/enrichments/jsonschema/1-0-0") ~
("data" -> js.toList.map(parse(_)))
compact(combinedJson)
}
}
/**
* Download a file locally
* @param uri of the file to be downloaded
* @param targetFile local file to download to
* @param creds optionally necessary credentials to download the file
* @return the return code of the downloading command
*/
def download(uri: URI, targetFile: File)(implicit creds: Credentials): Validation[String, Int]
val httpDownloader = (uri: URI, targetFile: File) =>
uri.getScheme match {
case "http" | "https" => (uri.toURL #> targetFile).!.success
case s => s"Scheme $s for file $uri not supported".failure
}
/**
* Download the IP lookup files locally.
* @param registry Enrichment registry
* @param forceDownload CLI flag that invalidates the cached files on each startup
* @param creds optionally necessary credentials to cache the files
* @return a list of download command return codes
*/
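  // Illustrative only: a doubled separator in a hypothetical URI such as
  // "s3://some-bucket//maxmind//GeoLite2-City.mmdb" is normalised to
  // "s3://some-bucket/maxmind/GeoLite2-City.mmdb" before the download is attempted.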
def cacheFiles(
registry: EnrichmentRegistry,
forceDownload: Boolean
)(implicit creds: Credentials): ValidationNel[String, List[Int]] =
registry.filesToCache
.map { case (uri, path) =>
(new java.net.URI(uri.toString.replaceAll("(?<!(http:|https:|s3:))//", "/")),
new File(path))
}
.filter { case (_, targetFile) => forceDownload || targetFile.length == 0L }
.map {
case (cleanURI, targetFile) =>
download(cleanURI, targetFile).flatMap {
case i if i != 0 => s"Attempt to download $cleanURI to $targetFile failed".failure
case o => o.success
}.toValidationNel
}
.sequenceU
/**
* Sets up the Remote adapters for the ETL
* @param remoteAdaptersConfig List of configuration per remote adapter
   * @return Mapping of vendor-version pairs to the adapter assigned to each
*/
def prepareRemoteAdapters(remoteAdaptersConfig: Option[List[RemoteAdapterConfig]]) = {
remoteAdaptersConfig match {
case Some(configList) => configList.map { config =>
val adapter = new RemoteAdapter(
config.url,
config.connectionTimeout,
config.readTimeout
)
(config.vendor, config.version) -> adapter
}.toMap
case None => Map.empty[(String, String), RemoteAdapter]
}
}
}
|
AlexandraGYG/snowplow
|
3-enrich/beam-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich.beam/config.scala
|
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and
* limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package beam
import java.io.File
import scala.io.Source
// import order conflict with json4s
import scalaz._
import Scalaz._
import com.spotify.scio.Args
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import common.enrichments.EnrichmentRegistry
import common.utils.JsonUtils
import iglu.client.Resolver
object config {
/** Case class holding the raw job configuration */
final case class EnrichConfig(
jobName: String,
raw: String,
enriched: String,
bad: String,
pii: Option[String],
resolver: String,
enrichments: Option[String]
)
object EnrichConfig {
/** Smart constructor taking SCIO's [[Args]] */
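    // A sketch of the expected arguments; the values are hypothetical, the keys come from
    // `configurations` below:
    //   --job-name=beam-enrich --raw=projects/p/subscriptions/raw --enriched=projects/p/topics/enriched
    //   --bad=projects/p/topics/bad --resolver=gs://some-bucket/resolver.json
    // plus the optional --pii and --enrichments arguments.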
def apply(args: Args): Validation[String, EnrichConfig] = for {
_ <- if (args.optional("help").isDefined) helpString(configurations).failure else "".success
l <- configurations.collect {
case RequiredConfiguration(key, _) => args.optional(key).toSuccess(s"Missing `$key` argument").toValidationNel
}.sequenceU.leftMap(_.toList.mkString("\n"))
List(jobName, raw, enriched, bad, resolver) = l
} yield EnrichConfig(jobName, raw, enriched, bad, args.optional("pii"), resolver, args.optional("enrichments"))
private val configurations = List(
RequiredConfiguration("job-name", "Name of the Dataflow job that will be launched"),
RequiredConfiguration("raw", "Name of the subscription to the raw topic projects/{project}/subscriptions/{subscription}"),
RequiredConfiguration("enriched", "Name of the enriched topic projects/{project}/topics/{topic}"),
RequiredConfiguration("bad", "Name of the bad topic projects/{project}/topics/{topic}"),
OptionalConfiguration("pii", "Name of the pii topic projects/{project}/topics/{topic}"),
RequiredConfiguration("resolver", "Path to the resolver file"),
OptionalConfiguration("enrichments", "Path to the directory containing the enrichment files")
)
    /** Generates a help string from the list of configurations */
private def helpString(configs: List[Configuration]): String =
"Possible configuration are:\n" +
configs.map {
case OptionalConfiguration(key, desc) => s"--$key=VALUE, optional, $desc"
case RequiredConfiguration(key, desc) => s"--$key=VALUE, required, $desc"
}.mkString("\n") +
"\n--help, Display this message" +
"\nA full list of all the Beam CLI options can be found at: https://cloud.google.com/dataflow/pipelines/specifying-exec-params#setting-other-cloud-pipeline-options"
}
/** ADT for configuration parameters */
sealed trait Configuration {
def key: String
def desc: String
}
final case class OptionalConfiguration(key: String, desc: String) extends Configuration
final case class RequiredConfiguration(key: String, desc: String) extends Configuration
/** Case class holding the parsed job configuration */
final case class ParsedEnrichConfig(
raw: String,
enriched: String,
bad: String,
pii: Option[String],
resolver: JValue,
enrichmentRegistry: JObject
)
/**
* Parses a resolver at the specified path.
* @param resolverPath path where the resolver is located
* @return the parsed JValue if the parsing was successful
*/
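  // Illustrative only: parseResolver("/tmp/iglu_resolver.json") (hypothetical path) yields the parsed
  // resolver JSON on success, or an error message (missing file, invalid JSON, or failed resolver
  // validation) otherwise.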
def parseResolver(resolverPath: String): Validation[String, JValue] = for {
fileContent <- readResolverFile(resolverPath)
jsonNode <- JsonUtils.extractJson("", fileContent)
json = fromJsonNode(jsonNode)
_ <- Resolver.parse(json).leftMap(_.toList.mkString("\n"))
} yield json
  /** Reads a resolver file at the specified path. */
private def readResolverFile(path: String): Validation[String, String] = {
val file = new File(path)
if (file.exists) Source.fromFile(file).mkString.success
else s"Iglu resolver configuration file `$path` does not exist".failure
}
/**
* Parses an enrichment registry at the specified path.
* @param enrichmentsPath path where the enrichment directory is located
* @return the enrichment registry built from the enrichments found
*/
def parseEnrichmentRegistry(enrichmentsPath: Option[String])(
implicit resolver: Resolver): Validation[String, JObject] = for {
fileContents <- readEnrichmentFiles(enrichmentsPath)
jsons <- fileContents.map(JsonUtils.extractJson("", _)).sequenceU
combinedJson =
("schema" -> "iglu:com.snowplowanalytics.snowplow/enrichments/jsonschema/1-0-0") ~
("data" -> jsons.map(fromJsonNode))
_ <- EnrichmentRegistry.parse(combinedJson, false).leftMap(_.toList.mkString("\n"))
} yield combinedJson
/** Reads all the enrichment files contained in a directory at the specified path. */
private def readEnrichmentFiles(path: Option[String]): Validation[String, List[String]] =
path.map { p =>
for {
files <- Option(new File(p).listFiles)
.toSuccess(s"Enrichment directory `$p` does not exist")
read = files
.filter(_.getName.endsWith(".json"))
.map(Source.fromFile(_).mkString)
.toList
} yield read
}.getOrElse(Nil.success)
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/enrichments/MiscEnrichments.scala
|
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package enrichments
// Scalaz
import scalaz._
import Scalaz._
// json4s
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
// This project
import utils.{ConversionUtils => CU}
// Get our project settings
import generated.ProjectSettings
/**
* Miscellaneous enrichments which don't fit into
* one of the other modules.
*/
object MiscEnrichments {
/**
* The version of this ETL. Appends this version
* to the supplied "host" ETL.
*
* @param hostEtlVersion The version of the host ETL
* running this library
* @return the complete ETL version
*/
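  // Illustrative only: etlVersion("hadoop-1.8.0") (hypothetical host version) yields
  // "hadoop-1.8.0-common-" followed by ProjectSettings.version.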
def etlVersion(hostEtlVersion: String): String =
"%s-common-%s".format(hostEtlVersion, ProjectSettings.version)
  /**
   * Validate the specified platform.
   *
   * @param field The name of the field being processed
   * @param platform The code for the platform generating this event
   * @return a Scalaz ValidatedString
   */
val extractPlatform: (String, String) => ValidatedString = (field, platform) => {
platform match {
case "web" => "web".success // Web, including Mobile Web
case "iot" => "iot".success // Internet of Things (e.g. Arduino tracker)
case "app" => "app".success // General App
case "mob" => "mob".success // Mobile / Tablet
case "pc" => "pc".success // Desktop / Laptop / Netbook
case "cnsl" => "cnsl".success // Games Console
case "tv" => "tv".success // Connected TV
case "srv" => "srv".success // Server-side App
case p => "Field [%s]: [%s] is not a supported tracking platform".format(field, p).fail
}
}
/**
* Identity transform.
* Straight passthrough.
*/
val identity: (String, String) => ValidatedString = (field, value) => value.success
/**
* Make a String TSV safe
*/
val toTsvSafe: (String, String) => ValidatedString = (field, value) => CU.makeTsvSafe(value).success
/**
* The X-Forwarded-For header can contain a comma-separated list of IPs especially if it has
* gone through multiple load balancers.
* Here we retrieve the first one as it is supposed to be the client one, c.f.
* https://en.m.wikipedia.org/wiki/X-Forwarded-For#Format
*/
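  // Illustrative only: for a header value of "203.0.113.7, 198.51.100.17" (hypothetical addresses),
  // the first entry "203.0.113.7" is kept and made TSV-safe.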
val extractIp: (String, String) => ValidatedString = (field, value) => {
val lastIp = Option(value).map(_.split("[,|, ]").head).orNull
CU.makeTsvSafe(lastIp).success
}
/**
* Turn a list of custom contexts into a self-describing JSON
*
* @param derivedContexts
* @return Self-describing JSON of custom contexts
*/
def formatDerivedContexts(derivedContexts: List[JObject]): String =
compact(
render(
("schema" -> "iglu:com.snowplowanalytics.snowplow/contexts/jsonschema/1-0-1") ~
("data" -> JArray(derivedContexts))
))
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/enrichments/registry/UaParserEnrichmentSpec.scala
|
<reponame>AlexandraGYG/snowplow
/**Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package enrichments
package registry
// Specs2
import java.net.URI
import org.specs2.matcher.DataTables
import org.specs2.scalaz._
// Scalaz
// Json4s
import org.json4s._
import org.json4s.jackson.JsonMethods._
class UaParserEnrichmentSpec extends org.specs2.mutable.Specification with ValidationMatchers with DataTables {
val mobileSafariUserAgent =
"Mozilla/5.0 (iPhone; CPU iPhone OS 5_1_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B206 Safari/7534.48.3"
val mobileSafariJson =
"""{"schema":"iglu:com.snowplowanalytics.snowplow/ua_parser_context/jsonschema/1-0-0","data":{"useragentFamily":"Mobile Safari","useragentMajor":"5","useragentMinor":"1","useragentPatch":null,"useragentVersion":"Mobile Safari 5.1","osFamily":"iOS","osMajor":"5","osMinor":"1","osPatch":"1","osPatchMinor":null,"osVersion":"iOS 5.1.1","deviceFamily":"iPhone"}}"""
val safariUserAgent =
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10) AppleWebKit/600.1.25 (KHTML, like Gecko) Version/8.0 Safari/600.1.25"
val safariJson =
"""{"schema":"iglu:com.snowplowanalytics.snowplow/ua_parser_context/jsonschema/1-0-0","data":{"useragentFamily":"Safari","useragentMajor":"8","useragentMinor":"0","useragentPatch":null,"useragentVersion":"Safari 8.0","osFamily":"Mac OS X","osMajor":"10","osMinor":"10","osPatch":null,"osPatchMinor":null,"osVersion":"Mac OS X 10.10","deviceFamily":"Other"}}"""
// The URI is irrelevant here, but the local file name needs to point to our test resource
val testRulefile = getClass.getResource("uap-test-rules.yml").toURI.getPath
val customRules = (new URI("s3://private-bucket/files/uap-rules.yml"), testRulefile)
val testAgentJson =
"""{"schema":"iglu:com.snowplowanalytics.snowplow/ua_parser_context/jsonschema/1-0-0","data":{"useragentFamily":"UAP Test Family","useragentMajor":null,"useragentMinor":null,"useragentPatch":null,"useragentVersion":"UAP Test Family","osFamily":"UAP Test OS","osMajor":null,"osMinor":null,"osPatch":null,"osPatchMinor":null,"osVersion":"UAP Test OS","deviceFamily":"UAP Test Device"}}"""
"useragent parser enrichment" should {
"report files needing to be cached" in {
"Custom Rules" | "Cached Files" |
None !! List.empty |
Some(customRules) !! List(customRules) |> { (rules, cachedFiles) =>
{
UaParserEnrichment(rules).filesToCache must_== cachedFiles
}
}
}
}
"useragent parser" should {
"parse useragent according to configured rules" in {
"Custom Rules" | "Input UserAgent" | "Parsed UserAgent" |
None !! mobileSafariUserAgent !! mobileSafariJson |
None !! safariUserAgent !! safariJson |
Some(customRules) !! mobileSafariUserAgent !! testAgentJson |> { (rules, input, expected) =>
{
UaParserEnrichment(rules).extractUserAgent(input) must beSuccessful.like {
case a => compact(render(a)) must_== compact(render(parse(expected)))
}
}
}
}
}
val badRulefile = (new URI("s3://private-bucket/files/uap-rules.yml"), "NotAFile")
"useragent parser" should {
"report initialization error" in {
"Custom Rules" | "Input UserAgent" | "Parsed UserAgent" |
Some(badRulefile) !! mobileSafariUserAgent !! "Failed to initialize ua parser" |> {
(rules, input, errorPrefix) =>
{
UaParserEnrichment(rules).extractUserAgent(input) must beFailing.like {
case a => a must startWith(errorPrefix)
}
}
}
}
}
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/enrichments/registry/EventFingerprintEnrichment.scala
|
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package common
package enrichments
package registry
// Maven Artifact
import org.apache.maven.artifact.versioning.DefaultArtifactVersion
// Apache Commons
import org.apache.commons.codec.digest.DigestUtils
// Scalaz
import scalaz._
import Scalaz._
// json4s
import org.json4s._
import org.json4s.jackson.JsonMethods
// Iglu
import iglu.client.{SchemaCriterion, SchemaKey}
import iglu.client.validation.ProcessingMessageMethods._
// This project
import utils.ScalazJson4sUtils
/**
* Lets us create an EventFingerprintEnrichmentConfig from a JValue.
*/
object EventFingerprintEnrichmentConfig extends ParseableEnrichment {
implicit val formats = DefaultFormats
val supportedSchema =
SchemaCriterion("com.snowplowanalytics.snowplow", "event_fingerprint_config", "jsonschema", 1, 0)
/**
* Creates an EventFingerprintEnrichment instance from a JValue.
*
* @param config The enrichment JSON
* @param schemaKey The SchemaKey provided for the enrichment
* Must be a supported SchemaKey for this enrichment
* @return a configured EventFingerprintEnrichment instance
*/
def parse(config: JValue, schemaKey: SchemaKey): ValidatedNelMessage[EventFingerprintEnrichment] =
isParseable(config, schemaKey).flatMap(conf => {
(for {
excludedParameters <- ScalazJson4sUtils.extract[List[String]](config, "parameters", "excludeParameters")
algorithmName <- ScalazJson4sUtils.extract[String](config, "parameters", "hashAlgorithm")
algorithm <- getAlgorithm(algorithmName)
} yield EventFingerprintEnrichment(algorithm, excludedParameters)).toValidationNel
})
/**
* Look up the fingerprinting algorithm by name
*
* @param algorithmName Name of the fingerprinting algorithm (currently only "MD5" is supported)
* @return A hashing algorithm
*/
private[registry] def getAlgorithm(algorithmName: String): ValidatedMessage[String => String] = algorithmName match {
case "MD5" => ((s: String) => DigestUtils.md5Hex(s)).success
case other => s"[$other] is not a supported event fingerprint generation algorithm".toProcessingMessage.fail
}
}
/**
* Companion object
*/
object EventFingerprintEnrichment {
private val UnitSeparator = "\u001f"
}
/**
* Config for an event fingerprint enrichment
*
* @param algorithm Hashing algorithm
* @param excludedParameters List of querystring parameters to exclude from the calculation
*/
case class EventFingerprintEnrichment(algorithm: String => String, excludedParameters: List[String])
extends Enrichment {
/**
* Calculate an event fingerprint using all querystring fields except the excludedParameters
*
* @param parameterMap Map of event querystring parameter names to values
* @return Event fingerprint
*/
def getEventFingerprint(parameterMap: Map[String, String]): String = {
val builder = new StringBuilder
parameterMap.toList.sortWith(_._1 < _._1).foreach {
case (key, value) =>
if (!excludedParameters.contains(key)) {
builder.append(key)
builder.append(EventFingerprintEnrichment.UnitSeparator)
builder.append(value)
builder.append(EventFingerprintEnrichment.UnitSeparator)
}
}
algorithm(builder.toString)
}
}
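// A minimal usage sketch (the parameter names and excluded field are hypothetical, not taken
// from this file): with the MD5 algorithm and "eid" excluded, only the remaining parameters,
// sorted by key and each key and value followed by the unit separator, are hashed:
//
//   val enrichment = EventFingerprintEnrichment((s: String) => DigestUtils.md5Hex(s), List("eid"))
//   enrichment.getEventFingerprint(Map("eid" -> "123", "e" -> "pv"))
//   // equivalent to DigestUtils.md5Hex("e\u001fpv\u001f")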
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/project/Dependencies.scala
|
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
// =======================================================
// scalafmt: {align.tokens = ["%", "%%"]}
// =======================================================
import sbt._
object Dependencies {
val resolutionRepos = Seq(
// Required for our json4s snapshot
"Sonatype Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots/",
// For some misc Scalding and Twitter libs
"Concurrent Maven Repo" at "http://conjars.org/repo",
// For Twitter's util functions
"Twitter Maven Repo" at "http://maven.twttr.com/",
// For Snowplow libs
"Snowplow Analytics Maven repo" at "http://maven.snplow.com/releases/",
"Snowplow Analytics Maven snapshot repo" at "http://maven.snplow.com/snapshots/",
// For uaParser utils
"user-agent-parser repo" at "https://clojars.org/repo/"
)
object V {
// Java
val http = "4.3.3"
val commonsLang = "3.4"
val commonsIo = "2.4"
val commonsCodec = "1.11"
val yodaTime = "2.2"
val yodaConvert = "1.2"
val useragent = "1.21"
val jacksonDatabind = "2.2.3"
val jsonValidator = "2.2.3"
val mavenArtifact = "3.2.2"
val uaParser = "1.4.0"
val postgresDriver = "9.4.1208.jre7"
val mysqlConnector = "5.1.39"
val jaywayJsonpath = "2.4.0"
val iabClient = "0.1.0"
val yauaa = "5.8"
val kryo = "2.24.0"
// Scala
val scalaz7 = "7.0.9"
val snowplowRawEvent = "0.1.0"
val collectorPayload = "0.0.0"
val schemaSniffer = "0.0.0"
val refererParser = "0.3.0"
val maxmindIplookups = "0.4.0"
val json4s = "3.2.11"
val igluClient = "0.5.0"
val scalaForex = "0.5.0"
val scalaWeather = "0.3.0"
val scalaj = "2.3.0"
val gatlingJsonpath = "0.6.4"
val scalaUri = "1.4.3"
// Scala (test only)
val specs2 = "2.3.13"
val scalazSpecs2 = "0.2"
val scalaCheck = "1.10.0"
val scaldingArgs = "0.13.0"
val mockito = "1.10.19"
}
object Libraries {
// Java
val httpClient = "org.apache.httpcomponents" % "httpclient" % V.http
val commonsLang = "org.apache.commons" % "commons-lang3" % V.commonsLang
val commonsIo = "commons-io" % "commons-io" % V.commonsIo
val commonsCodec = "commons-codec" % "commons-codec" % V.commonsCodec
val yodaTime = "joda-time" % "joda-time" % V.yodaTime
val yodaConvert = "org.joda" % "joda-convert" % V.yodaConvert
val useragent = "eu.bitwalker" % "UserAgentUtils" % V.useragent
val jacksonDatabind = "com.fasterxml.jackson.core" % "jackson-databind" % V.jacksonDatabind
val jsonValidator = "com.github.fge" % "json-schema-validator" % V.jsonValidator
val mavenArtifact = "org.apache.maven" % "maven-artifact" % V.mavenArtifact
val uaParser = "com.github.ua-parser" % "uap-java" % V.uaParser
val postgresDriver = "org.postgresql" % "postgresql" % V.postgresDriver
val mysqlConnector = "mysql" % "mysql-connector-java" % V.mysqlConnector
val jaywayJsonpath = "com.jayway.jsonpath" % "json-path" % V.jaywayJsonpath
val iabClient = "com.snowplowanalytics" % "iab-spiders-and-robots-client" % V.iabClient
val yauaa = "nl.basjes.parse.useragent" % "yauaa" % V.yauaa
val kryo = "com.esotericsoftware.kryo" % "kryo" % V.kryo
// Scala
val scalaForex = "com.snowplowanalytics" %% "scala-forex" % V.scalaForex
val scalaz7 = "org.scalaz" %% "scalaz-core" % V.scalaz7
val snowplowRawEvent = "com.snowplowanalytics" % "snowplow-thrift-raw-event" % V.snowplowRawEvent
val collectorPayload = "com.snowplowanalytics" % "collector-payload-1" % V.collectorPayload
val schemaSniffer = "com.snowplowanalytics" % "schema-sniffer-1" % V.schemaSniffer
val refererParser = "com.snowplowanalytics" %% "referer-parser" % V.refererParser
val maxmindIplookups = "com.snowplowanalytics" %% "scala-maxmind-iplookups" % V.maxmindIplookups
val json4sJackson = "org.json4s" %% "json4s-jackson" % V.json4s
val json4sScalaz = "org.json4s" %% "json4s-scalaz" % V.json4s
val igluClient = "com.snowplowanalytics" %% "iglu-scala-client" % V.igluClient
val scalaUri = "io.lemonlabs" %% "scala-uri" % V.scalaUri
val scalaWeather = "com.snowplowanalytics" %% "scala-weather" % V.scalaWeather
val scalaj = "org.scalaj" %% "scalaj-http" % V.scalaj
val gatlingJsonpath = "io.gatling" %% "jsonpath" % V.gatlingJsonpath
// Scala (test only)
val specs2 = "org.specs2" %% "specs2" % V.specs2 % "test"
val scalazSpecs2 = "org.typelevel" %% "scalaz-specs2" % V.scalazSpecs2 % "test"
val scalaCheck = "org.scalacheck" %% "scalacheck" % V.scalaCheck % "test"
val scaldingArgs = "com.twitter" %% "scalding-args" % V.scaldingArgs % "test"
val mockito = "org.mockito" % "mockito-core" % V.mockito % "test"
}
}
|
AlexandraGYG/snowplow
|
2-collectors/scala-stream-collector/core/src/main/scala/com.snowplowanalytics.snowplow.collectors.scalastream/model.scala
|
<reponame>AlexandraGYG/snowplow
/*
* Copyright (c) 2013-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0, and
* you may not use this file except in compliance with the Apache License
* Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the Apache License Version 2.0 is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.collectors.scalastream
import scala.concurrent.duration.FiniteDuration
import akka.http.scaladsl.model.headers.HttpCookiePair
import sinks.Sink
package model {
/**
* Case class for holding both good and
* bad sinks for the Stream Collector.
*/
case class CollectorSinks(good: Sink, bad: Sink)
/**
* Case class for holding the results of
* splitAndSerializePayload.
*
* @param good All good results
* @param bad All bad results
*/
case class EventSerializeResult(good: List[Array[Byte]], bad: List[Array[Byte]])
/**
* Class for the result of splitting a too-large array of events in the body of a POST request
*
* @param goodBatches List of batches of events
* @param failedBigEvents List of events that were too large
*/
case class SplitBatchResult(goodBatches: List[List[String]], failedBigEvents: List[String])
final case class CookieConfig(
enabled: Boolean,
name: String,
expiration: FiniteDuration,
domain: Option[String]
)
final case class DoNotTrackCookieConfig(
enabled: Boolean,
name: String,
value: String
)
final case class DntCookieMatcher(name: String, value: String) {
private val pattern = value.r.pattern
def matches(httpCookiePair: HttpCookiePair): Boolean = pattern.matcher(httpCookiePair.value).matches()
}
final case class CookieBounceConfig(
enabled: Boolean,
name: String,
fallbackNetworkUserId: String,
forwardedProtocolHeader: Option[String]
)
final case class RedirectMacroConfig(
enabled: Boolean,
placeholder: Option[String]
)
final case class RootResponseConfig(
enabled: Boolean,
statusCode: Int,
headers: Map[String, String] = Map.empty[String, String],
body: String = ""
)
final case class P3PConfig(policyRef: String, CP: String)
final case class CrossDomainConfig(enabled: Boolean, domains: List[String], secure: Boolean)
final case class CORSConfig(accessControlMaxAge: FiniteDuration)
final case class KinesisBackoffPolicyConfig(minBackoff: Long, maxBackoff: Long)
final case class GooglePubSubBackoffPolicyConfig(
minBackoff: Long,
maxBackoff: Long,
totalBackoff: Long,
multiplier: Double
)
sealed trait SinkConfig
final case class AWSConfig(accessKey: String, secretKey: String)
final case class Kinesis(
region: String,
threadPoolSize: Int,
aws: AWSConfig,
backoffPolicy: KinesisBackoffPolicyConfig,
customEndpoint: Option[String]
) extends SinkConfig {
val endpoint = customEndpoint.getOrElse(region match {
case cn@"cn-north-1" => s"https://kinesis.$cn.amazonaws.com.cn"
case _ => s"https://kinesis.$region.amazonaws.com"
})
}
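// Illustrative endpoint resolution (region and credential values are examples only): with no
// customEndpoint the endpoint is derived from the region, with a China-specific suffix for
// cn-north-1, and a Some(customEndpoint) always takes precedence over the derived value:
//
//   Kinesis("eu-west-1", 10, AWSConfig("key", "secret"),
//     KinesisBackoffPolicyConfig(1000L, 10000L), None).endpoint
//   // == "https://kinesis.eu-west-1.amazonaws.com"
//   // region "cn-north-1" would yield "https://kinesis.cn-north-1.amazonaws.com.cn"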
final case class GooglePubSub(
googleProjectId: String,
backoffPolicy: GooglePubSubBackoffPolicyConfig
) extends SinkConfig
final case class Kafka(
brokers: String,
retries: Int,
producerConf: Option[Map[String,String]]
) extends SinkConfig
final case class Nsq(host: String, port: Int) extends SinkConfig
case object Stdout extends SinkConfig
final case class BufferConfig(byteLimit: Long, recordLimit: Long, timeLimit: Long)
final case class StreamsConfig(
good: String,
bad: String,
useIpAddressAsPartitionKey: Boolean,
sink: SinkConfig,
buffer: BufferConfig
)
final case class PrometheusMetricsConfig(
enabled: Boolean,
durationBucketsInSeconds: Option[List[Double]]
)
final case class CollectorConfig(
interface: String,
port: Int,
p3p: P3PConfig,
crossDomain: CrossDomainConfig,
cookie: CookieConfig,
doNotTrackCookie: DoNotTrackCookieConfig,
cookieBounce: CookieBounceConfig,
redirectMacro: RedirectMacroConfig,
rootResponse: RootResponseConfig,
cors: CORSConfig,
streams: StreamsConfig,
prometheusMetrics: PrometheusMetricsConfig
) {
val cookieConfig = if (cookie.enabled) Some(cookie) else None
val doNotTrackHttpCookie =
if (doNotTrackCookie.enabled)
Some(DntCookieMatcher(name = doNotTrackCookie.name, value = doNotTrackCookie.value))
else
None
def cookieName = cookieConfig.map(_.name)
def cookieDomain = cookieConfig.flatMap(_.domain)
def cookieExpiration = cookieConfig.map(_.expiration)
}
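// Illustrative behaviour of the derived values (a sketch, cookie name and pattern are
// hypothetical): when cookie.enabled is false, cookieConfig is None, so cookieName,
// cookieDomain and cookieExpiration are all None; when doNotTrackCookie.enabled is true, the
// configured value is used as a regex pattern by DntCookieMatcher:
//
//   DntCookieMatcher(name = "sp-dnt", value = "true|1").matches(HttpCookiePair("sp-dnt", "1"))
//   // == true, since the whole cookie value matches the pattern "true|1"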
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/utils/JsonPathSpec.scala
|
<reponame>AlexandraGYG/snowplow
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common.utils
// specs2
import org.specs2.Specification
import org.specs2.scalaz.ValidationMatchers
// json4s
import org.json4s._
import org.json4s.jackson.parseJson
class JsonPathSpec extends Specification with ValidationMatchers {
def is = s2"""
This is a specification to test the JSONPath utils
Test JSONPath query $e1
Test query of non-exist value $e2
Test query of empty array $e3
Test primitive JSON type (JString) $e6
Invalid JSONPath (JQ syntax) must fail $e4
Invalid JSONPath must fail $e5
JNothing must fail $e7
"""
val someJson = parseJson("""
|{ "store": {
| "book": [
| { "category": "reference",
| "author": "<NAME>",
| "title": "Sayings of the Century",
| "price": 8.95
| },
| { "category": "fiction",
| "author": "<NAME>",
| "title": "Sword of Honour",
| "price": 12.99
| },
| { "category": "fiction",
| "author": "<NAME>",
| "title": "<NAME>",
| "isbn": "0-553-21311-3",
| "price": 8.99
| },
| { "category": "fiction",
| "author": "<NAME>",
| "title": "The Lord of the Rings",
| "isbn": "0-395-19395-8",
| "price": 22.99
| }
| ],
| "bicycle": {
| "color": "red",
| "price": 19.95
| },
| "unicorns": []
| }
|}
""".stripMargin)
def e1 =
JsonPath.query("$.store.book[1].price", someJson) must beSuccessful(List(JDouble(12.99)))
def e2 =
JsonPath.query("$.store.book[5].price", someJson) must beSuccessful(Nil)
def e3 =
JsonPath.query("$.store.unicorns", someJson) must beSuccessful(List(JArray(Nil)))
def e4 =
JsonPath.query(".notJsonPath", someJson) must beFailing.like {
case f => f must beEqualTo("`$' expected but `.' found")
}
def e5 =
JsonPath.query("$.store.book[a]", someJson) must beFailing.like {
case f => f must beEqualTo("`:' expected but `a' found")
}
def e6 =
JsonPath.query("$.store.book[2]", JString("somestring")) must beSuccessful(List())
def e7 =
JsonPath.query("$..", JNothing) must beFailing.like {
case f => f must beEqualTo("JSONPath error: Nothing was given")
}
}
|
AlexandraGYG/snowplow
|
3-enrich/spark-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.spark/good/CljTomcatTp2MegaEventsSpec.scala
|
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and
* limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.spark
package good
import scala.io.Source
import org.specs2.mutable.Specification
object CljTomcatTp2MegaEventsSpec {
val lines = {
val file = getClass.getResource("CljTomcatTp2MegaEventsSpec.line").getFile
val source = Source.fromFile(file)
val line = source.mkString
source.close()
EnrichJobSpec.Lines(line)
}
}
class CljTomcatTp2MegaEventsSpec extends Specification with EnrichJobSpec {
import EnrichJobSpec._
override def appName = "clj-tomcat-tp2-mega-events"
sequential
"A job which processes a Clojure-Tomcat file containing a POST raw event representing 7,500 " +
"valid events" should {
runEnrichJob(CljTomcatTp2MegaEventsSpec.lines, "clj-tomcat", "2", true, List("geo"))
"correctly output 7,500 events" in {
val Some(goods) = readPartFile(dirs.output)
goods.size must_== 7500
}
"not write any bad rows" in {
dirs.badRows must beEmptyDir
}
}
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/enrichments/registry/WeatherEnrichmentSpec.scala
|
/** Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package enrichments
package registry
// Java
import java.lang.{Float => JFloat}
// Specs2
import org.specs2.Specification
// Joda
import org.joda.time.DateTime
// Json4s
import org.json4s._
import org.json4s.jackson.JsonMethods.parse
// Snowplow
import com.snowplowanalytics.iglu.client.SchemaKey
// Scala weather
import com.snowplowanalytics.weather._
object WeatherEnrichmentSpec {
val OwmApiKey = "OWM_KEY"
}
import WeatherEnrichmentSpec._
class WeatherEnrichmentSpec extends Specification {
def is =
skipAllIf(sys.env.get(OwmApiKey).isEmpty) ^ // Actually only e4 and e6 need to be skipped
s2"""
This is a specification to test the WeatherEnrichment
Fail event for null time $e1
Fail event for invalid key $e4
Weather enrichment client is lazy $e2
Extract weather stamp $e3
Extract humidity $e5
Extract configuration $e6
Check time stamp transformation $e7
"""
lazy val validAppKey = sys.env
.get(OwmApiKey)
.getOrElse(
throw new IllegalStateException(s"No ${OwmApiKey} environment variable found, test should have been skipped"))
object invalidEvent {
var lat: JFloat = 70.98224f
var lon: JFloat = 70.98224f
var time: DateTime = null
}
object validEvent {
var lat: JFloat = 20.713052f
var lon: JFloat = 70.98224f
var time: DateTime = new DateTime("2019-04-30T23:56:01.003+00:00")
}
def e1 = {
val enr = WeatherEnrichment("KEY", 5200, 1, "history.openweathermap.org", 10)
val stamp = enr.getWeatherContext(Option(invalidEvent.lat), Option(invalidEvent.lon), Option(invalidEvent.time))
stamp.toEither must beLeft.like { case e => e must contain("tstamp: None") }
}
def e2 = WeatherEnrichment("KEY", 0, 1, "history.openweathermap.org", 5) must not(throwA[IllegalArgumentException])
def e3 = {
val enr = WeatherEnrichment(validAppKey, 5200, 1, "history.openweathermap.org", 10)
val stamp = enr.getWeatherContext(Option(validEvent.lat), Option(validEvent.lon), Option(validEvent.time))
stamp.toEither must beRight
}
def e4 = {
val enr = WeatherEnrichment("KEY", 5200, 1, "history.openweathermap.org", 10)
val stamp = enr.getWeatherContext(Option(validEvent.lat), Option(validEvent.lon), Option(validEvent.time))
stamp.toEither must beLeft.like { case e => e must contain("AuthorizationError") }
}
def e5 = {
val enr = WeatherEnrichment(validAppKey, 5200, 1, "history.openweathermap.org", 15)
val stamp = enr.getWeatherContext(Option(validEvent.lat), Option(validEvent.lon), Option(validEvent.time))
stamp.toEither must beRight.like {
case weather: JValue => {
val temp = weather.findField { case JField("humidity", _) => true; case _ => false }
temp must beSome(("humidity", JDouble(87.0)))
}
}
}
def e6 = {
val configJson = parse("""
|{
| "enabled": true,
| "vendor": "com.snowplowanalytics.snowplow.enrichments",
| "name": "weather_enrichment_config",
| "parameters": {
| "apiKey": "{{KEY}}",
| "cacheSize": 5100,
| "geoPrecision": 1,
| "apiHost": "history.openweathermap.org",
| "timeout": 5
| }
|}
""".stripMargin)
val config = WeatherEnrichmentConfig.parse(
configJson,
SchemaKey("com.snowplowanalytics.snowplow.enrichments", "weather_enrichment_config", "jsonschema", "1-0-0"))
config.toEither must beRight(
WeatherEnrichment(apiKey = "{{KEY}}",
geoPrecision = 1,
cacheSize = 5100,
apiHost = "history.openweathermap.org",
timeout = 5))
}
def e7 = {
implicit val formats = DefaultFormats
val enr = WeatherEnrichment(validAppKey, 2, 1, "history.openweathermap.org", 15)
val stamp = enr.getWeatherContext(Option(validEvent.lat), Option(validEvent.lon), Option(validEvent.time))
stamp.toEither must beRight.like { // successful request
case weather: JValue => {
val e = (weather \ "data").extractOpt[TransformedWeather]
e.map(_.dt) must beSome.like { // successful transformation
case dt => dt must equalTo("2019-05-01T00:00:00.000Z") // closest timestamp stored on the server
}
}
}
}
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/enrichments/registry/YauaaEnrichment.scala
|
<reponame>AlexandraGYG/snowplow<gh_stars>1-10
/**
* Copyright (c) 2019-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package common
package enrichments.registry
// Json4s
import org.json4s.{DefaultFormats, Extraction, JObject, JValue}
import org.json4s.JsonDSL._
import com.snowplowanalytics.snowplow.enrich.common.utils.ScalazJson4sUtils
// Scalaz
import scalaz._
import Scalaz._
// Iglu
import iglu.client.SchemaKey
import iglu.client.SchemaCriterion
// Yauaa
import nl.basjes.parse.useragent.UserAgent
import nl.basjes.parse.useragent.UserAgentAnalyzer
// Scala
import scala.collection.JavaConverters._
/** Companion object to create an instance of YauaaEnrichment from the configuration. */
object YauaaEnrichment extends ParseableEnrichment {
implicit val formats = DefaultFormats
val supportedSchema =
SchemaCriterion("com.snowplowanalytics.snowplow.enrichments", "yauaa_enrichment_config", "jsonschema", 1, 0)
/**
* Creates a YauaaEnrichment instance from a JValue containing the configuration of the enrichment.
*
* @param config JSON containing configuration for YAUAA enrichment.
* @param schemaKey SchemaKey provided for this enrichment.
* Must be a supported SchemaKey for this enrichment.
* @return Configured YauaaEnrichment instance
*/
def parse(config: JValue, schemaKey: SchemaKey): ValidatedNelMessage[YauaaEnrichment] =
isParseable(config, schemaKey).flatMap { _ =>
val maybeCacheSize = ScalazJson4sUtils.extract[Int](config, "parameters", "cacheSize").toOption
YauaaEnrichment(maybeCacheSize).success
}
/** Helper to decapitalize a string. Used for the names of the fields returned in the context. */
def decapitalize(s: String): String = s match {
case _ if s.isEmpty => s
case _ if s.length == 1 => s.toLowerCase
case _ => s.charAt(0).toLower + s.substring(1)
}
}
/**
* Class for YAUAA enrichment, which tries to parse and analyze the user agent string
* and extract as many relevant attributes as possible, like for example the device class.
*
* @param cacheSize Amount of user agents already parsed that stay in cache for faster parsing.
*/
final case class YauaaEnrichment(cacheSize: Option[Int]) extends Enrichment {
import YauaaEnrichment.decapitalize
private val uaa: UserAgentAnalyzer = {
val a = UserAgentAnalyzer
.newBuilder()
.build()
cacheSize.foreach(a.setCacheSize)
a
}
// For unit testing
private[registry] def getCacheSize = uaa.getCacheSize
private implicit val formats = DefaultFormats
val contextSchema = "iglu:nl.basjes/yauaa_context/jsonschema/1-0-0"
val defaultDeviceClass = "UNKNOWN"
val defaultResult = Map(decapitalize(UserAgent.DEVICE_CLASS) -> defaultDeviceClass)
/**
* Gets the result of YAUAA user agent analysis as self-describing JSON, for a specific event.
* Any non-fatal error will return failure.
*
* @param userAgent User agent of the event.
* @return Attributes retrieved thanks to the user agent (if any), as self-describing JSON.
*/
def getYauaaContext(userAgent: String): Validation[String, JObject] = {
val parsed = parseUserAgent(userAgent)
Extraction.decompose(parsed) match {
case obj: JObject => addSchema(obj).success
case _ => s"Couldn't transform YAUAA fields [$parsed] into JSON".failure
}
}
/** Gets the map of attributes retrieved by YAUAA from the user agent.
* @return Map with all the fields extracted by YAUAA by parsing the user agent.
* If the input is null or empty, a map with just the DeviceClass set to UNKNOWN is returned.
*/
def parseUserAgent(userAgent: String): Map[String, String] =
userAgent match {
case null | "" =>
defaultResult
case _ =>
val parsedUA = uaa.parse(userAgent)
parsedUA.getAvailableFieldNames.asScala
.map(field => decapitalize(field) -> parsedUA.getValue(field))
.toMap
}
/**
* Add schema URI on Iglu to JSON Object
*
* @param context Yauaa context as JSON Object
* @return Self-describing JSON with the result of YAUAA enrichment.
*/
private def addSchema(context: JObject): JObject =
("schema", contextSchema) ~ (("data", context))
}
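// A minimal sketch of the expected output shape (the cache size is an example value, and this
// assumes UserAgent.DEVICE_CLASS is the string "DeviceClass"): a null or empty user agent
// yields the default result, while getYauaaContext wraps the decapitalized YAUAA fields in the
// self-describing context schema:
//
//   YauaaEnrichment(cacheSize = Some(1000)).parseUserAgent("")
//   // == Map("deviceClass" -> "UNKNOWN")
//   // getYauaaContext("") would wrap that map as
//   // {"schema": "iglu:nl.basjes/yauaa_context/jsonschema/1-0-0", "data": {...}}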
|
AlexandraGYG/snowplow
|
3-enrich/spark-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.spark/good/NdjsonUrbanAirshipSpec.scala
|
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and
* limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.spark
package good
import scala.collection.mutable.{ArrayBuffer, Buffer, ListBuffer}
import org.specs2.mutable.Specification
import org.json4s._
import org.json4s.jackson.JsonMethods._
object NdjsonUrbanAirshipSingleEvent {
import EnrichJobSpec._
val lines = Lines(
compact(
parse("""
|{
| "id": "e3314efb-9058-dbaf-c4bb-b754fca73613",
| "offset": "1",
| "occurred": "2015-11-13T16:31:52.393Z",
| "processed": "2015-11-13T16:31:52.393Z",
| "device": {
| "amazon_channel": "cd97c95c-ed77-f15a-3a67-5c2e26799d35"
| },
| "body": {
| "session_id": "27c75cab-a0b8-9da2-bc07-6d7253e0e13f"
| },
| "type": "CLOSE"
|}
|""".stripMargin)
))
val expected = List(
null,
"srv",
etlTimestamp,
"2015-11-13 16:31:52.393",
null,
"unstruct",
null, // We can't predict the event_id
null,
null, // No tracker namespace
"com.urbanairship.connect-v1",
"ndjson",
etlVersion,
null, // No user_id set
null, // ip address not available
null, // no fingerprint
null, // no domain userid
null, // no session index
null, // no network userid
null, // No geo-location for this IP address
null,
null,
null,
null,
null,
null,
null, // No additional MaxMind databases used
null,
null,
null,
null, // no page_url
null, // no page_title
null, // no page_referrer
null, // no page_urlscheme
null, // no page_urlhost
null, // no page_urlport
null, // no page_urlpath
null,
null,
null, // no refr_urlscheme
null, // no refr_urlhost
null, // no refr_urlport
null, // no refr_urlpath
null, // no refr_urlquery
null,
null, // no refr_medium
null, // no refr_source
null,
null, // Marketing campaign fields empty
null, //
null, //
null, //
null, //
null, // No custom contexts
null, // Structured event fields empty
null, //
null, //
null, //
null, //
compact(parse("""|{
| "schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
| "data":{
| "schema":"iglu:com.urbanairship.connect/CLOSE/jsonschema/1-0-0",
| "data":{
| "id": "e3314efb-9058-dbaf-c4bb-b754fca73613",
| "offset": "1",
| "occurred": "2015-11-13T16:31:52.393Z",
| "processed": "2015-11-13T16:31:52.393Z",
| "device": {
| "amazon_channel": "cd97c95c-ed77-f15a-3a67-5c2e26799d35"
| },
| "body": {
| "session_id": "27c75cab-a0b8-9da2-bc07-6d7253e0e13f"
| },
| "type": "CLOSE"
| }
| }
|}
""".stripMargin)),
null, // Transaction fields empty
null, //
null, //
null, //
null, //
null, //
null, //
null, //
null, // Transaction item fields empty
null, //
null, //
null, //
null, //
null, //
null, // Page ping fields empty
null, //
null, //
null, //
null, // no useragent
null, // no br_name
null, // no br_family
null, // no br_version
null, // no br_type
null, // no br_renderengine
null, // no br_lang
null, // br_features_pdf not relevant
null, // br_features_flash
null, // br_features_java
null, // br_features_director
null, // br_features_quicktime
null, // br_features_realplayer
null, // br_features_windowsmedia
null, // br_features_gears
null, // br_features_silverlight
null, // br_cookies
null, // br_colordepth
null, // br_viewwidth
null, // br_viewheight
null, // os_name
null, // os_family
null, // os_manufacturer
null, // os_timezone
null, // dvce_type
null, // dvce_ismobile
null, // dvce_screenwidth
null, // dvce_screenheight
null, // doc_charset
null, // doc_width
null // doc_height
)
}
/** Multiple events and expected data */
object NdjsonUrbanAirshipMultiEvent {
import EnrichJobSpec._
val sampleLine = compact(
parse(
"""
|{
| "id": "e3314efb-9058-dbaf-c4bb-b754fca73613",
| "offset": "1",
| "occurred": "2015-11-13T16:31:52.393Z",
| "processed": "2015-11-13T16:31:52.393Z",
| "device": {
| "amazon_channel": "cd97c95c-ed77-f15a-3a67-5c2e26799d35"
| },
| "body": {
| "session_id": "27c75cab-a0b8-9da2-bc07-6d7253e0e13f"
| },
| "type": "CLOSE"
|}
| """.stripMargin
)
)
val sampleLineResponse = compact(
parse(
"""|{
| "schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
| "data":{
| "schema":"iglu:com.urbanairship.connect/CLOSE/jsonschema/1-0-0",
| "data":{
| "id": "e3314efb-9058-dbaf-c4bb-b754fca73613",
| "offset": "1",
| "occurred": "2015-11-13T16:31:52.393Z",
| "processed": "2015-11-13T16:31:52.393Z",
| "device": {
| "amazon_channel": "cd97c95c-ed77-f15a-3a67-5c2e26799d35"
| },
| "body": {
| "session_id": "27c75cab-a0b8-9da2-bc07-6d7253e0e13f"
| },
| "type": "CLOSE"
| }
| }
|}
""".stripMargin
)
)
val sampleBlank = "\r\n"
val sampleInAppResolutionEvent = compact(
parse(
"""{
| "id": "86604c72-4b29-5501-200a-4dc965738baf",
| "offset": "137",
| "occurred": "2015-11-13T16:34:08.394Z",
| "processed": "2015-11-13T16:31:52.393Z",
| "device": {
| "ios_channel": "3c58b101-6508-b0d6-8d3c-e5e87b75b193",
| "named_user_id": "3786888c-1fb9-a5b4-15db-d6a049333081"
| },
| "body": {
| "push_id": "cc978f41-4494-2836-8672-b9fb5c9de2e4",
| "type": "USER_DISMISSED",
| "duration": 9738
| },
| "type": "IN_APP_MESSAGE_RESOLUTION"
|}""".stripMargin
)
)
val sampleInAppResolutionEventResponse = compact(
parse(
"""{
| "schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
| "data":{
| "schema":"iglu:com.urbanairship.connect/IN_APP_MESSAGE_RESOLUTION/jsonschema/1-0-0",
| "data":{
| "id": "86604c72-4b29-5501-200a-4dc965738baf",
| "offset": "137",
| "occurred": "2015-11-13T16:34:08.394Z",
| "processed": "2015-11-13T16:31:52.393Z",
| "device": {
| "ios_channel": "3c58b101-6508-b0d6-8d3c-e5e87b75b193",
| "named_user_id": "3786888c-1fb9-a5b4-15db-d6a049333081"
| },
| "body": {
| "push_id": "cc978f41-4494-2836-8672-b9fb5c9de2e4",
| "type": "USER_DISMISSED",
| "duration": 9738
| },
| "type": "IN_APP_MESSAGE_RESOLUTION"
| }
| }
|}""".stripMargin
)
)
val eventSource = "srv"
val collectorTstamp = "2015-11-13 16:31:52.393"
val eventType = "unstruct"
val adapter = "com.urbanairship.connect-v1"
val loaderType = "ndjson"
val expectedBase = {
val r = ArrayBuffer.fill(NdjsonUrbanAirshipSingleEvent.expected.size)(null: String)
r(1) = eventSource
r(2) = etlTimestamp
r(3) = collectorTstamp
r(5) = eventType
r(9) = adapter
r(10) = loaderType
r(11) = etlVersion
r.toList
}
val lines = Lines(sampleLine, sampleBlank, sampleInAppResolutionEvent, sampleBlank, sampleBlank) // the blanks should be ignored
val expectedJsonOutputIdx = 58 // position of unstruct event json in list
val expected = List(
expectedBase.updated(expectedJsonOutputIdx, sampleLineResponse),
expectedBase.updated(expectedJsonOutputIdx, sampleInAppResolutionEventResponse))
}
/** Check that all NDJSON lines are loaded and run through with the urbanairship adapter */
class NdjsonUrbanAirshipSingleSpec extends Specification with EnrichJobSpec {
import EnrichJobSpec._
override def appName = "ndjson-urban-airship-single"
sequential
"A job which processes a NDJSON file with one event" should {
runEnrichJob(
NdjsonUrbanAirshipSingleEvent.lines,
"ndjson/com.urbanairship.connect/v1",
"2",
true,
List("geo"))
"correctly output 1 event" in {
val Some(goods) = readPartFile(dirs.output)
goods.size must_== 1
val actual = goods.head.split("\t").map(s => if (s.isEmpty()) null else s)
for (idx <- NdjsonUrbanAirshipSingleEvent.expected.indices) {
actual(idx) must BeFieldEqualTo(NdjsonUrbanAirshipSingleEvent.expected(idx), idx)
}
}
"not write any bad rows" in {
dirs.badRows must beEmptyDir
}
}
}
class NdjsonUrbanAirshipMultiSpec extends Specification with EnrichJobSpec {
import EnrichJobSpec._
override def appName = "ndjson-urban-airship-multi"
sequential
"A job which processes a NDJSON file with more than one event (but two valid ones)" should {
runEnrichJob(
NdjsonUrbanAirshipMultiEvent.lines,
"ndjson/com.urbanairship.connect/v1",
"2",
true,
List("geo"))
"correctly output 2 events" in {
val Some(goods) = readPartFile(dirs.output)
goods.size must_== 2
goods.zipWithIndex foreach {
case (actual, bufIdx) => {
for (idx <- NdjsonUrbanAirshipMultiEvent.expected(bufIdx).indices) {
actual.split("\t").map(s => if (s.isEmpty()) null else s).apply(idx) must
BeFieldEqualTo(NdjsonUrbanAirshipMultiEvent.expected(bufIdx)(idx), idx)
}
}
}
}
"not write any bad rows" in {
dirs.badRows must beEmptyDir
}
}
}
|
AlexandraGYG/snowplow
|
3-enrich/stream-enrich/kafka/src/main/scala/com.snowplowanalytics.snowplow.enrich.stream/sources/KafkaSource.scala
|
/*
* Copyright (c) 2013-2019 Snowplow Analytics Ltd.
* All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache
* License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied.
*
* See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package stream
package sources
import java.util.Properties
import scala.collection.JavaConverters._
import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.clients.producer._
import scalaz._
import Scalaz._
import common.adapters.AdapterRegistry
import common.enrichments.EnrichmentRegistry
import iglu.client.Resolver
import model.{Kafka, StreamsConfig}
import scalatracker.Tracker
import sinks.{KafkaSink, Sink}
/** KafkaSubSource companion object with factory method */
object KafkaSource {
def create(
config: StreamsConfig,
igluResolver: Resolver,
adapterRegistry: AdapterRegistry,
enrichmentRegistry: EnrichmentRegistry,
tracker: Option[Tracker]
): Validation[String, KafkaSource] = for {
kafkaConfig <- config.sourceSink match {
case c: Kafka => c.success
case _ => "Configured source/sink is not Kafka".failure
}
goodProducer <- KafkaSink
.validateAndCreateProducer(kafkaConfig, config.buffer, config.out.enriched)
.validation
emitPii = utils.emitPii(enrichmentRegistry)
_ <- utils.validatePii(emitPii, config.out.pii).validation
piiProducer <- config.out.pii match {
case Some(piiStreamName) =>
KafkaSink.validateAndCreateProducer(kafkaConfig, config.buffer, piiStreamName).validation
.map(Some(_))
case None => None.success
}
badProducer <- KafkaSink
.validateAndCreateProducer(kafkaConfig, config.buffer, config.out.bad)
.validation
} yield new KafkaSource(goodProducer, piiProducer, badProducer, igluResolver, adapterRegistry, enrichmentRegistry, tracker, config, kafkaConfig)
}
/** Source to read events from a Kafka topic */
class KafkaSource private (
goodProducer: KafkaProducer[String, String],
piiProducer: Option[KafkaProducer[String, String]],
badProducer: KafkaProducer[String, String],
igluResolver: Resolver,
adapterRegistry: AdapterRegistry,
enrichmentRegistry: EnrichmentRegistry,
tracker: Option[Tracker],
config: StreamsConfig,
kafkaConfig: Kafka
) extends Source(igluResolver, adapterRegistry, enrichmentRegistry, tracker, config.out.partitionKey) {
override val MaxRecordSize = None
override val threadLocalGoodSink: ThreadLocal[Sink] = new ThreadLocal[Sink] {
override def initialValue: Sink =
new KafkaSink(goodProducer, config.out.enriched)
}
override val threadLocalPiiSink: Option[ThreadLocal[Sink]] = piiProducer.flatMap { somePiiProducer =>
config.out.pii.map { piiTopicName => new ThreadLocal[Sink] {
override def initialValue: Sink =
new KafkaSink(somePiiProducer, piiTopicName)
}}}
override val threadLocalBadSink: ThreadLocal[Sink] = new ThreadLocal[Sink] {
override def initialValue: Sink =
new KafkaSink(badProducer, config.out.bad)
}
/** Never-ending processing loop over source stream. */
override def run(): Unit = {
val consumer = createConsumer(kafkaConfig.brokers, config.appName)
log.info(s"Running Kafka consumer group: ${config.appName}.")
log.info(s"Processing raw input Kafka topic: ${config.in.raw}")
consumer.subscribe(List(config.in.raw).asJava)
while (true) {
val recordValues = consumer
.poll(100) // Wait 100 ms if data is not available
.asScala
.toList
.map(_.value) // Get the values
enrichAndStoreEvents(recordValues)
}
}
private def createConsumer(
brokers: String,
groupId: String): KafkaConsumer[String, Array[Byte]] = {
val properties = createProperties(brokers, groupId)
properties.putAll(kafkaConfig.consumerConf.getOrElse(Map()).asJava)
new KafkaConsumer[String, Array[Byte]](properties)
}
private def createProperties(brokers: String, groupId: String): Properties = {
val props = new Properties()
props.put("bootstrap.servers", brokers)
props.put("group.id", groupId)
props.put("enable.auto.commit", "true")
props.put("auto.commit.interval.ms", "1000")
props.put("auto.offset.reset", "earliest")
props.put("session.timeout.ms", "30000")
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer")
props
}
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/adapters/registry/CallrailAdapterSpec.scala
|
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package adapters
package registry
// Joda-Time
import org.joda.time.DateTime
// Scalaz
import scalaz._
import Scalaz._
// Snowplow
import loaders.{CollectorApi, CollectorContext, CollectorPayload, CollectorSource}
import SpecHelpers._
// Specs2
import org.specs2.{ScalaCheck, Specification}
import org.specs2.matcher.DataTables
import org.specs2.scalaz.ValidationMatchers
class CallrailAdapterSpec extends Specification with DataTables with ValidationMatchers with ScalaCheck {
def is = s2"""
This is a specification to test the CallrailAdapter functionality
toRawEvents should return a NEL containing one RawEvent if the querystring is correctly populated $e1
toRawEvents should return a Validation Failure if there are no parameters on the querystring $e2
"""
implicit val resolver = SpecHelpers.IgluResolver
object Shared {
val api = CollectorApi("com.callrail", "v1")
val source = CollectorSource("clj-tomcat", "UTF-8", None)
val context = CollectorContext(DateTime.parse("2013-08-29T00:18:48.000+00:00").some,
"172.16.31.10".some,
None,
None,
Nil,
None)
}
object Expected {
val staticNoPlatform = Map(
"tv" -> "com.callrail-v1",
"e" -> "ue",
"cv" -> "clj-0.6.0-tom-0.0.4"
)
val static = staticNoPlatform + ("p" -> "srv")
}
def e1 = {
val params = toNameValuePairs(
"answered" -> "true",
"callercity" -> "BAKERSFIELD",
"callercountry" -> "US",
"callername" -> "<NAME>",
"callernum" -> "+12612230240",
"callerstate" -> "CA",
"callerzip" -> "92307",
"callsource" -> "keyword",
"datetime" -> "2014-10-09 16:23:45",
"destinationnum" -> "2012032051",
"duration" -> "247",
"first_call" -> "true",
"ga" -> "",
"gclid" -> "",
"id" -> "201235151",
"ip" -> "172.16.31.10",
"keywords" -> "",
"kissmetrics_id" -> "",
"landingpage" -> "http://acme.com/",
"recording" -> "http://app.callrail.com/calls/201235151/recording/9f59ad59ba1cfa264312",
"referrer" -> "direct",
"referrermedium" -> "Direct",
"trackingnum" -> "+12012311668",
"transcription" -> "",
"utm_campaign" -> "",
"utm_content" -> "",
"utm_medium" -> "",
"utm_source" -> "",
"utm_term" -> "",
"utma" -> "",
"utmb" -> "",
"utmc" -> "",
"utmv" -> "",
"utmx" -> "",
"utmz" -> "",
"cv" -> "clj-0.6.0-tom-0.0.4",
"nuid" -> "-"
)
val payload = CollectorPayload(Shared.api, params, None, None, Shared.source, Shared.context)
val actual = CallrailAdapter.toRawEvents(payload)
val expectedJson =
"""|{
|"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
|"data":{
|"schema":"iglu:com.callrail/call_complete/jsonschema/1-0-2",
|"data":{
|"duration":247,
|"utm_source":null,
|"utmv":null,
|"ip":"172.16.31.10",
|"utmx":null,
|"ga":null,
|"destinationnum":"2012032051",
|"datetime":"2014-10-09T16:23:45.000Z",
|"kissmetrics_id":null,
|"landingpage":"http://acme.com/",
|"callerzip":"92307",
|"gclid":null,
|"callername":"<NAME>",
|"utmb":null,
|"id":"201235151",
|"callernum":"+12612230240",
|"utm_content":null,
|"trackingnum":"+12012311668",
|"referrermedium":"Direct",
|"utm_campaign":null,
|"keywords":null,
|"transcription":null,
|"utmz":null,
|"utma":null,
|"referrer":"direct",
|"callerstate":"CA",
|"recording":"http://app.callrail.com/calls/201235151/recording/9f59ad59ba1cfa264312",
|"first_call":true,
|"utmc":null,
|"callercountry":"US",
|"utm_medium":null,
|"callercity":"BAKERSFIELD",
|"utm_term":null,
|"answered":true,
|"callsource":"keyword"
|}
|}
|}""".stripMargin.replaceAll("[\n\r]", "")
actual must beSuccessful(
NonEmptyList(
RawEvent(Shared.api,
Expected.static ++ Map("ue_pr" -> expectedJson, "nuid" -> "-"),
None,
Shared.source,
Shared.context)))
}
def e2 = {
val params = toNameValuePairs()
val payload = CollectorPayload(Shared.api, params, None, None, Shared.source, Shared.context)
val actual = CallrailAdapter.toRawEvents(payload)
actual must beFailing(NonEmptyList("Querystring is empty: no CallRail event to process"))
}
}
|
AlexandraGYG/snowplow
|
3-enrich/spark-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.spark/MasterCljTomcatSpec.scala
|
<reponame>AlexandraGYG/snowplow<gh_stars>1-10
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and
* limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.spark
import org.specs2.mutable.Specification
object MasterCljTomcatSpec {
// Concatenate ALL lines from ALL other jobs
val lines = good.CljTomcatTp1SingleEventSpec.lines.lines ++ // 1 good
good.CljTomcatCallrailEventSpec.lines.lines ++ // 1 good
good.CljTomcatTp2MultiEventsSpec.lines.lines ++ // 3 good
good.CljTomcatTp2MegaEventsSpec.lines.lines // 7,500 good = 7,505 GOOD
object expected {
val goodCount = 7505
}
}
/** Master test which runs using all of the individual good, bad and misc tests */
class MasterCljTomcatSpec extends Specification with EnrichJobSpec {
import EnrichJobSpec._
override def appName = "master-clj-tomcat"
sequential
"A job which processes a Clojure-Tomcat file containing 7,505 valid events, 0 bad lines and " +
"3 discardable lines" should {
runEnrichJob(Lines(MasterCljTomcatSpec.lines: _*), "clj-tomcat", "1", false, List("geo"))
"write 7,505 events" in {
val Some(goods) = readPartFile(dirs.output)
goods.size must_== MasterCljTomcatSpec.expected.goodCount
}
"write 0 bad rows" in {
dirs.badRows must beEmptyDir
}
}
}
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/adapters/registry/RemoteAdapter.scala
|
<filename>3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/adapters/registry/RemoteAdapter.scala
/*
* Copyright (c) 2014-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package common
package adapters
package registry
import com.fasterxml.jackson.core.JsonParseException
import iglu.client.Resolver
import common.loaders.CollectorPayload
import common.utils.HttpClient
import org.json4s.JsonAST.{JNothing, JNull}
import org.json4s.JsonDSL._
import org.json4s.MappingException
import org.json4s.jackson.JsonMethods._
import scalaz.Scalaz._
import scalaz.{Failure, Success, Validation}
import scala.util.control.NonFatal
/**
* An adapter for an enrichment that is handled by a remote webservice.
*
* @constructor create a new client to talk to the given remote webservice.
* @param remoteUrl the url of the remote webservice, e.g. http://localhost/myEnrichment
* @param connectionTimeout max duration of each connection attempt
* @param readTimeout max duration of read wait time
*/
class RemoteAdapter(val remoteUrl: String, val connectionTimeout: Option[Long], val readTimeout: Option[Long])
extends Adapter {
val bodyMissingErrorText = "Missing payload body"
val missingEventsErrorText = "Missing events in the response"
val emptyResponseErrorText = "Empty response"
val incompatibleResponseErrorText = "Incompatible response, missing error and events fields"
/**
* POST the given payload to the remote webservice,
* wait for it to respond with an Either[List[String], List[RawEvent] ],
* and return that as a ValidatedRawEvents
*
* @param payload The CollectorPaylod containing one or more
* raw events as collected by a Snowplow collector
* @param resolver (implicit) The Iglu resolver used for
* schema lookup and validation
* @return a Validation boxing either a NEL of RawEvents on
* Success, or a NEL of Failure Strings
*/
def toRawEvents(payload: CollectorPayload)(implicit resolver: Resolver): ValidatedRawEvents =
payload.body match {
case Some(body) if body.nonEmpty =>
val json = ("contentType" -> payload.contentType) ~
("queryString" -> toMap(payload.querystring)) ~
("headers" -> payload.context.headers) ~
("body" -> payload.body)
val request = HttpClient.buildRequest(remoteUrl,
authUser = None,
authPassword = <PASSWORD>,
Some(compact(render(json))),
"POST",
connectionTimeout,
readTimeout)
processResponse(payload, HttpClient.getBody(request))
case _ => bodyMissingErrorText.failNel
}
def processResponse(payload: CollectorPayload, response: Validation[Throwable, String]) =
response match {
case Failure(throwable) =>
throwable.getMessage.failNel
case Success(bodyAsString) =>
try {
if (bodyAsString == "") {
emptyResponseErrorText.failNel
} else {
(parse(bodyAsString) \ "error", parse(bodyAsString) \ "events") match {
case (JNull, JNull) | (JNothing, JNothing) => incompatibleResponseErrorText.failNel
case (error, JNull | JNothing) => error.extract[String].failNel
case (JNull | JNothing, eventsObj) =>
val events = eventsObj.extract[List[Map[String, String]]]
rawEventsListProcessor(events.map { event =>
RawEvent(
api = payload.api,
parameters = event,
contentType = payload.contentType,
source = payload.source,
context = payload.context
).success
})
case _ => s"Unable to parse response: ${bodyAsString}".failNel
}
}
} catch {
case e: MappingException =>
s"The events field should be List[Map[String, String]], error: ${e} - response: ${bodyAsString}".failNel
case e: JsonParseException => s"Json is not parsable, error: ${e} - response: ${bodyAsString}".failNel
case NonFatal(e) => s"Unexpected error: $e".failNel
}
}
}
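// A sketch of the response contract implied by processResponse (the example payload is
// hypothetical): the remote service must answer with a JSON object carrying either an
// "error" string or an "events" array of string-to-string parameter maps, e.g.
//
//   {"error": null, "events": [{"e": "pv", "tv": "remote-adapter-example"}]}
//
// If both fields are missing or null the adapter fails with incompatibleResponseErrorText,
// a populated "error" field fails with that message, and a non-JSON body is reported as not
// parsable.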
|
AlexandraGYG/snowplow
|
3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/enrichments/registry/HttpHeaderExtractorEnrichmentSpec.scala
|
<gh_stars>1-10
/** Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package enrichments
package registry
// Specs2
import org.specs2.Specification
import org.specs2.scalaz._
// Scalaz
import scalaz._
import Scalaz._
// Json4s
import org.json4s._
import org.json4s.JValue
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
class HttpHeaderExtractorEnrichmentSpec extends Specification with ValidationMatchers {
def is = s2"""
This is a specification to test the HttpHeaderExtractorEnrichment
returns X-Forwarded-For header $e1
returns Accept header after Regex matching $e2
No headers $e3
"""
def e1 = {
val expected = List(
"""{"schema":"iglu:org.ietf/http_header/jsonschema/1-0-0","data":{"name":"X-Forwarded-For","value":"192.168.3.11, 172.16.17.32"}}"""
)
HttpHeaderExtractorEnrichment("X-Forwarded-For")
.extract(List("X-Forwarded-For: 192.168.3.11, 172.16.17.32"))
.map(h => compact(render(h))) must_== expected.map(e => compact(render(parse(e))))
}
def e2 = {
val expected = List(
"""{"schema":"iglu:org.ietf/http_header/jsonschema/1-0-0","data":{"name":"Accept","value":"text/html"}}"""
)
HttpHeaderExtractorEnrichment(".*").extract(List("Accept: text/html")).map(h => compact(render(h))) must_== expected
.map(e => compact(render(parse(e))))
}
def e3 = {
val expected = List()
HttpHeaderExtractorEnrichment(".*").extract(Nil).map(h => compact(render(h))) must_== expected.map(e =>
compact(render(parse(e))))
}
}
|
AlexandraGYG/snowplow
|
3-enrich/stream-enrich/core/src/test/scala/com.snowplowanalytics.snowplow.enrich.stream/bad/InvalidEnrichedEventSpec.scala
|
/*
* Copyright (c) 2013-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0, and
* you may not use this file except in compliance with the Apache License
* Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the Apache License Version 2.0 is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.stream
package bad
import org.apache.commons.codec.binary.Base64
import org.specs2.mutable.Specification
import SpecHelpers._
object InvalidEnrichedEventSpec {
val raw =
"<KEY>
}
class InvalidEnrichedEventSpec extends Specification {
// TODO: update this after https://github.com/snowplow/snowplow/issues/463
"Stream Enrich" should {
"return None for a valid SnowplowRawEvent which fails enrichment" in {
val rawEvent = Base64.decodeBase64(InvalidEnrichedEventSpec.raw)
val enrichedEvent = TestSource.enrichEvents(rawEvent)(0)
enrichedEvent.isFailure must beTrue
}
}
}
|
kornelrabczak/scalac-simple-profiling
|
src/main/scala/com/thecookiezen/tools/Profiling.scala
|
<gh_stars>0
package com.thecookiezen.tools
import java.nio.file.{Files, Path, StandardOpenOption}
import scala.tools.nsc.Global
import scala.reflect.internal.util.StatisticsStatics
import scala.collection.mutable
import com.thecookiezen.ProfilerPlugin.PluginConfig
import com.thecookiezen.tools.Profiling.MacroInfo
import pprint.TPrint
import com.thecookiezen.metrics.Timer
import com.thecookiezen.metrics.Timer.TimerSnapshot
import scala.jdk.CollectionConverters._
final class Profiling[G <: Global](override val global: G, config: PluginConfig, logger: Logger[G]) extends ProfilingStats {
import global._
def registerProfilers(): Unit = {
    // Register our profiling analyzer and macro plugins
analyzer.addMacroPlugin(ProfilingMacroPlugin)
analyzer.addAnalyzerPlugin(ProfilingAnalyzerPlugin)
}
import scala.reflect.internal.util.SourceFile
case class MacroProfiler(
perCallSite: Map[Position, MacroInfo],
perFile: Map[SourceFile, MacroInfo],
inTotal: MacroInfo
)
def toMillis(nanos: Long): Long =
java.util.concurrent.TimeUnit.NANOSECONDS.toMillis(nanos)
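  // Groups per-call-site values by their source file, folding each file's values with `aggregate`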
def groupPerFile[V](kvs: Map[Position, V])(empty: V, aggregate: (V, V) => V): Map[SourceFile, V] = {
kvs
.groupBy(_._1.source)
.view
.mapValues {
posInfos: Map[Position, V] => posInfos.valuesIterator.fold(empty)(aggregate)
}.toMap
}
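  // Macro expansion data aggregated per call site, per source file and in total (expansion times converted to millis)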
lazy val macroProfiler: MacroProfiler = {
import ProfilingMacroPlugin.macroInfos
val perCallSite = macroInfos.toMap
val perFile = groupPerFile(perCallSite)(MacroInfo.Empty, _ + _)
.view
.mapValues(i => i.copy(expansionNanos = toMillis(i.expansionNanos)))
.toMap
val inTotal = MacroInfo.aggregate(perFile.valuesIterator)
val callSiteNanos = perCallSite
.view
.mapValues(i => i.copy(expansionNanos = toMillis(i.expansionNanos)))
.toMap
MacroProfiler(callSiteNanos, perFile, inTotal)
}
case class ImplicitInfo(count: Int) {
def +(other: ImplicitInfo): ImplicitInfo = ImplicitInfo(count + other.count)
}
object ImplicitInfo {
final val Empty = ImplicitInfo(0)
def aggregate(infos: Iterator[ImplicitInfo]): ImplicitInfo = infos.fold(Empty)(_ + _)
implicit val infoOrdering: Ordering[ImplicitInfo] = Ordering.by(_.count)
}
case class ImplicitProfiler(
perCallSite: Map[Position, ImplicitInfo],
perFile: Map[SourceFile, ImplicitInfo],
perType: Map[Type, ImplicitInfo],
inTotal: ImplicitInfo
)
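  // Implicit search counts aggregated per call site, per source file, per searched type and in total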
lazy val implicitProfiler: ImplicitProfiler = {
val perCallSite = implicitSearchesByPos.view.mapValues(ImplicitInfo.apply).toMap
val perFile = groupPerFile[ImplicitInfo](perCallSite)(ImplicitInfo.Empty, _ + _)
val perType = implicitSearchesByType.view.mapValues(ImplicitInfo.apply).toMap
val inTotal = ImplicitInfo.aggregate(perFile.valuesIterator)
ImplicitProfiler(perCallSite, perFile, perType, inTotal)
}
// Copied from `TypeDiagnostics` to have expanded types in implicit search
private object DealiasedType extends TypeMap {
def apply(tp: Type): Type = tp match {
case TypeRef(_, sym, _) if sym.isAliasType && !sym.isInDefaultNamespace =>
mapOver(tp.dealias)
case _ => mapOver(tp)
}
}
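  // Extracts the type of the tree produced by an implicit search, falling back to `default` when no better type is available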
def concreteTypeFromSearch(tree: Tree, default: Type): Type = {
tree match {
case EmptyTree => default
case Block(_, expr) => expr.tpe
case Try(block, _, _) =>
block match {
case Block(_, expr) => expr.tpe
case t => t.tpe
}
case t =>
val treeType = t.tpe
if (treeType == null || treeType == NoType) default else treeType
}
}
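  // Writes the collapsed-stack (flamegraph) files for implicit searches and, if enabled, macro expansions, returning their paths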
def generateGraphData(outputDir: Path): List[Path] = {
Files.createDirectories(outputDir)
val randomId = java.lang.Long.toString(System.currentTimeMillis())
val implicitGraphName = s"implicit-searches-$randomId"
val macroGraphName = s"macros-$randomId"
val implicitFlamegraphFile = outputDir.resolve(s"$implicitGraphName.flamegraph")
val implicits = ProfilingAnalyzerPlugin.getImplicitStacks
Files.write(implicitFlamegraphFile, implicits.asJava, StandardOpenOption.WRITE, StandardOpenOption.CREATE)
if (config.generateMacroFlamegraph) {
val macroFlamegraphFile = outputDir.resolve(s"$macroGraphName.flamegraph")
val macroStacks = ProfilingMacroPlugin.getMacroStacks
Files.write(macroFlamegraphFile, macroStacks.asJava, StandardOpenOption.WRITE, StandardOpenOption.CREATE)
List(implicitFlamegraphFile, macroFlamegraphFile)
} else List(implicitFlamegraphFile)
}
private def typeToString(`type`: Type): String =
global.exitingTyper(`type`.toLongString).trim
// Moving this here so that it's accessible to the macro plugin
private type Entry = (global.analyzer.ImplicitSearch, TimerSnapshot, TimerSnapshot)
private var implicitsStack: List[Entry] = Nil
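  // Folds per-id stack names and timings into sorted collapsed-stack lines understood by flamegraph tooling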
object FoldableStack {
def fold(name: String)(names: mutable.Map[Int, List[String]], times: mutable.Map[Int, Long]): mutable.Seq[String] = {
val stacks = mutable.Buffer[String]()
times.foreach {
case (id, nanos) =>
val stackNames = names.getOrElse(id, sys.error(s"Stack name for $name id $id doesn't exist!"))
val stackName = stackNames.mkString(";")
stacks += s"$stackName ${nanos / 1000}"
}
stacks.sorted
}
}
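  // Analyzer plugin that times every implicit search and tracks parent/child relationships between searches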
private object ProfilingAnalyzerPlugin extends global.analyzer.AnalyzerPlugin {
private val implicitsTimers = perRunCaches.newAnyRefMap[Type, Timer]()
private val stackedNanos = perRunCaches.newMap[Int, Long]()
private val stackedNames = perRunCaches.newMap[Int, List[String]]()
private val searchIdsToTimers = perRunCaches.newMap[Int, Timer]()
private val searchIdChildren = perRunCaches.newMap[Int, List[analyzer.ImplicitSearch]]()
def getImplicitStacks: mutable.Seq[String] = FoldableStack.fold("search")(stackedNames, stackedNanos)
private def getImplicitTimerFor(candidate: Type): Timer =
implicitsTimers.getOrElse(candidate, sys.error(s"Timer for $candidate doesn't exist"))
private def getSearchTimerFor(searchId: Int): Timer = {
searchIdsToTimers.getOrElse(searchId, sys.error(s"Missing non-cumulative timer for $searchId"))
}
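    // Called when the typer starts an implicit search: pauses the parent search's timer, links the new search
    // to its parent, starts its own timers and bumps the per-type/per-position counters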
override def pluginsNotifyImplicitSearch(search: global.analyzer.ImplicitSearch): Unit = {
if (StatisticsStatics.areSomeColdStatsEnabled() && statistics.areStatisticsLocallyEnabled) {
val targetType = search.pt
val targetPos = search.pos
        // Stop the timer of the enclosing (dependent) implicit search
implicitsStack.headOption.foreach {
case (search, _, searchStart) => getSearchTimerFor(search.searchId).stop(searchStart)
}
// We add ourselves to the child list of our parent implicit search
implicitsStack.headOption match {
case Some((prevSearch, _, _)) =>
val prevId = prevSearch.searchId
val prevChilds = searchIdChildren.getOrElse(prevId, Nil)
searchIdChildren.update(prevId, search :: prevChilds)
case None => ()
}
// Create timer
val prefix = s"$targetType"
val perTypeTimer = implicitsTimers.getOrElseUpdate(targetType, Timer(prefix))
// Create non-cumulative timer for the search
val searchId = search.searchId
val searchPrefix = s"implicit search $searchId"
val searchTimer = Timer(searchPrefix)
searchIdsToTimers.+=(searchId -> searchTimer)
// Start the timer as soon as possible
val implicitTypeStart = perTypeTimer.start
val searchStart = searchTimer.start
// Update all timers and counters
val typeCounter = implicitSearchesByType.getOrElse(targetType, 0)
implicitSearchesByType.update(targetType, typeCounter + 1)
val posCounter = implicitSearchesByPos.getOrElse(targetPos, 0)
implicitSearchesByPos.update(targetPos, posCounter + 1)
if (global.analyzer.openMacros.nonEmpty)
statistics.incCounter(implicitSearchesByMacrosCount)
implicitsStack = (search, implicitTypeStart, searchStart) :: implicitsStack
}
}
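    // Called when an implicit search completes: records its stack name and elapsed nanos,
    // then pops it off the stack and resumes the parent search's timer if there is one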
override def pluginsNotifyImplicitSearchResult(result: global.analyzer.SearchResult): Unit = {
super.pluginsNotifyImplicitSearchResult(result)
if (StatisticsStatics.areSomeColdStatsEnabled() && statistics.areStatisticsLocallyEnabled) {
// 1. Get timer of the running search
val (search, implicitTypeStart, searchStart) = implicitsStack.head
val targetType = search.pt
val timer = getImplicitTimerFor(targetType)
// 2. Register the timing diff for every stacked name.
def stopTimerFlamegraph(prev: Option[analyzer.ImplicitSearch]): Unit = {
val searchId = search.searchId
def missing(name: String): Nothing =
sys.error(s"Missing $name for $searchId ($targetType).")
val forcedExpansions = ProfilingMacroPlugin.searchIdsToMacroStates.getOrElse(searchId, Nil)
val expandedStr = s"(expanded macros ${forcedExpansions.size})"
          // If the resulting tree comes from a macro expansion, add the macro's name to the stack entry
val suffix = {
val errorTag = if (result.isFailure) " _[j]" else ""
result.tree.attachments.get[analyzer.MacroExpansionAttachment] match {
case Some(analyzer.MacroExpansionAttachment(expandee: Tree, _)) =>
val expandeeSymbol = treeInfo.dissectApplied(expandee).core.symbol
analyzer.loadMacroImplBinding(expandeeSymbol) match {
case Some(a) =>
val l = if (errorTag.isEmpty) " _[i]" else errorTag
s" (id $searchId) $expandedStr (tree from `${a.className}.${a.methName}`)$l"
case None => s" $expandedStr $errorTag"
}
case None => s" $expandedStr $errorTag"
}
}
// Complete stack names of triggered implicit searches
val children = searchIdChildren.getOrElse(searchId, Nil)
prev.foreach { p =>
val current = searchIdChildren.getOrElse(p.searchId, Nil)
searchIdChildren.update(p.searchId, children ::: current)
}
val typeForStack = DealiasedType {
if (!config.concreteTypeParamsInImplicits) targetType
else concreteTypeFromSearch(result.subst(result.tree), targetType)
}
if (config.printSearchIds.contains(searchId) || (result.isFailure && config.printFailedMacroImplicits)) {
logger.info(
s"""implicit search $searchId:
| -> valid ${result.isSuccess}
| -> type `$typeForStack`
| -> ${search.undet_s}
| -> ${search.ctx_s}
| -> tree:
|${showCode(result.tree)}
| -> forced expansions:
|${forcedExpansions.mkString(" ", " \n", "\n")}
|""".stripMargin
)
}
val thisStackName = s"${typeToString(typeForStack)}$suffix"
stackedNames.update(searchId, List(thisStackName))
children.foreach { childSearch =>
val id = childSearch.searchId
val childrenStackName = stackedNames.getOrElse(id, missing("stack name"))
stackedNames.update(id, thisStackName :: childrenStackName)
}
// Save the nanos for this implicit search
val searchTimer = getSearchTimerFor(searchId)
searchTimer.stop(searchStart)
val previousNanos = stackedNanos.getOrElse(searchId, 0L)
stackedNanos.+=((searchId, searchTimer.nanos + previousNanos))
}
        // 3. Pop this search off the stack, stop its timers and restart the parent search's timer if there is one
val previousImplicits = implicitsStack.tail
implicitsStack = previousImplicits.headOption match {
case Some((prevSearch, prevImplicitTypeStart, _)) =>
stopTimerFlamegraph(Some(prevSearch))
timer.stop(implicitTypeStart)
val newPrevStart = getSearchTimerFor(prevSearch.searchId).start
(prevSearch, prevImplicitTypeStart, newPrevStart) :: previousImplicits.tail
case None =>
stopTimerFlamegraph(None)
timer.stop(implicitTypeStart)
previousImplicits
}
}
}
}
sealed trait MacroState {
def pt: Type
def tree: Tree
}
case class DelayedMacro(pt: Type, tree: Tree) extends MacroState
case class SkippedMacro(pt: Type, tree: Tree) extends MacroState
case class SuppressedMacro(pt: Type, tree: Tree) extends MacroState
case class FallbackMacro(pt: Type, tree: Tree) extends MacroState
case class FailedMacro(pt: Type, tree: Tree) extends MacroState
case class SucceededMacro(pt: Type, tree: Tree) extends MacroState
case class MacroEntry(
id: Int,
originalPt: Type,
start: TimerSnapshot,
state: Option[MacroState]
)
private var macrosStack: List[MacroEntry] = Nil
private var macroCounter: Int = 0
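  // Macro plugin that intercepts macro expansions, keeps a stack of in-flight expansions and collects data for the macro flamegraph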
object ProfilingMacroPlugin extends global.analyzer.MacroPlugin {
type Typer = analyzer.Typer
type RepeatedKey = (String, String)
val macroInfos = perRunCaches.newAnyRefMap[Position, MacroInfo]
val searchIdsToMacroStates = perRunCaches.newMap[Int, List[MacroState]]
private val macroIdsToTimers = perRunCaches.newMap[Int, Timer]()
private val macroChildren = perRunCaches.newMap[Int, List[MacroEntry]]()
private val stackedNanos = perRunCaches.newMap[Int, Long]()
private val stackedNames = perRunCaches.newMap[Int, List[String]]()
def getMacroStacks = FoldableStack.fold("macro")(stackedNames, stackedNanos)
import scala.tools.nsc.Mode
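    // Wraps the default macro expander so every expansion is timed and its outcome (success, failure, delayed, ...) is recorded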
override def pluginsMacroExpand(t: Typer, expandee: Tree, md: Mode, pt: Type): Option[Tree] = {
val macroId = macroCounter
macroCounter = macroCounter + 1
object expander extends analyzer.DefMacroExpander(t, expandee, md, pt) {
/** The default method that expands all macros. */
override def apply(desugared: Tree): Tree = {
val prevData = macrosStack.headOption.map { prev =>
macroIdsToTimers.getOrElse(
prev.id,
sys.error(s"fatal error: missing timer for ${prev.id}")
) -> prev
}
// Let's first stop the previous timer to have consistent times for the flamegraph
prevData.foreach {
case (prevTimer, prev) => prevTimer.stop(prev.start)
}
// Let's create our own timer
val macroTimer = Timer(s"macro $macroId")
macroIdsToTimers += ((macroId, macroTimer))
val start = macroTimer.start
val entry = MacroEntry(macroId, pt, start, None)
if (config.generateMacroFlamegraph) {
// We add ourselves to the child list of our parent macro
prevData.foreach {
              case (_, prevEntry) =>
                val prevId = prevEntry.id
                val prevChilds = macroChildren.getOrElse(prevId, Nil)
                // Record the freshly created entry as a child of its parent macro
                macroChildren.update(prevId, entry :: prevChilds)
}
}
macrosStack = entry :: macrosStack
try super.apply(desugared)
finally {
val children = macroChildren.getOrElse(macroId, Nil)
if (config.generateMacroFlamegraph) {
// Complete stack names of triggered implicit searches
prevData.foreach {
case (_, p) =>
val prevChildren = macroChildren.getOrElse(p.id, Nil)
macroChildren.update(p.id, children ::: prevChildren)
}
}
// We need to fetch the entry from the stack as it can be modified
val parents = macrosStack.tail
macrosStack.headOption match {
case Some(head) =>
if (config.generateMacroFlamegraph) {
val thisStackName = head.state match {
case Some(FailedMacro(pt, _)) => s"${typeToString(pt)} [failed]"
case Some(DelayedMacro(pt, _)) => s"${typeToString(pt)} [delayed]"
case Some(SucceededMacro(pt, _)) => s"${typeToString(pt)}"
case Some(SuppressedMacro(pt, _)) => s"${typeToString(pt)} [suppressed]"
case Some(SkippedMacro(pt, _)) => s"${typeToString(pt)} [skipped]"
case Some(FallbackMacro(pt, _)) => s"${typeToString(pt)} [fallback]"
case None => sys.error("Fatal error: macro has no state!")
}
stackedNames.update(macroId, thisStackName :: Nil)
children.foreach { childSearch =>
val id = childSearch.id
val childrenStackName = stackedNames.getOrElse(id, sys.error("no stack name"))
stackedNames.update(id, thisStackName :: childrenStackName)
}
}
macroTimer.stop(head.start)
val previousNanos = stackedNanos.getOrElse(macroId, 0L)
// Updates expansionNanos time after super.apply() for MacroInfo at the specified position
val nanosPassed = macroTimer.nanos + previousNanos
macroInfos.get(desugared.pos).foreach { oldMacroInfo =>
macroInfos.update(desugared.pos, oldMacroInfo.copy(expansionNanos = nanosPassed))
}
stackedNanos.+=((macroId, nanosPassed))
prevData match {
case Some((prevTimer, prev)) =>
// Let's restart the timer of the previous macro expansion
val newStart = prevTimer.start
// prev is the head of `parents`, so let's replace it on stack with the new start
macrosStack = prev.copy(start = newStart) :: parents.tail
case None => macrosStack = parents
}
case None => sys.error(s"fatal error: expected macro entry for macro id $macroId")
}
}
}
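      // Associates a macro state with the implicit search currently at the top of the stack, if there is one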
def mapToCurrentImplicitSearch(exp: MacroState): Unit = {
implicitsStack.headOption match {
case Some(i) =>
val id = i._1.searchId
val currentMacros = searchIdsToMacroStates.getOrElse(id, Nil)
searchIdsToMacroStates.update(id, exp :: currentMacros)
case None => ()
}
}
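      // Replaces the state of the macro entry at the top of the stack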
def updateStack(state: MacroState): Unit = {
macrosStack.headOption match {
case Some(entry) =>
macrosStack = entry.copy(state = Some(state)) :: macrosStack.tail
case None => sys.error("fatal error: stack cannot be empty while updating!")
}
}
override def onFailure(expanded: Tree) = {
val state = FailedMacro(pt, expanded)
mapToCurrentImplicitSearch(state)
statistics.incCounter(failedMacros)
updateStack(state)
super.onFailure(expanded)
}
override def onSkipped(expanded: Tree) = {
val state = SkippedMacro(pt, expanded)
mapToCurrentImplicitSearch(state)
statistics.incCounter(skippedMacros)
updateStack(state)
        super.onSkipped(expanded)
}
override def onFallback(expanded: Tree) = {
val state = FallbackMacro(pt, expanded)
mapToCurrentImplicitSearch(state)
statistics.incCounter(fallbackMacros)
updateStack(state)
super.onFallback(expanded)
}
override def onSuppressed(expanded: Tree) = {
val state = SuppressedMacro(pt, expanded)
mapToCurrentImplicitSearch(state)
statistics.incCounter(suppressedMacros)
updateStack(state)
super.onSuppressed(expanded)
}
override def onDelayed(expanded: Tree) = {
val state = DelayedMacro(pt, expanded)
mapToCurrentImplicitSearch(state)
statistics.incCounter(delayedMacros)
updateStack(state)
super.onDelayed(expanded)
}
override def onSuccess(expanded0: Tree) = {
val expanded = super.onSuccess(expanded0)
val expandedType = concreteTypeFromSearch(expanded, pt)
val state = SucceededMacro(expandedType, expanded)
mapToCurrentImplicitSearch(state)
updateStack(state)
// Update macro counter per type returned
val macroTypeCounter = macrosByType.getOrElse(expandedType, 0)
macrosByType.update(expandedType, macroTypeCounter + 1)
val callSitePos = expandee.pos
val macroInfo = macroInfos.getOrElse(callSitePos, MacroInfo.Empty)
val expandedMacros = macroInfo.expandedMacros + 1
// Use 0L for the timer because it will be filled in by the caller `apply`
macroInfos.put(callSitePos, MacroInfo(expandedMacros, 0, 0L))
expanded
}
}
Some(expander(expandee))
}
}
}
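// Per-run compiler statistics: sub-counters of macro expansions and per-type/per-position implicit search counters shared by the plugins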
trait ProfilingStats {
val global: Global
import global.statistics.{newSubCounter, macroExpandCount, implicitSearchCount}
macroExpandCount.children.clear()
final val failedMacros = newSubCounter(" of which failed macros", macroExpandCount)
final val delayedMacros = newSubCounter(" of which delayed macros", macroExpandCount)
final val suppressedMacros = newSubCounter(" of which suppressed macros", macroExpandCount)
final val fallbackMacros = newSubCounter(" of which fallback macros", macroExpandCount)
final val skippedMacros = newSubCounter(" of which skipped macros", macroExpandCount)
final val implicitSearchesByMacrosCount = newSubCounter(" from macros", implicitSearchCount)
import scala.reflect.internal.util.Position
final val macrosByType = new scala.collection.mutable.HashMap[global.Type, Int]()
final val implicitSearchesByType = global.perRunCaches.newMap[global.Type, Int]()
final val implicitSearchesByPos = global.perRunCaches.newMap[Position, Int]()
}
object Profiling {
/**
* Represents the profiling information about expanded macros.
*
   * Note that we could derive the number of expanded macros from the
   * number of instances of [[MacroInfo]] if it were not for the fact
   * that a macro can expand at the same position more than once. We
   * want to be able to report/analyse such cases on their own, so
   * we keep it as a parameter of this entity.
*/
case class MacroInfo(expandedMacros: Int, expandedNodes: Int, expansionNanos: Long) {
def +(other: MacroInfo): MacroInfo = {
val totalExpanded = expandedMacros + other.expandedMacros
val totalNodes = expandedNodes + other.expandedNodes
val totalTime = expansionNanos + other.expansionNanos
MacroInfo(totalExpanded, totalNodes, totalTime)
}
}
object MacroInfo {
final val Empty = MacroInfo(0, 0, 0L)
implicit val macroInfoOrdering: Ordering[MacroInfo] = Ordering.by(_.expansionNanos)
def aggregate(infos: Iterator[MacroInfo]): MacroInfo = {
infos.foldLeft(MacroInfo.Empty)(_ + _)
}
implicit val intPrint: TPrint[Int] = TPrint.default[Int]
implicit val stringPrint: TPrint[String] = TPrint.default[String]
implicit val macroInfoPrint: TPrint[MacroInfo] = TPrint.default[MacroInfo]
}
}
|
kornelrabczak/scalac-simple-profiling
|
src/main/scala/com/thecookiezen/ProfilerPlugin.scala
|
<filename>src/main/scala/com/thecookiezen/ProfilerPlugin.scala
package com.thecookiezen
import java.nio.file.{Path, Paths}
import com.thecookiezen.ProfilerPlugin.PluginConfig
import com.thecookiezen.tools.{Logger, Profiling}
import scala.tools.nsc.plugins.{Plugin, PluginComponent}
import scala.tools.nsc.{Global, Phase}
import scala.util.matching.Regex
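// Compiler plugin entry point: parses the -P:profiler-plugin:* options into a PluginConfig and registers the profilers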
class ProfilerPlugin(val global: Global) extends Plugin {
private final lazy val ShowProfiles = "show-profiles"
private final lazy val SourceRoot = "sourceroot"
private final lazy val PrintSearchResult = "print-search-result"
private final lazy val GenerateMacroFlamegraph = "generate-macro-flamegraph"
private final lazy val PrintFailedMacroImplicits = "print-failed-implicit-macro-candidates"
private final lazy val ShowConcreteImplicitTparams = "show-concrete-implicit-tparams"
private final lazy val PrintSearchRegex = s"$PrintSearchResult:(.*)".r
private final lazy val SourceRootRegex = s"$SourceRoot:(.*)".r
private final lazy val config = PluginConfig(
super.options.contains(ShowProfiles),
findOption(SourceRoot, SourceRootRegex).map(Paths.get(_)),
findSearchIds(findOption(PrintSearchResult, PrintSearchRegex)),
super.options.contains(GenerateMacroFlamegraph),
super.options.contains(PrintFailedMacroImplicits),
super.options.contains(ShowConcreteImplicitTparams)
)
lazy val implementation = new Profiling(ProfilerPlugin.this.global, config, logger)
private lazy val logger = new Logger(global)
val name = "profiler-plugin"
override val optionsHelp: Option[String] = Some(s"""
|-P:$name:${pad20(SourceRoot)}:_ Sets the source root for this project.
|-P:$name:${pad20(ShowProfiles)} Logs profile information for every call-site.
|-P:$name:${pad20(ShowConcreteImplicitTparams)} Shows types in flamegraphs of implicits with concrete type params.
|-P:$name:${pad20(PrintSearchResult)}:_ Print implicit search result trees for a list of search ids separated by a comma.
""".stripMargin)
val description = "Profiles macros and implicit at the compilation time"
val components = List[PluginComponent](ProfilerComponent)
def findOption(name: String, pattern: Regex): Option[String] = {
super.options.find(_.startsWith(name)).flatMap {
case pattern(matched) => Some(matched)
case _ => None
}
}
def findSearchIds(userOption: Option[String]): Set[Int] = {
userOption match {
case Some(value) => value.split(",", Int.MaxValue).map(_.toInt).toSet
case None => Set.empty
}
}
override def init(ops: List[String], e: String => Unit): Boolean = true
private def pad20(option: String): String = option + (" " * (20 - option.length))
implementation.registerProfilers()
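  // Phase scheduled after 'jvm' (before 'terminal'); it does no per-unit work and only reports the collected statistics at the end of the run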
private object ProfilerComponent extends PluginComponent {
lazy val globalOutputDir = new java.io.File(
global.settings.outputDirs.getSingleOutput
.map(_.file.getAbsolutePath)
.getOrElse(global.settings.d.value)
).toPath
override val global: implementation.global.type = implementation.global
override val phaseName: String = "scalac-profiling"
override val runsAfter: List[String] = List("jvm")
override val runsBefore: List[String] = List("terminal")
override def newPhase(prev: Phase): Phase = {
new StdPhase(prev) {
override def apply(unit: global.CompilationUnit): Unit = ()
override def run(): Unit = {
super.run()
val graphsDir = globalOutputDir.resolve(Paths.get("META-INF", "graphs"))
reportStatistics(graphsDir)
}
}
}
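    // Writes the flamegraph data and, when show-profiles is set, logs per-call-site, per-file and per-type macro/implicit statistics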
private def reportStatistics(graphsPath: Path): Unit = {
val macroProfiler = implementation.macroProfiler
val persistedGraphData = implementation.generateGraphData(graphsPath)
persistedGraphData.foreach(p => logger.info(s"Writing graph to $p"))
if (config.showProfiles) {
logger.info("Macro data per call-site", macroProfiler.perCallSite)
logger.info("Macro data per file", macroProfiler.perFile)
logger.info("Macro data in total", macroProfiler.inTotal)
val macrosType = implementation.macrosByType.toList.sortBy(_._2)
val macrosTypeLines = global.exitingTyper(macrosType.map(kv => kv._1.toString -> kv._2))
logger.info("Macro expansions by type", macrosTypeLines.toMap)
val implicitSearchesPosition = implementation.implicitSearchesByPos.toList.sortBy(_._2).toMap
logger.info("Implicit searches by position", implicitSearchesPosition)
val sortedImplicitSearches = implementation.implicitSearchesByType.toList.sortBy(_._2)
// Make sure to stringify types right after typer to avoid compiler crashes
val stringifiedSearchCounter =
global.exitingTyper(
sortedImplicitSearches.map(kv => kv._1.toString -> kv._2)
)
logger.info("Implicit searches by type", stringifiedSearchCounter.toMap)
()
}
}
}
}
object ProfilerPlugin {
case class PluginConfig(
showProfiles: Boolean,
sourceRoot: Option[Path],
printSearchIds: Set[Int],
generateMacroFlamegraph: Boolean,
printFailedMacroImplicits: Boolean,
concreteTypeParamsInImplicits: Boolean
)
}