repo_name | path | text |
|---|---|---|
lindemer/pmp
|
src/test/scala/PmpConfig.scala
|
<reponame>lindemer/pmp
package config
import spinal.core._
import spinal.core.SpinalConfig
import spinal.core.sim.SimConfig
object PmpConfig {
val spinalConfig = SpinalConfig(
defaultClockDomainFrequency = FixedFrequency(10 MHz),
targetDirectory = "rtl"
)
def apply() = SimConfig.withWave
.withConfig(spinalConfig)
.workspacePath("waves")
}
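// Hypothetical usage sketch (not part of the original file): compile a component
// with the shared PmpConfig and run a waveform-dumping simulation. The Passthrough
// component below is defined only so the sketch is self-contained.
object PmpConfigExample {
  import spinal.core.sim._
  // A trivial pass-through component used purely for illustration.
  class Passthrough extends Component {
    val io = new Bundle {
      val a = in Bool()
      val b = out Bool()
    }
    io.b := io.a
  }
  def main(args: Array[String]): Unit = {
    PmpConfig().compile(new Passthrough).doSim { dut =>
      dut.io.a #= true
      sleep(1)
      assert(dut.io.b.toBoolean)
    }
  }
}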
|
ruuda/matasano
|
src/main/scala/Set1/Challenge7.scala
|
<filename>src/main/scala/Set1/Challenge7.scala
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
import io.Source
import com.matasano.Aes
import com.matasano.Encoding
object Challenge7 {
def main(args: Array[String]) = {
val file = Source.fromFile("data/challenge7.md")
val ciphertext = Encoding.decodeBase64(file.getLines.mkString)
val key = Encoding.encodeAscii("YELLOW SUBMARINE")
val decrypt = Aes.decrypt(key)
val plaintext = ciphertext
.grouped(16)
.map(decrypt)
.map(Encoding.decodeAscii)
.mkString
println(s"plaintext:\n\n$plaintext")
file.close()
}
}
|
ruuda/matasano
|
src/main/scala/Crypto.scala
|
<filename>src/main/scala/Crypto.scala
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
package com.matasano
object Crypto {
def xor(a: Vector[Byte], b: Vector[Byte]): Vector[Byte] =
a.zip(b).map { case (x, y) => (x ^ y).toByte }
def xorSingle(a: Vector[Byte], key: Byte): Vector[Byte] = {
val repeated = Vector.fill(a.length) { key }
xor(a, repeated)
}
def xorRepeat(a: Vector[Byte], key: Vector[Byte]): Vector[Byte] = {
val repeated = Stream.continually(key).flatten.take(a.length).toVector
xor(a, repeated)
}
def encryptCbc(encrypt: Vector[Byte] => Vector[Byte])
(iv: Vector[Byte], plaintext: Vector[Byte]): Vector[Byte] = {
// Assume a block size of 16 for now.
require(iv.length == 16)
require(plaintext.length % 16 == 0)
plaintext.grouped(16).scanLeft(iv) {
case (prev, block) => encrypt(xor(prev, block))
}
.flatMap(block => block)
.toVector
}
def decryptCbc(decrypt: Vector[Byte] => Vector[Byte])
(iv: Vector[Byte], ciphertext: Vector[Byte]): Vector[Byte] = {
// Assume a block size of 16 for now.
require(iv.length == 16)
require(ciphertext.length % 16 == 0)
val blocks = ciphertext.grouped(16).toVector
val withIv = Vector(iv) ++ blocks
withIv.zip(blocks).flatMap {
case (prev, block) => xor(prev, decrypt(block))
}
.toVector
}
}
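// A minimal usage sketch (not part of the original file): plain XOR and
// repeating-key XOR, the primitives reused by the CBC helpers above. The input
// bytes are the first three bytes of challenge 2, whose XOR is "the".
object CryptoXorExample {
  def main(args: Array[String]): Unit = {
    val a = Vector[Byte](0x1c, 0x01, 0x11)
    val b = Vector[Byte](0x68, 0x69, 0x74)
    // 0x1c ^ 0x68 = 0x74 't', 0x01 ^ 0x69 = 0x68 'h', 0x11 ^ 0x74 = 0x65 'e'.
    assert(Crypto.xor(a, b) == Vector[Byte](0x74, 0x68, 0x65))
    // Repeating-key XOR cycles the key over the input.
    val input = Encoding.encodeAscii("ABCDEF")
    val key = Encoding.encodeAscii("KEY")
    assert(Crypto.xorRepeat(input, key) == Crypto.xor(input, Encoding.encodeAscii("KEYKEY")))
  }
}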
|
ruuda/matasano
|
src/main/scala/Util.scala
|
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
package com.matasano
object Util {
val bitCount: Int => Int = java.lang.Integer.bitCount
def hammingDistance(a: Vector[Byte], b: Vector[Byte]): Int = {
// Number of bits that differ.
val baseDiff = a.zip(b).map { case (x, y) => bitCount((x ^ y) & 0xff) } .sum
// Also count a length mismatch as eight different bits per byte.
val extraDiff = math.abs(a.length - b.length) * 8
baseDiff + extraDiff
}
def padPkcs7(a: Vector[Byte], blockSize: Int): Vector[Byte] = {
if (blockSize == 0) return a ++ Vector(1.toByte)
val modSz = (a.length % blockSize)
val padLen = if (modSz == 0) { blockSize } else { blockSize - modSz }
a.padTo(a.length + padLen, padLen.toByte)
}
def unpadPkcs7(padded: Vector[Byte]): Vector[Byte] = {
require(padded.length > 0)
val b = padded.last
assert(b > 0)
val data = padded.take(padded.length - b)
val padding = padded.drop(padded.length - b)
assert(padding.forall(x => x == b))
data
}
}
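// A minimal usage sketch (not part of the original file): PKCS#7 padding fills
// up to the next block boundary with the pad length, and unpadPkcs7 reverses it,
// as exercised by challenge 9.
object UtilPaddingExample {
  def main(args: Array[String]): Unit = {
    val input = Encoding.encodeAscii("YELLOW SUBMARINE")
    val padded = Util.padPkcs7(input, 20)
    // 16 input bytes padded to a 20-byte block: four trailing bytes of value 4.
    assert(padded.length == 20 && padded.takeRight(4).forall(_ == 4.toByte))
    assert(Util.unpadPkcs7(padded) == input)
  }
}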
|
ruuda/matasano
|
src/test/scala/Aes.scala
|
<gh_stars>0
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
import org.scalacheck.Gen
import org.scalacheck.Properties
import org.scalacheck.Prop.forAll
import com.matasano.Aes
object AesTests extends Properties("Aes") {
val blocks = Gen.listOfN(16, Gen.choose(0, 255)).map(_.map(_.toByte).toVector)
property("bijectionBlock") = forAll(blocks, blocks) { (block: Vector[Byte], key: Vector[Byte]) =>
Aes.decrypt(key)(Aes.encrypt(key)(block)) == block
}
property("bijectionBlock") = forAll(blocks, blocks) { (block: Vector[Byte], key: Vector[Byte]) =>
Aes.encrypt(key)(Aes.decrypt(key)(block)) == block
}
}
|
ruuda/matasano
|
src/main/scala/Set1/Challenge5.scala
|
<filename>src/main/scala/Set1/Challenge5.scala
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
import com.matasano.Crypto
import com.matasano.Encoding
object Challenge5 {
def main(args: Array[String]) = {
val key = Encoding.encodeAscii("ICE")
val input = Encoding.encodeAscii("Burning 'em, " +
"if you ain't quick and nimble\n" +
"I go crazy when I hear a cymbal")
val data = Crypto.xorRepeat(input, key)
val output = Encoding.encodeHex(data)
println(output)
}
}
|
ruuda/matasano
|
src/test/scala/Encoding.scala
|
<reponame>ruuda/matasano<filename>src/test/scala/Encoding.scala<gh_stars>0
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
import org.scalacheck.Properties
import org.scalacheck.Prop.forAll
import com.matasano.Encoding
object EncodingTests extends Properties("Encoding") {
property("bijectionByte") = forAll { (byte: Byte) =>
Encoding.decodeByte(Encoding.encodeByte(byte)) == byte
}
property("bijectionTriple") = forAll { (b0: Byte, b1: Byte, b2: Byte) =>
val triple = Vector(b0, b1, b2)
Encoding.decodeTriple(Encoding.encodeTriple(triple)) == triple
}
property("bijectionTriplePad1") = forAll { (b0: Byte, b1: Byte) =>
val pair = Vector(b0, b1)
Encoding.decodeTriple(Encoding.encodeTriple(pair)) == pair
}
property("bijectionTriplePad2") = forAll { (b0: Byte) =>
val byte = Vector(b0)
Encoding.decodeTriple(Encoding.encodeTriple(byte)) == byte
}
property("bijectionHex") = forAll { (data: Vector[Byte]) =>
Encoding.decodeHex(Encoding.encodeHex(data)) == data
}
property("bijectionBase64") = forAll { (data: Vector[Byte]) =>
Encoding.decodeBase64(Encoding.encodeBase64(data)) == data
}
property("bijectionAscii") = forAll { (data: Vector[Byte]) =>
Encoding.encodeAscii(Encoding.decodeAscii(data)) == data
}
}
|
ruuda/matasano
|
src/main/scala/Set1/Challenge6.scala
|
<filename>src/main/scala/Set1/Challenge6.scala
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
import io.Source
import com.matasano.Crypto
import com.matasano.Encoding
import com.matasano.TextDetection
import com.matasano.Util
object Challenge6 {
/// Returns a list of key size guesses, from most likely to least likely.
def guessKeySize(ciphertext: Vector[Byte], minSize: Int, maxSize: Int): Vector[Int] = {
val rankedSizes = (minSize to maxSize).map(keySize => {
// Inspect the first ten blocks for this key size.
val groups = ciphertext.grouped(keySize).take(10).toVector
val distances = for {
i <- 0 to groups.length - 1
j <- i + 1 to groups.length - 1
} yield Util.hammingDistance(groups(i), groups(j))
// Compute average number of different bits per byte.
val numGroups = groups.length * (groups.length - 1) / 2
val meanDistance = distances.sum.toFloat / numGroups.toFloat
val normDistance = meanDistance / keySize.toFloat
(normDistance, keySize)
})
rankedSizes.sorted.map { case (d, sz) => sz } .toVector
}
def guessKey(ciphertext: Vector[Byte], keySize: Int): Vector[Byte] = {
val rankHist = TextDetection.buildHistogramRanker("data/frequency.md")
Vector.tabulate(keySize) { i => {
val cipherChars = ciphertext.grouped(keySize)
.filter(block => block.length > i)
.map(block => block(i))
.toVector
val keys = Vector.tabulate(256) { key => {
val plaintext = Crypto.xorSingle(cipherChars, key.toByte)
val histogram = TextDetection.buildHistogram(plaintext.map(byte => byte.toChar).to)
val rank = rankHist(histogram)
(key, rank)
}}
val (bestKey, _) = keys.maxBy { case (key, rank) => rank }
bestKey.toByte
}}
}
def main(args: Array[String]) = {
val file = Source.fromFile("data/challenge6.md")
val ciphertext = Encoding.decodeBase64(file.getLines.mkString)
val keySize = guessKeySize(ciphertext, 2, 40).head
println(s"key size guess: $keySize")
val key = guessKey(ciphertext, keySize)
val keyStr = Encoding.decodeAscii(key)
println(s"key guess: $keyStr")
val plaintext = Encoding.decodeAscii(Crypto.xorRepeat(ciphertext, key))
println(s"plaintext:\n\n$plaintext")
file.close()
}
}
|
ruuda/matasano
|
src/test/scala/Set1.scala
|
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
import org.scalacheck.Properties
import com.matasano.Crypto
import com.matasano.Encoding
object Set1Spec extends Properties("Set1") {
property("challenge1") = {
val input = "49276d206b696c6c696e6720796f757220627261696e206c" +
"696b65206120706f69736f6e6f7573206d757368726f6f6d"
val output = "SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29" +
"ub3VzIG11c2hyb29t"
output == Encoding.encodeBase64(Encoding.decodeHex(input))
}
property("challenge2") = {
val inputA = Encoding.decodeHex("1c0111001f010100061a024b53535009181c")
val inputB = Encoding.decodeHex("686974207468652062756c6c277320657965")
val output = Encoding.decodeHex("746865206b696420646f6e277420706c6179")
output == Crypto.xor(inputA, inputB)
}
property("challenge5") = {
val input = Encoding.encodeAscii("Burning 'em, " +
"if you ain't quick and nimble\n" +
"I go crazy when I hear a cymbal")
val output = Encoding.decodeHex("0b3637272a2b2e63622c2e69692a23693a2a3" +
"<KEY>" +
"<KEY>" +
"<KEY>")
val key = Encoding.encodeAscii("ICE")
output == Crypto.xorRepeat(input, key)
}
}
|
ruuda/matasano
|
src/test/scala/Util.scala
|
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
import org.scalacheck.Arbitrary
import org.scalacheck.Gen
import org.scalacheck.Properties
import org.scalacheck.Prop.forAll
import com.matasano.Encoding
import com.matasano.Util
object UtilTests extends Properties("Util") {
val bytevectors = Arbitrary.arbitrary[Vector[Byte]]
val blockLength = Gen.choose(0, 64)
property("hammingDistance") = {
val a = Encoding.encodeAscii("this is a test")
val b = Encoding.encodeAscii("wokka wokka!!!")
Util.hammingDistance(a, b) == 37
}
property("hammingDistanceBoundedByInput") = forAll { (a: Vector[Byte], b: Vector[Byte]) =>
val diff = Util.hammingDistance(a, b)
val maxDiff = math.max(a.length, b.length) * 8
diff <= maxDiff
}
property("bijectionPadPkcs7") = forAll(bytevectors, blockLength) { (a: Vector[Byte], b: Int) =>
a == Util.unpadPkcs7(Util.padPkcs7(a, b))
}
}
|
ruuda/matasano
|
build.sbt
|
name := "matasano"
libraryDependencies += "org.scalacheck" %% "scalacheck" % "1.13.4" % "test"
|
ruuda/matasano
|
src/main/scala/Aes.scala
|
<filename>src/main/scala/Aes.scala
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
package com.matasano
object Aes {
import javax.crypto.Cipher
import javax.crypto.spec.SecretKeySpec
def encrypt(key: Vector[Byte]): Vector[Byte] => Vector[Byte] = {
// Use the Java AES implementation only for its block cipher.
// The point of this exercise is to learn how to do things myself,
// so I will not use the standard block-chaining or padding mechanisms.
val cipher = Cipher.getInstance("AES/ECB/NoPadding")
val keySpec = new SecretKeySpec(key.toArray, "AES")
cipher.init(Cipher.ENCRYPT_MODE, keySpec)
block => {
require(block.length == 16, "block length must be 16")
cipher.doFinal(block.toArray).toVector
}
}
def decrypt(key: Vector[Byte]): Vector[Byte] => Vector[Byte] = {
val cipher = Cipher.getInstance("AES/ECB/NoPadding")
val keySpec = new SecretKeySpec(key.toArray, "AES")
cipher.init(Cipher.DECRYPT_MODE, keySpec)
block => {
require(block.length == 16, "block length must be 16")
cipher.doFinal(block.toArray).toVector
}
}
}
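// A minimal sketch (not part of the original file): a single 16-byte block
// round-trips through the raw ECB block cipher wrappers above.
object AesExample {
  def main(args: Array[String]): Unit = {
    val key = Encoding.encodeAscii("YELLOW SUBMARINE")
    val block = Encoding.encodeAscii("exactly 16 bytes")
    assert(Aes.decrypt(key)(Aes.encrypt(key)(block)) == block)
  }
}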
|
ruuda/matasano
|
src/main/scala/Set1/Challenge3.scala
|
<filename>src/main/scala/Set1/Challenge3.scala
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
import com.matasano.Crypto
import com.matasano.Encoding
import com.matasano.TextDetection
object Challenge3 {
def main(args: Array[String]) = {
val input = "1b37373331363f78151b7f2b783431333d" +
"78397828372d363c78373e783a393b3736"
val ciphertext = Encoding.decodeHex(input)
val candidates = (0 to 255).map(key => Crypto.xorSingle(ciphertext, key.toByte))
val strcandidates = candidates.map(bytes => Encoding.decodeAscii(bytes))
val rank = TextDetection.buildRanker("data/frequency.md")
val plaintext = strcandidates.maxBy(rank)
println(plaintext)
}
}
|
ruuda/matasano
|
src/main/scala/Set1/Challenge1.scala
|
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
import com.matasano.Encoding
object Challenge1 {
def main(args: Array[String]) = {
val input = "49276d206b696c6c696e6720796f757220627261696e206c" +
"<KEY>"
val data = Encoding.decodeHex(input)
val output = Encoding.encodeBase64(data)
println(output)
}
}
|
ruuda/matasano
|
src/test/scala/Crypto.scala
|
<reponame>ruuda/matasano
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
import org.scalacheck.Gen
import org.scalacheck.Properties
import org.scalacheck.Prop.forAll
import com.matasano.Aes
import com.matasano.Crypto
object CryptoTests extends Properties("Crypto") {
val blocks = Gen.listOfN(16, Gen.choose(0, 255)).map(_.map(_.toByte).toVector)
val nBlocks = Gen.listOfN(64, Gen.choose(0, 255)).map(_.map(_.toByte).toVector)
property("bijectionEncryptCbc") = forAll(blocks, blocks, nBlocks) {
(iv: Vector[Byte], key: Vector[Byte], plaintext: Vector[Byte]) => {
val decrypt = Crypto.decryptCbc(Aes.decrypt(key)) _
val encrypt = Crypto.encryptCbc(Aes.encrypt(key)) _
decrypt(iv, encrypt(iv, plaintext)) == plaintext
}
}
}
|
ruuda/matasano
|
src/main/scala/TextDetection.scala
|
<reponame>ruuda/matasano
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
package com.matasano
import collection.immutable.SortedMap
import io.Source
object TextDetection {
type Histogram[A] = SortedMap[(Char, Char), A]
def buildHistogramPairFromFile(fname: String): Histogram[Int] = {
val file = Source.fromFile(fname)
var histogram: SortedMap[(Char, Char), Int] = SortedMap()
for (line <- file.getLines()) {
val lowerLine = line.map(c => c.toLower)
for (pair <- lowerLine.zip(lowerLine.drop(1))) {
val count = histogram.getOrElse(pair, 0)
histogram = histogram.updated(pair, count + 1)
}
}
file.close()
histogram
}
def buildHistogramSingleFromFile(fname: String): SortedMap[Char, Int] = {
val file = Source.fromFile(fname)
val chars = file.getLines().flatMap(line => line.to)
val histogram = buildHistogram(chars)
file.close()
histogram
}
def buildHistogram(chars: Iterator[Char]): SortedMap[Char, Int] = {
val empty: SortedMap[Char, Int] = SortedMap()
chars.map(c => c.toLower).foldLeft(empty) { (hist, c) =>
val count = hist.getOrElse(c, 0)
hist.updated(c, count + 1)
}
}
// Ensures that every value in the histogram is in the interval [0, 2].
def normalizeHistogramMax[Key](counts: SortedMap[Key, Int]): SortedMap[Key, Double] = {
val maxCount = counts.foldLeft(0) { case (acc, (k, n)) => math.max(acc, n) }
counts.mapValues(n => 2.0 * n / maxCount)
}
// Ensures that all values in the histogram sum to 1.0.
def normalizeHistogram[Key](counts: SortedMap[Key, Int]): SortedMap[Key, Double] = {
val sum = counts.foldLeft(0) { case (acc, (k, n)) => acc + n }
counts.mapValues(n => n / sum.toDouble)
}
// Builds a function that ranks strings based on consecutive character
// frequencies extracted from the sample file.
def buildRanker(fname: String): String => Double = {
val histInt = buildHistogramPairFromFile(fname)
val histDouble = normalizeHistogramMax(histInt)
def rankChars(chars: (Char, Char)): Double =
histDouble.getOrElse(chars, 0.05)
def rankString(str: String): Double = {
val lowerStr = str.map(c => c.toLower)
val pairs = lowerStr.zip(lowerStr.drop(1))
pairs.foldLeft(1.0) { (acc, chars) => acc * rankChars(chars) }
}
rankString
}
// Builds a function that ranks histograms based on how similar they are to
// the character frequencies in the sample file.
def buildHistogramRanker(fname: String): SortedMap[Char, Int] => Double = {
val histInt = buildHistogramSingleFromFile(fname)
val histDouble = normalizeHistogram(histInt)
def rankHistogram(sampleHist: SortedMap[Char, Int]): Double = {
val sampleDouble = normalizeHistogram(sampleHist)
// Consider the histograms as vectors, and compute the squared L2 norm of
// their difference.
val keys = histDouble.keySet ++ sampleDouble.keySet
val diffSqr = keys.foldLeft(0.0) { (acc, c) => {
val left = histDouble.getOrElse(c, 0.0)
val right = sampleDouble.getOrElse(c, 0.0)
acc + (left - right) * (left - right)
}}
// A higher ranking is better, so negate the difference.
-diffSqr
}
rankHistogram
}
}
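// Hypothetical usage sketch (not part of the original file): rank candidate
// strings by how English-like their character pairs look, using the same sample
// file path as the challenges ("data/frequency.md").
object TextDetectionExample {
  def main(args: Array[String]): Unit = {
    val rank = TextDetection.buildRanker("data/frequency.md")
    val candidates = Vector("clearly english text", "q9(#kd!!zp0~x")
    // The ranker multiplies pair frequencies, so the English-looking string wins.
    println(candidates.maxBy(rank))
  }
}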
|
ruuda/matasano
|
src/main/scala/Set1/Challenge2.scala
|
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
import com.matasano.Crypto
import com.matasano.Encoding
object Challenge2 {
def main(args: Array[String]) = {
val inputA = Encoding.decodeHex("1c0111001f010100061a024b53535009181c")
val inputB = Encoding.decodeHex("686974207468652062756c6c277320657965")
val data = Crypto.xor(inputA, inputB)
val output = Encoding.encodeHex(data)
println(output)
}
}
|
ruuda/matasano
|
src/main/scala/Set1/Challenge8.scala
|
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
import io.Source
import com.matasano.Encoding
import com.matasano.Util
object Challenge8 {
def repetition(ciphertext: Vector[Byte]): Float = {
// Compute the Hamming distance between all pairs of blocks. If the
// blocks are encrypted properly then they are random, so their Hamming
// distance is 0.5 * 8 * 16 on average. If the blocks were encrypted
// individually, similarities will yield a lower distance.
val groups = ciphertext.grouped(16).toVector
val distances = for {
i <- 0 to groups.length - 1
j <- i + 1 to groups.length - 1
} yield {
require(groups(i).length == groups(j).length)
Util.hammingDistance(groups(i), groups(j))
}
val numPairs = groups.length * (groups.length - 1) / 2
val meanDistance = distances.sum.toFloat / numPairs.toFloat
// Express the distance relative to the expected distance.
meanDistance / 64.0f
}
def main(args: Array[String]) = {
val file = Source.fromFile("data/challenge8.md")
val ciphertexts = file.getLines.map(line => Encoding.decodeHex(line))
val ranked = ciphertexts.map(ct => (repetition(ct), ct))
val top = ranked.toVector.sortWith {
case ((r1, _), (r2, _)) => r1 < r2
} .take(10)
println("Candidates (with confidence, lower is better)")
for ((r, ct) <- top) {
val hdata = Encoding.encodeHex(ct)
println(s"$r $hdata")
}
file.close()
}
}
|
ruuda/matasano
|
src/test/scala/Set2.scala
|
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
import org.scalacheck.Properties
import com.matasano.Encoding
import com.matasano.Util
object Set2Spec extends Properties("Set2") {
property("challenge9") = {
val input = Encoding.encodeAscii("YELLOW SUBMARINE")
val output = Encoding.encodeAscii("YELLOW SUBMARINE\u0004\u0004\u0004\u0004")
output == Util.padPkcs7(input, 20)
}
}
|
ruuda/matasano
|
src/main/scala/Set2/Challenge10.scala
|
<filename>src/main/scala/Set2/Challenge10.scala
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
import io.Source
import com.matasano.Aes
import com.matasano.Crypto
import com.matasano.Encoding
import com.matasano.Util
object Challenge10 {
def main(args: Array[String]) = {
val file = Source.fromFile("data/challenge10.txt")
val ciphertext = Encoding.decodeBase64(file.getLines.mkString)
val key = Encoding.encodeAscii("YELLOW SUBMARINE")
val decrypt = Aes.decrypt(key)
val iv: Vector[Byte] = Vector(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
val plaintext = Crypto.decryptCbc(decrypt)(iv, ciphertext)
println(Encoding.decodeAscii(Util.unpadPkcs7(plaintext)))
file.close()
}
}
|
ruuda/matasano
|
src/main/scala/Encoding.scala
|
// Copyright 2016 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License is available in the root of the repository.
package com.matasano
object Encoding {
val hexChars = "0123456789abcdef"
val base64Chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def decodeByte(str: String): Byte = {
if (str.length != 2) {
throw new Exception("Invalid input, not an integer number of bytes.")
}
val high = hexChars.indexOf(str(0))
val low = hexChars.indexOf(str(1))
if (high == -1 || low == -1) {
throw new Exception(s"Invalid input, '$str' is not hexadecimal.")
} else {
((high << 4) | low).toByte
}
}
def encodeByte(byte: Byte): String = {
val index0 = (byte & 255) >> 4
val index1 = (byte & 255)
val indices = Vector(index0 & 15, index1 & 15)
val chars = indices.map(i => hexChars(i))
chars.mkString
}
def decodeTriple(str: String): Vector[Byte] = {
if (str.length != 4) {
throw new Exception("Invalid input, not an integer number of bytes.")
}
// Drop the padding characters.
val withoutPadding = str.stripSuffix("==").stripSuffix("=")
val numBytes = withoutPadding.length - 1
// Fill up with zeroes to avoid index out of bounds.
val nullStr = withoutPadding.padTo(4, base64Chars(0))
val index0 = base64Chars.indexOf(nullStr(0))
val index1 = base64Chars.indexOf(nullStr(1))
val index2 = base64Chars.indexOf(nullStr(2))
val index3 = base64Chars.indexOf(nullStr(3))
if (index0 == -1 || index1 == -1 || index2 == -1 || index3 == -1) {
throw new Exception(s"Invalid input, '$str' is not base64.")
}
val byte0 = (index0 << 2) | (index1 >> 4)
val byte1 = (index1 << 4) | (index2 >> 2)
val byte2 = (index2 << 6) | (index3)
Vector(byte0.toByte, byte1.toByte, byte2.toByte).take(numBytes)
}
def encodeTriple(triple: Vector[Byte]): String = {
// Pad with zeroes if the triple contains less than three bytes.
val numBytes = triple.length
val padded = triple.padTo(3, 0.toByte)
// Note: The & 255 is required here to make the shift behave like a regular
// shift of an unsigned integer.
val index0 = (padded(0) & 255) >> 2
val index1 = ((padded(0) & 3) << 4) | ((padded(1) & 255) >> 4)
val index2 = ((padded(1) & 15) << 2) | ((padded(2) & 255) >> 6)
val index3 = (padded(2) & 255)
val indices = Vector(index0 & 63, index1 & 63, index2 & 63, index3 & 63)
val chars = indices.map(i => base64Chars(i))
// Replace encoded zeroes with padding characters.
chars.mkString.take(numBytes + 1).padTo(4, '=')
}
def decodeHex(str: String): Vector[Byte] =
str.grouped(2).map(b => decodeByte(b)).toVector
def encodeHex(data: Vector[Byte]): String =
data.map(b => encodeByte(b)).mkString
def decodeBase64(str: String): Vector[Byte] =
str.grouped(4).flatMap(t => decodeTriple(t)).toVector
def encodeBase64(data: Vector[Byte]): String =
data.grouped(3).map(t => encodeTriple(t)).mkString
def decodeAscii(data: Vector[Byte]): String =
// Actually use Latin 1 to ensure that the upper 128 bytes
// can be mapped as well. Do not use UTF-8, because not every
// byte sequence is valid UTF-8.
new String(data.toArray, "ISO8859_1")
def encodeAscii(str: String): Vector[Byte] =
str.getBytes("ISO8859_1").toVector
}
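// A small usage sketch (not part of the original file) of the hex and base64
// helpers above. "Man" is the classic base64 example and encodes to "TWFu".
object EncodingExample {
  def main(args: Array[String]): Unit = {
    assert(Encoding.encodeBase64(Encoding.encodeAscii("Man")) == "TWFu")
    assert(Encoding.encodeHex(Vector(0x49.toByte, 0x27.toByte)) == "4927")
    assert(Encoding.decodeHex("4927") == Vector(0x49.toByte, 0x27.toByte))
  }
}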
|
kmbnw/multitreat
|
scala/build.sbt
|
name := "multitreat"
version := "0.1a"
organization := "net.kmbnw"
scalaVersion := "2.10.5"
libraryDependencies += "org.apache.spark" %% "spark-core" % "1.6.2"
libraryDependencies += "org.apache.spark" %% "spark-sql" % "1.6.2"
libraryDependencies += "org.apache.spark" %% "spark-hive" % "1.6.2"
libraryDependencies += "com.holdenkarau" %% "spark-testing-base" % "1.6.1_0.3.3"
resolvers += Resolver.mavenLocal
javaOptions ++= Seq("-Xms512M", "-Xmx2048M", "-XX:MaxPermSize=2048M", "-XX:+CMSClassUnloadingEnabled")
parallelExecution in Test := false
|
kmbnw/multitreat
|
scala/src/test/scala/net/kmbnw/multitreat/TestMultitreat.scala
|
<gh_stars>1-10
/*
* Copyright 2016 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.kmbnw.multitreat
import org.apache.spark.sql.functions._
import org.apache.spark.sql.DataFrame
import com.holdenkarau.spark.testing._
class test extends DataFrameSuiteBase {
private val tol = 0.001
test("designNumericSingleGroup") {
val sqlCtx = sqlContext
import sqlCtx.implicits._
val expected = sqlContext.read.json("src/test/resources/salary_one_group.json")
val df = expected.drop("title_catN")
val treatmentPlan = new Multitreat("amount", List("title"))
val treatments = treatmentPlan.designNumeric(df)
val treated = treatmentPlan.applyTreatments(df, treatments)
// apparently these have to be explicitly ordered or the equality check can fail
assertDataFrameApproximateEquals(
expected.select("amount", "employer", "title", "title_catN"),
treated.select("amount", "employer", "title", "title_catN"),
tol)
/*val input2 = sc.parallelize(List(4, 5, 6)).toDF
intercept[org.scalatest.exceptions.TestFailedException] {
assertDataFrameEquals(input1, input2) // not equal
}*/
}
test("designNumericMultiGroup") {
val sqlCtx = sqlContext
import sqlCtx.implicits._
val expected = sqlContext.read.json("src/test/resources/salary_multi_group.json")
val df = expected.drop("title_catN").drop("employer_catN")
val treatmentPlan = new Multitreat("amount", List("title", "employer"))
val treatments = treatmentPlan.designNumeric(df)
val treated = treatmentPlan.applyTreatments(df, treatments)
// apparently these have to be explicitly ordered or the equality check can fail
assertDataFrameApproximateEquals(
expected.select("amount", "employer", "title", "title_catN", "employer_catN"),
treated.select("amount", "employer", "title", "title_catN", "employer_catN"),
tol)
}
// single row means standard deviation of zero; ensure that is OK
test("designNumericOneRow") {
val sqlCtx = sqlContext
import sqlCtx.implicits._
val expected = sqlContext.read.json("src/test/resources/salary_one_row.json")
val df = expected.drop("title_catN")
val treatmentPlan = new Multitreat("amount", List("title"))
val treatments = treatmentPlan.designNumeric(df)
val treated = treatmentPlan.applyTreatments(df, treatments)
// apparently these have to be explicitly ordered or the equality check can fail
assertDataFrameApproximateEquals(
expected.select("amount", "employer", "title", "title_catN"),
treated.select("amount", "employer", "title", "title_catN"),
tol)
}
}
|
kmbnw/multitreat
|
scala/src/main/scala/net/kmbnw/multitreat/TreatmentPlan.scala
|
<filename>scala/src/main/scala/net/kmbnw/multitreat/TreatmentPlan.scala
/*
* Copyright 2016 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.kmbnw.multitreat
import org.apache.spark.sql.functions._
import org.apache.spark.sql.DataFrame
// e.g. val treatmentPlan = new TreatmentPlan("salary", List("job_title", "employer"))
// val treatments = treatmentPlan.designNumeric(df)
// treatments.show()
// val treated = treatmentPlan.applyTreatments(df, treatments)
class TreatmentPlan(
val targetCol: String,
val treatmentCols: Seq[String]) {
def designNumeric(df: DataFrame): Map[String, DataFrame] = {
return treatmentCols.zip(
treatmentCols.map(x => designNumericOnCol(df, x))
).toMap
}
def applyTreatments(
df: DataFrame,
treatments: Map[String, DataFrame]): DataFrame = {
var treated: DataFrame = df
// TODO this can probably be done with fold or reduce
for ((colname, treatment) <- treatments) {
treated = treated.join(treatment, colname)
}
return treated
}
private def designNumericOnCol(df: DataFrame, groupCol: String): DataFrame = {
// overall dataframe mean and standard deviation
val dfNoNA = df.select(targetCol, groupCol).na.drop()
val naFill = 1e-6
val sampleMean = dfNoNA.select(avg(targetCol)).first().getDouble(0)
val sampleSd = dfNoNA.select(stddev(targetCol)).first().getDouble(0)
// using the simple version of lambda from the paper: lambda = n / (m + n)
// where m = group_sd / sample_sd
// a fill-in value of 1e-6 is used for lambda when only one sample exists
// TODO I would like to make lambda user-settable
val dfMeans = dfNoNA.groupBy(groupCol).agg(
avg(targetCol).as("mean"),
count(targetCol).divide(
stddev(targetCol).divide(sampleSd)
.plus(count(targetCol))).as("lambda")
).na.fill(naFill, Array("lambda"))
// this is the Bayesian formula:
// lambda * group_mean + (1 - lambda) * sample_mean
val lambda = dfMeans.col("lambda")
val groupMean = dfMeans.col("mean")
val treatedCol = groupCol + "_catN"
val ret = dfMeans.withColumn(
treatedCol,
lambda.multiply(groupMean).plus(
lambda.minus(1).multiply(-1 * sampleMean))
)
return ret.select(groupCol, treatedCol)
}
}
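// Worked sketch (not part of the original file) of the shrinkage formula used
// above, with made-up numbers: lambda = n / (m + n) where m = group_sd / sample_sd,
// and the treated value is lambda * group_mean + (1 - lambda) * sample_mean.
object TreatmentPlanFormulaExample {
  def main(args: Array[String]): Unit = {
    val sampleMean = 50000.0 // overall mean of the target column
    val sampleSd = 20000.0   // overall standard deviation
    val groupMean = 80000.0  // mean within one group level
    val groupSd = 10000.0    // standard deviation within that level
    val n = 8.0              // rows in the group
    val lambda = n / (groupSd / sampleSd + n)
    val treated = lambda * groupMean + (1 - lambda) * sampleMean
    // Small groups shrink toward the overall mean; large groups keep their own mean.
    println(f"lambda = $lambda%.3f, treated mean = $treated%.1f")
  }
}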
|
phymbert/spark-search
|
sql/src/test/scala/org/apache/spark/search/sql/TestData.scala
|
<reponame>phymbert/spark-search<filename>sql/src/test/scala/org/apache/spark/search/sql/TestData.scala
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.sql
import java.io.{File, Reader}
import java.util.function.Consumer
import org.apache.lucene.analysis.Analyzer.TokenStreamComponents
import org.apache.lucene.analysis.shingle.ShingleFilter
import org.apache.lucene.analysis.standard.StandardTokenizer
import org.apache.lucene.analysis.synonym.{SynonymGraphFilter, SynonymMap}
import org.apache.lucene.analysis.{Analyzer, LowerCaseFilter, TokenStream}
import org.apache.lucene.util.CharsRef
import org.apache.spark.sql.{Dataset, SparkSession}
object TestData {
case class Company(name: String)
lazy val companies1: String =
new File(this.getClass.getResource("/companies-1.csv").toURI).getAbsolutePath
def companies1DS(spark: SparkSession): Dataset[Company] = {
import spark.implicits._
spark.read
.option("header", "true")
.option("inferSchema", "true")
.csv(companies1)
.as[Company]
}
lazy val companies2: String =
new File(this.getClass.getResource("/companies-2.csv").toURI).getAbsolutePath
def companies2DS(spark: SparkSession): Dataset[Company] = {
import spark.implicits._
spark.read
.option("header", "true")
.option("inferSchema", "true")
.csv(companies2)
.as[Company]
}
class TestCompanyNameAnalyzer extends Analyzer {
override def createComponents(fieldName: String): TokenStreamComponents = {
val src = new StandardTokenizer()
var tok: TokenStream = new LowerCaseFilter(src)
val builder = new SynonymMap.Builder(true)
builder.add(new CharsRef("ltd"), new CharsRef("l.t.d"), true)
builder.add(new CharsRef("ltd"), new CharsRef("limited"), true)
builder.add(new CharsRef("inc"), new CharsRef("corporation"), true)
builder.add(new CharsRef("inc"), new CharsRef("corp"), true)
tok = new SynonymGraphFilter(tok, builder.build(), false)
val shingle = new ShingleFilter(tok, 2, 2)
shingle.setOutputUnigrams(true)
shingle.setOutputUnigramsIfNoShingles(true)
tok = shingle
new TokenStreamComponents(new Consumer[Reader] {
override def accept(r: Reader): Unit = src.setReader(r)
}, tok)
}
}
}
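// Hypothetical sketch (not part of the original file): feed a company name
// through TestCompanyNameAnalyzer and print the emitted tokens (lowercased,
// synonym-expanded and shingled), using the standard Lucene TokenStream API.
object TestCompanyNameAnalyzerExample {
  import org.apache.lucene.analysis.tokenattributes.CharTermAttribute
  def main(args: Array[String]): Unit = {
    val analyzer = new TestData.TestCompanyNameAnalyzer
    val stream = analyzer.tokenStream("name", "Acme Corporation Ltd")
    val term = stream.addAttribute(classOf[CharTermAttribute])
    stream.reset()
    while (stream.incrementToken()) println(term.toString)
    stream.end()
    stream.close()
    analyzer.close()
  }
}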
|
phymbert/spark-search
|
core/src/main/scala/org/apache/spark/search/rdd/SearchRDD.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.rdd
import org.apache.lucene.search.Query
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.search._
import scala.reflect.ClassTag
/**
* A search RDD: Spark Search brings
* advanced full-text search features to your RDD.
*
* {@link org.apache.spark.search.rdd.SearchRDDLucene}
*
* @tparam S Doc type to index
* @author <NAME>
*/
trait SearchRDD[S] {
/**
* Default top K.
*/
val defaultTopK = 1000000
/**
* Return the number of indexed elements in the RDD.
*/
def count(): Long
/**
* Count how many documents match the given text query.
*
* @param query Matching query
* @return Matched doc count
*/
def count(query: String): Long =
count(parseQueryString(query, options))
/**
* Count how many documents match the given lucene query.
*
* @param query Query to match
* @return Matched doc count
*/
def count(query: StaticQueryProvider): Long
/**
* Searches the top k hits for this query string.
*
* @param query Lucene query syntax
* @param topK topK to return
* @param minScore minimum score of matching documents
* @return topK hits collected to the driver as an array
* @note this method should only be used if the topK is expected to be small, as
* all the data is loaded into the driver's memory.
*/
def searchList(query: String,
topK: Int = defaultTopK,
minScore: Double = 0): Array[SearchRecord[S]]
= searchListQuery(parseQueryString(query, options), topK, minScore)
/**
* Searches the top topK hits for this Lucene query.
*
* @param query Lucene query syntax
* @param topK topK to return
* @param minScore minimum score of matching documents
* @return topK hits
* @note this method should only be used if the topK is expected to be small, as
* all the data is loaded into the driver's memory.
*/
def searchListQuery(query: StaticQueryProvider,
topK: Int = defaultTopK,
minScore: Double = 0): Array[SearchRecord[S]]
/**
* Searches for the top K hits
* per partition for this query string
* and returns an RDD with all hits sorted by score in descending order.
*
* @param query Lucene query syntax
* @param topKByPartition topK to return per partition
* @param minScore minimum score of matching documents
* @return topK per partition hits RDD sorted by score in descending order
*/
def search(query: String,
topKByPartition: Int = defaultTopK,
minScore: Double = 0): RDD[SearchRecord[S]] =
searchQuery(parseQueryString(query, options), topKByPartition, minScore)
/**
* Searches for the top K hits per partition for this lucene query
* and returns an RDD with all hits sorted by score in descending order.
*
* @param query Lucene query
* @param topKByPartition topK to return per partition
* @param minScore minimum score of matching documents
* @return topK per partition hits RDD sorted by score in descending order
*/
def searchQuery(query: StaticQueryProvider,
topKByPartition: Int = defaultTopK,
minScore: Double = 0): RDD[SearchRecord[S]]
/**
* Joins the elements of another RDD against this indexed RDD
* by building a lucene query string per doc
* and returns the matching hit as tuples.
*
* @param other to match with
* @param queryBuilder builds the query string to match with the searched document
* @param topKByPartition – topK to return by partition
* @param minScore minimum score of matching documents
* @tparam W Doc type to match with
* @return matches doc and related hit RDD
*/
def searchJoin[W: ClassTag](other: RDD[W],
queryBuilder: W => String,
topKByPartition: Int = defaultTopK,
minScore: Double = 0): RDD[(W, Option[SearchRecord[S]])] =
searchJoinQuery(other, queryStringBuilder(queryBuilder, options), topKByPartition, minScore)
/**
* Joins the elements of another RDD against this indexed RDD
* by building a lucene query per doc
* and returns the matching hit as tuples.
*
* @param other to match with
* @param queryBuilder builds the query string to match with the searched document
* @param topKByPartition – topK to return by partition
* @param minScore minimum score of matching documents
* @tparam W Doc type to match with
* @return matches doc and related hit RDD
*/
def searchJoinQuery[W: ClassTag](other: RDD[W],
queryBuilder: W => Query,
topKByPartition: Int = defaultTopK,
minScore: Double = 0): RDD[(W, Option[SearchRecord[S]])]
/**
* Matches the elements of another RDD against this indexed RDD
* by building a lucene query string per doc
* and returns the matching hits per doc.
*
* @param rdd to match with
* @param queryBuilder builds the query string to match with the searched document
* @param topK topK to return
* @param minScore minimum score of matching documents
* @tparam K key type of doc used in the top k monoid
* @tparam V Doc type to match with
* @return matches doc and related hits RDD
*/
def matches[K, V](rdd: RDD[(K, V)],
queryBuilder: V => String,
topK: Int = defaultTopK,
minScore: Double = 0)
(implicit kClassTag: ClassTag[K],
vClassTag: ClassTag[V]): RDD[(K, (V, Array[SearchRecord[S]]))] =
matchesQuery(rdd, queryStringBuilder(queryBuilder, options), topK, minScore)
/**
* Matches the elements of another RDD against this indexed RDD
* by building a lucene query per doc
* and returns the matching hits per doc.
*
* @param rdd to match with
* @param queryBuilder builds the lucene query to join with the searched document
* @param topK topK to return
* @param minScore minimum score of matching documents
* @tparam K key type of doc used in the top k monoid
* @tparam V Doc type to match with
* @return matches doc and related hits RDD
*/
def matchesQuery[K, V](rdd: RDD[(K, V)],
queryBuilder: V => Query,
topK: Int = defaultTopK,
minScore: Double = 0)
(implicit kClassTag: ClassTag[K],
vClassTag: ClassTag[V]): RDD[(K, (V, Array[SearchRecord[S]]))]
private[spark] def elementClassTag: ClassTag[S]
/**
* Alias for searchDropDuplicates
*/
def distinct(numPartitions: Int): RDD[S] = {
implicit val classTagS: ClassTag[S] = elementClassTag
searchDropDuplicates[Long, S]()
}
/**
* Drops duplicated records by looking up matching hits for the query against this RDD.
*
* @param queryBuilder builds the lucene query to search for duplicate
* @param minScore minimum score of matching documents
*/
def searchDropDuplicates[K: ClassTag, C: ClassTag](
queryBuilder: S => Query = null,
createKey: S => K = (s: S) => s.hashCode.toLong.asInstanceOf[K],
minScore: Double = 0,
createCombiner: Seq[SearchRecord[S]] => C = (ss: Seq[SearchRecord[S]]) => ss.head.source.asInstanceOf[C],
mergeValue: (C, Seq[SearchRecord[S]]) => C = (c: C, _: Seq[SearchRecord[S]]) => c,
mergeCombiners: (C, C) => C = (c: C, _: C) => c,
numPartitionInJoin: Int = getNumPartitions,
topKToDeduplicate: Int = defaultTopK
)(implicit ord: Ordering[K]): RDD[C]
/**
* Save the current indexed RDD onto hdfs
* in order to be able to reload it later on.
*
* @param path Path on the spark file system (hdfs) to save on
*/
def save(path: String): Unit
/**
* @return the number of partitions of this RDD.
*/
def getNumPartitions: Int
/**
* @return current search options
*/
def options: SearchOptions[S]
}
object SearchRDD {
/**
* Reload an indexed RDD from spark FS.
*
* @param sc current spark context
* @param path Path where the index was saved
* @param options Search option
* @tparam T Type of bean or case class this RDD is bound to
* @return Restored RDD
*/
def load[T: ClassTag](sc: SparkContext,
path: String,
options: SearchOptions[T] = defaultOpts[T]
): SearchRDD[T] =
new SearchRDDLucene[T](sc, new SearchIndexReloadedRDD[T](sc, path, options), options, Nil)
}
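// Hypothetical usage sketch (not part of the original file): reload a saved
// index and query it. The path, query string and Company bean are placeholders;
// SearchRecord is assumed to expose score and source, as used elsewhere in this module.
object SearchRDDUsageExample {
  case class Company(name: String)
  def run(sc: SparkContext): Unit = {
    val restored: SearchRDD[Company] = SearchRDD.load[Company](sc, "/tmp/companies-index")
    println(restored.count("name:ibm"))
    restored.searchList("name:ibm", topK = 10)
      .foreach(hit => println(s"${hit.score} ${hit.source.name}"))
  }
}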
|
phymbert/spark-search
|
core/src/main/scala/org/apache/spark/search/rdd/SearchRDDImpl.scala
|
<filename>core/src/main/scala/org/apache/spark/search/rdd/SearchRDDImpl.scala
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.rdd
import java.io.{IOException, ObjectOutputStream}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.lucene.search.Query
import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.search._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.{BoundedPriorityQueue, Utils}
import scala.collection.JavaConverters.asScalaIteratorConverter
import scala.reflect.ClassTag
/**
* A search RDD indexes parent RDD partitions to lucene indexes.
* It builds a one-to-one volatile Lucene index for every parent RDD partition,
* available during the lifecycle of the Spark session in the executors' local directories and RAM.
*
* @author <NAME>
*/
private[search] class SearchRDDLucene[S: ClassTag](sc: SparkContext,
val indexerRDD: SearchRDDIndexer[S],
val options: SearchOptions[S],
val deps: Seq[Dependency[_]])
extends RDD[S](sc, Seq(new OneToOneDependency(indexerRDD)) ++ deps)
with SearchRDD[S] {
def this(rdd: RDD[S], options: SearchOptions[S]) {
this(rdd.sparkContext,
new SearchRDDIndexer(rdd, options),
options,
Seq(new OneToOneDependency(rdd)))
}
if (options.getIndexationOptions.isCacheSearchIndexRDD) {
indexerRDD.persist(StorageLevel.DISK_ONLY)
}
override def count(): Long = runSearchJob[Long, Long](spr => spr.count(), _.sum)
override def count(query: StaticQueryProvider): Long =
runSearchJob[Long, Long](spr => spr.count(query()), _.sum)
override def searchListQuery(query: StaticQueryProvider,
topK: Int = defaultTopK,
minScore: Double = 0
): Array[SearchRecord[S]] =
runSearchJob[Array[SearchRecord[S]], Array[SearchRecord[S]]](
spr => _partitionReaderSearchList(spr, query(), topK, minScore),
reduceSearchRecordsByTopK(topK))
override def searchQuery(query: StaticQueryProvider,
topKByPartition: Int = defaultTopK,
minScore: Double = 0
): RDD[SearchRecord[S]] = {
val indexDirectoryByPartition = indexerRDD._indexDirectoryByPartition
indexerRDD.mapPartitionsWithIndex(
(index, _) =>
tryAndClose(reader(indexDirectoryByPartition, index)) {
spr => _partitionReaderSearchList(spr, query(), topKByPartition, minScore)
}.iterator
).sortBy(_.score, ascending = false)
}
override def matchesQuery[K, V](other: RDD[(K, V)],
queryBuilder: V => Query,
topK: Int = 10,
minScore: Double = 0
)
(implicit kClassTag: ClassTag[K],
vClassTag: ClassTag[V]): RDD[(K, (V, Array[SearchRecord[S]]))] = {
val unwrapDoc = sparkContext.clean((kv: (K, V)) => queryBuilder(kv._2))
val cartesianRDD: RDD[((K, V), Option[SearchRecord[S]])] =
new SearchRDDCartesian[(K, V), S](
indexerRDD, other, unwrapDoc,
options.getReaderOptions, topK, minScore
)
val pairedRDD: RDD[(K, (V, Option[SearchRecord[S]]))] = cartesianRDD.map {
case ((k: K, v: V), Some(sr)) => (k, (v, Some(sr)))
case ((k: K, v: V), None) => (k, (v, None))
}
// TopK monoid
implicit val ord: Ordering[(V, Option[SearchRecord[S]])] = Ordering.by(_._2.map(_.score))
val topKByKey = pairedRDD
.aggregateByKey(new BoundedPriorityQueue[(V, Option[SearchRecord[S]])](topK)(ord))(
seqOp = (topK, searchRecord) => topK += searchRecord,
combOp = (topK1, topK2) => topK1 ++= topK2
)
val matchesByKey = topKByKey
.mapValues(matches => (matches.head._1, matches.filter(_._2.isDefined).map(_._2.get).toArray.reverse))
matchesByKey
}
override def searchJoinQuery[W: ClassTag](other: RDD[W],
queryBuilder: W => Query,
topKByPartition: Int = defaultTopK,
minScore: Double = 0): RDD[(W, Option[SearchRecord[S]])] = {
new SearchRDDCartesian[W, S](
indexerRDD,
other, queryBuilder,
options.getReaderOptions, topKByPartition, minScore
)
}
/**
* Alias for
* [[org.apache.spark.search.rdd.SearchRDD#searchDropDuplicates(scala.Function1, int, double, int)}]]
*/
override def distinct(numPartitions: Int): RDD[S] =
searchDropDuplicates[Long, S]()
/**
* Drops duplicated records by looking up matching hits for the query against this RDD.
*
* @param queryBuilder builds the lucene query to search for duplicate
* @param minScore minimum score of matching documents
*/
override def searchDropDuplicates[K: ClassTag, C: ClassTag](
queryBuilder: S => Query = null, // Default query builder
createKey: S => K = (s: S) => s.hashCode.toLong.asInstanceOf[K],
minScore: Double = 0,
createCombiner: Seq[SearchRecord[S]] => C = (ss: Seq[SearchRecord[S]]) => ss.head.source.asInstanceOf[C],
mergeValue: (C, Seq[SearchRecord[S]]) => C = (c: C, _: Seq[SearchRecord[S]]) => c,
mergeCombiners: (C, C) => C = (c: C, _: C) => c,
numPartitionInJoin: Int = getNumPartitions,
topKToDeduplicate: Int = defaultTopK
)(implicit ord: Ordering[K]): RDD[C] = {
val cleanedKey = sparkContext.clean(createKey)
val unwrapDoc = sparkContext.clean((ks: (K, S)) => (queryBuilder match {
case null => defaultQueryBuilder[S](options)(elementClassTag)
case _ => queryBuilder
}) (ks._2))
val pairedRDD = map(s => (cleanedKey(s), s)).repartition(numPartitionInJoin)
val cartesianRDD: RDD[((K, S), Option[SearchRecord[S]])] =
new SearchRDDCartesian[(K, S), S](
indexerRDD, pairedRDD,
unwrapDoc,
options.getReaderOptions,
topKToDeduplicate,
minScore)
val pairedWithSearchedRDD: RDD[(K, (S, Option[SearchRecord[S]]))] = cartesianRDD.map {
case ((k: K, s: S), Some(sr)) => (k, (s, Some(sr)))
case ((k: K, s: S), None) => (k, (s, None))
}
val hitsByKey: RDD[(K, List[(S, Option[SearchRecord[S]])])] =
pairedWithSearchedRDD.aggregateByKey(List[(S, Option[SearchRecord[S]])]())(
seqOp = (matches, searchRecord) => {
matches ++ List(searchRecord)
},
combOp = (matches1, matches2) => {
matches1 ++ matches2
}
)
val keysAndDocs: RDD[(Seq[K], Seq[SearchRecord[S]])] = hitsByKey.map {
case (key: K, matches: List[(S, Option[SearchRecord[S]])]) =>
val doc: S = matches.head._1 // assumed left join
val otherMatches = matches
.filter(_._2.isDefined)
.map(m => (createKey(m._2.get.source), m._2.get))
// Remove self matching if exists (depending on score filter)
.filter((ks) => ks._1 != key)
val keys = (Seq(key) ++ otherMatches.map(_._1)).sorted
(keys, Seq(new SearchRecord[S](-1, -1, 0, -1, doc)) ++ otherMatches.map(_._2))
}
keysAndDocs.combineByKey(createCombiner, mergeValue, mergeCombiners)
.values
}
override def save(pathString: String): Unit = {
logInfo(s"Saving index with $getNumPartitions partitions to $pathString ...")
// Be sure we are indexed
count()
val hadoopConf = new Configuration()
val hdfs = FileSystem.get(hadoopConf)
val path = new Path(pathString)
if (hdfs.exists(path)) {
// FIXME issue github #77 https://github.com/phymbert/spark-search/issues/77
throw new SearchException(s"HDFS path $path already exists, delete it first")
}
indexerRDD.save(pathString)
logInfo(s"Index with $getNumPartitions partitions saved to $path")
}
private[spark] override def elementClassTag: ClassTag[S] = super.elementClassTag
override val partitioner: Option[Partitioner] = indexerRDD.partitioner
override def getPreferredLocations(split: Partition): Seq[String] =
firstParent[S].asInstanceOf[SearchRDDIndexer[S]]
.getPreferredLocations(split.asInstanceOf[SearchPartition[S]].searchIndexPartition)
override def repartition(numPartitions: Int)(implicit ord: Ordering[S]): RDD[S]
= new SearchRDDLucene[S](firstParent.firstParent.repartition(numPartitions), options)
def _partitionReaderSearchList(r: SearchPartitionReader[S],
query: Query, topK: Int, minScore: Double): Array[SearchRecord[S]] =
r.search(query, topK, minScore).map(searchRecordJavaToProduct)
protected[rdd] def reduceSearchRecordsByTopK(topK: Int): Iterator[Array[SearchRecord[S]]] => Array[SearchRecord[S]] =
_.reduce(_ ++ _).sortBy(_.score)(Ordering[Double].reverse).take(topK)
protected[rdd] def runSearchJob[R: ClassTag, A: ClassTag](searchByPartition: SearchPartitionReader[S] => R,
reducer: Iterator[R] => A): A =
runSearchJobWithContext((_searchByPartition, _) => searchByPartition(_searchByPartition), reducer)
protected[rdd] def runSearchJobWithContext[R: ClassTag, A: ClassTag](searchByPartitionWithContext: (SearchPartitionReader[S], TaskContext) => R,
reducer: Iterator[R] => A): A = {
val indexDirectoryByPartition = indexerRDD._indexDirectoryByPartition
val ret = sparkContext.runJob(indexerRDD, (context: TaskContext, it: Iterator[Array[Byte]]) => {
val index = context.partitionId()
// Unzip if needed
ZipUtils.unzipPartition(indexDirectoryByPartition(index), it)
tryAndClose(reader(indexDirectoryByPartition, index)) {
r => searchByPartitionWithContext(r, context)
}
})
reducer(ret.toIterator)
}
private def reader(indexDirectoryByPartition: Map[Int, String], index: Int): SearchPartitionReader[S] =
reader(index, indexDirectoryByPartition(index))
private def reader(index: Int, indexDirectory: String): SearchPartitionReader[S] =
new SearchPartitionReader[S](index, indexDirectory,
elementClassTag.runtimeClass.asInstanceOf[Class[S]],
options.getReaderOptions)
override def compute(split: Partition, context: TaskContext): Iterator[S] = {
val partition = split.asInstanceOf[SearchPartition[S]]
val indexedRDD = firstParent[Array[Byte]].asInstanceOf[SearchRDDIndexer[S]]
// Trigger indexing on the parent RDD partition node if not done yet
val it: Iterator[Array[Byte]] = indexedRDD.iterator(partition.searchIndexPartition, context)
val indexDirectory = partition.searchIndexPartition.indexDir
// Unzip if needed
ZipUtils.unzipPartition(indexDirectory, it)
val spr = reader(partition.searchIndexPartition.index,
partition.searchIndexPartition.indexDir)
context.addTaskCompletionListener[Unit](ctx => {
spr.close()
})
spr.docs().asScala.map(searchRecordJavaToProduct).map(_.source)
}
override protected def getPartitions: Array[Partition] = {
// One-2-One partition
firstParent.partitions.map(p =>
new SearchPartition(p.index, indexerRDD)).toArray
}
override def clearDependencies(): Unit = {
super.clearDependencies()
if (options.getIndexationOptions.isCacheSearchIndexRDD) {
indexerRDD.unpersist()
}
}
}
class SearchPartition[T](val idx: Int,
@transient private val searchRDD: SearchRDDIndexer[T]) extends Partition {
override def index: Int = idx
var searchIndexPartition: SearchPartitionIndex[T] = searchRDD.partitions(idx).asInstanceOf[SearchPartitionIndex[T]]
@throws(classOf[IOException])
private def writeObject(oos: ObjectOutputStream): Unit = Utils.tryOrIOException {
// Update the reference to parent split at the time of task serialization
searchIndexPartition = searchRDD.partitions(idx).asInstanceOf[SearchPartitionIndex[T]]
oos.defaultWriteObject()
}
}
|
phymbert/spark-search
|
sql/src/test/scala/org/apache/spark/search/sql/LocalSparkSession.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.sql
import io.netty.util.internal.logging.{InternalLoggerFactory, Slf4JLoggerFactory}
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}
trait LocalSparkSession extends BeforeAndAfterEach with BeforeAndAfterAll {
self: Suite =>
@transient var _spark: SparkSession = _
override def beforeAll() {
super.beforeAll()
InternalLoggerFactory.setDefaultFactory(Slf4JLoggerFactory.INSTANCE)
}
override def afterEach() {
try {
resetSparkSession()
} finally {
super.afterEach()
}
}
def resetSparkSession(): Unit = {
LocalSparkSession.stop(_spark)
_spark = null
}
override def beforeEach() {
_spark = SparkSession.builder()
.master("local[*]")
.appName("Spark Search Test")
.getOrCreate()
}
}
object LocalSparkSession {
def stop(sc: SparkSession) {
if (sc != null) {
sc.stop()
}
// To avoid RPC rebinding to the same port, since it doesn't unbind immediately on shutdown
System.clearProperty("spark.driver.port")
}
/** Runs `f` by passing in `sc` and ensures that `sc` is stopped. */
def withSpark[T](sc: SparkSession)(f: SparkSession => T): T = {
try {
f(sc)
} finally {
stop(sc)
}
}
}
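// Hypothetical usage sketch (not part of the original file): run a block of
// code against a throwaway local session; withSpark always stops it afterwards.
object LocalSparkSessionExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("example").getOrCreate()
    LocalSparkSession.withSpark(spark) { s =>
      println(s"Spark version: ${s.version}")
    }
  }
}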
|
phymbert/spark-search
|
core/src/test/scala/org/apache/spark/search/rdd/DocumentBeanUpdaterSuite.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.rdd
import org.apache.spark.search.reflect.DocumentBeanUpdater
import org.apache.spark.search._
import org.scalatest.funsuite.AnyFunSuite
class DocumentBeanUpdaterSuite extends AnyFunSuite {
test("document bean updater should support case class") {
val documentBeanUpdater = new DocumentBeanUpdater[TestData.Person]
val indexingDocument = new DocumentUpdater.IndexingDocument[TestData.Person](IndexationOptions.defaultOptions
.asInstanceOf[IndexationOptions[TestData.Person]])
val doc = indexingDocument.doc
indexingDocument.element = TestData.Person("John", "Doe", 34)
documentBeanUpdater.update(indexingDocument)
assertResult(3)(doc.getFields.size)
assertResult("John")(doc.get("firstName"))
}
}
|
phymbert/spark-search
|
benchmark/src/main/scala-2.11/benchmark/LuceneRDDBenchmark.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package benchmark
import org.apache.lucene.analysis.standard.StandardAnalyzer
import org.apache.spark.rdd.RDD
import org.zouzias.spark.lucenerdd.LuceneRDD
object LuceneRDDBenchmark extends BaseBenchmark("LuceneRDD") {
def main(args: Array[String]): Unit = run()
override def countNameMatches(companies: RDD[Company], name: String): RDD[(Double, String)] = {
import spark.implicits._
val luceneRDD = LuceneRDD(companies.toDF(),
classOf[StandardAnalyzer].getName,
classOf[StandardAnalyzer].getName,
"classic")
luceneRDD.query(s"name:${name}", Int.MaxValue)
.map(r => (r.getAs[Float]("__score__").toDouble, r.getAs[String]("name")))
.sortBy(_._1, ascending = false) // Not sorted by RDD but by partition
}
override def joinMatch(companies: RDD[Company], secEdgarCompanies: RDD[SecEdgarCompanyInfo]): RDD[(String, Double, String)] = {
import spark.implicits._
val luceneRDD = LuceneRDD(companies.toDF(),
classOf[StandardAnalyzer].getName,
classOf[StandardAnalyzer].getName,
"classic")
val prefixLinker = (company: SecEdgarCompanyInfo) => {
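// Truncates the company name to 64 characters and backslash-escapes Lucene query special
// characters (+, -, =, &, |, <, >, !, parentheses, brackets, braces, ^, ", ~, *, ?, :, /)
// before quoting it as a phrase query.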
val skipped = company.companyName.slice(0, 64).replaceAll("([+\\-=&|<>!(){}\\[\\]^\"~*?:/])", "\\\\$1")
s"name:${"\"" + skipped + "\""}"
}
luceneRDD.link(secEdgarCompanies, prefixLinker, 1)
.filter(_._2.nonEmpty)
.map(t => (t._1.companyName, t._2.head.getAs[Double]("__score__"), t._2.head.getAs[String]("name")))
}
}
|
phymbert/spark-search
|
examples/src/main/scala/all/examples/org/apache/spark/search/rdd/SearchRDDExamples.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package all.examples.org.apache.spark.search.rdd
import org.apache.lucene.analysis.en.EnglishAnalyzer
import org.apache.spark.search._
import org.apache.spark.sql.SparkSession
import ExampleData._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
/**
* Spark Search RDD examples.
*/
object SearchRDDExamples {
def main(args: Array[String]): Unit = {
val spark = SparkSession.builder()
.appName("Spark Search Examples")
.getOrCreate()
val sc = spark.sparkContext
sc.setLogLevel("ERROR")
Console.setOut(Console.err)
// Amazon computers customer reviews
val computersReviews: RDD[Review] = loadReviews(spark, "http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Computers.json.gz")
// Search RDD API
import org.apache.spark.search.rdd._ // to implicitly enhance RDD with search features
// Count positive review: indexation + count matched doc
val happyReview = computersReviews.count("reviewText:happy OR reviewText:best OR reviewText:good OR reviewText:\"sounds great\"~")
println(s"$happyReview positive reviews :)")
// Search for key words
println(s"Full text search results:")
computersReviews.searchList("reviewText:\"World of Warcraft\" OR reviewText:\"Civilization IV\"",
topK = 10, minScore = 10)
.foreach(println)
// /!\ Important: Lucene indexation is done each time a SearchRDD is computed,
// so if you run multiple operations on the same parent RDD, keep the SearchRDD in a variable in the driver:
val computersReviewsSearchRDD: SearchRDD[Review] = computersReviews.searchRDD(
SearchOptions.builder[Review]() // See all other options SearchRDDOptions, IndexationOptions and ReaderOptions
.read((r: ReaderOptions.Builder[Review]) => r.defaultFieldName("reviewText"))
.analyzer(classOf[EnglishAnalyzer])
.build())
println("All reviews speaking about hardware:")
computersReviewsSearchRDD.searchList("(RAM OR memory) AND (CPU OR processor~)^4",
topK = 10, minScore = 15).foreach(println)
// Fuzzy matching
println("Some typo in names:")
computersReviews.search("(reviewerName:Mikey~0.8) OR (reviewerName:\"Patrik\"~0.4) OR (reviewerName:jonh~0.2)",
topKByPartition = 10)
.map(doc => s"${doc.source.reviewerName}=${doc.score}")
.collect()
.map(_.toLowerCase)
.distinct
.foreach(println)
// Amazon software customer reviews
println("Downloading software reviews...")
val softwareReviews: RDD[Review] = loadReviews(spark, "http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Software_10.json.gz")
// Match software and computer reviewers
println("Joined software and computer reviews by reviewer names:")
val matchesReviewers: RDD[(Review, Array[SearchRecord[Review]])] = computersReviews.matches[String, Review](
softwareReviews.filter(_.reviewerName != null).map(sr => (sr.asin, sr)),
(sr: Review) => "reviewerName:\"" + sr.reviewerName.replace('"', ' ') + "\"~0.4", 10)
.values
matchesReviewers
.filter(_._2.nonEmpty)
.sortBy(_._2.length, ascending = false)
.map(m => (s"Reviewer ${m._1.reviewerName} reviews computer ${m._1.asin} but also on software:",
m._2.map(h => s"${h.source.reviewerName}=${h.score}=${h.source.asin}").toList))
.collect()
.take(20)
.foreach(println)
// Drop duplicates
println("Dropping duplicated reviewers:")
val distinctReviewers: RDD[String] = computersReviews
.filter(_.reviewerName != null)
.searchDropDuplicates[Long, Review](
queryBuilder = queryStringBuilder[Review](sr => "reviewerName:\"" + sr.reviewerName.replace('"', ' ') + "\"~0.4")
).map(sr => sr.reviewerName)
distinctReviewers.collect().sorted.take(20).foreach(println)
// Save & restore example
FileSystem.get(new Configuration).delete(new Path("/hdfs-tmp/hdfs-pathname"), true)
println(s"Saving index to hdfs...")
computersReviews.save("/hdfs-tmp/hdfs-pathname")
println(s"Restoring from previous indexation:")
val restoredSearchRDD: SearchRDD[Review] = SearchRDD.load[Review](sc, "/hdfs-tmp/hdfs-pathname")
val happyReview2 = restoredSearchRDD.count("reviewText:happy OR reviewText:best OR reviewText:good")
println(s"$happyReview2 positive reviews after restoration")
// The restored index can be used as a classical RDD
val topReviewer = restoredSearchRDD.map(r => (r.reviewerID, 1))
.reduceByKey(_ + _)
.sortBy(_._2, ascending = false)
.take(1).head
println(s"${topReviewer._1} has submitted ${topReviewer._2} reviews")
val topReviewNameFiltered = restoredSearchRDD.filter(_.reviewerID.equals(topReviewer._1))
.take(1).head
println(s"He is named ${topReviewNameFiltered.reviewerName}")
spark.stop()
}
}
|
phymbert/spark-search
|
sql/src/main/scala/org/apache/spark/search/sql/expression.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.sql
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.codegen.Block._
import org.apache.spark.sql.catalyst.expressions.codegen.{CodeGenerator, CodegenContext, ExprCode, FalseLiteral, JavaCode}
import org.apache.spark.sql.catalyst.expressions.{BinaryExpression, Expression, LeafExpression}
import org.apache.spark.sql.types.{DataType, _}
trait SearchExpression
case class ScoreExpression() extends LeafExpression
with SearchExpression {
override def toString: String = s"SCORE"
override def dataType: DataType = DoubleType
override def nullable: Boolean = false
override def eval(input: InternalRow): Any = {
if (input != null) input.getDouble(input.numFields - 1) else Double.NaN
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
/*
if (ctx.currentVars != null && ctx.currentVars.last != null) {
val oev = ctx.currentVars.head
ev.isNull = oev.isNull
ev.value = oev.value
ev.copy(code = oev.code)
} else {
assert(ctx.INPUT_ROW != null, "INPUT_ROW and currentVars cannot both be null.")
val javaType = JavaCode.javaType(dataType)
ev.copy(code = code"$javaType ${ev.value} = ${ctx.INPUT_ROW}.getDouble(i.numFields() - 1);", isNull = FalseLiteral)
}*/
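// NOTE: the generated code below currently emits a constant placeholder score (10000d); the
// interpreted eval above reads the real score from the last field of the row, which
// SearchJoinExec.toRow appends after the matched columns.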
ev.copy(code =
code"""
final ${CodeGenerator.javaType(dataType)} ${ev.value} = 10000d;""", isNull = FalseLiteral)
}
}
/**
* @author <NAME>
*/
case class MatchesExpression(left: Expression, right: Expression)
extends BinaryExpression
with SearchExpression {
override def toString: String = s"$left MATCHES $right"
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
ev.copy(code =
code"""
final ${CodeGenerator.javaType(dataType)} ${ev.value} = true;""", isNull = FalseLiteral)
}
override def dataType: DataType = BooleanType
}
|
phymbert/spark-search
|
core/src/main/scala/org/apache/spark/search/rdd/SearchJavaBaseRDD.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.rdd
import org.apache.spark.SparkContext
import org.apache.spark.api.java.{JavaPairRDD, JavaRDD}
import org.apache.spark.search._
import org.apache.spark.search.rdd.SearchRDDJava.{QueryBuilder, QueryStringBuilder}
import scala.reflect.ClassTag
/**
* Java friendly version of [[SearchRDD]].
*/
trait SearchRDDJavaWrapper[S] extends SearchRDDJava[S] {
val searchRDD: SearchRDD[S]
val classTag: ClassTag[S]
override def count(): Long = searchRDD.count()
override def count(query: String): Long =
searchRDD.count(parseQueryString(query))
override def searchList(query: String, topK: Int): Array[SearchRecordJava[S]] =
searchRDD.searchListQuery(parseQueryString(query), topK).map(searchRecordAsJava(_))
override def searchList(query: String, topK: Int, minScore: Double): Array[SearchRecordJava[S]] =
searchRDD.searchListQuery(parseQueryString(query), topK, minScore).map(searchRecordAsJava)
override def search(query: String, topK: Int, minScore: Double): JavaRDD[SearchRecordJava[S]] =
searchRDD.search(query, topK).map(searchRecordAsJava(_)).toJavaRDD()
override def matches[K, V](rdd: JavaPairRDD[K, V],
queryBuilder: QueryStringBuilder[V],
topK: Int,
minScore: Double): JavaPairRDD[K, (V, Array[SearchRecordJava[S]])] = {
implicit val kClassTag: ClassTag[K] = rdd.kClassTag
implicit val vClassTag: ClassTag[V] = rdd.vClassTag
new JavaPairRDD(
searchRDD.matches[K, V](rdd.rdd, v => queryBuilder.build(v), topK, minScore)
.mapValues(m => (m._1, m._2.map(searchRecordAsJava(_)))))
}
override def save(path: String): Unit = searchRDD.save(path)
override def matchesQuery[K, V](rdd: JavaPairRDD[K, V],
queryBuilder: QueryBuilder[V],
topK: Int,
minScore: Double): JavaPairRDD[K, (V, Array[SearchRecordJava[S]])] = {
implicit val kClassTag: ClassTag[K] = rdd.kClassTag
implicit val vClassTag: ClassTag[V] = rdd.vClassTag
new JavaPairRDD(
searchRDD.matchesQuery[K, V](rdd.rdd, v => queryBuilder.build(v), topK, minScore)
.mapValues(m => (m._1, m._2.map(searchRecordAsJava(_)))))
}
override def javaRDD(): JavaRDD[S] = {
implicit val classTag: ClassTag[S] = this.classTag
new JavaRDD(searchAsRDD(searchRDD))
}
private def searchRecordAsJava(sr: SearchRecord[S]) =
new SearchRecordJava[S](sr.id, sr.partitionIndex, sr.score, sr.shardIndex, sr.source)
}
class SearchJavaBaseRDD[T: ClassTag](rdd: JavaRDD[T], opts: SearchOptions[T])
extends JavaRDD[T](rdd.rdd)
with SearchRDDJavaWrapper[T] {
override lazy val searchRDD: SearchRDD[T] = rdd.rdd.searchRDD(opts)
override val classTag: ClassTag[T] = searchRDD.elementClassTag
override def count(): Long = searchRDD.count()
}
class SearchRDDReloadedJava[T: ClassTag](val searchRdd: SearchRDD[T])
extends SearchRDDJavaWrapper[T]
with Serializable {
override lazy val searchRDD: SearchRDD[T] = searchRdd
override val classTag: ClassTag[T] = searchRDD.elementClassTag
}
object SearchRDDReloadedJava {
def load[T: ClassTag](sc: SparkContext,
path: String,
options: SearchOptions[T]
): SearchRDDJava[T] = new SearchRDDReloadedJava(SearchRDD.load(sc, path, options))
}
|
phymbert/spark-search
|
core/src/main/scala/org/apache/spark/search/rdd/ZipUtils.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.rdd
import java.io.{File, FileInputStream, InputStream, OutputStream}
import java.nio.file.{Files, StandardCopyOption}
import java.util.function.Consumer
import java.util.zip.{ZipEntry, ZipInputStream, ZipOutputStream}
private[rdd] object ZipUtils {
class IteratorInputStream(it: Iterator[Array[Byte]]) extends InputStream {
var offset: Int = 0
var buff: Array[Byte] = Array.empty
override def read(): Int = { // FIXME the slowest implementation ever
if (offset < buff.length) {
val b = buff(offset)
offset = offset + 1
b & 0xFF // Make sure the value is in the range 0..255
} else if (it.hasNext) {
offset = 1
buff = it.next
buff(0) & 0xFF
} else {
-1
}
}
}
class FileInputStreamIterator(filePath: File) extends Iterator[Array[Byte]] {
override def hasNext: Boolean = {
if (!finished) {
if (is == null) {
is = new FileInputStream(filePath)
}
read = is.read(_next)
finished = read < 0
if (finished) {
is.close()
}
}
!finished
}
var is: InputStream = _
var read: Int = -1
val _next: Array[Byte] = new Array[Byte](8192)
var finished = false
override def next(): Array[Byte] = {
_next.slice(0, read)
}
}
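// Illustrative round trip for a partition index directory (assuming `indexDir` holds a local
// Lucene index as a java.io.File, `zipFile` is a java.io.File and `targetDir` is a path string):
//   zipPartition(indexDir.toPath, new FileOutputStream(zipFile))   // directory -> zip stream
//   unzipPartition(targetDir, new FileInputStream(zipFile))        // zip stream -> directory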
def unzipPartition(indexDir: String, it: Iterator[Array[Byte]]): Unit = {
unzipPartition(indexDir, new IteratorInputStream(it))
}
def unzipPartition(indexDir: String, is: InputStream): Unit = {
val parentLocalFile = new File(indexDir)
if (parentLocalFile.mkdirs()) {
val zis = new ZipInputStream(is)
Stream.continually(zis.getNextEntry).takeWhile(_ != null).foreach { file =>
Files.copy(zis, new File(parentLocalFile, file.getName).toPath, StandardCopyOption.REPLACE_EXISTING)
zis.closeEntry()
}
zis.close()
}
is.close()
}
def zipPartition(localIndexDirPath: java.nio.file.Path, fos: OutputStream): Unit = {
val zip = new ZipOutputStream(fos)
val files = Files.list(localIndexDirPath)
files.forEach {
new Consumer[java.nio.file.Path] {
override def accept(file: java.nio.file.Path): Unit = {
zip.putNextEntry(new ZipEntry(file.toFile.getName))
Files.copy(file, zip)
zip.closeEntry()
}
}
}
files.close()
zip.close()
fos.close()
}
}
|
phymbert/spark-search
|
core/src/main/scala/org/apache/spark/search/rdd/SearchRDDCartesian.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.rdd
import java.io.{IOException, ObjectOutputStream}
import org.apache.lucene.search.Query
import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.search.{ReaderOptions, SearchException, SearchRecord}
import org.apache.spark.util.Utils
import collection.JavaConverters._
import scala.reflect.{ClassTag, classTag}
/**
* Result RDD of a cartesian search.
*
* @author <NAME>
*/
class SearchRDDCartesian[V: ClassTag, S: ClassTag](
@transient var searchRDDLuceneIndexer: SearchRDDIndexer[S],
@transient var other: RDD[V],
queryBuilder: V => Query,
readerOptions: ReaderOptions[S],
topK: Int = Int.MaxValue,
minScore: Double = 0)
extends RDD[(V, Option[SearchRecord[S]])](searchRDDLuceneIndexer.context, Nil)
with Serializable {
override val partitioner: Option[Partitioner] = searchRDDLuceneIndexer.partitioner
override protected def getPreferredLocations(split: Partition): Seq[String] =
firstParent[S].asInstanceOf[SearchRDDIndexer[S]]
.getPreferredLocations(split.asInstanceOf[MatchRDDPartition].searchIndexPartition)
override def compute(split: Partition, context: TaskContext): Iterator[(V, Option[SearchRecord[S]])] = {
val matchPartition = split.asInstanceOf[MatchRDDPartition]
// Make sure the partition is indexed on this worker
val it = firstParent[Array[Byte]].iterator(matchPartition.searchIndexPartition, context)
// Unzip if needed
ZipUtils.unzipPartition(matchPartition.searchIndexPartition.indexDir, it)
val spr = reader(matchPartition.searchIndexPartition.index,
matchPartition.searchIndexPartition.indexDir)
context.addTaskCompletionListener[Unit](ctx => {
spr.close()
})
// Match the other RDD's partition against ours
parent[V](1).iterator(matchPartition.otherPartition, context)
.flatMap(searchFor => {
try {
val searchResultIt = spr.iterator(queryBuilder(searchFor), topK, minScore)
val result = if (searchResultIt.size() == 0) Iterator((searchFor, None)) // FIXME left join always
else searchResultIt.asScala
.map(searchRecordJavaToProduct)
.map(s => (searchFor, Some(s)))
result
} catch {
case e: SearchException => throw new SearchException(s"error during matching $searchFor: $e", e)
}
})
}
override def clearDependencies(): Unit = {
super.clearDependencies()
searchRDDLuceneIndexer = null
other = null
}
private val numPartitionsInOtherRdd = other.partitions.length
override protected def getPartitions: Array[Partition] = {
val parts = new Array[Partition](searchRDDLuceneIndexer.partitions.length * numPartitionsInOtherRdd)
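// Partition layout mirrors RDD.cartesian: every search-index partition is paired with every
// partition of `other`; e.g. with 2 partitions in `other`, (s1=0, s2=1) maps to index 1 and
// (s1=1, s2=0) maps to index 2.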
for (s1 <- searchRDDLuceneIndexer.partitions; s2 <- other.partitions) {
val idx = s1.index * numPartitionsInOtherRdd + s2.index
parts(idx) = new MatchRDDPartition(idx, searchRDDLuceneIndexer, other, s1.index, s2.index)
}
parts
}
override def getDependencies: Seq[Dependency[_]] = List(
new NarrowDependency(searchRDDLuceneIndexer) {
def getParents(id: Int): Seq[Int] = List(id / numPartitionsInOtherRdd)
},
new NarrowDependency(other) {
def getParents(id: Int): Seq[Int] = List(id % numPartitionsInOtherRdd)
}
)
private def reader(index: Int, indexDirectory: String): SearchPartitionReader[S] =
new SearchPartitionReader[S](index, indexDirectory, classTag[S].runtimeClass.asInstanceOf[Class[S]],
readerOptions)
class MatchRDDPartition(val idx: Int,
@transient private val searchRDDLuceneIndexer: SearchRDDIndexer[S],
@transient private val other: RDD[V],
val searchRDDIndex: Int,
val otherIndex: Int
) extends Partition {
override val index: Int = idx
var searchIndexPartition: SearchPartitionIndex[S] = searchRDDLuceneIndexer.partitions(searchRDDIndex).asInstanceOf[SearchPartitionIndex[S]]
var otherPartition: Partition = other.partitions(otherIndex)
@throws(classOf[IOException])
private def writeObject(oos: ObjectOutputStream): Unit = Utils.tryOrIOException {
// Update the reference to parent split at the time of task serialization
searchIndexPartition = searchRDDLuceneIndexer.partitions(searchRDDIndex).asInstanceOf[SearchPartitionIndex[S]]
otherPartition = other.partitions(otherIndex)
oos.defaultWriteObject()
}
}
}
|
phymbert/spark-search
|
core/src/main/scala/org/apache/spark/search/rdd/RDDWithSearch.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.rdd
import org.apache.lucene.search.Query
import org.apache.spark.rdd.RDD
import org.apache.spark.search.{SearchOptions, _}
import scala.reflect.ClassTag
/**
* Add search features to [[org.apache.spark.rdd.RDD]]
* using [[org.apache.spark.search.rdd.SearchRDDLucene]].
*
* @author <NAME>
*/
private[rdd] class RDDWithSearch[S: ClassTag](val rdd: RDD[S],
val opts: SearchOptions[S] = defaultOpts[S]
) extends SearchRDD[S] {
private[rdd] lazy val _searchRDD: SearchRDD[S] = searchRDD(opts)
override def count(): Long = _searchRDD.count()
override def count(query: StaticQueryProvider): Long = _searchRDD.count(query)
override def searchListQuery(query: StaticQueryProvider,
topK: Int = defaultTopK,
minScore: Double = 0): Array[SearchRecord[S]] =
_searchRDD.searchListQuery(query, topK, minScore)
override def searchQuery(query: StaticQueryProvider,
topKByPartition: Int = defaultTopK,
minScore: Double = 0): RDD[SearchRecord[S]] =
_searchRDD.searchQuery(query, topKByPartition, minScore)
override def matchesQuery[K, V](rdd: RDD[(K, V)],
queryBuilder: V => Query,
topK: Int = 10,
minScore: Double = 0
)
(implicit kClassTag: ClassTag[K],
vClassTag: ClassTag[V]): RDD[(K, (V, Array[SearchRecord[S]]))] =
_searchRDD.matchesQuery(rdd, queryBuilder, topK, minScore)
override def save(path: String): Unit = _searchRDD.save(path)
override def getNumPartitions: Int = _searchRDD.getNumPartitions
override def options: SearchOptions[S] = _searchRDD.options
/**
* Builds a search RDD with the given custom search options.
*
* @param opts Search options
* @return Dependent RDD with configurable search features
*/
def searchRDD(opts: SearchOptions[S] = defaultOpts): SearchRDD[S] = new SearchRDDLucene[S](rdd, opts)
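// Illustrative usage (assuming `import org.apache.spark.search.rdd._` for the implicit enrichment
// and a case class Person; EnglishAnalyzer comes from Lucene):
//   val searchable: SearchRDD[Person] = personsRdd.searchRDD(
//     SearchOptions.builder[Person]().analyzer(classOf[EnglishAnalyzer]).build())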
override def searchJoinQuery[W: ClassTag](other: RDD[W],
queryBuilder: W => Query,
topKByPartition: Int,
minScore: Double): RDD[(W, Option[SearchRecord[S]])] =
_searchRDD.searchJoinQuery(other, queryBuilder, topKByPartition, minScore)
override def distinct(numPartitions: Int): RDD[S] =
_searchRDD.distinct(numPartitions)
override def searchDropDuplicates[K: ClassTag, C: ClassTag](queryBuilder: S => Query = null,
createKey: S => K = (s: S) => s.hashCode.toLong.asInstanceOf[K],
minScore: Double = 0,
createCombiner: Seq[SearchRecord[S]] => C = (ss: Seq[SearchRecord[S]]) => ss.head.source.asInstanceOf[C],
mergeValue: (C, Seq[SearchRecord[S]]) => C = (c: C, _: Seq[SearchRecord[S]]) => c,
mergeCombiners: (C, C) => C = (c: C, _: C) => c,
numPartitionInJoin: Int = getNumPartitions,
topKToDeduplicate: Int = defaultTopK
)
(implicit ord: Ordering[K]): RDD[C] =
_searchRDD.searchDropDuplicates(queryBuilder, createKey, minScore, createCombiner, mergeValue, mergeCombiners)
private[spark] override def elementClassTag: ClassTag[S] = _searchRDD.elementClassTag
}
|
phymbert/spark-search
|
core/src/main/scala/org/apache/spark/search/package.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.util.function.{Function => JFunction}
import org.apache.lucene.analysis.Analyzer
import org.apache.lucene.analysis.standard.StandardAnalyzer
import org.apache.lucene.queryparser.classic.QueryParser
import org.apache.lucene.search.Query
import org.apache.lucene.util.QueryBuilder
import org.apache.spark.search.reflect.DefaultQueryBuilder
import scala.collection.Iterable
import scala.language.implicitConversions
import scala.reflect.ClassTag
/**
* Spark Search brings advanced full text search features
* to your Dataframe, Dataset and RDD. Powered by Apache Lucene.
*/
package object search {
/**
* Search record.
*/
case class SearchRecord[S](id: Long, partitionIndex: Long, score: Double, shardIndex: Long, source: S)
extends Ordering[SearchRecord[S]] {
override def compare(sr1: SearchRecord[S], sr2: SearchRecord[S]): Int = sr2.score.compare(sr1.score) // Reverse
}
/**
* Default search options.
*/
def defaultOpts[S]: SearchOptions[S] = SearchOptions.defaultOptions.asInstanceOf[SearchOptions[S]]
/**
* Helper class to ease building Lucene queries from a query string (Lucene syntax) with Spark Search RDDs.
*
* @param queryStringBuilder Generate lucene query string for this input element
* @tparam S Type of input class
*/
class QueryStringBuilderWithAnalyzer[S](val queryStringBuilder: S => String,
val defaultFieldName: String = ReaderOptions.DEFAULT_FIELD_NAME,
override val analyzerClass: Class[_ <: Analyzer] = classOf[StandardAnalyzer])
extends CanBuildQueryWithAnalyzer[S](analyzerClass) {
override def apply(s: S): Query =
new QueryParser(defaultFieldName, _analyzer).parse(queryStringBuilder.apply(s))
}
/**
* Helper class to ease building Lucene queries with Spark Search RDDs, supporting serialization
* and query builder creation in a distributed setting.
*
* @param queryBuilder Generate lucene query for this input element
* @tparam S Type of input class
*/
class QueryBuilderWithAnalyzer[S](queryBuilder: (S, QueryBuilder) => Query,
override val analyzerClass: Class[_ <: Analyzer] = classOf[StandardAnalyzer]
)
extends CanBuildQueryWithAnalyzer[S](analyzerClass) {
@transient private lazy val _luceneQueryBuilder: QueryBuilder = new QueryBuilder(_analyzer)
override def apply(s: S): Query = queryBuilder.apply(s, _luceneQueryBuilder)
}
/**
* Abstract class to ease building Lucene queries with Spark Search RDDs, supporting serialization
* and analyzer instantiation in a distributed setting.
*
* @param analyzerClass Type of the analyzer to use with the query
* @tparam S Type of input class
*/
abstract class CanBuildQueryWithAnalyzer[S](val analyzerClass: Class[_ <: Analyzer] = classOf[StandardAnalyzer])
extends (S => Query) with Serializable {
@transient lazy val _analyzer: Analyzer = analyzerClass.newInstance()
}
def defaultQueryBuilder[S](opts: SearchOptions[_] = defaultOpts)(implicit cls: ClassTag[S]): S => Query =
new QueryBuilderWithAnalyzer[S](new DefaultQueryBuilder[S](cls.runtimeClass.asInstanceOf[Class[_ <: S]]).asInstanceOf[(S, QueryBuilder) => Query],
opts.getReaderOptions.analyzer)
def queryBuilder[S](builder: (S, QueryBuilder) => Query, opts: SearchOptions[_] = defaultOpts): S => Query =
new QueryBuilderWithAnalyzer[S](builder, opts.getReaderOptions.analyzer)
def queryStringBuilder[S](builder: S => String, opts: SearchOptions[_] = defaultOpts): S => Query =
new QueryStringBuilderWithAnalyzer[S](builder, opts.getReaderOptions.getDefaultFieldName, opts.getReaderOptions.analyzer)
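// Illustrative usage (assuming a case class Person(firstName: String, lastName: String)):
//   val byName: Person => Query =
//     queryStringBuilder[Person](p => s"firstName:${p.firstName}~0.5 AND lastName:${p.lastName}~0.5")
// The returned function is Serializable and instantiates its Analyzer lazily on executors.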
implicit def indexOptions[S](optionsBuilderFunc: Function[IndexationOptions.Builder[S], IndexationOptions.Builder[S]]): JFunction[IndexationOptions.Builder[S], IndexationOptions.Builder[S]] =
new JFunction[IndexationOptions.Builder[S], IndexationOptions.Builder[S]] {
override def apply(opts: IndexationOptions.Builder[S]): IndexationOptions.Builder[S] = {
optionsBuilderFunc.apply(opts)
}
}
implicit def readerOptions[S](optionsBuilderFunc: Function[ReaderOptions.Builder[S], ReaderOptions.Builder[S]]): JFunction[ReaderOptions.Builder[S], ReaderOptions.Builder[S]] =
new JFunction[ReaderOptions.Builder[S], ReaderOptions.Builder[S]] {
override def apply(opts: ReaderOptions.Builder[S]): ReaderOptions.Builder[S] = {
optionsBuilderFunc.apply(opts)
}
}
private[search] def searchRecordJavaToProduct[S](sr: SearchRecordJava[S]) = {
SearchRecord(sr.id, sr.partitionIndex, sr.score, sr.shardIndex, sr.source)
}
}
|
phymbert/spark-search
|
sql/src/test/scala/org/apache/spark/search/sql/SearchDatasetSuite.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.sql
import org.apache.spark.search.sql.TestData._
import org.scalatest.flatspec.AnyFlatSpec
class SearchDatasetSuite extends AnyFlatSpec with LocalSparkSession {
ignore should "be searchable" in {
val spark = _spark
import spark.sqlContext.implicits._
val appleCompany = companies1DS(spark)
.where($"name".matches("apple") && score() > 1d)
assertResult(Company("Apple, Inc")) {
appleCompany.collect().head
}
}
}
|
phymbert/spark-search
|
sql/src/main/scala/org/apache/spark/search/sql/SearchRule.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.sql
import org.apache.spark.sql.catalyst.expressions.{And, BinaryExpression, Expression, Or}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.Rule
/**
* Spark Search SQL rule.
*
* Rewrites logical plans involving search expressions into their equivalent using the search RDD.
*
* @author <NAME>
*/
object SearchRule extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
case filter@Filter(conditions, _) if hasSearchCondition(conditions) =>
extractSearchFilter(filter)
}
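// Illustrative rewrite: Filter(name MATCHES 'apple', relation) becomes
// SearchJoin(relation, SearchIndexPlan(relation, name MATCHES 'apple'), name MATCHES 'apple'),
// deferring the actual matching to the search RDD at execution time.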
private def hasSearchCondition(e: Expression): Boolean = {
e match {
case _: MatchesExpression => true
case _ => e.children.exists(hasSearchCondition)
}
}
def extractSearchFilter(filter: Filter): LogicalPlan = {
val (searchExpression, other) = searchConditions(filter.condition)
val rddPlan = SearchIndexPlan(filter.child, searchExpression.get)
val joinPlan = SearchJoin(filter.child, rddPlan, searchExpression.get)
/*if (other.children.nonEmpty)
Filter(other, joinPlan)
else*/
joinPlan // FIXME lost
}
private def searchConditions(e: Expression): (Option[Expression], Expression) = {
e match {
case m: MatchesExpression => (Option(e), m)
case be: BinaryExpression =>
val (leftSearchExpression, _) = searchConditions(be.left)
val (rightSearchExpression, _) = searchConditions(be.right)
val searchExpression = if (leftSearchExpression.nonEmpty && rightSearchExpression.nonEmpty)
Option(be match {
case _: And => And(leftSearchExpression.get, rightSearchExpression.get)
case _: Or => Or(leftSearchExpression.get, rightSearchExpression.get)
case _ => throw new UnsupportedOperationException
})
else if (leftSearchExpression.nonEmpty)
leftSearchExpression
else if (rightSearchExpression.nonEmpty)
rightSearchExpression
else Option.empty
(searchExpression, be)
case _ => (Option.empty, e)
}
}
}
|
phymbert/spark-search
|
core/src/test/scala/org/apache/spark/search/rdd/SearchRDDSuite.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.rdd
import java.io.File
import org.apache.commons.io.FileUtils
import org.apache.lucene.analysis.en.EnglishAnalyzer
import org.apache.lucene.search.BooleanClause.Occur
import org.apache.lucene.util.QueryBuilder
import org.apache.spark.RangePartitioner
import org.apache.spark.rdd.RDD
import org.apache.spark.search._
import org.apache.spark.search.rdd.TestData._
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.must.Matchers.{convertToAnyMustWrapper, sorted}
import scala.language.implicitConversions
class SearchRDDSuite extends AnyFunSuite with LocalSparkContext {
test("count all indexed documents") {
assertResult(3)(sc.parallelize(persons).count)
}
test("count matched indexed documents") {
assertResult(1)(sc.parallelize(persons)
.count("firstName:bob"))
}
test("search list hits matching query") {
assertResult(Array(new SearchRecord[Person](0, 1, 0.3150668740272522f, 0,
Person("Bob", "Marley", 37))))(sc.parallelize(persons)
.searchList("firstName:bob", 10))
}
test("search RDD hits matching query") {
assertResult(Array(new SearchRecord[Person](0, 1, 0.3150668740272522f, 0,
Person("Bob", "Marley", 37))))(
sc.parallelize(persons)
.search("firstName:bob", 10).take(10)
)
}
test("search RDD hits matching fuzzy query should return topK per partition") {
val personsKeyedBy = sc.parallelize(personsDuplicated)
.union(sc.parallelize(personsDuplicated))
.zipWithIndex()
.keyBy(_._2)
val personsRDD = personsKeyedBy.partitionBy(new RangePartitioner(2, personsKeyedBy))
.map(_._2._1)
personsRDD.getNumPartitions mustBe 2
val matches = personsRDD
// <NAME> will have a better score than <NAME> and she should not be hit
.search("firstName:agnès~ OR firstName:bob~ lastName:marley~", topKByPartition = 2)
// Assert we have only one lucene index per partition
matches.map(_.partitionIndex).distinct().sortBy(identity).collect() mustBe Array(0, 1)
// Assert we only have bob
matches.map(_.source.firstName).map(_.toLowerCase.substring(0, 3)).collect().distinct mustBe Array("bob")
// Assert we have all bobs matched
matches.count() mustBe 4
// Should be sorted by score, descending
matches.map(_.score).collect().reverse mustBe sorted
}
test("search RDD query must use default field") {
assertResult(Array(new SearchRecord[Person](0, 1, 0.3150668740272522f, 0,
Person("Bob", "Marley", 37))))(sc.parallelize(persons).searchRDD(
SearchOptions.builder[Person]()
.read((r: ReaderOptions.Builder[Person]) => r.defaultFieldName("firstName"))
.analyzer(classOf[EnglishAnalyzer])
.build()).search("bob", 10).take(10))
}
test("Matching RDD") {
val persons2 = Seq(
Person("George", "Michal", 0),
Person("Georgee", "Michall", 0),
Person("Bobb", "Marley", 0),
Person("Bob", "Marlley", 0),
Person("Agnes", "Bartol", 0),
Person("Agnec", "Barttol", 0))
val searchRDD = sc.parallelize(persons2).repartition(2).searchRDD()
val matchingRDD = sc.parallelize(persons)
val personsKeyed: RDD[(Long, Person)] = matchingRDD.zipWithIndex().map(_.swap)
val matches = searchRDD.matches(personsKeyed, (p: Person) => s"firstName:${p.firstName}~0.5 AND lastName:${p.lastName}~0.5", 2).collect
assertResult(3)(matches.length)
assertResult(3)(matches.map(m => m._2._2.length).count(_ == 2))
}
test("self matches with one query value should returns all documents with one match") {
val rdd = sc.parallelize(persons)
val result = rdd
.matches(rdd.zipWithIndex().map(_.swap), (_: Person) => "lastName:Marley", 1)
val resultFiltered = result
.filter(_._2._2.count(_.source.lastName.equals("Marley")) == 1)
assertResult(3)(resultFiltered.count)
}
test("self matches with self query value should returns all document with self match") {
val rdd = sc.parallelize(persons)
val searchRDD = rdd.matchesQuery(rdd.zipWithIndex().map(_.swap),
queryBuilder((c: Person, lqb: QueryBuilder) => lqb.createBooleanQuery("firstName", c.firstName, Occur.MUST)),
topK = 1)
.map(_._2)
.filter(m => m._2.count(h => h.source.firstName.equals(m._1.firstName)) == 1)
.cache
assertResult(3)(searchRDD.count)
}
test("search join should work") {
val opts = SearchOptions.builder[Person]().analyzer(classOf[TestPersonAnalyzer]).build()
val searchRDD = sc.parallelize(personsDuplicated).repartition(1)
.searchRDD(opts)
.searchJoin(sc.parallelize(personsDuplicated),
(c: Person) => s"firstName:${c.firstName}",
topKByPartition = 2)
assertResult(12)(searchRDD.count) // FIXME add good unit test
}
test("search matches should work") {
val opts = SearchOptions.builder[Person]().analyzer(classOf[TestPersonAnalyzer]).build()
val searchRDD = sc.parallelize(personsDuplicated).repartition(1)
.searchRDD(opts)
.matchesQuery(sc.parallelize(persons).zipWithIndex().map(_.swap),
queryBuilder((c: Person, lqb: QueryBuilder) => lqb.createBooleanQuery("firstName", c.firstName), opts),
topK = 2)
.filter(_._2._2.length == 2)
assertResult(3)(searchRDD.count)
}
test("Distinct with no minimum score") {
val searchRDD = sc.parallelize(personsDuplicated)
.repartition(1)
.searchRDD()
val deduplicated = searchRDD.distinct(1).collect
assertResult(1)(deduplicated.length)
}
test("Drop duplicates with min score") {
val searchRDD = sc.parallelize(personsDuplicated).repartition(1)
.searchRDD(opts = SearchOptions.builder().analyzer(classOf[TestPersonAnalyzer]).build())
val deduplicated = searchRDD.searchDropDuplicates[Long, Person](minScore = 8).collect
assertResult(3)(deduplicated.length)
}
test("Drop duplicate with query builder and min score") {
val searchRDD = sc.parallelize(personsDuplicated).repartition(1)
val deduplicated = searchRDD.searchDropDuplicates[Long, Person](
queryBuilder = queryStringBuilder(p => s"firstName:${p.firstName}~0.5 AND lastName:${p.lastName}~0.5"),
minScore = 1
).collect
assertResult(3)(deduplicated.length)
}
test("SearchRDD should be iterable as an RDD") {
val searchRDD = sc.parallelize(persons).searchRDD()
assertResult(Array("Agnès", "Bob", "Geoorge"))(searchRDD
.filter(_.firstName.nonEmpty)
.map(_.firstName)
.collect().sorted)
}
test("Save and restore index from/to hdfs") {
FileUtils.deleteDirectory(new File("target/test-save"))
val searchRDD = sc.parallelize(persons).searchRDD()
assertResult(3)(searchRDD.count())
searchRDD.save("target/test-save")
val restoredSearchRDD = SearchRDD.load[Person](sc, "target/test-save")
assertResult(3)(restoredSearchRDD.count())
assertResult(Array("Bartoll", "Marley", "Michael"))(restoredSearchRDD
.map(_.lastName)
.collect().sorted)
}
}
|
phymbert/spark-search
|
core/src/main/scala/org/apache/spark/search/rdd/SearchRDDIndexer.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.rdd
import java.io._
import java.util
import org.apache.commons.io.FileUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.search._
import org.apache.spark.search.rdd.ZipUtils._
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
/**
* RDD responsible for indexing elements into a Lucene index on the local disk where the partition resides;
* it then streams the partition content as a zipped byte array.
*
* @author <NAME>
*/
private[search] class SearchRDDIndexer[S: ClassTag](sc: SparkContext,
val options: SearchOptions[S],
val deps: Seq[Dependency[_]])
extends RDD[Array[Byte]](sc, deps) {
def this(rdd: RDD[S], options: SearchOptions[S]) {
this(rdd.context, options, Seq(new OneToOneDependency(rdd)))
}
override def compute(split: Partition, context: TaskContext): Iterator[Array[Byte]] = {
val searchRDDPartition = split.asInstanceOf[SearchPartitionIndex[S]]
val elements = firstParent.iterator(searchRDDPartition.parent, context).asJava
.asInstanceOf[util.Iterator[S]]
searchRDDPartition.index(elements, options.getIndexationOptions)
streamPartitionIndexZip(context, searchRDDPartition)
}
protected def streamPartitionIndexZip(context: TaskContext, searchRDDPartition: SearchPartitionIndex[S]): Iterator[Array[Byte]] = {
val localIndexDirPath = new File(searchRDDPartition.indexDir)
val targetPath = new File(localIndexDirPath.getParent, s"${localIndexDirPath.getName}.zip")
zipPartition(localIndexDirPath.toPath, new FileOutputStream(targetPath))
new InterruptibleIterator[Array[Byte]](context, new FileInputStreamIterator(targetPath))
}
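// The zip is materialized next to the local index directory and streamed back in ~8KB chunks
// (see ZipUtils.FileInputStreamIterator), so downstream RDDs consume the index as Array[Byte] blocks.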
override protected def getPartitions: Array[Partition] = {
val parentPartitions = firstParent.partitions
// One-2-One partition
parentPartitions.map(p =>
new SearchPartitionIndex[S](p.index, rootDir,
getPreferredLocation(context, p.index, parentPartitions.length, super.getPreferredLocations(p)),
p)).toArray
}
protected val rootDir: String =
s"${options.getIndexationOptions.getRootIndexDirectory}${File.separator}${sc.applicationId}-sparksearch-rdd$id"
override protected[rdd] def getPreferredLocations(split: Partition): Seq[String] =
split.asInstanceOf[SearchPartitionIndex[S]].preferredLocations
lazy val _indexDirectoryByPartition: Map[Int, String] =
partitions.map(_.asInstanceOf[SearchPartitionIndex[S]]).map(t => (t.index, t.indexDir)).toMap
def save(pathString: String): Unit = {
val indexDirectoryByPartition = _indexDirectoryByPartition
mapPartitionsWithIndex((index, _) => {
val hadoopConf = new Configuration()
val hdfs = FileSystem.get(hadoopConf)
val localIndexDirPath = new File(indexDirectoryByPartition(index)).toPath
val targetPath = new Path(pathString, s"${localIndexDirPath.getFileName}.zip")
logInfo(s"Saving partition $localIndexDirPath to $targetPath")
val fos = hdfs.create(targetPath)
zipPartition(localIndexDirPath, fos)
logInfo(s"Partition $localIndexDirPath saved to $targetPath")
Iterator()
}).collect
}
override def unpersist(blocking: Boolean): SearchRDDIndexer.this.type = {
// TODO support non blocking
val indexDirectoryByPartition = _indexDirectoryByPartition
sparkContext.runJob(this, (context: TaskContext, _: Iterator[Array[Byte]]) => {
val indexDir = new File(indexDirectoryByPartition(context.partitionId()))
FileUtils.deleteDirectory(indexDir)
FileUtils.deleteQuietly(new File(indexDir.getParent, s"${indexDir.getName}.zip"))
})
super.unpersist(blocking)
}
}
private[rdd] object SearchRDDIndexer {
}
|
phymbert/spark-search
|
core/src/test/scala/org/apache/spark/search/rdd/TestData.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.rdd
import org.apache.lucene.analysis.Analyzer
import org.apache.lucene.analysis.Analyzer.TokenStreamComponents
import org.apache.lucene.analysis.ngram.NGramTokenizer
object TestData {
case class Person(firstName: String, lastName: String, age: Int)
def persons = Seq(
Person("Geoorge", "Michael", 53),
Person("Bob", "Marley", 37),
Person("Agnès", "Bartoll", -1))
def personsDuplicated = Seq(
Person("George", "Michal", 0),
Person("Georgee", "Michall", 0),
Person("Bobb", "Marley", 0),
Person("Bob", "Marlley", 0),
Person("Agnes", "Bartol", 0),
Person("Agnec", "Barttol", 0))
class TestPersonAnalyzer extends Analyzer {
override def createComponents(fieldName: String): TokenStreamComponents = {
new TokenStreamComponents(new NGramTokenizer(1, 3))
}
}
}
|
phymbert/spark-search
|
examples/src/main/scala/all/examples/org/apache/spark/search/rdd/ExampleData.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package all.examples.org.apache.spark.search.rdd
import java.io.File
import java.net.URL
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import scala.language.postfixOps
import scala.sys.process._
object ExampleData {
case class Review(asin: String, helpful: Array[Long], overall: Double,
reviewText: String, reviewTime: String, reviewerID: String,
reviewerName: String, summary: String, unixReviewTime: Long)
def loadReviews(spark: SparkSession, reviewURL: String): RDD[Review] = {
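// Downloads the gzipped reviews to a local temp file, copies it to HDFS, then reads it back
// as a JSON dataset of Review records repartitioned into 4 partitions.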
import spark.implicits._
val hadoopConf = new Configuration()
val hdfs = FileSystem.get(hadoopConf)
val reviewsFile = File.createTempFile("reviews_", ".json.gz")
reviewsFile.deleteOnExit()
new URL(reviewURL) #> reviewsFile !!
val dstPathName = "/hdfs-tmp/reviews.json.gz"
hdfs.copyFromLocalFile(new Path(reviewsFile.getAbsolutePath), new Path(dstPathName))
hdfs.deleteOnExit(new Path(dstPathName))
spark.read.json(dstPathName).as[Review].rdd.repartition(4)
}
}
|
phymbert/spark-search
|
core/src/test/scala/org/apache/spark/search/rdd/ZipUtilsSuite.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.rdd
import java.io.{ByteArrayOutputStream, File, FileInputStream, FileOutputStream}
import org.apache.commons.io.{FileUtils, IOUtils}
import org.apache.lucene.analysis.standard.StandardAnalyzer
import org.apache.lucene.document.Field.Store
import org.apache.lucene.document.{Document, StringField}
import org.apache.lucene.index.{IndexWriter, IndexWriterConfig}
import org.apache.lucene.store.MMapDirectory
import org.apache.spark.search.rdd.ZipUtils._
import org.scalatest.flatspec.AnyFlatSpec
class ZipUtilsSuite extends AnyFlatSpec {
it should "create an iterator from a file input stream" in {
val f = File.createTempFile("test", "test")
f.deleteOnExit()
val expected = "abcdef\n" * 100000
FileUtils.write(f, expected)
val it = new FileInputStreamIterator(f)
var actual = ""
it.foreach { a =>
actual += new String(a)
}
assertResult(expected) {
actual
}
}
it should "create an input stream from iterator" in {
val bos = new ByteArrayOutputStream()
val it = Seq(Seq('a', 'b'), Seq('c', 'd', 'e'), Seq('f'))
.map(s => s.map(c => c.toByte).toArray)
.iterator
IOUtils.copy(new IteratorInputStream(it), bos)
assertResult("abcdef") {
new String(bos.toByteArray)
}
}
it should "creates valid zip and able to unzip it" in {
val testDir = new File(System.getProperty("java.io.tmpdir"), "test-zipping")
testDir.delete()
testDir.mkdir()
val f = new File(testDir, "test")
f.deleteOnExit()
val expected = "abcdef\n" * 100000
FileUtils.write(f, expected)
val zipped = File.createTempFile("zipped", ".zip")
zipped.deleteOnExit()
zipPartition(testDir.toPath, new FileOutputStream(zipped))
f.delete()
testDir.delete()
unzipPartition(testDir.getAbsolutePath, new FileInputStreamIterator(zipped))
zipped.delete()
val bos = new ByteArrayOutputStream()
IOUtils.copy(new FileInputStream(f), bos)
assertResult(expected) {
new String(bos.toByteArray)
}
}
}
|
phymbert/spark-search
|
benchmark/src/main/scala/benchmark/SparkRDDRegexBenchmark.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package benchmark
import org.apache.spark.rdd.RDD
object SparkRDDRegexBenchmark extends BaseBenchmark("Spark RDD Regex") {
def main(args: Array[String]): Unit = run()
override def countNameMatches(companies: RDD[Company], name: String): RDD[(Double, String)] = {
val re = s".*\\Q${name.toLowerCase}\\E.*"
companies
.filter(_.name != null)
.filter(_.name.toLowerCase.matches(re))
.map(c => (0, c.name))
}
override def joinMatch(companies: RDD[Company], secEdgarCompanies: RDD[SecEdgarCompanyInfo]): RDD[(String, Double, String)] = {
secEdgarCompanies
.filter(_.companyName != null)
.zipWithIndex()
.map(_.swap)
.cartesian(companies.filter(_.name != null))
.map(c => (c._1._1, (c._1._2.companyName, c._2.name)))
.filter(t => t._2._2.toLowerCase.matches(s".*\\Q${t._2._1.replaceAllLiterally(" ", "\\E\\s+\\Q")}\\E.*"))
.reduceByKey((c1, _) => c1)
.map(t => (t._2._1, 0d, t._2._2))
}
}
|
phymbert/spark-search
|
core/src/main/scala/org/apache/spark/search/rdd/SearchRDDReloaded.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.rdd
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path, PathFilter}
import org.apache.spark.search.SearchOptions
import org.apache.spark.{Partition, SparkContext, TaskContext}
import scala.reflect.ClassTag
/**
* Reloaded index from hdfs.
*
* @author <NAME>
*/
private[search] class SearchIndexReloadedRDD[S: ClassTag](sc: SparkContext,
path: String,
override val options: SearchOptions[S])
extends SearchRDDIndexer[S](sc, options, Nil) {
override protected def getPartitions: Array[Partition] = {
val hadoopConf = new Configuration()
val hdfs = FileSystem.get(hadoopConf)
val partitionsZipped = hdfs.listStatus(new Path(path), new PathFilter {
override def accept(path: Path): Boolean = path.getName.endsWith(".zip")
}).zipWithIndex
partitionsZipped.map(p => new SearchIndexReloadedPartition(p._2, rootDir, p._1.getPath.toUri.toString,
getPreferredLocation(context, p._2, partitionsZipped.length, Seq())))
}
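// compute() fetches the partition zip from HDFS, unzips it into the local index directory,
// then re-streams it exactly as a freshly indexed partition would be.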
override def compute(split: Partition, context: TaskContext): Iterator[Array[Byte]] = {
val part = split.asInstanceOf[SearchIndexReloadedPartition]
val hadoopConf = new Configuration()
val hdfs = FileSystem.get(hadoopConf)
ZipUtils.unzipPartition(part.indexDir, hdfs.open(new Path(part.zipPath)))
streamPartitionIndexZip(context, part.asInstanceOf[SearchPartitionIndex[S]])
}
}
class SearchIndexReloadedPartition(val idx: Int,
val rootDir: String,
val zipPath: String,
val preferredLocations2: Array[String])
extends SearchPartitionIndex(idx, rootDir, preferredLocations2, null) {
}
|
phymbert/spark-search
|
sql/src/main/scala/org/apache/spark/search/sql/SearchStrategy.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.sql
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.{SparkPlan, SparkStrategy}
/**
* Search strategy.
*
* @author <NAME>
*/
object SearchStrategy extends SparkStrategy {
override def apply(plan: LogicalPlan): Seq[SparkPlan] = {
plan match {
case SearchJoin(left, right, searchExpression) => SearchJoinExec(planLater(left), planLater(right), searchExpression) :: Nil
case p: SearchIndexPlan =>
SearchRDDExec(planLater(p.child), p.searchExpression) :: Nil
case _ => Seq.empty
}
}
}
|
phymbert/spark-search
|
sql/src/main/scala/org/apache/spark/search/sql/physical.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.sql
import org.apache.lucene.util.QueryBuilder
import org.apache.spark.rdd.RDD
import org.apache.spark.search.rdd.SearchRDDLucene
import org.apache.spark.search.{IndexationOptions, ReaderOptions, SearchOptions, _}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, Expression, Literal, UnsafeRow}
import org.apache.spark.sql.execution._
import org.apache.spark.sql.types.{DataTypes, StringType, StructField, StructType}
import org.apache.spark.unsafe.types.UTF8String
case class SearchJoinExec(left: SparkPlan, right: SparkPlan, searchExpression: Expression)
extends BinaryExecNode {
override protected def doExecute(): RDD[InternalRow] = {
val leftRDD = left.execute()
val searchRDD = right.execute().asInstanceOf[SearchRDDLucene[InternalRow]]
val opts = searchRDD.options
val qb = searchExpression match { // FIXME support AND / OR
case MatchesExpression(left, right) => right match {
case Literal(value, dataType) =>
dataType match {
case StringType => left match {
case a: AttributeReference =>
queryBuilder[InternalRow]((_: InternalRow, lqb: QueryBuilder) =>
lqb.createBooleanQuery(a.name, value.asInstanceOf[UTF8String].toString), opts)
case _ => throw new UnsupportedOperationException
}
case _ => throw new UnsupportedOperationException
}
case _ => throw new UnsupportedOperationException
}
case _ => throw new IllegalArgumentException
}
searchRDD.matchesQuery(leftRDD.zipWithIndex().map(_.swap), qb, 1)
.filter(_._2._2.nonEmpty) // TODO move this filter to the partition level
.values.map(m =>
toRow(m._1.asInstanceOf[UnsafeRow],
m._2.head.score))
}
private def toRow(doc: UnsafeRow, score: Double): InternalRow = {
val row = new UnsafeRow(doc.numFields + 1)
val bs = new Array[Byte](row.getSizeInBytes)
row.pointTo(bs, bs.length)
row.copyFrom(doc)
row.setDouble(doc.numFields, score)
row
}
override def output: Seq[Attribute] = left.output ++ Seq(scoreAttribute)
}
case class SearchRDDExec(child: SparkPlan, searchExpression: Expression)
extends UnaryExecNode {
override protected def doExecute(): RDD[InternalRow] = {
val inputRDDs = child match {
case wsce: WholeStageCodegenExec => wsce.child.asInstanceOf[CodegenSupport].inputRDDs()
case cs: CodegenSupport => cs.inputRDDs()
case _ => throw new UnsupportedOperationException("child plan does not expose input RDDs")
}
if (inputRDDs.length != 1) {
throw new UnsupportedOperationException("one input RDD expected")
}
val rdd = inputRDDs.head
val schema = StructType(Seq(searchExpression match { // FIXME support AND / OR
case MatchesExpression(left, _) => left match {
case a: AttributeReference =>
StructField(a.name, DataTypes.StringType)
case _ => throw new UnsupportedOperationException
}
case _ => throw new IllegalArgumentException
}))
val opts = SearchOptions.builder[InternalRow]()
.read((readOptsBuilder: ReaderOptions.Builder[InternalRow]) => readOptsBuilder.documentConverter(new DocumentRowConverter(schema)))
.index((indexOptsBuilder: IndexationOptions.Builder[InternalRow]) => indexOptsBuilder.documentUpdater(new DocumentRowUpdater(schema)))
.build()
new SearchRDDLucene[InternalRow](rdd, opts)
}
override def output: Seq[Attribute] = Seq()
}
|
phymbert/spark-search
|
sql/src/main/scala/org/apache/spark/search/sql/logical.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.sql
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, Expression}
import org.apache.spark.sql.catalyst.plans.logical.{BinaryNode, LeafNode, LogicalPlan, Statistics}
import org.apache.spark.sql.types.DoubleType
trait SearchLogicalPlan
case class SearchJoin(left: LogicalPlan, right: SearchIndexPlan, searchExpression: Expression)
extends BinaryNode
with SearchLogicalPlan {
override def output: Seq[Attribute] = left.output ++ right.output
}
case class SearchIndexPlan(child: LogicalPlan, searchExpression: Expression)
extends LeafNode
with SearchLogicalPlan {
override def output: Seq[Attribute] = Seq(AttributeReference(SCORE, DoubleType, nullable = false)())
override def computeStats(): Statistics = Statistics(
sizeInBytes = BigInt(Long.MaxValue) // Broadcast forbidden
)
}
|
phymbert/spark-search
|
core/src/main/scala/org/apache/spark/search/rdd/package.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search
import org.apache.lucene.queryparser.classic.QueryParser
import org.apache.lucene.search.Query
import org.apache.spark.rdd.RDD
import org.apache.spark.{ExecutorAllocationClient, SparkContext}
import scala.language.implicitConversions
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}
/**
* Spark Search RDD package: implicit conversions and helpers that add Lucene-based search to RDDs.
*/
package object rdd {
implicit def rddWithSearch[S: ClassTag](rdd: RDD[S]): RDDWithSearch[S] =
new RDDWithSearch[S](rdd)
implicit def searchAsRDD[S: ClassTag](searchRDD: SearchRDD[S]): RDD[S] =
searchRDD.asInstanceOf[RDD[S]]
/**
* Provides a serializable query factory that can be passed to a SearchRDD.
*
* /!\ Important: a Lucene Query is not serializable, so only the factory is shipped
* to executors and the query is rebuilt there on demand.
*/
type StaticQueryProvider = () => Query
def parseQueryString[S](queryString: String, opts: SearchOptions[_] = defaultOpts): StaticQueryProvider =
// Query parser is not thread safe
() => new QueryParser(opts.getReaderOptions.getDefaultFieldName, opts.getReaderOptions.analyzer.newInstance())
.parse(queryString)
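// Hedged example (illustrative addition): a StaticQueryProvider ships only the serializable
// query string to executors and rebuilds the Lucene Query there on demand. The field name
// and value below are arbitrary.
private[rdd] def exampleStaticQuery: StaticQueryProvider =
  parseQueryString("name:IBM")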
private[rdd] def searchRecordJavaToProduct[S](sr: SearchRecordJava[S]) = {
SearchRecord(sr.id, sr.partitionIndex, sr.score, sr.shardIndex, sr.source)
}
private[rdd] def tryAndClose[A <: AutoCloseable, B](resource: A)(block: A => B): B = {
Try(block(resource)) match {
case Success(result) =>
resource.close()
result
case Failure(e) =>
resource.close()
throw e
}
}
private[rdd] def getPreferredLocation(sc: SparkContext,
index: Int,
numPartition: Int,
parentPreferredLocation: Seq[String]): Array[String] =
sc.schedulerBackend match {
case b: ExecutorAllocationClient =>
// Try to balance partitions across executors
val allIds = sc.getExecutorIds()
val ids = allIds.grouped(numPartition).toList
ids(index % ids.length).toArray
case _ => parentPreferredLocation.toArray
}
}
|
phymbert/spark-search
|
sql/src/main/scala/org/apache/spark/search/sql/ColumnWithSearch.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.sql
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.{Column, SparkSession}
/**
* Enriches a [[Column]] with full-text search operators such as [[matches]].
*
* @author <NAME>
*/
class ColumnWithSearch(col: Column) {
@transient private final val sqlContext = SparkSession.getActiveSession.map(_.sqlContext).orNull
def matches(literal: String): Column = withSearchExpr {
MatchesExpression(col.expr, lit(literal).expr)
}
def matches(other: Column): Column = withSearchExpr {
MatchesExpression(col.expr, other.expr)
}
/** Creates a column from the given search expression, enabling the Search SQL strategy and rule on first use. */
private def withSearchExpr(newExpr: Expression): Column = {
val searchSQLEnabled = sqlContext.experimental.extraStrategies.contains(SearchStrategy)
if (!searchSQLEnabled) {
sqlContext.experimental.extraStrategies = Seq(SearchStrategy) ++ sqlContext.experimental.extraStrategies
sqlContext.experimental.extraOptimizations = Seq(SearchRule) ++ sqlContext.experimental.extraOptimizations
}
new Column(newExpr)
}
}
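// Hedged usage sketch (illustrative addition, not part of the original file): with the implicit
// conversion from the sql package object in scope, `matches` turns a column into a full-text
// predicate. The DataFrame and its "name" column are hypothetical placeholders.
private[sql] object ColumnWithSearchExample {
  import org.apache.spark.sql.DataFrame

  def matchingIbm(companies: DataFrame): DataFrame =
    companies.where(companies("name") matches "IBM")
}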
|
phymbert/spark-search
|
sql/src/main/scala/org/apache/spark/search/sql/package.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search
import org.apache.lucene.search.Query
import org.apache.spark.search
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.types.DoubleType
import scala.language.implicitConversions
import scala.reflect.ClassTag
import scala.reflect.runtime.universe.TypeTag
/**
* Search SQL package: provides search features for Spark [[org.apache.spark.sql.Dataset]]s.
*
* @author <NAME>
*/
package object sql {
/**
* Score column name.
*/
val SCORE: String = "__score__"
private[sql] val scoreAttribute: Attribute = AttributeReference(SCORE, DoubleType, nullable = false)()
/**
* Score of the hit in the search request.
*/
def score(): Column = new Column(ScoreExpression())
/**
* Default query builder.
*/
def defaultQueryBuilder[S: ClassTag](implicit enc: Encoder[S]): S => Query = search.defaultQueryBuilder[S]()
/**
* Add search feature to column.
*/
implicit def columnWithSearch(col: Column): ColumnWithSearch = new ColumnWithSearch(col)
/**
* Allows encoding search records so an RDD of them can be converted to a Dataset of Rows.
*/
implicit def searchRecordEncoder[S <: Product : TypeTag](implicit enc: Encoder[S]): Encoder[SearchRecord[S]] = Encoders.product[SearchRecord[S]]
}
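// Hedged usage sketch (illustrative addition, not part of the original file): combining
// `matches` with `score()` to surface the relevance score of each hit. The DataFrame and
// its "name" column are hypothetical placeholders.
private[search] object SqlSearchExample {
  import org.apache.spark.search.sql._

  def scoredMatches(companies: DataFrame): DataFrame =
    companies
      .where(companies("name") matches "IBM")
      .select(score(), companies("name"))
}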
|
phymbert/spark-search
|
benchmark/src/main/scala/benchmark/BaseBenchmark.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package benchmark
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{SaveMode, SparkSession}
abstract class BaseBenchmark(appName: String) extends Serializable {
val spark: SparkSession = SparkSession.builder().appName(appName).getOrCreate()
protected def run(): Unit = {
import spark.implicits._
// Convert CSV to parquet
// https://www.kaggle.com/peopledatalabssf/free-7-million-company-dataset
spark.read
.option("header", "true")
.option("inferSchema", "true")
.csv("hdfs:///companies_sorted.csv")
.withColumnRenamed("year founded", "yearFounded")
.withColumnRenamed("size range", "sizeRange")
.withColumnRenamed("linkedin url", "linkedinUrl")
.withColumnRenamed("current employee estimate", "currentEmployeeEstimate")
.withColumnRenamed("total employee estimate", "totalEmployeeEstimate")
.withColumnRenamed("_c0", "id")
.na.fill("", Seq("domain", "yearFounded", "industry", "sizeRange", "locality", "country", "linkedinUrl", "currentEmployeeEstimate", "totalEmployeeEstimate"))
.write
.mode(SaveMode.Ignore)
.parquet("hdfs:///companies_sorted.parquet")
// https://www.kaggle.com/dattapiy/sec-edgar-companies-list
spark.read.option("header", "true")
.option("inferSchema", "true")
.csv("hdfs:///sec__edgar_company_info.csv")
.withColumnRenamed("Line Number", "lineNumber")
.withColumnRenamed("Company Name", "companyName")
.withColumnRenamed("Company CIK Key", "companyCIKKey")
.write
.mode(SaveMode.Ignore)
.parquet("hdfs:///sec__edgar_company_info.parquet")
def loadCompanies = {
spark.read
.load("hdfs:///companies_sorted.parquet")
.as[Company]
.rdd
}
def loadSecEdgarCompanies = {
spark.read.load("hdfs:///sec__edgar_company_info.parquet")
.as[SecEdgarCompanyInfo]
.rdd
}
// Join matches
val (countJoinedMatches, joinedMatches) = spark.time({
val jm = joinMatch(loadCompanies, loadSecEdgarCompanies).cache
(jm.count, jm)
})
println(s"for joined ${countJoinedMatches} matches")
joinedMatches.take(10).foreach(println(_))
joinedMatches.unpersist()
val (countMatches, matches) = spark.time({
val m = countNameMatches(loadCompanies, "IBM").cache
(m.count, m)
})
println(s"for count ${countMatches} matches")
matches.take(10).foreach(println(_))
matches.unpersist()
spark.stop()
}
def countNameMatches(companies: RDD[Company], name: String): RDD[(Double, String)]
def joinMatch(companies: RDD[Company], secEdgarCompany: RDD[SecEdgarCompanyInfo]): RDD[(String, Double, String)]
}
|
phymbert/spark-search
|
benchmark/src/main/scala/benchmark/SearchRDDBenchmark.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package benchmark
import org.apache.lucene.analysis.standard.StandardAnalyzer
import org.apache.lucene.util.QueryBuilder
import org.apache.spark.rdd.RDD
import org.apache.spark.search.rdd._
import org.apache.spark.search.{SearchOptions, _}
object SearchRDDBenchmark extends BaseBenchmark("SearchRDD") {
def main(args: Array[String]): Unit = run()
override def countNameMatches(companies: RDD[Company], name: String): RDD[(Double, String)] = {
val opts = SearchOptions
.builder[Company]
.analyzer(classOf[StandardAnalyzer]).build
companies.searchRDD(opts).search(s"name:${name}").map(sr => (sr.score, sr.source.name))
}
override def joinMatch(companies: RDD[Company], secEdgarCompanies: RDD[SecEdgarCompanyInfo]): RDD[(String, Double, String)] = {
val opts = SearchOptions
.builder[Company]
.analyzer(classOf[StandardAnalyzer]).build
companies.searchRDD(opts)
.searchJoinQuery(secEdgarCompanies,
queryBuilder[SecEdgarCompanyInfo]((c: SecEdgarCompanyInfo, lqb: QueryBuilder) =>
lqb.createPhraseQuery("name", c.companyName.slice(0, 64)), opts), 1, 0d)
.filter(_._2.isDefined)
.map(m => (m._1.companyName, m._2.get.score, m._2.get.source.name))
}
}
|
phymbert/spark-search
|
benchmark/src/main/scala/benchmark/ElasticsearchBenchmark.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package benchmark
import org.apache.http.client.methods.HttpDelete
import org.apache.http.impl.client.DefaultHttpClient
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions._
import org.elasticsearch.spark._
object ElasticsearchBenchmark extends BaseBenchmark("Elasticsearch") {
val esSQLSource = "org.elasticsearch.spark.sql"
val esOpts = Map("es.nodes.wan.only" -> "true")
def main(args: Array[String]): Unit = run()
override def countNameMatches(companies: RDD[Company], name: String): RDD[(Double, String)] = {
import spark.implicits._
clearES()
companies.saveToEs("companies", esOpts) // FIXME maybe adjust replica/shard preferences
spark.read.format(esSQLSource)
.option("es.nodes.wan.only", "true")
.load("companies")
.filter($"name".equalTo(name))
.as[Company]
.map(c => (0d, c.name)) // FIXME no score ?
.rdd
}
override def joinMatch(companies: RDD[Company], secEdgarCompanies: RDD[SecEdgarCompanyInfo]): RDD[(String, Double, String)] = {
clearES()
import spark.implicits._
companies.saveToEs("companies", esOpts)
secEdgarCompanies.saveToEs("sec_edgar_companies", esOpts)
val companiesES = spark.read
.format(esSQLSource)
.option("es.nodes.wan.only", "true")
.load("companies")
.as[Company]
val secEdgarCompaniesES = spark.read.format(esSQLSource)
.option("es.nodes.wan.only", "true")
.load("sec_edgar_companies")
.as[SecEdgarCompanyInfo]
companiesES.join(secEdgarCompaniesES, $"name".equalTo($"companyName")) //FIXME check pushdown filters
.select($"companyName", lit(0d), $"name")
.distinct
.as[(String, Double, String)]
.rdd
}
private def clearES() = {
spark.conf.set("es.nodes", spark.conf.get("spark.es.nodes"))
spark.conf.set("es.port", "80")
val deleteIndices = new HttpDelete(s"http://${spark.conf.get("es.nodes")}/companies,sec_edgar_companies")
println(s"Deleting indices ${deleteIndices}...")
val resp = (new DefaultHttpClient).execute(deleteIndices)
println(s"Indices deleted: ${resp}")
}
}
|
phymbert/spark-search
|
core/src/test/scala/org/apache/spark/search/rdd/SearchIndexRDDSuite.scala
|
/*
* Copyright © 2020 Spark Search (The Spark Search Contributors)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.search.rdd
import java.io.File
import org.apache.commons.io.FileUtils
import org.apache.spark.search.rdd.TestData._
import org.scalatest.flatspec.AnyFlatSpec
import ZipUtils._
class SearchIndexRDDSuite extends AnyFlatSpec with LocalSparkContext {
it should "creates zip index and stream to the next rdd" in {
val searchIndexedRDD = sc.parallelize(persons)
.searchRDD()
.asInstanceOf[SearchRDDLucene[Person]]
.indexerRDD
searchIndexedRDD.count()
val indexDirectoryByPartition = searchIndexedRDD._indexDirectoryByPartition
indexDirectoryByPartition.foreach(t => {
val indexDir = new File(t._2)
FileUtils.deleteDirectory(indexDir)
FileUtils.deleteQuietly(new File(indexDir.getParent, s"${indexDir.getName}.zip"))
})
searchIndexedRDD.mapPartitionsWithIndex((t, p) => {
unzipPartition(indexDirectoryByPartition(t), p)
Iterator()
}, preservesPartitioning = true).count()
}
}
|
leozc/lambda_java_example_pantsbuild
|
lambda/src/scala/com/example/slack/main/slack.scala
|
package com.example.slack.main
import java.io.{InputStream, OutputStream}
import java.nio.charset.StandardCharsets.UTF_8
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
case class Command(
  operation: String,
  message: String
)
case class LongResponse(
  text: String,
  response_type: String
)
object Slack {
def main(args: Array[String]): Unit = {
println("Hello, world!")
}
}
class Slack {
implicit val formats = DefaultFormats
def time(in: InputStream, out: OutputStream): Unit = {
val payload = scala.io.Source.fromInputStream(in).mkString("")
val cmd : Command = parse(payload).extract[Command]
val lr = LongResponse(text = cmd.message, response_type = cmd.operation)
val json = ("text" -> lr.text) ~ ("response_type" -> lr.response_type)
out.write(compact(render(json)).getBytes(UTF_8))
}
}
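// Hedged local check (illustrative addition, not part of the original file): exercises the
// handler with in-memory streams. The operation/message values are arbitrary examples.
object SlackLocalCheck {
  import java.io.{ByteArrayInputStream, ByteArrayOutputStream}

  def main(args: Array[String]): Unit = {
    val in = new ByteArrayInputStream("""{"operation":"in_channel","message":"hello"}""".getBytes(UTF_8))
    val out = new ByteArrayOutputStream()
    new Slack().time(in, out)
    // Expected shape: {"text":"hello","response_type":"in_channel"}
    println(out.toString(UTF_8.name()))
  }
}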
|
foursquare/util
|
util-core/src/main/scala/com/twitter/util/Future.scala
|
package com.twitter.util
import com.twitter.concurrent.{Offer, IVar, Tx, Scheduler}
import java.util.concurrent.atomic.{
AtomicBoolean, AtomicInteger, AtomicReference,
AtomicReferenceArray}
import java.util.concurrent.{Future => JavaFuture, TimeUnit}
import scala.annotation.tailrec
import scala.collection.JavaConversions.{asScalaBuffer, seqAsJavaList}
import scala.collection.mutable
object Future {
val DEFAULT_TIMEOUT = Duration.Top
val Unit = apply(())
val Void = apply[Void](null)
val Done = Unit
val None: Future[Option[Nothing]] = new ConstFuture(Return(Option.empty))
val Nil: Future[Seq[Nothing]] = new ConstFuture(Return(Seq.empty))
/**
* Makes a Future with a constant result.
*/
def const[A](result: Try[A]): Future[A] = new ConstFuture[A](result)
/**
* Make a Future with a constant value. E.g., Future.value(1) is a Future[Int].
*/
def value[A](a: A): Future[A] = const[A](Return(a))
/**
* Make a Future with an error. E.g., Future.exception(new
* Exception("boo")).
*/
def exception[A](e: Throwable): Future[A] = const[A](Throw(e))
/**
* Make a Future with an error. E.g., Future.exception(new
* Exception("boo")). The exception is not wrapped in any way.
*/
def rawException[A](e: Throwable): Future[A] = const[A](Throw(e))
/**
* A new future that can never complete.
*/
def never: Future[Nothing] = new NoFuture
@deprecated("Prefer static Future.Void.", "5.x")
def void(): Future[Void] = value[Void](null)
/**
* A factory function to "lift" computations into the Future monad.
* It will catch nonfatal (see: [[com.twitter.util.NonFatal]])
* exceptions and wrap them in the Throw[_] type. Non-exceptional
* values are wrapped in the Return[_] type.
*/
def apply[A](a: => A): Future[A] = const(Try(a))
def unapply[A](f: Future[A]): Option[Try[A]] = f.poll
/**
* Run the computation {{mkFuture}} while installing a monitor that
* translates any exception thrown into an encoded one. If an
* exception is thrown anywhere, the underlying computation is
* interrupted with that exception.
*
* This function is usually called to wrap a computation that
* returns a Future (f0) whose value is satisfied by the invocation
* of an onSuccess/onFailure/ensure callbacks of another future
* (f1). If an exception happens in the callbacks on f1, f0 is
* never satisfied. In this example, `Future.monitored { f1
* onSuccess g; f0 }` will cancel f0 so that f0 never hangs.
*/
def monitored[A](mkFuture: => Future[A]): Future[A] = {
// We define this outside the scope of the following
// Promise to guarantee that it is not captured by any
// closures.
val promiseRef = new AtomicReference[Promise[A]]
val monitor = Monitor.mk { case exc =>
promiseRef.getAndSet(null) match {
case null => false
case p =>
p.raise(exc)
p.setException(exc)
true
}
}
val p = new Promise[A]
promiseRef.set(p)
monitor {
val f = mkFuture
p.forwardInterruptsTo(f)
f respond { r =>
promiseRef.getAndSet(null) match {
case null => ()
case p => p.update(r)
}
}
}
p
}
/**
* Flattens a nested future. Same as ffa.flatten, but easier to call from Java.
*/
def flatten[A](ffa: Future[Future[A]]): Future[A] = ffa.flatten
/**
* Take a sequence of Futures, wait till they all complete
* successfully. The future fails immediately if any of the joined
* Futures do, mimicking the semantics of exceptions.
*
* @param fs a sequence of Futures
* @return a Future[Unit] whose value is populated when all of the fs return.
*/
def join[A](fs: Seq[Future[A]]): Future[Unit] = {
if (fs.isEmpty) Unit else {
val count = new AtomicInteger(fs.size)
val p = Promise.interrupts[Unit](fs:_*)
for (f <- fs) {
f onSuccess { _ =>
if (count.decrementAndGet() == 0)
p.update(Return(()))
} onFailure { cause =>
p.updateIfEmpty(Throw(cause))
}
}
p
}
}
/* The following joins are generated with this code:
scala -e '
val meths = for (end <- ''b'' to ''v''; ps = ''a'' to end) yield
"""/**
* Join %d futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[%s](%s): Future[(%s)] = join(Seq(%s)) map { _ => (%s) }""".format(
ps.size,
ps map (_.toUpper) mkString ",",
ps map(p => "%c: Future[%c]".format(p, p.toUpper)) mkString ",",
ps map (_.toUpper) mkString ",",
ps mkString ",",
ps map(p => "Await.result("+p+")") mkString ","
)
meths foreach println
'
*/
/**
* Join 2 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B](a: Future[A],b: Future[B]): Future[(A,B)] = join(Seq(a,b)) map { _ => (Await.result(a),Await.result(b)) }
/**
* Join 3 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C](a: Future[A],b: Future[B],c: Future[C]): Future[(A,B,C)] = join(Seq(a,b,c)) map { _ => (Await.result(a),Await.result(b),Await.result(c)) }
/**
* Join 4 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D](a: Future[A],b: Future[B],c: Future[C],d: Future[D]): Future[(A,B,C,D)] = join(Seq(a,b,c,d)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d)) }
/**
* Join 5 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E]): Future[(A,B,C,D,E)] = join(Seq(a,b,c,d,e)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e)) }
/**
* Join 6 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F]): Future[(A,B,C,D,E,F)] = join(Seq(a,b,c,d,e,f)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f)) }
/**
* Join 7 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F,G](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F],g: Future[G]): Future[(A,B,C,D,E,F,G)] = join(Seq(a,b,c,d,e,f,g)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f),Await.result(g)) }
/**
* Join 8 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F,G,H](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F],g: Future[G],h: Future[H]): Future[(A,B,C,D,E,F,G,H)] = join(Seq(a,b,c,d,e,f,g,h)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f),Await.result(g),Await.result(h)) }
/**
* Join 9 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F,G,H,I](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F],g: Future[G],h: Future[H],i: Future[I]): Future[(A,B,C,D,E,F,G,H,I)] = join(Seq(a,b,c,d,e,f,g,h,i)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f),Await.result(g),Await.result(h),Await.result(i)) }
/**
* Join 10 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F,G,H,I,J](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F],g: Future[G],h: Future[H],i: Future[I],j: Future[J]): Future[(A,B,C,D,E,F,G,H,I,J)] = join(Seq(a,b,c,d,e,f,g,h,i,j)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f),Await.result(g),Await.result(h),Await.result(i),Await.result(j)) }
/**
* Join 11 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F,G,H,I,J,K](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F],g: Future[G],h: Future[H],i: Future[I],j: Future[J],k: Future[K]): Future[(A,B,C,D,E,F,G,H,I,J,K)] = join(Seq(a,b,c,d,e,f,g,h,i,j,k)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f),Await.result(g),Await.result(h),Await.result(i),Await.result(j),Await.result(k)) }
/**
* Join 12 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F,G,H,I,J,K,L](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F],g: Future[G],h: Future[H],i: Future[I],j: Future[J],k: Future[K],l: Future[L]): Future[(A,B,C,D,E,F,G,H,I,J,K,L)] = join(Seq(a,b,c,d,e,f,g,h,i,j,k,l)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f),Await.result(g),Await.result(h),Await.result(i),Await.result(j),Await.result(k),Await.result(l)) }
/**
* Join 13 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F,G,H,I,J,K,L,M](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F],g: Future[G],h: Future[H],i: Future[I],j: Future[J],k: Future[K],l: Future[L],m: Future[M]): Future[(A,B,C,D,E,F,G,H,I,J,K,L,M)] = join(Seq(a,b,c,d,e,f,g,h,i,j,k,l,m)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f),Await.result(g),Await.result(h),Await.result(i),Await.result(j),Await.result(k),Await.result(l),Await.result(m)) }
/**
* Join 14 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F,G,H,I,J,K,L,M,N](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F],g: Future[G],h: Future[H],i: Future[I],j: Future[J],k: Future[K],l: Future[L],m: Future[M],n: Future[N]): Future[(A,B,C,D,E,F,G,H,I,J,K,L,M,N)] = join(Seq(a,b,c,d,e,f,g,h,i,j,k,l,m,n)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f),Await.result(g),Await.result(h),Await.result(i),Await.result(j),Await.result(k),Await.result(l),Await.result(m),Await.result(n)) }
/**
* Join 15 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F,G,H,I,J,K,L,M,N,O](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F],g: Future[G],h: Future[H],i: Future[I],j: Future[J],k: Future[K],l: Future[L],m: Future[M],n: Future[N],o: Future[O]): Future[(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O)] = join(Seq(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f),Await.result(g),Await.result(h),Await.result(i),Await.result(j),Await.result(k),Await.result(l),Await.result(m),Await.result(n),Await.result(o)) }
/**
* Join 16 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F],g: Future[G],h: Future[H],i: Future[I],j: Future[J],k: Future[K],l: Future[L],m: Future[M],n: Future[N],o: Future[O],p: Future[P]): Future[(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P)] = join(Seq(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f),Await.result(g),Await.result(h),Await.result(i),Await.result(j),Await.result(k),Await.result(l),Await.result(m),Await.result(n),Await.result(o),Await.result(p)) }
/**
* Join 17 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F],g: Future[G],h: Future[H],i: Future[I],j: Future[J],k: Future[K],l: Future[L],m: Future[M],n: Future[N],o: Future[O],p: Future[P],q: Future[Q]): Future[(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q)] = join(Seq(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f),Await.result(g),Await.result(h),Await.result(i),Await.result(j),Await.result(k),Await.result(l),Await.result(m),Await.result(n),Await.result(o),Await.result(p),Await.result(q)) }
/**
* Join 18 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F],g: Future[G],h: Future[H],i: Future[I],j: Future[J],k: Future[K],l: Future[L],m: Future[M],n: Future[N],o: Future[O],p: Future[P],q: Future[Q],r: Future[R]): Future[(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R)] = join(Seq(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f),Await.result(g),Await.result(h),Await.result(i),Await.result(j),Await.result(k),Await.result(l),Await.result(m),Await.result(n),Await.result(o),Await.result(p),Await.result(q),Await.result(r)) }
/**
* Join 19 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F],g: Future[G],h: Future[H],i: Future[I],j: Future[J],k: Future[K],l: Future[L],m: Future[M],n: Future[N],o: Future[O],p: Future[P],q: Future[Q],r: Future[R],s: Future[S]): Future[(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S)] = join(Seq(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f),Await.result(g),Await.result(h),Await.result(i),Await.result(j),Await.result(k),Await.result(l),Await.result(m),Await.result(n),Await.result(o),Await.result(p),Await.result(q),Await.result(r),Await.result(s)) }
/**
* Join 20 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F],g: Future[G],h: Future[H],i: Future[I],j: Future[J],k: Future[K],l: Future[L],m: Future[M],n: Future[N],o: Future[O],p: Future[P],q: Future[Q],r: Future[R],s: Future[S],t: Future[T]): Future[(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T)] = join(Seq(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f),Await.result(g),Await.result(h),Await.result(i),Await.result(j),Await.result(k),Await.result(l),Await.result(m),Await.result(n),Await.result(o),Await.result(p),Await.result(q),Await.result(r),Await.result(s),Await.result(t)) }
/**
* Join 21 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F],g: Future[G],h: Future[H],i: Future[I],j: Future[J],k: Future[K],l: Future[L],m: Future[M],n: Future[N],o: Future[O],p: Future[P],q: Future[Q],r: Future[R],s: Future[S],t: Future[T],u: Future[U]): Future[(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U)] = join(Seq(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f),Await.result(g),Await.result(h),Await.result(i),Await.result(j),Await.result(k),Await.result(l),Await.result(m),Await.result(n),Await.result(o),Await.result(p),Await.result(q),Await.result(r),Await.result(s),Await.result(t),Await.result(u)) }
/**
* Join 22 futures. The returned future is complete when all
* underlying futures complete. It fails immediately if any of them
* do.
*/
def join[A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V](a: Future[A],b: Future[B],c: Future[C],d: Future[D],e: Future[E],f: Future[F],g: Future[G],h: Future[H],i: Future[I],j: Future[J],k: Future[K],l: Future[L],m: Future[M],n: Future[N],o: Future[O],p: Future[P],q: Future[Q],r: Future[R],s: Future[S],t: Future[T],u: Future[U],v: Future[V]): Future[(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V)] = join(Seq(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v)) map { _ => (Await.result(a),Await.result(b),Await.result(c),Await.result(d),Await.result(e),Await.result(f),Await.result(g),Await.result(h),Await.result(i),Await.result(j),Await.result(k),Await.result(l),Await.result(m),Await.result(n),Await.result(o),Await.result(p),Await.result(q),Await.result(r),Await.result(s),Await.result(t),Await.result(u),Await.result(v)) }
/**
* Take a sequence of Futures, wait till they all complete
* successfully. The future fails immediately if any of the joined
* Futures do, mimicking the semantics of exceptions.
*
* @param fs a java.util.List of Futures
* @return a Future[Unit] whose value is populated when all of the fs return.
*/
def join[A](fs: java.util.List[Future[A]]): Future[Unit] = join(asScalaBuffer(fs))
/**
* Collect the results from the given futures into a new future of
* Seq[A].
*
* @param fs a sequence of Futures
* @return a Future[Seq[A]] containing the collected values from fs.
*/
def collect[A](fs: Seq[Future[A]]): Future[Seq[A]] = {
if (fs.isEmpty) {
Future(Seq[A]())
} else {
val results = new AtomicReferenceArray[A](fs.size)
val count = new AtomicInteger(fs.size)
val p = Promise.interrupts[Seq[A]](fs:_*)
for (i <- 0 until fs.size) {
val f = fs(i)
f onSuccess { x =>
results.set(i, x)
if (count.decrementAndGet() == 0) {
val resultsArray = new mutable.ArrayBuffer[A](fs.size)
for (j <- 0 until fs.size) resultsArray += results.get(j)
p.setValue(resultsArray)
}
} onFailure { cause =>
p.updateIfEmpty(Throw(cause))
}
}
p
}
}
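// Hedged example (illustrative addition): collect preserves order and fails fast, so
// collecting two already-satisfied futures yields a future of Seq(1, 2).
private[this] def collectExample: Future[Seq[Int]] =
  collect(Seq(value(1), value(2)))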
/**
* Collect the results from the given futures into a new future of
* Seq[A].
*
* @param fs a java.util.List of Futures
* @return a Future[java.util.List[A]] containing the collected values from fs.
*/
def collect[A](fs: java.util.List[Future[A]]): Future[java.util.List[A]] =
collect(asScalaBuffer(fs)) map(seqAsJavaList(_))
/**
* "Select" off the first future to be satisfied. Return this as a
* result, with the remainder of the Futures as a sequence.
*
* @param fs a scala.collection.Seq
*/
def select[A](fs: Seq[Future[A]]): Future[(Try[A], Seq[Future[A]])] = {
if (fs.isEmpty) {
Future.exception(new IllegalArgumentException("empty future list!"))
} else {
val p = Promise.interrupts[(Try[A], Seq[Future[A]])](fs:_*)
@tailrec
def stripe(heads: Seq[Future[A]], elem: Future[A], tail: Seq[Future[A]]) {
elem respond { res =>
if (!p.isDefined) {
p.updateIfEmpty(Return((res, heads ++ tail)))
}
}
if (!tail.isEmpty)
stripe(heads ++ Seq(elem), tail.head, tail.tail)
}
stripe(Seq(), fs.head, fs.tail)
p
}
}
/**
* "Select" off the first future to be satisfied. Return this as a
* result, with the remainder of the Futures as a sequence.
*
* @param fs a java.util.List
* @return a Future[Tuple2[Try[A], java.util.List[Future[A]]]] representing the first future
* to be satisfied and the rest of the futures.
*/
def select[A](fs: java.util.List[Future[A]]): Future[(Try[A], java.util.List[Future[A]])] = {
select(asScalaBuffer(fs)) map { case (first, rest) =>
(first, seqAsJavaList(rest))
}
}
/**
* Repeat a computation that returns a Future some number of times, after each
* computation completes.
*/
def times[A](n: Int)(f: => Future[A]): Future[Unit] = {
val count = new AtomicInteger(0)
whileDo(count.getAndIncrement() < n)(f)
}
/**
* Repeat a computation that returns a Future while some predicate obtains,
* after each computation completes.
*/
def whileDo[A](p: => Boolean)(f: => Future[A]): Future[Unit] = {
def loop(): Future[Unit] = {
if (p) f flatMap { _ => loop() }
else Future.Unit
}
loop()
}
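// Hedged example (illustrative addition): runs a hypothetical asynchronous step three times
// in sequence; `times` chains each iteration through the previous one's completion.
private[this] def timesExample(step: () => Future[Unit]): Future[Unit] =
  times(3) { step() }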
def parallel[A](n: Int)(f: => Future[A]): Seq[Future[A]] = {
(0 until n) map { i => f }
}
}
class FutureCancelledException
extends Exception("The future was cancelled with Future.cancel")
/**
* An alternative interface for handling Future Events. This
* interface is designed to be friendly to Java users since it does
* not require creating many small Function objects.
*/
trait FutureEventListener[T] {
/**
* Invoked if the computation completes successfully
*/
def onSuccess(value: T): Unit
/**
* Invoked if the computation completes unsuccessfully
*/
def onFailure(cause: Throwable): Unit
}
/**
* An alternative interface for performing Future transformations;
* that is, converting a Future[A] to a Future[B]. This interface is
* designed to be friendly to Java users since it does not require
* creating many small Function objects. It is used in conjunction
* with `transformedBy`.
*
* You must override one of `{map, flatMap}`. If you override both
* `map` and `flatMap`, `flatMap` takes precedence. If you fail to
* override one of `{map, flatMap}`, an `AbstractMethodError` will be
* thrown at Runtime.
*
* '''Note:''' an exception e thrown in any of
* map/flatMap/handle/rescue will make the result of transformedBy be
* equivalent to Future.exception(e).
*/
abstract class FutureTransformer[-A, +B] {
/**
* Invoked if the computation completes successfully. Returns the
* new transformed value in a Future.
*/
def flatMap(value: A): Future[B] = Future.value(map(value))
/**
* Invoked if the computation completes successfully. Returns the
* new transformed value.
*
* ''Note'': this method will throw an `AbstractMethodError` if it is not overridden.
*/
def map(value: A): B = throw new AbstractMethodError
/**
* Invoked if the computation completes unsuccessfully. Returns the
* new Future value.
*/
def rescue(throwable: Throwable): Future[B] = Future.value(handle(throwable))
/**
* Invoked if the computation fails. Returns the new transformed
* value.
*/
def handle(throwable: Throwable): B = throw throwable
}
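// Hedged sketch (illustrative addition, not part of the original file): a FutureTransformer
// that maps an Int to its String form and recovers from any failure with a placeholder value,
// as described above.
private[util] object IntToStringTransformer extends FutureTransformer[Int, String] {
  override def map(value: Int): String = value.toString
  override def handle(throwable: Throwable): String = "n/a"
}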
/**
* A computation evaluated asynchronously. This implementation of
* Future does not assume any concrete implementation; in particular,
* it does not couple the user to a specific executor or event loop.
*
* Note that this class extends Try[_] indicating that the results of
* the computation may succeed or fail.
*
* Futures are also [[com.twitter.util.Cancellable]], but with
* special semantics: the cancellation signal is only guaranteed to
* be delivered when the promise has not yet completed.
*/
abstract class Future[+A] extends Awaitable[A] {
import Future.DEFAULT_TIMEOUT
/**
* When the computation completes, invoke the given callback
* function. Respond() yields a Try (either a Return or a Throw).
* This method is most useful for very generic code (like
* libraries). Otherwise, it is a best practice to use one of the
* alternatives (onSuccess(), onFailure(), etc.). Note that almost
* all methods on Future[_] are written in terms of respond(), so
* this is the essential template method for use in concrete
* subclasses.
*
* @return a chained Future[A]
*/
def respond(k: Try[A] => Unit): Future[A]
/**
* Invoked regardless of whether the computation completed successfully or unsuccessfully.
* Implemented in terms of `respond` so that subclasses control evaluation order. Returns a
* chained Future.
*/
def ensure(f: => Unit): Future[A] = respond { _ => f }
/**
* Block indefinitely, wait for the result of the Future to be available.
*/
@deprecated("Use Await.result", "6.2.x")
def apply(): A = apply(DEFAULT_TIMEOUT)
/**
* Block, but only as long as the given Timeout.
*/
@deprecated("Use Await.result", "6.2.x")
def apply(timeout: Duration): A = get(timeout)()
/**
* Alias for apply().
*/
@deprecated("Use Await.result", "6.2.x")
def get() = apply()
@deprecated("Use Await.result", "6.2.x")
def isReturn = get(DEFAULT_TIMEOUT) isReturn
@deprecated("Use Await.result", "6.2.x")
def isThrow = get(DEFAULT_TIMEOUT) isThrow
/**
* Is the result of the Future available yet?
*/
def isDefined: Boolean = poll.isDefined
/**
* Demands that the result of the future be available within
* `timeout`. The result is a Return[_] or Throw[_] depending upon
* whether the computation finished in time.
*/
@deprecated("Use Await.result", "6.2.x")
final def get(timeout: Duration): Try[A] = {
Await.ready(this, timeout)
try Return(Await.result(this, Duration.Zero)) catch {
// For legacy reasons, we catch even
// fatal exceptions.
case e => Throw(e)
}
}
/**
* Polls for an available result. If the Future has been
* satisfied, returns Some(result), otherwise None.
*/
def poll: Option[Try[A]]
/**
* Raise the given throwable as an interrupt. Interrupts are
* one-shot and latest-interrupt wins. That is, the last interrupt
* to have been raised is delivered exactly once to the Promise
* responsible for making progress on the future (multiple such
* promises may be involved in `flatMap` chains).
*
* Raising an interrupt does not alter the externally observable
* state of the Future. They are used to signal to the ''producer''
* of the future's value that the result is no longer desired (for
* whatever reason given in the passed Throwable).
*/
def raise(interrupt: Throwable)
@deprecated("Provided for API compatibility; use raise() instead.", "6.0.0")
def cancel() { raise(new FutureCancelledException) }
/**
* Same as the other within, but with an implicit timer. Sometimes this is more convenient.
*/
def within(timeout: Duration)(implicit timer: Timer): Future[A] =
within(timer, timeout)
/**
* Returns a new Future that will error if this Future does not return in time.
*
* @param timeout indicates how long you are willing to wait for the result to be available.
*/
def within(timer: Timer, timeout: Duration): Future[A] = {
if (timeout == Duration.Top)
return this
val p = Promise.interrupts[A](this)
val task = timer.schedule(timeout.fromNow) {
p.updateIfEmpty(Throw(new TimeoutException(timeout.toString)))
}
respond { r =>
task.cancel()
p.updateIfEmpty(r)
}
p
}
def transform[B](f: Try[A] => Future[B]): Future[B]
/**
* If this, the original future, succeeds, run f on the result.
*
* The returned result is a Future that is satisfied when the original future
* and the callback, f, are done.
* If the original future fails, this one will also fail, without executing f.
*
* @see map()
*/
def flatMap[B](f: A => Future[B]): Future[B] =
transform({
case Return(v) => f(v)
case Throw(t) => Future.rawException(t)
})
def rescue[B >: A](
rescueException: PartialFunction[Throwable, Future[B]]
): Future[B] = transform({
case Throw(t) if rescueException.isDefinedAt(t) => rescueException(t)
case _ => this
})
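// Hedged example (illustrative addition): recover a failed lookup with a fallback future;
// `lookup` and `fallback` are hypothetical.
private def rescueExample(lookup: Future[Int], fallback: Future[Int]): Future[Int] =
  lookup rescue { case _: IllegalStateException => fallback }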
/**
* Invoke the callback only if the Future returns successfully. Useful for Scala `for`
* comprehensions. Use `onSuccess` instead of this method for more readable code.
*/
def foreach(k: A => Unit) = onSuccess(k)
/**
* If this, the original future, succeeds, run f on the result.
*
* The returned result is a Future that is satisfied when the original future
* and the callback, f, are done.
* If the original future fails, this one will also fail, without executing f.
*
* @see flatMap()
*/
def map[B](f: A => B): Future[B] = flatMap { a => Future { f(a) } }
def filter(p: A => Boolean): Future[A] = transform { x: Try[A] => Future.const(x.filter(p)) }
def withFilter(p: A => Boolean): Future[A] = filter(p)
/**
* Invoke the function on the result, if the computation was
* successful. Returns a chained Future as in `respond`.
*
* @return chained Future
*/
def onSuccess(f: A => Unit): Future[A] =
respond({
case Return(value) => f(value)
case _ =>
})
/**
* Invoke the function on the error, if the computation was
* unsuccessful. Returns a chained Future as in `respond`.
*
* @return chained Future
*/
def onFailure(rescueException: Throwable => Unit): Future[A] =
respond({
case Throw(throwable) => rescueException(throwable)
case _ =>
})
/**
* Register a FutureEventListener to be invoked when the
* computation completes. This method is typically used by Java
* programs because it avoids the use of small Function objects.
*
* Compare this method to `transformedBy`. The difference is that
* `addEventListener` is used to perform a simple action when a
* computation completes, such as recording data in a log-file. It
* is analogous to a `void` method in Java: it has side-effects and no
* return value. `transformedBy`, on the other hand, is used to
* transform values from one type to another, or to chain a series
* of asynchronous calls and return the result. It is analogous to
* methods in Java that have a return-type. Note that
* `transformedBy` and `addEventListener` are not mutually
* exclusive and may be profitably combined.
*/
def addEventListener(listener: FutureEventListener[_ >: A]) = respond({
case Throw(cause) => listener.onFailure(cause)
case Return(value) => listener.onSuccess(value)
})
/**
* Transform the Future[A] into a Future[B] using the
* FutureTransformer. The FutureTransformer handles both success
* (Return) and failure (Throw) values by implementing map/flatMap
* and handle/rescue. This method is typically used by Java
* programs because it avoids the use of small Function objects.
*
* Compare this method to `addEventListener`. The difference is
* that `addEventListener` is used to perform a simple action when
* a computation completes, such as recording data in a log-file.
* It is analogous to a `void` method in Java: it has side-effects and
* no return value. `transformedBy`, on the other hand, is used to
* transform values from one type to another, or to chain a series
* of asynchronous calls and return the result. It is analogous to
* methods in Java that have a return-type. Note that
* `transformedBy` and `addEventListener` are not mutually
* exclusive and may be profitably combined.
*
* ''Note'': The FutureTransformer must implement either `flatMap`
* or `map` and may optionally implement `handle`. Failing to
* implement a method will result in a run-time (AbstractMethod)
* error.
*/
def transformedBy[B](transformer: FutureTransformer[A, B]): Future[B] =
transform {
case Return(v) => transformer.flatMap(v)
case Throw(t) => transformer.rescue(t)
}
def handle[B >: A](rescueException: PartialFunction[Throwable, B]): Future[B] = rescue {
case e: Throwable if rescueException.isDefinedAt(e) => Future(rescueException(e))
case e: Throwable => this
}
/**
* Choose the first Future to succeed.
*
* @param other another Future
* @return a new Future whose result is that of the first of this and other to return
*/
def select[U >: A](other: Future[U]): Future[U] = {
val p = Promise.interrupts[U](other, this)
other respond { p.updateIfEmpty(_) }
this respond { p.updateIfEmpty(_) }
p
}
/**
* A synonym for select(): Choose the first Future to succeed.
*/
def or[U >: A](other: Future[U]): Future[U] = select(other)
/**
* Combines two Futures into one Future of the Tuple of the two results.
*/
def join[B](other: Future[B]): Future[(A, B)] = {
val p = Promise.interrupts[(A, B)](this, other)
this.respond {
case Throw(t) => p() = Throw(t)
case Return(a) => other respond {
case Throw(t) => p() = Throw(t)
case Return(b) => p() = Return((a, b))
}
}
p
}
/**
* Convert this Future[A] to a Future[Unit] by discarding the result.
*/
def unit: Future[Unit] = map(_ => ())
/**
* Convert this Future[A] to a Future[Void] by discarding the result.
*/
def voided: Future[Void] = map(_ => null.asInstanceOf[Void])
@deprecated("'void' is a reserved word in javac.", "5.x")
def void: Future[Void] = voided
/**
* Send updates from this Future to the other.
* ``other'' must not yet be satisfied.
*/
def proxyTo[B >: A](other: Promise[B]) {
respond { other() = _ }
}
/**
* An offer for this future. The offer is activated when the future
* is satisfied.
*/
def toOffer: Offer[Try[A]] = new Offer[Try[A]] {
def prepare() = transform { res: Try[A] =>
val tx = new Tx[Try[A]] {
def ack() = Future.value(Tx.Commit(res))
def nack() {}
}
Future.value(tx)
}
}
/**
* Convert a Twitter Future to a Java native Future. This should
* match the semantics of a Java Future as closely as possible to
* avoid issues with the way another API might use them. See:
*
* http://download.oracle.com/javase/6/docs/api/java/util/concurrent/Future.html#cancel(boolean)
*/
def toJavaFuture: JavaFuture[_ <: A] = {
val f = this
new JavaFuture[A] {
val wasCancelled = new AtomicBoolean(false)
override def cancel(mayInterruptIfRunning: Boolean): Boolean = {
if (wasCancelled.compareAndSet(false, true))
f.raise(new java.util.concurrent.CancellationException)
true
}
override def isCancelled: Boolean = wasCancelled.get
override def isDone: Boolean = isCancelled || f.isDefined
override def get(): A = {
if (isCancelled)
throw new java.util.concurrent.CancellationException()
Await.result(f)
}
override def get(time: Long, timeUnit: TimeUnit): A = {
if (isCancelled)
throw new java.util.concurrent.CancellationException()
Await.result(f, Duration.fromTimeUnit(time, timeUnit))
}
}
}
/**
* Converts a Future[Future[B]] into a Future[B]
*/
def flatten[B](implicit ev: A <:< Future[B]): Future[B] =
flatMap[B] { x => x }
/**
* Returns a Future[Boolean] indicating whether two Futures are equivalent. Note that
* Future.exception(e).willEqual(Future.exception(e)) == Future.value(true).
*/
def willEqual[B](that: Future[B]) = {
val areEqual = new Promise[Boolean]
this respond { thisResult =>
that respond { thatResult =>
areEqual.setValue(thisResult == thatResult)
}
}
areEqual
}
}
/**
* A Future that is already completed. These are cheap in
* construction compared to Promises.
*/
class ConstFuture[A](result: Try[A]) extends Future[A] {
def respond(k: Try[A] => Unit): Future[A] = {
val saved = Local.save()
Scheduler.submit(new Runnable {
def run() {
val current = Local.save()
Local.restore(saved)
try Monitor { k(result) } finally Local.restore(current)
}
})
this
}
def raise(interrupt: Throwable) {}
def transform[B](f: Try[A] => Future[B]): Future[B] = {
val p = new Promise[B]
respond({ r =>
val result = try f(r) catch { case NonFatal(e) => Future.exception(e) }
p.become(result)
})
p
}
def poll: Option[Try[A]] = Some(result)
// Awaitable
@throws(classOf[TimeoutException])
@throws(classOf[InterruptedException])
def ready(timeout: Duration)(implicit permit: Awaitable.CanAwait): this.type = this
@throws(classOf[Exception])
def result(timeout: Duration)(implicit permit: Awaitable.CanAwait): A = result()
}
/**
* A future with no future (never completes).
*/
class NoFuture extends Future[Nothing] {
def respond(k: Try[Nothing] => Unit): Future[Nothing] = this
def transform[B](f: Try[Nothing] => Future[B]): Future[B] = this
def raise(interrupt: Throwable) {}
// Awaitable
@throws(classOf[TimeoutException])
@throws(classOf[InterruptedException])
def ready(timeout: Duration)(implicit permit: Awaitable.CanAwait): this.type = {
Thread.sleep(timeout.inMilliseconds)
throw new TimeoutException(timeout.toString)
}
@throws(classOf[Exception])
def result(timeout: Duration)(implicit permit: Awaitable.CanAwait): Nothing = {
Thread.sleep(timeout.inMilliseconds)
throw new TimeoutException(timeout.toString)
}
def poll: Option[Try[Nothing]] = None
}
class FutureTask[A](fn: => A) extends Promise[A] with Runnable {
def run() {
update(Try(fn))
}
}
object FutureTask {
def apply[A](fn: => A) = new FutureTask[A](fn)
}
private[util] object FutureBenchmark {
/**
* Admittedly, this is not very good microbenchmarking technique.
*/
import com.twitter.conversions.storage._
private[this] val NumIters = 100.million
private[this] def bench[A](numIters: Long)(f: => A): Long = {
val begin = System.currentTimeMillis()
(0L until numIters) foreach { _ => f }
System.currentTimeMillis() - begin
}
private[this] def run[A](name: String)(work: => A) {
printf("Warming up %s.. ", name)
val warmupTime = bench(NumIters)(work)
printf("%d ms\n", warmupTime)
printf("Running .. ")
val runTime = bench(NumIters)(work)
printf(
"%d ms, %d %s/sec\n",
runTime, 1000 * NumIters / runTime, name)
}
def main(args: Array[String]) {
run("respond") {
val promise = new Promise[Unit]
promise respond { res => () }
promise() = Return(())
}
run("flatMaps") {
val promise = new Promise[Unit]
promise flatMap { _ => Future.value(()) }
promise() = Return(())
}
}
}
private[util] object Leaky {
def main(args: Array[String]) {
def loop(i: Int): Future[Int] = Future.value(i) flatMap { count =>
if (count % 1000000 == 0) {
System.gc()
println("iter %d %dMB".format(
count, Runtime.getRuntime().totalMemory()>>20))
}
loop(count + 1)
}
loop(1)
}
}
|
foursquare/util
|
util-app/src/test/scala/com/twitter/app/FlagTest.scala
|
package com.twitter.app
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith
object MyGlobalFlag extends GlobalFlag("a test flag", "a global test flag")
@RunWith(classOf[JUnitRunner])
class FlagTest extends FunSuite {
test("Flaggable: parse booleans") {
assert(Flaggable.ofBoolean.parse("true"))
assert(!Flaggable.ofBoolean.parse("false"))
intercept[Throwable] { Flaggable.ofBoolean.parse("") }
intercept[Throwable] { Flaggable.ofBoolean.parse("gibberish") }
}
test("Flaggable: parse strings") {
assert(Flaggable.ofString.parse("blah") === "blah")
}
test("Flaggable: parse/show inet addresses") {
val local8080 = Flaggable.ofInetSocketAddress.parse(":8080")
assert(local8080.getAddress.isAnyLocalAddress)
assert(local8080.getPort === 8080)
val ip8080 = Flaggable.ofInetSocketAddress.parse("172.16.31.10:8080")
assert(ip8080.getHostName === "172.16.31.10")
assert(ip8080.getPort === 8080)
assert(Flaggable.ofInetSocketAddress.show(local8080) === ":8080")
assert(Flaggable.ofInetSocketAddress.show(ip8080) === "172.16.31.10:8080")
}
test("Flaggable: parse seqs") {
assert(Flaggable.ofSeq[Int].parse("1,2,3,4") === Seq(1,2,3,4))
}
test("Flaggable: parse tuples") {
assert(Flaggable.ofTuple[Int, String].parse("1,hello") === (1, "hello"))
intercept[IllegalArgumentException] { Flaggable.ofTuple[Int, String].parse("1") }
}
class Ctx {
val flag = new Flags("test")
val fooFlag = flag("foo", 123, "The foo value")
val barFlag = flag("bar", "okay", "The bar value")
}
test("Flag: defaults") {
val ctx = new Ctx
import ctx._
assert(fooFlag() === 123)
assert(barFlag() === "okay")
}
test("Flag: add and parse flags") {
val ctx = new Ctx
import ctx._
assert(flag.parse(Array("-foo", "973", "-bar", "hello there")).isEmpty)
assert(fooFlag() === 973)
assert(barFlag() === "hello there")
}
class Bctx extends Ctx {
val yesFlag = flag("yes", false, "Just say yes.")
}
test("Boolean: default") {
val ctx = new Bctx
import ctx._
assert(!yesFlag())
}
test("Boolean: -yes") {
val ctx = new Bctx
import ctx._
assert(flag.parse(Array("-yes")).isEmpty)
assert(yesFlag())
}
test("Boolean: -yes=true") {
val ctx = new Bctx
import ctx._
assert(flag.parse(Array("-yes=true")).isEmpty)
assert(yesFlag())
}
test("Boolean: -yes=false") {
val ctx = new Bctx
import ctx._
assert(flag.parse(Array("-yes=false")).isEmpty)
assert(!yesFlag())
}
test("Flag: handle remainders (sequential)") {
val ctx = new Ctx
import ctx._
assert(flag.parse(Array("-foo", "333", "arg0", "arg1")) === Seq("arg0", "arg1"))
}
test("Flag: handle remainders (interpspersed)") {
val ctx = new Ctx
import ctx._
assert(flag.parse(Array("arg0", "-foo", "333", "arg1")) === Seq("arg0", "arg1"))
}
test("Flag: give nice parse errors") {
val ctx = new Ctx
import ctx._
val thr = intercept[Exception] { flag.parse(Array("-foo", "blah")) }
}
test("Flag: handle -help") {
val ctx = new Ctx
import ctx._
intercept[FlagUsageError] { flag.parse(Array("-help")) }
}
test("Flag: mandatory flag without argument") {
val ctx = new Ctx
import ctx._
val thr = intercept[FlagParseException] { flag.parse(Array("-foo")) }
}
test("Flag: undefined") {
val ctx = new Ctx
import ctx._
val thr = intercept[FlagParseException] { flag.parse(Array("-undefined")) }
assert(flag.parse(Array("-undefined"), true) === Seq("-undefined"))
}
test("GlobalFlag") {
assert(MyGlobalFlag() === "a test flag")
val flag = new Flags("my", includeGlobal=true)
flag.parse(Array("-com.twitter.app.MyGlobalFlag", "okay"))
assert(MyGlobalFlag() === "okay")
System.setProperty("com.twitter.app.MyGlobalFlag", "not okay")
assert(MyGlobalFlag() === "okay")
MyGlobalFlag.reset()
assert(MyGlobalFlag() === "not okay")
}
}
|
foursquare/util
|
util-core/src/main/scala/com/twitter/concurrent/Spool.scala
|
package com.twitter.concurrent
import scala.collection.mutable.ArrayBuffer
import com.twitter.util.{Future, Promise, Return, Await, Duration}
/**
* A spool is an asynchronous stream. It more or less
* mimics the scala {{Stream}} collection, but with cons
* cells that have deferred tails.
*
* Construction is done with Spool.cons, Spool.empty. Convenience
* syntax like that of [[scala.Stream]] is provided. In order to use
* these operators for deconstruction, they must be imported
* explicitly ({{import Spool.{*::, **::}}})
*
* {{{
* def fill(rest: Promise[Spool[Int]]) {
* asyncProcess foreach { result =>
* if (result.last) {
* rest() = Return(result **:: Spool.empty)
* } else {
* val next = new Promise[Spool[Int]]
* rest() = Return(result *:: next)
* fill(next)
* }
* }
* }
* val rest = new Promise[Spool[Int]]
* fill(rest)
* firstElem *:: rest
* }}}
*/
sealed trait Spool[+A] {
import Spool.{cons, empty}
def isEmpty: Boolean
/**
* The first element of the spool. Invalid for empty spools.
*/
def head: A
/**
* The (deferred) tail of the spool. Invalid for empty spools.
*/
def tail: Future[Spool[A]]
/**
* Apply {{f}} for each item in the spool, until the end. {{f}} is
* applied as the items become available.
*/
def foreach[B](f: A => B) = foreachElem { _ foreach(f) }
/**
* A version of {{foreach}} that wraps each element in an
* {{Option}}, terminating the stream (EOF or failure) with
* {{None}}.
*/
def foreachElem[B](f: Option[A] => B) {
if (!isEmpty) {
f(Some(head))
tail onSuccess { s =>
s.foreachElem(f)
} onFailure { _ =>
f(None)
}
} else {
f(None)
}
}
/**
* The standard Scala collect, in order to implement map & filter.
*
* It may seem unnatural to return a Future[…] here, but we cannot
* know whether the first element exists until we have applied its
* filter.
*/
def collect[B](f: PartialFunction[A, B]): Future[Spool[B]]
def map[B](f: A => B): Spool[B] = {
val s = collect { case x => f(x) }
Await.result(s, Duration.Zero)
}
def filter(f: A => Boolean): Future[Spool[A]] = collect {
case x if f(x) => x
}
/**
* Concatenates two spools.
*/
def ++[B >: A](that: Spool[B]): Spool[B] =
if (isEmpty) that else cons(head: B, tail map { _ ++ that })
/**
* Concatenates two spools.
*/
def ++[B >: A](that: Future[Spool[B]]): Future[Spool[B]] =
if (isEmpty) that else Future.value(cons(head: B, tail flatMap { _ ++ that }))
/**
* Applies a function that generates a spool to each element in this spool,
* flattening the result into a single spool.
*/
def flatMap[B](f: A => Future[Spool[B]]): Future[Spool[B]] =
if (isEmpty) Future.value(empty[B])
else f(head) flatMap { _ ++ (tail flatMap { _ flatMap f }) }
/**
* Fully buffer the spool to a {{Seq}}. The returned future is
* satisfied when the entire result is ready.
*/
def toSeq: Future[Seq[A]] = {
val p = new Promise[Seq[A]]
val as = new ArrayBuffer[A]
foreachElem {
case Some(a) => as += a
case None => p() = Return(as)
}
p
}
}
object Spool {
case class Cons[A](value: A, next: Future[Spool[A]])
extends Spool[A]
{
def isEmpty = false
def head = value
def tail = next
def collect[B](f: PartialFunction[A, B]) = {
val next_ = next flatMap { _.collect(f) }
if (f.isDefinedAt(head)) Future.value(Cons(f(head), next_))
else next_
}
override def toString = "Cons(%s, %c)".format(head, if (tail.isDefined) '*' else '?')
}
object Empty extends Spool[Nothing] {
def isEmpty = true
def head = throw new NoSuchElementException("spool is empty")
def tail = Future.exception(new NoSuchElementException("spool is empty"))
def collect[B](f: PartialFunction[Nothing, B]) = Future.value(this)
override def toString = "Empty"
}
/**
* Cons a value & (possibly deferred) tail to a new {{Spool}}.
*/
def cons[A](value: A, next: Future[Spool[A]]): Spool[A] = Cons(value, next)
def cons[A](value: A, nextSpool: Spool[A]): Spool[A] = Cons(value, Future.value(nextSpool))
/**
* The empty spool.
*/
def empty[A]: Spool[A] = Empty
/**
* Syntax support. We retain different constructors for future
* resolving vs. not.
*
* *:: constructs and deconstructs deferred tails
* **:: constructs and deconstructs eager tails
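*
* A small illustrative sketch:
* {{{
* val eager    = 1 **:: 2 **:: Spool.empty[Int] // tails already known
* val deferred = 0 *:: Future.value(eager)      // tail behind a Future
* val head *:: rest = deferred                  // deconstruction: head == 0
* }}}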
*/
class Syntax[A](tail: => Future[Spool[A]]) {
def *::(head: A) = cons(head, tail)
}
implicit def syntax[A](s: Future[Spool[A]]) = new Syntax(s)
object *:: {
def unapply[A](s: Spool[A]): Option[(A, Future[Spool[A]])] = {
if (s.isEmpty) None
else Some((s.head, s.tail))
}
}
class Syntax1[A](tail: => Spool[A]) {
def **::(head: A) = cons(head, tail)
}
implicit def syntax1[A](s: Spool[A]) = new Syntax1(s)
object **:: {
def unapply[A](s: Spool[A]): Option[(A, Spool[A])] = {
if (s.isEmpty) None
else Some((s.head, Await.result(s.tail)))
}
}
}
|
foursquare/util
|
util-core/src/main/scala/com/twitter/util/Timer.scala
|
package com.twitter.util
import com.twitter.concurrent.NamedPoolThreadFactory
import com.twitter.concurrent.Serialized
import com.twitter.conversions.time._
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.{CancellationException, ExecutorService, RejectedExecutionHandler,
ScheduledThreadPoolExecutor, ThreadFactory, TimeUnit}
import scala.collection.mutable.ArrayBuffer
trait TimerTask {
def cancel()
}
trait Timer {
def schedule(when: Time)(f: => Unit): TimerTask
def schedule(when: Time, period: Duration)(f: => Unit): TimerTask
def schedule(period: Duration)(f: => Unit): TimerTask = {
schedule(period.fromNow, period)(f)
}
/**
* Performs an operation after the specified delay. Interrupting the Future
* will cancel the scheduled timer task, if not too late.
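*
* A hedged sketch (`timer` is any Timer implementation):
* {{{
* val f = timer.doLater(1.second) { "computed later" }
* // f is satisfied roughly one second from now; raising an
* // interrupt on f before then cancels the underlying task.
* }}}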
*/
def doLater[A](delay: Duration)(f: => A): Future[A] = {
doAt(Time.now + delay)(f)
}
/**
* Performs an operation at the specified time. Interrupting the Future
* will cancel the scheduled timer task, if not too late.
*/
def doAt[A](time: Time)(f: => A): Future[A] = {
val pending = new AtomicBoolean(true)
val p = new Promise[A]
val task = schedule(time) {
if (pending.compareAndSet(true, false))
p.update(Try(f))
}
p.setInterruptHandler {
case cause =>
if (pending.compareAndSet(true, false)) {
task.cancel()
val exc = new CancellationException
exc.initCause(cause)
p.setException(exc)
}
}
p
}
def stop()
}
/**
* A NullTimer is not a timer at all: it invokes all tasks immediately
* and inline.
*/
class NullTimer extends Timer {
def schedule(when: Time)(f: => Unit): TimerTask = {
f
NullTimerTask
}
def schedule(when: Time, period: Duration)(f: => Unit): TimerTask = {
f
NullTimerTask
}
def stop() {}
}
object NullTimerTask extends TimerTask {
def cancel() {}
}
class ThreadStoppingTimer(underlying: Timer, executor: ExecutorService) extends Timer {
def schedule(when: Time)(f: => Unit): TimerTask =
underlying.schedule(when)(f)
def schedule(when: Time, period: Duration)(f: => Unit): TimerTask =
underlying.schedule(when, period)(f)
def stop() {
executor.submit(new Runnable { def run() = underlying.stop() })
}
}
trait ReferenceCountedTimer extends Timer {
def acquire()
}
class ReferenceCountingTimer(factory: () => Timer)
extends ReferenceCountedTimer
{
private[this] var refcount = 0
private[this] var underlying: Timer = null
def acquire() = synchronized {
refcount += 1
if (refcount == 1) {
require(underlying == null)
underlying = factory()
}
}
def stop() = synchronized {
refcount -= 1
if (refcount == 0) {
underlying.stop()
underlying = null
}
}
// Just dispatch to the underlying timer. It's the responsibility of
// the API consumer to not call into the timer once it has been
// stopped.
def schedule(when: Time)(f: => Unit) = underlying.schedule(when)(f)
def schedule(when: Time, period: Duration)(f: => Unit) = underlying.schedule(when, period)(f)
}
class JavaTimer(isDaemon: Boolean) extends Timer {
def this() = this(false)
private[this] val underlying = new java.util.Timer(isDaemon)
def schedule(when: Time)(f: => Unit) = {
val task = toJavaTimerTask(f)
underlying.schedule(task, when.toDate)
toTimerTask(task)
}
def schedule(when: Time, period: Duration)(f: => Unit) = {
val task = toJavaTimerTask(f)
underlying.schedule(task, when.toDate, period.inMillis)
toTimerTask(task)
}
def stop() = underlying.cancel()
/**
* log any Throwables caught by the internal TimerTask.
*
* By default we log to System.err but users may subclass and log elsewhere.
*
* This method MUST NOT throw or else your Timer will die.
*/
def logError(t: Throwable) {
System.err.println("WARNING: JavaTimer caught exception running task: %s".format(t))
t.printStackTrace(System.err)
}
private[this] def toJavaTimerTask(f: => Unit) = new java.util.TimerTask {
def run {
try {
f
} catch {
case NonFatal(t) => logError(t)
case fatal: Throwable =>
logError(fatal)
throw fatal
}
}
}
private[this] def toTimerTask(task: java.util.TimerTask) = new TimerTask {
def cancel() { task.cancel() }
}
}
class ScheduledThreadPoolTimer(
poolSize: Int,
threadFactory: ThreadFactory,
rejectedExecutionHandler: Option[RejectedExecutionHandler])
extends Timer {
def this(poolSize: Int, threadFactory: ThreadFactory) =
this(poolSize, threadFactory, None)
def this(poolSize: Int, threadFactory: ThreadFactory, handler: RejectedExecutionHandler) =
this(poolSize, threadFactory, Some(handler))
/** Construct a ScheduledThreadPoolTimer with a NamedPoolThreadFactory. */
def this(poolSize: Int = 2, name: String = "timer", makeDaemons: Boolean = false) =
this(poolSize, new NamedPoolThreadFactory(name, makeDaemons), None)
private[this] val underlying = rejectedExecutionHandler match {
case None =>
new ScheduledThreadPoolExecutor(poolSize, threadFactory)
case Some(h: RejectedExecutionHandler) =>
new ScheduledThreadPoolExecutor(poolSize, threadFactory, h)
}
def schedule(when: Time)(f: => Unit): TimerTask = {
val runnable = new Runnable { def run = f }
val javaFuture = underlying.schedule(runnable, when.sinceNow.inMillis, TimeUnit.MILLISECONDS)
new TimerTask {
def cancel() {
javaFuture.cancel(true)
underlying.remove(runnable)
}
}
}
def schedule(when: Time, period: Duration)(f: => Unit): TimerTask =
schedule(when.sinceNow, period)(f)
def schedule(wait: Duration, period: Duration)(f: => Unit): TimerTask = {
val runnable = new Runnable { def run = f }
val javaFuture = underlying.scheduleAtFixedRate(runnable,
wait.inMillis, period.inMillis, TimeUnit.MILLISECONDS)
new TimerTask {
def cancel() {
javaFuture.cancel(true)
underlying.remove(runnable)
}
}
}
def stop() = underlying.shutdown()
}
// Exceedingly useful for writing well-behaved tests.
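//
// A hedged usage sketch (assumes Time.withTimeAt and the time conversions from this library):
//
//   Time.withTimeAt(Time.now) { ctl =>
//     val timer = new MockTimer
//     timer.schedule(1.second.fromNow) { println("fired") }
//     ctl.advance(1.second)
//     timer.tick() // runs every task whose time has come
//   }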
class MockTimer extends Timer {
// These are weird semantics admittedly, but there may
// be a bunch of tests that rely on them already.
case class Task(var when: Time, runner: () => Unit)
extends TimerTask
{
var isCancelled = false
def cancel() { isCancelled = true; nCancelled += 1; when = Time.now; tick() }
}
var isStopped = false
var tasks = ArrayBuffer[Task]()
var nCancelled = 0
def tick() {
if (isStopped)
throw new IllegalStateException("timer is stopped already")
val now = Time.now
val (toRun, toQueue) = tasks.partition { task => task.when <= now }
tasks = toQueue
toRun filter { !_.isCancelled } foreach { _.runner() }
}
def schedule(when: Time)(f: => Unit): TimerTask = {
val task = Task(when, () => f)
tasks += task
task
}
def schedule(when: Time, period: Duration)(f: => Unit): TimerTask =
throw new Exception("periodic scheduling not supported")
def stop() { isStopped = true }
}
|
foursquare/util
|
util-core/src/main/scala/com/twitter/util/Closable.scala
|
package com.twitter.util
/**
* Closable is a mixin trait to describe a closable ``resource``.
*/
trait Closable { self =>
/**
* Close the resource. The returned Future is completed when
* the resource has been fully relinquished.
*/
final def close(): Future[Unit] = close(Time.Bottom)
/**
* Close the resource with the given deadline. This deadline is advisory,
* giving the callee some leeway, for example to drain clients or finish
* up other tasks.
*/
def close(deadline: Time): Future[Unit]
}
object Closable {
/**
* Concurrent composition: creates a new closable which, when
* closed, closes all of the underlying resources simultaneously.
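*
* For example (a minimal sketch with stand-in resources):
* {{{
* val a = new Closable { def close(deadline: Time) = Future.Done }
* val b = new Closable { def close(deadline: Time) = Future.Done }
* Closable.all(a, b).close()      // completes when both closes complete
* Closable.sequence(a, b).close() // closes a, then b
* }}}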
*/
def all(closables: Closable*): Closable = new Closable {
def close(deadline: Time) = Future.join(closables map(_.close(deadline)))
}
/**
* Sequential composition: create a new Closable which, when
* closed, closes all of the underlying ones in sequence: that is,
* resource ''n+1'' is not closed until resource ''n'' is.
*/
def sequence(closables: Closable*): Closable = new Closable {
private final def closeSeq(deadline: Time, closables: Seq[Closable]): Future[Unit] =
closables match {
case Seq() => Future.Done
case Seq(hd, tl@_*) => hd.close(deadline) flatMap { _ => closeSeq(deadline, tl) }
}
def close(deadline: Time) = closeSeq(deadline, closables)
}
}
|
foursquare/util
|
util-jvm/src/main/scala/com/twitter/jvm/ContentionSnapshot.scala
|
package com.twitter.jvm
import java.lang.management.{ManagementFactory, ThreadInfo}
import java.lang.Thread.State._
import scala.collection.mutable
/**
* A thread contention summary. This provides a brief overview of threads
* that are blocked or otherwise waiting.
*
* While this could be an object, we use instantiation as a signal of intent
* and enable contention monitoring.
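*
* A small usage sketch:
* {{{
* val snapshot = new ContentionSnapshot().snap()
* snapshot.blockedThreads foreach println
* }}}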
*/
class ContentionSnapshot {
ManagementFactory.getThreadMXBean.setThreadContentionMonitoringEnabled(true)
case class Snapshot(
blockedThreads: Seq[String],
lockOwners: Seq[String])
private[this] object Blocked {
def unapply(t: ThreadInfo): Option[ThreadInfo] = {
t.getThreadState match {
case BLOCKED | WAITING | TIMED_WAITING => Some(t)
case _ => None
}
}
}
def snap(): Snapshot = {
val bean = ManagementFactory.getThreadMXBean
val lockOwners = mutable.Set[Long]()
val blocked = bean.getThreadInfo(bean.getAllThreadIds, true, true)
.filter(_ != null)
.collect { case Blocked(info) => info }
val ownerIds = blocked map(_.getLockOwnerId) filter(_ != -1)
val owners = if (ownerIds.size == 0) Seq[String]() else
bean.getThreadInfo(ownerIds.toArray, true, true).map(_.toString).toSeq
Snapshot(
blockedThreads = blocked.map(_.toString).toSeq,
lockOwners = owners)
}
}
|
foursquare/util
|
util-core/src/test/scala/com/twitter/concurrent/SpoolSpec.scala
|
package com.twitter.concurrent
import scala.collection.mutable.ArrayBuffer
import org.specs.SpecificationWithJUnit
import com.twitter.util.{Await, Future, Promise, Return, Throw}
import Spool.{*::, **::}
class SpoolSpec extends SpecificationWithJUnit {
"Empty Spool" should {
val s = Spool.empty[Int]
"iterate over all elements" in {
val xs = new ArrayBuffer[Int]
s foreach { xs += _ }
xs.size must be_==(0)
}
"map" in {
(s map { _ * 2 } ) must be_==(Spool.empty[Int])
}
"deconstruct" in {
s must beLike {
case x **:: rest => false
case _ => true
}
}
"append via ++" in {
(s ++ Spool.empty[Int]) must be_==(Spool.empty[Int])
(Spool.empty[Int] ++ s) must be_==(Spool.empty[Int])
val s2 = s ++ (3 **:: 4 **:: Spool.empty[Int])
Await.result(s2.toSeq) must be_==(Seq(3, 4))
}
"append via ++ with Future rhs" in {
Await.result(s ++ Future(Spool.empty[Int])) must be_==(Spool.empty[Int])
Await.result(Spool.empty[Int] ++ Future(s)) must be_==(Spool.empty[Int])
val s2 = s ++ Future(3 **:: 4 **:: Spool.empty[Int])
Await.result(s2 flatMap (_.toSeq)) must be_==(Seq(3, 4))
}
"flatMap" in {
val f = (x: Int) => Future(x.toString **:: (x * 2).toString **:: Spool.empty)
Await.result(s flatMap f) must be_==(Spool.empty[Int])
}
}
"Simple resolved Spool" should {
val s = 1 **:: 2 **:: Spool.empty
"iterate over all elements" in {
val xs = new ArrayBuffer[Int]
s foreach { xs += _ }
xs.toSeq must be_==(Seq(1,2))
}
"buffer to a sequence" in {
Await.result(s.toSeq) must be_==(Seq(1, 2))
}
"map" in {
Await.result(s map { _ * 2 } toSeq) must be_==(Seq(2, 4))
}
"deconstruct" in {
s must beLike {
case x **:: rest =>
x must be_==(1)
rest must beLike {
case y **:: rest if y == 2 && rest.isEmpty => true
}
}
}
"append via ++" in {
Await.result((s ++ Spool.empty[Int]).toSeq) must be_==(Seq(1, 2))
Await.result((Spool.empty[Int] ++ s).toSeq) must be_==(Seq(1, 2))
val s2 = s ++ (3 **:: 4 **:: Spool.empty)
Await.result(s2.toSeq) must be_==(Seq(1, 2, 3, 4))
}
"append via ++ with Future rhs" in {
Await.result(s ++ Future(Spool.empty[Int]) flatMap (_.toSeq)) must be_==(Seq(1, 2))
Await.result(Spool.empty[Int] ++ Future(s) flatMap (_.toSeq)) must be_==(Seq(1, 2))
val s2 = s ++ Future(3 **:: 4 **:: Spool.empty)
Await.result(s2 flatMap (_.toSeq)) must be_==(Seq(1, 2, 3, 4))
}
"flatMap" in {
val f = (x: Int) => Future(x.toString **:: (x * 2).toString **:: Spool.empty)
val s2 = s flatMap f
Await.result(s2 flatMap (_.toSeq)) must be_==(Seq("1", "2", "2", "4"))
}
}
"Simple resolved spool with error" should {
val p = new Promise[Spool[Int]](Throw(new Exception("sad panda")))
val s = 1 **:: 2 *:: p
"EOF iteration on error" in {
val xs = new ArrayBuffer[Option[Int]]
s foreachElem { xs += _ }
xs.toSeq must be_==(Seq(Some(1), Some(2), None))
}
}
"Simple delayed Spool" should {
val p = new Promise[Spool[Int]]
val p1 = new Promise[Spool[Int]]
val p2 = new Promise[Spool[Int]]
val s = 1 *:: p
"iterate as results become available" in {
val xs = new ArrayBuffer[Int]
s foreach { xs += _ }
xs.toSeq must be_==(Seq(1))
p() = Return(2 *:: p1)
xs.toSeq must be_==(Seq(1, 2))
p1() = Return(Spool.empty)
xs.toSeq must be_==(Seq(1, 2))
}
"EOF iteration on failure" in {
val xs = new ArrayBuffer[Option[Int]]
s foreachElem { xs += _ }
xs.toSeq must be_==(Seq(Some(1)))
p() = Throw(new Exception("sad panda"))
xs.toSeq must be_==(Seq(Some(1), None))
}
"return a buffered seq when complete" in {
val f = s.toSeq
f.isDefined must beFalse
p() = Return(2 *:: p1)
f.isDefined must beFalse
p1() = Return(Spool.empty)
f.isDefined must beTrue
Await.result(f) must be_==(Seq(1,2))
}
"deconstruct" in {
s must beLike {
case fst *:: rest if fst == 1 && !rest.isDefined => true
}
}
"collect" in {
val f = s collect {
case x if x % 2 == 0 => x * 2
}
f.isDefined must beFalse // 1 is odd, so the partial function has not matched yet
p() = Return(2 *:: p1)
f.isDefined must beTrue
val s1 = Await.result(f)
s1 must beLike {
case x *:: rest if x == 4 && !rest.isDefined => true
}
p1() = Return(3 *:: p2)
s1 must beLike {
case x *:: rest if x == 4 && !rest.isDefined => true
}
p2() = Return(4 **:: Spool.empty)
val s1s = s1.toSeq
s1s.isDefined must beTrue
Await.result(s1s) must be_==(Seq(4, 8))
}
}
}
|
foursquare/util
|
util-core/src/test/scala/com/twitter/concurrent/AsyncSemaphoreSpec.scala
|
package com.twitter.concurrent
import org.specs.SpecificationWithJUnit
import org.specs.mock.Mockito
import java.util.concurrent.{ConcurrentLinkedQueue, RejectedExecutionException}
import com.twitter.util.Await
class AsyncSemaphoreSpec extends SpecificationWithJUnit with Mockito {
"AsyncSemaphore" should {
var count = 0
val s = new AsyncSemaphore(2)
val permits = new ConcurrentLinkedQueue[Permit]
def acquire() {
s.acquire() onSuccess { permit =>
count += 1
permits add permit
}
}
"execute immediately while permits are available" in {
acquire()
count must be_==(1)
acquire()
count must be_==(2)
acquire()
count must be_==(2)
}
"execute deferred computations when permits are released" in {
acquire()
acquire()
acquire()
acquire()
count must be_==(2)
permits.poll().release()
count must be_==(3)
permits.poll().release()
count must be_==(4)
permits.poll().release()
count must be_==(4)
}
"bound the number of waiters" in {
val s2 = new AsyncSemaphore(2, 3)
def acquire2() = {
s2.acquire() onSuccess { permit =>
count += 1
permits add permit
}
}
// The first two acquires obtain a permit.
acquire2()
acquire2()
count must be_==(2)
// The next three acquires wait.
acquire2()
acquire2()
acquire2()
count must be_==(2)
s2.numWaiters mustEqual(3)
// The next acquire should be rejected.
val futurePermit = acquire2()
s2.numWaiters mustEqual(3)
Await.result(futurePermit) must throwA[RejectedExecutionException]
// Waiting tasks should still execute once permits are available.
permits.poll().release()
permits.poll().release()
permits.poll().release()
count must be_==(5)
}
}
}
|
foursquare/util
|
util-logging/src/main/scala/com/twitter/logging/App.scala
|
package com.twitter.logging
import com.twitter.app.{App, Flaggable}
import com.twitter.util.{Throw, Return}
object Logging {
implicit object LevelFlaggable extends Flaggable[Level] {
def parse(s: String) =
if (Logger.levelNames contains s)
Logger.levelNames(s)
else
throw new Exception("Invalid log level: "+s)
}
}
/**
* A [[com.twitter.app.App]] mixin to use for logging. Defines flags
* to configure the (default) logger setup.
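*
* A hedged sketch of mixing it in (the flag values are illustrative):
* {{{
* object MyServer extends App with Logging {
*   def main() { log.info("starting up") }
* }
* // e.g. started with: -log.level=DEBUG -log.output=/var/log/myserver.log
* }}}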
*/
trait Logging { self: App =>
import Logging._
lazy val log = Logger(name)
def defaultLogLevel: Level = Level.INFO
private val levelFlag = flag("log.level", defaultLogLevel, "Log level")
private val outputFlag = flag("log.output", "/dev/stderr", "Output file")
premain {
val factory = LoggerFactory(
node = "",
level = Some(levelFlag()),
handlers = FileHandler(outputFlag()) :: Nil
)
Logger.configure(factory :: Nil)
}
}
|
foursquare/util
|
util-zk-common/src/test/scala/com/twitter/zk/CommonConnectorSpec.scala
|
package com.twitter.zk
import com.twitter.common.net.InetSocketAddressHelper
import com.twitter.common.zookeeper.ZooKeeperClient
import com.twitter.conversions.common.quantity._
import com.twitter.conversions.common.zookeeper._
import com.twitter.conversions.time._
import com.twitter.util.{Await, FuturePool}
import org.specs.SpecificationWithJUnit
import java.net.InetSocketAddress
import scala.collection.JavaConverters._
class CommonConnectorSpec extends SpecificationWithJUnit {
val timeout = 2.seconds
val addresses = new InetSocketAddress("localhost", 2181) :: Nil
"CommonConnector" should {
"initialize" in {
"with addresses" in {
implicit val pool = FuturePool.immediatePool
CommonConnector(addresses, timeout) must notBeNull
}
"with a ZooKeeperClient instance" in {
implicit val pool = FuturePool.immediatePool
val zookeeper = new ZooKeeperClient(timeout.toIntAmount, addresses.asJava)
val connector = CommonConnector(zookeeper, timeout)
connector.underlying mustBe zookeeper
}
}
}
// A simple live test
Option { System.getProperty("com.twitter.zk.TEST_CONNECT") } foreach { connectString =>
val address = InetSocketAddressHelper.parse(connectString)
"A live server @ %s".format(connectString) should {
val commonClient: ZooKeeperClient = new ZooKeeperClient(timeout.toIntAmount, address)
val zkClient = commonClient.toZkClient(timeout)(FuturePool.immediatePool)
doAfter {
Await.ready(zkClient.release())
}
"have 'zookeeper' in '/'" in {
Await.result(zkClient("/").getChildren(), timeout).children map { _.name } mustContain("zookeeper")
}
}
}
}
|
foursquare/util
|
util-core/src/main/scala/com/twitter/util/Awaitable.scala
|
package com.twitter.util
import java.util.concurrent.atomic.AtomicBoolean
/**
* Wait for the result of some action. Awaitable is not used
* directly, but through the `Await` object.
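*
* For example (a minimal sketch):
* {{{
* val f = Future.value(42)
* Await.result(f)              // 42
* Await.ready(f, Duration.Top) // returns f once it is satisfied
* }}}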
*/
trait Awaitable[+T] {
import Awaitable._
/**
* Support for `Await.ready`. The use of the implicit permit is an
* access control mechanism: only `Await.ready` may call this
* method.
*/
@throws(classOf[TimeoutException])
@throws(classOf[InterruptedException])
def ready(timeout: Duration)(implicit permit: CanAwait): this.type
/**
* Support for `Await.result`. The use of the implicit permit is an
* access control mechanism: only `Await.result` may call this
* method.
*/
@throws(classOf[Exception])
def result(timeout: Duration)(implicit permit: CanAwait): T
}
object Awaitable {
sealed trait CanAwait
}
/**
* Await the result of some action.
*
* @define ready
*
* Returns the object when the action has completed.
*
* @define result
*
* Returns the result of the action when it has completed.
*/
object Await {
import Awaitable._
private object AwaitPermit extends CanAwait
/** $ready */
@throws(classOf[TimeoutException])
@throws(classOf[InterruptedException])
def ready[T <: Awaitable[_]](awaitable: T): T =
ready(awaitable, Duration.Top)
/** $ready */
@throws(classOf[TimeoutException])
@throws(classOf[InterruptedException])
def ready[T <: Awaitable[_]](awaitable: T, timeout: Duration): T =
awaitable.ready(timeout)(AwaitPermit)
/** $result */
@throws(classOf[Exception])
def result[T](awaitable: Awaitable[T]): T =
result(awaitable, Duration.Top)
/** $result */
@throws(classOf[Exception])
def result[T](awaitable: Awaitable[T], timeout: Duration): T =
awaitable.result(timeout)(AwaitPermit)
}
/**
* A mixin to make an [[com.twitter.util.Awaitable]] out
* of a [[com.twitter.util.Closable]].
*
* Use `closeAwaitably` in the definition of `close`:
*
* {{{
* class MyClosable extends Closable with CloseAwaitably {
* def close(deadline: Time) = closeAwaitably {
* // close the resource
* }
* }
* }}}
*/
trait CloseAwaitably extends Awaitable[Unit] {
private[this] val onClose = new Promise[Unit]
private[this] val closed = new AtomicBoolean(false)
/**
* closeAwaitably is intended to be used as a wrapper for
* `close`. The underlying `f` will be called at most once.
*/
protected def closeAwaitably(f: => Future[Unit]): Future[Unit] = {
if (closed.compareAndSet(false, true))
onClose.become(f)
onClose
}
def ready(timeout: Duration)(implicit permit: Awaitable.CanAwait): this.type = {
Await.ready(onClose, timeout)
this
}
def result(timeout: Duration)(implicit permit: Awaitable.CanAwait): Unit =
Await.result(onClose, timeout)
}
|
foursquare/util
|
project/Build.scala
|
import sbt._
import Keys._
object Util extends Build {
val zkVersion = "3.3.4"
val sharedSettings = Seq(
version := "6.3.0",
organization := "com.twitter",
crossScalaVersions := Seq("2.9.2", "2.10.0"),
// Workaround for a scaladoc bug which causes it to choke on
// empty classpaths.
unmanagedClasspath in Compile += Attributed.blank(new java.io.File("doesnotexist")),
libraryDependencies ++= Seq(
"junit" % "junit" % "4.8.1" % "test" withSources(),
"org.scalatest" %% "scalatest" %"1.9.1" % "test",
"org.scala-tools.testing" %% "specs" % "1.6.9" % "test" withSources() cross CrossVersion.binaryMapped {
case "2.9.2" => "2.9.1"
case "2.10.0" => "2.10"
case x => x
},
"org.mockito" % "mockito-all" % "1.8.5" % "test" withSources()
),
resolvers += "twitter repo" at "http://maven.twttr.com",
ivyXML :=
<dependencies>
<exclude org="com.sun.jmx" module="jmxri" />
<exclude org="com.sun.jdmk" module="jmxtools" />
<exclude org="javax.jms" module="jms" />
</dependencies>,
scalacOptions ++= Seq("-encoding", "utf8"),
scalacOptions += "-deprecation",
javacOptions ++= Seq("-source", "1.6", "-target", "1.6"),
javacOptions in doc := Seq("-source", "1.6"),
// This is bad news for things like com.twitter.util.Time
parallelExecution in Test := false,
// Sonatype publishing
publishArtifact in Test := false,
pomIncludeRepository := { _ => false },
publishMavenStyle := true,
pomExtra := (
<url>https://github.com/twitter/util</url>
<licenses>
<license>
<name>Apache License, Version 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0</url>
</license>
</licenses>
<scm>
<url><EMAIL>:twitter/util.git</url>
<connection>scm:git:<EMAIL>:twitter/util.git</connection>
</scm>
<developers>
<developer>
<id>twitter</id>
<name>Twitter Inc.</name>
<url>https://www.twitter.com/</url>
</developer>
</developers>),
publishTo <<= version { (v: String) =>
val nexus = "https://oss.sonatype.org/"
if (v.trim.endsWith("SNAPSHOT"))
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
}
)
val jmockSettings = Seq(
libraryDependencies ++= Seq(
"org.jmock" % "jmock" % "2.4.0" % "test",
"cglib" % "cglib" % "2.1_3" % "test",
"asm" % "asm" % "1.5.3" % "test",
"org.objenesis" % "objenesis" % "1.1" % "test",
"org.hamcrest" % "hamcrest-all" % "1.1" % "test"
)
)
lazy val util = Project(
id = "util",
base = file("."),
settings = Project.defaultSettings ++
sharedSettings
) aggregate(
utilCore, utilEval, utilCodec, utilCollection, utilReflect,
utilLogging, utilThrift, utilHashing, utilJvm, utilZk,
utilZkCommon, utilClassPreloader, utilBenchmark, utilApp
)
lazy val utilCore = Project(
id = "util-core",
base = file("util-core"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-core",
libraryDependencies ++= Seq(
"com.twitter.common" % "objectsize" % "0.0.7" % "test"
),
testOptions in Test <<= scalaVersion map {
// There seems to be an issue with mockito spies,
// specs1, and scala 2.10
case "2.10" | "2.10.0" => Seq(Tests.Filter(s => !s.endsWith("MonitorSpec")))
case _ => Seq()
}
)
lazy val utilEval = Project(
id = "util-eval",
base = file("util-eval"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-eval",
libraryDependencies <+= scalaVersion { "org.scala-lang" % "scala-compiler" % _ % "compile" }
).dependsOn(utilCore)
lazy val utilCodec = Project(
id = "util-codec",
base = file("util-codec"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-codec",
libraryDependencies ++= Seq(
"commons-codec" % "commons-codec" % "1.5"
)
).dependsOn(utilCore)
lazy val utilCollection = Project(
id = "util-collection",
base = file("util-collection"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-collection",
libraryDependencies ++= Seq(
"com.google.guava" % "guava" % "11.0.2",
"commons-collections" % "commons-collections" % "3.2.1"
)
).dependsOn(utilCore)
lazy val utilReflect = Project(
id = "util-reflect",
base = file("util-reflect"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-reflect",
libraryDependencies ++= Seq(
"asm" % "asm" % "3.3.1",
"asm" % "asm-util" % "3.3.1",
"asm" % "asm-commons" % "3.3.1",
"cglib" % "cglib" % "2.2"
)
).dependsOn(utilCore)
lazy val utilLogging = Project(
id = "util-logging",
base = file("util-logging"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-logging",
libraryDependencies ++= Seq(
"org.scala-tools.testing" % "specs" % "1.6.9" % "provided" cross CrossVersion.binaryMapped {
case "2.9.2" => "2.9.1"
case "2.10.0" => "2.10"
case x => x
}
)
).dependsOn(utilCore, utilApp)
lazy val utilThrift = Project(
id = "util-thrift",
base = file("util-thrift"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-thrift",
libraryDependencies ++= Seq(
"thrift" % "libthrift" % "0.5.0",
"org.slf4j" % "slf4j-nop" % "1.5.8" % "provided",
"org.codehaus.jackson" % "jackson-core-asl" % "1.8.1",
"org.codehaus.jackson" % "jackson-mapper-asl" % "1.8.1"
)
).dependsOn(utilCodec)
lazy val utilHashing = Project(
id = "util-hashing",
base = file("util-hashing"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-hashing",
libraryDependencies ++= Seq(
"commons-codec" % "commons-codec" % "1.5" % "test"
)
).dependsOn(utilCore)
lazy val utilJvm = Project(
id = "util-jvm",
base = file("util-jvm"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-jvm"
).dependsOn(utilCore, utilLogging % "test")
lazy val utilZk = Project(
id = "util-zk",
base = file("util-zk"),
settings = Project.defaultSettings ++
sharedSettings ++
jmockSettings
).settings(
name := "util-zk",
ivyXML :=
<dependencies>
<exclude org="com.sun.jmx" module="jmxri" />
<exclude org="com.sun.jdmk" module="jmxtools" />
<exclude org="javax.jms" module="jms" />
<override org="junit" rev="4.8.1"/>
</dependencies>,
libraryDependencies ++= Seq(
"org.apache.zookeeper" % "zookeeper" % zkVersion
)
).dependsOn(utilCore, utilCollection, utilLogging)
lazy val utilZkCommon = Project(
id = "util-zk-common",
base = file("util-zk-common"),
settings = Project.defaultSettings ++
sharedSettings ++
jmockSettings
).settings(
name := "util-zk-common",
libraryDependencies ++= Seq(
"com.twitter.common.zookeeper" % "client" % "0.0.21",
"com.twitter.common.zookeeper" % "group" % "0.0.14",
"com.twitter.common.zookeeper" % "server-set" % "1.0.3",
"org.apache.zookeeper" % "zookeeper" % zkVersion
)
).dependsOn(utilCore, utilLogging, utilZk,
// These are depended on to provide transitive dependencies
// that would otherwise cause incompatibilities. See above comment.
utilEval, utilCollection, utilHashing
)
lazy val utilClassPreloader = Project(
id = "util-class-preloader",
base = file("util-class-preloader"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-class-preloader"
).dependsOn(utilCore)
lazy val utilBenchmark = Project(
id = "util-benchmark",
base = file("util-benchmark"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-benchmark",
libraryDependencies ++= Seq(
"com.google.caliper" % "caliper" % "0.5-rc1"
)
).dependsOn(utilCore, utilJvm)
lazy val utilApp = Project(
id = "util-app",
base = file("util-app"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-app"
).dependsOn(utilCore)
}
|
foursquare/util
|
util-jvm/src/main/scala/com/twitter/jvm/Estimator.scala
|
package com.twitter.jvm
import scala.util.Random
/**
* An estimator for values of type T.
*/
trait Estimator[T] {
/** A scalar measurement `m` was taken */
def measure(m: T)
/** Estimate the current value */
def estimate: T
}
/**
* A simple Kalman filter to estimate a scalar value.
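*
* A hedged usage sketch (the numbers are illustrative):
* {{{
* val k = new Kalman(10)
* k.measure(100.0, 5.0) // a measurement and its error
* k.measure(104.0, 5.0)
* k.estimate            // a smoothed estimate of the underlying value
* }}}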
*/
class Kalman(N: Int) {
private[this] val mbuf = new Array[Double](N)
private[this] val ebuf = new Array[Double](N)
private[this] var est: Double = _
private[this] var n = 0L
private[this] var weight: Double = 0.9
/**
* Update the filter with measurement `m`
* and measurement error `e`.
*/
def measure(m: Double, e: Double) {
val i = (n%N).toInt
mbuf(i) = m
ebuf(i) = e
if (n == 0)
est = m
est += weight*(m-est)
val mv = mvar
val ev = evar
if (mv + ev == 0)
weight = 1D
else
weight = mv / (mv+ev)
n += 1
}
private[this] def mvar = variance(
if (n < N) mbuf take n.toInt
else mbuf
)
private[this] def evar = variance(
if (n < N) ebuf take n.toInt
else ebuf
)
def estimate = est
private[this] def variance(samples: Array[Double]): Double = {
if (samples.size == 1)
return 0D
val sum = samples.sum
val mean = sum / samples.size
val diff = (samples map { x => (x-mean)*(x-mean) }).sum
diff/(samples.size-1)
}
override def toString =
"Kalman<estimate=%f, weight=%f, mvar=%f, evar=%f>".format(estimate, weight, mvar, evar)
}
/**
* A Kalman filter in which measurement errors are normally
* distributed over the given range (as a fraction of the measured
* value).
*/
class KalmanGaussianError(N: Int, range: Double) extends Kalman(N) with Estimator[Double] {
require(range >= 0D && range < 1D)
private[this] val rng = new Random
def measure(m: Double) {
measure(m, rng.nextGaussian()*range*m)
}
}
/**
* An estimator for weighted windows of means.
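*
* A hedged sketch (the weight/size pairs are illustrative):
* {{{
* // three windows over the last 5, 15 and 60 samples, weighted 3:2:1
* val est = new WindowedMeans(60, Seq((3, 5), (2, 15), (1, 60)))
* (1 to 60) foreach { i => est.measure(i.toDouble) }
* est.estimate // a weighted blend of the three window means
* }}}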
*/
class WindowedMeans(N: Int, windows: Seq[(Int, Int)]) extends Estimator[Double] {
require(windows forall { case (_, i) => i <= N })
private[this] val normalized = {
val sum = (windows map { case (w, _) => w }).sum
windows map { case (w, i) => (w.toDouble / sum, i) }
}
private[this] val buf = new Array[Double](N)
private[this] var n = 0L
private[this] def mean(from: Long, count: Int): Double = {
require(count <= N && count > 0)
val i = {
val x = ((from-count)%N).toInt
if (x < 0) (x + N)
else x
}
val j = (from%N).toInt
val sum =
if (i == j) buf.sum
else if (i < j) buf.slice(i, j).sum
else buf.slice(i, N).sum + buf.slice(0, j).sum
sum/count
}
def measure(m: Double) {
if (n == 0)
java.util.Arrays.fill(buf, m)
else
buf((n%N).toInt) = m
n += 1
}
def estimate = {
require(n > 0)
val weightedMeans = normalized map { case (w, i) => w*mean(n, i) }
weightedMeans.sum
}
}
/**
* Unix-like load average, an exponentially weighted moving average,
* smoothed to the given interval (counted in number of
* measurements). See:
* http://web.mit.edu/saltzer/www/publications/instrumentation.html
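*
* A small illustrative sketch:
* {{{
* val load = new LoadAverage(15) // smooth over roughly 15 measurements
* Seq(1.0, 0.0, 0.0) foreach { load.measure(_) }
* load.estimate // decays from 1.0 toward the more recent 0.0 readings
* }}}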
*/
class LoadAverage(interval: Double) extends Estimator[Double] {
private[this] val a = math.exp(-1D/interval)
private[this] var load = Double.NaN
private[this] var first = true
def measure(m: Double) {
load =
if (load.isNaN) m
else load*a + m*(1-a)
}
def estimate = load
}
/**
* Take a GC log produced by:
*
* {{{
* $ jstat -gc $PID 250 ...
* }}}
*
* And report on GC prediction accuracy. Time is
* indexed by the jstat output, and the columns are,
* in order: current time, next gc, estimated next GC.
*/
object EstimatorTest extends App {
import com.twitter.conversions.storage._
val estimator = args match {
case Array("kalman", n, error) =>
new KalmanGaussianError(n.toInt, error.toDouble)
case Array("windowed", n, windows) =>
new WindowedMeans(n.toInt,
windows.split(",") map { w =>
w.split(":") match {
case Array(w, i) => (w.toInt, i.toInt)
case _ => throw new IllegalArgumentException("bad weight, count pair "+w)
}
}
)
case Array("load", interval) =>
new LoadAverage(interval.toDouble)
case _ => throw new IllegalArgumentException("bad args ")
}
val lines = scala.io.Source.stdin.getLines().drop(1)
val states = lines.toArray map(_.split(" ") filter(_ != "") map(_.toDouble)) collect {
case Array(s0c, s1c, s0u, s1u, ec, eu, oc, ou, pc, pu, ygc, ygct, fgc, fgct, gct) =>
PoolState(ygc.toLong, ec.toLong.bytes, eu.toLong.bytes)
}
var elapsed = 1
for (List(begin, end) <- states.toList.sliding(2)) {
val allocated = (end - begin).used
estimator.measure(allocated.inBytes)
val r = end.capacity - end.used
val i = (r.inBytes/estimator.estimate.toLong) + elapsed
val j = states.indexWhere(_.numCollections > end.numCollections)
if (j > 0)
println("%d %d %d".format(elapsed, j, i))
elapsed += 1
}
}
/*
The following script is useful for plotting
results from EstimatorTest:
% scala ... com.twitter.jvm.EstimatorTest [ARGS] > /tmp/out
#!/usr/bin/env gnuplot
set terminal png size 800,600
set title "GC predictor"
set macros
set grid
set timestamp "Generated on %Y-%m-%d by `whoami`" font "Helvetica-Oblique, 8pt"
set noclip
set xrange [0:1100]
set key reverse left Left # box
set ylabel "Time of GC" textcolor lt 3
set yrange [0:1100]
set mytics 5
set y2tics
set xlabel "Time" textcolor lt 4
set mxtics 5
set boxwidth 0.5
set style fill transparent pattern 4 bo
plot "< awk '{print $1 \" \" $2}' /tmp/out" title "Actual", \
"< awk '{print $1 \" \" $3}' /tmp/out" title "Predicted", \
"< awk '{print $1 \" \" $1}' /tmp/out" title "time" with lines
*/
|
foursquare/util
|
util-logging/src/main/scala/com/twitter/logging/SyslogHandler.scala
|
/*
* Copyright 2010 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.logging
import com.twitter.concurrent.NamedPoolThreadFactory
import com.twitter.conversions.string._
import java.net.{DatagramPacket, DatagramSocket, InetAddress, InetSocketAddress, SocketAddress}
import java.text.SimpleDateFormat
import java.util.concurrent.Executors
import java.util.{logging => javalog}
object SyslogHandler {
val DEFAULT_PORT = 514
val PRIORITY_USER = 8
val PRIORITY_DAEMON = 24
val PRIORITY_LOCAL0 = 128
val PRIORITY_LOCAL1 = 136
val PRIORITY_LOCAL2 = 144
val PRIORITY_LOCAL3 = 152
val PRIORITY_LOCAL4 = 160
val PRIORITY_LOCAL5 = 168
val PRIORITY_LOCAL6 = 176
val PRIORITY_LOCAL7 = 184
private val SEVERITY_EMERGENCY = 0
private val SEVERITY_ALERT = 1
private val SEVERITY_CRITICAL = 2
private val SEVERITY_ERROR = 3
private val SEVERITY_WARNING = 4
private val SEVERITY_NOTICE = 5
private val SEVERITY_INFO = 6
private val SEVERITY_DEBUG = 7
/**
* Convert the java/scala log level into its closest syslog-ng severity.
*/
private[logging] def severityForLogLevel(level: Int): Int = {
if (level >= Level.FATAL.value) {
SEVERITY_ALERT
} else if (level >= Level.CRITICAL.value) {
SEVERITY_CRITICAL
} else if (level >= Level.ERROR.value) {
SEVERITY_ERROR
} else if (level >= Level.WARNING.value) {
SEVERITY_WARNING
} else if (level >= Level.INFO.value) {
SEVERITY_INFO
} else {
SEVERITY_DEBUG
}
}
val ISO_DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss")
val OLD_SYSLOG_DATE_FORMAT = new SimpleDateFormat("MMM dd HH:mm:ss")
/**
* Generates a HandlerFactory that returns a SyslogHandler.
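*
* A hedged configuration sketch (host and level are illustrative):
* {{{
* val factory = SyslogHandler(server = "syslog.example.com", level = Some(Level.INFO))
* val handler = factory() // each call builds a fresh SyslogHandler
* }}}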
*
* @param server
* Syslog server hostname.
*
* @param port
* Syslog server port.
*/
def apply(
server: String = "localhost",
port: Int = SyslogHandler.DEFAULT_PORT,
formatter: Formatter = new Formatter(),
level: Option[Level] = None
) = () => new SyslogHandler(server, port, formatter, level)
}
class SyslogHandler(
val server: String,
val port: Int,
formatter: Formatter,
level: Option[Level])
extends Handler(formatter, level) {
private val socket = new DatagramSocket
private[logging] val dest = new InetSocketAddress(server, port)
def flush() = { }
def close() = { }
def publish(record: javalog.LogRecord) = {
val data = formatter.format(record).getBytes
val packet = new DatagramPacket(data, data.length, dest)
SyslogFuture {
try {
socket.send(packet)
} catch {
case e =>
System.err.println(Formatter.formatStackTrace(e, 30).mkString("\n"))
}
}
}
}
/**
* @param hostname
* Hostname to prepend to log lines.
*
* @param serverName
* Optional server name to insert before log entries.
*
* @param useIsoDateFormat
* Use new standard ISO-format timestamps instead of old BSD-format?
*
* @param priority
* Priority level in syslog numbers.
*
* @param timezone
* Should dates in log messages be reported in a different time zone rather than local time?
* If set, the time zone name must be one known by the java `TimeZone` class.
*
* @param truncateAt
* Truncate log messages after N characters. 0 = don't truncate (the default).
*
* @param truncateStackTracesAt
* Truncate stack traces in exception logging (line count).
*/
class SyslogFormatter(
val hostname: String = InetAddress.getLocalHost().getHostName(),
val serverName: Option[String] = None,
val useIsoDateFormat: Boolean = true,
val priority: Int = SyslogHandler.PRIORITY_USER,
timezone: Option[String] = None,
truncateAt: Int = 0,
truncateStackTracesAt: Int = Formatter.DefaultStackTraceSizeLimit)
extends Formatter(
timezone,
truncateAt,
truncateStackTracesAt,
useFullPackageNames = false,
prefix = "") {
override def dateFormat = if (useIsoDateFormat) {
SyslogHandler.ISO_DATE_FORMAT
} else {
SyslogHandler.OLD_SYSLOG_DATE_FORMAT
}
override def lineTerminator = ""
override def formatPrefix(level: javalog.Level, date: String, name: String): String = {
val syslogLevel = level match {
case x: Level => SyslogHandler.severityForLogLevel(x.value)
case x: javalog.Level => SyslogHandler.severityForLogLevel(x.intValue)
}
serverName match {
case None =>
"<%d>%s %s %s: ".format(priority | syslogLevel, date, hostname, name)
case Some(serverName) =>
"<%d>%s %s [%s] %s: ".format(priority | syslogLevel, date, hostname, serverName, name)
}
}
}
object SyslogFuture {
private val executor = Executors.newSingleThreadExecutor(
new NamedPoolThreadFactory("TWITTER-UTIL-SYSLOG", true/*daemon*/))
private val noop = new Runnable { def run() {} }
def apply(action: => Unit) = executor.submit(new Runnable {
def run() { action }
})
def sync() {
val f = executor.submit(noop)
f.get()
}
}
|
foursquare/util
|
util-core/src/test/scala/com/twitter/concurrent/BrokerSpec.scala
|
package com.twitter.concurrent
import org.specs.SpecificationWithJUnit
import org.specs.mock.Mockito
import org.mockito.{Matchers, ArgumentCaptor}
import com.twitter.util.{Await, Return}
import com.twitter.common.objectsize.ObjectSizeCalculator
class BrokerSpec extends SpecificationWithJUnit with Mockito {
"Broker" should {
"send data (send, recv)" in {
val br = new Broker[Int]
val sendF = br.send(123).sync()
sendF.isDefined must beFalse
val recvF = br.recv.sync()
recvF.isDefined must beTrue
Await.result(recvF) must be_==(123)
sendF.isDefined must beTrue
}
"send data (recv, send)" in {
val br = new Broker[Int]
val recvF = br.recv.sync()
recvF.isDefined must beFalse
val sendF = br.send(123).sync()
sendF.isDefined must beTrue
recvF.isDefined must beTrue
Await.result(recvF) must be_==(123)
}
"queue receivers (recv, recv, send, send)" in {
val br = new Broker[Int]
val r0, r1 = br.recv.sync()
r0.isDefined must beFalse
r1.isDefined must beFalse
val s = br.send(123)
s.sync().poll must beSome(Return(()))
r0.poll must beSome(Return(123))
r1.isDefined must beFalse
s.sync().poll must beSome(Return(()))
r1.poll must beSome(Return(123))
s.sync().isDefined must beFalse
}
"queue senders (send, send, recv, recv)" in {
val br = new Broker[Int]
val s0, s1 = br.send(123).sync()
s0.isDefined must beFalse
s1.isDefined must beFalse
val r = br.recv
r.sync().poll must beSome(Return(123))
s0.poll must beSome(Return(()))
s1.isDefined must beFalse
r.sync().poll must beSome(Return(123))
s1.poll must beSome(Return(()))
r.sync().isDefined must beFalse
}
"interrupts" in {
"removes queued receiver" in {
val br = new Broker[Int]
val recvF = br.recv.sync()
recvF.raise(new Exception)
br.send(123).sync().poll must beNone
recvF.poll must beNone
}
"removes queued sender" in {
val br = new Broker[Int]
val sendF = br.send(123).sync()
sendF.raise(new Exception)
br.recv.sync().poll must beNone
sendF.poll must beNone
}
"doesn't result in space leaks" in {
val br = new Broker[Int]
Offer.select(Offer.const(1), br.recv).poll must beSome(Return(1))
val initial = ObjectSizeCalculator.getObjectSize(br)
for (_ <- 0 until 1000) {
Offer.select(Offer.const(1), br.recv).poll must beSome(Return(1))
ObjectSizeCalculator.getObjectSize(br) must be_==(initial)
}
}
"works with orElse" in {
val b0, b1 = new Broker[Int]
val o = b0.recv orElse b1.recv
val f = o.sync()
f.isDefined must beFalse
val sendf0 = b0.send(12).sync()
sendf0.isDefined must beFalse
val sendf1 = b1.send(32).sync()
sendf1.isDefined must beTrue
f.poll must beSome(Return(32))
o.sync().poll must beSome(Return(12))
sendf0.poll must beSome(Return(()))
}
}
"integrate" in {
val br = new Broker[Int]
val offer = Offer.choose(br.recv, Offer.const(999))
offer.sync().poll must beSome(Return(999))
val item = br.recv.sync()
item.isDefined must beFalse
br.send(123).sync().poll must beSome(Return(()))
item.poll must beSome(Return(123))
}
}
}
|
scarraway/scala-item-microservice
|
src/main/scala/AkkaHttpMicroservice.scala
|
import akka.actor.ActorSystem
import akka.http.scaladsl.server.{Directives, PathMatchers, Route, RouteResult}
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.Http
import akka.stream.{ActorMaterializer, Materializer}
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.model.StatusCodes
import ch.megard.akka.http.cors.scaladsl.settings.CorsSettings
import ch.megard.akka.http.cors.scaladsl.CorsDirectives.cors
import com.mongodb.client.model.{Filters, UpdateOptions}
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.mongodb.scala.{Completed, MongoClient, MongoCollection, Observer}
import org.mongodb.scala.bson.codecs.Macros._
import org.mongodb.scala.bson.codecs.DEFAULT_CODEC_REGISTRY
import org.bson.codecs.configuration.CodecRegistries.{fromProviders, fromRegistries}
import org.bson.types.ObjectId
import org.mongodb.scala.model.Updates
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future}
import spray.json.DefaultJsonProtocol
import scala.util.{Failure, Success, Try}
object Item {
def apply(name: String): Item =
Item(Some(new ObjectId().toString), name)
}
final case class Item(_id: Option[String] = None, name: String)
final case class ItemNameUpdate(name: String)
trait JsonSupport extends SprayJsonSupport with DefaultJsonProtocol {
implicit val itemFormat = jsonFormat(Item.apply, "_id", "name")
implicit val itemNameUpdateFormat = jsonFormat1(ItemNameUpdate)
}
trait Service extends Directives with JsonSupport {
implicit val system: ActorSystem
implicit def executor: ExecutionContextExecutor
implicit val materializer: Materializer
// Use a Connection String
val mongoClient: MongoClient = MongoClient() // or pass a connection string, e.g. MongoClient("mongodb://mongo")
def config: Config
val logger: LoggingAdapter
val settings = CorsSettings.defaultSettings.copy(
)
val routes = cors(settings) {
logRequestResult("akka-http-microservice") {
path("items") {
get {
completeWith(instanceOf[Seq[Item]]) { c =>
getItemsCollection.find().collect().head().map(a=>a) onComplete {
case Success(result) => {
c(result)
}// use result for something
case Failure(t) => t.printStackTrace()
}
}
} ~
post {
decodeRequest {
entity(as[List[Item]]){ itemList =>
completeWith(instanceOf[String]) { c =>
getItemsCollection.insertMany(addMissingIdsToItemList(itemList)).toFuture().map(a=>a) onComplete {
case Success(result) => {
c(result.toString())
}// use result for something
case Failure(t) => t.printStackTrace()
}
}
}
}
}
} ~
pathPrefix("items" / PathMatchers.RemainingPath) { id =>
val possibleItem = getItem(id.toString())
onSuccess(possibleItem) {
case Some(item) => specificItemRoutes(item)
case None => complete(StatusCodes.NotFound)
}
}
}
}
private def specificItemRoutes(item: Item): Route = {
get {
complete(item)
} ~
delete {
val possibleDeletion:Future[Item] = deleteItem(item._id.get)
onComplete(possibleDeletion){ deletionResult =>
complete(deletionResult)
}
} ~
put {
decodeRequest {
entity(as[ItemNameUpdate]) { itemNameUpdate =>
val possibleUpdate:Future[Item] = updateItem(item._id.get, itemNameUpdate.name)
onComplete(possibleUpdate){ updateResult =>
complete(updateResult)
}
}
}
}
}
private def addMissingIdsToItemList(itemList: List[Item]) = {
itemList.map(i => if (i._id.isEmpty) Item(i.name) else i)
}
private def getItemsCollection:MongoCollection[Item] = {
val codecRegistry = fromRegistries(fromProviders(classOf[Item]), DEFAULT_CODEC_REGISTRY )
mongoClient.getDatabase("item").withCodecRegistry(codecRegistry).getCollection("items")
}
private def getItem(id: String):Future[Option[Item]] = {
getItemsCollection.find(Filters.eq("_id", id))
.toFuture()
.recoverWith { case e: Throwable => { Future.failed(e) } }
.map(_.headOption)
}
private def deleteItem(id: String):Future[Item] = {
getItemsCollection.findOneAndDelete(Filters.eq("_id", id))
.toFuture()
.recoverWith { case e: Throwable => { Future.failed(e) } }
}
private def updateItem(id: String, newName: String):Future[Item] = {
getItemsCollection.findOneAndUpdate(Filters.eq("_id", id), Updates.set("name", newName))
.toFuture()
.recoverWith { case e: Throwable => { Future.failed(e) } }
}
}
object AkkaHttpMicroservice extends App with Service {
override implicit val system = ActorSystem()
override implicit val executor = system.dispatcher
override implicit val materializer = ActorMaterializer()
override val config = ConfigFactory.load()
override val logger = Logging(system, getClass)
Http().bindAndHandle(routes, config.getString("http.interface"), config.getInt("http.port"))
}
|
scarraway/scala-item-microservice
|
src/test/scala/ServiceSpec.scala
|
import akka.event.NoLogging
import akka.http.scaladsl.model.ContentTypes._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.testkit.ScalatestRouteTest
import org.scalatest._
class ServiceSpec extends FlatSpec with Matchers with ScalatestRouteTest with Service {
override def testConfigSource = "akka.loglevel = WARNING"
override def config = testConfig
override val logger = NoLogging
val baseRoute = "/items"
val testIdentifier = "testId"
val testItemRoute = baseRoute + "/" + testIdentifier
val invalidItemRoute = baseRoute + "/" + "invalid"
val testItem = new Item(Some(testIdentifier), "TestItem")
val testItemUpdate = new ItemNameUpdate("Test Item Updated")
override def afterAll(): Unit = {
Delete(testItemRoute) ~> routes ~> check {
println("Double checked deletion of " + testIdentifier)
}
super.afterAll() // let ScalatestRouteTest clean up the test actor system
}
//Must be first since these tests are stateful
"The service" should "allow inserting new Items via POST" in {
Post(baseRoute, List(testItem)) ~> routes ~> check {
status shouldBe OK
}
}
"The service" should "return all items for GET requests to the root path" in {
Get(baseRoute) ~> routes ~> check {
status shouldBe OK
contentType shouldBe `application/json`
responseAs[Seq[Item]] shouldBe a [Seq[_]]
responseAs[Seq[Item]] should contain (testItem)
}
}
"The service" should "return 200 for GET test item" in {
Get(testItemRoute) ~> routes ~> check {
status shouldBe OK
}
}
"The service" should "return 200 for updating(PUT) test item" in {
Put(testItemRoute, testItemUpdate) ~> routes ~> check {
status shouldBe OK
}
}
"The item" should "be updated" in {
Get(testItemRoute) ~> routes ~> check {
status shouldBe OK
responseAs[Item] should have (
'name (testItemUpdate.name)
)
}
}
"The service" should "return 200 for DELETE test item" in {
Delete(testItemRoute) ~> routes ~> check {
status shouldBe OK
}
}
"The service" should "return 404 for GET nonsense id" in {
Get(invalidItemRoute) ~> routes ~> check {
status shouldBe NotFound
}
}
"The service" should "return 404 for PUT nonsense id" in {
Put(invalidItemRoute) ~> routes ~> check {
status shouldBe NotFound
}
}
"The service" should "return 404 for DELETE nonsense id" in {
Delete(invalidItemRoute) ~> routes ~> check {
status shouldBe NotFound
}
}
}
|
wix/libpay-creditguard
|
libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/Commands.scala
|
<gh_stars>1-10
package com.wix.pay.creditguard.model
object Commands {
/** The doDeal command is used to process transactions in the CG Gateway. */
val doDeal = "doDeal"
}
|
wix/libpay-creditguard
|
libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/TransactionCodes.scala
|
package com.wix.pay.creditguard.model
object TransactionCodes {
/** Swiping of magnetic card. */
val regular = "Regular"
/** Self service. */
val selfService = "SelfService"
/** Fuel self service. */
val fuelSelfService = "FuelSelfService"
val contactless = "Contactless"
val contactlessSelfService = "ContactlessSelfService"
val contactlessSelfServiceInGasStation = "ContactlessSelfServiceInGasStation"
/** Transaction through Internet/phone with card number. */
val phone = "Phone"
/** Card holder is present, however card is not swiped. */
val signature = "Signature"
}
|
wix/libpay-creditguard
|
libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/Currencies.scala
|
<filename>libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/Currencies.scala
package com.wix.pay.creditguard.model
object Currencies {
/** New Israeli Shekel USD linked. */
val ilsByUsd = "IlsByUsd"
/** New Israeli Shekel index linked. */
val ilsbyIndex = "IlsbyIndex"
}
|
wix/libpay-creditguard
|
libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/Conversions.scala
|
package com.wix.pay.creditguard.model
import java.math.{BigDecimal => JBigDecimal}
object Conversions {
def toCreditguardAmount(amount: Double): Int = {
JBigDecimal.valueOf(amount).movePointRight(2).intValueExact()
}
def toCreditguardYearMonth(year: Int, month: Int): String = {
f"$month%02d${year % 100}%02d"
}
}
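// Worked examples for the helpers above. The expected values are derived from the
// implementations; this object is illustrative only and not part of the library.
object ConversionsExamples extends App {
// Amounts are converted from major units to CreditGuard's integer minor units.
assert(Conversions.toCreditguardAmount(33.3) == 3330)
// Expirations are encoded month-first as "MMYY".
assert(Conversions.toCreditguardYearMonth(2020, 12) == "1220")
}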
|
wix/libpay-creditguard
|
libpay-creditguard/src/main/scala/com/wix/pay/creditguard/CreditguardMerchantParser.scala
|
<filename>libpay-creditguard/src/main/scala/com/wix/pay/creditguard/CreditguardMerchantParser.scala
package com.wix.pay.creditguard
trait CreditguardMerchantParser {
def parse(merchantKey: String): CreditguardMerchant
def stringify(merchant: CreditguardMerchant): String
}
|
wix/libpay-creditguard
|
libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/RequestTransactionTypes.scala
|
<reponame>wix/libpay-creditguard
package com.wix.pay.creditguard.model
object RequestTransactionTypes {
/** Card holder is charged. */
val debit = "Debit"
/** Card holder is credited. */
val credit = "Credit"
}
|
wix/libpay-creditguard
|
libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/Versions.scala
|
package com.wix.pay.creditguard.model
object Versions {
val standard = "1001"
}
|
wix/libpay-creditguard
|
libpay-creditguard/src/test/scala/com/wix/pay/creditguard/JsonCreditguardAuthorizationParserTest.scala
|
package com.wix.pay.creditguard
import com.wix.pay.creditguard.CreditguardMatchers._
import org.specs2.mutable.SpecWithJUnit
import org.specs2.specification.Scope
class JsonCreditguardAuthorizationParserTest extends SpecWithJUnit {
trait Ctx extends Scope {
val authorizationParser: CreditguardAuthorizationParser = new JsonCreditguardAuthorizationParser
}
"stringify and then parse" should {
"yield an authorization similar to the original one" in new Ctx {
val someAuthorization = CreditguardAuthorization(
authNumber = "some authorization number",
currency = "some currency",
tranId = "some transaction ID",
cardId = "some card ID",
cardExpiration = "some card expiration",
user = "some user"
)
val authorizationKey = authorizationParser.stringify(someAuthorization)
authorizationParser.parse(authorizationKey) must beAuthorization(
authNumber = ===(someAuthorization.authNumber),
currency = ===(someAuthorization.currency),
tranId = ===(someAuthorization.tranId),
cardId = ===(someAuthorization.cardId),
cardExpiration = ===(someAuthorization.cardExpiration),
user = ===(someAuthorization.user)
)
}
}
}
|
wix/libpay-creditguard
|
libpay-creditguard/src/test/scala/com/wix/pay/creditguard/JsonCreditguardMerchantParserTest.scala
|
<reponame>wix/libpay-creditguard
package com.wix.pay.creditguard
import com.wix.pay.creditguard.CreditguardMatchers._
import org.specs2.mutable.SpecWithJUnit
import org.specs2.specification.Scope
class JsonCreditguardMerchantParserTest extends SpecWithJUnit {
trait Ctx extends Scope {
val merchantParser: CreditguardMerchantParser = new JsonCreditguardMerchantParser
}
"stringify and then parse" should {
"yield a merchant similar to the original one" in new Ctx {
val someMerchant = CreditguardMerchant(
user = "some user",
password = "<PASSWORD>",
terminalNumber = "some terminal number",
supplierNumber = "some supplier number",
idPrefix = "some ID prefix"
)
val merchantKey = merchantParser.stringify(someMerchant)
merchantParser.parse(merchantKey) must beMerchant(
user = ===(someMerchant.user),
password = ===(someMerchant.password),
terminalNumber = ===(someMerchant.terminalNumber),
supplierNumber = ===(someMerchant.supplierNumber),
idPrefix = ===(someMerchant.idPrefix)
)
}
}
}
|
wix/libpay-creditguard
|
libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/Cvvs.scala
|
package com.wix.pay.creditguard.model
object Cvvs {
/** Merchant chooses not to pass CVV. */
val notProvided = "0"
/** CVV is not readable. */
val unreadable = "2"
/** Card does not have a CVV. */
val doesNotExist = "9"
}
|
wix/libpay-creditguard
|
libpay-creditguard/src/main/scala/com/wix/pay/creditguard/CreditguardMerchant.scala
|
package com.wix.pay.creditguard
/**
* @param idPrefix Prefix for ''com.wix.pay.creditguard.model.DoDealRequest#user'' field
*/
case class CreditguardMerchant(user: String,
password: String,
terminalNumber: String,
supplierNumber: String,
idPrefix: String)
|
wix/libpay-creditguard
|
libpay-creditguard/src/main/scala/com/wix/pay/creditguard/CreditguardAuthorizationParser.scala
|
package com.wix.pay.creditguard
trait CreditguardAuthorizationParser {
def parse(authorizationKey: String): CreditguardAuthorization
def stringify(authorization: CreditguardAuthorization): String
}
|
wix/libpay-creditguard
|
libpay-creditguard/src/main/scala/com/wix/pay/creditguard/CreditguardAuthorization.scala
|
package com.wix.pay.creditguard
case class CreditguardAuthorization(authNumber: String,
currency: String,
tranId: String,
cardId: String,
cardExpiration: String,
user: String)
|
wix/libpay-creditguard
|
libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/AuthSources.scala
|
<filename>libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/AuthSources.scala
package com.wix.pay.creditguard.model
object AuthSources {
val shva = "Shva"
val creditCompany = "Credit Company"
val voiceMail = "VoiceMail"
}
|
wix/libpay-creditguard
|
libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/Validations.scala
|
package com.wix.pay.creditguard.model
object Validations {
/**
* J1: Verifies card locally.
* If the card is ok and the total amount of the deal is under the ceiling, a debit is made without communication to Shva.
* If it's above the ceiling, an error occurs.
*/
val noComm = "NoComm"
/**
* J2: A local check on the CG Gateway for the validity of the credit card number and whether it appears in the blocked cards list.
* No actual debit occurs.
*/
val normal = "Normal"
/**
* J3: Same as J2 (Normal).
* It also returns the ceiling limit in the total field (Israeli cards only).
* A positive response results in actual settlement.
*/
val creditLimit = "CreditLimit"
/**
* J4: Verifies the card locally or with the credit company, depending on the ceiling (ZFL) terminal parameters.
* A positive response results in actual settlement.
*/
val autoComm = "AutoComm"
/**
* J5: Verifies card by credit company regardless of the ceiling ZFL terminal parameters.
* No settlement is performed; the verified amount is reserved against the card holder's obligo (credit line).
* (This is used for authorization purposes only.)
*/
val verify = "Verify"
/**
* J6: Verifies card by credit company regardless of the ceiling ZFL terminal parameters; settlement is performed.
*/
val dealer = "Dealer"
/**
* J9: Performs a J4 transaction, but the transaction is not deposited.
* Depositing can be configured per merchant, or the transaction can be released explicitly with the AutoCommRelease command.
*/
val autoCommHold = "AutoCommHold"
/**
* J102: A local check on the CG Gateway for the validity of the credit card number for tokenization purposes.
* Performs an actual J2 request and returns a cardId when the terminal is configured to do so.
*/
val token = "Token"
/**
* J109: Used for releasing a transaction (previously performed by using J9).
* Releasing a transaction can be done using the original card number, the cardId (when supported on the terminal),
* or track2 when the original transaction was performed with track2.
*/
val autoCommRelease = "AutoCommRelease"
/**
* J201: Used for retrieving the card number of an existing card ID that was generated for the merchant.
* This option is configuration dependent.
*/
val cardNo = "cardNo"
}
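// Illustrative note (not an API element of this repository): per the comments above,
// a two-step authorize/capture flow would typically send Validations.verify (J5) for the
// authorization leg, a one-step charge would use Validations.autoComm (J4), and
// Validations.token (J102) is the tokenization-only check.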
|
wix/libpay-creditguard
|
libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/Languages.scala
|
<gh_stars>1-10
package com.wix.pay.creditguard.model
object Languages {
val hebrew = "Heb"
val english = "Eng"
}
|
wix/libpay-creditguard
|
libpay-creditguard/src/main/scala/com/wix/pay/creditguard/JsonCreditguardMerchantParser.scala
|
package com.wix.pay.creditguard
import org.json4s.DefaultFormats
import org.json4s.native.Serialization
class JsonCreditguardMerchantParser() extends CreditguardMerchantParser {
private implicit val formats = DefaultFormats
override def parse(merchantKey: String): CreditguardMerchant = {
Serialization.read[CreditguardMerchant](merchantKey)
}
override def stringify(merchant: CreditguardMerchant): String = {
Serialization.write(merchant)
}
}
|
wix/libpay-creditguard
|
libpay-creditguard/src/test/scala/com/wix/pay/creditguard/CreditguardMatchers.scala
|
package com.wix.pay.creditguard
import org.specs2.matcher.{AlwaysMatcher, Matcher, Matchers}
trait CreditguardMatchers extends Matchers {
def authorizationParser: CreditguardAuthorizationParser
def beMerchant(user: Matcher[String] = AlwaysMatcher(),
password: Matcher[String] = AlwaysMatcher(),
terminalNumber: Matcher[String] = AlwaysMatcher(),
supplierNumber: Matcher[String] = AlwaysMatcher(),
idPrefix: Matcher[String] = AlwaysMatcher()): Matcher[CreditguardMerchant] = {
user ^^ { (_: CreditguardMerchant).user aka "user" } and
password ^^ { (_: CreditguardMerchant).password aka "password" } and
terminalNumber ^^ { (_: CreditguardMerchant).terminalNumber aka "terminal number" } and
supplierNumber ^^ { (_: CreditguardMerchant).supplierNumber aka "supplier number" } and
idPrefix ^^ { (_: CreditguardMerchant).idPrefix aka "ID prefix" }
}
def beAuthorization(authNumber: Matcher[String] = AlwaysMatcher(),
currency: Matcher[String] = AlwaysMatcher(),
tranId: Matcher[String] = AlwaysMatcher(),
cardId: Matcher[String] = AlwaysMatcher(),
cardExpiration: Matcher[String] = AlwaysMatcher(),
user: Matcher[String] = AlwaysMatcher()): Matcher[CreditguardAuthorization] = {
authNumber ^^ { (_: CreditguardAuthorization).authNumber aka "authorization number" } and
currency ^^ { (_: CreditguardAuthorization).currency aka "currency" } and
tranId ^^ { (_: CreditguardAuthorization).tranId aka "transaction ID" } and
cardId ^^ { (_: CreditguardAuthorization).cardId aka "card ID" } and
cardExpiration ^^ { (_: CreditguardAuthorization).cardExpiration aka "card expiration" } and
user ^^ { (_: CreditguardAuthorization).user aka "user" }
}
def beAuthorizationKey(authorization: Matcher[CreditguardAuthorization]): Matcher[String] = {
authorization ^^ { authorizationParser.parse(_: String) aka "parsed authorization"}
}
}
object CreditguardMatchers extends CreditguardMatchers {
override val authorizationParser = new JsonCreditguardAuthorizationParser()
}
|
wix/libpay-creditguard
|
libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/Fields.scala
|
package com.wix.pay.creditguard.model
object Fields {
val user = "user"
val password = "password"
val int_in = "int_in"
}
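// These constants are the HTTP form-parameter names that CreditguardGateway.doRequest
// (elsewhere in this repository) POSTs to the CG Relay endpoint, i.e. a body of the form
// user=...&password=...&int_in=<request XML>.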
|
wix/libpay-creditguard
|
libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/CommReasons.scala
|
package com.wix.pay.creditguard.model
object CommReasons {
val random = "1"
val creditLimit = "2"
val confidentialNumer = "3" // sic.
val serviceCode = "4"
val verifyOnly = "5"
val ambiguousBlocked = "6"
val zfl = "7"
val initiative = "8"
val charging = "9"
}
|
wix/libpay-creditguard
|
libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/ResponseTransactionTypes.scala
|
<reponame>wix/libpay-creditguard
package com.wix.pay.creditguard.model
object ResponseTransactionTypes {
/** Card holder is charged. */
val blocked = "Blocked"
/** Card holder is charged. */
val regularDebit = "RegularDebit"
/** Card holder is charged. */
val authDebit = "AuthDebit"
/** Card holder is charged. */
val forcedDebit = "ForcedDebit"
/** Card holder is credited. */
val regularCredit = "RegularCredit"
/** Card holder is credited. */
val refund = "Refund"
/** Card holder is credited. */
val authCredit = "AuthCredit"
}
|
wix/libpay-creditguard
|
libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/CardBrands.scala
|
package com.wix.pay.creditguard.model
object CardBrands {
val privateLabel = "0"
val mastercard = "1"
val visa = "2"
val maestro = "3"
val isracard = "5"
}
|
wix/libpay-creditguard
|
libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/CardAcquirers.scala
|
package com.wix.pay.creditguard.model
object CardAcquirers {
val isracard = "Isracard"
val visa = "Visa"
val diners = "Diners"
val amex = "Amex"
val jcb = "Jcb"
val alphacard = "Alphacard"
}
|
wix/libpay-creditguard
|
libpay-creditguard-common/src/main/scala/com/wix/pay/creditguard/model/Statuses.scala
|
<gh_stars>1-10
package com.wix.pay.creditguard.model
object Statuses {
val absent = "Absent"
val valid = "Valid"
val invalid = "Invalid"
val notValidated = "NotValidated"
}
|
wix/libpay-creditguard
|
libpay-creditguard/src/test/scala/com/wix/pay/creditguard/it/CreditguardGatewayIT.scala
|
<filename>libpay-creditguard/src/test/scala/com/wix/pay/creditguard/it/CreditguardGatewayIT.scala
package com.wix.pay.creditguard.it
import com.google.api.client.http.javanet.NetHttpTransport
import com.wix.pay.creditcard.{CreditCard, CreditCardOptionalFields, YearMonth}
import com.wix.pay.creditguard.CreditguardMatchers._
import com.wix.pay.creditguard.testkit.CreditguardDriver
import com.wix.pay.creditguard.{CreditguardAuthorization, CreditguardMerchant, _}
import com.wix.pay.model.{CurrencyAmount, Deal, Payment}
import com.wix.pay.{PaymentErrorException, PaymentGateway, PaymentRejectedException}
import org.specs2.mutable.SpecWithJUnit
import org.specs2.specification.Scope
class CreditguardGatewayIT extends SpecWithJUnit {
val creditguardPort = 10010
val requestFactory = new NetHttpTransport().createRequestFactory()
val driver = new CreditguardDriver(port = creditguardPort)
step {
driver.start()
}
sequential
trait Ctx extends Scope {
val merchantParser = new JsonCreditguardMerchantParser()
val authorizationParser = new JsonCreditguardAuthorizationParser()
val someMerchant = CreditguardMerchant(
user = "some user",
password = "<PASSWORD>",
terminalNumber = "some terminal number",
supplierNumber = "some supplier number",
idPrefix = "some ID prefix"
)
val merchantKey = merchantParser.stringify(someMerchant)
val somePayment = Payment(currencyAmount = CurrencyAmount("ILS", 33.3))
val someCreditCard = CreditCard(
number = "4012888818888",
expiration = YearMonth(2020, 12),
additionalFields = Some(CreditCardOptionalFields.withFields(
csc = Some("123"),
holderId = Some("some holder ID"))
)
)
val someDeal = Deal(id = "some deal ID")
val someAuthorization = CreditguardAuthorization(
authNumber = "someAuthorizationNumber",
currency = "someCurrency",
tranId = "someTransactionId",
cardId = "someCardId",
cardExpiration = "someCardExpiration",
user = "someUser"
)
val authorizationKey = authorizationParser.stringify(someAuthorization)
val someCaptureAmount = 11.1
val creditguard: PaymentGateway = new CreditguardGateway(
requestFactory = requestFactory,
endpointUrl = s"http://localhost:$creditguardPort/",
merchantParser = merchantParser,
authorizationParser = authorizationParser
)
driver.reset()
}
"sale request via CreditGuard gateway" should {
"gracefully fail on invalid merchant" in new Ctx {
val someErrorMessage = "some error message"
driver.aSaleFor(
user = someMerchant.user,
password = someMerchant.password,
terminalNumber = someMerchant.terminalNumber,
supplierNumber = someMerchant.supplierNumber,
idPrefix = someMerchant.idPrefix,
orderId = Some(someDeal.id),
card = someCreditCard,
currencyAmount = somePayment.currencyAmount
) failsOnInvalidMerchant(someErrorMessage)
creditguard.sale(
merchantKey = merchantKey,
creditCard = someCreditCard,
payment = somePayment,
deal = Some(someDeal)
) must beAFailedTry.like {
case e: PaymentErrorException => e.message must contain(someErrorMessage)
}
}
"gracefully fail on rejected card" in new Ctx {
val someErrorMessage = "some error message"
driver.aSaleFor(
user = someMerchant.user,
password = someMerchant.password,
terminalNumber = someMerchant.terminalNumber,
supplierNumber = someMerchant.supplierNumber,
idPrefix = someMerchant.idPrefix,
orderId = Some(someDeal.id),
card = someCreditCard,
currencyAmount = somePayment.currencyAmount
) getsRejected(someErrorMessage)
creditguard.sale(
merchantKey = merchantKey,
creditCard = someCreditCard,
payment = somePayment,
deal = Some(someDeal)
) must beAFailedTry.like {
case e: PaymentRejectedException => e.message must contain(someErrorMessage)
}
}
"successfully yield a transaction ID on valid request" in new Ctx {
driver.aSaleFor(
user = someMerchant.user,
password = someMerchant.password,
terminalNumber = someMerchant.terminalNumber,
supplierNumber = someMerchant.supplierNumber,
idPrefix = someMerchant.idPrefix,
orderId = Some(someDeal.id),
card = someCreditCard,
somePayment.currencyAmount
) returns(
transactionId = someAuthorization.tranId
)
creditguard.sale(
merchantKey = merchantKey,
creditCard = someCreditCard,
payment = somePayment,
deal = Some(someDeal)
) must beASuccessfulTry(
check = ===(someAuthorization.tranId)
)
}
}
"authorize request via CreditGuard gateway" should {
"gracefully fail on invalid merchant" in new Ctx {
val someErrorMessage = "some error message"
driver.anAuthorizeFor(
user = someMerchant.user,
password = someMerchant.password,
terminalNumber = someMerchant.terminalNumber,
supplierNumber = someMerchant.supplierNumber,
idPrefix = someMerchant.idPrefix,
orderId = Some(someDeal.id),
card = someCreditCard,
currencyAmount = somePayment.currencyAmount
) failsOnInvalidMerchant(someErrorMessage)
creditguard.authorize(
merchantKey = merchantKey,
creditCard = someCreditCard,
payment = somePayment,
deal = Some(someDeal)
) must beAFailedTry.like {
case e: PaymentErrorException => e.message must contain(someErrorMessage)
}
}
"gracefully fail on rejected card" in new Ctx {
val someErrorMessage = "some error message"
driver.anAuthorizeFor(
user = someMerchant.user,
password = someMerchant.password,
terminalNumber = someMerchant.terminalNumber,
supplierNumber = someMerchant.supplierNumber,
idPrefix = someMerchant.idPrefix,
orderId = Some(someDeal.id),
card = someCreditCard,
somePayment.currencyAmount
) getsRejected(someErrorMessage)
creditguard.authorize(
merchantKey = merchantKey,
creditCard = someCreditCard,
payment = somePayment,
deal = Some(someDeal)
) must beAFailedTry.like {
case e: PaymentRejectedException => e.message must contain(someErrorMessage)
}
}
"successfully yield an authorization key on valid request" in new Ctx {
driver.anAuthorizeFor(
user = someMerchant.user,
password = <PASSWORD>,
terminalNumber = someMerchant.terminalNumber,
supplierNumber = someMerchant.supplierNumber,
idPrefix = someMerchant.idPrefix,
orderId = Some(someDeal.id),
card = someCreditCard,
currencyAmount = somePayment.currencyAmount
) returns(
authNumber = someAuthorization.authNumber,
cardId = someAuthorization.cardId,
cardExpiration = someAuthorization.cardExpiration,
currency = someAuthorization.currency,
transactionId = someAuthorization.tranId
)
creditguard.authorize(
merchantKey = merchantKey,
creditCard = someCreditCard,
payment = somePayment,
deal = Some(someDeal)
) must beASuccessfulTry(
check = beAuthorizationKey(
authorization = beAuthorization(
authNumber = ===(someAuthorization.authNumber),
currency = ===(someAuthorization.currency),
tranId = ===(someAuthorization.tranId),
cardId = ===(someAuthorization.cardId),
cardExpiration = ===(someAuthorization.cardExpiration)
)
)
)
}
}
"capture request via CreditGuard gateway" should {
"gracefully fail on invalid merchant" in new Ctx {
val someErrorMessage = "some error message"
driver.aCaptureFor(
user = someMerchant.user,
password = someMerchant.password,
terminalNumber = someMerchant.terminalNumber,
supplierNumber = someMerchant.supplierNumber,
authNumber = someAuthorization.authNumber,
currency = someAuthorization.currency,
amount = someCaptureAmount,
cardId = someAuthorization.cardId,
cardExpiration = someAuthorization.cardExpiration,
userField = someAuthorization.user
) failsOnInvalidMerchant(someErrorMessage)
creditguard.capture(
merchantKey = merchantKey,
authorizationKey = authorizationKey,
amount = someCaptureAmount
) must beAFailedTry.like {
case e: PaymentErrorException => e.message must contain(someErrorMessage)
}
}
"successfully yield a transaction ID on valid request" in new Ctx {
driver.aCaptureFor(
user = someMerchant.user,
password = someMerchant.password,
terminalNumber = someMerchant.terminalNumber,
supplierNumber = someMerchant.supplierNumber,
authNumber = someAuthorization.authNumber,
currency = someAuthorization.currency,
amount = someCaptureAmount,
cardId = someAuthorization.cardId,
cardExpiration = someAuthorization.cardExpiration,
userField = someAuthorization.user
) returns(
transactionId = someAuthorization.tranId
)
creditguard.capture(
merchantKey = merchantKey,
authorizationKey = authorizationKey,
amount = someCaptureAmount
) must beASuccessfulTry(
check = ===(someAuthorization.tranId)
)
}
}
step {
driver.stop()
}
}
|
wix/libpay-creditguard
|
libpay-creditguard/src/main/scala/com/wix/pay/creditguard/CreditguardGateway.scala
|
<reponame>wix/libpay-creditguard<filename>libpay-creditguard/src/main/scala/com/wix/pay/creditguard/CreditguardGateway.scala
package com.wix.pay.creditguard
import com.google.api.client.http._
import com.wix.pay.creditcard.CreditCard
import com.wix.pay.creditguard.model.Fields
import com.wix.pay.model.{Customer, Deal, Payment}
import com.wix.pay.shva.model.{IsShvaRejectedStatusCode, StatusCodes}
import com.wix.pay.{PaymentErrorException, PaymentException, PaymentGateway, PaymentRejectedException}
import scala.collection.JavaConversions._
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success, Try}
object Endpoints {
val test = "https://cguat2.creditguard.co.il/xpo/Relay"
val caspitProduction = "https://payment.caspitgroup.co.il:8443/xpo/Relay"
}
class CreditguardGateway(requestFactory: HttpRequestFactory,
connectTimeout: Option[Duration] = None,
readTimeout: Option[Duration] = None,
numberOfRetries: Int = 0,
endpointUrl: String = Endpoints.caspitProduction,
merchantParser: CreditguardMerchantParser = new JsonCreditguardMerchantParser,
authorizationParser: CreditguardAuthorizationParser = new JsonCreditguardAuthorizationParser) extends PaymentGateway {
override def authorize(merchantKey: String, creditCard: CreditCard, payment: Payment, customer: Option[Customer], deal: Option[Deal]): Try[String] = {
Try {
require(payment.installments == 1, "Installments are not implemented yet")
val merchant = merchantParser.parse(merchantKey)
val request = CreditguardHelper.createAuthorizeRequest(
terminalNumber = merchant.terminalNumber,
supplierNumber = merchant.supplierNumber,
idPrefix = merchant.idPrefix,
orderId = deal.map { _.id },
card = creditCard,
currencyAmount = payment.currencyAmount
)
val requestXml = RequestParser.stringify(request)
val responseXml = doRequest(
user = merchant.user,
password = merchant.password,
requestXml = requestXml
)
val response = ResponseParser.parse(responseXml)
verifyShvaStatusCode(
statusCode = response.response.doDeal.status,
errorMessage = response.response.doDeal.statusText
)
val authorization = CreditguardAuthorization(
authNumber = response.response.doDeal.authNumber,
currency = response.response.doDeal.currency,
tranId = response.response.tranId,
cardId = response.response.doDeal.cardId,
cardExpiration = response.response.doDeal.cardExpiration,
user = response.response.doDeal.user
)
authorizationParser.stringify(authorization)
} match {
case Success(authorizationKey) => Success(authorizationKey)
case Failure(e: PaymentException) => Failure(e)
case Failure(e) => Failure(new PaymentErrorException(e.getMessage, e))
}
}
override def capture(merchantKey: String, authorizationKey: String, amount: Double): Try[String] = {
Try {
val merchant = merchantParser.parse(merchantKey)
val authorization = authorizationParser.parse(authorizationKey)
val request = CreditguardHelper.createCaptureRequest(
terminalNumber = merchant.terminalNumber,
supplierNumber = merchant.supplierNumber,
cardId = authorization.cardId,
cardExpiration = authorization.cardExpiration,
authNumber = authorization.authNumber,
currency = authorization.currency,
amount = amount,
user = authorization.user
)
val requestXml = RequestParser.stringify(request)
val responseXml = doRequest(
user = merchant.user,
password = merchant.password,
requestXml = requestXml
)
val response = ResponseParser.parse(responseXml)
verifyShvaStatusCode(
statusCode = response.response.doDeal.status,
errorMessage = response.response.doDeal.statusText
)
response.response.tranId
} match {
case Success(transactionId) => Success(transactionId)
case Failure(e: PaymentException) => Failure(e)
case Failure(e) => Failure(new PaymentErrorException(e.getMessage, e))
}
}
override def sale(merchantKey: String, creditCard: CreditCard, payment: Payment, customer: Option[Customer], deal: Option[Deal]): Try[String] = {
Try {
require(payment.installments == 1, "Installments are not implemented yet")
val merchant = merchantParser.parse(merchantKey)
val request = CreditguardHelper.createSaleRequest(
terminalNumber = merchant.terminalNumber,
supplierNumber = merchant.supplierNumber,
idPrefix = merchant.idPrefix,
orderId = deal.map { _.id },
card = creditCard,
currencyAmount = payment.currencyAmount
)
val requestXml = RequestParser.stringify(request)
val responseXml = doRequest(
user = merchant.user,
password = merchant.password,
requestXml = requestXml
)
val response = ResponseParser.parse(responseXml)
verifyShvaStatusCode(
statusCode = response.response.doDeal.status,
errorMessage = response.response.doDeal.statusText
)
response.response.tranId
} match {
case Success(transactionId) => Success(transactionId)
case Failure(e: PaymentException) => Failure(e)
case Failure(e) => Failure(new PaymentErrorException(e.getMessage, e))
}
}
override def voidAuthorization(merchantKey: String, authorizationKey: String): Try[String] = {
Try {
// val merchant = merchantParser.parse(merchantKey)
val authorization = authorizationParser.parse(authorizationKey)
// CreditGuard doesn't support voiding an authorization. Authorizations should be automatically voided after a while.
authorization.tranId
}
}
private def doRequest(user: String, password: String, requestXml: String): String = {
val params = Map(
Fields.user -> user,
Fields.password -> password,
Fields.int_in -> requestXml
)
val httpRequest = requestFactory.buildPostRequest(
new GenericUrl(endpointUrl),
new UrlEncodedContent(mapAsJavaMap(params))
)
connectTimeout foreach (duration => httpRequest.setConnectTimeout(duration.toMillis.toInt))
readTimeout foreach (duration => httpRequest.setReadTimeout(duration.toMillis.toInt))
httpRequest.setNumberOfRetries(numberOfRetries)
val httpResponse = httpRequest.execute()
try {
httpResponse.parseAsString()
} finally {
httpResponse.ignore()
}
}
private def verifyShvaStatusCode(statusCode: String, errorMessage: String): Unit = {
statusCode match {
case StatusCodes.success => // Operation successful.
case IsShvaRejectedStatusCode(rejectedStatusCode) => throw PaymentRejectedException(s"$errorMessage (code = $rejectedStatusCode)")
case _ => throw PaymentErrorException(s"$errorMessage (code = $statusCode)")
}
}
}
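// Minimal construction sketch (assumed values; mirrors CreditguardGatewayIT in the test sources):
// val gateway = new CreditguardGateway(
//   requestFactory = new com.google.api.client.http.javanet.NetHttpTransport().createRequestFactory(),
//   endpointUrl = Endpoints.test)
// val merchantKey = new JsonCreditguardMerchantParser().stringify(CreditguardMerchant(
//   user = "user", password = "password", terminalNumber = "terminal",
//   supplierNumber = "supplier", idPrefix = "prefix"))
// gateway.sale(merchantKey, creditCard, payment, deal = Some(deal))   // returns Try[transaction id]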
|