code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.util.Comparator
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen.{CodeGenContext, CodegenFallback, GeneratedExpressionCode}
import org.apache.spark.sql.types._
/**
 * Returns the number of elements of an array, or the number of entries of a map.
 */
case class Size(child: Expression) extends UnaryExpression with ExpectsInputTypes {

  /** The size is always reported as a 32-bit integer. */
  override def dataType: DataType = IntegerType

  /** The child expression must evaluate to either an array or a map. */
  override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(ArrayType, MapType))

  /**
   * Counts the elements of the (non-null) input. Null handling is done by the
   * UnaryExpression framework before this method is invoked.
   */
  override def nullSafeEval(value: Any): Int = child.dataType match {
    case _: MapType => value.asInstanceOf[MapData].numElements()
    case _: ArrayType => value.asInstanceOf[ArrayData].numElements()
  }

  /**
   * Generated code is shared between arrays and maps because both ArrayData and
   * MapData expose a numElements() accessor.
   */
  override def genCode(ctx: CodeGenContext, ev: GeneratedExpressionCode): String = {
    nullSafeCodeGen(ctx, ev, input => s"${ev.primitive} = ($input).numElements();")
  }
}
/**
 * Sorts the input array in ascending / descending order according to the natural ordering of
 * the array elements and returns it.
 *
 * Null elements sort first in ascending order and last in descending order.
 */
case class SortArray(base: Expression, ascendingOrder: Expression)
  extends BinaryExpression with ExpectsInputTypes with CodegenFallback {

  // Convenience constructor: defaults to ascending order.
  def this(e: Expression) = this(e, Literal(true))

  override def left: Expression = base
  override def right: Expression = ascendingOrder
  // Sorting does not change the element type, so the result type matches the input.
  override def dataType: DataType = base.dataType
  override def inputTypes: Seq[AbstractDataType] = Seq(ArrayType, BooleanType)

  override def checkInputDataTypes(): TypeCheckResult = base.dataType match {
    case ArrayType(dt, _) if RowOrdering.isOrderable(dt) =>
      TypeCheckResult.TypeCheckSuccess
    case ArrayType(dt, _) =>
      TypeCheckResult.TypeCheckFailure(
        s"$prettyName does not support sorting array of type ${dt.simpleString}")
    case _ =>
      TypeCheckResult.TypeCheckFailure(s"$prettyName only supports array input.")
  }

  // Ascending comparator: nulls are considered smallest.
  // NOTE(review): only AtomicType elements are matched here, while
  // checkInputDataTypes accepts any RowOrdering-orderable element type; other
  // orderable element types would hit a MatchError — confirm intended coverage.
  @transient
  private lazy val lt: Comparator[Any] = {
    val ordering = base.dataType match {
      case _ @ ArrayType(n: AtomicType, _) => n.ordering.asInstanceOf[Ordering[Any]]
    }
    new Comparator[Any]() {
      override def compare(o1: Any, o2: Any): Int = {
        if (o1 == null && o2 == null) {
          0
        } else if (o1 == null) {
          -1
        } else if (o2 == null) {
          1
        } else {
          ordering.compare(o1, o2)
        }
      }
    }
  }

  // Descending comparator: mirror image of `lt` (nulls largest, comparison negated).
  @transient
  private lazy val gt: Comparator[Any] = {
    val ordering = base.dataType match {
      case _ @ ArrayType(n: AtomicType, _) => n.ordering.asInstanceOf[Ordering[Any]]
    }
    new Comparator[Any]() {
      override def compare(o1: Any, o2: Any): Int = {
        if (o1 == null && o2 == null) {
          0
        } else if (o1 == null) {
          1
        } else if (o2 == null) {
          -1
        } else {
          -ordering.compare(o1, o2)
        }
      }
    }
  }

  // Both inputs are non-null here: copy the array data out, sort it with the
  // chosen comparator, and rewrap it as a new GenericArrayData.
  override def nullSafeEval(array: Any, ascending: Any): Any = {
    val elementType = base.dataType.asInstanceOf[ArrayType].elementType
    val data = array.asInstanceOf[ArrayData].toArray[AnyRef](elementType)
    java.util.Arrays.sort(data, if (ascending.asInstanceOf[Boolean]) lt else gt)
    new GenericArrayData(data.asInstanceOf[Array[Any]])
  }

  override def prettyName: String = "sort_array"
}
/**
 * Checks if the array (left) has the element (right).
 *
 * Returns null (instead of false) when the element is not found but the array
 * contains null entries, following SQL three-valued logic.
 */
case class ArrayContains(left: Expression, right: Expression)
  extends BinaryExpression with ImplicitCastInputTypes {

  override def dataType: DataType = BooleanType

  // An empty Seq means "no implicit casts applicable"; checkInputDataTypes below
  // then reports the precise error.
  override def inputTypes: Seq[AbstractDataType] = right.dataType match {
    case NullType => Seq()
    case _ => left.dataType match {
      case n @ ArrayType(element, _) => Seq(n, element)
      case _ => Seq()
    }
  }

  override def checkInputDataTypes(): TypeCheckResult = {
    if (right.dataType == NullType) {
      TypeCheckResult.TypeCheckFailure("Null typed values cannot be used as arguments")
    } else if (!left.dataType.isInstanceOf[ArrayType]
      || left.dataType.asInstanceOf[ArrayType].elementType != right.dataType) {
      TypeCheckResult.TypeCheckFailure(
        "Arguments must be an array followed by a value of same type as the array members")
    } else {
      TypeCheckResult.TypeCheckSuccess
    }
  }

  // Nullable if either input is nullable, or if array elements themselves can be
  // null (a null element can force a null result, see nullSafeEval).
  override def nullable: Boolean = {
    left.nullable || right.nullable || left.dataType.asInstanceOf[ArrayType].containsNull
  }

  override def nullSafeEval(arr: Any, value: Any): Any = {
    var hasNull = false
    // NOTE: the `return true` inside this lambda is a nonlocal return from
    // nullSafeEval (implemented via a control-flow exception); it short-circuits
    // the foreach as soon as a match is found.
    arr.asInstanceOf[ArrayData].foreach(right.dataType, (i, v) =>
      if (v == null) {
        hasNull = true
      } else if (v == value) {
        return true
      }
    )
    // No match found: null if any element was null (unknown), false otherwise.
    if (hasNull) {
      null
    } else {
      false
    }
  }

  override def genCode(ctx: CodeGenContext, ev: GeneratedExpressionCode): String = {
    nullSafeCodeGen(ctx, ev, (arr, value) => {
      val i = ctx.freshName("i")
      val getValue = ctx.getValue(arr, right.dataType, i)
      // Mirrors nullSafeEval: isNull becomes true when a null element is seen,
      // and is reset to false if a matching element is found later.
      s"""
      for (int $i = 0; $i < $arr.numElements(); $i ++) {
        if ($arr.isNullAt($i)) {
          ${ev.isNull} = true;
        } else if (${ctx.genEqual(right.dataType, value, getValue)}) {
          ${ev.isNull} = false;
          ${ev.primitive} = true;
          break;
        }
      }
     """
    })
  }

  override def prettyName: String = "array_contains"
}
| ArvinDevel/onlineAggregationOnSparkV2 | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala | Scala | apache-2.0 | 6,157 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.util
import java.io.File
import org.scalatest.Suite
import org.apache.spark.SparkContext
import org.apache.spark.ml.{PipelineModel, Transformer}
import org.apache.spark.sql.{DataFrame, Encoder, Row}
import org.apache.spark.sql.execution.streaming.MemoryStream
import org.apache.spark.sql.streaming.StreamTest
import org.apache.spark.sql.test.TestSparkSession
import org.apache.spark.util.Utils
/**
 * Test harness for ML Transformers: every transformer under test is exercised
 * both on a batch DataFrame and on an equivalent streaming DataFrame, and the
 * results are checked with the same user-supplied function.
 */
trait MLTest extends StreamTest with TempDirectory { self: Suite =>

  // SparkContext backing the shared test session; assigned in beforeAll.
  @transient var sc: SparkContext = _
  // Directory used for checkpointing; created in beforeAll, removed in afterAll.
  @transient var checkpointDir: String = _

  protected override def createSparkSession: TestSparkSession = {
    new TestSparkSession(new SparkContext("local[2]", "MLlibUnitTest", sparkConf))
  }

  override def beforeAll(): Unit = {
    super.beforeAll()
    sc = spark.sparkContext
    checkpointDir = Utils.createDirectory(tempDir.getCanonicalPath, "checkpoints").toString
    sc.setCheckpointDir(checkpointDir)
  }

  override def afterAll() {
    try {
      Utils.deleteRecursively(new File(checkpointDir))
    } finally {
      // Always let the parent tear down the session, even if cleanup fails.
      super.afterAll()
    }
  }

  /**
   * Replays `dataframe` through `transformer` as a streaming query and applies
   * `globalCheckFunction` to all rows of the selected result columns.
   */
  private[util] def testTransformerOnStreamData[A : Encoder](
      dataframe: DataFrame,
      transformer: Transformer,
      firstResultCol: String,
      otherResultCols: String*)
      (globalCheckFunction: Seq[Row] => Unit): Unit = {
    val columnNames = dataframe.schema.fieldNames
    val stream = MemoryStream[A]
    // Restore the original column names on the streaming frame.
    val streamDF = stream.toDS().toDF(columnNames: _*)
    val data = dataframe.as[A].collect()
    val streamOutput = transformer.transform(streamDF)
      .select(firstResultCol, otherResultCols: _*)
    testStream(streamOutput) (
      AddData(stream, data: _*),
      CheckAnswer(globalCheckFunction)
    )
  }

  /**
   * Applies `transformer` to the batch `dataframe` and runs
   * `globalCheckFunction` on the collected result columns.
   */
  private[util] def testTransformerOnDF(
      dataframe: DataFrame,
      transformer: Transformer,
      firstResultCol: String,
      otherResultCols: String*)
      (globalCheckFunction: Seq[Row] => Unit): Unit = {
    val dfOutput = transformer.transform(dataframe)
    val outputs = dfOutput.select(firstResultCol, otherResultCols: _*).collect()
    globalCheckFunction(outputs)
  }

  /**
   * Checks each output row individually, on both the batch and streaming paths.
   */
  def testTransformer[A : Encoder](
      dataframe: DataFrame,
      transformer: Transformer,
      firstResultCol: String,
      otherResultCols: String*)
      (checkFunction: Row => Unit): Unit = {
    testTransformerByGlobalCheckFunc(
      dataframe,
      transformer,
      firstResultCol,
      otherResultCols: _*) { rows: Seq[Row] => rows.foreach(checkFunction(_)) }
  }

  /**
   * Checks the complete result set at once, on both the batch and streaming paths.
   */
  def testTransformerByGlobalCheckFunc[A : Encoder](
      dataframe: DataFrame,
      transformer: Transformer,
      firstResultCol: String,
      otherResultCols: String*)
      (globalCheckFunction: Seq[Row] => Unit): Unit = {
    testTransformerOnStreamData(dataframe, transformer, firstResultCol,
      otherResultCols: _*)(globalCheckFunction)
    testTransformerOnDF(dataframe, transformer, firstResultCol,
      otherResultCols: _*)(globalCheckFunction)
  }
}
| esi-mineset/spark | mllib/src/test/scala/org/apache/spark/ml/util/MLTest.scala | Scala | apache-2.0 | 3,784 |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.util
import org.openjdk.jol.info.ClassLayout
import org.openjdk.jol.info.GraphLayout
import munit.FunSuite
import scala.util.Random
/**
 * Unit tests for LongHashSet: functional behavior is cross-checked against
 * standard-library sets, and memory layout is verified with JOL.
 */
class LongHashSetSuite extends FunSuite {

  test("add") {
    val s = new LongHashSet(-1, 10)
    s.add(11)
    assertEquals(List(11L), s.toList)
    assertEquals(1, s.size)
  }

  test("dedup") {
    // Adding the same value twice must not grow the set.
    val s = new LongHashSet(-1, 10)
    s.add(42)
    assertEquals(List(42L), s.toList)
    assertEquals(1, s.size)
    s.add(42)
    assertEquals(List(42L), s.toList)
    assertEquals(1, s.size)
  }

  test("resize") {
    // Far more entries than the initial capacity forces internal resizing.
    val s = new LongHashSet(-1L, 10)
    (0L until 10000L).foreach(s.add)
    assertEquals((0L until 10000L).toSet, s.toList.toSet)
    assertEquals(s.size, 10000)
  }

  test("random") {
    // Cross-check membership against scala's mutable HashSet on random input.
    val jset = new scala.collection.mutable.HashSet[Long]
    val iset = new LongHashSet(-1, 10)
    (0 until 10000).foreach { i =>
      val v = Random.nextLong()
      iset.add(v)
      jset.add(v)
    }
    assertEquals(jset.toSet, iset.toList.toSet)
  }

  // Compares two arrays ignoring element order.
  private def arrayCompare(a1: Array[Long], a2: Array[Long]): Unit = {
    // Need to sort as traversal order could be different when generating the arrays
    java.util.Arrays.sort(a1)
    java.util.Arrays.sort(a2)
    assertEquals(a1.toSeq, a2.toSeq)
  }

  test("toArray") {
    val jset = new scala.collection.mutable.HashSet[Long]
    val iset = new LongHashSet(-1, 10)
    (0 until 10000).foreach { i =>
      val v = Random.nextLong()
      iset.add(v)
      jset.add(v)
    }
    arrayCompare(jset.toArray, iset.toArray)
  }

  test("memory per set") {
    // Sanity check to verify if some change introduces more overhead per set
    val bytes = ClassLayout.parseClass(classOf[LongHashSet]).instanceSize()
    assertEquals(bytes, 32L)
  }

  test("memory - 5 items") {
    val iset = new LongHashSet(-1, 10)
    val jset = new java.util.HashSet[Int](10)
    (0 until 5).foreach { i =>
      iset.add(i)
      jset.add(i)
    }
    val igraph = GraphLayout.parseInstance(iset)
    //val jgraph = GraphLayout.parseInstance(jset)
    //println(igraph.toFootprint)
    //println(jgraph.toFootprint)
    // Only objects should be the array and the set itself
    assertEquals(igraph.totalCount(), 2L)
    // Sanity check size is <= 250 bytes
    assert(igraph.totalSize() <= 250)
  }

  test("memory - 10k items") {
    val iset = new LongHashSet(-1, 10)
    val jset = new java.util.HashSet[Int](10)
    (0 until 10000).foreach { i =>
      iset.add(i)
      jset.add(i)
    }
    val igraph = GraphLayout.parseInstance(iset)
    //val jgraph = GraphLayout.parseInstance(jset)
    //println(igraph.toFootprint)
    //println(jgraph.toFootprint)
    // Only objects should be the array and the set itself
    assertEquals(igraph.totalCount(), 2L)
    // Sanity check size is < 220kb
    assert(igraph.totalSize() <= 220000)
  }

  test("negative absolute value") {
    // Long.MinValue has no positive counterpart; adding it must not loop or crash.
    val s = new LongHashSet(-1, 10)
    s.add(java.lang.Long.MIN_VALUE)
  }
}
| brharrington/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/util/LongHashSetSuite.scala | Scala | apache-2.0 | 3,588 |
package scalacookbook.chapter03
/**
* Created by liguodong on 2016/6/28.
*/
/**
 * Demonstrates the different kinds of patterns usable in a match expression:
 * constants, sequences, tuples, constructors, types, and the wildcard, plus
 * variable binding with `@` and Option handling.
 */
object UsePatternMatchInMatchExpression extends App{

  // Constants
  // trigger the constant patterns
  println(echoWhatYouGaveMe(0))
  println(echoWhatYouGaveMe(true))
  println(echoWhatYouGaveMe("hello"))
  println(echoWhatYouGaveMe(Nil))

  // Sequences
  // trigger the sequence patterns
  println(echoWhatYouGaveMe(List(0,1,2)))
  println(echoWhatYouGaveMe(List(1,2)))
  println(echoWhatYouGaveMe(List(1,2,3)))
  println(echoWhatYouGaveMe(Vector(1,2,3)))

  // Tuples
  // trigger the tuple patterns
  println(echoWhatYouGaveMe((1,2))) // two element tuple
  println(echoWhatYouGaveMe((1,2,3))) // three element

  case class Person(firstName: String, lastName: String)
  case class Dog(name: String)

  // Constructors
  // trigger the constructor patterns
  println(echoWhatYouGaveMe(Person("Melissa", "Alexander")))
  println(echoWhatYouGaveMe(Dog("Suka")))

  // Types
  // trigger the typed patterns
  println(echoWhatYouGaveMe("Hello, world"))
  println(echoWhatYouGaveMe(42))
  println(echoWhatYouGaveMe(42F))
  println(echoWhatYouGaveMe(Array(1,2,3)))
  println(echoWhatYouGaveMe(Array("coffee", "apple pie")))
  println(echoWhatYouGaveMe(Dog("Fido")))
  println(echoWhatYouGaveMe(List("apple", "banana")))
  println(echoWhatYouGaveMe(Map(1->"Al", 2->"Alexander")))

  // Wildcard
  // trigger the wildcard pattern
  println(echoWhatYouGaveMe("33d"))

  // Returns a description of the given value; cases are tried top to bottom,
  // so the more specific patterns must come before the typed and wildcard ones.
  def echoWhatYouGaveMe(x: Any): String = x match {
    // constant patterns
    case 0 => "zero"
    case true => "true"
    case "hello" => "you said 'hello'"
    case Nil => "an empty List"
    // sequence patterns
    case List(0, _, _) => "a three-element list with 0 as the first element"
    case List(1, _*) => "a list beginning with 1, having any number of elements"
    case Vector(1, _*) => "a vector starting with 1, having any number of elements"
    // tuples
    case (a, b) => s"got $a and $b"
    case (a, b, c) => s"got $a, $b, and $c"
    // constructor patterns
    case Person(first, "Alexander") => s"found an Alexander, first name = $first"
    case Dog("Suka") => "found a dog named Suka"
    // typed patterns
    case s: String => s"you gave me this string: $s"
    case i: Int => s"thanks for the int: $i"
    case f: Float => s"thanks for the float: $f"
    case a: Array[Int] => s"an array of int: ${a.mkString(",")}"
    case as: Array[String] => s"an array of strings: ${as.mkString(",")}"
    case d: Dog => s"dog: ${d.name}"
    case list: List[_] => s"thanks for the List: $list"
    case m: Map[_, _] => m.toString
    // the default wildcard pattern
    case _ => "Unknown"
  }

  // The two forms below are equivalent (element types are erased at runtime,
  // so `_` and a named type variable match the same values).
  // similar
  /*
  case list: List[_] => s"thanks for the List: $list"
  case m: Map[_, _] => m.toString
  <==========>
  case m: Map[a, b] => m.toString
  case list: List[x] => s"thanks for the List: $list"
  */

  println("/////////////////////Discussion//////////////////////")
  import section11._
  val test = new RandomNoiseMaker
  test.makeRandomNoise(RandomString("liguodong"))

  // Adding variables to patterns
  // variableName @ pattern
  // binds the whole matched value to a name while still deconstructing it
  def matchType(x: Any): String = x match {
    //case x: List(1, _*) => s"$x" // doesn't compile
    case x @ List(1, _*) => s"$x" // works; prints the list
    //case Some(_) => "got a Some" // works, but can't access the Some
    //case Some(x) => s"$x" // works, returns "foo"
    case x @ Some(_) => s"$x" // works, returns "Some(foo)"
    case p @ Person(first, "Doe") => s"$p" // works, returns "Person(John,Doe)"
  }
  println(matchType(List(1,2,3))) // prints "List(1, 2, 3)"
  println(matchType(Some("foo"))) // prints "Some(foo)"
  println(matchType(Person("John", "Doe"))) // prints "Person(John,Doe)"

  // Using Some and None in match expressions
  def toInt(s: String): Option[Int] = {
    try {
      Some(Integer.parseInt(s.trim))
    } catch {
      case e: Exception => None
    }
  }
  toInt("42") match {
    case Some(i) => println(i)
    case None => println("That wasn't an Int.")
  }
}
package section11{
  import java.io.File

  // Sealed ADT: all subtypes live in this file, so matches over RandomThing
  // can be checked for exhaustiveness by the compiler.
  sealed trait RandomThing
  case class RandomFile(f: File) extends RandomThing
  case class RandomString(s: String) extends RandomThing

  class RandomNoiseMaker {
    // Dispatches on the concrete RandomThing; prints a placeholder in lieu of
    // real playback / speech.
    def makeRandomNoise(t: RandomThing) = t match {
      case RandomFile(f) => println("playSoundFile(f)")//playSoundFile(f)
      case RandomString(s) => println("speak(s)")//speak(s)
    }
  }
}
| liguodongIOT/java-scala-mix-sbt | src/main/scala/scalacookbook/chapter03/UsePatternMatchInMatchExpression.scala | Scala | apache-2.0 | 4,491 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution
import minitest.TestSuite
import monix.execution.schedulers.TestScheduler
import monix.execution.internal.Platform
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Random, Success}
/**
 * Tests for AsyncSemaphore. Deterministic tests drive time with TestScheduler
 * (`s.tick()` flushes pending callbacks); concurrency tests run real Futures
 * on the global execution context.
 */
object AsyncSemaphoreSuite extends TestSuite[TestScheduler] {
  def setup() = TestScheduler()

  def tearDown(env: TestScheduler): Unit =
    assert(env.state.tasks.isEmpty, "should not have tasks left to execute")

  test("simple greenLight") { implicit s =>
    val semaphore = AsyncSemaphore(provisioned = 4)
    val future = semaphore.withPermit(() => Future(100))
    // A permit is consumed immediately, but the task runs only on tick().
    assertEquals(semaphore.available(), 3)
    assert(!future.isCompleted, "!future.isCompleted")
    s.tick()
    assertEquals(future.value, Some(Success(100)))
    // The permit is returned after completion.
    assertEquals(semaphore.available(), 4)
  }

  test("should back-pressure when full") { implicit s =>
    val semaphore = AsyncSemaphore(provisioned = 2)
    val p1 = Promise[Int]()
    val f1 = semaphore.withPermit(() => p1.future)
    val p2 = Promise[Int]()
    val f2 = semaphore.withPermit(() => p2.future)
    s.tick()
    assertEquals(semaphore.available(), 0)
    // Third task must wait until a permit is released.
    val f3 = semaphore.withPermit(() => Future(3))
    s.tick()
    assertEquals(f3.value, None)
    assertEquals(semaphore.available(), 0)
    // Completing p1 frees a permit, which immediately runs f3.
    p1.success(1); s.tick()
    assertEquals(semaphore.available(), 1)
    assertEquals(f1.value, Some(Success(1)))
    assertEquals(f3.value, Some(Success(3)))
    p2.success(2); s.tick()
    assertEquals(f2.value, Some(Success(2)))
    assertEquals(semaphore.available(), 2)
  }

  testAsync("real async test of many futures") { _ =>
    // Executing Futures on the global scheduler!
    import scala.concurrent.ExecutionContext.Implicits.global
    val semaphore = AsyncSemaphore(provisioned = 4)
    val count = if (Platform.isJVM) 100000 else 1000
    val futures = for (i <- 0 until count) yield semaphore.withPermit(() => Future(i)): Future[Int]
    val sum =
      Future.sequence(futures).map(_.sum)
    // Asynchronous result, to be handled by Minitest
    for (result <- sum) yield {
      // Sum of 0 until count.
      assertEquals(result, count * (count - 1) / 2)
    }
  }

  test("await for release of all active and pending permits") { implicit s =>
    val semaphore = AsyncSemaphore(provisioned = 2)
    val p1 = semaphore.acquire()
    assertEquals(p1.value, Some(Success(())))
    val p2 = semaphore.acquire()
    assertEquals(p2.value, Some(Success(())))
    // Over-subscribed: p3 and p4 stay pending.
    val p3 = semaphore.acquire()
    assert(!p3.isCompleted, "!p3.isCompleted")
    val p4 = semaphore.acquire()
    assert(!p4.isCompleted, "!p4.isCompleted")
    // awaitAvailable(2) completes only once both pending acquires have been
    // served AND two permits are free again — i.e. after 4 releases.
    val all1 = semaphore.awaitAvailable(2)
    assert(!all1.isCompleted, "!all1.isCompleted")
    semaphore.release(); s.tick()
    assert(!all1.isCompleted, "!all1.isCompleted")
    semaphore.release(); s.tick()
    assert(!all1.isCompleted, "!all1.isCompleted")
    semaphore.release(); s.tick()
    assert(!all1.isCompleted, "!all1.isCompleted")
    semaphore.release(); s.tick()
    assert(all1.isCompleted, "all1.isCompleted")
    // REDO
    val p5 = semaphore.acquire()
    assert(p5.isCompleted, "p5.isCompleted")
    val all2 = semaphore.awaitAvailable(2)
    s.tick(); assert(!all2.isCompleted, "!all2.isCompleted")
    semaphore.release(); s.tick()
    assert(all2.isCompleted, "all2.isCompleted")
    // Already completed
    val all3 = semaphore.awaitAvailable(2)
    assert(all3.isCompleted, "all3.isCompleted")
  }

  test("acquire is cancelable") { implicit s =>
    val semaphore = AsyncSemaphore(provisioned = 2)
    val p1 = semaphore.acquire()
    assert(p1.isCompleted, "p1.isCompleted")
    val p2 = semaphore.acquire()
    assert(p2.isCompleted, "p2.isCompleted")
    val p3 = semaphore.acquire()
    assert(!p3.isCompleted, "!p3.isCompleted")
    assertEquals(semaphore.available(), 0)
    // Cancelling a pending acquire removes it from the queue: subsequent
    // releases restore availability instead of completing p3.
    p3.cancel()
    semaphore.release()
    assertEquals(semaphore.available(), 1)
    semaphore.release()
    assertEquals(semaphore.available(), 2)
    s.tick()
    assertEquals(semaphore.available(), 2)
    assert(!p3.isCompleted, "!p3.isCompleted")
  }

  testAsync("withPermitN / awaitAvailable concurrent test") { _ =>
    // Executing Futures on the global scheduler!
    import scala.concurrent.ExecutionContext.Implicits.global
    repeatTest(100) { () =>
      val available = 6L
      val semaphore = AsyncSemaphore(provisioned = available)
      val count = if (Platform.isJVM) 10000 else 100
      val allReleased = Promise[Unit]()
      // The outer permit is held for the whole test body, so awaitAvailable
      // (registered below) can only fire after every inner permit is returned.
      val f = semaphore.withPermit { () =>
        allReleased.completeWith(semaphore.awaitAvailable(available))
        val futures = for (i <- 0 until count) yield {
          // Random permit counts of 1..3 per task.
          val n = (Math.floorMod(Random.nextInt(), 3) + 1).toLong
          semaphore.withPermitN(n) { () =>
            Future(1).map { x =>
              assert(!allReleased.isCompleted, s"!allReleased.isCompleted (index $i)")
              x
            }
          }: Future[Int]
        }
        Future.sequence(futures).map { x =>
          x.sum
        }
      }
      for (r <- f; _ <- allReleased.future) yield {
        assertEquals(r, count)
        assertEquals(semaphore.available(), available)
      }
    }
  }

  testAsync("tryAcquireN / awaitAvailable concurrent test") { _ =>
    // Executing Futures on the global scheduler!
    import scala.concurrent.ExecutionContext.Implicits.global
    // Busy-retry acquisition built on the non-blocking tryAcquireN.
    def acquireN(semaphore: AsyncSemaphore, n: Long): Future[Unit] =
      Future(semaphore.tryAcquireN(n)).flatMap {
        case true => Future.successful(())
        case false => acquireN(semaphore, n)
      }
    // withPermit equivalent expressed via acquireN/releaseN; releaseN runs on
    // both success and failure of f().
    def withPermitN[A](semaphore: AsyncSemaphore, n: Long)(f: () => Future[A]): Future[A] =
      acquireN(semaphore, n).flatMap { _ =>
        FutureUtils.transform[A, A](f(), r => { semaphore.releaseN(n); r })
      }
    repeatTest(10) { () =>
      val available = 6L
      val semaphore = AsyncSemaphore(provisioned = available)
      val count = if (Platform.isJVM) 1000 else 100
      val allReleased = Promise[Unit]()
      val f = withPermitN(semaphore, 1) { () =>
        allReleased.completeWith(semaphore.awaitAvailable(available))
        val futures = for (i <- 0 until count) yield {
          withPermitN(semaphore, (Math.floorMod(Random.nextInt(), 3) + 1).toLong) { () =>
            Future(1).map { x =>
              assert(!allReleased.isCompleted, s"!allReleased.isCompleted (index $i)")
              x
            }
          }
        }
        Future.sequence(futures).map { x =>
          x.sum
        }
      }
      for (r <- f; _ <- allReleased.future) yield {
        assertEquals(r, count)
        assertEquals(semaphore.available(), available)
      }
    }
  }

  // Runs the async test body n times sequentially.
  def repeatTest(n: Int)(f: () => Future[Unit])(implicit ec: ExecutionContext): Future[Unit] =
    if (n > 0) f().flatMap(_ => repeatTest(n - 1)(f))
    else Future.successful(())
}
| alexandru/monifu | monix-execution/shared/src/test/scala/monix/execution/AsyncSemaphoreSuite.scala | Scala | apache-2.0 | 7,515 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.storage
import com.twitter.algebird.Monoid
import com.twitter.util.{Closable, CloseAwaitably, Time, Future}
import com.twitter.zipkin.common.Dependencies
/**
 * Storage and retrieval interface for realtime aggregates that are computed online
 * and written into online storage.
 *
 * Implementations are Closable; callers should close them when finished.
 */
trait RealtimeAggregates extends Closable with CloseAwaitably {

  /**
   * Span durations keyed by some string key, for the given (service, rpc) pair
   * at the given timestamp.
   */
  def getSpanDurations(
    timeStamp: Time,
    serverServiceName: String,
    rpcName: String
  ): Future[Map[String, List[Long]]]

  /**
   * Trace ids grouped by service name, for the given (service, rpc) pair at
   * the given timestamp.
   */
  def getServiceNamesToTraceIds(
    timeStamp: Time,
    serverServiceName: String,
    rpcName: String
  ): Future[Map[String, List[Long]]]
}
/**
 * Null-object implementation of [[RealtimeAggregates]]: every query resolves
 * immediately to an empty result and close is a no-op.
 */
object NullRealtimeAggregates extends RealtimeAggregates {

  // Shared pre-built empty result; immutable, so safe to reuse for every call.
  private[this] val noResults: Future[Map[String, List[Long]]] =
    Future.value(Map.empty[String, List[Long]])

  /** Nothing to release; completes immediately. */
  def close(deadline: Time): Future[Unit] = closeAwaitably {
    Future.Done
  }

  def getSpanDurations(timeStamp: Time, serverServiceName: String, rpcName: String) =
    noResults

  def getServiceNamesToTraceIds(timeStamp: Time, serverServiceName: String, rpcName: String) =
    noResults
}
| travisbrown/zipkin | zipkin-common/src/main/scala/com/twitter/zipkin/storage/RealtimeAggregates.scala | Scala | apache-2.0 | 1,669 |
package athena.connector.pipeline
/**
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
//This is a port of the 2.2.x Pipeline code to 2.3 - this should go away when Akka releases
//their reactive streams support.
import scala.beans.BeanProperty
import scala.util.{ Failure, Success }
import akka.actor._
import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics }
import akka.util.ByteString
import akka.event.LoggingAdapter
import akka.io.Tcp
object TcpPipelineHandler {

  /**
   * This class wraps up a pipeline with its external (i.e. “top”) command and
   * event types and providing unique wrappers for sending commands and
   * receiving events (nested and non-static classes which are specific to each
   * instance of [[Init]]). All events emitted by the pipeline will be sent to
   * the registered handler wrapped in an Event.
   */
  abstract class Init[Ctx <: PipelineContext, Cmd, Evt](
    val stages: PipelineStage[_ >: Ctx <: PipelineContext, Cmd, Tcp.Command, Evt, Tcp.Event]) {

    /**
     * This method must be implemented to return the [[PipelineContext]]
     * necessary for the operation of the given [[PipelineStage]].
     */
    def makeContext(actorContext: ActorContext): Ctx

    /**
     * Java API: construct a command to be sent to the [[TcpPipelineHandler]]
     * actor.
     */
    def command(cmd: Cmd): Command = Command(cmd)

    /**
     * Java API: extract a wrapped event received from the [[TcpPipelineHandler]]
     * actor.
     *
     * @throws MatchError if the given object is not an Event matching this
     * specific Init instance.
     */
    def event(evt: AnyRef): Evt = evt match {
      case Event(event) ⇒ event
    }

    /**
     * Wrapper class for commands to be sent to the [[TcpPipelineHandler]] actor.
     * Being an inner class, it is bound to this specific Init instance.
     */
    case class Command(@BeanProperty cmd: Cmd) extends NoSerializationVerificationNeeded

    /**
     * Wrapper class for events emitted by the [[TcpPipelineHandler]] actor.
     * Being an inner class, it is bound to this specific Init instance.
     */
    case class Event(@BeanProperty evt: Evt) extends NoSerializationVerificationNeeded
  }

  /**
   * This interface bundles logging and ActorContext for Java.
   */
  trait WithinActorContext extends HasLogging with HasActorContext

  // Convenience factory: builds an Init whose context exposes the given logger
  // and the handler actor's context.
  def withLogger[Cmd, Evt](log: LoggingAdapter,
                           stages: PipelineStage[_ >: WithinActorContext <: PipelineContext, Cmd, Tcp.Command, Evt, Tcp.Event]): Init[WithinActorContext, Cmd, Evt] =
    new Init[WithinActorContext, Cmd, Evt](stages) {
      override def makeContext(ctx: ActorContext): WithinActorContext = new WithinActorContext {
        override def getLogger = log
        override def getContext = ctx
      }
    }

  /**
   * Wrapper class for management commands sent to the [[TcpPipelineHandler]] actor.
   */
  case class Management(@BeanProperty cmd: AnyRef)

  /**
   * This is a new Tcp.Command which the pipeline can emit to effect the
   * sending a message to another actor. Using this instead of doing the send
   * directly has the advantage that other pipeline stages can also see and
   * possibly transform the send.
   */
  case class Tell(receiver: ActorRef, msg: Any, sender: ActorRef) extends Tcp.Command

  /**
   * The pipeline may want to emit a [[Tcp.Event]] to the registered handler
   * actor, which is enabled by emitting this [[Tcp.Command]] wrapping an event
   * instead. The [[TcpPipelineHandler]] actor will upon reception of this command
   * forward the wrapped event to the handler.
   */
  case class TcpEvent(@BeanProperty evt: Tcp.Event) extends Tcp.Command

  /**
   * create [[akka.actor.Props]] for a pipeline handler
   */
  def props[Ctx <: PipelineContext, Cmd, Evt](init: TcpPipelineHandler.Init[Ctx, Cmd, Evt], connection: ActorRef, handler: ActorRef) =
    Props(classOf[TcpPipelineHandler[_, _, _]], init, connection, handler)
}
/**
 * This actor wraps a pipeline and forwards commands and events between that
 * one and a [[Tcp]] connection actor. In order to inject commands into the
 * pipeline send an [[TcpPipelineHandler.Init.Command]] message to this actor; events will be sent
 * to the designated handler wrapped in [[TcpPipelineHandler.Init.Event]] messages.
 *
 * When the designated handler terminates the TCP connection is aborted. When
 * the connection actor terminates this actor terminates as well; the designated
 * handler may want to watch this actor’s lifecycle.
 *
 * <b>IMPORTANT:</b>
 *
 * Proper function of this actor (and of other pipeline stages like [[TcpReadWriteAdapter]]
 * depends on the fact that stages handling TCP commands and events pass unknown
 * subtypes through unaltered. There are more commands and events than are declared
 * within the [[Tcp]] object and you can even define your own.
 */
class TcpPipelineHandler[Ctx <: PipelineContext, Cmd, Evt](
  init: TcpPipelineHandler.Init[Ctx, Cmd, Evt],
  connection: ActorRef,
  handler: ActorRef)
  extends Actor with RequiresMessageQueue[UnboundedMessageQueueSemantics] {

  import init._
  import TcpPipelineHandler._

  // sign death pact
  context watch connection
  // watch so we can Close
  context watch handler

  val ctx = init.makeContext(context)

  // Commands flowing out of the pipeline go to the connection, except for the
  // special Tell / TcpEvent wrappers; events flowing up are wrapped in Event
  // and delivered to the handler. Any pipeline failure crashes this actor.
  val pipes = PipelineFactory.buildWithSinkFunctions(ctx, init.stages)({
    case Success(cmd) ⇒
      cmd match {
        case Tell(receiver, msg, sender) ⇒ receiver.tell(msg, sender)
        case TcpEvent(ev) ⇒ handler ! ev
        case _ ⇒ connection ! cmd
      }
    case Failure(ex) ⇒ throw ex
  }, {
    case Success(evt) ⇒ handler ! Event(evt)
    case Failure(ex) ⇒ throw ex
  })

  def receive = {
    case Command(cmd) ⇒ pipes.injectCommand(cmd)
    case evt: Tcp.Event ⇒ pipes.injectEvent(evt)
    case Management(cmd) ⇒ pipes.managementCommand(cmd)
    // Handler death aborts the connection; connection death stops this actor.
    case Terminated(`handler`) ⇒ connection ! Tcp.Abort
    case Terminated(`connection`) ⇒ context.stop(self)
  }
}
/**
 * Adapts a ByteString oriented pipeline stage to a stage that communicates via Tcp Commands and Events. Every ByteString
 * passed down to this stage will be converted to Tcp.Write commands, while incoming Tcp.Receive events will be unwrapped
 * and their contents passed up as raw ByteStrings. This adapter should be used together with TcpPipelineHandler.
 *
 * While this adapter communicates to the stage above it via raw ByteStrings, it is possible to inject Tcp Command
 * by sending them to the management port, and the adapter will simply pass them down to the stage below. Incoming Tcp Events
 * that are not Receive events will be passed downwards wrapped in a [[TcpPipelineHandler.TcpEvent]]; the [[TcpPipelineHandler]] will
 * send these notifications to the registered event handler actor.
 */
class TcpReadWriteAdapter extends PipelineStage[PipelineContext, ByteString, Tcp.Command, ByteString, Tcp.Event] {
  import TcpPipelineHandler.TcpEvent

  override def apply(ctx: PipelineContext) = new PipePair[ByteString, Tcp.Command, ByteString, Tcp.Event] {

    // Downward direction: raw bytes become a single Tcp.Write.
    override val commandPipeline = {
      data: ByteString ⇒ ctx.singleCommand(Tcp.Write(data))
    }

    // Upward direction: unwrap Received payloads; any other Tcp event is sent
    // back down wrapped in TcpEvent for delivery to the handler.
    override val eventPipeline = (evt: Tcp.Event) ⇒ evt match {
      case Tcp.Received(data) ⇒ ctx.singleEvent(data)
      case ev: Tcp.Event ⇒ ctx.singleCommand(TcpEvent(ev))
    }

    // Management port: Tcp commands injected here pass straight through.
    override val managementPort: Mgmt = {
      case cmd: Tcp.Command ⇒ ctx.singleCommand(cmd)
    }
  }
}
package championships
import eu.ace_design.island.arena.Run
import eu.ace_design.island.game.{Directions, Plane}
import eu.ace_design.island.map.IslandMap
import eu.ace_design.island.stdlib.Resources._
import library.Islands
import library.SI3
object Week08 extends Run with SI3 {
  // Week identifier used for run reporting
  override val number: String = "08"
  // Fixed seed so island generation is reproducible across runs
  override val seed: Long = Islands.s08
  // lazy: the island map is only generated when first accessed
  override lazy val theIsland: IslandMap = Islands.week08
  override val crew: Int = 15
  override val budget: Int = 20000
  // Plane enters the map at (1, 159) heading north
  override val plane: Plane = Plane(1, 159, Directions.NORTH)
  // Resource contracts to fulfil, as (resource, amount) pairs
  override val objectives = Set((WOOD, 10000), (LEATHER, 300), (GLASS, 50))
  // All registered players take part except these three teams
  override def players = all - "qac" - "qae" - "qcf"
}
| mosser/QGL-15-16 | arena/src/main/scala/championships/Week08.scala | Scala | lgpl-3.0 | 703 |
/*
* StructuredVE.scala
* A structured variable elimination algorithm.
*
* Created By: Avi Pfeffer (apfeffer@cra.com)
* Creation Date: March 1, 2015
*
* Copyright 2015 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email figaro@cra.com for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.algorithm.structured.algorithm.structured
import com.cra.figaro.language._
import com.cra.figaro.algorithm.structured._
import com.cra.figaro.algorithm.structured.strategy._
import com.cra.figaro.algorithm.structured.solver._
import com.cra.figaro.algorithm.structured.strategy.solve.ConstantStrategy
import com.cra.figaro.algorithm.structured.algorithm._
import com.cra.figaro.algorithm.structured.strategy.decompose._
import com.cra.figaro.algorithm.factored.factors.factory._
import com.cra.figaro.algorithm.factored.factors.MaxProductSemiring
class StructuredMPEVE(universe: Universe) extends StructuredMPEAlgorithm(universe) {
  // MPE queries use max-product rather than sum-product message passing
  val semiring = MaxProductSemiring()
  def run() {
    // Recursively decompose the problem, solving every subproblem with the
    // same constant solver: MPE variable elimination.
    val strategy = DecompositionStrategy.recursiveStructuredStrategy(problem, new ConstantStrategy(mpeVariableElimination), defaultRangeSizer, Lower, false)
    strategy.execute(initialComponents)
  }
}
object StructuredMPEVE {
  /** Create a structured MPE variable elimination algorithm over the implicit universe. */
  def apply()(implicit universe: Universe) = {
    new StructuredMPEVE(universe)
  }

  /**
   * Use structured VE to compute the most likely value of the given element.
   *
   * The temporary algorithm instance is always killed — even when querying
   * the target throws — so repeated calls do not leak algorithm resources.
   * (The previous version skipped kill() on failure, and its scaladoc was
   * copied from the probability-query variant.)
   */
  def mostLikelyValue[T](target: Element[T]): T = {
    val alg = new StructuredMPEVE(target.universe)
    alg.start()
    try {
      alg.mostLikelyValue(target)
    } finally {
      alg.kill()
    }
  }
}
| scottcb/figaro | Figaro/src/main/scala/com/cra/figaro/algorithm/structured/algorithm/structured/StructuredMPEVE.scala | Scala | bsd-3-clause | 1,819 |
package model.services
import play.api.libs.concurrent.Execution.Implicits._
import scala.concurrent.Future
class TokenServiceImpl extends TokenService[TokenUser] {

  /** Persist the token and hand it back wrapped in Some once the save completes. */
  def create(token: TokenUser): Future[Option[TokenUser]] =
    TokenUser.save(token).map(saved => Some(saved))

  /** Look up a token by its id; the future resolves to None when absent. */
  def retrieve(id: String): Future[Option[TokenUser]] =
    TokenUser.findById(id)

  /** Delete the token. Fire-and-forget: the deletion future is deliberately discarded. */
  def consume(id: String): Unit = {
    TokenUser.delete(id)
    ()
  }
}
| scify/DemocracIT-Web | app/model/services/TokenServiceImpl.scala | Scala | apache-2.0 | 432 |
package com.arcusys.valamis.updaters.version320
import com.arcusys.valamis.persistence.common.DatabaseLayer
import com.arcusys.valamis.updaters.common.BaseDBUpdater
import com.arcusys.valamis.updaters.version320.schema3209.TrainingEventGoalTableComponent
class DBUpdater3209 extends BaseDBUpdater
  with TrainingEventGoalTableComponent
  with DatabaseLayer {
  // Schema version this updater migrates the database up to
  override def getThreshold = 3209
  import driver.api._
  // Creates the training-event goal table; execSync blocks until done
  override def doUpgrade(): Unit = execSync {
    trainingEventGoals.schema.create
  }
} | arcusys/Valamis | valamis-updaters/src/main/scala/com/arcusys/valamis/updaters/version320/DBUpdater3209.scala | Scala | gpl-3.0 | 509 |
package mesosphere.marathon.integration.facades
import org.scalatest.{ Matchers, GivenWhenThen, FunSuite }
import play.api.libs.json.Json
class MesosFormatsTest extends FunSuite with Matchers with GivenWhenThen {
import MesosFacade._
import MesosFormats._
  // Exercises the implicit Reads in MesosFormats against a captured Mesos
  // 0.28.0 state.json (see Fixture); each assertion block quotes the JSON
  // fragment it verifies.
  test("parse sample") {
    Given("a sample state.json")
    val f = new Fixture
    When("parsing it")
    val status = Json.parse(f.sampleStatusJson).as[ITMesosState]
    Then("we can extract some base info")
    status.version should equal("0.28.0")
    status.gitTag should equal("0.28.0-rc1")
    And("we have info about one agent")
    status.agents should have size (1)
    val agent = status.agents.head
    And("resources of that agent are correct")
    /*
     * "resources": {
          "cpus": 8.0,
          "disk": 52830.0,
          "mem": 5078.0,
          "ports": "[31000-32000]"
        },
     */
    agent.resources should equal(
      ITResources(
        "cpus" -> 8.0,
        "disk" -> 52830.0,
        "mem" -> 5078.0,
        "ports" -> "[31000-32000]"
      )
    )
    /* "used_resources": {
          "cpus": 1.0,
          "disk": 128.0,
          "mem": 128.0,
          "ports": "[31903-31903]"
        }, */
    agent.usedResources should equal(
      ITResources(
        "cpus" -> 1.0,
        "disk" -> 128.0,
        "mem" -> 128.0,
        "ports" -> "[31903-31903]"
      )
    )
    /*
      "offered_resources": {
        "cpus": 0.0,
        "disk": 0.0,
        "mem": 0.0
      },
     */
    agent.offeredResources should equal(
      ITResources(
        "cpus" -> 0.0,
        "disk" -> 0.0,
        "mem" -> 0.0
      )
    )
    /*
      "reserved_resources": {
        "marathon": {
          "cpus": 1.1,
          "disk": 138.0,
          "mem": 144.0,
          "ports": "[31903-31903]"
        }
      },
     */
    agent.reservedResourcesByRole should equal(
      Map(
        "marathon" -> ITResources(
          "cpus" -> 1.1,
          "disk" -> 138.0,
          "mem" -> 144.0,
          "ports" -> "[31903-31903]"
        )
      )
    )
    /*
      "unreserved_resources": {
        "cpus": 6.9,
        "disk": 52692.0,
        "mem": 4934.0,
        "ports": "[31000-31902, 31904-32000]"
      },
     */
    agent.unreservedResources should equal(
      ITResources(
        "cpus" -> 6.9,
        "disk" -> 52692.0,
        "mem" -> 4934.0,
        "ports" -> "[31000-31902, 31904-32000]"
      )
    )
  }
  class Fixture {
    // Verbatim capture of a Mesos 0.28.0 `/master/state.json` response:
    // a single active agent, no registered or completed frameworks, three
    // orphan tasks, and two unregistered framework ids.
    val sampleStatusJson =
      """
        |{
        |  "version": "0.28.0",
        |  "git_sha": "ab1ec6a0d9ed4ba7180f4576c1bb267e58f94e00",
        |  "git_tag": "0.28.0-rc1",
        |  "build_date": "2016-03-04 05:39:57",
        |  "build_time": 1457069997.0,
        |  "build_user": "root",
        |  "start_time": 1457617308.28327,
        |  "elected_time": 1457617308.29501,
        |  "id": "3b81e796-9d06-4556-99d4-a5ac09a229d3",
        |  "pid": "master@192.168.99.10:5050",
        |  "hostname": "master",
        |  "activated_slaves": 1.0,
        |  "deactivated_slaves": 0.0,
        |  "leader": "master@192.168.99.10:5050",
        |  "log_dir": "/var/log/mesos",
        |  "flags": {
        |    "acls": "register_frameworks { principals { type: ANY } roles { type: ANY }} run_tasks { principals { type: ANY } users { type: ANY }}",
        |    "allocation_interval": "1secs",
        |    "allocator": "HierarchicalDRF",
        |    "authenticate": "false",
        |    "authenticate_http": "false",
        |    "authenticate_slaves": "false",
        |    "authenticators": "crammd5",
        |    "authorizers": "local",
        |    "credentials": "/etc/mesos.cfg/credentials",
        |    "framework_sorter": "drf",
        |    "help": "false",
        |    "hostname_lookup": "true",
        |    "http_authenticators": "basic",
        |    "initialize_driver_logging": "true",
        |    "ip": "192.168.99.10",
        |    "log_auto_initialize": "true",
        |    "log_dir": "/var/log/mesos",
        |    "logbufsecs": "0",
        |    "logging_level": "INFO",
        |    "max_completed_frameworks": "50",
        |    "max_completed_tasks_per_framework": "1000",
        |    "max_slave_ping_timeouts": "5",
        |    "port": "5050",
        |    "quiet": "false",
        |    "quorum": "1",
        |    "recovery_slave_removal_limit": "100%",
        |    "registry": "replicated_log",
        |    "registry_fetch_timeout": "1mins",
        |    "registry_store_timeout": "20secs",
        |    "registry_strict": "false",
        |    "root_submissions": "true",
        |    "slave_ping_timeout": "15secs",
        |    "slave_reregister_timeout": "10mins",
        |    "user_sorter": "drf",
        |    "version": "false",
        |    "webui_dir": "/usr/share/mesos/webui",
        |    "work_dir": "/var/lib/mesos",
        |    "zk": "zk://localhost:2181/mesos",
        |    "zk_session_timeout": "10secs"
        |  },
        |  "slaves": [
        |    {
        |      "id": "32edd5ac-248e-437d-bc31-60ef6ece59ec-S0",
        |      "pid": "slave(1)@192.168.99.10:5051",
        |      "hostname": "master",
        |      "registered_time": 1457617309.27261,
        |      "reregistered_time": 1457617309.27294,
        |      "resources": {
        |        "cpus": 8.0,
        |        "disk": 52830.0,
        |        "mem": 5078.0,
        |        "ports": "[31000-32000]"
        |      },
        |      "used_resources": {
        |        "cpus": 1.0,
        |        "disk": 128.0,
        |        "mem": 128.0,
        |        "ports": "[31903-31903]"
        |      },
        |      "offered_resources": {
        |        "cpus": 0.0,
        |        "disk": 0.0,
        |        "mem": 0.0
        |      },
        |      "reserved_resources": {
        |        "marathon": {
        |          "cpus": 1.1,
        |          "disk": 138.0,
        |          "mem": 144.0,
        |          "ports": "[31903-31903]"
        |        }
        |      },
        |      "unreserved_resources": {
        |        "cpus": 6.9,
        |        "disk": 52692.0,
        |        "mem": 4934.0,
        |        "ports": "[31000-31902, 31904-32000]"
        |      },
        |      "attributes": {
        |
        |      },
        |      "active": true,
        |      "version": "0.28.0"
        |    }
        |  ],
        |  "frameworks": [
        |
        |  ],
        |  "completed_frameworks": [
        |
        |  ],
        |  "orphan_tasks": [
        |    {
        |      "id": "test.753fc1ec-e5d7-11e5-a741-ac87a3211095",
        |      "name": "test",
        |      "framework_id": "70c87ff5-537d-4576-ba03-62e8130787ac-0001",
        |      "executor_id": "",
        |      "slave_id": "32edd5ac-248e-437d-bc31-60ef6ece59ec-S0",
        |      "state": "TASK_FINISHED",
        |      "resources": {
        |        "cpus": 1.0,
        |        "disk": 0.0,
        |        "mem": 128.0,
        |        "ports": "[31308-31308]"
        |      },
        |      "statuses": [
        |        {
        |          "state": "TASK_RUNNING",
        |          "timestamp": 1457514925.78141,
        |          "container_status": {
        |            "network_infos": [
        |              {
        |                "ip_address": "192.168.99.10",
        |                "ip_addresses": [
        |                  {
        |                    "ip_address": "192.168.99.10"
        |                  }
        |                ]
        |              }
        |            ]
        |          }
        |        },
        |        {
        |          "state": "TASK_FINISHED",
        |          "timestamp": 1457515925.81577,
        |          "container_status": {
        |            "network_infos": [
        |              {
        |                "ip_address": "192.168.99.10",
        |                "ip_addresses": [
        |                  {
        |                    "ip_address": "192.168.99.10"
        |                  }
        |                ]
        |              }
        |            ]
        |          }
        |        }
        |      ],
        |      "discovery": {
        |        "visibility": "FRAMEWORK",
        |        "name": "test",
        |        "ports": {
        |          "ports": [
        |            {
        |              "number": 10000,
        |              "protocol": "tcp"
        |            }
        |          ]
        |        }
        |      }
        |    },
        |    {
        |      "id": "test.b759a32a-e608-11e5-979a-da5f91e88a84",
        |      "name": "test",
        |      "framework_id": "ee85eedd-674c-40f9-ae8f-67158a923d65-0000",
        |      "executor_id": "",
        |      "slave_id": "32edd5ac-248e-437d-bc31-60ef6ece59ec-S0",
        |      "state": "TASK_RUNNING",
        |      "resources": {
        |        "cpus": 1.0,
        |        "disk": 128.0,
        |        "mem": 128.0,
        |        "ports": "[31903-31903]"
        |      },
        |      "statuses": [
        |        {
        |          "state": "TASK_RUNNING",
        |          "timestamp": 1457536474.32426,
        |          "container_status": {
        |            "network_infos": [
        |              {
        |                "ip_address": "192.168.99.10",
        |                "ip_addresses": [
        |                  {
        |                    "ip_address": "192.168.99.10"
        |                  }
        |                ]
        |              }
        |            ]
        |          }
        |        }
        |      ],
        |      "discovery": {
        |        "visibility": "FRAMEWORK",
        |        "name": "test",
        |        "ports": {
        |          "ports": [
        |            {
        |              "number": 10000,
        |              "protocol": "tcp"
        |            }
        |          ]
        |        }
        |      },
        |      "container": {
        |        "type": "MESOS",
        |        "mesos": {
        |
        |        }
        |      }
        |    },
        |    {
        |      "id": "resident1.02ccdd3e-e60a-11e5-979a-da5f91e88a84",
        |      "name": "resident1",
        |      "framework_id": "ee85eedd-674c-40f9-ae8f-67158a923d65-0000",
        |      "executor_id": "",
        |      "slave_id": "32edd5ac-248e-437d-bc31-60ef6ece59ec-S0",
        |      "state": "TASK_FINISHED",
        |      "resources": {
        |        "cpus": 0.1,
        |        "disk": 10.0,
        |        "mem": 16.0
        |      },
        |      "statuses": [
        |        {
        |          "state": "TASK_RUNNING",
        |          "timestamp": 1457536638.50787,
        |          "container_status": {
        |            "network_infos": [
        |              {
        |                "ip_address": "192.168.99.10",
        |                "ip_addresses": [
        |                  {
        |                    "ip_address": "192.168.99.10"
        |                  }
        |                ]
        |              }
        |            ]
        |          }
        |        },
        |        {
        |          "state": "TASK_FINISHED",
        |          "timestamp": 1457537638.57408,
        |          "container_status": {
        |            "network_infos": [
        |              {
        |                "ip_address": "192.168.99.10",
        |                "ip_addresses": [
        |                  {
        |                    "ip_address": "192.168.99.10"
        |                  }
        |                ]
        |              }
        |            ]
        |          }
        |        }
        |      ],
        |      "discovery": {
        |        "visibility": "FRAMEWORK",
        |        "name": "resident1",
        |        "ports": {
        |
        |        }
        |      },
        |      "container": {
        |        "type": "MESOS",
        |        "mesos": {
        |
        |        }
        |      }
        |    }
        |  ],
        |  "unregistered_frameworks": [
        |    "70c87ff5-537d-4576-ba03-62e8130787ac-0001",
        |    "ee85eedd-674c-40f9-ae8f-67158a923d65-0000"
        |  ]
        |}
      """.stripMargin
  }
}
| pgkelley4/marathon | src/test/scala/mesosphere/marathon/integration/facades/MesosFormatsTest.scala | Scala | apache-2.0 | 12,360 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.kernel.protocol.v5.content
import org.scalatest.{FunSpec, Matchers}
import play.api.libs.json.{JsPath, JsValue, Json, JsonValidationError}
class CompleteReplySpec extends FunSpec with Matchers {
  // Wire-format JSON for a complete_reply message with no matches
  val completeReplyJson: JsValue = Json.parse("""
  {
    "matches": [],
    "cursor_start": 1,
    "cursor_end": 5,
    "metadata": {},
    "status": "<STRING>"
  }
  """)
  // The CompleteReply instance the JSON above must round-trip to/from
  val completeReply: CompleteReply = CompleteReply(
    List(), 1, 5, Map(), "<STRING>", None, None, None
  )
  describe("CompleteReply") {
    describe("#toTypeString") {
      it("should return correct type") {
        CompleteReply.toTypeString should be ("complete_reply")
      }
    }
    describe("implicit conversions") {
      it("should implicitly convert from valid json to a CompleteReply instance") {
        // This is the least safe way to convert as an error is thrown if it fails
        completeReplyJson.as[CompleteReply] should be (completeReply)
      }
      it("should also work with asOpt") {
        // This is safer, but we lose the error information as it returns
        // None if the conversion fails
        val newCompleteReply = completeReplyJson.asOpt[CompleteReply]
        newCompleteReply.get should be (completeReply)
      }
      it("should also work with validate") {
        // This is the safest as it collects all error information (not just first error) and reports it
        val CompleteReplyResults = completeReplyJson.validate[CompleteReply]
        CompleteReplyResults.fold(
          (invalid: Seq[(JsPath, Seq[JsonValidationError])]) => println("Failed!"),
          (valid: CompleteReply) => valid
        ) should be (completeReply)
      }
      it("should implicitly convert from a CompleteReply instance to valid json") {
        Json.toJson(completeReply) should be (completeReplyJson)
      }
    }
  }
}
| apache/incubator-toree | protocol/src/test/scala/org/apache/toree/kernel/protocol/v5/content/CompleteReplySpec.scala | Scala | apache-2.0 | 2,685 |
package lila.analyse
import chess.format.pgn.Glyph
import lila.tree.Eval._
import scala.util.chaining._
sealed trait Advice {
  def judgment: Advice.Judgement
  def info: Info
  def prev: Info
  // Convenience accessors for the position *after* the advised move
  def ply = info.ply
  def turn = info.turn
  def color = info.color
  def cp = info.cp
  def mate = info.mate
  // Renders the annotation text, e.g. "(a → b) Blunder. Nf3 was best."
  // `??` yields the empty string when the flag is false / the Option is empty.
  def makeComment(withEval: Boolean, withBestMove: Boolean): String =
    withEval.??(evalComment ?? { c =>
      s"($c) "
    }) +
      (this match {
        case MateAdvice(seq, _, _, _) => seq.desc
        case CpAdvice(judgment, _, _) => judgment.toString
      }) + "." + {
        withBestMove ?? {
          info.variation.headOption ?? { move =>
            s" $move was best."
          }
        }
      }
  // "before → after" evaluation pair; None when both comments are missing
  def evalComment: Option[String] = {
    List(prev.evalComment, info.evalComment).flatten mkString " → "
  }.some filter (_.nonEmpty)
}
object Advice {
  // Severity scale attached to an annotated move, each with its NAG glyph
  sealed abstract class Judgement(val glyph: Glyph, val name: String) {
    override def toString = name
    def isBlunder = this == Judgement.Blunder
  }
  object Judgement {
    object Inaccuracy extends Judgement(Glyph.MoveAssessment.dubious, "Inaccuracy")
    object Mistake extends Judgement(Glyph.MoveAssessment.mistake, "Mistake")
    object Blunder extends Judgement(Glyph.MoveAssessment.blunder, "Blunder")
    val all = List(Inaccuracy, Mistake, Blunder)
  }
  // Try centipawn-based advice first, then fall back to mate-based advice
  def apply(prev: Info, info: Info): Option[Advice] = CpAdvice(prev, info) orElse MateAdvice(prev, info)
}
// Advice derived from a drop in centipawn evaluation between two positions
private[analyse] case class CpAdvice(
  judgment: Advice.Judgement,
  info: Info,
  prev: Info
) extends Advice
private[analyse] object CpAdvice {
  // Logistic mapping from a centipawn score to winning chances in (-1, 1);
  // 0 cp maps to 0, large advantages saturate towards +/-1.
  private def cpWinningChances(cp: Double): Double = 2 / (1 + Math.exp(-0.004 * cp)) - 1
  // Winning-chance loss thresholds, ordered worst-first; the first threshold
  // not exceeding the loss decides the judgement.
  private val winningChanceJudgements = List(
    .3 -> Advice.Judgement.Blunder,
    .2 -> Advice.Judgement.Mistake,
    .1 -> Advice.Judgement.Inaccuracy
  )
  // Yields advice only when both infos carry centipawn scores and the
  // winning-chance delta (sign-flipped via color.fold so it measures the
  // mover's loss) reaches at least the inaccuracy threshold.
  def apply(prev: Info, info: Info): Option[CpAdvice] =
    for {
      cp <- prev.cp map (_.ceiled.centipawns)
      infoCp <- info.cp map (_.ceiled.centipawns)
      prevWinningChances = cpWinningChances(cp)
      currentWinningChances = cpWinningChances(infoCp)
      delta = (currentWinningChances - prevWinningChances) pipe { d =>
        info.color.fold(-d, d)
      }
      judgement <- winningChanceJudgements find { case (d, _) => d <= delta } map (_._2)
    } yield CpAdvice(judgement, info, prev)
}
// Describes what happened to a forced-mate evaluation between two positions
sealed abstract private[analyse] class MateSequence(val desc: String)
private[analyse] case object MateCreated
  extends MateSequence(
    desc = "Checkmate is now unavoidable"
  )
private[analyse] case object MateDelayed
  extends MateSequence(
    desc = "Not the best checkmate sequence"
  )
private[analyse] case object MateLost
  extends MateSequence(
    desc = "Lost forced checkmate sequence"
  )
private[analyse] object MateSequence {
  // Mate scores are already normalized to the mover's perspective by the
  // caller (MateAdvice.apply): negative = mate against the mover.
  // NOTE(review): no case here ever yields MateDelayed, so the MateDelayed
  // branch in MateAdvice.apply is currently unreachable — confirm intended.
  def apply(prev: Option[Mate], next: Option[Mate]): Option[MateSequence] =
    (prev, next).some collect {
      case (None, Some(n)) if n.negative => MateCreated
      case (Some(p), None) if p.positive => MateLost
      case (Some(p), Some(n)) if p.positive && n.negative => MateLost
    }
}
// Advice derived from the appearance/loss of a forced mate sequence
private[analyse] case class MateAdvice(
  sequence: MateSequence,
  judgment: Advice.Judgement,
  info: Info,
  prev: Info
) extends Advice
private[analyse] object MateAdvice {
  def apply(prev: Info, info: Info): Option[MateAdvice] = {
    // Normalize evaluations to the mover's perspective: values are inverted
    // when the move was played by black, so positive always means good for
    // the player who made the move.
    def invertCp(cp: Cp) = cp invertIf info.color.black
    def invertMate(mate: Mate) = mate invertIf info.color.black
    // Defaults to 0 centipawns when no cp score is available (?? on Option)
    def prevCp = prev.cp.map(invertCp).??(_.centipawns)
    def nextCp = info.cp.map(invertCp).??(_.centipawns)
    MateSequence(prev.mate map invertMate, info.mate map invertMate) flatMap { sequence =>
      import Advice.Judgement._
      // Severity scales with how lost the position already was (mate now
      // against the mover) or how winning it remains (forced mate dropped).
      val judgment: Option[Advice.Judgement] = sequence match {
        case MateCreated if prevCp < -999 => Option(Inaccuracy)
        case MateCreated if prevCp < -700 => Option(Mistake)
        case MateCreated => Option(Blunder)
        case MateLost if nextCp > 999 => Option(Inaccuracy)
        case MateLost if nextCp > 700 => Option(Mistake)
        case MateLost => Option(Blunder)
        case MateDelayed => None
      }
      judgment map { MateAdvice(sequence, _, info, prev) }
    }
  }
}
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers
import cmwell.ctrl.utils.ProcUtil
import cmwell.ws.Settings
import cmwell.ws.util.DateParser.fdf
import com.typesafe.config.ConfigFactory
import javax.inject._
import logic.CRUDServiceFS
import org.joda.time._
import play.api.libs.ws.WSClient
import play.api.mvc._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent._
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.sys.process._
import scala.util._
/**
* Created by michael on 8/11/14.
*/
object HealthUtils {
  val config = ConfigFactory.load()
  // Elasticsearch/Cassandra/ZooKeeper host used by all health probes
  val ip = config.getString("ftsService.transportAddress")
  // Installation root; sibling directories hold the cas/kafka/java bundles
  val path = config.getString("user.dir")
  /**
   * to avoid abuse, we guard the nodetool with this proxy.
   * can't have more than 1 request per node in 3 minutes,
   * but also, we always save an up to date status within
   * the last 20 minutes.
   */
  object CassandraNodetoolProxy {
    private[this] val timeout = Settings.cacheTimeout seconds
    // Last nodetool output (prefixed with its timestamp); null until the
    // first successful run. Both fields are volatile: written by the
    // refresh future, read by request threads.
    @volatile private[this] var status: String = null
    @volatile private[this] var modified: DateTime = new DateTime(0L)
    // Background refresh every 20 minutes (first run after 30s); holding the
    // cancellable in a val keeps the scheduled task referenced.
    private[this] val nodetoolDaemonCancellable = {
      cmwell.util.concurrent.SimpleScheduler.scheduleAtFixedRate(30 seconds, 20 minutes){
        getStatus
      }
    }
    // Runs nodetool off-thread; on success updates the cache and stamps the
    // output with the refresh time.
    private[this] def getStatus: Future[String] = {
      val f = Future(ProcUtil.executeCommand(s"JAVA_HOME=$path/../java/bin $path/../cas/cur/bin/nodetool -h $ip status").get)
      val p = Promise[String]()
      f.onComplete{
        case Failure(e) => p.failure(e)
        case Success(s) => {
          modified = new DateTime()
          status = s"${fdf(modified)}\n$s"
          p.success(status)
        }
      }
      p.future
    }
    // Serve the cache when fresher than 3 minutes; otherwise refresh
    // synchronously (bounded by `timeout`) and fall back to the stale value
    // if the refresh fails. The minus/getMillis dance computes elapsed ms.
    def get: String = (new DateTime()).minus(modified.getMillis).getMillis match {
      case ms if (ms milliseconds) < (3 minutes) => status
      case _ => Try(Await.result(getStatus, timeout)).getOrElse(status)
    }
  }
  def CassNodetoolStatus: String = CassandraNodetoolProxy.get
}
@Singleton
class Health @Inject()(crudServiceFS: CRUDServiceFS, ws: WSClient) extends InjectedController {
import HealthUtils._
def getCassandaraHealth = Action.async {implicit req =>
Future(Ok(CassNodetoolStatus))
}
def getElasticsearchHealth ={
esRequestHelper(s"http://$ip:9201/_cluster/health?pretty&level=shards")
}
def getElasticsearchTop = {
esRequestHelper(s"http://$ip:9201/_nodes/hot_threads")
}
def getElasticsearchStats = {
esRequestHelper(s"http://$ip:9201/_cluster/stats?human&pretty")
}
def getElasticsearchSegments = {
esRequestHelper(s"http://$ip:9201/_segments?pretty")
}
def getElasticsearchStatus = {
esRequestHelper(s"http://$ip:9201/_status?pretty")
}
def getElasticsearchThreadPool = {
esRequestHelper(s"http://$ip:9201/_cat/thread_pool?v")
}
def getKafkaStatus = Action.async {implicit req =>
val javaHomeLocation = s"$path/../java"
val javaHomeAddition = s"""if [ -d $javaHomeLocation ] ;
then export JAVA_HOME=$javaHomeLocation ;
fi ;"""
val res = Seq("bash", "-c", javaHomeAddition + s" $path/../kafka/cur/bin/kafka-topics.sh --zookeeper $ip:2181 --describe") !!
Future(Ok(res))
}
def getZkStat = Action.async {implicit req =>
val res = Seq("echo", "stats" ) #| Seq("nc", ip, "2181") !!
Future(Ok(res))
}
def getZkRuok = Action.async {implicit req =>
val res = Seq("echo", "ruok" ) #| Seq("nc", ip, "2181") !!
Future(Ok(res))
}
def getZkMntr = Action.async {implicit req =>
val res = Seq("echo", "mntr" ) #| Seq("nc", ip, "2181") !!
Future(Ok(res))
}
def getIndex = Action.async {implicit req =>
Future{
val xml =
"""
|<html>
| <head>
| <title>CM-Well Cluster Health</title>
| </head>
| <body>
| <a href="/health/cas">Cassandra Ring</a><br>
| <!-- a href="/health/cas_cfh">Cassandra cfhistograms</a><br -->
| <a href="/health/es">Elasticsearch Cluster Health</a><br>
| <a href="/health/es_top">Elasticsearch Top</a><br>
| <a href="/health/es_stats">Elasticsearch Stats</a><br>
| <a href="/health/es_seg">Elasticsearch Segments</a><br>
| <a href="/health/es_status">Elasticsearch Status</a><br>
| <a href="/health/es_thread_pool">Elasticsearch Thread Pool</a><br>
| <a href="/health/kafka">Kafka</a><br>
| <a href="/health/zk-stat">zk-stat</a><br>
| <a href="/health/zk-ruok">zk-ruok</a><br>
| <a href="/health/zk-mntr">zk-mntr</a><br>
| <a href="/health/ws">ws</a><br>
| </body>
|</html>
""".stripMargin
Ok(xml).as("text/html")
}
}
def getWsHealth = Action { implicit req =>
Ok(s"IRW ReadCache Size: ${crudServiceFS.irwService.dataCahce.size()}")
}
def esRequestHelper(url : String) = Action.async {
ws.url(url).withRequestTimeout(30.seconds).execute().map {
response => Ok(response.body)
}.recover {
case e: scala.concurrent.TimeoutException =>
ServiceUnavailable("Timeout reached during method execution. ")
case e: Exception =>
InternalServerError(e.getMessage)
}
}
}
| bryaakov/CM-Well | server/cmwell-ws/app/controllers/Health.scala | Scala | apache-2.0 | 5,897 |
/**
* Copyright (c) 2013 Saddle Development Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package org.saddle
import mat._
import scalar.{Scalar, ScalarTag}
import ops.{BinOpMat, NumericOps}
import scala.{specialized => spec}
import java.io.OutputStream
import org.saddle.index.{IndexIntRange, Slice}
/**
* `Mat` is an immutable container for 2D homogeneous data (a "matrix"). It is
* backed by a single array. Data is stored in row-major order.
*
* Several element access methods are provided.
*
* The `at` method returns an instance of a [[org.saddle.scalar.Scalar]], which behaves
* much like an `Option` in that it can be either an instance of [[org.saddle.scalar.NA]]
* or a [[org.saddle.scalar.Value]] case class:
*
* {{{
* val m = Mat(2,2,Array(1,2,3,4))
* m.at(0,0) == Value(1)
* }}}
*
* The method `raw` accesses the underlying value directly.
*
* {{{
* val m = Mat(2,2,Array(1,2,3,4))
* m.raw(0,0) == 1d
* }}}
*
* `Mat` may be used in arithemetic expressions which operate on two `Mat`s or on a
* `Mat` and a primitive value. A fe examples:
*
* {{{
* val m = Mat(2,2,Array(1,2,3,4))
* m * m == Mat(2,2,Array(1,4,9,16))
* m dot m == Mat(2,2,Array(7d,10,15,22))
* m * 3 == Mat(2, 2, Array(3,6,9,12))
* }}}
*
* Note, Mat is generally compatible with EJML's DenseMatrix. It may be convenient
* to induce this conversion to do more complex linear algebra, or to work with a
* mutable data structure.
*
* @tparam A Type of elements within the Mat
*/
trait Mat[@spec(Boolean, Int, Long, Double) A] extends NumericOps[Mat[A]] with Serializable{
def scalarTag: ScalarTag[A]
/**
* Returns number of rows in the matrix shape
*
*/
def numRows: Int
/**
* Returns number of columns in the matrix shape
*
*/
def numCols: Int
/**
* Returns total number of entries in the matrix
*
*/
def length: Int = numRows * numCols
/**
* Returns true if rows == cols
*
*/
def isSquare: Boolean = numCols == numRows
/**
* Returns true if the matrix is empty
*
*/
def isEmpty: Boolean = length == 0
/**
* Return unboxed value of matrix at an offset from zero in row-major order
*
* @param i index
*/
def raw(i: Int): A = apply(i)
/**
* Return unboxed value of matrix at row/column
*
* @param r row index
* @param c col index
*/
def raw(r: Int, c: Int): A = apply(r, c)
/**
* Return scalar value of matrix at offset from zero in row-major order
*
* @param i index
*/
def at(i: Int)(implicit st: ScalarTag[A]): Scalar[A] = {
Scalar(raw(i))
}
/**
* Return scalar value of Mat at at row/column
* @param r row index
* @param c col index
*/
def at(r: Int, c: Int)(implicit st: ScalarTag[A]): Scalar[A] = {
Scalar(raw(r, c))
}
/**
* Access a slice of the Mat by integer offsets
* @param r Array of row offsets
* @param c Array of col offsets
*/
def at(r: Array[Int], c: Array[Int])(implicit st: ScalarTag[A]): Mat[A] = {
row(r).col(c)
}
/**
* Access a slice of the Mat by integer offsets
* @param r Array of row offsets
* @param c Integer col offset
*/
def at(r: Array[Int], c: Int)(implicit st: ScalarTag[A]): Vec[A] = {
row(r).col(c)
}
/**
* Access a slice of the Mat by integer offsets
* @param r Integer row offset
* @param c Array of col offsets
*/
def at(r: Int, c: Array[Int])(implicit st: ScalarTag[A]): Vec[A] = {
col(c).row(r)
}
/**
* Access a slice of the Mat by Slice parameters
* @param r Slice to apply to rows
* @param c Slice to apply to cols
*/
def at(r: Slice[Int], c: Slice[Int])(implicit st: ScalarTag[A]): Mat[A] =
row(r).col(c)
/**
* Returns (a copy of) the contents of matrix as a single array in
* row-major order
*
*/
def contents: Array[A] = toVec.toArray
// Must implement specialized methods using non-specialized subclasses as workaround to
// https://issues.scala-lang.org/browse/SI-5281
/**
* Maps a function over each element in the matrix
*/
def map[@spec(Boolean, Int, Long, Double) B: ST](f: A => B): Mat[B]
/**
* Changes the shape of matrix without changing the underlying data
*/
def reshape(r: Int, c: Int): Mat[A]
/**
* Transpose of original matrix
*/
def transpose: Mat[A]
/**
* Transpose of original matrix
*/
def T = transpose
/**
* Create Mat comprised of same values in specified rows
*/
def takeRows(locs: Array[Int]): Mat[A]
/**
* Create Mat comprised of same values in specified rows
*/
def takeRows(locs: Int*): Mat[A] = takeRows(locs.toArray)
/**
* Create Mat comprised of same values in specified columns
*/
def takeCols(locs: Array[Int]): Mat[A] = T.takeRows(locs).T
/**
* Create Mat comprised of same values in specified columns
*/
def takeCols(locs: Int*): Mat[A] = takeCols(locs.toArray)
/**
* Create Mat comprised of same values without the specified rows
*
* @param locs Row locations to exclude
*/
def withoutRows(locs: Array[Int]): Mat[A]
/**
* Create Mat comprised of same values without the specified rows
*
* @param locs Row locations to exclude
*/
def withoutRows(locs: Int*): Mat[A] = withoutRows(locs.toArray)
/**
* Create Mat comprised of same values without the specified columns
*
* @param locs Col locations to exclude
*/
def withoutCols(locs: Array[Int]): Mat[A] = T.withoutRows(locs).T
/**
* Create Mat comprised of same values without the specified columns
*
* @param locs Col locations to exclude
*/
def withoutCols(locs: Int*): Mat[A] = withoutCols(locs.toArray)
/**
* Yields row indices where row has some NA value
*/
def rowsWithNA(implicit ev: ST[A]): Set[Int] = {
val builder = Set.newBuilder[Int]
var i = 0
while (i < numRows) {
if (row(i).hasNA) builder += i
i += 1
}
builder.result()
}
/**
* Yields column indices where column has some NA value
*/
def colsWithNA(implicit ev: ST[A]): Set[Int] = T.rowsWithNA
/**
* Yields a matrix without those rows that have NA
*/
def dropRowsWithNA(implicit ev: ST[A]): Mat[A] = withoutRows(rowsWithNA.toArray)
/**
* Yields a matrix without those cols that have NA
*/
def dropColsWithNA(implicit ev: ST[A]): Mat[A] = withoutCols(colsWithNA.toArray)
/**
* Returns a specific column of the Mat as a Vec
*
* @param c Column index
*/
def col(c: Int)(implicit ev: ST[A]): Vec[A] = {
assert(c >= 0 && c < numCols, "Array index %d out of bounds" format c)
flattenT.slice(c * numRows, (c + 1) * numRows)
}
/**
* Access Mat columns at a particular integer offsets
* @param locs a sequence of integer offsets
*/
def col(locs: Int*)(implicit ev: ST[A]): Mat[A] = takeCols(locs.toArray)
/**
* Access Mat columns at a particular integer offsets
* @param locs an array of integer offsets
*/
def col(locs: Array[Int])(implicit ev: ST[A]): Mat[A] = takeCols(locs)
/**
* Access mat columns specified by a slice
* @param slice a slice specifier
*/
def col(slice: Slice[Int]): Mat[A] = {
val (a, b) = slice(IndexIntRange(numCols))
takeCols(a until b toArray)
}
/**
* Returns columns of Mat as an indexed sequence of Vec instances
*/
def cols()(implicit ev: ST[A]): IndexedSeq[Vec[A]] = Range(0, numCols).map(col _)
/**
* Returns columns of Mat as an indexed sequence of Vec instances
*/
def cols(seq: IndexedSeq[Int])(implicit ev: ST[A]): IndexedSeq[Vec[A]] = seq.map(col _)
/**
* Returns a specific row of the Mat as a Vec
*
* @param r Row index
*/
def row(r: Int)(implicit ev: ST[A]): Vec[A] = {
assert(r >= 0 && r < numRows, "Array index %d out of bounds" format r)
flatten.slice(r * numCols, (r + 1) * numCols)
}
/**
* Access Mat rows at a particular integer offsets
* @param locs a sequence of integer offsets
*/
def row(locs: Int*)(implicit ev: ST[A]): Mat[A] = takeRows(locs.toArray)
/**
* Access Mat rows at a particular integer offsets
* @param locs an array of integer offsets
*/
def row(locs: Array[Int])(implicit ev: ST[A]): Mat[A] = takeRows(locs)
/**
* Access Mat rows specified by a slice
* @param slice a slice specifier
*/
def row(slice: Slice[Int]): Mat[A] = {
val (a, b) = slice(IndexIntRange(numCols))
takeRows(a until b toArray)
}
/**
 * Returns all rows of the matrix as an indexed sequence of Vec instances
 */
def rows()(implicit ev: ST[A]): IndexedSeq[Vec[A]] = Range(0, numRows).map(row _)
/**
 * Returns the rows at the given offsets as an indexed sequence of Vec instances
 * @param seq 0-based row offsets to extract
 */
def rows(seq: IndexedSeq[Int])(implicit ev: ST[A]): IndexedSeq[Vec[A]] = seq.map(row _)
/**
 * Multiplies this matrix against another, promoting both operands to Double.
 *
 * @param m right-hand matrix; its row count must equal this matrix's column count
 * @throws IllegalArgumentException when the inner dimensions do not agree
 */
def mult[B](m: Mat[B])(implicit evA: NUM[A], evB: NUM[B]): Mat[Double] = {
  if (numCols == m.numRows) {
    MatMath.mult(this, m)
  } else {
    // Inner dimensions disagree: (numRows x numCols) x (m.numRows x m.numCols)
    throw new IllegalArgumentException(
      "Cannot multiply (%d %d) x (%d %d)".format(numRows, numCols, m.numRows, m.numCols))
  }
}
/**
 * Rounds each element of the (numeric) matrix to a fixed number of decimal
 * places, returning a Mat[Double].
 *
 * @param sig Number of decimal places to keep (default 2)
 */
def roundTo(sig: Int = 2)(implicit ev: NUM[A]): Mat[Double] = {
  val factor = math.pow(10, sig)
  // round(x * 10^sig) / 10^sig retains exactly `sig` decimal digits
  map((x: A) => math.round(scalarTag.toDouble(x) * factor) / factor)
}
/**
 * Concatenate all rows into a single row-wise Vec instance
 */
def toVec: Vec[A]
// Cache of the row-major flattening, built from toVec on first access.
private var flatCache: Option[Vec[A]] = None
// NOTE(review): the getOrElse read happens outside the synchronized block, so
// two racing threads may each compute toVec; the cached result is equivalent
// either way, but this is check-then-act rather than strict once-only init.
private def flatten(implicit st: ST[A]): Vec[A] = flatCache.getOrElse {
this.synchronized {
flatCache = Some(toVec)
flatCache.get
}
}
// Cache of the column-major flattening (the transpose T, flattened).
private var flatCacheT: Option[Vec[A]] = None
// Same caching/racing caveat as flatten above.
private def flattenT(implicit st: ST[A]): Vec[A] = flatCacheT.getOrElse {
this.synchronized {
flatCacheT = Some(T.toVec)
flatCacheT.get
}
}
// access like vector in row-major order
private[saddle] def apply(i: Int): A
// implement access like matrix(i, j)
private[saddle] def apply(r: Int, c: Int): A
// use with caution, may not return copy
private[saddle] def toArray: Array[A]
// use with caution, may not return copy
private[saddle] def toDoubleArray(implicit ev: NUM[A]): Array[Double]
/**
 * Creates a string representation of Mat, eliding interior rows/columns when
 * the matrix is larger than the requested display window.
 * @param nrows Max number of rows to include
 * @param ncols Max number of cols to include
 */
def stringify(nrows: Int = 8, ncols: Int = 8): String = {
val halfr = nrows / 2
val halfc = ncols / 2
val buf = new StringBuilder()
buf.append("[%d x %d]\\n".format(numRows, numCols))
implicit val st = scalarTag
// Printed width of a column = longest rendered value among its first and
// last halfr entries; computed only for the columns that will be shown.
val maxStrLen = (a: Int, b: String) => a.max(b.length)
val maxColLen = (c: Vec[A]) => (c.head(halfr) concat c.tail(halfr)).map(scalarTag.show(_)).foldLeft(0)(maxStrLen)
val colIdx = util.grab(Range(0, numCols), halfc)
val lenSeq = colIdx.map { c => c -> maxColLen(col(c)) }
// Columns elided from display fall back to a width of 1.
val lenMap = lenSeq.toMap.withDefault(_ => 1)
// function to build a row
def createRow(r: Int) = {
val buf = new StringBuilder()
val strFn = (col: Int) => {
val l = lenMap(col)
// Right-align each value within its column's computed width (minimum 1).
"%" + { if (l > 0) l else 1 } + "s " format scalarTag.show(apply(r, col))
}
buf.append(util.buildStr(ncols, numCols, strFn))
buf.append("\\n")
buf.toString()
}
// build all rows, inserting an ellipsis line where rows were elided
buf.append(util.buildStr(nrows, numRows, createRow, "...\\n"))
buf.toString()
}
/** Renders the Mat via [[stringify]] with the default display limits. */
override def toString = stringify()
/**
 * Pretty-printer for Mat, which simply outputs the result of stringify.
 * @param nrows Max number of rows to display
 * @param ncols Max number of cols to display
 * @param stream Output stream to write to (defaults to stdout)
 */
def print(nrows: Int = 8, ncols: Int = 8, stream: OutputStream = System.out) {
stream.write(stringify(nrows, ncols).getBytes)
}
/** Default hashcode: rolling prime (31) multiplication over the hashcodes of all values. */
override def hashCode(): Int = toVec.foldLeft(1)((acc, elem) => acc * 31 + elem.hashCode())
/**
 * Element-by-element equality check over all values; two cells also compare
 * equal when both are missing according to their respective scalar tags.
 * NB: to avoid boxing, overwrite in child classes
 */
override def equals(o: Any): Boolean = o match {
  case other: Mat[_] =>
    if (this eq other) true
    else if (numRows != other.numRows || numCols != other.numCols) false
    else {
      var idx = 0
      var same = true
      // Scan in row-major order, bailing out at the first mismatch.
      while (same && idx < length) {
        same = apply(idx) == other(idx) ||
          this.scalarTag.isMissing(apply(idx)) && other.scalarTag.isMissing(other(idx))
        idx += 1
      }
      same
    }
  case _ => false
}
}
object Mat extends BinOpMat {
  /**
   * Factory method to create a new Mat from raw materials
   * @param rows Number of rows in Mat
   * @param cols Number of cols in Mat
   * @param arr A 1D array of backing data in row-major order
   * @tparam T Type of data in array
   */
  def apply[T](rows: Int, cols: Int, arr: Array[T])(implicit st: ST[T]): Mat[T] =
    if (rows == 0 || cols == 0) st.makeMat(0, 0, Array.empty[T])
    else st.makeMat(rows, cols, arr)

  /**
   * Allows implicit promoting from a Mat to a Frame instance
   * @param m Mat instance
   * @tparam T The type of elements in Mat
   */
  implicit def matToFrame[T: ST](m: Mat[T]) = Frame(m)

  /**
   * Factory method to create an empty (0 x 0) Mat
   * @tparam T Type of Mat
   */
  def empty[T: ST]: Mat[T] = apply(0, 0, Array.empty[T])

  /**
   * Factory method to create a zero Mat (all elements default-initialized)
   * @param numRows Number of rows in Mat
   * @param numCols Number of cols in Mat
   * @tparam T Type of elements in Mat
   */
  def apply[T: ST](numRows: Int, numCols: Int): Mat[T] = {
    val backing = Array.ofDim[T](numRows * numCols)
    apply(numRows, numCols, backing)
  }

  /**
   * Factory method to create a Mat from an array of arrays. Each inner array
   * becomes one column of the new Mat instance.
   * @param values Array of arrays, each of which is to be a column
   * @tparam T Type of elements in inner array
   */
  def apply[T: ST](values: Array[Array[T]]): Mat[T] = {
    val st = implicitly[ST[T]]
    st.makeMat(values.map(arr => Vec(arr)))
  }

  /**
   * Factory method to create a Mat from an array of Vec. Each inner Vec
   * becomes one column of the new Mat instance.
   * @param values Array of Vec, each of which is to be a column
   * @tparam T Type of elements in Vec
   */
  def apply[T: ST](values: Array[Vec[T]]): Mat[T] = implicitly[ST[T]].makeMat(values)

  /**
   * Factory method to create a Mat from a varargs sequence of Vec. Each Vec
   * becomes one column of the new Mat instance.
   * @param values Sequence of Vec, each of which is to be a column
   * @tparam T Type of elements in array
   */
  def apply[T: ST](values: Vec[T]*): Mat[T] = {
    val columns = values.toArray
    implicitly[ST[T]].makeMat(columns)
  }

  /**
   * Factory method to create an identity matrix: ones on the diagonal,
   * zeros elsewhere.
   * @param n The width of the square matrix
   */
  def ident(n: Int): Mat[Double] = mat.ident(n)
}
| jyt109/saddle | saddle-core/src/main/scala/org/saddle/Mat.scala | Scala | apache-2.0 | 15,492 |
package suggestions
package gui
import scala.concurrent.duration._
import scala.collection.mutable.ListBuffer
import scala.collection.JavaConverters._
import scala.concurrent._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.swing._
import scala.util.{ Try, Success, Failure }
import scala.swing.event._
import swing.Swing._
import javax.swing.UIManager
import Orientation._
import rx.subscriptions.CompositeSubscription
import rx.lang.scala.Observable
import rx.lang.scala.Subscription
import observablex._
import search._
/**
 * Swing application that queries Wikipedia: typed search terms are throttled
 * into suggestion lookups, and clicking "Get" renders the selected page.
 * Reactive wiring is built from Rx Observables over Swing events.
 */
object WikipediaSuggest extends SimpleSwingApplication with ConcreteSwingApi with ConcreteWikipediaApi {
{
// Best-effort: fall back to the default look-and-feel if the system one fails.
// NOTE(review): catching Throwable here also swallows fatal errors — confirm intended.
try {
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName())
} catch {
case t: Throwable =>
}
}
def top = new MainFrame {
/* gui setup */
title = "Query Wikipedia"
minimumSize = new Dimension(900, 600)
val button = new Button("Get") {
icon = new javax.swing.ImageIcon(javax.imageio.ImageIO.read(this.getClass.getResourceAsStream("/suggestions/wiki-icon.png")))
}
val searchTermField = new TextField
val suggestionList = new ListView(ListBuffer[String]())
val status = new Label(" ")
val editorpane = new EditorPane {
import javax.swing.border._
border = new EtchedBorder(EtchedBorder.LOWERED)
editable = false
peer.setContentType("text/html")
}
// Layout: left column holds the search field, suggestion list and button;
// the right pane renders the fetched page; a status label sits at the bottom.
contents = new BoxPanel(orientation = Vertical) {
border = EmptyBorder(top = 5, left = 5, bottom = 5, right = 5)
contents += new BoxPanel(orientation = Horizontal) {
contents += new BoxPanel(orientation = Vertical) {
maximumSize = new Dimension(240, 900)
border = EmptyBorder(top = 10, left = 10, bottom = 10, right = 10)
contents += new BoxPanel(orientation = Horizontal) {
maximumSize = new Dimension(640, 30)
border = EmptyBorder(top = 5, left = 0, bottom = 5, right = 0)
contents += searchTermField
}
contents += new ScrollPane(suggestionList)
contents += new BorderPanel {
maximumSize = new Dimension(640, 30)
add(button, BorderPanel.Position.Center)
}
}
contents += new ScrollPane(editorpane)
}
contents += status
}
val eventScheduler = SchedulerEx.SwingEventThreadScheduler
/**
 * Observables
 * You may find the following methods useful when manipulating GUI elements:
 * `myListView.listData = aList` : sets the content of `myListView` to `aList`
 * `myTextField.text = "react"` : sets the content of `myTextField` to "react"
 * `myListView.selection.items` returns a list of selected items from `myListView`
 * `myEditorPane.text = "act"` : sets the content of `myEditorPane` to "act"
 */
// Stream of the text field's contents, emitted on every field event; the
// returned Subscription detaches the listener when unsubscribed.
val searchTerms: Observable[String] = Observable.create { obr =>
searchTermField.subscribe {
case e => {
obr.onNext(searchTermField.text)
}
}
Subscription{
searchTermField.unsubscribe{
case e =>
}
}
}
// Suggestion lookups: throttle to at most one term per second, skip repeats,
// sanitize, then wrap each async wikipediaSuggestion result in a Try.
val suggestions: Observable[Try[List[String]]] = searchTerms.throttleLast(Duration(1,SECONDS)).distinctUntilChanged.sanitized.flatMap(s => {
Observable{ subscriber =>
wikipediaSuggestion(s).onComplete{ t =>
subscriber.onNext(t)
}
}
})
// Render suggestions (or the error) on the Swing event thread.
val suggestionSubscription: Subscription = suggestions.observeOn(eventScheduler) subscribe {
x => x match {
case Success(v) => suggestionList.listData = v
case Failure(e) => status.text = e.toString
}
}
// Stream of list selections: each "Get" click emits every currently-selected
// suggestion; the Subscription detaches the button listener.
val selections: Observable[String] = Observable.create { obr =>
button.subscribe {
case e => {
suggestionList.selection.items.foreach{i => obr.onNext(i)}
}
}
Subscription{
button.unsubscribe{
case e =>
}
}
}
// Fetch each selected page asynchronously, wrapping the outcome in a Try.
val pages: Observable[Try[String]] = selections.flatMap(s => {
Observable.create{ obr =>
wikipediaPage(s).onComplete{ t =>
obr.onNext(t)
}
Subscription{
obr.onCompleted()
}
}
})
// Render the fetched page (or the error) on the Swing event thread.
val pageSubscription: Subscription = pages.observeOn(eventScheduler) subscribe {
x => x match {
case Success(v) => editorpane.text = v
case Failure(e) => status.text = e.toString
}
}
}
}
/** Production implementation of [[WikipediaApi]] delegating to the real [[Search]] backend. */
trait ConcreteWikipediaApi extends WikipediaApi {
def wikipediaSuggestion(term: String) = Search.wikipediaSuggestion(term)
def wikipediaPage(term: String) = Search.wikipediaPage(term)
}
/**
 * Binds the abstract event/widget types of [[SwingApi]] to the concrete
 * scala.swing classes, providing extractors for pattern-matching GUI events.
 */
trait ConcreteSwingApi extends SwingApi {
type ValueChanged = scala.swing.event.ValueChanged
object ValueChanged {
// Extractor: yields the TextField that emitted the value-change event.
def unapply(x: Event) = x match {
case vc: ValueChanged => Some(vc.source.asInstanceOf[TextField])
case _ => None
}
}
type ButtonClicked = scala.swing.event.ButtonClicked
object ButtonClicked {
// Extractor: yields the Button that emitted the click event.
def unapply(x: Event) = x match {
case bc: ButtonClicked => Some(bc.source.asInstanceOf[Button])
case _ => None
}
}
type TextField = scala.swing.TextField
type Button = scala.swing.Button
}
| foomorrow/coursera-reactive | suggestions/src/main/scala/suggestions/gui/WikipediaSuggest.scala | Scala | gpl-2.0 | 5,341 |
package scalarules.test.junit.specs2
import org.specs2.mutable.SpecWithJUnit
// Test fixture for rules_scala's expected-failure coverage: exactly one of the
// three examples fails, which the surrounding build test asserts on.
class SuiteWithOneFailingTest extends SpecWithJUnit {
"specs2 tests" should {
"succeed" >> success
// Intentional failure — do not "fix": the enclosing test expects it.
"fail" >> failure("boom")
}
"some other suite" should {
"do stuff" >> success
}
}
| smparkes/rules_scala | test_expect_failure/scala_junit_test/specs2/SuiteWithOneFailingTest.scala | Scala | apache-2.0 | 282 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.submit.submitsteps.initcontainer
import org.apache.spark.deploy.k8s.{PodWithDetachedInitContainer, SparkPodInitContainerBootstrap}
import org.apache.spark.deploy.k8s.config._
import org.apache.spark.deploy.k8s.submit.KubernetesFileUtils
/**
 * Init-container configuration step that records where remote jars/files
 * should be downloaded to, and attaches the shared init container + volumes
 * to the driver pod via the provided bootstrap.
 *
 * NOTE(review): configMapName/configMapKey are accepted but not used in this
 * step — presumably consumed elsewhere; kept for constructor compatibility.
 */
private[spark] class BaseInitContainerConfigurationStep(
    sparkJars: Seq[String],
    sparkFiles: Seq[String],
    jarsDownloadPath: String,
    filesDownloadPath: String,
    configMapName: String,
    configMapKey: String,
    podAndInitContainerBootstrap: SparkPodInitContainerBootstrap)
  extends InitContainerConfigurationStep {

  override def configureInitContainer(initContainerSpec: InitContainerSpec): InitContainerSpec = {
    val remoteJars = KubernetesFileUtils.getOnlyRemoteFiles(sparkJars)
    val remoteFiles = KubernetesFileUtils.getOnlyRemoteFiles(sparkFiles)

    // Emit each "remote" property only when there is something to download.
    val remoteJarsConf =
      if (remoteJars.isEmpty) Map.empty[String, String]
      else Map(INIT_CONTAINER_REMOTE_JARS.key -> remoteJars.mkString(","))
    val remoteFilesConf =
      if (remoteFiles.isEmpty) Map.empty[String, String]
      else Map(INIT_CONTAINER_REMOTE_FILES.key -> remoteFiles.mkString(","))

    val initContainerConfig: Map[String, String] =
      Map(
        INIT_CONTAINER_JARS_DOWNLOAD_LOCATION.key -> jarsDownloadPath,
        INIT_CONTAINER_FILES_DOWNLOAD_LOCATION.key -> filesDownloadPath) ++
        remoteJarsConf ++
        remoteFilesConf

    // Attach the init container and its shared volumes to the pod/containers.
    val bootstrapped = podAndInitContainerBootstrap.bootstrapInitContainerAndVolumes(
      PodWithDetachedInitContainer(
        initContainerSpec.podToInitialize,
        initContainerSpec.initContainer,
        initContainerSpec.driverContainer))

    initContainerSpec.copy(
      initContainer = bootstrapped.initContainer,
      driverContainer = bootstrapped.mainContainer,
      podToInitialize = bootstrapped.pod,
      initContainerProperties = initContainerConfig)
  }
}
| publicRoman/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/submitsteps/initcontainer/BaseInitContainerConfigurationStep.scala | Scala | apache-2.0 | 2,917 |
package fpinscala.datastructures
import org.scalatest.FlatSpec
import org.scalatest.Matchers
/**
 * Unit tests for the Tree exercises (3.25-3.29 of "FP in Scala"): size,
 * maximum, depth, map, and the fold-based reimplementations of each.
 */
class TreeSpec extends FlatSpec with Matchers {
import Tree._
// Shared fixtures. Shapes:
//   branch1 = Branch(Leaf(1), Leaf(2))
//   branch2 = Branch(branch1, Leaf(3))
//   branch3 = Branch(Leaf(4), branch2)   (depth 3, max value 4)
val leaf1 = Leaf(1);
val leaf2 = Leaf(2);
val leaf3 = Leaf(3);
val leaf4 = Leaf(4);
val branch1 = Branch(leaf1, leaf2);
val branch2 = Branch(branch1, leaf3);
val branch3 = Branch(leaf4, branch2);
//3.25 size counts both leaves and internal branch nodes
"size" should "return 1 given a leaf" in {
Tree.size(leaf1) shouldBe 1
}
it should "return 3 for branch1" in {
Tree.size(branch1) shouldBe 3
}
it should "return 5 for branch2" in {
Tree.size(branch2) shouldBe 5
}
it should "return 7 for branch3" in {
Tree.size(branch3) shouldBe 7
}
//3.26 maximum of the leaf values
"maximum" should "return the leaf value when given a leaf" in {
maximum(leaf1) shouldBe leaf1.value
}
it should "return leaf2 for branch1" in {
maximum(branch1) shouldBe leaf2.value
}
it should "return leaf4 for branch3" in {
maximum(branch3) shouldBe leaf4.value
}
//3.27 depth = longest path from root to any leaf (leaf alone = 0)
"depth" should "return 0 for a leaf" in {
depth(leaf1) shouldBe 0
}
it should "return 1 for a branch with two leaves" in {
depth(branch1) shouldBe 1
}
it should "return 3 for branch 3" in {
depth(branch3) shouldBe 3
}
//3.28 map transforms every leaf value, preserving structure
"map" should "add 1 to a leaf" in {
map(leaf1)(_ + 1) shouldBe Leaf(2)
}
it should "add 1 to a branch" in {
map(branch1)(_ + 1) shouldBe Branch(Leaf(2),Leaf(3))
}
//3.29 the fold-based versions must agree with the direct implementations
"fold" should "have the same behaviour as size" in {
sizeUsingFold(leaf1) shouldBe Tree.size(leaf1)
sizeUsingFold(branch1) shouldBe Tree.size(branch1)
sizeUsingFold(branch2) shouldBe Tree.size(branch2)
sizeUsingFold(branch3) shouldBe Tree.size(branch3)
}
it should "have the same behaviour as maximum" in {
maximumUsingFold(leaf1) shouldBe maximum(leaf1)
maximumUsingFold(branch1) shouldBe maximum(branch1)
maximumUsingFold(branch2) shouldBe maximum(branch2)
}
it should "have the same behaviour as depth" in {
depthUsingFold(leaf1) shouldBe depth(leaf1)
depthUsingFold(branch1) shouldBe depth(branch1)
depthUsingFold(branch2) shouldBe depth(branch2)
}
it should "have the same behaviour as map" in {
mapUsingFold(leaf1)(_.toString) shouldBe map(leaf1)(_.toString)
mapUsingFold(branch1)(_.toString) shouldBe map(branch1)(_.toString)
}
}
| onewheelonly/fpinscala | exercises/src/test/scala/fpinscala/datastructures/TreeSpec.scala | Scala | mit | 2,360 |
import io.hydrosphere.mist.api.{MistJob, SQLSupport}
/**
 * Mist job example: loads a JSON file into a DataFrame and returns the
 * average of the "age" field computed via SQL over a temp table.
 */
object SimpleSQLContext extends MistJob with SQLSupport {
/** Contains implementation of spark job with ordinary [[org.apache.spark.sql.SQLContext]]
 * Abstract method must be overridden
 *
 * @param file json file path
 * @return result of the job: Map("result" -> rows of the AVG(age) query)
 */
def execute(file: String): Map[String, Any] = {
val df = sqlContext.read.json(file)
// registerTempTable is the Spark 1.x API (this is the spark1 examples module).
df.registerTempTable("people")
Map("result" -> sqlContext.sql("SELECT AVG(age) AS avg_age FROM people").collect())
}
}
| KineticCookie/mist | examples-spark1/src/main/scala/SimpleSQLContext.scala | Scala | apache-2.0 | 543 |
// Dotty run-test fixture (stage 2 of a quoted-macro test): invokes the
// `Index.succ` macro with string-literal types and prints the result, which
// the test harness compares against an expected output. Keep byte-identical.
object Test {
def main(args: Array[String]): Unit = {
println(Index.succ["bar", "foo", ("bar", ("baz", Unit))])
}
}
| lampepfl/dotty | tests/run-macros/quote-impure-by-name/quoted_2.scala | Scala | apache-2.0 | 125 |
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.nlp.annotators.classifier.dl
import com.johnsnowlabs.ml.tensorflow.{ClassifierDatasetEncoder, ClassifierDatasetEncoderParams, ReadTensorflowModel, TensorflowSentiment, TensorflowWrapper, WriteTensorflowModel}
import com.johnsnowlabs.nlp.{Annotation, AnnotatorModel, AnnotatorType, HasPretrained, ParamsAndFeaturesReadable, ParamsAndFeaturesWritable, HasSimpleAnnotate}
import com.johnsnowlabs.nlp.AnnotatorType.{CATEGORY, SENTENCE_EMBEDDINGS}
import com.johnsnowlabs.nlp.annotators.ner.Verbose
import com.johnsnowlabs.nlp.pretrained.ResourceDownloader
import com.johnsnowlabs.nlp.serialization.StructFeature
import com.johnsnowlabs.storage.HasStorageRef
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.ml.param.{FloatParam, IntArrayParam, Param, StringArrayParam}
import org.apache.spark.ml.util.Identifiable
import org.apache.spark.sql.{Dataset, SparkSession}
/** SentimentDL, an annotator for multi-class sentiment analysis.
*
* In natural language processing, sentiment analysis is the task of classifying the affective state or subjective view
* of a text. A common example is if either a product review or tweet can be interpreted positively or negatively.
*
* This is the instantiated model of the [[SentimentDLApproach]].
* For training your own model, please see the documentation of that class.
*
* Pretrained models can be loaded with `pretrained` of the companion object:
* {{{
* val sentiment = SentimentDLModel.pretrained()
* .setInputCols("sentence_embeddings")
* .setOutputCol("sentiment")
* }}}
* The default model is `"sentimentdl_use_imdb"`, if no name is provided. It is english sentiment analysis trained on
* the IMDB dataset.
* For available pretrained models please see the [[https://nlp.johnsnowlabs.com/models?task=Sentiment+Analysis Models Hub]].
*
* For extended examples of usage, see the [[https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Public/5.Text_Classification_with_ClassifierDL.ipynb Spark NLP Workshop]]
* and the [[https://github.com/JohnSnowLabs/spark-nlp/blob/master/src/test/scala/com/johnsnowlabs/nlp/annotators/classifier/dl/SentimentDLTestSpec.scala SentimentDLTestSpec]].
*
* ==Example==
* {{{
* import spark.implicits._
* import com.johnsnowlabs.nlp.base.DocumentAssembler
* import com.johnsnowlabs.nlp.annotator.UniversalSentenceEncoder
* import com.johnsnowlabs.nlp.annotators.classifier.dl.SentimentDLModel
* import org.apache.spark.ml.Pipeline
*
* val documentAssembler = new DocumentAssembler()
* .setInputCol("text")
* .setOutputCol("document")
*
* val useEmbeddings = UniversalSentenceEncoder.pretrained()
* .setInputCols("document")
* .setOutputCol("sentence_embeddings")
*
* val sentiment = SentimentDLModel.pretrained("sentimentdl_use_twitter")
* .setInputCols("sentence_embeddings")
* .setThreshold(0.7F)
* .setOutputCol("sentiment")
*
* val pipeline = new Pipeline().setStages(Array(
* documentAssembler,
* useEmbeddings,
* sentiment
* ))
*
* val data = Seq(
* "Wow, the new video is awesome!",
* "bruh what a damn waste of time"
* ).toDF("text")
* val result = pipeline.fit(data).transform(data)
*
* result.select("text", "sentiment.result").show(false)
* +------------------------------+----------+
* |text |result |
* +------------------------------+----------+
* |Wow, the new video is awesome!|[positive]|
* |bruh what a damn waste of time|[negative]|
* +------------------------------+----------+
* }}}
*
* @see [[ClassifierDLModel]] for general single-class classification
* @see [[MultiClassifierDLModel]] for general multi-class classification
* @param uid required uid for storing annotator to disk
* @groupname anno Annotator types
* @groupdesc anno Required input and expected output annotator types
* @groupname Ungrouped Members
* @groupname param Parameters
* @groupname setParam Parameter setters
* @groupname getParam Parameter getters
* @groupname Ungrouped Members
* @groupprio param 1
* @groupprio anno 2
* @groupprio Ungrouped 3
* @groupprio setParam 4
* @groupprio getParam 5
* @groupdesc param A list of (hyper-)parameter keys this annotator can take. Users can set and get the parameter values through setters and getters, respectively.
*/
class SentimentDLModel(override val uid: String)
extends AnnotatorModel[SentimentDLModel] with HasSimpleAnnotate[SentimentDLModel]
with WriteTensorflowModel
with HasStorageRef
with ParamsAndFeaturesWritable {
def this() = this(Identifiable.randomUID("SentimentDLModel"))
/** Input Annotator Types: SENTENCE_EMBEDDINGS
 *
 * @group anno
 */
override val inputAnnotatorTypes: Array[AnnotatorType] = Array(SENTENCE_EMBEDDINGS)
/** Output Annotator Types: CATEGORY
 *
 * @group anno
 */
override val outputAnnotatorType: String = CATEGORY
/** The minimum threshold for the final result otherwise it will be either neutral or the value set in thresholdLabel (Default: `0.6f`)
 *
 * @group param
 */
// NOTE(review): the trailing ".s" in the description string below looks like a typo,
// but it is a runtime Param description — left untouched here.
val threshold = new FloatParam(this, "threshold", "The minimum threshold for the final result otherwise it will be either neutral or the value set in thresholdLabel.s")
/** In case the score is less than threshold, what should be the label (Default: `"neutral"`)
 *
 * @group param
 */
val thresholdLabel = new Param[String](this, "thresholdLabel", "In case the score is less than threshold, what should be the label. Default is neutral.")
/** @group setParam */
def setThreshold(threshold: Float): SentimentDLModel.this.type = set(this.threshold, threshold)
/** @group setParam */
def setThresholdLabel(label: String): SentimentDLModel.this.type = set(this.thresholdLabel, label)
/** @group getParam */
def getThreshold: Float = $(this.threshold)
/** @group getParam */
def getThresholdLabel: String = $(this.thresholdLabel)
/** ConfigProto from tensorflow, serialized into byte array. Get with config_proto.SerializeToString()
 *
 * @group param
 * */
val configProtoBytes = new IntArrayParam(
this,
"configProtoBytes",
"ConfigProto from tensorflow, serialized into byte array. Get with config_proto.SerializeToString()"
)
/** @group setParam */
def setConfigProtoBytes(
bytes: Array[Int]
): SentimentDLModel.this.type = set(this.configProtoBytes, bytes)
def getConfigProtoBytes: Option[Array[Byte]] =
get(this.configProtoBytes).map(_.map(_.toByte))
/** Dataset Params
 *
 * @group param
 */
val datasetParams = new StructFeature[ClassifierDatasetEncoderParams](this, "datasetParams")
/** @group setParam */
def setDatasetParams(params: ClassifierDatasetEncoderParams): SentimentDLModel.this.type =
set(this.datasetParams, params)
/** Labels that the model was trained with
 *
 * @group param
 */
val classes = new StringArrayParam(this, "classes", "keep an internal copy of classes for Python")
// Spark-broadcast TensorFlow wrapper; populated at most once per model
// instance via setModelIfNotSet (requires datasetParams to be set first).
private var _model: Option[Broadcast[TensorflowSentiment]] = None
/** @group setParam */
def setModelIfNotSet(spark: SparkSession, tf: TensorflowWrapper): this.type = {
if (_model.isEmpty) {
require(datasetParams.isSet, "datasetParams must be set before usage")
val encoder = new ClassifierDatasetEncoder(datasetParams.get.get)
_model = Some(
spark.sparkContext.broadcast(
new TensorflowSentiment(
tf,
encoder,
Verbose.Silent
)
)
)
}
this
}
/** @group getParam */
def getModelIfNotSet: TensorflowSentiment = _model.get.value
/** get the tags used to trained this NerDLModel
 *
 * @group getParam
 * */
def getClasses: Array[String] = {
val encoder = new ClassifierDatasetEncoder(datasetParams.get.get)
set(classes, encoder.tags)
encoder.tags
}
setDefault(
threshold -> 0.6f,
thresholdLabel -> "neutral"
)
// Fail fast if the embeddings column was produced by a different storage ref
// than the one this model expects.
override protected def beforeAnnotate(dataset: Dataset[_]): Dataset[_] = {
validateStorageRef(dataset, $(inputCols), AnnotatorType.SENTENCE_EMBEDDINGS)
dataset
}
/**
 * takes a document and annotations and produces new annotations of this annotator's annotation type
 *
 * @param annotations Annotations that correspond to inputAnnotationCols generated by previous annotators if any
 * @return any number of annotations processed for every input annotation. Not necessary one to one relationship
 */
override def annotate(annotations: Seq[Annotation]): Seq[Annotation] = {
// Group embeddings by their originating sentence (metadata key "sentence"),
// sorted by sentence index so predictions line up with input order.
val sentences = annotations
.filter(_.annotatorType == SENTENCE_EMBEDDINGS)
.groupBy(_.metadata.getOrElse[String]("sentence", "0").toInt)
.toSeq
.sortBy(_._1)
if (sentences.nonEmpty)
getModelIfNotSet.predict(sentences, getConfigProtoBytes, $(threshold), $(thresholdLabel))
else Seq.empty[Annotation]
}
// Persist the TensorFlow graph alongside the Spark ML params on save.
override def onWrite(path: String, spark: SparkSession): Unit = {
super.onWrite(path, spark)
writeTensorflowModel(
path,
spark,
getModelIfNotSet.tensorflow,
"_sentimentdl",
SentimentDLModel.tfFile,
configProtoBytes = getConfigProtoBytes
)
}
}
/** Pretrained-model resolution for [[SentimentDLModel]]; default model is `sentimentdl_use_imdb`. */
trait ReadablePretrainedSentimentDL
extends ParamsAndFeaturesReadable[SentimentDLModel]
with HasPretrained[SentimentDLModel] {
override val defaultModelName: Some[String] = Some("sentimentdl_use_imdb")
override def pretrained(name: String, lang: String, remoteLoc: String): SentimentDLModel = {
ResourceDownloader.downloadModel(SentimentDLModel, name, Option(lang), remoteLoc)
}
/** Java compliant-overrides */
override def pretrained(): SentimentDLModel = pretrained(defaultModelName.get, defaultLang, defaultLoc)
override def pretrained(name: String): SentimentDLModel = pretrained(name, defaultLang, defaultLoc)
override def pretrained(name: String, lang: String): SentimentDLModel = pretrained(name, lang, defaultLoc)
}
/** Re-attaches the serialized TensorFlow graph when a saved model is loaded. */
trait ReadSentimentDLTensorflowModel extends ReadTensorflowModel {
this: ParamsAndFeaturesReadable[SentimentDLModel] =>
override val tfFile: String = "sentimentdl_tensorflow"
def readTensorflow(instance: SentimentDLModel, path: String, spark: SparkSession): Unit = {
val tf = readTensorflowModel(path, spark, "_sentimentdl_tf", initAllTables = true)
instance.setModelIfNotSet(spark, tf)
// This allows for Python to access getClasses function
val encoder = new ClassifierDatasetEncoder(instance.datasetParams.get.get)
instance.set(instance.classes, encoder.tags)
}
addReader(readTensorflow)
}
/**
 * This is the companion object of [[SentimentDLModel]]. Please refer to that class for the documentation.
 */
object SentimentDLModel extends ReadablePretrainedSentimentDL with ReadSentimentDLTensorflowModel
| JohnSnowLabs/spark-nlp | src/main/scala/com/johnsnowlabs/nlp/annotators/classifier/dl/SentimentDLModel.scala | Scala | apache-2.0 | 11,460 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtInteger}
import uk.gov.hmrc.ct.computations.calculations.AdjustedTradingProfitCalculator
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
/** HMRC corporation-tax computations box CP256: adjusted trading profit or loss (integer). */
case class CP256(value: Int) extends CtBoxIdentifier(name = "Adjusted Trading Or Loss") with CtInteger
/** Derives CP256 from box CP117 using the non-optional adjusted-trading-profit calculation. */
object CP256 extends Calculated[CP256, ComputationsBoxRetriever] with AdjustedTradingProfitCalculator {
override def calculate(fieldValueRetriever: ComputationsBoxRetriever): CP256 = {
adjustedTradingProfitCalculationNonOptional(cp117 = fieldValueRetriever.retrieveCP117())
}
}
| ahudspith-equalexperts/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP256.scala | Scala | apache-2.0 | 1,251 |
package com.aktaion.ml.weka.randomforest
import java.io.File
import _root_.weka.core.converters.ArffSaver
import _root_.weka.core.Instances
import _root_.weka.filters.unsupervised.instance.Randomize
import _root_.weka.filters.supervised.instance.Resample
import _root_.weka.classifiers.trees.RandomForest
import _root_.weka.classifiers.meta.CostSensitiveClassifier
import _root_.weka.classifiers.{CostMatrix, Evaluation}
import _root_.weka.filters.Filter
import com.aktaion.common.SimpleTestTools
class CrossValidationExample extends SimpleTestTools {
//use https://weka.wikispaces.com/Use+Weka+in+your+Java+code
ignore("firstTest") {
//code from http://www.codemiles.com/weka-examples/weka-java-code-for-random-forest-cross-validation-t11128.html
val numFolds: Double = 10.0d
var precisionOne: Double = 0.0d
var recallOne: Double = 0.0d
var fmeansureOne: Double = 0.0d
var precisionTwo: Double = 0.0d
var recallTwo: Double = 0.0d
var fmeansureTwo: Double = 0.0d
var ROCone: Double = 0.0d
var ROCtwo: Double = 0.0d
val PRCone: Double = 0.0d
val PRCtwo: Double = 0.0d
// ArffSaver saverTets = new ArffSaver();
val br = getWekaReaderFromResourcePath("/ml.weka/synthetic_train.arff")
val saverTraining: ArffSaver = new ArffSaver
var trainData: Instances = new Instances(br)
trainData.setClassIndex(trainData.numAttributes - 1)
br.close
val randFilterMain: Randomize = new Randomize
randFilterMain.setInputFormat(trainData)
trainData = Filter.useFilter(trainData, randFilterMain)
val mySize: Int = (trainData.numInstances / numFolds).toInt
var begin: Int = 0
var end: Int = mySize - 1
System.out.println("Total mySize of instances" + trainData.numInstances + " , flod mySize=" + mySize)
var i: Int = 1
while (i <= numFolds) {
{
System.out.println("Iteration # " + i + " Begin =" + begin + " , end=" + end)
val tempTraining: Instances = new Instances(trainData)
val tempTesting: Instances = new Instances(trainData, begin, (end - begin))
var j: Int = 0
while (j < (end - begin)) {
{tempTraining.delete(begin)}
({j += 1;j - 1})
}
val resample: Resample = new Resample
resample.setBiasToUniformClass(0.5f)
resample.setInvertSelection(false)
resample.setNoReplacement(false)
resample.setRandomSeed(1)
resample.setInputFormat(tempTraining)
System.out.println("Number of instances before filter " + tempTraining.numInstances)
val resmapleTempTraining: Instances = Filter.useFilter(tempTraining, resample)
System.out.println("Number of instances after filter " + resmapleTempTraining.numInstances)
val randomForest: RandomForest = new RandomForest
randomForest.setNumTrees(100)
System.out.println("Started building the model #" + i)
val costSensitiveClassifier: CostSensitiveClassifier = new CostSensitiveClassifier
val costMatrix: CostMatrix = new CostMatrix(2)
costMatrix.setCell(1, 0, 2d)
costSensitiveClassifier.setClassifier(randomForest)
costSensitiveClassifier.setCostMatrix(costMatrix)
costSensitiveClassifier.buildClassifier(resmapleTempTraining)
saverTraining.setInstances(resmapleTempTraining)
saverTraining.setFile(new File("/Users/User/Aktaion/wekaData/" + i + "_training.arff"))
// saverTets.setInstances(tempTesting)
// saverTets.setFile(new File("D:\\SumCost\\eclipse\\" + i + "_testing.arff"))
saverTraining.writeBatch
// saverTets.writeBatch
System.out.println("Done with building the model")
val evaluation: Evaluation = new Evaluation(tempTesting)
evaluation.evaluateModel(costSensitiveClassifier, tempTesting)
System.out.println("Results For Class -1- ")
System.out.println("Precision= " + evaluation.precision(0))
System.out.println("Recall= " + evaluation.recall(0))
System.out.println("F-measure= " + evaluation.fMeasure(0))
System.out.println("ROC= " + evaluation.areaUnderROC(0))
System.out.println("Results For Class -2- ")
System.out.println("Precision= " + evaluation.precision(1))
System.out.println("Recall= " + evaluation.recall(1))
System.out.println("F-measure= " + evaluation.fMeasure(1))
System.out.println("ROC= " + evaluation.areaUnderROC(1))
precisionOne += evaluation.precision(0)
recallOne += evaluation.recall(0)
fmeansureOne += evaluation.fMeasure(0)
precisionTwo += evaluation.precision(1)
recallTwo += evaluation.recall(1)
fmeansureTwo += evaluation.fMeasure(1)
ROCone += evaluation.areaUnderROC(0)
ROCtwo += evaluation.areaUnderROC(1)
begin = end + 1
end += mySize
if (i == (numFolds - 1)) {
end = trainData.numInstances
}
}
({
i += 1;
i - 1
})
}
System.out.println("####################################################")
System.out.println("Results For Class -1- ")
System.out.println("Precision= " + precisionOne / numFolds)
System.out.println("Recall= " + recallOne / numFolds)
System.out.println("F-measure= " + fmeansureOne / numFolds)
System.out.println("ROC= " + ROCone / numFolds)
System.out.println("PRC= " + PRCone / numFolds)
System.out.println("Results For Class -2- ")
System.out.println("Precision= " + precisionTwo / numFolds)
System.out.println("Recall= " + recallTwo / numFolds)
System.out.println("F-measure= " + fmeansureTwo / numFolds)
System.out.println("ROC= " + ROCtwo / numFolds)
System.out.println("PRC= " + PRCtwo / numFolds)
}
}
| jzadeh/Aktaion | src/test/scala/com.aktaion/ml/weka/randomforest/CrossValidationExample.scala | Scala | apache-2.0 | 5,808 |
package io.protoless.fields
import io.protoless.tests.ProtolessSuite
import shapeless.test.illTyped
// Value class (extends AnyVal): eligible for protoless's dedicated AnyVal codec derivation.
case class ValueClass(x: Int) extends AnyVal
// Plain case class: must NOT pick up the value-class codecs (checked via illTyped below).
case class NotValueClass(x: Int)
/** Verifies that protoless derives field codecs for value classes (AnyVal wrappers)
  * and, conversely, that plain case classes do not compile against the
  * value-class derivation (checked with shapeless's `illTyped` macro).
  */
class ValueClassClassSuite extends ProtolessSuite {

  // Protobuf varint encoding of 150 tagged as field 1 (0x08 = field 1, wire type 0).
  private val bytes = Seq(0x08, 0x96, 0x01).map(_.toByte).toArray // number 150 at field 1
  private val valueClass = ValueClass(150)

  "value class must be decoded" in {
    val dec: FieldDecoder[ValueClass] = FieldDecoder[ValueClass]
    dec.decode(bytes, 1) must ===(Right(valueClass))
  }

  "value class must be encoded" in {
    val enc: FieldEncoder[ValueClass] = FieldEncoder[ValueClass]
    enc.encodeAsBytes(1, valueClass) must ===(bytes)
  }

  "Class not inheriting AnyVal must not be decoded like ValueClass" in {
    illTyped("""FieldDecoder[NotValueClass]""")
    illTyped("""FieldDecoder.decodeValueClass.read(bytes, 1)""")
  }

  "Class not inheriting AnyVal must not be encoded like ValueClass" in {
    illTyped("""FieldEncoder[NotValueClass]""")
    // Fixed typo: was `encodeAsByte`, which made illTyped pass trivially (no
    // such method exists at all) instead of exercising the intended failure of
    // the value-class derivation for a non-AnyVal type. The real method name
    // is `encodeAsBytes` (see the positive test above).
    illTyped("""FieldEncoder.encodeValueClass.encodeAsBytes(1, NotValueClass(1))""")
  }
}
| julien-lafont/protoless | modules/core/src/test/scala/io/protoless/fields/ValueClassSuite.scala | Scala | apache-2.0 | 1,093 |
/*
* Happy Melly Teller
* Copyright (C) 2013 - 2016, Happy Melly http://www.happymelly.com
*
* This file is part of the Happy Melly Teller.
*
* Happy Melly Teller is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Happy Melly Teller is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Happy Melly Teller. If not, see <http://www.gnu.org/licenses/>.
*
* If you have questions concerning this license or the applicable additional
* terms, you may contact by email Sergey Kotlov, sergey.kotlov@happymelly.com or
* in writing Happy Melly One, Handelsplein 37, Rotterdam, The Netherlands, 3071 PR
*/
package models.database.core.payment
import com.github.tototoshi.slick.MySQLJodaSupport._
import models.core.payment.CreditCard
import org.joda.time.DateTime
import slick.driver.JdbcProfile
/** Slick table mapping for the `CREDIT_CARD` table.
  *
  * Cake-pattern trait: the concrete JDBC profile is supplied by the class
  * that mixes this in via the abstract `driver` member.
  */
private[models] trait CreditCardTable {

  // Concrete Slick profile (e.g. MySQL) injected by the implementing class.
  protected val driver: JdbcProfile

  import driver.api._

  class CreditCards(tag: Tag) extends Table[CreditCard](tag, "CREDIT_CARD") {

    def id = column[Long]("ID", O.PrimaryKey, O.AutoInc)
    def customerId = column[Long]("CUSTOMER_ID")
    // Card identifier in the external payment provider — presumably Stripe-style; verify.
    def remoteId = column[String]("REMOTE_ID")
    def brand = column[String]("BRAND", O.Length(10, varying = true))
    // Fixed CHAR(4) — presumably only the last four digits are stored; confirm with callers.
    def number = column[String]("NUMBER", O.Length(4, varying = false))
    def expMonth = column[Int]("EXP_MONTH")
    def expYear = column[Int]("EXP_YEAR")
    def active = column[Boolean]("ACTIVE")
    def created = column[DateTime]("CREATED")

    // Tuple shape of a full row; `id` is optional because it is auto-incremented.
    type CreditCardFields = (Option[Long], Long, String, String, String, Int, Int, Boolean, DateTime)

    // Bidirectional mapping between the row tuple and the CreditCard model.
    def * = (id.?, customerId, remoteId, brand, number, expMonth, expYear, active, created) <> (
      (c: CreditCardFields) ⇒ CreditCard(c._1, c._2, c._3, c._4, c._5, c._6, c._7, c._8, c._9),
      (c: CreditCard) ⇒ Some(c.id, c.customerId, c.remoteId, c.brand, c.number, c.expMonth, c.expYear,
        c.active, c.created))
  }
}
package com.containant.casestudies.util
/** Per-level promotion weight table for the skip list.
  *
  * `apply(backlevel)` returns the weight for a lane: index 0 is always 1,
  * indices inside `values` return the stored weight, and anything past the
  * end of the list contributes 0.
  */
case class Prob(values: List[Int]) {
  def apply(backlevel: Int): Double = backlevel match {
    case 0                          => 1
    case b if b < values.length     => values(b)
    case _                          => 0
  }
}
/** A skip list of Ints with `bottom + 1` lanes. Level 0 is the topmost
  * (sparsest) lane and level `bottom` is the base lane: `insert` descends
  * from level 0 toward `bottom`, then links the new node from the base lane
  * upward. The RNG seed is fixed, so a given insertion sequence always
  * produces the same structure.
  */
class SkipList(val bottom: Int, prob: Prob) {
  // Fixed seed: deterministic structure for reproducible experiments.
  val rng: java.util.Random = new java.util.Random(0xDEADBEEF)

  override def toString: String = "SkipList with " + (bottom+1) + " levels, prob: " + (prob.values.take(bottom+1) mkString ",")

  // Debug dump: prints the node values reachable on each lane, one line per lane.
  def printContents: Unit = {
    for (level <- 0 to bottom) {
      printf("Level %d: ",level)
      var current = head
      while( current.next(level) != null ) {
        printf("%d ", current.value)
        current = current.next(level)
      }
      println(current.value)
    }
  }

  // One node: a value plus one forward pointer per lane (null = end of lane).
  class SkipNode() {
    var value: Int = Integer.MIN_VALUE  // MIN_VALUE doubles as the head sentinel's value
    var next: Array[SkipNode] = Array.ofDim(bottom+1)
    for( x <- 0 until next.length ) next.update(x, null)
  }

  // Head sentinel; its MIN_VALUE compares below any insertable value.
  var head: SkipNode = new SkipNode()

  def insert(value: Int): Unit = {
    var level: Int = 0
    // lastVisited(l) = last node strictly before `value` on lane l.
    // (The init loop starts at 1, but every slot is overwritten during the
    // descent below before it is read, so index 0 being null is harmless.)
    var lastVisited: Array[SkipNode] = Array.ofDim(bottom+1)
    for( x <- 1 until lastVisited.length ) lastVisited(x) = head
    var current: SkipNode = head
    // find insertion point: walk right on each lane, then drop one lane down
    while(level < bottom) {
      // advance to insertion point
      while( current.next(level) != null && current.next(level).value < value ) {
        current = current.next(level)
      }
      // move down
      lastVisited.update(level, current)
      level = level + 1
    }
    // same scan on the base lane (level == bottom)
    while( current.next(level) != null && current.next(level).value < value ) {
      current = current.next(level)
    }
    lastVisited.update(level, current)
    // if the value is indeed new (duplicates are silently ignored)
    if (current.next(level) == null || value != current.next(level).value) {
      // create new node
      val newNode = new SkipNode()
      newNode.value = value
      // update links, from the base lane (level == bottom) upward.
      // `backlevel` counts lanes linked so far; `level` indexes the lane being
      // spliced. prob(0) == 1, so 1/prob(0) == 1 and the base link is always made.
      // NOTE(review): when prob(backlevel) == 0 (weights list shorter than the
      // lane count), 1/prob(backlevel) is Double.PositiveInfinity, so the node
      // is promoted unconditionally to that lane — confirm this is intended.
      var backlevel = 0
      val threshold = rng.nextDouble()
      while (backlevel <= bottom && threshold < 1/prob(backlevel)) {
        current = lastVisited(level)
        newNode.next.update(level, current.next(level))
        current.next.update(level, newNode)
        backlevel = backlevel + 1
        level = level - 1
      }
      // end insert
    }
  }

  // NOTE(review): this looks buggy in two ways, mirror-imaged in findTime:
  //   1. the inner condition tests `current.value < value` (the node we are
  //      standing on), not `current.next(level).value < value`, so the walk
  //      can step one node past the target on each lane;
  //   2. the outer loop runs `level < bottom`, so the base lane (level ==
  //      bottom) is never scanned at all.
  // Verify against the intended skip-list search before relying on results.
  def find(value: Int): Boolean = {
    var level: Int = 0
    var current: SkipNode = head
    while(level < bottom) {
      // advance to detection point
      while( current.next(level) != null && current.value < value ) {
        current = current.next(level)
      }
      // move down a level
      level = level + 1
    }
    current.value == value
  }

  // Same traversal as `find`, but returns the number of pointer hops taken
  // (a cost measure) instead of whether the value was located.
  // NOTE(review): inherits the same two suspicious conditions as `find` above.
  def findTime(value: Int): Int = {
    var iterations = 0
    var level: Int = 0
    var current: SkipNode = head
    while(level < bottom) {
      // advance to detection point
      while( current.next(level) != null && current.value < value ) {
        current = current.next(level)
        iterations = iterations + 1
      }
      // move down a level
      iterations = iterations + 1
      level = level + 1
    }
    iterations
  }
}
| zaklogician/ContainAnt-devel | src/main/scala/com/containant/casestudies/util/SkipList.scala | Scala | bsd-3-clause | 3,079 |
package com.krrrr38.play.autodoc
import java.io.File
import play.api.libs.json.Json
import play.api.libs.iteratee.{ Enumerator, Iteratee }
import play.api.mvc.{ Results, Action, Result, ResponseHeader }
import play.api.test._
import play.api.test.Helpers._
import org.scalatest._
import org.scalatestplus.play._
import scala.concurrent.Await
import scala.concurrent.duration.Duration
/** Exercises AutodocHelpers.autodoc against a fake Play application: routes
  * stub endpoints, fires requests through the autodoc wrapper, and asserts on
  * the Markdown document it writes under doc/.
  */
class AutodocHelpersSpec extends FunSpec with Matchers with BeforeAndAfterAll with OneServerPerSuite {

  // Where autodoc writes the document for this caller (derived from the class name).
  val documentPath = "doc/com/krrrr38/play/autodoc/AutodocHelpers.md"

  // Caller identity autodoc uses to pick the output file location.
  implicit val caller = com.krrrr38.play.autodoc.AutodocHelpers.AutodocCaller(this.getClass)

  override implicit lazy val app: FakeApplication = FakeApplication(withRoutes = {
    case ("GET", "/api/users/yuno") =>
      Action { req =>
        Results.Ok(Json.obj("user" -> Json.obj("name" -> "yuno", "height" -> 144)))
      }
    case ("GET", "/api/bytes") =>
      Action { req =>
        // Raw byte-array body with a custom content type, to exercise the
        // responseBodyParser hook in the second test below.
        Result(
          header = ResponseHeader(200, Map(CONTENT_TYPE -> "application/x-bytes")),
          body = Enumerator("bytes-data".getBytes)
        )
      }
    case _ => throw new IllegalStateException("invalid routes")
  })

  override protected def beforeAll(): Unit = {
    // Document generation is gated on this system property.
    System.setProperty("play.autodoc", "1")
  }

  override protected def afterAll(): Unit = {
    // Remove generated docs and disable generation again.
    delete(new File("doc"))
    System.setProperty("play.autodoc", "0")
  }

  // Recursively deletes a file tree (java.io.File has no recursive delete).
  def delete(file: File) {
    if (file.isDirectory) Option(file.listFiles).map(_.toList).getOrElse(Nil).foreach(delete(_))
    file.delete
  }

  describe("AutodocHelpers#autodoc") {
    it("generate document") {
      val req = FakeRequest("GET", "/api/users/yuno")
        .withHeaders("X-Secret-Key" -> "will be hide")
        .withHeaders("X-Api-Key" -> "will be converted")
        .withHeaders("X-Public-Key" -> "PUBLIC_KEY")
      val res = AutodocHelpers.autodoc(
        title = "GET /api/users/$name",
        // Converter contract: None drops the header from the generated doc,
        // Some(v) rewrites its value, identity keeps it as-is.
        requestHeaderConverter = {
          case ("X-Secret-Key", v) => None
          case ("X-Api-Key", v) => Some("YOUR_API_KEY")
          case (k, v) => Some(v)
        }
      ).route(req).get
      status(res) shouldBe OK
      // NOTE(review): fixed sleep assumes the doc file is flushed within 100ms.
      Thread.sleep(100)
      val doc = new File(documentPath)
      doc.exists() shouldBe true
      val contents = scala.io.Source.fromFile(doc).getLines().mkString("\\n")
      contents should include("## GET /api/users/$name")
      contents should not include ("X-Secret-Key")
      contents should include("X-Api-Key: YOUR_API_KEY")
      contents should include("X-Public-Key: PUBLIC_KEY")
    }

    it("generate document in the case of byte array response") {
      val req = FakeRequest("GET", "/api/bytes")
      val res = AutodocHelpers.autodoc(
        title = "GET /api/bytes",
        // Custom parser: drain the Enumerator body and decode it as UTF-8 so
        // the non-JSON payload still renders in the document.
        responseBodyParser = (result: Result) => {
          new String(Await.result(result.body |>>> Iteratee.consume[Array[Byte]](), Duration.Inf), "utf-8")
        }
      ).route(req).get
      status(res) shouldBe OK
      Thread.sleep(100)
      val doc = new File(documentPath)
      doc.exists() shouldBe true
      val contents = scala.io.Source.fromFile(doc).getLines().mkString("\\n")
      contents should include("## GET /api/bytes")
      contents should include("bytes-data")
    }
  }
}
| krrrr38/play-autodoc | play-autodoc-core/src/test/scala/com/krrrr38/play/autodoc/AutodocHelpersSpec.scala | Scala | mit | 3,243 |
package io.github.hjuergens.util
import org.specs2.mutable._
/** specs2 specification showing the same three "Hello world" examples written
  * twice: once in acceptance style (the s2 interpolated `is` fragment) and
  * once in mutable/block style further down.
  */
class RingSpec extends Specification { override def is = s2"""
This is a specification for the 'Hello world' string
The 'Hello world' string should
contain 11 characters $e1
start with 'Hello' $e2
end with 'world' $e3
"""

  // Example bodies referenced from the s2 fragment above.
  def e1 = "Hello world" must haveSize(11)
  def e2 = "Hello world" must startWith("Hello")
  def e3 = "Hello world" must endWith("world")

  // The same expectations again, in specs2's mutable/block style.
  "This is a specification for the 'Hello world' string".txt
  "The 'Hello world' string should" >> {
    "contain 11 characters" >> {
      "Hello world" must haveSize(11)
    }
    "start with 'Hello'" >> {
      "Hello world" must startWith("Hello")
    }
    "end with 'world'" >> {
      "Hello world" must endWith("world")
    }
  }
}
/*
class PluralSpec extends Specification { def is = s2"""
Names can be pluralized depending on a quantity
${ "apple".plural(1) === "apple" }
${ "apple".plural(2) === "apples" }
${ "foot".plural(2) === "feet" }
${ 1.qty("apple") === "1 apple" }
${ 2.qty("apple") === "2 apples" }
${ 2.qty("foot") === "2 feet" }
"""
}
import org.specs2.mutable._
object ArithmeticSpec2 extends Specification {
"Arithmetic2" should {
"add" in {
"two numbers" in {
1 + 1 mustEqual 2
}
"three numbers" in {
1 + 1 + 1 mustEqual 3
}
}
}
}
*/ | hjuergens/date-parser | date-rule-antlr/src/integration-test/scala/io/github/hjuergens/util/RingSpec.scala | Scala | apache-2.0 | 1,849 |
/*
* Copyright 2008 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package ${package} {
package model {
/** Enumeration of book genres. Each value carries a name and a display
  * description (the two-argument `Value` comes from the `Enumv` mixin).
  */
object Genre extends Enumeration with Enumv {
  val Mystery = Value("Mystery", "Mystery")
  val SciFi = Value("SciFi", "SciFi")
  val Classic = Value("Classic", "Classic")
  val Childrens = Value("Childrens", "Childrens")
  val Horror = Value("Horror", "Horror")
  val Poetry = Value("Poetry", "Poetry")
  // Fallback for stored values that match no known genre.
  val unknown = Value("Unknown", "Unknown genre")
}
// Persistence mapping for Genre values — presumably a JPA/Hibernate custom
// type via EnumvType; confirm against the Enumv support code.
class GenreType extends EnumvType(Genre) {}
}
}
| wsaccaco/lift | archetypes/lift-archetype-jpa-basic/src/main/resources/archetype-resources/spa/src/main/scala/model/Genre.scala | Scala | apache-2.0 | 1,053 |
package vtpassim
import org.apache.spark.sql.SparkSession
import scala.collection.mutable.StringBuilder
import scala.util.Try
import scala.xml.pull._
/** Spark job that splits TEI XML documents into per-page text records.
  *
  * args(0): input directory (searched recursively) of *.xml TEI files
  * args(1): output path for the resulting DataFrame
  *
  * Pages are delimited by &lt;pb/&gt; milestone elements; the character data
  * collected between consecutive milestones (up to &lt;/text&gt;) becomes one
  * output row of (id, book, seq, text).
  */
object TEIPages {
  def main(args: Array[String]) {
    val spark = SparkSession.builder().appName("TEIPages Import").getOrCreate()
    import spark.implicits._
    // Let binaryFiles pick up XML files in nested input directories.
    spark.sparkContext.hadoopConfiguration
      .set("mapreduce.input.fileinputformat.input.dir.recursive", "true")
    spark.sparkContext.binaryFiles(args(0), spark.sparkContext.defaultParallelism)
      .filter(_._1.endsWith(".xml"))
      .flatMap( in => {
        // Book id = file name without the .xml extension.
        val fname = new java.io.File(new java.net.URL(in._1).toURI)
        val id = fname.getName.replaceAll(".xml$", "")
        // Mutable per-file state machine: `buf` accumulates the current page's
        // text, `buffering` turns on at the first <pb/>, `seq` numbers pages.
        val buf = new StringBuilder
        var buffering = false
        var seq = -1
        // NOTE(review): the Source opened here is never explicitly closed.
        val pass = new XMLEventReader(scala.io.Source.fromURL(in._1))
        pass.flatMap { event =>
          event match {
            case EvElemStart(_, "pb", attr, _) => {
              // A page break flushes the page accumulated so far (if any)
              // and starts buffering the next one.
              val rec = if ( buffering ) {
                Some((f"$id%s_$seq%04d", id, seq, buf.toString.trim))
              } else {
                None
              }
              buffering = true
              seq += 1
              buf.clear
              rec
            }
            case EvElemEnd(_, "text") => {
              // End of the document body flushes the final page.
              // NOTE(review): unlike the <pb/> case, seq is incremented BEFORE
              // emitting here, so the last page's seq skips one number relative
              // to the content that was buffered — confirm this is intended.
              if ( buffering ) {
                seq += 1
                val text = buf.toString.trim
                buffering = false
                buf.clear
                Some((f"$id%s_$seq%04d", id, seq, text))
              } else {
                None
              }
            }
            case EvText(t) => {
              // Character data: collect only while inside a page.
              if ( buffering ) buf ++= t
              None
            }
            case EvEntityRef(n) => {
              // Re-escape entity references so they survive in the output text.
              if ( buffering ) buf ++= "&" + n + ";"
              None
            }
            case _ => None
          }
        }
      })
      .toDF("id", "book", "seq", "text")
      .write.save(args(1))
    spark.stop()
  }
}
| ViralTexts/vt-passim | src/main/scala/TEIPages.scala | Scala | apache-2.0 | 1,964 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ssttparrangement
import com.google.inject.Inject
import play.api.Logger
import play.api.http.Status
import play.api.mvc.Request
import uk.gov.hmrc.http.{HttpClient, HttpException, HttpResponse, UpstreamErrorResponse}
import uk.gov.hmrc.http.HttpReads.Implicits._
import uk.gov.hmrc.play.bootstrap.config.ServicesConfig
import uk.gov.hmrc.selfservicetimetopay.jlogger.JourneyLogger
import uk.gov.hmrc.selfservicetimetopay.models.TTPArrangement
import views.Views
import scala.concurrent.{ExecutionContext, Future}
/** HTTP connector that submits a time-to-pay arrangement to the
  * time-to-pay-arrangement backend service.
  */
class ArrangementConnector @Inject() (
    servicesConfig: ServicesConfig,
    httpClient:     HttpClient,
    views:          Views)(
    implicit
    ec: ExecutionContext
) {

  private val logger = Logger(getClass)

  import req.RequestSupport._

  // Right = arrangement accepted by the backend; Left = failure carrying an
  // HTTP status code and message.
  type SubmissionResult = Either[SubmissionError, SubmissionSuccess]

  val arrangementURL: String = servicesConfig.baseUrl("time-to-pay-arrangement")

  /** POSTs the arrangement to the backend service.
    *
    * Any successful HTTP response is currently treated as success regardless
    * of status (see the OPS-3930 todo); only a failed Future (transport or
    * HTTP error surfaced as an exception) produces a Left.
    */
  def submitArrangement(ttpArrangement: TTPArrangement)(implicit request: Request[_]): Future[SubmissionResult] = {
    JourneyLogger.info(s"ArrangementConnector.submitArrangements")
    httpClient.POST[TTPArrangement, HttpResponse](s"$arrangementURL/ttparrangements", ttpArrangement).map { _ =>
      Right(SubmissionSuccess()) //todo OPS-3930
    }.recover {
      // NOTE(review): catches every Throwable held by the failed Future;
      // consider scala.util.control.NonFatal so fatal errors are not
      // silently converted into a SubmissionError.
      case e: Throwable =>
        JourneyLogger.info(s"ArrangementConnector.submitArrangements: Error, $e", ttpArrangement)
        onError(e)
    }
  }

  // Maps known HTTP exception types to a (status, message) SubmissionError.
  private def onError(ex: Throwable) = {
    val (code, message) = ex match {
      case e: HttpException => (e.responseCode, e.getMessage)
      case e: UpstreamErrorResponse => (e.reportAs, e.getMessage)
      case e: Throwable => (Status.INTERNAL_SERVER_ERROR, e.getMessage)
    }
    logger.error(s"Failure from DES, code $code and body $message")
    Left(SubmissionError(code, message))
  }
}
| hmrc/self-service-time-to-pay-frontend | app/ssttparrangement/ArrangementConnector.scala | Scala | apache-2.0 | 2,469 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import sbt._
import Keys._
/** Central registry of dependency versions and dependency sequences shared by
  * the sbt build's modules.
  */
object Dependencies {

  // Scala versions the build cross-compiles against; the default is the last entry.
  val crossScalaVersionNumbers = Seq("2.11.8")
  val scalaVersionNumber = crossScalaVersionNumbers.last

  // Version constants, one per third-party library.
  val akkaVersion = "2.5.13"
  val akkaHttpVersion = "10.1.3"
  val hadoopVersion = "2.6.0"
  val hbaseVersion = "1.0.0"
  val commonsHttpVersion = "3.1"
  val commonsLoggingVersion = "1.1.3"
  val commonsLangVersion = "2.6"
  val commonsIOVersion = "2.4"
  val dataReplicationVersion = "0.7"
  val upickleVersion = "0.3.4"
  val junitVersion = "4.12"
  val kafkaVersion = "0.8.2.1"
  val kuduVersion = "1.7.0"
  val jsonSimpleVersion = "1.1"
  val storm09Version = "0.9.6"
  val stormVersion = "0.10.0"
  val slf4jVersion = "1.7.16"
  val guavaVersion = "16.0.1"
  val codahaleVersion = "3.0.2"
  val kryoVersion = "0.4.1"
  val gsCollectionsVersion = "6.2.0"
  val sprayVersion = "1.3.2"
  val sprayJsonVersion = "1.3.1"
  val scalaTestVersion = "2.2.0"
  val scalaCheckVersion = "1.11.3"
  val mockitoVersion = "1.10.17"
  val bijectionVersion = "0.8.0"
  val scalazVersion = "7.1.1"
  val algebirdVersion = "0.9.0"
  val chillVersion = "0.6.0"
  val jedisVersion = "2.9.0"
  val rabbitmqVersion = "3.5.3"
  val calciteVersion = "1.12.0"

  val annotationDependencies = Seq(
    // work around for compiler warnings like
    // "Class javax.annotation.CheckReturnValue not found - continuing with a stub"
    // see https://issues.scala-lang.org/browse/SI-8978
    // marked as "provided" to be excluded from assembling
    "com.google.code.findbugs" % "jsr305" % "3.0.2" % "provided"
  )

  // Dependencies of the core module: logging, Akka (remote/cluster/http),
  // serialization, metrics, plus the test stack.
  val coreDependencies = Seq(
    libraryDependencies ++= Seq(
      "org.slf4j" % "slf4j-api" % slf4jVersion,
      "org.slf4j" % "slf4j-log4j12" % slf4jVersion,
      "commons-lang" % "commons-lang" % commonsLangVersion,
      /**
       * Overrides Netty version 3.10.3.Final used by Akka 2.4.2 to work-around netty hang issue
       * (https://github.com/gearpump/gearpump/issues/2020)
       *
       * Akka 2.4.2 by default use Netty 3.10.3.Final, which has a serious issue which can hang
       * the network. The same issue also happens in version range (3.10.0.Final, 3.10.5.Final)
       * Netty 3.10.6.Final have this issue fixed, however, we find there is a 20% performance
       * drop. So we decided to downgrade netty to 3.8.0.Final (Same version used in akka 2.3.12).
       *
       * @see https://github.com/gearpump/gearpump/pull/2017 for more discussions.
       */
      "io.netty" % "netty" % "3.8.0.Final",
      "com.typesafe.akka" %% "akka-remote" % akkaVersion
        exclude("io.netty", "netty"),
      "com.typesafe.akka" %% "akka-cluster" % akkaVersion,
      "com.typesafe.akka" %% "akka-cluster-tools" % akkaVersion,
      "commons-logging" % "commons-logging" % commonsLoggingVersion,
      "com.typesafe.akka" %% "akka-distributed-data" % akkaVersion,
      "com.typesafe.akka" %% "akka-actor" % akkaVersion,
      "com.typesafe.akka" %% "akka-agent" % akkaVersion,
      "com.typesafe.akka" %% "akka-slf4j" % akkaVersion,
      "com.typesafe.akka" %% "akka-http" % akkaHttpVersion,
      "com.typesafe.akka" %% "akka-http-spray-json" % akkaHttpVersion,
      "org.scala-lang" % "scala-reflect" % scalaVersionNumber,
      "com.github.romix.akka" %% "akka-kryo-serialization" % kryoVersion,
      "com.google.guava" % "guava" % guavaVersion,
      "com.codahale.metrics" % "metrics-graphite" % codahaleVersion
        exclude("org.slf4j", "slf4j-api"),
      "com.codahale.metrics" % "metrics-jvm" % codahaleVersion
        exclude("org.slf4j", "slf4j-api"),
      "com.typesafe.akka" %% "akka-testkit" % akkaVersion % "test",
      "org.scalatest" %% "scalatest" % scalaTestVersion % "test",
      "org.scalacheck" %% "scalacheck" % scalaCheckVersion % "test",
      "org.mockito" % "mockito-core" % mockitoVersion % "test",
      "junit" % "junit" % junitVersion % "test"
    ) ++ annotationDependencies
  )
}
/** Minimal covariant cons-list used to demonstrate subtyping and inference.
  * `+T` makes LIST[Nothing] (i.e. NIL) a subtype of every LIST[T].
  */
abstract class LIST[+T] {
  def isEmpty: Boolean
  def head: T
  def tail: LIST[T]
  // Lower bound U >: T keeps prepend type-sound under covariance.
  def prepend [U >: T] (x: U): LIST[U] = new CONS(x, this)
  // Recursive map: maps the tail first, then prepends the mapped head.
  def map[U](f: T => U): LIST[U] = if (isEmpty) NIL else tail.map(f).prepend(f(head))
}

/** The empty list; typed LIST[Nothing] so it can stand in for any LIST[T]. */
object NIL extends LIST[Nothing] {
  def isEmpty = true
  def head = throw new Error  // no element to return
  def tail = ???              // deliberately unimplemented
}

/** Non-empty cell: head `hd` plus the remainder `tl`. */
class CONS[U](hd: U, tl: LIST[U]) extends LIST[U] {
  def isEmpty = false
  def head: U = hd
  def tail = tl
}
/** Playground: each val demonstrates what type the compiler infers. */
object Inferred {
  def foo[T](x: T): T = x
  val x = foo(1)                    // inferred Int
  val y = foo("abc")                // inferred String
  def bar[U](xs: LIST[U]): LIST[U] = xs
  val n = NIL                       // inferred NIL.type
  val nn = bar(NIL)                 // U inferred as Nothing -> LIST[Nothing]
  val ints: LIST[Int] = NIL prepend 1
  val ints1 = NIL prepend 1 prepend 2  // inferred LIST[Int] via the U >: T bound
  val a = if (1 == 0) NIL else ints    // lub of NIL.type and LIST[Int] is LIST[Int]
  val n2 = scala.collection.immutable.Nil
  val ss2: scala.collection.immutable.List[String] = "abc" :: n2
  val ss3 = "abc" :: n2             // inferred List[String]
  def cl = ((x: Int) => x + 1)      // inferred Int => Int
  val ints2 = ints map (_ + 1)
  val ints3 = new CONS[Int](1, NIL) // explicit type argument
  val ints4 = new CONS(1, NIL)      // type argument inferred from 1
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import scala.concurrent.Await
import scala.concurrent.duration._
import akka.actor._
import akka.pattern.ask
import akka.remote.{DisassociatedEvent, RemotingLifecycleEvent}
import org.apache.spark.{ExecutorAllocationClient, Logging, SparkEnv, SparkException, TaskState}
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages._
import org.apache.spark.util.{ActorLogReceive, SerializableBuffer, AkkaUtils, Utils}
/**
* A scheduler backend that waits for coarse grained executors(coarse grained executors:粗粒度执行器)
* to connect to it through Akka.
* <br>
* <br>scheduler backend 等待coarse grained executors(coarse grained executors:粗粒度执行器)使用Akka连接到它
* <br>
* <br>
* This backend holds onto(保存到) each executor for the duration of the Spark job rather than relinquishing(放弃)
* executors whenever a task is done and asking the scheduler to launch a new executor for each new task.
* Executors may be launched in a variety of ways, such as Mesos tasks for the
* coarse-grained Mesos mode or standalone processes for Spark's standalone deploy mode
* (spark.deploy.*).
*
* <br><br>
* <br>SchedulerBackend主要起到的作用是为Task分配计算资源。
*/
private[spark] class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val actorSystem: ActorSystem)
extends ExecutorAllocationClient with SchedulerBackend with Logging {
// Use an atomic variable to track total number of cores in the cluster for simplicity and speed
var totalCoreCount = new AtomicInteger(0)
// Total number of executors that are currently registered
var totalRegisteredExecutors = new AtomicInteger(0)
val conf = scheduler.sc.conf
private val timeout = AkkaUtils.askTimeout(conf)
/**
* 返回akka系统中消息的大小
*/
private val akkaFrameSize = AkkaUtils.maxFrameSizeBytes(conf)
// Submit tasks only after (registered resources / total expected resources)
// is equal to at least this value, that is double between 0 and 1.
var minRegisteredRatio =
math.min(1, conf.getDouble("spark.scheduler.minRegisteredResourcesRatio", 0))
// Submit tasks after maxRegisteredWaitingTime milliseconds
// if minRegisteredRatio has not yet been reached
val maxRegisteredWaitingTime =
conf.getInt("spark.scheduler.maxRegisteredResourcesWaitingTime", 30000)
val createTime = System.currentTimeMillis()
/**
* executorId ->ExecutorData 的映射
*/
private val executorDataMap = new HashMap[String, ExecutorData]
// Number of executors requested from the cluster manager that have not registered yet
private var numPendingExecutors = 0
private val listenerBus = scheduler.sc.listenerBus
// Executors we have requested the cluster manager to kill that have not died yet
private val executorsPendingToRemove = new HashSet[String]
/**
*
* 客户端称为Driver的原因:因为类名字为:DriverActor
* <br><br>
* 负责 Driver和Worker的Executors通信
* <br>DriverActor负责提交task给Executor
*
* @param sparkProperties
*
* DriverActor定义开始于:96-297行
*
*/
class DriverActor(sparkProperties: Seq[(String, String)]) extends Actor with ActorLogReceive {
override protected def log = CoarseGrainedSchedulerBackend.this.log
private val addressToExecutorId = new HashMap[Address, String]
override def preStart() {
// Listen for remote client disconnection events, since they don't go through Akka's watch()
context.system.eventStream.subscribe(self, classOf[RemotingLifecycleEvent])
/**
* Periodically revive offers to allow delay scheduling to work
* <br>
* 定期恢复延迟调度的任务去执行的时间间隔
*
*/
val reviveInterval = conf.getLong("spark.scheduler.revive.interval", 1000)
import context.dispatcher
context.system.scheduler.schedule(0.millis, reviveInterval.millis, self, ReviveOffers)
}
def receiveWithLogging = {
/**
*
* CoarseGrainedExecutorBackend给DriverActor发的消息
* <br>
*
*/
//TODO CoarseGrainedExecutorBackend给DriverActor发的消息
case RegisterExecutor(executorId, hostPort, cores, logUrls) =>
Utils.checkHostPort(hostPort, "Host port expected " + hostPort)
if (executorDataMap.contains(executorId)) {
//TODO sender为 CoarseGrainedExecutorBackend的 actor
sender ! RegisterExecutorFailed("Duplicate executor ID: " + executorId)
} else {
logInfo("Registered executor: " + sender + " with ID " + executorId)
//TODO sender为 CoarseGrainedExecutorBackend的 actor
sender ! RegisteredExecutor
addressToExecutorId(sender.path.address) = executorId
totalCoreCount.addAndGet(cores)
totalRegisteredExecutors.addAndGet(1)
val (host, _) = Utils.parseHostPort(hostPort)
val data = new ExecutorData(sender, sender.path.address, host, cores, cores, logUrls)
// This must be synchronized because variables mutated
// in this block are read when requesting executors
CoarseGrainedSchedulerBackend.this.synchronized {
executorDataMap.put(executorId, data)
if (numPendingExecutors > 0) {
numPendingExecutors -= 1
logDebug(s"Decremented number of pending executors ($numPendingExecutors left)")
}
}
listenerBus.post(
SparkListenerExecutorAdded(System.currentTimeMillis(), executorId, data))
//TODO 重点: 查看是否有任务需要提交?! (DriverActor提交任务给Executor)
makeOffers()
}
case StatusUpdate(executorId, taskId, state, data) =>
scheduler.statusUpdate(taskId, state, data.value)
if (TaskState.isFinished(state)) {
executorDataMap.get(executorId) match {
case Some(executorInfo) =>
executorInfo.freeCores += scheduler.CPUS_PER_TASK
makeOffers(executorId)
case None =>
// Ignoring the update since we don't know about the executor.
logWarning(s"Ignored task status update ($taskId state $state) " +
"from unknown executor $sender with ID $executorId")
}
}
/**
*
*/
case ReviveOffers =>
//TODO 调用makeOffers向Executor提交Task
makeOffers()
case KillTask(taskId, executorId, interruptThread) =>
executorDataMap.get(executorId) match {
case Some(executorInfo) =>
executorInfo.executorActor ! KillTask(taskId, executorId, interruptThread)
case None =>
// Ignoring the task kill since the executor is not registered.
logWarning(s"Attempted to kill task $taskId for unknown executor $executorId.")
}
case StopDriver =>
sender ! true
context.stop(self)
case StopExecutors =>
logInfo("Asking each executor to shut down")
for ((_, executorData) <- executorDataMap) {
executorData.executorActor ! StopExecutor
}
sender ! true
case RemoveExecutor(executorId, reason) =>
removeExecutor(executorId, reason)
sender ! true
case DisassociatedEvent(_, address, _) =>
addressToExecutorId.get(address).foreach(removeExecutor(_,
"remote Akka client disassociated"))
case RetrieveSparkProps =>
sender ! sparkProperties
}
/**
* Make fake resource offers on all executors
*/
//调用makeOffers向Executor提交Task
def makeOffers() {
//TODO 调用launchTasks向Executor启动Task
launchTasks(scheduler.resourceOffers(executorDataMap.map { case (id, executorData) =>
new WorkerOffer(id, executorData.executorHost, executorData.freeCores)
}.toSeq))
}
// Make fake resource offers on just one executor
def makeOffers(executorId: String) {
val executorData = executorDataMap(executorId)
launchTasks(scheduler.resourceOffers(
Seq(new WorkerOffer(executorId, executorData.executorHost, executorData.freeCores))))
}
/**
 * Launch tasks returned by a set of resource offers.
 *
 * Runs inside the driver actor: serializes each task and either aborts the
 * owning task set (when the serialized task exceeds the Akka frame size) or
 * sends a LaunchTask message to the executor that owns the task.
 *
 * @param tasks one Seq of TaskDescriptions per resource offer
 */
def launchTasks(tasks: Seq[Seq[TaskDescription]]) {
  for (task <- tasks.flatten) {
    // Create a fresh closure serializer and use it to serialize the task.
    val ser = SparkEnv.get.closureSerializer.newInstance()
    // The task's serialized bytes.
    val serializedTask = ser.serialize(task)
    // A Buffer exposes limit, capacity and position; limit is the payload size here.
    if (serializedTask.limit >= akkaFrameSize - AkkaUtils.reservedSizeBytes) {
      // The serialized task is too large: less than reservedSizeBytes of the
      // Akka frame would remain, so abort the task set with a helpful message.
      val taskSetId = scheduler.taskIdToTaskSetId(task.taskId)
      // TaskSetManager signature, for reference:
      // TaskSetManager(sched: TaskSchedulerImpl,val taskSet: TaskSet,val maxTaskFailures: Int,clock: Clock = new SystemClock())
      scheduler.activeTaskSets.get(taskSetId).foreach { taskSet => // scheduler is a TaskSchedulerImpl
        try {
          var msg = "Serialized task %s:%d was %d bytes, which exceeds max allowed: " +
            "spark.akka.frameSize (%d bytes) - reserved (%d bytes). Consider increasing " +
            "spark.akka.frameSize or using broadcast variables for large values."
          msg = msg.format(task.taskId, task.index, serializedTask.limit, akkaFrameSize, AkkaUtils.reservedSizeBytes)
          taskSet.abort(msg)
        } catch {
          case e: Exception => logError("Exception in error callback", e)
        }
      }
    }
    else {
      // ExecutorData holds the target executor's ActorRef.
      val executorData = executorDataMap(task.executorId)
      executorData.freeCores -= scheduler.CPUS_PER_TASK
      // The driver actor sends LaunchTask to the executor-side actor (the
      // CoarseGrainedExecutorBackend hosting the Executor) to start the task.
      executorData.executorActor ! LaunchTask(new SerializableBuffer(serializedTask))
    }
  }
}
// Remove a disconnected slave from the cluster
def removeExecutor(executorId: String, reason: String): Unit = {
  executorDataMap.get(executorId) match {
    case Some(executorInfo) =>
      // This must be synchronized because variables mutated
      // in this block are read when requesting executors
      CoarseGrainedSchedulerBackend.this.synchronized {
        addressToExecutorId -= executorInfo.executorAddress
        executorDataMap -= executorId
        executorsPendingToRemove -= executorId
      }
      // Release the executor's cores, then notify the scheduler and listeners.
      totalCoreCount.addAndGet(-executorInfo.totalCores)
      totalRegisteredExecutors.addAndGet(-1)
      scheduler.executorLost(executorId, SlaveLost(reason))
      listenerBus.post(
        SparkListenerExecutorRemoved(System.currentTimeMillis(), executorId, reason))
    case None => logError(s"Asked to remove non-existent executor $executorId")
  }
}
}
//end of the DriverActor class definition
// Driver-side scheduling actor; created in start(), null until then.
var driverActor: ActorRef = null
// Task ids per slave — presumably keyed by executor/slave id; confirm with callers.
val taskIdsOnSlave = new HashMap[String, HashSet[String]]
/**
 * Creates the driver actor. SparkSubmit runs two actors: the driver actor,
 * which talks to executors, and the client actor, which talks to the master.
 */
override def start() {
  val properties = new ArrayBuffer[(String, String)]
  // Forward every "spark."-prefixed configuration entry to the executors.
  scheduler.sc.conf.getAll.foreach { case (key, value) =>
    if (key.startsWith("spark.")) {
      properties += ((key, value))
    }
  }
  // TODO (prashant) send conf instead of properties
  // ACTOR_NAME is "CoarseGrainedScheduler"
  driverActor = actorSystem.actorOf(
    Props(new DriverActor(properties)), name = CoarseGrainedSchedulerBackend.ACTOR_NAME)
}
/** Asks the driver actor to shut down all executors, waiting for the ack. */
def stopExecutors() {
  try {
    Option(driverActor).foreach { actor =>
      logInfo("Shutting down all executors")
      Await.ready(actor.ask(StopExecutors)(timeout), timeout)
    }
  } catch {
    case e: Exception =>
      throw new SparkException("Error asking standalone scheduler to shut down executors", e)
  }
}
/** Stops all executors, then stops the driver actor itself. */
override def stop() {
  stopExecutors()
  try {
    Option(driverActor).foreach { actor =>
      Await.ready(actor.ask(StopDriver)(timeout), timeout)
    }
  } catch {
    case e: Exception =>
      throw new SparkException("Error stopping standalone scheduler's driver actor", e)
  }
}
/**
 * Sends ReviveOffers to the driver actor.<br>
 * ReviveOffers is an internal driver-side message; handling it calls
 * makeOffers, which offers resources to the scheduler and launches tasks.
 */
override def reviveOffers() {
  driverActor ! ReviveOffers
}
// Forward the kill request to the driver actor, which relays it to the owning executor.
override def killTask(taskId: Long, executorId: String, interruptThread: Boolean) {
  driverActor ! KillTask(taskId, executorId, interruptThread)
}
// spark.default.parallelism if set; otherwise the larger of the total core count and 2.
override def defaultParallelism(): Int = {
  conf.getInt("spark.default.parallelism", math.max(totalCoreCount.get(), 2))
}
// Called by subclasses when notified of a lost worker
def removeExecutor(executorId: String, reason: String) {
  try {
    // Ask the driver actor to drop the executor and wait for the ack.
    val removal = driverActor.ask(RemoveExecutor(executorId, reason))(timeout)
    Await.ready(removal, timeout)
  } catch {
    case e: Exception =>
      throw new SparkException("Error notifying standalone scheduler's driver actor", e)
  }
}
def sufficientResourcesRegistered(): Boolean = true
/**
 * The backend is ready once sufficient resources have registered, or once the
 * maximum registration wait time has elapsed — whichever comes first.
 */
override def isReady(): Boolean = {
  // Expression form instead of early `return`s: idiomatic Scala, same behavior.
  if (sufficientResourcesRegistered) {
    logInfo("SchedulerBackend is ready for scheduling beginning after " +
      s"reached minRegisteredResourcesRatio: $minRegisteredRatio")
    true
  } else if ((System.currentTimeMillis() - createTime) >= maxRegisteredWaitingTime) {
    logInfo("SchedulerBackend is ready for scheduling beginning after waiting " +
      s"maxRegisteredResourcesWaitingTime: $maxRegisteredWaitingTime(ms)")
    true
  } else {
    false
  }
}
/**
 * Return the number of executors currently registered with this backend.
 */
def numExistingExecutors: Int = executorDataMap.size
/**
 * Request an additional number of executors from the cluster manager.
 *
 * @param numAdditionalExecutors number of extra executors wanted; must be >= 0
 * @return whether the request is acknowledged.
 */
final override def requestExecutors(numAdditionalExecutors: Int): Boolean = synchronized {
  if (numAdditionalExecutors < 0) {
    throw new IllegalArgumentException(
      "Attempted to request a negative number of additional executor(s) " +
      s"$numAdditionalExecutors from the cluster manager. Please specify a positive number!")
  }
  logInfo(s"Requesting $numAdditionalExecutors additional executor(s) from the cluster manager")
  numPendingExecutors += numAdditionalExecutors
  // Log after the update so "is now" reflects the new pending count
  // (previously this logged the stale, pre-increment value).
  logDebug(s"Number of pending executors is now $numPendingExecutors")
  // Account for executors pending to be added or removed
  val newTotal = numExistingExecutors + numPendingExecutors - executorsPendingToRemove.size
  doRequestTotalExecutors(newTotal)
}
/**
 * Express a preference to the cluster manager for a given total number of executors. This can
 * result in canceling pending requests or filing additional requests.
 *
 * @param numExecutors desired total number of executors; must be >= 0
 * @return whether the request is acknowledged.
 */
final override def requestTotalExecutors(numExecutors: Int): Boolean = synchronized {
  if (numExecutors < 0) {
    throw new IllegalArgumentException(
      "Attempted to request a negative number of executor(s) " +
      s"$numExecutors from the cluster manager. Please specify a positive number!")
  }
  // Recompute the pending count from the requested total, clamped at zero.
  numPendingExecutors =
    math.max(numExecutors - numExistingExecutors + executorsPendingToRemove.size, 0)
  doRequestTotalExecutors(numExecutors)
}
/**
 * Request executors from the cluster manager by specifying the total number desired,
 * including existing pending and running executors.
 *
 * The semantics here guarantee that we do not over-allocate executors for this application,
 * since a later request overrides the value of any prior request. The alternative interface
 * of requesting a delta of executors risks double counting new executors when there are
 * insufficient resources to satisfy the first request. We make the assumption here that the
 * cluster manager will eventually fulfill all requests when resources free up.
 *
 * @return whether the request is acknowledged.
 */
protected def doRequestTotalExecutors(requestedTotal: Int): Boolean = false
/**
 * Request that the cluster manager kill the specified executors.
 * Unknown executor ids are warned about and skipped.
 * Return whether the kill request is acknowledged.
 */
final override def killExecutors(executorIds: Seq[String]): Boolean = synchronized {
  logInfo(s"Requesting to kill executor(s) ${executorIds.mkString(", ")}")
  // Keep only ids that are actually registered; warn about the rest.
  val (knownExecutors, unknownExecutors) = executorIds.partition(executorDataMap.contains)
  unknownExecutors.foreach { id =>
    logWarning(s"Executor to kill $id does not exist!")
  }
  // Killing executors means effectively that we want less executors than before, so also update
  // the target number of executors to avoid having the backend allocate new ones.
  val newTotal = (numExistingExecutors + numPendingExecutors - executorsPendingToRemove.size
    - knownExecutors.size)
  doRequestTotalExecutors(newTotal)
  executorsPendingToRemove ++= knownExecutors
  doKillExecutors(knownExecutors)
}
/**
 * Kill the given list of executors through the cluster manager.
 * Subclasses override this; the default acknowledges nothing.
 * Return whether the kill request is acknowledged.
 */
protected def doKillExecutors(executorIds: Seq[String]): Boolean = false
}
private[spark] object CoarseGrainedSchedulerBackend {
  // Name under which the driver actor is registered with the actor system.
  val ACTOR_NAME = "CoarseGrainedScheduler"
}
| Dax1n/spark-core | core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala | Scala | apache-2.0 | 19,512 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.net.URI
import scala.collection.mutable
import org.apache.hadoop.conf.Configuration
import org.json4s.JsonAST.JValue
import org.json4s.jackson.JsonMethods._
import org.apache.spark.{SPARK_VERSION, SparkConf}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.deploy.history.EventLogFileWriter
import org.apache.spark.executor.ExecutorMetrics
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.util.{JsonProtocol, Utils}
/**
* A SparkListener that logs events to persistent storage.
*
* Event logging is specified by the following configurable parameters:
* spark.eventLog.enabled - Whether event logging is enabled.
* spark.eventLog.dir - Path to the directory in which events are logged.
* spark.eventLog.logBlockUpdates.enabled - Whether to log block updates
* spark.eventLog.logStageExecutorMetrics.enabled - Whether to log stage executor metrics
*
* Event log file writer maintains its own parameters: refer the doc of [[EventLogFileWriter]]
* and its descendant for more details.
*/
private[spark] class EventLoggingListener(
    appId: String,
    appAttemptId : Option[String],
    logBaseDir: URI,
    sparkConf: SparkConf,
    hadoopConf: Configuration)
  extends SparkListener with Logging {

  import EventLoggingListener._

  // Convenience constructor that derives the Hadoop configuration from sparkConf.
  def this(appId: String, appAttemptId : Option[String], logBaseDir: URI, sparkConf: SparkConf) =
    this(appId, appAttemptId, logBaseDir, sparkConf,
      SparkHadoopUtil.get.newConfiguration(sparkConf))

  // For testing.
  private[scheduler] val logWriter: EventLogFileWriter =
    EventLogFileWriter(appId, appAttemptId, logBaseDir, sparkConf, hadoopConf)

  // For testing. Keep track of all JSON serialized events that have been logged.
  private[scheduler] val loggedEvents = new mutable.ArrayBuffer[JValue]

  private val shouldLogBlockUpdates = sparkConf.get(EVENT_LOG_BLOCK_UPDATES)
  private val shouldLogStageExecutorMetrics = sparkConf.get(EVENT_LOG_STAGE_EXECUTOR_METRICS)
  private val testing = sparkConf.get(EVENT_LOG_TESTING)

  // map of (stageId, stageAttempt) to executor metric peaks per executor/driver for the stage
  private val liveStageExecutorMetrics =
    mutable.HashMap.empty[(Int, Int), mutable.HashMap[String, ExecutorMetrics]]

  /**
   * Creates the log file in the configured log directory.
   */
  def start(): Unit = {
    logWriter.start()
    initEventLog()
  }

  // Writes the SparkListenerLogStart metadata event as the first log entry.
  private def initEventLog(): Unit = {
    val metadata = SparkListenerLogStart(SPARK_VERSION)
    val eventJson = JsonProtocol.logStartToJson(metadata)
    val metadataJson = compact(eventJson)
    logWriter.writeEvent(metadataJson, flushLogger = true)
    if (testing && loggedEvents != null) {
      loggedEvents += eventJson
    }
  }

  /** Log the event as JSON. */
  private def logEvent(event: SparkListenerEvent, flushLogger: Boolean = false): Unit = {
    val eventJson = JsonProtocol.sparkEventToJson(event)
    logWriter.writeEvent(compact(render(eventJson)), flushLogger)
    if (testing) {
      loggedEvents += eventJson
    }
  }

  // Events that do not trigger a flush
  override def onStageSubmitted(event: SparkListenerStageSubmitted): Unit = {
    logEvent(event)
    if (shouldLogStageExecutorMetrics) {
      // record the peak metrics for the new stage
      liveStageExecutorMetrics.put((event.stageInfo.stageId, event.stageInfo.attemptNumber()),
        mutable.HashMap.empty[String, ExecutorMetrics])
    }
  }

  override def onTaskStart(event: SparkListenerTaskStart): Unit = logEvent(event)

  override def onTaskGettingResult(event: SparkListenerTaskGettingResult): Unit = logEvent(event)

  override def onTaskEnd(event: SparkListenerTaskEnd): Unit = {
    logEvent(event)
    if (shouldLogStageExecutorMetrics) {
      // Fold this task's executor metrics into the stage's running peaks.
      val stageKey = (event.stageId, event.stageAttemptId)
      liveStageExecutorMetrics.get(stageKey).map { metricsPerExecutor =>
        val metrics = metricsPerExecutor.getOrElseUpdate(
          event.taskInfo.executorId, new ExecutorMetrics())
        metrics.compareAndUpdatePeakValues(event.taskExecutorMetrics)
      }
    }
  }

  override def onEnvironmentUpdate(event: SparkListenerEnvironmentUpdate): Unit = {
    // Redact sensitive properties before they are written to persistent storage.
    logEvent(redactEvent(event))
  }

  // Events that trigger a flush
  override def onStageCompleted(event: SparkListenerStageCompleted): Unit = {
    if (shouldLogStageExecutorMetrics) {
      // clear out any previous attempts, that did not have a stage completed event
      val prevAttemptId = event.stageInfo.attemptNumber() - 1
      for (attemptId <- 0 to prevAttemptId) {
        liveStageExecutorMetrics.remove((event.stageInfo.stageId, attemptId))
      }

      // log the peak executor metrics for the stage, for each live executor,
      // whether or not the executor is running tasks for the stage
      val executorOpt = liveStageExecutorMetrics.remove(
        (event.stageInfo.stageId, event.stageInfo.attemptNumber()))
      executorOpt.foreach { execMap =>
        execMap.foreach { case (executorId, peakExecutorMetrics) =>
          logEvent(new SparkListenerStageExecutorMetrics(executorId, event.stageInfo.stageId,
            event.stageInfo.attemptNumber(), peakExecutorMetrics))
        }
      }
    }

    // log stage completed event
    logEvent(event, flushLogger = true)
  }

  override def onJobStart(event: SparkListenerJobStart): Unit = logEvent(event, flushLogger = true)

  override def onJobEnd(event: SparkListenerJobEnd): Unit = logEvent(event, flushLogger = true)

  override def onBlockManagerAdded(event: SparkListenerBlockManagerAdded): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onBlockManagerRemoved(event: SparkListenerBlockManagerRemoved): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onUnpersistRDD(event: SparkListenerUnpersistRDD): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onApplicationStart(event: SparkListenerApplicationStart): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onApplicationEnd(event: SparkListenerApplicationEnd): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onExecutorAdded(event: SparkListenerExecutorAdded): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onExecutorRemoved(event: SparkListenerExecutorRemoved): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onExecutorBlacklisted(event: SparkListenerExecutorBlacklisted): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onExecutorBlacklistedForStage(
      event: SparkListenerExecutorBlacklistedForStage): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onNodeBlacklistedForStage(event: SparkListenerNodeBlacklistedForStage): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onExecutorUnblacklisted(event: SparkListenerExecutorUnblacklisted): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onNodeBlacklisted(event: SparkListenerNodeBlacklisted): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onNodeUnblacklisted(event: SparkListenerNodeUnblacklisted): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onBlockUpdated(event: SparkListenerBlockUpdated): Unit = {
    if (shouldLogBlockUpdates) {
      logEvent(event, flushLogger = true)
    }
  }

  override def onExecutorMetricsUpdate(event: SparkListenerExecutorMetricsUpdate): Unit = {
    if (shouldLogStageExecutorMetrics) {
      event.executorUpdates.foreach { case (stageKey1, newPeaks) =>
        liveStageExecutorMetrics.foreach { case (stageKey2, metricsPerExecutor) =>
          // If the update came from the driver, stageKey1 will be the dummy key (-1, -1),
          // so record those peaks for all active stages.
          // Otherwise, record the peaks for the matching stage.
          if (stageKey1 == DRIVER_STAGE_KEY || stageKey1 == stageKey2) {
            val metrics = metricsPerExecutor.getOrElseUpdate(
              event.execId, new ExecutorMetrics())
            metrics.compareAndUpdatePeakValues(newPeaks)
          }
        }
      }
    }
  }

  override def onOtherEvent(event: SparkListenerEvent): Unit = {
    if (event.logEvent) {
      logEvent(event, flushLogger = true)
    }
  }

  /** Stop logging events. */
  def stop(): Unit = {
    logWriter.stop()
  }

  private[spark] def redactEvent(
      event: SparkListenerEnvironmentUpdate): SparkListenerEnvironmentUpdate = {
    // environmentDetails maps a string descriptor to a set of properties
    // Similar to:
    // "JVM Information" -> jvmInformation,
    // "Spark Properties" -> sparkProperties,
    // ...
    // where jvmInformation, sparkProperties, etc. are sequence of tuples.
    // We go through the various of properties and redact sensitive information from them.
    val redactedProps = event.environmentDetails.map{ case (name, props) =>
      name -> Utils.redact(sparkConf, props)
    }
    SparkListenerEnvironmentUpdate(redactedProps)
  }
}
private[spark] object EventLoggingListener extends Logging {
  // Fallback event log directory when spark.eventLog.dir is not configured.
  val DEFAULT_LOG_DIR = "/tmp/spark-events"
  // Dummy stage key used by driver in executor metrics updates
  val DRIVER_STAGE_KEY = (-1, -1)
}
| caneGuy/spark | core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala | Scala | apache-2.0 | 10,099 |
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
package com.krux.hyperion.adt
import scala.language.implicitConversions
import java.time.format.DateTimeFormatter
import java.time.{ZonedDateTime,ZoneOffset}
import com.krux.hyperion.expression._
import com.krux.hyperion.common.{HdfsUri, S3Uri, OptionalOrdered}
/** Base of all hyperion value wrappers: a plain literal (Left) or a typed expression (Right). */
sealed abstract class HType {

  def value: Either[Any, TypedExpression]

  // Literals serialize via toString; expressions know how to serialize themselves.
  lazy val serialize: String = value match {
    case Left(v) => v.toString
    case Right(r) => r.serialize
  }

  override def toString = serialize
}
/** Implicit lifts from plain values and typed expressions into the H* wrappers. */
object HType {

  implicit def string2HString(value: String): HString = HString(Left(value))
  implicit def stringOption2HStringOption(value: Option[String]): Option[HString] =
    value.map(v => HString(Left(v)))
  implicit def stringExp2HString(value: StringExp): HString = HString(Right(value))
  implicit def int2HInt(value: Int): HInt = HInt(Left(value))
  implicit def intExp2HInt(value: IntExp): HInt = HInt(Right(value))
  implicit def double2HDouble(value: Double): HDouble = HDouble(Left(value))
  implicit def doubleExp2HDouble(value: DoubleExp): HDouble = HDouble(Right(value))
  implicit def boolean2HBoolean(value: Boolean): HBoolean = HBoolean(Left(value))
  implicit def booleanExp2HBoolean(value: BooleanExp): HBoolean = HBoolean(Right(value))
  implicit def dateTime2HDateTime(value: ZonedDateTime): HDateTime = HDateTime(Left(value))
  implicit def dateTimeExp2HDateTime(value: DateTimeExp): HDateTime = HDateTime(Right(value))
  implicit def duration2HDuration(value: Duration): HDuration = HDuration(Left(value))
  implicit def durationExp2HDuration(value: DurationExp): HDuration = HDuration(Right(value))
  implicit def s3Uri2HS3Uri(value: S3Uri): HS3Uri = HS3Uri(Left(value))
  implicit def s3UriExp2HS3Uri(value: S3UriExp): HS3Uri = HS3Uri(Right(value))
  implicit def hdfsUri2HHdfsUri(value: HdfsUri): HHdfsUri = HHdfsUri(Left(value))
  implicit def hdfsUriExp2HHdfsUri(value: HdfsUriExp): HHdfsUri = HHdfsUri(Right(value))
  implicit def long2HLong(value: Long): HLong = HLong(Left(value))
  implicit def longExp2HLong(value: LongExp): HLong = HLong(Right(value))
}
case class HString(value: Either[String, StringExp]) extends HType
case class HInt(value: Either[Int, IntExp]) extends HType with OptionalOrdered[Int] {

  /** Some(true/false) for a literal, None when this wraps an expression. */
  def isZero: Option[Boolean] = value match {
    case Left(v) => Option(v == 0)
    case Right(_) => None
  }

  /** Comparable only when a concrete Int can be produced. */
  def compare(that: Int): Option[Int] = value match {
    case Left(v) => Some(v - that)
    case Right(x: Evaluatable[_]) => Some(x.evaluate().asInstanceOf[Int] - that)
    case _ => None
  }

  /** Adds two HInts, lifting literals to IntConstantExp when an expression is involved. */
  def + (that: HInt): HInt = (this.value, that.value) match {
    case (Left(i), Left(j)) => HInt(Left(i + j))
    case (Left(i), Right(j)) => HInt(Right(IntConstantExp(i) + j))
    case (Right(i), Left(j)) => HInt(Right(i + IntConstantExp(j)))
    case (Right(i), Right(j)) => HInt(Right(i + j))
  }

}
case class HLong(value: Either[Long, LongExp]) extends HType with OptionalOrdered[Long] {
  /** Comparable only when a concrete Long is available (literal or evaluatable expression). */
  def compare(that: Long): Option[Int] = value match {
    case Left(v) => Some(java.lang.Long.compare(v, that))
    case Right(x: Evaluatable[_]) =>
      Some(java.lang.Long.compare(x.evaluate().asInstanceOf[Long], that))
    case _ => None
  }
}
case class HDouble(value: Either[Double, DoubleExp]) extends HType with OptionalOrdered[Double] {
  /** Comparable only when a concrete Double is available (literal or evaluatable expression). */
  def compare(that: Double): Option[Int] = value match {
    case Left(v) => Some(java.lang.Double.compare(v, that))
    case Right(x: Evaluatable[_]) =>
      Some(java.lang.Double.compare(x.evaluate().asInstanceOf[Double], that))
    case _ => None
  }
}
case class HBoolean(value: Either[Boolean, BooleanExp]) extends HType {
  /** Evaluates fn only when this wraps the literal true; Option(...) guards a null result. */
  def exists[B](fn: => B) = value match {
    case Left(b) if b => Option(fn)
    case _ => None
  }
}
object HBoolean {

  final val True = HBoolean(Left(true))
  final val False = HBoolean(Left(false))

  // Literals unwrap directly; expressions are evaluated to a concrete Boolean.
  implicit def hboolean2Boolean(b: HBoolean): Boolean = b.value match {
    case Left(v) => v
    case Right(v) => v.evaluate()
  }
}
case class HDateTime(value: Either[ZonedDateTime, DateTimeExp]) extends HType {

  // Zone-less ISO-like layout used when rendering literal datetimes.
  val datetimeFormat = DateTimeFormatter.ofPattern( "yyyy-MM-dd'T'HH:mm:ss")

  // Literals keep their local wall-clock time and are re-tagged as UTC before
  // formatting. NOTE(review): withZoneSameLocal (not withZoneSameInstant) means
  // no instant conversion happens — confirm this is intentional.
  override lazy val serialize: String = value match {
    case Left(dt) => dt.withZoneSameLocal(ZoneOffset.UTC).format(datetimeFormat)
    case Right(expr) => expr.toString
  }
}
object HDateTime {
  // Lifts an HDateTime into the expression world, wrapping literals as constants.
  implicit def hDateTime2DateTimeExp(dt: HDateTime): DateTimeExp = dt.value match {
    case Left(x) => DateTimeConstantExp(x)
    case Right(x) => x
  }
}
// Remaining wrappers: duration, S3 URI and HDFS URI parameters.
case class HDuration(value: Either[Duration, DurationExp]) extends HType
case class HS3Uri(value: Either[S3Uri, S3UriExp]) extends HType
case class HHdfsUri(value: Either[HdfsUri, HdfsUriExp]) extends HType
| realstraw/hyperion | core/src/main/scala/com/krux/hyperion/adt/HType.scala | Scala | bsd-3-clause | 5,127 |
package se.gigurra.leavu3.util
import com.twitter.util.{Duration, JavaTimer, Timer}
/**
 * Created by kjolh on 3/27/2016.
 */
object DefaultTimer extends UtilTimer

/** Thin convenience wrapper around a Twitter Timer (daemon JavaTimer by default). */
case class UtilTimer(underlying: Timer = new JavaTimer(isDaemon = true)) {

  /** Schedules op to run repeatedly every 1000 / fps milliseconds. */
  def fps(fps: Int)(op: => Unit): Unit = {
    require(fps > 0, "Must run with at least 1 fps!")
    underlying.schedule(Duration.fromMilliseconds(1000 / fps))(op)
  }

  /** Runs f once after the given delay. */
  def onceAfter(delay: Duration)(f: => Unit) = {
    underlying.doLater(delay)(f)
  }
}
package org.http4s
import cats.effect.IO
import fs2.Stream
import fs2.text.utf8Encode
import org.http4s.headers._
/** Specs for the SSE wire-format decoder/encoder and the SSE EntityEncoder. */
class ServerSentEventSpec extends Http4sSpec {
  import ServerSentEvent._

  // Helper: UTF-8 encode a string into a byte stream for the decoder.
  def toStream(s: String): Stream[IO, Byte] =
    Stream.emit(s).through(utf8Encode)

  "decode" should {
    "decode multi-line messages" in {
      val stream = toStream("""
      |data: YHOO
      |data: +2
      |data: 10
      |""".stripMargin('|'))
      stream.through(ServerSentEvent.decoder).compile.toVector.unsafeRunSync must_== Vector(
        ServerSentEvent(data = "YHOO\n+2\n10")
      )
    }

    "decode test stream" in {
      val stream = toStream("""
      |: test stream
      |data: first event
      |id: 1
      |
      |data:second event
      |id
      |
      |data: third event
      |""".stripMargin('|'))
      //test stream\n\ndata: first event\nid: 1\n\ndata:second event\nid\n\ndata: third event\n")
      stream.through(ServerSentEvent.decoder).compile.toVector.unsafeRunSync must_== Vector(
        ServerSentEvent(data = "first event", id = Some(EventId("1"))),
        ServerSentEvent(data = "second event", id = Some(EventId.reset)),
        ServerSentEvent(data = " third event", id = None)
      )
    }

    "fire empty events" in {
      val stream = toStream("""
      |data
      |
      |data
      |data
      |
      |data:
      |""".stripMargin('|'))
      //test stream\n\ndata: first event\nid: 1\n\ndata:second event\nid\n\ndata: third event\n")
      stream.through(ServerSentEvent.decoder).compile.toVector.unsafeRunSync must_== Vector(
        ServerSentEvent(data = ""),
        ServerSentEvent(data = "\n"),
        ServerSentEvent(data = "")
      )
    }

    "ignore single space after colon" in {
      val stream = toStream("""
      |data:test
      |
      |data: test
      |""".stripMargin('|'))
      //test stream\n\ndata: first event\nid: 1\n\ndata:second event\nid\n\ndata: third event\n")
      stream.through(ServerSentEvent.decoder).compile.toVector.unsafeRunSync must_== Vector(
        ServerSentEvent(data = "test"),
        ServerSentEvent(data = "test")
      )
    }
  }

  "encode" should {
    // Property test: encode-then-decode round-trips arbitrary event vectors.
    "be consistent with decode" in prop { sses: Vector[ServerSentEvent] =>
      val roundTrip = Stream
        .emits(sses)
        .covary[IO]
        .through(ServerSentEvent.encoder)
        .through(ServerSentEvent.decoder)
        .compile
        .toVector
        .unsafeRunSync
      roundTrip must_== sses
    }

    "handle leading spaces" in {
      // This is a pathological case uncovered by scalacheck
      val sse = ServerSentEvent(" a", Some(" b"), Some(EventId(" c")), Some(1L))
      Stream
        .emit(sse)
        .covary[IO]
        .through(ServerSentEvent.encoder)
        .through(ServerSentEvent.decoder)
        .compile
        .last
        .unsafeRunSync must beSome(sse)
    }
  }

  "EntityEncoder[ServerSentEvent]" should {
    val eventStream: Stream[IO, ServerSentEvent] =
      Stream.range(0, 5).map(i => ServerSentEvent(data = i.toString))

    "set Content-Type to text/event-stream" in {
      Response[IO]().withBody(eventStream).unsafeRunSync.contentType must beSome(
        `Content-Type`(MediaType.`text/event-stream`))
    }

    "decode to original event stream" in {
      val resp = Response[IO]().withBody(eventStream).unsafeRunSync
      resp.body
        .through(ServerSentEvent.decoder)
        .compile
        .toVector
        .unsafeRunSync must_== eventStream.compile.toVector.unsafeRunSync
    }
  }
}
| reactormonk/http4s | tests/src/test/scala/org/http4s/ServerSentEventSpec.scala | Scala | apache-2.0 | 3,531 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.controller
import org.apache.predictionio.annotation.DeveloperApi
import org.apache.predictionio.core.BaseAlgorithm
import org.apache.predictionio.workflow.PersistentModelManifest
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
/** Base class of a parallel algorithm.
*
* A parallel algorithm can be run in parallel on a cluster and produces a
* model that can also be distributed across a cluster.
*
* If your input query class requires custom JSON4S serialization, the most
* idiomatic way is to implement a trait that extends [[CustomQuerySerializer]],
* and mix that into your algorithm class, instead of overriding
* [[querySerializer]] directly.
*
* To provide evaluation feature, one must override and implement the
* [[batchPredict]] method. Otherwise, an exception will be thrown when pio eval`
* is used.
*
* @tparam PD Prepared data class.
* @tparam M Trained model class.
* @tparam Q Input query class.
* @tparam P Output prediction class.
* @group Algorithm
*/
abstract class PAlgorithm[PD, M, Q, P]
  extends BaseAlgorithm[PD, M, Q, P] {

  // Bridge from the framework's untyped base API to the typed train method.
  def trainBase(sc: SparkContext, pd: PD): M = train(sc, pd)

  /** Implement this method to produce a model from prepared data.
    *
    * @param pd Prepared data for model training.
    * @return Trained model.
    */
  def train(sc: SparkContext, pd: PD): M

  // Bridge from the framework's untyped base API to the typed batchPredict method.
  def batchPredictBase(sc: SparkContext, bm: Any, qs: RDD[(Long, Q)])
  : RDD[(Long, P)] = batchPredict(bm.asInstanceOf[M], qs)

  /** To provide evaluation feature, one must override and implement this method
    * to generate many predictions in batch. Otherwise, an exception will be
    * thrown when `pio eval` is used.
    *
    * The default implementation throws an exception.
    *
    * @param m Trained model produced by [[train]].
    * @param qs An RDD of index-query tuples. The index is used to keep track of
    *           predicted results with corresponding queries.
    */
  def batchPredict(m: M, qs: RDD[(Long, Q)]): RDD[(Long, P)] =
    throw new NotImplementedError("batchPredict not implemented")

  // Bridge from the framework's untyped base API to the typed predict method.
  def predictBase(baseModel: Any, query: Q): P = {
    predict(baseModel.asInstanceOf[M], query)
  }

  /** Implement this method to produce a prediction from a query and trained
    * model.
    *
    * @param model Trained model produced by [[train]].
    * @param query An input query.
    * @return A prediction.
    */
  def predict(model: M, query: Q): P

  /** :: DeveloperApi ::
    * Engine developers should not use this directly (read on to see how parallel
    * algorithm models are persisted).
    *
    * In general, parallel models may contain multiple RDDs. It is not easy to
    * infer and persist them programmatically since these RDDs may be
    * potentially huge. To persist these models, engine developers need to mix
    * the [[PersistentModel]] trait into the model class and implement
    * [[PersistentModel.save]]. If it returns true, a
    * [[org.apache.predictionio.workflow.PersistentModelManifest]] will be
    * returned so that during deployment, PredictionIO will use
    * [[PersistentModelLoader]] to retrieve the model. Otherwise, Unit will be
    * returned and the model will be re-trained on-the-fly.
    *
    * @param sc Spark context
    * @param modelId Model ID
    * @param algoParams Algorithm parameters that trained this model
    * @param bm Model
    * @return The model itself for automatic persistence, an instance of
    *         [[org.apache.predictionio.workflow.PersistentModelManifest]] for manual
    *         persistence, or Unit for re-training on deployment
    */
  @DeveloperApi
  override
  def makePersistentModel(
    sc: SparkContext,
    modelId: String,
    algoParams: Params,
    bm: Any): Any = {
    val m = bm.asInstanceOf[M]
    if (m.isInstanceOf[PersistentModel[_]]) {
      if (m.asInstanceOf[PersistentModel[Params]].save(
        modelId, algoParams, sc)) {
        PersistentModelManifest(className = m.getClass.getName)
      } else {
        // NOTE(review): this returns the `Unit` companion object (not `()`),
        // which callers appear to use as a sentinel — confirm before changing.
        Unit
      }
    } else {
      Unit
    }
  }
}
| pferrel/PredictionIO | core/src/main/scala/org/apache/predictionio/controller/PAlgorithm.scala | Scala | apache-2.0 | 4,917 |
// IntelliJ Scala plugin type-inference test fixture: the expression between
// /*start*/ and /*end*/ must infer to the type named in the file's trailing comment.
// Do not alter the code — the overloads and implicit conversion are the test itself.
object SCL8317 {
  def foo(x: Boolean): Int = 1
  def foo(s: String): String = null
  implicit def u(u: Unit): Boolean = false
  /*start*/foo()/*end*/
}
//Int | ilinum/intellij-scala | testdata/typeInference/bugs5/SCL8317.scala | Scala | apache-2.0 | 161 |
package com.softwaremill.thegarden.mongodbtest
import org.scalatest.{ShouldMatchers, FlatSpec}
import com.mongodb.casbah.Imports._
import com.mongodb.DBCollection
/** Verifies that FongoSupport honours the clearDataBeforeEachTest flag. */
class FongoSupportSpec extends FlatSpec with ShouldMatchers {

  // Inner suite run by the outer tests: inserts one document per test case,
  // then hands the collection to afterAllBlock for inspection.
  class TestClass(clearData : Boolean = false, afterAllBlock : (DBCollection) => Unit = _ => {}) extends FlatSpec with FongoSupport {

    override val clearDataBeforeEachTest: Boolean = clearData

    it should "run one test" in {
      import com.mongodb.casbah.Imports._
      val coll = fongo.getDB(dbName).getCollection("test_coll")
      coll.save(DBObject("foo" -> "bar"))
    }

    it should "run another test" in {
      val coll = fongo.getDB(dbName).getCollection("test_coll")
      coll.save(DBObject("foo2" -> "bar2"))
    }

    override protected def afterAll() = {
      afterAllBlock(fongo.getDB(dbName).getCollection("test_coll"))
      super.afterAll()
    }
  }

  it should "not clear data before every test if not asked to explicitly" in {
    var collectionCountAfterExecute: Long = -1
    new TestClass(false, {coll =>
      collectionCountAfterExecute = coll.count()
    }).execute
    // Both inserts survive: nothing was cleared between tests.
    collectionCountAfterExecute should equal(2)
  }

  it should "clear data before every test if that's explicitly set" in {
    var collectionCountAfterExecute: Long = -1
    new TestClass(true, {coll =>
      collectionCountAfterExecute = coll.count()
    }).execute
    // Only the second test's insert remains: the collection was cleared first.
    collectionCountAfterExecute should equal(1)
  }
}
| maciej/the-garden | mongodb-test/src/test/scala/com/softwaremill/thegarden/mongodbtest/FongoSupportSpec.scala | Scala | mit | 1,467 |
import scala.annotation.tailrec

// Compiler neg-test fixture (i10970): @tailrec may not be combined with an
// inline method. The trailing `// error` marker pins the expected diagnostic
// to that line — keep it in place.
@tailrec inline def foo() = ??? // error
| dotty-staging/dotty | tests/neg/i10970.scala | Scala | apache-2.0 | 74 |
/*
* Copyright (c) 2012 Orderly Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
import sbt._
import Keys._
/**
 * sbt build settings for the Dutch CLI tool: basic project metadata, a
 * source generator that exposes build settings to the app at runtime, and
 * Proguard packaging options.
 *
 * NOTE(review): uses the legacy sbt 0.x operators (`<+=`) — do not reformat
 * into new-style syntax without also upgrading the sbt version.
 */
object BuildSettings {

  // Basic settings for our app
  lazy val basicSettings = Seq[Setting[_]](
    organization := "Orderly Ltd",
    version := "0.1",
    description := "A command-line tool for monitoring competitors' prices on Amazon Marketplace",
    scalaVersion := "2.9.1",
    scalacOptions := Seq("-deprecation", "-encoding", "utf8"),
    resolvers ++= Dependencies.resolutionRepos
  )

  // Makes our SBT app settings available from within the app:
  // generates a managed source file `settings.scala` containing the
  // organization, version and name captured at build time.
  lazy val scalifySettings = Seq(sourceGenerators in Compile <+= (sourceManaged in Compile, version, name, organization) map { (d, v, n, o) =>
    val file = d / "settings.scala"
    IO.write(file, """package co.orderly.dutch.generated
      |object Settings {
      | val organization = "%s"
      | val version = "%s"
      | val name = "%s"
      |}
      |""".stripMargin.format(o, v, n))
    Seq(file)
  })

  // Proguard settings for packaging: keep annotations/enclosing-method info
  // and disable shrinking/optimization so reflection keeps working.
  import ProguardPlugin._
  lazy val proguard = proguardSettings ++ Seq(
    proguardOptions := Seq(
      "-keepattributes *Annotation*,EnclosingMethod",
      "-dontskipnonpubliclibraryclassmembers",
      "-dontoptimize",
      "-dontshrink"
    )
  )

  // The full settings stack applied to the project.
  lazy val dutchSettings = basicSettings ++ scalifySettings ++ proguard
}
| keplar/dutch | project/BuildSettings.scala | Scala | apache-2.0 | 1,976 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.text
import java.io.File
import org.apache.spark.SparkConf
import org.apache.spark.sql.{QueryTest, Row}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.{StringType, StructType}
/**
 * Tests for the text data source's `wholetext` option, under which each input
 * file is read as a single row instead of one row per line.
 *
 * NOTE(review): all three tests below are currently disabled via `ignore`.
 */
abstract class WholeTextFileSuite extends QueryTest with SharedSparkSession {

  // Hadoop's FileSystem caching does not use the Configuration as part of its cache key, which
  // can cause Filesystem.get(Configuration) to return a cached instance created with a different
  // configuration than the one passed to get() (see HADOOP-8490 for more details). This caused
  // hard-to-reproduce test failures, since any suites that were run after this one would inherit
  // the new value of "fs.local.block.size" (see SPARK-5227 and SPARK-5679). To work around this,
  // we disable FileSystem caching in this suite.
  protected override def sparkConf =
    super.sparkConf.set("spark.hadoop.fs.file.impl.disable.cache", "true")

  ignore("reading text file with option wholetext=true") {
    val df = spark.read.option("wholetext", "true")
      .format("text")
      .load(testFile("test-data/text-suite.txt"))
    // schema: a single string column named "value"
    assert(df.schema == new StructType().add("value", StringType))

    // verify content: the entire file collapses into one row
    val data = df.collect()
    assert(data(0) ==
      Row(
        // scalastyle:off nonascii
        """This is a test file for the text data source
          |1+1
          |数据砖头
          |"doh"
          |""".stripMargin))
    // scalastyle:on nonascii
    assert(data.length == 1)
  }

  ignore("correctness of wholetext option") {
    import org.apache.spark.sql.catalyst.util._

    withTempDir { dir =>
      // Three files: each should become exactly one row.
      val file1 = new File(dir, "text1.txt")
      stringToFile(file1,
        """text file 1 contents.
          |From: None to: ??
        """.stripMargin)
      val file2 = new File(dir, "text2.txt")
      stringToFile(file2, "text file 2 contents.")
      val file3 = new File(dir, "text3.txt")
      stringToFile(file3, "text file 3 contents.")
      val df = spark.read.option("wholetext", "true").text(dir.getAbsolutePath)
      // Since wholetext option reads each file into a single row, df.length should be no. of files.
      val data = df.sort("value").collect()
      assert(data.length == 3)
      // Each files should represent a single Row/element in Dataframe/Dataset
      assert(data(0) == Row(
        """text file 1 contents.
          |From: None to: ??
        """.stripMargin))
      assert(data(1) == Row(
        """text file 2 contents.""".stripMargin))
      assert(data(2) == Row(
        """text file 3 contents.""".stripMargin))
    }
  }

  ignore("Correctness of wholetext option with gzip compression mode.") {
    withTempDir { dir =>
      val path = dir.getCanonicalPath
      // Write 1000 ids as strings into a single gzipped text file.
      val df1 = spark.range(0, 1000).selectExpr("CAST(id AS STRING) AS s").repartition(1)
      df1.write.option("compression", "gzip").mode("overwrite").text(path)
      // On reading through wholetext mode, one file will be read as a single row, i.e. not
      // delimited by "next line" character.
      val expected = Row(df1.collect().map(_.getString(0)).mkString("", "\\n", "\\n"))
      // The result must be the same regardless of the partition-size setting,
      // because a gzipped file is not splittable.
      Seq(10, 100, 1000).foreach { bytes =>
        withSQLConf(SQLConf.FILES_MAX_PARTITION_BYTES.key -> bytes.toString) {
          val df2 = spark.read.option("wholetext", "true").format("text").load(path)
          val result = df2.collect().head
          assert(result === expected)
        }
      }
    }
  }
}
/**
 * Runs the whole-text tests with the text source pinned to the DataSource V1
 * code path (see the final `USE_V1_SOURCE_LIST -> "text"` entry), with the
 * Intel OAP columnar plugin and off-heap memory enabled.
 */
class WholeTextFileV1Suite extends WholeTextFileSuite {
  override protected def sparkConf: SparkConf =
    super.sparkConf
      .setAppName("test")
      .set("spark.sql.parquet.columnarReaderBatchSize", "4096")
      // NOTE(review): SQLConf.USE_V1_SOURCE_LIST below presumably shares the
      // key "spark.sql.sources.useV1SourceList" with this entry, in which
      // case the later .set wins and "avro" is discarded — confirm.
      .set("spark.sql.sources.useV1SourceList", "avro")
      .set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin")
      .set("spark.sql.execution.arrow.maxRecordsPerBatch", "4096")
      //.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager")
      .set("spark.memory.offHeap.enabled", "true")
      .set("spark.memory.offHeap.size", "50m")
      .set("spark.sql.join.preferSortMergeJoin", "false")
      .set("spark.sql.columnar.codegen.hashAggregate", "false")
      .set("spark.oap.sql.columnar.wholestagecodegen", "false")
      .set("spark.sql.columnar.window", "false")
      .set("spark.unsafe.exceptionOnMemoryLeak", "false")
      //.set("spark.sql.columnar.tmp_dir", "/codegen/nativesql/")
      .set("spark.sql.columnar.sort.broadcastJoin", "true")
      .set("spark.oap.sql.columnar.preferColumnar", "true")
      .set(SQLConf.USE_V1_SOURCE_LIST, "text")
}
/**
 * Runs the whole-text tests with the V1 source list emptied (final
 * `USE_V1_SOURCE_LIST -> ""`), so the text source goes through the
 * DataSource V2 code path. Otherwise identical configuration to
 * [[WholeTextFileV1Suite]].
 */
class WholeTextFileV2Suite extends WholeTextFileSuite {
  override def sparkConf: SparkConf =
    super.sparkConf
      .setAppName("test")
      .set("spark.sql.parquet.columnarReaderBatchSize", "4096")
      .set("spark.sql.sources.useV1SourceList", "avro")
      .set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin")
      .set("spark.sql.execution.arrow.maxRecordsPerBatch", "4096")
      //.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager")
      .set("spark.memory.offHeap.enabled", "true")
      .set("spark.memory.offHeap.size", "50m")
      .set("spark.sql.join.preferSortMergeJoin", "false")
      .set("spark.sql.columnar.codegen.hashAggregate", "false")
      .set("spark.oap.sql.columnar.wholestagecodegen", "false")
      .set("spark.sql.columnar.window", "false")
      .set("spark.unsafe.exceptionOnMemoryLeak", "false")
      //.set("spark.sql.columnar.tmp_dir", "/codegen/nativesql/")
      .set("spark.sql.columnar.sort.broadcastJoin", "true")
      .set("spark.oap.sql.columnar.preferColumnar", "true")
      .set(SQLConf.USE_V1_SOURCE_LIST, "")
}
| Intel-bigdata/OAP | oap-native-sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/text/WholeTextFileSuite.scala | Scala | apache-2.0 | 6,603 |
package ca.dubey.music.theory
/** Represent a note's value (duration) */
case class NoteValue(val ticksPerQuarterNote : Int) {
/** Quaiver */
val eighthNote = ticksPerQuarterNote / 2
/** Crochet */
val quarterNote = ticksPerQuarterNote
/** Dotted Crochet */
val dottedQuarterNote = ticksPerQuarterNote + eighthNote
/** Minim */
val halfNote = ticksPerQuarterNote * 2
/** Dotted Minim */
val dottedHalfNote = ticksPerQuarterNote * 3
/** Semibreve */
val wholeNote = ticksPerQuarterNote * 4
val values = Array(
eighthNote,
quarterNote,
dottedQuarterNote,
halfNote,
dottedHalfNote,
wholeNote)
}
| adubey/music | src/main/scala/theory/NoteValue.scala | Scala | gpl-2.0 | 663 |
package lila.game
import chess.Color
import lila.common.{ SecureRandom, ThreadLocalRandom }
import lila.db.dsl._
final class IdGenerator(gameRepo: GameRepo)(implicit ec: scala.concurrent.ExecutionContext) {

  import IdGenerator._

  /** Produce a game id, retrying until one not already present in the repo is found. */
  def game: Fu[Game.ID] = {
    val candidate = uncheckedGame
    gameRepo.exists(candidate).flatMap { taken =>
      if (taken) game else fuccess(candidate)
    }
  }

  /** Produce `nb` distinct game ids, none of which exist in the repo. */
  def games(nb: Int): Fu[Set[Game.ID]] =
    if (nb < 1) fuccess(Set.empty)
    else if (nb == 1) game.dmap(Set(_))
    else if (nb < 5) Set.fill(nb)(game).sequenceFu
    else {
      // For larger batches, generate candidates first and weed out collisions
      // with a single DB query, recursing only for the ids that clashed.
      val candidates = Set.fill(nb)(uncheckedGame)
      gameRepo.coll.distinctEasy[Game.ID, Set]("_id", $inIds(candidates)).flatMap { taken =>
        games(taken.size).dmap(_ ++ (candidates diff taken))
      }
    }
}
object IdGenerator {

  // The two suffix alphabets are disjoint, so the last character of a player
  // id differs between the white and black player of the same game.
  private[this] val whiteSuffixChars = (('0' to '4') ++ ('A' to 'Z')).mkString
  private[this] val blackSuffixChars = (('5' to '9') ++ ('a' to 'z')).mkString

  /** A random game id, with no uniqueness check against the database. */
  def uncheckedGame: Game.ID = ThreadLocalRandom.nextString(Game.gameIdSize)

  /** A random player id whose final character is drawn from a per-color alphabet. */
  def player(color: Color): Player.ID = {
    // Trick to avoid collisions between player ids in the same game.
    val pool = color.fold(whiteSuffixChars, blackSuffixChars)
    val last = pool.charAt(SecureRandom.nextInt(pool.length))
    SecureRandom.nextString(Game.playerIdSize - 1) + last
  }
}
| luanlv/lila | modules/game/src/main/IdGenerator.scala | Scala | mit | 1,365 |
package io.flow.dependency.actors
import io.flow.dependency.api.lib._
import io.flow.dependency.v0.models._
import io.flow.dependency.v0.models.json._
import io.flow.postgresql.{OrderBy, Pager}
import db._
import akka.actor.Actor
import cache.ProjectsCache
import io.flow.akka.SafeReceive
import io.flow.log.RollbarLogger
import play.api.libs.json.Json
object ProjectActor {

  /** Marker trait for all messages accepted by [[ProjectActor]]. */
  trait Message

  object Messages {
    // Remove every recommendation belonging to the project.
    case class Delete(projectId: String) extends Message
    // NOTE(review): not matched in ProjectActor.receive — confirm whether still used.
    case class SyncCompleted(projectId: String) extends Message

    // A project library dependency was created / should re-sync / was removed.
    case class ProjectLibraryCreated(projectId: String, id: String) extends Message
    case class ProjectLibrarySync(projectId: String, id: String) extends Message
    case class ProjectLibraryDeleted(projectId: String, id: String, version: String) extends Message

    // A project binary dependency was created / should re-sync / was removed.
    case class ProjectBinaryCreated(projectId: String, id: String) extends Message
    case class ProjectBinarySync(projectId: String, id: String) extends Message
    case class ProjectBinaryDeleted(projectId: String, id: String, version: String) extends Message

    // A library/binary finished syncing; the project may now be fully synced.
    case class LibrarySynced(projectId: String) extends Message
    case class BinarySynced(projectId: String) extends Message
  }
}
/**
 * Actor that maintains the sync state of a project's dependencies.
 *
 * It resolves project libraries and binaries to known artifact records, and
 * once every dependency of a project has finished syncing it regenerates the
 * project's recommendations and records the project-level sync as completed.
 * It also removes recommendations when a project or dependency is deleted.
 */
class ProjectActor @javax.inject.Inject() (
  rollbar: RollbarLogger,
  projectsCache: ProjectsCache,
  syncsDao: SyncsDao,
  projectBinariesDao:ProjectBinariesDao,
  projectLibrariesDao: InternalProjectLibrariesDao,
  recommendationsDao: RecommendationsDao,
  librariesDao: LibrariesDao,
  binariesDao: BinariesDao,
  staticUserProvider: StaticUserProvider,
  defaultLibraryArtifactProvider: DefaultLibraryArtifactProvider,
) extends Actor {

  // All writes performed by this actor are attributed to the system user.
  private[this] lazy val SystemUser = staticUserProvider.systemUser

  // Group this actor's log entries under a single Rollbar fingerprint.
  private[this] implicit val logger: RollbarLogger = rollbar.fingerprint(getClass.getName)

  def receive: Receive = SafeReceive.withLogUnhandled {

    // A library/binary dependency was created or asked to re-sync: resolve it.
    case ProjectActor.Messages.ProjectLibraryCreated(projectId, id) =>
      syncProjectLibrary(projectId, id)

    case ProjectActor.Messages.ProjectLibrarySync(projectId, id) =>
      syncProjectLibrary(projectId, id)

    case ProjectActor.Messages.ProjectBinaryCreated(projectId, id) =>
      syncProjectBinary(projectId, id)

    case ProjectActor.Messages.ProjectBinarySync(projectId, id) =>
      syncProjectBinary(projectId, id)

    // A dependency finished syncing elsewhere: the project may now be complete.
    case ProjectActor.Messages.LibrarySynced(projectId) =>
      findProject(projectId).foreach { project =>
        processPendingSync(project)
      }

    case ProjectActor.Messages.BinarySynced(projectId) =>
      findProject(projectId).foreach { project =>
        processPendingSync(project)
      }

    // Project deleted: page through and delete all of its recommendations.
    case ProjectActor.Messages.Delete(projectId: String) =>
      Pager.create { offset =>
        recommendationsDao.findAll(Authorization.All, projectId = Some(projectId), offset = offset)
      }.foreach { rec =>
        recommendationsDao.delete(SystemUser, rec)
      }

    // A dependency was removed: drop the recommendations derived from it,
    // then re-evaluate whether the project's sync is now complete.
    case ProjectActor.Messages.ProjectLibraryDeleted(projectId, id, version) =>
      findProject(projectId).foreach { project =>
        recommendationsDao.findAll(
          Authorization.All,
          projectId = Some(project.id),
          `type` = Some(RecommendationType.Library),
          objectId = Some(id),
          fromVersion = Some(version)
        ).foreach { rec =>
          recommendationsDao.delete(SystemUser, rec)
        }

        processPendingSync(project)
      }

    case ProjectActor.Messages.ProjectBinaryDeleted(projectId, id, version) =>
      findProject(projectId).foreach { project =>
        recommendationsDao.findAll(
          Authorization.All,
          projectId = Some(project.id),
          `type` = Some(RecommendationType.Binary),
          objectId = Some(id),
          fromVersion = Some(version)
        ).foreach { rec =>
          recommendationsDao.delete(SystemUser, rec)
        }

        processPendingSync(project)
      }
  }

  /**
   * Attempts to resolve the library. If successful, sets the
   * project_libraries.library_id. The whole operation is recorded via
   * syncsDao as a started/completed sync of the "project_library" object,
   * and the project's overall sync state is re-evaluated afterwards.
   */
  def syncProjectLibrary(projectId: String, id: String): Unit = {
    syncsDao.withStartedAndCompleted("project_library", id) {
      findProject(projectId).foreach { project =>
        projectLibrariesDao.findById(Authorization.All, id).map { projectLibrary =>
          resolveLibrary(projectLibrary).map { lib =>
            projectLibrariesDao.setLibrary(SystemUser, projectLibrary, lib)
          }
        }
        processPendingSync(project)
      }
    }
  }

  /**
   * Binary counterpart of [[syncProjectLibrary]]: resolves the project binary
   * and, if successful, sets it on the project_binaries record.
   */
  def syncProjectBinary(projectId: String, id: String): Unit = {
    syncsDao.withStartedAndCompleted("project_binary", id) {
      findProject(projectId).foreach { project =>
        projectBinariesDao.findById(Authorization.All, id).map { projectBinary =>
          resolveBinary(projectBinary).map { binary =>
            projectBinariesDao.setBinary(SystemUser, projectBinary, binary)
          }
        }
        processPendingSync(project)
      }
    }
  }

  /**
   * If the project has no dependencies left to sync, regenerate its
   * recommendations and record the project-level sync as completed.
   * Otherwise do nothing and wait for further LibrarySynced/BinarySynced
   * messages to trigger another check.
   */
  def processPendingSync(project: Project): Unit = {
    dependenciesPendingCompletion(project) match {
      case Nil => {
        recommendationsDao.sync(SystemUser, project)
        syncsDao.recordCompleted("project", project.id)
      }
      case _ => {
        // println(s" -- project[${project.name}] id[${project.id}] waiting on dependencies to sync: " + deps.mkString(", "))
      }
    }
  }

  // Human-readable labels for the project's dependencies that have NOT yet
  // finished syncing (not the project's full dependency list).
  private[this] def dependenciesPendingCompletion(project: Project): Seq[String] = {
    projectLibrariesDao.findAll(
      Authorization.All,
      projectId = Some(project.id),
      isSynced = Some(false),
      limit = None,
      orderBy = Some(OrderBy("group_id,artifact_id")),
    ).map( lib => s"Library ${lib.groupId}.${lib.artifactId}" ) ++
      projectBinariesDao.findAll(
        Authorization.All,
        projectId = Some(project.id),
        isSynced = Some(false)
      ).map( bin => s"Binary ${bin.name}" )
  }

  /**
   * Resolves a project library to a [[Library]] record: first by exact
   * group/artifact lookup, otherwise by asking the artifact provider and
   * upserting a new library against the resolver it found. Returns None
   * (logging on upsert failure) when resolution is not possible.
   */
  private[this] def resolveLibrary(projectLibrary: InternalProjectLibrary): Option[Library] = {
    librariesDao.findByGroupIdAndArtifactId(Authorization.All, projectLibrary.groupId, projectLibrary.artifactId) match {
      case Some(lib) => {
        Some(lib)
      }

      case None => {
        defaultLibraryArtifactProvider.resolve(
          organizationId = projectLibrary.organizationId,
          groupId = projectLibrary.groupId,
          artifactId = projectLibrary.artifactId
        ) match {
          case None => {
            None
          }
          case Some(resolution) => {
            librariesDao.upsert(
              SystemUser,
              form = LibraryForm(
                organizationId = projectLibrary.organizationId,
                groupId = projectLibrary.groupId,
                artifactId = projectLibrary.artifactId,
                resolverId = resolution.resolver.id
              )
            ) match {
              case Left(errors) => {
                logger
                  .organization(projectLibrary.organizationId)
                  .withKeyValue("project_id", projectLibrary.projectId)
                  .withKeyValue("errors", errors).error("Error upserting library")
                None
              }
              case Right(library) => {
                Some(library)
              }
            }
          }
        }
      }
    }
  }

  /**
   * Resolves a project binary to a [[Binary]] record. Only the known binary
   * types (scala, sbt) are upserted; unknown names are logged and skipped.
   */
  private[this] def resolveBinary(projectBinary: ProjectBinary): Option[Binary] = {
    BinaryType(projectBinary.name) match {
      case BinaryType.Scala | BinaryType.Sbt => {
        binariesDao.upsert(
          SystemUser,
          BinaryForm(
            organizationId = projectBinary.project.organization.id,
            name = BinaryType(projectBinary.name)
          )
        ) match {
          case Left(errors) => {
            logger.withKeyValue("project", Json.toJson(projectBinary)).withKeyValue("errors", errors).error(s"error upserting binary")
            None
          }
          case Right(binary) => {
            Some(binary)
          }
        }
      }
      case BinaryType.UNDEFINED(_) => {
        logger.withKeyValue("project", Json.toJson(projectBinary)).warn(s"Project references an unknown binary")
        None
      }
    }
  }

  // Projects are looked up through the cache rather than hitting the database.
  private[this] def findProject(id: String): Option[Project] = {
    projectsCache.findByProjectId(id)
  }
}
| flowcommerce/dependency | api/app/actors/ProjectActor.scala | Scala | mit | 8,269 |
package com.twitter.inject.thrift.integration.reqrepserviceperendpoint
import com.twitter.finagle.Filter
import com.twitter.finagle.Service
import com.twitter.greeter.thriftscala.Greeter.Hello
import com.twitter.scrooge
import com.twitter.util.Future
import com.twitter.util.logging.Logging
import javax.inject.Singleton
/**
 * Thrift filter around the Greeter `hello` endpoint: logs the greeted name on
 * the way in, forwards the call unchanged, and logs the response when the
 * call succeeds. The service's future is returned as-is.
 */
@Singleton
class HelloFilter
  extends Filter[scrooge.Request[Hello.Args], scrooge.Response[
    Hello.SuccessType
  ], scrooge.Request[Hello.Args], scrooge.Response[Hello.SuccessType]]
  with Logging {

  def apply(
    request: scrooge.Request[Hello.Args],
    service: Service[scrooge.Request[Hello.Args], scrooge.Response[Hello.SuccessType]]
  ): Future[scrooge.Response[Hello.SuccessType]] = {
    info("Hello called with name " + request.args.name)
    val result = service(request)
    result.onSuccess(response => info(response))
  }
}
| twitter/finatra | inject/inject-thrift-client/src/test/scala/com/twitter/inject/thrift/integration/reqrepserviceperendpoint/HelloFilter.scala | Scala | apache-2.0 | 863 |
trait LeibnizLiskov { // NOTE(review): reads like a compiler test fixture for =:= / <:< substitution; some `implicitly` lines are presumably expected to fail — avoid inserting lines, a companion check file may pin line numbers
  type A // instead of picking some concrete type, use a totally unknown, abstract one
  type B
  type SA <: A // a declared subtype of A
  type SB >: B // a declared supertype of B
  implicitly[A =:= B] // no implicit A =:= B evidence is in scope here
  implicitly[B =:= A]
  def aEqB: A =:= B // explicit (abstract) equality evidence between A and B
  implicitly[A <:< SA] // only SA <: A is declared, not the converse
  implicitly[SB <:< B]
  implicitly[SA <:< B]
  implicitly[A <:< SB]
  def A(): A // sample producers used by the substitution calls below
  def B(): B
  aEqB.substituteCo (List(B(), B(), B())) // substitution under a covariant context
  aEqB.substituteContra(List(A(), A(), A())) // ...and under a contravariant one
  locally { val xs = aEqB.flip.liftCo[List](List(B(), B(), B())); implicitly[xs.type <:< List[B]] } // lifting flipped evidence into List
  def convert1[T, U](l: List[T])(ev: T =:= U): List[U] = ev.substituteContra(l)
  def convert2[T, U](l: List[U])(ev: T =:= U): List[T] = ev.substituteCo(l)
  implicitly[A <:< A] // reflexivity
  implicitly[B <:< B]
  val aSubB: A <:< B = aEqB // =:= weakens to <:< in both directions
  val bSubA: B <:< A = aEqB.flip
  type From[X] = { type L[+Y] = X => Y } // Function1 result (covariant) position
  type To [X] = { type L[-Y] = Y => X } // Function1 argument (contravariant) position
  locally { val f = bSubA.substituteCo [To [A]#L](aSubB(_)); implicitly[f.type <:< (A => A)] }
  locally { val f = aSubB.substituteContra[From[A]#L](bSubA(_)); implicitly[f.type <:< (A => A)] }
  def convertSub[T, U](l: List[T])(ev: T <:< U): List[U] = ev.liftContra[List](l)
  type Consumes[-X] = X => Unit
  def convertConsume1[U, T](c: Consumes[T])(ev: U <:< T): Consumes[U] = ev.liftCo[Consumes](c)
  def convertConsume2[U, T](c: Consumes[T])(ev: U <:< T): Consumes[U] = ev.substituteCo(c)
}
| scala/scala | test/files/neg/leibniz-liskov.scala | Scala | apache-2.0 | 1,367 |
package rere.sasl.scram.client
import org.scalamock.scalatest.MockFactory
import org.scalatest.{Matchers, WordSpec}
import rere.sasl.scram.cache.SaltedPasswordCache
import rere.sasl.scram.crypto.ScramAuthMechanism
import rere.sasl.scram.crypto.entropy.EntropySource
/** Unit test for the [[SCRAMClient]] companion factory. */
class SCRAMClientTest extends WordSpec with Matchers with MockFactory {

  "SCRAMClient object" should {
    "create instance of scram client using `apply` method" in {
      // Collaborators are mocked: only the wiring performed by `apply` matters here.
      val authMechanism = mock[ScramAuthMechanism]
      val entropySource = mock[EntropySource]
      val cache = mock[SaltedPasswordCache]

      // The factory should start the SCRAM exchange at the client-first step.
      SCRAMClient(authMechanism, entropySource, cache) shouldBe an[ClientFirstStep]
    }
  }
}
| pbaun/rere | modules/sasl/src/test/scala/rere/sasl/scram/client/SCRAMClientTest.scala | Scala | apache-2.0 | 676 |
package jp.kenichi.pdf
import java.io.InputStream
import java.util.zip.Inflater
/** 7.4 Filters of PDF32000_2008.pdf */
trait PdfFilter {
  /** Wraps `in` in a stream that yields the decoded (de-filtered) bytes. */
  def decode(in: InputStream): InputStream
}
object PdfFilter {

  /**
   * Looks up the decoder for a PDF stream filter by name.
   * NOTE: only FlateDecode is implemented; any other name throws a MatchError.
   */
  def get(name: String) = name match {
    case "FlateDecode" => flateDecoder
  }

  /** Decoder for /FlateDecode (zlib/deflate) streams. */
  lazy val flateDecoder = new PdfFilter {
    // JDK-8: InflaterInputStream throws EOFException when there is no Z_STREAM_END, which is not required
    // https://issues.apache.org/jira/browse/PDFBOX-1232
    def decode(in: InputStream) = new InputStream {
      protected val inflater = new Inflater
      import inflater.{inflate, needsInput, setInput, finished}

      // Raw (deflated) bytes pulled from `in`, and the current window of inflated output.
      protected val encoded = new Array[Byte](1024)
      protected val decoded = new Array[Byte](1024)
      protected var decodedLen = 0    // number of valid bytes in `decoded`
      protected var decodedOffset = 0 // next byte of `decoded` to hand out

      override def read: Int = {
        // BUG FIX: a single inflate() call may legitimately produce 0 bytes
        // (e.g. when the input fed so far only covers the zlib header). The
        // previous code then read a stale byte from decoded(0). Loop until at
        // least one byte is available or the stream is finished.
        while (decodedOffset == decodedLen) {
          if (finished)
            return -1
          if (needsInput) {
            // the passed input won't be copied
            // http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/8u40-b25/java/util/zip/Inflater.java#Inflater.setInput%28byte%5B%5D%2Cint%2Cint%29
            val encodedLen = in.read(encoded)
            //println(s"encodedLen = $encodedLen")
            if (encodedLen == -1)
              throw new PdfParseException("no more inflater input")
            setInput(encoded, 0, encodedLen)
          }
          decodedLen = inflate(decoded)
          //println(s"decodedLen = $decodedLen")
          decodedOffset = 0
        }
        val ch = decoded(decodedOffset)
        //println(f"$decodedOffset%2d: $ch%02x " + (if (ch >= 0x20 && ch <= 0x7e) ch.toChar else '.'))
        decodedOffset += 1
        ch & 0xff
      }
    }
  }
}
| ken1ma/pades-scala-js | shared/src/main/scala/jp.kenichi/pdf/PdfFilter.scala | Scala | apache-2.0 | 1,635 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbon.flink
import java.io.{File, InputStreamReader}
import java.util
import java.util.{Base64, Collections, Properties}
import com.google.gson.Gson
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.statusmanager.StageInput
import org.apache.carbondata.core.util.path.CarbonTablePath
import org.apache.flink.api.common.restartstrategy.RestartStrategies
import org.apache.flink.api.java.functions.KeySelector
import org.apache.flink.core.fs.Path
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink
import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest
import scala.collection.JavaConverters._
import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema
class TestCarbonPartitionWriter extends QueryTest {
val tableName = "test_flink_partition"
test("Writing flink data to local partition carbon table") {
sql(s"DROP TABLE IF EXISTS $tableName").collect()
sql(
s"""
| CREATE TABLE $tableName (stringField string, intField int, shortField short)
| STORED AS carbondata
| PARTITIONED BY (hour_ string, date_ string)
| TBLPROPERTIES ('SORT_COLUMNS'='hour_,date_,stringField', 'SORT_SCOPE'='GLOBAL_SORT')
""".stripMargin
).collect()
val rootPath = System.getProperty("user.dir") + "/target/test-classes"
val dataTempPath = rootPath + "/data/temp/"
try {
val tablePath = storeLocation + "/" + tableName + "/"
val writerProperties = newWriterProperties(dataTempPath, storeLocation)
val carbonProperties = newCarbonProperties(storeLocation)
val environment = StreamExecutionEnvironment.getExecutionEnvironment
environment.setParallelism(6)
environment.enableCheckpointing(2000L)
environment.setRestartStrategy(RestartStrategies.noRestart)
val dataCount = 1000
val source = new TestSource(dataCount) {
@throws[InterruptedException]
override def get(index: Int): Array[AnyRef] = {
val data = new Array[AnyRef](7)
data(0) = "test" + index
data(1) = index.asInstanceOf[AnyRef]
data(2) = 12345.asInstanceOf[AnyRef]
data(3) = Integer.toString(TestSource.randomCache.get().nextInt(24))
data(4) = "20191218"
data
}
@throws[InterruptedException]
override def onFinish(): Unit = {
Thread.sleep(5000L)
}
}
val stream = environment.addSource(source)
val factory = CarbonWriterFactory.builder("Local").build(
"default",
tableName,
tablePath,
new Properties,
writerProperties,
carbonProperties
)
val streamSink = StreamingFileSink.forBulkFormat(new Path(ProxyFileSystem.DEFAULT_URI), factory).build
stream.keyBy(new KeySelector[Array[AnyRef], AnyRef] {
override def getKey(value: Array[AnyRef]): AnyRef = value(3) // return hour_
}).addSink(streamSink)
try environment.execute
catch {
case exception: Exception =>
// TODO
throw new UnsupportedOperationException(exception)
}
assertResult(false)(FileFactory
.getCarbonFile(CarbonTablePath.getStageDir(tablePath)).listFiles().isEmpty)
sql(s"INSERT INTO $tableName STAGE")
checkAnswer(sql(s"SELECT count(1) FROM $tableName"), Seq(Row(1000)))
} finally {
sql(s"DROP TABLE IF EXISTS $tableName").collect()
}
}
test("Test complex type") {
sql(s"DROP TABLE IF EXISTS $tableName").collect()
sql(
s"""
| CREATE TABLE $tableName (stringField string, intField int, shortField short,
| structField struct<value1:string,value2:int,value3:int>, binaryField struct<value1:binary>)
| STORED AS carbondata
| PARTITIONED BY (hour_ string, date_ string)
| TBLPROPERTIES ('SORT_COLUMNS'='hour_,date_,stringField', 'SORT_SCOPE'='GLOBAL_SORT')
""".stripMargin
).collect()
val rootPath = System.getProperty("user.dir") + "/target/test-classes"
val dataTempPath = rootPath + "/data/temp/"
try {
val tablePath = storeLocation + "/" + tableName + "/"
val writerProperties = newWriterProperties(dataTempPath, storeLocation)
val carbonProperties = newCarbonProperties(storeLocation)
val environment = StreamExecutionEnvironment.getExecutionEnvironment
environment.setParallelism(6)
environment.enableCheckpointing(2000L)
environment.setRestartStrategy(RestartStrategies.noRestart)
val dataCount = 1000
val source = new TestSource(dataCount) {
@throws[InterruptedException]
override def get(index: Int): Array[AnyRef] = {
val data = new Array[AnyRef](7)
data(0) = "test" + index
data(1) = index.asInstanceOf[AnyRef]
data(2) = 12345.asInstanceOf[AnyRef]
data(3) = "test\\0011\\0012"
data(4) = Base64.getEncoder.encodeToString(Array[Byte](2, 3, 4))
data(5) = Integer.toString(TestSource.randomCache.get().nextInt(24))
data(6) = "20191218"
data
}
@throws[InterruptedException]
override def onFinish(): Unit = {
Thread.sleep(5000L)
}
}
val stream = environment.addSource(source)
val factory = CarbonWriterFactory.builder("Local").build(
"default",
tableName,
tablePath,
new Properties,
writerProperties,
carbonProperties
)
val streamSink = StreamingFileSink.forBulkFormat(new Path(ProxyFileSystem.DEFAULT_URI), factory).build
stream.keyBy(new KeySelector[Array[AnyRef], AnyRef] {
override def getKey(value: Array[AnyRef]): AnyRef = value(3) // return hour_
}).addSink(streamSink)
try environment.execute
catch {
case exception: Exception =>
// TODO
throw new UnsupportedOperationException(exception)
}
assertResult(false)(FileFactory
.getCarbonFile(CarbonTablePath.getStageDir(tablePath)).listFiles().isEmpty)
sql(s"INSERT INTO $tableName STAGE")
checkAnswer(sql(s"SELECT count(1) FROM $tableName"), Seq(Row(1000)))
val rows = sql(s"SELECT * FROM $tableName limit 1").collect()
assertResult(1)(rows.length)
assertResult(Array[Byte](2, 3, 4))(rows(0).get(rows(0).fieldIndex("binaryfield")).asInstanceOf[GenericRowWithSchema](0))
} finally {
sql(s"DROP TABLE IF EXISTS $tableName").collect()
}
}
private def newWriterProperties(
dataTempPath: String,
storeLocation: String) = {
val properties = new Properties
properties.setProperty(CarbonLocalProperty.DATA_TEMP_PATH, dataTempPath)
properties
}
private def newCarbonProperties(storeLocation: String) = {
val properties = new Properties
properties.setProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
properties.setProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
properties.setProperty(CarbonCommonConstants.STORE_LOCATION, storeLocation)
properties.setProperty(CarbonCommonConstants.UNSAFE_WORKING_MEMORY_IN_MB, "1024")
properties.setProperty("binary_decoder", "base64")
properties
}
private def collectStageInputs(loadDetailsDir: String): Seq[StageInput] = {
val dir = FileFactory.getCarbonFile(loadDetailsDir)
val stageFiles = if (dir.exists()) {
val allFiles = dir.listFiles()
val successFiles = allFiles.filter { file =>
file.getName.endsWith(CarbonTablePath.SUCCESS_FILE_SUBFIX)
}.map { file =>
(file.getName.substring(0, file.getName.indexOf(".")), file)
}.toMap
allFiles.filter { file =>
!file.getName.endsWith(CarbonTablePath.SUCCESS_FILE_SUBFIX)
}.filter { file =>
successFiles.contains(file.getName)
}.map { file =>
(file, successFiles(file.getName))
}
} else {
Array.empty
}
val output = Collections.synchronizedList(new util.ArrayList[StageInput]())
val gson = new Gson()
stageFiles.map { stage =>
val filePath = stage._1.getAbsolutePath
val stream = FileFactory.getDataInputStream(filePath)
try {
val stageInput = gson.fromJson(new InputStreamReader(stream), classOf[StageInput])
output.add(stageInput)
} finally {
stream.close()
}
}
output.asScala
}
private def delDir(dir: File): Boolean = {
if (dir.isDirectory) {
val children = dir.list
if (children != null) {
val length = children.length
var i = 0
while (i < length) {
if (!delDir(new File(dir, children(i)))) {
return false
}
i += 1
}
}
}
dir.delete()
}
}
| jackylk/incubator-carbondata | integration/flink/src/test/scala/org/apache/carbon/flink/TestCarbonPartitionWriter.scala | Scala | apache-2.0 | 9,897 |
package mesosphere.marathon.core.task.update.impl.steps
import akka.event.EventStream
import com.google.inject.Inject
import mesosphere.marathon.core.base.Clock
import mesosphere.marathon.core.task.bus.TaskChangeObservables.TaskChanged
import mesosphere.marathon.core.task.update.TaskUpdateStep
import mesosphere.marathon.core.task.{ EffectiveTaskStateChange, Task, TaskStateChange, TaskStateOp }
import mesosphere.marathon.core.event.MesosStatusUpdateEvent
import mesosphere.marathon.core.task.state.MarathonTaskStatus
import mesosphere.marathon.state.Timestamp
import org.apache.mesos.Protos.TaskStatus
import org.slf4j.LoggerFactory
import scala.concurrent.Future
import scala.collection.immutable.Seq
/**
 * Post this update to the internal event stream.
 */
class PostToEventStreamStepImpl @Inject() (eventBus: EventStream, clock: Clock) extends TaskUpdateStep {
  private[this] val log = LoggerFactory.getLogger(getClass)

  override def name: String = "postTaskStatusEvent"

  /**
   * Publishes a [[MesosStatusUpdateEvent]] for every effective task change; anything
   * not matched below is treated as a noop and only logged. Always returns an
   * already-completed successful Future.
   */
  override def processUpdate(taskChanged: TaskChanged): Future[_] = {
    import TaskStateOp.MesosUpdate
    val taskState = inferTaskState(taskChanged)

    // Cases are matched in order: a MesosUpdate-triggered effective change takes the
    // first case even if the state change is also an Update/Expunge.
    taskChanged match {
      // the task was updated or expunged due to a MesosStatusUpdate
      // In this case, we're interested in the mesosStatus
      case TaskChanged(MesosUpdate(oldTask, taskStatus, mesosStatus, now), EffectiveTaskStateChange(task)) =>
        postEvent(clock.now(), taskState, Some(mesosStatus), task, inferVersion(task, Some(oldTask)))

      case TaskChanged(_, TaskStateChange.Update(newState, oldState)) =>
        postEvent(clock.now(), taskState, newState.mesosStatus, newState, inferVersion(newState, oldState))

      case TaskChanged(_, TaskStateChange.Expunge(task)) =>
        postEvent(clock.now(), taskState, task.mesosStatus, task, inferVersion(task, None))

      case _ =>
        log.debug("Ignoring noop for {}", taskChanged.taskId)
    }

    Future.successful(())
  }

  // inconvenient for now because not all tasks have a version
  // Falls back to the old task's version, then to Timestamp(0), when the new task has none.
  private[this] def inferVersion(newTask: Task, oldTask: Option[Task]): Timestamp = {
    newTask.version.getOrElse(oldTask.fold(Timestamp(0))(_.version.getOrElse(Timestamp(0))))
  }

  /** Derives the status to publish: the MesosUpdate's status wins over the state change's. */
  private[this] def inferTaskState(taskChanged: TaskChanged): MarathonTaskStatus = {
    (taskChanged.stateOp, taskChanged.stateChange) match {
      case (TaskStateOp.MesosUpdate(_, status, mesosStatus, _), _) => status
      case (_, TaskStateChange.Update(newState, maybeOldState)) => newState.status.taskStatus
      // TODO: the task status is not updated in this case, so we "assume" KILLED here
      case (_, TaskStateChange.Expunge(task)) => MarathonTaskStatus.Killed
      case _ => throw new IllegalStateException(s"received unexpected $taskChanged")
    }
  }

  // NOTE(review): this extractor is not referenced anywhere in this class — presumably
  // leftover or used elsewhere; confirm before removing.
  object Terminal {
    def unapply(status: MarathonTaskStatus): Option[MarathonTaskStatus] = status match {
      case _: MarathonTaskStatus.Terminal => Some(status)
      case _: Any => None
    }
  }

  /** Builds and publishes the event for the given task snapshot. */
  private[this] def postEvent(
    timestamp: Timestamp,
    taskStatus: MarathonTaskStatus,
    maybeStatus: Option[TaskStatus],
    task: Task,
    version: Timestamp): Unit = {

    val taskId = task.taskId
    // "n/a" when no Mesos status accompanies the change
    val slaveId = maybeStatus.fold("n/a")(_.getSlaveId.getValue)
    val message = maybeStatus.fold("")(status => if (status.hasMessage) status.getMessage else "")
    val host = task.agentInfo.host
    val ipAddresses = maybeStatus.flatMap(status => Task.MesosStatus.ipAddresses(status))
    val ports = task.launched.fold(Seq.empty[Int])(_.hostPorts)

    log.info("Sending event notification for {} of app [{}]: {}", taskId, taskId.runSpecId, taskStatus.toMesosStateName)
    eventBus.publish(
      MesosStatusUpdateEvent(
        slaveId,
        taskId,
        // TODO if we posted the MarathonTaskStatus.toString, consumers would not get "TASK_STAGING", but "Staging"
        taskStatus.toMesosStateName,
        message,
        appId = taskId.runSpecId,
        host,
        ipAddresses,
        ports = ports,
        version = version.toString,
        timestamp = timestamp.toString
      )
    )
  }
}
| timcharper/marathon | src/main/scala/mesosphere/marathon/core/task/update/impl/steps/PostToEventStreamStepImpl.scala | Scala | apache-2.0 | 4,112 |
package util.uima
import org.apache.uima.jcas.cas.{EmptyFSList, FSList, NonEmptyFSList, TOP}
import scala.collection.mutable.ListBuffer
/**
 * @author K.Sakamoto
 *         Created on 2016/09/25
 */
object FSListUtils {
  /**
   * Implicit conversion so that [[FSListUtils.toSeq]] can be called directly on a
   * UIMA [[FSList]].
   *
   * The previous implementation wrapped construction in a try/catch that only
   * rethrew IndexOutOfBoundsException unchanged; that dead code is removed.
   *
   * @param repr the feature-structure list to wrap (may be null or empty)
   * @return a [[FSListUtils]] wrapper over `repr`
   */
  implicit def fsListToFSListUtils[T <: TOP](repr: FSList): FSListUtils[TOP] = {
    new FSListUtils(repr)
  }
}
/**
 * Converts a UIMA [[FSList]] (a cons-style linked list of feature structures) into
 * a Scala `Seq`.
 *
 * @author K.Sakamoto
 * @param repr fs list (may be null or empty)
 * @tparam T element type each head is cast to
 */
class FSListUtils[T <: TOP](repr: FSList) {
  /**
   * Walks the list and collects every head, in order.
   *
   * Fix: the loop now tests `isInstanceOf[NonEmptyFSList]` directly. The previous
   * condition `!tail.isInstanceOf[EmptyFSList] || tail.isInstanceOf[NonEmptyFSList]`
   * behaved identically for well-formed lists but kept looping on a null (or foreign)
   * tail, crashing with a NullPointerException/ClassCastException.
   *
   * @return the list elements; `Nil` for a null or empty list
   */
  def toSeq: Seq[T] = {
    if (repr == null || repr.isInstanceOf[EmptyFSList]) {
      Nil
    } else {
      val buffer: ListBuffer[T] = ListBuffer.empty[T]
      var tail: FSList = repr
      while (tail.isInstanceOf[NonEmptyFSList]) {
        val node = tail.asInstanceOf[NonEmptyFSList]
        buffer += node.getHead.asInstanceOf[T]
        tail = node.getTail
      }
      buffer.result()
    }
  }
}
| ktr-skmt/FelisCatusZero | src/main/scala/util/uima/FSListUtils.scala | Scala | apache-2.0 | 1,108 |
package org.infinispan.spark.suites
import org.infinispan.client.hotrod.RemoteCache
import org.infinispan.spark._
import org.infinispan.spark.config.ConnectorConfiguration
import org.infinispan.spark.domain.Runner
import org.infinispan.spark.test.TestingUtil._
import org.infinispan.spark.test._
import org.scalatest._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.language.postfixOps
@DoNotDiscover
// Verifies that Infinispan-backed RDD reads and writes survive the loss of one
// cluster member mid-operation. Both tests are currently `ignore`d.
class RDDFailOverSuite extends FunSuite with Matchers with Spark with MultipleServers with FailOver {

  // Number of cache entries written by each scenario.
  val NumEntries = 10000

  // Two server endpoints so that killing one still leaves a reachable member;
  // small write batches so a server can be killed while writes are in flight.
  override def getConfiguration: ConnectorConfiguration = {
    super.getConfiguration.setServerList("127.0.0.1:11222;127.0.0.1:12222")
      .setWriteBatchSize(5)
  }

  ignore("RDD read failover") {
    val cache = getRemoteCache.asInstanceOf[RemoteCache[Int, Runner]]
    cache.clear()
    (0 until NumEntries).foreach(id => cache.put(id, new Runner(s"name$id", true, id * 10, 20)))
    val infinispanRDD = createInfinispanRDD[Int, String]
    val ispnIter = infinispanRDD.toLocalIterator
    var count = 0
    // Consume roughly one server's share of the data, then kill a server and
    // verify the iterator still yields every remaining entry.
    for (_ <- 1 to NumEntries / Cluster.getClusterSize) {
      ispnIter.next()
      count += 1
    }
    Cluster.failServer(0)
    while (ispnIter.hasNext) {
      ispnIter.next()
      count += 1
    }
    count shouldBe NumEntries
  }

  ignore("RDD write failover (Re-test with 10.1.0.Final)") {
    val cache = getRemoteCache.asInstanceOf[RemoteCache[Int, Runner]]
    cache.clear()
    val range1 = 1 to NumEntries
    val entities1 = for (num <- range1) yield new Runner(s"name$num", true, num * 10, 20)
    val rdd = sc.parallelize(range1.zip(entities1))
    // Start the write asynchronously so a server can be killed while it runs.
    val writeRDD = Future(rdd.writeToInfinispan(getConfiguration))
    waitForCondition({ () =>
      cache.size() > 0 //make sure we are already writing into the cache
    }, 2 seconds)
    Cluster.failServer(0)
    Await.ready(writeRDD, 30 second)
    // Every entry must have been written despite the failover.
    cache.size() shouldBe NumEntries
    cache.get(350).getName shouldBe "name350"
  }
}
| infinispan/infinispan-spark | src/test/scala/org/infinispan/spark/suites/RDDFailOverSuite.scala | Scala | apache-2.0 | 2,129 |
/*
Copyright (c) 2016, Rice University
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Rice University
nor the names of its contributors may be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.apache.spark.rdd.cl
import java.util.Map
import java.util.HashMap
import java.util.LinkedList
import java.util.ArrayList
import java.nio.file.{Paths, Files}
import java.nio.charset.StandardCharsets
import org.apache.spark.rdd.cl.tests._
import com.amd.aparapi.internal.model.ClassModel
import com.amd.aparapi.internal.model.HardCodedClassModels
import com.amd.aparapi.internal.model.HardCodedClassModels.ShouldNotCallMatcher
import com.amd.aparapi.internal.model.Entrypoint
import com.amd.aparapi.internal.writer.KernelWriter
import com.amd.aparapi.internal.writer.BlockWriter
import com.amd.aparapi.internal.writer.KernelWriter.WriterAndKernel
import com.amd.aparapi.internal.writer.ScalaArrayParameter
/**
 * Standalone driver that checks SWAT's kernel code generation: every registered test
 * supplies a lambda and a reference kernel file, and the generated OpenCL/CUDA text
 * must match the reference byte-for-byte. Any failure exits the JVM with status 1.
 */
object CodeGenTests {

  // Root directory of the reference kernels; requires SWAT_HOME to be set.
  val testsPath : String = sys.env("SWAT_HOME") +
      "/swat/src/test/scala/org/apache/spark/rdd/cl/tests/"

  // Synchronous (input -> output) code generation tests, run in registration order.
  val syncTests : ArrayList[SyncCodeGenTest[_, _]] =
      new ArrayList[SyncCodeGenTest[_, _]]()
  syncTests.add(PrimitiveInputPrimitiveOutputTest)
  syncTests.add(PrimitiveInputObjectOutputTest)
  syncTests.add(ObjectInputObjectOutputTest)
  syncTests.add(ReferenceExternalArrayTest)
  syncTests.add(ReferenceExternalObjectArrayTest)
  syncTests.add(ReferenceExternalScalarTest)
  syncTests.add(ExternalFunctionTest)
  syncTests.add(Tuple2InputTest)
  syncTests.add(Tuple2ObjectInputTest)
  syncTests.add(Tuple2ObjectInputDirectTest)
  syncTests.add(Tuple2InputPassToFuncTest)
  syncTests.add(Tuple2ObjectInputPassToFuncTest)
  syncTests.add(Tuple2ObjectInputPassDirectlyToFuncTest)
  syncTests.add(Tuple2OutputTest)
  syncTests.add(Tuple2ObjectOutputTest)
  syncTests.add(Tuple2InputOutputTest)
  syncTests.add(KMeansTest)
  syncTests.add(DenseVectorInputTest)
  syncTests.add(SparseVectorInputTest)
  syncTests.add(SparseVectorAssignTest)
  syncTests.add(ArrayAllocTest)
  syncTests.add(DenseVectorOutputTest)
  syncTests.add(SparseVectorOutputTest)
  syncTests.add(PrimitiveArrayBroadcastTest)
  syncTests.add(DenseVectorBroadcastTest)
  syncTests.add(SparseVectorBroadcastTest)
  syncTests.add(Tuple2DenseInputTest)
  syncTests.add(ClassExternalFunctionTest)
  syncTests.add(Tuple2DenseOutputTest)
  syncTests.add(Tuple2BroadcastTest)
  syncTests.add(Tuple2ObjectBroadcastTest)
  syncTests.add(PrimitiveArrayInputTest)
  syncTests.add(ArrayOutputTest)
  syncTests.add(ByteArrayInputTest)
  // syncTests.add(ExtensionTest)
  // syncTests.add(ASPLOSAES)
  // syncTests.add(ASPLOSBlackScholes)
  // syncTests.add(ASPLOSPageRank)

  // Asynchronous tests (no input parameters extracted from the method descriptor).
  val asyncTests : ArrayList[AsyncCodeGenTest[_]] =
      new ArrayList[AsyncCodeGenTest[_]]()
  asyncTests.add(AsyncMapTest)
  asyncTests.add(AsyncPrimitiveArrayInputTest)
  asyncTests.add(AsyncArrayOutputTest)
  asyncTests.add(AsyncByteArrayInputTest)

  /**
   * Generates a kernel for `lambda` and compares it with `expectedKernel`, or —
   * when `expectedException` is non-null — checks that entrypoint creation fails
   * with exactly that message. Writes the generated and reference kernels to the
   * local files 'generated' and 'correct', and exits the JVM on any mismatch.
   */
  def verifyCodeGen(lambda : java.lang.Object, expectedKernel : String,
      expectedNumArguments : Int, testName : String, expectedException : String,
      test : CodeGenTest[_], devId : Int, isAsync : Boolean) {
    val classModel : ClassModel = ClassModel.createClassModel(lambda.getClass,
        null, new ShouldNotCallMatcher())
    val method = classModel.getPrimitiveApplyMethod
    val descriptor : String = method.getDescriptor

    // Async tests have no input parameters; only the return object is collected.
    val params : LinkedList[ScalaArrayParameter] = new LinkedList[ScalaArrayParameter]
    if (!isAsync) {
      params.addAll(CodeGenUtil.getParamObjsFromMethodDescriptor(descriptor, expectedNumArguments))
    }
    params.add(CodeGenUtil.getReturnObjsFromMethodDescriptor(descriptor))

    test.complete(params)

    val hardCodedClassModels : HardCodedClassModels = test.init

    val dev_ctx : Long = OpenCLBridge.getActualDeviceContext(devId, 1, 1024, 0.2, false)
    if (dev_ctx == 0) {
      System.err.println("Invalid device")
      System.exit(1)
    }
    val config = CodeGenUtil.createCodeGenConfig(dev_ctx)

    // Entrypoint creation may legitimately throw when the test expects an exception.
    var gotExpectedException = false
    var entryPoint : Entrypoint = null;
    try {
      entryPoint = classModel.getEntrypoint("apply", descriptor,
          lambda, params, hardCodedClassModels, config)
    } catch {
      case e: Exception => {
        if (expectedException == null) {
          throw e
        } else if (!e.getMessage().equals(expectedException)) {
          throw new RuntimeException("Expected exception \\"" +
              expectedException + "\\" but got \\"" + e.getMessage() +
              "\\"")
        } else {
          gotExpectedException = true
        }
      }
    }

    if (expectedException != null && !gotExpectedException) {
      System.err.println(testName + " FAILED")
      System.err.println("Expected exception \\"" + expectedException + "\\"")
      System.exit(1)
    }

    if (expectedException == null) {
      val writerAndKernel : WriterAndKernel = KernelWriter.writeToString(
          entryPoint, params, isAsync)
      val openCL : String = writerAndKernel.kernel

      // Dump the generated kernel for offline diffing against the reference.
      Files.write(Paths.get("generated"), openCL.getBytes(StandardCharsets.UTF_8))

      val ctx : Long = OpenCLBridge.createSwatContext(lambda.getClass.getName,
          openCL, dev_ctx, 0, entryPoint.requiresDoublePragma,
          entryPoint.requiresHeap, 1);

      Files.write(Paths.get("correct"), expectedKernel.getBytes(StandardCharsets.UTF_8))

      // Exact textual match is required between generated and reference kernels.
      if (!openCL.equals(expectedKernel)) {
        System.err.println(testName + " FAILED")
        System.err.println("Kernel mismatch, generated output in 'generated', correct output in 'correct'")
        System.err.println("Reference file is at " + getReferenceOutputPath(test.getClass))
        System.err.println("Use 'vimdiff correct generated' to see the difference")
        System.exit(1)
      }
    }
    System.err.println(testName + " PASSED")
  }

  /**
   * Path of the reference kernel for a test class: testsPath/<host>/<target>/<Test>.kernel.
   * Long host names are trimmed to their last three dot-separated components; the
   * trailing '$' of the Scala object class name is stripped.
   */
  def getReferenceOutputPath(cls : Class[_]) : String = {
    val className : String = cls.getSimpleName
    var hostName : String = java.net.InetAddress.getLocalHost.getHostName
    val tokens : Array[String] = hostName.split('.')
    if (tokens.length > 3) {
      hostName = tokens(tokens.length - 3) + "." + tokens(tokens.length - 2) + "." +
          tokens(tokens.length - 1)
    }

    CodeGenTests.testsPath + "/" + hostName + "/" +
        (if (BlockWriter.emitOcl) "opencl" else "cuda") + "/" +
        className.substring(0, className.length - 1) + ".kernel"
  }

  /**
   * Entry point. Flags: -d <device id>, -t <single test name>, -c <true|false>
   * (generate OpenCL vs CUDA), -h (usage). Verifies that the requested code
   * generation target matches the native bridge's compilation target, then runs
   * all (or the selected) sync and async tests.
   */
  def main(args : Array[String]) {
    var testName : String = null
    var devId : Int = 0
    var runOcl : Boolean = true

    var i = 0
    while (i < args.length) {
      if (args(i) == "-d") {
        devId = args(i + 1).toInt
        i += 1
      } else if (args(i) == "-t") {
        testName = args(i + 1)
        i += 1
      } else if (args(i) == "-c") {
        runOcl = args(i + 1).toBoolean
        i += 1
      } else if (args(i) == "-h") {
        System.err.println("usage: scala CodeGenTests [-d devid] [-t testname] [-c use-ocl]")
        System.exit(1)
      } else {
        System.err.println("Unknown command line argument \\"" + args(i) + "\\"")
        System.exit(1)
      }
      i += 1
    }

    BlockWriter.emitOcl = runOcl
    // The code generation target must agree with how the native bridge was built.
    if (runOcl) {
      if (OpenCLBridge.usingCuda() > 0) {
        System.err.println("Mismatch in the code generation target (OCL) and the runtime compilation target (CUDA)")
        System.exit(1);
      }
    } else {
      if (OpenCLBridge.usingCuda() == 0) {
        System.err.println("Mismatch in the code generation target (CUDA) and the runtime compilation target (OCL)")
        System.exit(1);
      }
    }

    System.setProperty("com.amd.aparapi.enable.NEW", "true");

    for (i <- 0 until syncTests.size) {
      val test : SyncCodeGenTest[_, _] = syncTests.get(i)
      // Scala objects have a trailing '$' on their runtime class name.
      if (testName == null || test.getClass.getSimpleName.equals(testName + "$")) {
        val expectedOutput : String = try {
          test.getExpectedKernel
        } catch {
          case m: MissingTestException => {
            System.err.println(test.getClass.getSimpleName + " FAILED")
            System.err.println("Missing expected kernel output at " + m.getMessage)
            System.exit(1)
            ""
          }
        }
        verifyCodeGen(test.getFunction, expectedOutput,
            test.getExpectedNumInputs, test.getClass.getSimpleName,
            test.getExpectedException, test, devId, false)
      }
    }

    for (i <- 0 until asyncTests.size) {
      val test : AsyncCodeGenTest[_] = asyncTests.get(i)
      if (testName == null || test.getClass.getSimpleName.equals(testName + "$")) {
        val expectedOutput : String = try {
          test.getExpectedKernel
        } catch {
          case m: MissingTestException => {
            System.err.println(test.getClass.getSimpleName + " FAILED")
            System.err.println("Missing expected kernel output at " + m.getMessage)
            System.exit(1)
            ""
          }
        }
        verifyCodeGen(test.getFunction, expectedOutput,
            test.getExpectedNumInputs, test.getClass.getSimpleName,
            test.getExpectedException, test, devId, true)
      }
    }
  }
}
| agrippa/spark-swat | swat/src/test/scala/org/apache/spark/rdd/cl/CodeGenTests.scala | Scala | bsd-3-clause | 10,545 |
package com.example
import akka.actor.Actor
import spray.routing._
import spray.http._
import spray.json.DefaultJsonProtocol
import spray.httpx.unmarshalling._
import spray.httpx.marshalling._
import spray.httpx.SprayJsonSupport._
import MediaTypes._
// we don't implement our route structure directly in the service actor because
// we want to be able to test it independently, without having to spin up an actor
/** Spray service actor: delegates all request handling to the route defined in [[MyService]]. */
class MyServiceActor extends Actor with MyService {

  // the HttpService trait defines only one abstract member, which
  // connects the services environment to the enclosing actor or test
  def actorRefFactory = context

  // this actor only runs our route, but you could add
  // other things here, like request stream processing
  // or timeout handling
  def receive = runRoute(myRoute)
}
/** spray-json protocol: implicit JSON format for the three-field `DeviceType` case class. */
object MyJsonProtocol extends DefaultJsonProtocol {
  implicit val DeviceTypeFormat = jsonFormat3(DeviceType)
}
import MyJsonProtocol._
// this trait defines our service behavior independently from the service actor
trait MyService extends HttpService {

  // NOTE(review): `domain.get` is evaluated once at trait initialization, so every
  // request serves the same DeviceType snapshot — confirm per-request freshness is
  // not expected.
  val domain = new Domain
  val deviceType = domain.get

  // GET / -> the device type serialized as JSON.
  val myRoute =
    path("") {
      get {
        respondWithMediaType(`application/json`) {
          complete(marshal(deviceType))
        }
      }
    }
}
| rohshall/sprayreadings | src/main/scala/com/example/MyService.scala | Scala | mit | 1,284 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.BooleanType
// Tests PredicateHelper.conjunctiveNormalForm: conversion of boolean predicates to
// conjunctive normal form, grouping of conjuncts by table qualifier, and the
// MAX_CNF_NODE_COUNT size cutoff.
class ConjunctiveNormalFormPredicateSuite extends SparkFunSuite with PredicateHelper with PlanTest {
  // Boolean attributes a..j, each with a distinct expression id and its own table qualifier.
  private val a = AttributeReference("A", BooleanType)(exprId = ExprId(1)).withQualifier(Seq("ta"))
  private val b = AttributeReference("B", BooleanType)(exprId = ExprId(2)).withQualifier(Seq("tb"))
  private val c = AttributeReference("C", BooleanType)(exprId = ExprId(3)).withQualifier(Seq("tc"))
  private val d = AttributeReference("D", BooleanType)(exprId = ExprId(4)).withQualifier(Seq("td"))
  private val e = AttributeReference("E", BooleanType)(exprId = ExprId(5)).withQualifier(Seq("te"))
  private val f = AttributeReference("F", BooleanType)(exprId = ExprId(6)).withQualifier(Seq("tf"))
  private val g = AttributeReference("G", BooleanType)(exprId = ExprId(7)).withQualifier(Seq("tg"))
  private val h = AttributeReference("H", BooleanType)(exprId = ExprId(8)).withQualifier(Seq("th"))
  private val i = AttributeReference("I", BooleanType)(exprId = ExprId(9)).withQualifier(Seq("ti"))
  private val j = AttributeReference("J", BooleanType)(exprId = ExprId(10)).withQualifier(Seq("tj"))
  // a1/a2 share qualifier "ta" with a; b1 shares "tb" with b (used to test
  // same-qualifier aggregation below).
  private val a1 =
    AttributeReference("a1", BooleanType)(exprId = ExprId(11)).withQualifier(Seq("ta"))
  private val a2 =
    AttributeReference("a2", BooleanType)(exprId = ExprId(12)).withQualifier(Seq("ta"))
  // NOTE(review): b1 reuses ExprId(12) (same as a2) — presumably a typo; confirm it
  // does not affect these semanticEquals-based checks.
  private val b1 =
    AttributeReference("b1", BooleanType)(exprId = ExprId(12)).withQualifier(Seq("tb"))

  // Check CNF conversion with expected expression, assuming the input has non-empty result.
  private def checkCondition(input: Expression, expected: Expression): Unit = {
    val cnf = conjunctiveNormalForm(input)
    assert(cnf.nonEmpty)
    val result = cnf.reduceLeft(And)
    assert(result.semanticEquals(expected))
  }

  test("Keep non-predicated expressions") {
    checkCondition(a, a)
    checkCondition(Literal(1), Literal(1))
  }

  test("Conversion of Not") {
    checkCondition(!a, !a)
    checkCondition(!(!a), a)
    checkCondition(!(!(a && b)), a && b)
    checkCondition(!(!(a || b)), a || b)
    // De Morgan's laws push negation inward.
    checkCondition(!(a || b), !a && !b)
    checkCondition(!(a && b), !a || !b)
  }

  test("Conversion of And") {
    checkCondition(a && b, a && b)
    checkCondition(a && b && c, a && b && c)
    checkCondition(a && (b || c), a && (b || c))
    checkCondition((a || b) && c, (a || b) && c)
    checkCondition(a && b && c && d, a && b && c && d)
  }

  test("Conversion of Or") {
    checkCondition(a || b, a || b)
    checkCondition(a || b || c, a || b || c)
    checkCondition(a || b || c || d, a || b || c || d)
    // Distribution of Or over And produces the cross product of conjuncts.
    checkCondition((a && b) || c, (a || c) && (b || c))
    checkCondition((a && b) || (c && d), (a || c) && (a || d) && (b || c) && (b || d))
  }

  test("More complex cases") {
    checkCondition(a && !(b || c), a && !b && !c)
    checkCondition((a && b) || !(c && d), (a || !c || !d) && (b || !c || !d))
    checkCondition(a || b || c && d, (a || b || c) && (a || b || d))
    checkCondition(a || (b && c || d), (a || b || d) && (a || c || d))
    checkCondition(a && !(b && c || d && e), a && (!b || !c) && (!d || !e))
    checkCondition(((a && b) || c) || (d || e), (a || c || d || e) && (b || c || d || e))

    checkCondition(
      (a && b && c) || (d && e && f),
      (a || d) && (a || e) && (a || f) && (b || d) && (b || e) && (b || f) &&
        (c || d) && (c || e) && (c || f)
    )
  }

  test("Aggregate predicate of same qualifiers to avoid expanding") {
    // Predicates over the same table (qualifier) stay grouped in one conjunct.
    checkCondition(((a && b && a1) || c), ((a && a1) || c) && (b ||c))
    checkCondition(((a && a1 && b) || c), ((a && a1) || c) && (b ||c))
    checkCondition(((b && d && a && a1) || c), ((a && a1) || c) && (b ||c) && (d || c))
    checkCondition(((b && a2 && d && a && a1) || c), ((a2 && a && a1) || c) && (b ||c) && (d || c))
    checkCondition(((b && d && a && a1 && b1) || c),
      ((a && a1) || c) && ((b && b1) ||c) && (d || c))
    checkCondition((a && a1) || (b && b1), (a && a1) || (b && b1))
    checkCondition((a && a1 && c) || (b && b1), ((a && a1) || (b && b1)) && (c || (b && b1)))
  }

  test("Return Seq.empty when exceeding MAX_CNF_NODE_COUNT") {
    // The following expression contains 36 conjunctive sub-expressions in CNF
    val input = (a && b && c) || (d && e && f) || (g && h && i && j)
    // The following expression contains 9 conjunctive sub-expressions in CNF
    val input2 = (a && b && c) || (d && e && f)
    Seq(8, 9, 10, 35, 36, 37).foreach { maxCount =>
      withSQLConf(SQLConf.MAX_CNF_NODE_COUNT.key -> maxCount.toString) {
        if (maxCount < 36) {
          assert(conjunctiveNormalForm(input).isEmpty)
        } else {
          assert(conjunctiveNormalForm(input).nonEmpty)
        }
        if (maxCount < 9) {
          assert(conjunctiveNormalForm(input2).isEmpty)
        } else {
          assert(conjunctiveNormalForm(input2).nonEmpty)
        }
      }
    }
  }
}
| ConeyLiu/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ConjunctiveNormalFormPredicateSuite.scala | Scala | apache-2.0 | 5,934 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.orbeon.apache.xerces.impl.validation
// @ebruchez: stub to get the code to compile.
/** Minimal stand-in for Xerces' ValidationManager; holds no state. */
class ValidationManager {
  /** Always `false`: this stub never caches DTDs. */
  def isCachedDTD: Boolean = false

  /** No-op reset; explicit `Unit` result type added (the stub has no state to clear). */
  def reset(): Unit = ()
}
| ebruchez/darius-xml.js | xerces/shared/src/main/scala/org/orbeon/apache/xerces/impl/validation/ValidationManager.scala | Scala | apache-2.0 | 983 |
/*
* Copyright (c) 2016 Mashin (http://mashin.io). All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mashin.rich.spark
import io.mashin.rich.spark.RichRDD._
import scala.util.Random
/**
 * Exercises the scanLeft/scanRight RDD extensions from [[RichRDD]] against the
 * Scala collection reference implementations on the same data.
 */
class RichRDDFunctionsSuite extends RichSparkTestSuite {

  sparkTest("Scan Left RDD") { sc =>
    val numPartitions = 4
    val partitionSize = 1000
    val sum = (a: Int, b: Int) => a + b

    // The RDD scanLeft must agree with Array.scanLeft for the same input and seed.
    def verify(input: Array[Int], init: Int): Unit = {
      val scanned = sc.makeRDD(input, numPartitions).scanLeft(0, init, sum)
      scanned.collect() should be (input.scanLeft(init)(sum))
    }

    for (offset <- -10 to 10) {
      verify(Array.fill(numPartitions * partitionSize)(offset + Random.nextInt(10)), offset)
    }
  }

  sparkTest("Scan Right RDD") { sc =>
    val numPartitions = 4
    val partitionSize = 1000
    val sum = (a: Int, b: Int) => a + b

    // The RDD scanRight must agree with Array.scanRight for the same input and seed.
    def verify(input: Array[Int], init: Int): Unit = {
      val scanned = sc.makeRDD(input, numPartitions).scanRight(0, init, sum)
      scanned.collect() should be (input.scanRight(init)(sum))
    }

    for (offset <- -10 to 10) {
      verify(Array.fill(numPartitions * partitionSize)(offset + Random.nextInt(10)), offset)
    }
  }
}
| mashin-io/rich-spark | main/src/test/scala/io/mashin/rich/spark/RichRDDFunctionsSuite.scala | Scala | apache-2.0 | 1,792 |
package ru.pavkin.todoist.api.core.decoder
import cats.Id
import org.scalacheck.Gen
import org.scalatest.{Matchers, FunSuite}
import org.scalatest.prop.{GeneratorDrivenPropertyChecks, Checkers}
import shapeless.test.illTyped
import shapeless.{::, HNil}
import scala.util.Try
// Property-based checks for SingleResponseDecoder / SingleCommandResponseDecoder:
// identity, combine (HList of results), and compose (sequential decoding).
class ResponseDecoderSpec extends FunSuite with Checkers with Matchers with GeneratorDrivenPropertyChecks{

  // Small wrapper type used as a decoding target.
  case class Smth(n: Int)

  // Simple Id-effect decoders; unparsable input falls back to 0 / 0.0.
  val intParser = SingleResponseDecoder.using[Id, String, Int]((s: String) => Try(s.toInt).getOrElse(0))
  val doubleParser = SingleResponseDecoder.using[Id, String, Double]((s: String) => Try(s.toDouble).getOrElse(0.0))
  val intLengthParser = SingleResponseDecoder.using[Id, Int, Long]((s: Int) => s.toString.length.toLong)
  val identityParser = SingleResponseDecoder.using[Id, Boolean, Boolean]((s: Boolean) => s)
  val smthParser = SingleResponseDecoder.using[Id, Int, Smth]((n: Int) => Smth(n))

  // Command decoders: take a command plus a previously decoded base value.
  val smthCommandDecoder = SingleCommandResponseDecoder.using[Id, Smth, Smth, Boolean] {
    (smth: Smth, n: Smth) => smth.n == n.n
  }
  val smthStringLengthDecoder = SingleCommandResponseDecoder.using[Id, String, Smth, String] {
    (command: String, base: Smth) => (base.n + command.length).toString
  }

  test("ResponseDecoder") {
    implicit val p1 = intParser
    implicit val p2 = doubleParser
    // Compile-time checks: MultipleResponseDecoder derivation succeeds for HLists
    // whose element decoders are in implicit scope, in any order...
    implicitly[MultipleResponseDecoder[Id, String, Double :: Int :: HNil]]
    implicitly[MultipleResponseDecoder[Id, String, Int :: Double :: HNil]]
    implicitly[MultipleResponseDecoder[Id, String, Int :: HNil]]
    // ...and fails when a decoder (here String => String) is missing.
    illTyped("""implicitly[MultipleResponseDecoder[Id, String, String :: Int :: HNil]]""")
  }

  test("ResponseDecoder identity") {
    check { (a: Boolean) => identityParser.parse(a) == a }
  }

  test("ResponseDecoder combination") {
    // combine returns the two decoders' results as an HList.
    check { (a: String) =>
      intParser.combine(doubleParser).parse(a) == intParser.parse(a) :: doubleParser.parse(a) :: HNil
    }
  }

  test("ResponseDecoder composition") {
    // compose pipes the first decoder's output into the second.
    forAll(Gen.alphaStr) { (a: String) =>
      intParser.compose(intLengthParser).parse(a) shouldBe intLengthParser.parse(intParser.parse(a))
    }
  }

  test("ResponseDecoder composition with multiple") {
    check { (a: String) =>
      intParser.compose(intLengthParser.combine(smthParser)).parse(a) ==
        intLengthParser.combine(smthParser).parse(intParser.parse(a))
    }
  }

  test("ResponseDecoder composition with single command decoder") {
    check { (s: Int, a: Int) =>
      smthParser.compose(smthCommandDecoder).parse(Smth(s))(a) == (s == a)
    }
  }

  test("ResponseDecoder composition with multiple command decoder") {
    check { (c1: Int, c2: String, base: Int) =>
      smthParser.compose(
        smthCommandDecoder.combine(smthStringLengthDecoder)
      ).parse(c2 :: Smth(c1) :: HNil)(base) == {
        val nBase = smthParser.parse(base)
        smthStringLengthDecoder.parse(c2)(nBase) :: smthCommandDecoder.parse(Smth(c1))(nBase) :: HNil
      }
    }
  }
}
| vpavkin/scalist | tests/src/test/scala/ru/pavkin/todoist/api/core/decoder/ResponseDecoderSpec.scala | Scala | mit | 2,944 |
/*
* Copyright 2013 Folker Bernitt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.bernitt.scalamaildir.util
import java.io.File
import org.scalatest.{Suite, BeforeAndAfterEach}
/**
 * Mixes a per-test temporary directory into a ScalaTest [[Suite]]: the directory
 * is created before each test and recursively removed afterwards.
 */
trait TemporaryDirectory extends BeforeAndAfterEach {
  this: Suite =>

  // One path for the lifetime of the suite; (re)created before every test.
  protected val tempDir = createTemporaryDirectory()

  override def beforeEach() {
    tempDir.mkdir()
    val created = tempDir.exists() && tempDir.isDirectory
    if (!created) {
      throw new RuntimeException("Failed to create temporary dir " + tempDir.getAbsolutePath)
    }
    super.beforeEach()
  }

  override def afterEach() {
    deleteDirectory(tempDir)
    if (tempDir.exists()) {
      throw new RuntimeException("Failed to delete temp dir " + tempDir.getAbsolutePath)
    }
    super.afterEach()
  }

  // Reserves a unique path via createTempFile, then removes the file so the same
  // path can be reused as a directory name.
  private def createTemporaryDirectory(): File = {
    val path = File.createTempFile("test", "dir")
    path.delete()
    path
  }

  // Depth-first recursive delete; returns the result of deleting `dir` itself.
  private def deleteDirectory(dir: File): Boolean = {
    if (dir.exists()) {
      for (entry <- dir.listFiles()) {
        if (entry.isDirectory) deleteDirectory(entry)
        else entry.delete()
      }
    }
    dir.delete()
  }
}
| fbernitt/mailbackup | src/test/scala/de/bernitt/scalamaildir/util/TemporaryDirectory.scala | Scala | apache-2.0 | 1,671 |
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.trustedanalytics.sparktk.dicom.internal.constructors
import java.awt.image.{ DataBufferUShort, Raster }
import java.io._
import java.util.Iterator
import javax.imageio.stream.ImageInputStream
import javax.imageio.{ ImageIO, ImageReader }
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkContext
import org.apache.spark.mllib.linalg.DenseMatrix
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.trustedanalytics.sparktk.dicom.Dicom
import org.trustedanalytics.sparktk.frame.Frame
import org.trustedanalytics.sparktk.frame.internal.rdd.FrameRdd
import org.dcm4che3.imageio.plugins.dcm.{ DicomImageReadParam, DicomImageReader }
import org.dcm4che3.io.DicomInputStream
import org.dcm4che3.tool.dcm2xml.org.trustedanalytics.sparktk.Dcm2Xml
object Import extends Serializable {

  /**
   * Get Pixel Data from Dicom Input Stream represented as Array of Bytes.
   *
   * @param byteArray Dicom Input Stream represented as Array of Bytes
   * @return DenseMatrix Pixel Data
   */
  def getPixeldata(byteArray: Array[Byte]): DenseMatrix = {
    val pixeldataInputStream = new DataInputStream(new ByteArrayInputStream(byteArray))
    val pixeldicomInputStream = new DicomInputStream(pixeldataInputStream)
    val iter: Iterator[ImageReader] = ImageIO.getImageReadersByFormatName("DICOM")
    val reader: DicomImageReader = iter.next.asInstanceOf[DicomImageReader]
    val param: DicomImageReadParam = reader.getDefaultReadParam.asInstanceOf[DicomImageReadParam]
    val iis: ImageInputStream = ImageIO.createImageInputStream(pixeldicomInputStream)
    reader.setInput(iis, true)
    try {
      // Raster holding the decoded pixel samples of the first frame.
      val raster: Raster = reader.readRaster(0, param)
      val w = raster.getWidth
      val h = raster.getHeight
      // MLlib DenseMatrix is column-major: the entry at (row, col) lives at
      // index col * numRows + row in the backing array. For example,
      //   1.0 2.0
      //   3.0 4.0
      //   5.0 6.0
      // is stored as [1.0, 3.0, 5.0, 2.0, 4.0, 6.0].
      //
      // Raster.getSample takes (x, y, band) where x is the column (0 until w)
      // and y is the row (0 until h). The previous code passed the row index
      // as x and the column index as y, which only worked for square images
      // and failed with out-of-bounds errors otherwise.
      val data = Array.ofDim[Double](h * w)
      for (col <- 0 until w; row <- 0 until h) {
        data(col * h + row) = raster.getSample(col, row, 0)
      }
      new DenseMatrix(h, w, data)
    } finally {
      // Release decoder resources and the underlying stream wrappers.
      reader.dispose()
      iis.close()
    }
  }

  /**
   * Get Metadata Xml from Dicom Input Stream represented as byte array.
   *
   * @param byteArray Dicom Input Stream represented as byte array
   * @return String Xml Metadata
   */
  def getMetadataXml(byteArray: Array[Byte]): String = {
    val metadataInputStream = new DataInputStream(new ByteArrayInputStream(byteArray))
    val metadataDicomInputStream = new DicomInputStream(metadataInputStream)
    val dcm2xml = new Dcm2Xml()
    val myOutputStream = new ByteArrayOutputStream()
    dcm2xml.convert(metadataDicomInputStream, myOutputStream)
    myOutputStream.toString()
  }

  /**
   * Creates a dicom object with metadata and pixeldata frames.
   *
   *                                                                |---> DataInputStream --> DicomInputStream --> Dcm2Xml --> Metadata XML (String)
   *                                                                |
   * Spark foreach DCM Image (FilePath, PortableDataStream) ---> ByteArray --->
   *                                                                |
   *                                                                |---> DataInputStream --> DicomInputStream --> ImageInputStream --> Raster --> Pixel Data (Dense Matrix)
   *
   * @param path Full path to the DICOM files directory
   * @return Dicom object with MetadataFrame and PixeldataFrame
   */
  def importDcm(sc: SparkContext, path: String): Dicom = {
    val dicomFilesRdd = sc.binaryFiles(path)
    val dcmMetadataPixelArrayRDD = dicomFilesRdd.mapPartitions { iter =>
      iter.map {
        case (filePath, fileData) =>
          // Open the PortableDataStream just long enough to copy its bytes.
          val fileInputStream = fileData.open()
          try {
            val byteArray = IOUtils.toByteArray(fileInputStream)
            (getMetadataXml(byteArray), getPixeldata(byteArray))
          } finally {
            fileInputStream.close()
          }
      }
    }.zipWithIndex()
    // Cached because the RDD is traversed twice below (metadata and pixeldata).
    dcmMetadataPixelArrayRDD.cache()
    val sqlCtx = new SQLContext(sc)
    import sqlCtx.implicits._
    // Metadata frame: (id, metadata xml)
    val metaDataPairRDD: RDD[(Long, String)] = dcmMetadataPixelArrayRDD.map {
      case ((metadata, _), id) => (id, metadata)
    }
    val metadataDF = metaDataPairRDD.toDF("id", "metadata")
    val metadataFrameRdd = FrameRdd.toFrameRdd(metadataDF)
    val metadataFrame = new Frame(metadataFrameRdd, metadataFrameRdd.frameSchema)
    // Pixeldata frame: (id, image matrix)
    val imageMatrixPairRDD: RDD[(Long, DenseMatrix)] = dcmMetadataPixelArrayRDD.map {
      case ((_, pixeldata), id) => (id, pixeldata)
    }
    val imageDF = imageMatrixPairRDD.toDF("id", "imagematrix")
    val pixeldataFrameRdd = FrameRdd.toFrameRdd(imageDF)
    val pixeldataFrame = new Frame(pixeldataFrameRdd, pixeldataFrameRdd.frameSchema)
    new Dicom(metadataFrame, pixeldataFrame)
  }
}
| ashaarunkumar/spark-tk | sparktk-core/src/main/scala/org/trustedanalytics/sparktk/dicom/internal/constructors/Import.scala | Scala | apache-2.0 | 6,140 |
package spbau.scala.ordian.task02
import scala.annotation.tailrec
/* WARNING: magic constants! */
object One extends App {
  // Compute the digit stream once; head and tail below share its memoized
  // cells (the original called squareRootDigits(2) twice, recomputing them).
  private val digits = squareRootDigits(2)
  print(s"${digits.head}.")
  digits.tail take 73 foreach print

  /**
   * Decimal digits of sqrt(number), computed with the classic digit-by-digit
   * method: a perfect square d*d equals the sum of the first d odd numbers,
   * so each digit is the largest one whose odd-number sum still fits into the
   * running remainder. The head is the integer part (a single digit for
   * 0 <= number <= 9 — presumably the intended input range), followed by the
   * fractional digits as an infinite stream.
   */
  def squareRootDigits(number: Int): Stream[Int] = {
    // remainder: value still unaccounted for, scaled by 100 per emitted digit;
    // result: the digits emitted so far, read as a single integer.
    def digitByDigit(remainder: BigInt, result: BigInt): Stream[Int] = {
      val (digit, newRemainder) = next(0, remainder, result * 20 + 1)
      digit #:: digitByDigit(newRemainder, result * 10 + digit)
    }
    // Find the largest digit whose successive odd decrements fit into the
    // remainder; return it with the remainder scaled up for the next digit.
    @tailrec
    def next(digit: Int, remainder: BigInt, decrement: BigInt): (Int, BigInt) = {
      if (remainder < decrement) (digit, remainder * 100)
      else next(digit + 1, remainder - decrement, decrement + 2)
    }
    digitByDigit(number, 0)
  }
}
| ordian/vm_languages_course | src/main/scala/spbau/scala/ordian/task02/One.scala | Scala | gpl-3.0 | 744 |
package org.jetbrains.plugins.scala
package lang
package psi
package types
import java.util.Objects
import org.jetbrains.plugins.scala.lang.psi.types.api._
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.NonValueType
import org.jetbrains.plugins.scala.project.ProjectContext
/**
* This type works like undefined type, but you cannot use this type
* to resolve generics. It's important if two local type
* inferences work together.
*/
case class ScAbstractType(parameterType: TypeParameterType, lower: ScType, upper: ScType) extends ScalaType with NonValueType {
  override implicit def projectContext: ProjectContext = parameterType.projectContext
  // Cached hash code; -1 marks "not yet computed". Not synchronized: a race can
  // at worst recompute the same value, since all three fields are immutable.
  private var hash: Int = -1
  override def hashCode: Int = {
    if (hash == -1)
      hash = Objects.hash(upper, lower, parameterType.arguments)
    hash
  }
  // Equality mirrors hashCode: both bounds plus the type parameter's arguments.
  // The parameter type itself is deliberately not compared in full.
  override def equals(obj: scala.Any): Boolean = {
    obj match {
      case ScAbstractType(oTpt, oLower, oUpper) =>
        lower.equals(oLower) && upper.equals(oUpper) && parameterType.arguments.equals(oTpt.arguments)
      case _ => false
    }
  }
  // Equivalence used during conformance checking: r must conform to the upper
  // bound and the lower bound must conform to r. Always false when falseUndef
  // is set (see the class-level note: this type must not resolve generics).
  override def equivInner(r: ScType, uSubst: ScUndefinedSubstitutor, falseUndef: Boolean): (Boolean, ScUndefinedSubstitutor) = {
    r match {
      case _ if falseUndef => (false, uSubst)
      case _ =>
        var t: (Boolean, ScUndefinedSubstitutor) = r.conforms(upper, uSubst)
        if (!t._1) return (false, uSubst)
        t = lower.conforms(r, t._2)
        if (!t._1) return (false, uSubst)
        (true, t._2)
    }
  }
  def inferValueType: TypeParameterType = parameterType
  // Collapse to a concrete bound: the upper bound only when the lower bound is
  // Nothing (or upper is Any), otherwise the lower bound.
  def simplifyType: ScType = {
    if (upper.equiv(Any)) lower else if (lower.equiv(Nothing)) upper else lower
  }
  override def removeAbstracts: ScType = simplifyType
  // Rebuild this type with `update` applied recursively to all three components.
  // If updating turns parameterType into a non-TypeParameterType, the resulting
  // ClassCastException is translated into RecursiveUpdateException.
  override def updateSubtypes(update: ScType => (Boolean, ScType), visited: Set[ScType]): ScAbstractType = {
    try {
      ScAbstractType(
        parameterType.recursiveUpdate(update, visited).asInstanceOf[TypeParameterType],
        lower.recursiveUpdate(update, visited),
        upper.recursiveUpdate(update, visited)
      )
    }
    catch {
      case _: ClassCastException => throw new RecursiveUpdateException
    }
  }
  // Variance-aware recursive update. The lower bound is visited with the
  // variance flipped (-variance), matching the contravariant position of a
  // lower bound; upper bound and parameter type keep the current variance.
  override def recursiveVarianceUpdateModifiable[T](data: T, update: (ScType, Int, T) => (Boolean, ScType, T),
                                                    variance: Int = 1): ScType = {
    update(this, variance, data) match {
      case (true, res, _) => res
      case (_, _, newData) =>
        try {
          ScAbstractType(parameterType.recursiveVarianceUpdateModifiable(newData, update, variance).asInstanceOf[TypeParameterType],
            lower.recursiveVarianceUpdateModifiable(newData, update, -variance),
            upper.recursiveVarianceUpdateModifiable(newData, update, variance))
        }
        catch {
          case _: ClassCastException => throw new RecursiveUpdateException
        }
    }
  }
  override def visitType(visitor: TypeVisitor): Unit = visitor match {
    case scalaVisitor: ScalaTypeVisitor => scalaVisitor.visitAbstractType(this)
    case _ =>
  }
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/types/ScAbstractType.scala | Scala | apache-2.0 | 3,071 |
/**
* Copyright (c) 2007-2011 Eric Torreborre <etorreborre@yahoo.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of
* the Software. Neither the name of specs nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
* TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package org.specs.util
/** The ExtendedString object adds utility functions like 'uncapitalize' to Strings */
object ExtendedString {
  /** @return an ExtendedString wrapping s, enabling the extra String operations */
  implicit def toExtendedString(s: String) = ExtendedString(s)

  /** This class adds utility functions to Strings */
  case class ExtendedString(s: String) {
    /**
     * @return the String s with its first character being uncapitalized: "HELLO".uncapitalize -> "hELLO".
     *         The empty string is returned unchanged (the previous version threw on "").
     */
    def uncapitalize = if (s.isEmpty) s else s.head.toLower + s.drop(1)
    /**
     * @param remove String to suppress from the original string
     * @return a String where every occurrence of remove has been suppressed
     */
    def removeAll(remove: String) = s.replaceAll(toReplace(remove), "")
    // Escape regex metacharacters in `c` so removeAll can treat it literally.
    private def toReplace(c: String) = c.map { letter => if ("()[]{}+-\\\\^$|?.*".contains(letter)) ("\\\\" + letter) else letter }.mkString("")
    /**
     * Remove everything from the first occurrence of a given substring.
     */
    def removeFrom(sub: String) = if (s.indexOf(sub) >= 0) s.substring(0, s.indexOf(sub)) else s
    /**
     * @param pattern regexp pattern with groups (defined using parenthesis) specifying what to search in the string s
     * @return a list with every group found
     */
    def groups(pattern: String): List[String] = {
      import scala.util.control.NonFatal
      if (pattern == null) List[String]()
      else {
        val matcher = java.util.regex.Pattern.compile(pattern).matcher(s)
        val groupsFound = new scala.collection.mutable.ListBuffer[String]()
        while (matcher.find) {
          try {
            groupsFound += matcher.group(1)
          } catch {
            // group(1) throws when the pattern has no capturing group; only
            // non-fatal errors are swallowed (the old code caught everything,
            // including fatal VM errors).
            case NonFatal(_) =>
          }
        }
        groupsFound.toList
      }
    }
    /**
     * Replaces each group found by `pattern` with the result of `function` applied to it.
     * Fixed: the fold now threads the accumulated result instead of restarting from the
     * original string on every step, which silently kept only the last replacement.
     */
    def replaceGroups(pattern: String, function: String => Any): String = {
      groups(pattern).foldLeft(s) { (res: String, g: String) => res.replace(g, function(g).toString) }
    }
    /**
     * This is a shortcut for groups("(" + group + ")")
     * @param group specification of the groups to find
     * @return a list with every group found
     */
    def findAll(group: String): List[String] = groups("(" + group + ")")
    /**
     * @return the uncamel-ed string: MyCamelString -> My camel string
     */
    def uncamel = {
      // Insert a space before each uppercase letter and lowercase it.
      def uncamelChars(chars : List[Char]): List[Char] = chars match {
        case c :: rest if (Character.isUpperCase(c)) => ' ' :: Character.toLowerCase(c) :: uncamelChars(rest)
        case c :: rest => c :: uncamelChars(rest)
        case Nil => Nil
      }
      if (s.isEmpty) ""
      else (s.charAt(0) :: uncamelChars(s.substring(1).toList)).mkString("")
    }
    /**
     * @return a list of Strings splitted so that they have a maximum size
     */
    def splitToSize(n: Int): List[String] = splitToSize(s, n, Nil)
    private def splitToSize(string: String, n: Int, result: List[String]): List[String] = {
      if (string.size <= n)
        (string :: result).reverse
      else
        // new Strings are necessary to avoid memory errors because substring is just a view on the underlying string
        splitToSize(new String(string.drop(n)), n, new String(string.take(n)) :: result)
    }
  }
}
| stuhood/specs | src/main/scala/org/specs/util/ExtendedString.scala | Scala | mit | 4,421 |
/*
* Copyright 2014 Adam Rosenberger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.nalloc.bitb.kcits.optional
import org.nalloc.bitb.kcits.macros._
object OptionalFloat {
  // The empty value is encoded as NaN: NaN != NaN, which is the isEmpty test in the class.
  final def empty: OptionalFloat = new OptionalFloat(java.lang.Float.NaN)
  // Wrap a concrete value. Note: apply(Float.NaN) is indistinguishable from empty.
  final def apply(value: Float): OptionalFloat = new OptionalFloat(value)
}
final class OptionalFloat(val value: Float) extends AnyVal {
  // `value != value` is true only for NaN, the sentinel that marks "no value".
  def isEmpty = value != value
  // Unconditional accessor: returns NaN when empty (no exception is thrown).
  def get: Float = value
  // Same test as isEmpty, since NaN doubles as the empty sentinel.
  def isNaN = value != value
  def isMinValue = value == java.lang.Float.MIN_VALUE
  def isMaxValue = value == java.lang.Float.MAX_VALUE
  // The Option-like combinators below are implemented as macros in
  // OptionalMacros (defined elsewhere in this project).
  def map[T](f: Float => T)(implicit x: OptionalResolver[T]): x.OptionalType = macro OptionalMacros.map_impl[Float, T]
  def flatMap[T](f: Float => T)(implicit x: PrimitiveResolver[T]): T = macro OptionalMacros.flatMap_impl[Float, T]
  def foreach(f: Float => Unit): Unit = macro OptionalMacros.foreach_impl[Float]
  def exists(f: Float => Boolean): Boolean = macro OptionalMacros.exists_impl[Float]
  def filter(f: Float => Boolean): OptionalFloat = macro OptionalMacros.filter_impl[Float]
  def orElse(f: => Float): Float = macro OptionalMacros.orElse_impl[Float]
  def fold[T](ifEmpty: => T)(f: Float => T): T = macro OptionalMacros.fold_impl[Float, T]
  // NOTE(review): the empty sentinel is NaN, yet toString reports Float.MIN_VALUE
  // for the empty case — looks like a leftover from an earlier MIN_VALUE-based
  // encoding; confirm whether the message should say NaN instead.
  override def toString = if (isEmpty) s"${java.lang.Float.MIN_VALUE} (empty)" else s"$value"
}
| arosenberger/nalloc_2.10 | optional/src/main/scala/org/nalloc/bitb/kcits/optional/OptionalFloat.scala | Scala | apache-2.0 | 1,859 |
package tu.coreservice.action.way2think.simulation
import tu.coreservice.action.way2think.Way2Think
import tu.model.knowledge.communication.{ContextHelper, ShortTermMemory}
import tu.model.knowledge.annotator.AnnotatedNarrative
import tu.model.knowledge.domain.ConceptNetwork
import tu.model.knowledge.{Constant, Resource}
import tu.exception.NoExpectedInformationException
import org.slf4j.LoggerFactory
/**
* Wrapper class for Simulation to provide Way2Think interface.
* @author max talanov
* date 2012-06-25
* time: 12:41 PM
*/
class SimulationWay2Think extends Way2Think {
  val log = LoggerFactory.getLogger(this.getClass)
  /**
   * Way2Think interface.
   * Looks up the link-parser result in the context, runs the simulation against
   * the context's simulation model, and records the outcome. Throws
   * NoExpectedInformationException when any required piece is missing.
   * @param inputContext ShortTermMemory of all inbound parameters.
   * @return outputContext
   */
  def apply(inputContext: ShortTermMemory) = {
    log debug("apply({}: ShortTermMemory)", inputContext)
    try {
      inputContext.findByName(Constant.LINK_PARSER_RESULT_NAME) match {
        case Some(narrative: AnnotatedNarrative) => {
          // NOTE(review): this first simulationModel match is duplicated by the
          // nearly identical block below ("// simulation model"); its result —
          // including the locally built `context` — is discarded, so the
          // simulation runs twice per call. One of the two blocks looks like
          // leftover code; confirm which one is canonical and remove the other.
          inputContext.simulationModel match {
            case Some(model: ConceptNetwork) => {
              val conceptNetworkOption = this.apply(narrative, model)
              conceptNetworkOption match {
                case Some(cn: ConceptNetwork) => {
                  // `context` is built and populated but never returned —
                  // NOTE(review): confirm whether it should be the result.
                  val context = ContextHelper(List[Resource](), cn, this.getClass.getName + " result")
                  context.notUnderstoodPhrases = cn.notKnownPhrases
                  context.simulationResult = Some(cn)
                }
                case None => {
                  throw new NoExpectedInformationException("$No_matches_detected_in_domain_model")
                }
              }
            }
            case None => {
              throw new NoExpectedInformationException("$No_domain_model_specified")
            }
          }
          // simulation model
          inputContext.simulationModel match {
            case Some(model: ConceptNetwork) => {
              val conceptNetworkOption = this.apply(narrative, model)
              conceptNetworkOption match {
                case Some(cn: ConceptNetwork) => {
                  // NOTE(review): here simulationResult is assigned `cn`, while
                  // the block above assigns `Some(cn)` — confirm the expected
                  // type of ShortTermMemory.simulationResult.
                  val context = ContextHelper(List[Resource](), cn, this.getClass.getName + Constant.RESULT)
                  context.notUnderstoodPhrases = cn.notKnownPhrases
                  context.simulationResult = cn
                }
                case None => {
                  throw new NoExpectedInformationException("$No_matches_detected_in_domain_model")
                }
              }
            }
            case None => {
              // val cry4Help = Cry4HelpWay2Think("$No_domain_model_specified")
              // ContextHelper(List[Resource](cry4Help), cry4Help, this.getClass.getName + " result")
              throw new NoExpectedInformationException("$No_domain_model_specified")
            }
          }
        }
        case None => {
          throw new NoExpectedInformationException("$Context_lastResult_is_None")
        }
      }
    } catch {
      // A stored resource of an unexpected runtime type surfaces here.
      case e: ClassCastException => {
        throw new NoExpectedInformationException("$Context_lastResult_is_not_expectedType " + e.getMessage)
      }
    }
  }
  /**
   * Estimates confidence and probability of output SelectorRequest
   * @param currentSituation description of current situation as ConceptNetwork
   * @param domainModel overall domain model to be used to analyse current situation as ConceptNetwork.
   * @return SelectorRequest with set probability
   */
  def apply(currentSituation: AnnotatedNarrative, domainModel: ConceptNetwork): Option[ConceptNetwork] = {
    val s = new Simulation()
    s.apply(currentSituation, domainModel)
  }
  // Lifecycle hooks of the Way2Think trait; this component has no start/stop
  // behaviour and always reports false.
  def start() = false
  def stop() = false
}
| keskival/2 | coreservice.action.way2think/src/main/scala/tu/coreservice/action/way2think/simulation/SimulationWay2Think.scala | Scala | gpl-3.0 | 3,747 |
package org.senkbeil.debugger.akka.messages
/** Marker trait for message types; extends Serializable so instances can be serialized. */
trait MessageLike extends Serializable
| chipsenkbeil/scala-debugger-akka | src/main/scala/org/senkbeil/debugger/akka/messages/MessageLike.scala | Scala | apache-2.0 | 84 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.optim
import org.apache.spark.mllib.linalg.Vector
/**
* :: DeveloperApi ::
* Class used to perform steps (weight update) with per-coordinate learning rate.
*
*/
abstract class PerCoordinateUpdater extends Serializable {
  /**
   * Compute updated weights from the gradient and the per-coordinate
   * accumulator vectors.
   *
   * @param weightsOld previous weight vector
   * @param gradient gradient for the current example/batch
   * @param alpha per-coordinate learning-rate parameter
   * @param beta per-coordinate learning-rate parameter
   * @param l1 L1 regularization parameter
   * @param l2 L2 regularization parameter
   * @param n per-coordinate accumulator vector (presumably summed squared
   *          gradients, FTRL-style — confirm in concrete implementations)
   * @param z per-coordinate accumulator vector (presumably FTRL z values —
   *          confirm in concrete implementations)
   * @return a tuple of (updated weights, scalar value, updated n, updated z);
   *         the scalar's meaning (e.g. regularization term) is defined by
   *         implementations — confirm there.
   */
  def compute(
      weightsOld: Vector,
      gradient: Vector,
      alpha: Double,
      beta: Double,
      l1: Double,
      l2: Double,
      n: Vector,
      z: Vector): (Vector, Double, Vector, Vector)
}
| hibayesian/spark-optim | src/main/scala/org/apache/spark/ml/optim/PerCoordinateUpdater.scala | Scala | apache-2.0 | 1,273 |
package scheme
// Continuation-passing-style (CPS) conversion for the Scheme AST.
// NOTE: gensym() is side-effecting, so the evaluation order of the helper
// calls below is load-bearing for the generated symbol names.
object CPSConv {
  import Util.gensym
  import Ast.{mkFn, mkApp, mkIf, mkDefine, mkSet}
  // True when x is a symbol bound to a primitive (SPrim) in the global
  // environment; primitives are applied directly rather than CPS-converted.
  def isPrimitive(x: Ast): Boolean =
    x match {
      case SSymbol(name) =>
        Evaluator.lookup(Evaluator.globalEnv, name) match {
          case Some(_: SPrim) => true
          case _ => false
        }
      case _ => false
    }
  // Convert `expr` so that its result is delivered to the continuation `cont`.
  def cps(expr: Ast, cont: Ast): Ast = {
    // Atoms are passed to the continuation unchanged.
    def cpsAtom(atom: Ast, cont: Ast): Ast =
      mkApp(cont, List(atom))
    // Evaluate the condition first; both branches share the outer continuation.
    def cpsIf(cexpr: Ast, texpr: Ast, fexpr: Ast, cont: Ast): Ast = {
      val g = gensym()
      cps(cexpr, mkFn(List(g), mkIf(g, cps(texpr, cont), cps(fexpr, cont))))
    }
    def cpsSet(name: SSymbol, expr: Ast, cont: Ast): Ast = {
      val g = gensym()
      cps(expr,
        mkFn(List(g),
          mkApp(cont, List(mkSet(name, g)))))
    }
    def cpsDefine(name: SSymbol, expr: Ast, cont: Ast): Ast = {
      val g = gensym()
      cps(expr,
        mkFn(List(g),
          mkApp(cont, List(mkDefine(name, g)))))
    }
    // Sequence: only the last expression's value reaches the continuation.
    // An empty body yields the empty list.
    def cpsBegin(body: List[Ast], cont: Ast): Ast =
      body match {
        case List(x) => cps(x, cont)
        case x :: xs =>
          val g = gensym()
          cps(x, mkFn(List(g), cpsBegin(xs, cont)))
        case Nil => cps(SList(Nil), cont)
      }
    // Primitive application: CPS-convert each argument, then apply the
    // primitive directly and hand its result to the continuation.
    def cpsPrimitive(expr: List[Ast], cont: Ast): Ast = {
      val fn :: args = expr
      val gs = (0 until args.length).map(_ => gensym()).toList
      (args zip gs).reverse.foldRight(mkApp(cont, List(mkApp(fn, gs)))) {
        case ((arg, g), ast) => cps(arg, mkFn(List(g), ast))
      }
    }
    // A converted lambda takes its continuation as an extra first parameter.
    def cpsLambda(vars: List[Ast], body: Ast, cont: Ast): Ast = {
      val g = gensym()
      mkApp(cont, List(mkFn(g :: vars, cps(body, g))))
    }
    // User-function application: the callee receives `cont` as first argument.
    // NOTE(review): the @unchecked match only covers applications with zero,
    // one, or two arguments; anything larger raises a MatchError at runtime.
    def cpsApplication(expr: List[Ast], cont: Ast): Ast =
      (expr: @unchecked) match {
        case List(x) =>
          val g = gensym()
          cps(x, mkFn(List(g), mkApp(g, List(cont))))
        case List(x, y) =>
          // `val g, g1 = gensym()` evaluates gensym() once per name.
          val g, g1 = gensym()
          cps(x, mkFn(List(g), cps(y, mkFn(List(g1), mkApp(g, List(cont, g1))))))
        case List(x, y, z) =>
          val g, g1, g2 = gensym()
          cps(x, mkFn(List(g), cps(y, mkFn(List(g1), cps(z, mkFn(List(g2), mkApp(g, List(cont, g1, g2))))))))
      }
    // Dispatch on the expression's syntactic form.
    (expr: @unchecked) match {
      case _: SString | _: SDouble | _: SBool | _: SQuote | _: SSymbol =>
        cpsAtom(expr, cont)
      case SList(List(SSymbol("if"), cexpr, texpr, fexpr)) =>
        cpsIf(cexpr, texpr, fexpr, cont)
      case SList(List(SSymbol("define"), (name: SSymbol), value)) =>
        cpsDefine(name, value, cont)
      case SList(SSymbol("begin") :: rest) =>
        cpsBegin(rest, cont)
      case SList(List(SSymbol("set!"), (name: SSymbol), value)) =>
        cpsSet(name, value, cont)
      case SList(SSymbol("lambda") :: SList(lambdaList) :: rest) =>
        cpsLambda(lambdaList, SList(SSymbol("begin") :: rest), cont)
      case SList(x :: xs) =>
        if (isPrimitive(x)) cpsPrimitive(x :: xs, cont)
        else cpsApplication(x :: xs, cont)
    }
  }
}
| einblicker/scheme | src/main/scala/CPSConv.scala | Scala | bsd-2-clause | 2,896 |
package com.gilt.storeroom.dynamodb
import com.gilt.storeroom._
import java.util.{ Map => JMap }
import java.util.concurrent.Executors
import scala.concurrent._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.collection.JavaConverters._
import play.api.libs.iteratee.{Enumerator, Enumeratee}
import com.amazonaws.auth.BasicAWSCredentials
import com.amazonaws.services.dynamodbv2.{ AmazonDynamoDBClient, AmazonDynamoDB }
import com.amazonaws.services.dynamodbv2.model._
object DynamoStore {

  /**
   * Create a Store for the specified table, primary key, and value column in
   * Dynamo. AWS credentials are taken from the environment, as described in the
   * AWS SDK documentation; the worker pool is sized to the number of available
   * processors.
   *
   * @see <a href="http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDBClient.html#AmazonDynamoDBClient()">SDK Javadoc</a>
   */
  def apply(tableName: String, primaryKeyColumn: String, valueColumn: String): DynamoStore =
    apply(tableName, primaryKeyColumn, valueColumn, Runtime.getRuntime.availableProcessors)

  /**
   * Create a Store for the specified table, primary key, and value column in
   * Dynamo, with an explicit worker-thread count. AWS credentials are taken
   * from the environment.
   *
   * @see <a href="http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDBClient.html#AmazonDynamoDBClient()">SDK Javadoc</a>
   */
  def apply(tableName: String, primaryKeyColumn: String, valueColumn: String, numberWorkerThreads: Int): DynamoStore =
    apply(new AmazonDynamoDBClient(), tableName, primaryKeyColumn, valueColumn, numberWorkerThreads)

  /**
   * Create a Store backed by the provided Dynamo client; the worker pool is
   * sized to the number of available processors.
   *
   * @see <a href="http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDBClient.html#AmazonDynamoDBClient()">SDK Javadoc</a>
   */
  def apply(client: AmazonDynamoDBClient, tableName: String, primaryKeyColumn: String, valueColumn: String): DynamoStore =
    apply(client, tableName, primaryKeyColumn, valueColumn, Runtime.getRuntime.availableProcessors)

  /**
   * Create a Store backed by the provided Dynamo client, with an explicit
   * worker-thread count.
   *
   * @see <a href="http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/dynamodbv2/AmazonDynamoDBClient.html#AmazonDynamoDBClient()">SDK Javadoc</a>
   */
  def apply(client: AmazonDynamoDBClient, tableName: String, primaryKeyColumn: String, valueColumn: String, numberWorkerThreads: Int): DynamoStore =
    new DynamoStore(client, tableName, primaryKeyColumn, valueColumn, numberWorkerThreads)
}
class DynamoStore(val client: AmazonDynamoDB, val tableName: String,
  val primaryKeyColumn: String, val valueColumn: String, numberWorkerThreads: Int)
  extends IterableStore[String, AttributeValue]
{
  // Dedicated pool for the blocking AWS SDK calls, sized by the caller.
  // NOTE(review): this executor is never shut down; consider a close() hook if
  // stores are created and discarded repeatedly.
  implicit val apiRequestFuturePool = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(numberWorkerThreads))

  /** Some(value) upserts the item; None deletes it. */
  override def put(kv: (String, Option[AttributeValue])): Future[Unit] = {
    kv match {
      case (key, Some(value)) => {
        // Write the new entry to AWS.
        val attributes = Map(
          primaryKeyColumn -> new AttributeValue(key),
          valueColumn -> value
        ).asJava
        val putRequest = new PutItemRequest(tableName, attributes)
        Future { blocking(client.putItem(putRequest)) }
      }
      case (key, None) => {
        val attributes = Map(primaryKeyColumn -> new AttributeValue(key)).asJava
        val deleteRequest = new DeleteItemRequest(tableName, attributes)
        Future { blocking(client.deleteItem(deleteRequest)) }
      }
    }
  }

  /** Fetch the value column for `k`; None when the item does not exist. */
  override def get(k: String): Future[Option[AttributeValue]] = {
    val attributes = Map(primaryKeyColumn -> new AttributeValue(k)).asJava
    val getRequest = new GetItemRequest(tableName, attributes)
    Future {
      Option(blocking(client.getItem(getRequest).getItem)).map(_.get(valueColumn))
    }
  }

  // TODO - implement multiGet and multiPut

  /**
   * Stream all (key, value) pairs via paged Dynamo scans.
   * Unfold state: None = scan not started; Some(Some(lastKey)) = continue the
   * scan after lastKey; Some(None) = scan exhausted.
   */
  override def getAll(limit: Int = Int.MaxValue, offset: Int = 0): Enumerator[(String, AttributeValue)] = {
    val attributes = List(primaryKeyColumn, valueColumn)
    val initialState: Option[Option[java.util.Map[String, AttributeValue]]] = None
    Enumerator.unfoldM(initialState) {
      case Some(None) => Future.successful(None)
      case state =>
        Future {
          val scanRequest = new ScanRequest(tableName)
            .withAttributesToGet(attributes.asJava)
            // flatten.orNull replaces the old `state.map(_.get).getOrElse(null)`:
            // equivalent on reachable states, but no .get on an Option.
            // (A null start key means "scan from the beginning".)
            .withExclusiveStartKey(state.flatten.orNull)
          val result = blocking(client.scan(scanRequest))
          val lastKey = Option(result.getLastEvaluatedKey)
          val items = result.getItems.asScala.map { kavMap =>
            (kavMap.get(primaryKeyColumn).getS, kavMap.get(valueColumn))
          }
          Some((Some(lastKey), items))
        }
    }.flatMap(Enumerator.enumerate).through(Enumeratee.drop(offset)).through(Enumeratee.take(limit))
  }
}
object DynamoStringStore {

  /** Factories mirroring [[DynamoStore]]'s; values are stored as Dynamo string attributes. */
  def apply(tableName: String, primaryKeyColumn: String, valueColumn: String): DynamoStringStore =
    new DynamoStringStore(DynamoStore(tableName, primaryKeyColumn, valueColumn))

  def apply(tableName: String, primaryKeyColumn: String, valueColumn: String, numberWorkerThreads: Int): DynamoStringStore =
    new DynamoStringStore(DynamoStore(tableName, primaryKeyColumn, valueColumn, numberWorkerThreads))

  def apply(client: AmazonDynamoDBClient, tableName: String, primaryKeyColumn: String, valueColumn: String): DynamoStringStore =
    new DynamoStringStore(DynamoStore(client, tableName, primaryKeyColumn, valueColumn))

  def apply(client: AmazonDynamoDBClient, tableName: String, primaryKeyColumn: String, valueColumn: String, numberWorkerThreads: Int): DynamoStringStore =
    new DynamoStringStore(DynamoStore(client, tableName, primaryKeyColumn, valueColumn, numberWorkerThreads))
}
// Adapts the raw AttributeValue store to plain String values (AttributeValue.getS).
class DynamoStringStore(underlying: DynamoStore)
  extends ConvertedIterableStore[String, AttributeValue, String](underlying)(_.getS)(new AttributeValue(_))
object DynamoSetStore {

  /** Factories mirroring [[DynamoStore]]'s; values are stored as Dynamo string-set attributes. */
  def apply(tableName: String, primaryKeyColumn: String, valueColumn: String): DynamoSetStore =
    new DynamoSetStore(DynamoStore(tableName, primaryKeyColumn, valueColumn))

  def apply(tableName: String, primaryKeyColumn: String, valueColumn: String, numberWorkerThreads: Int): DynamoSetStore =
    new DynamoSetStore(DynamoStore(tableName, primaryKeyColumn, valueColumn, numberWorkerThreads))

  def apply(client: AmazonDynamoDBClient, tableName: String, primaryKeyColumn: String, valueColumn: String): DynamoSetStore =
    new DynamoSetStore(DynamoStore(client, tableName, primaryKeyColumn, valueColumn))

  def apply(client: AmazonDynamoDBClient, tableName: String, primaryKeyColumn: String, valueColumn: String, numberWorkerThreads: Int): DynamoSetStore =
    new DynamoSetStore(DynamoStore(client, tableName, primaryKeyColumn, valueColumn, numberWorkerThreads))
}
// Adapts the raw AttributeValue store to Set[String] values (string-set attribute, getSS).
class DynamoSetStore(underlying: DynamoStore)
  extends ConvertedIterableStore[String, AttributeValue, Set[String]](underlying)(_.getSS.asScala.toSet)(l => new AttributeValue(l.toSeq.asJava))
object DynamoLongStore {

  /** Factories mirroring [[DynamoStore]]'s; values are stored as Dynamo numeric attributes. */
  def apply(tableName: String, primaryKeyColumn: String, valueColumn: String): DynamoLongStore =
    new DynamoLongStore(DynamoStore(tableName, primaryKeyColumn, valueColumn))

  def apply(tableName: String, primaryKeyColumn: String, valueColumn: String, numberWorkerThreads: Int): DynamoLongStore =
    new DynamoLongStore(DynamoStore(tableName, primaryKeyColumn, valueColumn, numberWorkerThreads))

  def apply(client: AmazonDynamoDBClient, tableName: String, primaryKeyColumn: String, valueColumn: String): DynamoLongStore =
    new DynamoLongStore(DynamoStore(client, tableName, primaryKeyColumn, valueColumn))

  def apply(client: AmazonDynamoDBClient, tableName: String, primaryKeyColumn: String, valueColumn: String, numberWorkerThreads: Int): DynamoLongStore =
    new DynamoLongStore(DynamoStore(client, tableName, primaryKeyColumn, valueColumn, numberWorkerThreads))
}
// Adapts the raw AttributeValue store to Long values (numeric attribute, getN).
// NOTE(review): only this variant re-exposes tableName — consider doing the
// same in DynamoStringStore/DynamoSetStore for consistency.
class DynamoLongStore(underlying: DynamoStore)
  extends ConvertedIterableStore[String, AttributeValue, Long](underlying)(_.getN.toLong)(l => (new AttributeValue).withN(l.toString)) {
  val tableName = underlying.tableName
}
| gilt/storeroom | dynamodb/src/main/scala/com/gilt/storeroom/dynamodb/DynamoDB.scala | Scala | mit | 8,961 |
/*
* Copyright 2014 Treode, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.treode.async.io
import java.nio.ByteBuffer
import java.nio.channels.{AsynchronousChannelGroup, AsynchronousSocketChannel}
import java.net.SocketAddress
import java.util.concurrent.TimeUnit
import scala.util.{Failure, Success}
import com.treode.async.{Async, Callback, Scheduler}
import com.treode.async.implicits._
import com.treode.buffer.PagedBuffer
import Async.async
import TimeUnit.MILLISECONDS
/** A socket that has useful behavior (flush/fill) and that can be mocked. */
class Socket (socket: AsynchronousSocketChannel) (implicit scheduler: Scheduler) {
  import scheduler.whilst
  def localAddress: SocketAddress =
    socket.getLocalAddress
  def remoteAddress: SocketAddress =
    socket.getRemoteAddress
  // Connect to `addr`; a synchronous failure from connect() is routed to the
  // callback too, so callers observe all errors through the Async.
  def connect (addr: SocketAddress): Async [Unit] =
    async { cb =>
      try {
        socket.connect (addr, cb, Callback.UnitHandler)
      } catch {
        case t: Throwable => cb.fail (t)
      }}
  def close(): Unit =
    socket.close()
  // Scatter-read into `dsts`, yielding the byte count (negative at end of
  // stream). The -1 timeout presumably disables the timeout — confirm against
  // the AsynchronousSocketChannel javadoc.
  private def read (dsts: Array [ByteBuffer]): Async [Long] =
    async (socket.read (dsts, 0, dsts.length, -1, MILLISECONDS, _, Callback.LongHandler))
  // Gather-write from `srcs`, yielding the byte count written.
  private def write (srcs: Array [ByteBuffer]): Async [Long] =
    async (socket.write (srcs, 0, srcs.length, -1, MILLISECONDS, _, Callback.LongHandler))
  /** Read from the socket until `input` has at least `len` readable bytes. If `input` already has
   * that many readable bytes, this will immediately queue the callback on the scheduler.
   */
  def fill (input: PagedBuffer, len: Int): Async [Unit] = {
    input.capacity (input.writePos + len)
    // The ByteBuffers are obtained once; each partial read advances their
    // internal positions so successive reads continue where the previous one
    // stopped (assumes PagedBuffer.buffers returns live views — confirm).
    val bufs = input.buffers (input.writePos, input.writableBytes)
    whilst (input.readableBytes < len) {
      for (result <- read (bufs)) yield {
        require (result <= Int.MaxValue)
        if (result < 0)
          throw new Exception ("End of file reached.")
        input.writePos = input.writePos + result.toInt
      }}}
  /** Read a frame with its own length from the socket; return the length.
   *
   * Ensure `input` has at least four readable bytes, reading from the socket if necessary.
   * Interpret those as the length of bytes needed. Read from the socket again if necessary,
   * until `input` has at least that many additional readable bytes.
   *
   * The mated method `frame` lives in [[com.treode.pickle.Pickler Pickler]].
   */
  def deframe (input: PagedBuffer): Async [Int] = {
    for {
      _ <- fill (input, 4)
      len = input.readInt()
      _ <- fill (input, len)
    } yield len
  }
  /** Write all readable bytes from `output` to the socket. */
  def flush (output: PagedBuffer): Async [Unit] = {
    val bufs = output.buffers (output.readPos, output.readableBytes)
    whilst (output.readableBytes > 0) {
      for (result <- write (bufs)) yield {
        require (result <= Int.MaxValue)
        if (result < 0)
          throw new Exception ("File write failed.")
        output.readPos = output.readPos + result.toInt
      }}}}
object Socket {

  /** Open a fresh asynchronous socket channel in `group` and wrap it. */
  def open (group: AsynchronousChannelGroup) (implicit scheduler: Scheduler): Socket = {
    val channel = AsynchronousSocketChannel.open (group)
    new Socket (channel)
  }
}
| Treode/store | core/src/com/treode/async/io/Socket.scala | Scala | apache-2.0 | 3,746 |
package frdomain.ch6
package streams
import java.util.{ Date, Calendar }
import util.{ Try, Success, Failure }
import scalaz._
import Scalaz._
object common {
  // Monetary amounts are represented as plain BigDecimal values.
  type Amount = BigDecimal

  /** The current date/time, re-evaluated on every call. */
  def today: Date = Calendar.getInstance.getTime
}
import common._
// Account balance; defaults to zero.
case class Balance(amount: Amount = 0)
// A bank account identified by `no`; `dateOfClose` defaults to None
// (presumably None means the account is still open — verify with callers).
case class Account (no: String, name: String, dateOfOpen: Option[Date], dateOfClose: Option[Date] = None,
  balance: Balance = Balance())
| debasishg/frdomain | src/main/scala/frdomain/ch6/streams/Account.scala | Scala | apache-2.0 | 432 |
package example
import akka.actor.ActorDSL._
import org.scalatest.FlatSpec
import spray.testkit.ScalatestRouteTest
import akka.util.Timeout
import scala.concurrent.duration._
import spray.http.StatusCodes
import spray.httpx.SprayJsonSupport._
import spray.http.CacheDirectives.`max-age`
import spray.http.HttpHeaders.`Cache-Control`
/**
 * Route-level tests for [[Service]], driven through spray-testkit against a
 * stub model actor that serves a fixed in-memory data set of 101 items
 * (ids 0 to 100, with stock == id).
 */
class ServiceSpec extends FlatSpec with ScalatestRouteTest with ServiceJsonProtocol {
  import ModelActor._

  // Fixture: item i has id = i, stock = i, title "title-i", desc "desc-i".
  val data = for (i <- 0 to 100) yield Item(i, i, s"title-$i", s"desc-$i")

  // Projects a full item down to its summary view.
  val summary = (i: Item) => ItemSummary(i.id, i.stock, i.title)

  // Stub model actor standing in for the real data layer: answers lookups by
  // id, full listings, and substring queries over the description.
  val model = actor(new Act {
    become {
      case i: Int => sender ! data.find(_.id == i).getOrElse(ItemNotFound)
      case 'list => sender ! ItemSummaries(data.map(summary))
      case ('query, x: String) => sender ! ItemSummaries(data.filter(_.desc.contains(x)).map(summary))
    }
  })

  implicit def timeout = Timeout(3.second)

  // Route under test, wired to the stub model.
  def route = new Service {
    def actorRefFactory = system
  }.route(model)

  // Fixed: the old name claimed "10 items" but the assertion checks the full
  // data set (101 items).
  "The Service" should "return the full list of items" in {
    Get("/items") ~> route ~> check {
      assert(status === StatusCodes.OK)
      assert(header[`Cache-Control`] === Some(`Cache-Control`(`max-age`(30))))
      val res = responseAs[Seq[PublicItemSummary]]
      assert(res.size === data.size)
      assert(res.head === PublicItemSummary(summary(data.head)))
    }
  }

  it should "return a list of 2 items containing '10'" in {
    Get("/items?q=10") ~> route ~> check {
      assert(status === StatusCodes.OK)
      assert(header[`Cache-Control`] === Some(`Cache-Control`(`max-age`(40))))
      val res = responseAs[Seq[PublicItemSummary]]
      assert(res.size === 2)
      assert(res === (data(10) :: data.last :: Nil map summary).map(PublicItemSummary(_)))
    }
  }

  it should "return a list of 1 item containing '50'" in {
    Get("/items?q=50") ~> route ~> check {
      assert(status === StatusCodes.OK)
      assert(header[`Cache-Control`] === Some(`Cache-Control`(`max-age`(70))))
      val res = responseAs[Seq[PublicItemSummary]]
      assert(res.size === 1)
      assert(res === (data(50) :: Nil map summary).map(PublicItemSummary(_)))
    }
  }

  it should "return an empty list if nothing matches" in {
    Get("/items?q=this-query-should-match-nothing") ~> route ~> check {
      assert(status === StatusCodes.OK)
      assert(header[`Cache-Control`] === Some(`Cache-Control`(`max-age`(600))))
      val res = responseAs[Seq[PublicItemSummary]]
      assert(res === Nil)
    }
  }

  it should "return single items" in {
    Get("/items/1") ~> route ~> check {
      assert(status === StatusCodes.OK)
      assert(header[`Cache-Control`] === Some(`Cache-Control`(`max-age`(30))))
      assert(responseAs[PublicItem] === PublicItem(1, LowStock, "title-1", "desc-1"))
    }
    Get("/items/9") ~> route ~> check {
      assert(status === StatusCodes.OK)
      assert(header[`Cache-Control`] === Some(`Cache-Control`(`max-age`(40))))
      assert(responseAs[PublicItem] === PublicItem(9, InStock, "title-9", "desc-9"))
    }
    Get("/items/100") ~> route ~> check {
      assert(status === StatusCodes.OK)
      assert(header[`Cache-Control`] === Some(`Cache-Control`(`max-age`(100))))
      assert(responseAs[PublicItem] === PublicItem(100, InStock, "title-100", "desc-100"))
    }
  }

  it should "return 404 for non-existent items" in {
    Get("/items/404") ~> route ~> check {
      assert(status === StatusCodes.NotFound)
      assert(header[`Cache-Control`] === Some(`Cache-Control`(`max-age`(600))))
      // Bug fix: the original line was `response === "Not Found"` — it compared
      // an HttpResponse to a String (always false) and discarded the result, so
      // nothing was actually checked. Assert on the entity text instead.
      // NOTE(review): confirm "Not Found" matches the body the service actually
      // completes the 404 with.
      assert(responseAs[String] === "Not Found")
    }
  }
}
| stig/spray-example | src/test/scala/example/ServiceSpec.scala | Scala | mit | 3,591 |
package com.twitter.finagle.exp.mysql.codec
import com.twitter.finagle.exp.mysql.protocol.{Packet, BufferReader}
import com.twitter.finagle.exp.mysql.util.BufferUtil
import java.util.logging.Logger
import org.jboss.netty.buffer.ChannelBuffer
import org.jboss.netty.channel.{Channel, ChannelHandlerContext}
import org.jboss.netty.handler.codec.frame.FrameDecoder
/**
* Decodes logical MySQL packets that could be fragmented across
* frames. MySQL packets are a length encoded set of bytes written
* in little endian byte order.
*/
class PacketFrameDecoder extends FrameDecoder {
private[this] val logger = Logger.getLogger("finagle-mysql")
/** Carve one logical MySQL packet out of `buffer`, if a complete one is
 *  available.
 *
 *  Returns null (per the FrameDecoder contract) when the buffer does not yet
 *  hold a full header or a full body; netty calls again once more bytes
 *  arrive. In the short-body case the reader index is rewound so the header
 *  is re-parsed on the next attempt.
 */
override def decode(ctx: ChannelHandlerContext, channel: Channel, buffer: ChannelBuffer): Packet = {
  if (buffer.readableBytes < Packet.HeaderSize) {
    // Not even a complete 4-byte header yet.
    null
  } else {
    buffer.markReaderIndex()
    val headerBytes = new Array[Byte](Packet.HeaderSize)
    buffer.readBytes(headerBytes)

    // Header layout: 3-byte little-endian length, then 1-byte sequence number.
    val headerReader = BufferReader(headerBytes)
    val length = headerReader.readInt24()
    val seq = headerReader.readUnsignedByte()

    if (buffer.readableBytes < length) {
      // Body incomplete: rewind so the header is re-read next time.
      buffer.resetReaderIndex()
      null
    } else {
      val body = new Array[Byte](length)
      buffer.readBytes(body)
      logger.finest("RECEIVED: MySQL packet (length=%d, seq=%d)\\n%s".format(length, seq, BufferUtil.hex(body)))
      Packet(length, seq, body)
    }
  }
}
} | joshbedo/finagle | finagle-mysql/src/main/scala/com/twitter/finagle/mysql/codec/PacketFrameDecoder.scala | Scala | apache-2.0 | 1,346 |
package dotty.tools
package dotc
package reporting
import core.Contexts.Context
import collection.mutable
import Reporter.Diagnostic
import config.Printers._
/**
* This class implements a Reporter that stores all messages
*/
/**
 * A Reporter that buffers every diagnostic instead of emitting it, so the
 * stored messages can be replayed later via `flush`.
 */
class StoreReporter extends Reporter {

  // Buffered diagnostics in arrival order. Fix: the original lazily allocated
  // this buffer (`var ... = null`) and null-guarded every use; eager
  // initialization removes the nulls with identical external behavior.
  private val infos = new mutable.ListBuffer[Diagnostic]

  protected def doReport(d: Diagnostic)(implicit ctx: Context): Unit = {
    typr.println(s">>>> StoredError: ${d.msg}") // !!! DEBUG
    infos += d
  }

  /** Replay every stored diagnostic to the current context's reporter.
   *  Note: the buffer is not cleared, matching the original behavior —
   *  a second flush re-reports the same diagnostics.
   */
  override def flush()(implicit ctx: Context) =
    infos foreach ctx.reporter.report
}
| DarkDimius/dotty | src/dotty/tools/dotc/reporting/StoreReporter.scala | Scala | bsd-3-clause | 644 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.tools
import org.apache.zookeeper.ZooKeeperMain
// Thin subclass that exposes ZooKeeperMain's protected command machinery so a
// command passed on the command line can be executed programmatically.
class ZooKeeperMainWrapper(args: Array[String]) extends ZooKeeperMain(args) {
  // Process the command parsed from the constructor args (held in `cl`),
  // then terminate the JVM with status 0.
  def runCmd(): Unit = {
    processCmd(this.cl)
    System.exit(0)
  }
}
/**
 * ZooKeeper 3.4.6 broke being able to pass commands on the command line.
 * See ZOOKEEPER-1897. This class is a hack to restore this facility.
 */
object ZooKeeperMainWrapper {
  /** Entry point: build the wrapper and run the single parsed command.
   *  Exits the JVM via runCmd.
   */
  def main(args: Array[String]): Unit =
    new ZooKeeperMainWrapper(args).runCmd()
}
| cran/rkafkajars | java/kafka/tools/ZooKeeperMainWrapper.scala | Scala | apache-2.0 | 1,334 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.rdd
import java.util
import java.util.UUID
import java.util.concurrent._
import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
import scala.util.Random
import scala.util.control.Breaks._
import org.apache.hadoop.conf.{Configurable, Configuration}
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat, FileSplit}
import org.apache.spark.{SparkEnv, SparkException}
import org.apache.spark.rdd.{DataLoadCoalescedRDD, DataLoadPartitionCoalescer, UpdateCoalescedRDD}
import org.apache.spark.sql.{CarbonEnv, DataFrame, Row, SQLContext}
import org.apache.spark.sql.execution.command.{AlterTableModel, CompactionModel, ExecutionErrors, UpdateTableModel}
import org.apache.spark.sql.hive.DistributionUtil
import org.apache.spark.util.SparkUtil
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastore.block.{Distributable, TableBlockInfo}
import org.apache.carbondata.core.locks.{CarbonLockFactory, ICarbonLock, LockUsage}
import org.apache.carbondata.core.metadata.{CarbonTableIdentifier, ColumnarFormatVersion}
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.mutate.CarbonUpdateUtil
import org.apache.carbondata.core.statusmanager.LoadMetadataDetails
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.core.util.path.CarbonStorePath
import org.apache.carbondata.processing.csvreaderstep.{BlockDetails, RddInpututilsForUpdate}
import org.apache.carbondata.processing.etl.DataLoadingException
import org.apache.carbondata.processing.model.CarbonLoadModel
import org.apache.carbondata.processing.newflow.exception.CarbonDataLoadingException
import org.apache.carbondata.spark._
import org.apache.carbondata.spark.load._
import org.apache.carbondata.spark.merger.{CarbonCompactionUtil, CarbonDataMergerUtil, CompactionType}
import org.apache.carbondata.spark.splits.TableSplit
import org.apache.carbondata.spark.util.{CarbonQueryUtil, CommonUtil}
/**
* This is the factory class which can create different RDD depends on user needs.
*
*/
object CarbonDataRDDFactory {
private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
/** Handle an ALTER TABLE ... COMPACT request.
 *
 *  Determines the compaction type (major / IUD update-delta / minor) from
 *  `alterTableModel`, builds a [[CompactionModel]], and then runs either
 *  under the single system-level compaction lock (when concurrent compaction
 *  is disabled) or under this table's own compaction lock.
 *
 *  Fails via sys.error when the table-level compaction lock is already held.
 */
def alterTableForCompaction(sqlContext: SQLContext,
    alterTableModel: AlterTableModel,
    carbonLoadModel: CarbonLoadModel,
    storePath: String,
    kettleHomePath: String,
    storeLocation: String): Unit = {
  var compactionSize: Long = 0
  var compactionType: CompactionType = CompactionType.MINOR_COMPACTION
  if (alterTableModel.compactionType.equalsIgnoreCase("major")) {
    compactionSize = CarbonDataMergerUtil.getCompactionSize(CompactionType.MAJOR_COMPACTION)
    compactionType = CompactionType.MAJOR_COMPACTION
  } else if (alterTableModel.compactionType.equalsIgnoreCase("IUD_UPDDEL_DELTA_COMPACTION")) {
    compactionType = CompactionType.IUD_UPDDEL_DELTA_COMPACTION
    // Bug fix: the original guard was `segmentUpdateStatusManager.get != None`,
    // which unwraps the Option before testing it — throwing
    // NoSuchElementException when it is empty, and comparing a manager
    // instance against None (always true) otherwise. `isDefined` is the
    // intended presence check.
    if (alterTableModel.segmentUpdateStatusManager.isDefined) {
      carbonLoadModel
        .setSegmentUpdateStatusManager(alterTableModel.segmentUpdateStatusManager.get)
      carbonLoadModel
        .setSegmentUpdateDetails(alterTableModel.segmentUpdateStatusManager.get
          .getUpdateStatusDetails.toList.asJava)
      carbonLoadModel
        .setLoadMetadataDetails(alterTableModel.segmentUpdateStatusManager.get
          .getLoadMetadataDetails.toList.asJava)
    }
  } else {
    compactionType = CompactionType.MINOR_COMPACTION
  }
  LOGGER.audit(s"Compaction request received for table " +
    s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
  val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
  val tableCreationTime = CarbonEnv.get.carbonMetastore
    .getTableCreationTime(carbonLoadModel.getDatabaseName, carbonLoadModel.getTableName)
  if (null == carbonLoadModel.getLoadMetadataDetails) {
    CommonUtil.readLoadMetadataDetails(carbonLoadModel, storePath)
  }
  // Reading the start time of data load: prefer the caller-supplied fact
  // timestamp; otherwise take the current time.
  val loadStartTime: Long =
    if (alterTableModel.factTimeStamp.isEmpty) {
      CarbonUpdateUtil.readCurrentTime
    } else {
      alterTableModel.factTimeStamp.get
    }
  carbonLoadModel.setFactTimeStamp(loadStartTime)
  // This path is always a DDL-triggered compaction.
  val isCompactionTriggerByDDl = true
  val compactionModel = CompactionModel(compactionSize,
    compactionType,
    carbonTable,
    tableCreationTime,
    isCompactionTriggerByDDl
  )
  val isConcurrentCompactionAllowed = CarbonProperties.getInstance()
    .getProperty(CarbonCommonConstants.ENABLE_CONCURRENT_COMPACTION,
      CarbonCommonConstants.DEFAULT_ENABLE_CONCURRENT_COMPACTION
    )
    .equalsIgnoreCase("true")
  // if system level compaction is enabled then only one compaction can run in the system
  // if any other request comes at this time then it will create a compaction request file.
  // so that this will be taken up by the compaction process which is executing.
  if (!isConcurrentCompactionAllowed) {
    LOGGER.info("System level compaction lock is enabled.")
    handleCompactionForSystemLocking(sqlContext,
      carbonLoadModel,
      storePath,
      kettleHomePath,
      storeLocation,
      compactionType,
      carbonTable,
      compactionModel
    )
  } else {
    // Normal flow: serialize compactions per table via the table's own lock.
    val lock = CarbonLockFactory
      .getCarbonLockObj(carbonTable.getAbsoluteTableIdentifier.getCarbonTableIdentifier,
        LockUsage.COMPACTION_LOCK
      )
    if (lock.lockWithRetries()) {
      LOGGER.info("Acquired the compaction lock for table" +
        s" ${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
      try {
        startCompactionThreads(sqlContext,
          carbonLoadModel,
          storePath,
          kettleHomePath,
          storeLocation,
          compactionModel,
          lock
        )
      } catch {
        case e: Exception =>
          LOGGER.error(s"Exception in start compaction thread. ${ e.getMessage }")
          lock.unlock()
      }
    } else {
      LOGGER.audit("Not able to acquire the compaction lock for table " +
        s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
      LOGGER.error(s"Not able to acquire the compaction lock for table" +
        s" ${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
      sys.error("Table is already locked for compaction. Please try after some time.")
    }
  }
}
/** Run compaction for `carbonTable` under the single system-level compaction
 *  lock.
 *
 *  When the lock cannot be acquired, another compaction is already running
 *  somewhere in the system, so a "compaction required" marker file is written
 *  for this table instead (the running compaction picks it up later); a
 *  DDL-triggered request additionally fails fast via sys.error.
 */
def handleCompactionForSystemLocking(sqlContext: SQLContext,
    carbonLoadModel: CarbonLoadModel,
    storePath: String,
    kettleHomePath: String,
    storeLocation: String,
    compactionType: CompactionType,
    carbonTable: CarbonTable,
    compactionModel: CompactionModel): Unit = {
  val lock = CarbonLockFactory
    .getCarbonLockObj(CarbonCommonConstants.SYSTEM_LEVEL_COMPACTION_LOCK_FOLDER,
      LockUsage.SYSTEMLEVEL_COMPACTION_LOCK
    )
  if (lock.lockWithRetries()) {
    LOGGER.info(s"Acquired the compaction lock for table ${ carbonLoadModel.getDatabaseName }" +
      s".${ carbonLoadModel.getTableName }")
    try {
      startCompactionThreads(sqlContext,
        carbonLoadModel,
        storePath,
        kettleHomePath,
        storeLocation,
        compactionModel,
        lock
      )
    } catch {
      case e: Exception =>
        LOGGER.error(s"Exception in start compaction thread. ${ e.getMessage }")
        lock.unlock()
        // if the compaction is a blocking call then only need to throw the exception.
        if (compactionModel.isDDLTrigger) {
          throw e
        }
    }
  } else {
    LOGGER.audit("Not able to acquire the system level compaction lock for table " +
      s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
    LOGGER.error("Not able to acquire the compaction lock for table " +
      s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
    // Leave a marker so the currently running compaction processes this
    // table when it finishes its own work.
    CarbonCompactionUtil
      .createCompactionRequiredFile(carbonTable.getMetaDataFilepath, compactionType)
    // do sys error only in case of DDL trigger.
    if (compactionModel.isDDLTrigger) {
      sys.error("Compaction is in progress, compaction request for table " +
        s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }" +
        " is in queue.")
    } else {
      LOGGER.error("Compaction is in progress, compaction request for table " +
        s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }" +
        " is in queue.")
    }
  }
}
/** Execute the compaction for the table in `carbonLoadModel`, then — when
 *  concurrent compaction is disabled — keep draining any queued
 *  "compaction required" marker files for other tables before releasing
 *  `compactionLock`.
 *
 *  Note: despite the name, the thread is executed synchronously via run()
 *  (see the comment at the bottom), so this call blocks until all work is
 *  done. The lock is released and the executor shut down in the finally
 *  block regardless of success or failure.
 */
def startCompactionThreads(sqlContext: SQLContext,
    carbonLoadModel: CarbonLoadModel,
    storePath: String,
    kettleHomePath: String,
    storeLocation: String,
    compactionModel: CompactionModel,
    compactionLock: ICarbonLock): Unit = {
  val executor: ExecutorService = Executors.newFixedThreadPool(1)
  // update the updated table status.
  if (compactionModel.compactionType != CompactionType.IUD_UPDDEL_DELTA_COMPACTION) {
    // update the updated table status. For the case of Update Delta Compaction the Metadata
    // is filled in LoadModel, no need to refresh.
    CommonUtil.readLoadMetadataDetails(carbonLoadModel, storePath)
  }
  // clean up of the stale segments.
  try {
    CarbonLoaderUtil.deletePartialLoadDataIfExist(carbonLoadModel, true)
  } catch {
    case e: Exception =>
      LOGGER.error(s"Exception in compaction thread while clean up of stale segments" +
        s" ${ e.getMessage }")
  }
  val compactionThread = new Thread {
    override def run(): Unit = {
      try {
        // compaction status of the table which is triggered by the user.
        var triggeredCompactionStatus = false
        var exception: Exception = null
        try {
          DataManagementFunc.executeCompaction(carbonLoadModel: CarbonLoadModel,
            storePath: String,
            compactionModel: CompactionModel,
            executor, sqlContext, kettleHomePath, storeLocation
          )
          triggeredCompactionStatus = true
        } catch {
          case e: Exception =>
            LOGGER.error(s"Exception in compaction thread ${ e.getMessage }")
            // Remember the failure; it is re-thrown only after the queued
            // tables (below) have been given their chance.
            exception = e
        }
        // continue in case of exception also, check for all the tables.
        val isConcurrentCompactionAllowed = CarbonProperties.getInstance()
          .getProperty(CarbonCommonConstants.ENABLE_CONCURRENT_COMPACTION,
            CarbonCommonConstants.DEFAULT_ENABLE_CONCURRENT_COMPACTION
          ).equalsIgnoreCase("true")
        if (!isConcurrentCompactionAllowed) {
          LOGGER.info("System level compaction lock is enabled.")
          // Tables whose request files could not be deleted; skipped on the
          // next lookup so they are not retried forever.
          val skipCompactionTables = ListBuffer[CarbonTableIdentifier]()
          var tableForCompaction = CarbonCompactionUtil
            .getNextTableToCompact(CarbonEnv.get.carbonMetastore.metadata.tablesMeta.toArray,
              skipCompactionTables.toList.asJava)
          while (null != tableForCompaction) {
            LOGGER.info("Compaction request has been identified for table " +
              s"${ tableForCompaction.carbonTable.getDatabaseName }." +
              s"${ tableForCompaction.carbonTableIdentifier.getTableName }")
            val table: CarbonTable = tableForCompaction.carbonTable
            val metadataPath = table.getMetaDataFilepath
            val compactionType = CarbonCompactionUtil.determineCompactionType(metadataPath)
            val newCarbonLoadModel = new CarbonLoadModel()
            DataManagementFunc.prepareCarbonLoadModel(storePath, table, newCarbonLoadModel)
            val tableCreationTime = CarbonEnv.get.carbonMetastore
              .getTableCreationTime(newCarbonLoadModel.getDatabaseName,
                newCarbonLoadModel.getTableName
              )
            val compactionSize = CarbonDataMergerUtil
              .getCompactionSize(CompactionType.MAJOR_COMPACTION)
            val newcompactionModel = CompactionModel(compactionSize,
              compactionType,
              table,
              tableCreationTime,
              compactionModel.isDDLTrigger
            )
            // proceed for compaction
            try {
              DataManagementFunc.executeCompaction(newCarbonLoadModel,
                newCarbonLoadModel.getStorePath,
                newcompactionModel,
                executor, sqlContext, kettleHomePath, storeLocation
              )
            } catch {
              case e: Exception =>
                LOGGER.error("Exception in compaction thread for table " +
                  s"${ tableForCompaction.carbonTable.getDatabaseName }." +
                  s"${ tableForCompaction.carbonTableIdentifier.getTableName }")
              // not handling the exception. only logging as this is not the table triggered
              // by user.
            } finally {
              // delete the compaction required file in case of failure or success also.
              if (!CarbonCompactionUtil
                .deleteCompactionRequiredFile(metadataPath, compactionType)) {
                // if the compaction request file is not been able to delete then
                // add those tables details to the skip list so that it wont be considered next.
                skipCompactionTables.+=:(tableForCompaction.carbonTableIdentifier)
                LOGGER.error("Compaction request file can not be deleted for table " +
                  s"${ tableForCompaction.carbonTable.getDatabaseName }." +
                  s"${ tableForCompaction.carbonTableIdentifier.getTableName }")
              }
            }
            // ********* check again for all the tables.
            tableForCompaction = CarbonCompactionUtil
              .getNextTableToCompact(CarbonEnv.get.carbonMetastore.metadata
                .tablesMeta.toArray, skipCompactionTables.asJava
              )
          }
          // giving the user his error for telling in the beeline if his triggered table
          // compaction is failed.
          if (!triggeredCompactionStatus) {
            throw new Exception("Exception in compaction " + exception.getMessage)
          }
        }
      } finally {
        executor.shutdownNow()
        DataManagementFunc.deletePartialLoadsInCompaction(carbonLoadModel)
        compactionLock.unlock()
      }
    }
  }
  // calling the run method of a thread to make the call as blocking call.
  // in the future we may make this as concurrent.
  compactionThread.run()
}
def loadCarbonData(sqlContext: SQLContext,
carbonLoadModel: CarbonLoadModel,
storePath: String,
kettleHomePath: String,
columnar: Boolean,
partitionStatus: String = CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS,
useKettle: Boolean,
dataFrame: Option[DataFrame] = None,
updateModel: Option[UpdateTableModel] = None): Unit = {
val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
val isAgg = false
// for handling of the segment Merging.
def handleSegmentMerging(tableCreationTime: Long): Unit = {
LOGGER.info(s"compaction need status is" +
s" ${ CarbonDataMergerUtil.checkIfAutoLoadMergingRequired() }")
if (CarbonDataMergerUtil.checkIfAutoLoadMergingRequired()) {
LOGGER.audit(s"Compaction request received for table " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
val compactionSize = 0
val isCompactionTriggerByDDl = false
val compactionModel = CompactionModel(compactionSize,
CompactionType.MINOR_COMPACTION,
carbonTable,
tableCreationTime,
isCompactionTriggerByDDl
)
var storeLocation = ""
val configuredStore = CarbonLoaderUtil.getConfiguredLocalDirs(SparkEnv.get.conf)
if (null != configuredStore && configuredStore.nonEmpty) {
storeLocation = configuredStore(Random.nextInt(configuredStore.length))
}
if (storeLocation == null) {
storeLocation = System.getProperty("java.io.tmpdir")
}
storeLocation = storeLocation + "/carbonstore/" + System.nanoTime()
val isConcurrentCompactionAllowed = CarbonProperties.getInstance()
.getProperty(CarbonCommonConstants.ENABLE_CONCURRENT_COMPACTION,
CarbonCommonConstants.DEFAULT_ENABLE_CONCURRENT_COMPACTION
)
.equalsIgnoreCase("true")
if (!isConcurrentCompactionAllowed) {
handleCompactionForSystemLocking(sqlContext,
carbonLoadModel,
storePath,
kettleHomePath,
storeLocation,
CompactionType.MINOR_COMPACTION,
carbonTable,
compactionModel
)
} else {
val lock = CarbonLockFactory
.getCarbonLockObj(carbonTable.getAbsoluteTableIdentifier.getCarbonTableIdentifier,
LockUsage.COMPACTION_LOCK
)
if (lock.lockWithRetries()) {
LOGGER.info("Acquired the compaction lock.")
try {
startCompactionThreads(sqlContext,
carbonLoadModel,
storePath,
kettleHomePath,
storeLocation,
compactionModel,
lock
)
} catch {
case e: Exception =>
LOGGER.error(s"Exception in start compaction thread. ${ e.getMessage }")
lock.unlock()
throw e
}
} else {
LOGGER.audit("Not able to acquire the compaction lock for table " +
s"${ carbonLoadModel.getDatabaseName }.${
carbonLoadModel
.getTableName
}")
LOGGER.error("Not able to acquire the compaction lock for table " +
s"${ carbonLoadModel.getDatabaseName }.${
carbonLoadModel
.getTableName
}")
}
}
}
}
try {
LOGGER.audit(s"Data load request has been received for table" +
s" ${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
if (!useKettle) {
LOGGER.audit("Data is loading with New Data Flow for table " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
}
// Check if any load need to be deleted before loading new data
DataManagementFunc.deleteLoadsAndUpdateMetadata(carbonLoadModel.getDatabaseName,
carbonLoadModel.getTableName, storePath, isForceDeletion = false)
if (null == carbonLoadModel.getLoadMetadataDetails) {
CommonUtil.readLoadMetadataDetails(carbonLoadModel, storePath)
}
var currentLoadCount = -1
val convLoadDetails = carbonLoadModel.getLoadMetadataDetails.asScala
// taking the latest segment ID present.
// so that any other segments above this will be deleted.
if (convLoadDetails.nonEmpty) {
convLoadDetails.foreach { l =>
var loadCount = 0
breakable {
try {
loadCount = Integer.parseInt(l.getLoadName)
} catch {
case e: NumberFormatException => // case of merge folder. ignore it.
break
}
if (currentLoadCount < loadCount) {
currentLoadCount = loadCount
}
}
}
}
currentLoadCount += 1
// Deleting the any partially loaded data if present.
// in some case the segment folder which is present in store will not have entry in status.
// so deleting those folders.
try {
CarbonLoaderUtil.deletePartialLoadDataIfExist(carbonLoadModel, false)
} catch {
case e: Exception =>
LOGGER
.error(s"Exception in data load while clean up of stale segments ${ e.getMessage }")
}
// reading the start time of data load.
val loadStartTime = CarbonUpdateUtil.readCurrentTime();
carbonLoadModel.setFactTimeStamp(loadStartTime)
val tableCreationTime = CarbonEnv.get.carbonMetastore
.getTableCreationTime(carbonLoadModel.getDatabaseName, carbonLoadModel.getTableName)
val schemaLastUpdatedTime = CarbonEnv.get.carbonMetastore
.getSchemaLastUpdatedTime(carbonLoadModel.getDatabaseName, carbonLoadModel.getTableName)
// get partition way from configuration
// val isTableSplitPartition = CarbonProperties.getInstance().getProperty(
// CarbonCommonConstants.TABLE_SPLIT_PARTITION,
// CarbonCommonConstants.TABLE_SPLIT_PARTITION_DEFAULT_VALUE).toBoolean
val isTableSplitPartition = false
var blocksGroupBy: Array[(String, Array[BlockDetails])] = null
var status: Array[(String, LoadMetadataDetails)] = null
var res: Array[List[(String, (LoadMetadataDetails, ExecutionErrors))]] = null
def loadDataFile(): Unit = {
if (isTableSplitPartition) {
/*
* when data handle by table split partition
* 1) get partition files, direct load or not will get the different files path
* 2) get files blocks by using SplitUtils
* 3) output Array[(partitionID,Array[BlockDetails])] to blocksGroupBy
*/
var splits = Array[TableSplit]()
if (carbonLoadModel.isDirectLoad) {
// get all table Splits, this part means files were divide to different partitions
splits = CarbonQueryUtil.getTableSplitsForDirectLoad(carbonLoadModel.getFactFilePath)
// get all partition blocks from file list
blocksGroupBy = splits.map {
split =>
val pathBuilder = new StringBuilder()
for (path <- split.getPartition.getFilesPath.asScala) {
pathBuilder.append(path).append(",")
}
if (pathBuilder.nonEmpty) {
pathBuilder.substring(0, pathBuilder.size - 1)
}
(split.getPartition.getUniqueID, SparkUtil.getSplits(pathBuilder.toString(),
sqlContext.sparkContext
))
}
} else {
// get all table Splits,when come to this, means data have been partition
splits = CarbonQueryUtil.getTableSplits(carbonLoadModel.getDatabaseName,
carbonLoadModel.getTableName, null)
// get all partition blocks from factFilePath/uniqueID/
blocksGroupBy = splits.map {
split =>
val pathBuilder = new StringBuilder()
pathBuilder.append(carbonLoadModel.getFactFilePath)
if (!carbonLoadModel.getFactFilePath.endsWith("/")
&& !carbonLoadModel.getFactFilePath.endsWith("\\")) {
pathBuilder.append("/")
}
pathBuilder.append(split.getPartition.getUniqueID).append("/")
(split.getPartition.getUniqueID,
SparkUtil.getSplits(pathBuilder.toString, sqlContext.sparkContext))
}
}
} else {
/*
* when data load handle by node partition
* 1)clone the hadoop configuration,and set the file path to the configuration
* 2)use org.apache.hadoop.mapreduce.lib.input.TextInputFormat to get splits,size info
* 3)use CarbonLoaderUtil.nodeBlockMapping to get mapping info of node and block,
* for locally writing carbondata files(one file one block) in nodes
* 4)use kettle: use DataFileLoaderRDD to load data and write to carbondata files
* non kettle: use NewCarbonDataLoadRDD to load data and write to carbondata files
*/
val hadoopConfiguration = new Configuration(sqlContext.sparkContext.hadoopConfiguration)
// FileUtils will skip file which is no csv, and return all file path which split by ','
val filePaths = carbonLoadModel.getFactFilePath
hadoopConfiguration.set(FileInputFormat.INPUT_DIR, filePaths)
hadoopConfiguration.set(FileInputFormat.INPUT_DIR_RECURSIVE, "true")
hadoopConfiguration.set("io.compression.codecs",
"""org.apache.hadoop.io.compress.GzipCodec,
org.apache.hadoop.io.compress.DefaultCodec,
org.apache.hadoop.io.compress.BZip2Codec""".stripMargin)
CommonUtil.configSplitMaxSize(sqlContext.sparkContext, filePaths, hadoopConfiguration)
val inputFormat = new org.apache.hadoop.mapreduce.lib.input.TextInputFormat
val jobContext = new Job(hadoopConfiguration)
val rawSplits = inputFormat.getSplits(jobContext).toArray
val blockList = rawSplits.map { inputSplit =>
val fileSplit = inputSplit.asInstanceOf[FileSplit]
new TableBlockInfo(fileSplit.getPath.toString,
fileSplit.getStart, "1",
fileSplit.getLocations, fileSplit.getLength, ColumnarFormatVersion.V1
).asInstanceOf[Distributable]
}
// group blocks to nodes, tasks
val startTime = System.currentTimeMillis
val activeNodes = DistributionUtil
.ensureExecutorsAndGetNodeList(blockList, sqlContext.sparkContext)
val nodeBlockMapping =
CarbonLoaderUtil
.nodeBlockMapping(blockList.toSeq.asJava, -1, activeNodes.toList.asJava).asScala
.toSeq
val timeElapsed: Long = System.currentTimeMillis - startTime
LOGGER.info("Total Time taken in block allocation: " + timeElapsed)
LOGGER.info(s"Total no of blocks: ${ blockList.length }, " +
s"No.of Nodes: ${nodeBlockMapping.size}")
var str = ""
nodeBlockMapping.foreach(entry => {
val tableBlock = entry._2
str = str + "#Node: " + entry._1 + " no.of.blocks: " + tableBlock.size()
tableBlock.asScala.foreach(tableBlockInfo =>
if (!tableBlockInfo.getLocations.exists(hostentry =>
hostentry.equalsIgnoreCase(entry._1)
)) {
str = str + " , mismatch locations: " + tableBlockInfo.getLocations
.foldLeft("")((a, b) => a + "," + b)
}
)
str = str + "\n"
}
)
LOGGER.info(str)
blocksGroupBy = nodeBlockMapping.map(entry => {
val blockDetailsList =
entry._2.asScala.map(distributable => {
val tableBlock = distributable.asInstanceOf[TableBlockInfo]
new BlockDetails(new Path(tableBlock.getFilePath),
tableBlock.getBlockOffset, tableBlock.getBlockLength, tableBlock.getLocations
)
}).toArray
(entry._1, blockDetailsList)
}
).toArray
}
if (useKettle) {
status = new DataFileLoaderRDD(sqlContext.sparkContext,
new DataLoadResultImpl(),
carbonLoadModel,
storePath,
kettleHomePath,
columnar,
currentLoadCount,
tableCreationTime,
schemaLastUpdatedTime,
blocksGroupBy,
isTableSplitPartition
).collect()
} else {
status = new NewCarbonDataLoadRDD(sqlContext.sparkContext,
new DataLoadResultImpl(),
carbonLoadModel,
currentLoadCount,
blocksGroupBy,
isTableSplitPartition).collect()
}
}
def loadDataFrame(): Unit = {
try {
val rdd = dataFrame.get.rdd
val nodeNumOfData = rdd.partitions.flatMap[String, Array[String]]{ p =>
DataLoadPartitionCoalescer.getPreferredLocs(rdd, p).map(_.host)
}.distinct.size
val nodes = DistributionUtil.ensureExecutorsByNumberAndGetNodeList(nodeNumOfData,
sqlContext.sparkContext)
val newRdd = new DataLoadCoalescedRDD[Row](rdd, nodes.toArray.distinct)
if (useKettle) {
status = new DataFrameLoaderRDD(sqlContext.sparkContext,
new DataLoadResultImpl(),
carbonLoadModel,
storePath,
kettleHomePath,
columnar,
currentLoadCount,
tableCreationTime,
schemaLastUpdatedTime,
newRdd).collect()
} else {
status = new NewDataFrameLoaderRDD(sqlContext.sparkContext,
new DataLoadResultImpl(),
carbonLoadModel,
currentLoadCount,
tableCreationTime,
schemaLastUpdatedTime,
newRdd).collect()
}
} catch {
case ex: Exception =>
LOGGER.error(ex, "load data frame failed")
throw ex
}
}
    // Loads updated rows (update command path). Rows are keyed by their trailing segment-id
    // column, grouped per segment, coalesced across executor nodes, and each group is loaded
    // via triggerDataLoadForSegment; per-segment results are collected into the enclosing
    // `res` variable.
    def loadDataFrameForUpdate(): Unit = {
      // Runs the data load for one segment's rows and yields a single-element iterator of
      // (uniqueLoadStatusId, (load details, execution errors)).
      // NOTE(review): the load itself executes eagerly in the initializer of the anonymous
      // Iterator below (the try block is constructor code), not on the first next() call.
      def triggerDataLoadForSegment(key: String,
          iter: Iterator[Row]): Iterator[(String, (LoadMetadataDetails, ExecutionErrors))] = {
        val rddResult = new updateResultImpl()
        val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
        val resultIter = new Iterator[(String, (LoadMetadataDetails, ExecutionErrors))] {
          var partitionID = "0"
          val loadMetadataDetails = new LoadMetadataDetails
          val executionErrors = new ExecutionErrors(FailureCauses.NONE, "")
          var uniqueLoadStatusId = ""
          try {
            val segId = key
            // Continue task numbering after the highest task id already present in the segment.
            val taskNo = CarbonUpdateUtil
              .getLatestTaskIdForSegment(segId,
                CarbonStorePath.getCarbonTablePath(carbonLoadModel.getStorePath,
                  carbonTable.getCarbonTableIdentifier))
            val index = taskNo + 1
            uniqueLoadStatusId = carbonLoadModel.getTableName +
                                 CarbonCommonConstants.UNDERSCORE +
                                 index
            // convert timestamp
            // NOTE(review): timeStampInLong is never read afterwards.
            val timeStampInLong = updateModel.get.updatedTimeStamp + ""
            loadMetadataDetails.setPartitionCount(partitionID)
            loadMetadataDetails.setLoadName(segId)
            // Pessimistically mark as FAILURE; flipped to SUCCESS just before the load runs.
            loadMetadataDetails.setLoadStatus(CarbonCommonConstants.STORE_LOADSTATUS_FAILURE)
            carbonLoadModel.setPartitionId(partitionID)
            carbonLoadModel.setSegmentId(segId)
            carbonLoadModel.setTaskNo(String.valueOf(index))
            carbonLoadModel.setFactTimeStamp(updateModel.get.updatedTimeStamp)
            // During Block Spill case Increment of File Count and proper adjustment of Block
            // naming is only done when AbstractFactDataWriter.java : initializeWriter get
            // CarbondataFileName as null. For handling Block Spill not setting the
            // CarbondataFileName in case of Update.
            // carbonLoadModel.setCarbondataFileName(newBlockName)

            // storeLocation = CarbonDataLoadRDD.initialize(carbonLoadModel, index)
            loadMetadataDetails.setLoadStatus(CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS)
            // Register the row iterator under a unique key so the load can pull from it,
            // and always unregister it afterwards to avoid leaking the entry.
            val rddIteratorKey = CarbonCommonConstants.RDDUTIL_UPDATE_KEY +
                                 UUID.randomUUID().toString
            try {
              RddInpututilsForUpdate.put(rddIteratorKey,
                new RddIteratorForUpdate(iter, carbonLoadModel))
              carbonLoadModel.setRddIteratorKey(rddIteratorKey)
              CarbonDataLoadForUpdate
                .run(carbonLoadModel, index, storePath, kettleHomePath,
                  segId, loadMetadataDetails, executionErrors)
            } finally {
              RddInpututilsForUpdate.remove(rddIteratorKey)
            }
          } catch {
            case e: Exception =>
              LOGGER.info("DataLoad failure")
              LOGGER.error(e)
              throw e
          }

          var finished = false

          override def hasNext: Boolean = !finished

          // Single-shot iterator: returns the load result once, then hasNext is false.
          override def next(): (String, (LoadMetadataDetails, ExecutionErrors)) = {
            finished = true
            rddResult
              .getKey(uniqueLoadStatusId,
                (loadMetadataDetails, executionErrors))
          }
        }
        resultIter
      }

      val updateRdd = dataFrame.get.rdd

      val keyRDD = updateRdd.map(row =>
        // splitting as (key, value) i.e., (segment, updatedRows)
        (row.get(row.size - 1).toString, Row(row.toSeq.slice(0, row.size - 1): _*))
      )
      val groupBySegmentRdd = keyRDD.groupByKey()

      // Same locality-driven coalescing scheme as loadDataFrame, applied to the grouped RDD.
      val nodeNumOfData = groupBySegmentRdd.partitions.flatMap[String, Array[String]] { p =>
        DataLoadPartitionCoalescer.getPreferredLocs(groupBySegmentRdd, p).map(_.host)
      }.distinct.size
      val nodes = DistributionUtil.ensureExecutorsByNumberAndGetNodeList(nodeNumOfData,
        sqlContext.sparkContext)
      val groupBySegmentAndNodeRdd =
        new UpdateCoalescedRDD[(String, scala.Iterable[Row])](groupBySegmentRdd,
          nodes.distinct.toArray)

      res = groupBySegmentAndNodeRdd.map(x =>
        triggerDataLoadForSegment(x._1, x._2.toIterator).toList
      ).collect()
    }
if (!updateModel.isDefined) {
CarbonLoaderUtil.checkAndCreateCarbonDataLocation(storePath,
carbonLoadModel.getDatabaseName, carbonLoadModel.getTableName, currentLoadCount.toString)
}
var loadStatus = CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS
var errorMessage: String = "DataLoad failure"
var executorMessage: String = ""
try {
if (updateModel.isDefined) {
loadDataFrameForUpdate()
} else if (dataFrame.isDefined) {
loadDataFrame()
}
else {
loadDataFile()
}
if (updateModel.isDefined) {
res.foreach(resultOfSeg => resultOfSeg.foreach(
resultOfBlock => {
if (resultOfBlock._2._1.getLoadStatus
.equalsIgnoreCase(CarbonCommonConstants.STORE_LOADSTATUS_FAILURE)) {
loadStatus = CarbonCommonConstants.STORE_LOADSTATUS_FAILURE
if (resultOfBlock._2._2.failureCauses == FailureCauses.NONE) {
updateModel.get.executorErrors.failureCauses = FailureCauses.EXECUTOR_FAILURE
updateModel.get.executorErrors.errorMsg = "Failure in the Executor."
}
else {
updateModel.get.executorErrors = resultOfBlock._2._2
}
}
}
))
}
else {
val newStatusMap = scala.collection.mutable.Map.empty[String, String]
if (status.nonEmpty) {
status.foreach { eachLoadStatus =>
val state = newStatusMap.get(eachLoadStatus._1)
state match {
case Some(CarbonCommonConstants.STORE_LOADSTATUS_FAILURE) =>
newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2.getLoadStatus)
case Some(CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS)
if eachLoadStatus._2.getLoadStatus ==
CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS =>
newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2.getLoadStatus)
case _ =>
newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2.getLoadStatus)
}
}
newStatusMap.foreach {
case (key, value) =>
if (value == CarbonCommonConstants.STORE_LOADSTATUS_FAILURE) {
loadStatus = CarbonCommonConstants.STORE_LOADSTATUS_FAILURE
} else if (value == CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS &&
!loadStatus.equals(CarbonCommonConstants.STORE_LOADSTATUS_FAILURE)) {
loadStatus = CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS
}
}
} else {
loadStatus = CarbonCommonConstants.STORE_LOADSTATUS_FAILURE
}
if (loadStatus != CarbonCommonConstants.STORE_LOADSTATUS_FAILURE &&
partitionStatus == CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS) {
loadStatus = partitionStatus
}
}
} catch {
case ex: Throwable =>
loadStatus = CarbonCommonConstants.STORE_LOADSTATUS_FAILURE
ex match {
case sparkException: SparkException =>
if (sparkException.getCause.isInstanceOf[DataLoadingException] ||
sparkException.getCause.isInstanceOf[CarbonDataLoadingException]) {
executorMessage = sparkException.getCause.getMessage
errorMessage = errorMessage + ": " + executorMessage
}
case _ =>
executorMessage = ex.getCause.getMessage
errorMessage = errorMessage + ": " + executorMessage
}
LOGGER.info(errorMessage)
LOGGER.error(ex)
}
// handle the status file updation for the update cmd.
if (updateModel.isDefined) {
if (loadStatus == CarbonCommonConstants.STORE_LOADSTATUS_FAILURE) {
// updateModel.get.executorErrors.errorMsg = errorMessage
if (updateModel.get.executorErrors.failureCauses == FailureCauses.NONE) {
updateModel.get.executorErrors.failureCauses = FailureCauses.EXECUTOR_FAILURE
updateModel.get.executorErrors.errorMsg = "Update failed as the data load has failed."
}
return
}
else {
// in success case handle updation of the table status file.
// success case.
val segmentDetails = new util.HashSet[String]()
var resultSize = 0
res.foreach(resultOfSeg => {
resultSize = resultSize + resultOfSeg.size
resultOfSeg.foreach(
resultOfBlock => {
segmentDetails.add(resultOfBlock._2._1.getLoadName)
}
)}
)
// this means that the update doesnt have any records to update so no need to do table
// status file updation.
if (resultSize == 0) {
LOGGER.audit("Data update is successful with 0 rows updation for " +
s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}")
return
}
if (
CarbonUpdateUtil
.updateTableMetadataStatus(segmentDetails,
carbonTable,
updateModel.get.updatedTimeStamp + "",
true,
new util.ArrayList[String](0))) {
LOGGER.audit("Data update is successful for " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
}
else {
val errorMessage = "Data update failed due to failure in table status updation."
LOGGER.audit("Data update is failed for " +
s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}")
LOGGER.error("Data update failed due to failure in table status updation.")
updateModel.get.executorErrors.errorMsg = errorMessage
updateModel.get.executorErrors.failureCauses = FailureCauses
.STATUS_FILE_UPDATION_FAILURE
return
}
}
return
}
LOGGER.info("********starting clean up**********")
if (loadStatus == CarbonCommonConstants.STORE_LOADSTATUS_FAILURE) {
CarbonLoaderUtil.deleteSegment(carbonLoadModel, currentLoadCount)
LOGGER.info("********clean up done**********")
LOGGER.audit(s"Data load is failed for " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
LOGGER.warn("Cannot write load metadata file as data load failed")
throw new Exception(errorMessage)
} else {
val metadataDetails = status(0)._2
if (!isAgg) {
val status = CarbonLoaderUtil.recordLoadMetadata(currentLoadCount, metadataDetails,
carbonLoadModel, loadStatus, loadStartTime)
if (!status) {
val errorMessage = "Dataload failed due to failure in table status updation."
LOGGER.audit("Data load is failed for " +
s"${ carbonLoadModel.getDatabaseName }.${
carbonLoadModel
.getTableName
}")
LOGGER.error("Dataload failed due to failure in table status updation.")
throw new Exception(errorMessage)
}
} else if (!carbonLoadModel.isRetentionRequest) {
// TODO : Handle it
LOGGER.info("********Database updated**********")
}
LOGGER.audit("Data load is successful for " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
try {
// compaction handling
handleSegmentMerging(tableCreationTime)
} catch {
case e: Exception =>
throw new Exception(
"Dataload is success. Auto-Compaction has failed. Please check logs.")
}
}
}
}
}
| JihongMA/incubator-carbondata | integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala | Scala | apache-2.0 | 42,806 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.data.stats.usage
import org.apache.accumulo.core.client.mock.MockInstance
import org.apache.accumulo.core.client.security.tokens.PasswordToken
import org.apache.accumulo.core.security.Authorizations
import org.geotools.data.Query
import org.geotools.filter.text.cql2.CQL
import org.geotools.geometry.jts.ReferencedEnvelope
import org.joda.time.format.DateTimeFormat
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.audit.{AccumuloAuditService, AccumuloQueryEventTransform}
import org.locationtech.geomesa.accumulo.util.GeoMesaBatchWriterConfig
import org.locationtech.geomesa.index.audit.QueryEvent
import org.locationtech.geomesa.index.conf.QueryHints
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConverters._
@RunWith(classOf[JUnitRunner])
class QueryStatTransformTest extends Specification {

  val df = DateTimeFormat.forPattern("yyyy.MM.dd HH:mm:ss")

  val table = "QueryStatTransformTest"
  val featureName = "stat-writer-test"

  // In-memory Accumulo instance backing both examples below.
  val connector = new MockInstance().getConnector("user", new PasswordToken("password"))
  connector.tableOperations().create(table)

  "QueryStatTransform" should {

    "convert query stats to and from accumulo" in {
      // currently we don't restore table and feature in the query stat - thus setting them null here
      val event =
        QueryEvent(AccumuloAuditService.StoreType, featureName, 500L, "user1", "attr=1", "hint1=true", 101L, 201L, 11)

      // Persist the event as a mutation, then read it back and round-trip it.
      val batchWriter = connector.createBatchWriter(table, GeoMesaBatchWriterConfig())
      batchWriter.addMutation(AccumuloQueryEventTransform.toMutation(event))
      batchWriter.flush()
      batchWriter.close()

      val rows = connector.createScanner(table, new Authorizations()).iterator().asScala.toList
      val roundTripped = AccumuloQueryEventTransform.toEvent(rows)

      roundTripped mustEqual event
    }

    "convert hints to readable string" in {
      val bbox = new ReferencedEnvelope()

      val query = new Query("test", CQL.toFilter("INCLUDE"))
      query.getHints.put(QueryHints.DENSITY_BBOX, bbox)
      query.getHints.put(QueryHints.DENSITY_WIDTH, 500)
      query.getHints.put(QueryHints.DENSITY_HEIGHT, 500)

      val rendered = QueryEvent.hintsToString(query.getHints)

      rendered must contain(s"DENSITY_BBOX_KEY=$bbox")
      rendered must contain("WIDTH_KEY=500")
      rendered must contain("HEIGHT_KEY=500")
    }
  }
}
| ronq/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/data/stats/usage/QueryStatTransformTest.scala | Scala | apache-2.0 | 2,908 |
package test.crawler
import org.specs2.mock.Mockito
import org.specs2.mutable.Specification
import reactivemongo.api.commands.UpdateWriteResult
import test.UtilTest
import test.crawler.CrawlerMocksUtil.HttpClientMock._
import wow.crawler.{WowCharacterCrawler, WowGuildApi, WowGuildCrawler}
import wow.dto.{WowCharacter, WowGuild}
import spray.json._
import wow.dao.{WowCharacterService, WowGuildService}
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import wow.dto.WowGuildProtocol._
import scala.concurrent.duration._
/**
* Created by Ross on 9/4/2016.
*/
class WowGuildCrawlerSpecs extends Specification with Mockito {

  // Stubbed API: canned fixture responses for every remote call the crawler makes.
  val wowGuildApiMock = mock[WowGuildApi]

  wowGuildApiMock.getGuildInfoFromWowProgress() returns
    Future(mockResponseWithGzip("eu_magtheridon_tier18.json.gz"))

  wowGuildApiMock.getGuildInfoFromRealmPop() returns
    Future(mockResponseWithJson("eu-magtherido-realpop-short.json"))

  wowGuildApiMock.getGuild(org.mockito.Matchers.anyString()) returns {
    val guildJson = UtilTest.readResourceAsString("WowGuildBase.json")
    Future(guildJson.parseJson.convertTo[WowGuild])
  }

  // Stubbed persistence layer: every insert succeeds with a dummy write result.
  val wowGuildServiceMock = mock[WowGuildService]

  wowGuildServiceMock.insert(org.mockito.Matchers.any[WowGuild]()) returns
    Future((new WowGuild("Mock", None), mock[UpdateWriteResult]))

  "WowGuildCrawler when crawls from wowProgress " should {
    "return a Future list of couples (WowGuild, UpdateWriteResult)" >> {
      val crawler = new WowGuildCrawler(wowGuildApiMock, wowGuildServiceMock)
      val guilds = Await.result(crawler.crawlWowGuildFromWowProgressGuildList(), 10.seconds)
      guilds must have size(238)
    }
  }

  "WowGuildCrawler when crawls from realmPop " should {
    "return a Future list of couples (WowGuild, UpdateWriteResult)" >> {
      val crawler = new WowGuildCrawler(wowGuildApiMock, wowGuildServiceMock)
      val guilds = Await.result(crawler.crawlWowGuildFromRealmPopGuildList(), 10.seconds)
      guilds must have size(5531)
    }
  }
}
| MeMpy/wow-characters | src/test/scala/test/crawler/WowGuildCrawlerSpecs.scala | Scala | gpl-3.0 | 2,150 |
package patmat
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import patmat.Huffman._
@RunWith(classOf[JUnitRunner])
class HuffmanSuite extends FunSuite {

  // Shared fixture trees: t1 is a single fork, t2 nests t1 under another fork.
  trait TestTrees {
    val t1 = Fork(Leaf('a', 2), Leaf('b', 3), List('a', 'b'), 5)
    val t2 = Fork(Fork(Leaf('a', 2), Leaf('b', 3), List('a', 'b'), 5), Leaf('d', 4), List('a', 'b', 'd'), 9)
  }

  test("weight of a larger tree") {
    new TestTrees {
      assert(weight(t1) === 5)
    }
  }

  test("chars of a larger tree") {
    new TestTrees {
      assert(chars(t2) === List('a', 'b', 'd'))
    }
  }

  test("string2chars(\"hello, world\")") {
    val expected = List('h', 'e', 'l', 'l', 'o', ',', ' ', 'w', 'o', 'r', 'l', 'd')
    assert(string2Chars("hello, world") === expected)
  }

  test("makeOrderedLeafList for some frequency table") {
    val frequencies = List(('t', 2), ('e', 1), ('x', 3))
    assert(makeOrderedLeafList(frequencies) === List(Leaf('e', 1), Leaf('t', 2), Leaf('x', 3)))
  }

  test("combine of some leaf list") {
    val leaves = List(Leaf('e', 1), Leaf('t', 2), Leaf('x', 4))
    val expected = List(Fork(Leaf('e', 1), Leaf('t', 2), List('e', 't'), 3), Leaf('x', 4))
    assert(combine(leaves) === expected)
  }

  test("decode and encode a very short text should be identity") {
    new TestTrees {
      val text = "ab".toList
      assert(decode(t1, encode(t1)(text)) === text)
    }
  }

  test("decode and encodeQuick a very short text should be identity") {
    new TestTrees {
      val text = "ab".toList
      assert(decode(t1, quickEncode(t1)(text)) === text)
    }
  }
}
| masipauskas/coursera-scala | progfun/patmat/src/test/scala/patmat/HuffmanSuite.scala | Scala | unlicense | 1,498 |
package me.heaton.profun.week5
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class PairTupleTest extends FunSuite with TestList {

  test("pair") {
    // A two-element tuple (Tuple2) accessed positionally.
    val pair = ("answer", 42)
    assert(pair._1 === "answer")
    assert(pair._2 === 42)
  }

  test("tuple") {
    val tuple = ("some", "thing", "is", 12) // scala.Tuple4
    assert(tuple._1 === "some")
    assert(tuple._2 === "thing")
    assert(tuple._3 === "is")
    assert(tuple._4 === 12)
  }
}
| heaton/hello-scala | src/test/scala/me/heaton/profun/week5/PairTupleTest.scala | Scala | mit | 550 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import java.nio.ByteBuffer
import java.util.LinkedHashMap
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.util.{SizeEstimator, Utils}
/**
* Stores blocks in memory, either as ArrayBuffers of deserialized Java objects or as
* serialized ByteBuffers.
*/
private class MemoryStore(blockManager: BlockManager, maxMemory: Long)
  extends BlockStore(blockManager) {

  // A stored block: `value` is an ArrayBuffer[Any] when deserialized is true, otherwise a
  // ByteBuffer; `size` is its (possibly estimated) footprint in bytes.
  case class Entry(value: Any, size: Long, deserialized: Boolean)

  // Access-ordered LinkedHashMap (third constructor arg = true), so iteration order is LRU —
  // ensureFreeSpace relies on this to evict least-recently-used blocks first.
  private val entries = new LinkedHashMap[BlockId, Entry](32, 0.75f, true)
  @volatile private var currentMemory = 0L
  // Object used to ensure that only one thread is putting blocks and if necessary, dropping
  // blocks from the memory store.
  private val putLock = new Object()

  logInfo("MemoryStore started with capacity %s.".format(Utils.bytesToString(maxMemory)))

  /** Bytes of capacity not currently occupied by stored blocks. */
  def freeMemory: Long = maxMemory - currentMemory

  // NOTE(review): throws NullPointerException if the block is absent — callers presumably
  // only ask for blocks they know are stored; verify against call sites.
  override def getSize(blockId: BlockId): Long = {
    entries.synchronized {
      entries.get(blockId).size
    }
  }

  /**
   * Store a serialized block. If the storage level requests deserialized form, the bytes are
   * deserialized into an ArrayBuffer and its size estimated; otherwise the (duplicated)
   * buffer is stored as-is.
   */
  override def putBytes(blockId: BlockId, _bytes: ByteBuffer, level: StorageLevel) {
    // Work on a duplicate - since the original input might be used elsewhere.
    val bytes = _bytes.duplicate()
    bytes.rewind()
    if (level.deserialized) {
      val values = blockManager.dataDeserialize(blockId, bytes)
      val elements = new ArrayBuffer[Any]
      elements ++= values
      val sizeEstimate = SizeEstimator.estimate(elements.asInstanceOf[AnyRef])
      tryToPut(blockId, elements, sizeEstimate, true)
    } else {
      tryToPut(blockId, bytes, bytes.limit, false)
    }
  }

  /**
   * Store a block given as deserialized values. Returns the stored size together with either
   * an iterator over the values (deserialized path) or a duplicate of the serialized bytes.
   */
  override def putValues(
      blockId: BlockId,
      values: ArrayBuffer[Any],
      level: StorageLevel,
      returnValues: Boolean)
    : PutResult = {
    if (level.deserialized) {
      val sizeEstimate = SizeEstimator.estimate(values.asInstanceOf[AnyRef])
      tryToPut(blockId, values, sizeEstimate, true)
      PutResult(sizeEstimate, Left(values.iterator))
    } else {
      val bytes = blockManager.dataSerialize(blockId, values.iterator)
      tryToPut(blockId, bytes, bytes.limit, false)
      PutResult(bytes.limit(), Right(bytes.duplicate()))
    }
  }

  /** Return the block's serialized bytes, serializing on the fly if stored deserialized. */
  override def getBytes(blockId: BlockId): Option[ByteBuffer] = {
    val entry = entries.synchronized {
      entries.get(blockId)
    }
    if (entry == null) {
      None
    } else if (entry.deserialized) {
      Some(blockManager.dataSerialize(blockId, entry.value.asInstanceOf[ArrayBuffer[Any]].iterator))
    } else {
      Some(entry.value.asInstanceOf[ByteBuffer].duplicate()) // Doesn't actually copy the data
    }
  }

  /** Return an iterator over the block's values, deserializing on the fly if stored as bytes. */
  override def getValues(blockId: BlockId): Option[Iterator[Any]] = {
    val entry = entries.synchronized {
      entries.get(blockId)
    }
    if (entry == null) {
      None
    } else if (entry.deserialized) {
      Some(entry.value.asInstanceOf[ArrayBuffer[Any]].iterator)
    } else {
      val buffer = entry.value.asInstanceOf[ByteBuffer].duplicate() // Doesn't actually copy data
      Some(blockManager.dataDeserialize(blockId, buffer))
    }
  }

  /** Remove the block if present, releasing its accounted memory; returns whether it existed. */
  override def remove(blockId: BlockId): Boolean = {
    entries.synchronized {
      val entry = entries.remove(blockId)
      if (entry != null) {
        currentMemory -= entry.size
        logInfo("Block %s of size %d dropped from memory (free %d)".format(
          blockId, entry.size, freeMemory))
        true
      } else {
        false
      }
    }
  }

  /** Drop all blocks and reset the memory accounting. */
  override def clear() {
    entries.synchronized {
      entries.clear()
      currentMemory = 0
    }
    logInfo("MemoryStore cleared")
  }

  /**
   * Return the RDD ID that a given block ID is from, or None if it is not an RDD block.
   */
  private def getRddId(blockId: BlockId): Option[Int] = {
    blockId.asRDDId.map(_.rddId)
  }

  /**
   * Try to put in a set of values, if we can free up enough space. The value should either be
   * an ArrayBuffer if deserialized is true or a ByteBuffer otherwise. Its (possibly estimated)
   * size must also be passed by the caller.
   *
   * Locks on the object putLock to ensure that all the put requests and its associated block
   * dropping is done by only one thread at a time. Otherwise while one thread is dropping
   * blocks to free memory for one block, another thread may use up the freed space for
   * another block.
   */
  private def tryToPut(blockId: BlockId, value: Any, size: Long, deserialized: Boolean): Boolean = {
    // TODO: Its possible to optimize the locking by locking entries only when selecting blocks
    // to be dropped. Once the to-be-dropped blocks have been selected, and lock on entries has been
    // released, it must be ensured that those to-be-dropped blocks are not double counted for
    // freeing up more space for another block that needs to be put. Only then the actually dropping
    // of blocks (and writing to disk if necessary) can proceed in parallel.
    putLock.synchronized {
      if (ensureFreeSpace(blockId, size)) {
        val entry = new Entry(value, size, deserialized)
        entries.synchronized {
          entries.put(blockId, entry)
          currentMemory += size
        }
        if (deserialized) {
          logInfo("Block %s stored as values to memory (estimated size %s, free %s)".format(
            blockId, Utils.bytesToString(size), Utils.bytesToString(freeMemory)))
        } else {
          logInfo("Block %s stored as bytes to memory (size %s, free %s)".format(
            blockId, Utils.bytesToString(size), Utils.bytesToString(freeMemory)))
        }
        true
      } else {
        // Tell the block manager that we couldn't put it in memory so that it can drop it to
        // disk if the block allows disk storage.
        val data = if (deserialized) {
          Left(value.asInstanceOf[ArrayBuffer[Any]])
        } else {
          Right(value.asInstanceOf[ByteBuffer].duplicate())
        }
        blockManager.dropFromMemory(blockId, data)
        false
      }
    }
  }

  /**
   * Tries to free up a given amount of space to store a particular block, but can fail and return
   * false if either the block is bigger than our memory or it would require replacing another
   * block from the same RDD (which leads to a wasteful cyclic replacement pattern for RDDs that
   * don't fit into memory that we want to avoid).
   *
   * Assumes that a lock is held by the caller to ensure only one thread is dropping blocks.
   * Otherwise, the freed space may fill up before the caller puts in their new value.
   */
  private def ensureFreeSpace(blockIdToAdd: BlockId, space: Long): Boolean = {
    logInfo("ensureFreeSpace(%d) called with curMem=%d, maxMem=%d".format(
      space, currentMemory, maxMemory))

    if (space > maxMemory) {
      logInfo("Will not store " + blockIdToAdd + " as it is larger than our memory limit")
      return false
    }

    if (maxMemory - currentMemory < space) {
      val rddToAdd = getRddId(blockIdToAdd)
      val selectedBlocks = new ArrayBuffer[BlockId]()
      var selectedMemory = 0L

      // This is synchronized to ensure that the set of entries is not changed
      // (because of getValue or getBytes) while traversing the iterator, as that
      // can lead to exceptions.
      entries.synchronized {
        // Iterates in LRU order (entries is access-ordered), selecting candidates to drop
        // until enough space would be freed.
        val iterator = entries.entrySet().iterator()
        while (maxMemory - (currentMemory - selectedMemory) < space && iterator.hasNext) {
          val pair = iterator.next()
          val blockId = pair.getKey
          if (rddToAdd.isDefined && rddToAdd == getRddId(blockId)) {
            logInfo("Will not store " + blockIdToAdd + " as it would require dropping another " +
              "block from the same RDD")
            return false
          }
          selectedBlocks += blockId
          selectedMemory += pair.getValue.size
        }
      }

      if (maxMemory - (currentMemory - selectedMemory) >= space) {
        logInfo(selectedBlocks.size + " blocks selected for dropping")
        for (blockId <- selectedBlocks) {
          val entry = entries.synchronized { entries.get(blockId) }
          // This should never be null as only one thread should be dropping
          // blocks and removing entries. However the check is still here for
          // future safety.
          if (entry != null) {
            val data = if (entry.deserialized) {
              Left(entry.value.asInstanceOf[ArrayBuffer[Any]])
            } else {
              Right(entry.value.asInstanceOf[ByteBuffer].duplicate())
            }
            blockManager.dropFromMemory(blockId, data)
          }
        }
        return true
      } else {
        return false
      }
    }
    true
  }

  /** Whether the given block is currently stored in memory. */
  override def contains(blockId: BlockId): Boolean = {
    entries.synchronized { entries.containsKey(blockId) }
  }
}
| sryza/spark | core/src/main/scala/org/apache/spark/storage/MemoryStore.scala | Scala | apache-2.0 | 9,585 |
package net.katsstuff.chitchat.command
import java.lang.Iterable
import scala.collection.JavaConverters._
import org.spongepowered.api.command.CommandSource
import org.spongepowered.api.command.args.PatternMatchingCommandElement
import org.spongepowered.api.text.Text
import net.katsstuff.chitchat.chat.ChannelHandler
// Command argument that pattern-matches user input against the names of registered channels.
class ChannelCommandArgument(key: Option[Text])(implicit handler: ChannelHandler)
    extends PatternMatchingCommandElement(key.orNull) {

  def this(key: Text)(implicit handler: ChannelHandler) = this(Some(key))

  // Resolve a matched choice to its channel; unknown names are rejected.
  override def getValue(choice: String): AnyRef =
    handler.getChannel(choice) match {
      case Some(channel) => channel
      case None          => throw new IllegalArgumentException
    }

  // All registered channel names, exposed as a Java iterable for the Sponge API.
  override def getChoices(source: CommandSource): Iterable[String] =
    handler.allChannels.keys.asJava
}
| Katrix-/ChitChat | shared/src/main/scala/net/katsstuff/chitchat/command/ChannelCommandArgument.scala | Scala | mit | 767 |
package com.realizationtime.btdogg.tcphashes
import akka.actor.{ActorRef, ActorSystem}
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Framing, Keep, Sink, Tcp}
import akka.util.{ByteString, Timeout}
import com.realizationtime.btdogg.BtDoggConfiguration
import com.realizationtime.btdogg.BtDoggConfiguration.TcpHashSourceConfig
import com.realizationtime.btdogg.BtDoggConfiguration.TcpHashSourceConfig.{host, port}
import com.realizationtime.btdogg.commons.TKey
import com.realizationtime.btdogg.tcphashes.TcpHashesSource.{ProcessingAborted, ProcessingDone, ProcessingResponse, RequestProcessing}
import scala.concurrent.Future
object TcpHashesServer {

  /**
   * Binds a TCP server on the configured host/port. Each incoming connection is handled by a
   * newline-framed pipeline: read a hash per line, ask the mediator to process it, and write
   * back one "processed"/"rejected" line per hash.
   */
  def runServer(mediator: ActorRef)(implicit system: ActorSystem, materializer: Materializer): Future[Tcp.ServerBinding] = {
    println(s"Starting tcp server on $host:$port...")
    val handleConnection = Sink.foreach[Tcp.IncomingConnection] { connection =>
      val lineProtocol = Flow[ByteString]
        .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 256, allowTruncation = true))
        .map(line => TKey(line.utf8String.trim.toUpperCase))
        .mapAsyncUnordered(TcpHashSourceConfig.bufferPerOneConnection) { key =>
          import akka.pattern.ask
          // Allow twice the fetch timeout before giving up on the mediator's answer.
          implicit val timeout: Timeout = Timeout(BtDoggConfiguration.ScrapingConfig.torrentFetchTimeout * 2)
          (mediator ? RequestProcessing(key)).mapTo[ProcessingResponse]
        }
        .map {
          case ProcessingDone(key) => resultOk(key)
          case ProcessingAborted(key) => resultRejected(key)
        }
        .map(reply => if (reply.endsWith("\n")) reply else reply + "\n")
        .map(ByteString(_))
      connection.handleWith(lineProtocol)
    }
    Tcp().bind(host, port).toMat(handleConnection)(Keep.left).run()
  }

  private def resultOk(key: TKey): String = s"processed: ${key.hash}"

  private def resultRejected(key: TKey): String = s"rejected: ${key.hash}"
}
| bwrega/btdogg | src/main/scala/com/realizationtime/btdogg/tcphashes/TcpHashesServer.scala | Scala | mit | 1,957 |
package cook.target
import cook.config.ConfigRef
import cook.console.ops._
import cook.error._
import cook.meta.{ Meta, MetaHelper }
import cook.meta.db.DbProvider.{ db => metaDb }
import cook.ref.TargetRef
import cook.util.GlobScanner
// Lifecycle state of a Target: Pending (not built yet), Cached (meta matched, skipped
// rebuilding), or Built (build command was executed this run).
object TargetStatus extends Enumeration {
  type TargetStatus = Value
  val Pending, Cached, Built = Value
}
/**
 * A buildable target. Tracks its lifecycle status, dependency targets, build result, and the
 * meta (hash) information used to decide whether a rebuild is required.
 *
 * Single-assignment invariants are enforced with asserts: depTargets is set once, the result
 * is built once, and build runs once.
 */
abstract class Target[+R <: TargetResult](
    val ref: TargetRef,
    private[this] val buildCmd: TargetBuildCmd[R],
    private[this] val resultFn: TargetResultFn[R],
    private[this] val inputMetaFn: TargetMetaFn[R],
    private[this] val runCmd: Option[TargetRunCmd[R]],
    val deps: Seq[TargetRef]) {

  def refName = ref.refName
  def buildDir = ref.targetBuildDir
  def runDir = ref.targetRunDir

  import TargetStatus._
  private var _status: TargetStatus = Pending
  def status = _status
  // Result can be read once the target is either freshly built or satisfied from cache.
  def isResultReady = (_status == Cached) || (_status == Built)

  private var _depTargets: Option[List[Target[TargetResult]]] = None
  def depTargets: List[Target[TargetResult]] = _depTargets getOrError {
    "Can not call target " :: strong(refName) :: ".depTargets, deps not ready yet."
  }
  // Set once by the build driver; the resolved targets must match `deps` by ref name.
  private [cook] def setDepTargets(depTargets: List[Target[TargetResult]]) {
    assert(_depTargets.isEmpty, "depTargets should only be set once: " + refName)
    val f = deps.map(_.refName).toSet == depTargets.map(_.refName).toSet
    assert(f, "depTargets should equal to deps: " + refName)
    _depTargets = Some(depTargets)
  }

  private[this] var _result: Option[R] = None
  // Compute the result from the (already built or cached) target, exactly once.
  private [cook] def buildResult: R = {
    assert(_result.isEmpty, "result should only be built once: " + refName)
    if (!isResultReady) {
      reportError {
        "Can not call target " :: strong(refName) :: ".result, target not built yet. " ::
        "You might miss deps"
      }
    }
    val r = resultFn(this)
    _result = Some(r)
    r
  }
  def result: R = _result getOrError {
    "Can not call target " :: strong(refName) :: ".result, result not ready yet."
  }

  // A rebuild is needed whenever the freshly computed meta differs from the cached one.
  private def needBuild: Boolean = {
    val meta = buildMeta
    val cachedMeta = metaDb.get(ref.metaKey)
    meta != cachedMeta
  }

  private [cook] def build {
    assert(_status == Pending, "target should only be built once: " + refName)
    if (needBuild) {
      // need build
      // Start from a clean build dir and log file, run the build command, then persist the
      // resulting meta so future runs can hit the cache.
      buildDir.deleteRecursively
      buildDir.createDirectory(force = true)
      ref.buildLogFile.deleteIfExists
      buildCmd(this)
      _status = Built
      val meta = buildMeta
      metaDb.put(ref.metaKey, meta)
    } else {
      // cache hint
      _status = Cached
    }
  }

  // Combine the meta of deps, the defining config's bytecode, the user-provided input meta,
  // and the files currently in the build dir into one meta value.
  private [cook] def buildMeta: Meta = {
    // dep
    val depsMeta = new Meta
    deps foreach { dep =>
      depsMeta.add(Target.DepMetaGroup, dep.refName, metaDb.get(dep.metaKey).hash)
    }

    // config
    val configMeta = new Meta
    val defineConfigRefName = ConfigRef.defineConfigRefNameForTarget(refName)
    val configKey = ConfigRef.configByteCodeMetaKeyFor(defineConfigRefName)
    configMeta.add(Target.ConfigMetaGroup, "config", metaDb.get(configKey).hash)

    // input
    val inputMeta = inputMetaFn(this).withPrefix(Target.InputMetaPrefix)

    // target
    val targetMeta = if (buildDir.exists) {
      val targets = GlobScanner(buildDir, "**" :: Nil)
      MetaHelper.buildFileMeta(Target.TargetMetaGroup, targets)
    } else {
      new Meta
    }

    //
    depsMeta + configMeta + inputMeta + targetMeta
  }

  def isRunnable = runCmd.isDefined
  // Execute the target's run command in a clean run dir; returns its exit code.
  def run(args: List[String] = Nil): Int = {
    assert(isRunnable, "can not run a target without runCmd: " + refName)
    assert(isResultReady, "can not run a target that was not built yet: " + refName)

    runDir.deleteRecursively
    runDir.createDirectory(force = true)
    ref.runLogFile.deleteIfExists
    runCmd.get(this, args)
  }
}
object Target {
  // Meta group/prefix names used by Target.buildMeta to namespace hash entries.
  val DepMetaGroup = "deps"
  val ConfigMetaGroup = "config"
  val TargetMetaGroup = "target"
  val InputMetaPrefix = "input"
}
| timgreen/cook | src/cook/target/Target.scala | Scala | apache-2.0 | 3,938 |
package com.twitter.finagle.http
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ResponseTest extends FunSuite {
  test("constructors") {
    // Every constructor form should default to HTTP/1.1 and status 200 OK.
    val responses = Seq(
      Response(),
      Response(Version.Http11, Status.Ok),
      Response()
    )
    responses.foreach { response =>
      assert(response.version == Version.Http11)
      assert(response.status == Status.Ok)
    }
  }
}
| mkhq/finagle | finagle-base-http/src/test/scala/com/twitter/finagle/http/ResponseTest.scala | Scala | apache-2.0 | 450 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package reforest.rf
import org.apache.spark.broadcast.Broadcast
import reforest.TypeInfo
import reforest.data.tree.{CutCategorical, CutDetailed}
import reforest.rf.feature.RFFeatureSizer
import reforest.rf.split.RFSplitter
/**
* Utility to compute the entropy
*
* @param typeInfo the type information of the raw data
* @param typeInfoWorking the type information of the working data
* @tparam T raw data type
* @tparam U working data type
*/
class RFEntropy[T, U](typeInfo: Broadcast[TypeInfo[T]],
typeInfoWorking: Broadcast[TypeInfo[U]]) extends Serializable {

// Shannon entropy (natural log) of a per-class count array.
// `numElement` is the total count the proportions are computed against;
// classes with a zero count contribute nothing to the sum.
private def entropyFromPreComputedArray(classAccumulator: Array[Int], numElement: Int) = {
var toReturn = 0d
var c = 0
while (c < classAccumulator.length) {
val tmp = classAccumulator(c).toDouble / numElement
if (tmp > 0) {
toReturn += (-tmp * Math.log(tmp))
}
c += 1
}
toReturn
}

// Entropy of the elements contained in bins `start`..`end` (both inclusive).
// `valueArray(bin)(class)` holds the element count of `class` inside `bin`.
private def entropy(valueArray: Array[Array[Int]], numElement: Int, numClasses: Int, start: Int, end: Int): Double = {
val classAccumulator = new Array[Int](numClasses)
var i = start
while (i <= end) {
var c = 0
while (c < valueArray(i).length) {
classAccumulator(c) += valueArray(i)(c)
c += 1
}
i += 1
}
entropyFromPreComputedArray(classAccumulator, numElement)
}

// Entropy of all categories except `categoryIndex`. The scan starts at bin 1:
// bin 0 is excluded here, consistent with the rest of this class where bin 0
// accumulates the "not valid" elements (see elNumberValid in getBestSplit).
private def entropyCategoryExclude(valueArray: Array[Array[Int]], numElement: Int, numClasses: Int, categoryIndex: Int): Double = {
val classAccumulator = new Array[Int](numClasses)
var i = 1
while (i <= valueArray.length - 1) {
if (i != categoryIndex) {
var c = 0
while (c < valueArray(i).length) {
classAccumulator(c) += valueArray(i)(c)
c += 1
}
}
i += 1
}
entropyFromPreComputedArray(classAccumulator, numElement)
}

// Entropy of the single bin at `categoryIndex`.
private def entropyCategory(valueArray: Array[Array[Int]], numElement: Int, numClasses: Int, categoryIndex: Int): Double = {
val classAccumulator = new Array[Int](numClasses)
var c = 0
while (c < valueArray(categoryIndex).length) {
classAccumulator(c) += valueArray(categoryIndex)(c)
c += 1
}
entropyFromPreComputedArray(classAccumulator, numElement)
}

// Entropy over all bins, computing the total element count internally.
private def entropy(valueArray: Array[Array[Int]], numClasses: Int): Double = {
val numElement = sum(valueArray)
entropy(valueArray, numElement, numClasses, 0, valueArray.length - 1)
}

// Majority class over all bins, or None for empty input.
// Ties are broken in favour of the lowest class index (strict `>` comparison).
private def getLabel(valueArray: Array[Array[Int]], numClasses: Int): Option[Int] = {
if (valueArray.isEmpty) {
Option.empty
} else {
val classAccumulator = new Array[Int](numClasses)
var count = 0
while (count < valueArray.length) {
var c = 0
while (c < valueArray(count).length) {
classAccumulator(c) += valueArray(count)(c)
c += 1
}
count += 1
}
var label = -1
var labelCount = Int.MinValue
count = 0
while (count < classAccumulator.length) {
if (classAccumulator(count) > labelCount) {
label = count
labelCount = classAccumulator(count)
}
count += 1
}
Some(label)
}
}

// Number of elements that carry `label`, or 0 when no label is defined.
private def getLabelOK(valueArray: Array[Array[Int]], label: Option[Int]): Int = {
if (label.isDefined) {
getLabelOK(valueArray, label.get)
} else {
0
}
}

// Total count of elements of class `label` across all bins.
private def getLabelOK(valueArray: Array[Array[Int]], label: Int): Int = {
if (valueArray.isEmpty) {
0
} else {
var toReturn = 0
var count = 0
while (count < valueArray.length) {
toReturn += valueArray(count)(label)
count += 1
}
toReturn
}
}

// Majority class of a single per-class count array.
// maxBy keeps the first maximum, so ties favour the lowest class index.
private def getLabel(valueArray: Array[Int]): Option[Int] = {
Some(valueArray.zipWithIndex.maxBy(_._1)._2)
}

// Total element count across all bins and classes.
private def sum(valueArray: Array[Array[Int]]): Int = {
var toReturn = 0
var count = 0
while (count < valueArray.length) {
var c = 0
while (c < valueArray(count).length) {
toReturn += valueArray(count)(c)
c += 1
}
count += 1
}
toReturn
}

// Total element count of a single bin.
private def sum(valueArray: Array[Int]): Int = {
var toReturn = 0
var count = 0
while (count < valueArray.length) {
toReturn += valueArray(count)
count += 1
}
toReturn
}

/**
* It computes the best split
*
* @param data the information from which compute the best split
* @param featureId the feature index for which we compute the best split
* @param splitter the utility to compute the split for each feature
* @param depth the currently analyzed depth
* @param maxDepth the maximum configured depth
* @param numClasses the number of classes in the dataset
* @return the best split identified
*/
def getBestSplit(data: Array[Array[Int]],
featureId: Int,
splitter: RFSplitter[T, U],
featureSizer: RFFeatureSizer,
depth: Int,
maxDepth: Int,
numClasses: Int): CutDetailed[T, U] = {
// Bin 0 accumulates the "not valid" elements: they are excluded from the
// candidate cut positions and only contribute the elNumberNOTValid counters.
val elNumber = sum(data)
val elNumberValid = elNumber - sum(data(0))
val elNumberNOTValid = elNumber - elNumberValid
var gBest = Double.MinValue
var cut = Int.MinValue
val eTot = entropy(data, elNumber, numClasses, 0, data.length - 1)
var elSum = 0
if (elNumberValid > 0) {
// Scan every candidate cut position i: left = bins [1, i], right = bins
// [i+1, end]. elSum tracks the running left-side count so only the right
// side entropy needs a fresh scan. Since elNumberValid > 0, at least one
// bin has elements, so `cut` is guaranteed to be set below.
val until = data.length - 1
var i = 1
while (i <= until) {
val sumData = sum(data(i))
if (sumData > 0) {
elSum = elSum + sumData
val g = eTot - ((elSum * entropy(data, elSum, numClasses, 1, i)) / elNumber) - (((elNumberValid - elSum) * entropy(data, elNumberValid - elSum, numClasses, i + 1, data.length - 1)) / elNumber)
if (g > gBest) {
gBest = g
cut = i
}
}
i += 1
}
val left = data.slice(1, cut + 1)
// val left = mySlice(data, 1, cut + 1)
val leftTOT = sum(left)
val right = data.slice(cut + 1, data.length)
// val right = mySlice(data, cut + 1, data.length)
val rightTOT = sum(right)
val calculateLabel = true //depth >= maxDepth || leftTOT <= 1 || rightTOT <= 1 || elNumberNOTValid > 0
val leftLabel = if (calculateLabel) getLabel(left, numClasses) else Option.empty
val rightLabel = if (calculateLabel) getLabel(right, numClasses) else Option.empty
val notValidLabel = if (elNumberNOTValid > 0) getLabel(data(0)) else Option.empty
// The final gain is reduced by the (weighted) entropy of the not-valid bin.
var eEnd = gBest
if ((elNumber - elNumberValid) > 0) {
val eNotValid = (((elNumber - elNumberValid) * entropy(Array(data(0)), numClasses)) / elNumber)
eEnd = eEnd - eNotValid
}
// *OK counters: elements whose class matches the side's majority label.
val leftOK = getLabelOK(left, leftLabel)
val rightOK = getLabelOK(right, rightLabel)
val notvalidOK = if (notValidLabel.isDefined) data(0)(notValidLabel.get) else 0
new CutDetailed[T, U](featureId,
splitter.getRealCut(featureId, typeInfoWorking.value.fromInt(featureSizer.getDeShrinkedValue(featureId, cut))),
cut,
eEnd,
if (calculateLabel) getLabel(data, numClasses) else Option.empty,
(elNumber - elNumberValid),
leftTOT,
rightTOT,
notValidLabel,
leftLabel,
rightLabel,
notvalidOK,
leftOK,
rightOK)
} else {
// No valid element at all: return a NaN cut carrying only the overall label.
new CutDetailed(featureId, typeInfo.value.NaN, typeInfoWorking.value.toInt(typeInfoWorking.value.NaN), 0, getLabel(data, numClasses))
}
}

// Manual alternative to Array.slice (kept for reference; unused in the
// active code paths above, which use data.slice directly).
def mySlice(data: Array[Array[Int]], start: Int, end: Int) = {
val toReturn = Array.tabulate(end - start)(_ => Array[Int]())
var count = 0
while (count < toReturn.length) {
toReturn(count) = data(start + count)
count += 1
}
toReturn
}

/**
* It computes the best split for a categorical feature
*
* @param data the information from which compute the best split
* @param featureId the feature index for which we compute the best split
* @param splitter the utility to compute the split for each feature
* @param depth the currently analyzed depth
* @param maxDepth the maximum configured depth
* @param numClasses the number of classes in the dataset
* @return the best split identified
*/
def getBestSplitCategorical(data: Array[Array[Int]],
featureId: Int,
splitter: RFSplitter[T, U],
depth: Int,
maxDepth: Int,
numClasses: Int): CutDetailed[T, U] = {
// Same bin-0-is-not-valid convention as getBestSplit.
val elNumber = sum(data)
val elNumberValid = elNumber - sum(data(0))
val elNumberNOTValid = elNumber - elNumberValid
var gBest = Double.MinValue
var cut = Int.MinValue
val eTot = entropy(data, elNumber, numClasses, 0, data.length - 1)
if (elNumberValid > 0) {
// Candidate splits are one-vs-rest: category i against every other valid
// category. The best category index becomes the cut.
val until = data.length - 1
var i = 1
while (i <= until) {
val elNumberCategory = sum(data(i))
if (elNumberCategory > 0) {
val elNumberNotCategory = elNumberValid - elNumberCategory
val g = eTot - ((elNumberCategory * entropyCategory(data, elNumberCategory, numClasses, i)) / elNumber) - ((elNumberNotCategory * entropyCategoryExclude(data, elNumberNotCategory, numClasses, i)) / elNumber)
if (g > gBest) {
gBest = g
cut = i
}
}
i += 1
}
// Left side is the chosen category alone; right side is everything else
// (including bin 0; note getLabel over `right` therefore sees bin 0 too).
val left = data(cut)
val leftTOT = sum(left)
val right = data.take(cut) ++ data.drop(cut + 1)
val rightTOT = sum(right)
val calculateLabel = true //depth >= maxDepth || leftTOT <= 1 || rightTOT <= 1 || elNumberNOTValid > 0
val leftLabel = if (calculateLabel) getLabel(left) else Option.empty
val rightLabel = if (calculateLabel) getLabel(right, numClasses) else Option.empty
val notValidLabel = if (elNumberNOTValid > 0) getLabel(data(0)) else Option.empty
var eEnd = gBest
if ((elNumber - elNumberValid) > 0) {
val eNotValid = (((elNumber - elNumberValid) * entropy(Array(data(0)), numClasses)) / elNumber)
eEnd = eEnd - eNotValid
}
val leftOK = if (leftLabel.isDefined) left(leftLabel.get) else 0
val rightOK = getLabelOK(right, rightLabel)
val notvalidOK = if (notValidLabel.isDefined) data(0)(notValidLabel.get) else 0
new CutCategorical[T, U](featureId,
typeInfo.value.fromInt(cut),
eEnd,
cut,
if (calculateLabel) getLabel(data, numClasses) else Option.empty,
(elNumber - elNumberValid),
leftTOT,
rightTOT,
notValidLabel,
leftLabel,
rightLabel,
notvalidOK,
leftOK,
rightOK)
} else {
// No valid element at all: return a NaN cut.
new CutDetailed(featureId, typeInfo.value.NaN, typeInfoWorking.value.toInt(typeInfoWorking.value.NaN))
}
}
}
| alessandrolulli/reforest | src/main/scala/reforest/rf/RFEntropy.scala | Scala | apache-2.0 | 12,120 |
package com.joescii
import org.scalatest.{ShouldMatchers, WordSpec}
// Specs exercising pattern matching basics plus the custom extractor objects
// `f` (float) and `z` (integer) defined elsewhere in this project, and the
// Serializer that turns a text format into Line/Circle/Text/Unknown shapes.
class ParseSpec extends WordSpec with ShouldMatchers {
"Integers" should {
"introduce pattern matching" in {
// Cases are tried top-down: literals first, then the even guard, then a
// catch-all binding. ints(4) therefore hits the guard, ints(3) the catch-all.
def ints(i:Int):String = i match {
case 1 => "one"
case 2 => "two"
case n if n % 2 == 0 => s"$n is even"
case other => s"$other is whatever"
}
ints(1) shouldEqual "one"
ints(2) shouldEqual "two"
ints(3) shouldEqual "3 is whatever"
ints(4) shouldEqual "4 is even"
}
}
"Cons operator" should {
"further demonstrate pattern matching" in {
val none = Nil
val one = 3 :: Nil
val three = 1 :: 2 :: 3 :: Nil
// The `last :: Nil` case must precede `head :: tail` or a singleton list
// would match the more general pattern first.
def lists(l:List[Int]) = l match {
case Nil => "list is empty"
case last :: Nil => s"$last is alone"
case head :: tail => s"$head and ${tail.size} others"
}
lists(none) shouldEqual "list is empty"
lists(one) shouldEqual "3 is alone"
lists(three) shouldEqual "1 and 2 others"
}
}
// `f` is an extractor (unapply: String => Option[Float]) defined in this
// project; these specs pin its accepted and rejected inputs.
"f" should {
"match a positive integer" in {
f.unapply("10") shouldEqual Some(10f)
}
"match a positive float with a decimal" in {
f.unapply("3.14") shouldEqual Some(3.14f)
}
"match a negative float" in {
f.unapply("-22.7") shouldEqual Some(-22.7f)
}
"not match alpha string" in {
f.unapply("bob") shouldEqual None
}
"demonstrate custom pattern match" in {
val str = "12.3"
val num = str match {
case f(x) => x
case _ => 0
}
num shouldEqual 12.3f
}
"demonstrate custom pattern match failure" in {
val str = "eleven"
val num = str match {
case f(x) => x
case _ => 0
}
num shouldEqual 0f
}
}
// `z` is the integer extractor (unapply: String => Option[Int]); note the
// section label says "n" although the object under test is `z`.
"n" should {
"match a positive integer" in {
z.unapply("10") shouldEqual Some(10)
}
"match a negative integer" in {
z.unapply("-42") shouldEqual Some(-42)
}
"match zero" in {
z.unapply("0") shouldEqual Some(0)
}
"not match alpha string" in {
z.unapply("jimmy") shouldEqual None
}
"demonstrate custom pattern match" in {
val str = "15"
val num = str match {
case z(x) => x
case _ => 0
}
num shouldEqual 15
}
"demonstrate custom pattern match failure" in {
val str = "15.01"
val num = str match {
case z(x) => x
case _ => 0
}
num shouldEqual 0
}
}
"The parser" should {
"work" in {
// One shape per line: L=Line, C=Circle, T=Text; unrecognized lines become Unknown.
val file = """
L 55.7 65.3 -50 -60.3
C 10 12.2 5.5
T -5 23.2 0 My text label
Junk
"""
val expected = List(
Line(55.7f, 65.3f, -50f, -60.3f),
Circle(10f, 12.2f, 5.5f),
Text(-5f, 23.2f, 0, "My text label"),
Unknown("Junk")
)
Serializer.parse(file) shouldEqual expected
}
}
}
| joescii/fp-renaissance | src/test/scala/com/joescii/ParseSpec.scala | Scala | apache-2.0 | 2,905 |
package autosteamgifts.contentscript.giveaway
import autosteamgifts.GameRatingDisplay
import autosteamgifts.background.options.Options
import autosteamgifts.contentscript.UserActions
import autosteamgifts.Implicits._
import org.scalajs.dom.document
import org.scalajs.dom.raw.{HTMLElement, MouseEvent}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.scalajs.js.annotation.{JSExport, JSExportTopLevel}
// Scala.js content-script entry point for a single giveaway page: applies the
// user's options (auto-enter toggle, icon repositioning, rating display) and
// wires click handlers on the enter/leave buttons.
@JSExportTopLevel("autosteamgifts.contentscript.Giveaway")
object Main {
// Lazily wraps the current page's DOM in the Giveaway accessor.
private lazy val giveaway = new Giveaway(document)
@JSExport
def main(): Unit = {
// Options.getOrDefault() is asynchronous; the option-driven tweaks run when
// it completes, while the click handlers are installed immediately.
for (options <- Options.getOrDefault()) {
if (options.autoEnterGiveaways) giveaway.addAutoEnterToggle()
if (options.repositionIcons) repositionIcons()
if (options.includeRating) includeRating()
}
setAutoEnterHandlers()
}
// Moves every anchor in the giveaway heading to the front of its parent.
// NOTE(review): assumes the heading contains at least one <a> (icons.head
// would fail otherwise) — confirm against the page structure.
private def repositionIcons(): Unit = {
val icons = giveaway.headingElement.queryAll[HTMLElement]("a")
val parent = icons.head.parentNode
val insertionPoint = parent.firstChild
for (icon <- icons) {
parent.insertBefore(icon, insertionPoint)
}
}
// Shows the game's rating; any failure while fetching falls back to "Unknown".
private def includeRating(): Unit = {
giveaway.game.rating
.map { GameRatingDisplay }
.recover { case _ => GameRatingDisplay.Unknown }
.foreach { giveaway.displayRating }
}
// Installs click listeners on whichever of the enter/leave buttons exist.
private def setAutoEnterHandlers(): Unit = {
for (enterButton <- giveaway.enterButton) {
setAutoEnterClickListener(enterButton, status = true)
}
for (leaveButton <- giveaway.leaveButton) {
setAutoEnterClickListener(leaveButton, status = false)
}
}
// Disables text selection on the button and, on click, delegates to
// UserActions; the callback syncs the auto-enter toggle (if present) to the
// new entered/left status.
private def setAutoEnterClickListener(element: HTMLElement, status: Boolean): Unit = {
element.style.setProperty("user-select", "none")
element.addEventListener("click", { ev: MouseEvent =>
UserActions.enterButtonClickListener(giveaway.game, status, ev) {
for (toggle <- giveaway.autoEnterToggle) {
toggle.update(status)
}
}
})
}
}
| erdavila/auto-steamgifts | src/main/scala/autosteamgifts/contentscript/giveaway/Main.scala | Scala | mit | 1,988 |
import scala.reflect.macros.Universe
// Checks whether a single AST node uses only the syntax forms this validator
// allows. Returns (isAllowed, treesToValidateNext): the Boolean is the verdict
// for this node only, and the returned trees are the sub-trees the caller is
// expected to validate recursively.
def validateSyntax(u: Universe)(tree: u.Tree): (Boolean, List[u.Tree]) = {
import u._
// A lambda parameter is a ValDef carrying the PARAM flag and no right-hand side.
def isLambdaParamDef(valDef: ValDef) =
valDef.mods.hasFlag(Flag.PARAM) && valDef.rhs == EmptyTree
tree match {
// Plain expression forms are allowed; all children still need checking.
case _: Block | _: Select | _: Apply | _: TypeApply | _: Ident |
_: If | _: Literal | _: New | _: This | _: Typed | _: TypTree =>
(true, tree.children)
// Lambdas are allowed only when every parameter is a simple param
// definition; the body and the parameter type trees are checked next.
case Function(valDefs, body) if valDefs.forall(isLambdaParamDef) =>
(true, body :: valDefs.map(_.tpt))
// Any other tree shape is rejected (children returned for error reporting).
case _ => (false, tree.children)
}
}
| AVSystem/scex | scex-java-test/src/main/resources/syntaxValidator.scala | Scala | mit | 574 |
package pl.touk.nussknacker.engine.avro.schemaregistry
import cats.data.Validated
import pl.touk.nussknacker.engine.avro.{AvroSchemaDeterminer, RuntimeSchemaData, SchemaDeterminerError}
class BasedOnVersionAvroSchemaDeterminer(schemaRegistryClient: SchemaRegistryClient,
                                         topic: String,
                                         versionOption: SchemaVersionOption,
                                         isKey: Boolean) extends AvroSchemaDeterminer {

  /**
   * Resolves the schema for `topic` from the schema registry, honouring the
   * requested version (a concrete version number, or the latest available one),
   * and wraps any registry failure into a SchemaDeterminerError.
   */
  override def determineSchemaUsedInTyping: Validated[SchemaDeterminerError, RuntimeSchemaData] = {
    val requestedVersion = versionOption match {
      case LatestSchemaVersion      => None
      case ExistingSchemaVersion(v) => Some(v)
    }
    val fetched = schemaRegistryClient.getFreshSchema(topic, requestedVersion, isKey = isKey)
    fetched
      .map(schemaWithMetadata => RuntimeSchemaData(schemaWithMetadata.schema, Some(schemaWithMetadata.id)))
      .leftMap(cause => new SchemaDeterminerError(s"Fetching schema error for topic: $topic, version: $versionOption", cause))
  }
}
| TouK/nussknacker | utils/avro-components-utils/src/main/scala/pl/touk/nussknacker/engine/avro/schemaregistry/BasedOnVersionAvroSchemaDeterminer.scala | Scala | apache-2.0 | 1,026 |
package skinny.util
import scala.util.parsing.combinator.RegexParsers
/**
* LTSV Utility.
*
* http://ltsv.org/
*/
object LTSV {

// Parses one LTSV line into a label -> value map.
// NOTE(review): relies on the parser yielding at least one record for a single
// line (`.head`); confirm behaviour for empty input.
def parseLine(line: String, lenient: Boolean = false): Map[String, String] = {
LTSVParser.parse(line, lenient).head
}

// Parses a whole LTSV document, one map per record (line).
def parseLines(lines: String, lenient: Boolean = false): List[Map[String, String]] = {
LTSVParser.parse(lines, lenient)
}

// Serializes a record as tab-separated "label:value" pairs.
def dump(value: Map[String, String]): String = dump(value.toSeq: _*)
def dump(value: (String, String)*): String = value.map { case (k, v) => k + ":" + v }.mkString("\\t")
// Serializes each record on its own entry, preserving order.
def dump(values: List[Map[String, String]]): List[String] = values.map(dump)
}
/**
 * LTSV Parser configuration.
 *
 * @param lenient Allow a wider range of characters in field labels and values than the LTSV spec
 */
case class LTSVParserConfig(lenient: Boolean = false)
/**
* LTSV Parser.
*/
// Combinator-based parser for the LTSV grammar below. `config.lenient`
// switches the label/value character classes to relaxed regexes.
class LTSVParser(config: LTSVParserConfig) extends RegexParsers {
/*
;; ABNF <https://tools.ietf.org/html/rfc5234>
ltsv = *(record NL) [record]
record = [field *(TAB field)]
field = label ":" field-value
label = 1*lbyte
field-value = *fbyte
TAB = %x09
NL = [%x0D] %x0A
lbyte = %x30-39 / %x41-5A / %x61-7A / "_" / "." / "-" ;; [0-9A-Za-z_.-]
fbyte = %x01-08 / %x0B / %x0C / %x0E-FF
*/
// Whitespace is significant in LTSV (tabs separate fields), so never skip it.
override def skipWhitespace = false
// A document is records separated by newlines.
def ltsv = repsep(record, nl)
// A record is tab-separated fields, collected into a Map (later duplicate
// labels overwrite earlier ones via toMap).
def record = repsep(field, tab) ^^ { _.toMap }
// A field is "label:value"; only the first ':' separates label from value.
def field = label ~ ":" ~ fieldValue ^^ { case k ~ ":" ~ v => (k, v) }
def label = {
if (config.lenient) """[^\\t\\r\\n:]*""".r
else "[0-9A-Za-z_\\\\.-]+".r
}
def fieldValue = {
if (config.lenient) """[^\\t\\r\\n]*""".r
else """[\\u000B\\u000C\\u0001-\\u0008\\u000E-\\u00FF]*""".r
}
def tab = '\\t'
// Newline accepts both LF and CRLF.
def nl = opt('\\r') <~ '\\n'
// Parses the whole input; throws IllegalArgumentException on failure, quoting
// at most the first 1000 characters of the offending input.
def parse(input: String): List[Map[String, String]] = parseAll(ltsv, input).getOrElse {
throw new IllegalArgumentException(
"Failed to parse ltsv: " +
(if (input.size > 1000) "\\"" + input.take(1000) + "..." + "\\"" else "\\"" + input + "\\"")
)
}
}
/**
 * Convenience entry point for parsing LTSV text without managing parser
 * instances by hand.
 */
object LTSVParser {

  /**
   * Parses `input` as LTSV, returning one label -> value map per record.
   *
   * @param lenient when true, accepts a wider character range than the LTSV spec
   */
  def parse(input: String, lenient: Boolean = false): List[Map[String, String]] =
    new LTSVParser(LTSVParserConfig(lenient = lenient)).parse(input)
}
| seratch/skinny-framework | common/src/main/scala/skinny/util/LTSV.scala | Scala | mit | 2,171 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
package util
import scala.collection.immutable.Seq
trait StringUtil {

  /**
   * Renders `vs` as an English enumeration using the given conjunction and an
   * Oxford comma, e.g. oxford(Seq("a", "b", "c"), "and") == "a, b, and c".
   * Two elements are joined without a comma ("a and b"), a single element is
   * returned unchanged, and an empty sequence yields the empty string.
   */
  def oxford(vs: Seq[String], conj: String): String =
    if (vs.isEmpty) ""
    else if (vs.length == 1) vs.head
    else if (vs.length == 2) s"${vs.head} $conj ${vs(1)}"
    else s"${vs.init.mkString(", ")}, $conj ${vs.last}"
}
object StringUtil extends StringUtil
| scala/scala | src/compiler/scala/tools/nsc/util/StringUtil.scala | Scala | apache-2.0 | 678 |
/*
* Copyright 2014 okumin.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package akka.persistence.snapshot.sqlasync
import akka.persistence._
import akka.persistence.common.StoragePlugin
import akka.persistence.serialization.Snapshot
import akka.persistence.snapshot.SnapshotStore
import scala.concurrent.Future
import scalikejdbc._
import scalikejdbc.async._
import scalikejdbc.interpolation.SQLSyntax
// Akka Persistence snapshot store backed by scalikejdbc-async. Each operation
// runs inside a local transaction; persistence ids are mapped to surrogate
// integer keys via surrogateKeyOf (provided by StoragePlugin).
private[persistence] trait ScalikeJDBCSnapshotStore extends SnapshotStore with StoragePlugin {
// Snapshot table name from config, validated and embedded as raw SQL syntax.
protected[this] lazy val snapshotTable = {
val tableName = extension.config.snapshotTableName
SQLSyntaxSupportFeature.verifyTableName(tableName)
SQLSyntax.createUnsafely(tableName)
}
// Loads the newest snapshot matching the criteria window (highest sequence_nr
// first), deserializing the stored bytes back into the snapshot payload.
override def loadAsync(persistenceId: String,
criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = {
log.debug("Load a snapshot, persistenceId = {}, criteria = {}", persistenceId, criteria)
sessionProvider.localTx { implicit session =>
val SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, minSequenceNr, minTimestamp) =
criteria
for {
key <- surrogateKeyOf(persistenceId)
sql = sql"SELECT * FROM $snapshotTable WHERE persistence_key = $key AND sequence_nr >= $minSequenceNr AND sequence_nr <= $maxSequenceNr AND created_at >= $minTimestamp AND created_at <= $maxTimestamp ORDER BY sequence_nr DESC LIMIT 1"
snapshot <- logging(sql)
.map { result =>
val Snapshot(snapshot) =
serialization.deserialize(result.bytes("snapshot"), classOf[Snapshot]).get
SelectedSnapshot(
SnapshotMetadata(persistenceId,
result.long("sequence_nr"),
result.long("created_at")),
snapshot
)
}
.single()
.future()
} yield snapshot
}
}
// Insert-or-update of a serialized snapshot row; dialect-specific upsert is
// supplied by the concrete implementation.
protected[this] def upsert(persistenceId: String,
sequenceNr: Long,
timestamp: Long,
snapshot: Array[Byte]): Future[Unit]
// Serializes the payload and delegates to upsert.
// NOTE: serialization errors surface synchronously via `.get`.
override def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] = {
log.debug("Save the snapshot, metadata = {}, snapshot = {}", metadata, snapshot)
val SnapshotMetadata(persistenceId, sequenceNr, timestamp) = metadata
val bytes = serialization.serialize(Snapshot(snapshot)).get
upsert(persistenceId, sequenceNr, timestamp, bytes)
}
// Deletes the single snapshot addressed by (persistenceId, sequenceNr).
override def deleteAsync(metadata: SnapshotMetadata): Future[Unit] = {
log.debug("Delete the snapshot, {}", metadata)
val SnapshotMetadata(persistenceId, sequenceNr, _) = metadata
sessionProvider.localTx { implicit session =>
for {
key <- surrogateKeyOf(persistenceId)
// Ignores the timestamp since the target is specified by the sequence_nr.
sql = sql"DELETE FROM $snapshotTable WHERE persistence_key = $key AND sequence_nr = $sequenceNr"
_ <- logging(sql).update().future()
} yield ()
}
}
// Deletes every snapshot falling inside the criteria's sequence/timestamp window.
override def deleteAsync(persistenceId: String,
criteria: SnapshotSelectionCriteria): Future[Unit] = {
log.debug("Delete the snapshot for {}, criteria = {}", persistenceId, criteria)
val SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, minSequenceNr, minTimestamp) =
criteria
sessionProvider.localTx { implicit session =>
for {
key <- surrogateKeyOf(persistenceId)
sql = sql"DELETE FROM $snapshotTable WHERE persistence_key = $key AND sequence_nr <= $maxSequenceNr AND sequence_nr >= $minSequenceNr AND created_at <= $maxTimestamp AND created_at >= $minTimestamp"
_ <- logging(sql).update().future()
} yield ()
}
}
}
| okumin/akka-persistence-sql-async | core/src/main/scala/akka/persistence/snapshot/sqlasync/ScalikeJDBCSnapshotStore.scala | Scala | apache-2.0 | 4,280 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.tools
import java.util
import scala.collection.JavaConverters._
import java.util.concurrent.atomic.AtomicLong
import org.apache.kafka.clients.consumer.{ConsumerRebalanceListener, KafkaConsumer}
import org.apache.kafka.common.serialization.ByteArrayDeserializer
import org.apache.kafka.common.utils.Utils
import org.apache.kafka.common.{Metric, MetricName, TopicPartition}
import kafka.utils.{CommandLineUtils, ToolsUtils}
import java.util.{Collections, Properties, Random}
import java.text.SimpleDateFormat
import com.typesafe.scalalogging.LazyLogging
import scala.collection.mutable
/**
* Performance test for the full zookeeper consumer
*/
object ConsumerPerformance {
| richhaase/kafka | core/src/main/scala/kafka/tools/ConsumerPerformance.scala | Scala | apache-2.0 | 12,788 |
package com.twitter.util.mock
import org.mockito.{ArgumentMatchersSugar, IdiomaticMockito}
/**
 * Helper for Mockito Scala sugar with [[https://github.com/mockito/mockito-scala#idiomatic-mockito idiomatic stubbing]].
 * Java users are encouraged to use `org.mockito.Mockito` directly.
 *
 * Note that the Specs2 `smartMock[]` or `mock[].smart` is the default behavior
 * for [[https://github.com/mockito/mockito-scala Mockito Scala]].
 *
 * =Usage=
 *
 * This trait uses `org.mockito.IdiomaticMockito` which is heavily influenced by ScalaTest Matchers.
 *
 * To use, mix in the [[com.twitter.util.mock.Mockito]] trait where desired.
 *
 * ==Create a new mock==
 *
 * {{{
 *
 *   trait Foo {
 *     def bar: String
 *     def bar(v: Int): Int
 *   }
 *
 *   class MyTest extends AnyFunSuite with Mockito {
 *     val aMock = mock[Foo]
 *   }
 * }}}
 *
 * ==Expect behavior==
 *
 * {{{
 *   // "when" equivalents
 *   aMock.bar returns "mocked!"
 *   aMock.bar returns "mocked!" andThen "mocked again!"
 *   aMock.bar shouldCall realMethod
 *   aMock.bar.shouldThrow[IllegalArgumentException]
 *   aMock.bar throws new IllegalArgumentException
 *   aMock.bar answers "mocked!"
 *   aMock.bar(*) answers ((i: Int) => i * 10)
 *
 *   // "do-when" equivalents
 *   "mocked!" willBe returned by aMock.bar
 *   "mocked!" willBe answered by aMock.bar
 *   ((i: Int) => i * 10) willBe answered by aMock.bar(*)
 *   theRealMethod willBe called by aMock.bar
 *   new IllegalArgumentException willBe thrown by aMock.bar
 *   aMock.bar.doesNothing() // doNothing().when(aMock).bar
 *
 *   // verifications
 *   aMock wasNever called // verifyZeroInteractions(aMock)
 *   aMock.bar was called
 *   aMock.bar(*) was called // '*' is shorthand for 'any()' or 'any[T]'
 *   aMock.bar(any[Int]) was called // same as above but with typed input matcher
 *
 *   aMock.bar wasCalled onlyHere
 *   aMock.bar wasNever called
 *
 *   aMock.bar wasCalled twice
 *   aMock.bar wasCalled 2.times
 *
 *   aMock.bar wasCalled fourTimes
 *   aMock.bar wasCalled 4.times
 *
 *   aMock.bar wasCalled atLeastFiveTimes
 *   aMock.bar wasCalled atLeast(fiveTimes)
 *   aMock.bar wasCalled atLeast(5.times)
 *
 *   aMock.bar wasCalled atMostSixTimes
 *   aMock.bar wasCalled atMost(sixTimes)
 *   aMock.bar wasCalled atMost(6.times)
 *
 *   aMock.bar wasCalled (atLeastSixTimes within 2.seconds) // verify(aMock, timeout(2000).atLeast(6)).bar
 *
 *   aMock wasNever calledAgain // verifyNoMoreInteractions(aMock)
 *
 *   InOrder(mock1, mock2) { implicit order =>
 *     mock2.someMethod() was called
 *     mock1.anotherMethod() was called
 *   }
 * }}}
 *
 * Note the 'dead code' warning that can happen when using 'any' or '*'
 * [[https://github.com/mockito/mockito-scala#dead-code-warning matchers]].
 *
 * ==Mixing and matching matchers==
 *
 * Using the idiomatic syntax also allows for mixing argument matchers with real values. E.g., you
 * are no longer forced to use argument matchers for all parameters as soon as you use one. E.g.,
 *
 * {{{
 *   trait Foo {
 *     def bar(v: Int, v2: Int, v3: Int = 42): String
 *   }
 *
 *   class MyTest extends AnyFunSuite with Mockito {
 *     val aMock = mock[Foo]
 *
 *     aMock.bar(1, 2) returns "mocked!"
 *     aMock.bar(1, *) returns "mocked!"
 *     aMock.bar(1, any[Int]) returns "mocked!"
 *     aMock.bar(*, *) returns "mocked!"
 *     aMock.bar(any[Int], any[Int]) returns "mocked!"
 *     aMock.bar(*, *, 3) returns "mocked!"
 *     aMock.bar(any[Int], any[Int], 3) returns "mocked!"
 *
 *     "mocked!" willBe returned by aMock.bar(1, 2)
 *     "mocked!" willBe returned by aMock.bar(1, *)
 *     "mocked!" willBe returned by aMock.bar(1, any[Int])
 *     "mocked!" willBe returned by aMock.bar(*, *)
 *     "mocked!" willBe returned by aMock.bar(any[Int], any[Int])
 *     "mocked!" willBe returned by aMock.bar(*, *, 3)
 *     "mocked!" willBe returned by aMock.bar(any[Int], any[Int], 3)
 *
 *     aMock.bar(1, 2) was called
 *     aMock.bar(1, *) was called
 *     aMock.bar(1, any[Int]) was called
 *     aMock.bar(*, *) was called
 *     aMock.bar(any[Int], any[Int]) was called
 *     aMock.bar(*, *, 3) was called
 *     aMock.bar(any[Int], any[Int], 3) was called
 *   }
 * }}}
 *
 * See [[https://github.com/mockito/mockito-scala#mixing-normal-values-with-argument-matchers Mix-and-Match]]
 * for more information including a caveat around curried functions with default arguments.
 *
 * ==Numeric Matchers==
 *
 * Numeric comparisons are possible for argument matching, e.g.,
 *
 * {{{
 *   aMock.method(5)
 *
 *   aMock.method(n > 4.99) was called
 *   aMock.method(n >= 5) was called
 *   aMock.method(n < 5.1) was called
 *   aMock.method(n <= 5) was called
 * }}}
 *
 * See [[https://github.com/mockito/mockito-scala#numeric-matchers Numeric Matchers]].
 *
 * ==Varargs==
 *
 * Most matchers will deal with varargs out of the box, just note when using the 'eqTo' matcher to
 * apply it to all the arguments as one (not individually).
 *
 * See [[https://github.com/mockito/mockito-scala#varargs Varargs]].
 *
 * ==More Information==
 *
 * See the [[https://github.com/mockito/mockito-scala#idiomatic-mockito IdiomaticMockito]] documentation
 * for more specific information and the [[https://github.com/mockito/mockito-scala Mockito Scala]]
 * [[https://github.com/mockito/mockito-scala#getting-started Getting Started]] documentation for general
 * information.
 *
 * @see `org.mockito.IdiomaticMockito`
 * @see `org.mockito.ArgumentMatchersSugar`
 */
trait Mockito extends IdiomaticMockito with ArgumentMatchersSugar
/**
 * Single-instance implementation of the [[Mockito]] trait. Extending the trait
 * from an object allows the sugar to be used via
 * `import com.twitter.util.mock.Mockito._` without mixing the trait into a class.
 */
object Mockito extends Mockito
| twitter/util | util-mock/src/main/scala/com/twitter/util/mock/Mockito.scala | Scala | apache-2.0 | 5,676 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.