code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
// Jubatus: Online machine learning framework for distributed environment
// Copyright (C) 2014-2015 Preferred Networks and Nippon Telegraph and Telephone Corporation.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License version 2.1 as published by the Free Software Foundation.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
package us.jubat.jubaql_server.processor
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.sql.catalyst.types.{IntegerType, StructField, StructType}
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.streaming.dstream.{ConstantInputDStream, DStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.scalatest._
import scala.collection.mutable
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.future
// Test suite for SchemaDStream: verifies that DStreams of JSON strings can be
// registered as SQL-queryable streaming tables and transformed via SQL queries
// or RDD-level functions. One SparkContext is shared by the whole suite
// (stopped in afterAll); each test gets a fresh StreamingContext/SQLContext.
class SchemaDStreamSpec extends FlatSpec
with ShouldMatchers
with BeforeAndAfter
with BeforeAndAfterAll {
// Shared SparkContext with 3 local worker threads, reused across all tests.
val sc = new SparkContext("local[3]", "SlidingWindow")
// Three JSON documents fed into every test via a ConstantInputDStream.
// The varying jubaql_timestamp precision yields string lengths 29, 19, 23,
// which the fromRDDTransformation test asserts on.
val dummyData = sc.parallelize(
"""{"gender":"m","age":21,"jubaql_timestamp":"2014-11-21T15:52:21.943321112"}""" ::
"""{"gender":"f","age":22,"jubaql_timestamp":"2014-11-21T15:52:22"}""" ::
"""{"gender":"f","age":23,"jubaql_timestamp":"2014-11-21T15:52:23.123"}""" ::
Nil)
// Re-created before each test so table registrations do not leak between tests.
var ssc: StreamingContext = null
var sqlc: SQLContext = null
before {
ssc = new StreamingContext(sc, Seconds(1))
sqlc = new SQLContext(sc)
}
// Registers the raw JSON stream as table "hoge" (schema inferred from the
// JSON) and checks it can be queried via plain SQL in every batch.
"fromStringStream" should "create a queryable stream" in {
val rawStream: DStream[String] = new ConstantInputDStream(ssc, dummyData)
SchemaDStream.fromStringStream(sqlc, rawStream, Some("hoge"))
val collectedData = mutable.ListBuffer[Array[Row]]()
rawStream.foreachRDD(_ => {
val data = sqlc.sql("SELECT age FROM hoge").collect()
collectedData += data
})
waitUntilProcessingEnds(rawStream, 1)
collectedData.size should be > (0)
val firstBatch = collectedData(0)
firstBatch.size shouldBe 3
firstBatch(0).getInt(0) shouldBe 21
firstBatch(1).getInt(0) shouldBe 22
firstBatch(2).getInt(0) shouldBe 23
}
// Same as above, but with an explicitly supplied schema instead of inference.
"fromStringStreamWithSchema" should "create a queryable stream" in {
val rawStream: DStream[String] = new ConstantInputDStream(ssc, dummyData)
val schema = StructType(StructField("age", IntegerType, nullable = false) :: Nil)
SchemaDStream.fromStringStreamWithSchema(sqlc, rawStream, schema, Some("hoge"))
val collectedData = mutable.ListBuffer[Array[Row]]()
rawStream.foreachRDD(_ => {
val data = sqlc.sql("SELECT age FROM hoge").collect()
collectedData += data
})
waitUntilProcessingEnds(rawStream, 1)
collectedData.size should be > (0)
val firstBatch = collectedData(0)
firstBatch.size shouldBe 3
firstBatch(0).getInt(0) shouldBe 21
firstBatch(1).getInt(0) shouldBe 22
firstBatch(2).getInt(0) shouldBe 23
}
// Applies an RDD-level transformation to the registered table "hoge" and
// registers the result as table "foo"; asserts on the transformed values
// (lengths of the jubaql_timestamp strings: 29, 19, 23).
"fromRDDTransformation" should "transform registered RDDs" in {
val rawStream: DStream[String] = new ConstantInputDStream(ssc, dummyData)
SchemaDStream.fromStringStream(sqlc, rawStream, Some("hoge"))
// NOTE(review): `modified` is never used directly; the transformed stream is
// observed only through the registered table "foo" below.
val modified = SchemaDStream.fromRDDTransformation(ssc, sqlc, "hoge", rdd => {
val newSchema = StructType(StructField("len", IntegerType, nullable = false) :: Nil)
// Column index 2 is assumed to be jubaql_timestamp here — depends on the
// column order produced by schema inference; verify if inference changes.
val newRdd = rdd.map(row => {
Row(row.getString(2).size)
})
sqlc.applySchema(newRdd, newSchema)
}, Some("foo"))
val collectedData = mutable.ListBuffer[Array[Row]]()
rawStream.foreachRDD(_ => {
val data = sqlc.sql("SELECT len FROM foo").collect()
collectedData += data
})
waitUntilProcessingEnds(rawStream, 1)
collectedData.size should be > (0)
val firstBatch = collectedData(0)
firstBatch.size shouldBe 3
firstBatch(0).getInt(0) shouldBe 29
firstBatch(1).getInt(0) shouldBe 19
firstBatch(2).getInt(0) shouldBe 23
}
// Looks up a previously registered stream by its table name and consumes it
// directly via foreachRDD (no SQL query).
"fromTableName" should "return registered streams" in {
val rawStream: DStream[String] = new ConstantInputDStream(ssc, dummyData)
SchemaDStream.fromStringStream(sqlc, rawStream, Some("hoge"))
val stream = SchemaDStream.fromTableName(ssc, sqlc, "hoge")
val collectedData = mutable.ListBuffer[Array[Row]]()
stream.foreachRDD(rdd => {
collectedData += rdd.collect()
})
waitUntilProcessingEnds(rawStream, 1)
collectedData.size should be > (0)
val firstBatch = collectedData(0)
firstBatch.size shouldBe 3
firstBatch(0).getInt(0) shouldBe 21
firstBatch(1).getInt(0) shouldBe 22
firstBatch(2).getInt(0) shouldBe 23
}
// Builds a derived stream from a SQL statement over a registered table and
// verifies the result is itself registered (queryable as "foo").
"fromSQL" should "find registered streams and register query outputs" in {
val rawStream: DStream[String] = new ConstantInputDStream(ssc, dummyData)
SchemaDStream.fromStringStream(sqlc, rawStream, Some("hoge"))
SchemaDStream.fromSQL(ssc, sqlc,
"SELECT jubaql_timestamp, age AS bar FROM hoge", Some("foo"))
val collectedData = mutable.ListBuffer[Array[Row]]()
rawStream.foreachRDD(_ => {
val data = sqlc.sql("SELECT bar FROM foo").collect()
collectedData += data
})
waitUntilProcessingEnds(rawStream, 1)
collectedData.size should be > (0)
val firstBatch = collectedData(0)
firstBatch.size shouldBe 3
firstBatch(0).getInt(0) shouldBe 21
firstBatch(1).getInt(0) shouldBe 22
firstBatch(2).getInt(0) shouldBe 23
}
// Stop the suite-wide SparkContext once all tests are done.
override def afterAll(): Unit = {
println("stopping SparkContext")
sc.stop()
super.afterAll()
}
// Starts the StreamingContext, lets it run for at least `numIterations`
// batches, then stops it gracefully (keeping the SparkContext alive) and
// blocks until termination. Must be called after all foreachRDD callbacks
// have been registered, since registration is not possible once started.
protected def waitUntilProcessingEnds(stream: DStream[_], numIterations: Int) = {
// accumulator incremented once per processed batch interval
val i = sc.accumulator(0)
stream.foreachRDD(rdd => i += 1)
// start processing
ssc.start()
// poll in a background future until the accumulator exceeds numIterations
// (the loop waits for numIterations + 1 increments), then stop streaming
future {
while (i.value < numIterations + 1)
Thread.sleep(100)
ssc.stop(stopSparkContext = false, stopGracefully = true)
}
// block the test thread until the background future stops the context
ssc.awaitTermination()
}
}
| jubatus/jubaql-server | processor/src/test/scala/us/jubat/jubaql_server/processor/SchemaDStreamSpec.scala | Scala | lgpl-2.1 | 6,502 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution
import minitest.TestSuite
import monix.execution.Ack.{Continue, Stop}
import monix.execution.exceptions.DummyException
import monix.execution.schedulers.TestScheduler
import monix.execution.internal.Platform
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success, Try}
// Test suite for monix.execution.Ack (Continue / Stop) and the sync* helper
// operations its extension methods add to Future[Ack]. Uses a TestScheduler
// so asynchronous execution is driven deterministically via s.tick().
object AckSuite extends TestSuite[TestScheduler] {
// Fresh deterministic scheduler per test.
def setup() = TestScheduler()
// Every test must leave the scheduler drained; leftover tasks indicate a
// test forgot to tick() or scheduled unexpected work.
def tearDown(env: TestScheduler): Unit = {
assert(env.state.tasks.isEmpty, "should not have tasks left to execute")
}
// Iteration count for stack-safety tests; smaller on JS where stacks are shallower.
val stackSafeLoopN: Int = if (Platform.isJVM) 100000 else 5000
// Continue and Stop are themselves already-completed Future[Ack] values.
test("Continue defaults") { _ =>
assert(Continue.isCompleted, "Continue.isCompleted")
assertEquals(Continue.value, Some(Success(Continue)))
}
test("Stop defaults") { _ =>
assert(Stop.isCompleted, "Stop.isCompleted")
assertEquals(Stop.value, Some(Success(Stop)))
}
test("syncOnContinue(Continue) should execute synchronously #1") { implicit s =>
var triggered = false
Continue.syncOnContinue { triggered = true }
assert(triggered, "triggered")
}
test("syncOnContinue(Continue) should execute synchronously #2") { implicit s =>
val continue: Future[Ack] = Continue
var triggered = false
val trigger: () => Unit = {
val value = true
() => triggered = value
}
continue.syncOnContinue(trigger())
assert(triggered, "triggered")
}
test("syncOnContinue(Future.successful(Continue)) should execute trampolined") { implicit s =>
def loop(source: Future[Ack], n: Int): Future[Ack] =
source.syncOnContinue {
if (n > 0) { loop(source, n - 1); () }
}
var triggered = false
val continue: Future[Ack] = Future.successful(Continue)
loop(continue, stackSafeLoopN).syncOnContinue { triggered = true }
assert(triggered, "triggered")
}
test("syncOnContinue(Future(Continue)) should execute async") { implicit s =>
var triggered = false
val continue: Future[Ack] = Future(Continue)
continue.syncOnContinue { triggered = true }
assert(!triggered, "!triggered")
s.tick()
assert(triggered, "triggered")
}
test("syncOnContinue(Stop) should execute synchronously #1") { implicit s =>
var triggered = false
(Stop: Future[Ack]).syncOnContinue { triggered = true }
assert(!triggered, "!triggered")
assert(s.state.tasks.isEmpty, "there should be no async task registered")
}
test("syncOnContinue(Stop) should execute synchronously #2") { implicit s =>
val cancel: Future[Ack] = Stop
var triggered = false
val trigger: () => Unit = {
val value = true
() => triggered = value
}
cancel.syncOnContinue(trigger())
assert(!triggered, "!triggered")
assert(s.state.tasks.isEmpty, "there should be no async task registered")
}
test("syncOnContinue(Future.successful(Stop)) should execute trampolined") { implicit s =>
var triggered = false
val stop = Future.successful(Stop: Ack)
stop.syncOnContinue { triggered = true }
assert(!triggered, "!triggered")
assert(s.state.tasks.isEmpty, "tasks.isEmpty")
}
test("syncOnContinue(Future(Stop)) should execute async") { implicit s =>
var triggered = false
Future(Stop: Ack).syncOnContinue { triggered = true }
assert(s.state.tasks.nonEmpty, "async tasks should be registered")
s.tick()
assert(!triggered, "!triggered")
}
test("syncOnContinue(Continue) should protect against user errors") { implicit s =>
val ex = new RuntimeException("dummy")
Continue.syncOnContinue { throw ex }
assertEquals(s.state.lastReportedError, ex)
}
test("syncOnContinue(Future.successful(Continue)) should protect against user errors") { implicit s =>
val ex = new RuntimeException("dummy")
Future.successful(Continue).syncOnContinue { throw ex }
assertEquals(s.state.lastReportedError, ex)
}
test("syncOnStopOrFailure(Stop) should execute synchronously") { implicit s =>
var triggered = false
Stop.syncOnStopOrFailure { ex =>
if (ex.isEmpty) triggered = true
}
assert(triggered, "triggered")
assert(s.state.tasks.isEmpty, "there should be no async tasks registered")
}
test("syncOnStopOrFailure(Future(Stop)) should execute asynchronously") { implicit s =>
var triggered = false
Future(Stop).syncOnStopOrFailure { ex =>
if (ex.isEmpty) triggered = true
}
assert(!triggered, "!triggered")
s.tick()
assert(triggered, "triggered")
assert(s.state.tasks.isEmpty, "there should be no async tasks registered")
}
test("syncOnStopOrFailure(Future.successful(Stop)) should execute trampolined") { implicit s =>
def loop(source: Future[Ack], n: Int): Future[Ack] =
source.syncOnStopOrFailure { _ =>
if (n > 0) { loop(source, n - 1); () }
}
var triggered = false
loop(Future.successful(Stop), stackSafeLoopN).syncOnStopOrFailure { ex =>
triggered = ex.isEmpty
}
assert(triggered, "triggered")
}
test("syncOnStopOrFailure(Continue) should execute synchronously") { implicit s =>
var triggered = false
(Continue: Ack).syncOnStopOrFailure { ex =>
if (ex.isEmpty) triggered = true
}
assert(!triggered, "!triggered")
assert(s.state.tasks.isEmpty, "there should be no async tasks registered")
}
test("syncOnStopOrFailure(Future.successful(Continue)) should execute trampolined") { implicit s =>
var triggered = false
(Future.successful(Continue): Future[Ack]).syncOnStopOrFailure { ex =>
triggered = ex.isEmpty
}
assert(!triggered, "!triggered")
assert(s.state.tasks.isEmpty, "there should be async tasks registered")
}
test("syncOnStopOrFailure(Future(Continue)) should execute asynchronously") { implicit s =>
var triggered = false
(Future(Continue): Future[Ack]).syncOnStopOrFailure { ex =>
if (ex.isEmpty) triggered = true
}
assert(s.state.tasks.nonEmpty, "there should be async tasks registered")
s.tick()
assert(!triggered, "!triggered")
assert(s.state.tasks.isEmpty, "tasks.isEmpty")
}
test("syncOnStopOrFailure(Future.failed(ex)) should execute trampolined") { implicit s =>
var triggered = false
val ex = new RuntimeException("dummy")
val ack: Future[Ack] = Future.failed(ex)
ack.syncOnStopOrFailure { p =>
triggered = p.fold(triggered)(_ == ex)
}
assertEquals(triggered, true)
}
test("syncOnStopOrFailure(Future(throw ex)) should execute") { implicit s =>
var triggered = false
val ex = new RuntimeException("dummy")
val ack: Future[Ack] = Future { throw ex }
ack.syncOnStopOrFailure { p =>
triggered = p.fold(triggered)(_ == ex)
}
assertEquals(triggered, false)
assert(s.state.tasks.nonEmpty, "there should be async tasks registered")
s.tick()
assertEquals(triggered, true)
}
test("syncOnStopOrFailure(Stop) should protect against user errors") { implicit s =>
val ex = new RuntimeException("dummy")
Stop.syncOnStopOrFailure { _ =>
throw ex
}
assertEquals(s.state.lastReportedError, ex)
}
test("syncOnStopOrFailure(Future(Stop)) should protect against user errors") { implicit s =>
val ex = new RuntimeException("dummy")
Future(Stop).syncOnStopOrFailure { _ =>
throw ex
}
s.tick()
assertEquals(s.state.lastReportedError, ex)
}
test("syncOnStopOrFailure(Future.failed(ex)) should protect against user errors") { implicit s =>
val ex = new RuntimeException("dummy")
val source = Future.failed[Ack](new RuntimeException("first"))
source.syncOnStopOrFailure { _ =>
throw ex
}
assertEquals(s.state.lastReportedError, ex)
}
test("syncOnStopOrFailure(Future(throw ex)) should protect against user errors") { implicit s =>
val ex = new RuntimeException("dummy")
val source = Future[Ack](throw new RuntimeException("first"))
source.syncOnStopOrFailure { _ =>
throw ex
}
s.tick()
assertEquals(s.state.lastReportedError, ex)
}
test("syncMap(Continue) should execute synchronously") { implicit s =>
var triggered = false
val source: Future[Ack] = Continue
val result = source.syncMap {
case Stop => Stop
case Continue =>
triggered = true
Stop
}
assertEquals(result, Stop)
assertEquals(triggered, true)
assert(s.state.tasks.isEmpty, "there should be no async tasks registered")
}
test("syncMap(Stop) should execute synchronously") { implicit s =>
var triggered = false
val source: Future[Ack] = Stop
val result = source.syncMap {
case Continue => Continue
case Stop =>
triggered = true
Continue
}
assertEquals(result, Continue)
assertEquals(triggered, true)
assert(s.state.tasks.isEmpty, "there should be no async tasks registered")
}
test("syncMap should protect against exceptions") { implicit s =>
val dummy = new RuntimeException("dummy")
val result = (Continue: Future[Ack]).syncMap { _ =>
throw dummy
}
assertEquals(result, Stop)
assertEquals(s.state.lastReportedError, dummy)
}
test("syncMap(Future.successful(Continue)) should execute trampolined") { implicit s =>
var triggered = false
val source: Future[Ack] = Future.successful(Continue)
val result = source.syncMap {
case Stop => Stop
case Continue =>
triggered = true
Stop
}
assert(s.state.tasks.isEmpty)
assertEquals(result.syncTryFlatten, Stop)
assertEquals(triggered, true)
}
test("syncMap(Future.successful(Stop)) should execute trampolined") { implicit s =>
var triggered = false
val source: Future[Ack] = Future.successful(Stop)
val result = source.syncMap {
case Continue => Continue
case Stop =>
triggered = true
Continue
}
assert(s.state.tasks.isEmpty)
assertEquals(result.syncTryFlatten, Continue)
assertEquals(triggered, true)
}
test("syncMap(Continue) with impure function") { implicit s =>
var triggered = false
val source: Future[Ack] = Continue
val fn: Ack => Ack = {
val value = true
(ack: Ack) =>
ack match {
case Stop => Stop
case Continue =>
triggered = value
Stop
}
}
val result = source.syncMap(fn)
assertEquals(triggered, true)
assertEquals(result, Stop)
}
test("syncMap(Future(Continue)) with impure function") { implicit s =>
var triggered = false
val source: Future[Ack] = Future(Continue)
val fn: Ack => Ack = {
val value = true
(ack: Ack) =>
ack match {
case Stop => Stop
case Continue =>
triggered = value
Stop
}
}
val result = source.syncMap(fn)
s.tick()
assertEquals(triggered, true)
assertEquals(result.syncTryFlatten, Stop)
}
test("syncFlatMap(Continue) should execute synchronously") { implicit s =>
var triggered = false
val source: Future[Ack] = Continue
val result = source.syncFlatMap {
case Stop => Stop
case Continue =>
triggered = true
Stop
}
assertEquals(result, Stop)
assertEquals(triggered, true)
assert(s.state.tasks.isEmpty, "there should be no async tasks registered")
}
test("syncFlatMap(Stop) should execute synchronously") { implicit s =>
var triggered = false
val source: Future[Ack] = Stop
val result = source.syncFlatMap {
case Continue => Continue
case Stop =>
triggered = true
Continue
}
assertEquals(result, Continue)
assertEquals(triggered, true)
assert(s.state.tasks.isEmpty, "there should be no async tasks registered")
}
test("syncFlatMap should protect against exceptions") { implicit s =>
val dummy = new RuntimeException("dummy")
val result = (Continue: Future[Ack]).syncFlatMap { _ =>
throw dummy
}
assertEquals(result, Stop)
assertEquals(s.state.lastReportedError, dummy)
}
test("syncFlatMap(Future.successful(Continue)) should execute trampolined") { implicit s =>
def loop(f: Future[Ack], n: Int): Future[Ack] =
if (n > 0) f.syncFlatMap(_ => loop(f, n - 1))
else f
var triggered = false
val source: Future[Ack] = Future.successful(Continue)
val result = loop(source, stackSafeLoopN).syncFlatMap {
case Stop => Stop
case Continue =>
triggered = true
Stop
}
assert(s.state.tasks.isEmpty)
assertEquals(result.syncTryFlatten, Stop)
assertEquals(triggered, true)
}
test("syncFlatMap(Future.successful(Stop)) should execute trampolined") { implicit s =>
def loop(f: Future[Ack], n: Int): Future[Ack] =
if (n > 0) f.syncFlatMap(_ => loop(f, n - 1))
else f
var triggered = false
val source: Future[Ack] = Future.successful(Stop)
val result = loop(source, stackSafeLoopN).syncFlatMap {
case Continue => Continue
case Stop =>
triggered = true
Continue
}
assert(s.state.tasks.isEmpty)
assertEquals(result.syncTryFlatten, Continue)
assertEquals(triggered, true)
}
test("syncFlatMap(Continue) with impure function") { implicit s =>
var triggered = false
val source: Future[Ack] = Continue
val fn: Ack => Ack = {
val value = true
(ack: Ack) =>
ack match {
case Stop => Stop
case Continue =>
triggered = value
Stop
}
}
val result = source.syncFlatMap(fn)
assertEquals(triggered, true)
assertEquals(result, Stop)
}
test("syncFlatMap(Future(Continue)) with impure function") { implicit s =>
var triggered = false
val source: Future[Ack] = Future(Continue)
val fn: Ack => Ack = {
val value = true
(ack: Ack) =>
ack match {
case Stop => Stop
case Continue =>
triggered = value
Stop
}
}
val result = source.syncFlatMap(fn)
s.tick()
assertEquals(triggered, true)
assertEquals(result.syncTryFlatten, Stop)
}
test("syncTryFlatten(Continue)") { implicit s =>
val f = Continue.syncTryFlatten
assertEquals(f, Continue)
}
test("syncTryFlatten(Stop)") { implicit s =>
val f = Stop.syncTryFlatten
assertEquals(f, Stop)
}
test("syncTryFlatten(Future.successful(Continue))") { implicit s =>
val f = Future.successful(Continue).syncTryFlatten
assertEquals(f, Continue)
}
test("syncTryFlatten(Future(Continue))") { implicit s =>
val source = Future(Continue)
val f = source.syncTryFlatten
assertEquals(f, source)
s.tick()
}
test("syncTryFlatten(Stop)") { implicit s =>
val f = Stop.syncTryFlatten
assertEquals(f, Stop)
}
test("syncTryFlatten(Future.successful(Stop))") { implicit s =>
val f = Future.successful(Stop).syncTryFlatten
assertEquals(f, Stop)
}
test("syncTryFlatten(Future(Stop))") { implicit s =>
val source = Future(Stop)
val f = source.syncTryFlatten
assertEquals(f, source)
s.tick()
}
test("isSynchronous(Future(Continue)) == false") { implicit s =>
val f: Future[Ack] = Future.successful(Continue)
assert(!f.isSynchronous)
}
test("isSynchronous(Continue) == true") { implicit s =>
val f: Future[Ack] = Continue
assert(f.isSynchronous)
}
test("isSynchronous(Future(Stop)) == false") { implicit s =>
val f: Future[Ack] = Future.successful(Stop)
assert(!f.isSynchronous)
}
test("isSynchronous(Stop) == true") { implicit s =>
val f: Future[Ack] = Stop
assert(f.isSynchronous)
}
test("isSynchronous(failure) == false") { implicit s =>
val f: Future[Ack] = Future.failed(new RuntimeException)
assert(!f.isSynchronous)
}
test("isSynchronous(impure Future(Continue)) == false") { implicit s =>
def f: Future[Ack] = Future.successful(Continue)
assert(!f.isSynchronous)
}
test("isSynchronous(impure Continue) == true") { implicit s =>
def f: Future[Ack] = Continue
assert(f.isSynchronous)
}
test("isSynchronous(impure Future(Stop)) == false") { implicit s =>
def f: Future[Ack] = Future.successful(Stop)
assert(!f.isSynchronous)
}
test("isSynchronous(impure Stop) == true") { implicit s =>
def f: Future[Ack] = Stop
assert(f.isSynchronous)
}
test("isSynchronous(impure failure) == false") { implicit s =>
def f: Future[Ack] = Future.failed(new RuntimeException)
assert(!f.isSynchronous)
}
test("continue.syncOnComplete(clean)") { implicit s =>
val ack: Ack = Continue
var triggered = false
ack.syncOnComplete {
case Success(Continue) => triggered = true
case _ => ()
}
assert(triggered, "should have been sync")
}
test("stop.syncOnComplete(clean)") { implicit s =>
val ack: Ack = Stop
var triggered = false
ack.syncOnComplete {
case Success(Stop) => triggered = true
case _ => ()
}
assert(triggered, "should have been sync")
}
test("continue.syncOnComplete(unclean)") { implicit s =>
val ack: Ack = Continue
var triggered = false
val fn = (x: Try[Ack]) =>
x match {
case Success(Continue) => triggered = true
case _ => ()
}
ack.syncOnComplete(fn)
assert(triggered, "should have been sync")
}
test("stop.syncOnComplete(unclean)") { implicit s =>
val ack: Ack = Stop
var triggered = false
val fn = (x: Try[Ack]) =>
x match {
case Success(Stop) => triggered = true
case _ => ()
}
ack.syncOnComplete(fn)
assert(triggered, "should have been sync")
}
test("future(continue).syncOnComplete(clean)") { implicit s =>
val ack: Future[Ack] = Future(Continue)
var triggered = false
ack.syncOnComplete {
case Success(Continue) => triggered = true
case _ => ()
}
assert(!triggered, "!triggered")
s.tick()
assert(triggered, "should have been async")
}
test("future(stop).syncOnComplete(clean)") { implicit s =>
val ack: Future[Ack] = Future(Stop)
var triggered = false
ack.syncOnComplete {
case Success(Stop) => triggered = true
case _ => ()
}
assert(!triggered, "!triggered")
s.tick()
assert(triggered, "should have been async")
}
test("future(continue).syncOnComplete(unclean)") { implicit s =>
val ack: Future[Ack] = Future(Continue)
var triggered = false
val fn = (x: Try[Ack]) =>
x match {
case Success(Continue) => triggered = true
case _ => ()
}
ack.syncOnComplete(fn)
assert(!triggered, "!triggered")
s.tick()
assert(triggered, "should have been async")
}
test("future(stop).syncOnComplete(unclean)") { implicit s =>
val ack: Future[Ack] = Future(Stop)
var triggered = false
val fn = (x: Try[Ack]) =>
x match {
case Success(Stop) => triggered = true
case _ => ()
}
ack.syncOnComplete(fn)
assert(!triggered, "!triggered")
s.tick()
assert(triggered, "should have been async")
}
test("syncOnComplete protects against immediate errors") { implicit s =>
val dummy = new DummyException("dummy")
Continue.syncOnComplete { _ =>
throw dummy
}
assertEquals(s.state.lastReportedError, dummy)
}
test("syncOnComplete protects against async errors") { implicit s =>
val dummy = new DummyException("dummy")
Future(Continue).syncOnComplete { _ =>
throw dummy
}
s.tick()
assertEquals(s.state.lastReportedError, dummy)
}
test("Continue.syncOnContinueFollow") { implicit s =>
val ack: Future[Ack] = Continue
val p = Promise[Int]()
ack.syncOnContinueFollow(p, 1)
// should be immediate
assertEquals(p.future.value, Some(Success(1)))
}
test("Stop.syncOnContinueFollow") { implicit s =>
val ack: Future[Ack] = Stop
val p = Promise[Int]()
ack.syncOnContinueFollow(p, 1)
s.tick()
assertEquals(p.future.value, None)
}
test("Future(Continue).syncOnContinueFollow") { implicit s =>
val ack: Future[Ack] = Future(Continue)
val p = Promise[Int]()
ack.syncOnContinueFollow(p, 1)
assertEquals(p.future.value, None)
// should be async
s.tick()
assertEquals(p.future.value, Some(Success(1)))
}
test("Future(Stop).syncOnContinueFollow") { implicit s =>
val ack: Future[Ack] = Future(Stop)
val p = Promise[Int]()
ack.syncOnContinueFollow(p, 1)
s.tick()
assertEquals(p.future.value, None)
}
test("Continue.syncOnStopFollow") { implicit s =>
val ack: Future[Ack] = Continue
val p = Promise[Int]()
ack.syncOnStopFollow(p, 1)
s.tick()
assertEquals(p.future.value, None)
}
test("Stop.syncOnStopFollow") { implicit s =>
val ack: Future[Ack] = Stop
val p = Promise[Int]()
ack.syncOnStopFollow(p, 1)
// should be immediate
assertEquals(p.future.value, Some(Success(1)))
}
test("Future(Continue).syncOnStopFollow") { implicit s =>
val ack: Future[Ack] = Future(Continue)
val p = Promise[Int]()
ack.syncOnStopFollow(p, 1)
s.tick()
assertEquals(p.future.value, None)
}
test("Future(Stop).syncOnStopFollow") { implicit s =>
val ack: Future[Ack] = Future(Stop)
val p = Promise[Int]()
ack.syncOnStopFollow(p, 1)
s.tick()
assertEquals(p.future.value, Some(Success(1)))
}
test("syncTryFlatten works for synchronous failure") { implicit s =>
val dummy = new RuntimeException("dummy")
val f: Future[Ack] = Future.failed(dummy)
val sync = f.syncTryFlatten
assertEquals(sync, Stop)
assertEquals(s.state.lastReportedError, dummy)
}
test("Continue.transform") { implicit s =>
val f1 = Continue.transform { _ =>
Success(1)
}
s.tick()
assertEquals(f1.value, Some(Success(1)))
val dummy = new RuntimeException("dummy")
val f2 = Continue.transform { _ =>
Failure(dummy)
}
s.tick()
assertEquals(f2.value, Some(Failure(dummy)))
val f3 = Continue.transform { _ =>
throw dummy
}
s.tick()
assertEquals(f3.value, Some(Failure(dummy)))
}
test("Continue.transformWith") { implicit s =>
val f1 = Continue.transformWith { _ =>
Future.successful(1)
}
s.tick()
assertEquals(f1.value, Some(Success(1)))
val dummy = new RuntimeException("dummy")
val f2 = Continue.transformWith { _ =>
Future.failed(dummy)
}
s.tick()
assertEquals(f2.value, Some(Failure(dummy)))
val f3 = Continue.transformWith { _ =>
throw dummy
}
s.tick()
assertEquals(f3.value, Some(Failure(dummy)))
}
test("Continue.transformWith is stack safe") { implicit s =>
def loop(n: Int): Future[Ack] =
if (n <= 0) Continue
else
Continue.transformWith {
case Success(_) => loop(n - 1)
case Failure(ex) => Future.failed(ex)
}
val f = loop(100000); s.tick()
assertEquals(f.value, Some(Success(Continue)))
}
test("Stop.transform") { implicit s =>
val f1 = Stop.transform { _ =>
Success(1)
}
s.tick()
assertEquals(f1.value, Some(Success(1)))
val dummy = new RuntimeException("dummy")
val f2 = Stop.transform { _ =>
Failure(dummy)
}
s.tick()
assertEquals(f2.value, Some(Failure(dummy)))
val f3 = Stop.transform { _ =>
throw dummy
}
s.tick()
assertEquals(f3.value, Some(Failure(dummy)))
}
test("Stop.transformWith") { implicit s =>
val f1 = Stop.transformWith { _ =>
Future.successful(1)
}
s.tick()
assertEquals(f1.value, Some(Success(1)))
val dummy = new RuntimeException("dummy")
val f2 = Stop.transformWith { _ =>
Future.failed(dummy)
}
s.tick()
assertEquals(f2.value, Some(Failure(dummy)))
val f3 = Stop.transformWith { _ =>
throw dummy
}
s.tick()
assertEquals(f3.value, Some(Failure(dummy)))
}
test("Stop.transformWith is stack safe") { implicit s =>
def loop(n: Int): Future[Ack] =
if (n <= 0) Stop
else
Stop.transformWith {
case Success(_) => loop(n - 1)
case Failure(ex) => Future.failed(ex)
}
val f = loop(100000); s.tick()
assertEquals(f.value, Some(Success(Stop)))
}
}
| monixio/monix | monix-execution/shared/src/test/scala/monix/execution/AckSuite.scala | Scala | apache-2.0 | 25,054 |
/*
* -╥⌐⌐⌐⌐ -⌐⌐⌐⌐-
* ≡╢░░░░⌐\\░░░φ ╓╝░░░░⌐░░░░╪╕
* ╣╬░░` `░░░╢┘ φ▒╣╬╝╜ ░░╢╣Q
* ║╣╬░⌐ ` ╤▒▒▒Å` ║╢╬╣
* ╚╣╬░⌐ ╔▒▒▒▒`«╕ ╢╢╣▒
* ╫╬░░╖ .░ ╙╨╨ ╣╣╬░φ ╓φ░╢╢Å
* ╙╢░░░░⌐"░░░╜ ╙Å░░░░⌐░░░░╝`
* ``˚¬ ⌐ ˚˚⌐´
*
* Copyright © 2016 Flipkart.com
*/
package com.flipkart.connekt.busybees.streams.flows.dispatchers
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.stream._
import com.flipkart.connekt.busybees.models._
import com.flipkart.connekt.commons.factories.{ConnektLogger, LogFile}
import com.flipkart.connekt.commons.services.{ConnektConfig, KeyChainManager}
import com.typesafe.config.Config
import com.typesafe.sslconfig.akka.AkkaSSLConfig
import com.typesafe.sslconfig.ssl.{TrustManagerConfig, TrustStoreConfig}
import scala.concurrent.ExecutionContextExecutor
// Owns the outbound-HTTP plumbing for all push/messaging channels: a dedicated
// ActorSystem ("http-out"), a materializer, and one Akka HTTP superPool client
// flow per channel (GCM, WNS, SMS, WhatsApp, OpenWeb, Email). Constructing
// this class has heavy side effects (starts an ActorSystem and connection
// pools); it is intended to be created once via HttpDispatcher.init.
class HttpDispatcher(actorSystemConf: Config) {
implicit val httpSystem: ActorSystem = ActorSystem("http-out", actorSystemConf)
implicit val httpMat: ActorMaterializer = ActorMaterializer()
implicit val ec: ExecutionContextExecutor = httpSystem.dispatcher
private val gcmPoolClientFlow = Http().superPool[GCMRequestTracker]()(httpMat)
private val wnsPoolClientFlow = Http().superPool[WNSRequestTracker]()(httpMat)
private val smsPoolClientFlow = Http().superPool[SmsRequestTracker]()(httpMat)
// WhatsApp pool with a deliberately loosened TLS client context: it trusts the
// PEM certificate fetched from the key chain, but also accepts ANY certificate
// and disables hostname verification — insecure by construction (hence the
// "Insecure" in the name). NOTE(review): the `.get` calls below throw
// NoSuchElementException at construction time if the credentials or either
// "wa.topology.*" config key is missing — presumably fail-fast by design.
private val waPoolInsecureClientFlow = {
// TODO :: Appname
val certificate = KeyChainManager.getWhatsAppCredentials("flipkart").get.getCertificateStr
val trustStoreConfig = TrustStoreConfig(Some(certificate), None).withStoreType("PEM")
val trustManagerConfig = TrustManagerConfig().withTrustStoreConfigs(List(trustStoreConfig))
val badSslConfig = AkkaSSLConfig().mapSettings(s => s.withLoose(s.loose
.withAcceptAnyCertificate(true)
.withDisableHostnameVerification(true)
).withTrustManagerConfig(trustManagerConfig))
Http().superPool[RequestTracker](
Http().createClientHttpsContext(badSslConfig),
ConnectionPoolSettings(httpSystem)
.withPipeliningLimit(ConnektConfig.getInt("wa.topology.max.pipeline.limit").get)
.withMaxConnections(ConnektConfig.getInt("wa.topology.max.parallel.connections").get)
)(httpMat)
}
private val openWebPoolClientFlow = Http().superPool[OpenWebRequestTracker]()(httpMat)
private val emailPoolClientFlow = Http().superPool[EmailRequestTracker]()(httpMat)
}
/**
 * Process-wide singleton access to the [[HttpDispatcher]] channel flows.
 *
 * `init` must be called once during service startup before any of the
 * accessors are used.
 */
object HttpDispatcher {

  // @volatile so a dispatcher published by `init` on one thread is visible to
  // accessor calls on other threads.
  @volatile private var instance: Option[HttpDispatcher] = None

  /**
   * Creates the shared HttpDispatcher (and its ActorSystem) exactly once;
   * subsequent calls are no-ops.
   *
   * Fix: the previous check-then-act (`if (instance.isEmpty) ... instance = Some(...)`)
   * was unsynchronized, so two threads calling `init` concurrently could both
   * see `None` and each start an ActorSystem, leaking one of them. The whole
   * check-and-set now runs under the object's monitor.
   */
  def init(actorSystemConf: Config) = synchronized {
    if (instance.isEmpty) {
      ConnektLogger(LogFile.SERVICE).info(s"Creating HttpDispatcher actor-system with conf: ${actorSystemConf.toString}")
      instance = Some(new HttpDispatcher(actorSystemConf))
    }
  }

  // NOTE: these accessors fail with NoSuchElementException if `init` has not
  // been called yet — callers are expected to initialize during startup.
  def gcmPoolClientFlow = instance.map(_.gcmPoolClientFlow).get
  def smsPoolClientFlow = instance.map(_.smsPoolClientFlow).get
  def waPoolClientFlow = instance.map(_.waPoolInsecureClientFlow).get
  def wnsPoolClientFlow = instance.map(_.wnsPoolClientFlow).get
  def openWebPoolClientFlow = instance.map(_.openWebPoolClientFlow).get
  def emailPoolClientFlow = instance.map(_.emailPoolClientFlow).get
}
| Flipkart/connekt | busybees/src/main/scala/com/flipkart/connekt/busybees/streams/flows/dispatchers/HttpDispatcher.scala | Scala | mit | 3,548 |
package life
import com.badlogic.gdx.backends.lwjgl._
/**
 * Desktop launcher: configures the LWJGL backend window and starts the
 * libGDX `Life` application.
 *
 * Uses an explicit `main` method instead of `extends App`: the `App` trait is
 * based on `DelayedInit`, which has initialization-order pitfalls and is
 * deprecated in later Scala versions. Behavior is otherwise identical.
 */
object Main {
  def main(args: Array[String]): Unit = {
    val cfg = new LwjglApplicationConfiguration
    cfg.title = "life"
    cfg.width = 1420
    cfg.height = 880
    // keep the JVM alive on window close instead of calling System.exit
    cfg.forceExit = false
    new LwjglApplication(new Life, cfg)
    ()
  }
}
| samyatchmenoff/emergence | life/desktop/src/main/scala/Main.scala | Scala | mit | 263 |
package mesosphere.marathon.core.launcher.impl
import java.util
import java.util.Collections
import mesosphere.marathon.core.base.ConstantClock
import mesosphere.marathon.{ MarathonTestHelper, MarathonSchedulerDriverHolder, MarathonSpec }
import mesosphere.marathon.core.launcher.TaskLauncher
import mesosphere.mesos.protos.OfferID
import org.apache.mesos.Protos.TaskInfo
import org.apache.mesos.{ Protos, SchedulerDriver }
import org.mockito.Mockito
import org.mockito.Mockito.{ when, verify }
import mesosphere.mesos.protos.Implicits._
import scala.collection.JavaConverters._
/** Unit tests for TaskLauncherImpl: launching tasks and declining offers via the scheduler driver. */
class TaskLauncherImplTest extends MarathonSpec {
  // A fixed offer id plus its Java-collection form, matching what the Mesos driver API expects.
  private[this] val offerId = OfferID("offerId")
  private[this] val offerIdAsJava: util.Set[Protos.OfferID] = Collections.singleton[Protos.OfferID](offerId)
  private[this] val taskInfo1 = MarathonTestHelper.makeOneCPUTask("taskid1").build()
  private[this] val taskInfo2 = MarathonTestHelper.makeOneCPUTask("taskid2").build()
  // NOTE(review): `tasks` appears unused — the tests build Seq(taskInfo1, taskInfo2) inline; consider removing.
  private[this] val tasks = Seq(taskInfo1, taskInfo2)
  private[this] val tasksAsJava: util.List[TaskInfo] = Seq(taskInfo1, taskInfo2).asJava

  // Without a driver the launcher cannot launch and must report failure.
  test("launchTasks without driver") {
    driverHolder.driver = None

    assert(!launcher.launchTasks(offerId, Seq(taskInfo1, taskInfo2)))
  }

  // A non-running driver status (DRIVER_ABORTED) must surface as an unsuccessful launch.
  test("unsuccessful launchTasks") {
    when(driverHolder.driver.get.launchTasks(offerIdAsJava, tasksAsJava)).thenReturn(Protos.Status.DRIVER_ABORTED)

    assert(!launcher.launchTasks(offerId, Seq(taskInfo1, taskInfo2)))

    verify(driverHolder.driver.get).launchTasks(offerIdAsJava, tasksAsJava)
  }

  // DRIVER_RUNNING is the only status that counts as success.
  test("successful launchTasks") {
    when(driverHolder.driver.get.launchTasks(offerIdAsJava, tasksAsJava)).thenReturn(Protos.Status.DRIVER_RUNNING)

    assert(launcher.launchTasks(offerId, Seq(taskInfo1, taskInfo2)))

    verify(driverHolder.driver.get).launchTasks(offerIdAsJava, tasksAsJava)
  }

  // Declining with no driver must be a silent no-op (verified implicitly by
  // the after-block's verifyNoMoreInteractions on any remaining mock).
  test("declineOffer without driver") {
    driverHolder.driver = None

    launcher.declineOffer(offerId)
  }

  test("declineOffer with driver") {
    launcher.declineOffer(offerId)

    verify(driverHolder.driver.get).declineOffer(offerId)
  }

  var driverHolder: MarathonSchedulerDriverHolder = _
  var launcher: TaskLauncher = _

  // Fresh mock driver and launcher per test; ConstantClock keeps timing deterministic.
  before {
    driverHolder = new MarathonSchedulerDriverHolder
    driverHolder.driver = Some(mock[SchedulerDriver])
    launcher = new TaskLauncherImpl(driverHolder, ConstantClock())
  }

  // Ensures each test accounted for every interaction with the mock driver.
  after {
    driverHolder.driver.foreach(Mockito.verifyNoMoreInteractions(_))
  }
}
| sepiroth887/marathon | src/test/scala/mesosphere/marathon/core/launcher/impl/TaskLauncherImplTest.scala | Scala | apache-2.0 | 2,467 |
import sbt._
import Keys._
import org.scalatra.sbt._
import org.scalatra.sbt.PluginKeys._
import com.mojolly.scalate.ScalatePlugin._
import ScalateKeys._
// sbt 0.13-style Build definition for the ColloqWN editor (Scalatra web app).
// NOTE(review): uses the deprecated `<<=` operator syntax — fine on old sbt,
// but would need `:=`/`.value` when upgrading.
object YuzuBuild extends Build {
  val Organization = "org.insightcentre"
  val Name = "ColloqWN Editor"
  val Version = "0.0.1-SNAPSHOT"
  val ScalaVersion = "2.10.6"
  val ScalatraVersion = "2.4.0"

  lazy val project = Project (
    "cwneditor",
    file("."),
    // Scalatra + Scalate plugin defaults, plus this project's settings.
    settings = ScalatraPlugin.scalatraSettings ++ scalateSettings ++ Seq(
      organization := Organization,
      name := Name,
      version := Version,
      scalaVersion := ScalaVersion,
      resolvers += Classpaths.typesafeReleases,
      resolvers += "Scalaz Bintray Repo" at "http://dl.bintray.com/scalaz/releases",
      libraryDependencies ++= Seq(
        // slf4j is excluded from each dependency so the single slf4j-nop
        // binding below is the only logger implementation on the classpath.
        "org.scalatra" %% "scalatra" % ScalatraVersion
          excludeAll(ExclusionRule(organization="org.slf4j")),
        "org.scalatra" %% "scalatra-scalate" % ScalatraVersion
          excludeAll(ExclusionRule(organization="org.slf4j")),
        "org.scalatra" %% "scalatra-specs2" % ScalatraVersion % "test"
          excludeAll(ExclusionRule(organization="org.slf4j")),
        "org.scalatra" %% "scalatra-auth" % ScalatraVersion
          excludeAll(ExclusionRule(organization="org.slf4j")),
        // "ch.qos.logback" % "logback-classic" % "1.1.1" % "runtime",
        "org.eclipse.jetty" % "jetty-webapp" % "9.2.10.v20150310"
          excludeAll(ExclusionRule(organization="org.slf4j")),
        "javax.servlet" % "javax.servlet-api" % "3.1.0" % "provided",
        "io.spray" %% "spray-json" % "1.3.2",
        "org.xerial" % "sqlite-jdbc" % "3.8.11.2",
        "org.slf4j" % "slf4j-nop" % "1.7.13"
      ),
      // Scalate template precompilation: templates live under webapp/WEB-INF/templates
      // and get an implicit ScalatraRenderContext binding named `context`.
      scalateTemplateConfig in Compile <<= (sourceDirectory in Compile){ base =>
        Seq(
          TemplateConfig(
            base / "webapp" / "WEB-INF" / "templates",
            Seq.empty, /* default imports should be added here */
            Seq(
              Binding("context", "_root_.org.scalatra.scalate.ScalatraRenderContext", importMembers = true, isImplicit = true)
            ), /* add extra bindings here */
            Some("templates")
          )
        )
      }
    )
  )
}
| jmccrae/cwn-editor | project/build.scala | Scala | apache-2.0 | 2,204 |
package mesosphere.marathon.api.v2.json
import mesosphere.marathon.Protos.Constraint
import mesosphere.marathon.core.readiness.ReadinessCheckTestHelper
import mesosphere.marathon.core.health.HealthCheck
import mesosphere.marathon.state.AppDefinition.VersionInfo.OnlyVersion
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.state._
import mesosphere.marathon.Protos
import mesosphere.marathon.test.MarathonSpec
import org.scalatest.Matchers
import play.api.libs.json._
import scala.collection.immutable.Seq
/**
 * Round-trip and default-value tests for the Play-JSON formats of [[AppDefinition]].
 *
 * FIX: several former "assertions" were bare boolean expressions whose result was
 * discarded (e.g. `appDef.container.isDefined` on its own line) — they could never
 * fail. They are now real ScalaTest assertions (`... should equal(true)`).
 */
class AppDefinitionFormatsTest
    extends MarathonSpec
    with AppAndGroupFormats
    with HealthCheckFormats
    with Matchers
    with FetchUriFormats
    with SecretFormats {

  import Formats.PathIdFormat

  // Minimal app definition and its expected JSON form, shared across tests.
  object Fixture {
    val a1 = AppDefinition(
      id = "app1".toPath,
      cmd = Some("sleep 10"),
      versionInfo = AppDefinition.VersionInfo.OnlyVersion(Timestamp(1))
    )

    val j1 = Json.parse("""
      {
        "id": "app1",
        "cmd": "sleep 10",
        "version": "1970-01-01T00:00:00.001Z"
      }
    """)
  }

  test("ToJson") {
    import AppDefinition._
    import Fixture._

    val r1 = Json.toJson(a1)
    // check supplied values
    (r1 \ "id").get should equal (JsString("app1"))
    (r1 \ "cmd").get should equal (JsString("sleep 10"))
    (r1 \ "version").get should equal (JsString("1970-01-01T00:00:00.001Z"))
    (r1 \ "versionInfo").asOpt[JsObject] should equal(None)
    // check default values
    (r1 \ "args").asOpt[Seq[String]] should equal (None)
    (r1 \ "user").asOpt[String] should equal (None)
    (r1 \ "env").as[Map[String, String]] should equal (DefaultEnv)
    (r1 \ "instances").as[Long] should equal (DefaultInstances)
    (r1 \ "cpus").as[Double] should equal (DefaultCpus)
    (r1 \ "mem").as[Double] should equal (DefaultMem)
    (r1 \ "disk").as[Double] should equal (DefaultDisk)
    (r1 \ "gpus").as[Int] should equal (DefaultGpus)
    (r1 \ "executor").as[String] should equal (DefaultExecutor)
    (r1 \ "constraints").as[Set[Constraint]] should equal (DefaultConstraints)
    (r1 \ "uris").as[Seq[String]] should equal (DefaultUris)
    (r1 \ "fetch").as[Seq[FetchUri]] should equal (DefaultFetch)
    (r1 \ "storeUrls").as[Seq[String]] should equal (DefaultStoreUrls)
    (r1 \ "portDefinitions").as[Seq[PortDefinition]] should equal (DefaultPortDefinitions)
    (r1 \ "requirePorts").as[Boolean] should equal (DefaultRequirePorts)
    (r1 \ "backoffSeconds").as[Long] should equal (DefaultBackoff.toSeconds)
    (r1 \ "backoffFactor").as[Double] should equal (DefaultBackoffFactor)
    (r1 \ "maxLaunchDelaySeconds").as[Long] should equal (DefaultMaxLaunchDelay.toSeconds)
    (r1 \ "container").asOpt[String] should equal (None)
    (r1 \ "healthChecks").as[Set[HealthCheck]] should equal (DefaultHealthChecks)
    (r1 \ "dependencies").as[Set[PathId]] should equal (DefaultDependencies)
    (r1 \ "upgradeStrategy").as[UpgradeStrategy] should equal (DefaultUpgradeStrategy)
    (r1 \ "residency").asOpt[String] should equal (None)
    (r1 \ "secrets").as[Map[String, Secret]] should equal (DefaultSecrets)
    (r1 \ "taskKillGracePeriodSeconds").asOpt[Long] should equal (DefaultTaskKillGracePeriod)
  }

  test("ToJson should serialize full version info") {
    import Fixture._

    val r1 = Json.toJson(a1.copy(versionInfo = AppDefinition.VersionInfo.FullVersionInfo(
      version = Timestamp(3),
      lastScalingAt = Timestamp(2),
      lastConfigChangeAt = Timestamp(1)
    )))

    (r1 \ "version").as[String] should equal("1970-01-01T00:00:00.003Z")
    (r1 \ "versionInfo" \ "lastScalingAt").as[String] should equal("1970-01-01T00:00:00.002Z")
    (r1 \ "versionInfo" \ "lastConfigChangeAt").as[String] should equal("1970-01-01T00:00:00.001Z")
  }

  test("FromJson") {
    import AppDefinition._
    import Fixture._

    val r1 = j1.as[AppDefinition]
    // check supplied values
    r1.id should equal (a1.id)
    r1.cmd should equal (a1.cmd)
    r1.version should equal (Timestamp(1))
    r1.versionInfo shouldBe a[VersionInfo.OnlyVersion]
    // check default values
    r1.args should equal (DefaultArgs)
    r1.user should equal (DefaultUser)
    r1.env should equal (DefaultEnv)
    r1.instances should equal (DefaultInstances)
    r1.cpus should equal (DefaultCpus)
    r1.mem should equal (DefaultMem)
    r1.disk should equal (DefaultDisk)
    r1.gpus should equal (DefaultGpus)
    r1.executor should equal (DefaultExecutor)
    r1.constraints should equal (DefaultConstraints)
    r1.fetch should equal (DefaultFetch)
    r1.storeUrls should equal (DefaultStoreUrls)
    r1.portDefinitions should equal (DefaultPortDefinitions)
    r1.requirePorts should equal (DefaultRequirePorts)
    r1.backoff should equal (DefaultBackoff)
    r1.backoffFactor should equal (DefaultBackoffFactor)
    r1.maxLaunchDelay should equal (DefaultMaxLaunchDelay)
    r1.container should equal (DefaultContainer)
    r1.healthChecks should equal (DefaultHealthChecks)
    r1.dependencies should equal (DefaultDependencies)
    r1.upgradeStrategy should equal (DefaultUpgradeStrategy)
    r1.acceptedResourceRoles should not be ('defined)
    r1.secrets should equal (DefaultSecrets)
    r1.taskKillGracePeriod should equal (DefaultTaskKillGracePeriod)
  }

  test("FromJSON should ignore VersionInfo") {
    val app = Json.parse(
      """{
        |  "id": "test",
        |  "version": "1970-01-01T00:00:00.002Z",
        |  "versionInfo": {
        |     "lastScalingAt": "1970-01-01T00:00:00.002Z",
        |     "lastConfigChangeAt": "1970-01-01T00:00:00.001Z"
        |  }
        |}""".stripMargin).as[AppDefinition]

    app.versionInfo shouldBe a[OnlyVersion]
  }

  test("FromJSON should fail for empty id") {
    val json = Json.parse(""" { "id": "" }""")
    a[JsResultException] shouldBe thrownBy { json.as[AppDefinition] }
  }

  test("FromJSON should fail when using / as an id") {
    val json = Json.parse(""" { "id": "/" }""")
    a[JsResultException] shouldBe thrownBy { json.as[AppDefinition] }
  }

  test("FromJSON should not fail when 'cpus' is greater than 0") {
    val json = Json.parse(""" { "id": "test", "cpus": 0.0001 }""")
    noException should be thrownBy {
      json.as[AppDefinition]
    }
  }

  test("""ToJSON should correctly handle missing acceptedResourceRoles""") {
    val appDefinition = AppDefinition(id = PathId("test"), acceptedResourceRoles = None)
    val json = Json.toJson(appDefinition)
    (json \ "acceptedResourceRoles").asOpt[Set[String]] should be(None)
  }

  test("""ToJSON should correctly handle acceptedResourceRoles""") {
    val appDefinition = AppDefinition(id = PathId("test"), acceptedResourceRoles = Some(Set("a")))
    val json = Json.toJson(appDefinition)
    (json \ "acceptedResourceRoles").asOpt[Set[String]] should be(Some(Set("a")))
  }

  test("""FromJSON should parse "acceptedResourceRoles": ["production", "*"] """) {
    val json = Json.parse(""" { "id": "test", "acceptedResourceRoles": ["production", "*"] }""")
    val appDef = json.as[AppDefinition]
    appDef.acceptedResourceRoles should equal(Some(Set("production", ResourceRole.Unreserved)))
  }

  test("""FromJSON should parse "acceptedResourceRoles": ["*"] """) {
    val json = Json.parse(""" { "id": "test", "acceptedResourceRoles": ["*"] }""")
    val appDef = json.as[AppDefinition]
    appDef.acceptedResourceRoles should equal(Some(Set(ResourceRole.Unreserved)))
  }

  test("FromJSON should fail when 'acceptedResourceRoles' is defined but empty") {
    val json = Json.parse(""" { "id": "test", "acceptedResourceRoles": [] }""")
    a[JsResultException] shouldBe thrownBy { json.as[AppDefinition] }
  }

  test("FromJSON should read the default upgrade strategy") {
    val json = Json.parse(""" { "id": "test" }""")
    val appDef = json.as[AppDefinition]
    appDef.upgradeStrategy should be(UpgradeStrategy.empty)
  }

  test("FromJSON should read the residency upgrade strategy") {
    val json = Json.parse(""" { "id": "test", "residency": {}}""")
    val appDef = json.as[AppDefinition]
    appDef.upgradeStrategy should be(UpgradeStrategy.forResidentTasks)
  }

  test("FromJSON should read the default residency automatically residency ") {
    val json = Json.parse(
      """
        |{
        |  "id": "resident",
        |  "container": {
        |    "type": "MESOS",
        |    "volumes": [{
        |      "containerPath": "var",
        |      "persistent": { "size": 10 },
        |      "mode": "RW"
        |    }]
        |  }
        |}
      """.stripMargin)
    val appDef = json.as[AppDefinition]
    appDef.residency should be(Some(Residency.defaultResidency))
  }

  test("""FromJSON should parse "residency" """) {
    val appDef = Json.parse(
      """{
        |  "id": "test",
        |  "residency": {
        |     "relaunchEscalationTimeoutSeconds": 300,
        |     "taskLostBehavior": "RELAUNCH_AFTER_TIMEOUT"
        |  }
        |}""".stripMargin).as[AppDefinition]

    appDef.residency should equal(Some(Residency(300, Protos.ResidencyDefinition.TaskLostBehavior.RELAUNCH_AFTER_TIMEOUT)))
  }

  test("ToJson should serialize residency") {
    import Fixture._

    val json = Json.toJson(a1.copy(residency = Some(Residency(7200, Protos.ResidencyDefinition.TaskLostBehavior.WAIT_FOREVER))))
    (json \ "residency" \ "relaunchEscalationTimeoutSeconds").as[Long] should equal(7200)
    (json \ "residency" \ "taskLostBehavior").as[String] should equal(Protos.ResidencyDefinition.TaskLostBehavior.WAIT_FOREVER.name())
  }

  test("AppDefinition JSON includes readinessChecks") {
    val app = AppDefinition(id = PathId("/test"), cmd = Some("sleep 123"), readinessChecks = Seq(
      ReadinessCheckTestHelper.alternativeHttps
    ))
    val appJson = Json.toJson(app)
    val rereadApp = appJson.as[AppDefinition]
    rereadApp.readinessChecks should have size (1)
    rereadApp should equal(app)
  }

  test("FromJSON should parse ipAddress.networkName") {
    val appDef = Json.parse(
      """{
        |  "id": "test",
        |  "ipAddress": {
        |    "networkName": "foo"
        |  }
        |}""".stripMargin).as[AppDefinition]

    appDef.ipAddress.isDefined && appDef.ipAddress.get.networkName.isDefined should equal(true)
    appDef.ipAddress.get.networkName should equal(Some("foo"))
  }

  test("FromJSON should parse ipAddress.networkName with MESOS container") {
    val appDef = Json.parse(
      """{
        |  "id": "test",
        |  "ipAddress": {
        |    "networkName": "foo"
        |  },
        |  "container": {
        |    "type": "MESOS"
        |  }
        |}""".stripMargin).as[AppDefinition]

    appDef.ipAddress.isDefined && appDef.ipAddress.get.networkName.isDefined should equal(true)
    appDef.ipAddress.get.networkName should equal(Some("foo"))
    // FIX: was a bare boolean expression with its result discarded — now asserted.
    appDef.container.isDefined should equal(true)
    appDef.container.get shouldBe a[Container.Mesos]
  }

  test("FromJSON should parse ipAddress.networkName with DOCKER container w/o port mappings") {
    val appDef = Json.parse(
      """{
        |  "id": "test",
        |  "ipAddress": {
        |    "networkName": "foo"
        |  },
        |  "container": {
        |    "type": "DOCKER",
        |    "docker": {
        |      "image": "busybox",
        |      "network": "USER"
        |    }
        |  }
        |}""".stripMargin).as[AppDefinition]

    appDef.ipAddress.isDefined && appDef.ipAddress.get.networkName.isDefined should equal(true)
    appDef.ipAddress.get.networkName should equal(Some("foo"))
    // FIX: was a bare boolean expression with its result discarded — now asserted.
    appDef.container.isDefined should equal(true)
    appDef.container.get shouldBe a[Container.Docker]
    appDef.container.flatMap(_.docker.flatMap(_.network.map(_.toString))) should equal (Some("USER"))
  }

  test("FromJSON should parse ipAddress.networkName with DOCKER container w/ port mappings") {
    val appDef = Json.parse(
      """{
        |  "id": "test",
        |  "ipAddress": {
        |    "networkName": "foo"
        |  },
        |  "container": {
        |    "type": "DOCKER",
        |    "docker": {
        |      "image": "busybox",
        |      "network": "USER",
        |      "portMappings": [{
        |        "containerPort": 123, "servicePort": 80, "name": "foobar"
        |      }]
        |    }
        |  }
        |}""".stripMargin).as[AppDefinition]

    appDef.ipAddress.isDefined && appDef.ipAddress.get.networkName.isDefined should equal(true)
    appDef.ipAddress.get.networkName should equal(Some("foo"))
    // FIX: was a bare boolean expression with its result discarded — now asserted.
    appDef.container.isDefined should equal(true)
    appDef.container.get shouldBe a[Container.Docker]
    appDef.container.flatMap(_.docker.flatMap(_.network.map(_.toString))) should equal (Some("USER"))
    appDef.container.flatMap(_.portMappings) should equal (Some(Seq(
      Container.Docker.PortMapping(containerPort = 123, servicePort = 80, name = Some("foobar"))
    )))
  }

  test("FromJSON should parse Mesos Docker container") {
    val appDef = Json.parse(
      """{
        |  "id": "test",
        |  "ipAddress": {
        |    "networkName": "foo"
        |  },
        |  "container": {
        |    "type": "MESOS",
        |    "docker": {
        |      "image": "busybox",
        |      "credential": {
        |        "principal": "aPrincipal",
        |        "secret": "aSecret"
        |      }
        |    }
        |  }
        |}""".stripMargin).as[AppDefinition]

    appDef.ipAddress.isDefined && appDef.ipAddress.get.networkName.isDefined should equal(true)
    appDef.ipAddress.get.networkName should equal(Some("foo"))
    // FIX: was a bare boolean expression with its result discarded — now asserted.
    appDef.container.isDefined should equal(true)
    appDef.container.get shouldBe a[Container.MesosDocker]
    appDef.container.get match {
      case dd: Container.MesosDocker =>
        // FIX: was a bare boolean expression with its result discarded — now asserted.
        dd.credential.isDefined should equal(true)
        dd.credential.get.principal should equal("aPrincipal")
        dd.credential.get.secret should equal(Some("aSecret"))
      case _ => {}
    }
  }

  test("FromJSON should parse Mesos AppC container") {
    val appDef = Json.parse(
      """{
        |  "id": "test",
        |  "ipAddress": {
        |    "networkName": "foo"
        |  },
        |  "container": {
        |    "type": "MESOS",
        |    "appc": {
        |      "image": "busybox",
        |      "id": "sha512-aHashValue",
        |      "labels": {
        |        "version": "1.2.0",
        |        "arch": "amd64",
        |        "os": "linux"
        |      }
        |    }
        |  }
        |}""".stripMargin).as[AppDefinition]

    appDef.ipAddress.isDefined && appDef.ipAddress.get.networkName.isDefined should equal(true)
    appDef.ipAddress.get.networkName should equal(Some("foo"))
    // FIX: was a bare boolean expression with its result discarded — now asserted.
    appDef.container.isDefined should equal(true)
    appDef.container.get shouldBe a[Container.MesosAppC]
    appDef.container.get match {
      case ma: Container.MesosAppC =>
        ma.image should equal("busybox")
        ma.id should equal(Some("sha512-aHashValue"))
        ma.labels.keys.size should equal(3)
        ma.labels("version") should equal("1.2.0")
        ma.labels("arch") should equal("amd64")
        ma.labels("os") should equal("linux")
      case _ => {}
    }
  }

  test("FromJSON should parse ipAddress without networkName") {
    val appDef = Json.parse(
      """{
        |  "id": "test",
        |  "ipAddress": { }
        |}""".stripMargin).as[AppDefinition]

    appDef.ipAddress.isDefined && !appDef.ipAddress.get.networkName.isDefined should equal(true)
  }

  test("FromJSON should parse secrets") {
    val appDef = Json.parse(
      """{
        |  "id": "test",
        |  "secrets": {
        |     "secret1": { "source": "/foo" },
        |     "secret2": { "source": "/foo" },
        |     "secret3": { "source": "/foo2" }
        |  }
        |}""".stripMargin).as[AppDefinition]

    appDef.secrets.keys.size should equal(3)
    appDef.secrets("secret1").source should equal("/foo")
    appDef.secrets("secret2").source should equal("/foo")
    appDef.secrets("secret3").source should equal("/foo2")
  }

  test("ToJSON should serialize secrets") {
    import Fixture._

    val json = Json.toJson(a1.copy(secrets = Map(
      "secret1" -> Secret("/foo"),
      "secret2" -> Secret("/foo"),
      "secret3" -> Secret("/foo2")
    )))

    (json \ "secrets" \ "secret1" \ "source").as[String] should equal("/foo")
    (json \ "secrets" \ "secret2" \ "source").as[String] should equal("/foo")
    (json \ "secrets" \ "secret3" \ "source").as[String] should equal("/foo2")
  }
}
| timcharper/marathon | src/test/scala/mesosphere/marathon/api/v2/json/AppDefinitionFormatsTest.scala | Scala | apache-2.0 | 16,366 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst
import org.apache.spark.sql.Encoder
package object encoders {

  /**
   * Returns the internal [[ExpressionEncoder]] backing the implicit [[Encoder]] for `A`,
   * used to serialize / deserialize JVM objects into Spark SQL rows. The encoder must
   * still be unresolved (i.e. carry no attribute references from a specific schema),
   * which preserves whether a given object type is bound by name or by ordinal
   * when resolution happens later.
   */
  def encoderFor[A : Encoder]: ExpressionEncoder[A] = {
    val encoder = implicitly[Encoder[A]]
    encoder match {
      case expressionEncoder: ExpressionEncoder[A] =>
        expressionEncoder.assertUnresolved()
        expressionEncoder
      case _ =>
        sys.error(s"Only expression encoders are supported today")
    }
  }
}
| wangyixiaohuihui/spark2-annotation | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/package.scala | Scala | apache-2.0 | 1,556 |
package de.zalando.play.swagger.sbt
import java.io.File
import de.zalando.BuildInfo
import de.zalando.apifirst.Application.StrictModel
import de.zalando.swagger.SwaggerParser
import de.zalando.swagger.strictModel.SwaggerModel
import sbt.Keys._
import sbt.{Defaults, _}
/**
* @since 24.05.2016.
*/
//noinspection ScalaStyle
/**
 * sbt plugin that parses Swagger YAML definitions and converts them to the
 * api-first AST. Exposes `swaggerDefinitions` (the input files) and
 * `swaggerSpec2Ast` (file -> StrictModel pairs) to downstream tasks.
 *
 * @since 24.05.2016.
 */
//noinspection ScalaStyle
object ApiFirstSwaggerParser extends AutoPlugin {

  object autoImport {
    lazy val swaggerKeyPrefix = settingKey[String]("The key prefix is a name for swagger vendor extension")
    lazy val swaggerDefinitions = taskKey[Seq[File]]("The swagger definition files")
  }

  // Intermediate task: raw parse results, consumed only by swaggerSpec2Ast below.
  private lazy val swaggerParseSpec = taskKey[Seq[(java.net.URI, SwaggerModel)]]("Parse API specifications (swaggerDefinitions)")

  lazy val swaggerSpec2Ast = taskKey[Seq[(File,StrictModel)]]("Convert API specifications (swaggerDefinitions) to AST")

  // Users have to explicitly enable it
  override def trigger: PluginTrigger = noTrigger

  import autoImport._

  // Adds the runtime/test dependencies the generated code relies on, plus the
  // parser settings scoped to Compile.
  override def projectSettings = Seq(
    libraryDependencies ++= Seq(
      "de.zalando" %% "play-swagger-api" % BuildInfo.version,
      "com.fasterxml.jackson.dataformat" % "jackson-dataformat-yaml" % "2.4.4",
      "com.fasterxml.jackson.module" %% "jackson-module-scala" % "2.6.1",
      "org.scalacheck" %% "scalacheck" % "1.12.4" % Test,
      "com.typesafe.play" %% "play-test" % play.core.PlayVersion.current % Test
    )
  ) ++ inConfig(Compile)(swaggerParserSettings)

  /**
   * We define these unscoped, and then scope later using inConfig, this means we could define different definitions
   * to be compiled in compile and test, for example.
   */
  // NOTE(review): uses deprecated sbt 0.13 `<<=` operator syntax.
  private def swaggerParserSettings: Seq[Setting[_]] = Seq(
    sourcePositionMappers := Seq(),
    swaggerKeyPrefix := "x-api-first",
    // By default, every *.yaml in the resource directory is a swagger definition.
    swaggerDefinitions := (resourceDirectory.value * "*.yaml").get,
    // Re-trigger ~compile when a definition file changes.
    watchSources in Defaults.ConfigGlobal <++= sources in swaggerDefinitions,
    swaggerParseSpec <<= swaggerDefinitions map { t => t.map(SwaggerParser.readSwaggerModel) },
    // Pair each definition file with the AST converted from its parsed model.
    swaggerSpec2Ast in Defaults.ConfigGlobal <<= (swaggerDefinitions, swaggerParseSpec) map { (t, s) =>
      s.zip(t) map { case ((uri, model), file) => file -> SwaggerParser.convertModelToAST(file, uri, model) }
    }
  )
}
| zalando/play-swagger | plugin/src/main/scala/de/zalando/play/swagger/sbt/ApiFirstSwaggerParser.scala | Scala | mit | 2,241 |
object Success
| twitter-forks/sbt | sbt/src/sbt-test/project/scripted-plugin/changes/Success.scala | Scala | bsd-3-clause | 15 |
package com.geishatokyo.sqlgen.process
import java.io.File
import java.nio.file.Paths
import com.geishatokyo.sqlgen.SQLGenException
import com.geishatokyo.sqlgen.core.Workbook
/**
* Created by takezoux2 on 2017/07/05.
*/
/**
 * A pipeline step that exports previously collected data from the context.
 * Implementations specify where the data lives (`dataKey`) and how to emit
 * it (`output`); `apply` wires the two together and returns the context
 * unchanged.
 */
trait OutputProc[DataType] extends Proc with OutputSupport{
  // Context key under which the data to export is stored.
  def dataKey: Key[MultiData[DataType]]

  // Performs the actual export side effect.
  def output(data: MultiData[DataType], c: Context): Unit

  override def apply(c: Context): Context = {
    val d = c(dataKey)
    output(d, c)
    c // context passes through unmodified
  }
}
trait OutputSupport {

  /**
   * Resolves the target file for an export. An absolute `dir` is used as-is;
   * otherwise the relative `dir`/`name` is anchored at the context's export
   * directory if set, then its working directory if set, and finally the
   * process's current directory.
   */
  def getPath(c: Context, dir: String, name: String): File = {
    if (Paths.get(dir).isAbsolute) {
      new File(dir, name)
    } else {
      val root =
        if (c.has(Context.ExportDir)) Some(c(Context.ExportDir))
        else if (c.has(Context.WorkingDir)) Some(c.workingDir)
        else None
      root match {
        case Some(base) => Paths.get(base, dir, name).toFile
        case None => Paths.get(dir, name).toFile
      }
    }
  }
}
| geishatokyo/sql-generator | src/main/scala/com/geishatokyo/sqlgen/process/OutputProc.scala | Scala | mit | 884 |
import akka.actor.{ Actor, ActorSelection }
import akka.persistence.{ AtLeastOnceDelivery, PersistentActor }
// Commands exchanged between the sending and receiving actors.
sealed trait Cmd
// Deliver `s`; `deliveryId` tags the message for at-least-once confirmation.
case class SayHello(deliveryId: Long, s: String) extends Cmd
// Acknowledgement carrying the deliveryId being confirmed.
case class ReceiveHello(deliveryId: Long) extends Cmd

// Persisted events replayed to rebuild the sender's delivery state.
sealed trait Evt
case class HelloSaid(s: String) extends Evt
case class HelloReceived(deliveryId: Long) extends Evt
/**
 * Persistent sender using at-least-once delivery: every outgoing SayHello is
 * redelivered to `destination` until the matching ReceiveHello confirms it.
 */
class SendActor(destination: ActorSelection)
  extends PersistentActor with AtLeastOnceDelivery {

  override def persistenceId: String = "persistence-id"

  // Persist the event first, then apply it via updateState so that live
  // processing and recovery (event replay) share the same state transition.
  override def receiveCommand: Receive = {
    case s: String =>
      persist(HelloSaid(s))(updateState)
    case ReceiveHello(deliveryId) =>
      persist(HelloReceived(deliveryId))(updateState)
  }

  // During recovery only replayed events arrive; re-apply them to the state.
  override def receiveRecover: Receive = {
    case evt: Evt => updateState(evt)
  }

  // HelloSaid schedules an at-least-once deliver(...) (which assigns the
  // deliveryId); HelloReceived confirms that id so redelivery stops.
  def updateState(evt: Evt): Unit = evt match {
    case HelloSaid(s) =>
      deliver(destination)(deliveryId => SayHello(deliveryId, s))
    case HelloReceived(deliveryId) =>
      confirmDelivery(deliveryId)
  }
}
/** Receiving side: acknowledges every SayHello so the sender can confirm delivery. */
class ReceiveActor extends Actor {
  override def receive: Receive = {
    case SayHello(deliveryId, greeting) =>
      // ... do something with greeting
      sender() ! ReceiveHello(deliveryId)
  }
}
| ironfish/reactive-application-development-scala | chapter6_003_atleastonce/src/main/scala/AtLeastOnce.scala | Scala | apache-2.0 | 1,199 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600j.v2
import uk.gov.hmrc.ct.box.CtBoxIdentifier
abstract class J2 extends CtBoxIdentifier(name = "Tax Avoidance 2 Reference Number")
| keithhall/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600j/v2/J2.scala | Scala | apache-2.0 | 768 |
package org.jetbrains.plugins.scala
package codeInspection.parameters
import com.intellij.codeInspection.{LocalInspectionTool, ProblemHighlightType, ProblemsHolder}
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.psi.{PsiElement, PsiElementVisitor}
import org.jetbrains.plugins.scala.codeInspection.InspectionBundle
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor
import org.jetbrains.plugins.scala.lang.psi.api.base.ScLiteral
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.Parameter
import org.jetbrains.plugins.scala.lang.psi.types.result.TypingContext
import org.jetbrains.plugins.scala.util.IntentionUtils
import scala.collection.Seq
/**
* @author Ksenia.Sautina
* @since 5/10/12
*/
/**
 * Inspection that flags literal `true`/`false` arguments in method calls and
 * offers a quick fix to turn them into named arguments. Calls to
 * single-parameter methods can be exempted via `isIgnoreSingleParameter`.
 *
 * NOTE(review): `isBooleanType` uses `return` inside lambdas passed to
 * `exists` — these are nonlocal returns implemented by throwing; behavior is
 * kept intact here, but a refactor to plain boolean expressions would be safer.
 *
 * @author Ksenia.Sautina
 * @since 5/10/12
 */
abstract class NameBooleanParametersInspectionBase extends LocalInspectionTool {
  override def buildVisitor(holder: ProblemsHolder, isOnTheFly: Boolean): PsiElementVisitor = {
    new ScalaElementVisitor {
      override def visitMethodCallExpression(mc: ScMethodCall) {
        // Nothing to inspect on malformed or argument-less calls.
        if (mc == null || mc.args == null || mc.args.exprs.isEmpty) return
        // Optionally skip calls that resolve to a single-parameter method.
        if (isIgnoreSingleParameter) mc.getInvokedExpr match {
          case ref: ScReferenceExpression => ref.bind() match {
            case Some(srr) =>
              val targets = Seq(srr.element) ++ srr.innerResolveResult.map(_.getElement)
              if (targets.exists {
                case fun: ScFunction => fun.parameters.size == 1
                case _ => false
              }) return
            case _ =>
          }
          case _ =>
        }
        // Register a problem for each bare `true`/`false` literal argument of a
        // Boolean-typed parameter for which the named-argument fix is applicable.
        for (expr <- mc.args.exprs) {
          expr match {
            case literal: ScLiteral if isBooleanType(expr) &&
              IntentionUtils.addNameToArgumentsFix(literal, onlyBoolean = true).isDefined &&
              (expr.getNode.getFirstChildNode.getElementType == ScalaTokenTypes.kTRUE ||
                expr.getNode.getFirstChildNode.getElementType == ScalaTokenTypes.kFALSE) =>
              holder.registerProblem(holder.getManager.createProblemDescriptor(expr,
                InspectionBundle.message("name.boolean"),
                new NameBooleanParametersQuickFix(literal),
                ProblemHighlightType.GENERIC_ERROR_OR_WARNING, isOnTheFly))
            case _ =>
          }
        }
      }

      // True if `element` is an argument whose matching parameter's real type is Boolean.
      // Only considers parenthesised argument lists (brace args are skipped).
      def isBooleanType(element: PsiElement): Boolean = {
        val containingArgList: Option[ScArgumentExprList] = element.parents.collectFirst {
          case al: ScArgumentExprList if !al.isBraceArgs => al
        }
        containingArgList match {
          case Some(al) =>
            val index = al.exprs.indexWhere(argExpr => PsiTreeUtil.isAncestor(argExpr, element, false))
            index match {
              case -1 => false
              case i =>
                // Pair each argument from `index` onwards with its resolved parameter.
                val argExprsToNamify = al.exprs.drop(index)
                val argsAndMatchingParams: Seq[(ScExpression, Option[Parameter])] = argExprsToNamify.map {
                  arg => (arg, al.parameterOf(arg))
                }
                // NOTE(review): the `return`s below exit isBooleanType from inside
                // the lambda (nonlocal return), so only the FIRST pair decides.
                argsAndMatchingParams.exists {
                  case (expr, Some(param)) =>
                    val paramInCode = param.paramInCode.orNull
                    if (paramInCode == null) return false
                    if (!paramInCode.isValid) return false //todo: find why it can be invalid?
                    val realParameterType = paramInCode.getRealParameterType(TypingContext.empty).getOrElse(null)
                    if (realParameterType == null) return false
                    else if (realParameterType.canonicalText == "Boolean") return true
                    else return false
                  case _ => return false
                }
            }
          case None => false
        }
      }
    }
  }

  // Settings accessors implemented by the concrete (per-IDE-version) subclass.
  def isIgnoreSingleParameter: Boolean

  def setIgnoreSingleParameter(value: Boolean)
}
| LPTK/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/parameters/NameBooleanParametersInspectionBase.scala | Scala | apache-2.0 | 4,070 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import org.apache.spark.sql.catalyst.analysis.{MultiInstanceRelation, NamedRelation}
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.catalyst.plans.logical.{ExposesMetadataColumns, LeafNode, LogicalPlan, Statistics}
import org.apache.spark.sql.catalyst.util.{truncatedString, CharVarcharUtils}
import org.apache.spark.sql.connector.catalog.{CatalogPlugin, Identifier, MetadataColumn, SupportsMetadataColumns, Table, TableCapability}
import org.apache.spark.sql.connector.read.{Scan, Statistics => V2Statistics, SupportsReportStatistics}
import org.apache.spark.sql.connector.read.streaming.{Offset, SparkDataStream}
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.Utils
/**
* A logical plan representing a data source v2 table.
*
* @param table The table that this relation represents.
* @param output the output attributes of this relation.
* @param catalog catalogPlugin for the table. None if no catalog is specified.
* @param identifier the identifier for the table. None if no identifier is defined.
* @param options The options for this table operation. It's used to create fresh
* [[org.apache.spark.sql.connector.read.ScanBuilder]] and
* [[org.apache.spark.sql.connector.write.WriteBuilder]].
*/
case class DataSourceV2Relation(
    table: Table,
    output: Seq[AttributeReference],
    catalog: Option[CatalogPlugin],
    identifier: Option[Identifier],
    options: CaseInsensitiveStringMap)
  extends LeafNode with MultiInstanceRelation with NamedRelation with ExposesMetadataColumns {

  import DataSourceV2Implicits._

  // Metadata columns the table can produce, minus any whose name (under the
  // session's resolver) collides with a data column — the data column wins.
  override lazy val metadataOutput: Seq[AttributeReference] = table match {
    case hasMeta: SupportsMetadataColumns =>
      val resolve = conf.resolver
      val outputNames = outputSet.map(_.name)
      def isOutputColumn(col: MetadataColumn): Boolean = {
        outputNames.exists(name => resolve(col.name, name))
      }
      // filter out metadata columns that have names conflicting with output columns. if the table
      // has a column "line" and the table can produce a metadata column called "line", then the
      // data column should be returned, not the metadata column.
      hasMeta.metadataColumns.filterNot(isOutputColumn).toAttributes
    case _ =>
      Nil
  }

  override def name: String = table.name()

  // Tables accepting any schema skip Spark's output-schema resolution on write.
  override def skipSchemaResolution: Boolean = table.supports(TableCapability.ACCEPT_ANY_SCHEMA)

  override def simpleString(maxFields: Int): String = {
    s"RelationV2${truncatedString(output, "[", ", ", "]", maxFields)} $name"
  }

  override def computeStats(): Statistics = {
    if (Utils.isTesting) {
      // when testing, throw an exception if this computeStats method is called because stats should
      // not be accessed before pushing the projection and filters to create a scan. otherwise, the
      // stats are not accurate because they are based on a full table scan of all columns.
      throw new IllegalStateException(
        s"BUG: computeStats called before pushdown on DSv2 relation: $name")
    } else {
      // when not testing, return stats because bad stats are better than failing a query
      table.asReadable.newScanBuilder(options) match {
        case r: SupportsReportStatistics =>
          val statistics = r.estimateStatistics()
          DataSourceV2Relation.transformV2Stats(statistics, None, conf.defaultSizeInBytes)
        case _ =>
          Statistics(sizeInBytes = conf.defaultSizeInBytes)
      }
    }
  }

  // Fresh expression ids so the same relation can appear twice in one plan (self-join).
  override def newInstance(): DataSourceV2Relation = {
    copy(output = output.map(_.newInstance()))
  }

  // Returns a copy whose output also exposes the non-conflicting metadata columns.
  def withMetadataColumns(): DataSourceV2Relation = {
    if (metadataOutput.nonEmpty) {
      DataSourceV2Relation(table, output ++ metadataOutput, catalog, identifier, options)
    } else {
      this
    }
  }
}
/**
 * A DSv2 relation whose [[Scan]] has already been created.
 *
 * The optimizer converts [[DataSourceV2Relation]] into this node after pushing
 * filters and projection down, so that any statistics it consults already
 * reflect the pushed-down operations.
 *
 * @param relation the originating [[DataSourceV2Relation]]
 * @param scan the DSv2 [[Scan]] built from the pushed-down operators
 * @param output the output attributes of this relation
 */
case class DataSourceV2ScanRelation(
    relation: DataSourceV2Relation,
    scan: Scan,
    output: Seq[AttributeReference]) extends LeafNode with NamedRelation {

  override def name: String = relation.table.name()

  override def simpleString(maxFields: Int): String =
    s"RelationV2${truncatedString(output, "[", ", ", "]", maxFields)} $name"

  // Use the scan's own estimate when it reports statistics; otherwise fall back
  // to the session-configured default size.
  override def computeStats(): Statistics = scan match {
    case reporting: SupportsReportStatistics =>
      DataSourceV2Relation.transformV2Stats(
        reporting.estimateStatistics(), None, conf.defaultSizeInBytes)
    case _ =>
      Statistics(sizeInBytes = conf.defaultSizeInBytes)
  }
}
/**
 * A specialization of [[DataSourceV2Relation]] with the streaming bit set to true.
 *
 * Note that, this plan has a mutable reader, so Spark won't apply operator push-down for this plan,
 * to avoid making the plan mutable. We should consolidate this plan and [[DataSourceV2Relation]]
 * after we figure out how to apply operator push-down for streaming data sources.
 *
 * NOTE(review): startOffset/endOffset default to None; presumably they are
 * filled in by the streaming execution once the batch's offset range is known —
 * confirm against the callers that copy this plan.
 */
case class StreamingDataSourceV2Relation(
    output: Seq[Attribute],
    scan: Scan,
    stream: SparkDataStream,
    startOffset: Option[Offset] = None,
    endOffset: Option[Offset] = None)
  extends LeafNode with MultiInstanceRelation {

  override def isStreaming: Boolean = true

  // Fresh expression ids per the MultiInstanceRelation contract.
  override def newInstance(): LogicalPlan = copy(output = output.map(_.newInstance()))

  // Delegate to the scan's estimate when available, else use the default size.
  override def computeStats(): Statistics = scan match {
    case r: SupportsReportStatistics =>
      val statistics = r.estimateStatistics()
      DataSourceV2Relation.transformV2Stats(statistics, None, conf.defaultSizeInBytes)
    case _ =>
      Statistics(sizeInBytes = conf.defaultSizeInBytes)
  }
}
object DataSourceV2Relation {

  /**
   * Creates a [[DataSourceV2Relation]] for the given table, deriving the
   * output attributes from the table schema.
   */
  def create(
      table: Table,
      catalog: Option[CatalogPlugin],
      identifier: Option[Identifier],
      options: CaseInsensitiveStringMap): DataSourceV2Relation = {
    // The v2 source may return schema containing char/varchar type. We replace char/varchar
    // with "annotated" string type here as the query engine doesn't support char/varchar yet.
    val schema = CharVarcharUtils.replaceCharVarcharWithStringInSchema(table.schema)
    DataSourceV2Relation(table, schema.toAttributes, catalog, identifier, options)
  }

  /** Convenience overload of [[create]] with empty options. */
  def create(
      table: Table,
      catalog: Option[CatalogPlugin],
      identifier: Option[Identifier]): DataSourceV2Relation =
    create(table, catalog, identifier, CaseInsensitiveStringMap.empty)

  /**
   * This is used to transform data source v2 statistics to logical.Statistics.
   *
   * @param defaultRowCount row count reported when the source provides none.
   * @param defaultSizeInBytes size reported when the source provides none.
   */
  def transformV2Stats(
      v2Statistics: V2Statistics,
      defaultRowCount: Option[BigInt],
      defaultSizeInBytes: Long): Statistics = {
    val numRows: Option[BigInt] = if (v2Statistics.numRows().isPresent) {
      Some(v2Statistics.numRows().getAsLong)
    } else {
      defaultRowCount
    }
    Statistics(
      sizeInBytes = v2Statistics.sizeInBytes().orElse(defaultSizeInBytes),
      rowCount = numRows)
  }
}
| ueshin/apache-spark | sql/catalyst/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Relation.scala | Scala | apache-2.0 | 8,229 |
package stormlantern.consul.client.dao
//[
// {
// "Node": "foobar",
// "Address": "10.1.10.12",
// "ServiceID": "redis",
// "ServiceName": "redis",
// "ServiceTags": null,
// "ServiceAddress": "",
// "ServicePort": 8000
// }
//]
/**
 * One entry of a Consul service query result; field names mirror the JSON
 * payload shown in the sample above.
 */
case class ServiceInstance(
  node: String,             // "Node": Consul node name
  address: String,          // "Address": node address
  serviceId: String,        // "ServiceID"
  serviceName: String,      // "ServiceName"
  serviceTags: Set[String], // "ServiceTags" — NB: null in the raw JSON sample; presumably mapped to an empty set by the JSON layer — confirm
  serviceAddress: String,   // "ServiceAddress" (may be empty, see sample)
  servicePort: Int          // "ServicePort"
)
/**
 * A set of service instances together with the index at which they were
 * observed.
 */
case class IndexedServiceInstances(index: Long, resource: Set[ServiceInstance]) extends Indexed[Set[ServiceInstance]] {

  /** Keeps only the instances that carry every one of the requested tags. */
  def filterForTags(tags: Set[String]): IndexedServiceInstances =
    copy(resource = resource.filter(instance => tags.subsetOf(instance.serviceTags)))
}

object IndexedServiceInstances {
  /** The empty result at index 0, i.e. "nothing observed yet". */
  def empty: IndexedServiceInstances = IndexedServiceInstances(0, Set.empty)
}
| dlouwers/reactive-consul | client/src/main/scala/stormlantern/consul/client/dao/IndexedServiceInstances.scala | Scala | mit | 818 |
import leon.Utils._
import leon.Annotations._
/*
Add the missing postcondition.
*/
// Leon verification exercise: supply the missing postcondition of `firstZero`
// so that `goal` verifies. Do not change the executable code.
object SearchLinkedList {
  sealed abstract class List
  case class Cons(head : BigInt, tail : List) extends List
  case class Nil() extends List

  // Length of the list; the postcondition lets Leon assume non-negativity.
  def size(list : List) : BigInt = (list match {
    case Nil() => 0
    case Cons(_, xs) => 1 + size(xs)
  }) ensuring(_ >= 0)

  // True iff elem occurs somewhere in list.
  def contains(list : List, elem : BigInt) : Boolean = (list match {
    case Nil() => false
    case Cons(x, xs) => x == elem || contains(xs, elem)
  })

  // Index of the first zero; returns size(list) when the list contains no zero.
  // The exercise: fill in the commented-out `ensuring` clause below.
  def firstZero(list : List) : BigInt = (list match {
    case Nil() => 0
    case Cons(x, xs) => if (x == 0) 0 else firstZero(xs) + 1
  }) /*ensuring (res => )*/

  // True iff the first zero of list sits exactly at index pos.
  def firstZeroAtPos(list : List, pos : BigInt) : Boolean = {
    list match {
      case Nil() => false
      case Cons(x, xs) => if (pos == 0) x == 0 else x != 0 && firstZeroAtPos(xs, pos - 1)
    }
  }

  // The property the missing postcondition must be strong enough to prove.
  def goal(list : List, i : BigInt) : Boolean = {
    if(firstZero(list) == i) {
      if(contains(list, 0)) {
        firstZeroAtPos(list, i)
      } else {
        i == size(list)
      }
    } else {
      true
    }
  } holds
}
| epfl-lara/leon | testcases/graveyard/tutorials/11_Ex10_SearchLinkedList.scala | Scala | gpl-3.0 | 1,122 |
/*
* Ported from https://github.com/junit-team/junit
*/
package org.junit.internal
import org.junit.Assert
/**
 * [[ComparisonCriteria]] comparing floating point array elements with an
 * absolute tolerance. The tolerance is stored boxed (`AnyRef`) so one field
 * can hold either a Double or a Float delta, matching the element type.
 */
class InexactComparisonCriteria private(val fDelta: AnyRef)
    extends ComparisonCriteria {

  // Public constructors box the primitive delta into the shared field.
  def this(delta: Double) =
    this(delta.asInstanceOf[AnyRef])

  def this(delta: Float) =
    this(delta.asInstanceOf[AnyRef])

  override protected def assertElementsEqual(expected: AnyRef,
      actual: AnyRef): Unit = {
    expected match {
      case expected: java.lang.Double =>
        // Double elements: compare with the delta unboxed as Double.
        Assert.assertEquals(expected, actual.asInstanceOf[Double],
            fDelta.asInstanceOf[Double])
      case _ =>
        // Otherwise elements are assumed to be Floats (per the two public
        // constructors) — any other element type fails the cast here.
        Assert.assertEquals(expected.asInstanceOf[Float],
            actual.asInstanceOf[Float], fDelta.asInstanceOf[Float])
    }
  }
}
| nicolasstucki/scala-js-junit | runtime/src/main/scala/org/junit/internal/InexactComparisonCriteria.scala | Scala | bsd-3-clause | 756 |
package models;
import java.util.UUID
/** A subscriber record: a UUID primary identifier plus a username. */
case class YoSubscriber(
  id: UUID,
  username: String
)
package com.perevillega.sesms
/**
*
* User: pvillega
*/
// Placeholder class for the S-ESMS engine; no members implemented yet.
class Esms {
}
| pvillega/S-ESMS | src/main/scala/com/perevillega/sesms/Esms.scala | Scala | lgpl-3.0 | 76 |
package com.twitter.util
import org.openjdk.jmh.annotations._
// ./sbt 'project util-benchmark' 'jmh:run DurationBenchmark'
@State(Scope.Benchmark)
class DurationBenchmark extends StdBenchAnnotations {

  // Fixed operands so each benchmark measures only the Duration operation.
  private[this] val d1 = Duration.fromNanoseconds(1)
  private[this] val d2 = Duration.fromNanoseconds(2)
  private[this] val d3 = Duration.fromNanoseconds(1234567890L)
  private[this] val d4 = Duration.fromNanoseconds(9876543210L)
  private[this] val d5 = Duration.fromNanoseconds(Long.MaxValue - 10) // near-overflow operand

  // Non-short-circuiting `&` keeps all seven equality checks in the
  // measurement, matching @OperationsPerInvocation(7).
  @OperationsPerInvocation(7)
  @Benchmark
  def durationEquals: Boolean = {
    d1 == Duration.Top &
    d1 == Duration.Bottom &
    d1 == Duration.Undefined &
    d1 == d2 &
    Duration.Top == Duration.Top &
    Duration.Top == Duration.Bottom &
    Duration.Top == Duration.Undefined
  }

  @Benchmark
  def durationMultiplyLong: Duration = d3 * 123456L

  // Multiplication whose nanosecond product exceeds Long.MaxValue.
  @Benchmark
  def durationMultiplyLongOverflow: Duration = d3 * Long.MaxValue

  @Benchmark
  def durationAddDelta: Duration = d3 + d4

  // Addition whose nanosecond sum exceeds Long.MaxValue.
  @Benchmark
  def durationAddDeltaOverflow: Duration = d3 + d5

  @Benchmark
  def durationMod: Duration = d4 % d3

  @Benchmark
  def durationFloor: Duration = d4.floor(d3)
}
| twitter/util | util-benchmark/src/main/scala/com/twitter/util/DurationBenchmark.scala | Scala | apache-2.0 | 1,193 |
/*
* Copyright 2015 Roberto Tyley
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.madgag.scalagithub.model
import play.api.libs.json.Json
/*
{
"url": "https://api.github.com/repos/octocat/Hello-World/labels/bug",
"name": "bug",
"color": "f29513"
}
*/
/**
 * A GitHub issue label, mirroring the API payload in the sample JSON above.
 */
case class Label(
  url: String,
  name: String,
  color: String // 6-digit hex colour without a leading '#' (e.g. "f29513")
) extends Deleteable // https://developer.github.com/v3/issues/labels/#delete-a-label

object Label {
  // JSON Reads derived from the case class fields.
  implicit val readsLabel = Json.reads[Label]
}
package models
/** One entry of the input top list: a name and its associated count. */
case class InputTopListEntry(name: String, count: Int)
| MeiSign/Fillable | app/models/InputTopListEntry.scala | Scala | apache-2.0 | 72 |
/*
* Distributed as part of Scalala, a linear algebra library.
*
* Copyright (C) 2008- Daniel Ramage
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110 USA
*/
package scalala;
package generic;
package math;
import collection.CanMapValues;
import scalala.operators.{UnaryOp}
import scalala.scalar.Complex
import scalala.tensor.{Matrix, Vector}
/**
 * Operator type for exp(A).
 *
 * @author dramage
 */
trait OpExp extends operators.OpType;

// Canonical instance used as the operator tag for exp.
object OpExp extends OpExp;

/**
 * Construction delegate for exp(A): the type class whose instances compute
 * the (element-wise) exponential of an A, producing an RV.
 *
 * @author dramage
 */
trait CanExp[A,+RV] extends UnaryOp[A,OpExp,RV] {
  def opType = OpExp;
}
object CanExp {
  // Scalar instances: integral and Float inputs widen to Double.
  implicit object OpI extends CanExp[Int,Double] {
    def apply(v : Int) = scala.math.exp(v);
  }

  implicit object OpL extends CanExp[Long,Double] {
    def apply(v : Long) = scala.math.exp(v);
  }

  implicit object OpF extends CanExp[Float,Double] {
    def apply(v : Float) = scala.math.exp(v);
  }

  implicit object OpD extends CanExp[Double,Double] {
    def apply(v : Double) = scala.math.exp(v);
  }

  // exp(a + bi) = e^a * (cos b + i sin b)
  implicit object OpC extends CanExp[Complex,Complex] {
    def apply(v: Complex) = Complex(scala.math.cos(v.imag), scala.math.sin(v.imag)) * scala.math.exp(v.real)
  }

  // Lifts a scalar CanExp over any container with a CanMapValues instance.
  class OpMapValues[From,A,B,To](implicit op : CanExp[A,B], map : CanMapValues[From,A,B,To]) extends CanExp[From,To] {
    def apply(v : From) = map.map(v, op.apply(_));
  }

  implicit def opMapValues[From,A,B,To](implicit map : CanMapValues[From,A,B,To], op : CanExp[A,B])
  : CanExp[From,To] = new OpMapValues[From,A,B,To]()(op, map);

  // Pre-built container instances (arrays, vectors, matrices) so the common
  // cases resolve directly instead of through the generic implicit def.
  implicit object OpArrayI extends OpMapValues[Array[Int],Int,Double,Array[Double]]()(OpI,CanMapValues.OpArrayID);
  implicit object OpArrayL extends OpMapValues[Array[Long],Long,Double,Array[Double]]()(OpL,CanMapValues.OpArrayLD);
  implicit object OpArrayF extends OpMapValues[Array[Float],Float,Double,Array[Double]]()(OpF,CanMapValues.OpArrayFD);
  implicit object OpArrayD extends OpMapValues[Array[Double],Double,Double,Array[Double]]()(OpD,CanMapValues.OpArrayDD);
  implicit object OpArrayC extends OpMapValues[Array[Complex],Complex,Complex,Array[Complex]]()(OpC,CanMapValues.OpArrayCC);

  implicit object OpVectorI extends OpMapValues[Vector[Int],Int,Double,Vector[Double]]()
  implicit object OpVectorL extends OpMapValues[Vector[Long],Long,Double,Vector[Double]]()
  implicit object OpVectorF extends OpMapValues[Vector[Float],Float,Double,Vector[Double]]()
  implicit object OpVectorD extends OpMapValues[Vector[Double],Double,Double,Vector[Double]]()

  implicit object OpMatrixI extends OpMapValues[Matrix[Int],Int,Double,Matrix[Double]]()
  implicit object OpMatrixL extends OpMapValues[Matrix[Long],Long,Double,Matrix[Double]]()
  implicit object OpMatrixF extends OpMapValues[Matrix[Float],Float,Double,Matrix[Double]]()
  implicit object OpMatrixD extends OpMapValues[Matrix[Double],Double,Double,Matrix[Double]]()
}
| scalala/Scalala | src/main/scala/scalala/generic/math/CanExp.scala | Scala | lgpl-2.1 | 3,577 |
// scalac pos test (t3582): implicit search for C[A] inside a method that also
// declares a higher-kinded type parameter CC[A] whose parameter shadows A.
// The code must stay byte-for-byte as-is for the regression to be meaningful.
trait C[A]
object Test {
  def ImplicitParamCA[CC[A], A](implicit ev: C[A]) {implicitly[C[A]]} // must use this exact syntax...
  // error: could not find implicit value for parameter e: C[A]
}
// [[syntax trees at end of typer]]
// abstract trait C#5[A#9116 >: Nothing#5832 <: Any#52] extends scala#33.AnyRef#2780;
// final object Test#15 extends java.lang.Object#2485 with ScalaObject#1913 {
// def ImplicitParamCA#9123[CC#9124[A#10858 >: Nothing#5832 <: Any#52] >: [A#10858]Nothing#5832 <: [A#10858]Any#52,
// A#9125 >: Nothing#5832 <: Any#52](implicit ev#10856: C#5[A#9127]): Unit#3818
// = scala#34.this.Predef#1683.implicitly#8816[C#5[A#10858]]()
// }
| loskutov/intellij-scala | testdata/scalacTests/pos/t3582.scala | Scala | apache-2.0 | 695 |
package edu.gemini.spModel.dataset
import edu.gemini.spModel.core.catchingNonFatal
import edu.gemini.spModel.pio.xml.PioXmlFactory
import edu.gemini.spModel.pio.{PioParseException, ParamSet, Param}
import edu.gemini.spModel.pio.codec._
import java.time.temporal.{TemporalAccessor, TemporalQuery}
import java.time.{ZoneId, Instant}
import java.time.format.DateTimeFormatter
import java.util.UUID
import scalaz._, Scalaz._
object DatasetCodecs {

  /** Renders a [[PioError]] as a human-readable message. */
  def explainPioError(e: PioError): String = e match {
    case MissingKey(n) => s"Missing key $n"
    case NullValue(n) => s"Null value for key $n"
    case ParseError(n, v, d) => s"Problem parsing $n, of type $d: $v"
    case UnknownTag(t, d) => s"Encountered unknown tag $t while parsing a $d"
    case GeneralError(d) => s"Problem while parsing a $d"
  }

  /** Decodes a param set to the expected type or else throws a
    * `PioParseException`. This is sometimes useful when working with pre-codec
    * PIO code.
    */
  def unsafeDecode[A](ps: ParamSet)(implicit psc: ParamSetCodec[A]): A =
    psc.decode(ps).valueOr(e => throw new PioParseException(explainPioError(e)))

  // Codec bridging to the legacy Dataset PIO constructor/serializer; decoding
  // failures surface as ParseError rather than propagating the exception.
  implicit val ParamSetCodecDataset: ParamSetCodec[Dataset] =
    new ParamSetCodec[Dataset] {
      val pf = new PioXmlFactory
      def encode(key: String, a: Dataset): ParamSet =
        a.toParamSet(pf) <| (_.setName(key))
      def decode(ps: ParamSet): PioError \\/ Dataset =
        catchingNonFatal {
          new Dataset(ps)
        }.leftMap(ex => ParseError(ps.getName, ex.getMessage, "Dataset"))
    }

  // QA state round-trips through its `name`; unknown names decode to ParseError.
  implicit val ParamCodecDatasetQaState: ParamCodec[DatasetQaState] =
    new ParamCodec[DatasetQaState] {
      def encode(key: String, a: DatasetQaState): Param =
        ParamCodec[String].encode(key, a.name)
      def decode(p: Param): PioError \\/ DatasetQaState =
        ParamCodec[String].decode(p).flatMap { s =>
          Option(DatasetQaState.parseType(s)) \\/> ParseError(p.getName, s, "DatasetQaState")
        }
    }

  // Instants are stored as ISO-8601 UTC strings ("Z" zone).
  // Not implicit — callers opt in explicitly.
  val ParamCodecInstant: ParamCodec[Instant] =
    new ParamCodec[Instant] {
      val dtf = DateTimeFormatter.ISO_INSTANT.withZone(ZoneId.of("Z"))
      def encode(key: String, a: Instant): Param =
        ParamCodec[String].encode(key, dtf.format(a))
      def decode(p: Param): PioError \\/ Instant =
        ParamCodec[String].decode(p).flatMap { s =>
          catchingNonFatal {
            dtf.parse(s, new TemporalQuery[Instant]() {
              override def queryFrom(ta: TemporalAccessor): Instant = Instant.from(ta)
            })
          }.leftMap(_ => ParseError(p.getName, s, "Instant"))
        }
    }

  // UUIDs are stored via toString / UUID.fromString. Not implicit, as above.
  val ParamCodecUuid: ParamCodec[UUID] =
    new ParamCodec[UUID] {
      def encode(key: String, a: UUID): Param =
        ParamCodec[String].encode(key, a.toString)
      def decode(p: Param): PioError \\/ UUID =
        ParamCodec[String].decode(p).flatMap { s =>
          catchingNonFatal {
            UUID.fromString(s)
          }.leftMap(_ => ParseError(p.getName, s, "UUID"))
        }
    }
}
| arturog8m/ocs | bundle/edu.gemini.pot/src/main/scala/edu/gemini/spModel/dataset/DatasetCodecs.scala | Scala | bsd-3-clause | 3,048 |
package tryp
package state
package core
import shapeless.{HList, Coproduct}
import cats.Functor
import cats.data.WriterT
import org.log4s.Logger
/** Root of everything a state transition may produce. */
trait TransResultElem

/** A result element that can be sent onward (to other machines/consumers). */
trait Sendable
extends TransResultElem

/** A result element that can be fed back into the state loop. */
sealed trait LoopSendable
extends TransResultElem

/** A plain message: both sendable and loopable. */
trait Message
extends Sendable
with LoopSendable

object Message
{
  // Extractor so any TransResultElem can be pattern-matched as a Message.
  def unapply(e: TransResultElem): Option[Message] = {
    e match { case m: Message => Some(m) case _ => None }
  }
}

/** A message carrying no information (no-op). */
case object NopMessage
extends Message

trait InternalMessage
extends Message

/** Messages interpreted by the loop machinery itself (e.g. Exit). */
trait ControlMessage
extends InternalMessage

/** Sendable that targets only the local machine. */
trait LocalMessage
extends Sendable

/** Wraps a message for local-only delivery. */
case class Local[A <: Message](msg: A)
extends LocalMessage

// Zero-allocation syntax for messages: `msg.broadcast` / `msg.local`.
final class MessageOps[A <: Message](val self: A)
extends AnyVal
{
  def broadcast = Broadcast(self)
  def local = Local(self)
}

trait ToMessageOps
{
  implicit def ToMessageOps[A <: Message](x: A): MessageOps[A] = new MessageOps(x)
}

/** An effect to be executed by the loop. */
trait StateIO
extends LoopSendable

object StateIO
{
  def apply[A](action: A): StateIO = SimpleIO(action)

  // Extractor mirroring Message.unapply, for StateIO elements.
  def unapply(e: TransResultElem): Option[StateIO] = {
    e match { case m: StateIO => Some(m) case _ => None }
  }
}

/** An effect wrapping an arbitrary action value. */
case class SimpleIO[A](action: A)
extends StateIO

/** A synchronous IO task producing a follow-up LoopSendable. */
case class IOStateIO(task: IO[LoopSendable], desc: String)
extends StateIO

/** Like IOStateIO, but intended to be run asynchronously (forked). */
case class IOAsyncStateIO(task: IO[LoopSendable], desc: String)
extends StateIO

/** Effect that does nothing. */
object NopIO
extends StateIO

/** An effect whose payload is a control message for the loop. */
trait ControlIO
extends StateIO
{
  def msg: ControlMessage
}

case class SimpleControlIO(msg: ControlMessage)
extends ControlIO

object ControlIO
{
  def apply(msg: ControlMessage): ControlIO = SimpleControlIO(msg)
  def unapply(io: ControlIO) = Some(io.msg)
}

/** Control effect that terminates the loop (wraps the `Exit` message defined elsewhere). */
object ExitLoop
extends ControlIO
{
  def msg = Exit
}

/** Effect that delivers a message to all machines. */
case class Broadcast(msg: Message)
extends StateIO

/** Effect that logs an error message. */
case class LogError(msg: String)
extends StateIO
/**
 * A (possibly partial) transition from the current cell state to a result R.
 * `lift` attempts to apply the transition to a state, returning None when it
 * does not apply; success/failure is logged either way via `success`.
 */
sealed trait Trans[R]
extends Logging
{
  def name: String
  def repr: String

  /** Attempts the transition on the given state. */
  def lift(s: CState): Option[R]

  /** `lift` lifted into the writer-transformed cell context. */
  def liftW(s: CState): CellWO[R] = WriterT.value(lift(s))

  override def toString = repr

  // Logs whether the transition fired on state s, then passes the result through.
  def success(r: Option[R], s: CState): Option[R] =
    r.tap(Trans.logState(_, s, repr, name))

  def flatMapO[R1](f: R => Option[R1]): Trans[R1] =
    Trans.flatMapO(this)(f)
}

object Trans
extends TransInstances
{
  /** Transition that always yields `result`. */
  case class Strict[R](result: R, repr: String, name: String)
  extends Trans[R]
  {
    def lift(s: CState) = success(Some(result), s)
  }

  /** Transition with a fixed optional result. */
  case class Opt[R](result: Option[R], repr: String, name: String)
  extends Trans[R]
  {
    def lift(s: CState): Option[R] = success(result, s)
  }

  /** Transition defined by a state-dependent partial computation. */
  case class Partial[R](trans: CState => Option[R], repr: String, name: String)
  extends Trans[R]
  {
    def lift(s: CState): Option[R] = success(trans(s), s)
  }

  /** Transition that never fires. */
  case class Terminal[R](name: String)
  extends Trans[R]
  {
    def repr = "<terminal>"
    def lift(s: CState) = None
  }

  // Debug-logs a tick for a fired transition (r is Some) or a cross otherwise.
  // NB: R is instantiated with an Option here — see `success` above.
  def logState[R](r: R, s: CState, repr: String, name: String)(implicit log: Logger) =
    r match {
      case Some(_) =>
        log.debug(s"${"✓".green} ${s.yellowString} @ ${name.blue}")
      case _ =>
        log.debug(s"${"✗".red} ${s.yellowString} for ${repr.blue} @ ${name.blue}")
    }

  def terminal[R](name: String): Trans[R] = Terminal(name)

  def partial[R](repr: String, name: String)(pf: PartialFunction[CState, R]) = Partial(pf.lift, repr, name)

  // flatMap into Option, preserving the concrete Trans shape where possible.
  def flatMapO[A, B](fa: Trans[A])(f: A => Option[B]): Trans[B] =
    fa match {
      case Strict(r, repr, name) => Opt(f(r), repr, name)
      case x @ Opt(r, _, _) => x.copy(result = r.flatMap(f))
      case x @ Partial(trans, _, _) => x.copy(trans andThen (r => r.flatMap(f)))
      case Terminal(n) => Terminal(n)
    }
}
trait TransInstances
{
  import Trans._

  // Functor instance mapping over a transition's eventual result, preserving
  // the concrete Trans shape (case-class copy is polymorphic in R).
  implicit val Functor_Trans: Functor[Trans] =
    new Functor[Trans] {
      def map[A, B](fa: Trans[A])(f: A => B) =
        fa match {
          case x @ Strict(a, _, _) => x.copy(result = f(a))
          case x @ Opt(a, _, _) => x.copy(result = a.map(f))
          case x @ Partial(trans, _, _) => x.copy(trans = trans andThen (a => a.map(f)))
          case Terminal(n) => Terminal(n)
        }
    }
}

// Syntax for IO tasks producing LoopSendables: `.io(desc)` wraps synchronously,
// `.forkIO(desc)` marks the task for asynchronous execution.
final class LoopSendableTaskOps[A <: LoopSendable](val self: IO[A])
extends AnyVal
{
  def io(desc: String): IOStateIO = IOStateIO(self, desc)
  def forkIO(desc: String): IOAsyncStateIO = IOAsyncStateIO(self, desc)
}

trait ToLoopSendableTaskOps
{
  implicit def ToLoopSendableTaskOps[A <: LoopSendable](x: IO[A]): LoopSendableTaskOps[A] = new LoopSendableTaskOps(x)
}
| tek/pulsar | state-core/src/data.scala | Scala | mit | 4,425 |
package arena.game.gameoflife
import scala.swing._
import scala.swing.event._
object SwingMain extends SimpleSwingApplication {
  def top = new MainFrame {
    title = "Conway's Game of Life"

    // Toolbar controls: run/pause toggle, speed +/- and board randomization.
    val startStopButton = new Button("Stop")
    val fasterButton = new Button("+")
    val slowerButton = new Button("-")
    val randomButton = new Button("Random")
    val gameGrid = new GameGrid

    contents = new BorderPanel() {
      import BorderPanel._
      add(new FlowPanel {
        contents += startStopButton
        contents += fasterButton
        contents += slowerButton
        contents += randomButton
      }, Position.North)
      add(gameGrid, Position.Center)
      border = Swing.EmptyBorder(10, 10, 10, 10)
    }

    // Advance the board one generation before first display.
    gameGrid._board = gameGrid._board.evolve()

    listenTo(startStopButton)
    listenTo(randomButton)
    listenTo(fasterButton)
    listenTo(slowerButton)
    listenTo(gameGrid)

    // UI events are translated into messages sent (`!`) to the grid,
    // which runs its simulation loop independently.
    reactions += {
      case WindowOpened(_) => gameGrid.start
      case WindowClosed(_) => gameGrid ! Exit
      case ButtonClicked(`randomButton`) => gameGrid ! Randomize
      case ButtonClicked(`fasterButton`) => gameGrid ! AdjustSpeed(50)
      case ButtonClicked(`slowerButton`) => gameGrid ! AdjustSpeed(-50)
      case ButtonClicked(`startStopButton`) => {
        // The button label doubles as the current-mode flag.
        startStopButton.text match {
          case "Stop" => gameGrid ! SetUpdating(false); startStopButton.text = "Start"
          case _ => gameGrid ! SetUpdating(true); startStopButton.text = "Stop"
        }
      }
      case UIElementResized(`gameGrid`) => {
        val gWidth = gameGrid.size.width;
        val gHeight = gameGrid.size.height;
        // Adjust size if the gameGrid is not square
        // (shrink the frame on the longer axis so the grid stays square).
        if (gWidth < gHeight) {
          self.setSize(size.width, size.height - gHeight + gWidth)
        } else if (gameGrid.size.width > gameGrid.size.height) {
          self.setSize(size.width - gWidth + gHeight, size.height)
        }
      }
    }
  }
}
| quedexco/arena-scala | src/main/scala/arena/game/gameoflife/SwingMain.scala | Scala | apache-2.0 | 1,959 |
package text.parser
import text.normalizer._
import text.{StringNone, StringOption, StringSome}
import util.Config
import util.primitive._
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.io.Source
import scala.util.matching.Regex
import scala.util.matching.Regex.Match
/**
* @author ynupc
* Created on 2016/02/21
*/
/**
 * Parses sentences into a tree of quoted sub-sentences: each quoted span
 * (delimited by the pairs listed in parser/quotation.csv) is replaced in its
 * parent sentence by a unique placeholder noun and parsed recursively.
 */
object SentenceQuotationParser {
  /** An (opening mark, closing mark) pair. */
  private type Quotation = (String, String)
  /** A quotation pair together with the parsed sentence it enclosed. */
  private type QuotationSentence = (Quotation, QuotedSentence)

  // Quotation pairs loaded once from parser/quotation.csv ("open,close" per
  // line; malformed lines are skipped). FIX: the source is now closed after
  // reading instead of leaking the file handle.
  private final val quotations: Seq[Quotation] = {
    val buffer = ListBuffer.empty[Quotation]
    val source = Source.fromFile(
      Config.resourceFile("parser", "quotation.csv").toFile
    )
    try {
      source.getLines foreach {
        line: String =>
          val columns: Array[String] = line.trim.split(',')
          if (2 <= columns.length) {
            val headOpt: StringOption = StringOption(columns.head.trim)
            val lastOpt: StringOption = StringOption(columns.last.trim)
            if (headOpt.nonEmpty && lastOpt.nonEmpty) {
              buffer += ((headOpt.get, lastOpt.get))
            }
          }
      }
    } finally {
      source.close()
    }
    buffer.result
  }

  // Returns a placeholder noun not yet occurring in text.
  // Throws NoSuchElementException when all candidates are exhausted.
  private def getUnContainedNoun(text: String, nouns: Seq[String]): String = {
    nouns foreach {
      noun: String =>
        if (!text.contains(noun)) {
          return noun
        }
    }
    throw new NoSuchElementException("SentenceQuotationParser.getUnContainedNoun")
  }

  /** Parses a sentence into its quotation tree; None for an empty input. */
  def parse(sentenceOpt: StringOption): Option[QuotedSentence] = {
    sentenceOpt match {
      case StringSome(sentence) =>
        Option(parse(sentence, ListBuffer.empty[String] ++ EscapeNoun.objects))
      case StringNone =>
        None
    }
  }

  // Finds the leftmost quoted span over all known quotation pairs, returning
  // the pair and the [start, end) range of the match (marks included).
  private def getFirstMatchOpt(sentence: String): Option[(Quotation, Range)] = {
    val quotationRangeBuffer = ListBuffer.empty[(Quotation, Range)]
    var firstMatchOpt: Option[(Quotation, Range)] = None
    quotations foreach {
      quotation: Quotation =>
        val regex: Regex = ".+".quote(quotation).r
        regex.findFirstMatchIn(sentence) foreach {
          m: Match =>
            quotationRangeBuffer += ((quotation, Range(m.start, m.end)))
        }
    }
    if (quotationRangeBuffer.nonEmpty) {
      quotationRangeBuffer foreach {
        case (quotation, range) =>
          if (firstMatchOpt.isEmpty || (range.start < firstMatchOpt.get._2.start)) {
            firstMatchOpt = Some((quotation, range))
          }
        case _ =>
        //Do nothing
      }
    }
    firstMatchOpt
  }

  // Repeatedly replaces the leftmost quoted span with a fresh placeholder noun
  // and recurses into the span's inner text. `nouns` is shared (and depleted)
  // across the whole recursion so placeholders stay globally unique.
  private def parse(sentence: String, nouns: ListBuffer[String]): QuotedSentence = {
    var parentSentence: String = sentence
    val childrenSentences = mutable.Map.empty[String, QuotationSentence]
    var firstMatchOpt: Option[(Quotation, Range)] = getFirstMatchOpt(parentSentence)
    while (firstMatchOpt.nonEmpty) {
      val (quotation, range): (Quotation, Range) = firstMatchOpt.get
      val start: Int = range.start
      val end: Int = range.end
      val prefix: String = parentSentence.substring(0, start)
      // FIX: strip the quotation marks from the stored child. toString
      // re-quotes the child, and recursing on a still-quoted span would match
      // the same span again and never terminate.
      val quotedPart: String =
        parentSentence.substring(start + quotation._1.length, end - quotation._2.length)
      val suffix: String = parentSentence.substring(end)
      val noun: String = getUnContainedNoun(parentSentence, nouns)
      nouns -= noun
      parentSentence = noun.quote((prefix, suffix))
      childrenSentences.put(noun, (quotation, parse(quotedPart, nouns)))
      // FIX: re-scan the rewritten parentSentence, not the original sentence;
      // scanning the original re-found the same (stale) match forever.
      firstMatchOpt = getFirstMatchOpt(parentSentence)
    }
    new QuotedSentence(parentSentence, childrenSentences.toMap)
  }

  /**
   * A sentence whose quoted spans were replaced by placeholder nouns, plus the
   * parsed sub-sentences keyed by those placeholders.
   */
  class QuotedSentence(val parentSentence: String,
                       val childrenSentences: Map[String, QuotationSentence]) {
    // Reconstructs the text by substituting each placeholder with its
    // re-quoted sub-sentence.
    override def toString: String = {
      var text: String = parentSentence
      childrenSentences foreach {
        case (replacement, (quotation, sentence)) =>
          text = text.replaceAllLiteratim(
            replacement,
            sentence.toString.quote(quotation))
        case _ =>
        //Do nothing
      }
      text
    }
  }

  /** Splits Japanese text into sentences and parses each one's quotations. */
  def splitAndQuotationParseJapaneseText(textOpt: StringOption): Seq[NormalizedQuotedSentence] = {
    val buffer = ListBuffer.empty[NormalizedQuotedSentence]
    JapaneseSentenceSplitter.split(textOpt) foreach {
      sentence =>
        parse(StringOption(sentence.text)) match {
          case Some(quotedSentence) =>
            buffer += new NormalizedQuotedSentence(sentence.originalText, quotedSentence)
          case None =>
          //Do nothing
        }
    }
    buffer.result
  }

  /** A parsed sentence paired with its pre-normalization original text. */
  class NormalizedQuotedSentence(val originalText: String,
                                 val quotedSentence: QuotedSentence) {
    override def toString: String = quotedSentence.toString
  }
}
| ynupc/scalastringcourseday7 | src/main/scala/text/parser/SentenceQuotationParser.scala | Scala | apache-2.0 | 4,674 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.utils
import org.apache.flink.table.functions.TableAggregateFunction
import org.apache.flink.api.java.tuple.{Tuple2 => JTuple2}
import java.lang.{Integer => JInt}
import java.lang.{Iterable => JIterable}
import java.sql.Timestamp
import java.util
import org.apache.flink.table.api.Types
import org.apache.flink.table.api.dataview.MapView
import org.apache.flink.table.functions.TableAggregateFunction.RetractableCollector
import org.apache.flink.util.Collector
import scala.collection.mutable.ListBuffer
// Accumulator for Top3*: `data` maps a kept value to its multiplicity,
// `size` counts the kept values (at most 3), `smallest` is the minimum kept.
class Top3Accum {
  var data: util.HashMap[JInt, JInt] = _
  var size: JInt = _
  var smallest: JInt = _
}
/**
 * Table aggregate emitting, as (v, v) pairs, the three largest values seen so
 * far (duplicates kept).
 *
 * Note: This function suffers performance problem. Only use it in tests.
 */
class Top3 extends TableAggregateFunction[JTuple2[JInt, JInt], Top3Accum] {

  override def createAccumulator(): Top3Accum = {
    val acc = new Top3Accum
    acc.data = new util.HashMap[JInt, JInt]()
    acc.size = 0
    acc.smallest = Integer.MAX_VALUE
    acc
  }

  // Adds one occurrence of v to the kept multiset (does not update `smallest`).
  def add(acc: Top3Accum, v: Int): Unit = {
    var cnt = acc.data.get(v)
    acc.size += 1
    if (cnt == null) {
      cnt = 0
    }
    acc.data.put(v, cnt + 1)
  }

  // Removes one occurrence of v, if present (does not update `smallest`).
  def delete(acc: Top3Accum, v: Int): Unit = {
    if (acc.data.containsKey(v)) {
      acc.size -= 1
      val cnt = acc.data.get(v) - 1
      if (cnt == 0) {
        acc.data.remove(v)
      } else {
        acc.data.put(v, cnt)
      }
    }
  }

  // Recomputes `smallest` as the minimum key currently kept.
  def updateSmallest(acc: Top3Accum): Unit = {
    acc.smallest = Integer.MAX_VALUE
    val keys = acc.data.keySet().iterator()
    while (keys.hasNext) {
      val key = keys.next()
      if (key < acc.smallest) {
        acc.smallest = key
      }
    }
  }

  // Below capacity: just add. At capacity: when v beats the current minimum,
  // evict one occurrence of the minimum and add v.
  def accumulate(acc: Top3Accum, v: Int) {
    if (acc.size == 0) {
      acc.size = 1
      acc.smallest = v
      acc.data.put(v, 1)
    } else if (acc.size < 3) {
      add(acc, v)
      if (v < acc.smallest) {
        acc.smallest = v
      }
    } else if (v > acc.smallest) {
      delete(acc, acc.smallest)
      add(acc, v)
      updateSmallest(acc)
    }
  }

  // Merge by replaying every kept occurrence of the other accumulators.
  def merge(acc: Top3Accum, its: JIterable[Top3Accum]): Unit = {
    val iter = its.iterator()
    while (iter.hasNext) {
      val map = iter.next().data
      val mapIter = map.entrySet().iterator()
      while (mapIter.hasNext) {
        val entry = mapIter.next()
        for (_ <- 0 until entry.getValue) {
          accumulate(acc, entry.getKey)
        }
      }
    }
  }

  // Emits every kept value (respecting multiplicity) as a (v, v) pair.
  def emitValue(acc: Top3Accum, out: Collector[JTuple2[JInt, JInt]]): Unit = {
    val entries = acc.data.entrySet().iterator()
    while (entries.hasNext) {
      val pair = entries.next()
      for (_ <- 0 until pair.getValue) {
        out.collect(JTuple2.of(pair.getKey, pair.getKey))
      }
    }
  }
}
// Accumulator variant of Top3Accum whose multiset is backed by a Flink
// state MapView instead of a plain HashMap.
class Top3WithMapViewAccum {
  var data: MapView[JInt, JInt] = _
  var size: JInt = _
  var smallest: JInt = _
}

// Top3 variant that additionally records incremental changes: values added to
// / evicted from the top-3 since the last emit are buffered in the mutable
// instance-level `add` / `retract` lists (test-only; not thread-safe).
// Note: the `add` buffer coexists with the inherited `add(acc, v)` method —
// calls with two arguments resolve to the method, bare `add` is the buffer.
class Top3WithEmitRetractValue extends Top3 {
  val add: ListBuffer[Int] = new ListBuffer[Int]
  val retract: ListBuffer[Int] = new ListBuffer[Int]

  // Same keep-the-3-largest logic as Top3.accumulate, with every kept value
  // appended to `add` and every eviction appended to `retract`.
  override def accumulate(acc: Top3Accum, v: Int) {
    if (acc.size == 0) {
      acc.size = 1
      acc.smallest = v
      acc.data.put(v, 1)
      add.append(v)
    } else if (acc.size < 3) {
      add(acc, v)
      if (v < acc.smallest) {
        acc.smallest = v
      }
      add.append(v)
    } else if (v > acc.smallest) {
      delete(acc, acc.smallest)
      retract.append(acc.smallest)
      add(acc, v)
      add.append(v)
      updateSmallest(acc)
    }
  }

  // Flushes buffered retractions, then additions, and clears both buffers.
  def emitUpdateWithRetract(
      acc: Top3Accum,
      out: RetractableCollector[JTuple2[JInt, JInt]])
    : Unit = {
    retract.foreach(e => out.retract(JTuple2.of(e, e)))
    add.foreach(e => out.collect(JTuple2.of(e, e)))
    retract.clear()
    add.clear()
  }
}
/**
 * Top3 re-implemented against a state-backed MapView accumulator, with an
 * explicit `retract` for retraction streams.
 *
 * Note: This function suffers performance problem. Only use it in tests.
 */
class Top3WithMapView extends TableAggregateFunction[JTuple2[JInt, JInt], Top3WithMapViewAccum] {

  @Override
  def createAccumulator(): Top3WithMapViewAccum = {
    val acc = new Top3WithMapViewAccum
    acc.data = new MapView(Types.INT, Types.INT)
    acc.size = 0
    acc.smallest = Integer.MAX_VALUE
    acc
  }

  // Adds one occurrence of v (does not update `smallest`).
  def add(acc: Top3WithMapViewAccum, v: Int): Unit = {
    var cnt = acc.data.get(v)
    acc.size += 1
    if (cnt == null) {
      cnt = 0
    }
    acc.data.put(v, cnt + 1)
  }

  // Removes one occurrence of v, if present (does not update `smallest`).
  def delete(acc: Top3WithMapViewAccum, v: Int): Unit = {
    if (acc.data.contains(v)) {
      acc.size -= 1
      val cnt = acc.data.get(v) - 1
      if (cnt == 0) {
        acc.data.remove(v)
      } else {
        acc.data.put(v, cnt)
      }
    }
  }

  // Recomputes `smallest` by scanning the MapView's keys.
  def updateSmallest(acc: Top3WithMapViewAccum): Unit = {
    acc.smallest = Integer.MAX_VALUE
    val keys = acc.data.iterator
    while (keys.hasNext) {
      val pair = keys.next()
      if (pair.getKey < acc.smallest) {
        acc.smallest = pair.getKey
      }
    }
  }

  // Keep-the-3-largest logic, mirroring Top3.accumulate.
  def accumulate(acc: Top3WithMapViewAccum, v: Int) {
    if (acc.size == 0) {
      acc.size = 1
      acc.smallest = v
      acc.data.put(v, 1)
    } else if (acc.size < 3) {
      add(acc, v)
      if (v < acc.smallest) {
        acc.smallest = v
      }
    } else if (v > acc.smallest) {
      delete(acc, acc.smallest)
      add(acc, v)
      updateSmallest(acc)
    }
  }

  // Retraction: drop one occurrence of v and refresh the minimum.
  def retract(acc: Top3WithMapViewAccum, v: Int) {
    delete(acc, v)
    updateSmallest(acc)
  }

  // Emits every kept value (respecting multiplicity) as a (v, v) pair.
  def emitValue(acc: Top3WithMapViewAccum, out: Collector[JTuple2[JInt, JInt]]): Unit = {
    val keys = acc.data.iterator
    while (keys.hasNext) {
      val pair = keys.next()
      for (_ <- 0 until pair.getValue) {
        out.collect(JTuple2.of(pair.getKey, pair.getKey))
      }
    }
  }
}
/**
 * Test function for plan test.
 *
 * Declares several accumulate overloads but deliberately no emitValue —
 * presumably so planner validation of the missing-emit case can be exercised
 * (confirm against the plan tests using it).
 */
class EmptyTableAggFuncWithoutEmit extends TableAggregateFunction[JTuple2[JInt, JInt], Top3Accum] {
  override def createAccumulator(): Top3Accum = new Top3Accum
  // Overloads with different parameter signatures; all intentionally no-ops.
  def accumulate(acc: Top3Accum, category: Long, value: Timestamp): Unit = {}
  def accumulate(acc: Top3Accum, category: Long, value: Int): Unit = {}
  def accumulate(acc: Top3Accum, value: Int): Unit = {}
}
/** Same as [[EmptyTableAggFuncWithoutEmit]], but with a no-op emitValue. */
class EmptyTableAggFunc extends EmptyTableAggFuncWithoutEmit {
  def emitValue(acc: Top3Accum, out: Collector[JTuple2[JInt, JInt]]): Unit = {}
}
/** Plan-test function whose emitted result type is a plain Integer. */
class EmptyTableAggFuncWithIntResultType extends TableAggregateFunction[JInt, Top3Accum] {
  override def createAccumulator(): Top3Accum = new Top3Accum
  def accumulate(acc: Top3Accum, value: Int): Unit = {}
  def emitValue(acc: Top3Accum, out: Collector[JInt]): Unit = {}
}
| jinglining/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/utils/UserDefinedTableAggFunctions.scala | Scala | apache-2.0 | 7,292 |
package mesosphere.marathon
import org.rogach.scallop.ScallopConf
import java.net.InetSocketAddress
import mesosphere.util.BackToTheFuture
import scala.concurrent.duration._
/** Scallop configuration mixin holding all ZooKeeper related settings. */
trait ZookeeperConf extends ScallopConf {
  // user:password section of a zk:// URL (anything up to '@' or '/')
  private val userAndPass = """[^/@]+"""
  // Host (name or IPv4) with an optional :port.
  // Fixes two bugs in the previous pattern """[A-z0-9-.]+(?::\\d+)?""":
  //  * the range [A-z] also matched '[', '\', ']', '^', '_' and '`';
  //  * "\\d" inside a triple-quoted (raw) string is a literal backslash
  //    followed by 'd', so URLs with an explicit port never matched.
  private val hostAndPort = """[A-Za-z0-9.-]+(?::\d+)?"""
  // one path segment of the chroot path
  private val zkNode = """[^/]+"""
  // zk://[user:pass@]host[:port][,host[:port]...]/node[/node...]
  // group 1 = comma-separated server list, group 2 = path
  private val zkURLPattern = s"""^zk://(?:$userAndPass@)?($hostAndPort(?:,$hostAndPort)*)(/$zkNode(?:/$zkNode)*)$$""".r
  @Deprecated
  lazy val zooKeeperHostString = opt[String]("zk_hosts",
    descr = "[DEPRECATED use zk] The list of ZooKeeper servers for storing state",
    default = Some("localhost:2181"))
  @Deprecated
  lazy val zooKeeperPath = opt[String]("zk_state",
    descr = "[DEPRECATED use zk] Path in ZooKeeper for storing state",
    default = Some("/marathon"))
  lazy val zooKeeperTimeout = opt[Long]("zk_timeout",
    descr = "The timeout for ZooKeeper in milliseconds",
    default = Some(10000L))
  lazy val zooKeeperUrl = opt[String]("zk",
    descr = "ZooKeeper URL for storing state. Format: zk://host1:port1,host2:port2,.../path",
    validate = (in) => zkURLPattern.pattern.matcher(in).matches()
  )
  lazy val zooKeeperMaxVersions = opt[Int]("zk_max_versions",
    descr = "Limit the number of versions, stored for one entity.",
    default = Some(25)
  )
  //do not allow mixing of hostState and url
  conflicts(zooKeeperHostString, List(zooKeeperUrl))
  conflicts(zooKeeperPath, List(zooKeeperUrl))
  conflicts(zooKeeperUrl, List(zooKeeperHostString, zooKeeperPath))
  def zooKeeperStatePath(): String = "%s/state".format(zkPath)
  def zooKeeperLeaderPath(): String = "%s/leader".format(zkPath)
  def zooKeeperServerSetPath(): String = "%s/apps".format(zkPath)
  /**
   * All configured servers as socket addresses.
   * NOTE(review): every entry must carry an explicit host:port — entries
   * without a port fail the require below.
   */
  def zooKeeperHostAddresses: Seq[InetSocketAddress] =
    for (s <- zkHosts.split(",")) yield {
      val splits = s.split(":")
      require(splits.length == 2, "expected host:port for zk servers")
      new InetSocketAddress(splits(0), splits(1).toInt)
    }
  /** Effective URL: --zk when given, otherwise built from the deprecated options. */
  def zkURL(): String = zooKeeperUrl.get.getOrElse(s"zk://${zooKeeperHostString()}${zooKeeperPath()}")
  // Derived pieces of the effective URL (MatchError if zkURL is malformed).
  lazy val zkHosts = zkURL match { case zkURLPattern(server, _) => server }
  lazy val zkPath = zkURL match { case zkURLPattern(_, path) => path }
  lazy val zkTimeoutDuration = Duration(zooKeeperTimeout(), MILLISECONDS)
  lazy val zkFutureTimeout = BackToTheFuture.Timeout(zkTimeoutDuration)
}
| quamilek/marathon | src/main/scala/mesosphere/marathon/ZookeeperConf.scala | Scala | apache-2.0 | 2,412 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
/**
* All doc-comments marked as "MDN" are by Mozilla Contributors,
* distributed under the Creative Commons Attribution-ShareAlike license from
* https://developer.mozilla.org/en-US/docs/Web/Reference/API
*/
package scala.scalajs.js
import scala.language.implicitConversions
import scala.scalajs.js
import scala.collection.mutable
import scala.collection.generic.CanBuildFrom
/** Root of the hierarchy of JavaScript types.
*
* Subtypes of [[Any js.Any]] are JavaScript types, which have different
* semantics and guarantees than Scala types (subtypes of [[AnyRef]] and
* [[AnyVal]]). Operations on JavaScript types behave as the corresponding
* operations in the JavaScript language.
*
* You can implement JavaScript types in Scala.js. The implementation
* (i.e., the method and constructor bodies) will follow Scala semantics, but
* the constructor and methods will be called using JavaScript semantics
* (e.g., runtime dispatch).
*
* A JavaScript type that is annotated with [[native @js.native]] is a facade
* type to APIs implemented in JavaScript code. Its implementation is
* irrelevant and never emitted. As such, all members must be defined with
* their right-hand-side being [[native js.native]].
* Further, native JavaScript types must be annotated with one of
* [[annotation.JSGlobal @JSGlobal]], [[annotation.JSImport @JSImport]],
* [[annotation.JSGlobalScope @JSGlobalScope]] to specify where to fetch it
* from.
*
* In most cases, you should not directly extend this trait, but rather extend
* [[Object js.Object]].
*
* It is not possible to define traits or classes that inherit both from this
* trait and a strict subtype of [[AnyRef]]. In fact, you should think of
* [[Any js.Any]] as a third direct subclass of [[scala.Any]], besides
* [[scala.AnyRef]] and [[scala.AnyVal]].
*
* See the [[http://www.scala-js.org/doc/js-interoperability.html JavaScript
* interoperability guide]] of Scala.js for more details.
*/
trait Any extends scala.AnyRef // no members: purely a marker for the type system
/** Provides implicit conversions from Scala values to JavaScript values. */
object Any extends js.LowPrioAnyImplicits {
  // --- Conversions from Scala primitives (identity casts at runtime) ---
  @inline implicit def fromUnit(value: Unit): js.Any =
    value.asInstanceOf[js.Any]
  @inline implicit def fromBoolean(value: Boolean): js.Any =
    value.asInstanceOf[js.Any]
  @inline implicit def fromByte(value: Byte): js.Any =
    value.asInstanceOf[js.Any]
  @inline implicit def fromShort(value: Short): js.Any =
    value.asInstanceOf[js.Any]
  @inline implicit def fromInt(value: Int): js.Any =
    value.asInstanceOf[js.Any]
  @inline implicit def fromFloat(value: Float): js.Any =
    value.asInstanceOf[js.Any]
  @inline implicit def fromDouble(value: Double): js.Any =
    value.asInstanceOf[js.Any]
  @inline implicit def fromString(s: String): js.Any =
    s.asInstanceOf[js.Any]
  /* The following overload makes sure that the developer does not
   * inadvertently convert a Long to a Double to fit it in a js.Any.
   */
  @deprecated(
      "A Long is converted to Double to be cast to js.Any. " +
      "This is almost certainly not what you want. " +
      "Use `.toDouble` explicitly if you need it.",
      "forever")
  @inline
  implicit def fromLong(value: Long): js.Any =
    value.toDouble.asInstanceOf[js.Any]
  // --- Scala collection-style operations on js.Array ---
  implicit def jsArrayOps[A](array: js.Array[A]): js.ArrayOps[A] =
    new js.ArrayOps(array)
  implicit def canBuildFromArray[A]: CanBuildFrom[Array[_], A, js.Array[A]] = {
    @inline
    class CanBuildFromArray extends CanBuildFrom[Array[_], A, js.Array[A]] {
      def apply(from: js.Array[_]): mutable.Builder[A, js.Array[A]] =
        new js.ArrayOps[A]
      def apply(): mutable.Builder[A, js.Array[A]] =
        new js.ArrayOps[A]
    }
    new CanBuildFromArray
  }
  // --- Generated arity conversions between Scala and JS function types ---
  // scalastyle:off line.size.limit
  implicit def fromFunction0[R](f: scala.Function0[R]): js.Function0[R] = () => f()
  implicit def fromFunction1[T1, R](f: scala.Function1[T1, R]): js.Function1[T1, R] = (x1: T1) => f(x1)
  implicit def fromFunction2[T1, T2, R](f: scala.Function2[T1, T2, R]): js.Function2[T1, T2, R] = (x1: T1, x2: T2) => f(x1, x2)
  implicit def fromFunction3[T1, T2, T3, R](f: scala.Function3[T1, T2, T3, R]): js.Function3[T1, T2, T3, R] = (x1: T1, x2: T2, x3: T3) => f(x1, x2, x3)
  implicit def fromFunction4[T1, T2, T3, T4, R](f: scala.Function4[T1, T2, T3, T4, R]): js.Function4[T1, T2, T3, T4, R] = (x1: T1, x2: T2, x3: T3, x4: T4) => f(x1, x2, x3, x4)
  implicit def fromFunction5[T1, T2, T3, T4, T5, R](f: scala.Function5[T1, T2, T3, T4, T5, R]): js.Function5[T1, T2, T3, T4, T5, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5) => f(x1, x2, x3, x4, x5)
  implicit def fromFunction6[T1, T2, T3, T4, T5, T6, R](f: scala.Function6[T1, T2, T3, T4, T5, T6, R]): js.Function6[T1, T2, T3, T4, T5, T6, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6) => f(x1, x2, x3, x4, x5, x6)
  implicit def fromFunction7[T1, T2, T3, T4, T5, T6, T7, R](f: scala.Function7[T1, T2, T3, T4, T5, T6, T7, R]): js.Function7[T1, T2, T3, T4, T5, T6, T7, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7) => f(x1, x2, x3, x4, x5, x6, x7)
  implicit def fromFunction8[T1, T2, T3, T4, T5, T6, T7, T8, R](f: scala.Function8[T1, T2, T3, T4, T5, T6, T7, T8, R]): js.Function8[T1, T2, T3, T4, T5, T6, T7, T8, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8) => f(x1, x2, x3, x4, x5, x6, x7, x8)
  implicit def fromFunction9[T1, T2, T3, T4, T5, T6, T7, T8, T9, R](f: scala.Function9[T1, T2, T3, T4, T5, T6, T7, T8, T9, R]): js.Function9[T1, T2, T3, T4, T5, T6, T7, T8, T9, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9)
  implicit def fromFunction10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, R](f: scala.Function10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, R]): js.Function10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10)
  implicit def fromFunction11[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, R](f: scala.Function11[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, R]): js.Function11[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11)
  implicit def fromFunction12[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, R](f: scala.Function12[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, R]): js.Function12[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12)
  implicit def fromFunction13[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, R](f: scala.Function13[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, R]): js.Function13[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13)
  implicit def fromFunction14[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, R](f: scala.Function14[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, R]): js.Function14[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14)
  implicit def fromFunction15[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, R](f: scala.Function15[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, R]): js.Function15[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15)
  implicit def fromFunction16[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, R](f: scala.Function16[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, R]): js.Function16[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15, x16: T16) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16)
  implicit def fromFunction17[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, R](f: scala.Function17[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, R]): js.Function17[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15, x16: T16, x17: T17) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17)
  implicit def fromFunction18[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, R](f: scala.Function18[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, R]): js.Function18[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15, x16: T16, x17: T17, x18: T18) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18)
  implicit def fromFunction19[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, R](f: scala.Function19[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, R]): js.Function19[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15, x16: T16, x17: T17, x18: T18, x19: T19) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19)
  implicit def fromFunction20[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, R](f: scala.Function20[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, R]): js.Function20[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15, x16: T16, x17: T17, x18: T18, x19: T19, x20: T20) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20)
  implicit def fromFunction21[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, R](f: scala.Function21[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, R]): js.Function21[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15, x16: T16, x17: T17, x18: T18, x19: T19, x20: T20, x21: T21) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21)
  implicit def fromFunction22[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, R](f: scala.Function22[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, R]): js.Function22[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, R] = (x1: T1, x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15, x16: T16, x17: T17, x18: T18, x19: T19, x20: T20, x21: T21, x22: T22) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21, x22)
  implicit def toFunction0[R](f: js.Function0[R]): scala.Function0[R] = () => f()
  implicit def toFunction1[T1, R](f: js.Function1[T1, R]): scala.Function1[T1, R] = (x1) => f(x1)
  implicit def toFunction2[T1, T2, R](f: js.Function2[T1, T2, R]): scala.Function2[T1, T2, R] = (x1, x2) => f(x1, x2)
  implicit def toFunction3[T1, T2, T3, R](f: js.Function3[T1, T2, T3, R]): scala.Function3[T1, T2, T3, R] = (x1, x2, x3) => f(x1, x2, x3)
  implicit def toFunction4[T1, T2, T3, T4, R](f: js.Function4[T1, T2, T3, T4, R]): scala.Function4[T1, T2, T3, T4, R] = (x1, x2, x3, x4) => f(x1, x2, x3, x4)
  implicit def toFunction5[T1, T2, T3, T4, T5, R](f: js.Function5[T1, T2, T3, T4, T5, R]): scala.Function5[T1, T2, T3, T4, T5, R] = (x1, x2, x3, x4, x5) => f(x1, x2, x3, x4, x5)
  implicit def toFunction6[T1, T2, T3, T4, T5, T6, R](f: js.Function6[T1, T2, T3, T4, T5, T6, R]): scala.Function6[T1, T2, T3, T4, T5, T6, R] = (x1, x2, x3, x4, x5, x6) => f(x1, x2, x3, x4, x5, x6)
  implicit def toFunction7[T1, T2, T3, T4, T5, T6, T7, R](f: js.Function7[T1, T2, T3, T4, T5, T6, T7, R]): scala.Function7[T1, T2, T3, T4, T5, T6, T7, R] = (x1, x2, x3, x4, x5, x6, x7) => f(x1, x2, x3, x4, x5, x6, x7)
  implicit def toFunction8[T1, T2, T3, T4, T5, T6, T7, T8, R](f: js.Function8[T1, T2, T3, T4, T5, T6, T7, T8, R]): scala.Function8[T1, T2, T3, T4, T5, T6, T7, T8, R] = (x1, x2, x3, x4, x5, x6, x7, x8) => f(x1, x2, x3, x4, x5, x6, x7, x8)
  implicit def toFunction9[T1, T2, T3, T4, T5, T6, T7, T8, T9, R](f: js.Function9[T1, T2, T3, T4, T5, T6, T7, T8, T9, R]): scala.Function9[T1, T2, T3, T4, T5, T6, T7, T8, T9, R] = (x1, x2, x3, x4, x5, x6, x7, x8, x9) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9)
  implicit def toFunction10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, R](f: js.Function10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, R]): scala.Function10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, R] = (x1, x2, x3, x4, x5, x6, x7, x8, x9, x10) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10)
  implicit def toFunction11[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, R](f: js.Function11[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, R]): scala.Function11[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, R] = (x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11)
  implicit def toFunction12[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, R](f: js.Function12[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, R]): scala.Function12[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, R] = (x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12)
  implicit def toFunction13[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, R](f: js.Function13[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, R]): scala.Function13[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, R] = (x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13)
  implicit def toFunction14[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, R](f: js.Function14[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, R]): scala.Function14[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, R] = (x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14)
  implicit def toFunction15[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, R](f: js.Function15[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, R]): scala.Function15[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, R] = (x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15)
  implicit def toFunction16[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, R](f: js.Function16[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, R]): scala.Function16[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, R] = (x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16)
  implicit def toFunction17[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, R](f: js.Function17[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, R]): scala.Function17[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, R] = (x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17)
  implicit def toFunction18[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, R](f: js.Function18[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, R]): scala.Function18[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, R] = (x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18)
  implicit def toFunction19[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, R](f: js.Function19[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, R]): scala.Function19[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, R] = (x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19)
  implicit def toFunction20[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, R](f: js.Function20[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, R]): scala.Function20[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, R] = (x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20)
  implicit def toFunction21[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, R](f: js.Function21[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, R]): scala.Function21[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, R] = (x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21)
  implicit def toFunction22[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, R](f: js.Function22[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, R]): scala.Function22[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, R] = (x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21, x22) => f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21, x22)
  // scalastyle:on line.size.limit
  // --- Conversions from Java boxed primitives ---
  @inline implicit def fromJBoolean(value: java.lang.Boolean): js.Any =
    value.asInstanceOf[js.Any]
  @inline implicit def fromJByte(value: java.lang.Byte): js.Any =
    value.asInstanceOf[js.Any]
  @inline implicit def fromJShort(value: java.lang.Short): js.Any =
    value.asInstanceOf[js.Any]
  @inline implicit def fromJInteger(value: java.lang.Integer): js.Any =
    value.asInstanceOf[js.Any]
  /* The following overload makes sure that the developer does not
   * inadvertently convert a Long to a Double to fit it in a js.Any.
   */
  @deprecated(
      "A Long is converted to Double to be cast to js.Any. " +
      "This is almost certainly not what you want. " +
      "Use `.toDouble` explicitly if you need it.",
      "forever")
  @inline
  implicit def fromJLong(value: java.lang.Long): js.Any =
    if (value eq null) null
    else value.doubleValue.asInstanceOf[js.Any]
  @inline implicit def fromJFloat(value: java.lang.Float): js.Any =
    value.asInstanceOf[js.Any]
  @inline implicit def fromJDouble(value: java.lang.Double): js.Any =
    value.asInstanceOf[js.Any]
  implicit class ObjectCompanionOps private[Any] (
      private val self: js.Object.type)
      extends AnyVal {
    /** Tests whether the specified object `o` has a property `p` on itself or
     *  in its prototype chain.
     *
     *  This method is the equivalent of `p in o` in JavaScript.
     */
    def hasProperty(o: js.Object, p: String): Boolean =
      js.special.in(p, o)
    /** Returns the names of all the enumerable properties of the specified
     *  object `o`, including properties in its prototype chain.
     *
     *  This method returns the same set of names that would be enumerated by
     *  a for-in loop in JavaScript, in the same order.
     *
     *  This method assumes that all keys enumerated by a for-in loop are
     *  strings. If this is not the case, calling this method is an undefined
     *  behavior of kind `ClassCastException`. Note that for all *ordinary*
     *  objects, the ECMAScript 2015 guarantees that this is the case. It might
     *  be false if `o` is a proxy object or another exotic object.
     *
     *  For ordinary objects, if the underlying implementation guarantees an
     *  order for for-in loops, then this is guaranteed to be consistent with
     *  [[js.Object.keys]], in the sense that the list returned by
     *  [[js.Object.keys]] is a sublist of the list returned by this method
     *  (not just a subset).
     */
    @noinline
    def properties(o: js.Any): js.Array[String] = {
      /* DO NOT touch this code without double-checking the optimized code.
       *
       * This implementation is carefully crafted so that the optimizer turns
       * the code into a pattern known not to fall off the performance cliffs.
       */
      val result = js.Array[scala.Any]()
      @inline def appendProp(p: scala.Any): Unit = result.push(p)
      js.special.forin(o) { p =>
        appendProp(p)
      }
      result.asInstanceOf[js.Array[String]]
    }
  }
}
/**
 * Wrappers exposing Scala collection APIs on JS collections. Kept in a
 * separate parent trait so that the conversions defined directly on [[Any]]
 * take precedence during implicit search.
 */
sealed trait LowPrioAnyImplicits extends js.LowestPrioAnyImplicits {
  this: js.Any.type =>
  implicit def wrapArray[A](array: js.Array[A]): js.WrappedArray[A] =
    new js.WrappedArray(array)
  implicit def wrapDictionary[A](dict: js.Dictionary[A]): js.WrappedDictionary[A] =
    new js.WrappedDictionary(dict)
  implicit def wrapSet[A](set: js.Set[A]): js.WrappedSet[A] =
    new js.WrappedSet(set)
  implicit def wrapMap[K, V](map: js.Map[K, V]): js.WrappedMap[K, V] =
    new js.WrappedMap(map)
}
/** Lowest-priority fallback: generic iterable operations on any js.Iterable. */
sealed trait LowestPrioAnyImplicits {
  this: js.Any.type =>
  implicit def iterableOps[A](iterable: js.Iterable[A]): js.IterableOps[A] =
    new js.IterableOps(iterable)
}
| scala-js/scala-js | library/src/main/scala-old-collections/scala/scalajs/js/Any.scala | Scala | apache-2.0 | 23,677 |
package com.github.challenge
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.WordSpec
import problem.ProblemInfo
import org.scalatest.concurrent.Conductors
/**
 * Shared harness for testing [[problem.Problem]] implementations. Subclasses
 * supply problems paired with their expected answer strings; the harness runs
 * them through a [[ProblemSolver]] on two Conductor-coordinated threads
 * (defaults first, then the optional extras).
 */
abstract class ProblemTester[A <: problem.Problem] extends WordSpec with ShouldMatchers with Conductors {
  // a problem together with its expected answer string
  type problemAndSolution = (A, String)
  /** Mandatory problem set every subclass must provide. */
  def makeDefaultProblems: List[problemAndSolution]
  /** Optional additional problems, solved after the defaults. */
  def makeExtraProblems: Option[List[problemAndSolution]] = None
  lazy val defaultProblems = makeDefaultProblems
  lazy val extraProblems = makeExtraProblems
  // conductor beat the extras thread waits for before starting
  lazy val beatLen = defaultProblems.length
  lazy val runners = 10
  lazy val solver = new ProblemSolver[A](runners, false, "TestTester")
  /** Callback asserting each solved problem's answer matches the map. */
  def okSolver(answerMap: Map[Int, String]) =
    (sols: Iterable[problem.SolvedProblem]) ⇒
      sols foreach (sol ⇒
        Some(sol.answer) should equal(answerMap.get(sol.num)))
  /** Builds a problem-number -> expected-answer map. */
  def buildAnswerMap(problems: List[problemAndSolution]) =
    problems.foldLeft(Map[Int, String]()) {
      case (map, problemInfo) ⇒ map + (problemInfo._1.num -> problemInfo._2)
    }
  val conductor = new Conductor
  import conductor._
  "A problem" should {
    thread("defaults") {
      "solve the defaults correctly" in {
        solver addInfo ProblemInfo(defaultProblems map {
          _._1
        }, okSolver(buildAnswerMap(defaultProblems)))
        solver.solve()
      }
    }
    thread("extras") {
      // let the defaults thread advance the conductor before the extras run
      waitForBeat(beatLen)
      extraProblems match {
        case Some(extraProblemsInfo) ⇒
          "solve the the extra problems correctly" in {
            solver addInfo ProblemInfo(extraProblemsInfo map {
              _._1
            }, okSolver(buildAnswerMap(extraProblemsInfo)))
            solver.solve()
          }
        case None ⇒
      }
    }
    whenFinished {
      // release solver resources once both threads completed
      solver.shutdown()
    }
  }
}
| challenge-helper/challenge-helper | src/main/scala/com/github/challenge/ProblemTester.scala | Scala | apache-2.0 | 1,810 |
/*******************************************************************************
* Copyright 2010 Olaf Sebelin
*
* This file is part of Verdandi.
*
* Verdandi is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Verdandi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Verdandi. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package verdandi.model.persistence
import java.util.Date
import java.io.FileOutputStream
import verdandi.event.ApplicationShutdown
import java.io.{ File, FileInputStream, FileOutputStream, ObjectInputStream, ObjectOutputStream, Serializable }
import verdandi.VerdandiHelper
import scala.actors.Actor
import scala.swing.Reactor
import verdandi.event._
import verdandi.model._
import scala.collection.mutable.{ ListBuffer, HashMap }
import java.util.concurrent.atomic.AtomicLong
import com.weiglewilczek.slf4s.Logging
/** Serializable holder for the id counters persisted with the data files. */
private[persistence] class IdGenerator extends Serializable {
  // next work-record id to hand out; starts at 1
  val workRecordId = new AtomicLong(1)
}
/** Persistence contract for Verdandi's data sets (write + load pairs). */
trait StorageBackend extends Logging {
  def writeCostUnits(data: ListBuffer[CostUnit])
  def writeWorkRecords(data: ListBuffer[WorkRecord])
  def writeOwnSelection(data: ListBuffer[String])
  def writeIdGenerator(data: IdGenerator)
  // flush/release whatever the implementation buffers — presumably invoked
  // at application shutdown (cf. the ApplicationShutdown import); confirm
  def shutdown()
  def loadCostUnits(): ListBuffer[CostUnit]
  def loadOwnSelection(): ListBuffer[String]
  def loadWorkRecords(): ListBuffer[WorkRecord]
  def loadIdGenerator(): IdGenerator
}
/**
 * Flat-file persistence: each data set is Java-serialized into its own file
 * under the configured base directory.
 */
trait FlatFileBackend extends StorageBackend with Logging {
  val basedir = VerdandiModel.conf.getBasedir
  val costUnitFile = new File(basedir, "costunits.ser")
  val ownCostUnitSelectionFile = new File(basedir, "ownSelection.ser")
  val workRecordsFile = new File(basedir, "workRecords.ser")
  val idGeneratorFile = new File(basedir, "idgenerator.ser")
  /** Serializes `data` into `to`; the stream is closed by withCloseable. */
  protected def write[T](data: T, to: File) = {
    VerdandiHelper.withCloseable(new ObjectOutputStream(new FileOutputStream(to))) { oOut =>
      oOut.writeObject(data)
    }
  }
  def loadCostUnits(): ListBuffer[CostUnit] = loadOrElse(costUnitFile, new ListBuffer[CostUnit])
  def loadOwnSelection(): ListBuffer[String] = loadOrElse(ownCostUnitSelectionFile, new ListBuffer[String])
  def loadWorkRecords(): ListBuffer[WorkRecord] = loadOrElse(workRecordsFile, new ListBuffer[WorkRecord])
  def loadIdGenerator(): IdGenerator = loadOrElse(idGeneratorFile, new IdGenerator)
  /**
   * Deserializes the content of `dataFile`, falling back to the (by-name)
   * `orElse` when the file does not exist or cannot be read; read failures
   * are logged, never rethrown.
   *
   * Rewritten in expression form: the previous version threaded the result
   * through an outer var pre-initialized to null.asInstanceOf[T].
   */
  protected def loadOrElse[T](dataFile: File, orElse: => T): T =
    if (!dataFile.exists) {
      orElse
    } else {
      try {
        // local var is unavoidable: withCloseable's closure returns Unit,
        // so the deserialized value must escape via assignment
        var loaded = null.asInstanceOf[T]
        VerdandiHelper.withCloseable(new ObjectInputStream(new FileInputStream(dataFile))) { oIn: ObjectInputStream =>
          loaded = oIn.readObject.asInstanceOf[T]
        }
        loaded
      } catch {
        case e: Exception =>
          logger.error("Cannot read " + dataFile.getAbsolutePath, e)
          orElse
      }
    }
}
/** Backend that writes every change to disk immediately and synchronously. */
object InstantWriteFlatFileBackend extends FlatFileBackend with Logging {
  def writeCostUnits(data: ListBuffer[CostUnit]) = write(data, costUnitFile)
  def writeWorkRecords(data: ListBuffer[WorkRecord]) = write(data, workRecordsFile)
  def writeOwnSelection(data: ListBuffer[String]) = write(data, ownCostUnitSelectionFile)
  def writeIdGenerator(data: IdGenerator) = write(data, idGeneratorFile)
  // nothing is buffered, so there is nothing to flush on shutdown
  def shutdown() {}
}
// Mixin implementing the write* methods by merely caching the latest value in
// memory; concrete backends decide when (or whether) to persist the caches.
trait DeferredWriteBackend {
  protected var costUnits: Option[ListBuffer[CostUnit]] = None
  protected var workRecords: Option[ListBuffer[WorkRecord]] = None
  protected var ownSelection: Option[ListBuffer[String]] = None
  protected var idGen: Option[IdGenerator] = None
  def writeCostUnits(data: ListBuffer[CostUnit]) = costUnits = Some(data)
  def writeWorkRecords(data: ListBuffer[WorkRecord]) = workRecords = Some(data)
  def writeOwnSelection(data: ListBuffer[String]) = ownSelection = Some(data)
  def writeIdGenerator(data: IdGenerator) = idGen = Some(data)
}
// Backend that hands write orders to a background actor so callers never block
// on disk I/O.
object DeferredWriteFlatFileBackend extends FlatFileBackend with Logging {
  /** Asynchronous write order: serialize `data` to file `to`. */
  case class WriteData(val data: AnyRef, val to: File)
  /** Poison pill telling the writer actor to stop. */
  case class Shutdown()
  // Event-based actor that performs the write orders one at a time.
  object DirtyWrite extends Actor with Logging {
    def act() {
      react {
        // FIXME: There may be write orders left!
        // Bug fix: the pattern used to be `case Shutdown`, which matches the
        // companion object — never the `Shutdown()` instances actually sent by
        // shutdown() below — so the actor never terminated.
        case Shutdown() => logger.debug("Aborting Actor")
        case dirty: WriteData => {
          logger.debug("Writing " + dirty.to)
          write(dirty.data, dirty.to)
          // react does not return; loop by re-entering act().
          act()
        }
      }
    }
  }
  DirtyWrite.start()
  def shutdown() = DirtyWrite ! Shutdown()
  def writeCostUnits(data: ListBuffer[CostUnit]) = DirtyWrite ! WriteData(data, costUnitFile)
  def writeWorkRecords(data: ListBuffer[WorkRecord]) = DirtyWrite ! WriteData(data, workRecordsFile)
  def writeOwnSelection(data: ListBuffer[String]) = DirtyWrite ! WriteData(data, ownCostUnitSelectionFile)
  def writeIdGenerator(data: IdGenerator) = DirtyWrite ! WriteData(data, idGeneratorFile)
}
// Backend that caches everything in memory (via DeferredWriteBackend) and
// writes all data sets to disk exactly once, at shutdown.
object WriteOnShutdownFlatFileBackend extends FlatFileBackend with DeferredWriteBackend with Logging {
  /** Writes the cached value, if any, via the inherited serializer.
    * (Removes the copy of the ObjectOutputStream code that used to be
    * duplicated here from FlatFileBackend.write.) */
  private def writeIfPresent[T](maybeData: Option[T], to: File): Unit =
    maybeData.foreach(data => write(data, to))

  /** Flushes all cached state to disk. */
  def shutdown() {
    writeIfPresent(costUnits, costUnitFile)
    writeIfPresent(workRecords, workRecordsFile)
    writeIfPresent(ownSelection, ownCostUnitSelectionFile)
    writeIfPresent(idGen, idGeneratorFile)
  }
}
// Backend that keeps all data in a single XML document and writes it once on
// shutdown. Initialization order matters: `root` is parsed eagerly, the
// DeferredWriteBackend caches are then pre-populated from it.
object XMLFileBackend extends StorageBackend with DeferredWriteBackend with Logging {
  import scala.xml._
  val basedir = VerdandiModel.conf.getBasedir
  val datafile = new File(basedir, "verdandidata.xml")
  // Guards against writing the file twice if shutdown() is invoked repeatedly.
  private var saved = false
  // Root element of the persisted document; an empty document when no file
  // exists yet.
  val root = if (datafile.isFile) {
    XML.loadFile(datafile)
  } else {
    <verdandidata/>
  }
  require(root.label == "verdandidata", "Unexpected root elem: " + root.label)
  // Cost units parsed lazily from the document.
  lazy val _costUnits: ListBuffer[CostUnit] = {
    val res = new ListBuffer[CostUnit]()
    def unmarshal(elem: Node): CostUnit = {
      val res = new CostUnit((elem \ "@id").text, (elem \ "name").text, (elem \ "description").text)
      res.active = (elem \ "@active").text.toBoolean
      res
    }
    (root \ "costunits" \ "costunit").foreach(elem => res += unmarshal(elem))
    res
  }
  // Index of cost units by id, used to resolve work-record references.
  private lazy val _costUnitById: HashMap[String, CostUnit] = {
    val res = new HashMap[String, CostUnit]()
    _costUnits.foreach(cu => res += (cu.id -> cu))
    res
  }
  // Pre-populate the DeferredWriteBackend caches from the document so the
  // load* accessors below serve from memory.
  costUnits = Some(loadCostUnits)
  workRecords = Some(_loadWorkRecords)
  ownSelection = Some(_loadOwnSelection)
  idGen = Some(_loadIdGenerator)
  // Marshals one cost unit to XML (id/active as attributes, rest as children).
  def toXml(costunit: CostUnit): Node = {
    <costunit>
      <name>{ costunit.name }</name>
      <description>{ costunit.description }</description>
    </costunit> % Attribute(None, "id", Text(costunit.id),
      Attribute(None, "active", Text(costunit.active.toString), Null))
  }
  // Marshals one work record; start is stored as epoch millis.
  def toXml(wr: WorkRecord): Node = {
    <workrecord>
      <costunit>{ wr.costUnit.name }</costunit>
      <annotation>{ wr.annotation }</annotation>
    </workrecord> % Attribute(None, "id", Text(wr.id.toString),
      Attribute(None, "start", Text(wr.start.getTime.toString),
        Attribute(None, "duration", Text(wr.duration.toString),
          Attribute(None, "costunitid", Text(wr.costUnit.id), Null))))
  }
  // Marshals one entry of the user's own cost-unit selection (id only).
  def seltoxml(cuid: String): Node = {
    <costunit/> % Attribute(None, "id", Text(cuid), Null)
  }
  // Marshals the whole in-memory state into one document.
  def toXml(): Node = {
    <verdandidata>
      <costunits>
        { costUnits.getOrElse(new ListBuffer()).map(toXml(_)) }
      </costunits>
      <ownselection>
        { ownSelection.getOrElse(new ListBuffer()).map(s => seltoxml(s)) }
      </ownselection>
      <workrecords>
        { workRecords.getOrElse(new ListBuffer()).map(toXml(_)) }
      </workrecords>
    </verdandidata> % Attribute(None, "nextid", Text(idGen.getOrElse(new IdGenerator()).workRecordId.get.toString), Null)
  }
  // Writes the document exactly once.
  def shutdown() {
    if (!saved) {
      logger.debug("writing data to " + datafile.getAbsolutePath)
      XML.save(datafile.getAbsolutePath, toXml(), "UTF-8", true, null)
      saved = true
    }
  }
  private def _loadOwnSelection(): ListBuffer[String] = {
    val res = new ListBuffer[String]()
    (root \ "ownselection" \ "costunit").foreach(elem => res += (elem \ "@id").text)
    res
  }
  private def _loadWorkRecords(): ListBuffer[WorkRecord] = {
    val res = new ListBuffer[WorkRecord]()
    def unmarshal(elem: Node): WorkRecord = {
      val id = (elem \ "@id").text.toInt
      val cuId = ((elem \ "@costunitid").text)
      // Fall back to a placeholder cost unit when the reference is dangling.
      val cu = _costUnitById.getOrElse(cuId, new CostUnit(cuId, cuId, null))
      val startDate = new Date((elem \ "@start").text.toLong)
      val duration = (elem \ "@duration").text.toInt
      val res = new WorkRecord(id, cu, startDate, duration)
      res.annotation = (elem \ "annotation").text
      res
    }
    (root \ "workrecords" \ "workrecord").foreach(elem => res += unmarshal(elem))
    res
  }
  private def _loadIdGenerator(): IdGenerator = {
    val idGenerator = new IdGenerator()
    // NOTE(review): for a brand-new document (<verdandidata/> has no `nextid`
    // attribute) `.text` is empty and toLong throws NumberFormatException —
    // confirm whether a fresh install can reach this path.
    idGenerator.workRecordId.set((root \ "@nextid").text.toLong)
    idGenerator
  }
  def loadCostUnits(): ListBuffer[CostUnit] = _costUnits
  def loadOwnSelection(): ListBuffer[String] = ownSelection.getOrElse(new ListBuffer())
  def loadWorkRecords(): ListBuffer[WorkRecord] = workRecords.getOrElse(new ListBuffer())
  def loadIdGenerator(): IdGenerator = idGen.getOrElse(new IdGenerator())
}
| osebelin/verdandi | src/main/scala/verdandi/model/persistence/StorageBackend.scala | Scala | gpl-3.0 | 9,889 |
package com.tenderowls.match3.server.data
// Per-color match counters for one player, wrapped as a value class.
case class Score(data: Map[ColorCell, Int]) extends AnyVal { a =>
  /** True if any per-color counter satisfies `f`. */
  def exists(f: Int => Boolean): Boolean =
    data.values.exists(f)
  /** Returns a score with the counter for `cell` incremented by one.
    * Bug fix: the previous `get(cell).fold(0)(_ + 1)` initialized an absent
    * cell to 0 instead of 1 — masked only because Score.empty pre-seeds every
    * color with 0. */
  def inc(cell: ColorCell): Score = {
    val entry = (cell, a.data.getOrElse(cell, 0) + 1)
    Score(a.data + entry)
  }
  /** Element-wise sum of two scores over the union of their keys. */
  def +(b: Score): Score = {
    val sum = (a.data.keySet ++ b.data.keySet).map { key =>
      (key, a.data.getOrElse(key, 0) + b.data.getOrElse(key, 0))
    }
    Score(sum.toMap)
  }
  /** Total count over all colors. */
  def sum: Int = data.values.sum
  override def toString: String = {
    // Strip a 5-character class-name suffix to derive the color label.
    val xs = data.map { case (k, v) => s"${k.getClass.getSimpleName.dropRight(5).toLowerCase()}=$v" }
    s"Score(${xs.mkString(",")})"
  }
}
object Score {
  // Zero score: every color present with a count of 0.
  val empty = Score(ColorCell.All.map(color => color -> 0).toMap)
}
| fomkin/match3 | server/src/main/scala/com/tenderowls/match3/server/data/Score.scala | Scala | lgpl-3.0 | 799 |
package com.waltsu.wdht.storage
import com.waltsu.wdht.testhelpers.CleanDatabase
import org.junit.runner.RunWith
import org.specs2.mutable
import org.specs2.runner.JUnitRunner
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}
@RunWith(classOf[JUnitRunner])
// NOTE(review): the implicit ExecutionContext constructor parameter must be
// resolvable wherever the runner instantiates this class — confirm JUnitRunner
// can construct it. Also note the class name (StoreEngineSpec) differs from
// the file/subject name (StorageEngine) — presumably a typo.
class StoreEngineSpec(implicit ec: ExecutionContext) extends mutable.Specification {
  // Examples share one database; run them in declaration order.
  sequential
  "StorageEngine" should {
    "store new key and value" in new CleanDatabase {
      val tests = for {
        storedObject <- StorageEngine.put("foo", "bar")
      } yield {
        storedObject.id should beGreaterThan(0)
        storedObject.key must equalTo("foo")
        storedObject.value must equalTo("bar")
      }
      // Block so the example fails if the future does not finish in time.
      Await.result(tests, 2 seconds)
    }
    "update old value" in new CleanDatabase {
      val tests = for {
        insertion <- StorageEngine.put("foo", "bar")
        update <- StorageEngine.put("foo", "bazz")
      } yield {
        // Putting an existing key must update in place, not insert a new row.
        insertion.id should beGreaterThan(0)
        insertion.id should equalTo(update.id)
        update.key must equalTo("foo")
        update.value must equalTo("bazz")
      }
      Await.result(tests, 2 seconds)
    }
  }
}
| waltsu/wdht | src/test/scala/com/waltsu/wdht/storage/StorageEngineSpec.scala | Scala | mit | 1,181 |
package org.scalaide.ui.internal.preferences
import scala.tools.nsc.Settings
import org.eclipse.core.resources.IProject
import org.eclipse.core.runtime.IPath
import org.scalaide.core.internal.ScalaPlugin
import org.scalaide.core.internal.project.CompileScope
import org.scalaide.ui.internal.preferences.IDESettings.Box
// Preference-page settings mapping each source folder of a project to a
// compile scope.
object ScopesSettings extends Settings {
  val TabTitle = "Scopes Settings"
  private val NamePrefix = "-"
  private val EmptyString = ""

  /** Names of all compile scopes, in compilation order. */
  private def choices = CompileScope.scopesInCompileOrder.map { _.name }.toList

  /** One ChoiceSetting per source folder of `project`, sorted by setting
    * name; Nil when the project is not a Scala project. */
  private def makeSettings(project: IProject): List[Settings#Setting] = {
    ScalaPlugin().asScalaProject(project).map { scalaProject =>
      scalaProject.sourceFolders.map { srcFolder =>
        val srcFolderRelativeToProject = srcFolder.makeRelativeTo(project.getLocation)
        val srcName = makeKey(srcFolderRelativeToProject)
        ChoiceSetting(srcName, helpArg = EmptyString, descr = EmptyString, choices, findDefaultScope(srcFolderRelativeToProject))
      }
    }.getOrElse(Nil).toList.sortBy { _.name }
  }

  /** Setting key for a source folder, e.g. "-src/main/scala". */
  def makeKey(srcFolderRelativeToProject: IPath): String =
    NamePrefix + srcFolderRelativeToProject.segments.mkString("/")

  // NOTE(review): `.get` throws if no scope accepts the path — presumably at
  // least one scope always matches any source path; confirm before hardening.
  private def findDefaultScope(srcFolderRelativeToProject: IPath): String =
    CompileScope.scopesInCompileOrder.find { _.isValidSourcePath(srcFolderRelativeToProject) }.get.name

  /** Settings box for the preferences UI; empty when no project is given.
    * Rewritten from `isEmpty`/`get` to the equivalent Option combinator. */
  def buildScopesSettings(project: Option[IProject]): Box =
    Box(TabTitle, project.map(makeSettings).getOrElse(Nil))
}
| dragos/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/ui/internal/preferences/ScopesSettings.scala | Scala | bsd-3-clause | 1,526 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui
import java.util.EnumSet
import javax.servlet.DispatcherType
import javax.servlet.http.{HttpServlet, HttpServletRequest}
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap
import scala.xml.Node
import org.eclipse.jetty.servlet.{FilterHolder, FilterMapping, ServletContextHandler, ServletHolder}
import org.json4s.JsonAST.{JNothing, JValue}
import org.apache.spark.{SecurityManager, SparkConf, SSLOptions}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.ui.JettyUtils._
import org.apache.spark.util.Utils
/**
* The top level component of the UI hierarchy that contains the server.
*
* Each WebUI represents a collection of tabs, each of which in turn represents a collection of
* pages. The use of tabs is optional, however; a WebUI may choose to include pages directly.
*/
private[spark] abstract class WebUI(
    val securityManager: SecurityManager,
    val sslOptions: SSLOptions,
    port: Int,
    conf: SparkConf,
    basePath: String = "",
    name: String = "",
    poolSize: Int = 200)
  extends Logging {

  protected val tabs = ArrayBuffer[WebUITab]()
  protected val handlers = ArrayBuffer[ServletContextHandler]()
  // Handlers created for each page, so detachPage can remove exactly those.
  protected val pageToHandlers = new HashMap[WebUIPage, ArrayBuffer[ServletContextHandler]]
  // Set by bind(); None while the server is not running.
  protected var serverInfo: Option[ServerInfo] = None
  // Host advertised in links; SPARK_PUBLIC_DNS overrides the driver address.
  protected val publicHostName = Option(conf.getenv("SPARK_PUBLIC_DNS")).getOrElse(
    conf.get(DRIVER_HOST_ADDRESS))
  protected val className = Utils.getFormattedClassName(this)

  def getBasePath: String = basePath
  def getTabs: Seq[WebUITab] = tabs.toSeq
  def getHandlers: Seq[ServletContextHandler] = handlers.toSeq

  // Wraps the handlers so callers can manage servlet filters without touching
  // Jetty types directly.
  def getDelegatingHandlers: Seq[DelegatingServletContextHandler] = {
    handlers.map(new DelegatingServletContextHandler(_)).toSeq
  }

  /** Attaches a tab to this UI, along with all of its attached pages. */
  def attachTab(tab: WebUITab): Unit = {
    tab.pages.foreach(attachPage)
    tabs += tab
  }

  /** Detaches a tab from this UI, along with all of its attached pages. */
  def detachTab(tab: WebUITab): Unit = {
    tab.pages.foreach(detachPage)
    tabs -= tab
  }

  /** Detaches a page from this UI, along with all of its attached handlers. */
  def detachPage(page: WebUIPage): Unit = {
    pageToHandlers.remove(page).foreach(_.foreach(detachHandler))
  }

  /** Attaches a page to this UI. */
  def attachPage(page: WebUIPage): Unit = {
    val pagePath = "/" + page.prefix
    // Each page is served twice: as HTML, and as JSON under <path>/json.
    val renderHandler = createServletHandler(pagePath,
      (request: HttpServletRequest) => page.render(request), conf, basePath)
    val renderJsonHandler = createServletHandler(pagePath.stripSuffix("/") + "/json",
      (request: HttpServletRequest) => page.renderJson(request), conf, basePath)
    attachHandler(renderHandler)
    attachHandler(renderJsonHandler)
    // Remember both handlers so detachPage can remove them later.
    val handlers = pageToHandlers.getOrElseUpdate(page, ArrayBuffer[ServletContextHandler]())
    handlers += renderHandler
    handlers += renderJsonHandler
  }

  /** Attaches a handler to this UI. */
  def attachHandler(handler: ServletContextHandler): Unit = synchronized {
    handlers += handler
    // When already bound, register with the live server too.
    serverInfo.foreach(_.addHandler(handler, securityManager))
  }

  /** Attaches a handler to this UI. */
  def attachHandler(contextPath: String, httpServlet: HttpServlet, pathSpec: String): Unit = {
    val ctx = new ServletContextHandler()
    ctx.setContextPath(contextPath)
    ctx.addServlet(new ServletHolder(httpServlet), pathSpec)
    attachHandler(ctx)
  }

  /** Detaches a handler from this UI. */
  def detachHandler(handler: ServletContextHandler): Unit = synchronized {
    handlers -= handler
    serverInfo.foreach(_.removeHandler(handler))
  }

  /**
   * Detaches the content handler at `path` URI.
   *
   * @param path Path in UI to unmount.
   */
  def detachHandler(path: String): Unit = {
    handlers.find(_.getContextPath() == path).foreach(detachHandler)
  }

  /**
   * Adds a handler for static content.
   *
   * @param resourceBase Root of where to find resources to serve.
   * @param path Path in UI where to mount the resources.
   */
  def addStaticHandler(resourceBase: String, path: String = "/static"): Unit = {
    attachHandler(JettyUtils.createStaticHandler(resourceBase, path))
  }

  /** A hook to initialize components of the UI */
  def initialize(): Unit

  /** Starts the Jetty server; handlers are registered separately in bind(). */
  def initServer(): ServerInfo = {
    val host = Option(conf.getenv("SPARK_LOCAL_IP")).getOrElse("0.0.0.0")
    val server = startJettyServer(host, port, sslOptions, conf, name, poolSize)
    server
  }

  /** Binds to the HTTP server behind this web interface. */
  def bind(): Unit = {
    assert(serverInfo.isEmpty, s"Attempted to bind $className more than once!")
    try {
      // `host` is only used for the log line below; initServer() recomputes
      // the bind address itself from the same environment variable.
      val host = Option(conf.getenv("SPARK_LOCAL_IP")).getOrElse("0.0.0.0")
      val server = initServer()
      handlers.foreach(server.addHandler(_, securityManager))
      serverInfo = Some(server)
      logInfo(s"Bound $className to $host, and started at $webUrl")
    } catch {
      case e: Exception =>
        // A UI that cannot bind is fatal for the process.
        logError(s"Failed to bind $className", e)
        System.exit(1)
    }
  }

  /** @return Whether SSL enabled. Only valid after [[bind]]. */
  def isSecure: Boolean = serverInfo.map(_.securePort.isDefined).getOrElse(false)

  /** @return The scheme of web interface. Only valid after [[bind]]. */
  def scheme: String = if (isSecure) "https://" else "http://"

  /** @return The url of web interface. Only valid after [[bind]]. */
  def webUrl: String = s"${scheme}$publicHostName:${boundPort}"

  /** @return The actual port to which this server is bound. Only valid after [[bind]]. */
  def boundPort: Int = serverInfo.map(si => si.securePort.getOrElse(si.boundPort)).getOrElse(-1)

  /** Stops the server behind this web interface. Only valid after [[bind]]. */
  def stop(): Unit = {
    assert(serverInfo.isDefined,
      s"Attempted to stop $className before binding to a server!")
    serverInfo.foreach(_.stop())
  }
}
/**
* A tab that represents a collection of pages.
* The prefix is appended to the parent address to form a full path, and must not contain slashes.
*/
private[spark] abstract class WebUITab(parent: WebUI, val prefix: String) {
  val pages = ArrayBuffer[WebUIPage]()
  // Display name shown in the UI header, e.g. prefix "jobs" -> "Jobs".
  val name = prefix.capitalize

  /** Attach a page to this tab. This prepends the page's prefix with the tab's own prefix. */
  def attachPage(page: WebUIPage): Unit = {
    page.prefix = (prefix + "/" + page.prefix).stripSuffix("/")
    pages += page
  }

  /** Get a list of header tabs from the parent UI. */
  def headerTabs: Seq[WebUITab] = parent.getTabs

  def basePath: String = parent.getBasePath
}
/**
* A page that represents the leaf node in the UI hierarchy.
*
* The direct parent of a WebUIPage is not specified as it can be either a WebUI or a WebUITab.
* If the parent is a WebUI, the prefix is appended to the parent's address to form a full path.
* Else, if the parent is a WebUITab, the prefix is appended to the super prefix of the parent
* to form a relative path. The prefix must not contain slashes.
*/
private[spark] abstract class WebUIPage(var prefix: String) {
  /** Renders this page as HTML nodes. */
  def render(request: HttpServletRequest): Seq[Node]
  /** Renders this page as JSON; no content by default. */
  def renderJson(request: HttpServletRequest): JValue = JNothing
}
// Thin wrapper exposing filter management on a Jetty ServletContextHandler
// without leaking Jetty types to callers.
private[spark] class DelegatingServletContextHandler(handler: ServletContextHandler) {

  /** Inserts a filter mapping ahead of all existing mappings. */
  def prependFilterMapping(
      filterName: String,
      spec: String,
      types: EnumSet[DispatcherType]): Unit = {
    val newMapping = new FilterMapping()
    newMapping.setFilterName(filterName)
    newMapping.setPathSpec(spec)
    newMapping.setDispatcherTypes(types)
    handler.getServletHandler.prependFilterMapping(newMapping)
  }

  /** Registers a filter with the given class name and init parameters. */
  def addFilter(
      filterName: String,
      className: String,
      filterParams: Map[String, String]): Unit = {
    val holder = new FilterHolder()
    holder.setName(filterName)
    holder.setClassName(className)
    for ((key, value) <- filterParams) holder.setInitParameter(key, value)
    handler.getServletHandler.addFilter(holder)
  }

  /** Number of filters currently registered on the wrapped handler. */
  def filterCount(): Int = handler.getServletHandler.getFilters.length

  /** Context path of the wrapped handler. */
  def getContextPath(): String = handler.getContextPath
}
| ueshin/apache-spark | core/src/main/scala/org/apache/spark/ui/WebUI.scala | Scala | apache-2.0 | 9,061 |
package models
import reactivemongo.bson
import reactivemongo.bson.{BSONDocument, BSONDocumentReader, Macros}
// One row of a division ranking table for a season/region/period.
case class Ranking(
  season: String,       // e.g. "1718"
  region: String,       // short region code
  division: String,
  position: Int,        // rank within the division
  team: String,
  matches: Int,         // matches played
  wins: Int,
  draws: Int,
  losses: Int,
  goalsPro: Int,        // goals scored
  goalsAgainst: Int,    // goals conceded
  points: Int,
  period: String
) {
}
object Ranking {
  // BSON deserializer derived via reactivemongo macros.
  implicit def rankingReader: BSONDocumentReader[Ranking] = Macros.reader[Ranking]

  // Maps one pre-split CSV line to a Ranking for `region`.
  // Season is currently hard-coded to "1718".
  // NOTE(review): column 5 feeds `losses` and column 6 feeds `draws` — the
  // opposite of the case-class declaration order (wins, draws, losses).
  // Verify against the actual source-file column layout that this is not a swap.
  def lineToRanking(region: Region)(line: Array[String]): Ranking = {
    Ranking(
      season = "1718",
      region = region.shortName,
      division = line(0),
      position = line(1).toInt,
      team = line(2),
      matches = line(3).toInt,
      wins = line(4).toInt,
      losses = line(5).toInt,
      draws = line(6).toInt,
      goalsPro = line(7).toInt,
      goalsAgainst = line(8).toInt,
      points = line(9).toInt,
      period = line(10)
    )
  }

  // Selector identifying the unique document a Ranking upserts into:
  // (season, region, division, period, team).
  implicit val upsertSelector: Ranking => BSONDocument = { theRanking: Ranking =>
    bson.document(
      "season" -> theRanking.season,
      "region" -> theRanking.region,
      "division" -> theRanking.division,
      "period" -> theRanking.period,
      "team" -> theRanking.team
    )
  }
}
| soniCaH/footbalisto-api | app/models/Ranking.scala | Scala | apache-2.0 | 1,480 |
/*
* Copyright (c) 2015-2022 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.weather
package providers.darksky
import java.text.DecimalFormat
import scalaj.http._
import model.WeatherRequest
/** A Dark Sky response block that can be listed in the request's `exclude`
  * parameter. The constructor is private; use the values in the companion. */
final case class BlockType private (name: String) extends AnyVal
object BlockType {
  val currently = BlockType("currently")
  val minutely = BlockType("minutely")
  val hourly = BlockType("hourly")
  val daily = BlockType("daily")
  val alerts = BlockType("alerts")
  val flags = BlockType("flags")
}
/** A measurement-unit system accepted by the Dark Sky `units` parameter.
  * The constructor is private; use the values in the companion. */
final case class Units private (name: String) extends AnyVal
object Units {
  val auto = Units("auto")
  val ca = Units("ca")
  val uk2 = Units("uk2")
  val us = Units("us")
  val si = Units("si")
}
object requests {
  /** A request against the Dark Sky forecast (or, when `time` is set,
    * time-machine) endpoint. Optional fields are omitted from the URL when
    * unset. */
  final case class DarkSkyRequest(
    latitude: Float,
    longitude: Float,
    time: Option[Long] = None,
    exclude: List[BlockType] = List.empty[BlockType],
    extend: Boolean = false,
    lang: Option[String] = None,
    units: Option[Units] = None
  ) extends WeatherRequest {

    override def constructRequest(baseUri: String, apiKey: String): HttpRequest = {
      // Path shape: <base>/<key>/<lat>,<lon>[,<time>]
      val coordinates = List(latitude, longitude).map(floatToString)
      val pathParams = coordinates ++ time.map(_.toString).toList
      val uri = s"$baseUri/$apiKey/${pathParams.mkString(",")}"
      // Build each optional query parameter, then drop the absent ones.
      val excludeParam = exclude.map(_.name).reduceOption(_ + "," + _).map("exclude" -> _)
      val extendParam = if (extend) Some("extend" -> "hourly") else None
      val langParam = lang.map("lang" -> _)
      val unitsParam = units.map("units" -> _.name)
      Http(uri).params(List(excludeParam, extendParam, langParam, unitsParam).flatten)
    }
  }

  /** Dark Sky seems to not consume numbers in scientific notation,
    * which are sometimes produced by toString
    */
  private def floatToString(value: Float): String = {
    val formatter = new DecimalFormat("0.0000")
    formatter.setMinimumFractionDigits(0)
    formatter.format(value.toDouble)
  }
}
| snowplow/scala-weather | src/main/scala/com.snowplowanalytics/weather/providers/darksky/requests.scala | Scala | apache-2.0 | 2,570 |
package org.grimrose.scala
import scala.concurrent.{ ExecutionContext, Future }
import _root_.generated._
// Async client facade over the scalaxb-generated Photosynth SOAP bindings.
class PhotosynthServiceClient(implicit val ctx: ExecutionContext) extends SoapClient {
  type CollectionId = String
  // Liveness check against the service.
  def ping(): Future[Option[String]] = {
    this.service.ping().map(_.PingResult)
  }
  // Fetches a collection's data. The second argument is passed as false —
  // see the generated binding for its meaning; presumably a verbosity flag.
  def getCollectionData(id: CollectionId): Future[Option[CollectionResult]] = {
    this.service.getCollectionData(id, false).map(_.GetCollectionDataResult)
  }
  def getServerInfo(): Future[Option[ServerInfo]] = {
    this.service.getServerInfo(None).map(_.GetServerInfoResult)
  }
}
// Wires the generated SOAP bindings to the skinny-framework HTTP transport.
trait SoapClient extends PhotosynthServiceSoap12Bindings
  with scalaxb.SoapClientsAsync
  with SkinnyHttpClientsAsync

// Constants shared by the transport below.
private object SkinnyHttpClientsAsync {
  val charset = "utf-8"
  val contentType = s"text/xml; charset=$charset"
}

// scalaxb async HTTP client implementation backed by skinny.http.
trait SkinnyHttpClientsAsync extends scalaxb.HttpClientsAsync {
  import skinny.http._
  import SkinnyHttpClientsAsync._
  lazy val httpClient = new HttpClient {
    // POSTs the SOAP envelope `in` to `address` with the given headers and
    // yields the response body as a string.
    def request(in: String, address: java.net.URI, headers: Map[String, String]): Future[String] = {
      val req: Request = Request(address.toString)
      headers.foreach { case (k, v) => req.header(k, v) }
      req.body(in.getBytes(charset), contentType)
      HTTP.asyncPost(req).map(_.asString)
    }
  }
}
| grimrose/shibuya-java-14 | src/main/scala/org/grimrose/scala/PhotosynthServiceClient.scala | Scala | mit | 1,316 |
import sbt._
import Keys._
import play.Project._
// sbt/Play build definition for the realtimepermit application.
object ApplicationBuild extends Build {
  val appName = "realtimepermit"
  val appVersion = "1.0-SNAPSHOT"
  val appDependencies = Seq(
    // Add your project dependencies here,
    jdbc,
    anorm,
    // Redis client used by the application.
    "net.debasishg" %% "redisclient" % "2.10"
  )
  val main = play.Project(appName, appVersion, appDependencies).settings(
    // Add your own project settings here
    // RequireJS entry points for the client-side assets.
    requireJsShim += "shim.js",
    requireJs += "main.js"
  )
}
| jeroenr/notimelikerealtime | crud_app/project/Build.scala | Scala | apache-2.0 | 499 |
/*
* This file is part of P2pCore.
*
* Copyright (C) 2012 Timur Mehrvarz, timur.mehrvarz(at)gmail.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation <http://www.gnu.org/licenses/>, either
* version 3 of the License, or (at your option) any later version.
*/
package timur.p2pCore
object RelayStress {
  def main(args:Array[String]): Unit = {
    new RelayStress().start
  }
}

// Stress client: floods the relay peer with 5000 "data" messages, then sends
// a final "last" marker, while counting incoming "data" messages from the peer.
class RelayStress extends RelayTrait {
  var incomingDataCounter = 0
  var outgoingDataCounter = 0
  // Runs once the relay connection is established; sends until done or the
  // relay asks to quit.
  override def connectedThread(connectString:String) {
    log("connectedThread start connectString='"+connectString+"' relayQuitFlag="+relayQuitFlag)
    while(!relayQuitFlag && outgoingDataCounter<5000) {
      send("data")
      outgoingDataCounter += 1
    }
    log("connectString finished sending; relayQuitFlag="+relayQuitFlag)
    send("last")
  }
  // Invoked for every message received from the peer.
  override def receiveMsgHandler(str:String) {
    if(str=="data") {
      incomingDataCounter+=1
    } else if(str=="last") {
      // Peer is done: request shutdown of the relay loop.
      log("receiveMsgHandler last; relayQuitFlag="+relayQuitFlag)
      relayQuitFlag = true
    } else {
      log("receiveMsgHandler data=["+str+"] incomingDataCounter="+incomingDataCounter)
    }
  }
  // Called when the relay loop ends; reports the final counters.
  override def relayExit() {
    log("relayExit outgoingDataCounter="+outgoingDataCounter+" incomingDataCounter="+incomingDataCounter)
  }
}
| mehrvarz/P2pCore | src/RelayStress.scala | Scala | gpl-3.0 | 1,449 |
package com.github.gdefacci.briscola.game
import com.github.gdefacci.ddd._
import com.github.gdefacci.briscola.player._
// The four Italian card seeds (suits).
sealed trait Seed
object Seed {
  case object bastoni extends Seed
  case object coppe extends Seed
  case object denari extends Seed
  case object spade extends Seed
  // All seeds; used to build a complete deck.
  val values = Set(bastoni, coppe, denari, spade)
}
// A playing card: number 1..10 in one of the four seeds.
final case class Card(number: Int, seed: Seed) {
  /** Briscola point value of this card; numbers not listed score nothing. */
  def points = {
    val pointsByNumber = Map(1 -> 11, 3 -> 10, 8 -> 2, 9 -> 3, 10 -> 4)
    pointsByNumber.getOrElse(number, 0)
  }
}
object Deck {
  // A deck holding no cards at all.
  lazy val empty = new Deck(Seq.empty)

  /** A freshly shuffled 40-card deck: numbers 1..10 in each of the four seeds. */
  def initial = {
    val allCards = for {
      seed <- Seed.values
      number <- 1 to 10
    } yield Card(number, seed)
    Deck(new util.Random().shuffle(allCards.toSeq))
  }
}
// Immutable deck: the head of `cards` is the next card dealt.
final case class Deck(cards: Seq[Card]) {
  lazy val isEmpty = cards.isEmpty
  // Deals the first n cards, returning them with the remaining deck.
  def takeCards(n: Int): (Set[Card], Deck) = {
    val (crds, deck) = cards.splitAt(n)
    crds.toSet -> Deck(deck)
  }
  // The briscola (trump) card is the last card of the deck.
  // NOTE(review): numberOfPlayers is unused here — confirm that is intentional.
  def briscolaCard(numberOfPlayers: Int): Card =
    cards.last
}
object GameState {
  lazy val empty = EmptyGameState
  // Cardinality limits for players and teams.
  val MIN_PLAYERS = 2
  val MAX_PLAYERS = 8
  val MIN_TEAMS_NUMBER = 2
  val MAX_TEAMS_NUMBER = 4
  val TEAM_MIN_PLAYERS_NUMBER = 2
  val TEAM_MAX_PLAYERS_NUMBER = 4
  // The id of any state except the empty one.
  def id(gm: GameState): Option[GameId] = gm match {
    case EmptyGameState => None
    case gm: ActiveGameState => Some(gm.id)
    case gm: FinalGameState => Some(gm.id)
    case gm: DroppedGameState => Some(gm.id)
  }
}
// Lifecycle: EmptyGameState -> ActiveGameState -> FinalGameState,
// or DroppedGameState when the game is abandoned.
sealed trait GameState
final case class GameId(id: Long)
case object EmptyGameState extends GameState
// Common team lookup for states that may carry team assignments.
sealed trait GameStateTeamMixin {
  def teams: Option[Teams]
  def teamByName(teamName: String) = teams.flatMap(_.teams.find(team => team.name == teamName))
}
// A running game: the trump card, the remaining deck, the moves of the current
// hand, and the players still to play this hand (head = current player).
final case class ActiveGameState(id: GameId, briscolaCard: Card, deck: Deck, moves: Seq[Move], nextPlayers: Seq[PlayerState], teams: Option[Teams])
  extends GameState with GameStateTeamMixin {
  assert(nextPlayers.nonEmpty)
  lazy val currentPlayer: PlayerState = nextPlayers.head
  // True when the current player is the last of this hand / of the whole game.
  lazy val isLastHandTurn = nextPlayers.length == 1
  lazy val isLastGameTurn = isLastHandTurn && deck.isEmpty && nextPlayers.head.cards.size == 1
  lazy val players: Set[PlayerState] = moves.map(_.player).toSet ++ nextPlayers
  lazy val deckCardsNumber = deck.cards.length
}
// Why a game was dropped before completion.
sealed trait DropReason
final case class PlayerLeft(player: PlayerId, reason: Option[String]) extends DropReason
// Snapshot of a game abandoned mid-play.
final case class DroppedGameState(id: GameId, briscolaCard: Card, deck: Deck, moves: Seq[Move], nextPlayers: Seq[PlayerState], dropReason: DropReason, teams: Option[Teams])
  extends GameState with GameStateTeamMixin
// Terminal state of a finished game: final scores per player (and per team,
// when playing in teams).
final case class FinalGameState(id: GameId, briscolaCard: Card, players: Seq[PlayerFinalState], teams: Option[Teams]) extends GameState with GameStateTeamMixin {
  // Score's compare is inverted (higher points sort first — see Score.compare
  // below), so `.head` of these sorted sequences is the best score.
  lazy val playersOrderByPoints: Seq[PlayerFinalState] = players.sortBy(_.score)
  lazy val teamScoresOrderByPoints: Option[Seq[TeamScore]] = teams.map { teams =>
    teams.teams.map { (t: Team) =>
      // Sum the scores of all players belonging to team t.
      val score = players.filter(pl => t.players.contains(pl.id)).foldLeft(Score.empty)((acc: Score, pl: PlayerFinalState) => acc.add(pl.score))
      TeamScore(t, score)
    }.toSeq.sortBy(_.score)
  }
  lazy val winner = playersOrderByPoints.head
  lazy val winnerTeam: Option[TeamScore] = teamScoresOrderByPoints.flatMap(_.headOption)
}
// Cards held and score won by a player during an active game.
final case class PlayerState(id: PlayerId, cards: Set[Card], score: Score) {
  lazy val points: Int = score.points
}
// Final score of a player once the game is over.
final case class PlayerFinalState(id: PlayerId, score: Score) {
  lazy val points: Int = score.points
}
// One card played by one player.
final case class Move(player: PlayerState, card: Card)
// Named result constants for Ordered.compare.
private object comparison {
  val LT = -1
  val EQ = 0
  val GT = 1
}
// The cards won by one player so far.
// NOTE: compare is deliberately inverted — a Score with MORE points (or, on a
// points tie, more cards) compares as LESS, so sortBy(_.score) lists the best
// score first.
final case class Score(cards: Set[Card]) extends Ordered[Score] {
  lazy val points = cards.toSeq.map(_.points).sum
  lazy val numberOfCards = cards.size
  def add(score: Score): Score = add(score.cards)
  def add(cards: Iterable[Card]) = Score(this.cards ++ cards)
  def compare(that: Score): Int =
    if (points > that.points) comparison.LT
    else if (points == that.points) {
      // Same points: the player holding more cards ranks first.
      if (numberOfCards > that.numberOfCards) comparison.LT
      else if (numberOfCards < that.numberOfCards) comparison.GT
      else comparison.EQ
    } else comparison.GT
}
object Score {
  // A score with no cards (zero points).
  val empty = Score(Set.empty)
}
case class TeamScore(team: Team, score: Score) | gdefacci/briscola | ddd-briscola/src/main/scala/com/github/gdefacci/briscola/game/model.scala | Scala | bsd-3-clause | 4,376 |
package com.twitter.finagle.http.codec
import org.jboss.netty.channel.{
SimpleChannelUpstreamHandler, Channels,
ChannelHandlerContext, MessageEvent}
import org.jboss.netty.handler.codec.http.{HttpHeaders, HttpRequest}
/**
* A simple channel handler to respond to "Expect: Continue" from
* clients. It responds unconditionally to these.
*/
private[http] class RespondToExpectContinue extends SimpleChannelUpstreamHandler {
  override def messageReceived(ctx: ChannelHandlerContext, e: MessageEvent) {
    e.getMessage match {
      case request: HttpRequest if HttpHeaders.is100ContinueExpected(request) =>
        // Write the 100 Continue response immediately.
        Channels.write(
          ctx, Channels.future(ctx.getChannel),
          OneHundredContinueResponse, e.getRemoteAddress)
        // Remove the ``Expect:'' header, and let the upstream
        // continue receiving chunks after this.
        request.headers.remove(HttpHeaders.Names.EXPECT)
      case _ => ()
    }
    // Always forward the event so later handlers see the (possibly modified)
    // request.
    super.messageReceived(ctx, e)
  }
}
| adriancole/finagle | finagle-http/src/main/scala/com/twitter/finagle/http/codec/RespondToExpectContinue.scala | Scala | apache-2.0 | 1,029 |
package com.bstek.designer.core.execution
import com.intellij.execution.application.{ApplicationConfigurationType, ApplicationConfiguration}
import com.intellij.openapi.project.Project
/**
* Created with IntelliJ IDEA.
* User: robin
* Date: 13-10-18
* Time: 下午4:33
* To change this template use File | Settings | File Templates.
*/
// Run configuration for Dorado applications; currently identical to the stock
// IDEA ApplicationConfiguration and serves as an extension point.
class DoradoApplicationConfiguration(val name: String, val project: Project, val applicationConfigurationType: ApplicationConfigurationType) extends ApplicationConfiguration(name, project, applicationConfigurationType) {
}
| OuYuBin/IDEADorado | dorado-core/src/com/bstek/designer/core/execution/DoradoApplicationConfiguration.scala | Scala | apache-2.0 | 569 |
package gitbucket.core.api
import gitbucket.core.util.RepositoryName
import gitbucket.core.model.CommitComment
import java.util.Date
/**
* https://developer.github.com/v3/activity/events/types/#pullrequestreviewcommentevent
*/
/**
 * API payload for a single pull-request review comment, mirroring GitHub's
 * JSON shape (the inline comments quote the example values from GitHub's
 * event documentation).
 *
 * The second (curried) parameter list — repository name and issue id — is
 * only used to derive the URL fields below; its members are not case-class
 * fields. NOTE(review): presumably `FieldSerializable` serializes the first
 * parameter list plus the `val`s declared in the body — confirm against its
 * implementation.
 */
case class ApiPullRequestReviewComment(
  id: Int, // 29724692
  // "diff_hunk": "@@ -1 +1 @@\\n-# public-repo",
  path: String, // "README.md",
  // "position": 1,
  // "original_position": 1,
  commit_id: String, // "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c",
  // "original_commit_id": "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c",
  user: ApiUser,
  body: String, // "Maybe you should use more emojji on this line.",
  created_at: Date, // "2015-05-05T23:40:27Z",
  updated_at: Date // "2015-05-05T23:40:27Z",
)(repositoryName:RepositoryName, issueId: Int) extends FieldSerializable {
  // Derived link fields, computed from the curried parameters and `id`.
  // "url": "https://api.github.com/repos/baxterthehacker/public-repo/pulls/comments/29724692",
  val url = ApiPath(s"/api/v3/repos/${repositoryName.fullName}/pulls/comments/${id}")
  // "html_url": "https://github.com/baxterthehacker/public-repo/pull/1#discussion_r29724692",
  val html_url = ApiPath(s"/${repositoryName.fullName}/pull/${issueId}#discussion_r${id}")
  // "pull_request_url": "https://api.github.com/repos/baxterthehacker/public-repo/pulls/1",
  val pull_request_url = ApiPath(s"/api/v3/repos/${repositoryName.fullName}/pulls/${issueId}")
  /*
  "_links": {
  "self": {
  "href": "https://api.github.com/repos/baxterthehacker/public-repo/pulls/comments/29724692"
  },
  "html": {
  "href": "https://github.com/baxterthehacker/public-repo/pull/1#discussion_r29724692"
  },
  "pull_request": {
  "href": "https://api.github.com/repos/baxterthehacker/public-repo/pulls/1"
  }
  }
  */
  // HAL-style links map built from the three paths above.
  val _links = Map(
    "self" -> Map("href" -> url),
    "html" -> Map("href" -> html_url),
    "pull_request" -> Map("href" -> pull_request_url))
}
object ApiPullRequestReviewComment {
  /**
   * Converts a stored [[CommitComment]] into its API representation.
   *
   * @param comment       the persisted commit comment
   * @param commentedUser API view of the comment's author
   * @param repositoryName repository the comment belongs to (used for URL fields)
   * @param issueId        pull request number (used for URL fields)
   */
  def apply(comment: CommitComment, commentedUser: ApiUser, repositoryName: RepositoryName, issueId: Int): ApiPullRequestReviewComment = {
    // A comment may not be attached to a file; the API uses "" in that case.
    val path = comment.fileName.getOrElse("")
    new ApiPullRequestReviewComment(
      comment.commentId,
      path,
      comment.commitId,
      commentedUser,
      comment.content,
      comment.registeredDate,
      comment.updatedDate
    )(repositoryName, issueId)
  }
}
| noc06140728/gitbucket | src/main/scala/gitbucket/core/api/ApiPullRequestReviewComment.scala | Scala | apache-2.0 | 2,401 |
// Book example contrasting two ways of writing a side-effecting method.
// `f1` uses `=`; its result type is inferred (Unit, since println returns Unit).
def f1(x: Int) = println("x : " + x)
// `f2` uses Scala 2 "procedure syntax" (no `=`), which always has result type
// Unit. NOTE: procedure syntax is deprecated and removed in Scala 3; new code
// should write `def f2(x: Int): Unit = ...` instead.
def f2(x: Int) { println("x : " + x) }
| grzegorzbalcerek/scala-book-examples | examples/Procedures1.scala | Scala | mit | 76 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.config
import io.gatling.BaseSpec
import io.gatling.core.protocol.{ Protocols, Protocol }
import org.scalatest.OptionValues
class ProtocolSpec extends BaseSpec with OptionValues {

  // Two distinct marker protocol types used as registry entries below.
  case class FooProtocol(foo: String) extends Protocol
  case class BarProtocol(bar: String) extends Protocol

  "building registry" should "return the configuration when 1 configuration" in {
    val registry = Protocols(new FooProtocol("foo"))
    registry.protocol[FooProtocol].map(_.foo) shouldBe Some("foo")
  }

  it should "return the configurations when 2 different configurations" in {
    val registry = Protocols(new FooProtocol("foo"), new BarProtocol("bar"))
    registry.protocol[FooProtocol].map(_.foo) shouldBe Some("foo")
    registry.protocol[BarProtocol].map(_.bar) shouldBe Some("bar")
  }

  it should "not fail when no configuration" in {
    val registry = Protocols()
    registry.protocol[FooProtocol] shouldBe None
  }

  it should "override with latest when multiple configurations of the same type" in {
    val registry = Protocols(new FooProtocol("foo1"), new FooProtocol("foo2"))
    registry.protocol[FooProtocol].map(_.foo) shouldBe Some("foo2")
  }
}
| timve/gatling | gatling-core/src/test/scala/io/gatling/core/config/ProtocolSpec.scala | Scala | apache-2.0 | 1,730 |
package cmwell.analytics.main
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import cmwell.analytics.data.{DataWriterFactory, IndexWithCompleteDocument}
import cmwell.analytics.downloader.PartitionedDownloader
import cmwell.analytics.util.{DiscoverEsTopology, FindContactPoints}
import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.log4j.LogManager
import org.rogach.scallop.{ScallopConf, ScallopOption}
import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContextExecutor
/**
 * Command-line entry point that copies whole documents between Elasticsearch
 * indexes according to a user-supplied source-index -> target-index map
 * (the `--index-map` JSON argument).
 */
object CopyIndexesWithMapping {
  def main(args: Array[String]): Unit = {
    val logger = LogManager.getLogger(CopyIndexesWithMapping.getClass)
    // Since we expect this to be run on a CM-Well node, the default parallelism is to use half the processors
    // so as to avoid starving the CM-Well node from processor resources. A higher level of parallelism might
    // be possible (without interfering with CM-Well) since most of the work will actually be on the ES side.
    val defaultParallelism = 1 max (Runtime.getRuntime.availableProcessors / 2)
    // Akka plumbing for the streaming download/upload pipeline.
    implicit val system: ActorSystem = ActorSystem("copy-index-with-mapping")
    implicit val executionContext: ExecutionContextExecutor = system.dispatcher
    implicit val actorMaterializer: ActorMaterializer = ActorMaterializer()
    try {
      object Opts extends ScallopConf(args) {
        val indexMap: ScallopOption[String] = opt[String]("index-map", short = 'i', descr = "A map from source to target index names, in JSON format", required = true)
        val parallelism: ScallopOption[Int] = opt[Int]("parallelism", short = 'p', descr = "The parallelism level", default = Some(defaultParallelism))
        val url: ScallopOption[String] = trailArg[String]("url", descr = "A CM-Well URL", required = true)
        verify()
      }
      // Derive the ES endpoint from the CM-Well URL given on the command line.
      val esContactPoint = FindContactPoints.es(Opts.url())
      // Expect a map in the form: { "sourceIndex1": "targetIndex1", "sourceIndex2": "targetIndex2", ... }
      val indexMap: Map[String, String] = new ObjectMapper().readTree(Opts.indexMap()).fields.asScala.map { entry =>
        entry.getKey -> entry.getValue.asText
      }.toMap
      // NOTE(review): the source index names are passed via the `aliases`
      // parameter here — confirm DiscoverEsTopology treats them as expected.
      val esTopology = DiscoverEsTopology(esContactPoint = esContactPoint, aliases = indexMap.keys.toSeq)
      // Validate that the index-map parameter specified valid index names, and not aliases.
      for (indexName <- indexMap.keys)
        if (!esTopology.allIndexNames.contains(indexName))
          throw new RuntimeException(s"index-map parameter included $indexName as a source, which is not a valid index name.")
      for (indexName <- indexMap.values)
        if (!esTopology.allIndexNames.contains(indexName))
          throw new RuntimeException(s"index-map parameter included $indexName as a target, which is not a valid index name.")
      // Writer that indexes each downloaded document into its mapped target index.
      val dataWriterFactory = DataWriterFactory.index[IndexWithCompleteDocument](
        indexMap = indexMap,
        esEndpoint = esContactPoint)
      // Stream every document (all versions: currentOnly = false, no source
      // filtering) from the source indexes into the writers.
      PartitionedDownloader.runDownload(
        esTopology = esTopology,
        parallelism = Opts.parallelism(),
        currentOnly = false,
        objectExtractor = IndexWithCompleteDocument,
        dataWriterFactory = dataWriterFactory,
        sourceFilter = false)
    }
    catch {
      // Log any failure and exit non-zero so callers/scripts can detect it.
      case ex: Throwable =>
        logger.error(ex.getMessage, ex)
        System.exit(1)
    }
    finally {
      system.terminate()
    }
  }
}
| bryaakov/CM-Well | tools/dataConsistencyTool/extract-index-from-es/src/main/scala/cmwell/analytics/main/CopyIndexesWithMapping.scala | Scala | apache-2.0 | 3,431 |
package org.scalatra.ssgi
import org.scalatest.matchers.MustMatchers
import org.scalatest.{WordSpec}
class CookieSpec extends WordSpec with MustMatchers {
  "a Cookie" should {
    "render a simple name value pair" in {
      val c = Cookie("theName", "theValue")
      c.toCookieString must startWith("theName=theValue")
    }
    "render a simple name value pair with a version" in {
      val c = Cookie("theName", "theValue")
      c.toCookieString must startWith("theName=theValue; Version=1")
    }
    "have a dot in front of the domain when set" in {
      val opts = CookieOptions(domain = "nowhere.com")
      val c = Cookie("cookiename", "value1")(opts)
      c.toCookieString must startWith("cookiename=value1; Domain=.nowhere.com")
    }
    "prefix a path with / if a path is set" in {
      val opts = CookieOptions(path = "path/to/resource")
      val c = Cookie("cookiename", "value1")(opts)
      c.toCookieString must startWith("cookiename=value1; Path=/path/to/resource")
    }
    "have a maxAge when the value is >= 0" in {
      val opts = CookieOptions(maxAge = 86700)
      val c = Cookie("cookiename", "value1")(opts)
      c.toCookieString must startWith("cookiename=value1; Max-Age=86700")
    }
    "set the comment when a comment is given" in {
      val opts = CookieOptions(comment = "This is the comment")
      val c = Cookie("cookiename", "value1")(opts)
      c.toCookieString must startWith("cookiename=value1; Comment=This is the comment")
    }
    "flag the cookie as secure if needed" in {
      val opts = CookieOptions(secure = true)
      val c = Cookie("cookiename", "value1")(opts)
      c.toCookieString must startWith("cookiename=value1; Secure")
    }
    "flag the cookie as http only if needed" in {
      val opts = CookieOptions(httpOnly = true)
      val c = Cookie("cookiename", "value1")(opts)
      c.toCookieString must startWith("cookiename=value1; HttpOnly")
    }
    "render a cookie with all options set" in {
      val opts = CookieOptions(
        domain = "nowhere.com",
        path = "path/to/page",
        comment = "the cookie thingy comment",
        maxAge = 15500,
        secure = true,
        httpOnly = true,
        version = 654
      )
      val c = Cookie("cookiename", "value3")(opts)
      c.toCookieString must
        equal("cookiename=value3; Domain=.nowhere.com; Path=/path/to/page; Comment=the cookie thingy comment; " +
          "Max-Age=15500; Secure; HttpOnly; Version=654")
    }
  }
} | scalatra/ssgi | core/src/test/scala/org/scalatra/ssgi/CookieSpec.scala | Scala | bsd-2-clause | 2,413 |
package com.nummulus.amqp.driver.akka
import org.scalatest.FlatSpecLike
import org.scalatest.Matchers
import org.scalatest.OneInstancePerTest
import org.scalatest.mock.MockitoSugar
import com.nummulus.amqp.driver.MessageProperties
import com.rabbitmq.client.AMQP
import com.rabbitmq.client.Channel
import com.rabbitmq.client.Envelope
import akka.actor.ActorSystem
import akka.testkit.TestKit
/**
 * Verifies that [[AkkaRabbitConsumer]] forwards a delivered RabbitMQ message
 * to its target actor as an `AmqpQueueMessageWithProperties`, preserving the
 * body, properties, and delivery tag. Uses akka-testkit's `testActor` as the
 * receiving actor and Mockito for the channel.
 */
class AkkaRabbitConsumerTest extends TestKit(ActorSystem("test-system")) with FlatSpecLike with Matchers with MockitoSugar with OneInstancePerTest {
  // Fixture values for a single fake delivery.
  val SomeDeliveryTag = 1337
  val SomeMessageBody = "some message body"
  val SomeEnvelope = new Envelope(SomeDeliveryTag, false, "", "")
  val SomeProperties = new AMQP.BasicProperties()
  val channel = mock[Channel]
  behavior of "AkkaRabbitConsumer"
  it should "pass on an AmqpRequestMessageWithProperties" in {
    val consumer = new AkkaRabbitConsumer(channel, testActor)
    // Simulate RabbitMQ invoking the consumer callback with our fixture data.
    consumer.handleDelivery("", SomeEnvelope, SomeProperties, SomeMessageBody.getBytes)
    // The consumer must translate the delivery into this actor message.
    expectMsg(AmqpQueueMessageWithProperties(SomeMessageBody, MessageProperties(SomeProperties), SomeDeliveryTag))
  }
}
| nummulus/amqp-driver | amqp-driver/src/test/scala/com/nummulus/amqp/driver/akka/AkkaRabbitConsumerTest.scala | Scala | apache-2.0 | 1,149 |
package com.softwaremill.play24.dao
import com.softwaremill.play24.models.{CoffeeTable, Coffee}
import slick.driver.H2Driver.api._
import scala.concurrent.Future
/**
 * Slick-based data access object for the coffee table.
 *
 * @param db          the Slick database to run queries against
 * @param supplierDao provides the supplier table query used in the join below
 */
class CoffeeDao(db: Database, supplierDao: SupplierDao) {
  // Base query over the coffee table.
  val query = TableQuery[CoffeeTable]
  // All coffees in the table.
  def all: Future[Seq[Coffee]] = db.run(query.result)
  // Table join example
  // Returns (coffee name, supplier name) pairs for coffees cheaper than `price`.
  // The for-comprehension desugars into a Slick implicit join on supID.
  def byPriceWithSuppliers(price: Double): Future[Seq[(String, String)]] = {
    db.run {
      val q2 = for {
        c <- query if c.price < price
        s <- supplierDao.query if s.id === c.supID
      } yield (c.name, s.name)
      q2.result
    }
  }
}
| guersam/macwire | examples/play24/app/com/softwaremill/play24/dao/CoffeeDao.scala | Scala | apache-2.0 | 601 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive
package issues
import minitest.TestSuite
import monix.eval.Task
import monix.execution.{Scheduler, UncaughtExceptionReporter}
import monix.execution.schedulers.SchedulerService
import monix.reactive.subjects.{AsyncSubject, Subject}
import scala.concurrent.{Await, TimeoutException}
import scala.concurrent.duration._
import scala.util.{Failure, Success}
/**
 * Regression tests for Monix issue #908.
 *
 * The "broken tasks" deliberately violate the callback contract by calling
 * `onSuccess` twice; the tests assert that `race`, `raceMany`, and `timeout`
 * still produce a single, sane result. The "concurrent" tests stress many
 * subscribers of an `AsyncSubject` racing against timeouts/completion.
 */
object Issue908Suite extends TestSuite[SchedulerService] {
  // Number of tasks/subscribers raced in the concurrent tests.
  val CONCURRENT_TASKS = 1000
  // Each test repeats this many times to flush out race conditions.
  val CYCLES = 100
  def setup(): SchedulerService = {
    Scheduler.computation(
      parallelism = math.max(Runtime.getRuntime.availableProcessors(), 2),
      name = "issue908-suite",
      daemonic = true,
      // Errors are expected from the double-completion tasks; swallow reports.
      reporter = UncaughtExceptionReporter(_ => ()))
  }
  def tearDown(env: SchedulerService): Unit = {
    env.shutdown()
    assert(env.awaitTermination(1.minute), "scheduler.awaitTermination")
  }
  test("broken tasks test (1)") { implicit sc =>
    for (_ <- 0 until CYCLES) {
      // Contract-violating task: completes its callback twice.
      val task = Task.async[String] { cb =>
        sc.execute(() => cb.onSuccess("1"))
        sc.execute(() => cb.onSuccess("2"))
      }
      val f = Task.race(task, task).runToFuture
      val r = Await.result(f, 30.seconds)
      assert(r != null, "r != null")
      assert(r.isInstanceOf[Either[_, _]], "r.isInstanceOf[Either[_, _]]")
      // Whichever side won, the value must be one of the two completions.
      val i = r.fold(x => x, x => x)
      assert(i == "1" || i == "2", s"$i == 1 || $i == 2")
    }
  }
  test("broken tasks test (2)") { implicit sc =>
    for (_ <- 0 until CYCLES) {
      val task = Task.async[String] { cb =>
        sc.execute(() => cb.onSuccess("1"))
        sc.execute(() => cb.onSuccess("2"))
      }
      // Race many copies of the broken task; result must still be singular.
      val f = Task.raceMany((0 until CONCURRENT_TASKS).map(_ => task)).runToFuture
      val r = Await.result(f, 30.seconds)
      assert(r == "1" || r == "2", s"$r == 1 || $r == 2")
    }
  }
  test("broken tasks test (3)") { implicit sc =>
    for (_ <- 0 until CYCLES) {
      val task = Task.async[String] { cb =>
        sc.execute(() => cb.onSuccess("1"))
        sc.execute(() => cb.onSuccess("2"))
      }
      // With a 1ms timeout either the value or a TimeoutException is valid;
      // anything else (e.g. a double-completion error) fails the test.
      val f = task.timeout(1.millis).materialize.runToFuture
      Await.result(f, 30.seconds) match {
        case Success("1" | "2") =>
        case Failure(_: TimeoutException) =>
        case other =>
          fail(s"Invalid value: $other")
      }
    }
  }
  test("concurrent test (1)") { implicit sc =>
    for (_ <- 0 until CYCLES) {
      val subject = AsyncSubject.apply[Int]()
      // Every subscriber times out (1ms, subject never emits) and falls back to 1.
      val tasks = (0 until CONCURRENT_TASKS).map { _ =>
        subject.firstL.timeoutTo(1.millis, Task(1))
      }
      val await = Task.parSequenceUnordered(tasks).map(_.sum)
      val f = Await.result(await.runToFuture, 30.seconds)
      assertEquals(f, CONCURRENT_TASKS)
    }
  }
  test("concurrent test (2)") { implicit sc =>
    for (_ <- 0 until CYCLES) {
      val subject = AsyncSubject.apply[Int]()
      // Generous timeout: all subscribers should receive the emitted 2.
      val tasks = (0 until CONCURRENT_TASKS).map { _ =>
        subject.firstL.timeoutTo(30.seconds, Task(1))
      }
      val await = for {
        fiber <- Task.parSequenceUnordered(tasks).map(_.sum).start
        // Wait until all subscriptions are registered before emitting,
        // otherwise late subscribers could miss the value.
        _ <- awaitSubscribers(subject, CONCURRENT_TASKS)
        _ <- Task {
          subject.onNext(2)
          subject.onComplete()
        }
        result <- fiber.join
      } yield {
        result
      }
      val f = Await.result(await.runToFuture, 30.seconds)
      assertEquals(f, CONCURRENT_TASKS * 2)
    }
  }
  // Polls (1ms sleep between checks) until `subject` has at least `nr` subscribers.
  def awaitSubscribers(subject: Subject[_, _], nr: Int): Task[Unit] =
    Task.suspend {
      if (subject.size < nr)
        Task.sleep(1.millis).flatMap(_ => awaitSubscribers(subject, nr))
      else
        Task.unit
    }
}
| monifu/monifu | monix-reactive/jvm/src/test/scala/monix/reactive/issues/Issue908Suite.scala | Scala | apache-2.0 | 4,305 |
import collection.mutable.Stack
import org.scalatest._
class ExampleSpec extends FunSuite with Matchers {
  test( "Stack Test" ) {
    // Push 1 then 2; a LIFO stack must pop them back in reverse order.
    val s = new Stack[Int]
    List(1, 2).foreach(s.push)
    assert( s.pop() == 2, ": A Stack should pop values in last-in-first-out order" )
    assert( s.pop() == 1, ": A Stack should pop values in last-in-first-out order" )
  }
}
| nasa-nccs-cds/BDWPS | test/scalatest.scala | Scala | gpl-2.0 | 388 |
package org.airpnp.actor
import scala.actors.Actor
import scala.actors.Actor._
import scala.xml.Node
import org.airpnp.upnp.Device
import org.airpnp.Logging
import scala.util.Try
import scala.util.Failure
import scala.util.Success
/**
 * Actor that builds [[Device]] objects from UPnP description documents.
 *
 * On a [[Build]] message it downloads the device description, keeps only
 * media renderers (unless `b.force` is set), initializes each service with
 * its SCPD document, and replies with either [[DeviceReady]] or
 * [[DeviceShouldBeIgnored]]. A [[Stop]] message replies [[Stopped]] and exits.
 *
 * @param download fetches and parses the XML document at the given URL
 */
class DeviceBuilder(private val download: String => Node) extends BaseActor {
  override def toString() = "Device builder"
  def act() = {
    loop {
      react {
        case b: Build =>
          try {
            debug("Building new device with UDN {} from {}.", b.udn, b.location)
            val node = download(b.location)
            val device = new Device(node, b.location)
            if (device.isMediaRenderer || b.force) {
              // Fetch and apply each service's SCPD before announcing readiness.
              device.getServices.foreach(s => s.initialize(download(s.getSCPDURL)))
              sender ! DeviceReady(device)
            } else {
              sender ! DeviceShouldBeIgnored(Some(device), b.udn, "not a media renderer")
            }
          } catch {
            // Any download/parse failure turns into an "ignore" reply rather
            // than crashing the actor.
            case t: Throwable =>
              error("Failed to build device from " + b.location, t)
              sender ! DeviceShouldBeIgnored(None, b.udn, "build error: " + t.getMessage)
          }
        case Stop =>
          debug("Device builder was stopped.")
          sender ! Stopped
          exit
      }
    }
  }
} | provegard/ScAirPnp | src/main/scala/org/airpnp/actor/DeviceBuilder.scala | Scala | mit | 1,279 |
package org.clulab.openie
import java.io.InputStream
/** Helpers for loading rule files and other resources from the classpath. */
object ResourceUtils {

  // methods for reading rules

  /**
    * Opens a classpath resource as a raw [[InputStream]].
    *
    * @param path resource path relative to the classpath root
    * @return the open stream, or `null` if the resource does not exist
    *         (mirrors `ClassLoader.getResourceAsStream`)
    */
  def streamFromResource(path: String): InputStream = {
    val stream = getClass.getClassLoader.getResourceAsStream(path)
    stream
  }

  /**
    * Reads a classpath resource fully into a string using the default codec.
    *
    * The source (and the underlying stream) is now closed even when reading
    * fails — the previous version only closed it on the success path, leaking
    * the stream if `mkString` threw.
    *
    * @param path resource path relative to the classpath root
    * @return the full contents of the resource
    */
  def readResource(path: String): String = {
    val source = scala.io.Source.fromInputStream(streamFromResource(path))
    try source.mkString
    finally source.close()
  }
}
| sistanlp/processors | openie/src/main/scala/org/clulab/openie/ResourceUtils.scala | Scala | apache-2.0 | 460 |
package challenge8a
import core._
/**
 * Result of an HTTP computation: a success (`Ok`), an application failure
 * with a message (`Fail`), or an unexpected exception (`Explosion`).
 */
sealed trait HttpValue[A] {

  /** Collapses the three alternatives into a single value of type `X`. */
  def fold[X](
    explosion: Throwable => X,
    fail: String => X,
    ok: A => X
  ): X = this match {
    case Explosion(exception) => explosion(exception)
    case Fail(message) => fail(message)
    case Ok(value) => ok(value)
  }

  /** Transforms the success value; failures pass through unchanged. */
  def map[B](f: A => B): HttpValue[B] = this match {
    case Explosion(exception) => Explosion(exception)
    case Fail(message) => Fail(message)
    case Ok(value) => Ok(f(value))
  }

  /** Chains a further computation off the success value. */
  def flatMap[B](f: A => HttpValue[B]): HttpValue[B] = this match {
    case Explosion(exception) => Explosion(exception)
    case Fail(message) => Fail(message)
    case Ok(value) => f(value)
  }
}
/** Unexpected failure carrying the underlying exception. */
case class Explosion[A](exception: Throwable) extends HttpValue[A]
/** Expected (application-level) failure with a message. */
case class Fail[A](message: String) extends HttpValue[A]
/** Successful result. */
case class Ok[A](value: A) extends HttpValue[A]
/** Smart constructors and the Monad instance for [[HttpValue]]. */
object HttpValue {
  // Constructors typed as HttpValue[A] (rather than the concrete case class),
  // which helps type inference at use sites.
  def explosion[A](exception: Throwable): HttpValue[A] =
    Explosion(exception)
  def fail[A](message: String): HttpValue[A] =
    Fail(message)
  def ok[A](value: A): HttpValue[A] =
    Ok(value)
  // Monad instance delegating to ok/flatMap above.
  implicit def HttpValueMonad: Monad[HttpValue] = new Monad[HttpValue] {
    def point[A](a: => A) = ok(a)
    def bind[A, B](a: HttpValue[A])(f: A => HttpValue[B]) = a flatMap f
  }
}
| Kimply/scala-challenge | src/main/scala/challenge8a/HttpValue.scala | Scala | mit | 1,064 |
/*
Facsimile: A Discrete-Event Simulation Library
Copyright © 2004-2020, Michael J Allen.
This file is part of Facsimile.
Facsimile is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later
version.
Facsimile is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with Facsimile. If not, see
http://www.gnu.org/licenses/lgpl.
The developers welcome all comments, suggestions and offers of assistance. For further information, please visit the
project home page at:
http://facsim.org/
Thank you for your interest in the Facsimile project!
IMPORTANT NOTE: All patches (modifications to existing files and/or the addition of new files) submitted for inclusion
as part of the official Facsimile code base, must comply with the published Facsimile Coding Standards. If your code
fails to comply with the standard, then your patches will be rejected. For further information, please visit the coding
standards at:
http://facsim.org/Documentation/CodingStandards/
========================================================================================================================
Scala source file from the org.facsim.anim.cell package.
*/
package org.facsim.anim.cell
import org.facsim.assertNonNull
/**
Thrown if a file supplied to an ''AutoMod® cell'' file loader is not a valid
''cell'' file.
@constructor Create a new parsing error exception.
@param msg Message explaining the cause of the exception.
@param e The underlying cause of the exception.
@since 0.0
*/
final class IncorrectFormatException private[cell](msg: String, e: Throwable)
extends RuntimeException(msg, e) {
  /*
  Sanity checks.
  */
  // Both the message and the cause must be supplied by the (package-private)
  // caller; a missing cause indicates a programming error in the loader.
  assertNonNull(msg)
  assertNonNull(e)
}
| MichaelJAllen/facsimile | core/src/main/scala/org/facsim/anim/cell/IncorrectFormatException.scala | Scala | lgpl-3.0 | 2,085 |
package com.twitter.inject.server
import com.google.inject.Module
import com.twitter.finagle.client.ClientRegistry
import com.twitter.finagle.{http, httpx}
import com.twitter.inject.Logging
import com.twitter.inject.app.App
import com.twitter.inject.modules.StatsReceiverModule
import com.twitter.server.Lifecycle.Warmup
import com.twitter.server.internal.{FinagleBuildRevision, PromoteToOldGenUtils}
import com.twitter.util.Await
/**
 * Base trait for Finatra servers: a Guice-injected twitter-server with
 * warmup/lifecycle hooks. Subclasses get client resolution before warmup,
 * old-gen promotion between warmup phases, and a /health endpoint once the
 * external servers are up.
 */
trait TwitterServer
  extends com.twitter.server.TwitterServer
  with Ports
  with App
  with Warmup
  with Logging {
  addFrameworkModule(statsModule)
  /* Protected */
  // Overridable module providing the StatsReceiver binding.
  protected def statsModule: Module = StatsReceiverModule // TODO: Use Guice v4 OptionalBinder
  /** Resolve all Finagle clients before warmup method called */
  protected def resolveFinagleClientsOnStartup = true
  /* Overrides */
  override final def main() {
    super.main() // Call GuiceApp.main() to create injector
    info("Startup complete, server ready.")
    // Block the main thread until the admin HTTP server terminates.
    Await.ready(adminHttpServer)
  }
  /** Method to be called after injector creation */
  override protected def postStartup() {
    super.postStartup()
    if (resolveFinagleClientsOnStartup) {
      info("Resolving Finagle clients before warmup")
      // Blocks until every registered Finagle client's destination resolves.
      Await.ready {
        ClientRegistry.expAllRegisteredClientsResolved() onSuccess { clients =>
          info("Done resolving clients: " + clients.mkString("[", ", ", "]") + ".")
        }
      }
    }
    FinagleBuildRevision.register(injector)
  }
  /**
   * After warmup completes, we want to run PromoteToOldGen without also signaling
   * that we're healthy since we haven't successfully started our servers yet
   */
  override protected def beforePostWarmup() {
    super.beforePostWarmup()
    PromoteToOldGenUtils.beforeServing()
  }
  /**
   * After postWarmup, all external servers have been started, and we can now
   * enable our health endpoint
   */
  override protected def afterPostWarmup() {
    super.afterPostWarmup()
    info("Enabling health endpoint on port " + httpAdminPort)
    /* For our open-source release, we need to stay backwards compatible with Finagle 6.28.0 and Twitter-Server 1.13.0 */
    // TODO: Remove usage of http.HttpMuxer after new twitter-server released
    // Register on whichever muxer (httpx vs http) this twitter-server build uses.
    if (httpx.HttpMuxer.patterns.contains("/health"))
      httpx.HttpMuxer.addHandler("/health", new HttpxReplyHandler("OK\\n"))
    else
      http.HttpMuxer.addHandler("/health", new HttpReplyHandler("OK\\n"))
  }
}
| jaume-pinyol/finatra | inject/inject-server/src/main/scala/com/twitter/inject/server/TwitterServer.scala | Scala | apache-2.0 | 2,456 |
import scala.quoted._
import scala.quoted.staging._
import scala.quoted.autolift
// Staging test: checks that Array values of every primitive element type
// (plus String) can be spliced (lifted) from the host program into quoted
// code via `${...}`, for both empty and non-empty arrays.
object Test {
  implicit val toolbox: scala.quoted.staging.Toolbox = scala.quoted.staging.Toolbox.make(getClass.getClassLoader)
  def main(args: Array[String]): Unit = run {
    '{
      // Runs inside the staged program: prints any array as "[a, b, ...]".
      def p[T](arr: Array[T]): Unit = {
        println(arr.asInstanceOf[Array[_]].mkString("[", ", ", "]"))
      }
      // Empty arrays of each liftable element type.
      p(${Array.empty[Boolean]})
      p(${Array.empty[Byte]})
      p(${Array.empty[Short]})
      p(${Array.empty[Char]})
      p(${Array.empty[Int]})
      p(${Array.empty[Long]})
      p(${Array.empty[Float]})
      p(${Array.empty[Double]})
      p(${Array.empty[String]})
      println()
      // Non-empty arrays of each liftable element type.
      p(${Array(true)})
      p(${Array[Byte](1, 2)})
      p(${Array[Short](2, 3)})
      p(${Array[Char]('a', 'b')})
      p(${Array[Int](4, 5)})
      p(${Array[Long](6L, 7L)})
      p(${Array[Float](2.1f, 3.2f)})
      p(${Array[Double](2.2, 3.3)})
      p(${Array("abc", "xyz")})
    }
  }
} | som-snytt/dotty | tests/run-staging/quote-lift-Array.scala | Scala | apache-2.0 | 970 |
package cosbench_ng
import org.slf4j.LoggerFactory
import com.amazonaws.ClientConfiguration
import com.amazonaws.services.s3.{ AmazonS3, AmazonS3ClientBuilder }
import com.amazonaws.services.s3.model.{ GetObjectRequest, ObjectMetadata }
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.auth.{ AWSStaticCredentialsProvider, BasicAWSCredentials}
import java.util.concurrent.Executors
import java.io.BufferedReader;
import java.io.InputStreamReader;
import scala.concurrent.ExecutionContext
import scala.util.{ Try, Failure, Success }
import scala.concurrent.{Future, blocking}
import java.io.InputStream
import org.apache.http.conn.ssl.{SSLConnectionSocketFactory }
import org.apache.http.ssl.{SSLContextBuilder, TrustStrategy}
import javax.net.ssl.{HostnameVerifier, SSLSession}
import java.security.cert.X509Certificate
/**
 * Lazily builds and caches a single AmazonS3 client from the benchmark
 * [[Config]]. Returns None when the config requests a fake S3 (latency
 * simulation) instead of a real endpoint. SSL certificate/hostname
 * verification is deliberately disabled for test endpoints.
 *
 * NOTE(review): `s3Client` is a plain var with no synchronization — confirm
 * init/get are only called from a single thread during startup.
 */
object GetS3Client {
  val log = LoggerFactory.getLogger(this.getClass)
  // Eagerly build the cached client (same as get).
  def init(c:Config) = get(c)
  // Returns the cached client, creating it on first use.
  def get(c: Config) = {
    if (s3Client.isEmpty)
      s3Client = createS3Client(c)
    s3Client
  }
  // Smoke test: true iff a listObjects on the bucket succeeds.
  def test(bkt: String) : Boolean =
    Try {
      s3Client.get.listObjects(bkt)
    } match {
      case Success(e) => true
      case Failure(e) =>
        log.error(e.toString)
        false
    }
  private var s3Client: Option[AmazonS3] = None
  // Builds an SSL socket factory that trusts every certificate and hostname.
  private def sslNoVerifyConnFactory(): SSLConnectionSocketFactory = {
    // No ssl verification
    // from: http://literatejava.com/networks/ignore-ssl-certificate-errors-apache-httpclient-4-4/
    // setup a trust strategy that allows all certificates
    val sslContext = SSLContextBuilder.create()
      .useProtocol("SSL")
      .loadTrustMaterial(null, new TrustStrategy() {
        def isTrusted(arg0: Array[X509Certificate], arg1: String) = true
      })
      .build()
    val sslConFactory = new SSLConnectionSocketFactory(
      sslContext,
      new HostnameVerifier { def verify(hostname: String, session: SSLSession) = true })
    return sslConFactory
  }
  // Builds the real client, or None when fakeS3Latency is configured.
  // Exits the process (System.exit(1)) if a test listObjects fails.
  private def createS3Client(c: Config): Option[AmazonS3] = {
    if (c.fakeS3Latency > -1) None
    else {
      // Guard against the placeholder credentials from the default config.
      require(c.aidSkey._1 != "aid")
      val awsCredentials = new BasicAWSCredentials(c.aidSkey._1, c.aidSkey._2)
      // retry 10 times if run to completion is set
      val clientConfig =
        if (c.runToCompletion)
          new ClientConfiguration().withMaxErrorRetry(10)
        else
          new ClientConfiguration().withMaxErrorRetry(0)
      // No SSL-Verify
      clientConfig.getApacheHttpClientConfig()
        .setSslSocketFactory(sslNoVerifyConnFactory())
      // S3 client with retries based on runToCompletion
      val s3Client = AmazonS3ClientBuilder
        .standard()
        .withEndpointConfiguration(new EndpointConfiguration(c.endpoint, c.region))
        .withCredentials(new AWSStaticCredentialsProvider(awsCredentials))
        .withClientConfiguration(clientConfig)
        .withPathStyleAccessEnabled(true)
        .build()
      // Verify the configuration with a test listObjects before returning.
      Try {
        s3Client.listObjects(c.bucketName)
      } match {
        case Success(e) => true
        case Failure(e) =>
          log.error("Problem with S3 configuration, unable to do a test list objects on bucket: " + c.bucketName)
          log.error("Using AID = " + awsCredentials.getAWSAccessKeyId())
          log.error("Using secret key = " + awsCredentials.getAWSSecretKey().charAt(1) + "***")
          log.error("Using endpoint = " + c.endpoint)
          log.error(e.toString)
          System.exit(1)
          false
      }
      Some(s3Client)
    }
  }
}
// Zero-field marker message. Not referenced within this file — presumably
// sent to signal a flush of pending S3 operations; TODO confirm with senders.
case class S3OpsFlush()
/**
 * Core S3 benchmark operations. PUT/GET are executed as blocking work on a
 * dedicated fixed-size thread pool and return Futures of timing statistics
 * ([[GoodStat]] with response/total milliseconds, or [[BadStat]] on failure).
 * When `fakeS3Latency` is configured, operations sleep instead of hitting S3.
 */
object S3Ops {
  // Dedicated pool + ExecutionContext so blocking S3 calls don't starve
  // the default dispatcher.
  lazy val executors = Executors.newFixedThreadPool(MyConfig.maxThreads)
  lazy val blockingEC = ExecutionContext.fromExecutor(executors)
  val log = LoggerFactory.getLogger(this.getClass)
  var config: Option[Config] = None
  // Records the config on first call; later calls must pass the same config.
  def init(c: Config): Boolean = {
    if (config.isEmpty) { config = Some(c) } else require(config.get == c)
    if (config.get.fakeS3Latency < 0)
      GetS3Client.init(c) // initialize S3 if using real S3
    true
  }
  // S3 client with retries disabled
  private lazy val s3Client = GetS3Client.get(config.get)
  // Uploads an object of config.objSize KiB; returns timing stats.
  def put(bucketName: String, objName: String) =
    Try {
      Future {
        blocking {
          Try {
            val totalTime = {
              val startTime = System.nanoTime / 1000000
              val omd = new ObjectMetadata()
              omd.setContentLength(config.get.objSize * 1024)
              if (config.get.fakeS3Latency > -1)
                Thread.sleep(config.get.fakeS3Latency) //fake s3
              else
                s3Client.get.putObject(bucketName, objName, byteStream(config.get.objSize * 1024), omd)
              (System.nanoTime() / 1000000) - startTime
            }
            // For PUT, response time and total time are the same measurement.
            GoodStat(totalTime, totalTime)
          } match {
            case Success(v) => v
            case Failure(e) =>
              log.warn("PUT Failed - Bucket: " + bucketName + ", object: " + objName + ", " + e)
              BadStat()
          }
        }
      }(S3Ops.blockingEC)
    } match {
      case Success(v) => v
      case Failure(e) =>
        // Scheduling the Future itself failed (e.g. pool rejected the task).
        log.warn("PUT Execution Failed - Bucket: " + bucketName + ", object: " + objName + ", " + e)
        Future.successful(BadStat())
    }
  // Downloads an object (optionally a byte range) and measures both
  // time-to-first-byte and total time; validates the byte count read.
  def get(bucketName: String, objName: String) =
    Try {
      Future {
        blocking {
          Try {
            var buffer: Array[Byte] = Array.ofDim(9126)
            val startTime = System.nanoTime / 1000000
            val getObjReq = new GetObjectRequest(bucketName, objName)
            var totalBytesRead = 0
            //if range read is used
            val expectedBytes =
              if (config.get.rangeReadStart > -1) {
                getObjReq.setRange(config.get.rangeReadStart, config.get.rangeReadEnd)
                config.get.rangeReadEnd - config.get.rangeReadStart + 1
              } else
                1024 * config.get.objSize
            val rtnVal =
              if (config.get.fakeS3Latency > -1) { //fake s3
                Thread.sleep(config.get.fakeS3Latency)
                val t = (System.nanoTime() / 1000000)
                GoodStat(t - startTime, t - startTime)
              } else { // real s3
                val s3Obj = s3Client.get.getObject(getObjReq)
                val stream = s3Obj.getObjectContent
                val firstByte = stream.read()
                var bytesRead = if (firstByte >=0 ) 1 else 0 // stream.read returns the byte, not the number of bytes read.
                // time to first byte
                val rt = (System.nanoTime() / 1000000)
                // Drain the remainder of the stream, counting bytes.
                while(bytesRead >= 0) {
                  totalBytesRead = bytesRead + totalBytesRead
                  bytesRead = stream.read(buffer)
                }
                val ct = (System.nanoTime() / 1000000)
                stream.close()
                s3Obj.close()
                if (totalBytesRead != expectedBytes) {
                  log.error("unexpected object size read. Got: %d bytes, Expected %d bytes".format(totalBytesRead, expectedBytes))
                  BadStat() // return bad stat
                } else
                  GoodStat(rt - startTime, ct - startTime) //(rspTime, totalTime)
              }
            rtnVal
          } match {
            case Success(v) => v
            case Failure(e) =>
              log.warn("GET Failed - Bucket: " + bucketName + ", object: " + objName + ", " + e)
              BadStat()
          }
        }
      }(S3Ops.blockingEC)
    } match {
      case Success(v) => v
      case Failure(e) =>
        log.warn("GET Execution Failed - Bucket: " + bucketName + ", object: " + objName + ", " + e)
        Future.successful(BadStat())
    }
  def shutdown() = {
    log.debug("S3Ops has shutdown")
    S3Ops.executors.shutdown()
  }
  // this is our source of infinite bytes
  val s3Buffer : Array[Byte] = (for (i <- 0 to 1023) yield 'b'.toByte).toArray
  // An InputStream producing exactly `length` bytes drawn from s3Buffer,
  // used as the PUT payload without allocating the whole object in memory.
  def byteStream(length: Long): InputStream = new InputStream {
    require(length >= 0)
    var currPos : Long = 0
    def read(): Int = if (currPos < length) {
      currPos += 1
      S3Ops.s3Buffer((currPos % S3Ops.s3Buffer.size).toInt).toInt
    } else -1
  }
}
| vardhanv/cosbench_ng | common/src/main/scala/S3Actor.scala | Scala | mit | 8,559 |
package org.jetbrains.plugins.scala.worksheet.ui
import com.intellij.execution.impl.ConsoleViewImpl
import com.intellij.execution.ui.{ConsoleView, RunnerLayoutUi}
import com.intellij.openapi.project.{DumbAware, Project}
import com.intellij.openapi.wm.{ToolWindow, ToolWindowAnchor, ToolWindowFactory, ToolWindowManager}
import com.intellij.ui.content.Content
import org.jetbrains.plugins.scala.icons.Icons
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
/**
* User: Dmitry.Naydanov
* Date: 25.05.18.
*/
/**
 * IntelliJ tool-window factory for the worksheet results window.
 *
 * Content creation is intentionally empty here: content is added on demand
 * by the companion object. The window is hidden by default
 * (`shouldBeAvailable` is false, not activated on start).
 */
class WorksheetToolWindowFactory extends ToolWindowFactory with DumbAware {
  // Content is created lazily via the companion's createOutputContent.
  override def createToolWindowContent(project: Project, toolWindow: ToolWindow): Unit = {
  }
  override def init(window: ToolWindow): Unit = {
    WorksheetToolWindowFactory.initToolWindow(window)
  }
  override def shouldBeAvailable(project: Project): Boolean = false
  override def isDoNotActivateOnStart: Boolean = true
}
/**
 * Companion managing the lifecycle of the worksheet results tool window:
 * registering/unregistering it per project and adding/removing console
 * content for individual worksheet files.
 */
object WorksheetToolWindowFactory {
  private val WORKSHEET_NAME = "Worksheet"
  private val MY_ID = "WorksheetResultsToolWindow"
  private def findToolWindow(project: Project)= {
    Option(ToolWindowManager.getInstance(project).getToolWindow(MY_ID)) //TODO awful solution
  }
  private def initToolWindow(toolWindow: ToolWindow): ToolWindow = {
    toolWindow.setIcon(Icons.WORKSHEET_LOGO)
    toolWindow.setTitle("Worksheet Output")
    toolWindow.setStripeTitle("Worksheet")
    toolWindow
  }
  // Builds a (content, console) pair for one worksheet file.
  private def createOutputContent(file: ScalaFile): (Content, ConsoleView) = {
    val project = file.getProject
    val factory = RunnerLayoutUi.Factory.getInstance(project)
    val layoutUi = factory.create("WorksheetConsolePrinter", WORKSHEET_NAME, WORKSHEET_NAME, project)
    val cv = new ConsoleViewImpl(project, false)
    val component = cv.getComponent //don't inline, it initializes cv's editor
    (layoutUi.createContent(MY_ID, component, getDisplayName(file), Icons.WORKSHEET_LOGO, null), cv)
  }
  //todo
  // Registers the tool window if needed and activates it.
  private def enableToolWindow(project: Project): Unit = {
    val manager = ToolWindowManager.getInstance(project)
    (manager.getToolWindow(MY_ID) match {
      case tw: ToolWindow => tw
      case _ => initToolWindow(manager.registerToolWindow(MY_ID, false, ToolWindowAnchor.BOTTOM))
    }).activate(null)
  }
  private def disableToolWindow(project: Project): Unit = {
    ToolWindowManager.getInstance(project).getToolWindow(MY_ID).hide(null)
    ToolWindowManager.getInstance(project).unregisterToolWindow(MY_ID)
  }
  // Enables/disables the window only when its state differs from `isRunning`.
  private def ensureRunning(project: Project, isRunning: Boolean) {
    if (isRunning^findToolWindow(project).exists(_.isActive)) {
      if (isRunning) enableToolWindow(project) else disableToolWindow(project)
    }
  }
  private def getDisplayName(file: ScalaFile): String = file.getName
  // Removes one file's content; tears the window down when none remain.
  def disposeUI(content: Content, project: Project): Unit = {
    findToolWindow(project) foreach {
      toolWindow =>
        val contentManager = toolWindow.getContentManager
        contentManager.removeContent(content, true)
        if (contentManager.getContents.isEmpty) disableToolWindow(project)
    }
  }
  //todo Can we really get all content<->file managing to factory and cache?
  def createOutputContent(file: ScalaFile, force: Boolean = true): Option[(Content, ConsoleView)] = {
    val project = file.getProject
    if (force) ensureRunning(project, isRunning = true)
    findToolWindow(project).map {
      toolWindow =>
        val manager = toolWindow.getContentManager
        val (content, cv) = createOutputContent(file)
        manager addContent content
        (content, cv)
    }
  }
} | jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/worksheet/ui/WorksheetToolWindowFactory.scala | Scala | apache-2.0 | 3,624 |
package valfinding
// Test fixture for the debugger's val-finding logic. The `/*{...}*/` marker
// comments label declarations and usages that the test suite locates.
// NOTE(review): do not reformat or move the marker comments — the tests
// appear to depend on them; confirm before touching this file.
class ClosureTest {
  val fieldC = "captured field"
  def useClosures(): Unit = {
    val localValC = "Local val captured"
    val shadowedInClosure = "This shouldn't be shown"
    List("clParam1", "clParam2", "clParam3").map {closureParam /*{closure param decl}*/ =>
      closureParam /*{closure param usage}*/
      fieldC /*{captured field of enclosing class}*/
      localValC /*{captured local variable of enclosing method}*/
      val shadowedInClosure /*{local var of closure shadowing local var of enclosing method}*/ = "shadowed in closure"
      shadowedInClosure
    }
  }
  def localSimilar: Unit = {
    val closureParam /*{local of another method named similarly to a local of closure}*/ = "SSS"
  }
} | Kwestor/scala-ide | org.scala-ide.sdt.debug.tests/test-workspace/sfValFinding/src/Closures.scala | Scala | bsd-3-clause | 740 |
package module
import akka.cluster.VectorClock
import java.io.File
import java.nio.file.Files
import javafx.scene.{image => jfxsi}
import sun.audio.AudioPlayer
import scalafx.Includes._
import scalafx.beans.property.{ObjectProperty, ReadOnlyObjectWrapper, StringProperty}
import scalafx.collections.ObservableMap.Add
import scalafx.scene.image.Image
import scalafx.scene.media.{Media, MediaPlayer}
/**
* Created by yangwu on 4/4/15.
*/
/** Base type for messages exchanged between nodes of the music-player cluster. */
sealed trait ClusterMessage
/** Request to enter the critical section, carrying the sender's vector clock. */
case class requestCS(ts: VectorClock) extends ClusterMessage{
  // NOTE(review): `t` only aliases the constructor parameter `ts`, which is
  // already a public case-class field; kept for source compatibility.
  val t = ts
}
/** A vote cast for the given song title. */
case class Vote(song:String) extends ClusterMessage
/** Notification that the critical section has been released. */
case class release() extends ClusterMessage
/**
 * Holds observable metadata (album, artist, title, year, cover art) and the
 * media player for the currently selected song.
 *
 * NOTE(review): this case class is an almost exact duplicate of the
 * `object SongModel` below; consider keeping only one of them.
 */
case class SongModel() extends ClusterMessage {
  // Fallback cover art used until the media reports an embedded image.
  private val DefaultImageURL = classOf[SongModel].getResource("/defaultAlbum.jpg").toString
  private val DefaultImageCover = new Image(DefaultImageURL)
  // Observable metadata properties, updated as the media reports its tags.
  val album = new StringProperty(this, "album")
  val artist = new StringProperty(this, "artist")
  val title = new StringProperty(this, "title")
  val year = new StringProperty(this, "year")
  // NOTE: use of `javafx.scene.image.Image` instead of `scalafx.scene.image.Image`, this is required for binding in
  // MetadataView to compile.
  val albumCover = new ObjectProperty[jfxsi.Image](this, "albumCover")
  private val _mediaPlayer = new ReadOnlyObjectWrapper[MediaPlayer](this, "mediaPlayer")
  // Initialize all metadata to blanks/default cover before any media is set.
  resetProperties()
  /** Read-only view of the current media player (null until `url` is set). */
  def mediaPlayer = _mediaPlayer.readOnlyProperty
  /** Source URL of the current media, or null when no player exists. */
  def url: String = if (mediaPlayer() != null) mediaPlayer().media.source else null
  /** Stops any current playback and loads the media at `url`. */
  def url_=(url: String) {
    if (mediaPlayer() != null) mediaPlayer().stop()
    initializeMedia(url)
  }
  // Resets all metadata properties to blank values and the default cover.
  private def resetProperties() {
    artist() = ""
    album() = ""
    title() = ""
    year() = ""
    albumCover() = DefaultImageCover
  }
  // Creates a Media + MediaPlayer for `url`, wiring metadata updates into the
  // observable properties above. Construction errors are logged, not rethrown.
  private def initializeMedia(url: String) {
    resetProperties()
    try {
      val media = new Media(url) {
        metadata.onChange((_, change) => {
          change match {
            case Add(key, added) => handleMetadata(key, added)
            case _ =>
          }
        })
      }
      _mediaPlayer() = new MediaPlayer(media) {
        // Handle errors during playback
        onError = {
          val errorMessage = media.error().getMessage
          println("MediaPlayer Error: " + errorMessage)
        }
      }
    } catch {
      // Handle construction errors
      case re: RuntimeException => println("Caught Exception: " + re.getMessage)
    }
  }
  // Routes a single media-metadata entry into the matching property.
  private def handleMetadata(key: String, value: AnyRef) {
    key match {
      case "album" => album() = value.toString
      case "artist" => artist() = value.toString
      case "title" => title() = value.toString
      case "year" => year() = value.toString
      case "image" => albumCover() = value.asInstanceOf[javafx.scene.image.Image]
      case _ => println("Unhandled metadata key: " + key + ", value: " + value)
    }
  }
}
/** Health-check request sent to a peer. */
case class poll() extends ClusterMessage {
  // empty
}
/** Signals that a timeout elapsed while waiting for a response. */
case class timeOut() extends ClusterMessage {
}
/** Reply to a [[poll]]; `status` is true when the peer responded positively. */
case class pollResponse(status : Boolean) extends ClusterMessage {
  // NOTE(review): `res` duplicates the public case-class field `status`.
  val res = status
}
/** Command to start playback; the mutable `musicName` selects the track. */
case class Play() extends ClusterMessage{
  // NOTE(review): a var inside a case class makes equals/hashCode unstable;
  // confirm no Play instance is ever used as a map/set key.
  var _musicName = ""
  // getter
  def musicName = _musicName
  // setter
  def musicName_= (value:String):Unit = _musicName = value
}
/** Request to transfer a music file between nodes. */
case class transferMusic() extends ClusterMessage {
}
/** Announces the playback start time for the track named `src`. */
case class startTime(time: Long, src:String) extends ClusterMessage {
  val t= time
  val name = src
}
/**
 * Singleton variant of the song model: observable metadata plus the media
 * player for the current song.
 *
 * NOTE(review): this object duplicates `case class SongModel` above almost
 * line-for-line; the two should be consolidated.
 */
object SongModel extends ClusterMessage {
  // Fallback cover art used until the media reports an embedded image.
  private val DefaultImageURL = classOf[SongModel].getResource("/defaultAlbum.jpg").toString
  private val DefaultImageCover = new Image(DefaultImageURL)
  // Observable metadata properties, updated as the media reports its tags.
  val album = new StringProperty(this, "album")
  val artist = new StringProperty(this, "artist")
  val title = new StringProperty(this, "title")
  val year = new StringProperty(this, "year")
  // NOTE: use of `javafx.scene.image.Image` instead of `scalafx.scene.image.Image`, this is required for binding in
  // MetadataView to compile.
  val albumCover = new ObjectProperty[jfxsi.Image](this, "albumCover")
  private val _mediaPlayer = new ReadOnlyObjectWrapper[MediaPlayer](this, "mediaPlayer")
  // Initialize all metadata to blanks/default cover before any media is set.
  resetProperties()
  /** Read-only view of the current media player (null until `url` is set). */
  def mediaPlayer = _mediaPlayer.readOnlyProperty
  /** Source URL of the current media, or null when no player exists. */
  def url: String = if (mediaPlayer() != null) mediaPlayer().media.source else null
  /** Stops any current playback and loads the media at `url`. */
  def url_=(url: String) {
    if (mediaPlayer() != null) mediaPlayer().stop()
    initializeMedia(url)
  }
  // Resets all metadata properties to blank values and the default cover.
  private def resetProperties() {
    artist() = ""
    album() = ""
    title() = ""
    year() = ""
    albumCover() = DefaultImageCover
  }
  // Creates a Media + MediaPlayer for `url`, wiring metadata updates into the
  // observable properties above. Construction errors are logged, not rethrown.
  private def initializeMedia(url: String) {
    resetProperties()
    try {
      val media = new Media(url) {
        metadata.onChange((_, change) => {
          change match {
            case Add(key, added) => handleMetadata(key, added)
            case _ =>
          }
        })
      }
      _mediaPlayer() = new MediaPlayer(media) {
        // Handle errors during playback
        onError = {
          val errorMessage = media.error().getMessage
          println("MediaPlayer Error: " + errorMessage)
        }
      }
    } catch {
      // Handle construction errors
      case re: RuntimeException => println("Caught Exception: " + re.getMessage)
    }
  }
  // Routes a single media-metadata entry into the matching property.
  private def handleMetadata(key: String, value: AnyRef) {
    key match {
      case "album" => album() = value.toString
      case "artist" => artist() = value.toString
      case "title" => title() = value.toString
      case "year" => year() = value.toString
      case "image" => albumCover() = value.asInstanceOf[javafx.scene.image.Image]
      case _ => println("Unhandled metadata key: " + key + ", value: " + value)
    }
  }
} | wy4515/echo | player/src/main/scala/model/SongModel.scala | Scala | mit | 5,645 |
package lila.user
import scala._
/** Optional, user-editable profile data; every field may be absent or blank. */
case class Profile(
    country: Option[String] = None,
    location: Option[String] = None,
    bio: Option[String] = None,
    firstName: Option[String] = None,
    lastName: Option[String] = None) {

  /** Joins the non-empty name parts with a space; None when both are missing or empty. */
  def nonEmptyRealName = List(firstName, lastName).flatMap(ne) match {
    case Nil   => none
    case names => (names mkString " ").some
  }

  /** Resolves the stored country code against the known country table. */
  def countryInfo = country flatMap Countries.info

  /** The location, filtered to be non-empty. */
  def nonEmptyLocation = ne(location)

  /** The bio, filtered to be non-empty. */
  def nonEmptyBio = ne(bio)

  // Drops values that are present but are empty strings.
  private def ne(str: Option[String]) = str filter (_.nonEmpty)
}
object Profile {

  /** An all-empty profile, used when a user has not filled anything in. */
  val default = Profile()

  // Macro-derived BSON (de)serializer for persisting profiles in MongoDB.
  private[user] val profileBSONHandler = reactivemongo.bson.Macros.handler[Profile]
}
| Happy0/lila | modules/user/src/main/Profile.scala | Scala | mit | 703 |
package org.http4s
package parser
import cats.implicits._
import org.http4s.internal.parboiled2._
/**
 * Base class for HTTP header parsers built on parboiled2. Subclasses supply
 * the grammar entry rule; `parse` runs it over the captured `input`.
 *
 * `Parser.DeliveryScheme.Either` makes `run()` return an Either whose failure
 * side is formatted against the input and wrapped in a [[ParseFailure]].
 */
private[parser] abstract class Http4sHeaderParser[H <: Header](val input: ParserInput) extends Parser with AdditionalRules {
  /** Grammar entry point producing the parsed header value. */
  def entry: Rule1[H]
  def parse: ParseResult[H] =
    entry.run()(Parser.DeliveryScheme.Either)
      .leftMap(e => ParseFailure("Invalid header", e.format(input)))
}
| ZizhengTai/http4s | core/src/main/scala/org/http4s/parser/Http4sHeaderParser.scala | Scala | apache-2.0 | 396 |
/*
* Copyright (C) 2017. RandomCoder <randomcoder@randomcoding.co.uk>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package uk.co.randomcoding.cucumber.generator.html
import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Path}
import uk.co.randomcoding.cucumber.generator.gherkin.{Examples, Feature, ScenarioDesc, ScenarioOutline}
import uk.co.randomcoding.cucumber.generator.reader.FeatureReader
import uk.co.randomcoding.cucumber.generator.writer.writeHtml
import scala.collection.JavaConverters._
import scala.xml.NodeSeq
trait FeatureHtml {

  /**
   * Recursively converts every `.feature` file under `featureFileDir` into an
   * HTML report beneath `baseOutputDir`, mirroring the directory layout
   * relative to `relativeTo`.
   *
   * @param featureFileDir directory currently being scanned
   * @param baseOutputDir  root of the generated HTML tree
   * @param relativeTo     ancestor of `featureFileDir` used to compute the
   *                       output sub-path
   */
  def generateFeatures(featureFileDir: Path, baseOutputDir: Path, relativeTo: Path): Unit = {
    val relativePath = if (featureFileDir == relativeTo) "" else featureFileDir.subpath(relativeTo.getNameCount, featureFileDir.getNameCount).toString
    val targetDir = baseOutputDir.resolve(relativePath)
    // File.listFiles returns null on I/O error or when the path is not a
    // directory; guard against it instead of throwing a NullPointerException.
    val dirContents = Option(featureFileDir.toFile.listFiles()).toSeq.flatten.map(_.toPath)
    dirContents.partition(Files.isDirectory(_)) match {
      case (dirs, files) => {
        writeFeatures(files.filter(_.toString.endsWith(".feature")), targetDir)
        dirs.foreach(generateFeatures(_, baseOutputDir, relativeTo))
      }
    }
  }

  // Reads each feature file as UTF-8, renders it to HTML and writes
  // "<name>.html" into `outputDir`, creating the directory first.
  private[this] def writeFeatures(features: Seq[Path], outputDir: Path) = {
    Files.createDirectories(outputDir)
    features.foreach { featureFile =>
      val html = FeatureHtml(FeatureReader.read(Files.readAllLines(featureFile, StandardCharsets.UTF_8).asScala.toList))
      val targetFile = outputDir.resolve(featureFile.getFileName.toString + ".html")
      writeHtml(html, targetFile.toFile)
    }
  }
}
/**
 * Renders a parsed Gherkin [[Feature]] into a standalone HTML page.
 * NOTE(review): the XML literals below ARE the emitted markup — whitespace
 * and element structure inside them is output, not formatting.
 */
object FeatureHtml {

  /** Builds the complete HTML document for `feature`. */
  def apply(feature: Feature): NodeSeq = {
    <html lang="en">
      <head>
        <title>Feature: {feature.description}</title>
        {metaTags}
        {jquery}
        {bootstrap}
        {customCss}
      </head>
      <body>
        <div class="container">
          <div class="row">
            <div class="col col-sm-10 col-sm-offset-1 feature">{ if (feature.tags.nonEmpty) {
              <div class="row feature_tags" id="feature_tags">
                <div class="col-sm-12">
                  {feature.tags.mkString(" ")}
                </div>
              </div>}}
              <div class="row">
                <div class="col-sm-12" id="feature_description">
                  <span class="feature_heading">Feature: </span><span class="feature_description">{feature.description}</span>
                </div>
              </div>
              <div class="row">
                <div class="col-sm-12" id="as_a">
                  <span class="feature_part_prefix">As a </span><span class="feature_part">{feature.asA}</span>
                </div>
              </div>
              <div class="row">
                <div class="col-sm-12" id="i_want">
                  <span class="feature_part_prefix">I want </span><span class="feature_part" >{feature.iWantTo}</span>
                </div>
              </div>
              <div class="row">
                <div class="col-sm-12 feature_part_prefix" id="in_order_to">
                  <span class="feature_part_prefix">In order to </span><span class="feature_part">{feature.inOrderTo}</span>
                </div>
              </div>
              { feature.scenarios.zipWithIndex.map { case (scenario, index) => scenarioHtml(scenario, s"scenario_$index") } }
            </div>
          </div>
        </div>
      </body>
    </html>
  }

  // Renders one scenario (or scenario outline): tags, heading, given/when/then
  // steps, and — for outlines — each examples table.
  private[this] def scenarioHtml(scenario: ScenarioDesc, elementId: String) = {
    <div class="scenario">{ if (scenario.tags.nonEmpty) {
      <div class="row" id="scenario_tags">
        <div class="col-sm-12 scenario_tags">
          {scenario.tags.mkString(" ")}
        </div>
      </div>}}
      <div class="row">
        <div class="col-sm-12" id="scenario_description">
          <span class="scenario_heading">{scenario.identifier}: </span>
          <span class="scenario_description">{scenario.description}</span>
        </div>
      </div>
      {stepsHtml(scenario.givens, "given") ++
        stepsHtml(scenario.whens, "when") ++
        stepsHtml(scenario.thens, "then") ++
        (scenario match {
          case ScenarioOutline(_, _, _, _, _, examples) => examples.map(examplesHtml)
          case _ => Nil
        })
      }
    </div>
  }

  // Renders a list of steps as rows; `gwtClass` selects the given/when/then CSS class.
  private[this] def stepsHtml(steps: Seq[String], gwtClass: String) = {
    steps.flatMap { step =>
      <div class="row">
        <div class={s"col col-sm-12 $gwtClass"}>
          {step}
        </div>
      </div>
    }
  }

  // Renders a scenario outline's examples block as a table, with optional tags.
  private[this] def examplesHtml(examples: Examples) = {
    <div class="examples">{ if (examples.tags.nonEmpty) {
      <div class="row">
        <div class="col-sm-12 examples_tags" id="example_tags">
          {examples.tags.mkString(" ")}
        </div>
      </div>}}
      <div class="row">
        <div class="col col-sm-12 examples_heading">
          Examples:
        </div>
      </div>
      <div class="row">
        <div class="col col-sm-8">
          <table>
            <tr>{ examples.headings.map(heading => <th>{heading}</th>) }</tr>
            { examples.examples.flatMap { example => <tr>{example.map(ex => <td>{ex}</td>) }</tr> } }
          </table>
        </div>
      </div>
    </div>
  }
| randomcoder/gherkin-converter | src/main/scala/uk/co/randomcoding/cucumber/generator/html/FeatureHtml.scala | Scala | agpl-3.0 | 5,879 |
// Compiler regression test for `implicits.Not`: the marked line must be
// rejected (an implicit Foo exists, so Not[Foo] cannot be summoned), while
// Not[Bar] succeeds because no implicit Bar is in scope.
// NOTE(review): the trailing marker comment on the Not[Foo] line is consumed
// by the test framework — do not alter it.
object Test {
  import implicits.Not
  class Foo
  class Bar
  implicit def foo: Foo = ???
  implicitly[Foo]
  implicitly[Not[Foo]] // error
  implicitly[Not[Bar]]
}
| som-snytt/dotty | tests/neg/i5234a.scala | Scala | apache-2.0 | 167 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar
import slamdata.Predef._
/**
 * A function of fixed arity over a uniform argument list: applying it to a
 * `List[A]` yields `Some(result)` or `None` when the arguments are not
 * handled. Contravariant in the argument type, covariant in the result.
 */
trait HomomorphicFunction[-A, +B] { self =>
  /** The number of arguments this function expects. */
  def arity: Int

  /** Applies the function; `None` when the arguments are not handled. */
  def apply(args: List[A]): Option[B]

  /** Post-composes `f` onto this function's result, preserving arity. */
  def andThen[C](f: B => C): HomomorphicFunction[A, C] = new HomomorphicFunction[A, C] {
    def arity: Int = self.arity
    def apply(args: List[A]): Option[C] = self(args) map f
  }
}
| drostron/quasar | frontend/src/main/scala/quasar/HomomorphicFunction.scala | Scala | apache-2.0 | 896 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.mesos
import java.io.File
import java.lang.System.currentTimeMillis
import java.util.{Collections, List => JList}
import java.util.{Date => JDate}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}
import java.util.concurrent.locks.ReentrantLock
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.concurrent.Future
import org.apache.mesos.Protos.{TaskInfo => MesosTaskInfo, _}
import org.apache.mesos.SchedulerDriver
import org.apache.spark.{SecurityManager, SparkContext, SparkException, TaskState}
import org.apache.spark.deploy.mesos.config._
import org.apache.spark.internal.config
import org.apache.spark.network.netty.SparkTransportConf
import org.apache.spark.network.shuffle.mesos.MesosExternalShuffleClient
import org.apache.spark.rpc.RpcEndpointAddress
import org.apache.spark.scheduler.{SlaveLost, TaskSchedulerImpl}
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend
import org.apache.spark.util.Utils
/**
* A SchedulerBackend that runs tasks on Mesos, but uses "coarse-grained" tasks, where it holds
* onto each Mesos node for the duration of the Spark job instead of relinquishing cores whenever
* a task is done. It launches Spark tasks within the coarse-grained Mesos tasks using the
* CoarseGrainedSchedulerBackend mechanism. This class is useful for lower and more predictable
* latency.
*
* Unfortunately this has a bit of duplication from [[MesosFineGrainedSchedulerBackend]],
* but it seems hard to remove this.
*/
private[spark] class MesosCoarseGrainedSchedulerBackend(
scheduler: TaskSchedulerImpl,
sc: SparkContext,
master: String,
securityManager: SecurityManager)
extends CoarseGrainedSchedulerBackend(scheduler, sc.env.rpcEnv)
with org.apache.mesos.Scheduler
with MesosSchedulerUtils {
  // Blacklist a slave after this many failures
  private val MAX_SLAVE_FAILURES = conf.getInt("spark.executor.slave.maxFailures", 5)
  // How long (ms) a blacklisted slave is excluded from launching new tasks.
  private val blacklistPeriod = conf.getTimeAsMs("spark.executor.slave.blacklistPeriod", "10m")
    .ensuring(_ >= 0, "spark.executor.slave.blacklistPeriod must be >= 0")
  private val maxCoresOption = conf.getOption("spark.cores.max").map(_.toInt)
  private val executorCoresOption = conf.getOption("spark.executor.cores").map(_.toInt)
  private val minCoresPerExecutor = executorCoresOption.getOrElse(1)
  // Maximum number of cores to acquire
  private val maxCores = {
    val cores = maxCoresOption.getOrElse(Int.MaxValue)
    // Set maxCores to a multiple of smallest executor we can launch
    cores - (cores % minCoresPerExecutor)
  }
  private val useFetcherCache = conf.getBoolean("spark.mesos.fetcherCache.enable", false)
  // Upper bound on GPUs to acquire across all executors.
  private val maxGpus = conf.getInt("spark.mesos.gpus.max", 0)
  // Labels ("k1:v1,k2:v2") attached to every launched Mesos task.
  private val taskLabels = conf.get("spark.mesos.task.labels", "")
  private[this] val shutdownTimeoutMS =
    conf.getTimeAsMs("spark.mesos.coarse.shutdownTimeout", "10s")
      .ensuring(_ >= 0, "spark.mesos.coarse.shutdownTimeout must be >= 0")
  // Synchronization protected by stateLock
  private[this] var stopCalled: Boolean = false
  // If shuffle service is enabled, the Spark driver will register with the shuffle service.
  // This is for cleaning up shuffle files reliably.
  private val shuffleServiceEnabled = conf.getBoolean("spark.shuffle.service.enabled", false)
  // Cores we have acquired with each Mesos task ID
  private val coresByTaskId = new mutable.HashMap[String, Int]
  private val gpusByTaskId = new mutable.HashMap[String, Int]
  private var totalCoresAcquired = 0
  private var totalGpusAcquired = 0
  // SlaveID -> Slave
  // This map accumulates entries for the duration of the job. Slaves are never deleted, because
  // we need to maintain e.g. failure state and connection state.
  private val slaves = new mutable.HashMap[String, Slave]
  /**
   * The total number of executors we aim to have. Undefined when not using dynamic allocation.
   * Initially set to 0 when using dynamic allocation, the executor allocation manager will send
   * the real initial limit later.
   */
  private var executorLimitOption: Option[Int] = {
    if (Utils.isDynamicAllocationEnabled(conf)) {
      Some(0)
    } else {
      None
    }
  }
  /**
   * Return the current executor limit, which may be [[Int.MaxValue]]
   * before properly initialized.
   */
  private[mesos] def executorLimit: Int = executorLimitOption.getOrElse(Int.MaxValue)
  // private lock object protecting mutable state above. Using the intrinsic lock
  // may lead to deadlocks since the superclass might also try to lock
  private val stateLock = new ReentrantLock
  private val extraCoresPerExecutor = conf.getInt("spark.mesos.extra.cores", 0)
  // Offer constraints
  private val slaveOfferConstraints =
    parseConstraintString(sc.conf.get("spark.mesos.constraints", ""))
  // Reject offers with mismatched constraints in seconds
  private val rejectOfferDurationForUnmetConstraints =
    getRejectOfferDurationForUnmetConstraints(sc.conf)
  // Reject offers when we reached the maximum number of cores for this framework
  private val rejectOfferDurationForReachedMaxCores =
    getRejectOfferDurationForReachedMaxCores(sc.conf)
  // A client for talking to the external shuffle service
  private val mesosExternalShuffleClient: Option[MesosExternalShuffleClient] = {
    if (shuffleServiceEnabled) {
      Some(getShuffleClient())
    } else {
      None
    }
  }
  // This method is factored out for testability
  protected def getShuffleClient(): MesosExternalShuffleClient = {
    new MesosExternalShuffleClient(
      SparkTransportConf.fromSparkConf(conf, "shuffle"),
      securityManager,
      securityManager.isAuthenticationEnabled(),
      conf.get(config.SHUFFLE_REGISTRATION_TIMEOUT))
  }
  // Counter used to mint unique Mesos task ids for this scheduler instance.
  private var nextMesosTaskId = 0
  @volatile var appId: String = _
  private var schedulerDriver: SchedulerDriver = _
  // Returns the next unique task id. NOTE(review): mutates nextMesosTaskId
  // without locking; the visible caller (buildMesosTasks via resourceOffers)
  // runs under stateLock — confirm no other caller exists.
  def newMesosTaskId(): String = {
    val id = nextMesosTaskId
    nextMesosTaskId += 1
    id.toString
  }
override def start() {
super.start()
val startedBefore = IdHelper.startedBefore.getAndSet(true)
val suffix = if (startedBefore) {
f"-${IdHelper.nextSCNumber.incrementAndGet()}%04d"
} else {
""
}
val driver = createSchedulerDriver(
master,
MesosCoarseGrainedSchedulerBackend.this,
sc.sparkUser,
sc.appName,
sc.conf,
sc.conf.getOption("spark.mesos.driver.webui.url").orElse(sc.ui.map(_.webUrl)),
None,
Some(sc.conf.get(DRIVER_FAILOVER_TIMEOUT)),
sc.conf.getOption("spark.mesos.driver.frameworkId").map(_ + suffix)
)
startScheduler(driver)
}
  /**
   * Builds the Mesos CommandInfo that launches a CoarseGrainedExecutorBackend
   * on the offered slave, either from a local Spark home or from an executor
   * URI fetched by Mesos.
   *
   * @param offer    the offer whose host the executor will run on
   * @param numCores cores to advertise to the executor backend
   * @param taskId   Mesos task id, reused as the Spark executor id
   */
  def createCommand(offer: Offer, numCores: Int, taskId: String): CommandInfo = {
    val environment = Environment.newBuilder()
    val extraClassPath = conf.getOption("spark.executor.extraClassPath")
    extraClassPath.foreach { cp =>
      environment.addVariables(
        Environment.Variable.newBuilder().setName("SPARK_EXECUTOR_CLASSPATH").setValue(cp).build())
    }
    val extraJavaOpts = conf.get("spark.executor.extraJavaOptions", "")
    // Set the environment variable through a command prefix
    // to append to the existing value of the variable
    val prefixEnv = conf.getOption("spark.executor.extraLibraryPath").map { p =>
      Utils.libraryPathEnvPrefix(Seq(p))
    }.getOrElse("")
    environment.addVariables(
      Environment.Variable.newBuilder()
        .setName("SPARK_EXECUTOR_OPTS")
        .setValue(extraJavaOpts)
        .build())
    sc.executorEnvs.foreach { case (key, value) =>
      environment.addVariables(Environment.Variable.newBuilder()
        .setName(key)
        .setValue(value)
        .build())
    }
    val command = CommandInfo.newBuilder()
      .setEnvironment(environment)
    val uri = conf.getOption("spark.executor.uri")
      .orElse(Option(System.getenv("SPARK_EXECUTOR_URI")))
    if (uri.isEmpty) {
      // No URI: run spark-class from a Spark home that must exist on the slave.
      val executorSparkHome = conf.getOption("spark.mesos.executor.home")
        .orElse(sc.getSparkHome())
        .getOrElse {
          throw new SparkException("Executor Spark home `spark.mesos.executor.home` is not set!")
        }
      val runScript = new File(executorSparkHome, "./bin/spark-class").getPath
      command.setValue(
        "%s \\"%s\\" org.apache.spark.executor.CoarseGrainedExecutorBackend"
          .format(prefixEnv, runScript) +
        s" --driver-url $driverURL" +
        s" --executor-id $taskId" +
        s" --hostname ${executorHostname(offer)}" +
        s" --cores $numCores" +
        s" --app-id $appId")
    } else {
      // Grab everything to the first '.'. We'll use that and '*' to
      // glob the directory "correctly".
      val basename = uri.get.split('/').last.split('.').head
      command.setValue(
        s"cd $basename*; $prefixEnv " +
        "./bin/spark-class org.apache.spark.executor.CoarseGrainedExecutorBackend" +
        s" --driver-url $driverURL" +
        s" --executor-id $taskId" +
        s" --hostname ${executorHostname(offer)}" +
        s" --cores $numCores" +
        s" --app-id $appId")
      command.addUris(CommandInfo.URI.newBuilder().setValue(uri.get).setCache(useFetcherCache))
    }
    conf.getOption("spark.mesos.uris").foreach(setupUris(_, command, useFetcherCache))
    command.build()
  }
protected def driverURL: String = {
if (conf.contains("spark.testing")) {
"driverURL"
} else {
RpcEndpointAddress(
conf.get("spark.driver.host"),
conf.get("spark.driver.port").toInt,
CoarseGrainedSchedulerBackend.ENDPOINT_NAME).toString
}
}
  // Mesos revoked an outstanding offer; nothing to clean up here.
  override def offerRescinded(d: org.apache.mesos.SchedulerDriver, o: OfferID) {}
  /** Called once Mesos accepts the framework: records the framework/app id and driver. */
  override def registered(
      driver: org.apache.mesos.SchedulerDriver,
      frameworkId: FrameworkID,
      masterInfo: MasterInfo) {
    this.appId = frameworkId.getValue
    this.mesosExternalShuffleClient.foreach(_.init(appId))
    this.schedulerDriver = driver
    markRegistered()
  }
  // Scheduling may begin once the registered core count reaches the configured
  // fraction of the requested maximum.
  override def sufficientResourcesRegistered(): Boolean = {
    totalCoreCount.get >= maxCoresOption.getOrElse(0) * minRegisteredRatio
  }
  override def disconnected(d: org.apache.mesos.SchedulerDriver) {}
  override def reregistered(d: org.apache.mesos.SchedulerDriver, masterInfo: MasterInfo) {}
  /**
   * Method called by Mesos to offer resources on slaves. We respond by launching an executor,
   * unless we've already launched more than we wanted to.
   */
  override def resourceOffers(d: org.apache.mesos.SchedulerDriver, offers: JList[Offer]) {
    stateLock.synchronized {
      if (stopCalled) {
        logDebug("Ignoring offers during shutdown")
        // Driver should simply return a stopped status on race
        // condition between this.stop() and completing here
        offers.asScala.map(_.getId).foreach(d.declineOffer)
        return
      }
      logDebug(s"Received ${offers.size} resource offers.")
      // Split offers by whether their attributes satisfy the configured constraints.
      val (matchedOffers, unmatchedOffers) = offers.asScala.partition { offer =>
        val offerAttributes = toAttributeMap(offer.getAttributesList)
        matchesAttributeRequirements(slaveOfferConstraints, offerAttributes)
      }
      declineUnmatchedOffers(d, unmatchedOffers)
      handleMatchedOffers(d, matchedOffers)
    }
  }
private def declineUnmatchedOffers(
driver: org.apache.mesos.SchedulerDriver, offers: mutable.Buffer[Offer]): Unit = {
offers.foreach { offer =>
declineOffer(
driver,
offer,
Some("unmet constraints"),
Some(rejectOfferDurationForUnmetConstraints))
}
}
  /**
   * Launches executors on accepted offers, and declines unused offers. Executors are launched
   * round-robin on offers.
   *
   * @param driver SchedulerDriver
   * @param offers Mesos offers that match attribute constraints
   */
  private def handleMatchedOffers(
      driver: org.apache.mesos.SchedulerDriver, offers: mutable.Buffer[Offer]): Unit = {
    val tasks = buildMesosTasks(offers)
    for (offer <- offers) {
      val offerAttributes = toAttributeMap(offer.getAttributesList)
      val offerMem = getResource(offer.getResourcesList, "mem")
      val offerCpus = getResource(offer.getResourcesList, "cpus")
      val offerPorts = getRangeResource(offer.getResourcesList, "ports")
      val id = offer.getId.getValue
      if (tasks.contains(offer.getId)) { // accept
        val offerTasks = tasks(offer.getId)
        logDebug(s"Accepting offer: $id with attributes: $offerAttributes " +
          s"mem: $offerMem cpu: $offerCpus ports: $offerPorts." +
          s"  Launching ${offerTasks.size} Mesos tasks.")
        for (task <- offerTasks) {
          val taskId = task.getTaskId
          val mem = getResource(task.getResourcesList, "mem")
          val cpus = getResource(task.getResourcesList, "cpus")
          val ports = getRangeResource(task.getResourcesList, "ports").mkString(",")
          logDebug(s"Launching Mesos task: ${taskId.getValue} with mem: $mem cpu: $cpus" +
            s" ports: $ports")
        }
        driver.launchTasks(
          Collections.singleton(offer.getId),
          offerTasks.asJava)
      } else if (totalCoresAcquired >= maxCores) {
        // Reject an offer for a configurable amount of time to avoid starving other frameworks
        declineOffer(driver,
          offer,
          Some("reached spark.cores.max"),
          Some(rejectOfferDurationForReachedMaxCores))
      } else {
        // Offer could not fit a task right now; decline without a long filter
        // so it may be re-offered soon.
        declineOffer(
          driver,
          offer)
      }
    }
  }
  /**
   * Returns a map from OfferIDs to the tasks to launch on those offers. In order to maximize
   * per-task memory and IO, tasks are round-robin assigned to offers.
   *
   * @param offers Mesos offers that match attribute constraints
   * @return A map from OfferID to a list of Mesos tasks to launch on that offer
   */
  private def buildMesosTasks(offers: mutable.Buffer[Offer]): Map[OfferID, List[MesosTaskInfo]] = {
    // offerID -> tasks
    val tasks = new mutable.HashMap[OfferID, List[MesosTaskInfo]].withDefaultValue(Nil)
    // offerID -> resources
    val remainingResources = mutable.Map(offers.map(offer =>
      (offer.getId.getValue, offer.getResourcesList)): _*)
    var launchTasks = true
    // TODO(mgummelt): combine offers for a single slave
    //
    // round-robin create executors on the available offers
    // Loop until a full pass over all offers launches nothing new.
    while (launchTasks) {
      launchTasks = false
      for (offer <- offers) {
        val slaveId = offer.getSlaveId.getValue
        val offerId = offer.getId.getValue
        val resources = remainingResources(offerId)
        if (canLaunchTask(slaveId, resources)) {
          // Create a task
          launchTasks = true
          val taskId = newMesosTaskId()
          val offerCPUs = getResource(resources, "cpus").toInt
          // Never exceed the global GPU budget (spark.mesos.gpus.max).
          val taskGPUs = Math.min(
            Math.max(0, maxGpus - totalGpusAcquired), getResource(resources, "gpus").toInt)
          val taskCPUs = executorCores(offerCPUs)
          val taskMemory = executorMemory(sc)
          slaves.getOrElseUpdate(slaveId, new Slave(offer.getHostname)).taskIDs.add(taskId)
          val (resourcesLeft, resourcesToUse) =
            partitionTaskResources(resources, taskCPUs, taskMemory, taskGPUs)
          val taskBuilder = MesosTaskInfo.newBuilder()
            .setTaskId(TaskID.newBuilder().setValue(taskId.toString).build())
            .setSlaveId(offer.getSlaveId)
            .setCommand(createCommand(offer, taskCPUs + extraCoresPerExecutor, taskId))
            .setName(s"${sc.appName} $taskId")
            .setLabels(MesosProtoUtils.mesosLabels(taskLabels))
            .addAllResources(resourcesToUse.asJava)
            .setContainer(MesosSchedulerBackendUtil.containerInfo(sc.conf))
          tasks(offer.getId) ::= taskBuilder.build()
          remainingResources(offerId) = resourcesLeft.asJava
          totalCoresAcquired += taskCPUs
          coresByTaskId(taskId) = taskCPUs
          if (taskGPUs > 0) {
            totalGpusAcquired += taskGPUs
            gpusByTaskId(taskId) = taskGPUs
          }
        }
      }
    }
    tasks.toMap
  }
  /** Extracts task needed resources from a list of available resources. */
  private def partitionTaskResources(
      resources: JList[Resource],
      taskCPUs: Int,
      taskMemory: Int,
      taskGPUs: Int)
    : (List[Resource], List[Resource]) = {
    // partition cpus & mem
    // Carving happens in sequence (cpus -> mem -> gpus -> ports), each step
    // operating on what the previous step left over.
    val (afterCPUResources, cpuResourcesToUse) = partitionResources(resources, "cpus", taskCPUs)
    val (afterMemResources, memResourcesToUse) =
      partitionResources(afterCPUResources.asJava, "mem", taskMemory)
    val (afterGPUResources, gpuResourcesToUse) =
      partitionResources(afterMemResources.asJava, "gpus", taskGPUs)
    // If user specifies port numbers in SparkConfig then consecutive tasks will not be launched
    // on the same host. This essentially means one executor per host.
    // TODO: handle network isolator case
    val (nonPortResources, portResourcesToUse) =
      partitionPortResources(nonZeroPortValuesFromConfig(sc.conf), afterGPUResources)
    (nonPortResources,
      cpuResourcesToUse ++ memResourcesToUse ++ portResourcesToUse ++ gpuResourcesToUse)
  }
  /**
   * Whether an executor task can be launched on `slaveId` given the remaining
   * `resources`: enough cpu/mem/ports, under the core and executor limits, and
   * the slave's blacklist period (if any) has expired.
   */
  private def canLaunchTask(slaveId: String, resources: JList[Resource]): Boolean = {
    val offerMem = getResource(resources, "mem")
    val offerCPUs = getResource(resources, "cpus").toInt
    val cpus = executorCores(offerCPUs)
    val mem = executorMemory(sc)
    val ports = getRangeResource(resources, "ports")
    val meetsPortRequirements = checkPorts(sc.conf, ports)
    cpus > 0 &&
      cpus <= offerCPUs &&
      cpus + totalCoresAcquired <= maxCores &&
      mem <= offerMem &&
      numExecutors() < executorLimit &&
      // Unknown slaves have blacklistedAt of 0, so this is trivially true for them.
      (currentTimeMillis() -
        slaves.get(slaveId).map(_.blacklistedAt)
          .getOrElse(0L)) > blacklistPeriod &&
      meetsPortRequirements
  }
private def executorCores(offerCPUs: Int): Int = {
executorCoresOption.getOrElse(
math.min(offerCPUs, maxCores - totalCoresAcquired)
)
}
/**
 * Mesos scheduler callback: invoked whenever the state of one of our tasks changes.
 *
 * Under `stateLock` this (1) lazily registers the driver with the slave's external
 * shuffle service the first time a task reaches RUNNING, and (2) on a terminal state,
 * releases the cores/gpus bookkeeping, applies blacklisting on repeated failures,
 * notifies the driver that the executor is gone, and revives offers.
 */
override def statusUpdate(d: org.apache.mesos.SchedulerDriver, status: TaskStatus) {
  val taskId = status.getTaskId.getValue
  val slaveId = status.getSlaveId.getValue
  val state = mesosToTaskState(status.getState)
  logInfo(s"Mesos task $taskId is now ${status.getState}")
  stateLock.synchronized {
    // NOTE(review): assumes an entry for slaveId was created when the task was
    // launched — a status for an unknown slave would throw here. Verify callers.
    val slave = slaves(slaveId)
    // If the shuffle service is enabled, have the driver register with each one of the
    // shuffle services. This allows the shuffle services to clean up state associated with
    // this application when the driver exits. There is currently not a great way to detect
    // this through Mesos, since the shuffle services are set up independently.
    if (state.equals(TaskState.RUNNING) &&
        shuffleServiceEnabled &&
        !slave.shuffleRegistered) {
      assume(mesosExternalShuffleClient.isDefined,
        "External shuffle client was not instantiated even though shuffle service is enabled.")
      // TODO: Remove this and allow the MesosExternalShuffleService to detect
      // framework termination when new Mesos Framework HTTP API is available.
      val externalShufflePort = conf.getInt("spark.shuffle.service.port", 7337)
      logDebug(s"Connecting to shuffle service on slave $slaveId, " +
          s"host ${slave.hostname}, port $externalShufflePort for app ${conf.getAppId}")
      mesosExternalShuffleClient.get
        .registerDriverWithShuffleService(
          slave.hostname,
          externalShufflePort,
          sc.conf.getTimeAsMs("spark.storage.blockManagerSlaveTimeoutMs",
            s"${sc.conf.getTimeAsMs("spark.network.timeout", "120s")}ms"),
          sc.conf.getTimeAsMs("spark.executor.heartbeatInterval", "10s"))
      // Register only once per slave, regardless of how many tasks run on it.
      slave.shuffleRegistered = true
    }
    if (TaskState.isFinished(state)) {
      // Remove the cores we have remembered for this task, if it's in the hashmap
      for (cores <- coresByTaskId.get(taskId)) {
        totalCoresAcquired -= cores
        coresByTaskId -= taskId
      }
      // Also remove the gpus we have remembered for this task, if it's in the hashmap
      for (gpus <- gpusByTaskId.get(taskId)) {
        totalGpusAcquired -= gpus
        gpusByTaskId -= taskId
      }
      // If it was a failure, mark the slave as failed for blacklisting purposes
      if (TaskState.isFailed(state)) {
        slave.taskFailures += 1
        if (slave.taskFailures >= MAX_SLAVE_FAILURES) {
          // Blacklist and reset the counter so the slave gets a fresh window afterwards.
          slave.blacklistedAt = currentTimeMillis()
          slave.taskFailures = 0
          val blacklistReadableDate = new JDate(slave.blacklistedAt)
          logInfo(s"Blacklisting Mesos slave $slaveId due to " +
            s"too many failures ($MAX_SLAVE_FAILURES) " +
            s"at $blacklistReadableDate; " +
            "is Spark installed on it?")
        }
      }
      executorTerminated(d, slaveId, taskId, s"Executor finished with state $state")
      // In case we'd rejected everything before but have now lost a node
      d.reviveOffers()
    }
  }
}
/**
 * Mesos scheduler callback for unrecoverable framework errors: log the message and
 * forward it to the Spark task scheduler, which decides how to react.
 */
override def error(d: org.apache.mesos.SchedulerDriver, message: String) {
  logError(s"Mesos error: $message")
  scheduler.error(message)
}
/**
 * Shuts the backend down: marks the backend stopped (idempotently, under `stateLock`
 * so no task launch can race the shutdown), waits up to `shutdownTimeoutMS` for
 * executors to drain, then closes the shuffle client and stops the Mesos driver.
 */
override def stop() {
  // Make sure we're not launching tasks during shutdown
  stateLock.synchronized {
    if (stopCalled) {
      logWarning("Stop called multiple times, ignoring")
      return
    }
    stopCalled = true
    super.stop()
  }
  // Wait for executors to report done, or else mesosDriver.stop() will forcefully kill them.
  // See SPARK-12330
  val startTime = System.nanoTime()
  // slaveIdsWithExecutors has no memory barrier, so this is eventually consistent
  while (numExecutors() > 0 &&
    System.nanoTime() - startTime < shutdownTimeoutMS * 1000L * 1000L) {
    Thread.sleep(100)
  }
  if (numExecutors() > 0) {
    logWarning(s"Timed out waiting for ${numExecutors()} remaining executors "
      + s"to terminate within $shutdownTimeoutMS ms. This may leave temporary files "
      + "on the mesos nodes.")
  }
  // Close the mesos external shuffle client if used
  mesosExternalShuffleClient.foreach(_.close())
  // The driver may never have been started (e.g. registration failed).
  if (schedulerDriver != null) {
    schedulerDriver.stop()
  }
}
/** Mesos callback for executor-to-framework messages; intentionally a no-op here. */
override def frameworkMessage(
    d: org.apache.mesos.SchedulerDriver, e: ExecutorID, s: SlaveID, b: Array[Byte]) {}
/**
 * Called when a slave is lost or a Mesos task finished. Updates local view on
 * what tasks are running. It also notifies the driver that an executor was removed.
 *
 * @param d the Mesos driver (part of the callback-style signature; unused here)
 * @param slaveId id of the slave the task was running on
 * @param taskId id of the terminated task (doubles as the executor id)
 * @param reason human-readable reason forwarded to the driver via SlaveLost
 */
private def executorTerminated(
    d: org.apache.mesos.SchedulerDriver,
    slaveId: String,
    taskId: String,
    reason: String): Unit = {
  stateLock.synchronized {
    // Do not call removeExecutor() after this scheduler backend was stopped because
    // removeExecutor() internally will send a message to the driver endpoint but
    // the driver endpoint is not available now, otherwise an exception will be thrown.
    if (!stopCalled) {
      removeExecutor(taskId, SlaveLost(reason))
    }
    // Forget the task locally so numExecutors() reflects the termination.
    // NOTE(review): assumes `slaves` already holds an entry for slaveId — verify callers.
    slaves(slaveId).taskIDs.remove(taskId)
  }
}
/**
 * Mesos callback for a lost slave. Only logs: per-task cleanup is driven by the
 * TASK_LOST status updates that follow (handled in statusUpdate).
 */
override def slaveLost(d: org.apache.mesos.SchedulerDriver, slaveId: SlaveID): Unit = {
  logInfo(s"Mesos slave lost: ${slaveId.getValue}")
}
/**
 * Mesos callback for a lost executor (as opposed to a whole slave). Only logs:
 * task bookkeeping is cleaned up via statusUpdate / executorTerminated.
 */
override def executorLost(
    d: org.apache.mesos.SchedulerDriver, e: ExecutorID, s: SlaveID, status: Int): Unit = {
  // Use string interpolation for consistency with every other log line in this backend
  // (e.g. slaveLost above); output is identical to the previous "%s".format(...) form.
  logInfo(s"Mesos executor lost: ${e.getValue}")
}
/**
 * Returns the Mesos framework id once registration has completed; before that,
 * warns and falls back to the superclass's synthetic application id.
 */
override def applicationId(): String =
  Option(appId).getOrElse {
    logWarning("Application ID is not initialized yet.")
    super.applicationId
  }
/**
 * Records the requested executor total as an upper bound on executors this backend
 * will launch. Always reports success: actual fulfillment depends on offers.
 */
override def doRequestTotalExecutors(requestedTotal: Int): Future[Boolean] =
  Future.successful {
    // We don't truly know if we can fulfill the full amount of executors
    // since at coarse grain it depends on the amount of slaves available.
    logInfo(s"Capping the total amount of executors to $requestedTotal")
    executorLimitOption = Some(requestedTotal)
    true
  }
/**
 * Asks Mesos to kill the tasks backing the given executor ids. Returns false (without
 * touching Mesos) if the driver was never started, true otherwise.
 */
override def doKillExecutors(executorIds: Seq[String]): Future[Boolean] = Future.successful {
  if (schedulerDriver == null) {
    logWarning("Asked to kill executors before the Mesos driver was started.")
    false
  } else {
    // Executor ids are the Mesos task ids we launched them under.
    executorIds
      .map(id => TaskID.newBuilder().setValue(id).build())
      .foreach(schedulerDriver.killTask)
    // no need to adjust `executorLimitOption` since the AllocationManager already communicated
    // the desired limit through a call to `doRequestTotalExecutors`.
    // See [[o.a.s.scheduler.cluster.CoarseGrainedSchedulerBackend.killExecutors]]
    true
  }
}
/** Total number of live executors, counted as tasks tracked across all known slaves. */
private def numExecutors(): Int =
  slaves.values.foldLeft(0)((acc, slave) => acc + slave.taskIDs.size)
/**
 * Hostname the executor should bind to for the given offer: the offer's hostname
 * normally, or the wildcard address when running inside a named CNI network.
 */
private def executorHostname(offer: Offer): String =
  sc.conf.getOption("spark.mesos.network.name") match {
    case Some(_) =>
      // The agent's IP is not visible in a CNI container, so we bind to 0.0.0.0
      "0.0.0.0"
    case None =>
      offer.getHostname
  }
}
/**
 * Per-slave bookkeeping for the backend: which tasks run there, how many have
 * failed consecutively, when it was last blacklisted, and whether the driver has
 * registered with its external shuffle service. Mutable fields are guarded by
 * the enclosing backend's stateLock (see statusUpdate / executorTerminated).
 */
private class Slave(val hostname: String) {
  // Ids of Mesos tasks (== executors) currently on this slave.
  val taskIDs = new mutable.HashSet[String]()
  // Consecutive task failures; reset to 0 when the slave gets blacklisted.
  var taskFailures = 0
  // Epoch millis of the last blacklisting; 0 means never blacklisted.
  var blacklistedAt = 0L
  // True once the driver registered with this slave's external shuffle service.
  var shuffleRegistered = false
}
/** Process-wide counters used to derive unique framework/app identifiers. */
object IdHelper {
  // Use atomic values since Spark contexts can be initialized in parallel
  private[mesos] val nextSCNumber = new AtomicLong(0)
  private[mesos] val startedBefore = new AtomicBoolean(false)
}
| UndeadBaneGitHub/spark | resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala | Scala | apache-2.0 | 27,068 |
// This one blows up with a huge type in Scala 2.
// Reported by Jon Pretty in his talk on Scala type inference.
// Compiler regression test: must stay exactly in this shape — the point is that
// inferring the type of `x` (the LUB of the List and Vector companions) is tractable
// and that the explicit upcast to List[SeqFactory[_]] typechecks.
object Test {
  val x = List(List, Vector)
  val y: List[scala.collection.generic.SeqFactory[_]] = x
}
| lampepfl/dotty | tests/pos-special/jon.scala | Scala | apache-2.0 | 218 |
/**
* Copyright 2015 Frank Austin Nothaft
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.fnothaft.gnocchi
import org.bdgenomics.utils.misc.SparkFunSuite
/**
 * Base suite for Gnocchi Spark tests: fixes the app name and switches the
 * SparkContext to Kryo serialization with the ADAM registrator.
 */
trait GnocchiFunSuite extends SparkFunSuite {
  override val appName: String = "gnocchi"
  // Arrow syntax for readability; entries are identical to the tuple form.
  override val properties: Map[String, String] = Map(
    "spark.serializer" -> "org.apache.spark.serializer.KryoSerializer",
    "spark.kryo.registrator" -> "org.bdgenomics.adam.serialization.ADAMKryoRegistrator",
    "spark.kryo.referenceTracking" -> "true")
}
| bigdatagenomics/gnocchi | GnocchiFunSuite.scala | Scala | apache-2.0 | 1,031 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.carbondata.spark.testsuite.dataload
import java.io.File
import org.apache.spark.sql.Row
import org.apache.spark.sql.common.util.CarbonHiveContext._
import org.apache.spark.sql.common.util.QueryTest
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.scalatest.BeforeAndAfterAll
/**
* Test Class for data loading with hive syntax and old syntax
*
*/
class TestLoadDataWithHiveSyntax extends QueryTest with BeforeAndAfterAll {
override def beforeAll {
sql("drop table if exists escapechar1")
sql("drop table if exists escapechar2")
sql("drop table if exists escapechar3")
sql("drop table if exists specialcharacter1")
sql("drop table if exists specialcharacter2")
sql("drop table if exists collessthanschema")
sql("drop table if exists decimalarray")
sql("drop table if exists decimalstruct")
sql("drop table if exists carbontable")
sql("drop table if exists hivetable")
sql("drop table if exists testtable")
sql("drop table if exists testhivetable")
sql("drop table if exists testtable1")
sql("drop table if exists testhivetable1")
sql("drop table if exists complexcarbontable")
sql("drop table if exists complex_t3")
sql("drop table if exists complex_hive_t3")
sql("drop table if exists header_test")
sql("drop table if exists duplicateColTest")
sql("drop table if exists mixed_header_test")
sql("drop table if exists primitivecarbontable")
sql("drop table if exists UPPERCASEcube")
sql("drop table if exists lowercaseCUBE")
sql("drop table if exists carbontable1")
sql("drop table if exists hivetable1")
sql("drop table if exists comment_test")
sql("drop table if exists smallinttable")
sql("drop table if exists smallinthivetable")
sql(
"CREATE table carbontable (empno int, empname String, designation String, doj String, " +
"workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, " +
"projectcode int, projectjoindate String, projectenddate String, attendance int," +
"utilization int,salary int) STORED BY 'org.apache.carbondata.format'"
)
sql(
"create table hivetable(empno int, empname String, designation string, doj String, " +
"workgroupcategory int, workgroupcategoryname String,deptno int, deptname String, " +
"projectcode int, projectjoindate String,projectenddate String, attendance String," +
"utilization String,salary String)row format delimited fields terminated by ','"
)
}
test("create table with smallint type and query smallint table") {
sql(
"create table smallinttable(empno smallint, empname String, designation string, " +
"doj String, workgroupcategory int, workgroupcategoryname String,deptno int, " +
"deptname String, projectcode int, projectjoindate String,projectenddate String, " +
"attendance String, utilization String,salary String)" +
"STORED BY 'org.apache.carbondata.format'"
)
sql(
"create table smallinthivetable(empno smallint, empname String, designation string, " +
"doj String, workgroupcategory int, workgroupcategoryname String,deptno int, " +
"deptname String, projectcode int, projectjoindate String,projectenddate String, " +
"attendance String, utilization String,salary String)" +
"row format delimited fields terminated by ','"
)
sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO table smallinttable " +
"OPTIONS('USE_KETTLE'='false')")
sql("LOAD DATA local inpath './src/test/resources/datawithoutheader.csv' overwrite " +
"INTO table smallinthivetable")
checkAnswer(
sql("select empno from smallinttable"),
sql("select empno from smallinthivetable")
)
sql("drop table if exists smallinttable")
sql("drop table if exists smallinthivetable")
}
test("test data loading and validate query output") {
//Create test cube and hive table
sql(
"CREATE table testtable (empno string, empname String, designation String, doj String, " +
"workgroupcategory string, workgroupcategoryname String, deptno string, deptname String, " +
"projectcode string, projectjoindate String, projectenddate String,attendance double," +
"utilization double,salary double) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES" +
"('DICTIONARY_EXCLUDE'='empno,empname,designation,doj,workgroupcategory," +
"workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate')"
)
sql(
"create table testhivetable(empno string, empname String, designation string, doj String, " +
"workgroupcategory string, workgroupcategoryname String,deptno string, deptname String, " +
"projectcode string, projectjoindate String,projectenddate String, attendance double," +
"utilization double,salary double)row format delimited fields terminated by ','"
)
//load data into test cube and hive table and validate query result
sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO table testtable")
sql(
"LOAD DATA local inpath './src/test/resources/datawithoutheader.csv' overwrite INTO table " +
"testhivetable"
)
checkAnswer(sql("select * from testtable"), sql("select * from testhivetable"))
//load data incrementally and validate query result
sql(
"LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE testtable OPTIONS" +
"('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
)
sql(
"LOAD DATA local inpath './src/test/resources/datawithoutheader.csv' INTO table testhivetable"
)
checkAnswer(sql("select * from testtable"), sql("select * from testhivetable"))
//drop test cube and table
sql("drop table if exists testtable")
sql("drop table if exists testhivetable")
}
/**
* TODO: temporarily changing cube names to different names,
* however deletion and creation of cube with same name
*/
test("test data loading with different case file header and validate query output") {
//Create test cube and hive table
sql(
"CREATE table testtable1 (empno string, empname String, designation String, doj String, " +
"workgroupcategory string, workgroupcategoryname String, deptno string, deptname String, " +
"projectcode string, projectjoindate String, projectenddate String,attendance double," +
"utilization double,salary double) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES" +
"('DICTIONARY_EXCLUDE'='empno,empname,designation,doj,workgroupcategory," +
"workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate')"
)
sql(
"create table testhivetable1(empno string, empname String, designation string, doj String, " +
"workgroupcategory string, workgroupcategoryname String,deptno string, deptname String, " +
"projectcode string, projectjoindate String,projectenddate String, attendance double," +
"utilization double,salary double)row format delimited fields terminated by ','"
)
//load data into test cube and hive table and validate query result
sql(
"LOAD DATA local inpath './src/test/resources/datawithoutheader.csv' INTO table testtable1 " +
"options('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='EMPno, empname,designation,doj," +
"workgroupcategory,workgroupcategoryname, deptno,deptname,projectcode,projectjoindate," +
"projectenddate, attendance, utilization,SALARY')"
)
sql(
"LOAD DATA local inpath './src/test/resources/datawithoutheader.csv' overwrite INTO table " +
"testhivetable1"
)
checkAnswer(sql("select * from testtable1"), sql("select * from testhivetable1"))
//drop test cube and table
sql("drop table if exists testtable1")
sql("drop table if exists testhivetable1")
}
test("test hive table data loading") {
sql(
"LOAD DATA local inpath './src/test/resources/datawithoutheader.csv' overwrite INTO table " +
"hivetable"
)
sql("LOAD DATA local inpath './src/test/resources/datawithoutheader.csv' INTO table hivetable")
}
test("test carbon table data loading using old syntax") {
sql(
"LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE carbontable OPTIONS" +
"('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
)
}
test("test carbon table data loading using new syntax compatible with hive") {
sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO table carbontable")
sql(
"LOAD DATA local inpath './src/test/resources/data.csv' INTO table carbontable options" +
"('DELIMITER'=',', 'QUOTECHAR'='\"')"
)
}
test("test carbon table data loading using new syntax with overwrite option compatible with hive")
{
try {
sql("LOAD DATA local inpath './src/test/resources/data.csv' overwrite INTO table carbontable")
} catch {
case e: Throwable => {
assert(e.getMessage
.equals("Overwrite is not supported for carbon table with default.carbontable")
)
}
}
}
test("complex types data loading") {
sql("create table complexcarbontable(deviceInformationId int, channelsId string," +
"ROMSize string, purchasedate string, mobile struct<imei:string, imsi:string>," +
"MAC array<string>, locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string, " +
"ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>," +
"proddate struct<productionDate:string,activeDeactivedate:array<string>>, gamePointId " +
"double,contractNumber double) " +
"STORED BY 'org.apache.carbondata.format' " +
"TBLPROPERTIES ('DICTIONARY_INCLUDE'='deviceInformationId')"
)
sql(
"LOAD DATA local inpath './src/test/resources/complexdata.csv' INTO table " +
"complexcarbontable " +
"OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,channelsId," +
"ROMSize,purchasedate,mobile,MAC,locationinfo,proddate,gamePointId,contractNumber'," +
"'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')"
)
sql("drop table if exists complexcarbontable")
}
test(
"complex types data loading with more unused columns and different order of complex columns " +
"in csv and create table"
) {
sql("create table complexcarbontable(deviceInformationId int, channelsId string," +
"mobile struct<imei:string, imsi:string>, ROMSize string, purchasedate string," +
"MAC array<string>, locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string, " +
"ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>," +
"proddate struct<productionDate:string,activeDeactivedate:array<string>>, gamePointId " +
"double,contractNumber double) " +
"STORED BY 'org.apache.carbondata.format' " +
"TBLPROPERTIES ('DICTIONARY_INCLUDE'='deviceInformationId','DICTIONARY_EXCLUDE'='channelsId')"
)
sql(
"LOAD DATA local inpath './src/test/resources/complextypediffentcolheaderorder.csv' INTO " +
"table complexcarbontable " +
"OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,channelsId," +
"ROMSize,purchasedate,MAC,abc,mobile,locationinfo,proddate,gamePointId,contractNumber'," +
"'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')"
)
sql("select count(*) from complexcarbontable")
sql("drop table if exists complexcarbontable")
}
test("test carbon table data loading with csv file Header in caps") {
sql("drop table if exists header_test")
sql(
"create table header_test(empno int, empname String, designation string, doj String, " +
"workgroupcategory int, workgroupcategoryname String,deptno int, deptname String, " +
"projectcode int, projectjoindate String,projectenddate String, attendance String," +
"utilization String,salary String) STORED BY 'org.apache.carbondata.format'"
)
val currentDirectory = new File(this.getClass.getResource("/").getPath + "/../../")
.getCanonicalPath
val csvFilePath = currentDirectory + "/src/test/resources/data_withCAPSHeader.csv"
sql("LOAD DATA local inpath '" + csvFilePath + "' INTO table header_test OPTIONS " +
"('DELIMITER'=',', 'QUOTECHAR'='\"')");
checkAnswer(sql("select empno from header_test"),
Seq(Row(11), Row(12))
)
}
test("test duplicate column validation") {
try {
sql("create table duplicateColTest(col1 string, Col1 string)")
}
catch {
case e: Exception => {
assert(e.getMessage.contains("Duplicate column name"))
}
}
}
test(
"test carbon table data loading with csv file Header in Mixed Case and create table columns " +
"in mixed case"
) {
sql("drop table if exists mixed_header_test")
sql(
"create table mixed_header_test(empno int, empname String, Designation string, doj String, " +
"Workgroupcategory int, workgroupcategoryname String,deptno int, deptname String, " +
"projectcode int, projectjoindate String,projectenddate String, attendance String," +
"utilization String,salary String) STORED BY 'org.apache.carbondata.format'"
)
val currentDirectory = new File(this.getClass.getResource("/").getPath + "/../../")
.getCanonicalPath
val csvFilePath = currentDirectory + "/src/test/resources/data_withMixedHeader.csv"
sql("LOAD DATA local inpath '" + csvFilePath + "' INTO table mixed_header_test OPTIONS " +
"('DELIMITER'=',', 'QUOTECHAR'='\"')");
checkAnswer(sql("select empno from mixed_header_test"),
Seq(Row(11), Row(12))
)
}
test("complex types data loading with hive column having more than required column values") {
sql("create table complexcarbontable(deviceInformationId int, channelsId string," +
"ROMSize string, purchasedate string, mobile struct<imei:string, imsi:string>," +
"MAC array<string>, locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string, " +
"ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>," +
"proddate struct<productionDate:string,activeDeactivedate:array<string>>, gamePointId " +
"double,contractNumber double) " +
"STORED BY 'org.apache.carbondata.format' " +
"TBLPROPERTIES ('DICTIONARY_INCLUDE'='deviceInformationId')"
)
sql(
"LOAD DATA local inpath './src/test/resources/complexdatastructextra.csv' INTO table " +
"complexcarbontable " +
"OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,channelsId," +
"ROMSize,purchasedate,mobile,MAC,locationinfo,proddate,gamePointId,contractNumber'," +
"'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')"
)
sql("drop table if exists complexcarbontable")
}
test("complex types & no dictionary columns data loading") {
sql("create table complexcarbontable(deviceInformationId int, channelsId string," +
"ROMSize string, purchasedate string, mobile struct<imei:string, imsi:string>," +
"MAC array<string>, locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string, " +
"ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>," +
"proddate struct<productionDate:string,activeDeactivedate:array<string>>, gamePointId " +
"double,contractNumber double) " +
"STORED BY 'org.apache.carbondata.format' " +
"TBLPROPERTIES ('DICTIONARY_INCLUDE'='deviceInformationId', 'DICTIONARY_EXCLUDE'='ROMSize," +
"purchasedate')"
)
sql(
"LOAD DATA local inpath './src/test/resources/complexdata.csv' INTO table " +
"complexcarbontable " +
"OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,channelsId," +
"ROMSize,purchasedate,mobile,MAC,locationinfo,proddate,gamePointId,contractNumber'," +
"'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')"
);
sql("drop table if exists complexcarbontable")
}
test("array<string> and string datatype for same column is not working properly") {
sql("create table complexcarbontable(deviceInformationId int, MAC array<string>, channelsId string, "+
"ROMSize string, purchasedate string, gamePointId double,contractNumber double) STORED BY 'org.apache.carbondata.format' "+
"TBLPROPERTIES ('DICTIONARY_INCLUDE'='deviceInformationId')")
sql("LOAD DATA local inpath './src/test/resources/complexdatareordered.csv' INTO table complexcarbontable "+
"OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,MAC,channelsId,ROMSize,purchasedate,gamePointId,contractNumber',"+
"'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')")
sql("drop table if exists complexcarbontable")
sql("create table primitivecarbontable(deviceInformationId int, MAC string, channelsId string, "+
"ROMSize string, purchasedate string, gamePointId double,contractNumber double) STORED BY 'org.apache.carbondata.format' "+
"TBLPROPERTIES ('DICTIONARY_INCLUDE'='deviceInformationId')")
sql("LOAD DATA local inpath './src/test/resources/complexdatareordered.csv' INTO table primitivecarbontable "+
"OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,MAC,channelsId,ROMSize,purchasedate,gamePointId,contractNumber',"+
"'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')")
sql("drop table if exists primitivecarbontable")
}
test(
"test carbon table data loading when table name is in different case with create table, for " +
"UpperCase"
) {
sql("create table UPPERCASEcube(empno Int, empname String, designation String, " +
"doj String, workgroupcategory Int, workgroupcategoryname String, deptno Int, " +
"deptname String, projectcode Int, projectjoindate String, projectenddate String, " +
"attendance Int,utilization Double,salary Double) STORED BY 'org.apache.carbondata.format'"
)
sql(
"LOAD DATA local inpath './src/test/resources/data.csv' INTO table uppercasecube OPTIONS" +
"('DELIMITER'=',', 'QUOTECHAR'='\"')"
)
sql("drop table if exists UpperCaseCube")
}
test(
"test carbon table data loading when table name is in different case with create table ,for " +
"LowerCase"
) {
sql("create table lowercaseCUBE(empno Int, empname String, designation String, " +
"doj String, workgroupcategory Int, workgroupcategoryname String, deptno Int, " +
"deptname String, projectcode Int, projectjoindate String, projectenddate String, " +
"attendance Int,utilization Double,salary Double) STORED BY 'org.apache.carbondata.format'"
)
sql(
"LOAD DATA local inpath './src/test/resources/data.csv' INTO table LOWERCASECUBE OPTIONS" +
"('DELIMITER'=',', 'QUOTECHAR'='\"')"
)
sql("drop table if exists LowErcasEcube")
}
test("test carbon table data loading using escape char 1") {
sql("DROP TABLE IF EXISTS escapechar1")
sql(
"""
CREATE TABLE IF NOT EXISTS escapechar1
(ID Int, date Timestamp, country String,
name String, phonetype String, serialname String, salary Int)
STORED BY 'org.apache.carbondata.format'
"""
)
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
sql(
s"""
LOAD DATA LOCAL INPATH './src/test/resources/datawithbackslash.csv' into table escapechar1
OPTIONS('ESCAPECHAR'='@')
"""
)
checkAnswer(sql("select count(*) from escapechar1"), Seq(Row(10)))
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
sql("DROP TABLE IF EXISTS escapechar1")
}
test("test carbon table data loading using escape char 2") {
sql("DROP TABLE IF EXISTS escapechar2")
sql(
"""
CREATE TABLE escapechar2(imei string,specialchar string)
STORED BY 'org.apache.carbondata.format'
"""
)
sql(
"""
LOAD DATA LOCAL INPATH './src/test/resources/datawithescapecharacter.csv' into table escapechar2
options ('DELIMITER'=',', 'QUOTECHAR'='"','ESCAPECHAR'='\')
"""
)
checkAnswer(sql("select count(*) from escapechar2"), Seq(Row(21)))
checkAnswer(sql("select specialchar from escapechar2 where imei = '1AA44'"), Seq(Row("escapeesc")))
sql("DROP TABLE IF EXISTS escapechar2")
}
test("test carbon table data loading using escape char 3") {
sql("DROP TABLE IF EXISTS escapechar3")
sql(
"""
CREATE TABLE escapechar3(imei string,specialchar string)
STORED BY 'org.apache.carbondata.format'
"""
)
sql(
"""
LOAD DATA LOCAL INPATH './src/test/resources/datawithescapecharacter.csv' into table escapechar3
options ('DELIMITER'=',', 'QUOTECHAR'='"','ESCAPECHAR'='@')
"""
)
checkAnswer(sql("select count(*) from escapechar3"), Seq(Row(21)))
checkAnswer(sql("select specialchar from escapechar3 where imei in ('1232','12323')"), Seq(Row
("ayush@b.com"), Row("ayushb.com")
)
)
sql("DROP TABLE IF EXISTS escapechar3")
}
test("test carbon table data loading with special character 1") {
sql("DROP TABLE IF EXISTS specialcharacter1")
sql(
"""
CREATE TABLE specialcharacter1(imei string,specialchar string)
STORED BY 'org.apache.carbondata.format'
"""
)
sql(
"""
LOAD DATA LOCAL INPATH './src/test/resources/datawithspecialcharacter.csv' into table specialcharacter1
options ('DELIMITER'=',', 'QUOTECHAR'='"')
"""
)
checkAnswer(sql("select count(*) from specialcharacter1"), Seq(Row(37)))
checkAnswer(sql("select specialchar from specialcharacter1 where imei='1AA36'"), Seq(Row("\"i\"")))
sql("DROP TABLE IF EXISTS specialcharacter1")
}
test("test carbon table data loading with special character 2") {
sql("DROP TABLE IF EXISTS specialcharacter2")
sql(
"""
CREATE table specialcharacter2(customer_id int, 124_string_level_province String, date_level String,
Time_level String, lname String, fname String, mi String, address1 String, address2
String, address3 String, address4 String, city String, country String, phone1 String,
phone2 String, marital_status String, yearly_income String, gender String, education
String, member_card String, occupation String, houseowner String, fullname String,
numeric_level double, account_num double, customer_region_id int, total_children int,
num_children_at_home int, num_cars_owned int)
STORED BY 'org.apache.carbondata.format'
"""
)
sql(
"""
LOAD DATA LOCAL INPATH './src/test/resources/datawithcomplexspecialchar.csv' into
table specialcharacter2 options ('DELIMITER'=',', 'QUOTECHAR'='"','ESCAPECHAR'='"')
"""
)
checkAnswer(sql("select count(*) from specialcharacter2"), Seq(Row(150)))
checkAnswer(sql("select 124_string_level_province from specialcharacter2 where customer_id=103"),
Seq(Row("\"state province # 124\""))
)
sql("DROP TABLE IF EXISTS specialcharacter2")
}
test("test data which contain column less than schema"){
sql("DROP TABLE IF EXISTS collessthanschema")
sql(
"""
CREATE TABLE IF NOT EXISTS collessthanschema
(ID Int, date Timestamp, country String,
name String, phonetype String, serialname String, salary Int)
STORED BY 'org.apache.carbondata.format'
""")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
sql(s"""
LOAD DATA LOCAL INPATH './src/test/resources/lessthandatacolumndata.csv' into table collessthanschema
""")
checkAnswer(sql("select count(*) from collessthanschema"),Seq(Row(10)))
sql("DROP TABLE IF EXISTS collessthanschema")
}
test("test data which contain column with decimal data type in array."){
sql("DROP TABLE IF EXISTS decimalarray")
sql(
"""
CREATE TABLE IF NOT EXISTS decimalarray
(ID decimal(5,5), date Timestamp, country String,
name String, phonetype String, serialname String, salary Int, complex
array<decimal(4,2)>)
STORED BY 'org.apache.carbondata.format'
"""
)
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
sql(s"""
LOAD DATA LOCAL INPATH './src/test/resources/complexTypeDecimal.csv' into table decimalarray
""")
checkAnswer(sql("select count(*) from decimalarray"),Seq(Row(8)))
sql("DROP TABLE IF EXISTS decimalarray")
}
test("test data which contain column with decimal data type in struct."){
sql("DROP TABLE IF EXISTS decimalstruct")
sql(
"""
CREATE TABLE IF NOT EXISTS decimalstruct
(ID decimal(5,5), date Timestamp, country String,
name String, phonetype String, serialname String, salary Int, complex
struct<a:decimal(4,2)>)
STORED BY 'org.apache.carbondata.format'
"""
)
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
sql(s"""
LOAD DATA LOCAL INPATH './src/test/resources/complexTypeDecimal.csv' into table decimalstruct
""")
checkAnswer(sql("select count(*) from decimalstruct"),Seq(Row(8)))
sql("DROP TABLE IF EXISTS decimalstruct")
}
  // Loads the same nested array<struct<decimal,string>> data into a carbon table
  // and a plain hive table, then checks both agree on a count and a point query.
  // The two tables are dropped in afterAll, not here.
  test("test data which contain column with decimal data type in array of struct."){
    sql("DROP TABLE IF EXISTS complex_t3")
    sql("DROP TABLE IF EXISTS complex_hive_t3")
    sql(
      """
        CREATE TABLE complex_t3
        (ID decimal, date Timestamp, country String,
        name String, phonetype String, serialname String, salary Int, complex
        array<struct<a:decimal(4,2),str:string>>)
        STORED BY 'org.apache.carbondata.format'
      """
    )
    sql(
      """
        CREATE TABLE complex_hive_t3
        (ID decimal, date Timestamp, country String,
        name String, phonetype String, serialname String, salary Int, complex
        array<struct<a:decimal(4,2),str:string>>)
        row format delimited fields terminated by ','
      """
    )
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
    sql(s"""
       LOAD DATA LOCAL INPATH './src/test/resources/complexTypeDecimalNested.csv' into table complex_t3
       """)
    sql(s"""
       LOAD DATA LOCAL INPATH './src/test/resources/complexTypeDecimalNestedHive.csv' into table complex_hive_t3
       """)
    // Hive behaves as the reference implementation; carbon must match it.
    checkAnswer(sql("select count(*) from complex_t3"),sql("select count(*) from complex_hive_t3"))
    checkAnswer(sql("select id from complex_t3 where salary = 15000"),sql("select id from complex_hive_t3 where salary = 15000"))
  }
  // Loads '|'-delimited data into carbon and ','-delimited data into hive,
  // then compares the full contents of the two tables.
  test("test data loading when delimiter is '|' and data with header") {
    sql(
      "CREATE table carbontable1 (empno string, empname String, designation String, doj String, " +
      "workgroupcategory string, workgroupcategoryname String, deptno string, deptname String, " +
      "projectcode string, projectjoindate String, projectenddate String,attendance double," +
      "utilization double,salary double) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES" +
      "('DICTIONARY_EXCLUDE'='empno,empname,designation,doj,workgroupcategory," +
      "workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate')"
    )
    sql(
      "create table hivetable1 (empno string, empname String, designation string, doj String, " +
      "workgroupcategory string, workgroupcategoryname String,deptno string, deptname String, " +
      "projectcode string, projectjoindate String,projectenddate String, attendance double," +
      "utilization double,salary double)row format delimited fields terminated by ','"
    )
    sql(
      "LOAD DATA local inpath './src/test/resources/datadelimiter.csv' INTO TABLE carbontable1 OPTIONS" +
      "('DELIMITER'= '|', 'QUOTECHAR'= '\"')"
    )
    sql("LOAD DATA local inpath './src/test/resources/datawithoutheader.csv' INTO table hivetable1")
    checkAnswer(sql("select * from carbontable1"), sql("select * from hivetable1"))
  }
  // Verifies the COMMENTCHAR load option: lines whose first character is '?'
  // must be skipped during load.
  test("test data loading with comment option") {
    sql("drop table if exists comment_test")
    sql(
      "create table comment_test(imei string, age int, task bigint, num double, level decimal(10," +
      "3), productdate timestamp, mark int, name string) STORED BY 'org.apache.carbondata.format'"
    )
    sql(
      "LOAD DATA local inpath './src/test/resources/comment.csv' INTO TABLE comment_test " +
      "options('DELIMITER' = ',', 'QUOTECHAR' = '.', 'COMMENTCHAR' = '?','FILEHEADER'='imei,age,task,num,level,productdate,mark,name')"
    )
    // "#?carbon" survives because '?' only comments out a line when it is the
    // line's first character.
    checkAnswer(sql("select imei from comment_test"),Seq(Row("\".carbon"),Row("#?carbon"), Row(""),
      Row("~carbon,")))
  }
  // Drops every table created anywhere in this suite so reruns start clean.
  // Each drop uses IF EXISTS, so missing tables are not an error.
  override def afterAll {
    sql("drop table if exists escapechar1")
    sql("drop table if exists escapechar2")
    sql("drop table if exists escapechar3")
    sql("drop table if exists specialcharacter1")
    sql("drop table if exists specialcharacter2")
    sql("drop table if exists collessthanschema")
    sql("drop table if exists decimalarray")
    sql("drop table if exists decimalstruct")
    sql("drop table if exists carbontable")
    sql("drop table if exists hivetable")
    sql("drop table if exists testtable")
    sql("drop table if exists testhivetable")
    sql("drop table if exists testtable1")
    sql("drop table if exists testhivetable1")
    sql("drop table if exists complexcarbontable")
    sql("drop table if exists complex_t3")
    sql("drop table if exists complex_hive_t3")
    sql("drop table if exists header_test")
    sql("drop table if exists duplicateColTest")
    sql("drop table if exists mixed_header_test")
    sql("drop table if exists primitivecarbontable")
    sql("drop table if exists UPPERCASEcube")
    sql("drop table if exists lowercaseCUBE")
    sql("drop table if exists carbontable1")
    sql("drop table if exists hivetable1")
    sql("drop table if exists comment_test")
  }
}
| ashokblend/incubator-carbondata | integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithHiveSyntax.scala | Scala | apache-2.0 | 31,619 |
package org.salgo.sorting
import scala.annotation.tailrec
import scala.reflect.ClassTag
object BubbleSort extends GeneralFunctionalSortingAlgorithm {

  /** Sorts `seq` in ascending order with a purely functional bubble sort. */
  override def sort[T <: Any : ClassTag](seq: Seq[T])(implicit ev: (T) => Ordered[T]): Seq[T] =
    this.sort(seq, Nil, Nil, swapped = false)

  /**
   * Performs bubbling passes tail-recursively.
   *
   * @param remaining elements still to be compared in the current pass
   * @param passed    elements already visited during the current pass
   * @param settled   suffix of the result that is already in final position
   * @param swapped   whether the current pass has performed a swap so far
   */
  @tailrec
  private def sort[T <: Any : ClassTag](remaining: Seq[T], passed: Seq[T], settled: Seq[T], swapped: Boolean)(implicit ev: T => Ordered[T]): Seq[T] =
    remaining match {
      case Nil =>
        settled
      case last :: Nil =>
        // End of a pass: if anything moved, bubble again over the visited
        // prefix; otherwise that prefix is already sorted and we are done.
        if (swapped) this.sort(passed, Nil, last +: settled, swapped = false)
        else passed ++ (last +: settled)
      case a :: b :: rest =>
        // Keep the larger of the two leading elements bubbling to the right.
        if (a > b) this.sort(a :: rest, passed :+ b, settled, swapped = true)
        else this.sort(b :: rest, passed :+ a, settled, swapped)
    }
}
| ascensio/salgo | src/org.salgo/sorting/BubbleSort.scala | Scala | apache-2.0 | 822 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package patterns
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import org.jetbrains.plugins.scala.lang.parser.parsing.expressions.PostfixExpr
/**
* @author Alexander Podkhalyuzin
* Date: 28.02.2008
*/
/** Default [[Guard]] instance wired to the standard postfix-expression parser. */
object Guard extends Guard {
  override protected val postfixExpr = PostfixExpr
}
trait Guard {
  // Parser for the boolean expression that follows the `if` keyword.
  protected val postfixExpr: PostfixExpr
  // Parses a guard where the leading `if` keyword is mandatory.
  def parse(builder: ScalaPsiBuilder): Boolean = parse(builder, noIf = false) //deprecated if true
def parse(builder: ScalaPsiBuilder, noIf: Boolean): Boolean = {
val guardMarker = builder.mark
builder.getTokenType match {
case ScalaTokenTypes.kIF =>
builder.advanceLexer //Ate if
case _ =>
if (!noIf) {
guardMarker.drop()
return false
}
}
if (!postfixExpr.parse(builder)) {
if (noIf) {
guardMarker.drop()
return false
}
builder error ErrMsg("wrong.postfix.expression")
}
guardMarker.done(ScalaElementTypes.GUARD)
return true
}
} | whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/lang/parser/parsing/patterns/Guard.scala | Scala | apache-2.0 | 1,168 |
package epic.features
import epic.parser.RefinedFeaturizer
import epic.framework.Feature
import breeze.util.Index
import epic.trees.Rule
import epic.parser.RuleTopology
import epic.trees.TreeInstance
import epic.trees.AnnotatedLabel
import scala.collection.mutable.HashMap
import breeze.linalg.Counter2
import breeze.linalg.Counter
import breeze.linalg._
import epic.util.Arrays
/**
 * Emits lexicalized features for binary rules: the head word of each child
 * span is guessed with a [[HackyHeadFinder]] over most-frequent-tag
 * preterminals, and rule features are conjoined with head words/tags and the
 * binned distance between the two heads.
 *
 * @param wordTagCounts       counts of (tag, word) pairs from training data
 * @param topology            grammar topology used to resolve rule children
 * @param featsDesc           switches feature sets on by substring
 *                            ("lexical" / "ultralexical")
 * @param wordThreshold       minimum count for a word to be used (vs RareToken)
 * @param commonWordThreshold minimum count for a word to replace its tag in
 *                            head-pair features
 */
class HackyLexicalProductionFeaturizer(wordTagCounts: Counter2[String, String, Double],
                                       topology: RuleTopology[AnnotatedLabel],
                                       featsDesc: String,
                                       hackyHeadFinder: HackyHeadFinder[String,String] = new RuleBasedHackyHeadFinder,
                                       db: DistanceBinner = DistanceBinner(),
                                       wordThreshold: Int = 5,
                                       commonWordThreshold: Int = 100) extends RuleAndSpansFeaturizer[String] {
  // Total count per word and each word's most frequent tag, precomputed once
  // at construction from wordTagCounts.
  private val wordCounts = Counter[String,Double];
  private val wordToTagMap = new HashMap[String,String];
  for (word <- wordTagCounts.keysIterator.map(_._2).toSeq.distinct) {
    wordCounts(word) = sum(wordTagCounts(::, word));
    if (!wordToTagMap.contains(word)) {
      // Argmax over tag counts; ties keep the first tag that reached the max.
      val tagCounts = wordTagCounts(::, word).iterator;
      var bestTag = HackyLexicalProductionFeaturizer.UnkTag;
      var bestTagCount = 0.0;
      for ((tag, count) <- tagCounts) {
        if (count > bestTagCount) {
          bestTag = tag;
          bestTagCount = count;
        }
      }
      wordToTagMap.put(word, bestTag);
    }
  }
  // Most frequent tag for a word, or UnkTag if the word was never observed.
  def tag(word: String) = if (wordToTagMap.contains(word)) wordToTagMap(word) else HackyLexicalProductionFeaturizer.UnkTag;
  val emptyArray = Array[Feature]();
  def anchor(w: IndexedSeq[String]) = new Anchoring {
    def words: IndexedSeq[String] = w;
    // Only binary rules get features; unary rules and spans return nothing.
    def featuresForBinaryRule(begin: Int, split: Int, end: Int, rule: Int, ref: Int):Array[Feature] = {
      // Predicted preterminal for every word in [begin, end).
      val preterminals = new Array[String](end - begin);
      for (i <- begin until end) {
        preterminals(i - begin) = tag(words(i));
      }
      // Head index of each child span, found from the child's base label.
      val lc = topology.labelIndex.get(topology.leftChild(rule)).baseLabel;
      val rc = topology.labelIndex.get(topology.rightChild(rule)).baseLabel;
      val lcHeadIdx = begin + hackyHeadFinder.findHead(lc, preterminals.slice(0, split - begin));
      val rcHeadIdx = split + hackyHeadFinder.findHead(rc, preterminals.slice(split - begin, end - begin));
      val lcHeadWord = words(lcHeadIdx);
      val lcHeadTag = tag(words(lcHeadIdx));
      val rcHeadWord = words(rcHeadIdx);
      val rcHeadTag = tag(words(rcHeadIdx));
      val distance = db.binnedDistance(lcHeadIdx, rcHeadIdx);
      // It doesn't really make sense to back off to tag features here since the tags
      // will fail when the words are rare...
      // Head-pair features: words replace tags only above commonWordThreshold.
      // NOTE(review): featsDesc containing "ultralexical" also contains
      // "lexical", so the first branch shadows the second — confirm intended.
      val otherFeats: Array[Feature] = if (featsDesc.contains("lexical")) {
        Array(HeadPairDistanceRuleFeature(rule, lcHeadTag, rcHeadTag, distance),
              HeadPairDistanceRuleFeature(rule, lcHeadTag, if (wordCounts(rcHeadWord) >= commonWordThreshold) rcHeadWord else rcHeadTag, distance),
              HeadPairDistanceRuleFeature(rule, if (wordCounts(lcHeadWord) >= commonWordThreshold) lcHeadWord else lcHeadTag, rcHeadTag, distance));
      } else if (featsDesc.contains("ultralexical")) {
        Array(HeadPairDistanceRuleFeature(rule, lcHeadTag, rcHeadTag, distance),
              HeadPairDistanceRuleFeature(rule, lcHeadTag, if (wordCounts(rcHeadWord) >= commonWordThreshold) rcHeadWord else rcHeadTag, distance),
              HeadPairDistanceRuleFeature(rule, if (wordCounts(lcHeadWord) >= commonWordThreshold) lcHeadWord else lcHeadTag, rcHeadTag, distance),
              HeadPairDistanceRuleFeature(rule, if (wordCounts(lcHeadWord) >= commonWordThreshold) lcHeadWord else lcHeadTag, if (wordCounts(rcHeadWord) >= commonWordThreshold) rcHeadWord else rcHeadTag, distance));
      } else {
        Array[Feature]();
      }
      // Per-side features are always emitted; rare words fall back to RareToken.
      Arrays.concatenate(otherFeats,
              Array(LeftTagDistanceRuleFeature(rule, lcHeadTag, distance),
              LeftHeadDistanceRuleFeature(rule, if (wordCounts(lcHeadWord) >= wordThreshold) lcHeadWord else HackyLexicalProductionFeaturizer.RareToken, distance),
              RightTagDistanceRuleFeature(rule, rcHeadTag, distance),
              RightHeadDistanceRuleFeature(rule, if (wordCounts(rcHeadWord) >= wordThreshold) rcHeadWord else HackyLexicalProductionFeaturizer.RareToken, distance)));
    }
    def featuresForUnaryRule(begin: Int, end: Int, rule: Int, ref: Int):Array[Feature] = emptyArray;
    def featuresForSpan(begin: Int, end: Int, tag: Int, ref: Int):Array[Feature] = emptyArray;
  }
}
/** Rule conjoined with the left child's head tag and the binned head distance. */
case class LeftTagDistanceRuleFeature(rule: Int, ltag: String, distance: Int) extends Feature
/** Rule conjoined with the left child's head word (or rare marker) and distance. */
case class LeftHeadDistanceRuleFeature(rule: Int, lsuff: String, distance: Int) extends Feature
/** Rule conjoined with the right child's head tag and the binned head distance. */
case class RightTagDistanceRuleFeature(rule: Int, rtag: String, distance: Int) extends Feature
/** Rule conjoined with the right child's head word (or rare marker) and distance. */
case class RightHeadDistanceRuleFeature(rule: Int, rsuff: String, distance: Int) extends Feature
/** Rule conjoined with both heads and the binned distance between them. */
case class HeadPairDistanceRuleFeature(rule: Int, lsuff: String, rsuff: String, distance: Int) extends Feature
object HackyLexicalProductionFeaturizer {
  /** Fallback POS tag used for words never seen in the tag counts. */
  val UnkTag = "NN"
  /** Placeholder substituted for words below the frequency threshold. */
  val RareToken = "<RARE>"
}
| briantopping/epic | src/main/scala/epic/features/HackyLexicalProductionFeaturizer.scala | Scala | apache-2.0 | 5,421 |
package unfiltered
case class Cookie(name: String, value: String, domain: Option[String], path: Option[String], maxAge: Option[Int], secure: Option[Boolean]) {
def domain(d: String): Cookie = Cookie(name, value, Some(d), path, maxAge, secure)
def path(p: String): Cookie = Cookie(name, value, domain, Some(p), maxAge, secure)
def maxAge(a: Int): Cookie = Cookie(name, value, domain, path, Some(a), secure)
def secure(s: Boolean): Cookie = Cookie(name, value, domain, path, maxAge, Some(s))
}
object Cookie {
def apply(name: String, value: String) = new Cookie(name, value, None, None, None, None)
} | softprops/Unfiltered | library/src/main/scala/cookies.scala | Scala | mit | 610 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.collector.sampler.adaptive
import com.twitter.common.base.ExceptionalCommand
import com.twitter.common.zookeeper.Group.JoinException
import com.twitter.common.zookeeper._
import com.twitter.logging.Logger
import com.twitter.ostrich.stats.Stats
import com.twitter.util.{TimerTask, Duration}
import com.twitter.zipkin.collector.sampler.adaptive.policy.LeaderPolicy
import com.twitter.zipkin.config.sampler.adaptive.ZooKeeperAdaptiveSamplerConfig
import scala.collection.JavaConverters._
/**
* Adjusts the sample rate for the collector nodes depending on the traffic
*
* Keeps a window of values for the past M minutes.
* - If the last reported total traffic is an outlier (outside a threshold around
* the target storage request rate) and have had more than _N_
* consecutive outliers, then adjust the sample rate.
* - If its not an outlier for _L_ minutes, and the mean is outside the threshold,
* adjust the sample rate.
*/
trait ZooKeeperAdaptiveLeader extends Candidate.Leader {
  val log = Logger.get("adaptivesampler")

  // Ostrich counters exposing leader activity for monitoring.
  val exceptionCounter = Stats.getCounter("collector.sampler.adaptive.leader.exception")
  val CounterOutliers = Stats.getCounter("collector.sampler.adaptive.leader.outliers")
  val CounterInliers = Stats.getCounter("collector.sampler.adaptive.leader.inliers")
  val CounterOutliersAdjust = Stats.getCounter("collector.sampler.adaptive.leader.outliers.adjust")
  val CounterOutliersNop = Stats.getCounter("collector.sampler.adaptive.leader.outliers.nop")
  val CounterInliersAdjust = Stats.getCounter("collector.sampler.adaptive.leader.inliers.adjust")
  val CounterInliersNop = Stats.getCounter("collector.sampler.adaptive.leader.inliers.nop")

  /** Config with timer, Zookeeper client, sampleRateConfig, storageRequestRateConfig */
  val config: ZooKeeperAdaptiveSamplerConfig

  /** Group for reporting */
  val reportGroup: Group

  /** Group for leader election */
  val leaderGroup: Group

  // Window length of the traffic buffer; with pollInterval it fixes buf's size.
  val bufferSize: Duration
  // How often the timer task samples the report group and (if leader) acts.
  val pollInterval: Duration
  // Decides, from the buffered traffic, whether to change the sample rate.
  val leaderPolicy: LeaderPolicy[BoundedBuffer]

  // Set/cleared by ZooKeeper election callbacks; read by the timer task.
  @volatile var isLeader = false

  // Handle of the scheduled poll task, kept so shutdown() can cancel it.
  var timerTask: Option[TimerTask] = None

  lazy val buf: BoundedBuffer = new BoundedBuffer {
    val maxLength = bufferSize.inSeconds / pollInterval.inSeconds
  }

  // NOTE(review): leaderSR appears unused in this trait — confirm before removal.
  @volatile var leaderSR = 0.0

  Stats.addGauge("collector.sampler.adaptive.leader.latest") { buf.latest }

  /**
   * Enters the leader election and schedules the periodic task that samples
   * the report group and, when this node is the leader, applies the policy.
   */
  def start() {
    log.info("ZKLeader: start")
    val candidateImpl: CandidateImpl = new CandidateImpl(leaderGroup)
    candidateImpl.offerLeadership(this)

    timerTask = Some {
      config.taskTimer.schedule(pollInterval) {
        try {
          update()

          if (isLeader) {
            lead()
          }
        } catch {
          // Keep the timer alive: log and count, never let the task die.
          case e: Exception =>
            exceptionCounter.incr()
            log.error(e, "ZKLeader: exception")
        }
      }
    }
  }

  /** Cancels the periodic task, if it was ever scheduled. */
  def shutdown() {
    timerTask match {
      case Some(x) => x.cancel()
      case None =>
    }
  }

  // Candidate.Leader callback: this node won the election.
  def onElected(abdicate: ExceptionalCommand[JoinException]) {
    isLeader = true
    log.info("ZKLeader: elected")
  }

  // Candidate.Leader callback: leadership was lost.
  def onDefeated() {
    log.info("ZKLeader: defeated")
    isLeader = false
  }

  /**
   * Sums the latest value reported by every member of the report group
   * (each member's znode holds a number) and pushes it into the buffer.
   */
  private[adaptive] def update() {
    buf.update {
      val zk = config.client.get
      reportGroup.getMemberIds.asScala.map { id =>
        zk.getData(reportGroup.getMemberPath(id), true, null) match {
          case null => 0.0
          case b: Array[Byte] => {
            log.info(new String(b))
            new String(b).toDouble
          }
        }
      }.toSeq.sum
    }
  }

  /**
   * Applies the leader policy to the buffered traffic; if it yields a new
   * sample rate, publishes it and notifies the policy of the change.
   */
  private[adaptive] def lead() {
    leaderPolicy(Some(buf)) match {
      case Some(sr) => {
        log.info("ZKLeader: Setting sample rate: " + sr)
        config.sampleRate.set(sr)
        leaderPolicy.notifyChange(sr)
      }
      case None => {
        log.info("ZKLeader: Not changing sample rate")
      }
    }
  }
}
object ZooKeeperAdaptiveLeader {
  /** Factory wiring all collaborators into an anonymous trait instance. */
  def apply(
    _config: ZooKeeperAdaptiveSamplerConfig,
    _reportGroup: Group,
    _leaderGroup: Group,
    _bufferSize: Duration,
    _pollInterval: Duration,
    _leaderPolicy: LeaderPolicy[BoundedBuffer]
  ): ZooKeeperAdaptiveLeader =
    new ZooKeeperAdaptiveLeader() {
      val config = _config
      val reportGroup = _reportGroup
      val leaderGroup = _leaderGroup
      val bufferSize = _bufferSize
      val pollInterval = _pollInterval
      val leaderPolicy = _leaderPolicy
    }

  /**
   * Truncate a number to 3 decimal places (the previous comment said 4, but
   * the code scales by 1000). Truncates toward zero via toInt, so negative
   * inputs round up; very large inputs overflow Int — callers beware.
   */
  def truncate(x: Double): Double = {
    (x * 1000).toInt.toDouble / 1000
  }
}
| martindale/zipkin | zipkin-server/src/main/scala/com/twitter/zipkin/collector/sampler/adaptive/ZooKeeperAdaptiveLeader.scala | Scala | apache-2.0 | 5,205 |
/*
* Copyright 2012-2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda.aws
import com.netflix.edda.Elector
import com.netflix.edda.Utils
import com.netflix.edda.Record
import com.netflix.edda.RequestId
import org.slf4j.LoggerFactory
import org.joda.time.DateTime
/** [[com.netflix.edda.Elector]] subclass that uses DynamoDB's write contstraint operations
* to organize leadership
*/
/** [[com.netflix.edda.Elector]] subclass that uses DynamoDB's write contstraint operations
  * to organize leadership
  */
class DynamoDBElector extends Elector {
  private[this] val logger = LoggerFactory.getLogger(getClass)

  // Identity written into the leader record; falls back to "dev" when the
  // configured environment variable (default EC2_INSTANCE_ID) is unset.
  lazy val instance = Option(
    System.getenv(Utils.getProperty("edda.elector", "uniqueEnvName", "dynamodb", "EC2_INSTANCE_ID").get)).getOrElse("dev")

  // How long (ms) a leader record may go without an mtime refresh before
  // another node may take over.
  val leaderTimeout = Utils.getProperty("edda.elector", "leaderTimeout", "dynamodb", "5000")

  private lazy val monitorTableName = Utils.getProperty("edda.elector", "tableName", "dynamodb", "edda-leader").get
  lazy val readCap = Utils.getProperty("edda.elector", "readCapacity", "dynamodb", "5").get.toLong
  lazy val writeCap = Utils.getProperty("edda.elector", "writeCapacity", "dynamodb", "1").get.toLong
  lazy val account = Utils.getProperty("edda", "account", "elector.dynamodb", "").get

  // Guards runElection until init() has created the table.
  private var inited = false

  val readDynamo = new AwsClient(account).dynamo
  // NOTE(review): the block is equivalent to `new AwsClient(account).dynamo`;
  // the extra local looks like a leftover — confirm before simplifying.
  val writeDynamo = {
    val client = new AwsClient(account).dynamo
    client
  }

  /** Creates the leader table (if needed) and marks the elector usable. */
  override def init() {
    implicit val client = writeDynamo
    DynamoDB.init(monitorTableName, readCap, writeCap)
    inited = true
    super.init()
  }

  /** attempt to become the leader. If no leader is present it attempts
    * to insert itself as leader (if insert error happens, then someone else became
    * leader before us). If we are leader then update leader record mtime so that
    * secondary severs see that we are still alive and don't assume leadership. If
    * we are not leader, double-check the mtime of the record, if it is older than
    * the leaderTimeout value then attempt to update leader record as self. The records
    * for mtime and new-leader are atomic conditional updates so if some other servers
    * updates dynamodb first we will "lose" will not be the leader.
    * @return true when this node is (or remains) the leader
    */
  protected override def runElection()(implicit req: RequestId): Boolean = {
    if( !inited ) {
      return false
    }
    implicit val client = writeDynamo
    val now = DateTime.now
    var leader = instance
    var isLeader = false
    // Read the current leader record, timing the call for diagnostics.
    val t0 = System.nanoTime()
    val response = try {
      implicit val client = readDynamo
      DynamoDB.get(monitorTableName, "name", "leader")
    } finally {
      val t1 = System.nanoTime()
      val lapse = (t1 - t0) / 1000000;
      if (logger.isInfoEnabled) logger.info(s"$req$this get leader lapse: ${lapse}ms")
    }
    if( response == null || response.isEmpty ) {
      // no record found, so this is the first time we are creating a record
      val t0 = System.nanoTime()
      try {
        DynamoDB.put(
          monitorTableName,
          Map(
            "name" -> "leader",
            "instance" -> instance,
            "mtime" -> DateTime.now.getMillis,
            "req" -> req.id
          ),
          Map(
            // precondition: assert that no record with this name exists
            "name" -> None
          )
        )
        isLeader = true
      } catch {
        // Conditional put lost the race: someone else became leader first.
        case e: Exception => {
          if (logger.isErrorEnabled) logger.error(s"$req$this failed to create leader record: " + e.getMessage)
          isLeader = false
        }
      } finally {
        val t1 = System.nanoTime()
        val lapse = (t1 - t0) / 1000000;
        if (logger.isInfoEnabled) logger.info(s"$req$this create leader lapse: ${lapse}ms")
      }
    } else {
      // record found, if we are leader update mtime
      // if we are not leader check to see if leader record has expired and they try to become leader
      val item = response.get
      leader = item("instance")
      if( leader == instance ) {
        // update mtime
        val t0 = System.nanoTime()
        try {
          DynamoDB.put(
            monitorTableName,
            Map(
              "name" -> "leader",
              "instance" -> instance,
              "mtime" -> DateTime.now.getMillis,
              "req" -> req.id
            ),
            Map(
              // precondition: make sure the update happend on record we fetched
              "instance" -> instance,
              "req" -> item("req")
            )
          )
          isLeader = true
        } catch {
          // Precondition failed: the record changed since we read it, so we
          // can no longer assume leadership.
          case e: Exception => {
            if (logger.isErrorEnabled) logger.error(s"$req$this failed to update mtime for leader record: ${e.getMessage}")
            isLeader = false
          }
        } finally {
          val t1 = System.nanoTime()
          val lapse = (t1 - t0) / 1000000;
          if (logger.isInfoEnabled) logger.info(s"$req$this index leader (update mtime) lapse: ${lapse}ms")
        }
      } else {
        // Another node is leader: take over only if its record looks stale.
        val mtime = new DateTime(item("mtime").toLong)
        val timeout = DateTime.now().plusMillis(-1 * (pollCycle.get.toInt + leaderTimeout.get.toInt))
        if (mtime.isBefore(timeout)) {
          // assume leader is dead, so try to become leader
          val t0 = System.nanoTime()
          try {
            DynamoDB.put(
              monitorTableName,
              Map(
                "name" -> "leader",
                "instance" -> instance,
                "mtime" -> DateTime.now.getMillis,
                "req" -> req.id
              ),
              Map(
                // precondition: make sure the update happend on record we fetched
                "instance" -> leader,
                "req" -> item("req")
              )
            )
            isLeader = true
            leader = instance;
          } catch {
            // Precondition failed: another node already replaced the leader.
            case e: Exception => {
              if (logger.isErrorEnabled) logger.error(s"$req$this failed to update leader for leader record: ${e.getMessage}")
              isLeader = false
            }
          } finally {
            val t1 = System.nanoTime()
            val lapse = (t1 - t0) / 1000000;
            if (logger.isInfoEnabled) logger.info(s"$req$this index leader + archive old leader lapse: ${lapse}ms")
          }
        }
      }
    }
    if (logger.isInfoEnabled) logger.info(s"$req$this Leader [$instance] $isLeader [$leader]")
    isLeader
  }

  override def toString = "[Elector DynamoDB]"
}
| wstrucke/edda | src/main/scala/com/netflix/edda/aws/DynamoDBElector.scala | Scala | apache-2.0 | 6,944 |
package cassandra
import com.datastax.driver.core.{Session, Cluster}
/**
*
*/
/**
 * Thin wrapper around an open Cassandra [[Session]] that also exposes the
 * owning [[Cluster]] and its metadata.
 *
 * @param session an already-connected session; its cluster is closed via [[close]]
 */
class CassandraConnector(val session : Session) {
  // The cluster that owns this session.
  val cluster: Cluster = session.getCluster

  /** Closes the underlying cluster (and with it this session). */
  // Side-effecting, so declared with (): callers may still write `close`.
  def close(): Unit = cluster.close()

  // Cluster topology/schema metadata, captured eagerly at construction time.
  val metadata = cluster.getMetadata
}
object CassandraConnector {
  /** Builds a cluster for the given contact point, opens a session, wraps it. */
  def connect(node: String, port: Int): CassandraConnector = {
    val builtCluster =
      Cluster.builder().addContactPoint(node).withPort(port).build()
    new CassandraConnector(builtCluster.connect())
  }
}
| tupol/spark-learning | src/main/scala/cassandra/CassandraConnector.scala | Scala | apache-2.0 | 465 |
import control._
import java.io.File
import org.specs2.execute.Error.ThrowableException
import org.specs2.execute.{Result, AsResult}
import org.specs2.matcher.{Matcher, MatchersImplicits}
import scalaz.concurrent.Task
import scalaz.effect.IO
import scalaz.stream.Process
import scalaz._, Scalaz._
package object io extends MatchersImplicits {

  // A logger is just an effectful consumer of messages.
  type Logger = String => IO[Unit]
  lazy val noLogging = (s: String) => IO(())
  lazy val consoleLogging = (s: String) => IO(println(s))

  // Actions run in IO, carry no warnings (Unit), and read a Logger.
  type Action[+A] = ActionT[IO, Unit, Logger, A]

  object Actions extends ActionTSupport[IO, Unit, Logger] {
    // Stub: not implemented in this exercise.
    def configuration: Action[Configuration] = ???
  }

  trait Configuration {
    // Stub: not implemented in this exercise.
    def statsFile: File = ???
  }

  /** log a value, using the logger coming from the Reader environment */
  def log[R](r: R): Action[Unit] =
    Actions.ask.flatMap(logger => logger(r.toString).liftIO[Action])

  /**
   * This implicit allows any IO[result] to be used inside an example:
   *
   *  "this should work" in {
   *    IO(success)
   *  }
   */
  implicit def ioResultAsResult[T : AsResult]: AsResult[IO[T]] = new AsResult[IO[T]] {
    def asResult(io: =>IO[T]) = AsResult(io.unsafePerformIO())
  }

  /**
   * This implicit allows an IOAction[result] to be used inside an example.
   *
   * For example to read a database.
   *
   * Runs the action with no logging and maps each outcome (ok / failure /
   * exception / failure+exception) onto the corresponding specs2 Result.
   */
  implicit def ioActionResultAsResult[T : AsResult]: AsResult[Action[T]] = new AsResult[Action[T]] {
    def asResult(ioAction: =>Action[T]): Result =
      ioAction.execute(noLogging).unsafePerformIO.foldAll(
        ok => AsResult(ok),
        fail => org.specs2.execute.Failure(fail),
        throwable => org.specs2.execute.Error(throwable),
        (m, t) => org.specs2.execute.Error(m, new ThrowableException(t))
      )
  }

  // Matcher asserting an action runs successfully (result value ignored).
  def beOk[T]: Matcher[Action[T]] = (action: Action[T]) =>
    AsResult(action.map(_ => org.specs2.execute.Success()))

  // Matcher asserting an action succeeds and its value satisfies `f`.
  def beOk[T, R : AsResult](f: T => R): Matcher[Action[T]] = (action: Action[T]) =>
    AsResult(action.map(f))

  // Lifts an action into a single-element scalaz-stream Process.
  implicit class ioActionToProcess[T](action: Action[T]) {
    def toProcess = Process(action.toTask).eval
  }

  // Lifts an action into a Task; any action failure collapses to a generic
  // "error" exception (the original failure detail is discarded).
  implicit class ioActionToTask[T](action: Action[T]) {
    def toTask = Task.delay(action.execute(noLogging).unsafePerformIO.toOption).map {
      case Some(a) => a
      case None => throw new Exception("error")
    }
  }

  // Runs an action eagerly, returning None on any failure.
  implicit class ioActionToOption[T](action: Action[T]) {
    def runOption = action.toTask.attemptRun.toOption
  }
}
| etorreborre/lambdajam-2014 | src/main/scala/io/package.scala | Scala | mit | 2,459 |
package io.github.binaryfoo.gclog
import fastparse.all._
import org.scalatest.{FlatSpec, Matchers}
class EndsWithCurlyBracketTest extends FlatSpec with Matchers {

  // Successful parses capture the whole input, including the final '}'.
  "abc}" should "be matched" in {
    val Parsed.Success(value, _) = EndsWithCurlyBracket.!.parse("abc}")
    value shouldBe "abc}"
  }

  "}" should "be matched" in {
    val Parsed.Success(value, _) = EndsWithCurlyBracket.!.parse("}")
    value shouldBe "}"
  }

  // Failure index is where the parser gave up expecting a closing bracket.
  "{abc" should "not be matched" in {
    val Parsed.Failure(_, index, _) = EndsWithCurlyBracket.!.parse("{abc")
    index shouldBe 3
  }

  // Trailing characters after '}' make the parse fail at the extra input.
  "}a" should "not be matched" in {
    val Parsed.Failure(_, index, _) = EndsWithCurlyBracket.!.parse("}a")
    index shouldBe 1
  }
}
| binaryfoo/gclog-parser | src/test/scala/io/github/binaryfoo/gclog/EndsWithCurlyBracketTest.scala | Scala | mit | 706 |
package cromwell
import java.util.UUID
import akka.actor.Props
import akka.testkit._
import com.typesafe.config.ConfigFactory
import cromwell.MetadataWatchActor.{FailureMatcher, Matcher}
import cromwell.SimpleWorkflowActorSpec._
import cromwell.core.{SimpleIoActor, WorkflowId, WorkflowSourceFilesWithoutImports}
import cromwell.engine.backend.BackendSingletonCollection
import cromwell.engine.workflow.WorkflowActor
import cromwell.engine.workflow.WorkflowActor._
import cromwell.engine.workflow.tokens.JobExecutionTokenDispenserActor
import cromwell.engine.workflow.workflowstore.Submitted
import cromwell.util.SampleWdl
import cromwell.util.SampleWdl.HelloWorld.Addressee
import org.scalatest.BeforeAndAfter
import scala.concurrent.duration._
import scala.concurrent.{Await, Promise}
object SimpleWorkflowActorSpec {
  // Bundle returned by buildWorkflowActor: the FSM actor under test, the
  // probe acting as its supervisor, and a promise completed when the
  // metadata watcher sees the expected events.
  case class TestableWorkflowActorAndMetadataPromise
  (
    workflowActor: TestFSMRef[WorkflowActorState, WorkflowActorData, WorkflowActor],
    supervisor: TestProbe,
    promise: Promise[Unit])
}
class SimpleWorkflowActorSpec extends CromwellTestKitWordSpec with BeforeAndAfter {
val serviceRegistry = TestProbe().ref
private def buildWorkflowActor(sampleWdl: SampleWdl,
rawInputsOverride: String,
workflowId: WorkflowId,
matchers: Matcher*): TestableWorkflowActorAndMetadataPromise = {
val workflowSources = WorkflowSourceFilesWithoutImports(
workflowSource = sampleWdl.workflowSource(),
workflowType = Option("WDL"),
workflowTypeVersion = None,
inputsJson = rawInputsOverride,
workflowOptionsJson = "{}",
labelsJson = "{}",
warnings = Vector.empty
)
val promise = Promise[Unit]()
val watchActor = system.actorOf(MetadataWatchActor.props(promise, matchers: _*), s"service-registry-$workflowId-${UUID.randomUUID()}")
val supervisor = TestProbe()
val workflowActor = TestFSMRef(
factory = new WorkflowActor(workflowId, Submitted, workflowSources, ConfigFactory.load(),
ioActor = system.actorOf(SimpleIoActor.props),
serviceRegistryActor = watchActor,
workflowLogCopyRouter = system.actorOf(Props.empty, s"workflow-copy-log-router-$workflowId-${UUID.randomUUID()}"),
jobStoreActor = system.actorOf(AlwaysHappyJobStoreActor.props),
subWorkflowStoreActor = system.actorOf(AlwaysHappySubWorkflowStoreActor.props),
callCacheReadActor = system.actorOf(EmptyCallCacheReadActor.props),
callCacheWriteActor = system.actorOf(EmptyCallCacheWriteActor.props),
dockerHashActor = system.actorOf(EmptyDockerHashActor.props),
jobTokenDispenserActor = system.actorOf(JobExecutionTokenDispenserActor.props(serviceRegistry)),
backendSingletonCollection = BackendSingletonCollection(Map("Local" -> None)),
serverMode = true),
supervisor = supervisor.ref,
name = s"workflow-actor-$workflowId"
)
TestableWorkflowActorAndMetadataPromise(workflowActor, supervisor, promise)
}
implicit val TestExecutionTimeout = 10.seconds.dilated
val AwaitAlmostNothing = 100.milliseconds.dilated
var workflowId: WorkflowId = _
before {
workflowId = WorkflowId.randomId()
}
"A WorkflowActor" should {
"start, run, succeed and die" in {
val TestableWorkflowActorAndMetadataPromise(workflowActor, supervisor, _) = buildWorkflowActor(SampleWdl.HelloWorld, SampleWdl.HelloWorld.workflowJson, workflowId)
val probe = TestProbe()
probe watch workflowActor
startingCallsFilter("wf_hello.hello") {
workflowActor ! StartWorkflowCommand
}
probe.expectTerminated(workflowActor, TestExecutionTimeout)
// Check the parent didn't see anything:
supervisor.expectNoMsg(AwaitAlmostNothing) // The actor's already terminated. No point hanging around waiting...
}
"fail to construct with missing inputs" in {
val expectedError = "Required workflow input 'wf_hello.hello.addressee' not specified"
val failureMatcher = FailureMatcher(expectedError)
val TestableWorkflowActorAndMetadataPromise(workflowActor, supervisor, promise) = buildWorkflowActor(SampleWdl.HelloWorld, "{}", workflowId, failureMatcher)
val probe = TestProbe()
probe watch workflowActor
workflowActor ! StartWorkflowCommand
Await.result(promise.future, TestExecutionTimeout)
probe.expectTerminated(workflowActor, AwaitAlmostNothing)
supervisor.expectMsgPF(AwaitAlmostNothing, "parent should get a failed response") {
case x: WorkflowFailedResponse =>
x.workflowId should be(workflowId)
x.reasons.size should be(1)
x.reasons.head.getMessage.contains(expectedError) should be(true)
}
}
"fail to construct with inputs of the wrong type" in {
// TODO WOM: restore offending offensive input name
val expectedError = "No coercion defined from '3' of type 'spray.json.JsNumber' to 'String'."
val failureMatcher = FailureMatcher(expectedError)
val TestableWorkflowActorAndMetadataPromise(workflowActor, supervisor, promise) = buildWorkflowActor(SampleWdl.HelloWorld, s""" { "$Addressee" : 3} """,
workflowId, failureMatcher)
val probe = TestProbe()
probe watch workflowActor
workflowActor ! StartWorkflowCommand
try {
Await.result(promise.future, TestExecutionTimeout)
} catch {
case _: Throwable =>
val info = failureMatcher.nearMissInformation
fail(s"We didn't see the expected error message $expectedError within $TestExecutionTimeout. ${info.mkString(", ")}")
}
probe.expectTerminated(workflowActor, AwaitAlmostNothing)
supervisor.expectMsgPF(AwaitAlmostNothing, "parent should get a failed response") {
case x: WorkflowFailedResponse =>
x.workflowId should be(workflowId)
x.reasons.size should be(1)
x.reasons.head.getMessage.contains(expectedError) should be(true)
}
}
"fail when a call fails" in {
val expectedError = "Job wf_goodbye.goodbye:NA:1 exited with return code 1 which has not been declared as a valid return code. See 'continueOnReturnCode' runtime attribute for more details."
val failureMatcher = FailureMatcher(expectedError)
val TestableWorkflowActorAndMetadataPromise(workflowActor, supervisor, promise) = buildWorkflowActor(SampleWdl.GoodbyeWorld, SampleWdl.GoodbyeWorld.workflowJson, workflowId, failureMatcher)
val probe = TestProbe()
probe watch workflowActor
startingCallsFilter("wf_goodbye.goodbye") {
workflowActor ! StartWorkflowCommand
}
Await.result(promise.future, TestExecutionTimeout)
probe.expectTerminated(workflowActor, 2.seconds)
supervisor.expectMsgPF(AwaitAlmostNothing, "parent should get a failed response") {
case x: WorkflowFailedResponse =>
x.workflowId should be(workflowId)
x.reasons.size should be(1)
x.reasons.head.getMessage.contains(expectedError) should be(true)
}
}
"gracefully handle malformed WDL" in {
val expectedError = "No input bfile found evaluating inputs for expression bfile"
val failureMatcher = FailureMatcher(expectedError)
val TestableWorkflowActorAndMetadataPromise(workflowActor, supervisor, promise) = buildWorkflowActor(SampleWdl.CoercionNotDefined, SampleWdl.CoercionNotDefined.workflowJson, workflowId, failureMatcher)
val probe = TestProbe()
probe watch workflowActor
workflowActor ! StartWorkflowCommand
try {
Await.result(promise.future, TestExecutionTimeout)
} catch {
case _: Throwable =>
val info = failureMatcher.nearMissInformation
val errorString =
if (info.nonEmpty) "We had a near miss: " + info.mkString(", ")
else s"The expected key was never seen. We saw: [\\n ${failureMatcher.fullEventList.map(e => s"${e.key} -> ${e.value}").mkString("\\n ")}\\n]."
fail(s"We didn't see the expected error message '$expectedError' within $TestExecutionTimeout. $errorString}")
}
probe.expectTerminated(workflowActor, AwaitAlmostNothing)
supervisor.expectMsgPF(AwaitAlmostNothing, "parent should get a failed response") {
case x: WorkflowFailedResponse =>
x.workflowId should be(workflowId)
x.reasons.size should be(1)
x.reasons.head.getMessage.contains(expectedError) should be(true)
}
}
}
/**
  * Runs `block` while asserting that an info-level log message announcing the start of
  * the given calls (each suffixed with the ":NA:1" shard/attempt marker) is observed
  * within `TestExecutionTimeout`. Returns whatever `block` returns.
  */
private def startingCallsFilter[T](callNames: String*)(block: => T): T = {
  import CromwellTestKitSpec.waitForInfo
  // mkString with an empty prefix keeps the first name unadorned and appends the
  // marker after the last; the trailing "$$" escapes to a literal '$' regex anchor.
  val expectedInfoPattern = s"Starting calls: ${callNames.mkString("", ":NA:1, ", ":NA:1")}$$"
  within(TestExecutionTimeout) {
    waitForInfo(expectedInfoPattern)(block)
  }
}
}
| ohsu-comp-bio/cromwell | engine/src/test/scala/cromwell/SimpleWorkflowActorSpec.scala | Scala | bsd-3-clause | 8,833 |
package dpla.ingestion3.mappers.providers
import dpla.ingestion3.mappers.utils.Document
import dpla.ingestion3.model._
import dpla.ingestion3.utils.FlatFileIO
import org.scalatest.{BeforeAndAfter, FlatSpec}
import scala.xml.{NodeSeq, XML}
/**
 * Unit tests for [[InMapping]], the DPLA metadata mapping for the Indiana ("in") hub.
 *
 * Most cases run the shared extractor against the fixture record loaded from
 * `/in.xml`. A few cases build a minimal inline record instead, to pin down a
 * single field's routing rules (accessRights vs edmRights, type vs format).
 */
class InMappingTest extends FlatSpec with BeforeAndAfter {

  // Provider short name for this hub.
  val shortName = "in"
  // Shared fixture record used by most of the tests below.
  val xmlString: String = new FlatFileIO().readFileAsString("/in.xml")
  val xml: Document[NodeSeq] = Document(XML.loadString(xmlString))
  val extractor = new InMapping

  it should "extract the correct originalId" in {
    val expected = Some("oai:dpla.library.in.gov:PALNI_herbarium4:oai:digital.library.in.gov:PALNI_herbarium4-22274")
    assert(extractor.originalId(xml) == expected)
  }

  it should "extract the correct dataProvider" in {
    val expected = Seq("Butler University").map(nameOnlyAgent)
    assert(extractor.dataProvider(xml) == expected)
  }

  it should "extract the correct edmRights" in {
    val expected = Seq("http://rightsstatements.org/vocab/InC/1.0/").map(URI)
    assert(extractor.edmRights(xml) == expected)
  }

  // URL-valued accessRights must be routed to edmRights (the complementary
  // behavior — dropping them from plain rights — is tested further below).
  it should "map accessRight to edmRights if accessRight begins with 'http'" in {
    val xml =
      <record>
        <metadata>
          <oai_qdc:qualifieddc>
            <dcterms:accessRights>http://rightsstatements.org/vocab/InC/1.0/</dcterms:accessRights>
          </oai_qdc:qualifieddc>
        </metadata>
      </record>
    val expected = Seq("http://rightsstatements.org/vocab/InC/1.0/").map(URI)
    assert(expected === extractor.edmRights(Document(xml)))
  }

  it should "strip semicolons from the end of edmRights" in {
    val xml =
      <record>
        <metadata>
          <oai_qdc:qualifieddc>
            <dcterms:accessRights>http://rightsstatements.org/vocab/InC/1.0;</dcterms:accessRights>
          </oai_qdc:qualifieddc>
        </metadata>
      </record>
    val expected = Seq("http://rightsstatements.org/vocab/InC/1.0").map(URI)
    assert(expected === extractor.edmRights(Document(xml)))
  }

  it should "extract the correct intermediateProvider" in {
    val expected = Some("IUPUI (Campus). University Library").map(nameOnlyAgent)
    assert(extractor.intermediateProvider(xml) == expected)
  }

  it should "extract the correct isShownAt" in {
    val expected = Seq("http://palni.contentdm.oclc.org/cdm/ref/collection/herbarium4/id/22274")
      .map(stringOnlyWebResource)
    assert(extractor.isShownAt(xml) == expected)
  }

  it should "extract the correct alternate title" in {
    val expected = Seq("Heuchera villosa Michx.")
    assert(extractor.alternateTitle(xml) == expected)
  }

  it should "extract the correct contributor" in {
    val expected = Seq("Name of contributor").map(nameOnlyAgent)
    assert(extractor.contributor(xml) == expected)
  }

  it should "extract the correct creator" in {
    val expected = Seq("Ray C. Friesner", "Sallie", "Ron", "Sterling").map(nameOnlyAgent)
    assert(extractor.creator(xml) == expected)
  }

  it should "extract the correct date" in {
    val expected = Seq("1881", "1882").map(stringOnlyTimeSpan)
    assert(extractor.date(xml) == expected)
  }

  it should "extract the correct description" in {
    val expected = Seq("Tobacco landing")
    assert(extractor.description(xml) == expected)
  }

  it should "extract the correct extent" in {
    val expected = Seq("1 photograph : color")
    assert(extractor.extent(xml) == expected)
  }

  it should "extract the correct format" in {
    val expected = Seq("Compact cassette", "Documents", "Typed text")
    assert(extractor.format(xml) == expected)
  }

  // dc:type values that are not valid DPLA types fall through to format.
  it should "map type to format if type is not a valid DPLA type" in {
    val xml =
      <record>
        <metadata>
          <oai_qdc:qualifieddc>
            <dc:type>photograph</dc:type>
          </oai_qdc:qualifieddc>
        </metadata>
      </record>
    val expected = Seq("photograph")
    assert(expected === extractor.format(Document(xml)))
  }

  it should "extract the correct identifier" in {
    val expected = Seq("http://palni.contentdm.oclc.org/cdm/ref/collection/herbarium4/id/22274")
    assert(extractor.identifier(xml) == expected)
  }

  it should "extract the correct language" in {
    val expected = Seq("English", "Czech").map(nameOnlyConcept)
    assert(extractor.language(xml) == expected)
  }

  it should "extract the correct place" in {
    val expected = Seq("Muncie", "Delaware County", "Indiana", "United States", "North and Central America").map(nameOnlyPlace)
    assert(extractor.place(xml) == expected)
  }

  it should "extract the correct rights" in {
    val expected = Seq("Manchester College is providing access to these materials for educational and research purposes.")
    assert(extractor.rights(xml) == expected)
  }

  // Inverse of the accessRights-to-edmRights routing: URL rights must not
  // appear in the free-text rights field.
  it should "filter out rights beginning with 'http'" in {
    val xml =
      <record>
        <metadata>
          <oai_qdc:qualifieddc>
            <dcterms:accessRights>http://rightsstatements.org/vocab/InC/1.0/</dcterms:accessRights>
          </oai_qdc:qualifieddc>
        </metadata>
      </record>
    val expected = Seq()
    assert(expected === extractor.rights(Document(xml)))
  }

  it should "extract the correct subject" in {
    val expected = Seq("Civil War, 1861-1865", "United States History", "Diaries").map(nameOnlyConcept)
    assert(extractor.subject(xml) == expected)
  }

  it should "extract the correct temporal" in {
    val expected = Seq("2000s (2000-2009)", "Twenty-first century, C. E.").map(stringOnlyTimeSpan)
    assert(extractor.temporal(xml) == expected)
  }

  it should "extract the correct title" in {
    val expected = Seq("Heuchera villosa")
    assert(extractor.title(xml) == expected)
  }

  it should "extract the correct type" in {
    val expected = Seq("Sound", "Text")
    assert(extractor.`type`(xml) == expected)
  }

  it should "extract the correct publisher" in {
    val expected = Seq("Lewis G. Hall, Jr.", "Dexter Press").map(nameOnlyAgent)
    assert(extractor.publisher(xml) == expected)
  }

  it should "extract the correct preview" in {
    val expected = Seq("http://palni.contentdm.oclc.org/utils/getthumbnail/collection/herbarium4/id/22274")
      .map(stringOnlyWebResource)
    assert(extractor.preview(xml) == expected)
  }
}
| dpla/ingestion3 | src/test/scala/dpla/ingestion3/mappers/providers/InMappingTest.scala | Scala | mit | 6,252 |
/*
* Buf.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.fscape.stream
import java.util.concurrent.atomic.AtomicInteger
/**
 * Abstract base class for the reference-counted stream buffers (`BufD`, `BufI`, `BufL`).
 *
 * Pooled ("borrowed") instances are shared via `acquire`/`release`: `release` hands the
 * buffer back to the `Allocator`'s pool once the last reference is dropped (see the
 * concrete implementations below).
 */
abstract class BufLike {
  /** The element type stored in `buf` (e.g. `Double` for `BufD`). */
  type Elem

  /** Decrements the reference count; the last release returns the buffer to the allocator's pool. */
  def release()(implicit allocator: Allocator): Unit

  /** Increments the reference count (no-op for un-pooled buffers in the concrete classes). */
  def acquire(): Unit

  /** Fails if a pooled buffer has already been fully released. */
  def assertAllocated(): Unit

  /** Logical number of valid elements; may be less than `buf.length`. */
  var size: Int

  /** Current reference count (see concrete classes for un-pooled behavior). */
  def allocCount(): Int

  /** The backing array; only the first `size` elements are meaningful. */
  def buf: Array[Elem]
}
object BufD {
  /** Creates an un-pooled buffer wrapping the given elements; its size is fixed at `elems.length`. */
  def apply(elems: Double*): BufD = {
    val arr = elems.toArray
    new BufD(arr, borrowed = false)
  }

  /** Allocates a pooled ("borrowed") buffer of the given size with an initial reference count of one. */
  def alloc(size: Int): BufD = {
    new BufD(new Array[Double](size), borrowed = true)
  }
}

/**
 * A reference-counted buffer of `Double` values.
 *
 * When `borrowed` is true the instance participates in pooling: `acquire`/`release`
 * maintain an atomic reference count and the final `release` returns the buffer to
 * the allocator. Un-pooled instances (`borrowed = false`) ignore acquire/release.
 */
final class BufD private(val buf: Array[Double], borrowed: Boolean)
  extends BufLike {

  type Elem = Double

  // Logical size; may be smaller than buf.length. Volatile because buffers may be
  // handed between threads (written by the owner, read elsewhere).
  @volatile
  private[this] var _size = buf.length

  def size: Int = _size

  def size_=(value: Int): Unit = {
    // Only the single owner (refcount == 1) may change the logical size.
    assertOwned()
    _size = value
  }

  // Reference count for pooled buffers; deliberately null for un-pooled ones.
  // NOTE(review): calling allocCount() on an un-pooled buffer will NPE — confirm callers never do.
  private[this] val _allocCount = if (borrowed) new AtomicInteger(1) else null

  def assertAllocated (): Unit = require(!borrowed || _allocCount.get() > 0)
  def assertOwned     (): Unit = require(!borrowed || _allocCount.get() == 1)

  def allocCount(): Int = _allocCount.get()

  override def acquire(): Unit = if (borrowed) {
    val oldCount = _allocCount.getAndIncrement()
    // Count was zero: the buffer is being revived from the pool, so restore full size.
    if (oldCount == 0) _size = buf.length
    ()
  }

  override def release()(implicit a: Allocator): Unit = if (borrowed) {
    val newCount = _allocCount.decrementAndGet()
    require(newCount >= 0)                    // guards against over-release
    if (newCount == 0) a.returnBufD(this)     // last reference gone: return to the pool
  }

  override def toString: String =
    if (size == 1) buf(0).toString else s"BufD(size = $size)@${hashCode.toHexString}"
}
object BufI {
  /** Creates an un-pooled buffer wrapping the given elements; its size is fixed at `elems.length`. */
  def apply(elems: Int*): BufI = {
    val arr = elems.toArray
    new BufI(arr, borrowed = false)
  }

  /** Allocates a pooled ("borrowed") buffer of the given size with an initial reference count of one. */
  def alloc(size: Int): BufI = {
    new BufI(new Array[Int](size), borrowed = true)
  }
}

/**
 * A reference-counted buffer of `Int` values. Mirrors `BufD` exactly,
 * differing only in element type and the pool-return call (`returnBufI`).
 */
final class BufI private(val buf: Array[Int], borrowed: Boolean)
  extends BufLike {

  type Elem = Int

  // Logical size; may be smaller than buf.length. Volatile for cross-thread visibility.
  @volatile
  private[this] var _size = buf.length

  def size: Int = _size

  def size_=(value: Int): Unit = {
    // Only the single owner (refcount == 1) may change the logical size.
    assertOwned()
    _size = value
  }

  // Reference count for pooled buffers; deliberately null for un-pooled ones
  // (so allocCount() on an un-pooled buffer would NPE, as in BufD).
  private[this] val _allocCount = if (borrowed) new AtomicInteger(1) else null

  def assertAllocated (): Unit = require(!borrowed || _allocCount.get() > 0)
  def assertOwned     (): Unit = require(!borrowed || _allocCount.get() == 1)

  def allocCount(): Int = _allocCount.get()

  override def acquire(): Unit = if (borrowed) {
    val oldCount = _allocCount.getAndIncrement()
    // Count was zero: the buffer is being revived from the pool, so restore full size.
    if (oldCount == 0) _size = buf.length
    ()
  }

  override def release()(implicit a: Allocator): Unit = if (borrowed) {
    val newCount = _allocCount.decrementAndGet()
    require(newCount >= 0)                    // guards against over-release
    if (newCount == 0) a.returnBufI(this)     // last reference gone: return to the pool
  }

  // NOTE(review): reads `_size` directly where BufD/BufL use the `size` accessor —
  // behaviorally identical (same volatile field), just a cosmetic inconsistency.
  override def toString: String =
    if (_size == 1) buf(0).toString else s"BufI(size = ${_size})@${hashCode.toHexString}"
}
object BufL {
  /** Creates an un-pooled buffer wrapping the given elements; its size is fixed at `elems.length`. */
  def apply(elems: Long*): BufL = {
    val arr = elems.toArray
    new BufL(arr, borrowed = false)
  }

  /** Allocates a pooled ("borrowed") buffer of the given size with an initial reference count of one. */
  def alloc(size: Int): BufL = {
    new BufL(new Array[Long](size), borrowed = true)
  }
}

/**
 * A reference-counted buffer of `Long` values. Mirrors `BufD` exactly,
 * differing only in element type and the pool-return call (`returnBufL`).
 */
final class BufL private(val buf: Array[Long], borrowed: Boolean)
  extends BufLike {

  type Elem = Long

  // Logical size; may be smaller than buf.length. Volatile for cross-thread visibility.
  @volatile
  private[this] var _size = buf.length

  def size: Int = _size

  def size_=(value: Int): Unit = {
    // Only the single owner (refcount == 1) may change the logical size.
    assertOwned()
    _size = value
  }

  // Reference count for pooled buffers; deliberately null for un-pooled ones
  // (so allocCount() on an un-pooled buffer would NPE, as in BufD).
  private[this] val _allocCount = if (borrowed) new AtomicInteger(1) else null

  def assertAllocated(): Unit = require(!borrowed || _allocCount.get() > 0)
  def assertOwned    (): Unit = require(!borrowed || _allocCount.get() == 1)

  def allocCount(): Int = _allocCount.get()

  override def acquire(): Unit = if (borrowed) {
    val oldCount = _allocCount.getAndIncrement()
    // Count was zero: the buffer is being revived from the pool, so restore full size.
    if (oldCount == 0) _size = buf.length
    ()
  }

  override def release()(implicit a: Allocator): Unit = if (borrowed) {
    val newCount = _allocCount.decrementAndGet()
    require(newCount >= 0)                    // guards against over-release
    if (newCount == 0) a.returnBufL(this)     // last reference gone: return to the pool
  }

  override def toString: String =
    if (size == 1) buf(0).toString else s"BufL(size = $size)@${hashCode.toHexString}"
} | Sciss/FScape-next | core/shared/src/main/scala/de/sciss/fscape/stream/Buf.scala | Scala | agpl-3.0 | 4,247 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.metadata
import org.apache.flink.table.api.{TableConfig, TableException}
import org.apache.flink.table.catalog.{CatalogManager, FunctionCatalog}
import org.apache.flink.table.data.RowData
import org.apache.flink.table.expressions.ApiExpressionUtils.intervalOfMillis
import org.apache.flink.table.expressions._
import org.apache.flink.table.functions.{FunctionIdentifier, UserDefinedFunctionHelper}
import org.apache.flink.table.module.ModuleManager
import org.apache.flink.table.operations.TableSourceQueryOperation
import org.apache.flink.table.planner.calcite.FlinkRelBuilder.PlannerNamedWindowProperty
import org.apache.flink.table.planner.calcite.{FlinkRelBuilder, FlinkTypeFactory}
import org.apache.flink.table.planner.delegation.PlannerContext
import org.apache.flink.table.planner.expressions.{PlannerProctimeAttribute, PlannerRowtimeAttribute, PlannerWindowReference, PlannerWindowStart}
import org.apache.flink.table.planner.functions.aggfunctions.SumAggFunction.DoubleSumAggFunction
import org.apache.flink.table.planner.functions.aggfunctions.{DenseRankAggFunction, RankAggFunction, RowNumberAggFunction}
import org.apache.flink.table.planner.functions.sql.FlinkSqlOperatorTable
import org.apache.flink.table.planner.functions.utils.AggSqlFunction
import org.apache.flink.table.planner.plan.PartialFinalType
import org.apache.flink.table.planner.plan.`trait`.{FlinkRelDistribution, FlinkRelDistributionTraitDef}
import org.apache.flink.table.planner.plan.logical.{LogicalWindow, TumblingGroupWindow}
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
import org.apache.flink.table.planner.plan.nodes.calcite._
import org.apache.flink.table.planner.plan.nodes.logical._
import org.apache.flink.table.planner.plan.nodes.physical.batch._
import org.apache.flink.table.planner.plan.nodes.physical.stream._
import org.apache.flink.table.planner.plan.schema.FlinkPreparingTableBase
import org.apache.flink.table.planner.plan.stream.sql.join.TestTemporalTable
import org.apache.flink.table.planner.plan.utils.AggregateUtil.transformToStreamAggregateInfoList
import org.apache.flink.table.planner.plan.utils._
import org.apache.flink.table.planner.utils.{CountAggFunction, Top3}
import org.apache.flink.table.runtime.operators.rank.{ConstantRankRange, RankType, VariableRankRange}
import org.apache.flink.table.types.AtomicDataType
import org.apache.flink.table.types.logical._
import org.apache.flink.table.types.utils.TypeConversions
import org.apache.flink.table.utils.CatalogManagerMocks
import com.google.common.collect.{ImmutableList, Lists}
import org.apache.calcite.jdbc.CalciteSchema
import org.apache.calcite.plan._
import org.apache.calcite.prepare.CalciteCatalogReader
import org.apache.calcite.rel._
import org.apache.calcite.rel.`type`.{RelDataType, RelDataTypeFieldImpl}
import org.apache.calcite.rel.core._
import org.apache.calcite.rel.logical._
import org.apache.calcite.rel.metadata.{JaninoRelMetadataProvider, RelMetadataQuery, RelMetadataQueryBase}
import org.apache.calcite.rex._
import org.apache.calcite.schema.SchemaPlus
import org.apache.calcite.sql.SqlWindow
import org.apache.calcite.sql.`type`.SqlTypeName._
import org.apache.calcite.sql.`type`.{BasicSqlType, SqlTypeName}
import org.apache.calcite.sql.fun.SqlStdOperatorTable._
import org.apache.calcite.sql.fun.{SqlCountAggFunction, SqlStdOperatorTable}
import org.apache.calcite.sql.parser.SqlParserPos
import org.apache.calcite.util._
import org.junit.{Before, BeforeClass}
import java.math.BigDecimal
import java.util
import scala.collection.JavaConversions._
class FlinkRelMdHandlerTestBase {
// --- Shared planner fixtures. Every lazy RelNode fixture below is built on these. ---
val tableConfig = new TableConfig()
val rootSchema: SchemaPlus = MetadataTestUtil.initRootSchema()
val catalogManager: CatalogManager = CatalogManagerMocks.createEmptyCatalogManager()
val moduleManager = new ModuleManager

// TODO batch RelNode and stream RelNode should have different PlannerContext
//  and RelOptCluster due to they have different trait definitions.
val plannerContext: PlannerContext =
  new PlannerContext(
    tableConfig,
    new FunctionCatalog(tableConfig, catalogManager, moduleManager),
    catalogManager,
    CalciteSchema.from(rootSchema),
    // Trait definitions shared by both batch and stream fixtures (see TODO above).
    util.Arrays.asList(
      ConventionTraitDef.INSTANCE,
      FlinkRelDistributionTraitDef.INSTANCE,
      RelCollationTraitDef.INSTANCE
    )
  )

val typeFactory: FlinkTypeFactory = plannerContext.getTypeFactory
val mq: FlinkRelMetadataQuery = FlinkRelMetadataQuery.instance()

// Mutable per-test state; (re)initialized by setUp() before each test.
var relBuilder: FlinkRelBuilder = _
var rexBuilder: RexBuilder = _
var cluster: RelOptCluster = _
var logicalTraits: RelTraitSet = _
var flinkLogicalTraits: RelTraitSet = _
var batchPhysicalTraits: RelTraitSet = _
var streamPhysicalTraits: RelTraitSet = _
/**
  * Initializes the per-test mutable state: a fresh rel builder (and the rex builder
  * and cluster it carries) plus one trait set for each of the four conventions
  * (logical, Flink-logical, batch-physical, stream-physical).
  */
@Before
def setUp(): Unit = {
  val builder = plannerContext.createRelBuilder("default_catalog", "default_database")
  relBuilder = builder
  rexBuilder = builder.getRexBuilder
  cluster = builder.getCluster
  logicalTraits = cluster.traitSetOf(Convention.NONE)
  flinkLogicalTraits = cluster.traitSetOf(FlinkConventions.LOGICAL)
  batchPhysicalTraits = cluster.traitSetOf(FlinkConventions.BATCH_PHYSICAL)
  streamPhysicalTraits = cluster.traitSetOf(FlinkConventions.STREAM_PHYSICAL)
}
/** Boxes a `Long` into a `java.math.BigDecimal` (via `valueOf`, which may reuse cached instances). */
protected def bd(value: Long): BigDecimal = BigDecimal.valueOf(value)

/**
  * Boxes a `Double` into a `java.math.BigDecimal`. Uses `valueOf` deliberately:
  * it goes through the canonical decimal string form, unlike `new BigDecimal(double)`
  * which would capture the exact binary expansion.
  */
protected def bd(value: Double): BigDecimal = BigDecimal.valueOf(value)
// Common NOT NULL field types used when building Rex expressions in the fixtures below.
protected val intType: RelDataType = typeFactory.createFieldTypeFromLogicalType(
  new IntType(false))

protected val doubleType: RelDataType = typeFactory.createFieldTypeFromLogicalType(
  new DoubleType(false))

protected val longType: RelDataType = typeFactory.createFieldTypeFromLogicalType(
  new BigIntType(false))

protected val stringType: RelDataType = typeFactory.createFieldTypeFromLogicalType(
  new VarCharType(false, VarCharType.MAX_LENGTH))
// A dummy RelNode wrapping a logical scan; used to exercise default metadata handlers.
protected lazy val testRel = new TestRel(
  cluster, logicalTraits, createDataStreamScan(ImmutableList.of("student"), logicalTraits))

// Scans over the "student" table in each of the four conventions.
protected lazy val studentLogicalScan: LogicalTableScan =
  createDataStreamScan(ImmutableList.of("student"), logicalTraits)
protected lazy val studentFlinkLogicalScan: FlinkLogicalDataStreamTableScan =
  createDataStreamScan(ImmutableList.of("student"), flinkLogicalTraits)
protected lazy val studentBatchScan: BatchExecBoundedStreamScan =
  createDataStreamScan(ImmutableList.of("student"), batchPhysicalTraits)
protected lazy val studentStreamScan: StreamExecDataStreamScan =
  createDataStreamScan(ImmutableList.of("student"), streamPhysicalTraits)

// Scans over the "emp" table in each of the four conventions.
protected lazy val empLogicalScan: LogicalTableScan =
  createDataStreamScan(ImmutableList.of("emp"), logicalTraits)
protected lazy val empFlinkLogicalScan: FlinkLogicalDataStreamTableScan =
  createDataStreamScan(ImmutableList.of("emp"), flinkLogicalTraits)
protected lazy val empBatchScan: BatchExecBoundedStreamScan =
  createDataStreamScan(ImmutableList.of("emp"), batchPhysicalTraits)
protected lazy val empStreamScan: StreamExecDataStreamScan =
  createDataStreamScan(ImmutableList.of("emp"), streamPhysicalTraits)
// Row type shared by the Values fixtures below (eight columns a..h of mixed types).
private lazy val valuesType = relBuilder.getTypeFactory
  .builder()
  .add("a", SqlTypeName.BIGINT)
  .add("b", SqlTypeName.BOOLEAN)
  .add("c", SqlTypeName.DATE)
  .add("d", SqlTypeName.TIME)
  .add("e", SqlTypeName.TIMESTAMP)
  .add("f", SqlTypeName.DOUBLE)
  .add("g", SqlTypeName.FLOAT)
  .add("h", SqlTypeName.VARCHAR)
  .build()

// A Values node with the row type above but no tuples.
protected lazy val emptyValues: LogicalValues = {
  relBuilder.values(valuesType)
  relBuilder.build().asInstanceOf[LogicalValues]
}

// A Values node with four tuples; nulls are sprinkled in to exercise null-count metadata.
protected lazy val logicalValues: LogicalValues = {
  val tupleList = List(
    List("1", "true", "2017-10-01", "10:00:00", "2017-10-01 00:00:00", "2.12", null, "abc"),
    List(null, "false", "2017-09-01", "10:00:01", null, "3.12", null, null),
    List("3", "true", null, "10:00:02", "2017-10-01 01:00:00", "3.0", null, "xyz"),
    List("2", "true", "2017-10-02", "09:59:59", "2017-07-01 01:00:00", "-1", null, "F")
  ).map(createLiteralList(valuesType, _))
  relBuilder.values(tupleList, valuesType)
  relBuilder.build().asInstanceOf[LogicalValues]
}
// select id, name, score + 0.2, age - 1, height * 1.1 as h1, height / 0.9 as h2, height,
// case sex = 'M' then 1 else 2, true, 2.1, 2, cast(score as double not null) as s from student
protected lazy val logicalProject: LogicalProject = {
  relBuilder.push(studentLogicalScan)
  val projects = List(
    // id
    relBuilder.field(0),
    // name
    relBuilder.field(1),
    // score + 0.2
    relBuilder.call(PLUS, relBuilder.field(2), relBuilder.literal(0.2)),
    // age - 1
    relBuilder.call(MINUS, relBuilder.field(3), relBuilder.literal(1)),
    // height * 1.1 as h1
    relBuilder.alias(relBuilder.call(MULTIPLY, relBuilder.field(4), relBuilder.literal(1.1)),
      "h1"),
    // height / 0.9 as h2
    relBuilder.alias(relBuilder.call(DIVIDE, relBuilder.field(4), relBuilder.literal(0.9)), "h2"),
    // height
    relBuilder.field(4),
    // case sex = 'M' then 1 else 2
    relBuilder.call(CASE, relBuilder.call(EQUALS, relBuilder.field(5), relBuilder.literal("M")),
      relBuilder.literal(1), relBuilder.literal(2)),
    // true
    relBuilder.literal(true),
    // 2.1
    rexBuilder.makeLiteral(2.1D, doubleType, true),
    // 2
    rexBuilder.makeLiteral(2L, longType, true),
    // cast(score as double not null) as s
    rexBuilder.makeCast(doubleType, relBuilder.field(2))
  )
  relBuilder.project(projects).build().asInstanceOf[LogicalProject]
}

// filter: id < 10
// calc = filter (id < 10) + logicalProject
protected lazy val (logicalFilter, logicalCalc) = {
  relBuilder.push(studentLogicalScan)
  // id < 10
  val expr = relBuilder.call(LESS_THAN, relBuilder.field(0), relBuilder.literal(10))
  val filter = relBuilder.filter(expr).build
  // The calc combines the filter condition with logicalProject's projection list.
  val calc = createLogicalCalc(
    studentLogicalScan, logicalProject.getRowType, logicalProject.getProjects, List(expr))
  (filter, calc)
}
// Expand over grouping sets {name, age, sex}, {age, sex}, {age} — producing rows:
// id, name, score, age, height, sex, class, 1
// id, null, score, age, height, sex, class, 4
// id, null, score, age, height, null, class, 5
protected lazy val (logicalExpand, flinkLogicalExpand, batchExpand, streamExpand) = {
  val cluster = studentLogicalScan.getCluster
  val expandOutputType = ExpandUtil.buildExpandRowType(
    cluster.getTypeFactory, studentLogicalScan.getRowType, Array.empty[Integer])
  val expandProjects = ExpandUtil.createExpandProjects(
    studentLogicalScan.getCluster.getRexBuilder,
    studentLogicalScan.getRowType,
    expandOutputType,
    ImmutableBitSet.of(1, 3, 5),
    ImmutableList.of(
      ImmutableBitSet.of(1, 3, 5),
      ImmutableBitSet.of(3, 5),
      ImmutableBitSet.of(3)),
    Array.empty[Integer])
  // Field index 7 is the expand_id column in all four variants.
  val logicalExpand = new LogicalExpand(cluster, studentLogicalScan.getTraitSet,
    studentLogicalScan, expandOutputType, expandProjects, 7)
  val flinkLogicalExpand = new FlinkLogicalExpand(cluster, flinkLogicalTraits,
    studentFlinkLogicalScan, expandOutputType, expandProjects, 7)
  val batchExpand = new BatchExecExpand(cluster, batchPhysicalTraits,
    studentBatchScan, expandOutputType, expandProjects, 7)
  val streamExecExpand = new StreamExecExpand(cluster, streamPhysicalTraits,
    studentStreamScan, expandOutputType, expandProjects, 7)
  (logicalExpand, flinkLogicalExpand, batchExpand, streamExecExpand)
}
// hash exchange on class (field index 6)
protected lazy val (batchExchange, streamExchange) = {
  val hash6 = FlinkRelDistribution.hash(Array(6), requireStrict = true)
  val batchExchange = new BatchExecExchange(
    cluster,
    batchPhysicalTraits.replace(hash6),
    studentBatchScan,
    hash6
  )
  val streamExchange = new StreamExecExchange(
    cluster,
    streamPhysicalTraits.replace(hash6),
    studentStreamScan,
    hash6
  )
  (batchExchange, streamExchange)
}
// equivalent SQL is
// select * from student order by class asc, score desc
protected lazy val (logicalSort, flinkLogicalSort, batchSort, streamSort) = {
  val logicalSort = relBuilder.scan("student").sort(
    relBuilder.field("class"),
    relBuilder.desc(relBuilder.field("score")))
    .build.asInstanceOf[LogicalSort]
  val collation = logicalSort.getCollation
  // No offset/fetch (nulls below): pure sort in every convention.
  val flinkLogicalSort = new FlinkLogicalSort(cluster, flinkLogicalTraits.replace(collation),
    studentFlinkLogicalScan, collation, null, null)
  val batchSort = new BatchExecSort(cluster,
    batchPhysicalTraits.replace(collation).replace(FlinkRelDistribution.SINGLETON),
    studentBatchScan, collation)
  val streamSort = new StreamExecSort(cluster,
    streamPhysicalTraits.replace(collation).replace(FlinkRelDistribution.SINGLETON),
    studentStreamScan, collation)
  (logicalSort, flinkLogicalSort, batchSort, streamSort)
}
// equivalent SQL is
// select * from student limit 20 offset 10
protected lazy val (
  logicalLimit,
  flinkLogicalLimit,
  batchLimit,
  batchLocalLimit,
  batchGlobalLimit,
  streamLimit) = {
  val logicalSort = relBuilder.scan("student").limit(10, 20)
    .build.asInstanceOf[LogicalSort]
  val collation = logicalSort.getCollation
  val flinkLogicalSort = new FlinkLogicalSort(
    cluster, flinkLogicalTraits.replace(collation), studentFlinkLogicalScan, collation,
    logicalSort.offset, logicalSort.fetch)
  // Single-phase global limit behind a singleton exchange.
  val batchSort = new BatchExecLimit(cluster, batchPhysicalTraits.replace(collation),
    new BatchExecExchange(
      cluster, batchPhysicalTraits.replace(FlinkRelDistribution.SINGLETON), studentBatchScan,
      FlinkRelDistribution.SINGLETON),
    logicalSort.offset, logicalSort.fetch, true)
  // Two-phase variant: the local limit keeps the first offset+fetch rows per partition ...
  val batchSortLocal = new BatchExecLimit(cluster, batchPhysicalTraits.replace(collation),
    studentBatchScan,
    relBuilder.literal(0),
    relBuilder.literal(SortUtil.getLimitEnd(logicalSort.offset, logicalSort.fetch)),
    false)
  // ... and the global limit applies the real offset/fetch after a singleton exchange.
  val batchSortGlobal = new BatchExecLimit(cluster, batchPhysicalTraits.replace(collation),
    new BatchExecExchange(
      cluster, batchPhysicalTraits.replace(FlinkRelDistribution.SINGLETON), batchSortLocal,
      FlinkRelDistribution.SINGLETON),
    logicalSort.offset, logicalSort.fetch, true)
  val streamSort = new StreamExecLimit(cluster, streamPhysicalTraits.replace(collation),
    studentStreamScan, logicalSort.offset, logicalSort.fetch)
  (logicalSort, flinkLogicalSort, batchSort, batchSortLocal, batchSortGlobal, streamSort)
}
// equivalent SQL is
// select * from student order by class asc, score desc limit 20 offset 10
protected lazy val (
  logicalSortLimit,
  flinkLogicalSortLimit,
  batchSortLimit,
  batchLocalSortLimit,
  batchGlobalSortLimit,
  streamSortLimit) = {
  val logicalSortLimit = relBuilder.scan("student").sort(
    relBuilder.field("class"),
    relBuilder.desc(relBuilder.field("score")))
    .limit(10, 20).build.asInstanceOf[LogicalSort]
  // NOTE(review): local name "collection" looks like a typo for "collation"; kept as-is.
  val collection = logicalSortLimit.collation
  val offset = logicalSortLimit.offset
  val fetch = logicalSortLimit.fetch
  val flinkLogicalSortLimit = new FlinkLogicalSort(cluster,
    flinkLogicalTraits.replace(collection), studentFlinkLogicalScan, collection, offset, fetch)
  // Single-phase global sort-limit behind a singleton exchange.
  val batchSortLimit = new BatchExecSortLimit(cluster, batchPhysicalTraits.replace(collection),
    new BatchExecExchange(
      cluster, batchPhysicalTraits.replace(FlinkRelDistribution.SINGLETON), studentBatchScan,
      FlinkRelDistribution.SINGLETON),
    collection, offset, fetch, true)
  // Two-phase variant: local phase keeps the first offset+fetch rows per partition,
  // global phase applies the real offset/fetch after a singleton exchange.
  val batchSortLocalLimit = new BatchExecSortLimit(cluster,
    batchPhysicalTraits.replace(collection), studentBatchScan, collection,
    relBuilder.literal(0),
    relBuilder.literal(SortUtil.getLimitEnd(offset, fetch)),
    false)
  val batchSortGlobal = new BatchExecSortLimit(cluster, batchPhysicalTraits.replace(collection),
    new BatchExecExchange(
      cluster, batchPhysicalTraits.replace(FlinkRelDistribution.SINGLETON), batchSortLocalLimit,
      FlinkRelDistribution.SINGLETON),
    collection, offset, fetch, true)
  val streamSort = new StreamExecSortLimit(cluster, streamPhysicalTraits.replace(collection),
    studentStreamScan, collection, offset, fetch, UndefinedStrategy)
  (logicalSortLimit, flinkLogicalSortLimit,
    batchSortLimit, batchSortLocalLimit, batchSortGlobal, streamSort)
}
// equivalent SQL is
// select * from (
//  select id, name, score, age, height, sex, class,
//      RANK() over (partition by class order by score) rk from student
// ) t where rk <= 5
protected lazy val (
  logicalRank,
  flinkLogicalRank,
  batchLocalRank,
  batchGlobalRank,
  streamRank) = {
  val logicalRank = new LogicalRank(
    cluster,
    logicalTraits,
    studentLogicalScan,
    ImmutableBitSet.of(6),      // partition by class
    RelCollations.of(2),        // order by score
    RankType.RANK,
    new ConstantRankRange(1, 5),
    new RelDataTypeFieldImpl("rk", 7, longType),
    outputRankNumber = true
  )
  val flinkLogicalRank = new FlinkLogicalRank(
    cluster,
    flinkLogicalTraits,
    studentFlinkLogicalScan,
    ImmutableBitSet.of(6),
    RelCollations.of(2),
    RankType.RANK,
    new ConstantRankRange(1, 5),
    new RelDataTypeFieldImpl("rk", 7, longType),
    outputRankNumber = true
  )
  // Local rank runs before the exchange and does not emit the rank number.
  val batchLocalRank = new BatchExecRank(
    cluster,
    batchPhysicalTraits,
    studentBatchScan,
    ImmutableBitSet.of(6),
    RelCollations.of(2),
    RankType.RANK,
    new ConstantRankRange(1, 5),
    new RelDataTypeFieldImpl("rk", 7, longType),
    outputRankNumber = false,
    isGlobal = false
  )
  val hash6 = FlinkRelDistribution.hash(Array(6), requireStrict = true)
  val batchExchange = new BatchExecExchange(
    cluster, batchLocalRank.getTraitSet.replace(hash6), batchLocalRank, hash6)
  val batchGlobalRank = new BatchExecRank(
    cluster,
    batchPhysicalTraits,
    batchExchange,
    ImmutableBitSet.of(6),
    RelCollations.of(2),
    RankType.RANK,
    new ConstantRankRange(1, 5),
    new RelDataTypeFieldImpl("rk", 7, longType),
    outputRankNumber = true,
    isGlobal = true
  )
  // NOTE(review): a BatchExecExchange is built over the *stream* scan here —
  // presumably StreamExecExchange was intended; confirm before relying on it.
  val streamExchange = new BatchExecExchange(cluster,
    studentStreamScan.getTraitSet.replace(hash6), studentStreamScan, hash6)
  val streamRank = new StreamExecRank(
    cluster,
    streamPhysicalTraits,
    streamExchange,
    ImmutableBitSet.of(6),
    RelCollations.of(2),
    RankType.RANK,
    new ConstantRankRange(1, 5),
    new RelDataTypeFieldImpl("rk", 7, longType),
    outputRankNumber = true,
    UndefinedStrategy
  )
  (logicalRank, flinkLogicalRank, batchLocalRank, batchGlobalRank, streamRank)
}
// equivalent SQL is
// select * from (
//  select id, name, score, age, height, sex, class,
//      RANK() over (partition by age order by score) rk from student
// ) t where rk <= 5 and rk >= 3
protected lazy val (
  logicalRank2,
  flinkLogicalRank2,
  batchLocalRank2,
  batchGlobalRank2,
  streamRank2) = {
  val logicalRank = new LogicalRank(
    cluster,
    logicalTraits,
    studentLogicalScan,
    ImmutableBitSet.of(3),      // partition by age
    RelCollations.of(2),        // order by score
    RankType.RANK,
    new ConstantRankRange(3, 5),
    new RelDataTypeFieldImpl("rk", 7, longType),
    outputRankNumber = true
  )
  val flinkLogicalRank = new FlinkLogicalRank(
    cluster,
    flinkLogicalTraits,
    studentFlinkLogicalScan,
    ImmutableBitSet.of(3),
    RelCollations.of(2),
    RankType.RANK,
    new ConstantRankRange(3, 5),
    new RelDataTypeFieldImpl("rk", 7, longType),
    outputRankNumber = true
  )
  // The local rank keeps range [1, 5]; the global rank below narrows it to [3, 5].
  val batchLocalRank = new BatchExecRank(
    cluster,
    batchPhysicalTraits,
    studentBatchScan,
    ImmutableBitSet.of(3),
    RelCollations.of(2),
    RankType.RANK,
    new ConstantRankRange(1, 5),
    new RelDataTypeFieldImpl("rk", 7, longType),
    outputRankNumber = false,
    isGlobal = false
  )
  // NOTE(review): the exchange hashes on field 6 (class) although this rank partitions
  // on field 3 (age) — possibly copied from the fixture above; confirm intent.
  val hash6 = FlinkRelDistribution.hash(Array(6), requireStrict = true)
  val batchExchange = new BatchExecExchange(
    cluster, batchLocalRank.getTraitSet.replace(hash6), batchLocalRank, hash6)
  val batchGlobalRank = new BatchExecRank(
    cluster,
    batchPhysicalTraits,
    batchExchange,
    ImmutableBitSet.of(3),
    RelCollations.of(2),
    RankType.RANK,
    new ConstantRankRange(3, 5),
    new RelDataTypeFieldImpl("rk", 7, longType),
    outputRankNumber = true,
    isGlobal = true
  )
  // NOTE(review): BatchExecExchange over the stream scan — presumably StreamExecExchange.
  val streamExchange = new BatchExecExchange(cluster,
    studentStreamScan.getTraitSet.replace(hash6), studentStreamScan, hash6)
  val streamRank = new StreamExecRank(
    cluster,
    streamPhysicalTraits,
    streamExchange,
    ImmutableBitSet.of(3),
    RelCollations.of(2),
    RankType.RANK,
    new ConstantRankRange(3, 5),
    new RelDataTypeFieldImpl("rk", 7, longType),
    outputRankNumber = true,
    UndefinedStrategy
  )
  (logicalRank, flinkLogicalRank, batchLocalRank, batchGlobalRank, streamRank)
}
// equivalent SQL is
// select * from (
//  select id, name, score, age, height, sex, class,
//      ROW_NUMBER() over (order by height) rn from student
// ) t where rk > 2 and rk < 7
protected lazy val (logicalRowNumber, flinkLogicalRowNumber, streamRowNumber) = {
  // Empty partition key: a single global ROW_NUMBER ordered by height (field 4).
  val logicalRowNumber = new LogicalRank(
    cluster,
    logicalTraits,
    studentLogicalScan,
    ImmutableBitSet.of(),
    RelCollations.of(4),
    RankType.ROW_NUMBER,
    new ConstantRankRange(3, 6),
    new RelDataTypeFieldImpl("rn", 7, longType),
    outputRankNumber = true
  )
  val flinkLogicalRowNumber = new FlinkLogicalRank(
    cluster,
    flinkLogicalTraits,
    studentFlinkLogicalScan,
    ImmutableBitSet.of(),
    RelCollations.of(4),
    RankType.ROW_NUMBER,
    new ConstantRankRange(3, 6),
    new RelDataTypeFieldImpl("rn", 7, longType),
    outputRankNumber = true
  )
  val singleton = FlinkRelDistribution.SINGLETON
  // NOTE(review): BatchExecExchange over the stream scan — presumably StreamExecExchange.
  val streamExchange = new BatchExecExchange(cluster,
    studentStreamScan.getTraitSet.replace(singleton), studentStreamScan, singleton)
  val streamRowNumber = new StreamExecRank(
    cluster,
    streamPhysicalTraits,
    streamExchange,
    ImmutableBitSet.of(),
    RelCollations.of(4),
    RankType.ROW_NUMBER,
    new ConstantRankRange(3, 6),
    new RelDataTypeFieldImpl("rn", 7, longType),
    outputRankNumber = true,
    UndefinedStrategy
  )
  (logicalRowNumber, flinkLogicalRowNumber, streamRowNumber)
}
// equivalent SQL is
// select a, b, c from (
// select a, b, c, proctime
// ROW_NUMBER() over (partition by b order by proctime) rn from TemporalTable3
// ) t where rn <= 1
//
// select a, b, c from (
// select a, b, c, proctime
// ROW_NUMBER() over (partition by b, c order by proctime desc) rn from TemporalTable3
// ) t where rn <= 1
// Deduplicate fixtures over TemporalTable3: keep-first-row on key b, and
// keep-last-row on composite key (b, c); each followed by a projection to a, b, c.
protected lazy val (streamDeduplicateFirstRow, streamDeduplicateLastRow) = {
  val scan: StreamExecDataStreamScan =
    createDataStreamScan(ImmutableList.of("TemporalTable3"), streamPhysicalTraits)
  // Keep-first-row branch: hash-distribute on b (field #1).
  val hash1 = FlinkRelDistribution.hash(Array(1), requireStrict = true)
  val streamExchange1 = new StreamExecExchange(
    cluster, scan.getTraitSet.replace(hash1), scan, hash1)
  val firstRow = new StreamExecDeduplicate(
    cluster,
    streamPhysicalTraits,
    streamExchange1,
    Array(1),
    keepLastRow = false
  )
  // Projection keeps the first three fields and drops the trailing two.
  val builder = typeFactory.builder()
  firstRow.getRowType.getFieldList.dropRight(2).foreach(builder.add)
  val projectProgram = RexProgram.create(
    firstRow.getRowType,
    Array(0, 1, 2).map(i => RexInputRef.of(i, firstRow.getRowType)).toList,
    null,
    builder.build(),
    rexBuilder
  )
  val calcOfFirstRow = new StreamExecCalc(
    cluster,
    streamPhysicalTraits,
    firstRow,
    projectProgram,
    projectProgram.getOutputRowType
  )
  // Keep-last-row branch: hash-distribute on (b, c).
  val hash12 = FlinkRelDistribution.hash(Array(1, 2), requireStrict = true)
  // NOTE(review): BatchExecExchange inside a stream pipeline — the first-row
  // branch above uses StreamExecExchange. Looks like a copy-paste slip; confirm
  // whether any test depends on the batch node here.
  val streamExchange2 = new BatchExecExchange(cluster,
    scan.getTraitSet.replace(hash12), scan, hash12)
  val lastRow = new StreamExecDeduplicate(
    cluster,
    streamPhysicalTraits,
    streamExchange2,
    Array(1, 2),
    keepLastRow = true
  )
  val calcOfLastRow = new StreamExecCalc(
    cluster,
    streamPhysicalTraits,
    lastRow,
    projectProgram,
    projectProgram.getOutputRowType
  )
  (calcOfFirstRow, calcOfLastRow)
}
// equivalent SQL is
// select * from (
// select id, name, score, age, height, sex, class,
// RANK() over (partition by class order by score) rk from student
// ) t where rk <= age
// RANK fixture with a variable rank range: PARTITION BY class (field #6),
// ORDER BY score (field #2), rank bound read from field #3 (age), i.e. rk <= age.
protected lazy val (
  logicalRankWithVariableRange,
  flinkLogicalRankWithVariableRange,
  streamRankWithVariableRange) = {
  val logicalRankWithVariableRange = new LogicalRank(
    cluster,
    logicalTraits,
    studentLogicalScan,
    ImmutableBitSet.of(6),
    RelCollations.of(2),
    RankType.RANK,
    new VariableRankRange(3),      // per-row upper bound taken from field #3
    new RelDataTypeFieldImpl("rk", 7, longType),
    outputRankNumber = true
  )
  // NOTE(review): built with logicalTraits rather than flinkLogicalTraits,
  // unlike the other FlinkLogicalRank fixtures in this file — confirm intent.
  val flinkLogicalRankWithVariableRange = new FlinkLogicalRank(
    cluster,
    logicalTraits,
    studentFlinkLogicalScan,
    ImmutableBitSet.of(6),
    RelCollations.of(2),
    RankType.RANK,
    new VariableRankRange(3),
    new RelDataTypeFieldImpl("rk", 7, longType),
    outputRankNumber = true
  )
  // NOTE(review): the stream-physical node also receives logicalTraits (not
  // streamPhysicalTraits) and the plain studentStreamScan without an exchange;
  // other StreamExecRank fixtures differ — confirm intentional.
  val streamRankWithVariableRange = new StreamExecRank(
    cluster,
    logicalTraits,
    studentStreamScan,
    ImmutableBitSet.of(6),
    RelCollations.of(2),
    RankType.RANK,
    new VariableRankRange(3),
    new RelDataTypeFieldImpl("rk", 7, longType),
    outputRankNumber = true,
    UndefinedStrategy
  )
  (logicalRankWithVariableRange, flinkLogicalRankWithVariableRange, streamRankWithVariableRange)
}
// Builds the AggregateCall for the user-defined "top3" table aggregate:
// top3(field #0) producing two INTEGER output columns f0 and f1.
protected lazy val tableAggCall = {
  val top3 = new Top3
  val resultTypeInfo = UserDefinedFunctionHelper.getReturnTypeOfAggregateFunction(top3)
  val accTypeInfo = UserDefinedFunctionHelper.getAccumulatorTypeOfAggregateFunction(top3)
  val resultDataType = TypeConversions.fromLegacyInfoToDataType(resultTypeInfo)
  val accDataType = TypeConversions.fromLegacyInfoToDataType(accTypeInfo)
  // Output row type of the table aggregate: two INTEGER columns.
  val builder = typeFactory.builder()
  builder.add("f0", new BasicSqlType(typeFactory.getTypeSystem, SqlTypeName.INTEGER))
  builder.add("f1", new BasicSqlType(typeFactory.getTypeSystem, SqlTypeName.INTEGER))
  val relDataType = builder.build()
  AggregateCall.create(
    AggSqlFunction(
      FunctionIdentifier.of("top3"),
      "top3",
      // Fixed: reuse the instance the type information above was derived from
      // instead of allocating a second, unrelated `new Top3`.
      top3,
      resultDataType,
      accDataType,
      typeFactory,
      false),
    false,                          // not DISTINCT
    false,                          // not APPROXIMATE
    false,                          // does not ignore nulls
    Seq(Integer.valueOf(0)).toList, // single argument: field #0
    -1,                             // no FILTER clause
    RelCollationImpl.of(),
    relDataType,
    ""
  )
}
// Table-aggregate fixture ("top3" grouped on field #0) at logical,
// FlinkLogical and stream-physical levels.
protected lazy val (logicalTableAgg, flinkLogicalTableAgg, streamExecTableAgg) = {
  val logicalTableAgg = new LogicalTableAggregate(
    cluster,
    logicalTraits,
    studentLogicalScan,
    ImmutableBitSet.of(0),
    null,
    Seq(tableAggCall))
  // NOTE(review): built with logicalTraits and the logical scan rather than
  // flinkLogicalTraits/studentFlinkLogicalScan — confirm intentional.
  val flinkLogicalTableAgg = new FlinkLogicalTableAggregate(
    cluster,
    logicalTraits,
    studentLogicalScan,
    ImmutableBitSet.of(0),
    null,
    Seq(tableAggCall)
  )
  // Output row type of the stream node: group key plus Top3's two columns.
  val builder = typeFactory.builder()
  builder.add("key", new BasicSqlType(typeFactory.getTypeSystem, SqlTypeName.BIGINT))
  builder.add("f0", new BasicSqlType(typeFactory.getTypeSystem, SqlTypeName.INTEGER))
  builder.add("f1", new BasicSqlType(typeFactory.getTypeSystem, SqlTypeName.INTEGER))
  val relDataType = builder.build()
  // NOTE(review): the stream-physical node is also fed logicalTraits and the
  // *logical* scan; other stream fixtures use streamPhysicalTraits and
  // studentStreamScan — confirm whether deliberate for these tests.
  val streamExecTableAgg = new StreamExecGroupTableAggregate(
    cluster,
    logicalTraits,
    studentLogicalScan,
    relDataType,
    Array(0),
    Seq(tableAggCall)
  )
  (logicalTableAgg, flinkLogicalTableAgg, streamExecTableAgg)
}
// equivalent Table API is
// tEnv.scan("TemporalTable1")
// .select("c, a, b, rowtime")
// .window(Tumble.over("15.minutes").on("rowtime").as("w"))
// .groupBy("a, w")
// .flatAggregate("top3(c)")
// .select("a, f0, f1, w.start, w.end, w.rowtime, w.proctime")
// Window table-aggregate fixture over TemporalTable1 (see the Table API
// pseudo-code above): project c, a, b, rowtime; 15-minute tumbling window;
// group by a; flatAggregate top3(c).
protected lazy val (
  logicalWindowTableAgg,
  flinkLogicalWindowTableAgg,
  streamWindowTableAgg) = {
  relBuilder.scan("TemporalTable1")
  val ts = relBuilder.peek()
  // Re-order the scan output to (c, a, b, rowtime) = fields (2, 0, 1, 4).
  val project = relBuilder.project(relBuilder.fields(Seq[Integer](2, 0, 1, 4).toList))
    .build().asInstanceOf[Project]
  val program = RexProgram.create(
    ts.getRowType, project.getProjects, null, project.getRowType, rexBuilder)
  val aggCallOfWindowAgg = Lists.newArrayList(tableAggCall)
  val logicalWindowAgg = new LogicalWindowTableAggregate(
    ts.getCluster,
    ts.getTraitSet,
    project,
    ImmutableBitSet.of(1),                    // group by a (field #1 after projection)
    ImmutableList.of(ImmutableBitSet.of(1)),
    aggCallOfWindowAgg,
    tumblingGroupWindow,
    namedPropertiesOfWindowAgg)
  val flinkLogicalTs: FlinkLogicalDataStreamTableScan =
    createDataStreamScan(ImmutableList.of("TemporalTable1"), flinkLogicalTraits)
  val flinkLogicalWindowAgg = new FlinkLogicalWindowTableAggregate(
    ts.getCluster,
    logicalTraits,
    new FlinkLogicalCalc(ts.getCluster, flinkLogicalTraits, flinkLogicalTs, program),
    ImmutableBitSet.of(1),
    ImmutableList.of(ImmutableBitSet.of(1)),
    aggCallOfWindowAgg,
    tumblingGroupWindow,
    namedPropertiesOfWindowAgg)
  // Renamed from "hash01": the distribution hashes only field #1 (the group
  // key a), not fields 0 and 1 — the old name was copied from the two-field
  // variant used by the window-aggregate fixture further below.
  val hash1 = FlinkRelDistribution.hash(Array(1), requireStrict = true)
  val streamTs: StreamExecDataStreamScan =
    createDataStreamScan(ImmutableList.of("TemporalTable1"), streamPhysicalTraits)
  // NOTE(review): BatchExecCalc in a stream pipeline; this pattern recurs in
  // the other stream window fixtures in this file, so it is left as-is —
  // confirm whether StreamExecCalc was intended.
  val streamCalc = new BatchExecCalc(
    cluster, streamPhysicalTraits, streamTs, program, program.getOutputRowType)
  val streamExchange = new StreamExecExchange(
    cluster, streamPhysicalTraits.replace(hash1), streamCalc, hash1)
  val emitStrategy = WindowEmitStrategy(tableConfig, tumblingGroupWindow)
  val streamWindowAgg = new StreamExecGroupWindowTableAggregate(
    cluster,
    streamPhysicalTraits,
    streamExchange,
    flinkLogicalWindowAgg.getRowType,
    streamExchange.getRowType,
    Array(1),
    flinkLogicalWindowAgg.getAggCallList,
    tumblingGroupWindow,
    namedPropertiesOfWindowAgg,
    inputTimeFieldIndex = 2,
    emitStrategy
  )
  (logicalWindowAgg, flinkLogicalWindowAgg, streamWindowAgg)
}
// equivalent SQL is
// select age,
// avg(score) as avg_score,
// sum(score) as sum_score,
// max(height) as max_height,
// min(height) as min_height,
// count(id) as cnt
// from student group by age
// Group-aggregate fixture (group by age; avg/sum/max/min/count) at logical,
// FlinkLogical, batch local/global (with and without local phase) and stream
// local/global (with and without local phase) levels.
protected lazy val (
  logicalAgg,
  flinkLogicalAgg,
  batchLocalAgg,
  batchGlobalAggWithLocal,
  batchGlobalAggWithoutLocal,
  streamLocalAgg,
  streamGlobalAggWithLocal,
  streamGlobalAggWithoutLocal) = {
  val logicalAgg = relBuilder.push(studentLogicalScan).aggregate(
    relBuilder.groupKey(relBuilder.field(3)),                // group by age (field #3)
    relBuilder.avg(false, "avg_score", relBuilder.field(2)),
    relBuilder.sum(false, "sum_score", relBuilder.field(2)),
    relBuilder.max("max_height", relBuilder.field(4)),
    relBuilder.min("min_height", relBuilder.field(4)),
    relBuilder.count(false, "cnt", relBuilder.field(0))
  ).build().asInstanceOf[LogicalAggregate]
  val flinkLogicalAgg = new FlinkLogicalAggregate(
    cluster,
    flinkLogicalTraits,
    studentFlinkLogicalScan,
    logicalAgg.getGroupSet,
    logicalAgg.getGroupSets,
    logicalAgg.getAggCallList
  )
  val aggCalls = logicalAgg.getAggCallList
  val aggFunctionFactory = new AggFunctionFactory(
    studentBatchScan.getRowType, Array.empty[Int], Array.fill(aggCalls.size())(false))
  val aggCallToAggFunction = aggCalls.zipWithIndex.map {
    case (call, index) => (call, aggFunctionFactory.createAggFunction(call, index))
  }
  // Local-agg output row type: AVG is split into the partials sum$0/count$1,
  // merged into avg_score by the global phase.
  val rowTypeOfLocalAgg = typeFactory.builder
    .add("age", intType)
    .add("sum$0", doubleType)
    .add("count$1", longType)
    .add("sum_score", doubleType)
    .add("max_height", doubleType)
    .add("min_height", doubleType)
    .add("cnt", longType).build()
  val rowTypeOfGlobalAgg = typeFactory.builder
    .add("age", intType)
    .add("avg_score", doubleType)
    .add("sum_score", doubleType)
    .add("max_height", doubleType)
    .add("min_height", doubleType)
    .add("cnt", longType).build()
  // hash0: distribution on the local-agg output key (position 0);
  // hash3: distribution on the raw scan key age (position 3), used when the
  // local phase is skipped.
  val hash0 = FlinkRelDistribution.hash(Array(0), requireStrict = true)
  val hash3 = FlinkRelDistribution.hash(Array(3), requireStrict = true)
  val batchLocalAgg = new BatchExecLocalHashAggregate(
    cluster,
    relBuilder,
    batchPhysicalTraits,
    studentBatchScan,
    rowTypeOfLocalAgg,
    studentBatchScan.getRowType,
    Array(3),
    auxGrouping = Array(),
    aggCallToAggFunction)
  val batchExchange1 = new BatchExecExchange(
    cluster, batchLocalAgg.getTraitSet.replace(hash0), batchLocalAgg, hash0)
  val batchGlobalAgg = new BatchExecHashAggregate(
    cluster,
    relBuilder,
    batchPhysicalTraits,
    batchExchange1,
    rowTypeOfGlobalAgg,
    batchExchange1.getRowType,
    batchLocalAgg.getInput.getRowType,
    Array(0),
    auxGrouping = Array(),
    aggCallToAggFunction,
    isMerge = true)
  val batchExchange2 = new BatchExecExchange(cluster,
    studentBatchScan.getTraitSet.replace(hash3), studentBatchScan, hash3)
  val batchGlobalAggWithoutLocal = new BatchExecHashAggregate(
    cluster,
    relBuilder,
    batchPhysicalTraits,
    batchExchange2,
    rowTypeOfGlobalAgg,
    batchExchange2.getRowType,
    batchExchange2.getRowType,
    Array(3),
    auxGrouping = Array(),
    aggCallToAggFunction,
    isMerge = false)
  val needRetractionArray = AggregateUtil.getNeedRetractions(
    1, needRetraction = false, null, aggCalls)
  val localAggInfoList = transformToStreamAggregateInfoList(
    aggCalls,
    studentStreamScan.getRowType,
    needRetractionArray,
    needInputCount = false,
    isStateBackendDataViews = false)
  val streamLocalAgg = new StreamExecLocalGroupAggregate(
    cluster,
    streamPhysicalTraits,
    studentStreamScan,
    rowTypeOfLocalAgg,
    Array(3),
    aggCalls,
    localAggInfoList,
    PartialFinalType.NONE)
  val streamExchange1 = new StreamExecExchange(
    cluster, streamLocalAgg.getTraitSet.replace(hash0), streamLocalAgg, hash0)
  // Global phase uses state-backend data views, the local phase does not.
  val globalAggInfoList = transformToStreamAggregateInfoList(
    aggCalls,
    streamExchange1.getRowType,
    needRetractionArray,
    needInputCount = false,
    isStateBackendDataViews = true)
  val streamGlobalAgg = new StreamExecGlobalGroupAggregate(
    cluster,
    streamPhysicalTraits,
    streamExchange1,
    streamExchange1.getRowType,
    rowTypeOfGlobalAgg,
    Array(0),
    localAggInfoList,
    globalAggInfoList,
    PartialFinalType.NONE)
  val streamExchange2 = new StreamExecExchange(cluster,
    studentStreamScan.getTraitSet.replace(hash3), studentStreamScan, hash3)
  val streamGlobalAggWithoutLocal = new StreamExecGroupAggregate(
    cluster,
    streamPhysicalTraits,
    streamExchange2,
    rowTypeOfGlobalAgg,
    Array(3),
    aggCalls)
  (logicalAgg, flinkLogicalAgg,
    batchLocalAgg, batchGlobalAgg, batchGlobalAggWithoutLocal,
    streamLocalAgg, streamGlobalAgg, streamGlobalAggWithoutLocal)
}
// equivalent SQL is
// select avg(score) as avg_score,
// sum(score) as sum_score,
// count(id) as cnt
// from student group by id, name, height
// Group-aggregate fixture with auxiliary grouping: group by id, with name and
// height carried through as AUXILIARY_GROUP columns (functionally dependent on
// the key), plus avg/sum/count aggregates.
protected lazy val (
  logicalAggWithAuxGroup,
  flinkLogicalAggWithAuxGroup,
  batchLocalAggWithAuxGroup,
  batchGlobalAggWithLocalWithAuxGroup,
  batchGlobalAggWithoutLocalWithAuxGroup) = {
  val logicalAggWithAuxGroup = relBuilder.push(studentLogicalScan).aggregate(
    relBuilder.groupKey(relBuilder.field(0)),                // group by id (field #0)
    relBuilder.aggregateCall(FlinkSqlOperatorTable.AUXILIARY_GROUP, relBuilder.field(1)),
    relBuilder.aggregateCall(FlinkSqlOperatorTable.AUXILIARY_GROUP, relBuilder.field(4)),
    relBuilder.avg(false, "avg_score", relBuilder.field(2)),
    relBuilder.sum(false, "sum_score", relBuilder.field(2)),
    relBuilder.count(false, "cnt", relBuilder.field(0))
  ).build().asInstanceOf[LogicalAggregate]
  val flinkLogicalAggWithAuxGroup = new FlinkLogicalAggregate(
    cluster,
    flinkLogicalTraits,
    studentFlinkLogicalScan,
    logicalAggWithAuxGroup.getGroupSet,
    logicalAggWithAuxGroup.getGroupSets,
    logicalAggWithAuxGroup.getAggCallList
  )
  // The physical nodes take only the real aggregate calls; AUXILIARY_GROUP
  // markers are handled via the auxGrouping arrays below.
  val aggCalls = logicalAggWithAuxGroup.getAggCallList.filter {
    call => call.getAggregation != FlinkSqlOperatorTable.AUXILIARY_GROUP
  }
  val aggFunctionFactory = new AggFunctionFactory(
    studentBatchScan.getRowType, Array.empty[Int], Array.fill(aggCalls.size())(false))
  val aggCallToAggFunction = aggCalls.zipWithIndex.map {
    case (call, index) => (call, aggFunctionFactory.createAggFunction(call, index))
  }
  // Local output: key, aux columns, AVG partials (sum$0/count$1), then the rest.
  val rowTypeOfLocalAgg = typeFactory.builder
    .add("id", intType)
    .add("name", stringType)
    .add("height", doubleType)
    .add("sum$0", doubleType)
    .add("count$1", longType)
    .add("sum_score", doubleType)
    .add("cnt", longType).build()
  val batchLocalAggWithAuxGroup = new BatchExecLocalHashAggregate(
    cluster,
    relBuilder,
    batchPhysicalTraits,
    studentBatchScan,
    rowTypeOfLocalAgg,
    studentBatchScan.getRowType,
    Array(0),
    auxGrouping = Array(1, 4),   // positions of name/height in the scan output
    aggCallToAggFunction)
  val hash0 = FlinkRelDistribution.hash(Array(0), requireStrict = true)
  val batchExchange = new BatchExecExchange(cluster,
    batchLocalAggWithAuxGroup.getTraitSet.replace(hash0), batchLocalAggWithAuxGroup, hash0)
  val rowTypeOfGlobalAgg = typeFactory.builder
    .add("id", intType)
    .add("name", stringType)
    .add("height", doubleType)
    .add("avg_score", doubleType)
    .add("sum_score", doubleType)
    .add("cnt", longType).build()
  val batchGlobalAggWithAuxGroup = new BatchExecHashAggregate(
    cluster,
    relBuilder,
    batchPhysicalTraits,
    batchExchange,
    rowTypeOfGlobalAgg,
    batchExchange.getRowType,
    batchLocalAggWithAuxGroup.getInput.getRowType,
    Array(0),
    auxGrouping = Array(1, 2),   // positions of name/height in the *local-agg* output
    aggCallToAggFunction,
    isMerge = true)
  val batchExchange2 = new BatchExecExchange(cluster,
    studentBatchScan.getTraitSet.replace(hash0), studentBatchScan, hash0)
  val batchGlobalAggWithoutLocalWithAuxGroup = new BatchExecHashAggregate(
    cluster,
    relBuilder,
    batchPhysicalTraits,
    batchExchange2,
    rowTypeOfGlobalAgg,
    batchExchange2.getRowType,
    batchExchange2.getRowType,
    Array(0),
    auxGrouping = Array(1, 4),   // scan-output positions again (no local phase)
    aggCallToAggFunction,
    isMerge = false)
  (logicalAggWithAuxGroup, flinkLogicalAggWithAuxGroup,
    batchLocalAggWithAuxGroup, batchGlobalAggWithAuxGroup, batchGlobalAggWithoutLocalWithAuxGroup)
}
// For the window start/end/proctime properties the inferred windowAttribute type is a
// hard-coded value; only for rowtime does it depend on the batch/stream row-time type.
// We hard-code DataTypes.TIMESTAMP here, which is acceptable for testing.
// Shared window reference "w$" whose window attribute is TIMESTAMP(3).
private lazy val windowRef: PlannerWindowReference = {
  val windowAttrType = new TimestampType(3)
  PlannerWindowReference("w$", Some(windowAttrType))
}
// 15-minute (900000 ms) tumbling window over the rowtime attribute
// (input index 0, field index 4), typed as a ROWTIME TIMESTAMP(3).
protected lazy val tumblingGroupWindow: LogicalWindow = {
  val rowtimeField = new FieldReferenceExpression(
    "rowtime",
    new AtomicDataType(new TimestampType(true, TimestampKind.ROWTIME, 3)),
    0,
    4)
  TumblingGroupWindow(windowRef, rowtimeField, intervalOfMillis(900000))
}
// Auxiliary window properties (w$start/w$end/w$rowtime/w$proctime) shared by
// the window-aggregate fixtures below.
protected lazy val namedPropertiesOfWindowAgg: Seq[PlannerNamedWindowProperty] =
  Seq(PlannerNamedWindowProperty("w$start", PlannerWindowStart(windowRef)),
    // Fixed: "w$end" was bound to PlannerWindowStart (copy-paste); it must
    // expose the window *end*. Ensure PlannerWindowEnd is imported at the top
    // of this file alongside PlannerWindowStart.
    PlannerNamedWindowProperty("w$end", PlannerWindowEnd(windowRef)),
    PlannerNamedWindowProperty("w$rowtime", PlannerRowtimeAttribute(windowRef)),
    PlannerNamedWindowProperty("w$proctime", PlannerProctimeAttribute(windowRef)))
// equivalent SQL is
// select a, b, count(c) as s,
// TUMBLE_START(rowtime, INTERVAL '15' MINUTE) as w$start,
// TUMBLE_END(rowtime, INTERVAL '15' MINUTE) as w$end,
// TUMBLE_ROWTIME(rowtime, INTERVAL '15' MINUTE) as w$rowtime,
// TUMBLE_PROCTIME(rowtime, INTERVAL '15' MINUTE) as w$proctime
// from TemporalTable1 group by a, b, TUMBLE(rowtime, INTERVAL '15' MINUTE)
// Tumbling-window aggregate fixture over TemporalTable1: project (a, b,
// rowtime, c), group by (a, b) plus the window, count(c); built at logical,
// FlinkLogical, batch local/global (with and without local phase) and stream
// levels.
protected lazy val (
  logicalWindowAgg,
  flinkLogicalWindowAgg,
  batchLocalWindowAgg,
  batchGlobalWindowAggWithLocalAgg,
  batchGlobalWindowAggWithoutLocalAgg,
  streamWindowAgg) = {
  relBuilder.scan("TemporalTable1")
  val ts = relBuilder.peek()
  // Projection output order: a, b, rowtime, c = scan fields (0, 1, 4, 2).
  val project = relBuilder.project(relBuilder.fields(Seq[Integer](0, 1, 4, 2).toList))
    .build().asInstanceOf[Project]
  val program = RexProgram.create(
    ts.getRowType, project.getProjects, null, project.getRowType, rexBuilder)
  // count(c): argument is field #3 of the projection, groupCount = 2.
  val aggCallOfWindowAgg = Lists.newArrayList(AggregateCall.create(
    new SqlCountAggFunction("COUNT"), false, false, List[Integer](3), -1, 2, project, null, "s"))
  // TUMBLE(rowtime, INTERVAL '15' MINUTE))
  val logicalWindowAgg = new LogicalWindowAggregate(
    ts.getCluster,
    ts.getTraitSet,
    project,
    ImmutableBitSet.of(0, 1),
    aggCallOfWindowAgg,
    tumblingGroupWindow,
    namedPropertiesOfWindowAgg)
  val flinkLogicalTs: FlinkLogicalDataStreamTableScan =
    createDataStreamScan(ImmutableList.of("TemporalTable1"), flinkLogicalTraits)
  val flinkLogicalWindowAgg = new FlinkLogicalWindowAggregate(
    ts.getCluster,
    logicalTraits,
    new FlinkLogicalCalc(ts.getCluster, flinkLogicalTraits, flinkLogicalTs, program),
    ImmutableBitSet.of(0, 1),
    aggCallOfWindowAgg,
    tumblingGroupWindow,
    namedPropertiesOfWindowAgg)
  val batchTs: BatchExecBoundedStreamScan =
    createDataStreamScan(ImmutableList.of("TemporalTable1"), batchPhysicalTraits)
  val batchCalc = new BatchExecCalc(
    cluster, batchPhysicalTraits, batchTs, program, program.getOutputRowType)
  val hash01 = FlinkRelDistribution.hash(Array(0, 1), requireStrict = true)
  val batchExchange1 = new BatchExecExchange(
    cluster, batchPhysicalTraits.replace(hash01), batchCalc, hash01)
  val (_, _, aggregates) =
    AggregateUtil.transformToBatchAggregateFunctions(
      flinkLogicalWindowAgg.getAggCallList, batchExchange1.getRowType)
  val aggCallToAggFunction = flinkLogicalWindowAgg.getAggCallList.zip(aggregates)
  // Local window agg output: grouping keys, assigned-window timestamp, partials.
  val localWindowAggTypes =
    (Array(0, 1).map(batchCalc.getRowType.getFieldList.get(_).getType) ++ // grouping
      Array(longType) ++ // assignTs
      aggCallOfWindowAgg.map(_.getType)).toList // agg calls
  val localWindowAggNames =
    (Array(0, 1).map(batchCalc.getRowType.getFieldNames.get(_)) ++ // grouping
      Array("assignedWindow$") ++ // assignTs
      Array("count$0")).toList // agg calls
  val localWindowAggRowType = typeFactory.createStructType(
    localWindowAggTypes, localWindowAggNames)
  val batchLocalWindowAgg = new BatchExecLocalHashWindowAggregate(
    batchCalc.getCluster,
    relBuilder,
    batchPhysicalTraits,
    batchCalc,
    localWindowAggRowType,
    batchCalc.getRowType,
    Array(0, 1),
    Array.empty,
    aggCallToAggFunction,
    tumblingGroupWindow,
    inputTimeFieldIndex = 2,
    inputTimeIsDate = false,
    namedPropertiesOfWindowAgg,
    enableAssignPane = false)
  val batchExchange2 = new BatchExecExchange(
    cluster, batchPhysicalTraits.replace(hash01), batchLocalWindowAgg, hash01)
  val batchWindowAggWithLocal = new BatchExecHashWindowAggregate(
    cluster,
    relBuilder,
    batchPhysicalTraits,
    batchExchange2,
    flinkLogicalWindowAgg.getRowType,
    batchExchange2.getRowType,
    batchCalc.getRowType,
    Array(0, 1),
    Array.empty,
    aggCallToAggFunction,
    tumblingGroupWindow,
    inputTimeFieldIndex = 2,
    inputTimeIsDate = false,
    namedPropertiesOfWindowAgg,
    enableAssignPane = false,
    isMerge = true
  )
  val batchWindowAggWithoutLocal = new BatchExecHashWindowAggregate(
    batchExchange1.getCluster,
    relBuilder,
    batchPhysicalTraits,
    batchExchange1,
    flinkLogicalWindowAgg.getRowType,
    batchExchange1.getRowType,
    batchExchange1.getRowType,
    Array(0, 1),
    Array.empty,
    aggCallToAggFunction,
    tumblingGroupWindow,
    inputTimeFieldIndex = 2,
    inputTimeIsDate = false,
    namedPropertiesOfWindowAgg,
    enableAssignPane = false,
    isMerge = false
  )
  val streamTs: StreamExecDataStreamScan =
    createDataStreamScan(ImmutableList.of("TemporalTable1"), streamPhysicalTraits)
  // NOTE(review): BatchExecCalc inside a stream pipeline — this pattern recurs
  // across the stream window fixtures in this file; confirm whether
  // StreamExecCalc was intended.
  val streamCalc = new BatchExecCalc(
    cluster, streamPhysicalTraits, streamTs, program, program.getOutputRowType)
  val streamExchange = new StreamExecExchange(
    cluster, streamPhysicalTraits.replace(hash01), streamCalc, hash01)
  val emitStrategy = WindowEmitStrategy(tableConfig, tumblingGroupWindow)
  val streamWindowAgg = new StreamExecGroupWindowAggregate(
    cluster,
    streamPhysicalTraits,
    streamExchange,
    flinkLogicalWindowAgg.getRowType,
    streamExchange.getRowType,
    Array(0, 1),
    flinkLogicalWindowAgg.getAggCallList,
    tumblingGroupWindow,
    namedPropertiesOfWindowAgg,
    inputTimeFieldIndex = 2,
    emitStrategy
  )
  (logicalWindowAgg, flinkLogicalWindowAgg, batchLocalWindowAgg, batchWindowAggWithLocal,
    batchWindowAggWithoutLocal, streamWindowAgg)
}
// equivalent SQL is
// select b, count(a) as s,
// TUMBLE_START(rowtime, INTERVAL '15' MINUTE) as w$start,
// TUMBLE_END(rowtime, INTERVAL '15' MINUTE) as w$end,
// TUMBLE_ROWTIME(rowtime, INTERVAL '15' MINUTE) as w$rowtime,
// TUMBLE_PROCTIME(rowtime, INTERVAL '15' MINUTE) as w$proctime
// from TemporalTable1 group by b, TUMBLE(rowtime, INTERVAL '15' MINUTE)
// Variant of the window-aggregate fixture above with a single grouping key:
// project (a, b, rowtime), group by b plus the tumbling window, count(a).
protected lazy val (
  logicalWindowAgg2,
  flinkLogicalWindowAgg2,
  batchLocalWindowAgg2,
  batchGlobalWindowAggWithLocalAgg2,
  batchGlobalWindowAggWithoutLocalAgg2,
  streamWindowAgg2) = {
  relBuilder.scan("TemporalTable1")
  val ts = relBuilder.peek()
  // Projection output order: a, b, rowtime = scan fields (0, 1, 4).
  val project = relBuilder.project(relBuilder.fields(Seq[Integer](0, 1, 4).toList))
    .build().asInstanceOf[Project]
  val program = RexProgram.create(
    ts.getRowType, project.getProjects, null, project.getRowType, rexBuilder)
  // count(a): argument is field #0 of the projection, groupCount = 1.
  val aggCallOfWindowAgg = Lists.newArrayList(AggregateCall.create(
    new SqlCountAggFunction("COUNT"), false, false, List[Integer](0), -1, 1, project, null, "s"))
  // TUMBLE(rowtime, INTERVAL '15' MINUTE))
  val logicalWindowAgg = new LogicalWindowAggregate(
    ts.getCluster,
    ts.getTraitSet,
    project,
    ImmutableBitSet.of(1),
    aggCallOfWindowAgg,
    tumblingGroupWindow,
    namedPropertiesOfWindowAgg)
  val flinkLogicalTs: FlinkLogicalDataStreamTableScan =
    createDataStreamScan(ImmutableList.of("TemporalTable1"), flinkLogicalTraits)
  val flinkLogicalWindowAgg = new FlinkLogicalWindowAggregate(
    ts.getCluster,
    logicalTraits,
    new FlinkLogicalCalc(ts.getCluster, flinkLogicalTraits, flinkLogicalTs, program),
    ImmutableBitSet.of(1),
    aggCallOfWindowAgg,
    tumblingGroupWindow,
    namedPropertiesOfWindowAgg)
  val batchTs: BatchExecBoundedStreamScan =
    createDataStreamScan(ImmutableList.of("TemporalTable1"), batchPhysicalTraits)
  val batchCalc = new BatchExecCalc(
    cluster, batchPhysicalTraits, batchTs, program, program.getOutputRowType)
  val hash1 = FlinkRelDistribution.hash(Array(1), requireStrict = true)
  val batchExchange1 = new BatchExecExchange(
    cluster, batchPhysicalTraits.replace(hash1), batchCalc, hash1)
  val (_, _, aggregates) =
    AggregateUtil.transformToBatchAggregateFunctions(
      flinkLogicalWindowAgg.getAggCallList, batchExchange1.getRowType)
  val aggCallToAggFunction = flinkLogicalWindowAgg.getAggCallList.zip(aggregates)
  // Local window agg output: grouping key b, assigned-window timestamp, partial.
  val localWindowAggTypes =
    (Array(batchCalc.getRowType.getFieldList.get(1).getType) ++ // grouping
      Array(longType) ++ // assignTs
      aggCallOfWindowAgg.map(_.getType)).toList // agg calls
  val localWindowAggNames =
    (Array(batchCalc.getRowType.getFieldNames.get(1)) ++ // grouping
      Array("assignedWindow$") ++ // assignTs
      Array("count$0")).toList // agg calls
  val localWindowAggRowType = typeFactory.createStructType(
    localWindowAggTypes, localWindowAggNames)
  val batchLocalWindowAgg = new BatchExecLocalHashWindowAggregate(
    batchCalc.getCluster,
    relBuilder,
    batchPhysicalTraits,
    batchCalc,
    localWindowAggRowType,
    batchCalc.getRowType,
    Array(1),
    Array.empty,
    aggCallToAggFunction,
    tumblingGroupWindow,
    inputTimeFieldIndex = 2,
    inputTimeIsDate = false,
    namedPropertiesOfWindowAgg,
    enableAssignPane = false)
  val batchExchange2 = new BatchExecExchange(
    cluster, batchPhysicalTraits.replace(hash1), batchLocalWindowAgg, hash1)
  val batchWindowAggWithLocal = new BatchExecHashWindowAggregate(
    cluster,
    relBuilder,
    batchPhysicalTraits,
    batchExchange2,
    flinkLogicalWindowAgg.getRowType,
    batchExchange2.getRowType,
    batchCalc.getRowType,
    Array(0),                   // key position in the local-agg output
    Array.empty,
    aggCallToAggFunction,
    tumblingGroupWindow,
    inputTimeFieldIndex = 2,
    inputTimeIsDate = false,
    namedPropertiesOfWindowAgg,
    enableAssignPane = false,
    isMerge = true
  )
  val batchWindowAggWithoutLocal = new BatchExecHashWindowAggregate(
    batchExchange1.getCluster,
    relBuilder,
    batchPhysicalTraits,
    batchExchange1,
    flinkLogicalWindowAgg.getRowType,
    batchExchange1.getRowType,
    batchExchange1.getRowType,
    Array(1),                   // key position in the calc output (no local phase)
    Array.empty,
    aggCallToAggFunction,
    tumblingGroupWindow,
    inputTimeFieldIndex = 2,
    inputTimeIsDate = false,
    namedPropertiesOfWindowAgg,
    enableAssignPane = false,
    isMerge = false
  )
  val streamTs: StreamExecDataStreamScan =
    createDataStreamScan(ImmutableList.of("TemporalTable1"), streamPhysicalTraits)
  // NOTE(review): BatchExecCalc inside a stream pipeline (recurring pattern in
  // this file) — confirm whether StreamExecCalc was intended.
  val streamCalc = new BatchExecCalc(
    cluster, streamPhysicalTraits, streamTs, program, program.getOutputRowType)
  val streamExchange = new StreamExecExchange(
    cluster, streamPhysicalTraits.replace(hash1), streamCalc, hash1)
  val emitStrategy = WindowEmitStrategy(tableConfig, tumblingGroupWindow)
  val streamWindowAgg = new StreamExecGroupWindowAggregate(
    cluster,
    streamPhysicalTraits,
    streamExchange,
    flinkLogicalWindowAgg.getRowType,
    streamExchange.getRowType,
    Array(1),
    flinkLogicalWindowAgg.getAggCallList,
    tumblingGroupWindow,
    namedPropertiesOfWindowAgg,
    inputTimeFieldIndex = 2,
    emitStrategy
  )
  (logicalWindowAgg, flinkLogicalWindowAgg, batchLocalWindowAgg, batchWindowAggWithLocal,
    batchWindowAggWithoutLocal, streamWindowAgg)
}
// equivalent SQL is
// select a, c, count(b) as s,
// TUMBLE_START(rowtime, INTERVAL '15' MINUTE) as w$start,
// TUMBLE_END(rowtime, INTERVAL '15' MINUTE) as w$end,
// TUMBLE_ROWTIME(rowtime, INTERVAL '15' MINUTE) as w$rowtime,
// TUMBLE_PROCTIME(rowtime, INTERVAL '15' MINUTE) as w$proctime
// from TemporalTable2 group by a, c, TUMBLE(rowtime, INTERVAL '15' MINUTE)
// Window-aggregate fixture over TemporalTable2 with an auxiliary grouping
// column: group by a plus the tumbling window, carry c via AUXILIARY_GROUP,
// count(b). Batch-only physical variants (with and without local phase).
protected lazy val (
  logicalWindowAggWithAuxGroup,
  flinkLogicalWindowAggWithAuxGroup,
  batchLocalWindowAggWithAuxGroup,
  batchGlobalWindowAggWithLocalAggWithAuxGroup,
  batchGlobalWindowAggWithoutLocalAggWithAuxGroup) = {
  relBuilder.scan("TemporalTable2")
  val ts = relBuilder.peek()
  // Projection output order: a, c, rowtime, b = scan fields (0, 2, 4, 1).
  val project = relBuilder.project(relBuilder.fields(Seq[Integer](0, 2, 4, 1).toList))
    .build().asInstanceOf[Project]
  val program = RexProgram.create(
    ts.getRowType, project.getProjects, null, project.getRowType, rexBuilder)
  // First call marks c (projection field #1) as AUXILIARY_GROUP; second is
  // the real aggregate count(b) on projection field #3.
  val aggCallOfWindowAgg = Lists.newArrayList(
    AggregateCall.create(FlinkSqlOperatorTable.AUXILIARY_GROUP, false, false,
      List[Integer](1), -1, 1, project, null, "c"),
    AggregateCall.create(new SqlCountAggFunction("COUNT"), false, false,
      List[Integer](3), -1, 2, project, null, "s"))
  // TUMBLE(rowtime, INTERVAL '15' MINUTE))
  val logicalWindowAggWithAuxGroup = new LogicalWindowAggregate(
    ts.getCluster,
    ts.getTraitSet,
    project,
    ImmutableBitSet.of(0),
    aggCallOfWindowAgg,
    tumblingGroupWindow,
    namedPropertiesOfWindowAgg)
  val flinkLogicalTs: FlinkLogicalDataStreamTableScan =
    createDataStreamScan(ImmutableList.of("TemporalTable2"), flinkLogicalTraits)
  val flinkLogicalWindowAggWithAuxGroup = new FlinkLogicalWindowAggregate(
    ts.getCluster,
    logicalTraits,
    new FlinkLogicalCalc(ts.getCluster, flinkLogicalTraits, flinkLogicalTs, program),
    ImmutableBitSet.of(0),
    aggCallOfWindowAgg,
    tumblingGroupWindow,
    namedPropertiesOfWindowAgg)
  val batchTs: BatchExecBoundedStreamScan =
    createDataStreamScan(ImmutableList.of("TemporalTable2"), batchPhysicalTraits)
  val batchCalc = new BatchExecCalc(
    cluster, batchPhysicalTraits, batchTs, program, program.getOutputRowType)
  val hash0 = FlinkRelDistribution.hash(Array(0), requireStrict = true)
  val batchExchange1 = new BatchExecExchange(
    cluster, batchPhysicalTraits.replace(hash0), batchCalc, hash0)
  // Physical nodes carry only the real aggregate; drop the AUXILIARY_GROUP call.
  val aggCallsWithoutAuxGroup = flinkLogicalWindowAggWithAuxGroup.getAggCallList.drop(1)
  val (_, _, aggregates) =
    AggregateUtil.transformToBatchAggregateFunctions(
      aggCallsWithoutAuxGroup, batchExchange1.getRowType)
  val aggCallToAggFunction = aggCallsWithoutAuxGroup.zip(aggregates)
  // Local output layout: grouping key, assigned-window timestamp, auxGrouping, partials.
  val localWindowAggTypes =
    (Array(batchCalc.getRowType.getFieldList.get(0).getType) ++ // grouping
      Array(longType) ++ // assignTs
      Array(batchCalc.getRowType.getFieldList.get(1).getType) ++ // auxGrouping
      aggCallsWithoutAuxGroup.map(_.getType)).toList // agg calls
  val localWindowAggNames =
    (Array(batchCalc.getRowType.getFieldNames.get(0)) ++ // grouping
      Array("assignedWindow$") ++ // assignTs
      Array(batchCalc.getRowType.getFieldNames.get(1)) ++ // auxGrouping
      Array("count$0")).toList // agg calls
  val localWindowAggRowType = typeFactory.createStructType(
    localWindowAggTypes, localWindowAggNames)
  val batchLocalWindowAggWithAuxGroup = new BatchExecLocalHashWindowAggregate(
    batchCalc.getCluster,
    relBuilder,
    batchPhysicalTraits,
    batchCalc,
    localWindowAggRowType,
    batchCalc.getRowType,
    Array(0),
    Array(1),
    aggCallToAggFunction,
    tumblingGroupWindow,
    inputTimeFieldIndex = 2,
    inputTimeIsDate = false,
    namedPropertiesOfWindowAgg,
    enableAssignPane = false)
  val batchExchange2 = new BatchExecExchange(
    cluster, batchPhysicalTraits.replace(hash0), batchLocalWindowAggWithAuxGroup, hash0)
  val batchWindowAggWithLocalWithAuxGroup = new BatchExecHashWindowAggregate(
    cluster,
    relBuilder,
    batchPhysicalTraits,
    batchExchange2,
    flinkLogicalWindowAggWithAuxGroup.getRowType,
    batchExchange2.getRowType,
    batchCalc.getRowType,
    Array(0),
    Array(2), // local output grouping keys: grouping + assignTs + auxGrouping
    aggCallToAggFunction,
    tumblingGroupWindow,
    inputTimeFieldIndex = 2,
    inputTimeIsDate = false,
    namedPropertiesOfWindowAgg,
    enableAssignPane = false,
    isMerge = true
  )
  val batchWindowAggWithoutLocalWithAuxGroup = new BatchExecHashWindowAggregate(
    batchExchange1.getCluster,
    relBuilder,
    batchPhysicalTraits,
    batchExchange1,
    flinkLogicalWindowAggWithAuxGroup.getRowType,
    batchExchange1.getRowType,
    batchExchange1.getRowType,
    Array(0),
    Array(1),
    aggCallToAggFunction,
    tumblingGroupWindow,
    inputTimeFieldIndex = 2,
    inputTimeIsDate = false,
    namedPropertiesOfWindowAgg,
    enableAssignPane = false,
    isMerge = false
  )
  (logicalWindowAggWithAuxGroup, flinkLogicalWindowAggWithAuxGroup,
    batchLocalWindowAggWithAuxGroup, batchWindowAggWithLocalWithAuxGroup,
    batchWindowAggWithoutLocalWithAuxGroup)
}
// equivalent SQL is
// select id, name, score, age, class,
// row_number() over(partition by class order by name) as rn,
// rank() over (partition by class order by score) as rk,
// dense_rank() over (partition by class order by score) as drk,
// avg(score) over (partition by class order by score) as avg_score,
// max(score) over (partition by age) as max_score,
// count(id) over (partition by age) as cnt
// from student
protected lazy val (flinkLogicalOverAgg, batchOverAgg) = {
val types = Map(
"id" -> longType,
"name" -> stringType,
"score" -> doubleType,
"age" -> intType,
"class" -> intType,
"rn" -> longType,
"rk" -> longType,
"drk" -> longType,
"avg_score" -> doubleType,
"count$0_score" -> longType,
"sum$0_score" -> doubleType,
"max_score" -> doubleType,
"cnt" -> longType
)
def createRowType(selectFields: String*): RelDataType = {
val builder = typeFactory.builder
selectFields.foreach { f =>
builder.add(f, types.getOrElse(f, throw new IllegalArgumentException(s"$f does not exist")))
}
builder.build()
}
val rowTypeOfCalc = createRowType("id", "name", "score", "age", "class")
val rexProgram = RexProgram.create(
studentFlinkLogicalScan.getRowType,
Array(0, 1, 2, 3, 6).map(i => RexInputRef.of(i, studentFlinkLogicalScan.getRowType)).toList,
null,
rowTypeOfCalc,
rexBuilder
)
val rowTypeOfWindowAgg = createRowType(
"id", "name", "score", "age", "class", "rn", "rk", "drk",
"count$0_score", "sum$0_score", "max_score", "cnt")
val flinkLogicalOverAgg = new FlinkLogicalOverAggregate(
cluster,
flinkLogicalTraits,
new FlinkLogicalCalc(cluster, flinkLogicalTraits, studentFlinkLogicalScan, rexProgram),
ImmutableList.of(),
rowTypeOfWindowAgg,
overAggGroups
)
val rowTypeOfWindowAggOutput = createRowType(
"id", "name", "score", "age", "class", "rn", "rk", "drk", "avg_score", "max_score", "cnt")
val projectProgram = RexProgram.create(
flinkLogicalOverAgg.getRowType,
(0 until flinkLogicalOverAgg.getRowType.getFieldCount).flatMap { i =>
if (i < 8 || i >= 10) {
Array[RexNode](RexInputRef.of(i, flinkLogicalOverAgg.getRowType))
} else if (i == 8) {
Array[RexNode](rexBuilder.makeCall(SqlStdOperatorTable.DIVIDE,
RexInputRef.of(8, flinkLogicalOverAgg.getRowType),
RexInputRef.of(9, flinkLogicalOverAgg.getRowType)))
} else {
Array.empty[RexNode]
}
}.toList,
null,
rowTypeOfWindowAggOutput,
rexBuilder
)
val flinkLogicalOverAggOutput = new FlinkLogicalCalc(
cluster,
flinkLogicalTraits,
flinkLogicalOverAgg,
projectProgram
)
val calc = new BatchExecCalc(
cluster, batchPhysicalTraits, studentBatchScan, rexProgram, rowTypeOfCalc)
val hash4 = FlinkRelDistribution.hash(Array(4), requireStrict = true)
val exchange1 = new BatchExecExchange(cluster, calc.getTraitSet.replace(hash4), calc, hash4)
// sort class, name
val collection1 = RelCollations.of(
FlinkRelOptUtil.ofRelFieldCollation(4), FlinkRelOptUtil.ofRelFieldCollation(1))
val newSortTrait1 = exchange1.getTraitSet.replace(collection1)
val sort1 = new BatchExecSort(cluster, newSortTrait1, exchange1,
newSortTrait1.getTrait(RelCollationTraitDef.INSTANCE))
val outputRowType1 = createRowType("id", "name", "score", "age", "class", "rn")
val innerWindowAgg1 = new BatchExecOverAggregate(
cluster,
relBuilder,
batchPhysicalTraits,
sort1,
outputRowType1,
sort1.getRowType,
Array(4),
Array(1),
Array(true),
Array(false),
Seq((overAggGroups(0), Seq(
(AggregateCall.create(SqlStdOperatorTable.ROW_NUMBER, false, ImmutableList.of(), -1,
longType, "rn"),
new RowNumberAggFunction())))),
flinkLogicalOverAgg
)
// sort class, score
val collation2 = RelCollations.of(
FlinkRelOptUtil.ofRelFieldCollation(4), FlinkRelOptUtil.ofRelFieldCollation(2))
val newSortTrait2 = innerWindowAgg1.getTraitSet.replace(collation2)
val sort2 = new BatchExecSort(cluster, newSortTrait2, innerWindowAgg1,
newSortTrait2.getTrait(RelCollationTraitDef.INSTANCE))
val outputRowType2 = createRowType(
"id", "name", "score", "age", "class", "rn", "rk", "drk", "count$0_score", "sum$0_score")
val innerWindowAgg2 = new BatchExecOverAggregate(
cluster,
relBuilder,
batchPhysicalTraits,
sort2,
outputRowType2,
sort2.getRowType,
Array(4),
Array(2),
Array(true),
Array(false),
Seq((overAggGroups(1), Seq(
(AggregateCall.create(SqlStdOperatorTable.RANK, false, ImmutableList.of(), -1, longType,
"rk"),
new RankAggFunction(Array(new VarCharType(VarCharType.MAX_LENGTH)))),
(AggregateCall.create(SqlStdOperatorTable.DENSE_RANK, false, ImmutableList.of(), -1,
longType, "drk"),
new DenseRankAggFunction(Array(new VarCharType(VarCharType.MAX_LENGTH)))),
(AggregateCall.create(SqlStdOperatorTable.COUNT, false,
ImmutableList.of(Integer.valueOf(2)), -1, longType, "count$0_socre"),
new CountAggFunction()),
(AggregateCall.create(SqlStdOperatorTable.SUM, false,
ImmutableList.of(Integer.valueOf(2)), -1, doubleType, "sum$0_score"),
new DoubleSumAggFunction())
))),
flinkLogicalOverAgg
)
val hash3 = FlinkRelDistribution.hash(Array(3), requireStrict = true)
val exchange2 = new BatchExecExchange(
cluster, innerWindowAgg2.getTraitSet.replace(hash3), innerWindowAgg2, hash3)
val outputRowType3 = createRowType(
"id", "name", "score", "age", "class", "rn", "rk", "drk",
"count$0_score", "sum$0_score", "max_score", "cnt")
val batchWindowAgg = new BatchExecOverAggregate(
cluster,
relBuilder,
batchPhysicalTraits,
exchange2,
outputRowType3,
exchange2.getRowType,
Array(3),
Array.empty,
Array.empty,
Array.empty,
Seq((overAggGroups(2), Seq(
(AggregateCall.create(SqlStdOperatorTable.MAX, false,
ImmutableList.of(Integer.valueOf(2)), -1, longType, "max_score"),
new CountAggFunction()),
(AggregateCall.create(SqlStdOperatorTable.COUNT, false,
ImmutableList.of(Integer.valueOf(0)), -1, doubleType, "cnt"),
new DoubleSumAggFunction())
))),
flinkLogicalOverAgg
)
val batchWindowAggOutput = new BatchExecCalc(
cluster,
batchPhysicalTraits,
batchWindowAgg,
projectProgram,
projectProgram.getOutputRowType
)
(flinkLogicalOverAggOutput, batchWindowAggOutput)
}
// equivalent SQL is
// select id, name, score, age, class,
// rank() over (partition by class order by score) as rk,
// dense_rank() over (partition by class order by score) as drk,
// avg(score) over (partition by class order by score) as avg_score
// from student
protected lazy val streamOverAgg: StreamPhysicalRel = {
  // Field name -> Calcite type for every column a row type below may select.
  val types = Map(
    "id" -> longType,
    "name" -> stringType,
    "score" -> doubleType,
    "age" -> intType,
    "class" -> intType,
    "rk" -> longType,
    "drk" -> longType,
    "avg_score" -> doubleType,
    "count$0_score" -> longType,
    "sum$0_score" -> doubleType
  )
  // Builds a RelDataType from the selected field names, failing fast on an unknown name.
  def createRowType(selectFields: String*): RelDataType = {
    val builder = typeFactory.builder
    selectFields.foreach { f =>
      builder.add(f, types.getOrElse(f, throw new IllegalArgumentException(s"$f does not exist")))
    }
    builder.build()
  }
  // Calc projecting (id, name, score, age, class) out of the student scan
  // (scan fields 0, 1, 2, 3 and 6).
  val rowTypeOfCalc = createRowType("id", "name", "score", "age", "class")
  val rexProgram = RexProgram.create(
    studentFlinkLogicalScan.getRowType,
    Array(0, 1, 2, 3, 6).map(i => RexInputRef.of(i, studentFlinkLogicalScan.getRowType)).toList,
    null,
    rowTypeOfCalc,
    rexBuilder
  )
  // Logical over-aggregate using only window group 1 (rank / dense_rank /
  // count(score) / sum(score)); the count/sum pair is later folded into avg_score.
  val rowTypeOfWindowAgg = createRowType(
    "id", "name", "score", "age", "class", "rk", "drk", "count$0_score", "sum$0_score")
  val flinkLogicalOverAgg = new FlinkLogicalOverAggregate(
    cluster,
    flinkLogicalTraits,
    new FlinkLogicalCalc(cluster, flinkLogicalTraits, studentFlinkLogicalScan, rexProgram),
    ImmutableList.of(),
    rowTypeOfWindowAgg,
    util.Arrays.asList(overAggGroups.get(1))
  )
  // Stream-physical side: scan -> calc -> hash exchange on field 4 ("class") -> over agg.
  val streamScan: StreamExecDataStreamScan =
    createDataStreamScan(ImmutableList.of("student"), streamPhysicalTraits)
  val calc = new StreamExecCalc(
    cluster, streamPhysicalTraits, streamScan, rexProgram, rowTypeOfCalc)
  val hash4 = FlinkRelDistribution.hash(Array(4), requireStrict = true)
  val exchange = new StreamExecExchange(cluster, calc.getTraitSet.replace(hash4), calc, hash4)
  val windowAgg = new StreamExecOverAggregate(
    cluster,
    streamPhysicalTraits,
    exchange,
    rowTypeOfWindowAgg,
    exchange.getRowType,
    flinkLogicalOverAgg
  )
  // Final projection: keep fields 0..6, replace the trailing count/sum pair by a
  // single DIVIDE of field 7 by field 8, and drop field 8.
  // NOTE(review): fields are (7 = count$0_score, 8 = sum$0_score), so this is
  // count/sum rather than sum/count; probably irrelevant for metadata tests,
  // but confirm it is intentional.
  val rowTypeOfWindowAggOutput = createRowType(
    "id", "name", "score", "age", "class", "rk", "drk", "avg_score")
  val projectProgram = RexProgram.create(
    flinkLogicalOverAgg.getRowType,
    (0 until flinkLogicalOverAgg.getRowType.getFieldCount).flatMap { i =>
      if (i < 7) {
        Array[RexNode](RexInputRef.of(i, flinkLogicalOverAgg.getRowType))
      } else if (i == 7) {
        Array[RexNode](rexBuilder.makeCall(SqlStdOperatorTable.DIVIDE,
          RexInputRef.of(7, flinkLogicalOverAgg.getRowType),
          RexInputRef.of(8, flinkLogicalOverAgg.getRowType)))
      } else {
        Array.empty[RexNode]
      }
    }.toList,
    null,
    rowTypeOfWindowAggOutput,
    rexBuilder
  )
  val streamWindowAggOutput = new StreamExecCalc(
    cluster,
    streamPhysicalTraits,
    windowAgg,
    projectProgram,
    projectProgram.getOutputRowType
  )
  streamWindowAggOutput
}
// row_number() over(partition by class order by name) as rn,
// rank() over (partition by class order by score) as rk,
// dense_rank() over (partition by class order by score) as drk,
// avg(score) over (partition by class order by score) as avg_score,
// max(score) over (partition by age) as max_score,
// count(id) over (partition by age) as cnt
// Window groups shared by the over-aggregate plans above; they mirror the SQL
// in the comment block immediately preceding this val.
private lazy val overAggGroups = {
  ImmutableList.of(
    // Group 0: ROW_NUMBER() — partition keys {5}, frame UNBOUNDED PRECEDING to
    // CURRENT ROW (second constructor arg `true` presumably marks a ROWS frame),
    // ordered ASC NULLS FIRST on field 1 ("name").
    new Window.Group(
      ImmutableBitSet.of(5),
      true,
      RexWindowBound.create(SqlWindow.createUnboundedPreceding(new SqlParserPos(0, 0)), null),
      RexWindowBound.create(SqlWindow.createCurrentRow(new SqlParserPos(0, 0)), null),
      RelCollationImpl.of(new RelFieldCollation(
        1, RelFieldCollation.Direction.ASCENDING, RelFieldCollation.NullDirection.FIRST)),
      ImmutableList.of(
        new Window.RexWinAggCall(
          SqlStdOperatorTable.ROW_NUMBER,
          longType,
          ImmutableList.of[RexNode](),
          0,
          false
        )
      )
    ),
    // Group 1: RANK / DENSE_RANK / COUNT(score) / SUM(score) — partition keys {5},
    // UNBOUNDED PRECEDING to CURRENT ROW, ordered ASC NULLS FIRST on field 2
    // ("score"). The non-zero SqlParserPos values look like source positions from
    // the original query text; they carry no plan semantics.
    new Window.Group(
      ImmutableBitSet.of(5),
      false,
      RexWindowBound.create(SqlWindow.createUnboundedPreceding(new SqlParserPos(4, 15)), null),
      RexWindowBound.create(SqlWindow.createCurrentRow(new SqlParserPos(0, 0)), null),
      RelCollationImpl.of(new RelFieldCollation(
        2, RelFieldCollation.Direction.ASCENDING, RelFieldCollation.NullDirection.FIRST)),
      ImmutableList.of(
        new Window.RexWinAggCall(
          SqlStdOperatorTable.RANK,
          longType,
          ImmutableList.of[RexNode](),
          1,
          false
        ),
        new Window.RexWinAggCall(
          SqlStdOperatorTable.DENSE_RANK,
          longType,
          ImmutableList.of[RexNode](),
          2,
          false
        ),
        // NOTE(review): COUNT's operand references input 2 ("score") with longType,
        // while SUM below references the same input with doubleType — confirm intended.
        new Window.RexWinAggCall(
          SqlStdOperatorTable.COUNT,
          longType,
          util.Arrays.asList(new RexInputRef(2, longType)),
          3,
          false
        ),
        new Window.RexWinAggCall(
          SqlStdOperatorTable.SUM,
          doubleType,
          util.Arrays.asList(new RexInputRef(2, doubleType)),
          4,
          false
        )
      )
    ),
    // Group 2: MAX(score) / COUNT(id) over the whole partition (UNBOUNDED
    // PRECEDING to UNBOUNDED FOLLOWING, no ordering).
    // NOTE(review): the SQL comment above says "partition by age", but the
    // partition key set here is empty — confirm which is intended.
    new Window.Group(
      ImmutableBitSet.of(),
      false,
      RexWindowBound.create(SqlWindow.createUnboundedPreceding(new SqlParserPos(7, 19)), null),
      RexWindowBound.create(SqlWindow.createUnboundedFollowing(new SqlParserPos(0, 0)), null),
      RelCollations.EMPTY,
      ImmutableList.of(
        new Window.RexWinAggCall(
          SqlStdOperatorTable.MAX,
          doubleType,
          util.Arrays.asList(new RexInputRef(2, doubleType)),
          5,
          false
        ),
        new Window.RexWinAggCall(
          SqlStdOperatorTable.COUNT,
          longType,
          util.Arrays.asList(new RexInputRef(0, longType)),
          6,
          false
        )
      )
    )
  )
}
/** Snapshot of the student logical scan taken at PROCTIME(). */
protected lazy val flinkLogicalSnapshot: FlinkLogicalSnapshot = {
  val proctime = relBuilder.call(FlinkSqlOperatorTable.PROCTIME)
  new FlinkLogicalSnapshot(cluster, flinkLogicalTraits, studentFlinkLogicalScan, proctime)
}
// SELECT * FROM student AS T JOIN TemporalTable
// FOR SYSTEM_TIME AS OF T.proctime AS D ON T.a = D.id
protected lazy val (batchLookupJoin, streamLookupJoin) = {
  val temporalTableSource = new TestTemporalTable
  // Batch variant (boolean flag = true): look up the temporal table by joining
  // student field 0 to temporal-table field 0.
  val batchSourceOp = new TableSourceQueryOperation[RowData](temporalTableSource, true)
  val batchScan = relBuilder.queryOperation(batchSourceOp).build().asInstanceOf[TableScan]
  val batchLookupJoin = new BatchExecLookupJoin(
    cluster,
    batchPhysicalTraits,
    studentBatchScan,
    batchScan.getTable,
    None,
    JoinInfo.of(ImmutableIntList.of(0), ImmutableIntList.of(0)),
    JoinRelType.INNER
  )
  // Stream variant (flag = false): same join shape with stream-physical traits.
  val streamSourceOp = new TableSourceQueryOperation[RowData](temporalTableSource, false)
  val streamScan = relBuilder.queryOperation(streamSourceOp).build().asInstanceOf[TableScan]
  val streamLookupJoin = new StreamExecLookupJoin(
    cluster,
    streamPhysicalTraits,
    // NOTE(review): the batch scan feeds the *stream* lookup join here —
    // confirm whether a studentStreamScan was intended instead.
    studentBatchScan,
    streamScan.getTable,
    None,
    JoinInfo.of(ImmutableIntList.of(0), ImmutableIntList.of(0)),
    JoinRelType.INNER
  )
  (batchLookupJoin, streamLookupJoin)
}
// relBuilder.field(2, inputOrdinal, fieldOrdinal): inputOrdinal 0 = left input,
// 1 = right input; per the SQL comments the field ordinals map to columns
// a=0, b=1, ..., e=4.
// select * from MyTable1 join MyTable4 on MyTable1.b = MyTable4.a
protected lazy val logicalInnerJoinOnUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable4")
  .join(JoinRelType.INNER,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 0)))
  .build
// select * from MyTable1 join MyTable2 on MyTable1.a = MyTable2.a
protected lazy val logicalInnerJoinNotOnUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.INNER,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
  .build
// select * from MyTable1 join MyTable2 on MyTable1.b = MyTable2.b
protected lazy val logicalInnerJoinOnLHSUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.INNER,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
  .build
// select * from MyTable2 join MyTable1 on MyTable2.b = MyTable1.b
protected lazy val logicalInnerJoinOnRHSUniqueKeys: RelNode = relBuilder
  .scan("MyTable2")
  .scan("MyTable1")
  .join(JoinRelType.INNER,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
  .build
// select * from MyTable1 join MyTable2 on MyTable1.b = MyTable2.b and MyTable1.a > MyTable2.a
protected lazy val logicalInnerJoinWithEquiAndNonEquiCond: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.INNER, relBuilder.call(AND,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)),
    relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0))))
  .build
// select * from MyTable1 join MyTable2 on MyTable1.a > MyTable2.a
protected lazy val logicalInnerJoinWithoutEquiCond: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.INNER,
    relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
  .build
// select * from MyTable1 join MyTable2 on MyTable1.e = MyTable2.e
protected lazy val logicalInnerJoinOnDisjointKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.INNER,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 4), relBuilder.field(2, 1, 4)))
  .build
// relBuilder.field(2, inputOrdinal, fieldOrdinal): inputOrdinal 0 = left input,
// 1 = right input; field ordinals map to columns a=0 .. e=4.
// select * from MyTable1 left join MyTable4 on MyTable1.b = MyTable4.a
protected lazy val logicalLeftJoinOnUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable4")
  .join(JoinRelType.LEFT,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 0)))
  .build
// select * from MyTable1 left join MyTable2 on MyTable1.a = MyTable2.a
protected lazy val logicalLeftJoinNotOnUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.LEFT,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
  .build
// select * from MyTable1 left join MyTable2 on MyTable1.b = MyTable2.b
protected lazy val logicalLeftJoinOnLHSUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.LEFT,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
  .build
// select * from MyTable2 left join MyTable1 on MyTable2.b = MyTable1.b
protected lazy val logicalLeftJoinOnRHSUniqueKeys: RelNode = relBuilder
  .scan("MyTable2")
  .scan("MyTable1")
  .join(JoinRelType.LEFT,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
  .build
// select * from MyTable1 left join MyTable2 on
// MyTable1.b = MyTable2.b and MyTable1.a > MyTable2.a
protected lazy val logicalLeftJoinWithEquiAndNonEquiCond: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.LEFT, relBuilder.call(AND,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)),
    relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0))))
  .build
// select * from MyTable1 left join MyTable2 on MyTable1.a > MyTable2.a
protected lazy val logicalLeftJoinWithoutEquiCond: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.LEFT,
    relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
  .build
// select * from MyTable1 left join MyTable2 on MyTable1.e = MyTable2.e
protected lazy val logicalLeftJoinOnDisjointKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.LEFT,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 4), relBuilder.field(2, 1, 4)))
  .build
// relBuilder.field(2, inputOrdinal, fieldOrdinal): inputOrdinal 0 = left input,
// 1 = right input; field ordinals map to columns a=0 .. e=4.
// select * from MyTable1 right join MyTable4 on MyTable1.b = MyTable4.a
protected lazy val logicalRightJoinOnUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable4")
  .join(JoinRelType.RIGHT,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 0)))
  .build
// select * from MyTable1 right join MyTable2 on MyTable1.a = MyTable2.a
protected lazy val logicalRightJoinNotOnUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.RIGHT,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
  .build
// select * from MyTable1 right join MyTable2 on MyTable1.b = MyTable2.b
protected lazy val logicalRightJoinOnLHSUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.RIGHT,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
  .build
// select * from MyTable2 right join MyTable1 on MyTable2.b = MyTable1.b
protected lazy val logicalRightJoinOnRHSUniqueKeys: RelNode = relBuilder
  .scan("MyTable2")
  .scan("MyTable1")
  .join(JoinRelType.RIGHT,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
  .build
// select * from MyTable1 right join MyTable2 on
// MyTable1.b = MyTable2.b and MyTable1.a > MyTable2.a
protected lazy val logicalRightJoinWithEquiAndNonEquiCond: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.RIGHT, relBuilder.call(AND,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)),
    relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0))))
  .build
// select * from MyTable1 right join MyTable2 on MyTable1.a > MyTable2.a
protected lazy val logicalRightJoinWithoutEquiCond: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.RIGHT,
    relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
  .build
// select * from MyTable1 right join MyTable2 on MyTable1.e = MyTable2.e
protected lazy val logicalRightJoinOnDisjointKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.RIGHT,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 4), relBuilder.field(2, 1, 4)))
  .build
// relBuilder.field(2, inputOrdinal, fieldOrdinal): inputOrdinal 0 = left input,
// 1 = right input; field ordinals map to columns a=0 .. e=4.
// select * from MyTable1 full join MyTable4 on MyTable1.b = MyTable4.a
protected lazy val logicalFullJoinOnUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable4")
  .join(JoinRelType.FULL,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 0)))
  .build
// select * from MyTable1 full join MyTable2 on MyTable1.a = MyTable2.a
protected lazy val logicalFullJoinNotOnUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.FULL,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
  .build
// select * from MyTable1 full join MyTable2 on MyTable1.b = MyTable2.b
protected lazy val logicalFullJoinOnLHSUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.FULL,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
  .build
// select * from MyTable2 full join MyTable1 on MyTable2.b = MyTable1.b
protected lazy val logicalFullJoinOnRHSUniqueKeys: RelNode = relBuilder
  .scan("MyTable2")
  .scan("MyTable1")
  .join(JoinRelType.FULL,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
  .build
// select * from MyTable1 full join MyTable2 on MyTable1.b = MyTable2.b and MyTable1.a >
// MyTable2.a
protected lazy val logicalFullJoinWithEquiAndNonEquiCond: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.FULL, relBuilder.call(AND,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)),
    relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0))))
  .build
// select * from MyTable1 full join MyTable2 on MyTable1.a > MyTable2.a
protected lazy val logicalFullJoinWithoutEquiCond: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.FULL,
    relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
  .build
// select * from MyTable1 full join MyTable2 on MyTable1.e = MyTable2.e
protected lazy val logicalFullJoinOnDisjointKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.FULL,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 4), relBuilder.field(2, 1, 4)))
  .build
// select * from MyTable1 full join MyTable2 on true
protected lazy val logicalFullJoinWithoutCond: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.FULL, relBuilder.literal(true))
  .build
// select * from MyTable1 where b in (select a from MyTable4)
protected lazy val logicalSemiJoinOnUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable4")
  .join(JoinRelType.SEMI,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 0)))
  .build()
// select * from MyTable1 where a in (select a from MyTable2)
protected lazy val logicalSemiJoinNotOnUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.SEMI,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
  .build()
// select * from MyTable1 where b in (select b from MyTable2)
protected lazy val logicalSemiJoinOnLHSUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.SEMI,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
  .build()
// select * from MyTable2 where a in (select b from MyTable1)
// NOTE(review): the condition compares field 1 (b) on both inputs; the "a" in
// this comment may be stale — confirm.
protected lazy val logicalSemiJoinOnRHSUniqueKeys: RelNode = relBuilder
  .scan("MyTable2")
  .scan("MyTable1")
  .join(JoinRelType.SEMI,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
  .build()
// select * from MyTable1 b in (select b from MyTable2 where MyTable1.a > MyTable2.a)
// Semi join with an equi condition on b plus a non-equi condition on a.
protected lazy val logicalSemiJoinWithEquiAndNonEquiCond: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.SEMI, relBuilder.call(AND,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)),
    relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0))))
  // build() keeps its parens (it pops the builder's stack — a side effect) and
  // matches every sibling semi/anti-join definition in this group.
  .build()
// select * from MyTable1 where exists (select * from MyTable2 where MyTable1.a > MyTable2.a)
protected lazy val logicalSemiJoinWithoutEquiCond: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.SEMI,
    relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
  .build()
// select * from MyTable1 where e in (select e from MyTable2)
// Semi join keyed on column e (field 4) of both inputs.
protected lazy val logicalSemiJoinOnDisjointKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.SEMI,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 4), relBuilder.field(2, 1, 4)))
  .build() // parens for consistency with the other semi/anti-join builders
// select * from MyTable1 where not exists (select * from MyTable4 where MyTable1.b = MyTable4.a)
protected lazy val logicalAntiJoinOnUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable4")
  .join(JoinRelType.ANTI,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 0)))
  .build()
// select * from MyTable1 where not exists (select * from MyTable2 where MyTable1.a = MyTable2.a)
protected lazy val logicalAntiJoinNotOnUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.ANTI,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
  .build()
// select * from MyTable1 where not exists (select * from MyTable2 where MyTable1.b = MyTable2.b)
protected lazy val logicalAntiJoinOnLHSUniqueKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.ANTI,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
  .build()
// select * from MyTable2 where not exists (select * from MyTable1 where MyTable1.b = MyTable2.b)
protected lazy val logicalAntiJoinOnRHSUniqueKeys: RelNode = relBuilder
  .scan("MyTable2")
  .scan("MyTable1")
  .join(JoinRelType.ANTI,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
  .build()
// select * from MyTable1 b not in (select b from MyTable2 where MyTable1.a = MyTable2.a)
// notes: the nullable of b is true
protected lazy val logicalAntiJoinWithEquiAndNonEquiCond: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  // NOT IN null semantics: the key equality on b is OR-ed with IS NULL over the
  // same comparison, AND-ed with the correlating equality on a.
  .join(JoinRelType.ANTI, relBuilder.call(AND,
    relBuilder.call(OR,
      relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)),
      relBuilder.isNull(
        relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))),
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0))))
  .build() // parens for consistency with the other semi/anti-join builders
// select * from MyTable1 b not in (select b from MyTable2)
// notes: the nullable of b is true
protected lazy val logicalAntiJoinWithoutEquiCond: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  // NOT IN null semantics: equality on b OR the equality being NULL.
  .join(JoinRelType.ANTI, relBuilder.call(OR,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)),
    relBuilder.isNull(
      relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))))
  .build() // parens for consistency with the other semi/anti-join builders
// select * from MyTable1 where not exists (select e from MyTable2 where MyTable1.e = MyTable2.e)
// Anti join keyed on column e (field 4) of both inputs.
protected lazy val logicalAntiJoinOnDisjointKeys: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .join(JoinRelType.ANTI,
    relBuilder.call(EQUALS, relBuilder.field(2, 0, 4), relBuilder.field(2, 1, 4)))
  .build() // parens for consistency with the other semi/anti-join builders
// SELECT * FROM MyTable1 UNION ALL SELECT * FROM MyTable2
protected lazy val logicalUnionAll: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .union(true).build()
// SELECT * FROM MyTable1 UNION SELECT * FROM MyTable2
protected lazy val logicalUnion: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .union(false).build()
// SELECT * FROM MyTable1 INTERSECT ALL SELECT * FROM MyTable2
protected lazy val logicalIntersectAll: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .intersect(true).build()
// SELECT * FROM MyTable1 INTERSECT SELECT * FROM MyTable2
protected lazy val logicalIntersect: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .intersect(false).build()
// SELECT * FROM MyTable1 MINUS ALL SELECT * FROM MyTable2
protected lazy val logicalMinusAll: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .minus(true).build()
// SELECT * FROM MyTable1 MINUS SELECT * FROM MyTable2
protected lazy val logicalMinus: RelNode = relBuilder
  .scan("MyTable1")
  .scan("MyTable2")
  .minus(false).build()
/**
 * Creates a table scan over `tableNames`, choosing the concrete scan class from
 * the convention carried by `traitSet`: NONE builds a plain logical scan (copied
 * with the given traits); LOGICAL / BATCH_PHYSICAL / STREAM_PHYSICAL construct the
 * corresponding Flink scan node. The result is cast (unchecked) to the caller's T.
 */
protected def createDataStreamScan[T](
    tableNames: util.List[String], traitSet: RelTraitSet): T = {
  val table = relBuilder
    .getRelOptSchema
    .asInstanceOf[CalciteCatalogReader]
    .getTable(tableNames)
    .asInstanceOf[FlinkPreparingTableBase]
  val conventionTrait = traitSet.getTrait(ConventionTraitDef.INSTANCE)
  val scan = conventionTrait match {
    case Convention.NONE =>
      // relBuilder keeps state across calls; clear it so the scan is built fresh.
      relBuilder.clear()
      val scan = relBuilder.scan(tableNames).build()
      scan.copy(traitSet, scan.getInputs)
    case FlinkConventions.LOGICAL =>
      new FlinkLogicalDataStreamTableScan(cluster, traitSet, table)
    case FlinkConventions.BATCH_PHYSICAL =>
      new BatchExecBoundedStreamScan(cluster, traitSet, table, table.getRowType)
    case FlinkConventions.STREAM_PHYSICAL =>
      new StreamExecDataStreamScan(cluster, traitSet, table, table.getRowType)
    case _ => throw new TableException(s"Unsupported convention trait: $conventionTrait")
  }
  scan.asInstanceOf[T]
}
/**
 * Converts one string per field of `rowType` into a typed [[RexLiteral]].
 * A null entry becomes a typed NULL literal; an unsupported SQL type throws
 * [[TableException]].
 */
protected def createLiteralList(
    rowType: RelDataType,
    literalValues: Seq[String]): util.List[RexLiteral] = {
  require(literalValues.length == rowType.getFieldCount)
  val rexBuilder = relBuilder.getRexBuilder
  literalValues.zipWithIndex.map {
    case (v, index) =>
      val fieldType = rowType.getFieldList.get(index).getType
      if (v == null) {
        rexBuilder.makeNullLiteral(fieldType)
      } else {
        fieldType.getSqlTypeName match {
          case BIGINT => rexBuilder.makeLiteral(v.toLong, fieldType, true)
          case INTEGER => rexBuilder.makeLiteral(v.toInt, fieldType, true)
          case BOOLEAN => rexBuilder.makeLiteral(v.toBoolean)
          case DATE => rexBuilder.makeDateLiteral(new DateString(v))
          case TIME => rexBuilder.makeTimeLiteral(new TimeString(v), 0)
          case TIMESTAMP => rexBuilder.makeTimestampLiteral(new TimestampString(v), 0)
          case DOUBLE => rexBuilder.makeApproxLiteral(BigDecimal.valueOf(v.toDouble))
          case FLOAT => rexBuilder.makeApproxLiteral(BigDecimal.valueOf(v.toFloat))
          case VARCHAR => rexBuilder.makeLiteral(v)
          case _ => throw new TableException(s"${fieldType.getSqlTypeName} is not supported!")
        }
      // rexBuilder.makeLiteral & co. are typed as RexNode, hence the downcast.
      }.asInstanceOf[RexLiteral]
  // NOTE(review): a Scala List is returned where util.List is declared —
  // presumably an implicit Java conversion is in scope; confirm at the imports.
  }.toList
}
/**
 * Builds a [[FlinkLogicalCalc]] over `input` that applies the given projections
 * and, when `conditions` is non-null and non-empty, their conjunction as a filter.
 */
protected def createLogicalCalc(
    input: RelNode,
    outputRowType: RelDataType,
    projects: util.List[RexNode],
    conditions: util.List[RexNode]): Calc = {
  // A null predicate means "no filter"; otherwise AND all conditions together.
  val hasConditions = conditions != null && !conditions.isEmpty
  val predicate: RexNode =
    if (hasConditions) RexUtil.composeConjunction(rexBuilder, conditions, true) else null
  val program = RexProgram.create(
    input.getRowType, projects, predicate, outputRowType, rexBuilder)
  FlinkLogicalCalc.create(input, program)
}
/**
 * Creates a literal [[RexNode]] of the given Flink logical type.
 *
 * @param value        the literal value
 * @param internalType target logical type; its nullability is overridden by `isNullable`
 * @param isNullable   nullability applied to the literal's type (default: non-null)
 * @param allowCast    whether the rex builder may wrap the literal in a CAST
 */
protected def makeLiteral(
    value: Any,
    internalType: LogicalType,
    isNullable: Boolean = false,
    allowCast: Boolean = true): RexNode = {
  rexBuilder.makeLiteral(
    value,
    typeFactory.createFieldTypeFromLogicalType(internalType.copy(isNullable)),
    allowCast
  )
}
}
/** Test-only single-input RelNode that reports a constant (1.0, 1.0, 1.0) self cost. */
class TestRel(
    cluster: RelOptCluster,
    traits: RelTraitSet,
    input: RelNode) extends SingleRel(cluster, traits, input) {
  override def computeSelfCost(planner: RelOptPlanner, mq: RelMetadataQuery): RelOptCost = {
    planner.getCostFactory.makeCost(1.0, 1.0, 1.0)
  }
}
object FlinkRelMdHandlerTestBase {
  /** Installs Flink's default rel metadata provider for metadata queries before any test runs. */
  @BeforeClass
  def beforeAll(): Unit = {
    RelMetadataQueryBase
      .THREAD_PROVIDERS
      .set(JaninoRelMetadataProvider.of(FlinkDefaultRelMetadataProvider.INSTANCE))
  }
}
| jinglining/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdHandlerTestBase.scala | Scala | apache-2.0 | 92,188 |
package com.dt.scala.pattern_match
/**
* Author: Wang Jialin
* Contact Information:
* WeChat: 18610086859
* QQ: 1740415547
* Email: 18610086859@126.com
* Tel: 18610086859
*/
import java.awt.Color
/** Small demo of Scala pattern matching: constant cases, guards, and char matching. */
object Hello_Pattern_Match {
  def main(args: Array[String]): Unit = {
    val data = 2
    // Statement-style match on constants: prints which value matched.
    data match {
      case 1 => println("First")
      case 2 => println("Second")
      case _ => println("Not Known Number")
    }
    // A match is an expression: bind its value, using guards instead of constants.
    val result = data match {
      case i if i == 1 => "The First"
      case number if number == 2 => "The Second" + number
      case _ => "Not Known Number"
    }
    println(result)
    // Match on every character of a string; ' ' gets a special description.
    for (c <- "Spark !") {
      val description = c match {
        case ' ' => "space"
        case ch => "Char: " + ch
      }
      println(description)
    }
  }
}
package com.twitter.finagle.thrift
import com.twitter.app.GlobalFlag
import com.twitter.conversions.storage._
import com.twitter.finagle.stats.{ClientStatsReceiver, Counter, StatsReceiver}
import com.twitter.finagle._
import com.twitter.scrooge._
import com.twitter.util._
import java.util.Arrays
import org.apache.thrift.TApplicationException
import org.apache.thrift.protocol.{TMessageType, TMessage, TProtocolFactory}
import org.apache.thrift.transport.TMemoryInputTransport
/**
 * Global flag capping the size of ThriftServiceIface's per-thread reusable
 * transport buffer (see `tlReusableBuffer` below); defaults to 16 KB.
 */
object maxReusableBufferSize extends GlobalFlag[StorageUnit](
  16.kilobytes,
  "Max size (bytes) for ThriftServiceIface reusable transport buffer"
)
/**
* Typeclass ServiceIfaceBuilder[T] creates T-typed interfaces from thrift clients.
* Scrooge generates implementations of this builder.
*/
trait ServiceIfaceBuilder[ServiceIface] {
  /**
   * Build a client ServiceIface wrapping a binary thrift service.
   *
   * @param thriftService An underlying thrift service that works on byte arrays.
   * @param pf The protocol factory used to encode/decode thrift structures.
   * @param stats Receiver under which per-method request/success/failure counters
   *              are recorded (see `ThriftServiceIface`'s stats filter).
   */
  def newServiceIface(
    thriftService: Service[ThriftClientRequest, Array[Byte]],
    pf: TProtocolFactory,
    stats: StatsReceiver
  ): ServiceIface
}
/**
* A typeclass to construct a MethodIface by wrapping a ServiceIface.
* This is a compatibility constructor to replace an existing Future interface
* with one built from a ServiceIface.
*
* Scrooge generates implementations of this builder.
*/
trait MethodIfaceBuilder[ServiceIface, MethodIface] {
  /**
   * Build a FutureIface wrapping a ServiceIface.
   *
   * @param serviceIface the per-method Service collection to adapt into an
   *                     interface of methods returning Futures.
   */
  def newMethodIface(serviceIface: ServiceIface): MethodIface
}
object ThriftMethodStats {
  /** Creates the "requests"/"success"/"failures" counters and "failures" scope under `stats`. */
  def apply(stats: StatsReceiver): ThriftMethodStats = {
    val requests = stats.counter("requests")
    val success = stats.counter("success")
    val failures = stats.counter("failures")
    val failuresScope = stats.scope("failures")
    ThriftMethodStats(requests, success, failures, failuresScope)
  }
}
/**
 * Per-method stat handles: request/success/failure counters plus a scope under
 * which individual failure classes are counted by exception name.
 */
case class ThriftMethodStats(
  requestsCounter: Counter,
  successCounter: Counter,
  failuresCounter: Counter,
  failuresScope: StatsReceiver)
/**
* Construct Service interface for a thrift method.
*
* There are two ways to use a Scrooge-generated thrift service with Finagle:
*
* 1. Using a Service interface, i.e. a collection of finagle [[Service Services]].
*
* 2. Using a method interface, i.e. a collection of methods returning [[Future Futures]].
*
* Example: for a thrift service
* {{{
* service Logger {
* string log(1: string message, 2: i32 logLevel);
* i32 getLogSize();
* }
* }}}
*
* the Service interface is
* {{{
* trait LoggerServiceIface {
* val log: com.twitter.finagle.Service[Logger.Log.Args, Logger.Log.Result]
* val getLogSize: com.twitter.finagle.Service[Logger.GetLogSize.Args, Logger.GetLogSize.Result]
* }
* }}}
*
* and the method interface is
* {{{
* trait Logger[Future] {
* def log(message: String, logLevel: Int): Future[String]
* def getLogSize(): Future[Int]
* }
* }}}
*
* Service interfaces can be modified and composed with Finagle [[Filter Filters]].
*/
object ThriftServiceIface {
private val resetCounter = ClientStatsReceiver.scope("thrift_service_iface").counter("reusable_buffer_resets")
/**
* Build a Service from a given Thrift method.
*/
def apply(
  method: ThriftMethod,
  thriftService: Service[ThriftClientRequest, Array[Byte]],
  pf: TProtocolFactory,
  stats: StatsReceiver
): Service[method.Args, method.Result] = {
  // Stats are recorded outermost; args/results are then (de)serialized before
  // reaching the underlying byte-level thrift service.
  val codec = thriftCodecFilter(method, pf)
  statsFilter(method, stats).andThen(codec.andThen(thriftService))
}
/**
* A [[Filter]] that updates success and failure stats for a thrift method.
* Thrift exceptions are counted as failures here.
*/
private def statsFilter(
  method: ThriftMethod,
  stats: StatsReceiver
): SimpleFilter[method.Args, method.Result] = {
  // Counters live under <serviceName>.<methodName>.
  val methodStats = ThriftMethodStats(stats.scope(method.serviceName).scope(method.name))
  new SimpleFilter[method.Args, method.Result] {
    def apply(
      args: method.Args,
      service: Service[method.Args, method.Result]
    ): Future[method.Result] = {
      methodStats.requestsCounter.incr()
      // Only successfully-decoded responses are inspected here: a Result carrying
      // a declared thrift exception counts as a failure (keyed by the exception
      // chain). Failures of the Future itself are not counted by this filter.
      service(args).onSuccess { result =>
        if (result.successField.isDefined) {
          methodStats.successCounter.incr()
        } else {
          result.firstException.map { ex =>
            methodStats.failuresCounter.incr()
            methodStats.failuresScope.counter(Throwables.mkString(ex): _*).incr()
          }
        }
      }
    }
  }
}
/**
* A [[Filter]] that wraps a binary thrift Service[ThriftClientRequest, Array[Byte]]
* and produces a [[Service]] from a [[ThriftStruct]] to [[ThriftClientRequest]] (i.e. bytes).
*/
private def thriftCodecFilter(
method: ThriftMethod,
pf: TProtocolFactory
): Filter[method.Args, method.Result, ThriftClientRequest, Array[Byte]] =
new Filter[method.Args, method.Result, ThriftClientRequest, Array[Byte]] {
override def apply(
args: method.Args,
service: Service[ThriftClientRequest, Array[Byte]]
): Future[method.Result] = {
val request = encodeRequest(method.name, args, pf, method.oneway)
service(request).map { bytes =>
decodeResponse(bytes, method.responseCodec, pf)
}
}
}
def resultFilter(
method: ThriftMethod
): Filter[method.Args, method.SuccessType, method.Args, method.Result] =
new Filter[method.Args, method.SuccessType, method.Args, method.Result] {
def apply(
args: method.Args,
service: Service[method.Args, method.Result]
): Future[method.SuccessType] = {
service(args).flatMap { response: method.Result =>
response.firstException() match {
case Some(exception) =>
setServiceName(exception, method.serviceName)
Future.exception(exception)
case None =>
response.successField match {
case Some(result) =>
Future.value(result)
case None =>
Future.exception(new TApplicationException(
TApplicationException.MISSING_RESULT,
s"Thrift method '${method.name}' failed: missing result"
))
}
}
}
}
}
private[this] val tlReusableBuffer = new ThreadLocal[TReusableMemoryTransport] {
override def initialValue() = TReusableMemoryTransport(512)
}
private[this] def getReusableBuffer(): TReusableMemoryTransport = {
val buf = tlReusableBuffer.get()
buf.reset()
buf
}
private[this] def resetBuffer(trans: TReusableMemoryTransport): Unit = {
if (trans.currentCapacity > maxReusableBufferSize().inBytes) {
resetCounter.incr()
tlReusableBuffer.remove()
}
}
private def encodeRequest(
methodName: String,
args: ThriftStruct,
pf: TProtocolFactory,
oneway: Boolean
): ThriftClientRequest = {
val buf = getReusableBuffer()
val oprot = pf.getProtocol(buf)
oprot.writeMessageBegin(new TMessage(methodName, TMessageType.CALL, 0))
args.write(oprot)
oprot.writeMessageEnd()
val bytes = Arrays.copyOfRange(buf.getArray, 0, buf.length)
resetBuffer(buf)
new ThriftClientRequest(bytes, oneway)
}
private def decodeResponse[T <: ThriftStruct](
resBytes: Array[Byte],
codec: ThriftStructCodec[T],
pf: TProtocolFactory
): T = {
val iprot = pf.getProtocol(new TMemoryInputTransport(resBytes))
val msg = iprot.readMessageBegin()
if (msg.`type` == TMessageType.EXCEPTION) {
val exception = TApplicationException.read(iprot)
iprot.readMessageEnd()
throw exception
} else {
val result = codec.decode(iprot)
iprot.readMessageEnd()
result
}
}
private def setServiceName(ex: Throwable, serviceName: String): Throwable =
ex match {
case se: SourcedException if !serviceName.isEmpty =>
se.serviceName = serviceName
se
case _ => ex
}
}
| sveinnfannar/finagle | finagle-thrift/src/main/scala/com/twitter/finagle/thrift/ThriftServiceIface.scala | Scala | apache-2.0 | 8,039 |
package com.twitter.service.cachet.test.unit
import javax.servlet.http.{HttpServletRequest, HttpServletResponse}
import javax.servlet.FilterChain
import limiter.{Limiter, LimitingService}
import org.specs._
import org.specs.mock._
import org.specs.mock.JMocker._
import com.twitter.service.cachet.test.mock._
object LimiterSpec extends Specification with JMocker {
  "Limiter" should {
    // Fresh mocks and a fresh Limiter are installed before each example.
    var limitingService = null: LimitingService
    var limiter = null: Limiter
    var request = null: HttpServletRequest
    var response = null: HttpServletResponse
    var chain = null: FilterChain

    doBefore{
      request = mock[HttpServletRequest]
      response = mock[HttpServletResponse]
      chain = mock[FilterChain]
      limitingService = mock[LimitingService]
      limiter = new Limiter(limitingService)
    }

    "when the request is under the limit" >> {
      "applies the chain" >> {
        expect{
          allowing(limitingService).isUnderLimit(request) willReturn true
          one(chain).doFilter(request, response)
        }
        limiter(request, response, chain)
      }
    }

    "when the request is over the limit" >> {
      // Fixed description: this example verifies the chain is NOT applied and
      // the request is rejected with a 400 status (the old description read
      // "applies the chain", contradicting the expectations below).
      "does not apply the chain and responds with SC_BAD_REQUEST" >> {
        expect{
          allowing(limitingService).isUnderLimit(request) willReturn false
          one(response).setStatus(HttpServletResponse.SC_BAD_REQUEST)
          never(chain).doFilter(request, response)
        }
        limiter(request, response, chain)
      }
    }
  }
}
} | nkallen/cachet | src/test/scala/com/twitter/service/cachet/test/unit/LimiterSpec.scala | Scala | mit | 1,463 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.sql
import quasar.CIStringArbitrary
// Aggregates all ScalaCheck Arbitrary instances for the SQL AST so test
// suites can mix in (or import from) a single place.
trait Arbitraries extends
  ExprArbitrary with
  ScopedExprArbitrary with
  StatementArbitrary with
  CIStringArbitrary

// Importable instance for suites that prefer `import Arbitraries._`.
object Arbitraries extends Arbitraries
| slamdata/slamengine | sql/src/test/scala/quasar/sql/Arbitraries.scala | Scala | apache-2.0 | 818 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import scala.collection.mutable.HashMap
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.storage.RDDInfo
/**
* :: DeveloperApi ::
* Stores information about a stage to pass from the scheduler to SparkListeners.
*/
@DeveloperApi
class StageInfo(
    val stageId: Int,
    private val attemptId: Int,
    val name: String,
    val numTasks: Int,
    val rddInfos: Seq[RDDInfo],
    val parentIds: Seq[Int],
    val details: String,
    val taskMetrics: TaskMetrics = null,
    private[spark] val taskLocalityPreferences: Seq[Seq[TaskLocation]] = Seq.empty,
    private[spark] val shuffleDepId: Option[Int] = None,
    val resourceProfileId: Int) {

  /** Time at which the DAGScheduler handed this stage to a TaskScheduler, if it has been. */
  var submissionTime: Option[Long] = None
  /** Time at which the stage finished or was cancelled, if it has. */
  var completionTime: Option[Long] = None
  /** Failure description; present only when the stage failed. */
  var failureReason: Option[String] = None

  /**
   * Terminal values of accumulables updated during this stage, including all the user-defined
   * accumulators.
   */
  val accumulables = HashMap[Long, AccumulableInfo]()

  /** Mark the stage failed: stamp the completion time and keep the reason. */
  def stageFailed(reason: String): Unit = {
    completionTime = Some(System.currentTimeMillis)
    failureReason = Some(reason)
  }

  // Kept as a parenthesized method (rather than exposing the constructor arg
  // directly) for compatibility with existing callers.
  def attemptNumber(): Int = attemptId

  /** "failed" / "succeeded" once complete, "running" otherwise. */
  private[spark] def getStatusString: String = (completionTime, failureReason) match {
    case (Some(_), Some(_)) => "failed"
    case (Some(_), None) => "succeeded"
    case _ => "running"
  }
}
private[spark] object StageInfo {
  /**
   * Build a [[StageInfo]] snapshot from a scheduler [[Stage]].
   *
   * Stage boundaries are shuffle dependencies, so every RDD reachable from this
   * stage's RDD through narrow dependencies belongs to the stage; their
   * [[RDDInfo]]s are captured alongside the stage's own RDD.
   */
  def fromStage(
      stage: Stage,
      attemptId: Int,
      numTasks: Option[Int] = None,
      taskMetrics: TaskMetrics = null,
      taskLocalityPreferences: Seq[Seq[TaskLocation]] = Seq.empty,
      resourceProfileId: Int
    ): StageInfo = {
    // The stage's own RDD first, then all narrow-dependency ancestors.
    val rddInfos = RDDInfo.fromRdd(stage.rdd) +: stage.rdd.getNarrowAncestors.map(RDDInfo.fromRdd)
    // Only shuffle-map stages carry a shuffle dependency id.
    val shuffleDepId = stage match {
      case sms: ShuffleMapStage => Option(sms.shuffleDep).map(_.shuffleId)
      case _ => None
    }
    new StageInfo(
      stage.id,
      attemptId,
      stage.name,
      numTasks.getOrElse(stage.numTasks),
      rddInfos,
      stage.parents.map(_.id),
      stage.details,
      taskMetrics,
      taskLocalityPreferences,
      shuffleDepId,
      resourceProfileId)
  }
}
| hvanhovell/spark | core/src/main/scala/org/apache/spark/scheduler/StageInfo.scala | Scala | apache-2.0 | 3,771 |
package mapalgebra
import java.util.concurrent.TimeUnit
import geotrellis.raster.{ IntArrayTile, Tile }
import org.openjdk.jmh.annotations._
// --- //
// JMH benchmarks comparing tile-creation strategies: filling with a constant
// versus deriving each cell from its (col, row) position, at two tile sizes.
@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@State(Scope.Thread)
class CreationBench {

  // Constant-fill baseline, 256x256 cells.
  @Benchmark
  def constant256: Tile = IntArrayTile.fill(5, 256, 256)

  // Constant-fill baseline, 512x512 cells.
  @Benchmark
  def constant512: Tile = IntArrayTile.fill(5, 512, 512)

  // Per-cell function of position (c * r), 256x256 cells.
  @Benchmark
  def function256: Tile = IntArrayTile.empty(256, 256).map { (c, r, _) => c * r }

  // Per-cell function of position (c * r), 512x512 cells.
  @Benchmark
  def function512: Tile = IntArrayTile.empty(512, 512).map { (c, r, _) => c * r }
}
| fosskers/mapalgebra | bench/geotrellis/src/main/scala/mapalgebra/Creation.scala | Scala | bsd-3-clause | 611 |
/*
* Copyright 2015 ligaDATA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ligadata.keyvaluestore
// Hbase core
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
// hbase client
import org.apache.hadoop.hbase.client.Get
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.client.Delete
import org.apache.hadoop.hbase.client.HConnection
import org.apache.hadoop.hbase.client.HConnectionManager
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.HBaseAdmin;
// hbase filters
import org.apache.hadoop.hbase.filter.Filter
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter
import org.apache.hadoop.hbase.filter.FilterList
import org.apache.hadoop.hbase.filter.CompareFilter
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
// hadoop security model
import org.apache.hadoop.security.UserGroupInformation
import org.apache.logging.log4j._
import java.nio.ByteBuffer
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import com.ligadata.Exceptions._
import com.ligadata.Utils.{KamanjaLoaderInfo}
import com.ligadata.KvBase.{ Key, Value, TimeRange }
import com.ligadata.StorageBase.{ DataStore, Transaction, StorageAdapterObj }
import java.util.{ Date, Calendar, TimeZone }
import java.text.SimpleDateFormat
import scala.collection.JavaConversions._
// Key/value DataStore implementation backed by HBase. Configuration arrives as
// a JSON string ("hostlist"/"Location", "SchemaName", optional
// "AdapterSpecificConfig", optional Kerberos settings).
class HBaseAdapter(val kvManagerLoader: KamanjaLoaderInfo, val datastoreConfig: String) extends DataStore {
  // Raw JSON configuration string (trimmed; empty if none was supplied).
  val adapterConfig = if (datastoreConfig != null) datastoreConfig.trim else ""
  val loggerName = this.getClass.getName
  val logger = LogManager.getLogger(loggerName)
  private[this] val lock = new Object
  // Containers whose backing tables are known to exist already.
  // NOTE(review): read/updated in CheckTableExists without holding `lock` —
  // confirm single-threaded use or add synchronization.
  private var containerList: scala.collection.mutable.Set[String] = scala.collection.mutable.Set[String]()

  //logger.setLevel(Level.DEBUG)
  // Fail fast when no configuration was provided at all.
  if (adapterConfig.size == 0) {
    throw new Exception("Not found valid HBase Configuration.")
  }

  logger.debug("HBase configuration:" + adapterConfig)

  // Top-level parsed configuration map.
  var parsed_json: Map[String, Any] = null
  try {
    val json = parse(adapterConfig)
    if (json == null || json.values == null) {
      logger.error("Failed to parse HBase JSON configuration string:" + adapterConfig)
      throw new Exception("Failed to parse HBase JSON configuration string:" + adapterConfig)
    }
    parsed_json = json.values.asInstanceOf[Map[String, Any]]
  } catch {
    case e: Exception => {
      logger.error("Failed to parse HBase JSON configuration string:%s. Reason:%s Message:%s".format(adapterConfig, e.getCause, e.getMessage))
      throw e
    }
  }

  // Optional nested "AdapterSpecificConfig" map; consulted as a fallback by
  // getOptionalField. (The error messages below say "Cassandra" — copy/paste
  // from a sibling adapter; the behavior is still HBase's.)
  var adapterSpecificConfig_json: Map[String, Any] = null
  if (parsed_json.contains("AdapterSpecificConfig")) {
    val adapterSpecificStr = parsed_json.getOrElse("AdapterSpecificConfig", "").toString.trim
    if (adapterSpecificStr.size > 0) {
      try {
        val json = parse(adapterSpecificStr)
        if (json == null || json.values == null) {
          logger.error("Failed to parse Cassandra Adapter Specific JSON configuration string:" + adapterSpecificStr)
          throw new Exception("Failed to parse Cassandra Adapter Specific JSON configuration string:" + adapterSpecificStr)
        }
        adapterSpecificConfig_json = json.values.asInstanceOf[Map[String, Any]]
      } catch {
        case e: Exception => {
          logger.error("Failed to parse Cassandra Adapter Specific JSON configuration string:%s. Reason:%s Message:%s".format(adapterSpecificStr, e.getCause, e.getMessage))
          throw e
        }
      }
    }
  }
private def getOptionalField(key: String, main_json: Map[String, Any], adapterSpecific_json: Map[String, Any], default: Any): Any = {
if (main_json != null) {
val mainVal = main_json.getOrElse(key, null)
if (mainVal != null)
return mainVal
}
if (adapterSpecific_json != null) {
val mainVal1 = adapterSpecific_json.getOrElse(key, null)
if (mainVal1 != null)
return mainVal1
}
return default
}
  /**
   * Ensure the HBase namespace exists, creating it if the lookup fails.
   * Existence is detected by getNamespaceDescriptor throwing; creation
   * failures surface as ConnectionFailedException.
   */
  def CreateNameSpace(nameSpace: String): Unit = {
    relogin
    try{
      val nsd = admin.getNamespaceDescriptor(nameSpace)
      // Namespace already present; nothing to do.
      return
    } catch{
      case e: Exception => {
        logger.info("Namespace " + nameSpace + " doesn't exist, create it")
      }
    }
    try{
      admin.createNamespace(NamespaceDescriptor.create(nameSpace).build)
    } catch{
      case e: Exception => {
        val stackTrace = StackTrace.ThrowableTraceString(e)
        logger.debug("StackTrace:"+stackTrace)
        throw new ConnectionFailedException("Unable to create hbase name space " + nameSpace + ":" + e.getMessage())
      }
    }
  }
  // Zookeeper quorum hosts; new-style "hostlist" wins over legacy "Location".
  val hostnames = if (parsed_json.contains("hostlist")) parsed_json.getOrElse("hostlist", "localhost").toString.trim else parsed_json.getOrElse("Location", "localhost").toString.trim
  // HBase namespace used to qualify all container tables.
  // NOTE(review): both branches read "SchemaName", so the condition is
  // redundant — possibly a second key was intended; confirm against the other
  // storage adapters.
  val namespace = if (parsed_json.contains("SchemaName")) parsed_json.getOrElse("SchemaName", "default").toString.trim else parsed_json.getOrElse("SchemaName", "default").toString.trim

  // Client configuration; each knob can be overridden via the main or
  // adapter-specific JSON config (see getOptionalField).
  val config = HBaseConfiguration.create();
  config.setInt("zookeeper.session.timeout", getOptionalField("zookeeper_session_timeout", parsed_json, adapterSpecificConfig_json, "5000").toString.trim.toInt);
  config.setInt("zookeeper.recovery.retry", getOptionalField("zookeeper_recovery_retry", parsed_json, adapterSpecificConfig_json, "1").toString.trim.toInt);
  config.setInt("hbase.client.retries.number", getOptionalField("hbase_client_retries_number", parsed_json, adapterSpecificConfig_json, "3").toString.trim.toInt);
  config.setInt("hbase.client.pause", getOptionalField("hbase_client_pause", parsed_json, adapterSpecificConfig_json, "5000").toString.trim.toInt);
  config.set("hbase.zookeeper.quorum", hostnames);
  config.setInt("hbase.client.keyvalue.maxsize", getOptionalField("hbase_client_keyvalue_maxsize", parsed_json, adapterSpecificConfig_json, "104857600").toString.trim.toInt);

  // Optional Kerberos authentication. Only "kerberos" is supported; any other
  // value of "authentication" is rejected.
  var isKerberos: Boolean = false
  var ugi: UserGroupInformation = null

  val auth = getOptionalField("authentication", parsed_json, adapterSpecificConfig_json, "").toString.trim
  if (auth.size > 0) {
    isKerberos = auth.compareToIgnoreCase("kerberos") == 0
    if (isKerberos) {
      try {
        val regionserver_principal = getOptionalField("regionserver_principal", parsed_json, adapterSpecificConfig_json, "").toString.trim
        val master_principal = getOptionalField("master_principal", parsed_json, adapterSpecificConfig_json, "").toString.trim
        val principal = getOptionalField("principal", parsed_json, adapterSpecificConfig_json, "").toString.trim
        val keytab = getOptionalField("keytab", parsed_json, adapterSpecificConfig_json, "").toString.trim

        logger.debug("HBase info => Hosts:" + hostnames + ", Namespace:" + namespace + ", Principal:" + principal + ", Keytab:" + keytab + ", hbase.regionserver.kerberos.principal:" + regionserver_principal + ", hbase.master.kerberos.principal:" + master_principal)

        config.set("hadoop.proxyuser.hdfs.groups", "*")
        config.set("hadoop.security.authorization", "true")
        config.set("hbase.security.authentication", "kerberos")
        config.set("hadoop.security.authentication", "kerberos")
        config.set("hbase.regionserver.kerberos.principal", regionserver_principal)
        config.set("hbase.master.kerberos.principal", master_principal)

        org.apache.hadoop.security.UserGroupInformation.setConfiguration(config);

        UserGroupInformation.loginUserFromKeytab(principal, keytab);

        // Retained so relogin can refresh the TGT later.
        ugi = UserGroupInformation.getLoginUser
      } catch {
        case e: Exception => {
          logger.error("HBase issue from JSON configuration string:%s. Reason:%s Message:%s".format(adapterConfig, e.getCause, e.getMessage))
          throw e
        }
      }
    } else {
      logger.error("Not handling any authentication other than KERBEROS. AdapterSpecificConfig:" + adapterConfig)
      throw new Exception("Not handling any authentication other than KERBEROS. AdapterSpecificConfig:" + adapterConfig)
    }
  } else {
    logger.debug("HBase info => Hosts:" + hostnames + ", Namespace:" + namespace)
  }

  // Shared connection for all table handles created by this adapter.
  var connection: HConnection = _
  try {
    connection = HConnectionManager.createConnection(config);
  } catch {
    case e: Exception => {
      val stackTrace = StackTrace.ThrowableTraceString(e)
      logger.error("Stacktrace:" + stackTrace)
      throw new ConnectionFailedException("Unable to connect to hbase at " + hostnames + ":" + e.getMessage())
    }
  }

  val admin = new HBaseAdmin(config);
  // Make sure the configured namespace exists before any table is touched.
  CreateNameSpace(namespace)
  // Refresh the Kerberos TGT if one is in use; a no-op otherwise. Failures are
  // deliberately logged-and-swallowed so a transient relogin problem does not
  // fail the operation in progress.
  private def relogin: Unit = {
    try {
      if (ugi != null)
        ugi.checkTGTAndReloginFromKeytab
    } catch {
      case e: Exception => {
        logger.error("Failed to relogin into HBase. Message:" + e.getMessage())
        // Not throwing exception from here
      }
    }
  }
  // Create the container table if absent, with the three column families the
  // adapter uses: "key", "serializerType" and "serializedInfo".
  // NOTE(review): the "key" family is created here but put() never writes to
  // it — confirm whether it is still needed.
  private def createTable(tableName: String): Unit = {
    relogin
    if (!admin.tableExists(tableName)) {
      val tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
      val colDesc1 = new HColumnDescriptor("key".getBytes())
      val colDesc2 = new HColumnDescriptor("serializerType".getBytes())
      val colDesc3 = new HColumnDescriptor("serializedInfo".getBytes())
      tableDesc.addFamily(colDesc1)
      tableDesc.addFamily(colDesc2)
      tableDesc.addFamily(colDesc3)
      admin.createTable(tableDesc);
    }
  }
  // Disable then delete the table, if it exists (HBase requires disabling
  // before deletion).
  private def dropTable(tableName: String): Unit = {
    relogin
    if (admin.tableExists(tableName)) {
      admin.disableTable(tableName)
      admin.deleteTable(tableName)
    }
  }
private def CheckTableExists(containerName: String): Unit = {
if (containerList.contains(containerName)) {
return
} else {
CreateContainer(containerName)
containerList.add(containerName)
}
}
  /**
   * Delete the HBase namespace if it exists. Absence (detected by
   * getNamespaceDescriptor throwing) is a no-op; deletion failures surface as
   * ConnectionFailedException.
   */
  def DropNameSpace(namespace: String): Unit = lock.synchronized {
    relogin
    try{
      val nsd = admin.getNamespaceDescriptor(namespace)
    } catch{
      case e: Exception => {
        logger.info("Namespace " + namespace + " doesn't exist, nothing to delete")
        return
      }
    }
    try{
      admin.deleteNamespace(namespace)
    } catch{
      case e: Exception => {
        val stackTrace = StackTrace.ThrowableTraceString(e)
        logger.debug("StackTrace:"+stackTrace)
        throw new ConnectionFailedException("Unable to delete hbase name space " + namespace + ":" + e.getMessage())
      }
    }
  }
private def toTableName(containerName: String): String = {
// we need to check for other restrictions as well
// such as length of the table, special characters etc
//containerName.replace('.','_')
namespace + ':' + containerName.toLowerCase.replace('.', '_').replace('-', '_')
}
private def toFullTableName(containerName: String): String = {
// we need to check for other restrictions as well
// such as length of the table, special characters etc
toTableName(containerName)
}
  // Create the backing table for one container. Failures are logged and
  // swallowed; callers get no signal on error.
  private def CreateContainer(containerName: String): Unit = lock.synchronized {
    var tableName = toTableName(containerName)
    var fullTableName = toFullTableName(containerName)
    try {
      createTable(fullTableName)
    } catch {
      case e: Exception => {
        val stackTrace = StackTrace.ThrowableTraceString(e)
        logger.error("Stacktrace:" + stackTrace)
      }
    }
  }
override def CreateContainer(containerNames: Array[String]): Unit = {
logger.info("create the container tables")
containerNames.foreach(cont => {
logger.info("create the container " + cont)
CreateContainer(cont)
})
}
private def MakeCompositeKey(key: Key): Array[Byte] = {
var compKey = key.timePartition.toString + "|" + key.bucketKey.mkString(".") +
"|" + key.transactionId.toString + "|" + key.rowId.toString
compKey.getBytes()
}
  /**
   * Persist one key/value pair. The row key is MakeCompositeKey(key); the
   * serializer type and payload are written to the "serializerType" and
   * "serializedInfo" column families (qualifier "base").
   * Unlike most operations here, failures are re-thrown to the caller.
   */
  override def put(containerName: String, key: Key, value: Value): Unit = {
    var tableName = toFullTableName(containerName)
    try{
      relogin
      CheckTableExists(containerName)
      val tableHBase = connection.getTable(tableName);
      var kba = MakeCompositeKey(key)
      var p = new Put(kba)
      p.add(Bytes.toBytes("serializerType"),Bytes.toBytes("base"),Bytes.toBytes(value.serializerType))
      p.add(Bytes.toBytes("serializedInfo"),Bytes.toBytes("base"),value.serializedInfo)
      tableHBase.put(p)
    } catch {
      case e:Exception => {
        val stackTrace = StackTrace.ThrowableTraceString(e)
        logger.debug("Stacktrace:"+stackTrace)
        throw new Exception("Failed to save an object in HBase table " + tableName + ":" + e.getMessage())
      }
    }
  }
  /**
   * Batch variant: for each (containerName, key/value pairs) entry, build the
   * Puts and write them in one table call.
   * NOTE(review): unlike the single put(), failures here are logged and
   * swallowed — callers cannot tell a batch write failed. Table handles are
   * also never closed. Confirm both are intended.
   */
  override def put(data_list: Array[(String, Array[(Key, Value)])]): Unit = {
    try{
      relogin
      data_list.foreach(li => {
        var containerName = li._1
        CheckTableExists(containerName)
        var tableName = toFullTableName(containerName)
        val tableHBase = connection.getTable(tableName);
        var keyValuePairs = li._2
        var puts = new Array[Put](0)
        keyValuePairs.foreach(keyValuePair => {
          var key = keyValuePair._1
          var value = keyValuePair._2
          var kba = MakeCompositeKey(key)
          var p = new Put(kba)
          p.add(Bytes.toBytes("serializerType"),Bytes.toBytes("base"),Bytes.toBytes(value.serializerType))
          p.add(Bytes.toBytes("serializedInfo"),Bytes.toBytes("base"),value.serializedInfo)
          puts = puts :+ p
        })
        if( puts.length > 0 ){
          tableHBase.put(puts.toList)
        }
      })
    } catch {
      case e: Exception => {
        val stackTrace = StackTrace.ThrowableTraceString(e)
        logger.error("Stacktrace:" + stackTrace)
      }
    }
  }
  // delete operations
  /**
   * Delete the rows addressed by the given keys (composite row keys built via
   * MakeCompositeKey). Failures are logged and swallowed.
   */
  override def del(containerName: String, keys: Array[Key]): Unit = {
    try{
      relogin
      CheckTableExists(containerName)
      var tableName = toFullTableName(containerName)
      val tableHBase = connection.getTable(tableName);
      var dels = new Array[Delete](0)
      keys.foreach(key => {
        var kba = MakeCompositeKey(key)
        dels = dels :+ new Delete(kba)
      })
      if( dels.length > 0 ){
        // callling tableHBase.delete(dels.toList) results in an exception as below ??
        // Stacktrace:java.lang.UnsupportedOperationException
        //   at java.util.AbstractList.remove(AbstractList.java:161)
        //   at org.apache.hadoop.hbase.client.HTable.delete(HTable.java:896)
        //   at com.ligadata.keyvaluestore.HBaseAdapter.del(HBaseAdapter.scala:387)
        // HTable.delete mutates the list it is given, so wrap in a mutable
        // java.util.ArrayList instead of the immutable scala wrapper.
        val dl = new java.util.ArrayList(dels.toList)
        tableHBase.delete(dl)
      }
      else{
        logger.info("No rows found for the delete operation")
      }
    } catch {
      case e: Exception => {
        val stackTrace = StackTrace.ThrowableTraceString(e)
        logger.error("Stacktrace:" + stackTrace)
      }
    }
  }
  /**
   * Delete all rows whose time partition falls in the given range AND whose
   * bucket key matches one of bucketKeys. The scan is bounded by the string
   * forms of the epoch times; matching rows are collected and deleted in bulk.
   * Failures are logged and swallowed.
   * NOTE(review): start/stop rows compare the *string* representation of the
   * timestamps, so ordering is lexicographic, not numeric — timestamps with
   * differing digit counts will not range-scan correctly. Confirm the
   * timePartition values are fixed-width in practice.
   */
  override def del(containerName: String, time: TimeRange, bucketKeys: Array[Array[String]]): Unit = {
    try{
      relogin
      CheckTableExists(containerName)
      var tableName = toFullTableName(containerName)
      val tableHBase = connection.getTable(tableName);
      // Set of "a.b.c"-joined bucket keys for O(1) membership checks.
      var bucketKeyMap: scala.collection.mutable.Map[String,Boolean] = new scala.collection.mutable.HashMap()
      bucketKeys.foreach(bucketKey => {
        var bkey = bucketKey.mkString(".")
        bucketKeyMap.put(bkey,true)
      })
      // try scan with beginRow and endRow
      logger.info("beginTime => " + time.beginTime)
      logger.info("endTime => " + time.endTime)
      var scan = new Scan()
      scan.setStartRow(Bytes.toBytes(time.beginTime.toString))
      scan.setStopRow(Bytes.toBytes((time.endTime + 1).toString))
      val rs = tableHBase.getScanner(scan);
      val it = rs.iterator()
      var dels = new Array[Delete](0)
      while( it.hasNext() ){
        val r = it.next()
        var k = Bytes.toString(r.getRow())
        // Composite key layout: timePartition|bucketKey|txnId|rowId.
        var keyArray = k.split('|')
        logger.info("searching for " + keyArray(1))
        var keyExists = bucketKeyMap.getOrElse(keyArray(1),null)
        if (keyExists != null ){
          dels = dels :+ new Delete(r.getRow())
        }
      }
      if( dels.length > 0 ){
        // See del(keys): HTable.delete needs a mutable java.util list.
        val dl = new java.util.ArrayList(dels.toList)
        tableHBase.delete(dl)
        // tableHBase.delete(dels.toList)
      }
      else{
        logger.info("No rows found for the delete operation")
      }
    } catch {
      case e: Exception => {
        val stackTrace = StackTrace.ThrowableTraceString(e)
        logger.error("Stacktrace:" + stackTrace)
      }
    }
  }
// get operations
def getRowCount(containerName: String): Long = {
relogin
var tableName = toFullTableName(containerName)
val tableHBase = connection.getTable(tableName);
var scan = new Scan();
scan.setFilter(new FirstKeyOnlyFilter());
var rs = tableHBase.getScanner(scan);
val it = rs.iterator()
var cnt = 0
while( it.hasNext() ){
var r = it.next()
cnt = cnt + 1
}
return cnt
}
private def processRow(k:String,st:String, si:Array[Byte],callbackFunction: (Key, Value) => Unit){
var keyArray = k.split('|').toArray
var timePartition = keyArray(0).toLong
var keyStr = keyArray(1)
var tId = keyArray(2).toLong
var rId = keyArray(3).toInt
// format the data to create Key/Value
val bucketKey = if (keyStr != null) keyStr.split('.').toArray else new Array[String](0)
var key = new Key(timePartition, bucketKey, tId, rId)
var value = new Value(st, si)
(callbackFunction)(key, value)
}
private def processRow(key: Key, st:String, si:Array[Byte],callbackFunction: (Key, Value) => Unit){
var value = new Value(st, si)
(callbackFunction)(key, value)
}
private def processKey(k: String,callbackFunction: (Key) => Unit){
var keyArray = k.split('|').toArray
var timePartition = keyArray(0).toLong
var keyStr = keyArray(1)
var tId = keyArray(2).toLong
var rId = keyArray(3).toInt
// format the data to create Key/Value
val bucketKey = if (keyStr != null) keyStr.split('.').toArray else new Array[String](0)
var key = new Key(timePartition, bucketKey, tId, rId)
(callbackFunction)(key)
}
  /**
   * Full-table scan: stream every row through callbackFunction, reading back
   * the "serializerType" (string) and "serializedInfo" (bytes) families that
   * put() wrote. Failures are logged and swallowed (partial results possible).
   * NOTE(review): the inner match has no default case — a cell from any other
   * family (e.g. the "key" family createTable declares) would raise a
   * MatchError; confirm those families are never populated.
   */
  override def get(containerName: String, callbackFunction: (Key, Value) => Unit): Unit = {
    try{
      relogin
      var tableName = toFullTableName(containerName)
      val tableHBase = connection.getTable(tableName);
      var scan = new Scan();
      var rs = tableHBase.getScanner(scan);
      val it = rs.iterator()
      while( it.hasNext() ){
        val r = it.next()
        var k = Bytes.toString(r.getRow())
        val kvit = r.list().iterator()
        var st:String = null
        var si:Array[Byte] = null
        while( kvit.hasNext() ){
          val kv = kvit.next()
          val q = Bytes.toString(kv.getFamily())
          q match {
            case "serializerType" => {
              st = Bytes.toString(kv.getValue())
            }
            case "serializedInfo" => {
              si = kv.getValue()
            }
          }
        }
        processRow(k,st,si,callbackFunction)
      }
    }catch {
      case e: Exception => {
        val stackTrace = StackTrace.ThrowableTraceString(e)
        logger.error("Stacktrace:" + stackTrace)
      }
    }
  }
  /**
   * Stream every row KEY (no payload) through callbackFunction. Uses a
   * FirstKeyOnlyFilter so only one cell per row crosses the wire.
   * Failures are logged and swallowed.
   */
  override def getKeys(containerName: String, callbackFunction: (Key) => Unit): Unit = {
    try{
      relogin
      var tableName = toFullTableName(containerName)
      val tableHBase = connection.getTable(tableName);
      var scan = new Scan();
      scan.setFilter(new FirstKeyOnlyFilter());
      var rs = tableHBase.getScanner(scan);
      val it = rs.iterator()
      while( it.hasNext() ){
        val r = it.next()
        var k = Bytes.toString(r.getRow())
        processKey(k,callbackFunction)
      }
    }catch {
      case e: Exception => {
        val stackTrace = StackTrace.ThrowableTraceString(e)
        logger.error("Stacktrace:" + stackTrace)
      }
    }
  }
  /**
   * Look up specific keys and stream the matching row keys back.
   * Failures are logged and swallowed.
   * NOTE(review): the SingleColumnValueFilters target family "key"
   * (qualifier "base"), but put() never writes that family — by default such
   * a filter passes rows where the column is missing, so this likely does not
   * filter at all. Confirm against actual data.
   */
  override def getKeys(containerName: String, keys: Array[Key], callbackFunction: (Key) => Unit): Unit = {
    try{
      relogin
      var tableName = toFullTableName(containerName)
      val tableHBase = connection.getTable(tableName);
      val filters = new java.util.ArrayList[Filter]()
      keys.foreach(key => {
        var kba = MakeCompositeKey(key)
        val f = new SingleColumnValueFilter(Bytes.toBytes("key"), Bytes.toBytes("base"),
          CompareOp.EQUAL, kba)
        filters.add(f);
      })
      val fl = new FilterList(filters);
      val scan = new Scan();
      scan.setFilter(fl);
      val rs = tableHBase.getScanner(scan);
      val it = rs.iterator()
      while( it.hasNext() ){
        val r = it.next()
        var k = Bytes.toString(r.getRow())
        processKey(k,callbackFunction)
      }
    }catch {
      case e: Exception => {
        val stackTrace = StackTrace.ThrowableTraceString(e)
        logger.error("Stacktrace:" + stackTrace)
      }
    }
  }
  /**
   * Look up specific keys and stream the matching rows (key + value) back.
   * Failures are logged and swallowed.
   * NOTE(review): same concern as getKeys(keys) — the filters target the
   * never-written "key" family, which by default passes rows where the column
   * is missing; and the inner match has no default case (MatchError risk for
   * unexpected families). Confirm both.
   */
  override def get(containerName: String, keys: Array[Key], callbackFunction: (Key, Value) => Unit): Unit = {
    try{
      relogin
      var tableName = toFullTableName(containerName)
      val tableHBase = connection.getTable(tableName);
      val filters = new java.util.ArrayList[Filter]()
      keys.foreach(key => {
        var kba = MakeCompositeKey(key)
        val f = new SingleColumnValueFilter(Bytes.toBytes("key"), Bytes.toBytes("base"),
          CompareOp.EQUAL, kba)
        filters.add(f);
      })
      val fl = new FilterList(filters);
      val scan = new Scan();
      scan.setFilter(fl);
      val rs = tableHBase.getScanner(scan);
      val it = rs.iterator()
      while( it.hasNext() ){
        val r = it.next()
        var k = Bytes.toString(r.getRow())
        val kvit = r.list().iterator()
        var st:String = null
        var si:Array[Byte] = null
        while( kvit.hasNext() ){
          val kv = kvit.next()
          val q = Bytes.toString(kv.getFamily())
          q match {
            case "serializerType" => {
              st = Bytes.toString(kv.getValue())
            }
            case "serializedInfo" => {
              si = kv.getValue()
            }
          }
        }
        processRow(k,st,si,callbackFunction)
      }
    }catch {
      case e: Exception => {
        val stackTrace = StackTrace.ThrowableTraceString(e)
        logger.error("Stacktrace:" + stackTrace)
      }
    }
  }
  /**
   * For each time range, scan the rows whose time partition falls inside it
   * and stream key+value through callbackFunction. Failures are logged and
   * swallowed.
   * NOTE(review): scan bounds use the *string* form of the timestamps, so
   * ordering is lexicographic — see the note on del(containerName, time, ...).
   */
  override def get(containerName: String, time_ranges: Array[TimeRange], callbackFunction: (Key, Value) => Unit): Unit = {
    try{
      relogin
      CheckTableExists(containerName)
      var tableName = toFullTableName(containerName)
      val tableHBase = connection.getTable(tableName);
      time_ranges.foreach(time_range => {
        // try scan with beginRow and endRow
        var scan = new Scan()
        scan.setStartRow(Bytes.toBytes(time_range.beginTime.toString))
        scan.setStopRow(Bytes.toBytes((time_range.endTime + 1).toString))
        val rs = tableHBase.getScanner(scan);
        val it = rs.iterator()
        while( it.hasNext() ){
          val r = it.next()
          var k = Bytes.toString(r.getRow())
          val kvit = r.list().iterator()
          var st:String = null
          var si:Array[Byte] = null
          while( kvit.hasNext() ){
            val kv = kvit.next()
            val q = Bytes.toString(kv.getFamily())
            q match {
              case "serializerType" => {
                st = Bytes.toString(kv.getValue())
              }
              case "serializedInfo" => {
                si = kv.getValue()
              }
            }
          }
          processRow(k,st,si,callbackFunction)
        }
      })
    }catch {
      case e: Exception => {
        val stackTrace = StackTrace.ThrowableTraceString(e)
        logger.error("Stacktrace:" + stackTrace)
      }
    }
  }
  /**
   * For each time range, scan the rows whose time partition falls inside it
   * and stream only the parsed Keys through callbackFunction. Failures are
   * logged and swallowed.
   * NOTE(review): scan bounds use the *string* form of the timestamps
   * (lexicographic ordering) — see the note on del(containerName, time, ...).
   */
  override def getKeys(containerName: String, time_ranges: Array[TimeRange], callbackFunction: (Key) => Unit): Unit = {
    try{
      relogin
      CheckTableExists(containerName)
      var tableName = toFullTableName(containerName)
      val tableHBase = connection.getTable(tableName);
      time_ranges.foreach(time_range => {
        // try scan with beginRow and endRow
        var scan = new Scan()
        scan.setStartRow(Bytes.toBytes(time_range.beginTime.toString))
        scan.setStopRow(Bytes.toBytes((time_range.endTime + 1).toString))
        val rs = tableHBase.getScanner(scan);
        val it = rs.iterator()
        while( it.hasNext() ){
          val r = it.next()
          var k = Bytes.toString(r.getRow())
          processKey(k,callbackFunction)
        }
      })
    }catch {
      case e: Exception => {
        val stackTrace = StackTrace.ThrowableTraceString(e)
        logger.error("Stacktrace:" + stackTrace)
      }
    }
  }
override def get(containerName: String, time_ranges: Array[TimeRange], bucketKeys: Array[Array[String]], callbackFunction: (Key, Value) => Unit): Unit = {
try{
relogin
CheckTableExists(containerName)
var tableName = toFullTableName(containerName)
val tableHBase = connection.getTable(tableName);
var bucketKeyMap: scala.collection.mutable.Map[String,Boolean] = new scala.collection.mutable.HashMap()
bucketKeys.foreach(bucketKey => {
var bkey = bucketKey.mkString(".")
bucketKeyMap.put(bkey,true)
})
time_ranges.foreach(time_range => {
// try scan with beginRow and endRow
var scan = new Scan()
scan.setStartRow(Bytes.toBytes(time_range.beginTime.toString))
scan.setStopRow(Bytes.toBytes((time_range.endTime + 1).toString))
val rs = tableHBase.getScanner(scan);
val it = rs.iterator()
while( it.hasNext() ){
val r = it.next()
var k = Bytes.toString(r.getRow())
var keyArray = k.split('|')
var keyExists = bucketKeyMap.getOrElse(keyArray(1),null)
var st:String = null
var si:Array[Byte] = null
if (keyExists != null ){
val kvit = r.list().iterator()
while( kvit.hasNext() ){
val kv = kvit.next()
val q = Bytes.toString(kv.getFamily())
q match {
case "serializerType" => {
st = Bytes.toString(kv.getValue())
}
case "serializedInfo" => {
si = kv.getValue()
}
}
}
processRow(k,st,si,callbackFunction)
}
}
})
}catch {
case e: Exception => {
val stackTrace = StackTrace.ThrowableTraceString(e)
logger.error("Stacktrace:" + stackTrace)
}
}
}
override def getKeys(containerName: String, time_ranges: Array[TimeRange], bucketKeys: Array[Array[String]], callbackFunction: (Key) => Unit): Unit = {
try{
relogin
CheckTableExists(containerName)
var tableName = toFullTableName(containerName)
val tableHBase = connection.getTable(tableName);
var bucketKeyMap: scala.collection.mutable.Map[String,Boolean] = new scala.collection.mutable.HashMap()
bucketKeys.foreach(bucketKey => {
var bkey = bucketKey.mkString(".")
bucketKeyMap.put(bkey,true)
})
time_ranges.foreach(time_range => {
// try scan with beginRow and endRow
var scan = new Scan()
scan.setStartRow(Bytes.toBytes(time_range.beginTime.toString))
scan.setStopRow(Bytes.toBytes((time_range.endTime + 1).toString))
val rs = tableHBase.getScanner(scan);
val it = rs.iterator()
while( it.hasNext() ){
val r = it.next()
var k = Bytes.toString(r.getRow())
var keyArray = k.split('|')
var keyExists = bucketKeyMap.getOrElse(keyArray(1),null)
if (keyExists != null ){
processKey(k,callbackFunction)
}
}
})
}catch {
case e: Exception => {
val stackTrace = StackTrace.ThrowableTraceString(e)
logger.error("Stacktrace:" + stackTrace)
}
}
}
override def get(containerName: String, bucketKeys: Array[Array[String]], callbackFunction: (Key, Value) => Unit): Unit = {
try{
relogin
CheckTableExists(containerName)
var tableName = toFullTableName(containerName)
val tableHBase = connection.getTable(tableName);
var bucketKeyMap: scala.collection.mutable.Map[String,Boolean] = new scala.collection.mutable.HashMap()
bucketKeys.foreach(bucketKey => {
var bkey = bucketKey.mkString(".")
bucketKeyMap.put(bkey,true)
})
// try scan with beginRow and endRow
var scan = new Scan()
val rs = tableHBase.getScanner(scan);
val it = rs.iterator()
var dels = new Array[Delete](0)
while( it.hasNext() ){
val r = it.next()
var k = Bytes.toString(r.getRow())
var keyArray = k.split('|')
var keyExists = bucketKeyMap.getOrElse(keyArray(1),null)
if (keyExists != null ){
val kvit = r.list().iterator()
var st:String = null
var si:Array[Byte] = null
while( kvit.hasNext() ){
val kv = kvit.next()
val q = Bytes.toString(kv.getFamily())
q match {
case "serializerType" => {
st = Bytes.toString(kv.getValue())
}
case "serializedInfo" => {
si = kv.getValue()
}
}
}
processRow(k,st,si,callbackFunction)
}
}
} catch {
case e: Exception => {
val stackTrace = StackTrace.ThrowableTraceString(e)
logger.error("Stacktrace:" + stackTrace)
}
}
}
override def getKeys(containerName: String, bucketKeys: Array[Array[String]], callbackFunction: (Key) => Unit): Unit = {
try{
relogin
CheckTableExists(containerName)
var tableName = toFullTableName(containerName)
val tableHBase = connection.getTable(tableName);
var bucketKeyMap: scala.collection.mutable.Map[String,Boolean] = new scala.collection.mutable.HashMap()
bucketKeys.foreach(bucketKey => {
var bkey = bucketKey.mkString(".")
bucketKeyMap.put(bkey,true)
})
// scan the whole table
var scan = new Scan()
val rs = tableHBase.getScanner(scan);
val it = rs.iterator()
while( it.hasNext() ){
val r = it.next()
var k = Bytes.toString(r.getRow())
var keyArray = k.split('|')
var keyExists = bucketKeyMap.getOrElse(keyArray(1),null)
if (keyExists != null ){
processKey(k,callbackFunction)
}
}
} catch {
case e: Exception => {
val stackTrace = StackTrace.ThrowableTraceString(e)
logger.error("Stacktrace:" + stackTrace)
}
}
}
  /** Returns a pass-through transaction object; this adapter has no real transactional semantics. */
  override def beginTx(): Transaction = {
    new HBaseAdapterTx(this)
  }
  // Transaction lifecycle calls are deliberate no-ops: every operation is applied immediately.
  override def endTx(tx: Transaction): Unit = {}
  override def commitTx(tx: Transaction): Unit = {}
  override def rollbackTx(tx: Transaction): Unit = {}
  /**
   * Closes the shared HBase connection and nulls it out, making a second
   * Shutdown call a no-op.
   */
  override def Shutdown(): Unit = {
    logger.info("close the session and connection pool")
    if (connection != null) {
      connection.close()
      connection = null
    }
  }
private def TruncateContainer(containerName: String): Unit = {
try{
relogin
CheckTableExists(containerName)
var tableName = toFullTableName(containerName)
val tableHBase = connection.getTable(tableName);
var dels = new Array[Delete](0)
var scan = new Scan()
val rs = tableHBase.getScanner(scan);
val it = rs.iterator()
while( it.hasNext() ){
val r = it.next()
dels = dels :+ new Delete(r.getRow())
}
val dl = new java.util.ArrayList(dels.toList)
tableHBase.delete(dl)
} catch {
case e: Exception => {
val stackTrace = StackTrace.ThrowableTraceString(e)
logger.error("Stacktrace:" + stackTrace)
}
}
}
override def TruncateContainer(containerNames: Array[String]): Unit = {
logger.info("truncate the container tables")
containerNames.foreach(cont => {
logger.info("truncate the container " + cont)
TruncateContainer(cont)
})
}
private def DropContainer(containerName: String): Unit = lock.synchronized {
try {
relogin
CheckTableExists(containerName)
var tableName = toTableName(containerName)
var fullTableName = toFullTableName(containerName)
dropTable(fullTableName)
} catch {
case e: Exception => {
val stackTrace = StackTrace.ThrowableTraceString(e)
logger.error("Stacktrace:" + stackTrace)
}
}
}
override def DropContainer(containerNames: Array[String]): Unit = {
logger.info("drop the container tables")
containerNames.foreach(cont => {
logger.info("drop the container " + cont)
DropContainer(cont)
})
}
}
/**
 * Pass-through "transaction" for the HBase adapter: the adapter provides no real
 * transactional semantics, so every call is delegated straight to the parent
 * DataStore and applied immediately.
 */
class HBaseAdapterTx(val parent: DataStore) extends Transaction {
  val loggerName = this.getClass.getName
  val logger = LogManager.getLogger(loggerName)
  // put operations — delegated directly to the parent store
  override def put(containerName: String, key: Key, value: Value): Unit = {
    parent.put(containerName, key, value)
  }
  override def put(data_list: Array[(String, Array[(Key, Value)])]): Unit = {
    parent.put(data_list)
  }
  // delete operations
  override def del(containerName: String, keys: Array[Key]): Unit = {
    parent.del(containerName, keys)
  }
  override def del(containerName: String, time: TimeRange, keys: Array[Array[String]]): Unit = {
    parent.del(containerName, time, keys)
  }
  // get operations
  override def get(containerName: String, callbackFunction: (Key, Value) => Unit): Unit = {
    parent.get(containerName, callbackFunction)
  }
  override def get(containerName: String, keys: Array[Key], callbackFunction: (Key, Value) => Unit): Unit = {
    parent.get(containerName, keys, callbackFunction)
  }
  override def get(containerName: String, time_ranges: Array[TimeRange], callbackFunction: (Key, Value) => Unit): Unit = {
    parent.get(containerName, time_ranges, callbackFunction)
  }
  override def get(containerName: String, time_ranges: Array[TimeRange], bucketKeys: Array[Array[String]], callbackFunction: (Key, Value) => Unit): Unit = {
    parent.get(containerName, time_ranges, bucketKeys, callbackFunction)
  }
  override def get(containerName: String, bucketKeys: Array[Array[String]], callbackFunction: (Key, Value) => Unit): Unit = {
    parent.get(containerName, bucketKeys, callbackFunction)
  }
  // key-only variants — same delegation pattern as the (Key, Value) overloads
  def getKeys(containerName: String, callbackFunction: (Key) => Unit): Unit = {
    parent.getKeys(containerName, callbackFunction)
  }
  def getKeys(containerName: String, keys: Array[Key], callbackFunction: (Key) => Unit): Unit = {
    parent.getKeys(containerName, keys, callbackFunction)
  }
  def getKeys(containerName: String, timeRanges: Array[TimeRange], callbackFunction: (Key) => Unit): Unit = {
    parent.getKeys(containerName, timeRanges, callbackFunction)
  }
  def getKeys(containerName: String, timeRanges: Array[TimeRange], bucketKeys: Array[Array[String]], callbackFunction: (Key) => Unit): Unit = {
    parent.getKeys(containerName, timeRanges, bucketKeys, callbackFunction)
  }
  def getKeys(containerName: String, bucketKeys: Array[Array[String]], callbackFunction: (Key) => Unit): Unit = {
    parent.getKeys(containerName, bucketKeys, callbackFunction)
  }
}
// To create HBase Datastore instance
/** Factory entry point used by the storage layer to instantiate the HBase adapter. */
object HBaseAdapter extends StorageAdapterObj {
  override def CreateStorageAdapter(kvManagerLoader: KamanjaLoaderInfo, datastoreConfig: String): DataStore = new HBaseAdapter(kvManagerLoader, datastoreConfig)
}
| traytonwhite/Kamanja | trunk/Storage/HBase/src/main/scala/com/ligadata/keyvaluestore/HBaseAdapter.scala | Scala | apache-2.0 | 35,264 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.functions.aggfunctions
import java.math.BigDecimal
import java.util.{HashMap => JHashMap}
import java.lang.{Iterable => JIterable}
import java.sql.{Date, Time, Timestamp}
import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, TypeInformation}
import org.apache.flink.api.java.tuple.{Tuple2 => JTuple2}
import org.apache.flink.api.java.typeutils.{MapTypeInfo, TupleTypeInfo}
import org.apache.flink.table.api.Types
import org.apache.flink.table.functions.aggfunctions.Ordering._
import org.apache.flink.table.functions.AggregateFunction
/**
  * The accumulator for Min with retraction: `f0` holds the current minimum and
  * `f1` maps each accumulated value to its occurrence count (used to undo retractions).
  */
class MinWithRetractAccumulator[T] extends JTuple2[T, JHashMap[T, Long]]
/**
  * Base class for built-in Min with retraction aggregate function.
  *
  * The accumulator keeps the current minimum in `f0` and a value -> occurrence
  * count map in `f1`. `f0` is only meaningful while `f1` is non-empty; while the
  * map is empty, `f0` holds the type-specific sentinel from `getInitValue` and
  * `getValue` returns null instead of exposing it.
  *
  * @tparam T the type for the aggregation result
  */
abstract class MinWithRetractAggFunction[T](implicit ord: Ordering[T])
  extends AggregateFunction[T, MinWithRetractAccumulator[T]] {

  override def createAccumulator(): MinWithRetractAccumulator[T] = {
    val acc = new MinWithRetractAccumulator[T]
    acc.f0 = getInitValue //min
    acc.f1 = new JHashMap[T, Long]() //store the count for each value
    acc
  }

  /** Adds `value` to the accumulator, updating the running minimum and the count map. */
  def accumulate(acc: MinWithRetractAccumulator[T], value: Any): Unit = {
    if (value != null) {
      val v = value.asInstanceOf[T]
      // an empty map means f0 is still the sentinel, so always adopt the first value
      if (acc.f1.size() == 0 || (ord.compare(acc.f0, v) > 0)) {
        acc.f0 = v
      }
      if (!acc.f1.containsKey(v)) {
        acc.f1.put(v, 1L)
      } else {
        var count = acc.f1.get(v)
        count += 1L
        acc.f1.put(v, count)
      }
    }
  }

  /** Removes one occurrence of `value`; recomputes the minimum when the current one disappears. */
  def retract(acc: MinWithRetractAccumulator[T], value: Any): Unit = {
    if (value != null) {
      val v = value.asInstanceOf[T]
      // BUG FIX: guard against retracting a value that was never accumulated —
      // acc.f1.get(v) would return null and NPE when unboxed to Long.
      if (!acc.f1.containsKey(v)) {
        return
      }
      var count = acc.f1.get(v)
      count -= 1L
      if (count == 0) {
        //remove the key v from the map if the number of appearance of the value v is 0
        acc.f1.remove(v)
        //if the total count is 0, we could just simply set the f0(min) to the initial value
        if (acc.f1.size() == 0) {
          acc.f0 = getInitValue
          return
        }
        //if v is the current min value, we have to iterate the map to find the 2nd smallest
        // value to replace v as the min value
        if (v == acc.f0) {
          val iterator = acc.f1.keySet().iterator()
          var key = iterator.next()
          acc.f0 = key
          while (iterator.hasNext) {
            key = iterator.next()
            if (ord.compare(acc.f0, key) > 0) {
              acc.f0 = key
            }
          }
        }
      } else {
        acc.f1.put(v, count)
      }
    }
  }

  /** Returns the current minimum, or null when nothing has been accumulated. */
  override def getValue(acc: MinWithRetractAccumulator[T]): T = {
    if (acc.f1.size() != 0) {
      acc.f0
    } else {
      null.asInstanceOf[T]
    }
  }

  /** Merges the given accumulators into `acc`. */
  def merge(acc: MinWithRetractAccumulator[T],
      its: JIterable[MinWithRetractAccumulator[T]]): Unit = {
    val iter = its.iterator()
    while (iter.hasNext) {
      val a = iter.next()
      if (a.f1.size() != 0) {
        // set min element
        // BUG FIX: while acc is still empty its f0 is the sentinel (e.g. 0 or ""),
        // which must not win the comparison against a real minimum from `a`.
        if (acc.f1.size() == 0 || ord.compare(acc.f0, a.f0) > 0) {
          acc.f0 = a.f0
        }
        // merge the count for each key
        val iterator = a.f1.keySet().iterator()
        while (iterator.hasNext) {
          val key = iterator.next()
          if (acc.f1.containsKey(key)) {
            acc.f1.put(key, acc.f1.get(key) + a.f1.get(key))
          } else {
            acc.f1.put(key, a.f1.get(key))
          }
        }
      }
    }
  }

  /** Restores the accumulator to its freshly-created state. */
  def resetAccumulator(acc: MinWithRetractAccumulator[T]): Unit = {
    acc.f0 = getInitValue
    acc.f1.clear()
  }

  override def getAccumulatorType: TypeInformation[MinWithRetractAccumulator[T]] = {
    new TupleTypeInfo(
      classOf[MinWithRetractAccumulator[T]],
      getValueTypeInfo,
      new MapTypeInfo(getValueTypeInfo, BasicTypeInfo.LONG_TYPE_INFO))
  }

  /** Type-specific sentinel stored in f0 while the accumulator is empty. */
  def getInitValue: T

  /** TypeInformation describing T, used to build the accumulator's TupleTypeInfo. */
  def getValueTypeInfo: TypeInformation[_]
}
/**
  * Built-in Byte Min with retraction aggregate function
  */
class ByteMinWithRetractAggFunction extends MinWithRetractAggFunction[Byte] {
  // sentinel only: getValue never exposes this while the accumulator is empty
  override def getInitValue: Byte = 0.toByte
  override def getValueTypeInfo = BasicTypeInfo.BYTE_TYPE_INFO
}
/**
  * Built-in Short Min with retraction aggregate function
  */
class ShortMinWithRetractAggFunction extends MinWithRetractAggFunction[Short] {
  // sentinel only: getValue never exposes this while the accumulator is empty
  override def getInitValue: Short = 0.toShort
  override def getValueTypeInfo = BasicTypeInfo.SHORT_TYPE_INFO
}
/**
  * Built-in Int Min with retraction aggregate function
  */
class IntMinWithRetractAggFunction extends MinWithRetractAggFunction[Int] {
  // sentinel only: getValue never exposes this while the accumulator is empty
  override def getInitValue: Int = 0
  override def getValueTypeInfo = BasicTypeInfo.INT_TYPE_INFO
}
/**
  * Built-in Long Min with retraction aggregate function
  */
class LongMinWithRetractAggFunction extends MinWithRetractAggFunction[Long] {
  // sentinel only: getValue never exposes this while the accumulator is empty
  override def getInitValue: Long = 0L
  override def getValueTypeInfo = BasicTypeInfo.LONG_TYPE_INFO
}
/**
  * Built-in Float Min with retraction aggregate function
  */
class FloatMinWithRetractAggFunction extends MinWithRetractAggFunction[Float] {
  // sentinel only: getValue never exposes this while the accumulator is empty
  override def getInitValue: Float = 0.0f
  override def getValueTypeInfo = BasicTypeInfo.FLOAT_TYPE_INFO
}
/**
  * Built-in Double Min with retraction aggregate function
  */
class DoubleMinWithRetractAggFunction extends MinWithRetractAggFunction[Double] {
  // sentinel only: getValue never exposes this while the accumulator is empty
  override def getInitValue: Double = 0.0d
  override def getValueTypeInfo = BasicTypeInfo.DOUBLE_TYPE_INFO
}
/**
  * Built-in Boolean Min with retraction aggregate function
  */
class BooleanMinWithRetractAggFunction extends MinWithRetractAggFunction[Boolean] {
  // sentinel only: getValue never exposes this while the accumulator is empty
  override def getInitValue: Boolean = false
  override def getValueTypeInfo = BasicTypeInfo.BOOLEAN_TYPE_INFO
}
/**
  * Built-in Big Decimal Min with retraction aggregate function
  */
class DecimalMinWithRetractAggFunction extends MinWithRetractAggFunction[BigDecimal] {
  // sentinel only: getValue never exposes this while the accumulator is empty
  override def getInitValue: BigDecimal = BigDecimal.ZERO
  override def getValueTypeInfo = BasicTypeInfo.BIG_DEC_TYPE_INFO
}
/**
  * Built-in String Min with retraction aggregate function
  */
class StringMinWithRetractAggFunction extends MinWithRetractAggFunction[String] {
  // sentinel only: getValue never exposes this while the accumulator is empty
  override def getInitValue: String = ""
  override def getValueTypeInfo = BasicTypeInfo.STRING_TYPE_INFO
}
/**
  * Built-in Timestamp Min with retraction aggregate function
  */
class TimestampMinWithRetractAggFunction extends MinWithRetractAggFunction[Timestamp] {
  // sentinel only (epoch): getValue never exposes this while the accumulator is empty
  override def getInitValue: Timestamp = new Timestamp(0)
  override def getValueTypeInfo = Types.SQL_TIMESTAMP
}
/**
  * Built-in Date Min with retraction aggregate function
  */
class DateMinWithRetractAggFunction extends MinWithRetractAggFunction[Date] {
  // sentinel only (epoch): getValue never exposes this while the accumulator is empty
  override def getInitValue: Date = new Date(0)
  override def getValueTypeInfo = Types.SQL_DATE
}
/**
  * Built-in Time Min with retraction aggregate function
  */
class TimeMinWithRetractAggFunction extends MinWithRetractAggFunction[Time] {
  // sentinel only (epoch): getValue never exposes this while the accumulator is empty
  override def getInitValue: Time = new Time(0)
  override def getValueTypeInfo = Types.SQL_TIME
}
| zimmermatt/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/aggfunctions/MinAggFunctionWithRetract.scala | Scala | apache-2.0 | 7,853 |
package org.example4.usage
// !!! NOTE:
// 1. wrong order of imports shouldn't be changed after refactoring
// though modified import (of moved class) should be inserted to a "proper place"
// 2. unused imports shouldn't be removed during refactoring
// We could do it (e.g. Java does it), but in Scala it might be quite a dangerous and unexpected operation,
// taking into account the complexity of Scala imports
//
import org.example4.declaration.beta.BetaClass
import org.example4.declaration.data.{A, X}
import org.example4.declaration.eta.EtaClass
import org.example4.declaration.data.C
import org.example4.declaration.alpha.AlphaClass //NOTE: unused
import org.example4.declaration.data.B
// NOTE: this class is move-refactoring test data (see the header comments above the
// imports); its imports, unused references and formatting are intentional inputs to
// the test and must be preserved exactly.
class Usage_SortOnlyModifiedImport_DoNotTouchOther {
  def foo(
           x: X,
           a: A,
           b: B,
           c: C,
           //ac: AlphaClass,
           bc: BetaClass,
           cc: EtaClass,
         ): Unit ={
  }
}
| JetBrains/intellij-scala | scala/scala-impl/testdata/move/sortOnlyModifiedImport_DoNotTouchOther/after/org/example4/usage/Usage_SortOnlyModifiedImport_DoNotTouchOther.scala | Scala | apache-2.0 | 888 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package base
package types
/**
 * PSI element for a sequence argument type element — presumably the `_*` wildcard
 * used in vararg call sites/patterns (TODO confirm against parser usage).
 *
 * @author Alexander Podkhalyuzin
 *         Date: 14.03.2008
 */
trait ScSequenceArg extends ScalaPsiElement {
}
package fpinscala.state
import fpinscala.state.RNG._
import org.scalatest.{FlatSpec, Matchers}
/**
 * Unit tests for `State`: covers `flatMap`, `map` and `map2` over the
 * random-number-generator state (`RNG`).
 */
class StateSpec extends FlatSpec with Matchers {

  behavior of "State.flatMap"

  it should "apply function to value and return next state" in {
    val intState = State(int)
    val (value, _) = intState.flatMap(n => State.unit(n)).run(Simple(100))
    value should (be < Int.MaxValue and be > Int.MinValue)
  }

  behavior of "State.map"

  it should "apply function to value and return next state" in {
    val intState = State(int)
    val (doubled, _) = intState.map(n => n * 2).run(Simple(100))
    doubled should (be < Int.MaxValue and be > Int.MinValue)
  }

  behavior of "State.map2"

  it should "map 2 State" in {
    val first = State(int)
    val second = State(int)
    val (sum, _) = first.map2(second)(_ + _).run(Simple(100))
    sum should (be < Int.MaxValue and be > Int.MinValue)
  }
}
| hyleung/fpinscala | exercises/src/test/scala/fpinscala/state/StateSpec.scala | Scala | mit | 973 |
package sri.universal.apis
import scala.scalajs.js
import scala.scalajs.js.Promise
import scala.scalajs.js.annotation.JSImport
/**
 * Scala.js facade for React Native's `takeSnapshot` export.
 * NOTE(review): the `???` defaults mirror the original facade — evaluating them
 * would throw NotImplementedError, so callers are expected to pass arguments
 * explicitly; confirm this is the intended optional-argument encoding.
 */
@js.native
@JSImport("react-native", "takeSnapshot")
object TakeSnapshot extends js.Object {
  def apply(view: js.Any = ???,
            options: SnapShotOptions = ???): Promise[js.Any] = js.native
}
/** Options bag passed to `TakeSnapshot`; every field is optional (js.undefined by default). */
trait SnapShotOptions extends js.Object {
  var width: js.UndefOr[Double] = js.undefined
  var height: js.UndefOr[Double] = js.undefined
  var format: js.UndefOr[String] = js.undefined
  var quality: js.UndefOr[Double] = js.undefined
}
| scalajs-react-interface/universal | src/main/scala/sri/universal/apis/TakeSnapshot.scala | Scala | apache-2.0 | 571 |
package com.donoroncall.server.rest.undertow.handlers.authentication
import com.donoroncall.server.rest.controllers.authentication.AuthenticationController
import com.donoroncall.server.utils.STATUS_CODES
import com.google.inject.Inject
import io.undertow.server.{HttpHandler, HttpServerExchange}
import org.apache.commons.io.IOUtils
import spray.json._
/**
 * Undertow HTTP handler that marks a donation process as complete.
 *
 * Expects a JSON body with the fields: username, donationStatus, donorUserName,
 * noOfUnits (numeric string), date and blood_group. Delegates to
 * [[AuthenticationController.processComplete]] and answers with a JSON
 * status/message object; any parse or processing failure yields HTTP 400.
 */
class ProcessCompletion @Inject()(authenticationController: AuthenticationController) extends HttpHandler {

  override def handleRequest(exchange: HttpServerExchange): Unit = {
    if (exchange.isInIoThread) {
      // blocking body reads must not run on an IO thread
      exchange.dispatch(this)
    } else {
      try {
        exchange.startBlocking()
        val request = new String(IOUtils.toByteArray(exchange.getInputStream))
        val requestJson = request.parseJson.asJsObject

        val username = requestJson.getFields("username").head.asInstanceOf[JsString].value
        val donationStatus = requestJson.getFields("donationStatus").head.asInstanceOf[JsString].value
        val donorUserName = requestJson.getFields("donorUserName").head.asInstanceOf[JsString].value
        val noOfUnits = requestJson.getFields("noOfUnits").head.asInstanceOf[JsString].value.toInt
        val date = requestJson.getFields("date").head.asInstanceOf[JsString].value
        // BUG FIX: this previously re-read the "date" field (copy-paste), so the blood
        // group passed to the controller was actually the date string.
        // TODO(review): confirm the client sends the key as "blood_group".
        val blood_group = requestJson.getFields("blood_group").head.asInstanceOf[JsString].value

        val completed = authenticationController.processComplete(username, donationStatus, donorUserName, noOfUnits, date, blood_group)
        if (completed) {
          exchange.getResponseSender.send(JsObject(
            "status" -> JsString("Complete"),
            "message" -> JsString("Donation Process Complete.")
          ).prettyPrint)
        } else {
          sendFailure(exchange)
        }
      } catch {
        case e: Exception => sendFailure(exchange)
      }
    }
  }

  // Shared 400 response used for both a controller-reported failure and any exception
  // (malformed JSON, missing field, non-numeric noOfUnits).
  private def sendFailure(exchange: HttpServerExchange): Unit = {
    exchange.setStatusCode(STATUS_CODES.BAD_REQUEST)
    exchange.getResponseSender.send(JsObject(
      "status" -> JsString("failed"),
      "message" -> JsString("Process Completion Failed")
    ).prettyPrint)
  }
}
| donorcall01/DonorOnCall_Server | src/main/scala/com/donoroncall/server/rest/undertow/handlers/authentication/ProcessCompletion.scala | Scala | apache-2.0 | 2,387 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.