code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package com.twitter.diffy.analysis
import javax.inject.Inject
import com.twitter.util.Future
import scala.math.abs
/** Builds predicates that decide whether a joined field's difference is significant. */
object DifferencesFilterFactory {
/**
 * Returns a predicate accepting a [[JoinedField]] only when its raw difference
 * count exceeds the noise count AND both its relative and absolute difference
 * percentages exceed the supplied thresholds.
 */
def apply(relative: Double, absolute: Double): JoinedField => Boolean = { field =>
val exceedsNoise = field.raw.differences > field.noise.differences
val aboveRelativeThreshold = field.relativeDifference > relative
val aboveAbsoluteThreshold = field.absoluteDifference > absolute
exceedsNoise && aboveRelativeThreshold && aboveAbsoluteThreshold
}
}
// Joins the raw difference counter with the noise counter so per-endpoint and
// per-field statistics can be viewed side by side.
case class JoinedDifferences @Inject() (raw: RawDifferenceCounter, noise: NoiseDifferenceCounter) {
// Joined statistics for every endpoint known to the raw counter, keyed by
// endpoint name. Lazy: the fan-out of endpoint() lookups runs on first access.
lazy val endpoints: Future[Map[String, JoinedEndpoint]] = {
raw.counter.endpoints map { _.keys } flatMap { eps =>
Future.collect(
eps map { ep =>
endpoint(ep) map { ep -> _ }
} toSeq
) map { _.toMap }
}
}
// Joins one endpoint's metadata with its raw and noise field maps.
def endpoint(endpoint: String): Future[JoinedEndpoint] = {
Future.join(
raw.counter.endpoint(endpoint),
raw.counter.fields(endpoint),
noise.counter.fields(endpoint)
) map { case (endpoint, rawFields, noiseFields) =>
JoinedEndpoint(endpoint, rawFields, noiseFields)
}
}
}
// An endpoint's metadata together with its raw ("original") and noise field
// maps, both keyed by field path.
case class JoinedEndpoint(
endpoint: EndpointMetadata,
original: Map[String, FieldMetadata],
noise: Map[String, FieldMetadata])
{
def differences = endpoint.differences
def total = endpoint.total
// Pairs each raw field with its noise counterpart; paths with no recorded
// noise fall back to FieldMetadata.Empty.
lazy val fields: Map[String, JoinedField] = original map { case (path, field) =>
path -> JoinedField(endpoint, field, noise.getOrElse(path, FieldMetadata.Empty))
} toMap
}
// A single field's raw difference statistics joined with its noise statistics.
case class JoinedField(endpoint: EndpointMetadata, raw: FieldMetadata, noise: FieldMetadata) {
// the percent difference out of the total # of requests
// NOTE(review): evaluates to NaN/Infinity when endpoint.total is 0 — confirm
// callers (e.g. threshold comparisons, which are false for NaN) tolerate that.
def absoluteDifference = abs(raw.differences - noise.differences) / endpoint.total.toDouble * 100
// the square error between this field's differences and the noisey counterpart's differences
// NOTE(review): NaN when raw.differences + noise.differences == 0.
def relativeDifference = abs(raw.differences - noise.differences) / (raw.differences + noise.differences).toDouble * 100
}
| 1c4r/diffy | src/main/scala/com/twitter/diffy/analysis/JoinedDifferences.scala | Scala | apache-2.0 | 1,941 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.memory
import org.apache.spark.internal.Logging
import org.apache.spark.rpc.{RpcAddress, RpcEnv}
import org.apache.spark.util.RpcUtils
import org.apache.spark.{ExecutorPlugin, SparkConf, SparkEnv}
// Spark ExecutorPlugin that, on executor startup, boots the monitor RPC
// environment and registers this executor's monitoring endpoint with the driver.
class MonitorExecutorExtension extends ExecutorPlugin with Logging {
  val env: SparkEnv = SparkEnv.get
  val rpcEnv: RpcEnv = env.rpcEnv
  val sparkConf: SparkConf = env.conf
  // Invoked by Spark once when the executor starts.
  override def init(): Unit = {
    initMonitorEnv()
    registerExecutorWithDriver()
  }
  // Creates the executor-side MonitorEnv pointed at the driver's RPC address
  // and installs the memory monitor into the monitor manager.
  private def initMonitorEnv(): Unit = {
    val driverHost: String = env.conf.get("spark.driver.host", "localhost")
    // NOTE(review): 7077 is the standalone master's default port, not a typical
    // driver RPC port — confirm this fallback is intentional.
    val driverPort: Int = env.conf.getInt("spark.driver.port", 7077)
    logInfo(s"init monitor env, executorId: ${env.executorId}, driver -> $driverHost : $driverPort")
    MonitorEnv.create(sparkConf, env.executorId, rpcEnv, RpcAddress(driverHost, driverPort), isDriver = false)
    MonitorEnv.get.monitorManager.setMemoryMonitor(MemoryMonitor.install())
  }
  // Sets up a per-executor slave endpoint (endpoint name suffixed with the
  // executor id so each executor's endpoint is unique) and registers it with
  // the driver endpoint.
  private def registerExecutorWithDriver() = {
    val driverRef = MonitorEnv.get.monitorManager.driverEndpoint
    logInfo(s"register executor executorId : ${env.executorId}")
    val slaverEndpoint = new MonitorSlaverEndpoint(rpcEnv, driverRef)
    val workerRef = rpcEnv.setupEndpoint(MonitorSlaverEndpoint.ENDPOINT_NAME + env.executorId, slaverEndpoint)
    slaverEndpoint.registerMaster(env.executorId, workerRef)
  }
  // No teardown beyond the default plugin shutdown.
  override def shutdown(): Unit = super.shutdown()
} | apache/kylin | kylin-spark-project/kylin-spark-common/src/main/spark24/org/apache/spark/monitor/MonitorExecutorExtension.scala | Scala | apache-2.0 | 2,261 |
package controllers
import play.api.mvc._
import play.api.data._
import play.api.data.Forms._
import models._
import views.html.{user => view}
import com.github.aselab.activerecord.dsl._
// CRUD controller for User records, backed by scala-activerecord and the
// views under views.html.user.
object Users extends Controller {
  // GET: list all users.
  def index = Action {
    Ok(view.index(User.all.toList))
  }
  // GET: show a single user, or 404 when the id is unknown.
  def show(id: Long) = Action {
    User.find(id) match {
      case Some(user) => Ok(view.show(user))
      case _ => NotFound
    }
  }
  // GET: render a blank creation form.
  def newPage = Action { implicit request =>
    Ok(view.edit(User.form, routes.Users.create, "Create", "User create"))
  }
  // POST: bind the creation form; re-render with errors on validation failure,
  // otherwise save inside a transaction and redirect to the new user's page.
  def create = Action { implicit request =>
    User.form.bindFromRequest.fold(
      errors => BadRequest(view.edit(errors, routes.Users.create, "Create", "User create")), {
        user =>
          User.transaction { user.save }
          Redirect(routes.Users.show(user.id))
      })
  }
  // GET: render the edit form for an existing user, or 404.
  def edit(id: Long) = Action { implicit request =>
    User.find(id) match {
      case Some(user) => Ok(view.edit(User.form(user), routes.Users.update(id), "Update", "User edit"))
      case _ => NotFound
    }
  }
  // POST: update an existing user; 404 when missing, re-render on validation
  // errors, otherwise save and redirect to the index.
  def update(id: Long) = Action { implicit request =>
    User.find(id) match {
      case Some(user) =>
        User.form(user).bindFromRequest.fold(
          errors => BadRequest(view.edit(errors, routes.Users.update(id), "Update", "User edit")), {
            user =>
              User.transaction { user.save }
              Redirect(routes.Users.index)
          })
      case _ => NotFound
    }
  }
  // Delete a user inside a transaction; 404 when missing.
  def delete(id: Long) = Action {
    User.find(id) match {
      case Some(user) =>
        User.transaction { user.delete }
        Ok
      case _ => NotFound
    }
  }
}
| xdougx/scala-activerecord | play2Sbt/src/sbt-test/generator/simple/app/controllers/Users.scala | Scala | mit | 1,631 |
package probability.continuous
import au.id.cxd.math.probability.continuous.{ChiSquare, StudentT}
import function.TestEvaluation
import org.scalatest.{FlatSpec, Matchers}
// Checks ChiSquare(df = 1) pdf/cdf/invcdf against reference values computed in
// R (dchisq/pchisq/qchisq) at the quantiles below, to a tolerance of 0.01.
class TestChisq extends FlatSpec with Matchers with TestEvaluation {
  // Evaluation points 0.1 .. 1.0.
  val quantiles = List[Double](
    0.1,
    0.2,
    0.3,
    0.4,
    0.5,
    0.6,
    0.7,
    0.8,
    0.9,
    1.0
  )
  // Expected pdf values at each quantile (R: dchisq(q, df = 1)).
  val pdftest = List[Double](
    1.2000389,
    0.8071711,
    0.6269101,
    0.5164415,
    0.4393913,
    0.3815453,
    0.3360145,
    0.2989835,
    0.2681367,
    0.2419707
  )
  // Expected cdf values at each quantile (R: pchisq(q, df = 1)).
  val cdftest = List[Double](
    0.2481704,
    0.3452792,
    0.4161176,
    0.4729107,
    0.5204999,
    0.5614220,
    0.5972163,
    0.6289066,
    0.6572183,
    0.6826895
  )
  "PDF" should "agree with R" in {
    val fn = ChiSquare(1.0).pdf(_)
    println("ChiSquare PDF")
    evaluate1(fn, quantiles, pdftest, 0.01) should be (true)
    println()
  }
  "CDF" should "agree with R " in {
    val fn = (y:Double) => ChiSquare(1.0).cdf(y)
    println("ChiSquare CDF")
    evaluate1(fn, quantiles, cdftest, 0.01) should be(true)
    println()
  }
  // invcdf is exercised in the inverse direction: cdf values in, quantiles out.
  "INVCDF" should "agree with R" in {
    val fn = ChiSquare(1.0).invcdf(_)
    println("ChiSquare INVCDF")
    evaluate1(fn, cdftest, quantiles, 0.01) should be(true)
    println()
    // NOTE(review): quantiles1 is computed but never asserted on or used.
    val quantiles1 = for (i <- 0.1 to 1.0 by 0.1) yield fn (i)
  }
}
| cxd/scala-au.id.cxd.math | math/src/test/scala/probability/continuous/TestChisq.scala | Scala | mit | 1,362 |
/*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.carlomicieli.scalakoans
import org.scalatest._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
// Koan-style suite demonstrating the semantics of None, Nil and Option.
class AboutEmptyValues extends FunSuite with ShouldMatchers {
  test("None equals None") {
    assert(None === None)
  }
  // `eq` is reference equality: None is a singleton object.
  test("None should be identical to None") {
    val a = None
    val b = None
    assert(a eq b)
  }
  test("None can be converted to a String") {
    assert(None.toString === "None")
  }
  test("An empty list can be represented by another nothing value: Nil") {
    assert(List() === Nil)
  }
  test("None can be converted to an empty list") {
    val a = None
    assert(a.toList === List())
  }
  test("None is considered empty") {
    assert(None.isEmpty === true)
  }
  test("None can be cast Any, AnyRef or AnyVal") {
    assert(None.asInstanceOf[Any] === None)
    assert(None.asInstanceOf[AnyRef] === None)
    assert(None.asInstanceOf[AnyVal] === None)
  }
  // Casting None to an unrelated class fails at runtime.
  test("None cannot be cast to all types of objects") {
    intercept[ClassCastException] {
      // put the exception you expect to see in place of the blank
      assert(None.asInstanceOf[String] === None)
    }
  }
  test("None can be used with Option instead of null references") {
    val optional: Option[String] = None
    assert(optional.isEmpty === true)
    assert(optional === None)
  }
  test("Some is the opposite of None for Option types") {
    val optional: Option[String] = Some("Some Value")
    assert((optional == None) === false, "Some(value) should not equal None")
    assert(optional.isEmpty === false, "Some(value) should not be empty")
  }
  test("Option.getOrElse can be used to provide a default in the case of None") {
    val optional: Option[String] = Some("Some Value")
    val optional2: Option[String] = None
    assert(optional.getOrElse("No Value") === "Some Value", "Should return the value in the option")
    assert(optional2.getOrElse("No Value") === "No Value", "Should return the specified default value")
  }
}
| CarloMicieli/first-steps-with-scala | src/test/scala/io/github/carlomicieli/scalakoans/AboutEmptyValues.scala | Scala | apache-2.0 | 2,636 |
package bifrost.transaction.serialization
import bifrost.serialization.Serializer
import bifrost.transaction.bifrostTransaction.AssetTransfer
import bifrost.transaction.box.proposition.{Constants25519, PublicKey25519Proposition}
import com.google.common.primitives.Ints
import scala.util.Try
// Binary (de)serializer for AssetTransfer transactions.
//
// Child byte layout, appended after the generic transfer fields:
//   [issuer public key (Constants25519.PubKeyLength bytes)]
//   [assetCode bytes][assetCode length: Ints.BYTES]
//   [data bytes][data length: Ints.BYTES]
// Because each length field trails its payload, parseBytes reads the buffer
// back-to-front: data length, data, assetCode length, assetCode, then issuer.
object AssetTransferCompanion extends Serializer[AssetTransfer] with TransferSerializer {
  override def toBytes(at: AssetTransfer): Array[Byte] = {
    TransferTransactionCompanion.prefixBytes ++ toChildBytes(at)
  }
  // Serializes the AssetTransfer-specific fields (without the type prefix).
  def toChildBytes(at: AssetTransfer): Array[Byte] = {
    transferToBytes(at, "AssetTransfer") ++
      at.issuer.pubKeyBytes ++
      at.assetCode.getBytes ++
      Ints.toByteArray(at.assetCode.getBytes.length)++
      at.data.getBytes++
      Ints.toByteArray(at.data.getBytes.length)
  }
  override def parseBytes(bytes: Array[Byte]): Try[AssetTransfer] = Try {
    // Generic transfer fields (recipients, fee, timestamp, signatures, ...).
    val params = parametersParseBytes(bytes)
    // Last 4 bytes: length of the data string immediately preceding them.
    val dataLen: Int = Ints.fromByteArray(bytes.slice(bytes.length - Ints.BYTES, bytes.length))
    val data: String = new String(
      bytes.slice(bytes.length - Ints.BYTES - dataLen, bytes.length - Ints.BYTES)
    )
    // Preceding 4 bytes: length of the assetCode string before them.
    val assetCodeLen: Int = Ints.fromByteArray(bytes.slice(bytes.length - Ints.BYTES - dataLen - Ints.BYTES, bytes.length - Ints.BYTES - dataLen))
    val assetCode: String = new String(
      bytes.slice(bytes.length - Ints.BYTES - assetCodeLen - Ints.BYTES - dataLen, bytes.length - Ints.BYTES - dataLen - Ints.BYTES)
    )
    // Fixed-size issuer public key sits just before the assetCode block.
    val issuer: PublicKey25519Proposition = PublicKey25519Proposition(
      bytes.slice(bytes.length - Ints.BYTES - assetCodeLen - Ints.BYTES - dataLen - Constants25519.PubKeyLength,
        bytes.length - Ints.BYTES - assetCodeLen - Ints.BYTES - dataLen)
    )
    AssetTransfer(params._1, params._2, params._3, issuer, assetCode, params._4, params._5, data)
  }
}
| Topl/Project-Bifrost | src/main/scala/bifrost/transaction/serialization/AssetTransferCompanion.scala | Scala | mpl-2.0 | 1,889 |
package net.revenj.server
import java.util.Properties
import javax.sql.DataSource
import akka.actor.ActorSystem
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl._
import akka.http.scaladsl.model._
import akka.stream.{ActorMaterializer, Materializer}
import akka.http.scaladsl.model.Uri.Path
import akka.stream.scaladsl.Flow
import com.typesafe.config.ConfigFactory
import net.revenj.Revenj
import net.revenj.extensibility.{Container, PluginLoader}
import net.revenj.server.handlers.RequestBinding
import scala.collection.mutable
import scala.concurrent.Future
// Boots a Revenj container and serves its request bindings over akka-http.
object WebServer {
  // Parses (host, port) from the CLI: no args -> localhost:8080,
  // one arg -> localhost:<port>, two args -> <host> <port>.
  private[this] def parseArgs(args: Array[String]): (String, Int) = {
    if (args.isEmpty) {
      ("localhost", 8080)
    } else if (args.length == 1) {
      ("localhost", args(0).toInt)
    } else {
      (args(0), args(1).toInt)
    }
  }
  def main(args: Array[String]): Unit = {
    val (address, port) = parseArgs(args)
    start(address, port)
  }
  // Builds the Revenj container, wires the HTTP route table from the
  // registered RequestBinding plugins, and binds the server. Returns the
  // container; a Shutdown hook registered on it unbinds the server on close.
  def start(address: String, port: Int, dataSource: Option[DataSource] = None): Container = {
    val url = s"http://${address}:$port"
    implicit val system = ActorSystem()
    implicit val materializer = ActorMaterializer()
    implicit val executionContext = system.dispatcher
    // Copy Typesafe config entries into java.util.Properties for Revenj.setup.
    val config = ConfigFactory.load()
    val props = new Properties()
    val iter = config.entrySet().iterator
    while (iter.hasNext) {
      val kv = iter.next()
      props.put(kv.getKey, kv.getValue.unwrapped())
    }
    val revenj =
      Revenj.setup(
        dataSource.getOrElse(Revenj.dataSource(props)),
        props,
        None,
        None,
        Some(executionContext)
      )
    revenj.registerInstance(config)
    revenj.registerInstance(system)
    revenj.registerInstance(materializer)
    revenj.registerInstance[Materializer](materializer)
    // Each RequestBinding plugin registers handlers keyed by the first path segment.
    val plugins = revenj.resolve[PluginLoader]
    val bindings = plugins.resolve[RequestBinding](revenj)
    val flow = Flow[HttpRequest]
    val cpuCount = Runtime.getRuntime.availableProcessors
    val asyncBuilder = new mutable.HashMap[Path#Head, HttpRequest => Future[HttpResponse]]()
    bindings.foreach(_.bind(asyncBuilder))
    val asyncUrls = asyncBuilder.toMap
    // Dispatch on the head of the request path; unknown paths and requests
    // with an empty path get a 400.
    val routes = flow.mapAsync(cpuCount / 2 + 1) {
      case req@HttpRequest(_, uri, _, _, _) if !uri.path.tail.isEmpty =>
        asyncUrls.get(uri.path.tail.head) match {
          case Some(handler) => handler(req)
          case _ =>
            Future.successful(
              HttpResponse(status = StatusCodes.BadRequest, entity = HttpEntity(ContentTypes.`text/plain(UTF-8)`, "Unrecognized path"))
            )
        }
      case _ =>
        Future {
          HttpResponse(status = StatusCodes.BadRequest, entity = HttpEntity(ContentTypes.`text/plain(UTF-8)`, "Invalid request"))
        }
    }
    val binding = Http().bindAndHandle(routes, address, port)
    println(s"Starting server at $url ...")
    binding.foreach { _ =>
      println(s"Started server at $url")
    }
    // handleClose = true: the container invokes Shutdown.close() when closed.
    revenj.registerInstance(new Shutdown(binding, system, url), handleClose = true)
    revenj
  }
  // Unbinds the HTTP listener and terminates the actor system on container close.
  private class Shutdown(binding: Future[ServerBinding], system: ActorSystem, url: String) extends AutoCloseable {
    override def close(): Unit = {
      import scala.concurrent.ExecutionContext.Implicits.global
      binding.foreach { bind =>
        println(s"Shutting down server at $url ...")
        bind.unbind() map { _ =>
          system.terminate()
          println(s"Shut down server at $url")
        }
      }
    }
  }
}
| ngs-doo/revenj | scala/revenj-akka/src/main/scala/net/revenj/server/WebServer.scala | Scala | bsd-3-clause | 3,627 |
package com.stovokor.editor.state
import com.jme3.app.Application
import com.jme3.app.state.AppStateManager
import com.simsilica.lemur.Container
import com.simsilica.lemur.input.FunctionId
import com.simsilica.lemur.input.InputState
import com.simsilica.lemur.input.StateFunctionListener
import com.stovokor.editor.gui.GuiFactory
import com.stovokor.editor.input.InputFunction
import com.stovokor.editor.model.Settings
import com.stovokor.editor.model.repository.Repositories
import com.stovokor.util.EditSettings
import com.stovokor.util.EditorEvent
import com.stovokor.util.EditorEventListener
import com.stovokor.util.EventBus
import com.stovokor.util.SettingsUpdated
import com.simsilica.lemur.OptionPanel
import com.simsilica.lemur.OptionPanelState
// Editor app state that shows the settings dialog, either via the
// EditSettings event or the "settings" input function, and persists the
// result through the settings repository.
class SettingsEditorState extends BaseState
    with EditorEventListener
    with CanMapInput
    with StateFunctionListener {
  val settingsRepository = Repositories.settingsRepository
  override def initialize(stateManager: AppStateManager, simpleApp: Application) {
    super.initialize(stateManager, simpleApp)
    EventBus.subscribe(this, EditSettings())
    setupInput
  }
  // Mirror of initialize: unsubscribe from the bus and drop input listeners.
  override def cleanup() {
    super.cleanup
    EventBus.removeFromAll(this)
    inputMapper.removeStateListener(this, InputFunction.settings)
    inputMapper.removeStateListener(this, InputFunction.cancel)
  }
  def onEvent(event: EditorEvent) = event match {
    case EditSettings() => openSettingsDialog()
    case _ =>
  }
  def setupInput {
    inputMapper.addStateListener(this, InputFunction.settings)
    inputMapper.addStateListener(this, InputFunction.cancel)
    inputMapper.activateGroup(InputFunction.general)
  }
  // Key handling: only react on key-down (Positive) transitions.
  def valueChanged(func: FunctionId, value: InputState, tpf: Double) {
    if (value == InputState.Positive) func match {
      case InputFunction.settings => openSettingsDialog()
      case InputFunction.cancel => closeDialog(false)
      case _ =>
    }
  }
  // Working copy of the settings while the dialog is open.
  var updatedSettings = Settings()
  def openSettingsDialog() {
    println("opening settings dialog")
    optionPanelState.close()
    updatedSettings = settingsRepository.get()
    val dialog = GuiFactory.createSettingsDialog(cam.getWidth, cam.getHeight, updatedSettings, settingsUpdated, closeDialog)
    optionPanelState.show(dialog)
  }
  // Callback invoked by the dialog whenever the user edits a value.
  def settingsUpdated(updated: Settings) {
    updatedSettings = updated
  }
  // Closes the dialog; when save is true, persists the working copy and
  // broadcasts SettingsUpdated.
  def closeDialog(save: Boolean) {
    optionPanelState.close()
    if (save) {
      println(s"Settings saved $updatedSettings")
      settingsRepository.update(updatedSettings)
      EventBus.trigger(SettingsUpdated())
    }
  }
  def optionPanelState = stateManager.getState(classOf[OptionPanelState])
}
| jcfandino/leveleditor | src/main/scala/com/stovokor/editor/state/SettingsEditorState.scala | Scala | bsd-3-clause | 2,696 |
package org.jetbrains.plugins.scala.lang.controlFlow
import com.intellij.openapi.editor.SelectionModel
import com.intellij.psi.PsiElement
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.testFramework.fixtures.LightJavaCodeInsightFixtureTestCase
import org.jetbrains.plugins.scala.ScalaFileType
import org.jetbrains.plugins.scala.lang.psi.api.{ScControlFlowOwner, ScalaFile}
import org.jetbrains.plugins.scala.lang.psi.controlFlow.Instruction
import org.jetbrains.plugins.scala.util.TestUtils
import org.junit.Assert
/**
* @author ilyas
*/
// Fixture-based tests for the Scala control-flow builder: each .test file under
// testdata/controlFlow holds an input snippet and the expected instruction dump.
class ControlFlowTest extends LightJavaCodeInsightFixtureTestCase {
  protected override def getBasePath = TestUtils.getTestDataPath + "/controlFlow/"
  override def setUp(): Unit = {
    super.setUp()
    myFixture.setTestDataPath(getBasePath)
  }
  // Loads <testName>.test, builds the control flow for the selected region
  // (or the whole file when there is no selection), and compares the dump
  // against the expected output stored in the same file.
  def doTest(): Unit = {
    val input: java.util.List[String] = TestUtils.readInput(getBasePath + getTestName(true) + ".test")
    myFixture.configureByText(ScalaFileType.INSTANCE, input.get(0))
    val file: ScalaFile = myFixture.getFile.asInstanceOf[ScalaFile]
    val model: SelectionModel = myFixture.getEditor.getSelectionModel
    val start: PsiElement = file.findElementAt(if (model.hasSelection) model.getSelectionStart else 0)
    val end: PsiElement = file.findElementAt(if (model.hasSelection) model.getSelectionEnd - 1 else file.getTextLength - 1)
    val owner: ScControlFlowOwner = PsiTreeUtil.getParentOfType(PsiTreeUtil.findCommonParent(start, end), classOf[ScControlFlowOwner], false)
    val instructions = owner.getControlFlow
    val cf: String = dumpControlFlow(instructions.toSeq)
    Assert.assertEquals(input.get(1).trim, cf.trim)
  }
  // NOTE: "\\n" is the two characters backslash+n, so instructions are joined
  // with a literal "\n" — presumably matching the escaped form stored in the
  // .test files; confirm before changing.
  protected def dumpControlFlow(instructions: Seq[Instruction]) = instructions.mkString("\\n")
  def testAssignment(): Unit = {doTest()}
  def testIfStatement(): Unit = {doTest()}
  def testIfStatement2(): Unit = {doTest()}
  def testWhile(): Unit = {doTest()}
  def testWhile2(): Unit = {doTest()}
  def testMatch1(): Unit = {doTest()}
  def testFor1(): Unit = {doTest()}
  def testFor2(): Unit = {doTest()}
  def testDoWhile1(): Unit = {doTest()}
  def testReturn1(): Unit = {doTest()}
  def testMethod1(): Unit = {doTest()}
  def testThrow1(): Unit = {doTest()}
  def testKaplan_1703(): Unit = {doTest()}
  def testKaplan_1703_2(): Unit = {doTest()}
  def testTry1(): Unit = {doTest()}
  def testTry2(): Unit = {doTest()}
  def testTry3(): Unit = {doTest()}
  def testNoneThrow(): Unit = doTest()
  def testScl_7393(): Unit = doTest()
  def testUnresolvedParamThrow(): Unit = doTest()
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/controlFlow/ControlFlowTest.scala | Scala | apache-2.0 | 2,560 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.internal.io
import org.apache.hadoop.fs._
import org.apache.hadoop.mapreduce._
import org.apache.spark.util.Utils
/**
* An interface to define how a single Spark job commits its outputs. Three notes:
*
* 1. Implementations must be serializable, as the committer instance instantiated on the driver
* will be used for tasks on executors.
* 2. Implementations should have a constructor with 2 arguments:
* (jobId: String, path: String)
* 3. A committer should not be reused across multiple Spark jobs.
*
* The proper call sequence is:
*
* 1. Driver calls setupJob.
* 2. As part of each task's execution, executor calls setupTask and then commitTask
* (or abortTask if task failed).
* 3. When all necessary tasks completed successfully, the driver calls commitJob. If the job
* failed to execute (e.g. too many failed tasks), the job should call abortJob.
*/
abstract class FileCommitProtocol {
  import FileCommitProtocol._
  /**
   * Setups up a job. Must be called on the driver before any other methods can be invoked.
   */
  def setupJob(jobContext: JobContext): Unit
  /**
   * Commits a job after the writes succeed. Must be called on the driver.
   *
   * @param taskCommits the messages returned by every successful commitTask call.
   */
  def commitJob(jobContext: JobContext, taskCommits: Seq[TaskCommitMessage]): Unit
  /**
   * Aborts a job after the writes fail. Must be called on the driver.
   *
   * Calling this function is a best-effort attempt, because it is possible that the driver
   * just crashes (or killed) before it can call abort.
   */
  def abortJob(jobContext: JobContext): Unit
  /**
   * Sets up a task within a job.
   * Must be called before any other task related methods can be invoked.
   */
  def setupTask(taskContext: TaskAttemptContext): Unit
  /**
   * Notifies the commit protocol to add a new file, and gets back the full path that should be
   * used. Must be called on the executors when running tasks.
   *
   * Note that the returned temp file may have an arbitrary path. The commit protocol only
   * promises that the file will be at the location specified by the arguments after job commit.
   *
   * A full file path consists of the following parts:
   * 1. the base path
   * 2. some sub-directory within the base path, used to specify partitioning
   * 3. file prefix, usually some unique job id with the task id
   * 4. bucket id
   * 5. source specific file extension, e.g. ".snappy.parquet"
   *
   * The "dir" parameter specifies 2, and "ext" parameter specifies both 4 and 5, and the rest
   * are left to the commit protocol implementation to decide.
   *
   * Important: it is the caller's responsibility to add uniquely identifying content to "ext"
   * if a task is going to write out multiple files to the same dir. The file commit protocol only
   * guarantees that files written by different tasks will not conflict.
   */
  def newTaskTempFile(taskContext: TaskAttemptContext, dir: Option[String], ext: String): String
  /**
   * Similar to newTaskTempFile(), but allows files to committed to an absolute output location.
   * Depending on the implementation, there may be weaker guarantees around adding files this way.
   *
   * Important: it is the caller's responsibility to add uniquely identifying content to "ext"
   * if a task is going to write out multiple files to the same dir. The file commit protocol only
   * guarantees that files written by different tasks will not conflict.
   */
  def newTaskTempFileAbsPath(
      taskContext: TaskAttemptContext, absoluteDir: String, ext: String): String
  /**
   * Commits a task after the writes succeed. Must be called on the executors when running tasks.
   *
   * @return a message delivered to commitJob (and possibly onTaskCommit) on the driver.
   */
  def commitTask(taskContext: TaskAttemptContext): TaskCommitMessage
  /**
   * Aborts a task after the writes have failed. Must be called on the executors when running tasks.
   *
   * Calling this function is a best-effort attempt, because it is possible that the executor
   * just crashes (or killed) before it can call abort.
   */
  def abortTask(taskContext: TaskAttemptContext): Unit
  /**
   * Specifies that a file should be deleted with the commit of this job. The default
   * implementation deletes the file immediately; implementations may instead defer
   * the deletion until commit.
   */
  def deleteWithJob(fs: FileSystem, path: Path, recursive: Boolean): Boolean = {
    fs.delete(path, recursive)
  }
  /**
   * Called on the driver after a task commits. This can be used to access task commit messages
   * before the job has finished. These same task commit messages will be passed to commitJob()
   * if the entire job succeeds. Default: no-op.
   */
  def onTaskCommit(taskCommit: TaskCommitMessage): Unit = {}
}
object FileCommitProtocol {
  // Opaque, serializable container for whatever state a committed task needs
  // to report back to the driver; the payload is implementation-defined.
  class TaskCommitMessage(val obj: Any) extends Serializable
  // Shared message for committers that have nothing to report.
  object EmptyTaskCommitMessage extends TaskCommitMessage(null)
  /**
   * Instantiates a FileCommitProtocol using the given className.
   * The class must expose a (jobId: String, outputPath: String) constructor.
   */
  def instantiate(className: String, jobId: String, outputPath: String)
    : FileCommitProtocol = {
    val clazz = Utils.classForName(className).asInstanceOf[Class[FileCommitProtocol]]
    val ctor = clazz.getDeclaredConstructor(classOf[String], classOf[String])
    ctor.newInstance(jobId, outputPath)
  }
}
| minixalpha/spark | core/src/main/scala/org/apache/spark/internal/io/FileCommitProtocol.scala | Scala | apache-2.0 | 5,978 |
/*
* Copyright 2010 LinkedIn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.javaapi.consumer
import kafka.utils.threadsafe
import kafka.javaapi.message.ByteBufferMessageSet
import kafka.javaapi.MultiFetchResponse
import kafka.api.FetchRequest
/**
* A consumer of kafka messages
*/
@threadsafe
// Thin Java-API facade: converts arguments/results and delegates every call
// to the Scala kafka.consumer.SimpleConsumer.
class SimpleConsumer(val host: String,
                     val port: Int,
                     val soTimeout: Int,
                     val bufferSize: Int) {
  // The wrapped Scala consumer that performs the actual network I/O.
  val underlying = new kafka.consumer.SimpleConsumer(host, port, soTimeout, bufferSize)
  /**
   * Fetch a set of messages from a topic.
   *
   * @param request specifies the topic name, topic partition, starting byte offset, maximum bytes to be fetched.
   * @return a set of fetched messages
   */
  def fetch(request: FetchRequest): ByteBufferMessageSet = {
    import kafka.javaapi.Implicits._
    underlying.fetch(request)
  }
  /**
   * Combine multiple fetch requests in one call.
   *
   * @param fetches a sequence of fetch requests.
   * @return a sequence of fetch responses
   */
  def multifetch(fetches: java.util.List[FetchRequest]): MultiFetchResponse = {
    import scala.collection.JavaConversions._
    import kafka.javaapi.Implicits._
    underlying.multifetch(asBuffer(fetches): _*)
  }
  /**
   * Get a list of valid offsets (up to maxSize) before the given time.
   * The result is a list of offsets, in descending order.
   *
   * @param time: time in millisecs (-1, from the latest offset available, -2 from the smallest offset available)
   * @return an array of offsets
   */
  def getOffsetsBefore(topic: String, partition: Int, time: Long, maxNumOffsets: Int): Array[Long] =
    underlying.getOffsetsBefore(topic, partition, time, maxNumOffsets)
  // Closes the underlying consumer's connection.
  def close() {
    underlying.close
  }
} | quipo/kafka | core/src/main/scala/kafka/javaapi/consumer/SimpleConsumer.scala | Scala | apache-2.0 | 2,320 |
package models
import java.net.URLDecoder
import java.util.Date
import com.hp.hpl.jena.query.QueryExecutionFactory
import org.joda.time.LocalDate
import utils.Implicits._
import utils.QueryHost
import utils.semantic._
import scala.concurrent.{ Future, Promise }
// A lab work (Praktikum) is identified by the course it belongs to and the semester it runs in.
case class LabWork(course: Resource, semester: Resource)
// Raw form input for creating a lab work.
case class LabWorkFormModel(courseId: String, semester: String)
// Raw form input for updating a lab work, including its date range.
case class LabworkUpdateModel(courseId: String, semester: String, startDate: Date, endDate: Date)
// String constants naming the supported lab-work export modes.
object LabworkExportModes {
  val PublicSchedule = "publicSchedule"
  val PublicGroupMembersTable = "publicMembers"
  val InternalSchedule = "internalSchedule"
  val AssessmentSchedule = "assessmentSchedule"
  val LabworkGraduates = "labworkGraduates"
  val EmailList = "emailList"
}
// Play form definitions binding HTTP parameters to the lab-work form models.
object LabWorkForms {
  import play.api.data.Forms._
  import play.api.data._
  // Creation form: course and semester are both required.
  val labworkForm = Form(
    mapping(
      "courseId" -> nonEmptyText,
      "semester" -> nonEmptyText
    )(LabWorkFormModel.apply)(LabWorkFormModel.unapply)
  )
  // Update form: additionally binds the start and end dates.
  val labworkUpdateForm = Form(
    mapping(
      "courseId" -> nonEmptyText,
      "semester" -> nonEmptyText,
      "startDate" -> date,
      "endDate" -> date
    )(LabworkUpdateModel.apply)(LabworkUpdateModel.unapply)
  )
}
/**
* Praktika
*/
object LabWorks {
import utils.semantic.Vocabulary._
import scala.concurrent.ExecutionContext.Implicits.global
def create(labWork: LabWork): Future[Individual] = {
import utils.Global._
val semesterIndividual = Individual(labWork.semester)
val startDate = semesterIndividual.props.getOrElse(lwm.hasStartDate, List(new DateLiteral(LocalDate.now()))).head
val endDate = semesterIndividual.props.getOrElse(lwm.hasEndDate, List(new DateLiteral(LocalDate.now()))).head
val courseIndividual = Individual(labWork.course)
val resource = ResourceUtils.createResource(lwmNamespace)
val futureTimetable = Timetables.create(Timetable(resource))
val labworkApplicationList = LabworkApplicationLists.create(LabworkApplicationList(resource))
val label = courseIndividual.props.getOrElse(rdfs.label, List(StringLiteral(""))).head.asLiteral().get
val futureStatements = futureTimetable.map { timetable β
List(
Statement(resource, rdf.typ, lwm.LabWork),
Statement(resource, rdf.typ, owl.NamedIndividual),
Statement(resource, rdfs.label, label),
Statement(resource, lwm.hasTimetable, timetable),
Statement(resource, lwm.hasCourse, labWork.course),
Statement(resource, lwm.hasStartDate, startDate),
Statement(resource, lwm.hasEndDate, endDate),
Statement(resource, lwm.allowsApplications, StringLiteral("false")),
Statement(resource, lwm.isClosed, StringLiteral("false")),
Statement(resource, lwm.hasSemester, labWork.semester)
)
}
labworkApplicationList.flatMap { list β
futureStatements.flatMap { statements β
sparqlExecutionContext.executeUpdate(SPARQLBuilder.insertStatements(statements: _*)).map { r β
Individual(resource)
}
}
}
}
def delete(resource: Resource): Future[Resource] = {
import utils.Global._
val p = Promise[Resource]()
val individual = Individual(resource)
if (individual.props(rdf.typ).contains(lwm.LabWork)) {
sparqlExecutionContext.executeUpdate(SPARQLBuilder.removeIndividual(resource)).map { b β p.success(resource) }
}
p.future
}
def all(): Future[List[Individual]] = {
import utils.Global._
sparqlExecutionContext.executeQuery(SPARQLBuilder.listIndividualsWithClass(lwm.LabWork)).map { stringResult β
SPARQLTools.statementsFromString(stringResult).map(labwork β Individual(labwork.s)).toList
}
}
def forStudent(student: Resource)(implicit queryHost: QueryHost) = {
s"""
|${Vocabulary.defaultPrefixes}
|select distinct ?labwork where {
| $student lwm:memberOf ?group .
| ?group lwm:hasLabWork ?labwork
|}
""".stripMargin.execSelect().map { solution β
Resource(solution.data("?labwork").toString)
}
}
def openForDegree(degree: Resource)(implicit queryHost: QueryHost) = {
s"""
|${Vocabulary.defaultPrefixes}
|select * where {
| ?labwork lwm:hasCourse ?course .
| ?course lwm:hasDegree $degree .
| ?labwork lwm:allowsApplications "true"
|}
""".stripMargin.execSelect().map { solution β
Resource(solution.data("labwork").toString)
}
}
def pendingApplications(student: Resource)(implicit queryHost: QueryHost) = {
s"""
|${Vocabulary.defaultPrefixes}
|select * where {
| $student lwm:hasPendingApplication ?application .
| ?application lwm:hasLabWork ?labwork
|}
""".stripMargin.execSelect().map { solution β
Resource(solution.data("labwork").toString)
}
}
def orderedGroups(labwork: Resource) = {
import utils.Global._
val query =
s"""
|select * where {
|$labwork ${Vocabulary.lwm.hasGroup} ?group .
|?group ${Vocabulary.lwm.hasGroupId} ?id .
|} order by desc(?id)
""".stripMargin
val result = QueryExecutionFactory.sparqlService(queryHost, query).execSelect()
var groups = List.empty[(Resource, String)]
while (result.hasNext) {
val n = result.nextSolution()
val group = Resource(n.getResource("group").toString)
val id = n.getLiteral("id").getString
groups = (group, id) :: groups
}
groups
}
  // Builds the day schedule: every group schedule entry whose assignment date
  // equals `date`, joined with its room, time slot, supervisor, course and
  // degree. Returns (startTime -> details) tuples sorted by start time, where
  // details = (group, course, degree, groupId, roomId, supervisorName,
  // startTime, endTime, orderId).
  def labworksForDate(date: LocalDate) = {
    import utils.Global._
    val query =
      s"""
      select * where {
        ?group ${rdf.typ} ${lwm.Group} .
        ?group ${lwm.hasGroupId} ?groupId .
        ?group ${lwm.hasScheduleAssociation} ?schedule .
        optional {
          ?schedule ${lwm.hasAssignmentAssociation} ?assignmentAssociation .
          ?assignmentAssociation ${lwm.hasOrderId} ?orderId .
        }
        ?schedule ${lwm.hasAssignmentDateTimetableEntry} ?entry .
        ?entry ${lwm.hasRoom} ?room .
        ?entry ${lwm.hasStartTime} ?startTime .
        ?entry ${lwm.hasEndTime} ?endTime .
        ?entry ${lwm.hasSupervisor} ?supervisor .
        ?group ${lwm.hasLabWork} ?labwork .
        ?labwork ${lwm.hasCourse} ?course .
        ?course ${lwm.hasId} ?courseName .
        ?course ${lwm.hasDegree} ?degree .
        ?degree ${lwm.hasId} ?degreeName .
        ?supervisor ${rdfs.label} ?name .
        ?room ${lwm.hasRoomId} ?roomId .
        ?schedule ${lwm.hasAssignmentDate} "${date.toString("yyyy-MM-dd")}" .
      }
      """.stripMargin
    val result = QueryExecutionFactory.sparqlService(queryHost, query).execSelect()
    var dates = List.empty[(Time, (Resource, String, String, String, String, String, Time, Time, Int))]
    while (result.hasNext) {
      val n = result.nextSolution()
      val groupId = n.getLiteral("groupId").toString
      // Literal values are URL-encoded in the store; decode before use.
      val startTimeString = URLDecoder.decode(n.getLiteral("startTime").toString, "UTF-8").split(":")
      val startTime = Time(startTimeString(0).toInt, startTimeString(1).toInt)
      val endTimeString = URLDecoder.decode(n.getLiteral("endTime").toString, "UTF-8").split(":")
      val endTime = Time(endTimeString(0).toInt, endTimeString(1).toInt)
      val name = URLDecoder.decode(n.getLiteral("name").toString, "UTF-8")
      val course = URLDecoder.decode(n.getLiteral("courseName").toString, "UTF-8")
      val degree = URLDecoder.decode(n.getLiteral("degreeName").toString, "UTF-8")
      val roomId = URLDecoder.decode(n.getLiteral("roomId").toString, "UTF-8")
      // Order ids are stored zero-based; +1 converts them to the one-based
      // assignment number shown to users. 0 means "no assignment bound".
      val orderId = if (n.contains("orderId")) URLDecoder.decode(n.getLiteral("orderId").toString, "UTF-8").toInt + 1 else 0
      val groupResource = Resource(n.getResource("group").toString)
      // Rows whose optional assignmentAssociation did not match are skipped
      // entirely (newDate == Nil).
      val newDate = if (n.getResource("assignmentAssociation") == null) Nil else List((startTime, (groupResource, course, degree, groupId, roomId, name, startTime, endTime, orderId)))
      dates = newDate ::: dates
    }
    // Sort the collected entries chronologically by start time.
    dates.sortBy(_._1)
  }
}
/**
 * An assignment group within a labwork.
 * @param id id of this group (also used as its rdfs:label, see LabworkGroups.create)
 * @param labwork resource of the labwork this group belongs to
 */
case class LabWorkGroup(id: String, labwork: Resource)
object LabworkGroups {
  import utils.Global._
  import utils.semantic.Vocabulary._
  import scala.concurrent.ExecutionContext.Implicits.global

  /**
   * Persists a new group individual, labels it with its id and links it
   * bidirectionally to its labwork.
   */
  def create(group: LabWorkGroup): Future[Individual] = {
    val resource = ResourceUtils.createResource(lwmNamespace)
    val statements = List(
      Statement(resource, rdf.typ, lwm.Group),
      Statement(resource, rdf.typ, owl.NamedIndividual),
      Statement(resource, lwm.hasGroupId, StringLiteral(group.id)),
      Statement(resource, rdfs.label, StringLiteral(group.id)),
      Statement(resource, lwm.hasLabWork, group.labwork),
      Statement(group.labwork, lwm.hasGroup, resource)
    )
    sparqlExecutionContext.executeUpdate(SPARQLBuilder.insertStatements(statements: _*)).map { _ =>
      Individual(resource)
    }
  }

  /**
   * Deletes every group individual whose id matches the given group.
   *
   * Bug fix: groups are created with lwm.hasGroupId (see create above), not
   * lwm.hasId, so the previous lookup via lwm.hasId could never match and the
   * returned future never completed. Additionally, p.success would throw on a
   * second match; trySuccess completes exactly once, and an empty result set
   * now completes the future instead of hanging forever.
   */
  def delete(group: LabWorkGroup): Future[LabWorkGroup] = {
    val maybeGroup = SPARQLBuilder.listIndividualsWithClassAndProperty(lwm.Group, Vocabulary.lwm.hasGroupId, StringLiteral(group.id))
    val resultFuture = sparqlExecutionContext.executeQuery(maybeGroup)
    val p = Promise[LabWorkGroup]()
    resultFuture.map { result =>
      val resources = SPARQLTools.statementsFromString(result).map(_.s)
      if (resources.isEmpty) {
        // Nothing to delete: resolve immediately rather than never completing.
        p.trySuccess(group)
      } else {
        resources.foreach { resource =>
          sparqlExecutionContext.executeUpdate(SPARQLBuilder.removeIndividual(resource)).map { _ =>
            p.trySuccess(group)
          }
        }
      }
    }
    p.future
  }

  /**
   * Deletes a group individual by resource.
   *
   * NOTE(review): if the resource is not typed as lwm:Group the returned
   * future never completes — behaviour kept as-is for parity with
   * LabWorks.delete; confirm callers expect it.
   */
  def delete(resource: Resource): Future[Resource] = {
    val p = Promise[Resource]()
    val individual = Individual(resource)
    if (individual.props(rdf.typ).contains(lwm.Group)) {
      sparqlExecutionContext.executeUpdate(SPARQLBuilder.removeIndividual(resource)).map { _ => p.success(resource) }
    }
    p.future
  }

  /** Lists every individual typed as lwm:Group. */
  def all(): Future[List[Individual]] = {
    sparqlExecutionContext.executeQuery(SPARQLBuilder.listIndividualsWithClass(lwm.Group)).map { stringResult =>
      SPARQLTools.statementsFromString(stringResult).map(statement => Individual(statement.s)).toList
    }
  }

  /** True if the resource is typed as lwm:Group. */
  def isLabWorkGroup(resource: Resource): Future[Boolean] =
    sparqlExecutionContext.executeBooleanQuery(s"ASK {$resource ${Vocabulary.rdf.typ} ${lwm.Group}}")
}
| FHK-ADV/lwm | app/models/LabWorks.scala | Scala | mit | 10,375 |
import scala.language.experimental.macros
import scala.reflect.macros.blackbox.Context
object Macros {
  // Macro implementation: synthesizes `class C { def x = 2 }` via reify, then
  // rewrites every method `def m: T = body` into an additional async variant
  // `def asyncM: Future[T] = Future(body)`, returning the transformed class
  // definition as the macro expansion.
  def impl(c: Context) = {
    import c.universe._
    // Extract the ClassDef from the block produced by reify.
    val Expr(Block((cdef: ClassDef) :: Nil, _)) = reify { class C { def x = 2 } }
    val cdef1 =
      new Transformer {
        override def transform(tree: Tree): Tree = tree match {
          case Template(_, _, ctor :: defs) =>
            // For each method, build an async twin wrapped in scala.concurrent.Future.
            val defs1 = defs collect {
              case ddef @ DefDef(mods, name, tparams, vparamss, tpt, body) =>
                // Term reference used for the Future.apply call in the new body.
                val future = Select(Select(Ident(TermName("scala")), TermName("concurrent")), TermName("Future"))
                // Type reference used to wrap the declared result type.
                val Future = Select(Select(Ident(TermName("scala")), TermName("concurrent")), TypeName("Future"))
                // Inferred result types (empty tpt) stay inferred; explicit ones become Future[T].
                val tpt1 = if (tpt.isEmpty) tpt else AppliedTypeTree(Future, List(tpt))
                val body1 = Apply(future, List(body))
                val name1 = TermName("async" + name.toString.capitalize)
                DefDef(mods, name1, tparams, vparamss, tpt1, body1)
            }
            // `+:` and `:::` are right-associative: ctor +: (defs ::: defs1),
            // i.e. constructor first, then originals, then the async twins.
            Template(Nil, emptyValDef, ctor +: defs ::: defs1)
          case _ =>
            super.transform(tree)
        }
      } transform cdef
    // Expansion: { <transformed class>; () }
    c.Expr[Unit](Block(cdef1 :: Nil, Literal(Constant(()))))
  }
  def foo: Unit = macro impl
}
| lrytz/scala | test/files/run/macro-duplicate/Impls_Macros_1.scala | Scala | apache-2.0 | 1,277 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.aggregate
import org.apache.spark.TaskContext
import org.apache.spark.internal.Logging
import org.apache.spark.memory.SparkOutOfMemoryError
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeRowJoiner
import org.apache.spark.sql.execution.{UnsafeFixedWidthAggregationMap, UnsafeKVExternalSorter}
import org.apache.spark.sql.execution.metric.SQLMetric
import org.apache.spark.sql.types.StructType
import org.apache.spark.unsafe.KVIterator
/**
* An iterator used to evaluate aggregate functions. It operates on [[UnsafeRow]]s.
*
* This iterator first uses hash-based aggregation to process input rows. It uses
* a hash map to store groups and their corresponding aggregation buffers. If
* this map cannot allocate memory from memory manager, it spills the map into disk
* and creates a new one. After processed all the input, then merge all the spills
* together using external sorter, and do sort-based aggregation.
*
* The process has the following step:
* - Step 0: Do hash-based aggregation.
* - Step 1: Sort all entries of the hash map based on values of grouping expressions and
* spill them to disk.
* - Step 2: Create an external sorter based on the spilled sorted map entries and reset the map.
* - Step 3: Get a sorted [[KVIterator]] from the external sorter.
* - Step 4: Repeat step 0 until no more input.
* - Step 5: Initialize sort-based aggregation on the sorted iterator.
* Then, this iterator works in the way of sort-based aggregation.
*
* The code of this class is organized as follows:
* - Part 1: Initializing aggregate functions.
* - Part 2: Methods and fields used by setting aggregation buffer values,
* processing input rows from inputIter, and generating output
* rows.
* - Part 3: Methods and fields used by hash-based aggregation.
* - Part 4: Methods and fields used when we switch to sort-based aggregation.
* - Part 5: Methods and fields used by sort-based aggregation.
* - Part 6: Loads input and process input rows.
* - Part 7: Public methods of this iterator.
* - Part 8: A utility function used to generate a result when there is no
* input and there is no grouping expression.
*
* @param partIndex
* index of the partition
* @param groupingExpressions
* expressions for grouping keys
* @param aggregateExpressions
* [[AggregateExpression]] containing [[AggregateFunction]]s with mode [[Partial]],
* [[PartialMerge]], or [[Final]].
* @param aggregateAttributes the attributes of the aggregateExpressions'
* outputs when they are stored in the final aggregation buffer.
* @param resultExpressions
* expressions for generating output rows.
* @param newMutableProjection
* the function used to create mutable projections.
* @param originalInputAttributes
* attributes of representing input rows from `inputIter`.
* @param inputIter
* the iterator containing input [[UnsafeRow]]s.
*/
class TungstenAggregationIterator(
    partIndex: Int,
    groupingExpressions: Seq[NamedExpression],
    aggregateExpressions: Seq[AggregateExpression],
    aggregateAttributes: Seq[Attribute],
    initialInputBufferOffset: Int,
    resultExpressions: Seq[NamedExpression],
    newMutableProjection: (Seq[Expression], Seq[Attribute]) => MutableProjection,
    originalInputAttributes: Seq[Attribute],
    inputIter: Iterator[InternalRow],
    testFallbackStartsAt: Option[(Int, Int)],
    numOutputRows: SQLMetric,
    peakMemory: SQLMetric,
    spillSize: SQLMetric,
    avgHashProbe: SQLMetric)
  extends AggregationIterator(
    partIndex,
    groupingExpressions,
    originalInputAttributes,
    aggregateExpressions,
    aggregateAttributes,
    initialInputBufferOffset,
    resultExpressions,
    newMutableProjection) with Logging {
  ///////////////////////////////////////////////////////////////////////////
  // Part 1: Initializing aggregate functions.
  ///////////////////////////////////////////////////////////////////////////
  // Remember spill data size of this task before execute this operator so that we can
  // figure out how many bytes we spilled for this operator.
  private val spillSizeBefore = TaskContext.get().taskMetrics().memoryBytesSpilled
  ///////////////////////////////////////////////////////////////////////////
  // Part 2: Methods and fields used by setting aggregation buffer values,
  //         processing input rows from inputIter, and generating output
  //         rows.
  ///////////////////////////////////////////////////////////////////////////
  // Creates a new aggregation buffer and initializes buffer values.
  // This function should be only called at most two times (when we create the hash map,
  // and when we create the re-used buffer for sort-based aggregation).
  private def createNewAggregationBuffer(): UnsafeRow = {
    val bufferSchema = aggregateFunctions.flatMap(_.aggBufferAttributes)
    val buffer: UnsafeRow = UnsafeProjection.create(bufferSchema.map(_.dataType))
      .apply(new GenericInternalRow(bufferSchema.length))
    // Initialize declarative aggregates' buffer values
    expressionAggInitialProjection.target(buffer)(EmptyRow)
    // Initialize imperative aggregates' buffer values
    aggregateFunctions.collect { case f: ImperativeAggregate => f }.foreach(_.initialize(buffer))
    buffer
  }
  // Creates a function used to generate output rows.
  override protected def generateResultProjection(): (UnsafeRow, InternalRow) => UnsafeRow = {
    val modes = aggregateExpressions.map(_.mode).distinct
    if (modes.nonEmpty && !modes.contains(Final) && !modes.contains(Complete)) {
      // Fast path for partial aggregation, UnsafeRowJoiner is usually faster than projection
      val groupingAttributes = groupingExpressions.map(_.toAttribute)
      val bufferAttributes = aggregateFunctions.flatMap(_.aggBufferAttributes)
      val groupingKeySchema = StructType.fromAttributes(groupingAttributes)
      val bufferSchema = StructType.fromAttributes(bufferAttributes)
      val unsafeRowJoiner = GenerateUnsafeRowJoiner.create(groupingKeySchema, bufferSchema)
      (currentGroupingKey: UnsafeRow, currentBuffer: InternalRow) => {
        unsafeRowJoiner.join(currentGroupingKey, currentBuffer.asInstanceOf[UnsafeRow])
      }
    } else {
      super.generateResultProjection()
    }
  }
  // An aggregation buffer containing initial buffer values. It is used to
  // initialize other aggregation buffers.
  private[this] val initialAggregationBuffer: UnsafeRow = createNewAggregationBuffer()
  ///////////////////////////////////////////////////////////////////////////
  // Part 3: Methods and fields used by hash-based aggregation.
  ///////////////////////////////////////////////////////////////////////////
  // This is the hash map used for hash-based aggregation. It is backed by an
  // UnsafeFixedWidthAggregationMap and it is used to store
  // all groups and their corresponding aggregation buffers for hash-based aggregation.
  private[this] val hashMap = new UnsafeFixedWidthAggregationMap(
    initialAggregationBuffer,
    StructType.fromAttributes(aggregateFunctions.flatMap(_.aggBufferAttributes)),
    StructType.fromAttributes(groupingExpressions.map(_.toAttribute)),
    TaskContext.get(),
    1024 * 16, // initial capacity
    TaskContext.get().taskMemoryManager().pageSizeBytes
  )
  // The function used to read and process input rows. When processing input rows,
  // it first uses hash-based aggregation by putting groups and their buffers in
  // hashMap. If there is not enough memory, it will multiple hash-maps, spilling
  // after each becomes full then using sort to merge these spills, finally do sort
  // based aggregation.
  private def processInputs(fallbackStartsAt: (Int, Int)): Unit = {
    if (groupingExpressions.isEmpty) {
      // If there is no grouping expressions, we can just reuse the same buffer over and over again.
      // Note that it would be better to eliminate the hash map entirely in the future.
      val groupingKey = groupingProjection.apply(null)
      val buffer: UnsafeRow = hashMap.getAggregationBufferFromUnsafeRow(groupingKey)
      while (inputIter.hasNext) {
        val newInput = inputIter.next()
        processRow(buffer, newInput)
      }
    } else {
      var i = 0
      while (inputIter.hasNext) {
        val newInput = inputIter.next()
        val groupingKey = groupingProjection.apply(newInput)
        var buffer: UnsafeRow = null
        if (i < fallbackStartsAt._2) {
          buffer = hashMap.getAggregationBufferFromUnsafeRow(groupingKey)
        }
        if (buffer == null) {
          val sorter = hashMap.destructAndCreateExternalSorter()
          if (externalSorter == null) {
            externalSorter = sorter
          } else {
            externalSorter.merge(sorter)
          }
          // Reset the row counter so the freshly-emptied map is filled again
          // before the (test-only) fallback threshold is re-checked.
          i = 0
          buffer = hashMap.getAggregationBufferFromUnsafeRow(groupingKey)
          if (buffer == null) {
            // failed to allocate the first page
            // scalastyle:off throwerror
            throw new SparkOutOfMemoryError("Not enough memory for aggregation")
            // scalastyle:on throwerror
          }
        }
        processRow(buffer, newInput)
        i += 1
      }
      if (externalSorter != null) {
        val sorter = hashMap.destructAndCreateExternalSorter()
        externalSorter.merge(sorter)
        hashMap.free()
        switchToSortBasedAggregation()
      }
    }
  }
  // The iterator created from hashMap. It is used to generate output rows when we
  // are using hash-based aggregation.
  private[this] var aggregationBufferMapIterator: KVIterator[UnsafeRow, UnsafeRow] = null
  // Indicates if aggregationBufferMapIterator still has key-value pairs.
  private[this] var mapIteratorHasNext: Boolean = false
  ///////////////////////////////////////////////////////////////////////////
  // Part 4: Methods and fields used when we switch to sort-based aggregation.
  ///////////////////////////////////////////////////////////////////////////
  // This sorter is used for sort-based aggregation. It is initialized as soon as
  // we switch from hash-based to sort-based aggregation. Otherwise, it is not used.
  private[this] var externalSorter: UnsafeKVExternalSorter = null
  /**
   * Switch to sort-based aggregation when the hash-based approach is unable to acquire memory.
   */
  private def switchToSortBasedAggregation(): Unit = {
    logInfo("falling back to sort based aggregation.")
    // Basically the value of the KVIterator returned by externalSorter
    // will be just aggregation buffer, so we rewrite the aggregateExpressions to reflect it.
    val newExpressions = aggregateExpressions.map {
      case agg @ AggregateExpression(_, Partial, _, _) =>
        agg.copy(mode = PartialMerge)
      case agg @ AggregateExpression(_, Complete, _, _) =>
        agg.copy(mode = Final)
      case other => other
    }
    val newFunctions = initializeAggregateFunctions(newExpressions, 0)
    val newInputAttributes = newFunctions.flatMap(_.inputAggBufferAttributes)
    sortBasedProcessRow = generateProcessRow(newExpressions, newFunctions, newInputAttributes)
    // Step 5: Get the sorted iterator from the externalSorter.
    sortedKVIterator = externalSorter.sortedIterator()
    // Step 6: Pre-load the first key-value pair from the sorted iterator to make
    // hasNext idempotent.
    sortedInputHasNewGroup = sortedKVIterator.next()
    // Copy the first key and value (aggregation buffer).
    if (sortedInputHasNewGroup) {
      val key = sortedKVIterator.getKey
      val value = sortedKVIterator.getValue
      nextGroupingKey = key.copy()
      currentGroupingKey = key.copy()
      firstRowInNextGroup = value.copy()
    }
    // Step 7: set sortBased to true.
    sortBased = true
  }
  ///////////////////////////////////////////////////////////////////////////
  // Part 5: Methods and fields used by sort-based aggregation.
  ///////////////////////////////////////////////////////////////////////////
  // Indicates if we are using sort-based aggregation. Because we first try to use
  // hash-based aggregation, its initial value is false.
  private[this] var sortBased: Boolean = false
  // The KVIterator containing input rows for the sort-based aggregation. It will be
  // set in switchToSortBasedAggregation when we switch to sort-based aggregation.
  private[this] var sortedKVIterator: UnsafeKVExternalSorter#KVSorterIterator = null
  // The grouping key of the current group.
  private[this] var currentGroupingKey: UnsafeRow = null
  // The grouping key of next group.
  private[this] var nextGroupingKey: UnsafeRow = null
  // The first row of next group.
  private[this] var firstRowInNextGroup: UnsafeRow = null
  // Indicates if we has new group of rows from the sorted input iterator.
  private[this] var sortedInputHasNewGroup: Boolean = false
  // The aggregation buffer used by the sort-based aggregation.
  private[this] val sortBasedAggregationBuffer: UnsafeRow = createNewAggregationBuffer()
  // The function used to process rows in a group
  private[this] var sortBasedProcessRow: (InternalRow, InternalRow) => Unit = null
  // Processes rows in the current group. It will stop when it find a new group.
  private def processCurrentSortedGroup(): Unit = {
    // First, we need to copy nextGroupingKey to currentGroupingKey.
    currentGroupingKey.copyFrom(nextGroupingKey)
    // Now, we will start to find all rows belonging to this group.
    // We create a variable to track if we see the next group.
    var findNextPartition = false
    // firstRowInNextGroup is the first row of this group. We first process it.
    sortBasedProcessRow(sortBasedAggregationBuffer, firstRowInNextGroup)
    // The search will stop when we see the next group or there is no
    // input row left in the iter.
    // Pre-load the first key-value pair to make the condition of the while loop
    // has no action (we do not trigger loading a new key-value pair
    // when we evaluate the condition).
    var hasNext = sortedKVIterator.next()
    while (!findNextPartition && hasNext) {
      // Get the grouping key and value (aggregation buffer).
      val groupingKey = sortedKVIterator.getKey
      val inputAggregationBuffer = sortedKVIterator.getValue
      // Check if the current row belongs the current input row.
      if (currentGroupingKey.equals(groupingKey)) {
        sortBasedProcessRow(sortBasedAggregationBuffer, inputAggregationBuffer)
        hasNext = sortedKVIterator.next()
      } else {
        // We find a new group.
        findNextPartition = true
        // Remember the next group's key and first row by copying into the
        // pre-allocated rows (the iterator's key/value objects are reused).
        // NOTE: the original comment here was truncated ("copyFrom will fail
        // when"); presumably it referred to schema/size mismatches, which
        // cannot occur for rows produced by the same sorter — confirm.
        nextGroupingKey.copyFrom(groupingKey)
        firstRowInNextGroup.copyFrom(inputAggregationBuffer)
      }
    }
    // We have not seen a new group. It means that there is no new row in the input
    // iter. The current group is the last group of the sortedKVIterator.
    if (!findNextPartition) {
      sortedInputHasNewGroup = false
      sortedKVIterator.close()
    }
  }
  ///////////////////////////////////////////////////////////////////////////
  // Part 6: Loads input rows and setup aggregationBufferMapIterator if we
  //         have not switched to sort-based aggregation.
  ///////////////////////////////////////////////////////////////////////////
  /**
   * Start processing input rows. Note: executed eagerly at construction time.
   */
  processInputs(testFallbackStartsAt.getOrElse((Int.MaxValue, Int.MaxValue)))
  // If we did not switch to sort-based aggregation in processInputs,
  // we pre-load the first key-value pair from the map (to make hasNext idempotent).
  if (!sortBased) {
    // First, set aggregationBufferMapIterator.
    aggregationBufferMapIterator = hashMap.iterator()
    // Pre-load the first key-value pair from the aggregationBufferMapIterator.
    mapIteratorHasNext = aggregationBufferMapIterator.next()
    // If the map is empty, we just free it.
    if (!mapIteratorHasNext) {
      hashMap.free()
    }
  }
  TaskContext.get().addTaskCompletionListener[Unit](_ => {
    // At the end of the task, update the task's peak memory usage. Since we destroy
    // the map to create the sorter, their memory usages should not overlap, so it is safe
    // to just use the max of the two.
    val mapMemory = hashMap.getPeakMemoryUsedBytes
    val sorterMemory = Option(externalSorter).map(_.getPeakMemoryUsedBytes).getOrElse(0L)
    val maxMemory = Math.max(mapMemory, sorterMemory)
    val metrics = TaskContext.get().taskMetrics()
    peakMemory.set(maxMemory)
    spillSize.set(metrics.memoryBytesSpilled - spillSizeBefore)
    metrics.incPeakExecutionMemory(maxMemory)
    // Updating average hashmap probe
    avgHashProbe.set(hashMap.getAverageProbesPerLookup())
  })
  ///////////////////////////////////////////////////////////////////////////
  // Part 7: Iterator's public methods.
  ///////////////////////////////////////////////////////////////////////////
  override final def hasNext: Boolean = {
    (sortBased && sortedInputHasNewGroup) || (!sortBased && mapIteratorHasNext)
  }
  override final def next(): UnsafeRow = {
    if (hasNext) {
      val res = if (sortBased) {
        // Process the current group.
        processCurrentSortedGroup()
        // Generate output row for the current group.
        val outputRow = generateOutput(currentGroupingKey, sortBasedAggregationBuffer)
        // Initialize buffer values for the next group.
        sortBasedAggregationBuffer.copyFrom(initialAggregationBuffer)
        outputRow
      } else {
        // We did not fall back to sort-based aggregation.
        val result =
          generateOutput(
            aggregationBufferMapIterator.getKey,
            aggregationBufferMapIterator.getValue)
        // Pre-load next key-value pair form aggregationBufferMapIterator to make hasNext
        // idempotent.
        mapIteratorHasNext = aggregationBufferMapIterator.next()
        if (!mapIteratorHasNext) {
          // If there is no input from aggregationBufferMapIterator, we copy current result.
          val resultCopy = result.copy()
          // Then, we free the map.
          hashMap.free()
          resultCopy
        } else {
          result
        }
      }
      numOutputRows += 1
      res
    } else {
      // no more result
      throw new NoSuchElementException
    }
  }
  ///////////////////////////////////////////////////////////////////////////
  // Part 8: Utility functions
  ///////////////////////////////////////////////////////////////////////////
  /**
   * Generate an output row when there is no input and there is no grouping expression.
   */
  def outputForEmptyGroupingKeyWithoutInput(): UnsafeRow = {
    if (groupingExpressions.isEmpty) {
      sortBasedAggregationBuffer.copyFrom(initialAggregationBuffer)
      // We create an output row and copy it. So, we can free the map.
      val resultCopy =
        generateOutput(UnsafeRow.createFromByteArray(0, 0), sortBasedAggregationBuffer).copy()
      hashMap.free()
      resultCopy
    } else {
      throw new IllegalStateException(
        "This method should not be called when groupingExpressions is not empty.")
    }
  }
}
| guoxiaolongzte/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala | Scala | apache-2.0 | 20,213 |
package Tutorial
import Chisel._
//object Tutorial extends NOCUtil{
object Tutorial {
  /**
   * Entry point for elaborating and testing a design.
   *
   * The first command-line argument names the component to build; all
   * remaining arguments are forwarded unchanged to chiselMainTest.
   *
   * A large collection of historical component cases (GCD, RealGCD, Router,
   * KmeansAcc, NIC, ...) previously lived here as commented-out code and has
   * been removed as dead code; recover it from version control if needed.
   */
  def main(args: Array[String]): Unit = {
    // Arguments after the component name are passed through to chiselMainTest.
    val tutArgs = args.slice(1, args.length)
    args(0) match {
      case "Top" =>
        chiselMainTest(tutArgs, () => new Top){
          c => new TopTests(c).asInstanceOf[Chisel.Tester[Top]]}
      case other =>
        // Previously an unknown name crashed with a bare MatchError; fail
        // with an explicit message instead.
        sys.error(s"Unknown component: $other (supported: Top)")
    }
  }
}
| seyedmaysamlavasani/GorillaPP | chisel/Gorilla++/src/tutorial.scala | Scala | bsd-3-clause | 4,824 |
package info.spielproject.spiel
package ui
import collection.JavaConversions._
import concurrent._
import ExecutionContext.Implicits.global
import android.app._
import android.bluetooth._
import android.content._
import android.content.pm._
import android.database._
import android.net._
import android.os._
import android.preference._
import android.util._
import android.view._
import android.view.accessibility._
import android.widget._
import org.droidparts.preference.MultiSelectListPreference
import org.scaloid.common.{Preferences => _, _}
import presenters._
import scripting._
/**
* Activity that serves as a host for other tabs.
*/
class Spiel extends SActivity with ActionBar.TabListener {
  // Configure the action bar with one tab per hosted fragment.
  onCreate {
    val bar = getActionBar
    bar.setNavigationMode(ActionBar.NAVIGATION_MODE_TABS)
    bar.addTab(bar.newTab.setText(R.string.scripts).setTabListener(this))
    bar.addTab(bar.newTab.setText(R.string.events).setTabListener(this))
  }
  // Re-selecting the active tab is a no-op.
  def onTabReselected(tab:ActionBar.Tab, ft:FragmentTransaction) { }
  // The fragment currently shown for the selected tab, if any.
  private var fragment:Option[Fragment] = None
  // Swap in the fragment matching the newly selected tab, removing any
  // previously displayed one first. Tab 0 = scripts, tab 1 = events.
  def onTabSelected(tab:ActionBar.Tab, ft:FragmentTransaction) {
    fragment.foreach(ft.remove(_))
    val frag = tab.getPosition match {
      case 0 => new Scripts
      case 1 => new Events
    }
    fragment = Some(frag)
    ft.add(android.R.id.content, frag)
  }
  // Remove the outgoing tab's fragment and clear the tracking field.
  def onTabUnselected(tab:ActionBar.Tab, ft:FragmentTransaction) {
    fragment.foreach { frag =>
      ft.remove(frag)
      fragment = None
    }
  }
  override def onCreateOptionsMenu(menu:Menu) = {
    getMenuInflater.inflate(R.menu.spiel, menu)
    super.onCreateOptionsMenu(menu)
  }
  // Launch the settings activity from the options menu.
  override def onOptionsItemSelected(item:MenuItem) = item.getItemId match {
    case R.id.settings =>
      startActivity[Settings]
      true
    case _ => super.onOptionsItemSelected(item)
  }
}
/**
 * Mixin for building a per-package preference screen out of the checkbox
 * preferences a script declares via Scripter.preferences.
 */
trait HasScriptPreferences {
  protected def context:Context
  def getPreferenceManager:PreferenceManager
  // Builds a PreferenceScreen titled after the package's script label, with
  // one CheckBoxPreference per declared script preference. Keys are namespaced
  // as "<pkg>_<prefName>" to avoid collisions between packages.
  protected def scriptPreferencesFor(pkg:String) = {
    val screen = getPreferenceManager.createPreferenceScreen(context)
    screen.setTitle(Script.labelFor(pkg))
    screen.setEnabled(true)
    screen.setSelectable(true)
    Scripter.preferences(pkg).foreach { pkgpref =>
      // pkgpref: (preferenceName, map with "title"/"summary"/"default" entries)
      val pref = pkgpref._2
      val preference = new CheckBoxPreference(context)
      val key = pkg+"_"+pkgpref._1
      preference.setKey(key)
      preference.setTitle(pref("title").asInstanceOf[String])
      preference.setSummary(pref("summary").asInstanceOf[String])
      preference.setChecked(pref("default").asInstanceOf[Boolean])
      screen.addPreference(preference)
    }
    screen
  }
}
/**
 * Preference fragment that inflates its preferences from an XML resource
 * whose name is passed in the fragment arguments under the key "resource".
 */
class StockPreferenceFragment extends PreferenceFragment {
  override def onCreate(bundle:Bundle) {
    super.onCreate(bundle)
    // Resolve the XML resource id by name at runtime.
    val res = getActivity.getResources.getIdentifier(getArguments.getString("resource"), "xml", getActivity.getPackageName)
    addPreferencesFromResource(res)
  }
}
/** Speech settings screen: populates the TTS engine chooser from the engines
  * installed on the device and links to the system-wide TTS settings. */
class SpeechPreferenceFragment extends StockPreferenceFragment {
  override def onCreate(b:Bundle) {
    super.onCreate(b)
    // Entries are engine labels; entry values are engine IDs, with the empty
    // string standing for the system default.
    val enginesPreference = findPreference("speechEngine").asInstanceOf[ListPreference]
    val engines = TTS.engines(getActivity)
    enginesPreference.setEntries((getString(R.string.systemDefault) :: engines.map(_._1).toList).toArray[CharSequence])
    enginesPreference.setEntryValues(("" :: engines.map(_._2).toList).toArray[CharSequence])
    // Devices without Bluetooth cannot use SCO audio, so hide that option.
    // (Was Option(...).getOrElse { ... } used only for its side effect, which
    // widened the expression type to Any; an explicit null check says what it means.)
    if(BluetoothAdapter.getDefaultAdapter == null)
      getPreferenceScreen.removePreference(findPreference("useBluetoothSCO"))
    // Now set the shortcut to system-wide TTS settings.
    val ttsPreference = findPreference("textToSpeechSettings")
    ttsPreference.setOnPreferenceClickListener(new Preference.OnPreferenceClickListener {
      def onPreferenceClick(p:Preference) = {
        startActivity(new Intent("com.android.settings.TTS_SETTINGS"))
        // false: let the framework's default click handling proceed as before.
        false
      }
    })
  }
}
/** Alerts settings screen; removes the haptic feedback option on devices
  * that have no vibrator. */
class AlertsPreferenceFragment extends StockPreferenceFragment {
  override def onCreate(b:Bundle) {
    super.onCreate(b)
    val service = getActivity.getSystemService(Context.VIBRATOR_SERVICE)
    val canVibrate = service.asInstanceOf[android.os.Vibrator].hasVibrator
    if(!canVibrate)
      getPreferenceScreen.removePreference(findPreference("hapticFeedback"))
  }
}
/** Builds the notification-filter multi-select list from all installed
  * packages, off the UI thread because package enumeration is slow. */
class NotificationFiltersPreferenceFragment extends StockPreferenceFragment {
  override def onCreate(b:Bundle) {
    super.onCreate(b)
    val pm = getActivity.getPackageManager
    future {
      // NOTE(review): findPreference/setEnabled are called here on a
      // background thread; Android expects view-affecting preference calls on
      // the UI thread — confirm this is safe before the runOnUiThread below.
      val notificationFilters = findPreference("notificationFilters").asInstanceOf[MultiSelectListPreference]
      notificationFilters.setShouldDisableView(true)
      notificationFilters.setEnabled(false)
      // Pair each package name with its human-readable label, falling back to
      // the raw package name when the label cannot be loaded.
      val packages = utils.installedPackages.map { pkg =>
        (pkg.packageName, try {
          pm.getApplicationInfo(pkg.packageName, 0).loadLabel(pm).toString
        } catch {
          case _:Throwable => pkg.packageName
        })
      }.sortWith((v1, v2) => v1._2 < v2._2)
      // Publish the computed entries back on the UI thread and re-enable.
      getActivity.runOnUiThread(new Runnable { def run() {
        notificationFilters.setEntryValues(packages.map(_._1.asInstanceOf[CharSequence]).toArray)
        notificationFilters.setEntries(packages.map(_._2.asInstanceOf[CharSequence]).toArray)
        notificationFilters.setEnabled(true)
      }})
    }
  }
}
/** Settings screen listing one sub-screen per package that has registered
  * script preferences. */
class ScriptsPreferenceFragment extends PreferenceFragment with HasScriptPreferences {
  lazy val context = getActivity
  override def onCreate(b:Bundle) {
    super.onCreate(b)
    // Only rebuild the screen when at least one script exposes preferences;
    // with no preferences the screen from the XML resource is left untouched.
    // (Replaces an `if (... == Map.empty)` whose then-branch was empty.)
    if(Scripter.preferences.nonEmpty) {
      val scripts = getPreferenceScreen
      scripts.removeAll()
      Scripter.preferences.foreach { case (pkg, _) =>
        scripts.addPreference(scriptPreferencesFor(pkg))
      }
    }
  }
}
/**
* Activity for setting preferences.
*/
class Settings extends PreferenceActivity with HasScriptPreferences {
  protected val context = this
  /** Builds the settings headers. When launched with a "package" extra, shows
    * only that script's preference screen instead of the full header list. */
  override def onBuildHeaders(target:java.util.List[PreferenceActivity.Header]) {
    // Restructured as a match: the original used `return` inside a foreach
    // lambda, which is a nonlocal return implemented by throwing
    // NonLocalReturnControl, and also called a redundant setIntent(getIntent).
    Option(getIntent.getStringExtra("package")) match {
      case Some(pkg) =>
        val frag = new PreferenceFragment {
          override def onCreate(b:Bundle) {
            super.onCreate(b)
            setPreferenceScreen(scriptPreferencesFor(pkg))
          }
        }
        startPreferenceFragment(frag, false)
        super.onBuildHeaders(target)
      case None =>
        // Normal launch: load the standard preference headers.
        loadHeadersFromResource(R.xml.preference_headers, target)
    }
  }
}
/**
* Trait implementing a "Refresh" menu item and action.
*/
trait Refreshable extends Fragment {
  this: Fragment =>
  override def onActivityCreated(b:Bundle) {
    super.onActivityCreated(b)
    setHasOptionsMenu(true)
  }
  override def onCreateOptionsMenu(menu:Menu, inflater:MenuInflater) {
    inflater.inflate(R.menu.refreshable, menu)
    super.onCreateOptionsMenu(menu, inflater)
  }
  /** Handles the Refresh item; any other item is delegated to the superclass.
    * (The original match had no default case, so selecting any other options
    * item threw a MatchError and never reached super.) */
  override def onOptionsItemSelected(item:MenuItem) = item.getItemId match {
    case R.id.refresh =>
      refresh()
      true
    case _ => super.onOptionsItemSelected(item)
  }
  /** Rebuilds whatever this fragment displays. */
  def refresh()
}
/** Lists user-installed scripts; a context menu offers per-script settings
  * and deletion. */
class Scripts extends ListFragment with Refreshable {
  override def onViewCreated(v:View, b:Bundle) {
    super.onViewCreated(v, b)
    registerForContextMenu(getListView)
    // Populate the list immediately, mirroring Events.onViewCreated; the
    // original left the list empty until the user pressed Refresh.
    refresh()
  }
  def refresh() {
    setListAdapter(
      new ArrayAdapter[Script](
        getActivity,
        android.R.layout.simple_list_item_1,
        Scripter.userScripts
      )
    )
  }
  override def onCreateContextMenu(menu:ContextMenu, v:View, info:ContextMenu.ContextMenuInfo) {
    new MenuInflater(getActivity).inflate(R.menu.scripts_context, menu)
    // Hide the Settings item for scripts that expose no preferences.
    val script = Scripter.userScripts(info.asInstanceOf[AdapterView.AdapterContextMenuInfo].position)
    if(!script.preferences_?) {
      val item = menu.findItem(R.id.settings)
      item.setEnabled(false)
      item.setVisible(false)
    }
  }
  override def onContextItemSelected(item:MenuItem) = {
    val script = Scripter.userScripts(item.getMenuInfo.asInstanceOf[AdapterView.AdapterContextMenuInfo].position)
    item.getItemId match {
      case R.id.settings =>
        // Open the Settings activity scoped to this script's package.
        val intent = SIntent(getActivity, reflect.ClassTag(classOf[Settings]))
        intent.putExtra("package", script.pkg)
        startActivity(intent)
      case R.id.delete =>
        // Confirm, then remove the script both from storage and the runtime.
        new AlertDialogBuilder("", getString(R.string.confirmDelete, script.pkg))(getActivity) {
          positiveButton(android.R.string.yes, {
            script.delete()
            script.uninstall()
            refresh()
          })
          negativeButton(android.R.string.no)
        }.show()
    }
    true
  }
}
/**
* Lists most recently-received AccessibilityEvents.
*/
class Events extends ListFragment with Refreshable {
  override def onViewCreated(v:View, b:Bundle) {
    super.onViewCreated(v, b)
    registerForContextMenu(getListView )
    refresh()
  }
  // Rebuilds the list from a snapshot of the recent-event queue.
  def refresh() {
    setListAdapter(
      new ArrayAdapter[AccessibilityEvent](
        getActivity,
        android.R.layout.simple_list_item_1,
        EventReviewQueue.toArray
      )
    )
  }
  override def onCreateContextMenu(menu:ContextMenu, v:View, info:ContextMenu.ContextMenuInfo) {
    new MenuInflater(getActivity).inflate(R.menu.events_context, menu)
  }
  override def onContextItemSelected(item:MenuItem):Boolean = {
    val position = item.getMenuInfo.asInstanceOf[AdapterView.AdapterContextMenuInfo].position
    // The queue may have shrunk since the menu was shown; bail out quietly.
    if(!EventReviewQueue.isDefinedAt(position)) return true
    val event = EventReviewQueue(position)
    item.getItemId match {
      case R.id.createTemplate =>
        // createTemplateFor returns the new template's filename, if created.
        val filename = Scripter.createTemplateFor(event)
        alert(
          "",
          filename.map { fn =>
            getString(R.string.templateCreated, fn)
          }.getOrElse {
            getString(R.string.templateCreationError)
          }
        )(getActivity)
    }
    true
  }
}
| bramd/spiel | src/main/scala/ui.scala | Scala | apache-2.0 | 9,745 |
package com.geeksville.http
import org.apache.http.client.methods.HttpGet
import org.apache.http.client.methods.HttpPost
import org.apache.http.util.EntityUtils
import java.util.ArrayList
import org.apache.http.NameValuePair
import org.apache.http.message.BasicNameValuePair
import org.apache.http.client.entity.UrlEncodedFormEntity
import org.apache.http.HttpHost
import org.apache.http.impl.client.BasicCredentialsProvider
import org.apache.http.auth.AuthScope
import org.apache.http.auth.UsernamePasswordCredentials
import org.apache.http.impl.client.DefaultHttpClient
import org.json4s.native.JsonMethods._
import org.json4s.JsonAST.JObject
import org.apache.http.client.methods.HttpRequestBase
import scala.xml._
import org.apache.http.client.utils.URLEncodedUtils
/**
* Standard client side glue for talking to HTTP services
* Currently based on apache, but could use spray instead
*/
class HttpClient(val httpHost: HttpHost) {
  protected val httpclient = new DefaultHttpClient()
  // val myhttps = new Protocol("https", new MySSLSocketFactory(), 443);
  /** Shuts down the underlying connection manager; the client is unusable afterwards. */
  def close() {
    httpclient.getConnectionManager().shutdown()
  }
  /** Executes the request against httpHost and returns the response body as a
    * string. Synchronized: the shared DefaultHttpClient is used one request at
    * a time. The entity is read and consumed before the status check so the
    * connection can always be released cleanly in the finally block.
    * NOTE(review): any status other than exactly 200 is treated as failure,
    * including other 2xx codes such as 204 — confirm that is intended. */
  def call(transaction: HttpRequestBase) = synchronized {
    try {
      val response = httpclient.execute(httpHost, transaction)
      val entity = response.getEntity()
      val msg = EntityUtils.toString(entity)
      EntityUtils.consume(entity)
      if (response.getStatusLine.getStatusCode != 200) {
        println(s"HttpClient failure request: $transaction, body: $msg")
        throw new Exception("HttpClient failure: " + response.getStatusLine())
      }
      msg
    } finally {
      transaction.releaseConnection()
    }
  }
  /// Call something with a JSON response
  def callJson(transaction: HttpRequestBase) = {
    val msg = call(transaction)
    parse(msg).asInstanceOf[JObject]
  }
  /// Call something with XML response
  def callXml(transaction: HttpRequestBase) = {
    val msg = call(transaction)
    XML.loadString(msg)
  }
}
| dronekit/dronekit-server | src/main/scala/com/geeksville/http/HttpClient.scala | Scala | gpl-3.0 | 1,997 |
import language.higherKinds
/** Traversable functor: a structure T that can be walked with an effect F,
  * sequencing the effects and rebuilding the structure inside F. */
trait Travers[T[_]] extends Functor[T] with Foldable[T] {
  def traverse[F[_]:Applic,A,B](t: T[A])(f: A => F[B]): F[T[B]]
  // NOTE(review): unimplemented placeholders. map is derivable by traversing
  // with the identity applicative and foldMap with a constant applicative;
  // left as ??? until those Applic instances exist.
  override def map[A,B](functor: T[A])(g: A => B): T[B] = ???
  override def foldMap[A,B:Monoid](foldable: T[A])(f: A => B): B = ???
}
object Travers {
  /** Summons the Travers instance for T and delegates to its traverse. */
  def traverse[T[_]:Travers,F[_]:Applic,A,B](t: T[A])(f: A => F[B]): F[T[B]] =
    implicitly[Travers[T]].traverse(t)(f)
  /** Sequencing is traversal with the identity function: each F-effect in the
    * structure is run in order and the structure is rebuilt inside F.
    * (Was an unimplemented ???.) */
  def sequence[T[_]:Travers,F[_]:Applic,A](t: T[F[A]]): F[T[A]] =
    traverse(t)(identity[F[A]])
}
| grzegorzbalcerek/scala-exercises | Travers/Travers.scala | Scala | bsd-2-clause | 495 |
package org.jetbrains.plugins.scala.util
import scala.language.higherKinds
import scala.reflect.ClassTag
/** This trait is necessary for "opaque type" pattern, which allows to use EnumSet[E]
  * as a type-safe alternative to Int
  **/
trait EnumSetProvider {
  // Abstract bound `<: Int` hides the Int representation from clients while
  // keeping the runtime value unboxed.
  type EnumSet[E <: Enum[E]] <: Int
  /** The set containing no constants. */
  def empty[E <: Enum[E]]: EnumSet[E]
  /** The set containing exactly `e`. */
  def single[E <: Enum[E]](e: E): EnumSet[E]
  /** The union of both sets. */
  def union[E <: Enum[E]](set1: EnumSet[E], set2: EnumSet[E]): EnumSet[E]
  /** `set` with `e` added. */
  def add[E <: Enum[E]](set:EnumSet[E], e: E): EnumSet[E]
  /** Whether `e` is a member of `set`. */
  def contains[E <: Enum[E]](set: EnumSet[E], e: E): Boolean

  //for deserialization only
  def readFromInt[E <: Enum[E]](i: Int): EnumSet[E]
}
object EnumSetProvider {
  /** The single implementation: an EnumSet is an Int bit mask where bit
    * `e.ordinal` records membership of the enum constant `e`. */
  val instance: EnumSetProvider =
    new EnumSetProvider {
      type EnumSet[E <: Enum[E]] = Int

      // Mask with only the bit for this constant set.
      private def bit[E <: Enum[E]](e: E): Int = 1 << e.ordinal()

      override def empty[E <: Enum[E]]: EnumSet[E] = 0

      override def single[E <: Enum[E]](e: E): EnumSet[E] = bit(e)

      override def union[E <: Enum[E]](set1: EnumSet[E], set2: EnumSet[E]): EnumSet[E] = set1 | set2

      def intersect[E <: Enum[E]](set1: EnumSet[E], set2: EnumSet[E]): EnumSet[E] = set1 & set2

      override def add[E <: Enum[E]](set: EnumSet[E], e: E): EnumSet[E] = set | bit(e)

      override def contains[E <: Enum[E]](set: EnumSet[E], e: E): Boolean = (set & bit(e)) != 0

      override def readFromInt[E <: Enum[E]](i: Int): EnumSet[E] = i
    }
}
/** Public API of the enum-set abstraction; delegates to the opaque provider
  * so callers never see the underlying Int. */
object EnumSet {
  import EnumSetProvider.instance

  type EnumSet[E <: Enum[E]] = EnumSetProvider.instance.EnumSet[E]

  def empty[E <: Enum[E]]: EnumSet[E] = instance.empty
  def apply[E <: Enum[E]](e: E): EnumSet[E] = instance.single(e)
  // Fixed-arity overloads avoid varargs array allocation for common cases.
  def apply[E <: Enum[E]](e1: E, e2: E): EnumSet[E] = EnumSet(e1) ++ e2
  def apply[E <: Enum[E]](e1: E, e2: E, e3: E): EnumSet[E] = EnumSet(e1) ++ e2 ++ e3
  def apply[E <: Enum[E]](elems: E*): EnumSet[E] = elems.foldLeft(empty[E])(_ ++ _)
  // For deserialization only; trusts the caller that the bits are valid.
  def readFromInt[E <: Enum[E]](i: Int): EnumSet[E] = instance.readFromInt(i)

  // All constants of E, in ordinal order, recovered via the enum's class.
  private def values[E <: Enum[E]](implicit classTag: ClassTag[E]): Array[E] = {
    val aClass = classTag.runtimeClass.asInstanceOf[Class[E]]
    aClass.getEnumConstants
  }

  implicit class EnumSetOps[E <: Enum[E]](private val set: EnumSet[E]) extends AnyVal {

    //I would prefer `+` here, but it clashes with int addition
    def ++(e: E): EnumSet[E] = instance.add(set, e)

    def ++(e: EnumSet[E]): EnumSet[E] = instance.union(set, e)

    def contains(e: E): Boolean = instance.contains(set, e)

    def isEmpty: Boolean = set == EnumSet.empty

    // Iterates members in ordinal order.
    def foreach(f: E => Unit)(implicit classTag: ClassTag[E]): Unit = toArray.foreach(f)

    def toArray(implicit classTag: ClassTag[E]): Array[E] = {
      values[E].filter(set.contains)
    }
  }
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/util/EnumSet.scala | Scala | apache-2.0 | 2,736 |
package org.apache.spark.sql
import scala.language.implicitConversions
import com.datastax.spark.connector.util.{ConfigParameter, DeprecatedConfigParameter}
import org.apache.spark.sql.streaming.DataStreamWriter
package object cassandra {
/** A data frame format used to access Cassandra through Connector */
val CassandraFormat = "org.apache.spark.sql.cassandra"
/** Returns a map of options which configure the path to Cassandra table as well as whether pushdown is enabled
* or not */
def cassandraOptions(
table: String,
keyspace: String,
cluster: String = CassandraSourceRelation.defaultClusterName,
pushdownEnable: Boolean = true): Map[String, String] =
Map(
DefaultSource.CassandraDataSourceClusterNameProperty -> cluster,
DefaultSource.CassandraDataSourceKeyspaceNameProperty -> keyspace,
DefaultSource.CassandraDataSourceTableNameProperty -> table,
DefaultSource.CassandraDataSourcePushdownEnableProperty -> pushdownEnable.toString)
  // Value-class enrichment: adds Cassandra helpers to DataFrameReader without allocation.
  implicit class DataFrameReaderWrapper(val dfReader: DataFrameReader) extends AnyVal {
    /** Sets the format used to access Cassandra through Connector */
    def cassandraFormat: DataFrameReader = {
      dfReader.format(CassandraFormat)
    }
    /** Sets the format used to access Cassandra through Connector and configure a path to Cassandra table. */
    def cassandraFormat(
      table: String,
      keyspace: String,
      cluster: String = CassandraSourceRelation.defaultClusterName,
      pushdownEnable: Boolean = true): DataFrameReader = {
      cassandraFormat.options(cassandraOptions(table, keyspace, cluster, pushdownEnable))
    }
  }
  // Value-class enrichment: adds Cassandra helpers (format, TTL, writetime) to DataFrameWriter.
  implicit class DataFrameWriterWrapper[T](val dfWriter: DataFrameWriter[T]) extends AnyVal {
    /** Sets the format used to access Cassandra through Connector */
    def cassandraFormat: DataFrameWriter[T] = {
      dfWriter.format(CassandraFormat)
    }
    /** Sets the format used to access Cassandra through Connector and configure a path to Cassandra table. */
    def cassandraFormat(
      table: String,
      keyspace: String,
      cluster: String = CassandraSourceRelation.defaultClusterName,
      pushdownEnable: Boolean = true): DataFrameWriter[T] = {
      cassandraFormat.options(cassandraOptions(table, keyspace, cluster, pushdownEnable))
    }
    // Reads DataFrameWriter's private `source` field via reflection; Spark
    // exposes no public accessor for the configured format.
    private def getSource(): String ={
      val dfSourceField = classOf[DataFrameWriter[_]].getDeclaredField("source")
      dfSourceField.setAccessible(true)
      dfSourceField.get(dfWriter).asInstanceOf[String]
    }
    /** Applies a constant TTL (seconds) to every written row. */
    def withTTL(constant: Int): DataFrameWriter[T] = {
      withTTL(constant.toString)
    }
    /** Takes the per-row TTL from the named column. Fails fast unless the
      * writer's format is the Cassandra source. */
    def withTTL(column: String): DataFrameWriter[T] = {
      val source: String = getSource()
      if (source != CassandraFormat) throw new IllegalArgumentException(
        s"Write destination must be $CassandraFormat for setting TTL. Destination was $source")
      dfWriter.option(CassandraSourceRelation.TTLParam.name, column)
    }
    /** Applies a constant writetime (microseconds) to every written row. */
    def withWriteTime(constant: Long): DataFrameWriter[T] = {
      withWriteTime(constant.toString)
    }
    /** Takes the per-row writetime from the named column. Fails fast unless
      * the writer's format is the Cassandra source. */
    def withWriteTime(column: String): DataFrameWriter[T] = {
      val source: String = getSource()
      if (source != CassandraFormat) throw new IllegalArgumentException(
        s"Write destination must be $CassandraFormat for setting WriteTime. Destination was $source")
      dfWriter.option(CassandraSourceRelation.WriteTimeParam.name, column)
    }
  }
  // Streaming counterpart of DataFrameWriterWrapper; same helpers for DataStreamWriter.
  implicit class DataStreamWriterWrapper[T](val dsWriter: DataStreamWriter[T]) extends AnyVal {
    /** Sets the format used to access Cassandra through Connector */
    def cassandraFormat: DataStreamWriter[T] = {
      dsWriter.format(CassandraFormat)
    }
    /** Sets the format used to access Cassandra through Connector and configure a path to Cassandra table. */
    def cassandraFormat(
      table: String,
      keyspace: String,
      cluster: String = CassandraSourceRelation.defaultClusterName,
      pushdownEnable: Boolean = true): DataStreamWriter[T] = {
      cassandraFormat.options(cassandraOptions(table, keyspace, cluster, pushdownEnable))
    }
    // Reads DataStreamWriter's private `source` field via reflection; Spark
    // exposes no public accessor for the configured format.
    private def getSource(): String ={
      val dfSourceField = classOf[DataStreamWriter[_]].getDeclaredField("source")
      dfSourceField.setAccessible(true)
      dfSourceField.get(dsWriter).asInstanceOf[String]
    }
    /** Applies a constant TTL (seconds) to every written row. */
    def withTTL(constant: Int): DataStreamWriter[T] = {
      withTTL(constant.toString)
    }
    /** Takes the per-row TTL from the named column; requires the Cassandra format. */
    def withTTL(column: String): DataStreamWriter[T] = {
      val source = getSource()
      if (source != CassandraFormat) throw new IllegalArgumentException(
        s"Write destination must be $CassandraFormat for setting TTL. Destination was $source")
      dsWriter.option(CassandraSourceRelation.TTLParam.name, column)
    }
    /** Applies a constant writetime (microseconds) to every written row. */
    def withWriteTime(constant: Long): DataStreamWriter[T] = {
      withWriteTime(constant.toString)
    }
    /** Takes the per-row writetime from the named column; requires the Cassandra format. */
    def withWriteTime(column: String): DataStreamWriter[T] = {
      val source = getSource()
      if (source != CassandraFormat) throw new IllegalArgumentException(
        s"Write destination must be $CassandraFormat for setting WriteTime. Destination was $source")
      dsWriter.option(CassandraSourceRelation.WriteTimeParam.name, column)
    }
  }
  // Legacy SQLContext enrichment, kept for source compatibility; prefer
  // CassandraSparkSessionFunctions below.
  @deprecated("Use SparkSession instead of SQLContext", "2.0.0")
  implicit class CassandraSQLContextFunctions(val sqlContext: SQLContext) extends AnyVal {

    import org.apache.spark.sql.cassandra.CassandraSQLContextParams._

    /** Set current used cluster name */
    @deprecated("Use SparkSession instead of SQLContext", "2.0.0")
    def setCluster(cluster: String): SQLContext = {
      sqlContext.setConf(SqlClusterParam.name, cluster)
      sqlContext
    }

    /** Get current used cluster name */
    @deprecated("Use SparkSession instead of SQLContext", "2.0.0")
    def getCluster: String = sqlContext.getConf(SqlClusterParam.name, SqlClusterParam.default)

    /** Set the Spark Cassandra Connector configuration parameters */
    @deprecated("Use SparkSession instead of SQLContext", "2.0.0")
    def setCassandraConf(options: Map[String, String]): SQLContext = {
      //noinspection ScalaDeprecation
      setCassandraConf(SqlClusterParam.default, options)
      sqlContext
    }

    /** Set the Spark Cassandra Connector configuration parameters which will be used when accessing
      * a given cluster */
    @deprecated("Use SparkSession instead of SQLContext", "2.0.0")
    def setCassandraConf(
        cluster: String,
        options: Map[String, String]): SQLContext = {
      checkOptions(options)
      // Cluster-scoped keys are namespaced as "<cluster>/<param>".
      for ((k, v) <- options) sqlContext.setConf(s"$cluster/$k", v)
      sqlContext
    }

    /** Set the Spark Cassandra Connector configuration parameters which will be used when accessing
      * a given keyspace in a given cluster */
    @deprecated("Use SparkSession instead of SQLContext", "2.0.0")
    def setCassandraConf(
        cluster: String,
        keyspace: String,
        options: Map[String, String]): SQLContext = {
      checkOptions(options)
      // Keyspace-scoped keys are namespaced as "<cluster>:<keyspace>/<param>".
      for ((k, v) <- options) sqlContext.setConf(s"$cluster:$keyspace/$k", v)
      sqlContext
    }
  }
  /** Column expression exposing the Cassandra TTL of the given column. */
  def ttl(column: Column): Column = {
    Column(CassandraTTL(column.expr))
  }

  /** Convenience overload taking the column by name. */
  def ttl(column: String): Column = {
    ttl(Column(column))
  }

  /** Column expression exposing the Cassandra writetime of the given column. */
  def writeTime(column: Column): Column = {
    Column(CassandraWriteTime(column.expr))
  }

  /** Convenience overload taking the column by name. */
  def writeTime(column: String): Column = {
    writeTime(Column(column))
  }
  // SparkSession enrichment mirroring the deprecated SQLContext helpers above.
  implicit class CassandraSparkSessionFunctions(val sparkSession: SparkSession) extends AnyVal {

    import org.apache.spark.sql.cassandra.CassandraSQLContextParams._

    /** Set current used cluster name */
    def setCluster(cluster: String): SparkSession = {
      sparkSession.conf.set(SqlClusterParam.name, cluster)
      sparkSession
    }

    /** Get current used cluster name */
    def getCluster: String = sparkSession.conf.get(SqlClusterParam.name, SqlClusterParam.default)

    /** Set the Spark Cassandra Connector configuration parameters */
    def setCassandraConf(options: Map[String, String]): SparkSession = {
      setCassandraConf(SqlClusterParam.default, options)
      sparkSession
    }

    /** Set the Spark Cassandra Connector configuration parameters which will be used when accessing
      * a given cluster */
    def setCassandraConf(
        cluster: String,
        options: Map[String, String]): SparkSession = {
      checkOptions(options)
      // Cluster-scoped keys are namespaced as "<cluster>/<param>".
      for ((k, v) <- options) sparkSession.conf.set(s"$cluster/$k", v)
      sparkSession
    }

    /** Set the Spark Cassandra Connector configuration parameters which will be used when accessing
      * a given keyspace in a given cluster */
    def setCassandraConf(
        cluster: String,
        keyspace: String,
        options: Map[String, String]): SparkSession = {
      checkOptions(options)
      // Keyspace-scoped keys are namespaced as "<cluster>:<keyspace>/<param>".
      for ((k, v) <- options) sparkSession.conf.set(s"$cluster:$keyspace/$k", v)
      sparkSession
    }
  }
  object CassandraSQLContextParams {
    // Should use general used database than Cassandra specific keyspace?
    // Other source tables don't have keyspace concept. We should make
    // an effort to set CassandraSQLContext a more database like to join
    // tables from other sources. Keyspace is equivalent to database in SQL world
    val ReferenceSection = "Cassandra SQL Context Options"

    val SqlClusterParam = ConfigParameter[String](
      name = "spark.cassandra.sql.cluster",
      section = ReferenceSection,
      default = "default",
      description = "Sets the default Cluster to inherit configuration from")

    // Rejects any option name that is not a known (current or deprecated)
    // connector parameter, so typos fail fast instead of being silently ignored.
    private[cassandra] def checkOptions(options: Map[String, String]): Unit = {
      val AllValidOptions = DeprecatedConfigParameter.names ++ ConfigParameter.names
      options.keySet.foreach { name =>
        require(AllValidOptions.contains(name),
          s"Unrelated parameter. You can only set the following parameters: ${AllValidOptions.mkString(", ")}")
      }
    }
  }
}
| datastax/spark-cassandra-connector | connector/src/main/scala/org/apache/spark/sql/cassandra/package.scala | Scala | apache-2.0 | 10,009 |
package com.twitter.io
import com.twitter.util.StdBenchAnnotations
import java.nio
import org.openjdk.jmh.annotations._
import org.openjdk.jmh.infra.Blackhole
import scala.util.Random
// run via:
// ./sbt 'project util-benchmark' 'jmh:run BufBenchmark'
@State(Scope.Benchmark)
class BufBenchmark extends StdBenchAnnotations {

  @Param(Array("1000"))
  var size: Int = 1000

  // Backing array of 2*size bytes; each Buf flavor views the middle `size`
  // bytes so slicing/offset code paths are exercised.
  private[this] var bytes: Array[Byte] = _
  private[this] var byteArrayBuf: Buf = _
  private[this] var byteBufferBuf: Buf = _
  private[this] var compositeBuf: Buf = _
  // create a 2nd composite that is sliced differently from the other
  // to avoid some implementation artifacts changing the perf.
  private[this] var compositeBuf2: Buf = _
  private[this] var string: String = _
  private[this] var stringBuf: Buf = _

  @Setup(Level.Trial)
  def setup(): Unit = {
    val cap = size * 2
    val start = cap / 4
    val end = start + size
    bytes = 0.until(cap).map(_.toByte).toArray
    val bb = java.nio.ByteBuffer.wrap(bytes, start, size)
    byteArrayBuf = Buf.ByteArray.Owned(bytes, start, end)
    byteBufferBuf = Buf.ByteBuffer.Owned(bb)
    compositeBuf = byteArrayBuf.slice(0, size / 2).concat(byteArrayBuf.slice(size / 2, size))
    compositeBuf2 = byteArrayBuf.slice(0, size / 4).concat(byteArrayBuf.slice(size / 4, size))
    // Fixed seed keeps the string (and thus measurements) reproducible.
    val rnd = new Random(120412421512L)
    string = rnd.nextString(size)
    stringBuf = Buf.Utf8(string)
  }

  // --- equality: all pairings of Buf implementations ---

  @Benchmark
  def equalityByteArrayByteArray(): Boolean =
    byteArrayBuf == byteArrayBuf

  @Benchmark
  def equalityByteArrayByteBuffer(): Boolean =
    byteArrayBuf == byteBufferBuf

  @Benchmark
  def equalityByteArrayComposite(): Boolean =
    byteArrayBuf == compositeBuf

  @Benchmark
  def equalityByteBufferByteArray(): Boolean =
    byteBufferBuf == byteArrayBuf

  @Benchmark
  def equalityByteBufferByteBuffer(): Boolean =
    byteBufferBuf == byteBufferBuf

  @Benchmark
  def equalityByteBufferComposite(): Boolean =
    byteBufferBuf == compositeBuf

  @Benchmark
  def equalityCompositeByteArray(): Boolean =
    compositeBuf == byteArrayBuf

  @Benchmark
  def equalityCompositeByteBuffer(): Boolean =
    compositeBuf == byteBufferBuf

  @Benchmark
  def equalityCompositeComposite(): Boolean =
    compositeBuf == compositeBuf2

  private[this] def hash(buf: Buf): Int = buf.hashCode()

  // --- hashCode: each measurement constructs a fresh Buf (hashCode may be
  // memoized), so a matching *Baseline benchmark measures construction alone.

  @Benchmark
  @Warmup(iterations = 5)
  @Measurement(iterations = 5)
  def hashCodeByteArrayBufBaseline(): Buf =
    Buf.ByteArray.Owned(bytes, 1, size + 1)

  // subtract the results of the Baseline run to get the results
  @Benchmark
  @Warmup(iterations = 5)
  @Measurement(iterations = 5)
  def hashCodeByteArrayBuf(hole: Blackhole): Int = {
    val buf = hashCodeByteArrayBufBaseline()
    hole.consume(buf)
    hash(buf)
  }

  @Benchmark
  @Warmup(iterations = 5)
  @Measurement(iterations = 5)
  def hashCodeByteBufferBufBaseline(): Buf =
    Buf.ByteBuffer.Owned(java.nio.ByteBuffer.wrap(bytes, 1, size))

  // subtract the results of the Baseline run to get the results
  @Benchmark
  @Warmup(iterations = 5)
  @Measurement(iterations = 5)
  def hashCodeByteBufferBuf(hole: Blackhole): Int = {
    val buf = hashCodeByteBufferBufBaseline()
    hole.consume(buf)
    hash(buf)
  }

  @Benchmark
  @Warmup(iterations = 5)
  @Measurement(iterations = 5)
  def hashCodeCompositeBufBaseline(): Buf =
    Buf.ByteArray.Owned(bytes, 0, 5).concat(Buf.ByteArray.Owned(bytes, 5, size))

  // subtract the results of the Baseline run to get the results
  @Benchmark
  @Warmup(iterations = 5)
  @Measurement(iterations = 5)
  def hashCodeCompositeBuf(hole: Blackhole): Int = {
    val buf = hashCodeCompositeBufBaseline()
    hole.consume(buf)
    hash(buf)
  }

  // --- slicing the middle half out of each implementation ---

  private[this] def slice(buf: Buf): Buf =
    buf.slice(size / 4, size / 4 + size / 2)

  @Benchmark
  def sliceByteArrayBuf(): Buf =
    slice(byteArrayBuf)

  @Benchmark
  def sliceByteBufferBuf(): Buf =
    slice(byteBufferBuf)

  @Benchmark
  def sliceCompositeBuf(): Buf =
    slice(compositeBuf)

  // --- conversions to ByteBuffer / Array[Byte] ---

  private[this] def asByteBuffer(buf: Buf): nio.ByteBuffer =
    Buf.ByteBuffer.Owned.extract(buf)

  @Benchmark
  def asByteBufferByteArrayBuf(): nio.ByteBuffer =
    asByteBuffer(byteArrayBuf)

  @Benchmark
  def asByteBufferByteBufferBuf(): nio.ByteBuffer =
    asByteBuffer(byteBufferBuf)

  @Benchmark
  def asByteBufferCompositeBuf(): nio.ByteBuffer =
    asByteBuffer(compositeBuf)

  private[this] def asByteArray(buf: Buf): Array[Byte] =
    Buf.ByteArray.Owned.extract(buf)

  @Benchmark
  def asByteArrayByteArrayBuf(): Array[Byte] =
    asByteArray(byteArrayBuf)

  @Benchmark
  def asByteArrayByteBufferBuf(): Array[Byte] =
    asByteArray(byteBufferBuf)

  @Benchmark
  def asByteArrayCompositeBuf(): Array[Byte] =
    asByteArray(compositeBuf)

  // --- UTF-8 string round trips ---

  @Benchmark
  def stringToUtf8Buf(): Buf =
    Buf.Utf8(string)

  @Benchmark
  def utf8BufToString(): String = {
    val Buf.Utf8(str) = stringBuf
    str
  }

  // --- single-byte access: slice+write vs direct get ---

  // Shared scratch buffer; safe because JMH runs these single-threaded.
  private val out = new Array[Byte](1)

  private[this] def singleByteSliceAndWrite(buf: Buf): Byte = {
    buf.slice(0, 1).write(out, 0)
    out(0)
  }

  private[this] def singleByteGet(buf: Buf): Byte =
    buf.get(0)

  @Benchmark
  def singleByteSliceAndWriteByteArray(): Byte =
    singleByteSliceAndWrite(byteArrayBuf)

  @Benchmark
  def singleByteSliceAndWriteByteBuffer(): Byte =
    singleByteSliceAndWrite(byteBufferBuf)

  @Benchmark
  def singleByteSliceAndWriteCompositeBuf(): Byte =
    singleByteSliceAndWrite(compositeBuf)

  @Benchmark
  def singleByteIndexedByteArray(): Byte =
    singleByteGet(byteArrayBuf)

  @Benchmark
  def singleByteIndexedByteBuffer(): Byte =
    singleByteGet(byteBufferBuf)

  @Benchmark
  def singleByteIndexedCompositeBuf(): Byte =
    singleByteGet(compositeBuf)
}
| BuoyantIO/twitter-util | util-benchmark/src/main/scala/com/twitter/io/BufBenchmark.scala | Scala | apache-2.0 | 5,748 |
package api
import play.api.libs.functional.syntax._
import play.api.libs.json.Reads.StringReads
import play.api.libs.json.{JsPath, Reads}
/** Minimal projection of a Twitter user record. */
case class User(username: String, followersCount: Long, friendsCount: Long)

object User {
  // JSON reader mapping Twitter API field names onto User.
  // NOTE(review): `\\` is JsPath's *recursive* search; a direct top-level
  // field lookup would be a single `\`. This works while the field names are
  // unique in the payload — confirm recursive lookup is intended.
  implicit val userReads: Reads[User] = (
    (JsPath \\ "screen_name").read[String] and
      (JsPath \\ "followers_count").read[Long] and
      (JsPath \\ "friends_count").read[Long]
    )(User.apply _)
}
| rtfpessoa/distributed-twitter-crawler | app/api/User.scala | Scala | mit | 436 |
/* Copyright 2009-2016 EPFL, Lausanne */
import leon.lang._
import leon.lang.synthesis._
import leon.annotation._
/** Peano-style natural numbers used as a Leon synthesis benchmark. */
object Numerals {
  sealed abstract class Num
  case object Z extends Num
  case class S(pred: Num) extends Num

  // Interprets a numeral as the non-negative integer it encodes.
  def value(n: Num): BigInt = {
    n match {
      case Z => 0
      case S(p) => 1 + value(p)
    }
  } ensuring (_ >= 0)

  // `choose` is a Leon synthesis hole, not executable code: the tool must
  // synthesize a numeral whose value is the sum of the inputs' values.
  def add(x: Num, y: Num): Num = {
    choose { (r: Num) =>
      value(r) == value(x) + value(y)
    }
  }
}
| epfl-lara/leon | src/test/resources/regression/performance/cegis/Add.scala | Scala | gpl-3.0 | 465 |
package gov.uk.dvla.vehicles.dispose.stepdefs
import cucumber.api.java.en.{Then, When, Given}
import org.openqa.selenium.WebDriver
import org.scalatest.selenium.WebBrowser.pageTitle
import org.scalatest.selenium.WebBrowser.click
import org.scalatest.selenium.WebBrowser.go
import pages.disposal_of_vehicle.BeforeYouStartPage
import pages.disposal_of_vehicle.BusinessChooseYourAddressPage
import pages.disposal_of_vehicle.DisposePage
import pages.disposal_of_vehicle.DisposeSuccessPage
import pages.disposal_of_vehicle.SetupTradeDetailsPage
import pages.disposal_of_vehicle.VehicleLookupPage
import uk.gov.dvla.vehicles.presentation.common.helpers.webbrowser.{WithClue, WebBrowserDriver}
/** Cucumber step definitions walking the happy path of the vehicle disposal
  * journey: trade setup -> address -> vehicle lookup -> dispose -> summary. */
class DemoTestSteps(webBrowserDriver: WebBrowserDriver) extends gov.uk.dvla.vehicles.dispose.helpers.AcceptanceTestHelper {

  // ScalaTest's page objects need an implicit WebDriver in scope.
  implicit val webDriver = webBrowserDriver.asInstanceOf[WebDriver]

  @Given("^I am on the vehicles online prototype site url$")
  def i_am_on_the_vehicles_online_prototype_site_url() {
    go to BeforeYouStartPage
  }

  @Given("^I click the Start now button to begin the transaction$")
  def i_click_the_Start_now_button_to_begin_the_transaction() {
    click on BeforeYouStartPage.startNow
  }

  @Given("^I enter trader name and postcode then click on next button$")
  def i_enter_trader_name_and_postcode_then_click_on_next_button() {
    // Each step first asserts it landed on the expected page before acting.
    pageTitle shouldEqual SetupTradeDetailsPage.title withClue trackingId
    SetupTradeDetailsPage.traderName.value = "sudotrader"
    SetupTradeDetailsPage.traderPostcode.value = "qq99qq"
    click on SetupTradeDetailsPage.emailInvisible
    click on SetupTradeDetailsPage.lookup
  }

  @Given("^Select the address form address choose page then click on next button$")
  def select_the_address_form_address_choose_page_then_click_on_next_button() {
    pageTitle shouldEqual BusinessChooseYourAddressPage.title withClue trackingId
    BusinessChooseYourAddressPage.chooseAddress.value = BusinessChooseYourAddressPage.selectedAddressLine
    click on BusinessChooseYourAddressPage.select
  }

  @When("^I enter vehicle look up details and click on submit button$")
  def i_enter_vehicle_look_up_details_and_click_on_submit_button() {
    pageTitle shouldEqual VehicleLookupPage.title withClue trackingId
    VehicleLookupPage.vehicleRegistrationNumber.value = "b1"
    VehicleLookupPage.documentReferenceNumber.value = "11111111111"
    click on VehicleLookupPage.findVehicleDetails
  }

  @Then("^I should be taken to complete and confirm page and fill the required details and click on confirm sale button$")
  def i_should_be_taken_to_complete_and_confirm_page_and_fill_the_required_details_and_click_on_confirm_sale_button() {
    // Canned mileage/date values come from the fake web-service fixtures.
    import webserviceclients.fakes.FakeDateServiceImpl.DateOfDisposalDayValid
    import webserviceclients.fakes.FakeDateServiceImpl.DateOfDisposalMonthValid
    import webserviceclients.fakes.FakeDateServiceImpl.DateOfDisposalYearValid
    import webserviceclients.fakes.FakeDisposeWebServiceImpl.MileageValid
    pageTitle shouldEqual DisposePage.title withClue trackingId
    DisposePage.mileage.value = MileageValid
    DisposePage.dateOfDisposalDay.value = DateOfDisposalDayValid
    DisposePage.dateOfDisposalMonth.value = DateOfDisposalMonthValid
    DisposePage.dateOfDisposalYear.value = DateOfDisposalYearValid
    click on DisposePage.consent
    click on DisposePage.lossOfRegistrationConsent
    click on DisposePage.emailInvisible
    click on DisposePage.dispose
  }

  @Then("^I am on the summary page$")
  def i_am_on_the_summary_page() {
    pageTitle shouldEqual DisposeSuccessPage.title withClue trackingId
  }
}
| dvla/vehicles-online | acceptance-tests/src/test/scala/gov/uk/dvla/vehicles/dispose/stepdefs/DemoTestSteps.scala | Scala | mit | 3,620 |
package ru.maizy.ambient7.core.config.options
/**
* Copyright (c) Nikita Kovaliov, maizy.ru, 2017
* See LICENSE.txt for details.
*/
// TODO: should be only in webapp submodule
/** Configuration specific to the ambient7 web application.
  *
  * @param port TCP port the web app listens on (defaults to 22480)
  */
case class WebAppSpecificOptions(
  port: Int = 22480
)
| maizy/ambient7 | core/src/main/scala/ru/maizy/ambient7/core/config/options/WebAppSpecificOptions.scala | Scala | apache-2.0 | 240 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.view
import org.geotools.data.{DataStore, Query}
import org.opengis.feature.simple.SimpleFeatureType
/**
  * Routes each query to at most one of a configured set of data stores.
  */
trait RouteSelector {

  /**
   * Initialize this instance with the data stores to select from.
   *
   * @param stores stores paired with their configuration maps
   */
  def init(stores: Seq[(DataStore, java.util.Map[String, _ <: AnyRef])]): Unit

  /**
   * Route a query to a particular store. If no store is selected (None), the
   * query will return empty results.
   *
   * @param sft simple feature type
   * @param query query
   * @return the store that should handle the query, if any
   */
  def route(sft: SimpleFeatureType, query: Query): Option[DataStore]
}
| locationtech/geomesa | geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/view/RouteSelector.scala | Scala | apache-2.0 | 1,156 |
// Regression test for scala/bug#2027: forcing an element of this
// self-referential Stream of Fibonacci numbers used to overflow the stack.
// The structure (local def, zip with tail) is deliberate — do not "simplify".
object Test {
  def main(args: Array[String]) {
    def fibs: Stream[Int] = Stream.cons(0, Stream.cons(1, fibs.zip(fibs.tail).map(p => p._1 + p._2)))
    println(fibs(2)) // stack overflow
  }
}
| felixmulder/scala | test/files/run/t2027.scala | Scala | bsd-3-clause | 195 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnet.infer
import org.apache.mxnet.{DType, DataDesc, Shape, NDArray, Context}
import org.mockito.Matchers._
import org.mockito.Mockito
import org.scalatest.BeforeAndAfterAll
// scalastyle:off
import java.awt.image.BufferedImage
// scalastyle:on
/**
* Unit tests for ImageClassifier
*/
/**
 * Unit tests for [[ImageClassifier]]: image rescaling, BufferedImage-to-NDArray
 * conversion, and single / batch classification against mocked predictors.
 */
class ImageClassifierSuite extends ClassifierSuite with BeforeAndAfterAll {

  // Test double: replaces the real predictor and classifier with Mockito mocks so
  // no model artifacts are loaded from modelPathPrefix.
  class MyImageClassifier(modelPathPrefix: String,
                          inputDescriptors: IndexedSeq[DataDesc])
    extends ImageClassifier(modelPathPrefix, inputDescriptors) {

    // Return a mock instead of constructing a real MXNet predictor.
    override def getPredictor(): MyClassyPredictor = {
      Mockito.mock(classOf[MyClassyPredictor])
    }

    // Return a mock classifier regardless of the supplied contexts/epoch.
    override def getClassifier(modelPathPrefix: String, inputDescriptors:
    IndexedSeq[DataDesc], contexts: Array[Context] = Context.cpu(),
                               epoch: Option[Int] = Some(0)): Classifier = {
      Mockito.mock(classOf[Classifier])
    }

    // Expose the synset (class labels) for assertions in tests.
    def getSynset(): IndexedSeq[String] = synset
  }

  test("ImageClassifierSuite-testRescaleImage") {
    // Reshaping a 100x200 grayscale image to 1000x2000 must produce those exact dimensions.
    val image1 = new BufferedImage(100, 200, BufferedImage.TYPE_BYTE_GRAY)
    val image2 = ImageClassifier.reshapeImage(image1, 1000, 2000)
    assert(image2.getWidth === 1000)
    assert(image2.getHeight === 2000)
  }

  test("ImageClassifierSuite-testConvertBufferedImageToNDArray") {
    // A 2x2 image converted to pixels must match the NCHW descriptor shape (1, 3, 2, 2).
    val dType = DType.Float32
    val inputDescriptor = IndexedSeq[DataDesc](new DataDesc(modelPath, Shape(1, 3, 2, 2),
      dType, "NCHW"))
    val image1 = new BufferedImage(100, 200, BufferedImage.TYPE_BYTE_GRAY)
    val image2 = ImageClassifier.reshapeImage(image1, 2, 2)
    val result = ImageClassifier.bufferedImageToPixels(image2, Shape(1, 3, 2, 2))
    assert(result.shape == inputDescriptor(0).shape)
  }

  test("ImageClassifierSuite-testWithInputImage") {
    val dType = DType.Float32
    val inputDescriptor = IndexedSeq[DataDesc](new DataDesc(modelPath, Shape(1, 3, 512, 512),
      dType, "NCHW"))
    val inputImage = new BufferedImage(224, 224, BufferedImage.TYPE_INT_RGB)
    val testImageClassifier: ImageClassifier =
      new MyImageClassifier(modelPath, inputDescriptor)

    // Raw probabilities the mocked predictor will return for a single image.
    val predictExpected: IndexedSeq[Array[Float]] =
      IndexedSeq[Array[Float]](Array(.98f, 0.97f, 0.96f, 0.99f))
    val synset = testImageClassifier.synset
    // Same probabilities paired with labels, as the mocked classifier would emit.
    val predictExpectedOp: List[(String, Float)] =
      List[(String, Float)]((synset(1), .98f), (synset(2), .97f),
        (synset(3), .96f), (synset(0), .99f))
    val predictExpectedND: NDArray = NDArray.array(predictExpected.flatten.toArray, Shape(1, 4))

    // Stub both the predictor and classifier layers before invoking classifyImage.
    Mockito.doReturn(IndexedSeq(predictExpectedND)).when(testImageClassifier.predictor)
      .predictWithNDArray(any(classOf[IndexedSeq[NDArray]]))

    Mockito.doReturn(IndexedSeq(predictExpectedOp))
      .when(testImageClassifier.getClassifier(modelPath, inputDescriptor))
      .classifyWithNDArray(any(classOf[IndexedSeq[NDArray]]), Some(anyInt()))

    val predictResult: IndexedSeq[IndexedSeq[(String, Float)]] =
      testImageClassifier.classifyImage(inputImage, Some(4))

    // classifyImage sorts by descending probability, hence sortBy(-_) on the expectation.
    for (i <- predictExpected.indices) {
      assertResult(predictExpected(i).sortBy(-_)) {
        predictResult(i).map(_._2).toArray
      }
    }
  }

  test("ImageClassifierSuite-testWithInputBatchImage") {
    val dType = DType.Float32
    val inputDescriptor = IndexedSeq[DataDesc](new DataDesc(modelPath, Shape(1, 3, 512, 512),
      dType, "NCHW"))
    val inputImage = new BufferedImage(224, 224, BufferedImage.TYPE_INT_RGB)
    // Two identical images exercise the batch path.
    val imageBatch = IndexedSeq[BufferedImage](inputImage, inputImage)
    val testImageClassifier: ImageClassifier =
      new MyImageClassifier(modelPath, inputDescriptor)

    // One probability row per image in the batch.
    val predictExpected: IndexedSeq[Array[Array[Float]]] =
      IndexedSeq[Array[Array[Float]]](Array(Array(.98f, 0.97f, 0.96f, 0.99f),
        Array(.98f, 0.97f, 0.96f, 0.99f)))
    val synset = testImageClassifier.synset
    val predictExpectedOp: List[List[(String, Float)]] =
      List[List[(String, Float)]](List((synset(1), .98f), (synset(2), .97f),
        (synset(3), .96f), (synset(0), .99f)),
        List((synset(1), .98f), (synset(2), .97f),
          (synset(3), .96f), (synset(0), .99f)))
    val predictExpectedND: NDArray = NDArray.array(predictExpected.flatten.flatten.toArray,
      Shape(2, 4))

    Mockito.doReturn(IndexedSeq(predictExpectedND)).when(testImageClassifier.predictor)
      .predictWithNDArray(any(classOf[IndexedSeq[NDArray]]))

    Mockito.doReturn(IndexedSeq(predictExpectedOp))
      .when(testImageClassifier.getClassifier(modelPath, inputDescriptor))
      .classifyWithNDArray(any(classOf[IndexedSeq[NDArray]]), Some(anyInt()))

    val result: IndexedSeq[IndexedSeq[(String, Float)]] =
      testImageClassifier.classifyImageBatch(imageBatch, Some(4))

    for (i <- predictExpected.indices) {
      for (idx <- predictExpected(i).indices) {
        assertResult(predictExpected(i)(idx).sortBy(-_)) {
          result(i).map(_._2).toArray
        }
      }
    }
  }
}
| indhub/mxnet | scala-package/infer/src/test/scala/org/apache/mxnet/infer/ImageClassifierSuite.scala | Scala | apache-2.0 | 5,781 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package system.basic
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import common.JsHelpers
import common.WskTestHelpers
import common.Wsk
/** Runs the shared unicode round-trip suite via the wsk CLI against the python:3 runtime. */
@RunWith(classOf[JUnitRunner])
class WskCliUnicodePython3Tests extends WskUnicodeTests with WskTestHelpers with JsHelpers {
  override val wsk: common.Wsk = new Wsk          // CLI-backed wsk implementation
  override lazy val actionKind: String = "python:3"     // action runtime under test
  override lazy val actionSource: String = "unicode3.py" // action source used by the suite
}
| duynguyen/incubator-openwhisk | tests/src/test/scala/system/basic/WskCliUnicodePython3Tests.scala | Scala | apache-2.0 | 1,247 |
package com.github.shadowsocks
import android.annotation.TargetApi
import android.graphics.drawable.Icon
import android.service.quicksettings.{Tile, TileService}
import com.github.shadowsocks.aidl.IShadowsocksServiceCallback
import com.github.shadowsocks.utils.{State, Utils}
import com.github.shadowsocks.ShadowsocksApplication.app
/**
* @author Mygod
*/
object ShadowsocksTileService {
  // True while a ShadowsocksTileService instance is alive (set in onCreate,
  // cleared in onDestroy); lets other components know a tile is active.
  var running: Boolean = _
}
/** Quick-settings tile (API 24+) that reflects and toggles the Shadowsocks service state. */
@TargetApi(24)
final class ShadowsocksTileService extends TileService with ServiceBoundContext {
  import ShadowsocksTileService._

  // Tile icons for the three states; idle is tinted half-transparent white.
  private lazy val iconIdle = Icon.createWithResource(this, R.drawable.ic_start_idle).setTint(0x80ffffff)
  private lazy val iconBusy = Icon.createWithResource(this, R.drawable.ic_start_busy)
  private lazy val iconConnected = Icon.createWithResource(this, R.drawable.ic_start_connected)

  // AIDL callback: updates the tile's icon/label/state whenever the service reports a state change.
  private lazy val callback = new IShadowsocksServiceCallback.Stub {
    def trafficUpdated(txRate: Long, rxRate: Long, txTotal: Long, rxTotal: Long) = ()  // traffic stats not shown on the tile
    def stateChanged(state: Int, msg: String) {
      val tile = getQsTile
      if (tile != null) {  // tile may be unavailable when not listening
        state match {
          case State.STOPPED =>
            tile.setIcon(iconIdle)
            tile.setLabel(getString(R.string.app_name))
            tile.setState(Tile.STATE_INACTIVE)
          case State.CONNECTED =>
            tile.setIcon(iconConnected)
            // Show the active profile's name when one is selected.
            tile.setLabel(app.currentProfile match {
              case Some(profile) => profile.name
              case None => getString(R.string.app_name)
            })
            tile.setState(Tile.STATE_ACTIVE)
          case _ =>
            // Transitional states (starting/stopping): busy icon, tile not clickable.
            tile.setIcon(iconBusy)
            tile.setLabel(getString(R.string.app_name))
            tile.setState(Tile.STATE_UNAVAILABLE)
        }
        tile.updateTile
      }
    }
  }

  // Sync the tile with the current service state as soon as the binder connects.
  override def onServiceConnected() = callback.stateChanged(bgService.getState, null)

  override def onCreate {
    super.onCreate
    running = true
  }

  override def onDestroy {
    super.onDestroy
    running = false
  }

  // Bind to the background service only while the tile is visible.
  override def onStartListening {
    super.onStartListening
    attachService(callback)
  }

  override def onStopListening {
    super.onStopListening
    detachService // just in case the user switches to NAT mode, also saves battery
  }

  // On a locked device, require unlock before toggling the VPN.
  override def onClick() = if (isLocked) unlockAndRun(toggle) else toggle()

  // Start when stopped, stop when connected; ignore taps during transitions.
  private def toggle() = if (bgService != null) bgService.getState match {
    case State.STOPPED => Utils.startSsService(this)
    case State.CONNECTED => Utils.stopSsService(this)
    case _ => // ignore
  }
}
| otoil/shadowsocks-android | src/main/scala/com/github/shadowsocks/ShadowsocksTileService.scala | Scala | gpl-3.0 | 2,555 |
/**
* Copyright 2014 Dropbox, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package djinni.syntax
import java.io.File
/** A location (line and column) inside a source file, used in error reporting. */
case class Loc(file: File, line: Int, col: Int) {
  // Rendered as: /absolute/path (line.col)
  override def toString(): String = s"${file.getAbsolutePath()} ($line.$col)"
}
/** A parse/validation error: a message tied to a source location. */
case class Error(loc: Loc, msg: String) {
  // Rendered as: <loc>: <msg>
  override def toString(): String = s"$loc: $msg"
  /** Wraps this error in a throwable so it can cross exception boundaries. */
  def toException: Error.Exception = Error.Exception(this)
}
object Error {
  /** Throwable carrier for an [[Error]]; message is the error's string form. */
  case class Exception(error: Error) extends java.lang.Exception(error.toString)
}
| aijiekj/djinni | src/source/syntax.scala | Scala | apache-2.0 | 1,039 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ly.stealth.mesos.exhibitor
import play.api.libs.functional.syntax._
import play.api.libs.json._
import scala.collection.mutable.ListBuffer
/** Mutable registry of Exhibitor servers plus the framework id, persisted via [[Storage]]. */
case class Cluster(exhibitorServers: List[ExhibitorServer] = Nil) {
  private val storage = Cluster.newStorage(Config.storage)

  private[exhibitor] var frameworkId: Option[String] = None
  private[exhibitor] val servers = new ListBuffer[ExhibitorServer]

  // seed the mutable server list with anything that was passed to the constructor
  exhibitorServers.foreach(servers += _)

  /** Finds a server by id, if present. */
  def getServer(id: String): Option[ExhibitorServer] = servers.find(_.id == id)

  /** Adds the server unless one with the same id already exists.
    *
    * @return true if the server was added, false if the id was taken
    */
  def addServer(server: ExhibitorServer): Boolean = {
    if (servers.exists(_.id == server.id)) false
    else {
      servers += server
      true
    }
  }

  /** Expands a comma-separated id expression into concrete server ids.
    *
    * A part of "*" selects every known server id; any other part is parsed as a
    * [[Util.Range]] and expanded. Non-wildcard results are deduplicated and sorted.
    *
    * @throws IllegalArgumentException if the expression is null or empty
    */
  def expandIds(expr: String): List[String] = {
    if (expr == null || expr.isEmpty) throw new IllegalArgumentException("ID expression cannot be null or empty")
    val parts = expr.split(",")
    // "*" anywhere short-circuits to all known ids (previously done via a
    // nonlocal `return` inside the flatMap lambda, which relies on
    // NonLocalReturnControl exceptions — replaced with an explicit check).
    if (parts.contains("*")) servers.map(_.id).toList
    else parts.toList.flatMap(part => Util.Range(part).values.map(_.toString)).distinct.sorted
  }

  /** Persists this cluster through the configured storage. */
  def save() = storage.save(this)(Cluster.writer)

  /** Restores persisted state; currently only the framework id is loaded. */
  def load() {
    storage.load(Cluster.reader).foreach { cluster =>
      this.frameworkId = cluster.frameworkId
      //TODO load servers too
    }
  }

  override def toString: String = servers.toString()
}
object Cluster {
  // Parses the storage spec (e.g. "file:cluster.json") into a Storage backend.
  private def newStorage(storage: String): Storage[Cluster] = {
    storage.split(":", 2) match {
      case Array("file", fileName) => FileStorage(fileName)
      case _ => throw new IllegalArgumentException(s"Unsupported storage: $storage")
    }
  }

  // JSON writer: serializes the framework id and the current server list.
  implicit val writer = new Writes[Cluster] {
    override def writes(o: Cluster): JsValue = Json.obj("frameworkid" -> o.frameworkId, "cluster" -> o.servers.toList)
  }

  // JSON reader: rebuilds a Cluster and re-attaches the optional framework id.
  implicit val reader = ((__ \\ 'frameworkid).readNullable[String] and
    (__ \\ 'cluster).read[List[ExhibitorServer]])((frameworkId, servers) => {
    val cluster = Cluster(servers)
    cluster.frameworkId = frameworkId
    cluster
  })
}
| CiscoCloud/exhibitor-mesos-framework | src/main/scala/ly/stealth/mesos/exhibitor/Cluster.scala | Scala | apache-2.0 | 2,880 |
package com.github.j5ik2o.forseti.adaptor.handler.flow.`implicit`
import java.net.URI
import com.github.j5ik2o.forseti.adaptor.handler.model.AuthorizationResponse
import com.github.j5ik2o.forseti.domain.exception.InvalidRequestException
import com.github.j5ik2o.forseti.domain.{MessageBase, Scope}
import scalaz.{Maybe, \\/}
/** OAuth2 implicit-grant authorization response: response type, scope and
  * optional state, each parsed into an error-or-value disjunction. */
trait AuthorizationImplicitResponse extends AuthorizationResponse {
  val responseType: InvalidRequestException \\/ String
  val scope: InvalidRequestException \\/ Scope
  val state: InvalidRequestException \\/ Maybe[String]
  // For the implicit flow the full redirect URI is the redirect URI itself.
  override lazy val asFullRedirectUri: InvalidRequestException \\/ URI = redirectUri
}
object AuthorizationImplicitResponse {
  /** Builds an implicit-grant response with cache-busting headers and the
    * standard OAuth2 parameters (response_type=token, client_id, redirect_uri,
    * plus scope/state only when present). */
  def apply(
      clientId: String,
      redirectUri: URI,
      scope: Scope,
      state: Maybe[String]
  ): AuthorizationImplicitResponse = new Default(
    // Per OAuth2, token responses must not be cached.
    Map("Cache-Control" -> Seq("no-store"), "Pragma" -> Seq("no-cache")),
    Map(
      "response_type" -> Seq("token"),
      "client_id" -> Seq(clientId),
      "redirect_uri" -> Seq(redirectUri.toString)
    )
    // scope is space-delimited; omitted entirely when empty
    ++ (if (scope.nonEmpty) Map("scope" -> Seq(scope.values.mkString(" "))) else Map.empty)
    ++ state.toOption.map("state" -> Seq(_))
  )

  // Default implementation: lazily parses/validates each field out of the param map.
  class Default(headers: Map[String, Seq[String]], params: Map[String, Seq[String]])
    extends MessageBase(headers, params)
    with AuthorizationImplicitResponse {

    override val responseType: InvalidRequestException \\/ String = requireParam("response_type")

    // redirect_uri is required and must parse as a URI.
    override val redirectUri: InvalidRequestException \\/ URI =
      requireParam("redirect_uri").flatMap { v =>
        \\/.fromTryCatchThrowable[URI, InvalidRequestException] {
          URI.create(v)
        }
      }

    // scope is optional; absence yields the empty scope.
    override val scope: InvalidRequestException \\/ Scope =
      param("scope").map(_.map(_.split(" ").toSeq).map(Scope(_)).getOrElse(Scope.empty))

    override val state: InvalidRequestException \\/ Maybe[String] = param("state")
  }
}
| j5ik2o/forseti | server/server-use-case-port/src/main/scala/com/github/j5ik2o/forseti/adaptor/handler/flow/implicit/AuthorizationImplicitResponse.scala | Scala | mit | 1,931 |
package com.cloudray.scalapress.plugin.ecommerce.tags
import org.joda.time.{DateTimeZone, DateTime}
import com.cloudray.scalapress.theme.MarkupRenderer
import com.cloudray.scalapress.theme.tag.{ScalapressTag, TagBuilder}
import scala.collection.JavaConverters._
import com.cloudray.scalapress.plugin.ecommerce.ShoppingPluginDao
import com.cloudray.scalapress.item.attr.AttributeValueRenderer
import com.cloudray.scalapress.framework.{ScalapressRequest, Tag}
/** @author Stephen Samuel */
/** Renders the account number (the account's id) for the invoice's order. */
@Tag("invoice_account_number")
class InvoiceAccountNumberTag extends ScalapressTag {
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] =
    request.order.map(order => order.account.id.toString)
}
/** Renders the name of the account that placed the order. */
@Tag("invoice_account_name")
class InvoiceAccountNameTag extends ScalapressTag {
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] =
    request.order.map(order => order.account.name)
}
/** Renders the email address of the account that placed the order. */
@Tag("invoice_account_email")
class InvoiceAccountEmailTag extends ScalapressTag {
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] =
    request.order.map(order => order.account.email)
}
/** Renders the delivery address label, when the order has a delivery address. */
@Tag("invoice_delivery_address")
class InvoiceDeliveryAddressTag extends ScalapressTag {
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] =
    for {
      order <- request.order
      address <- Option(order.deliveryAddress) // deliveryAddress may be null
    } yield address.label
}
/** Renders the billing address label, when the order has a billing address. */
@Tag("invoice_billing_address")
class InvoiceBillingAddressTag extends ScalapressTag {
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] =
    for {
      order <- request.order
      address <- Option(order.billingAddress) // billingAddress may be null
    } yield address.label
}
/** Renders the date the order was placed, as dd/MM/yyyy in the Europe/London zone. */
@Tag("invoice_date")
class InvoiceDateTag extends ScalapressTag {
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] =
    request.order.map { order =>
      val placed = new DateTime(order.datePlaced, DateTimeZone.forID("Europe/London"))
      placed.toString("dd/MM/yyyy")
    }
}
/** Renders the customer's note on the order, when one was supplied. */
@Tag("invoice_customer_note")
class InvoiceCustomerNoteTag extends ScalapressTag {
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] =
    for {
      order <- request.order
      note <- Option(order.customerNote) // customerNote may be null
    } yield note
}
@Tag("invoice_attribute_value")
class InvoiceAttributeValueTag extends ScalapressTag with TagBuilder {
  // Renders the value of attribute <id> for the item referenced by the current
  // order line; emits an HTML comment when the id parameter is missing and None
  // when the item or attribute is absent.
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] = {
    params.get("id") match {
      case None => Some("<!-- no id specified for attribute tag -->")
      case Some(id) => {
        // NOTE(review): id.trim.toLong throws NumberFormatException for a
        // non-numeric id parameter — confirm ids are validated upstream.
        request.orderLine.flatMap(line => Option(request.context.itemDao.find(line.obj))).flatMap(obj => {
          obj.attributeValues.asScala.find(_.attribute.id == id.trim.toLong) match {
            case None => None
            case Some(av) => Some(build(AttributeValueRenderer.renderValue(av), params))
          }
        })
      }
    }
  }
}
/** Renders the quantity of the current order line. */
@Tag("invoice_line_qty")
class InvoiceLineQtyTag extends ScalapressTag {
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] =
    request.orderLine.map(line => line.qty.toString)
}
@Tag("invoice_lines")
class InvoiceLinesTag extends ScalapressTag {
  // Renders every order line using the invoice-line markup configured in the
  // shopping plugin; yields None when no markup is configured.
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] = {
    request.order.flatMap(order => {
      Option(request.context.bean[ShoppingPluginDao].get.invoiceLineMarkup) match {
        case None => None
        case Some(m) =>
          // Lines are rendered in the order's sorted order.
          val render = MarkupRenderer.renderOrderLines(order.sortedLines, m, request)
          Some(render)
      }
    })
  }
}
/** Renders the invoice number (the order's id). */
@Tag("invoice_number")
class InvoiceNumberTag extends ScalapressTag {
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] =
    request.order.map(_.id.toString)
}
/** Renders the delivery description for the order, when one is set. */
@Tag("invoice_delivery_desc")
class InvoiceDeliveryDetailsTag extends ScalapressTag {
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] =
    for {
      order <- request.order
      details <- Option(order.deliveryDetails) // deliveryDetails may be null
    } yield details
}
@Tag("invoice_delivery_charge")
class InvoiceDeliveryChargeTag extends ScalapressTag with TagBuilder {
  // Renders the order's delivery charge formatted as "£x.xx".
  // Param flags select which figure is shown (same convention as the other
  // money tags in this file; the flag checks are ordered, "ex" wins):
  //   "ex"  -> charge excluding VAT
  //   "vat" -> the VAT portion (0 when VAT is disabled for the installation)
  //   none  -> inclusive figure when VAT is enabled, otherwise the ex-VAT figure
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] = {
    request.order.map(order => {
      val text = if (params.contains("ex")) order.deliveryEx
      else if (params.contains("vat") && request.installation.vatEnabled) order.deliveryVat
      else if (params.contains("vat")) 0
      else if (request.installation.vatEnabled) order.deliveryInc
      else order.deliveryEx
      val textFormatted = "£%1.2f".format(text)
      build(textFormatted, params)
    })
  }
}
/** Renders the description of the current order line. */
@Tag("invoice_line_desc")
class InvoiceLineDescTag extends ScalapressTag {
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] =
    request.orderLine.map(_.description)
}
@Tag("invoice_line_price")
class InvoiceLinePriceTag extends ScalapressTag with TagBuilder {
  // Renders the unit price of the current order line formatted as "£x.xx".
  // Flag selection mirrors the other money tags in this file:
  //   "ex"  -> price excluding VAT
  //   "vat" -> VAT portion (0 when VAT is disabled)
  //   none  -> inclusive price when VAT is enabled, otherwise ex-VAT
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] = {
    request.orderLine.map(line => {
      val text = if (params.contains("ex")) line.priceExVat
      else if (params.contains("vat") && request.installation.vatEnabled) line.priceVat
      else if (params.contains("vat")) 0
      else if (request.installation.vatEnabled) line.priceIncVat
      else line.priceExVat
      val textFormatted = "£%1.2f".format(text)
      build(textFormatted, params)
    })
  }
}
@Tag("invoice_line_total")
class InvoiceLineTotalTag extends ScalapressTag with TagBuilder {
  // Renders the line total (unit price x qty) formatted as "£x.xx".
  // Flag selection mirrors the other money tags in this file:
  //   "ex"  -> total excluding VAT
  //   "vat" -> VAT portion (0 when VAT is disabled)
  //   none  -> inclusive total when VAT is enabled, otherwise ex-VAT
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] = {
    request.orderLine.map(line => {
      val text = if (params.contains("ex")) line.totalExVat
      else if (params.contains("vat") && request.installation.vatEnabled) line.totalVat
      else if (params.contains("vat")) 0
      else if (request.installation.vatEnabled) line.totalIncVat
      else line.totalExVat
      val textFormatted = "£%1.2f".format(text)
      build(textFormatted, params)
    })
  }
}
@Tag("invoice_total")
class InvoiceTotalTag extends ScalapressTag with TagBuilder {
  // Renders the whole-order total (lines + delivery) formatted as "£x.xx".
  // Flag selection mirrors the other money tags in this file:
  //   "ex"  -> order subtotal (ex VAT)
  //   "vat" -> order VAT (0 when VAT is disabled)
  //   none  -> grand total when VAT is enabled, otherwise the subtotal
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] = {
    request.order.map(order => {
      val amount = if (params.contains("ex")) order.subtotal
      else if (params.contains("vat") && request.installation.vatEnabled) order.vat
      else if (params.contains("vat")) 0
      else if (request.installation.vatEnabled) order.total
      else order.subtotal
      val formatted = "£%1.2f".format(amount)
      build(formatted, params)
    })
  }
}
@Tag("invoice_lines_total")
class InvoiceLinesTotalTag extends ScalapressTag with TagBuilder {
  // Renders the total of all order lines (excluding delivery) formatted as "£x.xx".
  // Flag selection mirrors the other money tags in this file:
  //   "ex"  -> lines subtotal (ex VAT)
  //   "vat" -> lines VAT (0 when VAT is disabled)
  //   none  -> inclusive lines total when VAT is enabled, otherwise the subtotal
  def render(request: ScalapressRequest, params: Map[String, String]): Option[String] = {
    request.order.map(order => {
      val text = if (params.contains("ex")) order.linesSubtotal
      else if (params.contains("vat") && request.installation.vatEnabled) order.linesVat
      else if (params.contains("vat")) 0
      else if (request.installation.vatEnabled) order.linesTotal
      else order.linesSubtotal
      val textFormatted = "£%1.2f".format(text)
      build(textFormatted, params)
    })
  }
}
/*
* Copyright 2020 Lenses.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.lenses.streamreactor.connect.aws.s3.formats.parquet
import java.io.InputStream
import java.nio.ByteBuffer
import com.typesafe.scalalogging.LazyLogging
import org.apache.parquet.io.{DelegatingSeekableInputStream, SeekableInputStream}
/** A Parquet [[SeekableInputStream]] over a non-seekable source: backwards
  * seeks are emulated by recreating the underlying stream via `inputStreamFn`
  * and skipping forward to the target position. */
class ParquetSeekableInputStream(inputStreamFn: () => InputStream) extends SeekableInputStream with LazyLogging {

  /**
   * The InceptionDelegatingInputStream delegates to a DelegatingInputStream for the read operations (so as to avoid
   * duplication of all the read code, and it delegates to the outer class for the position and seeking operations.
   *
   * This is obviously a massive workaround for the design of the library we are using as we cannot supply a HTTP input
   * stream that is seekable and therefore we need to recreate the inputStream if we want to seek backwards.
   *
   * We will therefore need to also recreate the InceptionDelegatingInputStream in the event we want to seek backwards.
   *
   * @param inputStream the actual inputStream containing Parquet data from S3
   */
  class InceptionDelegatingInputStream(inputStream: InputStream) extends DelegatingSeekableInputStream(inputStream) {
    override def getPos: Long = ParquetSeekableInputStream.this.getPos
    override def seek(newPos: Long): Unit = ParquetSeekableInputStream.this.seek(newPos)
  }

  // Logical position within the stream.
  // NOTE(review): pos is only advanced by seek(); read/readFully calls do not
  // update it, so getPos after a read still reports the pre-read position —
  // confirm the Parquet reader always seeks before reading.
  private var pos: Long = 0
  private var inputStream: InputStream = _
  private var inceptionInputStream: DelegatingSeekableInputStream = _

  createInputStream()

  // (Re)open the source and wrap it; used at construction and on backwards seeks.
  private def createInputStream(): Unit = {
    logger.debug(s"Recreating input stream")
    inputStream = inputStreamFn()
    inceptionInputStream = new InceptionDelegatingInputStream(inputStream)
  }

  override def getPos: Long = {
    logger.debug("Retrieving position: " + pos)
    pos
  }

  // Seek forwards by skipping; seek backwards by recreating the stream then skipping.
  // NOTE(review): InputStream.skip may skip fewer bytes than requested and its
  // return value is ignored here — a short skip would silently desync pos.
  override def seek(newPos: Long): Unit = {
    logger.debug(s"Seeking from $pos to position $newPos")
    if (newPos < pos) {
      createInputStream()
      inputStream.skip(newPos)
    } else {
      inputStream.skip(newPos - pos)
    }
    pos = newPos
  }

  // Read operations are delegated to the wrapper, which reuses Parquet's read logic.
  override def readFully(bytes: Array[Byte]): Unit = inceptionInputStream.readFully(bytes)

  override def readFully(bytes: Array[Byte], start: Int, len: Int): Unit = inceptionInputStream.readFully(bytes, start, len)

  override def read(buf: ByteBuffer): Int = inceptionInputStream.read(buf)

  override def readFully(buf: ByteBuffer): Unit = inceptionInputStream.readFully(buf)

  override def read(): Int = inceptionInputStream.read()
}
| datamountaineer/stream-reactor | kafka-connect-aws-s3/src/main/scala/io/lenses/streamreactor/connect/aws/s3/formats/parquet/ParquetSeekableInputStream.scala | Scala | apache-2.0 | 3,105 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package unit.kafka.server
import java.lang.{Long => JLong}
import java.net.InetAddress
import java.util
import kafka.api.{ApiVersion, KAFKA_0_10_2_IV0}
import kafka.cluster.Replica
import kafka.controller.KafkaController
import kafka.coordinator.group.GroupCoordinator
import kafka.coordinator.transaction.TransactionCoordinator
import kafka.log.{Log, TimestampOffset}
import kafka.network.RequestChannel
import kafka.network.RequestChannel.Session
import kafka.security.auth.Authorizer
import kafka.server.QuotaFactory.QuotaManagers
import kafka.server._
import kafka.utils.{MockTime, TestUtils, ZkUtils}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.UnsupportedVersionException
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.protocol.{ApiKeys, Errors, SecurityProtocol}
import org.apache.kafka.common.record.RecordBatch
import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse
import org.apache.kafka.common.requests.WriteTxnMarkersRequest.TxnMarkerEntry
import org.apache.kafka.common.requests._
import org.apache.kafka.common.security.auth.KafkaPrincipal
import org.apache.kafka.common.utils.Utils
import org.easymock.{Capture, EasyMock, IAnswer}
import org.junit.Assert.{assertEquals, assertTrue}
import org.junit.Test
import scala.collection.JavaConverters._
import scala.collection.Map
class KafkaApisTest {
private val requestChannel = EasyMock.createNiceMock(classOf[RequestChannel])
private val replicaManager = EasyMock.createNiceMock(classOf[ReplicaManager])
private val groupCoordinator = EasyMock.createNiceMock(classOf[GroupCoordinator])
private val adminManager = EasyMock.createNiceMock(classOf[AdminManager])
private val txnCoordinator = EasyMock.createNiceMock(classOf[TransactionCoordinator])
private val controller = EasyMock.createNiceMock(classOf[KafkaController])
private val zkUtils = EasyMock.createNiceMock(classOf[ZkUtils])
private val metadataCache = EasyMock.createNiceMock(classOf[MetadataCache])
private val metrics = new Metrics()
private val brokerId = 1
private val authorizer: Option[Authorizer] = None
private val clientQuotaManager = EasyMock.createNiceMock(classOf[ClientQuotaManager])
private val clientRequestQuotaManager = EasyMock.createNiceMock(classOf[ClientRequestQuotaManager])
private val replicaQuotaManager = EasyMock.createNiceMock(classOf[ReplicationQuotaManager])
private val quotas = QuotaManagers(clientQuotaManager, clientQuotaManager, clientRequestQuotaManager, replicaQuotaManager, replicaQuotaManager)
private val brokerTopicStats = new BrokerTopicStats
private val clusterId = "clusterId"
private val time = new MockTime
def createKafkaApis(interBrokerProtocolVersion: ApiVersion = ApiVersion.latestVersion): KafkaApis = {
val properties = TestUtils.createBrokerConfig(brokerId, "zk")
properties.put(KafkaConfig.InterBrokerProtocolVersionProp, interBrokerProtocolVersion.toString)
properties.put(KafkaConfig.LogMessageFormatVersionProp, interBrokerProtocolVersion.toString)
new KafkaApis(requestChannel,
replicaManager,
adminManager,
groupCoordinator,
txnCoordinator,
controller,
zkUtils,
brokerId,
new KafkaConfig(properties),
metadataCache,
metrics,
authorizer,
quotas,
brokerTopicStats,
clusterId,
time
)
}
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleAddOffsetToTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = {
createKafkaApis(KAFKA_0_10_2_IV0).handleAddOffsetsToTxnRequest(null)
}
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleAddPartitionsToTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = {
createKafkaApis(KAFKA_0_10_2_IV0).handleAddPartitionToTxnRequest(null)
}
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleTxnOffsetCommitRequestWhenInterBrokerProtocolNotSupported(): Unit = {
createKafkaApis(KAFKA_0_10_2_IV0).handleAddPartitionToTxnRequest(null)
}
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleEndTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = {
createKafkaApis(KAFKA_0_10_2_IV0).handleEndTxnRequest(null)
}
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleWriteTxnMarkersRequestWhenInterBrokerProtocolNotSupported(): Unit = {
createKafkaApis(KAFKA_0_10_2_IV0).handleWriteTxnMarkersRequest(null)
}
@Test
def shouldRespondWithUnsupportedForMessageFormatOnHandleWriteTxnMarkersWhenMagicLowerThanRequired(): Unit = {
val topicPartition = new TopicPartition("t", 0)
val (writeTxnMarkersRequest, request) = createWriteTxnMarkersRequest(Utils.mkList(topicPartition))
val expectedErrors = Map(topicPartition -> Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT).asJava
val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
EasyMock.expect(replicaManager.getMagic(topicPartition))
.andReturn(Some(RecordBatch.MAGIC_VALUE_V1))
EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
EasyMock.replay(replicaManager, replicaQuotaManager, requestChannel)
createKafkaApis().handleWriteTxnMarkersRequest(request)
val markersResponse = readResponse(ApiKeys.WRITE_TXN_MARKERS, writeTxnMarkersRequest, capturedResponse)
.asInstanceOf[WriteTxnMarkersResponse]
assertEquals(expectedErrors, markersResponse.errors(1))
}
@Test
def shouldRespondWithUnknownTopicWhenPartitionIsNotHosted(): Unit = {
val topicPartition = new TopicPartition("t", 0)
val (writeTxnMarkersRequest, request) = createWriteTxnMarkersRequest(Utils.mkList(topicPartition))
val expectedErrors = Map(topicPartition -> Errors.UNKNOWN_TOPIC_OR_PARTITION).asJava
val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
EasyMock.expect(replicaManager.getMagic(topicPartition))
.andReturn(None)
EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
EasyMock.replay(replicaManager, replicaQuotaManager, requestChannel)
createKafkaApis().handleWriteTxnMarkersRequest(request)
val markersResponse = readResponse(ApiKeys.WRITE_TXN_MARKERS, writeTxnMarkersRequest, capturedResponse)
.asInstanceOf[WriteTxnMarkersResponse]
assertEquals(expectedErrors, markersResponse.errors(1))
}
  @Test
  def shouldRespondWithUnsupportedMessageFormatForBadPartitionAndNoErrorsForGoodPartition(): Unit = {
    val tp1 = new TopicPartition("t", 0)
    val tp2 = new TopicPartition("t1", 0)
    val (writeTxnMarkersRequest, request) = createWriteTxnMarkersRequest(Utils.mkList(tp1, tp2))
    // tp1 uses an old message format and must be rejected; tp2 is writable.
    val expectedErrors = Map(tp1 -> Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT, tp2 -> Errors.NONE).asJava
    val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
    val responseCallback: Capture[Map[TopicPartition, PartitionResponse] => Unit] = EasyMock.newCapture()
    EasyMock.expect(replicaManager.getMagic(tp1))
      .andReturn(Some(RecordBatch.MAGIC_VALUE_V1))
    EasyMock.expect(replicaManager.getMagic(tp2))
      .andReturn(Some(RecordBatch.MAGIC_VALUE_V2))
    // Only tp2 should reach the log append path; complete its append by
    // invoking the captured callback with a NONE error for tp2.
    EasyMock.expect(replicaManager.appendRecords(EasyMock.anyLong(),
      EasyMock.anyShort(),
      EasyMock.eq(true),
      EasyMock.eq(false),
      EasyMock.anyObject(),
      EasyMock.capture(responseCallback),
      EasyMock.anyObject())).andAnswer(new IAnswer[Unit] {
      override def answer(): Unit = {
        responseCallback.getValue.apply(Map(tp2 -> new PartitionResponse(Errors.NONE)))
      }
    })
    EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
    EasyMock.replay(replicaManager, replicaQuotaManager, requestChannel)
    createKafkaApis().handleWriteTxnMarkersRequest(request)
    val markersResponse = readResponse(ApiKeys.WRITE_TXN_MARKERS, writeTxnMarkersRequest, capturedResponse)
      .asInstanceOf[WriteTxnMarkersResponse]
    // 1 is the producer id used by createWriteTxnMarkersRequest
    assertEquals(expectedErrors, markersResponse.errors(1))
    EasyMock.verify(replicaManager)
  }
  @Test
  def shouldRespondWithUnknownTopicOrPartitionForBadPartitionAndNoErrorsForGoodPartition(): Unit = {
    val tp1 = new TopicPartition("t", 0)
    val tp2 = new TopicPartition("t1", 0)
    val (writeTxnMarkersRequest, request) = createWriteTxnMarkersRequest(Utils.mkList(tp1, tp2))
    // tp1 is not hosted locally (no magic value) and must get
    // UNKNOWN_TOPIC_OR_PARTITION; tp2 is writable and must succeed.
    val expectedErrors = Map(tp1 -> Errors.UNKNOWN_TOPIC_OR_PARTITION, tp2 -> Errors.NONE).asJava
    val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
    val responseCallback: Capture[Map[TopicPartition, PartitionResponse] => Unit] = EasyMock.newCapture()
    EasyMock.expect(replicaManager.getMagic(tp1))
      .andReturn(None)
    EasyMock.expect(replicaManager.getMagic(tp2))
      .andReturn(Some(RecordBatch.MAGIC_VALUE_V2))
    // Only tp2 should reach the log append path; complete its append by
    // invoking the captured callback with a NONE error for tp2.
    EasyMock.expect(replicaManager.appendRecords(EasyMock.anyLong(),
      EasyMock.anyShort(),
      EasyMock.eq(true),
      EasyMock.eq(false),
      EasyMock.anyObject(),
      EasyMock.capture(responseCallback),
      EasyMock.anyObject())).andAnswer(new IAnswer[Unit] {
      override def answer(): Unit = {
        responseCallback.getValue.apply(Map(tp2 -> new PartitionResponse(Errors.NONE)))
      }
    })
    EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
    EasyMock.replay(replicaManager, replicaQuotaManager, requestChannel)
    createKafkaApis().handleWriteTxnMarkersRequest(request)
    val markersResponse = readResponse(ApiKeys.WRITE_TXN_MARKERS, writeTxnMarkersRequest, capturedResponse)
      .asInstanceOf[WriteTxnMarkersResponse]
    // 1 is the producer id used by createWriteTxnMarkersRequest
    assertEquals(expectedErrors, markersResponse.errors(1))
    EasyMock.verify(replicaManager)
  }
  @Test
  def shouldAppendToLogOnWriteTxnMarkersWhenCorrectMagicVersion(): Unit = {
    val topicPartition = new TopicPartition("t", 0)
    val request = createWriteTxnMarkersRequest(Utils.mkList(topicPartition))._2
    // V2 message format supports transaction markers, so the handler is
    // expected to append the marker to the local log.
    EasyMock.expect(replicaManager.getMagic(topicPartition))
      .andReturn(Some(RecordBatch.MAGIC_VALUE_V2))
    EasyMock.expect(replicaManager.appendRecords(EasyMock.anyLong(),
      EasyMock.anyShort(),
      EasyMock.eq(true),
      EasyMock.eq(false),
      EasyMock.anyObject(),
      EasyMock.anyObject(),
      EasyMock.anyObject()))
    EasyMock.replay(replicaManager)
    createKafkaApis().handleWriteTxnMarkersRequest(request)
    // verify() fails the test if appendRecords was never invoked
    EasyMock.verify(replicaManager)
  }
@Test
def testReadUncommittedConsumerListOffsetLimitedAtHighWatermark(): Unit = {
testConsumerListOffsetLimit(IsolationLevel.READ_UNCOMMITTED)
}
@Test
def testReadCommittedConsumerListOffsetLimitedAtLastStableOffset(): Unit = {
testConsumerListOffsetLimit(IsolationLevel.READ_COMMITTED)
}
  // Verifies that a ListOffsets lookup by timestamp is bounded by the
  // consumer-visible limit (high watermark for READ_UNCOMMITTED, last stable
  // offset for READ_COMMITTED): when the matched offset equals the limit it
  // must not be exposed, so UNKNOWN_OFFSET/UNKNOWN_TIMESTAMP are returned.
  private def testConsumerListOffsetLimit(isolationLevel: IsolationLevel): Unit = {
    val tp = new TopicPartition("foo", 0)
    val timestamp: JLong = time.milliseconds()
    val limitOffset = 15L
    val capturedResponse = EasyMock.newCapture[RequestChannel.Response]()
    val capturedThrottleCallback = EasyMock.newCapture[Int => Unit]()
    val replica = EasyMock.mock(classOf[Replica])
    val log = EasyMock.mock(classOf[Log])
    EasyMock.expect(replicaManager.getLeaderReplicaIfLocal(tp)).andReturn(replica)
    // The visibility limit depends on the requested isolation level.
    if (isolationLevel == IsolationLevel.READ_UNCOMMITTED)
      EasyMock.expect(replica.highWatermark).andReturn(LogOffsetMetadata(messageOffset = limitOffset))
    else
      EasyMock.expect(replica.lastStableOffset).andReturn(LogOffsetMetadata(messageOffset = limitOffset))
    EasyMock.expect(replicaManager.getLog(tp)).andReturn(Some(log))
    // The log lookup resolves to exactly the limit offset.
    EasyMock.expect(log.fetchOffsetsByTimestamp(timestamp)).andReturn(Some(TimestampOffset(timestamp = timestamp, offset = limitOffset)))
    expectThrottleCallbackAndInvoke(capturedThrottleCallback)
    EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
    EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel, replica, log)
    val builder = ListOffsetRequest.Builder.forConsumer(true, isolationLevel)
      .setTargetTimes(Map(tp -> timestamp).asJava)
    val (listOffsetRequest, request) = buildRequest(builder)
    createKafkaApis().handleListOffsetRequest(request)
    val response = readResponse(ApiKeys.LIST_OFFSETS, listOffsetRequest, capturedResponse).asInstanceOf[ListOffsetResponse]
    assertTrue(response.responseData.containsKey(tp))
    val partitionData = response.responseData.get(tp)
    assertEquals(Errors.NONE, partitionData.error)
    // The matched offset sits at the limit, so it is hidden from the consumer.
    assertEquals(ListOffsetResponse.UNKNOWN_OFFSET, partitionData.offset)
    assertEquals(ListOffsetResponse.UNKNOWN_TIMESTAMP, partitionData.timestamp)
  }
@Test
def testReadUncommittedConsumerListOffsetEarliestOffsetEqualsHighWatermark(): Unit = {
testConsumerListOffsetEarliestOffsetEqualsLimit(IsolationLevel.READ_UNCOMMITTED)
}
@Test
def testReadCommittedConsumerListOffsetEarliestOffsetEqualsLastStableOffset(): Unit = {
testConsumerListOffsetEarliestOffsetEqualsLimit(IsolationLevel.READ_COMMITTED)
}
  // Verifies that an EARLIEST_TIMESTAMP ListOffsets lookup IS returned even
  // when it equals the isolation-level limit — unlike a timestamp lookup, the
  // earliest offset at the limit remains visible to the consumer.
  private def testConsumerListOffsetEarliestOffsetEqualsLimit(isolationLevel: IsolationLevel): Unit = {
    val tp = new TopicPartition("foo", 0)
    val limitOffset = 15L
    val capturedResponse = EasyMock.newCapture[RequestChannel.Response]()
    val capturedThrottleCallback = EasyMock.newCapture[Int => Unit]()
    val replica = EasyMock.mock(classOf[Replica])
    val log = EasyMock.mock(classOf[Log])
    EasyMock.expect(replicaManager.getLeaderReplicaIfLocal(tp)).andReturn(replica)
    // The visibility limit depends on the requested isolation level.
    if (isolationLevel == IsolationLevel.READ_UNCOMMITTED)
      EasyMock.expect(replica.highWatermark).andReturn(LogOffsetMetadata(messageOffset = limitOffset))
    else
      EasyMock.expect(replica.lastStableOffset).andReturn(LogOffsetMetadata(messageOffset = limitOffset))
    EasyMock.expect(replicaManager.getLog(tp)).andReturn(Some(log))
    EasyMock.expect(log.fetchOffsetsByTimestamp(ListOffsetRequest.EARLIEST_TIMESTAMP))
      .andReturn(Some(TimestampOffset(timestamp = ListOffsetResponse.UNKNOWN_TIMESTAMP, offset = limitOffset)))
    expectThrottleCallbackAndInvoke(capturedThrottleCallback)
    EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
    EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel, replica, log)
    val builder = ListOffsetRequest.Builder.forConsumer(true, isolationLevel)
      .setTargetTimes(Map(tp -> (ListOffsetRequest.EARLIEST_TIMESTAMP: JLong)).asJava)
    val (listOffsetRequest, request) = buildRequest(builder)
    createKafkaApis().handleListOffsetRequest(request)
    val response = readResponse(ApiKeys.LIST_OFFSETS, listOffsetRequest, capturedResponse).asInstanceOf[ListOffsetResponse]
    assertTrue(response.responseData.containsKey(tp))
    val partitionData = response.responseData.get(tp)
    assertEquals(Errors.NONE, partitionData.error)
    // The earliest offset equals the limit and is returned as-is.
    assertEquals(limitOffset, partitionData.offset)
    assertEquals(ListOffsetResponse.UNKNOWN_TIMESTAMP, partitionData.timestamp)
  }
@Test
def testReadUncommittedConsumerListOffsetLatest(): Unit = {
testConsumerListOffsetLatest(IsolationLevel.READ_UNCOMMITTED)
}
@Test
def testReadCommittedConsumerListOffsetLatest(): Unit = {
testConsumerListOffsetLatest(IsolationLevel.READ_COMMITTED)
}
  // Verifies that a LATEST_TIMESTAMP ListOffsets lookup returns the
  // isolation-level limit itself (high watermark or last stable offset),
  // with no timestamp attached.
  private def testConsumerListOffsetLatest(isolationLevel: IsolationLevel): Unit = {
    val tp = new TopicPartition("foo", 0)
    val latestOffset = 15L
    val capturedResponse = EasyMock.newCapture[RequestChannel.Response]()
    val capturedThrottleCallback = EasyMock.newCapture[Int => Unit]()
    val replica = EasyMock.mock(classOf[Replica])
    val log = EasyMock.mock(classOf[Log])
    EasyMock.expect(replicaManager.getLeaderReplicaIfLocal(tp)).andReturn(replica)
    // The "latest" offset depends on the requested isolation level.
    if (isolationLevel == IsolationLevel.READ_UNCOMMITTED)
      EasyMock.expect(replica.highWatermark).andReturn(LogOffsetMetadata(messageOffset = latestOffset))
    else
      EasyMock.expect(replica.lastStableOffset).andReturn(LogOffsetMetadata(messageOffset = latestOffset))
    expectThrottleCallbackAndInvoke(capturedThrottleCallback)
    EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
    EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel, replica, log)
    val builder = ListOffsetRequest.Builder.forConsumer(true, isolationLevel)
      .setTargetTimes(Map(tp -> (ListOffsetRequest.LATEST_TIMESTAMP: JLong)).asJava)
    val (listOffsetRequest, request) = buildRequest(builder)
    createKafkaApis().handleListOffsetRequest(request)
    val response = readResponse(ApiKeys.LIST_OFFSETS, listOffsetRequest, capturedResponse).asInstanceOf[ListOffsetResponse]
    assertTrue(response.responseData.containsKey(tp))
    val partitionData = response.responseData.get(tp)
    assertEquals(Errors.NONE, partitionData.error)
    assertEquals(latestOffset, partitionData.offset)
    assertEquals(ListOffsetResponse.UNKNOWN_TIMESTAMP, partitionData.timestamp)
  }
  // Builds a WriteTxnMarkers request containing a single COMMIT marker for
  // producer id 1 covering the given partitions, and wraps it as both the
  // typed request and the corresponding channel-level request.
  private def createWriteTxnMarkersRequest(partitions: util.List[TopicPartition]) = {
    val requestBuilder = new WriteTxnMarkersRequest.Builder(Utils.mkList(
      new TxnMarkerEntry(1, 1.toShort, 0, TransactionResult.COMMIT, partitions)))
    buildRequest(requestBuilder)
  }
  // Serializes the given builder's request as it would arrive over the wire
  // and returns both the typed request and the RequestChannel.Request that
  // KafkaApis handlers consume. Uses an anonymous principal and PLAINTEXT.
  private def buildRequest[T <: AbstractRequest](builder: AbstractRequest.Builder[T]): (T, RequestChannel.Request) = {
    val request = builder.build()
    val header = new RequestHeader(builder.apiKey.id, request.version, "", 0)
    val buffer = request.serialize(header)
    val session = Session(KafkaPrincipal.ANONYMOUS, InetAddress.getLocalHost)
    (request, RequestChannel.Request(1, "1", session, buffer, 0, new ListenerName(""), SecurityProtocol.PLAINTEXT))
  }
  // Deserializes a captured channel response back into a typed
  // AbstractResponse: writes the response send into an in-memory channel,
  // skips the size prefix and response header, then decodes the body using
  // the API's response schema for the request's version.
  private def readResponse(api: ApiKeys, request: AbstractRequest, capturedResponse: Capture[RequestChannel.Response]): AbstractResponse = {
    val send = capturedResponse.getValue.responseSend.get
    val channel = new ByteBufferChannel(send.size)
    send.writeTo(channel)
    channel.close()
    channel.buffer.getInt() // read the size
    ResponseHeader.parse(channel.buffer)
    val struct = api.responseSchema(request.version).read(channel.buffer)
    AbstractResponse.getResponse(api, struct)
  }
  // Stubs clientRequestQuotaManager so that when the handler records quota
  // usage, the captured throttle callback is invoked immediately with a
  // throttle time of zero (i.e. the request is not throttled).
  private def expectThrottleCallbackAndInvoke(capturedThrottleCallback: Capture[Int => Unit]): Unit = {
    EasyMock.expect(clientRequestQuotaManager.recordAndThrottleOnQuotaViolation(
      EasyMock.anyObject[ClientSensors],
      EasyMock.anyDouble(),
      EasyMock.capture(capturedThrottleCallback)))
      .andAnswer(new IAnswer[Int] {
        override def answer(): Int = {
          val callback = capturedThrottleCallback.getValue
          callback(0)
          0
        }
      })
  }
}
| wangcy6/storm_app | frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala | Scala | apache-2.0 | 19,824 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kudu.data
import org.geotools.data.collection.ListFeatureCollection
import org.geotools.data.simple.SimpleFeatureStore
import org.geotools.data.{DataStoreFinder, Query, _}
import org.geotools.util.factory.Hints
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.index.conf.QueryProperties
import org.locationtech.geomesa.index.utils.Explainer
import org.locationtech.geomesa.security.SecurityUtils
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.SimpleFeature
import org.specs2.matcher.MatchResult
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
/**
 * Integration tests for the Kudu-backed GeoMesa data store. Requires a live
 * Kudu master at the address configured in `params`, which is why `skipAll`
 * disables the suite under normal builds.
 */
@RunWith(classOf[JUnitRunner])
class KuduDataStoreIntegrationTest extends Specification {
  import scala.collection.JavaConverters._
  skipAll // integration test
  sequential
  // Connection parameters for the local Kudu cluster used when running manually.
  val params = Map(
    "kudu.master" -> "localhost",
    "kudu.catalog" -> "geomesa",
    "geomesa.security.auths" -> "admin"
  )
  "KuduDataStore" should {
    "support table splitting" in {
      val typeName = "testsplits"
      val ds = DataStoreFinder.getDataStore(params.asJava).asInstanceOf[KuduDataStore]
      ds must not(beNull)
      try {
        // Schema with explicit splitter options for the id, z3, attribute and z2 indices.
        ds.createSchema(SimpleFeatureTypes.createType(typeName,
          "name:String:index=true,age:Int,dtg:Date,*geom:Point:srid=4326;table.splitter.options=" +
            "'id.pattern:[A-Z],z3.min:2017-01-01,z3.max:2017-01-10,z3.bits:2,attr.name.pattern:[A-Z],z2.bits:2'"))
        val sft = ds.getSchema(typeName)
        val toAdd = (0 until 10).map { i =>
          val sf = new ScalaSimpleFeature(sft, i.toString)
          sf.getUserData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
          sf.setAttribute(0, s"name$i")
          sf.setAttribute(1, Int.box(i))
          sf.setAttribute(2, f"2014-01-${i + 1}%02dT00:00:01.000Z")
          sf.setAttribute(3, s"POINT(4$i 5$i)")
          sf
        }
        val fs = ds.getFeatureSource(typeName).asInstanceOf[SimpleFeatureStore]
        val ids = fs.addFeatures(new ListFeatureCollection(sft, toAdd))
        ids.asScala.map(_.getID) must containTheSameElementsAs(toAdd.map(_.getID))
        ds.removeSchema(typeName)
        ds.getSchema(typeName) must beNull
      } finally {
        ds.dispose()
      }
    }
    "work with points" in {
      val typeName = "testpoints"
      val ds = DataStoreFinder.getDataStore(params.asJava).asInstanceOf[KuduDataStore]
      ds must not(beNull)
      try {
        ds.createSchema(SimpleFeatureTypes.createType(typeName,
          "name:String:index=true,age:Int,dtg:Date,*geom:Point:srid=4326"))
        val sft = ds.getSchema(typeName)
        sft must not(beNull)
        val toAdd = (0 until 10).map { i =>
          val sf = new ScalaSimpleFeature(sft, i.toString)
          sf.getUserData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
          sf.setAttribute(0, s"name$i")
          sf.setAttribute(1, Int.box(i))
          sf.setAttribute(2, f"2014-01-${i + 1}%02dT00:00:01.000Z")
          sf.setAttribute(3, s"POINT(4$i 5$i)")
          // Cycle visibilities so results exercise the configured 'admin' auths.
          val vis = i % 3 match {
            case 0 => null
            case 1 => "user|admin"
            case 2 => "admin"
          }
          SecurityUtils.setFeatureVisibility(sf, vis)
          sf
        }
        val fs = ds.getFeatureSource(typeName).asInstanceOf[SimpleFeatureStore]
        val ids = fs.addFeatures(new ListFeatureCollection(sft, toAdd))
        ids.asScala.map(_.getID) must containTheSameElementsAs(toAdd.map(_.getID))
        // Run each filter with several projections (null = all attributes).
        forall(Seq(null, Array.empty[String], Array("geom", "dtg"), Array("geom", "name"))) { transforms =>
          testQuery(ds, typeName, "INCLUDE", transforms, toAdd)
          testQuery(ds, typeName, "IN('0', '2')", transforms, Seq(toAdd(0), toAdd(2)))
          testQuery(ds, typeName, "bbox(geom,38,48,52,62) and dtg DURING 2014-01-01T00:00:00.000Z/2014-01-08T12:00:00.000Z", transforms, toAdd.dropRight(2))
          testQuery(ds, typeName, "bbox(geom,42,48,52,62)", transforms, toAdd.drop(2))
          testQuery(ds, typeName, "name < 'name5' AND abs(age) < 3", transforms, toAdd.take(3))
          testQuery(ds, typeName, "name = 'name5' OR name = 'name7'", transforms, Seq(toAdd(5), toAdd(7)))
          testQuery(ds, typeName, "(name = 'name5' OR name = 'name6') and bbox(geom,38,48,52,62) and dtg DURING 2014-01-01T00:00:00.000Z/2014-01-08T12:00:00.000Z", transforms, Seq(toAdd(5), toAdd(6)))
        }
        // Derived-attribute transforms (strConcat) should produce a projected schema.
        def testTransforms(ds: KuduDataStore) = {
          val transforms = Array("derived=strConcat('hello',name)", "geom")
          forall(Seq(("INCLUDE", toAdd), ("bbox(geom,42,48,52,62)", toAdd.drop(2)))) { case (filter, results) =>
            val fr = ds.getFeatureReader(new Query(typeName, ECQL.toFilter(filter), transforms), Transaction.AUTO_COMMIT)
            val features = SelfClosingIterator(fr).map(ScalaSimpleFeature.copy).toList // copy features as the same one is mutated
            features.headOption.map(f => SimpleFeatureTypes.encodeType(f.getFeatureType)) must
                beSome("derived:String,*geom:Point:srid=4326")
            features.map(_.getID) must containTheSameElementsAs(results.map(_.getID))
            forall(features) { feature =>
              feature.getAttribute("derived") mustEqual s"helloname${feature.getID}"
              feature.getAttribute("geom") mustEqual results.find(_.getID == feature.getID).get.getAttribute("geom")
            }
          }
        }
        testTransforms(ds)
        // After deleting everything, all the same filters should return no results.
        ds.getFeatureSource(typeName).removeFeatures(ECQL.toFilter("INCLUDE"))
        forall(Seq("INCLUDE",
          "IN('0', '2')",
          "bbox(geom,42,48,52,62)",
          "bbox(geom,38,48,52,62) and dtg DURING 2014-01-01T00:00:00.000Z/2014-01-08T12:00:00.000Z",
          "(name = 'name5' OR name = 'name6') and bbox(geom,38,48,52,62) and dtg DURING 2014-01-01T00:00:00.000Z/2014-01-08T12:00:00.000Z",
          "name < 'name5'",
          "name = 'name5'")) { filter =>
          testQuery(ds, typeName, filter, null, Seq.empty)
        }
        ds.removeSchema(typeName)
        ds.getSchema(typeName) must beNull
      } finally {
        ds.dispose()
      }
    }
    "work with polys" in {
      val typeName = "testpolys"
      val ds = DataStoreFinder.getDataStore(params.asJava).asInstanceOf[KuduDataStore]
      ds must not(beNull)
      try {
        ds.getSchema(typeName) must beNull
        ds.createSchema(SimpleFeatureTypes.createType(typeName, "name:String:index=true,dtg:Date,*geom:Polygon:srid=4326"))
        val sft = ds.getSchema(typeName)
        sft must not(beNull)
        val toAdd = (0 until 10).map { i =>
          val sf = new ScalaSimpleFeature(sft, i.toString)
          sf.getUserData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
          sf.setAttribute(0, s"name$i")
          sf.setAttribute(1, s"2014-01-01T0$i:00:01.000Z")
          sf.setAttribute(2, s"POLYGON((-120 4$i, -120 50, -125 50, -125 4$i, -120 4$i))")
          sf
        }
        val fs = ds.getFeatureSource(typeName).asInstanceOf[SimpleFeatureStore]
        val ids = fs.addFeatures(new ListFeatureCollection(sft, toAdd))
        ids.asScala.map(_.getID) must containTheSameElementsAs((0 until 10).map(_.toString))
        testQuery(ds, typeName, "INCLUDE", null, toAdd)
        testQuery(ds, typeName, "IN('0', '2')", null, Seq(toAdd(0), toAdd(2)))
        testQuery(ds, typeName, "bbox(geom,-126,38,-119,52) and dtg DURING 2014-01-01T00:00:00.000Z/2014-01-01T07:59:59.000Z", null, toAdd.dropRight(2))
        testQuery(ds, typeName, "bbox(geom,-126,42,-119,45)", null, toAdd.dropRight(4))
        testQuery(ds, typeName, "name < 'name5'", null, toAdd.take(5))
        testQuery(ds, typeName, "(name = 'name5' OR name = 'name6') and bbox(geom,-126,38,-119,52) and dtg DURING 2014-01-01T00:00:00.000Z/2014-01-01T07:59:59.000Z", null, Seq(toAdd(5), toAdd(6)))
        ds.removeSchema(typeName)
        ds.getSchema(typeName) must beNull
      } finally {
        ds.dispose()
      }
    }
  }
  // Runs the given CQL filter (with optional attribute transforms) and checks
  // that the returned features match `results` by id and attribute values.
  // Temporarily forces the scan-ranges target to exercise range reduction.
  def testQuery(ds: KuduDataStore,
                typeName: String,
                filter: String,
                transforms: Array[String],
                results: Seq[SimpleFeature],
                explain: Option[Explainer] = None): MatchResult[_] = {
    QueryProperties.ScanRangesTarget.threadLocalValue.set("10")
    try {
      val query = new Query(typeName, ECQL.toFilter(filter), transforms)
      explain.foreach(e => ds.getQueryPlan(query, explainer = e))
      val fr = ds.getFeatureReader(query, Transaction.AUTO_COMMIT)
      val features = SelfClosingIterator(fr).map(ScalaSimpleFeature.copy).toList // copy features as the same one is mutated
      val attributes = Option(transforms).getOrElse(ds.getSchema(typeName).getAttributeDescriptors.map(_.getLocalName).toArray)
      features.map(_.getID) must containTheSameElementsAs(results.map(_.getID))
      forall(features) { feature =>
        feature.getAttributes must haveLength(attributes.length)
        forall(attributes.zipWithIndex) { case (attribute, i) =>
          feature.getAttribute(attribute) mustEqual feature.getAttribute(i)
          feature.getAttribute(attribute) mustEqual results.find(_.getID == feature.getID).get.getAttribute(attribute)
        }
      }
    } finally {
      QueryProperties.ScanRangesTarget.threadLocalValue.remove()
    }
  }
}
| locationtech/geomesa | geomesa-kudu/geomesa-kudu-datastore/src/test/scala/org/locationtech/geomesa/kudu/data/KuduDataStoreIntegrationTest.scala | Scala | apache-2.0 | 10,072 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.tools.help
import com.beust.jcommander.Parameters
import org.locationtech.geomesa.tools.Command
import org.locationtech.geomesa.tools.help.ClasspathCommand.ClasspathParameters
/**
* Note: this class is a placeholder for the 'classpath' function implemented in the 'geomesa-*' script, to get it
* to show up in the JCommander help
*/
class ClasspathCommand extends Command {
  override val name: String = "classpath"
  override val params: ClasspathParameters = new ClasspathParameters
  // No-op: the actual classpath printing happens in the launcher script.
  override def execute(): Unit = ()
}
object ClasspathCommand {
  // JCommander requires a parameters object even though this command takes no options
  @Parameters(commandDescription = "Display the GeoMesa classpath")
  class ClasspathParameters {}
}
| locationtech/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/help/ClasspathCommand.scala | Scala | apache-2.0 | 1,128 |
import collection.mutable.ListBuffer
import io.Source
import java.io.{File, FileWriter, BufferedWriter}
/*
* Copyright 2001-2011 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Code generator that produces the "Must"-flavored ScalaTest matcher sources
 * from the hand-written "Should" sources by textual substitution.
 *
 * Fixes over the previous version: every `Source.fromFile` is now closed
 * (the old code leaked one file handle per translated file), the repeated
 * read/translate/write loop is factored into a single helper, and the
 * deprecated procedure syntax is replaced with explicit `: Unit =`.
 */
object GenMatchers {

  /**
   * Translates one line of "should"-style source into its "must"-style
   * equivalent.
   *
   * Spans that must survive the rewrite are first renamed to sentinel tokens
   * (`I_WAS_must_ORIGINALLY`, `I_MUST_STAY_SHOULD`, `I_WAS_Must_ORIGINALLY`),
   * then the blanket should->must / Should->Must substitutions are applied,
   * and finally the sentinels are restored.
   */
  def translateShouldToMust(shouldLine: String): String = {
    // Protect literal <code>must</code> occurrences (they become <code>should</code>).
    val temp1 = shouldLine.replaceAll("<code>must</code>", "<code>I_WAS_must_ORIGINALLY</code>")
    // Honor explicit PRESERVE markers; note the marker itself is dropped and
    // replaced by a single leading space (historical behavior, kept as-is).
    val temp2 = temp1.replaceAll("<!-- PRESERVE -->should", " I_MUST_STAY_SHOULD")
    // Cross-links to MustMatchers must flip to ShouldMatchers in the output.
    val temp3 = temp2.replaceAll(
      "<a href=\"MustMatchers.html\"><code>MustMatchers</code></a>",
      "<a href=\"I_WAS_Must_ORIGINALLYMatchers.html\"><code>I_WAS_Must_ORIGINALLYMatchers</code></a>"
    )
    val temp4 = temp3.replaceAll("should", "must")
    val temp5 = temp4.replaceAll("Should", "Must")
    val temp6 = temp5.replaceAll("I_WAS_must_ORIGINALLY", "should")
    val temp7 = temp6.replaceAll("I_MUST_STAY_SHOULD", "should")
    temp7.replaceAll("I_WAS_Must_ORIGINALLY", "Should")
  }

  /**
   * Reads `source` line by line, translates each line with
   * [[translateShouldToMust]], and writes the result to `target`.
   * Both the reader and the writer are closed even if translation fails.
   */
  private def translateFile(source: File, target: File): Unit = {
    val in = Source.fromFile(source)
    try {
      val writer = new BufferedWriter(new FileWriter(target))
      try {
        in.getLines().foreach { line =>
          writer.write(translateShouldToMust(line))
          writer.newLine()
        }
      } finally {
        writer.flush()
        writer.close()
      }
    } finally {
      in.close() // the previous version never closed the Source
    }
    println("Generated " + target.getAbsolutePath)
  }

  /**
   * Generates the main (non-test) Must sources under `targetDir`.
   * `scalaVersion` is unused but kept for interface compatibility with the build.
   */
  def genMain(targetDir: File, scalaVersion: String): Unit = {
    targetDir.mkdirs()
    val matchersDir = new File(targetDir, "matchers")
    matchersDir.mkdirs()
    val junitDir = new File(targetDir, "junit")
    junitDir.mkdirs()
    translateFile(
      new File("src/main/scala/org/scalatest/matchers/ShouldMatchers.scala"),
      new File(matchersDir, "MustMatchers.scala"))
    translateFile(
      new File("src/main/scala/org/scalatest/junit/ShouldMatchersForJUnit.scala"),
      new File(junitDir, "MustMatchersForJUnit.scala"))
  }

  /**
   * Generates the Must-flavored test sources under `targetBaseDir` from the
   * corresponding Should specs. `scalaVersion` is unused but kept for
   * interface compatibility with the build.
   */
  def genTest(targetBaseDir: File, scalaVersion: String): Unit = {
    val sourceBaseDir = new File("src/test/scala/org/scalatest")
    val matchersDir = new File(targetBaseDir, "matchers")
    matchersDir.mkdirs()
    val shouldFileNames =
      List(
        "ShouldBehaveLikeSpec.scala",
        "ShouldContainElementSpec.scala",
        "ShouldContainKeySpec.scala",
        "ShouldContainValueSpec.scala",
        "ShouldEqualSpec.scala",
        "ShouldHavePropertiesSpec.scala",
        "ShouldLengthSpec.scala",
        "ShouldOrderedSpec.scala",
        "ShouldSizeSpec.scala",
        // "ShouldStackSpec.scala", now in examples
        // "ShouldStackFlatSpec.scala",
        "ShouldBeASymbolSpec.scala",
        "ShouldBeAnSymbolSpec.scala",
        "ShouldBeMatcherSpec.scala",
        "ShouldBePropertyMatcherSpec.scala",
        "ShouldBeSymbolSpec.scala",
        "ShouldEndWithRegexSpec.scala",
        "ShouldEndWithSubstringSpec.scala",
        "ShouldFullyMatchSpec.scala",
        "ShouldIncludeRegexSpec.scala",
        "ShouldIncludeSubstringSpec.scala",
        "ShouldLogicalMatcherExprSpec.scala",
        "ShouldMatcherSpec.scala",
        "ShouldPlusOrMinusSpec.scala",
        "ShouldSameInstanceAsSpec.scala",
        "ShouldStartWithRegexSpec.scala",
        "ShouldStartWithSubstringSpec.scala",
        "ShouldBeNullSpec.scala",
        "ShouldBeAnySpec.scala",
        "ShouldBeTripleEqualsSpec.scala",
        "ShouldFileBePropertyMatcherSpec.scala",
        "ShouldThrowSpec.scala"
      )
    for (shouldFileName <- shouldFileNames) {
      // Target file name mirrors the source with Should -> Must.
      val mustFileName = shouldFileName.replace("Should", "Must")
      translateFile(new File(sourceBaseDir, "matchers/" + shouldFileName), new File(matchersDir, mustFileName))
    }
    val junitDir = new File(targetBaseDir, "junit")
    junitDir.mkdirs()
    translateFile(
      new File(sourceBaseDir, "junit/ShouldMatchersForJUnitWordSpec.scala"),
      new File(junitDir, "MustMatchersForJUnitWordSpec.scala"))
  }

  /** Entry point: args(0) = target directory, args(1) = Scala version. */
  def main(args: Array[String]): Unit = {
    val targetDir = args(0)
    val scalaVersion = args(1)
    genMain(new File(targetDir + "/main/scala/org/scalatest/"), scalaVersion)
    // genTest(new File("gen/" + targetDir + "/test/scala/org/scalatest/"), scalaVersion)
  }
}
| hubertp/scalatest | project/GenMatchers.scala | Scala | apache-2.0 | 6,191 |
package fpinscala.examples
// hide builtin scala Stream type
import scala.{Stream => _}
/**
 * A minimal lazy stream: a possibly-empty sequence whose head and tail are
 * exposed on demand through `uncons`.
 *
 * The type parameter is covariant (`+A`): `A` appears only in the result of
 * `uncons`, so a `Stream[Sub]` can safely be used wherever a `Stream[Super]`
 * is expected. This is backward compatible with the previous invariant form.
 */
trait Stream[+A] {
  /** None for the empty stream; otherwise the head paired with the rest of the stream. */
  def uncons: Option[(A, Stream[A])]
}
object Stream {

  /** The empty stream: `uncons` always yields None. */
  def empty[A]: Stream[A] = new Stream[A] {
    val uncons: Option[(A, Stream[A])] = None
  }

  /**
   * Smart constructor for a non-empty stream. The head and tail are passed
   * by name and memoized via a lazy val, so each is evaluated at most once,
   * and only when the stream is actually inspected.
   */
  def cons[A](hd: => A, tl: => Stream[A]): Stream[A] = new Stream[A] {
    lazy val uncons = Some((hd, tl))
  }

  /** Builds a stream from the given elements; tails are constructed lazily. */
  def apply[A](as: A*): Stream[A] =
    if (as.nonEmpty) cons(as.head, apply(as.tail: _*))
    else empty
}
| ryo-murai/fpinscala-exercises | myanswers/src/main/scala/fpinscala/examples/laziness.scala | Scala | mit | 478 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.responsiblepeople
import connectors.DataCacheConnector
import controllers.actions.SuccessfulAuthAction
import models.businessactivities.{BusinessActivities, InvolvedInOtherYes}
import models.businessmatching.{BusinessActivities => BusinessMatchingActivities, _}
import models.responsiblepeople.ResponsiblePerson._
import models.responsiblepeople.{ExperienceTrainingNo, ExperienceTrainingYes, PersonName, ResponsiblePerson}
import org.jsoup.Jsoup
import org.jsoup.nodes.Document
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.concurrent.ScalaFutures
import org.scalatestplus.mockito.MockitoSugar
import play.api.i18n.Messages
import play.api.test.Helpers._
import uk.gov.hmrc.http.HeaderCarrier
import uk.gov.hmrc.http.cache.client.CacheMap
import utils.AmlsSpec
import views.html.responsiblepeople.experience_training
import scala.concurrent.Future
class ExperienceTrainingControllerSpec extends AmlsSpec with MockitoSugar with ScalaFutures {
  // Responsible-person record index passed to the controller in these tests
  val RecordId = 1
  // Resolves the display label for a business activity from the message bundle
  def getMessage(service: BusinessActivity): String = Messages("businessactivities.registerservices.servicename.lbl." + BusinessMatchingActivities.getValue(service))
trait Fixture {
self => val request = addToken(authRequest)
val dataCacheConnector = mock[DataCacheConnector]
lazy val view = app.injector.instanceOf[experience_training]
val controller = new ExperienceTrainingController (
dataCacheConnector = dataCacheConnector,
authAction = SuccessfulAuthAction,
ds = commonDependencies,
cc = mockMcc,
experience_training = view,
error = errorView)
}
val emptyCache = CacheMap("", Map.empty)
"ExperienceTrainingController" must {
val pageTitle = Messages("responsiblepeople.experiencetraining.title", "firstname lastname") + " - " +
Messages("summary.responsiblepeople") + " - " +
Messages("title.amls") + " - " + Messages("title.gov")
val personName = Some(PersonName("firstname", None, "lastname"))
"on get load the page with the business activities" in new Fixture {
val mockCacheMap = mock[CacheMap]
when(controller.dataCacheConnector.fetchAll(any())(any[HeaderCarrier]))
.thenReturn(Future.successful(Some(mockCacheMap)))
when(controller.dataCacheConnector.fetch[Seq[ResponsiblePerson]](any(), any())
(any(), any())).thenReturn(Future.successful(Some(Seq(ResponsiblePerson(personName = personName, experienceTraining = Some(ExperienceTrainingYes("I do not remember when I did the training")))))))
val businessActivities = BusinessActivities(involvedInOther = Some(InvolvedInOtherYes("test")))
when(mockCacheMap.getEntry[BusinessActivities](BusinessActivities.key))
.thenReturn(Some(businessActivities))
val businessMatchingActivities = BusinessMatchingActivities(Set(AccountancyServices, BillPaymentServices, EstateAgentBusinessService))
when(mockCacheMap.getEntry[BusinessMatching](BusinessMatching.key)).thenReturn(Some(BusinessMatching(None, Some(businessMatchingActivities))))
val RecordId = 1
val result = controller.get(RecordId)(request)
status(result) must be(OK)
contentAsString(result) must include(getMessage(AccountancyServices))
contentAsString(result) must include(getMessage(BillPaymentServices))
contentAsString(result) must include(getMessage(EstateAgentBusinessService))
contentAsString(result) must include(Messages("responsiblepeople.experiencetraining.title"))
}
"on get display the page with pre populated data for the Yes Option" in new Fixture {
val mockCacheMap = mock[CacheMap]
when(controller.dataCacheConnector.fetchAll(any())(any[HeaderCarrier]))
.thenReturn(Future.successful(Some(mockCacheMap)))
val businessMatchingActivities = BusinessMatchingActivities(Set(AccountancyServices, BillPaymentServices, EstateAgentBusinessService))
when(mockCacheMap.getEntry[BusinessMatching](BusinessMatching.key)).thenReturn(Some(BusinessMatching(None, Some(businessMatchingActivities))))
when(controller.dataCacheConnector.fetch[Seq[ResponsiblePerson]](any(), any())
(any(), any())).thenReturn(Future.successful(Some(Seq(ResponsiblePerson(personName = personName,experienceTraining = Some(ExperienceTrainingYes("I do not remember when I did the training")))))))
val result = controller.get(RecordId)(request)
status(result) must be(OK)
contentAsString(result) must include ("I do not remember when I did the training")
}
"on get display the page with pre populated data with No Data for the information" in new Fixture {
val mockCacheMap = mock[CacheMap]
when(controller.dataCacheConnector.fetchAll(any())(any[HeaderCarrier]))
.thenReturn(Future.successful(Some(mockCacheMap)))
val businessMatchingActivities = BusinessMatchingActivities(Set(AccountancyServices, BillPaymentServices, EstateAgentBusinessService))
when(mockCacheMap.getEntry[BusinessMatching](BusinessMatching.key)).thenReturn(Some(BusinessMatching(None, Some(businessMatchingActivities))))
when(controller.dataCacheConnector.fetch[Seq[ResponsiblePerson]](any(), any())
(any(), any())).thenReturn(Future.successful(Some(Seq(ResponsiblePerson(personName = personName, experienceTraining = Some(ExperienceTrainingNo))))))
val result = controller.get(RecordId)(request)
status(result) must be(OK)
val document = Jsoup.parse(contentAsString(result))
contentAsString(result) must not include "I do not remember when I did the training"
document.select("input[name=experienceTraining][value=true]").hasAttr("checked") must be(false)
document.select("input[name=experienceTraining][value=false]").hasAttr("checked") must be(true)
}
"on post with valid data and training selected yes" in new Fixture {
val newRequest = requestWithUrlEncodedBody(
"experienceTraining" -> "true",
"experienceInformation" -> "I do not remember when I did the training"
)
val mockCacheMap = mock[CacheMap]
when(controller.dataCacheConnector.fetchAll(any())(any[HeaderCarrier]))
.thenReturn(Future.successful(Some(mockCacheMap)))
val businessMatchingActivities = BusinessMatchingActivities(Set(AccountancyServices, BillPaymentServices, EstateAgentBusinessService))
when(mockCacheMap.getEntry[BusinessMatching](BusinessMatching.key)).thenReturn(Some(BusinessMatching(None, Some(businessMatchingActivities))))
when(controller.dataCacheConnector.fetch[Seq[ResponsiblePerson]](any(), any())
(any(), any())).thenReturn(Future.successful(Some(Seq(ResponsiblePerson(personName = personName, experienceTraining = Some(ExperienceTrainingYes("I do not remember when I did the training")))))))
when(controller.dataCacheConnector.save[ResponsiblePerson](any(), any(), any())
(any(), any())).thenReturn(Future.successful(emptyCache))
val result = controller.post(RecordId)(newRequest)
status(result) must be(SEE_OTHER)
redirectLocation(result) must be(Some(routes.TrainingController.get(RecordId).url))
}
"on post with valid data and training selected no" in new Fixture {
val newRequest = requestWithUrlEncodedBody(
"experienceTraining" -> "false"
)
val mockCacheMap = mock[CacheMap]
when(controller.dataCacheConnector.fetchAll(any())(any[HeaderCarrier]))
.thenReturn(Future.successful(Some(mockCacheMap)))
val businessMatchingActivities = BusinessMatchingActivities(Set(AccountancyServices, BillPaymentServices, EstateAgentBusinessService))
when(mockCacheMap.getEntry[BusinessMatching](BusinessMatching.key)).thenReturn(Some(BusinessMatching(None, Some(businessMatchingActivities))))
when(controller.dataCacheConnector.fetch[Seq[ResponsiblePerson]](any(), any())
(any(), any())).thenReturn(Future.successful(Some(Seq(ResponsiblePerson(personName = personName, experienceTraining = Some(ExperienceTrainingYes("I do not remember when I did the training")))))))
when(controller.dataCacheConnector.save[ResponsiblePerson](any(), any(), any())
(any(), any())).thenReturn(Future.successful(emptyCache))
val result = controller.post(RecordId)(newRequest)
status(result) must be(SEE_OTHER)
redirectLocation(result) must be(Some(routes.TrainingController.get(RecordId).url))
}
"on post with invalid data" in new Fixture {
val newRequest = requestWithUrlEncodedBody(
"experienceTraining" -> "not a boolean value"
)
val mockCacheMap = mock[CacheMap]
when(controller.dataCacheConnector.fetchAll(any())(any[HeaderCarrier]))
.thenReturn(Future.successful(Some(mockCacheMap)))
val businessMatchingActivities = BusinessMatchingActivities(Set(AccountancyServices, BillPaymentServices, EstateAgentBusinessService))
when(mockCacheMap.getEntry[BusinessMatching](BusinessMatching.key)).thenReturn(Some(BusinessMatching(None, Some(businessMatchingActivities))))
when(controller.dataCacheConnector.fetch[Seq[ResponsiblePerson]](any(), any())
(any(), any())).thenReturn(Future.successful(Some(Seq(ResponsiblePerson(personName = personName, experienceTraining = Some(ExperienceTrainingYes("I do not remember when I did the training")))))))
val result = controller.post(RecordId)(newRequest)
status(result) must be(BAD_REQUEST)
val document: Document = Jsoup.parse(contentAsString(result))
document.title must be(s"Error: $pageTitle")
}
"on post with valid data in edit mode" in new Fixture {
val mockCacheMap = mock[CacheMap]
when(controller.dataCacheConnector.fetchAll(any())(any[HeaderCarrier]))
.thenReturn(Future.successful(Some(mockCacheMap)))
val businessMatchingActivities = BusinessMatchingActivities(Set(AccountancyServices, BillPaymentServices, EstateAgentBusinessService))
when(mockCacheMap.getEntry[BusinessMatching](BusinessMatching.key)).thenReturn(Some(BusinessMatching(None, Some(businessMatchingActivities))))
val newRequest = requestWithUrlEncodedBody(
"experienceTraining" -> "true",
"experienceInformation" -> "I do not remember when I did the training"
)
when(controller.dataCacheConnector.fetch[Seq[ResponsiblePerson]](any(), any())
(any(), any())).thenReturn(Future.successful(Some(Seq(ResponsiblePerson(experienceTraining = Some(ExperienceTrainingYes("I do not remember when I did the training")))))))
when(controller.dataCacheConnector.save[ResponsiblePerson](any(), any(), any())
(any(), any())).thenReturn(Future.successful(emptyCache))
val result = controller.post(RecordId, true, Some(flowFromDeclaration))(newRequest)
status(result) must be(SEE_OTHER)
redirectLocation(result) must be(Some(routes.DetailedAnswersController.get(RecordId, Some(flowFromDeclaration)).url))
}
}
}
| hmrc/amls-frontend | test/controllers/responsiblepeople/ExperienceTrainingControllerSpec.scala | Scala | apache-2.0 | 11,642 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.commons.io
import java.net.{ URI, URL, URLConnection }
import java.util.jar.JarFile
import org.beangle.commons.lang.Strings
object Jars {

  /** URL protocols that denote jar-style archive resources. */
  val protocols = Set("jar", "zip", "wsjar", "vsfzip")

  /** Separator between the archive location and the entry path in a jar URL. */
  val URLSeparator = "!/"

  /** Whether the given URL points into a jar-style archive. */
  def isJarURL(url: URL): Boolean = protocols(url.getProtocol)

  /** Enable connection caching for JNLP connections only; disable it otherwise. */
  def useCachesIfNecessary(con: URLConnection): Unit = {
    val isJnlp = con.getClass.getSimpleName.startsWith("JNLP")
    con.setUseCaches(isJnlp)
  }

  /** Build a URI from the location, percent-encoding spaces first. */
  def toURI(location: String): URI = new URI(location.replace(" ", "%20"))

  /**
   * Resolve the given jar file URL into a JarFile object.
   */
  def getJarFile(jarFileUrl: String): JarFile = {
    val path =
      if (jarFileUrl.startsWith("file:")) toURI(jarFileUrl).getSchemeSpecificPart
      else jarFileUrl
    new JarFile(path)
  }
}
| beangle/commons | core/src/main/scala/org/beangle/commons/io/Jars.scala | Scala | lgpl-3.0 | 1,523 |
package org.jetbrains.plugins.scala
package refactoring.changeSignature
import com.intellij.psi.PsiMember
import com.intellij.refactoring.changeSignature.{ChangeSignatureProcessorBase, ParameterInfo}
import org.jetbrains.plugins.scala.lang.psi.api.base.ScMethodLike
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createTypeFromText
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.psi.types.api._
import org.jetbrains.plugins.scala.lang.refactoring.changeSignature.{ScalaChangeSignatureHandler, ScalaParameterInfo}
import org.junit.Assert._
/**
* Nikolay.Tropin
* 2014-09-05
*/
/**
 * Tests the "change signature" refactoring when the target method/constructor
 * is defined in Scala: each test supplies the desired new parameter lists and
 * compares the refactored Scala file (and a Java usage file) against the
 * corresponding `_after` fixtures in [[folderPath]].
 */
class ChangeSignatureFromScalaTest extends ChangeSignatureTestBase {
  override def folderPath: String = baseRootPath + "changeSignature/fromScala/"
  // Fixture naming convention: <test>.scala / <test>.java before,
  // <test>_after.scala / <test>_after.java expected results.
  override def mainFileName(testName: String) = testName + ".scala"
  override def secondFileName(testName: String) = testName + ".java"
  override def mainFileAfterName(testName: String) = testName + "_after.scala"
  override def secondFileAfterName(testName: String) = testName + "_after.java"
  // The refactoring target is the method/constructor under the <caret> marker.
  override def findTargetElement: PsiMember = {
    val element = new ScalaChangeSignatureHandler().findTargetMember(getFileAdapter, getEditorAdapter)
    assertTrue("<caret> is not on method name", element.isInstanceOf[ScMethodLike])
    element.asInstanceOf[ScMethodLike]
  }
  override def processor(newVisibility: String,
                         newName: String,
                         newReturnType: String,
                         newParams: => Seq[Seq[ParameterInfo]]): ChangeSignatureProcessorBase = {
    scalaProcessor(newVisibility, newName, newReturnType, newParams, isAddDefaultValue)
  }
  // Shorthand for building a parameter descriptor; oldIdx == -1 means a newly
  // added parameter (defVal supplies its default / call-site value).
  private def parameterInfo(name: String, oldIdx: Int, tpe: ScType, defVal: String = "", isRep: Boolean = false, isByName: Boolean = false) = {
    new ScalaParameterInfo(name, oldIdx, tpe, getProjectAdapter, isRep, isByName, defVal)
  }
  // Rename and reorder/remove parameters of a plain method.
  def testSimpleMethod(): Unit = {
    isAddDefaultValue = false
    val params = Seq(parameterInfo("ii", 0, Int), parameterInfo("b", 2, Boolean))
    doTest(null, "bar", null, Seq(params))
  }
  // Insert a new parameter, updating call sites with the supplied value.
  def testSimpleMethodAdd(): Unit = {
    isAddDefaultValue = false
    val params = Seq(parameterInfo("i", 0, Int), parameterInfo("s", -1, AnyRef, "\\"hi\\""), parameterInfo("b", 1, Boolean))
    doTest(null, "foo", null, Seq(params))
  }
  // Same as above, but the new parameter gets a default value in the signature.
  def testAddWithDefault(): Unit = {
    isAddDefaultValue = true
    val params = Seq(parameterInfo("i", 0, Int), parameterInfo("s", -1, AnyRef, "\\"hi\\""), parameterInfo("b", 1, Boolean))
    doTest(null, "foo", null, Seq(params))
  }
  // Add a parameter to a previously parameterless method.
  def testParameterless(): Unit = {
    isAddDefaultValue = true
    val params = Seq(parameterInfo("i", -1, Int, "1"))
    doTest(null, "bar", null, Seq(params))
  }
  // Turn an existing parameter into a by-name parameter.
  def testAddByName(): Unit = {
    val params = Seq(parameterInfo("x", 0, Int), parameterInfo("s", 1, AnyRef, isByName = true))
    doTest(null, "foo", null, Seq(params))
  }
  def testReturnTypeChange(): Unit = {
    val params = Seq(Seq.empty)
    doTest(null, "foo", "Unit", params)
  }
  // Signature referring to the method's own type parameter.
  def testGenerics(): Unit = {
    def tpe = createTypeFromText("T", targetMethod, targetMethod).get
    doTest(null, "foo", "T", Seq(Seq(parameterInfo("t", 0, tpe))))
  }
  // Auxiliary (secondary) constructor.
  def testSecConstructor(): Unit = {
    isAddDefaultValue = false
    val params = Seq(parameterInfo("i", 0, Int), parameterInfo("j", -1, Int, "0"))
    doTest(null, "Constructor", null, Seq(params))
  }
  // Primary constructor, including a visibility change.
  def testPrimConstructor(): Unit = {
    isAddDefaultValue = false
    val params = Seq(parameterInfo("i", 0, Int), parameterInfo("b", -1, Boolean, "true"))
    doTest("protected", "Constructor", null, Seq(params))
  }
  def testDifferentParamNames(): Unit = {
    val params = Seq(parameterInfo("newName", 0, Int))
    doTest(null, "foo", null, Seq(params))
  }
  def testPrimConstructorDefault(): Unit = {
    isAddDefaultValue = true
    val params = Seq(parameterInfo("i", 0, Int), parameterInfo("b", -1, Boolean, "true"))
    doTest("protected", "Constructor", null, Seq(params))
  }
  // Introduce an additional parameter clause (multiple parameter lists).
  def testAddNewClauseWithDefault(): Unit = {
    isAddDefaultValue = true
    val params = Seq(Seq(parameterInfo("b", -1, Boolean, "true")), Seq(parameterInfo("x", 0, Int), parameterInfo("y", -1, Int, "0")))
    doTest(null, "foo", null, params)
  }
  def testAddNewClause(): Unit = {
    isAddDefaultValue = false
    val params = Seq(Seq(parameterInfo("b", -1, Boolean, "true")), Seq(parameterInfo("x", 0, Int), parameterInfo("y", -1, Int, "0")))
    doTest(null, "foo", null, params)
  }
  // Collapse two parameter clauses into one.
  def testRemoveClause(): Unit = {
    val params = Seq(parameterInfo("b", 1, Boolean), parameterInfo("i", 0, Int))
    doTest(null, "RemoveClauseConstructor", null, Seq(params))
  }
  // Case-class primary constructor (also updates apply/unapply usages).
  def testCaseClass(): Unit = {
    val params = Seq(parameterInfo("number", 1, Int), parameterInfo("char", 0, Char), parameterInfo("b", -1, Boolean, "true"))
    doTest(null, "MyClass", null, Seq(params))
  }
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/refactoring/changeSignature/ChangeSignatureFromScalaTest.scala | Scala | apache-2.0 | 4,944 |
package ch.descabato.core.actors
import java.util.Date
import ch.descabato.core._
import ch.descabato.core.actors.MetadataStorageActor.{AllKnownStoredPartsMemory, BackupDescription, BackupMetaDataStored}
import ch.descabato.core.commands.ProblemCounter
import ch.descabato.core.model._
import ch.descabato.utils.Implicits._
import ch.descabato.utils.{StandardMeasureTime, Utils}
import scala.concurrent.Future
import scala.util.{Failure, Success, Try}
/**
 * Holds the backup metadata (file/folder descriptions plus their chunk ids)
 * in memory, persists it incrementally to json metadata files on chunk
 * checkpoints, and writes one BackupDescriptionStored per backup run on
 * [[finish]].
 *
 * NOTE(review): the mutable state below is unsynchronized — presumably every
 * method is invoked from a single actor thread; confirm before calling from
 * anywhere else.
 */
class MetadataStorageActor(val context: BackupContext, val journalHandler: JournalHandler) extends MetadataStorage with JsonUser with Utils {
  val config = context.config
  import context.executionContext
  // Set once by finish(); mutating operations require(!hasFinished).
  private var hasFinished = false
  // Union of all metadata ever read from disk plus everything added this run.
  private var allKnownStoredPartsMemory = new AllKnownStoredPartsMemory()
  // Ids of the files/folders belonging to the backup currently in progress.
  private var thisBackup: BackupDescriptionStored = new BackupDescriptionStored()
  // Metadata added since the last checkpoint, i.e. not yet written to disk.
  private var notCheckpointed: BackupMetaDataStored = new BackupMetaDataStored()
  // Files currently being backed up (answered Storing by hasAlready).
  private var toBeStored: Set[FileDescription] = Set.empty
  /**
   * Registers a folder for this backup. Reuses the stored id when an
   * identical folder (same path and attributes) is already known,
   * otherwise allocates a new id and queues it for the next checkpoint.
   */
  def addDirectory(description: FolderDescription): Future[Boolean] = {
    require(!hasFinished)
    allKnownStoredPartsMemory.mapByPath.get(description.path) match {
      case Some(FolderMetadataStored(id, fd)) if fd.attrs == description.attrs =>
        thisBackup.dirIds += id
      case _ =>
        val stored = FolderMetadataStored(BackupIds.nextId(), description)
        notCheckpointed.folders :+= stored
        thisBackup.dirIds += stored.id
    }
    Future.successful(true)
  }
  /**
   * Loads all existing metadata files into memory (in file-number order).
   * Unreadable/corrupt metadata files are deleted rather than aborting startup.
   */
  def startup(): Future[Boolean] = {
    Future {
      val mt = new StandardMeasureTime()
      val metadata = context.fileManager.metadata
      val files = metadata.getFiles().sortBy(x => metadata.numberOfFile(x))
      for (file <- files) {
        readJson[BackupMetaDataStored](file) match {
          case Success(data) =>
            allKnownStoredPartsMemory ++= data
          case Failure(f) =>
            logger.info(s"Found corrupt backup metadata in $file, deleting it. Exception: ${f.getMessage}")
            file.delete()
        }
      }
      logger.info(s"Took ${mt.measuredTime()} to read the metadata")
      true
    }
  }
  /** All known file metadata keyed by path (folders are filtered out). */
  def getKnownFiles(): Map[String, FileMetadataStored] = allKnownStoredPartsMemory.mapByPath.collect {
    case (x, f: FileMetadataStored) => (x, f)
  }
  override def verifyMetadataForIdsAvailable(date: Date, counter: ProblemCounter): BlockingOperation = {
    // retrieveBackup is built from synchronous json reads via Future.fromTry,
    // so the returned future is already completed and .value is safe here;
    // the None branch guards against that assumption being broken later.
    val future = retrieveBackup(Some(date))
    future.value match {
      case Some(Success(backupDescription)) =>
      // verification successful, as otherwise it would throw an exception if some metadata is missing
      case Some(Failure(e)) =>
        counter.addProblem(s"Could not load backup for ${date}")
      case None =>
        throw new IllegalStateException("Implementation was updated")
    }
    new BlockingOperation()
  }
  /** Chunk ids referenced by any known file, duplicates included. */
  override def getAllFileChunkIds(): Seq[Long] = {
    allKnownStoredPartsMemory.mapById.collect {
      case (_, f: FileMetadataStored) => f
    }.toSeq.flatMap { f: FileMetadataStored =>
      f.chunkIds.toSeq
    }
  }
  /**
   * Materializes the backup for the given date (or the newest one) by
   * resolving its stored ids against the in-memory metadata. Fails if a
   * referenced id is unknown. NOTE(review): with date == None and no backups
   * present, newestFile().get throws — presumably callers guarantee at least
   * one backup exists; confirm.
   */
  def retrieveBackup(date: Option[Date] = None): Future[BackupDescription] = {
    val filesToLoad = date match {
      case Some(d) =>
        context.fileManager.backup.forDate(d)
      case None =>
        context.fileManager.backup.newestFile().get
    }
    logger.info(s"Loading backup metadata from $filesToLoad")
    val tryToLoad = readJson[BackupDescriptionStored](filesToLoad).flatMap { bds =>
      Try(allKnownStoredPartsMemory.putTogether(bds))
    }
    Future.fromTry(tryToLoad)
  }
  /**
   * Classifies a file as currently being stored, already backed up (matching
   * metadata known), or not yet backed up (in which case it is marked as
   * in-flight so concurrent queries answer Storing).
   */
  override def hasAlready(fd: FileDescription): Future[FileAlreadyBackedupResult] = {
    require(!hasFinished)
    if (toBeStored.safeContains(fd)) {
      Future.successful(Storing)
    } else {
      val metadata = allKnownStoredPartsMemory.mapByPath.get(fd.path)
      val haveAlready = metadata.map(_.checkIfMatch(fd)).getOrElse(false)
      if (haveAlready) {
        Future.successful(FileAlreadyBackedUp(metadata.get.asInstanceOf[FileMetadataStored]))
      } else {
        toBeStored += fd
        Future.successful(FileNotYetBackedUp)
      }
    }
  }
  /** Records a freshly backed-up file with its chunk ids under a new id. */
  override def saveFile(fileDescription: FileDescription, hashList: Seq[Long]): Future[Boolean] = {
    require(!hasFinished)
    // TODO check if we have file already under another id and reuse it if possible
    val id = BackupIds.nextId()
    val metadata = FileMetadataStored(id, fileDescription, hashList.toArray)
    allKnownStoredPartsMemory += metadata
    notCheckpointed.files :+= metadata
    toBeStored -= metadata.fd
    thisBackup.fileIds += id
    Future.successful(true)
  }
  /** Includes an unchanged, already-stored file in the current backup. */
  override def saveFileSameAsBefore(fileMetadataStored: FileMetadataStored): Future[Boolean] = {
    require(!hasFinished)
    thisBackup.fileIds += fileMetadataStored.id
    Future.successful(false)
  }
  // True unless the newest backup file on disk deserializes to an equal
  // BackupDescriptionStored (used to skip writing a redundant backup file).
  private def isDifferentFromLastBackup(thisBackup: BackupDescriptionStored) = {
    context.fileManager.backup.newestFile() match {
      case Some(x) =>
        readJson[BackupDescriptionStored](x) match {
          case Success(lastBackup) if lastBackup == thisBackup =>
            false
          case _ => true
        }
      case _ =>
        true
    }
  }
  /**
   * Flushes any remaining metadata, writes the backup description file
   * (unless identical to the previous backup), and seals this instance.
   * Idempotent: subsequent calls are no-ops.
   */
  override def finish(): Future[Boolean] = {
    if (!hasFinished) {
      if (notCheckpointed.files.nonEmpty || notCheckpointed.folders.nonEmpty) {
        writeMetadata(notCheckpointed)
        notCheckpointed = new BackupMetaDataStored()
      }
      if (thisBackup.dirIds.nonEmpty || thisBackup.fileIds.nonEmpty) {
        if (isDifferentFromLastBackup(thisBackup)) {
          // this backup has data and it is different from the last backup
          val file = context.fileManager.backup.nextFile()
          writeToJson(file, thisBackup)
        } else {
          logger.info("Same files as last backup, skipping creation of new file")
        }
      }
      hasFinished = true
    }
    Future.successful(true)
  }
  override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
    logger.error("Actor was restarted", reason)
  }
  // Writes one metadata file containing the given parts.
  private def writeMetadata(metadataStored: BackupMetaDataStored): Unit = {
    val file = context.fileManager.metadata.nextFile()
    writeToJson(file, metadataStored)
  }
  // Persists every not-yet-checkpointed file whose chunks are all in `ids`
  // (plus all pending folders); files with outstanding chunks stay pending.
  private def checkpointMetadata(ids: Set[Long]): Unit = {
    val (allChunksDone, notAllChunksDone) = notCheckpointed.files.partition(_.chunkIds.forall(ids.safeContains))
    val stored = new BackupMetaDataStored(files = allChunksDone, folders = notCheckpointed.folders)
    writeMetadata(stored)
    notCheckpointed = new BackupMetaDataStored(files = notAllChunksDone)
  }
  /** Event hook: chunk-checkpoint notifications trigger a metadata checkpoint. */
  override def receive(myEvent: MyEvent): Unit = {
    myEvent match {
      case CheckpointedChunks(ids) =>
        if (hasFinished) {
          // finish() already flushed everything; nothing may be pending here
          assert(notCheckpointed.files.isEmpty)
          assert(notCheckpointed.folders.isEmpty)
          logger.info("Ignoring as files are already written")
        } else {
          checkpointMetadata(ids)
        }
      case _ =>
      // ignore unknown message
    }
  }
}
object MetadataStorageActor extends Utils {

  /**
   * Serialized form of one metadata file: the file, folder and symlink
   * entries written in a single checkpoint. Fields are vars because an
   * instance is built up incrementally before being written to json.
   */
  class BackupMetaDataStored(var files: Seq[FileMetadataStored] = Seq.empty,
                             var folders: Seq[FolderMetadataStored] = Seq.empty,
                             var symlinks: Seq[SymbolicLink] = Seq.empty
                            ) {

    /** Returns a new instance containing the entries of both operands. */
    def merge(other: BackupMetaDataStored): BackupMetaDataStored = {
      logger.info("Merging BackupMetaData")
      new BackupMetaDataStored(files ++ other.files, folders ++ other.folders, symlinks ++ other.symlinks)
    }

    override def equals(obj: scala.Any): Boolean = obj match {
      case x: BackupMetaDataStored => files == x.files && folders == x.folders && symlinks == x.symlinks
      case _ => false
    }

    // equals is overridden above, so hashCode must be overridden consistently
    // (previously missing — equal instances had different identity hash codes).
    // NOTE: fields are mutable, so do not use instances as hash-map/set keys.
    override def hashCode(): Int = (files, folders, symlinks).##
  }

  /**
   * In-memory index over every stored part read from the metadata files,
   * addressable both by id and by path.
   */
  class AllKnownStoredPartsMemory() {
    private var _mapById: Map[Long, StoredPart] = Map.empty
    private var _mapByPath: Map[String, StoredPartWithPath] = Map.empty

    /**
     * Resolves the ids of a stored backup description against this index.
     * Throws (NoSuchElementException / ClassCastException) when an id is
     * unknown or maps to an unexpected part type — callers rely on that to
     * detect missing metadata.
     */
    def putTogether(bds: BackupDescriptionStored): BackupDescription = {
      val files = bds.fileIds.map(fileId => _mapById(fileId).asInstanceOf[FileMetadataStored])
      val folders = bds.dirIds.map(dirId => _mapById(dirId).asInstanceOf[FolderMetadataStored].folderDescription)
      BackupDescription(files, folders)
    }

    // I want to do operator overloading here, scalastyle doesn't agree
    // scalastyle:off
    /** Indexes a single part by id, and by path when it has one. */
    def +=(storedPart: StoredPart): Unit = {
      _mapById += storedPart.id -> storedPart
      storedPart match {
        case storedPartWithPath: StoredPartWithPath =>
          _mapByPath += storedPartWithPath.path -> storedPartWithPath
        case _ =>
        // nothing further
      }
    }

    /**
     * Indexes every folder and file of a metadata set and advances the
     * global id generator past the highest id seen.
     */
    def ++=(backupMetaDataStored: BackupMetaDataStored): Unit = {
      var maxId: Long = 0
      for (folder <- backupMetaDataStored.folders) {
        _mapById += folder.id -> folder
        _mapByPath += folder.path -> folder
        maxId = Math.max(maxId, folder.id)
      }
      for (file <- backupMetaDataStored.files) {
        _mapById += file.id -> file
        _mapByPath += file.path -> file
        maxId = Math.max(maxId, file.id)
      }
      BackupIds.maxId(maxId)
    }
    // scalastyle:on

    def mapById = _mapById
    def mapByPath = _mapByPath
  }

  // TODO symlinks
  /** A fully resolved backup: concrete file and folder descriptions. */
  case class BackupDescription(files: Seq[FileMetadataStored], folders: Seq[FolderDescription])
}
| Stivo/DeScaBaTo | core/src/main/scala/ch/descabato/core/actors/MetadataStorageActor.scala | Scala | gpl-3.0 | 9,351 |
package org.jetbrains.plugins.scala
package annotator
package element
import com.intellij.lang.ASTNode
import com.intellij.lexer.StringLiteralLexer
import com.intellij.openapi.util.TextRange
import com.intellij.openapi.vfs.VirtualFile
import com.intellij.psi.StringEscapesTokenTypes
import com.intellij.psi.tree.TokenSet
import org.jetbrains.plugins.scala.extensions.ObjectExt
import org.jetbrains.plugins.scala.highlighter.lexer.{ScalaInterpolatedStringLiteralLexer, ScalaMultilineStringLiteralLexer, ScalaStringLiteralLexer}
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes.{tINTERPOLATED_MULTILINE_STRING, tINTERPOLATED_STRING, tMULTILINE_STRING, tSTRING}
import org.jetbrains.plugins.scala.lang.psi.api.base.ScInterpolatedStringLiteral
import org.jetbrains.plugins.scala.lang.psi.api.base.literals.ScStringLiteral
import org.jetbrains.plugins.scala.macroAnnotations.Measure
/** see also [[org.jetbrains.plugins.scala.annotator.element.ScInterpolatedStringLiteralAnnotator]] */
object ScStringLiteralAnnotator extends ElementAnnotator[ScStringLiteral] {
  // JVM class files cap a string constant at 65535 bytes of modified UTF-8.
  private val StringLiteralSizeLimit = 65536
  // Below this character count the literal cannot exceed the byte limit even
  // if every character took 4 bytes, so the utf8Size computation is skipped.
  private val StringCharactersCountLimit = StringLiteralSizeLimit / 4

  import scala.util.chaining.scalaUtilChainingOps

  // Precomputed (token type, singleton TokenSet) pair per literal flavour.
  private val SET_STRING = tSTRING.pipe(t => (t, TokenSet.create(t)))
  private val SET_MULTILINE_STRING = tMULTILINE_STRING.pipe(t => (t, TokenSet.create(t)))
  private val SET_INTERPOLATED_STRING = tINTERPOLATED_STRING.pipe(t => (t, TokenSet.create(t)))
  private val SET_INTERPOLATED_MULTILINE_STRING = tINTERPOLATED_MULTILINE_STRING.pipe(t => (t, TokenSet.create(t)))

  /**
   * Reports a "too long" error when the literal would not fit into a class
   * file; escape sequences are only validated for literals of legal length.
   */
  override def annotate(literal: ScStringLiteral, typeAware: Boolean)
                       (implicit holder: ScalaAnnotationHolder): Unit = {
    // restructured to avoid an early `return`
    if (!annotateTooLongString(literal))
      annotateInvalidEscapeSequences(literal)
  }

  /** Runs the appropriate string lexer over the literal's string leaf nodes. */
  private def annotateInvalidEscapeSequences(literal: ScStringLiteral)
                                            (implicit holder: ScalaAnnotationHolder): Unit = {
    val isInterpolated = literal.is[ScInterpolatedStringLiteral]
    val isMultiline = literal.isMultiLineString
    val (tokenType, tokenSet) = (isInterpolated, isMultiline) match {
      case (false, false) => SET_STRING
      case (false, true) => SET_MULTILINE_STRING
      case (true, false) => SET_INTERPOLATED_STRING
      case (true, true) => SET_INTERPOLATED_MULTILINE_STRING
    }
    val isRaw = literal.asOptionOf[ScInterpolatedStringLiteral].exists(_.kind == ScInterpolatedStringLiteral.Raw)
    // Interpolated literals use the same lexer class regardless of
    // single-/multi-line (the two branches were previously duplicated).
    val lexer: ScalaStringLiteralLexer = (isInterpolated, isMultiline) match {
      case (false, false) => new ScalaStringLiteralLexer(StringLiteralLexer.NO_QUOTE_CHAR, tokenType)
      case (false, true) => new ScalaMultilineStringLiteralLexer(StringLiteralLexer.NO_QUOTE_CHAR, tokenType)
      case (true, _) => new ScalaInterpolatedStringLiteralLexer(StringLiteralLexer.NO_QUOTE_CHAR, tokenType, isRaw)
    }
    val stringLeafNodes = literal.getNode.getChildren(tokenSet)
    stringLeafNodes.foreach(annotateInvalidEscapeSequences(_, lexer))
  }

  // NOTE: in platform, lexer is reused during highlighting, so we also reuse to catch potential issues
  private def annotateInvalidEscapeSequences(node: ASTNode, lexer: ScalaStringLiteralLexer)
                                            (implicit holder: ScalaAnnotationHolder): Unit = {
    lexer.start(node.getChars)
    var tokenType = lexer.getTokenType
    while (tokenType != null) {
      // token offsets are relative to the node; shift into file coordinates
      val range = TextRange.create(lexer.getTokenStart, lexer.getTokenEnd).shiftRight(node.getStartOffset)
      tokenType match {
        case StringEscapesTokenTypes.INVALID_CHARACTER_ESCAPE_TOKEN =>
          holder.createErrorAnnotation(range, ScalaBundle.message("string.literal.invalid.escape.character"))
        case StringEscapesTokenTypes.INVALID_UNICODE_ESCAPE_TOKEN =>
          holder.createErrorAnnotation(range, ScalaBundle.message("string.literal.invalid.unicode.escape"))
        case _ =>
      }
      lexer.advance()
      tokenType = lexer.getTokenType
    }
  }

  /** Returns true (and reports an error) when the literal is too long for a class file. */
  private def annotateTooLongString(literal: ScStringLiteral)
                                   (implicit holder: ScalaAnnotationHolder): Boolean = {
    val isTooLong = literal match {
      case interpolated: ScInterpolatedStringLiteral => isTooLongLiteral(interpolated, interpolated.getStringParts: _*)
      case ScStringLiteral(string) => isTooLongLiteral(literal, string)
      case _ => false
    }
    if (isTooLong) {
      holder.createErrorAnnotation(literal, ScalaBundle.message("string.literal.is.too.long"))
    }
    isTooLong
  }

  private def isTooLongLiteral(literal: ScStringLiteral, strings: String*): Boolean = {
    import extensions.PsiElementExt
    implicit val virtualFile: Option[VirtualFile] = literal.containingVirtualFile
    strings.exists(exceedsLimit)
  }

  private def exceedsLimit(string: String)
                          (implicit virtualFile: Option[VirtualFile]): Boolean = string.length match {
    case length if length >= StringLiteralSizeLimit => true
    case length if length >= StringCharactersCountLimit => utf8Size(string) >= StringLiteralSizeLimit
    case _ => false
  }

  // Approximates the modified-UTF-8 byte size, counting '\\n' as the file's
  // detected (or platform) line separator length and ignoring '\\r'.
  private def utf8Size(string: String)
                      (implicit virtualFile: Option[VirtualFile]): Int = {
    val lineSeparator = virtualFile
      .flatMap(virtualFile => Option(virtualFile.getDetectedLineSeparator))
      .getOrElse(Option(System.lineSeparator).getOrElse("\\n"))
    string.map {
      case '\\n' => lineSeparator.length
      case '\\r' => 0
      case character if character >= 0 && character <= '\\u007F' => 1
      case character if character >= '\\u0080' && character <= '\\u07FF' => 2
      case character if character >= '\\u0800' && character <= '\\uFFFF' => 3
      case _ => 4
    }.sum
  }
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/annotator/element/ScStringLiteralAnnotator.scala | Scala | apache-2.0 | 6,105 |
/**
* Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.actor
import language.implicitConversions
import scala.annotation.tailrec
import scala.collection.immutable
import scala.concurrent.Future
import scala.concurrent.Promise
import scala.concurrent.duration._
import scala.util.Success
import scala.util.Failure
import java.util.regex.Pattern
import akka.pattern.ask
import akka.routing.MurmurHash
import akka.util.Helpers
import akka.util.Timeout
import akka.dispatch.ExecutionContexts
/**
* An ActorSelection is a logical view of a section of an ActorSystem's tree of Actors,
* allowing for broadcasting of messages to that section.
*/
@SerialVersionUID(1L)
abstract class ActorSelection extends Serializable {
  this: ScalaActorSelection β
  // the concrete actor the relative selection path is resolved against
  protected[akka] val anchor: ActorRef
  // path elements (names and wildcards) below the anchor
  protected val path: immutable.IndexedSeq[SelectionPathElement]
  /**
   * Sends the specified message to this ActorSelection, i.e. fire-and-forget
   * semantics, including the sender reference if possible.
   *
   * Pass [[ActorRef#noSender]] or `null` as sender if there is nobody to reply to
   */
  def tell(msg: Any, sender: ActorRef): Unit =
    ActorSelection.deliverSelection(anchor.asInstanceOf[InternalActorRef], sender,
      ActorSelectionMessage(msg, path, wildcardFanOut = false))
  /**
   * Forwards the message and passes the original sender actor as the sender.
   *
   * Works, no matter whether originally sent with tell/'!' or ask/'?'.
   */
  def forward(message: Any)(implicit context: ActorContext) = tell(message, context.sender())
  /**
   * Resolve the [[ActorRef]] matching this selection.
   * The result is returned as a Future that is completed with the [[ActorRef]]
   * if such an actor exists. It is completed with failure [[ActorNotFound]] if
   * no such actor exists or the identification didn't complete within the
   * supplied `timeout`.
   *
   * Under the hood it talks to the actor to verify its existence and acquire its
   * [[ActorRef]].
   */
  def resolveOne()(implicit timeout: Timeout): Future[ActorRef] = {
    // the callback only completes a promise, so it is safe (and cheaper) to
    // run it on the same-thread execution context instead of dispatching
    implicit val ec = ExecutionContexts.sameThreadExecutionContext
    val p = Promise[ActorRef]()
    this.ask(Identify(None)) onComplete {
      case Success(ActorIdentity(_, Some(ref))) β p.success(ref)
      case _ β p.failure(ActorNotFound(this))
    }
    p.future
  }
  /**
   * Resolve the [[ActorRef]] matching this selection.
   * The result is returned as a Future that is completed with the [[ActorRef]]
   * if such an actor exists. It is completed with failure [[ActorNotFound]] if
   * no such actor exists or the identification didn't complete within the
   * supplied `timeout`.
   *
   * Under the hood it talks to the actor to verify its existence and acquire its
   * [[ActorRef]].
   */
  def resolveOne(timeout: FiniteDuration): Future[ActorRef] = resolveOne()(timeout)
  override def toString: String = {
    val builder = new java.lang.StringBuilder()
    builder.append("ActorSelection[Anchor(").append(anchor.path)
    // the anchor's uid is only shown when it is known (non-default)
    if (anchor.path.uid != ActorCell.undefinedUid)
      builder.append("#").append(anchor.path.uid)
    builder.append("), Path(").append(path.mkString("/", "/", "")).append(")]")
    builder.toString
  }
  /**
   * The [[akka.actor.ActorPath]] of the anchor actor.
   */
  def anchorPath: ActorPath = anchor.path
  /**
   * String representation of the path elements, starting with "/" and separated with "/".
   */
  def pathString: String = path.mkString("/", "/", "")
  /**
   * String representation of the actor selection suitable for storage and recreation.
   * The output is similar to the URI fragment returned by [[akka.actor.ActorPath#toSerializationFormat]].
   * @return URI fragment
   */
  def toSerializationFormat: String = {
    val anchorPath = anchor match {
      // local refs are rendered with the system's default address so the
      // result is resolvable from a remote node as well
      case a: ActorRefWithCell β anchor.path.toStringWithAddress(a.provider.getDefaultAddress)
      case _ β anchor.path.toString
    }
    val builder = new java.lang.StringBuilder()
    builder.append(anchorPath)
    // avoid emitting a double slash when the anchor path already ends in '/'
    val lastChar = builder.charAt(builder.length - 1)
    if (path.nonEmpty && lastChar != '/')
      builder.append(path.mkString("/", "/", ""))
    else if (path.nonEmpty)
      builder.append(path.mkString("/"))
    builder.toString
  }
  // equality is defined by anchor + path; hashCode below is kept consistent
  override def equals(obj: Any): Boolean = obj match {
    case s: ActorSelection β this.anchor == s.anchor && this.path == s.path
    case _ β false
  }
  override lazy val hashCode: Int = {
    import MurmurHash._
    var h = startHash(anchor.##)
    h = extendHash(h, path.##, startMagicA, startMagicB)
    finalizeHash(h)
  }
}
/**
 * An ActorSelection is a logical view of a section of an ActorSystem's tree of Actors,
 * allowing for broadcasting of messages to that section.
 */
object ActorSelection {

  //This cast is safe because the self-type of ActorSelection requires that it mixes in ScalaActorSelection
  implicit def toScala(sel: ActorSelection): ScalaActorSelection = sel.asInstanceOf[ScalaActorSelection]

  /**
   * Construct an ActorSelection from the given string representing a path
   * relative to the given target. This operation has to create all the
   * matching magic, so it is preferable to cache its result if the
   * intention is to send messages frequently.
   */
  def apply(anchorRef: ActorRef, path: String): ActorSelection = apply(anchorRef, path.split("/+"))

  /**
   * Construct an ActorSelection from the given string representing a path
   * relative to the given target. This operation has to create all the
   * matching magic, so it is preferable to cache its result if the
   * intention is to send messages frequently.
   */
  def apply(anchorRef: ActorRef, elements: Iterable[String]): ActorSelection = {
    // Empty segments are dropped; "?"/"*" become wildcard patterns, ".." selects the parent,
    // anything else is an exact child name. breakOut builds the IndexedSeq directly.
    val compiled: immutable.IndexedSeq[SelectionPathElement] = elements.collect({
      case x if !x.isEmpty β
        if ((x.indexOf('?') != -1) || (x.indexOf('*') != -1)) SelectChildPattern(x)
        else if (x == "..") SelectParent
        else SelectChildName(x)
    })(scala.collection.breakOut)
    new ActorSelection with ScalaActorSelection {
      override val anchor = anchorRef
      override val path = compiled
    }
  }

  /**
   * INTERNAL API
   * The receive logic for ActorSelectionMessage. The idea is to recursively descend as far as possible
   * with local refs and hand over to that βforeignβ child when we encounter it.
   */
  private[akka] def deliverSelection(anchor: InternalActorRef, sender: ActorRef, sel: ActorSelectionMessage): Unit =
    if (sel.elements.isEmpty)
      anchor.tell(sel.msg, sender)
    else {
      val iter = sel.elements.iterator

      @tailrec def rec(ref: InternalActorRef): Unit = {
        ref match {
          case refWithCell: ActorRefWithCell β
            // Dead-letter destination used when a selected child does not exist.
            def emptyRef = new EmptyLocalActorRef(refWithCell.provider, anchor.path / sel.elements.map(_.toString),
              refWithCell.underlying.system.eventStream)
            iter.next() match {
              case SelectParent β
                val parent = ref.getParent
                if (iter.isEmpty)
                  parent.tell(sel.msg, sender)
                else
                  rec(parent)
              case SelectChildName(name) β
                val child = refWithCell.getSingleChild(name)
                if (child == Nobody) {
                  // don't send to emptyRef after wildcard fan-out
                  if (!sel.wildcardFanOut) emptyRef.tell(sel, sender)
                } else if (iter.isEmpty)
                  child.tell(sel.msg, sender)
                else
                  rec(child)
              case p: SelectChildPattern β
                // fan-out when there is a wildcard
                val chldr = refWithCell.children
                if (iter.isEmpty) {
                  // leaf
                  val matchingChildren = chldr.filter(c β p.pattern.matcher(c.path.name).matches)
                  if (matchingChildren.isEmpty && !sel.wildcardFanOut)
                    emptyRef.tell(sel, sender)
                  else
                    matchingChildren.foreach(_.tell(sel.msg, sender))
                } else {
                  val matchingChildren = chldr.filter(c β p.pattern.matcher(c.path.name).matches)
                  // don't send to emptyRef after wildcard fan-out
                  if (matchingChildren.isEmpty && !sel.wildcardFanOut)
                    emptyRef.tell(sel, sender)
                  else {
                    // Record the fan-out so misses further down are not reported
                    // to dead letters once per branch.
                    val m = sel.copy(elements = iter.toVector,
                      wildcardFanOut = sel.wildcardFanOut || matchingChildren.size > 1)
                    matchingChildren.foreach(c β deliverSelection(c.asInstanceOf[InternalActorRef], sender, m))
                  }
                }
            }
          case _ β
            // foreign ref, continue by sending ActorSelectionMessage to it with remaining elements
            ref.tell(sel.copy(elements = iter.toVector), sender)
        }
      }
      rec(anchor)
    }
}
/**
 * Contains the Scala API (!-method) for ActorSelection, which provides automatic tracking of the sender,
 * as per the usual implicit ActorRef pattern.
 */
trait ScalaActorSelection {
  this: ActorSelection β

  // Fire-and-forget send; the implicitly captured sender defaults to no sender.
  def !(msg: Any)(implicit sender: ActorRef = Actor.noSender) = tell(msg, sender)
}
/**
 * INTERNAL API
 * ActorRefFactory.actorSelection returns a ActorSelection which sends these
 * nested path descriptions whenever using ! on them, the idea being that the
 * message is delivered by traversing the various actor paths involved.
 */
@SerialVersionUID(2L) // it has protobuf serialization in akka-remote
private[akka] final case class ActorSelectionMessage(
  msg: Any,
  elements: immutable.Iterable[SelectionPathElement],
  wildcardFanOut: Boolean)
  extends AutoReceivedMessage with PossiblyHarmful {

  /** The wrapped message viewed as an [[Identify]] request, when it is one. */
  def identifyRequest: Option[Identify] = msg match {
    case identify: Identify => Some(identify)
    case _                  => None
  }
}
/**
 * INTERNAL API
 */
// Base type for a single parsed element of a selection path.
@SerialVersionUID(1L)
private[akka] sealed trait SelectionPathElement
/**
 * INTERNAL API
 */
// Selects a child by its exact name; toString yields the raw name so paths re-assemble cleanly.
@SerialVersionUID(2L)
private[akka] final case class SelectChildName(name: String) extends SelectionPathElement {
  override def toString: String = name
}
/**
 * INTERNAL API
 */
// Selects all children whose names match a wildcard pattern containing '?' or '*'.
@SerialVersionUID(2L)
private[akka] final case class SelectChildPattern(patternStr: String) extends SelectionPathElement {
  // Compiled eagerly so a malformed pattern fails at construction time.
  val pattern: Pattern = Helpers.makePattern(patternStr)
  override def toString: String = patternStr
}
/**
 * INTERNAL API
 */
// Selects the parent of the current position in the traversal.
@SerialVersionUID(2L)
private[akka] case object SelectParent extends SelectionPathElement {
  override def toString: String = ".."
}
/**
 * When [[ActorSelection#resolveOne]] can't identify the actor the
 * `Future` is completed with this failure.
 */
// The exception message embeds the selection's toString for diagnostics.
@SerialVersionUID(1L)
final case class ActorNotFound(selection: ActorSelection) extends RuntimeException("Actor not found for: " + selection)
| jmnarloch/akka.js | akka-js-actor/shared/src/main/scala/akka/actor/ActorSelection.scala | Scala | bsd-3-clause | 10,964 |
package com.github.eklavya.thrust
import argonaut.Argonaut._
import argonaut._
object Actions {

  /** Implicit view turning an [[Action]] into the JSON field it serializes to. */
  implicit def actionToJson(a: Action): (Json.JsonField, Json) = a.toJson

  /** A client-bound action, encoded as a `"_action"` JSON field. */
  sealed abstract class Action {
    def toJson: (Json.JsonField, Json)
  }

  // Builds the common "_action" field for a given action name.
  private def actionField(name: String): (Json.JsonField, Json) =
    "_action" -> jString(name)

  /** The `create` action. */
  case object CREATE extends Action {
    override def toJson: (Json.JsonField, Json) = actionField("create")
  }

  /** The `call` action. */
  case object CALL extends Action {
    override def toJson: (Json.JsonField, Json) = actionField("call")
  }
}
| eklavya/scala-thrust | src/main/scala/com/github/eklavya/thrust/Actions.scala | Scala | apache-2.0 | 501 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster
import org.apache.hadoop.yarn.api.records.ApplicationId
/**
 * Simple Testing Application Id; ID and cluster timestamp are set in constructor
 * and cannot be updated.
 *
 * @param id app id
 * @param clusterTimestamp timestamp
 */
private[spark] class StubApplicationId(id: Int, clusterTimestamp: Long) extends ApplicationId {

  override def getId: Int = id

  override def getClusterTimestamp: Long = clusterTimestamp

  // The mutators and build() are deliberate no-ops: this stub is immutable
  // after construction.
  override def setId(id: Int): Unit = {}

  override def setClusterTimestamp(clusterTimestamp: Long): Unit = {}

  override def build(): Unit = {}
}
| Panos-Bletsos/spark-cost-model-optimizer | yarn/src/test/scala/org/apache/spark/scheduler/cluster/StubApplicationId.scala | Scala | apache-2.0 | 1,434 |
package com.kubukoz.scala99
/**
* P11 (*) Modified run-length encoding.
* Modify the result of problem P10 in such a way that if an element has no duplicates it is simply copied into the result list. Only elements with duplicates are transferred as (N, E) terms.
* Example:
* *
* scala> encodeModified(List('a, 'a, 'a, 'a, 'b, 'c, 'c, 'a, 'a, 'd, 'e, 'e, 'e, 'e))
* res0: List[Any] = List((4,'a), 'b, (2,'c), (2,'a), 'd, (4,'e))
**/
object P11 {
  /**
   * Run-length encodes the list, but copies elements without duplicates
   * directly into the result instead of wrapping them as (1, E) tuples.
   * Delegates the plain encoding to [[P10.encode]].
   */
  def encodeModified(list: List[Any]): List[Any] =
    P10.encode(list) map {
      case (1, single) => single
      case run         => run
    }
}
| kubukoz/scala-99 | src/main/scala/com/kubukoz/scala99/P11.scala | Scala | apache-2.0 | 687 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributors:
* Beineng Ma <baineng.ma@gmail.com>
*/
package com.thenetcircle.event_bus.event.extractor
import akka.http.scaladsl.model.HttpEntity
import akka.http.scaladsl.unmarshalling.Unmarshaller
import akka.util.ByteString
import com.thenetcircle.event_bus.event.extractor.DataFormat.DataFormat
import com.thenetcircle.event_bus.event.Event
import com.typesafe.scalalogging.LazyLogging
import scala.collection.mutable
import scala.concurrent.ExecutionContext
object EventExtractorFactory extends LazyLogging {

  // Registry of extractors keyed by the data format each extractor reports.
  // NOTE(review): plain mutable.Map without synchronization — assumes all
  // registerExtractor calls happen during single-threaded start-up; confirm
  // before registering extractors at runtime.
  private val registeredExtractors: mutable.Map[DataFormat, EventExtractor] = mutable.Map.empty

  /** Registers `extractor` under the format returned by its `getFormat()`. */
  def registerExtractor(extractor: EventExtractor): Unit =
    registeredExtractors += (extractor.getFormat() -> extractor)

  // Built-in extractor, registered eagerly so defaultExtractor below resolves.
  registerExtractor(new ActivityStreamsEventExtractor())

  /** Fallback used whenever a requested format has no registered extractor. */
  val defaultExtractor: EventExtractor = getExtractor(DataFormat.ACTIVITYSTREAMS)

  /**
   * Returns the [[EventExtractor]] registered for the given [[DataFormat]],
   * falling back to [[defaultExtractor]] (with a warning) when none exists.
   */
  def getExtractor(format: DataFormat): EventExtractor =
    // getOrElse instead of exception-based control flow; the fallback branch
    // is only evaluated on a registry miss.
    registeredExtractors.getOrElse(format, {
      logger.warn(s"there is no EventExtractor match format $format, use the DefaultExtractor instead.")
      defaultExtractor
    })

  /**
   * Returns Unmarshaller[ByteString, Event] based on [[DataFormat]].
   */
  def getByteStringUnmarshaller(
      format: DataFormat
  )(implicit executionContext: ExecutionContext): Unmarshaller[ByteString, Event] =
    Unmarshaller.apply(_ => data => getExtractor(format).extract(data.toArray))

  /**
   * Returns Unmarshaller[HttpEntity, Event] based on [[DataFormat]], by first
   * extracting the entity bytes and delegating to the ByteString unmarshaller.
   */
  def getHttpEntityUnmarshaller(
      format: DataFormat
  )(implicit executionContext: ExecutionContext): Unmarshaller[HttpEntity, Event] =
    Unmarshaller.byteStringUnmarshaller andThen getByteStringUnmarshaller(format)
}
| thenetcircle/event-bus | core/src/main/scala/com/thenetcircle/event_bus/event/extractor/EventExtractorFactory.scala | Scala | apache-2.0 | 2,434 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.feature.image3d
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.feature.transform.vision.image.FeatureTransformer._
import com.intel.analytics.bigdl.dllib.feature.transform.vision.image.ImageFeature
import com.intel.analytics.bigdl.dllib.feature.image.{ImageProcessing, ImageSet}
import org.apache.logging.log4j.LogManager
private[bigdl] abstract class ImageProcessing3D extends ImageProcessing {
  /**
   * if true, catch the exception of the transformer to avoid crashing.
   * if false, interrupt the transformer when error happens
   */
  private var ignoreImageException: Boolean = false

  /**
   * catch the exception of the transformer to avoid crashing.
   */
  override def enableIgnoreException(): this.type = {
    ignoreImageException = true
    this
  }

  // Identity by default; subclasses override this with the actual 3D transform.
  protected def transformTensor(tensor: Tensor[Float]): Tensor[Float] = {
    tensor
  }

  /**
   * transform image feature
   *
   * Features that are not [[ImageFeature3D]] are passed through untouched.
   *
   * @param feature ImageFeature
   * @return ImageFeature
   */
  override def transform(feature: ImageFeature): ImageFeature = {
    if (!feature.isInstanceOf[ImageFeature3D]) return feature
    else {
      transform(feature.asInstanceOf[ImageFeature3D])
    }
  }

  /**
   * transform 3D image feature
   *
   * Applies [[transformTensor]] to the feature's stored tensor and writes back
   * both the transformed tensor and its size. On failure the feature is marked
   * invalid; when ignoreImageException is set the error is logged and swallowed,
   * otherwise it is rethrown.
   *
   * @param feature ImageFeature3D
   * @return ImageFeature3D
   */
  def transform(feature: ImageFeature3D): ImageFeature3D = {
    try {
      // Invalid features are returned unmodified.
      if (!feature.isValid) return feature
      // change image to tensor
      val tensor = feature.asInstanceOf[ImageFeature3D][Tensor[Float]](ImageFeature.imageTensor)
      val out = transformTensor(tensor)
      feature.update(ImageFeature.imageTensor, out)
      feature.update(ImageFeature.size, out.size())
    } catch {
      case e: Exception =>
        feature.isValid = false
        if (ignoreImageException) {
          val path = if (feature.contains(ImageFeature.uri)) feature(ImageFeature.uri) else ""
          // NOTE(review): `logger` appears to be inherited from a parent type — confirm.
          logger.warn(s"failed ${path} in transformer ${getClass}")
          e.printStackTrace()
        } else {
          throw e
        }
    }
    feature
  }

  // Applies this processing to every element of the image set.
  override def apply(imageSet: ImageSet): ImageSet = {
    imageSet.transform(this)
  }
}
object ImageProcessing3D {
  // Logger scoped to the companion object.
  val logger = LogManager.getLogger(getClass)
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/feature/image3d/ImageProcessing3D.scala | Scala | apache-2.0 | 2,896 |
import akka.actor.{ Props, ActorSystem }
import db.UserDB
import handler._
object Main extends App {
  // Root actor system hosting all server actors.
  val system = ActorSystem("server")
  // Actor backed by db.UserDB — presumably the user persistence actor; see db package.
  val UserDB = system.actorOf(Props(classOf[UserDB]), "UserDBActor")
  // Actor handling incoming API requests.
  val service = system.actorOf(Props(classOf[ApiHandler]), "ApiHandler")
}
/*object MainWithEchoHandler extends App {
val system = ActorSystem("server")
val service = system.actorOf(TcpServer.props(EchoHandlerProps), "ServerActor")
}
object MainWithApiHandler extends App {
val system = ActorSystem("server")
val service = system.actorOf(TcpServer.props(ApiHandlerProps), "ServerActor")
}
object MainWithDbHandler extends App {
val system = ActorSystem("server")
val service = system.actorOf(TcpServer.props(DbHandlerProps), "ServerActor")
}*/ | pahomovda/protectedchat-server | src/main/scala/Main.scala | Scala | mit | 767 |
package com.datasift.dropwizard.scala
import scala.reflect._
import org.skife.jdbi.v2._
import org.skife.jdbi.v2.sqlobject.mixins.Transactional
import org.skife.jdbi.v2.tweak.HandleCallback
/** Global definitions and implicits for JDBI. */
package object jdbi {
// Explicit result type added: implicit conversions without a declared result
// type are fragile under inference and deprecated in later Scala versions.
implicit final def JDBIWrapper(db: DBI): JDBIWrapper = new JDBIWrapper(db)
/** Provides idiomatic Scala enhancements to the JDBI API.
 *
 * Examples -
 *
 * dbi.open[DAO] to open a handle and attach a new sql object of the specified type to that handle
 *
 * dbi.daoFor[DAO] to create a new sql object which will obtain and release connections from this dbi instance,
 * as it needs to, and can, respectively
 *
 * When in scope, you can create transactions using for comprehension. For instance -
 * {{{
 * for { handle <- dbi.transaction
 *       dao1 <- handle.attachable[Dao1]
 *       ...
 *       daoN <- handle.attachable[DaoN] } yield {
 *
 *       dao1.some_function()
 *       ...
 *       daoN.some_other_function()
 * }
 * }}}
 *
 * @param db the [[org.skife.jdbi.v2.DBI]] instance to wrap.
 */
class JDBIWrapper private[jdbi](db: DBI) {

  // Each method below adapts a Scala function to jDBI's SAM callback interfaces.

  /** Creates a typed DAO instance.
   *
   * @tparam T type of the DAO to create.
   * @return a DAO instance for the specified type.
   */
  def open[T : ClassTag]: T = db.open[T](classTag[T].runtimeClass.asInstanceOf[Class[T]])

  /** Creates an on-demand typed DAO instance.
   *
   * @tparam T type of the DAO to create.
   * @return an on-demand DAO instance for the specified type.
   */
  def daoFor[T : ClassTag]: T = db.onDemand[T](classTag[T].runtimeClass.asInstanceOf[Class[T]])

  /** Executes the given function within a transaction.
   *
   * @tparam A the return type of the function to execute.
   * @param f the function to execute within the transaction.
   * @return the result of the function.
   * @throws Exception if an Exception is thrown by the function, the transaction will be
   *                   rolled-back.
   */
  def inTransaction[A](f: (Handle, TransactionStatus) => A): A = {
    db.inTransaction(new TransactionCallback[A] {
      def inTransaction(handle: Handle, status: TransactionStatus): A = f(handle, status)
    })
  }

  /** Executes the given function within a transaction.
   *
   * @tparam A the return type of the function to execute.
   * @param f the function to execute within the transaction.
   * @return the result of the function.
   * @throws Exception if an Exception is thrown by the function, the transaction will be
   *                   rolled-back.
   */
  def inTransaction[A](f: Handle => A): A = {
    db.inTransaction(new TransactionCallback[A] {
      def inTransaction(handle: Handle, status: TransactionStatus): A = f(handle)
    })
  }

  /** Executes the given function within a transaction of the given isolation level.
   *
   * @tparam A the return type of the function to execute.
   * @param isolation the isolation level for the transaction.
   * @param f the function to execute within the transaction.
   * @return the result of the function.
   * @throws Exception if an Exception is thrown by the function, the transaction will be
   *                   rolled-back.
   * @deprecated This method cannot be called. See https://issues.scala-lang.org/browse/SI-8021
   *             Use JDBIWrapper#inTransactionWithIsolation.
   */
  def inTransaction[A](isolation: TransactionIsolationLevel)
                     (f: (Handle, TransactionStatus) => A): A = {
    db.inTransaction(isolation, new TransactionCallback[A] {
      def inTransaction(handle: Handle, status: TransactionStatus): A = f(handle, status)
    })
  }

  /** Executes the given function within a transaction of the given isolation level.
   *
   * @tparam A the return type of the function to execute.
   * @param isolation the isolation level for the transaction.
   * @param f the function to execute within the transaction.
   * @return the result of the function.
   * @throws Exception if an Exception is thrown by the function, the transaction will be
   *                   rolled-back.
   * @deprecated This method cannot be called. See https://issues.scala-lang.org/browse/SI-8021
   *             Use JDBIWrapper#inTransactionWithIsolation.
   */
  def inTransaction[A](isolation: TransactionIsolationLevel)
                     (f: Handle => A): A = {
    db.inTransaction(isolation, new TransactionCallback[A] {
      def inTransaction(handle: Handle, status: TransactionStatus): A = f(handle)
    })
  }

  /** Executes the given function within a transaction of the given isolation level.
   * This method has been added to break the ambiguity of the methods above.
   *
   * @tparam A the return type of the function to execute.
   * @param isolation the isolation level for the transaction.
   * @param f the function to execute within the transaction.
   * @return the result of the function.
   * @throws Exception if an Exception is thrown by the function, the transaction will be
   *                   rolled-back.
   */
  def inTransactionWithIsolation[A](isolation: TransactionIsolationLevel)(f: (Handle, TransactionStatus) => A): A = {
    db.inTransaction(isolation, new TransactionCallback[A] {
      def inTransaction(handle: Handle, status: TransactionStatus): A = f(handle, status)
    })
  }

  /** Applies the given function with a DBI [[org.skife.jdbi.v2.Handle]].
   *
   * @tparam A the return type of the function to apply.
   * @param f the function to apply the handle to.
   * @return the result of applying the function.
   * @throws Exception if an Exception is thrown by the function.
   */
  def withHandle[A](f: Handle => A): A = {
    db.withHandle(new HandleCallback[A] {
      def withHandle(handle: Handle): A = f(handle)
    })
  }

  /** Extends this DBI to support for-comprehensions for transactions. */
  def transaction: JDBITransactionWrapper =
    new JDBITransactionWrapper(this)
}
/** Provides for-comprehension support for composable transactions. */
class JDBITransactionWrapper private[jdbi] (dbi: JDBIWrapper) {
  // Each operation runs the supplied function inside a transaction on the wrapped DBI.
  def map[A](f: Handle => A): A = dbi.inTransaction(f)
  def flatMap[A](f: Handle => A): A = dbi.inTransaction(f)
  def foreach(f: Handle => Unit): Unit = dbi.inTransaction(f)
}
// Explicit result type added: implicit conversions should always declare their
// result type (deprecated to omit it in later Scala versions).
implicit final def HandleWrapper(handle: Handle): HandleWrapper = new HandleWrapper(handle)
/** Provides idiomatic Scala enhancements to the JDBI API.
 *
 * @param handle the [[org.skife.jdbi.v2.Handle]] instance to wrap.
 */
class HandleWrapper private[jdbi] (handle: Handle) {

  // As in JDBIWrapper, these overloads adapt Scala functions to jDBI's SAM callbacks.

  /** Creates a typed DAO instance attached to this [[org.skife.jdbi.v2.Handle]].
   *
   * @tparam A type of the DAO to create.
   * @return a DAO instance for the specified type.
   */
  def attach[A : ClassTag]: A = {
    handle.attach(classTag[A].runtimeClass.asInstanceOf[Class[A]])
  }

  /** Extends this [[org.skife.jdbi.v2.Handle]] to support the creation of typed DAOs through for-comprehensions. */
  def attachable[A : ClassTag]: HandleDaoWrapper[A] =
    new HandleDaoWrapper[A](handle, classTag[A].runtimeClass.asInstanceOf[Class[A]])

  /** Executes the given function within a transaction.
   *
   * @tparam A the return type of the function to execute.
   * @param f the function to execute within the transaction.
   * @return the result of the function.
   * @throws Exception if an Exception is thrown by the function, the transaction will be
   *                   rolled-back.
   */
  def inTransaction[A](f: Handle => A): A = {
    handle.inTransaction(new TransactionCallback[A] {
      def inTransaction(conn: Handle, status: TransactionStatus): A = f(conn)
    })
  }

  /** Executes the given function within a transaction.
   *
   * @tparam A the return type of the function to execute.
   * @param f the function to execute within the transaction.
   * @return the result of the function.
   * @throws Exception if an Exception is thrown by the function, the transaction will be
   *                   rolled-back.
   */
  def inTransaction[A](f: (Handle, TransactionStatus) => A): A = {
    handle.inTransaction(new TransactionCallback[A] {
      def inTransaction(conn: Handle, status: TransactionStatus): A = f(conn, status)
    })
  }

  /** Executes the given function within a transaction.
   *
   * @tparam A the return type of the function to execute.
   * @param isolation the isolation level for the transaction.
   * @param f the function to execute within the transaction.
   * @return the result of the function.
   * @throws Exception if an Exception is thrown by the function, the transaction will be
   *                   rolled-back.
   */
  def inTransaction[A](isolation: TransactionIsolationLevel)
                     (f: Handle => A): A = {
    handle.inTransaction(isolation, new TransactionCallback[A] {
      def inTransaction(conn: Handle, status: TransactionStatus): A = f(conn)
    })
  }

  /** Executes the given function within a transaction.
   *
   * @tparam A the return type of the function to execute.
   * @param isolation the isolation level for the transaction.
   * @param f the function to execute within the transaction.
   * @return the result of the function.
   * @throws Exception if an Exception is thrown by the function, the transaction will be
   *                   rolled-back.
   */
  def inTransaction[A](isolation: TransactionIsolationLevel)
                     (f: (Handle, TransactionStatus) => A): A = {
    handle.inTransaction(isolation, new TransactionCallback[A] {
      def inTransaction(conn: Handle, status: TransactionStatus): A = f(conn, status)
    })
  }
}
/** Supports for-comprehension creation of DAOs attached to a handle. */
class HandleDaoWrapper[A] private [jdbi] (handle: Handle, clazz: Class[A]) {
  //require(handle.isInTransaction, "handle must be in a transaction")
  def map[B](f: A => B): B = {
    val dao = handle.attach(clazz)
    f(dao)
  }
  def flatMap[B](f: A => B): B = map(f)
  def foreach(f: A => Unit): Unit = map(f)
}
// Explicit result type added: implicit conversions without a declared result
// type are fragile under inference and deprecated in later Scala versions.
implicit final def TransactionalWrapper[A <: Transactional[A]](transactional: A): TransactionalWrapper[A] =
  new TransactionalWrapper[A](transactional)
/** Provides enhancements to the Dropwizard jDBI API for transactional DAOs.
 *
 * @param transactional the [[org.skife.jdbi.v2.sqlobject.mixins.Transactional]] object to wrap.
 */
class TransactionalWrapper[A <: Transactional[A]] private[jdbi] (transactional: A) {

  // Each overload adapts a Scala function to jDBI's Transaction SAM interface.

  /** Executes the given function within a transaction of the given isolation level.
   *
   * @tparam B the type of the result of the function being executed.
   * @param isolation the isolation level for the transaction.
   * @param f the function on this object to execute within the transaction.
   * @return the result of the function being executed.
   * @throws Exception if an Exception is thrown by the function, the transaction will be
   *                   rolled-back.
   */
  def inTransaction[B](isolation: TransactionIsolationLevel)
                     (f: A => B): B = {
    transactional.inTransaction[B](isolation, new Transaction[B, A] {
      def inTransaction(tx: A, status: TransactionStatus): B = f(tx)
    })
  }

  /** Executes the given function within a transaction of the given isolation level.
   *
   * @tparam B the type of the result of the function being executed.
   * @param isolation the isolation level for the transaction.
   * @param f the function on this object to execute within the transaction.
   * @return the result of the function being executed.
   * @throws Exception if an Exception is thrown by the function, the transaction will be
   *                   rolled-back.
   */
  def inTransaction[B](isolation: TransactionIsolationLevel)
                     (f: (A, TransactionStatus) => B): B = {
    transactional.inTransaction[B](isolation, new Transaction[B, A] {
      def inTransaction(tx: A, status: TransactionStatus): B = f(tx, status)
    })
  }

  /** Executes the given function within a transaction.
   *
   * @tparam B the type of the result of the function being executed.
   * @param f the function on this object to execute within the transaction.
   * @return the result of the function being executed.
   * @throws Exception if an Exception is thrown by the function, the transaction will be
   *                   rolled-back.
   */
  def inTransaction[B](f: A => B): B = {
    transactional.inTransaction[B](new Transaction[B, A] {
      def inTransaction(tx: A, status: TransactionStatus): B = f(tx)
    })
  }

  /** Executes the given function within a transaction.
   *
   * @tparam B the type of the result of the function being executed.
   * @param f the function on this object to execute within the transaction.
   * @return the result of the function being executed.
   * @throws Exception if an Exception is thrown by the function, the transaction will be
   *                   rolled-back.
   */
  def inTransaction[B](f: (A, TransactionStatus) => B): B = {
    transactional.inTransaction[B](new Transaction[B, A] {
      def inTransaction(tx: A, status: TransactionStatus): B = f(tx, status)
    })
  }
}
}
| datasift/dropwizard-scala | jdbi/src/main/scala/com/datasift/dropwizard/scala/jdbi/package.scala | Scala | apache-2.0 | 13,658 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.BindReferences.bindReferences
import org.apache.spark.sql.types._
/**
 * An interpreted row ordering comparator.
 */
class InterpretedOrdering(ordering: Seq[SortOrder]) extends Ordering[InternalRow] {

  // Convenience constructor that binds the sort expressions to the input schema first.
  def this(ordering: Seq[SortOrder], inputSchema: Seq[Attribute]) =
    this(bindReferences(ordering, inputSchema))

  // Compares two rows key by key; the first non-equal key decides the result.
  def compare(a: InternalRow, b: InternalRow): Int = {
    var i = 0
    val size = ordering.size
    while (i < size) {
      val order = ordering(i)
      val left = order.child.eval(a)
      val right = order.child.eval(b)
      if (left == null && right == null) {
        // Both null, continue looking.
      } else if (left == null) {
        // A single null sorts first or last according to the key's null ordering.
        return if (order.nullOrdering == NullsFirst) -1 else 1
      } else if (right == null) {
        return if (order.nullOrdering == NullsFirst) 1 else -1
      } else {
        // Non-null values use the type's natural ordering, reversed for descending keys.
        val comparison = order.dataType match {
          case dt: AtomicType if order.direction == Ascending =>
            dt.ordering.asInstanceOf[Ordering[Any]].compare(left, right)
          case dt: AtomicType if order.direction == Descending =>
            dt.ordering.asInstanceOf[Ordering[Any]].reverse.compare(left, right)
          case a: ArrayType if order.direction == Ascending =>
            a.interpretedOrdering.asInstanceOf[Ordering[Any]].compare(left, right)
          case a: ArrayType if order.direction == Descending =>
            a.interpretedOrdering.asInstanceOf[Ordering[Any]].reverse.compare(left, right)
          case s: StructType if order.direction == Ascending =>
            s.interpretedOrdering.asInstanceOf[Ordering[Any]].compare(left, right)
          case s: StructType if order.direction == Descending =>
            s.interpretedOrdering.asInstanceOf[Ordering[Any]].reverse.compare(left, right)
          case other =>
            throw new IllegalArgumentException(s"Type $other does not support ordered operations")
        }
        if (comparison != 0) {
          return comparison
        }
      }
      i += 1
    }
    // All sort keys compared equal.
    return 0
  }
}
object InterpretedOrdering {

  /**
   * Creates an [[InterpretedOrdering]] for the given schema, in natural ascending order.
   */
  def forSchema(dataTypes: Seq[DataType]): InterpretedOrdering = {
    val orders = dataTypes.zipWithIndex.map { case (dt, index) =>
      SortOrder(BoundReference(index, dt, nullable = true), Ascending)
    }
    new InterpretedOrdering(orders)
  }
}
object RowOrdering {

  /**
   * Returns true iff the data type can be ordered (i.e. can be sorted).
   */
  def isOrderable(dataType: DataType): Boolean = dataType match {
    case NullType                => true
    case _: AtomicType           => true
    case CalendarIntervalType    => true
    case struct: StructType      => struct.fields.forall(field => isOrderable(field.dataType))
    case array: ArrayType        => isOrderable(array.elementType)
    case udt: UserDefinedType[_] => isOrderable(udt.sqlType)
    case _                       => false
  }

  /**
   * Returns true iff outputs from the expressions can be ordered.
   */
  def isOrderable(exprs: Seq[Expression]): Boolean =
    exprs.forall(expr => isOrderable(expr.dataType))
}
| caneGuy/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ordering.scala | Scala | apache-2.0 | 4,010 |
/*
* Copyright (C) 2011 Mikhail Vorozhtsov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.mvv.sawoko
import java.nio.ByteBuffer
import java.nio.channels.{
SelectableChannel, SelectionKey, SocketChannel, ServerSocketChannel,
ReadableByteChannel, WritableByteChannel}
import java.net.SocketAddress
/** Wait-capability marker for executors that support NIO readiness polling. */
trait NioPollWaitCap extends WaitCap
/**
 * Wait operation that becomes ready when `channel` reports any of the
 * interest operations in `ops`. The result type is `Int` (presumably the
 * ready-ops mask — confirm against the executor implementation).
 */
final case class NioPoll(channel: SelectableChannel, ops: Int) extends WaitOp {
  // `ops` must be non-zero and a subset of the operations this channel supports.
  require(ops != 0 && (ops & channel.validOps) == ops)
  type Cap = NioPollWaitCap
  type Result = Int
}
/** Executor that can close selectable channels asynchronously. */
trait NioAsyncExecutor extends AsyncExecutor {
  // Per NioCloseOp.register below: a `true` return means the close was
  // suspended and `callback` will be invoked later; `false` means it
  // completed synchronously.
  def registerNioClose(pid: Pid, channel: SelectableChannel,
                       callback: Callback[NioAsyncExecutor, Unit]): Boolean
}
/**
 * Asynchronous operation that closes a selectable channel through a
 * [[NioAsyncExecutor]].
 */
final case class NioCloseOp(channel: SelectableChannel)
    extends AsyncOp[NioAsyncExecutor, Unit] {
  /** Yields `None` while the close is suspended, an immediate success otherwise. */
  def register(ep: EP, callback: Callback) =
    if (ep.executor.registerNioClose(ep.pid, channel, callback)) None
    else Some(Success(()))
}
/**
 * Monadic combinators for non-blocking NIO channel work: readiness polling,
 * connect/accept loops, and read/write loops that suspend via [[NioPoll]]
 * whenever the channel is not ready.
 *
 * Most operations come in four flavours: plain, with a [[Timeout]], racing an
 * extra [[WaitOp]] `op` (results come back as `Left(opResult)` when `op`
 * fires first), and racing `op` under a timeout.
 */
trait NioOps {
  import AsyncOps._
  import WaitOps._
  import YieldOps._
  /** Suspends until `channel` is ready for any of the selector `ops`. */
  @inline
  def poll(channel: SelectableChannel, ops: Int) =
    waitOne(NioPoll(channel, ops))
  /** As `poll`, but waits at most `timeout`. */
  @inline
  def poll(channel: SelectableChannel, ops: Int, timeout: Timeout) =
    waitOne(NioPoll(channel, ops), timeout)
  /** Closes `channel` through the executor (see [[NioCloseOp]]). */
  @inline
  def close(channel: SelectableChannel) =
    exec(NioCloseOp(channel))
  /** Connects, polling for OP_CONNECT until `finishConnect` succeeds. */
  def connect(channel: SocketChannel, address: SocketAddress) = yieldM >> {
    if (channel.connect(address))
      unitM
    else repeatM {
      poll(channel, SelectionKey.OP_CONNECT) >>
      unlessM(channel.finishConnect)
    }
  }
  /** As `connect`; yields `false` when the timeout elapses first. */
  def connect(channel: SocketChannel, address: SocketAddress,
              timeout: Timeout) = yieldM >> {
    if (channel.connect(address))
      trueM
    else forM(timeout.base) { case base =>
      poll(channel, SelectionKey.OP_CONNECT, base.timeout).flatMap {
        case Some(_) =>
          unlessM(channel.finishConnect, base.adjust, true)
        case None =>
          breakM(false)
      }
    }
  }
  /** Connect racing `op`; `Left` carries `op`'s result when it fires first. */
  def connect[W <: WaitOp](
      channel: SocketChannel, address: SocketAddress, op: W) =
    waitOne(op, Timeout.Now).flatMap {
      case Some(result) =>
        LeftM(result)
      case None =>
        if (channel.connect(address))
          RightUnitM
        else repeatM {
          waitMany(op ?: NioPoll(channel, SelectionKey.OP_CONNECT)).flatMap {
            case R1(result) =>
              breakM(Left(result))
            case _ =>
              breakIfM(channel.finishConnect, RightUnit)
          }
        }
    }
  /** Connect racing `op` under a timeout; `Right(false)` means timed out. */
  def connect[W <: WaitOp](
      channel: SocketChannel, address: SocketAddress, op: W,
      timeout: Timeout) =
    waitOne(op, Timeout.Now).flatMap {
      case Some(result) =>
        LeftM(result)
      case None =>
        if (channel.connect(address))
          RightM(true)
        else forM(timeout.base) { base =>
          waitMany(op ?: NioPoll(channel, SelectionKey.OP_CONNECT),
              base.timeout).flatMap {
            case Some(R1(result)) =>
              breakM(Left(result))
            case Some(_) =>
              unlessM(channel.finishConnect, base.adjust, Right(true))
            case None =>
              breakM(Right(false))
          }
        }
    }
  /** Accepts one connection, polling for OP_ACCEPT while none is pending. */
  def accept(channel: ServerSocketChannel) = yieldM >> repeatM {
    channel.accept match {
      case null =>
        poll(channel, SelectionKey.OP_ACCEPT) >>
        continueM
      case s =>
        breakM(s)
    }
  }
  /** As `accept`; `None` when the timeout elapses before a connection arrives. */
  def accept(channel: ServerSocketChannel, timeout: Timeout) = yieldM >> {
    channel.accept match {
      case null => forM(timeout.base) { base =>
        poll(channel, SelectionKey.OP_ACCEPT, base.timeout).flatMap {
          case Some(_) => channel.accept match {
            case null => continueM(base.adjust)
            case s => breakM(Some(s))
          }
          case None => breakM(None)
        }
      }
      case s =>
        SomeM(s)
    }
  }
  /** Accept racing `op`; `Left` carries `op`'s result when it fires first. */
  def accept[W <: WaitOp](channel: ServerSocketChannel, op: W) =
    waitOne(op, Timeout.Now).flatMap {
      case Some(value) => LeftM(value)
      case None => channel.accept match {
        case null => repeatM {
          waitMany(op ?: NioPoll(channel, SelectionKey.OP_ACCEPT)).flatMap {
            case R1(result) => breakM(Left(result))
            case _ => channel.accept match {
              case null => continueM
              case s => breakM(Right(s))
            }
          }
        }
        case s => RightM(s)
      }
    }
  /** Accept racing `op` under a timeout; `None` means timed out. */
  def accept[W <: WaitOp](
      channel: ServerSocketChannel, op: W, timeout: Timeout) =
    waitOne(op, Timeout.Now).flatMap {
      case Some(value) => SomeM(Left(value))
      case None => channel.accept match {
        case null => forM(timeout.base) { base =>
          waitMany(op ?: NioPoll(channel, SelectionKey.OP_ACCEPT),
              base.timeout).flatMap {
            case Some(R1(result)) => breakM(Some(Left(result)))
            case Some(_) => channel.accept match {
              case null => continueM(base.adjust)
              case s => breakM(Some(Right(s)))
            }
            case None => breakM(None)
          }
        }
        case s => SomeM(Right(s))
      }
    }
  /**
   * Reads until `dst` is full or EOF. Returns the byte count read, or -1 on
   * EOF with nothing read at all.
   */
  def read(channel: SelectableChannel with ReadableByteChannel,
           dst: ByteBuffer) = yieldM >> forM(0) { alreadyRead =>
    val n = channel.read(dst)
    if (n < 0)
      breakM(if (alreadyRead == 0) -1 else alreadyRead)
    else if (dst.remaining == 0)
      breakM(alreadyRead + n)
    else
      poll(channel, SelectionKey.OP_READ) >>
      continueM(alreadyRead + n)
  }
  /** Reads at least one byte; -1 on EOF, 0 only when `dst` has no room. */
  def readSome(channel: SelectableChannel with ReadableByteChannel,
               dst: ByteBuffer) = yieldM >> repeatM {
    val n = channel.read(dst)
    if (n < 0)
      breakM(-1)
    else if (n > 0 || dst.remaining == 0)
      breakM(n)
    else
      poll(channel, SelectionKey.OP_READ) >>
      continueM
  }
  // NOTE(review): on timeout this yields 0 even when some bytes were already
  // read, and the full `timeout` is re-applied on every poll instead of using
  // `base.adjust` as the other timed overloads do — confirm intended.
  def read(channel: SelectableChannel with ReadableByteChannel,
           dst: ByteBuffer, timeout: Timeout) =
    yieldM >>
    forM(0) { alreadyRead =>
      val n = channel.read(dst)
      if (n < 0)
        breakM(if (alreadyRead == 0) -1 else alreadyRead)
      else if (dst.remaining == 0)
        breakM(alreadyRead + n)
      else
        poll(channel, SelectionKey.OP_READ, timeout).flatMap {
          case Some(_) => continueM(alreadyRead + n)
          case None => breakM(0)
        }
    }
  /** As `readSome`; yields 0 when the timeout elapses before data arrives. */
  def readSome(channel: SelectableChannel with ReadableByteChannel,
               dst: ByteBuffer, timeout: Timeout) = yieldM >> {
    val n = channel.read(dst)
    if (n < 0)
      pure(-1)
    else if (n > 0 || dst.remaining == 0)
      pure(n)
    else forM(timeout.base) { base =>
      poll(channel, SelectionKey.OP_READ, base.timeout).flatMap {
        case Some(_) =>
          val n = channel.read(dst)
          if (n < 0)
            breakM(-1)
          else if (n > 0 || dst.remaining == 0)
            breakM(n)
          else
            continueM(base.adjust)
        case None =>
          breakM(0)
      }
    }
  }
  /** Read racing `op`; `Left` carries `op`'s result when it fires first. */
  def read[W <: WaitOp](
      channel: SelectableChannel with ReadableByteChannel,
      dst: ByteBuffer, op: W) =
    waitOne(op, Timeout.Now).flatMap {
      case Some(result) => LeftM(result)
      case None => forM(0) { alreadyRead =>
        val n = channel.read(dst)
        if (n < 0)
          breakM(Right(if (alreadyRead == 0) -1 else alreadyRead))
        else if (dst.remaining == 0)
          breakM(Right(alreadyRead + n))
        else
          waitMany(op ?: NioPoll(channel, SelectionKey.OP_READ)).flatMap {
            case R1(value) => breakM(Left(value))
            case _ => continueM(alreadyRead + n)
          }
      }
    }
  /** `readSome` racing `op`. */
  def readSome[W <: WaitOp](
      channel: SelectableChannel with ReadableByteChannel,
      dst: ByteBuffer, op: W) =
    waitOne(op, Timeout.Now).flatMap {
      case Some(result) => LeftM(result)
      case None => repeatM {
        val n = channel.read(dst)
        if (n < 0)
          breakM(Right(-1))
        else if (n > 0 || dst.remaining == 0)
          breakM(Right(n))
        else
          waitMany(op ?: NioPoll(channel, SelectionKey.OP_READ)).flatMap {
            case R1(value) => breakM(Left(value))
            case _ => continueM
          }
      }
    }
  /** Read racing `op` under a timeout; timeout yields the bytes read so far. */
  def read[W <: WaitOp](
      channel: SelectableChannel with ReadableByteChannel,
      dst: ByteBuffer, op: W, timeout: Timeout) =
    waitOne(op, Timeout.Now).flatMap {
      case Some(result) => LeftM(result)
      case None => forM((timeout.base, 0)) { case (base, alreadyRead) =>
        val n = channel.read(dst)
        if (n < 0)
          breakM(Right(if (alreadyRead == 0) -1 else alreadyRead))
        else if (dst.remaining == 0)
          breakM(Right(alreadyRead + n))
        else
          waitMany(op ?: NioPoll(channel, SelectionKey.OP_READ),
              base.timeout).flatMap {
            case Some(R1(value)) => breakM(Left(value))
            case Some(_) => continueM((base.adjust, alreadyRead + n))
            case None => breakM(Right(alreadyRead + n))
          }
      }
    }
  /** `readSome` racing `op` under a timeout; `Right(0)` means timed out. */
  def readSome[W <: WaitOp](
      channel: SelectableChannel with ReadableByteChannel,
      dst: ByteBuffer, op: W, timeout: Timeout) =
    waitOne(op, Timeout.Now).flatMap {
      case Some(result) => LeftM(result)
      case None => forM(timeout.base) { base =>
        val n = channel.read(dst)
        if (n < 0)
          breakM(Right(-1))
        else if (n > 0 || dst.remaining == 0)
          breakM(Right(n))
        else
          waitMany(op ?: NioPoll(channel, SelectionKey.OP_READ),
              base.timeout).flatMap {
            case Some(R1(value)) => breakM(Left(value))
            case Some(_) => continueM(base.adjust)
            case None => breakM(Right(0))
          }
      }
    }
  /** Writes all of `src`, polling for OP_WRITE while the channel is busy. */
  def write(channel: SelectableChannel with WritableByteChannel,
            src: ByteBuffer) = yieldM >> forM(0) { alreadyWrote =>
    val n = channel.write(src)
    if (src.remaining == 0)
      breakM(alreadyWrote + n)
    else
      poll(channel, SelectionKey.OP_WRITE) >>
      continueM(alreadyWrote + n)
  }
  /** As `write`; timeout yields the number of bytes written so far. */
  def write(channel: SelectableChannel with WritableByteChannel,
            src: ByteBuffer, timeout: Timeout) =
    yieldM >>
    forM((timeout.base, 0)) { case (base, alreadyWrote) =>
      val n = channel.write(src)
      if (src.remaining == 0)
        breakM(alreadyWrote + n)
      else
        poll(channel, SelectionKey.OP_WRITE, base.timeout).flatMap {
          case Some(_) => continueM((base.adjust, alreadyWrote + n))
          case None => breakM(alreadyWrote + n)
        }
    }
  // NOTE(review): on success this breaks with a bare Int while the `op`
  // branch breaks with Left(...), so the result type widens; the read[W]
  // counterpart wraps the success value in Right(...) — confirm intended.
  def write[W <: WaitOp](
      channel: SelectableChannel with WritableByteChannel,
      src: ByteBuffer, op: W) =
    waitOne(op, Timeout.Now).flatMap {
      case Some(result) => LeftM(result)
      case None => forM(0) { alreadyWrote =>
        val n = channel.write(src)
        if (src.remaining == 0)
          breakM(alreadyWrote + n)
        else
          waitMany(op ?: NioPoll(channel, SelectionKey.OP_WRITE)).flatMap {
            case R1(result) => breakM(Left(result))
            case _ => continueM(alreadyWrote + n)
          }
      }
    }
  // NOTE(review): same widening here — the success branch breaks with a bare
  // Int but the timeout branch breaks with Right(...) — confirm intended.
  def write[W <: WaitOp](
      channel: SelectableChannel with WritableByteChannel,
      src: ByteBuffer, op: W, timeout: Timeout) =
    waitOne(op, Timeout.Now).flatMap {
      case Some(result) => LeftM(result)
      case None => forM((timeout.base, 0)) { case (base, alreadyWrote) =>
        val n = channel.write(src)
        if (src.remaining == 0)
          breakM(alreadyWrote + n)
        else
          waitMany(op ?: NioPoll(channel, SelectionKey.OP_WRITE),
              base.timeout).flatMap {
            case Some(R1(result)) => breakM(Left(result))
            case Some(_) => continueM(base.adjust, alreadyWrote + n)
            case None => breakM(Right(alreadyWrote + n))
          }
      }
    }
}
/** Default instance of the NIO combinators, for direct importing. */
object NioOps extends NioOps
| mvv/sawoko | src/Nio.scala | Scala | apache-2.0 | 12,515 |
package org.jetbrains.plugins.scala
package codeInsight.intention.literal
import com.intellij.application.options.CodeStyle
import com.intellij.codeInsight.intention.PsiElementBaseIntentionAction
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiElement
import com.intellij.psi.codeStyle.CodeStyleSettingsManager
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.util.MultilineStringUtil
/**
* User: Dmitry Naydanov
* Date: 4/2/12
*/
/**
 * IntelliJ intention that appends `.stripMargin` to a multiline string
 * literal, passing the configured margin character explicitly when it is not
 * the default '|'.
 */
class AddStripMarginToMLStringIntention extends PsiElementBaseIntentionAction{
  // Available only on multiline string tokens that contain the escape checked
  // below and that MultilineStringUtil reports as needing a stripMargin call.
  def isAvailable(project: Project, editor: Editor, element: PsiElement): Boolean = {
    if (element == null || element.getNode == null || element.getNode.getElementType != ScalaTokenTypes.tMULTILINE_STRING ||
      !element.getText.contains("\\n")) return false
    MultilineStringUtil.needAddStripMargin(element, getMarginChar(project))
  }
  def getFamilyName: String = "Add .stripMargin"
  override def getText: String = "Add 'stripMargin'"
  // Inserts ".stripMargin" (plus the margin char argument when non-default)
  // right after the string literal, inside a write action.
  override def invoke(project: Project, editor: Editor, element: PsiElement) {
    val marginChar = getMarginChar(project)
    val suffix = if (marginChar == "|") "" else "(\\'" + marginChar + "\\')"
    extensions.inWriteAction {
      editor.getDocument.insertString(element.getTextRange.getEndOffset, ".stripMargin" + suffix)
    }
  }
  // Margin character configured in the project's Scala code style settings.
  private def getMarginChar(project: Project): String =
    CodeStyle.getSettings(project).getCustomSettings(classOf[ScalaCodeStyleSettings]).MARGIN_CHAR + ""
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/codeInsight/intention/literal/AddStripMarginToMLStringIntention.scala | Scala | apache-2.0 | 1,666 |
package scalaquantity
import scalaquantity.Exponents._
import Units._
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.{FunSuite, FlatSpec}
/**
* If it compiles, it passed
*/
/**
 * Exercises the type-level exponent arithmetic and the unit-checked quantity
 * DSL. Much of the checking happens at compile time ("if it compiles, it
 * passed"); the asserts confirm the runtime values agree.
 */
class TestUnits extends FunSuite with ShouldMatchers {
  test("An Exponent should support numerals") {
    assert(exponentValue[P0] === 0)
    assert(exponentValue[P1] === 1)
    assert(exponentValue[P4] === 4)
    assert(exponentValue[N3] === -3)
  }
  test("An Exponent should support next and previous") {
    assert(exponentValue[P0#Next] === 1)
    assert(exponentValue[P1#Next] === 2)
    assert(exponentValue[P1#Prev] === 0)
    assert(exponentValue[P0#Next#Prev] === 0)
    assert(exponentValue[P2#Next#Prev#Next#Next] === 4)
  }
  test("An Exponent should support negation") {
    assert(exponentValue[P0#Neg] === 0)
    assert(exponentValue[P3#Neg] === -3)
    assert(exponentValue[P3#Neg#Neg] === 3)
  }
  test("An Exponent should support addition and subtraction") {
    assert(exponentValue[P4#Sub[P3]] === 1)
    assert(exponentValue[P5#Add[P2]] === 7)
  }
  test("A Quantity should have exponents that can be converted to numbers") {
    // Speed = m/s: metre exponent 1, second exponent -1, kilogram exponent 0.
    assert(exponentValue[Speed#M] === 1)
    assert(exponentValue[Speed#S] === -1)
    assert(exponentValue[Speed#KG] === 0)
  }
  test("A Quantity should check units on compile time") {
    val speed: Speed = 120*m / (1*min)
    assert(speed === 2.0*m/s)
    // 10 MN over 10 m^2 = 1 MPa = 10 bar.
    val pressure: Pressure = 10*MN / (10*m2)
    assert(pressure === 10*bar)
    val power: Watt = 9000 * GW
    val tw: Double = power / TW
    assert(tw === 9.0)
  }
}
| zzorn/ScalaQuantity | src/test/scala/scalaquantity/TestUnits.scala | Scala | bsd-3-clause | 1,584 |
import java.util.Calendar
import java.util.concurrent.TimeUnit
import com.sksamuel.elastic4s.ElasticDsl.{create, index, _}
import com.sksamuel.elastic4s.mappings.FieldType.{DateType, IntegerType, StringType}
import com.sksamuel.elastic4s.{ElasticClient, ElasticsearchClientUri}
import org.elasticsearch.common.settings.ImmutableSettings
import org.elasticsearch.search.sort.SortOrder
import scala.util.parsing.json.JSON
import scalaj.http._
/**
 * Crawler that queries the Deutsche Welle (DW) public search API for a set of
 * keywords over a sliding ~one-month window, fetches each matching article,
 * and indexes it into an Elasticsearch cluster.
 */
object DWNewsClient {
  // Query configuration: search terms, with spaces encoded as '+'.
  val keywords = List("real+madrid","BBVA", "Repsol")
  // DW language ids: 28 (Spanish), 2 (English).
  //val langIDs = List("2","28")
  val langIDs = List("2")
  // Date window bounds, format yyyy-mm-dd.
  val lowerLimitDate = "2012-07-01"
  val upperLimitDate = "2015-12-31"
  val formatter = new java.text.SimpleDateFormat("yyyy-MM-dd")
  // FIX: the pattern was "yyyy-MM-dd'T'mm:hh:ss.000'Z'", which put minutes
  // where the hours go and used the 12-hour clock ('hh'); DW display dates
  // are ISO-8601-like, so the time-of-day part must be HH:mm:ss.
  val DWDateFormatter = new java.text.SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.000'Z'")
  // Elasticsearch connection settings.
  val ip_host = "136.243.53.82"
  // val ip_host = "localhost"
  val cluster_name = "elasticsearch"
  // val cluster_name = "mixedem_elasticsearch"
  // Name of the index
  val es_index = "dw_news"
  // DW language id -> ISO 639-1 code used in the indexed documents.
  val langMap = Map(28 -> "es", 2 -> "en")
  // Number of result pages that actually contained hits (diagnostics only).
  var pageWithResultsCount = 0

  /**
   * Advances the sliding window: returns Array(previous upper bound,
   * previous upper bound + 30 days), both formatted as yyyy-MM-dd.
   */
  def getLimitDates(currentDates: Array[String]): Array[String] = {
    val fCurrentDate = formatter.parse(currentDates(1))
    val oneMonthMillis = TimeUnit.DAYS.toMillis(30)
    val calendar = Calendar.getInstance()
    calendar.setTimeInMillis(fCurrentDate.getTime + oneMonthMillis)
    val d = new Array[String](2)
    d(0) = currentDates(1)
    d(1) = formatter.format(calendar.getTime())
    println("from: " + d(0) + " to: " + d(1))
    d
  }

  /**
   * Parses a DW search response and returns, for each hit, its "reference"
   * object (a map containing id, type, name and url). Returns an empty list
   * when the page has no results.
   *
   * NOTE: the inner matches are partial; a malformed response raises
   * MatchError, as in the original implementation.
   */
  def processJSON(jstr: String): List[Map[String, Any]] = {
    val json = JSON.parseFull(jstr)
    val resultCount = json match {
      case Some(m: Map[String, Any]) => {
        m.get("resultCount") match {
          case Some(i: Any) => i.toString.toDouble
        }
      }
      case _ => throw new Exception("Error parsing array")
    }
    var data = List[Map[String, Any]]()
    if (resultCount != 0) {
      pageWithResultsCount += 1
      // From the JSON object, keep the list of result items...
      val items = json match {
        case Some(m: Map[String, Any]) =>
          m.get("items") match {
            case Some(i: List[Map[String, Any]]) => i
          }
      }
      // ...and, for each item, extract its "reference" object.
      data = items.map(x => x.get("reference") match {
        case Some(r: Map[String, Any]) => r
      })
    }
    println("Pages with results:" + pageWithResultsCount)
    data
  }

  /**
   * Fetches the full article behind `inputData`'s url and indexes it into
   * Elasticsearch under the document type reported by DW.
   */
  def insertJSONData(keyword: String, inputData: Map[String, Any]): Unit = {
    // NOTE(review): a fresh client is opened and closed per document; kept
    // for behavioural parity, but reusing one client would be cheaper.
    val uri = ElasticsearchClientUri("elasticsearch://" + ip_host + ":9300")
    val settings = ImmutableSettings.settingsBuilder().put("cluster.name", cluster_name).build()
    val client = ElasticClient.remote(settings, uri)
    // The type of the documents to be indexed
    val es_type = inputData.get("type") match {
      case Some(d: Any) => d.toString
    }
    // Create the index (with its mapping) if it does not exist yet.
    if (client.execute{index exists es_index}.await.isExists == false) {
      println("Index did not exist")
      client.execute {
        create index es_index mappings (
          es_type as(
            "dw_id" typed IntegerType,
            "lang" typed StringType,
            "name" typed StringType,
            "url" typed StringType,
            "text" typed StringType,
            "keyword" typed StringType,
            "project" typed StringType,
            "created_at" typed DateType
            )
          )
      }.await
    } else {
      println("Index exists")
    }
    // Extracting the value of the fields from the input data
    val f_id = inputData.get("id") match {
      case Some(d: Any) => "dw_"+ d.toString
    }
    val f_name = inputData.get("name") match {
      case Some(d: Any) => d
    }
    val f_url = inputData.get("url") match {
      case Some(d: Any) => d
    }
    println("Url: "+ f_url)
    val f_keyword = keyword.replace("+","_")
    // The article body, display date and language are fetched from the url.
    val response: HttpResponse[String] = Http(f_url.toString).asString
    var f_text = ""
    var f_displayDate = Calendar.getInstance().getTime()
    var f_lang = ""
    if (response.isNotError) {
      val json = JSON.parseFull(response.body)
      f_text = json match {
        case Some(m: Map[String, Any]) => m.get("text") match {
          case Some(i: Any) => i.toString
        }
      }
      f_displayDate = json match {
        case Some(m: Map[String, Any]) => m.get("displayDate") match {
          case Some(i: Any) => DWDateFormatter.parse(i.toString)
        }
      }
      val langID = json match {
        case Some(m: Map[String, Any]) => m.get("languageId") match {
          case Some(d: Any) => d.toString.toFloat.toInt
        }
      }
      println("LangID " + langID)
      // Map the article's DW language id to its ISO code.
      f_lang = langMap.get(langID) match {
        case Some(s: String) => s
      }
    }
    // Once every field has been obtained, it is indexed in ES
    client.execute {
      index into es_index / es_type id f_id fields(
        "id" -> f_id,
        "lang" -> f_lang,
        "name" -> f_name,
        "url" -> f_url,
        "text" -> f_text,
        "keyword" -> f_keyword,
        "project" -> f_keyword,
        "created_at" -> f_displayDate
        )
    }.await
    client.close()
  }

  /**
   * Intended to look up the date of the newest indexed document.
   * NOTE(review): as in the original, the search result is discarded, the
   * client is never closed, and the method simply returns `lowerLimitDate`.
   */
  def getLowerLimitDate(): String = {
    val uri = ElasticsearchClientUri("elasticsearch://" + ip_host + ":9300")
    val settings = ImmutableSettings.settingsBuilder().put("cluster.name", cluster_name).build()
    val client = ElasticClient.remote(settings, uri)
    client.execute{search in "dw_news" query(
      "*:*"
      ) sort( field sort "created_at" order(SortOrder.DESC))
    }
    lowerLimitDate
  }

  /** Crawls every (keyword, language) pair over the configured date range. */
  def main (args: Array[String]) {
    for(keyword <- keywords) {
      println("Querying for "+ keyword)
      for(langID <- langIDs){
        println("In language: " + langID)
        // FIX: `z` and `dates` used to be declared once outside both loops
        // and were never reset, so every (keyword, language) pair after the
        // first one skipped the while loop entirely. They are now
        // re-initialised for each pair.
        var z = true
        var dates = Array("", lowerLimitDate)
        // Slide the window until it passes upperLimitDate.
        while (z) {
          dates = getLimitDates(dates)
          if (formatter.parse(dates(1)).getTime > formatter.parse(upperLimitDate).getTime) {
            z = false
          }
          // Build and issue the DW search request for this window.
          val dw_query = "terms=" + keyword + "&languageId=" + langID + "&contentTypes=Article,Video&startDate=" +
            dates(0) + "&endDate=" + dates(1) + "&sortByDate=true&pageIndex=1&asTeaser=false"
          val response: HttpResponse[String] = Http("http://www.dw.com/api/search/global?" + dw_query).asString
          if (response.isNotError) {
            processJSON(response.body).foreach(x => insertJSONData(keyword, x))
          }
          // Be polite to the remote service.
          Thread.sleep(500)
        }
      }
    }
    println("Finished")
  }
}
| canademar/me_extractors | DWClient2/src/main/scala/DWNewsClient.scala | Scala | gpl-2.0 | 8,039 |
/*
* Copyright 2013-2017 Tsukasa Kitachi
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package configs.beans
import com.typesafe.config.ConfigFactory
import configs.{ConfigReader, ConfigWriter}
import scala.beans.BeanProperty
import scalaprops.Property.forAll
import scalaprops.Scalaprops
/**
 * Verifies that `@ignoredBeanProperties` excludes the named bean properties
 * from both derived ConfigReader and ConfigWriter instances.
 */
object IgnoredBeanPropertiesTest extends Scalaprops {
  // Property type with no reader/writer instance available; derivation only
  // succeeds because the property referencing it is ignored.
  trait NotHaveInstance
  class SomeBean(
    @BeanProperty var intValue: Int,
    @BeanProperty var notHaveInstance: NotHaveInstance,
    @BeanProperty var nested: Nested) {
    def this() = this(0, null, null)
  }
  class Nested(@BeanProperty var intValue: Int) {
    def this() = this(0)
  }
  // Reading: the ignored property is skipped and the remaining ones bind.
  val ignoreOnRead = forAll {
    @ignoredBeanProperties("notHaveInstance")
    implicit val reader: ConfigReader[SomeBean] = ConfigReader.derive[SomeBean]
    val config = ConfigFactory.parseString(
      """int-value = 100
        |nested.int-value = 200
        |""".stripMargin)
    reader.extract(config).exists { b =>
      b.intValue == 100 && b.nested.intValue == 200
    }
  }
  // Writing: the ignored property does not appear in the produced config.
  val ignoreOnWrite = forAll {
    @ignoredBeanProperties("notHaveInstance")
    implicit val writer: ConfigWriter[SomeBean] = ConfigWriter.derive[SomeBean]
    val value = writer.write(new SomeBean(100, new NotHaveInstance {}, new Nested(200)))
    value == ConfigFactory.parseString(
      """int-value = 100
        |nested.int-value = 200
        |""".stripMargin).root()
  }
}
| kxbmap/configs | core/src/test/scala/configs/beans/IgnoredBeanPropertiesTest.scala | Scala | apache-2.0 | 1,942 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.typeutils
import org.apache.flink.api.common.typeinfo.BasicTypeInfo._
import org.apache.flink.api.common.typeinfo._
import org.apache.flink.api.common.typeutils.CompositeType
import org.apache.flink.api.java.typeutils.{MapTypeInfo, ObjectArrayTypeInfo, PojoTypeInfo}
import org.apache.flink.table.api.ValidationException
import org.apache.flink.table.typeutils.TimeIntervalTypeInfo.{INTERVAL_MILLIS, INTERVAL_MONTHS}
import org.apache.flink.table.validate._
object TypeInfoCheckUtils {

  /**
   * Checks if type information is an advanced type that can be converted to a
   * SQL type but NOT vice versa.
   */
  def isAdvanced(dataType: TypeInformation[_]): Boolean = dataType match {
    case _: TimeIndicatorTypeInfo => false
    case _: BasicTypeInfo[_] => false
    case _: SqlTimeTypeInfo[_] => false
    case _: TimeIntervalTypeInfo[_] => false
    case _ => true
  }
  /**
   * Checks if type information is a simple type that can be converted to a
   * SQL type and vice versa.
   */
  def isSimple(dataType: TypeInformation[_]): Boolean = !isAdvanced(dataType)
  /** True for numeric primitive types and BigDecimal. */
  def isNumeric(dataType: TypeInformation[_]): Boolean = dataType match {
    case _: NumericTypeInfo[_] => true
    case BIG_DEC_TYPE_INFO => true
    case _ => false
  }
  /** True for time points (date/time/timestamp) and time intervals. */
  def isTemporal(dataType: TypeInformation[_]): Boolean =
    isTimePoint(dataType) || isTimeInterval(dataType)
  /** True for SQL date/time/timestamp types. */
  def isTimePoint(dataType: TypeInformation[_]): Boolean =
    dataType.isInstanceOf[SqlTimeTypeInfo[_]]
  /** True for interval types (months or milliseconds). */
  def isTimeInterval(dataType: TypeInformation[_]): Boolean =
    dataType.isInstanceOf[TimeIntervalTypeInfo[_]]
  /** True for the String type. */
  def isString(dataType: TypeInformation[_]): Boolean = dataType == STRING_TYPE_INFO
  /** True for the Boolean type. */
  def isBoolean(dataType: TypeInformation[_]): Boolean = dataType == BOOLEAN_TYPE_INFO
  /** True for the BigDecimal type. */
  def isDecimal(dataType: TypeInformation[_]): Boolean = dataType == BIG_DEC_TYPE_INFO
  /** True for the 32-bit Int type specifically. */
  def isInteger(dataType: TypeInformation[_]): Boolean = dataType == INT_TYPE_INFO
  /** True for any integer-family type (byte, short, int, long, char). */
  def isIntegerFamily(dataType: TypeInformation[_]): Boolean =
    dataType.isInstanceOf[IntegerTypeInfo[_]]
  /** True for the 64-bit Long type specifically. */
  def isLong(dataType: TypeInformation[_]): Boolean = dataType == LONG_TYPE_INFO
  /** True for the months-based interval type. */
  def isIntervalMonths(dataType: TypeInformation[_]): Boolean = dataType == INTERVAL_MONTHS
  /** True for the milliseconds-based interval type. */
  def isIntervalMillis(dataType: TypeInformation[_]): Boolean = dataType == INTERVAL_MILLIS
  /** True for object, basic and primitive array type informations. */
  def isArray(dataType: TypeInformation[_]): Boolean = dataType match {
    case _: ObjectArrayTypeInfo[_, _] |
         _: BasicArrayTypeInfo[_, _] |
         _: PrimitiveArrayTypeInfo[_] => true
    case _ => false
  }
  /** True for map type informations. */
  def isMap(dataType: TypeInformation[_]): Boolean =
    dataType.isInstanceOf[MapTypeInfo[_, _]]
  // Arrays are excluded even though Java arrays are Objects: array equality
  // and comparison are reference-based, so they are not treated as comparable.
  def isComparable(dataType: TypeInformation[_]): Boolean =
    classOf[Comparable[_]].isAssignableFrom(dataType.getTypeClass) && !isArray(dataType)
  /**
   * Types that can be easily converted into a string without ambiguity.
   */
  def isSimpleStringRepresentation(dataType: TypeInformation[_]): Boolean =
    isNumeric(dataType) || isString(dataType) || isTemporal(dataType) || isBoolean(dataType)
  /** Validates that `dataType` is numeric, reporting `caller` on failure. */
  def assertNumericExpr(
      dataType: TypeInformation[_],
      caller: String)
    : ValidationResult = dataType match {
    case _: NumericTypeInfo[_] =>
      ValidationSuccess
    case BIG_DEC_TYPE_INFO =>
      ValidationSuccess
    case _ =>
      ValidationFailure(s"$caller requires numeric types, get $dataType here")
  }
  /** Validates that `dataType` is in the integer family, reporting `caller` on failure. */
  def assertIntegerFamilyExpr(
      dataType: TypeInformation[_],
      caller: String)
    : ValidationResult = dataType match {
    case _: IntegerTypeInfo[_] =>
      ValidationSuccess
    case _ =>
      ValidationFailure(s"$caller requires integer types but was '$dataType'.")
  }
  /** Validates that `dataType` can be used as a sort key, reporting `caller` on failure. */
  def assertOrderableExpr(dataType: TypeInformation[_], caller: String): ValidationResult = {
    if (dataType.isSortKeyType) {
      ValidationSuccess
    } else {
      ValidationFailure(s"$caller requires orderable types, get $dataType here")
    }
  }
  /**
   * Checks whether a type implements own hashCode() and equals() methods for storing an instance
   * in Flink's state or performing a keyBy operation.
   *
   * @param name name of the operation
   * @param t type information to be validated
   */
  def validateEqualsHashCode(name: String, t: TypeInformation[_]): Unit = t match {
    // make sure that a POJO class is a valid state type
    case pt: PojoTypeInfo[_] =>
      // we don't check the types recursively to give a chance of wrapping
      // proper hashCode/equals methods around an immutable type
      validateEqualsHashCode(name, pt.getClass)
    // recursively check composite types
    case ct: CompositeType[_] =>
      validateEqualsHashCode(name, t.getTypeClass)
      // we check recursively for entering Flink types such as tuples and rows
      for (i <- 0 until ct.getArity) {
        val subtype = ct.getTypeAt(i)
        validateEqualsHashCode(name, subtype)
      }
    // check other type information only based on the type class
    case _: TypeInformation[_] =>
      validateEqualsHashCode(name, t.getTypeClass)
  }
  /**
   * Checks whether a class implements own hashCode() and equals() methods for storing an instance
   * in Flink's state or performing a keyBy operation.
   *
   * @param name name of the operation
   * @param c class to be validated
   */
  def validateEqualsHashCode(name: String, c: Class[_]): Unit = {
    // skip primitives
    if (!c.isPrimitive) {
      // check the component type of arrays
      if (c.isArray) {
        validateEqualsHashCode(name, c.getComponentType)
      }
      // check type for methods
      else {
        // `eq classOf[Object]` means the method was never overridden.
        if (c.getMethod("hashCode").getDeclaringClass eq classOf[Object]) {
          throw new ValidationException(
            s"Type '${c.getCanonicalName}' cannot be used in a $name operation because it " +
              s"does not implement a proper hashCode() method.")
        }
        if (c.getMethod("equals", classOf[Object]).getDeclaringClass eq classOf[Object]) {
          throw new ValidationException(
            s"Type '${c.getCanonicalName}' cannot be used in a $name operation because it " +
              s"does not implement a proper equals() method.")
        }
      }
    }
  }
  /**
   * Checks if a class is a Java primitive wrapper.
   */
  def isPrimitiveWrapper(clazz: Class[_]): Boolean = {
    clazz == classOf[java.lang.Boolean] ||
      clazz == classOf[java.lang.Byte] ||
      clazz == classOf[java.lang.Character] ||
      clazz == classOf[java.lang.Short] ||
      clazz == classOf[java.lang.Integer] ||
      clazz == classOf[java.lang.Long] ||
      clazz == classOf[java.lang.Double] ||
      clazz == classOf[java.lang.Float]
  }
  /**
   * Checks if one class can be assigned to a variable of another class.
   *
   * Adopted from o.a.commons.lang.ClassUtils#isAssignable(java.lang.Class[], java.lang.Class[])
   * but without null checks.
   */
  def isAssignable(classArray: Array[Class[_]], toClassArray: Array[Class[_]]): Boolean = {
    if (classArray.length != toClassArray.length) {
      return false
    }
    var i = 0
    while (i < classArray.length) {
      if (!isAssignable(classArray(i), toClassArray(i))) {
        return false
      }
      i += 1
    }
    true
  }
  /**
   * Checks if one class can be assigned to a variable of another class.
   *
   * Adopted from o.a.commons.lang.ClassUtils#isAssignable(java.lang.Class, java.lang.Class) but
   * without null checks.
   */
  def isAssignable(cls: Class[_], toClass: Class[_]): Boolean = {
    if (cls.equals(toClass)) {
      return true
    }
    // For primitives, model Java's widening primitive conversions explicitly.
    if (cls.isPrimitive) {
      if (!toClass.isPrimitive) {
        return false
      }
      if (java.lang.Integer.TYPE.equals(cls)) {
        return java.lang.Long.TYPE.equals(toClass) ||
          java.lang.Float.TYPE.equals(toClass) ||
          java.lang.Double.TYPE.equals(toClass)
      }
      if (java.lang.Long.TYPE.equals(cls)) {
        return java.lang.Float.TYPE.equals(toClass) ||
          java.lang.Double.TYPE.equals(toClass)
      }
      if (java.lang.Boolean.TYPE.equals(cls)) {
        return false
      }
      if (java.lang.Double.TYPE.equals(cls)) {
        return false
      }
      if (java.lang.Float.TYPE.equals(cls)) {
        return java.lang.Double.TYPE.equals(toClass)
      }
      if (java.lang.Character.TYPE.equals(cls)) {
        return java.lang.Integer.TYPE.equals(toClass) ||
          java.lang.Long.TYPE.equals(toClass) ||
          java.lang.Float.TYPE.equals(toClass) ||
          java.lang.Double.TYPE.equals(toClass)
      }
      if (java.lang.Short.TYPE.equals(cls)) {
        return java.lang.Integer.TYPE.equals(toClass) ||
          java.lang.Long.TYPE.equals(toClass) ||
          java.lang.Float.TYPE.equals(toClass) ||
          java.lang.Double.TYPE.equals(toClass)
      }
      if (java.lang.Byte.TYPE.equals(cls)) {
        return java.lang.Short.TYPE.equals(toClass) ||
          java.lang.Integer.TYPE.equals(toClass) ||
          java.lang.Long.TYPE.equals(toClass) ||
          java.lang.Float.TYPE.equals(toClass) ||
          java.lang.Double.TYPE.equals(toClass)
      }
      // should never get here
      return false
    }
    toClass.isAssignableFrom(cls)
  }
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/typeutils/TypeInfoCheckUtils.scala | Scala | apache-2.0 | 10,071 |
package org.dberg.hubot
import org.dberg.hubot.brain.MapdbBackend
import org.scalatest.{ BeforeAndAfterAll, Suites }
/**
 * Aggregates the Hubot test suites and manages the shared MapDB brain
 * backend's lifecycle around them.
 */
class MasterTestSuite extends Suites(new BrainTestSuite, new HubotTestSuite, new ListenerTestSuite) with BeforeAndAfterAll {
  // Start from a clean persistent store so suites don't see stale state.
  override def beforeAll(): Unit = {
    MapdbBackend.deleteAll()
  }
  // Release the MapDB resources once every suite has finished.
  override def afterAll(): Unit = {
    MapdbBackend.shutdown()
  }
}
| denen99/hubot-scala | src/test/scala/org/dberg/hubot/MasterTestSuite.scala | Scala | apache-2.0 | 386 |
import java.io._
import scala.reflect.runtime.universe._
import scala.reflect.runtime.{universe => ru}
// Fixture: a class with a primary Int constructor and an auxiliary String
// one, used below to exercise `isPrimaryConstructor` via runtime reflection.
class C(x: Int) {
  def this(x: String) = this(x.toInt)
}
object Test extends dotty.runtime.LegacyApp {
  // Prints a class symbol, its primary constructor, and every constructor
  // member, flagging for each whether it is the primary one.
  def test(sym: ClassSymbol): Unit = {
    // Uses the internal symbol-table API (via casts) to force full
    // initialization of a symbol before inspecting it.
    def fullyInitializeSymbol(sym: Symbol): Unit = {
      val internal = ru.asInstanceOf[scala.reflect.internal.SymbolTable]
      internal.definitions.fullyInitializeSymbol(sym.asInstanceOf[internal.Symbol])
    }
    // Renders a symbol with the internal `defString` (signature-like form).
    def defString(sym: Symbol): String = {
      val internal = ru.asInstanceOf[scala.reflect.internal.SymbolTable]
      sym.asInstanceOf[internal.Symbol].defString
    }
    // "defString => isPrimaryConstructor", or "NoSymbol" when absent.
    def showCtor(sym: Symbol): String = {
      fullyInitializeSymbol(sym)
      if (sym == NoSymbol) "NoSymbol"
      else s"${defString(sym)} => ${sym.asMethod.isPrimaryConstructor}"
    }
    sym.info // evaluated for its effect of completing the symbol's type info
    println(sym.toString)
    println(s"primary constructor: ${showCtor(sym.primaryConstructor)}")
    val ctors = sym.info.members.filter(_.name == termNames.CONSTRUCTOR).map(sym => showCtor(sym))
    // Sorted so the output is deterministic across runs.
    ctors.toList.sorted.foreach(println)
  }
  Macros.foo
  println("runtime")
  // SI-8367 primaryConstructor for Java-defined classes is unstable, so I'm commenting this out
  // test(typeOf[File].typeSymbol.asClass)
  test(definitions.ScalaPackageClass)
  test(definitions.ListModule.moduleClass.asClass)
  test(typeOf[Product1[_]].typeSymbol.asClass)
  test(typeOf[UninitializedFieldError].typeSymbol.asClass)
  test(typeOf[C].typeSymbol.asClass)
}
| yusuke2255/dotty | tests/disabled/macro/run/t8192/Test_2.scala | Scala | bsd-3-clause | 1,492 |
package com.github.pedrovgs.haveaniceday.smiles
import javax.inject.Inject
import com.github.pedrovgs.haveaniceday.smiles.model.SmilesExtractionResult
import scala.concurrent.Future
/** Use case: extracting smiles. The work is delegated entirely to the
  * injected [[SmilesGenerator]]; `apply` just forwards the call. */
class ExtractSmiles @Inject()(smilesGenerator: SmilesGenerator) {
  // Kicks off the asynchronous extraction and returns its future result.
  def apply(): Future[SmilesExtractionResult] = smilesGenerator.extractSmiles()
}
| pedrovgs/HaveANiceDay | src/main/scala/com/github/pedrovgs/haveaniceday/smiles/ExtractSmiles.scala | Scala | gpl-3.0 | 336 |
package com.rocketfuel.sdbc.postgresql.jdbc.implementation
import java.time.OffsetTime
import com.rocketfuel.sdbc.base.ToParameter
import org.postgresql.util.PGobject
/** Mutable PGobject wrapper mapping PostgreSQL's `timetz` column type to and
  * from [[java.time.OffsetTime]]. An absent value is held as None and is
  * rendered as SQL NULL by `getValue`.
  */
private[sdbc] class PGTimeTz() extends PGobject() {
  setType("timetz")

  // The decoded time-with-offset; None until a value is parsed or assigned.
  var offsetTime: Option[OffsetTime] = None

  /** Formats the wrapped time with the shared timetz formatter, or null when empty. */
  override def getValue: String =
    offsetTime match {
      case Some(time) => offsetTimeFormatter.format(time)
      case None       => null
    }

  /** Parses the database literal (null-safe) into an OffsetTime. */
  override def setValue(value: String): Unit = {
    offsetTime = Option(value).map { literal =>
      OffsetTime.from(offsetTimeFormatter.parse(literal))
    }
  }
}
/** Factories for [[PGTimeTz]] plus the partial function used by the parameter
  * conversion machinery. */
private[sdbc] object PGTimeTz extends ToParameter {

  /** Builds a wrapper by parsing a `timetz` literal. */
  def apply(value: String): PGTimeTz = {
    val wrapper = new PGTimeTz()
    wrapper.setValue(value)
    wrapper
  }

  /** Builds a wrapper directly from an already-decoded OffsetTime. */
  def apply(value: OffsetTime): PGTimeTz = {
    val wrapper = new PGTimeTz()
    wrapper.offsetTime = Some(value)
    wrapper
  }

  // Converts OffsetTime statement parameters into their PG representation.
  val toParameter: PartialFunction[Any, Any] = {
    case o: OffsetTime => PGTimeTz(o)
  }
}
/** Implicit conversion so callers can pass an OffsetTime wherever a PGobject
  * parameter is expected. */
private[sdbc] trait PGTimeTzImplicits {
  implicit def OffsetTimeToPGobject(o: OffsetTime): PGobject = {
    PGTimeTz(o)
  }
}
| wdacom/sdbc | postgresql/src/main/scala/com/rocketfuel/sdbc/postgresql/jdbc/implementation/PGTimeTz.scala | Scala | bsd-3-clause | 1,112 |
package aia.testdriven
import scala.util.Random
import akka.testkit.TestKit
import akka.actor.{ Props, ActorRef, Actor, ActorSystem }
import org.scalatest.{WordSpecLike, MustMatchers}
/** Verifies that SendingActor sorts a batch of events by id and forwards the
  * sorted batch to its configured receiver (the TestKit's testActor here). */
class SendingActorTest extends TestKit(ActorSystem("testsystem"))
  with WordSpecLike
  with MustMatchers
  with StopSystemAfterAll {

  "A Sending Actor" must {
    "send a message to another actor when it has finished processing" in {
      import SendingActor._
      val sendingActor = system.actorOf(SendingActor.props(testActor), "sendingActor")

      val eventCount = 1000
      val maxId = 100000
      // A vector of events with random (possibly duplicated) ids.
      def randomEvents() = Vector.fill(eventCount)(Event(Random.nextInt(maxId)))

      val unsortedEvents = randomEvents()
      sendingActor ! SortEvents(unsortedEvents)

      // The actor must reply with the same events, ordered by id.
      expectMsgPF() {
        case SortedEvents(sorted) =>
          sorted.size must be(eventCount)
          unsortedEvents.sortBy(_.id) must be(sorted)
      }
    }
  }
}
object SendingActor {
  /** Props factory binding the actor to the actor that receives its replies. */
  def props(receiver: ActorRef) =
    Props(new SendingActor(receiver))
  /** A sortable event, ordered by id. */
  case class Event(id: Long)
  /** Request: sort these events by id. */
  case class SortEvents(unsorted: Vector[Event])
  /** Reply: the same events in ascending id order. */
  case class SortedEvents(sorted: Vector[Event])
}
/** Sorts each incoming event batch by id and forwards the result to `receiver`. */
class SendingActor(receiver: ActorRef) extends Actor {
  import SendingActor._

  def receive = {
    case SortEvents(events) =>
      val sorted = events.sortBy(_.id)
      receiver ! SortedEvents(sorted)
  }
}
| RayRoestenburg/akka-in-action | chapter-testdriven/src/test/scala/aia/testdriven/SendingActorTest.scala | Scala | mit | 1,434 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.security
import org.geotools.feature.simple.SimpleFeatureImpl
import org.geotools.filter.identity.FeatureIdImpl
import org.junit.runner.RunWith
import org.locationtech.geomesa.security._
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import org.springframework.security.authentication.TestingAuthenticationToken
import org.springframework.security.core.context.SecurityContextHolder
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class VisibilityFilterFunctionTest extends Specification {

  sequential

  val testSFT = SimpleFeatureTypes.createType("test", "name:String,*geom:Point:srid=4326")

  System.setProperty(AuthorizationsProvider.AUTH_PROVIDER_SYS_PROPERTY, classOf[TestAuthorizationsProvider].getName)

  /** Builds a feature with no attribute values, optionally tagged with a
    * visibility expression. Extracted to remove the setup duplicated across
    * every example below. */
  private def newFeature(visibility: Option[String] = None): SimpleFeatureImpl = {
    val f = new SimpleFeatureImpl(List.empty[AnyRef], testSFT, new FeatureIdImpl(""))
    visibility.foreach(v => f.visibility = v)
    f
  }

  /** Installs a fresh security context whose authentication carries the given
    * granted authorities (the auths the visibility filter checks against). */
  private def loginWith(auths: String*): Unit = {
    val ctx = SecurityContextHolder.createEmptyContext()
    ctx.setAuthentication(new TestingAuthenticationToken(null, null, auths: _*))
    SecurityContextHolder.setContext(ctx)
  }

  "VisibilityFilter" should {

    "work with simple viz" in {
      val f = newFeature(Some("ADMIN&USER"))
      loginWith("ADMIN", "USER")
      VisibilityFilterFunction.filter.evaluate(f) must beTrue
    }

    "work with no viz on the feature" in {
      // Unlabelled features are never visible, regardless of auths.
      val f = newFeature()
      loginWith("ADMIN", "USER")
      VisibilityFilterFunction.filter.evaluate(f) must beFalse
    }

    "return false when user does not have the right auths" in {
      val f = newFeature(Some("ADMIN&USER"))
      loginWith("ADMIN")
      VisibilityFilterFunction.filter.evaluate(f) must beFalse
    }

    "return true when dealing with expressions" in {
      // An OR expression is satisfied by either authority.
      val f = newFeature(Some("ADMIN|USER"))
      loginWith("USER")
      VisibilityFilterFunction.filter.evaluate(f) must beTrue
    }
  }
}
| MutahirKazmi/geomesa | geomesa-accumulo/geomesa-accumulo-security/src/test/scala/org/locationtech/geomesa/accumulo/security/VisibilityFilterFunctionTest.scala | Scala | apache-2.0 | 3,063 |
package fpinscala.ch02gettingstarted
// A comment!
/* Another comment */
/** A documentation comment */
/** Chapter 2 sample module: small numeric functions plus helpers that format
  * their results for printing. */
object MyModule {

  /** Absolute value of `n`. */
  def abs(n: Int): Int =
    if (n < 0) -n else n

  // Renders the absolute-value result; private because only `main` uses it.
  private def formatAbs(x: Int) =
    "The absolute value of %d is %d".format(x, abs(x))

  /** Entry point: prints the absolute value of -42. */
  def main(args: Array[String]): Unit =
    println(formatAbs(-42))

  /** n!, computed with a local tail-recursive loop. */
  def factorial(n: Int): Int = {
    @annotation.tailrec
    def loop(remaining: Int, acc: Int): Int =
      if (remaining <= 0) acc
      else loop(remaining - 1, remaining * acc)
    loop(n, 1)
  }

  /** n!, computed imperatively with a `while` loop. */
  def factorial2(n: Int): Int = {
    var result = 1
    var remaining = n
    while (remaining > 0) {
      result *= remaining
      remaining -= 1
    }
    result
  }

  /** Exercise 1: the nth Fibonacci number (fib(0) = 0, fib(1) = 1).
    * Restricted to [0, 44] because fib(45) overflows Int.
    */
  def fib(n: Int): Int = {
    // Int overflow detected by the numbers generator scalacheck test
    require(0 <= n && n <= 44, "only available for numbers between [0..44]")
    // Count down from n, carrying the next two Fibonacci values along.
    @annotation.tailrec
    def loop(remaining: Int, next: Int, current: Int): Int =
      if (remaining == 0) current
      else loop(remaining - 1, current + next, next)
    loop(n, 1, 0)
  }

  /** Earlier tail-recursive attempt: counts up from 0 to n instead. */
  def fib_firstTailrecVersion(n: Int): Int = {
    @annotation.tailrec
    def loop(i: Int, prev: Int, prevPrev: Int): Int = {
      val current =
        if (i == 0 || i == 1) i
        else prev + prevPrev
      if (i < n) loop(i + 1, current, prev)
      else current
    }
    loop(0, 0, 0)
  }

  /** Naive doubly-recursive Fibonacci; exponential time, not tail-recursive. */
  def fib_nonTailrecVersion(n: Int): Int = {
    // @annotation.tailrec // Error!
    def naive(i: Int): Int =
      if (i == 0 || i == 1) i
      else naive(i - 1) + naive(i - 2)
    naive(n)
  }

  // Renders the factorial result; very similar to `formatAbs` above.
  private def formatFactorial(n: Int) =
    "The factorial of %d is %d.".format(n, factorial(n))

  /** Generalizes the two formatters: applies `f` to `n` and labels the result. */
  def formatResult(name: String, n: Int, f: Int => Int) =
    "The %s of %d is %d.".format(name, n, f(n))
}
/** Demonstrates passing named functions (`abs`, `factorial`) to the shared
  * `formatResult` higher-order formatter. */
object FormatAbsAndFactorial {
  import MyModule._

  def main(args: Array[String]): Unit = {
    val absLine = formatResult("absolute value", -42, abs)
    val factorialLine = formatResult("factorial", 7, factorial)
    println(absLine)
    println(factorialLine)
  }
}
/** Prints the first seven Fibonacci numbers next to the expected sequence. */
object TestFib {
  import MyModule._

  def main(args: Array[String]): Unit = {
    println("Expected: 0, 1, 1, 2, 3, 5, 8")
    println("Actual: " + (0 to 6).map(fib).mkString(", "))
  }
}
// Functions get passed around so often in FP that it's
// convenient to have syntax for constructing a function
// *without* having to give it a name
/** Demonstrates the progressively shorter syntaxes for anonymous functions;
  * each line prints the same kind of result with a different lambda form. */
object AnonymousFunctions {
  import MyModule._

  // Some examples of anonymous functions:
  def main(args: Array[String]): Unit = {
    // Named functions passed by reference (eta-expansion).
    println(formatResult("absolute value", -42, abs))
    println(formatResult("factorial", 7, factorial))
    // Fully annotated lambda.
    println(formatResult("increment", 7, (x: Int) => x + 1))
    // Parameter type inferred from formatResult's signature.
    println(formatResult("increment2", 7, (x) => x + 1))
    // Parentheses dropped for a single inferred parameter.
    println(formatResult("increment3", 7, x => x + 1))
    // Placeholder syntax.
    println(formatResult("increment4", 7, _ + 1))
    // Block body with a local value.
    println(formatResult("increment5", 7, x => { val r = x + 1; r }))
  }
}
/** Binary search specialized to arrays of Double. */
object MonomorphicBinarySearch {

  /** Searches sorted array `ds` for `key`.
    *
    * Returns the index of a matching element; when the key is absent the
    * result is `-lastProbedIndex - 1` (always negative), mirroring the
    * recursive bookkeeping of the original implementation.
    */
  def binarySearch(ds: Array[Double], key: Double): Int = {
    @annotation.tailrec
    def loop(low: Int, lastMid: Int, high: Int): Int =
      if (low > high) -lastMid - 1
      else {
        val mid = (low + high) / 2
        val probe = ds(mid) // array indexing uses function-application syntax
        if (probe == key) mid
        else if (probe > key) loop(low, mid, mid - 1)
        else loop(mid + 1, mid, high)
      }
    loop(0, 0, ds.length - 1)
  }
}
/** Polymorphic variants of the search/sortedness helpers, plus the standard
  * combinators from the chapter exercises: partial application, currying,
  * uncurrying, and composition.
  */
object PolymorphicFunctions {

  /** Binary search over any element type, ordered by the caller-supplied
    * "greater than" predicate `gt`. Returns a matching index, or a negative
    * value derived from the last probed index when `key` is absent.
    */
  def binarySearch[A](as: Array[A], key: A, gt: (A,A) => Boolean): Int = {
    @annotation.tailrec
    def loop(low: Int, lastMid: Int, high: Int): Int =
      if (low > high) -lastMid - 1
      else {
        val mid = (low + high) / 2
        val probe = as(mid)
        val probeGreater = gt(probe, key)
        if (!probeGreater && !gt(key, probe)) mid // neither is greater: equal
        else if (probeGreater) loop(low, mid, mid - 1)
        else loop(mid + 1, mid, high)
      }
    loop(0, 0, as.length - 1)
  }

  /** Exercise 2: true when every element is strictly "greater" (per `gt`)
    * than its predecessor; empty and single-element arrays count as sorted.
    */
  def isSorted[A](as: Array[A], gt: (A, A) => Boolean): Boolean =
    as.indices.drop(1).forall(i => gt(as(i), as(i - 1)))

  /** First take on isSorted: walks the array recursively, comparing each
    * element against the previous one.
    */
  def isSorted_firstVersion[A](as: Array[A], gt: (A, A) => Boolean): Boolean = {
    @annotation.tailrec
    def check(previous: A, rest: Array[A]): Boolean =
      if (rest.isEmpty) true
      else if (gt(rest.head, previous)) check(rest.head, rest.tail)
      else false
    as.isEmpty || check(as.head, as.tail)
  }

  /** Partially applies `f` to `a`, yielding a one-argument function.
    * The types constrain this to the single possible implementation. */
  def partial1[A,B,C](a: A, f: (A,B) => C): B => C =
    b => f(a, b)

  /** Exercise 3: convert a two-argument function into curried form.
    * (The standard library also offers `Function2#curried`.) */
  def curry[A,B,C](f: (A, B) => C): A => B => C =
    a => b => f(a, b)

  /** Exercise 4: flatten a curried function back into two-argument form,
    * delegating to the standard library's `Function.uncurried`. */
  def uncurry[A,B,C](f: A => B => C): (A, B) => C =
    Function.uncurried(f)

  /** Exercise 5: right-to-left composition, i.e. `a => f(g(a))`, expressed
    * with the standard library's `Function1#compose`. */
  def compose[A,B,C](f: B => C, g: A => B): A => C =
    f compose g
}
| hugocf/fpinscala | src/main/scala/fpinscala/ch02gettingstarted/GettingStarted.scala | Scala | mit | 7,476 |
package razie.diesel.samples
import razie.{cout}
import razie.diesel.dom._
import razie.diesel.engine._
import razie.diesel.ext.{EMsg, EVal}
import razie.tconf.{DSpec, TextSpec}
import scala.collection.mutable.ListBuffer
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
/**
* Created by raz on 2017-06-13.
*/
/** End-to-end sample: builds a domain from two rule specs, feeds one trigger
  * story through a diesel engine, and prints the resulting AST when done.
  */
object SimpleFlow {
  // some rules - make sure each line starts with $ and ends with \n
  val specs = List(
    TextSpec ( "spec1",
      """
        $when home.guest_arrived(name) => lights.on
        $when home.guest_arrived(name=="Jane") => chimes.welcome(name="Jane")
      """.stripMargin
    ),
    TextSpec ( "spec2",
      """
        $mock chimes.welcome => (greeting = "Greetings, "+name)
      """.stripMargin
    )
  )
  // some trigger message
  val story =
    TextSpec ( "story1",
      """
       $msg home.guest_arrived(name="Jane")
      """.stripMargin
    )
  def main (argv:Array[String]) : Unit = {
    // 1. settings
    val settings = new DomEngineSettings()
    // 2. the current domain (classes, entities, message specs etc)
    val dom = RDomain.domFrom(specs.head, specs.tail)
    // 2. create the process instance / root node
    val root = DomAst("root", AstKinds.ROOT)
    // 3. add the entry points / triggers to the process
    RDExt.addStoryToAst(root, List(story))
    // 4. rules configuration
    // 5. start processing
    val engine = DieselAppContext.mkEngine(dom, root, settings, story :: specs, "simpleFlow")
    // 6. when done...
    val future = engine.process
    // NOTE(review): a failed future is not handled here — errors would be
    // silently dropped; fine for a sample, not for production code.
    future.map { engine =>
      val root = engine.root // may be a different
      cout << "DONE ---------------------- "
      cout << root.toString
    }
    // just hang around to let the engine finish
    Thread.sleep(5000)
  }
}
/**
* Created by raz on 2017-06-13.
*/
/** Same sample as [[SimpleFlow]], but the engine assembly is delegated to
  * [[DomEngineUtils.mkEngine]] to show the shorter path.
  */
object SimplestFlow {
  // some rules - make sure each line starts with $ and ends with \n
  val specs = List(
    TextSpec ( "spec1",
      """
        $when home.guest_arrived(name) => lights.on
        $when home.guest_arrived(name=="Jane") => chimes.welcome(name="Jane")
      """.stripMargin
    ),
    TextSpec ( "spec2",
      """
        $mock chimes.welcome => (greeting = "Greetings, "+name)
      """.stripMargin
    )
  )
  // some trigger message
  val story =
    TextSpec ( "story1",
      """
       $msg home.guest_arrived(name="Jane")
      """.stripMargin
    )
  def main (argv:Array[String]) : Unit = {
    val engine = DomEngineUtils.mkEngine(new DomEngineSettings(), specs, List(story))
    // 6. when done...
    val future = engine.process
    // NOTE(review): failures of the engine future are not handled here.
    future.map { engine =>
      val root = engine.root // may be a different
      cout << "DONE ---------------------- "
      cout << root.toString
    }
    // just hang around to let the engine finish
    Thread.sleep(5000)
  }
}
/** Helpers for assembling and running diesel engines. */
object DomEngineUtils {

  /** Runs the engine to completion, blocking the caller for up to 5 seconds.
    * todo maybe not have this sync option at all?
    */
  def execAndWait (engine:DomEngine) = {
    val future = engine.process
    Await.result(future, Duration.create(5, "seconds"))
  }

  /** Runs the engine's test pass to completion, blocking up to 5 seconds.
    * todo maybe not have this sync option at all?
    */
  def execTestsAndWait (engine:DomEngine) = {
    val future = engine.processTests
    Await.result(future, Duration.create(5, "seconds"))
  }

  /** Assembles a domain and root AST from the given specs/stories and creates
    * (but does not start) an engine over them.
    * todo reuse DomFiddle.runDom
    */
  def mkEngine(settings: DomEngineSettings, specs : List[DSpec], stories:List[DSpec]) : DomEngine = {
    // the current domain (classes, entities, message specs etc)
    val dom = RDomain.domFrom(specs.head, specs.tail)
    // the process instance / root node
    val root = DomAst("root", AstKinds.ROOT)
    // add the entry points / triggers to the process
    RDExt.addStoryToAst(root, stories)
    DieselAppContext.mkEngine(dom, root, settings, stories ::: specs, "simpleFlow")
  }

  /** execute message
   *
   * @param msg "entity.action(p=value,etc)
   * @param specs the specs to use for rules
   * @param stories any other stories to add (tests, engine settings etc)
   * @param settings engine settings
   * @return a future map with the resulting value(s), test counts, errors and trace
   */
  def runDom(msg:String, specs:List[DSpec], stories: List[DSpec], settings:DomEngineSettings) : Future[Map[String,Any]] = {
    // Audit.logdb("DIESEL_RUNDOM")
    // to domain
    val dom = RDomain.domFrom(specs.head, specs.tail)

    // NOTE(review): the original version also built a "$msg ..." story string
    // from `msg` here but never used it - only `stories` was added to the AST.
    // That dead code (vars `originalMsg`, `story`, `res`) is removed; if `msg`
    // was meant to become a story of its own, that is a latent bug to confirm.

    val idom =
      if(stories.isEmpty) RDomain.empty
      else RDomain.domFrom(stories.head, stories.tail).revise addRoot

    val root = DomAst("root", AstKinds.ROOT)
    RDExt.addStoryToAst(root, stories)

    // start processing all elements
    val engine = DieselAppContext.mkEngine(dom plus idom, root, settings, stories ::: specs, "simpleFlow")

    engine.process.map { engine =>
      val errors = new ListBuffer[String]()

      // find the spec for the message and use its declared return attributes
      val oattrs = dom.moreElements.collect {
        case n: EMsg if msg.startsWith(n.entity + "." + n.met) => n
      }.headOption.toList.flatMap(_.ret)

      if (oattrs.isEmpty) {
        errors append s"Can't find the spec for $msg"
      }

      import razie.diesel.ext.stripQuotes

      // collect values generated during execution, filtered by the declared
      // return attributes when the spec listed any
      val values = root.collect {
        case d@DomAst(EVal(p), /*AstKinds.GENERATED*/ _, _, _) if oattrs.isEmpty || oattrs.find(_.name == p.name).isDefined => (p.name, p.dflt)
      }

      Map(
        "value" -> values.headOption.map(_._2).map(stripQuotes).getOrElse(""),
        "values" -> values.toMap,
        "totalCount" -> (engine.totalTestCount),
        "failureCount" -> engine.failedTestCount,
        "errors" -> errors.toList,
        "root" -> root,
        "dieselTrace" -> DieselTrace(root, settings.node, engine.id, "diesel", "runDom", settings.parentNodeId).toJson
      )
    }
  }
}
| razie/wikireactor | diesel/src/main/scala/razie/diesel/samples/SimpleFlow.scala | Scala | apache-2.0 | 6,124 |
/*
* Copyright 2014-15 Intelix Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eventstreams.gauges
import com.codahale.metrics.Histogram
import play.api.libs.json.{JsString, JsValue}
/** Histogram-backed gauge accounting: every numeric update is mirrored into a
  * Codahale Histogram so that mean/stddev/percentiles can be reported.
  */
trait GaugeMetricAccounting extends NumericMetricAccounting with WithMetric[Histogram] {

  // Registers (or fetches) the histogram under the given name.
  override def createMetric(metricName: String): Histogram = metricRegistry.histogram(metricName)

  /** Records the value in the base accounting first, then in the histogram. */
  override def updateValue(v: Double): Unit = {
    super.updateValue(v)
    m.foreach(_.update(v.toLong))
  }

  /** Comma-separated "level,mean,stddev,p95,p99" snapshot, if a metric exists. */
  override def toValuesData: Option[JsValue] =
    m.map { histogram =>
      val columns = Seq(
        valueForLevels,
        histogram.getSnapshot.getMean,
        histogram.getSnapshot.getStdDev,
        histogram.getSnapshot.get95thPercentile(),
        histogram.getSnapshot.get99thPercentile()
      )
      JsString(columns.map(fmt).mkString(","))
    }
}
| intelix/eventstreams | es-gauges/es-gauges-service/src/main/scala/eventstreams/gauges/GaugeMetricAccounting.scala | Scala | apache-2.0 | 1,341 |
package com.datawizards.dmg.examples
import com.datawizards.dmg.examples.TestModel.PersonWithComments
import com.datawizards.dmg.generator.HiveGenerator
import com.datawizards.dmg.{DataModelGenerator, dialects}
/** Prints the schema generated for PersonWithComments in three dialects (H2,
  * Avro, Hive), demonstrating how column comments are carried through. */
object TableColumnCommentsExample extends App {
  println(DataModelGenerator.generate[PersonWithComments](dialects.H2Dialect))
  println(DataModelGenerator.generate[PersonWithComments](dialects.AvroSchemaDialect))
  println(DataModelGenerator.generate[PersonWithComments](new HiveGenerator))
}
| mateuszboryn/data-model-generator | src/main/scala/com/datawizards/dmg/examples/TableColumnCommentsExample.scala | Scala | apache-2.0 | 507 |
package SecureSocialPlugins
import play.api.data.Form
import play.api.mvc.{Request, RequestHeader}
import play.api.templates.{Html, Txt}
import securesocial.controllers.PasswordChange.ChangeInfo
import securesocial.controllers.Registration.RegistrationInfo
import securesocial.controllers.TemplatesPlugin
import securesocial.core.{Identity, SecuredRequest}
/** SecureSocial TemplatesPlugin implementation: page requests are routed to
  * this application's custom views under views.html.Secure, while mail bodies
  * use the stock securesocial mail templates. Every mail method returns
  * (None, Some(html)) - i.e. HTML-only, no plain-text part. The commented-out
  * blocks are the previous custom-mail wiring, kept for reference.
  */
class SecureSocialViews(application: play.Application) extends TemplatesPlugin {
  // --- pages ---
  override def getSignUpPage[A](implicit request: Request[A], form: Form[RegistrationInfo], token: String): Html = {
    views.html.Secure.Registration.signUp(form, token)
  }
  override def getLoginPage[A](implicit request: Request[A], form: Form[(String, String)],
    msg: Option[String] = None): Html =
  {
    views.html.Secure.login(form, msg)
  }
  override def getStartSignUpPage[A](implicit request: Request[A], form: Form[String]): Html = {
    views.html.Secure.Registration.startSignUp(form)
  }
  override def getStartResetPasswordPage[A](implicit request: Request[A], form: Form[String]): Html = {
    views.html.Secure.Registration.startResetPassword(form)
  }
  def getResetPasswordPage[A](implicit request: Request[A], form: Form[(String, String)], token: String): Html = {
    views.html.Secure.Registration.resetPasswordPage(form, token)
  }
  def getPasswordChangePage[A](implicit request: SecuredRequest[A], form: Form[ChangeInfo]): Html = {
    views.html.Secure.passwordChange(form)
  }
  // --- mails: (Txt, Html) pairs; the Txt part is deliberately None ---
  /*
  def getSignUpEmail(token: String)(implicit request: play.api.mvc.RequestHeader): String = {
    views.html.custom.mails.signUpEmail(token).body
  }
  */
  def getSignUpEmail(token: String)(implicit request: RequestHeader): (Option[Txt], Option[Html]) = {
    (None, Some(securesocial.views.html.mails.signUpEmail(token)))
  }
  /*
  def getAlreadyRegisteredEmail(user: SocialUser)(implicit request: play.api.mvc.RequestHeader): String = {
    views.html.custom.mails.alreadyRegisteredEmail(user).body
  }
  */
  def getAlreadyRegisteredEmail(user: Identity)(implicit request: RequestHeader): (Option[Txt], Option[Html]) = {
    (None, Some(securesocial.views.html.mails.alreadyRegisteredEmail(user)))
  }
  /*
  def getWelcomeEmail(user: SocialUser)(implicit request: play.api.mvc.RequestHeader): String = {
    views.html.custom.mails.welcomeEmail(user).body
  }
  */
  def getWelcomeEmail(user: Identity)(implicit request: RequestHeader): (Option[Txt], Option[Html]) = {
    (None, Some(securesocial.views.html.mails.welcomeEmail(user)))
  }
  /*
  def getUnknownEmailNotice()(implicit request: play.api.mvc.RequestHeader): String = {
    views.html.custom.mails.unknownEmailNotice(request).body
  }
  */
  def getUnknownEmailNotice()(implicit request: RequestHeader): (Option[Txt], Option[Html]) = {
    (None, Some(securesocial.views.html.mails.unknownEmailNotice(request)))
  }
  /*
  def getSendPasswordResetEmail(user: SocialUser, token: String)(implicit request: play.api.mvc.RequestHeader): String = {
    views.html.custom.mails.passwordResetEmail(user, token).body
  }
  */
  def getSendPasswordResetEmail(user: Identity, token: String)(implicit request: RequestHeader): (Option[Txt], Option[Html]) = {
    (None, Some(securesocial.views.html.mails.passwordResetEmail(user, token)))
  }
  /*
  def getPasswordChangedNoticeEmail(user: SocialUser)(implicit request: play.api.mvc.RequestHeader): String = {
    views.html.custom.mails.passwordChangedNotice(user).body
  }
  */
  def getPasswordChangedNoticeEmail(user: Identity)(implicit request: RequestHeader): (Option[Txt], Option[Html]) = {
    (None, Some(securesocial.views.html.mails.passwordChangedNotice(user)))
  }
  def getNotAuthorizedPage[A](implicit request: Request[A]): Html = {
    securesocial.views.html.notAuthorized()
  }
} | antigenomics/vdjviz | app/SecureSocialPlugins/SecureSocialViews.scala | Scala | apache-2.0 | 3,938 |
package com.github.agourlay.cornichon.steps.check.checkModel
import com.github.agourlay.cornichon.core.{ NoOpStep, NoValue, Step }
// N equals 6 for now
/** A property over up to six generated values (N is fixed at 6 for now):
  * a description, an optional precondition step, and an invariant that
  * receives six value thunks and yields the step to verify. Lower-arity
  * Property0..Property5 below pad the unused slots with NoValue. */
trait PropertyN[A, B, C, D, E, F] {
  val description: String
  val preCondition: Step
  val invariantN: (() => A, () => B, () => C, () => D, () => E, () => F) => Step
}
/** Six-value property: the invariant already has full arity, so it is
  * forwarded to invariantN unchanged. */
case class Property6[A, B, C, D, E, F](
    description: String,
    preCondition: Step = NoOpStep,
    invariant: (() => A, () => B, () => C, () => D, () => E, () => F) => Step) extends PropertyN[A, B, C, D, E, F] {
  override val invariantN: (() => A, () => B, () => C, () => D, () => E, () => F) => Step = invariant
}
/** Five-value property: the unused sixth slot is padded with NoValue. */
case class Property5[A, B, C, D, E](
    description: String,
    preCondition: Step = NoOpStep,
    invariant: (() => A, () => B, () => C, () => D, () => E) => Step) extends PropertyN[A, B, C, D, E, NoValue] {
  override val invariantN: (() => A, () => B, () => C, () => D, () => E, () => NoValue) => Step =
    (a, b, c, d, e, _) => invariant(a, b, c, d, e)
}
/** Four-value property: slots five and six are padded with NoValue. */
case class Property4[A, B, C, D](
    description: String,
    preCondition: Step = NoOpStep,
    invariant: (() => A, () => B, () => C, () => D) => Step) extends PropertyN[A, B, C, D, NoValue, NoValue] {
  override val invariantN: (() => A, () => B, () => C, () => D, () => NoValue, () => NoValue) => Step =
    (a, b, c, d, _, _) => invariant(a, b, c, d)
}
/** Three-value property: slots four to six are padded with NoValue. */
case class Property3[A, B, C](
    description: String,
    preCondition: Step = NoOpStep,
    invariant: (() => A, () => B, () => C) => Step) extends PropertyN[A, B, C, NoValue, NoValue, NoValue] {
  override val invariantN: (() => A, () => B, () => C, () => NoValue, () => NoValue, () => NoValue) => Step =
    (a, b, c, _, _, _) => invariant(a, b, c)
}
/** Two-value property: slots three to six are padded with NoValue. */
case class Property2[A, B](
    description: String,
    preCondition: Step = NoOpStep,
    invariant: (() => A, () => B) => Step) extends PropertyN[A, B, NoValue, NoValue, NoValue, NoValue] {
  override val invariantN: (() => A, () => B, () => NoValue, () => NoValue, () => NoValue, () => NoValue) => Step =
    (a, b, _, _, _, _) => invariant(a, b)
}
/** One-value property: slots two to six are padded with NoValue. */
case class Property1[A](
    description: String,
    preCondition: Step = NoOpStep,
    invariant: (() => A) => Step) extends PropertyN[A, NoValue, NoValue, NoValue, NoValue, NoValue] {
  override val invariantN: (() => A, () => NoValue, () => NoValue, () => NoValue, () => NoValue, () => NoValue) => Step =
    (a, _, _, _, _, _) => invariant(a)
}
/** Zero-value property: all six slots are padded with NoValue and the
  * invariant is a plain thunk. */
case class Property0(
    description: String,
    preCondition: Step = NoOpStep,
    invariant: () => Step) extends PropertyN[NoValue, NoValue, NoValue, NoValue, NoValue, NoValue] {
  override val invariantN: (() => NoValue, () => NoValue, () => NoValue, () => NoValue, () => NoValue, () => NoValue) => Step =
    (_, _, _, _, _, _) => invariant()
}
| agourlay/cornichon | cornichon-core/src/main/scala/com/github/agourlay/cornichon/steps/check/checkModel/PropertyN.scala | Scala | apache-2.0 | 2,784 |
package io.getquill.context.spark.norm
import io.getquill.Spec
import QuestionMarkEscaper._
/** Tests for QuestionMarkEscaper: `escape` deepens the backslash-escaping of
  * question marks, `unescape` reverses it, and `pluginValueSafe` substitutes a
  * value for the next unescaped `?` placeholder - even when the substituted
  * values themselves contain (escaped) question marks. */
class QuestionMarkEscaperSpec extends Spec {
  "should escape strings with question marks and even ones with slashes already" in {
    escape("foo ? bar \\\\? baz \\\\\\\\?") must equal("foo \\\\? bar \\\\\\\\? baz \\\\\\\\\\\\?")
  }
  "should escape and then unescape going back to original form" in {
    val str = "foo ? bar \\\\? baz \\\\\\\\?"
    unescape(escape(str)) must equal(str)
  }
  // Shorthands: plugN substitutes into the Nth remaining placeholder; the "Q"
  // variants plug in values that themselves contain a question mark.
  def plug1(str: String) = pluginValueSafe(str, "<1>")
  def plug2(str: String) = pluginValueSafe(plug1(str), "<2>")
  def plug3(str: String) = pluginValueSafe(plug2(str), "<3>")
  def plug4(str: String) = pluginValueSafe(plug3(str), "<4>")
  def plug2Q(str: String) = pluginValueSafe(plug1(str), "<2?>")
  def plug3QN(str: String) = pluginValueSafe(plug2Q(str), "<3>")
  def plug4Q(str: String) = pluginValueSafe(plug3QN(str), "<4?>")
  "should escape and replace variables correctly" in {
    val str = "foo ? bar ? ?"
    plug1(str) must equal("foo <1> bar ? ?")
    plug2(str) must equal("foo <1> bar <2> ?")
    plug3(str) must equal("foo <1> bar <2> <3>")
  }
  "should escape and replace variables correctly with other question marks" in {
    val str = "foo ? bar \\\\? ? baz ? \\\\\\\\? ?"
    plug1(str) must equal("foo <1> bar \\\\? ? baz ? \\\\\\\\? ?")
    plug2(str) must equal("foo <1> bar \\\\? <2> baz ? \\\\\\\\? ?")
    plug3(str) must equal("foo <1> bar \\\\? <2> baz <3> \\\\\\\\? ?")
    plug4(str) must equal("foo <1> bar \\\\? <2> baz <3> \\\\\\\\? <4>")
    unescape(plug4(str)) must equal("foo <1> bar ? <2> baz <3> \\\\? <4>")
  }
  "should escape and replace variables correctly even if the variables have question marks" in {
    val str = "foo ? bar \\\\? ? baz ? \\\\\\\\? ?"
    plug1(str) must equal("foo <1> bar \\\\? ? baz ? \\\\\\\\? ?")
    plug2Q(str) must equal("foo <1> bar \\\\? <2\\\\?> baz ? \\\\\\\\? ?")
    plug3QN(str) must equal("foo <1> bar \\\\? <2\\\\?> baz <3> \\\\\\\\? ?")
    plug4Q(str) must equal("foo <1> bar \\\\? <2\\\\?> baz <3> \\\\\\\\? <4\\\\?>")
    unescape(plug4Q(str)) must equal("foo <1> bar ? <2?> baz <3> \\\\? <4?>")
  }
}
| getquill/quill | quill-spark/src/test/scala/io/getquill/context/spark/norm/QuestionMarkEscaperSpec.scala | Scala | apache-2.0 | 2,096 |
package it.mighe.ssbi
import org.scalatest.{BeforeAndAfter, Matchers, FlatSpec}
/** Unit tests for Tape: the pointer and all cells start at zero, cell values
  * wrap like unsigned bytes (mod 256), and both value and pointer adjustments
  * accept positive/negative deltas plus an optional cell offset. A fresh Tape
  * is created before each example. */
class TapeSpec extends FlatSpec with Matchers with BeforeAndAfter {
  var tape: Tape = _
  before {
    tape = new Tape
  }
  it should "have cells initialized at zero" in {
    tape.at(0) should be(0)
    tape.at(29999) should be(0)
  }
  it should "have pointer initialized to zero" in {
    tape.pointerPosition should be(0)
  }
  it should "increment pointer" in {
    tape.shiftRight()
    tape.pointerPosition should be(1)
  }
  it should "decrement pointer" in {
    tape.shiftRight()
    tape.shiftLeft()
    tape.pointerPosition should be(0)
  }
  it should "increment current cell" in {
    tape.shiftRight()
    tape.increment()
    tape.at(1) should be(1)
  }
  it should "decrement current cell" in {
    tape.increment()
    tape.decrement()
    tape.at(0) should be(0)
  }
  // Byte-style wrap-around: decrementing 0 yields 255, incrementing back yields 0.
  it should "wrap value" in {
    tape.decrement()
    tape.current should be(255.toByte)
    tape.increment()
    tape.current should be(0)
  }
  it should "assign current value" in {
    tape.current = 5
    tape.current should be(5)
  }
  it should "assign with offset" in {
    tape.setAt(50, 30)
    tape.at(30) should be(50)
  }
  // Assigned values are reduced modulo 256: 257 becomes 1.
  it should "wrap current value" in {
    tape.current = 257
    tape.current should be(1)
  }
  it should "adjust current value without offset" in {
    tape.adjustValue(50)
    tape.current should be(50)
    tape.adjustValue(-10)
    tape.current should be(40)
  }
  // Offsets are relative to the current pointer position (10 here).
  it should "adjust current value with offset" in {
    tape.adjustPointer(10)
    tape.adjustValue(-10, -3)
    tape.adjustValue(50, 2)
    tape.at(7) should be(-10)
    tape.current should be(0)
    tape.at(12) should be(50)
  }
  it should "adjust pointer position" in {
    tape.shiftRight()
    tape.adjustPointer(27)
    tape.pointerPosition should be(28)
    tape.adjustPointer(-5)
    tape.pointerPosition should be(23)
  }
}
| mighe/ssbi | src/test/scala/it/mighe/ssbi/TapeSpec.scala | Scala | mit | 1,934 |
package views.html
import play.twirl.api._
import play.twirl.api.TemplateMagic._
import play.api.templates.PlayMagic._
import models._
import controllers._
import play.api.i18n._
import play.api.mvc._
import play.api.data._
import views.html._
/**/
// NOTE(review): Twirl-GENERATED template source (see the "-- GENERATED --"
// footer below). Do not edit by hand — change app/views/download.scala.html
// and regenerate. The positional comments such as /*1.2*/ and the MATRIX/LINES
// footer map generated offsets back to the template; the triple-quoted string
// chunks are emitted verbatim, so their exact contents are load-bearing.
object download extends BaseScalaTemplate[play.twirl.api.HtmlFormat.Appendable,Format[play.twirl.api.HtmlFormat.Appendable]](play.twirl.api.HtmlFormat) with play.twirl.api.Template1[models.User,play.twirl.api.HtmlFormat.Appendable] {
/**/
def apply/*1.2*/(user: models.User):play.twirl.api.HtmlFormat.Appendable = {
_display_ {
Seq[Any](format.raw/*1.21*/("""
"""),_display_(/*3.2*/main("Tracking - Home", Some(user))/*3.37*/ {_display_(Seq[Any](format.raw/*3.39*/("""
"""),format.raw/*4.1*/("""<head>
<title>tracking</title>
<style>
body """),format.raw/*7.14*/("""{"""),format.raw/*7.15*/("""
"""),format.raw/*8.9*/("""background: url("""),_display_(/*8.26*/routes/*8.32*/.Assets.at("/images/abstract_swirls.jpg")),format.raw/*8.73*/(""") fixed 50% / cover;
"""),format.raw/*9.9*/("""}"""),format.raw/*9.10*/("""
"""),format.raw/*10.5*/("""</style>
<style type="text/css">
button """),format.raw/*12.16*/("""{"""),format.raw/*12.17*/("""
"""),format.raw/*13.9*/("""font-size: 150%;
background-color: #82BF56;
border-bottom: 5px solid #669644;
border-radius: 5px;
text-shadow: 0px -2px #669644;
font-family: 'Pacifico', cursive;
color: #FFF;
"""),format.raw/*20.9*/("""}"""),format.raw/*20.10*/("""
"""),format.raw/*21.5*/("""</style>
</head>
<body >
<h1 style="font-size:200%"> """),_display_(/*24.30*/user/*24.34*/.fullName.getOrElse("None")),format.raw/*24.61*/(""" """),format.raw/*24.62*/("""</h1>
<h1 style="font-size:300%"> welcome to the tracking system </h1>
<br> </br>
<h1 style="font-size:200%"> your user id is: """),_display_(/*28.48*/user/*28.52*/.userID),format.raw/*28.59*/(""" """),format.raw/*28.60*/("""</h1>
<h1 style="font-size:200%"> (set this user id in the TrackerApp->Settings) </h1>
<br> </br>
<a style="font-size:180%" href="""),_display_(/*32.33*/controllers/*32.44*/.routes.ApplicationController.kmlLink(user.userID)),format.raw/*32.94*/(""" """),format.raw/*32.95*/("""download="ggeNetworkLink.kml"> download the Google Earth file </a>
<br> </br>
<a style="font-size:180%" href='"""),_display_(/*35.34*/routes/*35.40*/.Assets.at("apk/TrackerApp.apk")),format.raw/*35.72*/("""' download> download the Android TrackerApp </a>
<br> </br>
<a href=""""),_display_(/*38.11*/routes/*38.17*/.ApplicationController.googlemap),format.raw/*38.49*/(""""><button>Show me the map</button></a>
</body>
""")))}),format.raw/*41.2*/("""
"""))}
}
// Standard Twirl entry points: `render` is the public call site, `f` exposes
// the template as a function value, `ref` returns this singleton.
def render(user:models.User): play.twirl.api.HtmlFormat.Appendable = apply(user)
def f:((models.User) => play.twirl.api.HtmlFormat.Appendable) = (user) => apply(user)
def ref: this.type = this
}
/*
-- GENERATED --
DATE: Wed May 13 17:47:26 JST 2015
SOURCE: /Users/ringo/dev/map-location-server/secure-server/trackersys/app/views/download.scala.html
HASH: f098f2bdd3575c9d7dc01bba5ea4e1a3d44cf766
MATRIX: 513->1|620->20|648->23|691->58|730->60|757->61|844->121|872->122|907->131|950->148|964->154|1025->195|1080->224|1108->225|1140->230|1220->282|1249->283|1285->292|1544->524|1573->525|1605->530|1686->584|1699->588|1747->615|1776->616|1933->746|1946->750|1974->757|2003->758|2161->889|2181->900|2252->950|2281->951|2420->1063|2435->1069|2488->1101|2586->1172|2601->1178|2654->1210|2733->1259
LINES: 19->1|22->1|24->3|24->3|24->3|25->4|28->7|28->7|29->8|29->8|29->8|29->8|30->9|30->9|31->10|33->12|33->12|34->13|41->20|41->20|42->21|45->24|45->24|45->24|45->24|49->28|49->28|49->28|49->28|53->32|53->32|53->32|53->32|56->35|56->35|56->35|59->38|59->38|59->38|62->41
-- GENERATED --
*/
| workingDog/trackersys | target/scala-2.11/twirl/main/views/html/download.template.scala | Scala | apache-2.0 | 4,009 |
/*
* Copyright 2017-2018 47 Degrees, LLC. <http://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package freestyle.tagless
package effects
import cats.mtl.FunctorTell
object writer {
  /** Binds the accumulator (log) type `W` for the writer algebra and its handler. */
  final class AccumulatorProvider[W] {
    /**
     * Tagless-final writer algebra over log type `W`:
     * `writer` emits the `W` half of the pair and yields the `A` half;
     * `tell` only emits a log entry.
     */
    @tagless(true) sealed abstract class WriterM {
      def writer[A](aw: (W, A)): FS[A]
      def tell(w: W): FS[Unit]
    }
    trait Implicits {
      /**
       * Interprets [[WriterM]] into any `M[_]` that has a cats-mtl
       * `FunctorTell[M, W]` instance, delegating to `FT.tuple` / `FT.tell`.
       */
      implicit def freestyleWriterMHandler[M[_]](
          implicit FT: FunctorTell[M, W]): WriterM.Handler[M] =
        new WriterM.Handler[M] {
          def writer[A](aw: (W, A)): M[A] = FT.tuple(aw)
          def tell(w: W): M[Unit] = FT.tell(w)
        }
    }
    object implicits extends Implicits
  }
  /** Entry point: `writer[W]` fixes the log type and returns the provider. */
  def apply[W] = new AccumulatorProvider[W]
}
| frees-io/freestyle | modules/effects/shared/src/main/scala/tagless/effects/writer.scala | Scala | apache-2.0 | 1,277 |
package com.ctask.http.server
import akka.actor.{Actor, ActorRef, Props}
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, HttpMethods, HttpRequest, HttpResponse}
import akka.http.scaladsl.testkit.ScalatestRouteTest
import com.ctask.data.TaskList
import com.ctask.messages.{Command, GetTaskList}
import com.ctask.protocol.Envelope.RootEnvelopeProto
import com.ctask.protocol.data.Data.TaskListProto
import com.ctask.protocol.response.Response.ResponseProto
import com.ctask.http.util.ProtoConverters.responseUnmarshaller
import com.ctask.messages.ServerResponses.SingleList
import com.ctask.protocol.util.RequestEnvelopeUtils
import org.scalatest.{FlatSpec, Matchers}
/**
 * Route-level test for [[Routes]]: POSTs a protobuf-encoded GetTaskList command
 * envelope to `/command` and verifies the response envelope carries the
 * requested task list.
 */
class RoutesSpec
  extends FlatSpec
    with Routes
    with Matchers
    with ScalatestRouteTest {

  // Backing service actor that answers the expected command with a single
  // "TODOs" list (see TestActor below).
  val serviceActor: ActorRef = system.actorOf(TestActor.props(GetTaskList("TODOs", withCompleted = true)))

  "The route" should "unmarshal" in {
    val commandEnv = RequestEnvelopeUtils.createGetTaskListCommandEnvelope("TODOs", true)
    val getTaskListRequest = HttpRequest(
      HttpMethods.POST,
      uri = "/command",
      entity = HttpEntity(ContentTypes.`application/octet-stream`, commandEnv.toByteArray))

    getTaskListRequest ~> taskListsRoute ~> check {
      handled shouldBe true
      val envelope = responseAs[RootEnvelopeProto]
      val response = ResponseProto.parseFrom(envelope.getPayload)
      // Bug fix: the original ended with a bare boolean expression
      // (`... .getName == "TODOs"`) whose value was silently discarded, so the
      // list name was never actually verified. Assert it explicitly.
      TaskListProto.parseFrom(response.getPayload).getName shouldBe "TODOs"
    }
  }
}
object TestActor {
  /** Props factory for [[TestActor]], parameterised by the command it expects. */
  def props(expectedCommand: Command): Props = Props(new TestActor(expectedCommand))
}
/**
 * Probe actor for [[RoutesSpec]]: replies with a canned single-list response
 * when it receives exactly `expectedCommand`, and with an error otherwise.
 */
class TestActor(val expectedCommand: Command) extends Actor with Matchers {
  def receive: PartialFunction[Any, Unit] = {
    case actual if actual == expectedCommand =>
      sender() ! new SingleList(new TaskList("TODOs", Array.empty, None.orNull))
    // Idiom fix: the original bound the message to an unused name
    // (`case unexpected =>`); use the wildcard instead.
    case _ =>
      // NOTE(review): this sends the exception as a plain message. If callers
      // use the ask pattern and expect a failed Future, wrapping it in
      // akka.actor.Status.Failure would be conventional — confirm the intended
      // protocol before changing.
      sender() ! new RuntimeException("error")
  }
}
| modsrm/ctask | http/src/test/scala/com/ctask/http/server/RoutesSpec.scala | Scala | gpl-3.0 | 1,901 |
/*
* A real-time collaborative tool to develop files over the network.
* Copyright (C) 2010 Mauro Ciancio and Leandro Gilioli
* {maurociancio,legilioli} at gmail dot com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package ar.noxit.paralleleditor.common
import operation._
class BasicXFormStrategy extends XFormStrategy {

    /**
     * Transforms a pair of concurrent edit operations (client, server) so that
     * applying each transformed operation after the other converges.
     * Dispatches on the operation kinds; the delete/insert case is reduced to
     * insert/delete by transforming the swapped pair and swapping the result back.
     */
    override def xform(ops: (EditOperation, EditOperation)) = {
        if (ops == null)
            throw new IllegalArgumentException("ops cannot be null")
        ops match {
            case (c: AddTextOperation, s: AddTextOperation) =>
                xform(c, s)
            case (c: DeleteTextOperation, s: DeleteTextOperation) =>
                xform(c, s)
            case (c: AddTextOperation, s: DeleteTextOperation) =>
                xform(c, s)
            case (c: DeleteTextOperation, s: AddTextOperation) =>
                xform(s, c).swap
            case (c: EditOperation, s: EditOperation) if c.isInstanceOf[NullOperation] || s.isInstanceOf[NullOperation] =>
                (c, s)
        }
    }

    /**
     * Insert-insert case.
     * (Translated from the original Spanish: "Caso agregar-agregar".)
     */
    protected def xform(c: AddTextOperation, s: AddTextOperation): (EditOperation, EditOperation) =
        (simpleXForm(c, s), simpleXForm(s, c))

    /**
     * Implementation following the paper
     * "Achieving Convergence with Operational
     * Transformation in Distributed Groupware Systems".
     * Only single-character inserts are supported.
     */
    protected def simpleXForm(c: AddTextOperation, s: AddTextOperation) = {
        if (c.text.size != 1 || s.text.size != 1) throw new UnsupportedEditOperationException("add size must be 1")
        val p1 = c.startPos
        // NOTE(review): p2 is never read in this method.
        val p2 = s.startPos
        val c1 = c.text
        val c2 = s.text
        val w1 = c.pword
        // Position words break ties between concurrent inserts; the inserted
        // character itself is the final tie-breaker.
        val alfa1 = pw(c)
        val alfa2 = pw(s)
        if (menor(alfa1, alfa2) || (igual(alfa1, alfa2) && c1 < c2))
            c
        else if (mayor(alfa1, alfa2) || (igual(alfa1, alfa2) && c1 > c2))
            // The other site's insert wins the tie: shift this insert right and
            // record the original position in the position word.
            new AddTextOperation(c1, p1 + c2.length, p1 :: w1)
        else
            c
    }

    /**
     * Delete-delete case,
     * for single-character delete operations only.
     */
    protected def xform(c: DeleteTextOperation, s: DeleteTextOperation): (EditOperation, EditOperation) = {
        if (s.size != 1 || c.size != 1) throw new UnsupportedEditOperationException("Delete size must be 1")
        val p1 = c.startPos
        val p2 = s.startPos
        if (p1 < p2)
            (c, new DeleteTextOperation(p2 - 1, s.size))
        else if (p1 > p2)
            (new DeleteTextOperation(p1 - 1, c.size), s)
        else
            // Both sites deleted the same character: nothing left to do on either side.
            (new NullOperation, new NullOperation)
    }

    /**
     * Insert-delete case.
     * The implementation only supports single-character
     * delete operations.
     */
    protected def xform(c: AddTextOperation, s: DeleteTextOperation): (EditOperation, EditOperation) = {
        if (c.text.size != 1) throw new UnsupportedEditOperationException("add size must be 1")
        if (s.size != 1) throw new UnsupportedEditOperationException("Delete size must be 1")
        val p1 = c.startPos
        val p2 = s.startPos
        val pw = c.pword
        if (p1 > p2)
            (new AddTextOperation(c.text, p1 - 1, p1 :: pw), s)
        else if (p1 < p2)
            (c, new DeleteTextOperation(p2 + c.text.length, s.size))
        else
            (new AddTextOperation(c.text, p1, p1 :: pw), new DeleteTextOperation(p2 + c.text.length, s.size))
    }

    /**
     * Computes the position word (history of effective positions) for an
     * operation. Public for testing.
     */
    def pw(op: EditOperation) = {
        op match {
            case at: AddTextOperation => {
                // First case: if the recorded word w (= pword) is empty, start a
                // new word at the current position.
                val p = at.startPos
                val w = at.pword
                if (w.isEmpty)
                    List(p)
                else if (!w.isEmpty && (p == current(w) || (p - current(w)).abs == 1))
                    // Positions adjacent to (or equal to) the word's head extend it.
                    p :: w
                else
                    List()
            }
            case dt: DeleteTextOperation => {
                val p = dt.startPos
                List(p)
            }
            case o: NullOperation => List()
        }
    }

    // Head of a position word is the most recent position.
    protected def current(pword: List[Int]) = pword.head

    // NOTE(review): currently unused.
    private def getRangeFor(o: DeleteTextOperation) = o.startPos to (o.startPos + o.size)

    // Lexicographic comparisons on position words
    // (menor = "less than", mayor = "greater than", igual = "equal").
    def menor(a: List[Int], b: List[Int]) = comparar(a, b, {(v1, v2) => v1 < v2})
    def mayor(a: List[Int], b: List[Int]) = comparar(a, b, {(v1, v2) => v1 > v2})
    def igual(a: List[Int], b: List[Int]) = comparar(a, b, {(v1, v2) => v1 == v2})

    /** Compares two words element-wise, applying `comp` to the first differing pair. */
    private def comparar(a: List[Int], b: List[Int], comp: (Int, Int) => Boolean) = {
        val tuples = a zip b
        val result = tuples.dropWhile {t => t._1 == t._2}
        if (result isEmpty)
            // Here one word is a prefix of the other: compare by length.
            comp(a.size, b.size)
        else {
            val head = result.head
            comp(head._1, head._2)
        }
    }
}
| maurociancio/parallel-editor | src/parallel-editor-common/src/main/scala/ar/noxit/paralleleditor/common/BasicXFormStrategy.scala | Scala | gpl-3.0 | 5,547 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.std
import slamdata.Predef._
import quasar.common.data.Data
import qdata.time.DateTimeInterval
import scalaz._
class DateSpecs extends quasar.Qspec {
  import DateLib._
  "parseInterval" should {
    // Wraps a millisecond count as a successfully parsed Data.Interval.
    def fromMillis(millis: Long) = \\/-(Data.Interval(DateTimeInterval.ofMillis(millis)))
    // Builds the expected interval from hour/minute/second/millisecond parts.
    def hms(hours: Int, minutes: Int, seconds: Int, millis: Int) =
      fromMillis((((hours.toLong*60) + minutes)*60 + seconds)*1000 + millis)
    "parse millis" in {
      parseInterval("PT0.001S") must_=== fromMillis(1)
    }
    "parse negative parts" in {
      parseInterval("PT-1H-1M-1S") must_=== hms(-1, -1, -1, 0)
    }
    "parse fractional parts" in {
      // The spec says "the smallest value may have a decimal fraction"
      parseInterval("PT1.5H") must_=== hms(1, 30, 0, 0)
      parseInterval("PT5H1.5M") must_=== hms(5, 1, 30, 0)
    }.pendingUntilFixed("SD-720")
    "parse days" in {
      parseInterval("P1D") must_=== \\/-(Data.Interval(DateTimeInterval.ofDays(1)))
    }
    "parse ymd" in {
      parseInterval("P1Y1M1D") must_=== \\/-(Data.Interval(DateTimeInterval.make(1, 1, 1, 0, 0)))
    }
  }
}
| quasar-analytics/quasar | frontend/src/test/scala/quasar/std/date.scala | Scala | apache-2.0 | 1,731 |
package org.hammerlab.guacamole.filters.somatic
import org.apache.spark.rdd.RDD
import org.hammerlab.guacamole.variants.CalledSomaticAllele
object SomaticLogOddsFilter {

  /**
   * @param somaticGenotype genotype to test
   * @param minLogOdds threshold to compare against
   * @return true when the genotype's somatic log-odds score is strictly
   *         greater than `minLogOdds`
   */
  def hasMinimumLOD(somaticGenotype: CalledSomaticAllele,
                    minLogOdds: Int): Boolean =
    somaticGenotype.somaticLogOdds > minLogOdds

  /**
   * Keeps only the genotypes whose somatic log odds strictly exceed the threshold.
   *
   * @param genotypes RDD of genotypes to filter
   * @param minLogOdds minimum log odd difference between tumor and normal genotypes
   * @param debug if true, compute (and print) the count of genotypes after filtering
   * @return genotypes with tumor genotype log odds strictly greater than minLogOdds
   */
  def apply(genotypes: RDD[CalledSomaticAllele],
            minLogOdds: Int,
            debug: Boolean = false): RDD[CalledSomaticAllele] = {
    val passing = genotypes.filter(genotype => hasMinimumLOD(genotype, minLogOdds))
    if (debug) {
      SomaticGenotypeFilter.printFilterProgress(passing)
    }
    passing
  }
}
| hammerlab/guacamole | src/main/scala/org/hammerlab/guacamole/filters/somatic/SomaticLogOddsFilter.scala | Scala | apache-2.0 | 946 |
package deaktator.pops.msgs
import java.io.{IOException, InputStream}
import com.google.protobuf.Descriptors.Descriptor
import com.google.protobuf._
import scala.annotation.implicitNotFound
import scala.language.experimental.macros
import scala.language.implicitConversions
/**
 * A type class for static methods present in `com.google.protobuf.GeneratedMessage`,
 * allowing generic code to parse and inspect protobuf messages without knowing the
 * concrete generated class.
 * @author deaktator
 */
@implicitNotFound(msg = "Cannot find ProtoOps type class for ${A}.")
trait ProtoOps[A <: GeneratedMessage] {
  /** Returns the default (all fields unset) instance of `A`. */
  def getDefaultInstance(): A
  /** Returns the protobuf [[Descriptor]] for message type `A`. */
  def getDescriptor(): Descriptor
  // The parseFrom overloads below mirror the static parseFrom methods that the
  // protobuf compiler generates on every message class, one per input source
  // (ByteString, byte array, InputStream, CodedInputStream), with and without
  // an extension registry.
  @throws(classOf[InvalidProtocolBufferException])
  def parseFrom(data: ByteString): A
  @throws(classOf[InvalidProtocolBufferException])
  def parseFrom(data: ByteString, extensionRegistry: ExtensionRegistryLite): A
  @throws(classOf[InvalidProtocolBufferException])
  def parseFrom(data: Array[Byte]): A
  @throws(classOf[InvalidProtocolBufferException])
  def parseFrom(data: Array[Byte], extensionRegistry: ExtensionRegistryLite): A
  @throws(classOf[IOException])
  def parseFrom(input: InputStream): A
  @throws(classOf[IOException])
  def parseFrom(input: InputStream, extensionRegistry: ExtensionRegistryLite): A
  // Delimited variants read a length-prefixed message from the stream.
  @throws(classOf[IOException])
  def parseDelimitedFrom(input: InputStream): A
  @throws(classOf[IOException])
  def parseDelimitedFrom(input: InputStream, extensionRegistry: ExtensionRegistryLite): A
  @throws(classOf[IOException])
  def parseFrom(input: CodedInputStream): A
  @throws(classOf[IOException])
  def parseFrom(input: CodedInputStream, extensionRegistry: ExtensionRegistryLite): A
}
/**
 * Provides factory methods and implicit materializer macros to get type class instances.
 * @author deaktator
 */
object ProtoOps {
  /**
   * Materialize a `ProtoOps[A]` using macros. This is one of the two preferred ways to
   * get an instance of [[ProtoOps]]. The other is using [[deaktator.pops.Proto.apply]].
   * @tparam A type of the `GeneratedMessage` for which a type class should be materialized.
   * @return a compile-time materialized `ProtoOps[A]` instance
   */
  implicit def apply[A <: GeneratedMessage]: ProtoOps[A] = macro ProtoOpsMacros.materialize[A]
  /**
   * This really only should be used in cases where compile-time type knowledge is not available.
   * @param c runtime `Class[A]` of the message type
   * @tparam A type of the `GeneratedMessage`
   * @return a `ProtoOps[A]` built at runtime via [[RuntimeProtoOps]]
   */
  def runtime[A <: GeneratedMessage](c: Class[A]): ProtoOps[A] = RuntimeProtoOps(c)
}
| deaktator/pops | pops-2.4.1/src/main/scala/deaktator/pops/msgs/ProtoOps.scala | Scala | mit | 2,395 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.rdd
import scala.collection.mutable
import scala.reflect.ClassTag
import org.apache.spark.{Partition, TaskContext}
import org.apache.spark.rdd.RDD
/**
 * Partition of a `SlidingRDD`.
 *
 * @param idx index of this partition
 * @param prev the corresponding parent RDD partition
 * @param tail elements borrowed from subsequent parent partitions so that
 *             windows spanning a partition boundary can be completed locally
 * @param offset number of leading elements to drop so that windows stay
 *               aligned to multiples of `step` across partitions
 */
private[mllib]
class SlidingRDDPartition[T](val idx: Int, val prev: Partition, val tail: Seq[T], val offset: Int)
  extends Partition with Serializable {
  override val index: Int = idx
}
/**
 * Represents an RDD from grouping items of its parent RDD in fixed size blocks by passing a sliding
 * window over them. The ordering is first based on the partition index and then the ordering of
 * items within each partition. This is similar to sliding in Scala collections, except that it
 * becomes an empty RDD if the window size is greater than the total number of items. It needs to
 * trigger a Spark job if the parent RDD has more than one partitions. To make this operation
 * efficient, the number of items per partition should be larger than the window size and the
 * window size should be small, e.g., 2.
 *
 * @param parent the parent RDD
 * @param windowSize the window size, must be greater than 1
 * @param step step size for windows
 *
 * @see `org.apache.spark.mllib.rdd.RDDFunctions.sliding(Int, Int)*`
 * @see `scala.collection.IterableLike.sliding(Int, Int)*`
 */
private[mllib]
class SlidingRDD[T: ClassTag](@transient val parent: RDD[T], val windowSize: Int, val step: Int)
  extends RDD[Array[T]](parent) {

  require(windowSize > 0 && step > 0 && !(windowSize == 1 && step == 1),
    "Window size and step must be greater than 0, " +
      s"and they cannot be both 1, but got windowSize = $windowSize and step = $step.")

  override def compute(split: Partition, context: TaskContext): Iterator[Array[T]] = {
    val part = split.asInstanceOf[SlidingRDDPartition[T]]
    // Concatenate this partition's elements with the tail borrowed from later
    // partitions, drop `offset` elements to stay aligned with the global step,
    // then emit only complete windows (withPartial(false)).
    (firstParent[T].iterator(part.prev, context) ++ part.tail)
      .drop(part.offset)
      .sliding(windowSize, step)
      .withPartial(false)
      .map(_.toArray)
  }

  override def getPreferredLocations(split: Partition): Seq[String] =
    firstParent[T].preferredLocations(split.asInstanceOf[SlidingRDDPartition[T]].prev)

  override def getPartitions: Array[Partition] = {
    val parentPartitions = parent.partitions
    val n = parentPartitions.length
    if (n == 0) {
      Array.empty
    } else if (n == 1) {
      // Single parent partition: no cross-partition windows, no borrowed tail.
      Array(new SlidingRDDPartition[T](0, parentPartitions(0), Seq.empty, 0))
    } else {
      val w1 = windowSize - 1
      // Get partition sizes and first w1 elements.
      // (This triggers a Spark job, as noted in the class doc.)
      val (sizes, heads) = parent.mapPartitions { iter =>
        val w1Array = iter.take(w1).toArray
        Iterator.single((w1Array.length + iter.length, w1Array))
      }.collect().unzip
      val partitions = mutable.ArrayBuffer.empty[SlidingRDDPartition[T]]
      var i = 0
      var cumSize = 0
      var partitionIndex = 0
      while (i < n) {
        // `offset` skips elements at the head of partition i so that windows
        // keep starting at global positions that are multiples of `step`.
        val mod = cumSize % step
        val offset = if (mod == 0) 0 else step - mod
        val size = sizes(i)
        if (offset < size) {
          val tail = mutable.ListBuffer.empty[T]
          // Keep appending to the current tail until it has w1 elements.
          var j = i + 1
          while (j < n && tail.length < w1) {
            tail ++= heads(j).take(w1 - tail.length)
            j += 1
          }
          // Only keep the partition if at least one complete window fits.
          if (sizes(i) + tail.length >= offset + windowSize) {
            partitions +=
              new SlidingRDDPartition[T](partitionIndex, parentPartitions(i), tail, offset)
            partitionIndex += 1
          }
        }
        cumSize += size
        i += 1
      }
      partitions.toArray
    }
  }

  // TODO: Override methods such as aggregate, which only requires one Spark job.
}
| wangyixiaohuihui/spark2-annotation | mllib/src/main/scala/org/apache/spark/mllib/rdd/SlidingRDD.scala | Scala | apache-2.0 | 4,558 |
/**
* Copyright 2015 Gianluca Amato <gamato@unich.it>
*
* This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
* JANDOM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* JANDOM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty ofa
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.jandom.fixpoint
/**
 * The trait for a generic equation system: a family of unknowns together with a
 * transformation of candidate assignments whose fixpoints are the solutions.
 */
trait EquationSystem {
  /**
   * The type of the unknowns for this equation system.
   */
  type Unknown
  /**
   * The type of values for this equation system.
   */
  type Value
  /**
   * An assignment of values to unknowns, which is a candidate solution.
   */
  type Assignment = Unknown => Value
  /**
   * A way to combine the value in the previous iteration with the value in the
   * new iteration; arguments are (previous, new). In this project such operators
   * are presumably widenings/narrowings — confirm against the solvers using them.
   */
  type Box = (Value, Value) => Value
  /**
   * The actual equation system: given a candidate assignment `rho`, returns the
   * assignment obtained by evaluating each unknown's right-hand side under `rho`.
   */
  def apply(rho: Assignment): Assignment
}
| francescaScozzari/Jandom | core/src/main/scala/it/unich/jandom/fixpoint/EquationSystem.scala | Scala | lgpl-3.0 | 1,403 |
package scala.collection
import scala.language.higherKinds
import scala.collection.mutable.Builder
import scala.annotation.implicitNotFound
import scala.reflect.ClassTag
/** Builds a collection of type `C` from elements of type `A` when a source collection of type `From` is available.
  * Implicit instances of `BuildFrom` are available for all collection types.
  *
  * @tparam From Type of source collection
  * @tparam A Type of elements (e.g. `Int`, `Boolean`, etc.)
  * @tparam C Type of collection (e.g. `List[Int]`, `TreeMap[Int, String]`, etc.)
  */
@implicitNotFound(msg = "Cannot construct a collection of type ${C} with elements of type ${A} based on a collection of type ${From}.")
trait BuildFrom[-From, -A, +C] extends Any {
  /** Builds a `C` containing the elements of `it`; `from` supplies runtime
    * information such as the source's factory or ordering. */
  def fromSpecificIterable(from: From)(it: Iterable[A]): C

  /** Get a Builder for the collection. For non-strict collection types this will use an intermediate buffer.
    * Building collections with `fromSpecificIterable` is preferred because it can be lazy for lazy collections. */
  def newBuilder(from: From): Builder[A, C]

  @deprecated("Use newBuilder() instead of apply()", "2.13.0")
  @`inline` def apply(from: From): Builder[A, C] = newBuilder(from)
}
// Instances are layered across BuildFrom / BuildFromLowPriority1 /
// BuildFromLowPriority2 so that the more specific instances here (map-like,
// sorted, bitset, string, array, view) win implicit resolution over the
// generic IterableOps fallback defined in the low-priority traits.
object BuildFrom extends BuildFromLowPriority1 {
  /** Build the source collection type from a MapOps */
  implicit def buildFromMapOps[CC[X, Y] <: Map[X, Y] with MapOps[X, Y, CC, _], K0, V0, K, V]: BuildFrom[CC[K0, V0], (K, V), CC[K, V]] = new BuildFrom[CC[K0, V0], (K, V), CC[K, V]] {
    //TODO: Reuse a prototype instance
    def newBuilder(from: CC[K0, V0]): Builder[(K, V), CC[K, V]] = from.mapFactory.newBuilder[K, V]()
    def fromSpecificIterable(from: CC[K0, V0])(it: Iterable[(K, V)]): CC[K, V] = from.mapFactory.from(it)
  }
  /** Build the source collection type from a SortedMapOps */
  implicit def buildFromSortedMapOps[CC[X, Y] <: SortedMap[X, Y] with SortedMapOps[X, Y, CC, _], K0, V0, K : Ordering, V]: BuildFrom[CC[K0, V0], (K, V), CC[K, V]] = new BuildFrom[CC[K0, V0], (K, V), CC[K, V]] {
    def newBuilder(from: CC[K0, V0]): Builder[(K, V), CC[K, V]] = from.sortedMapFactory.newBuilder[K, V]()
    def fromSpecificIterable(from: CC[K0, V0])(it: Iterable[(K, V)]): CC[K, V] = from.sortedMapFactory.from(it)
  }
  /** Targets concrete BitSet subtypes; elements are constrained to Int. */
  implicit def buildFromBitSet[C <: BitSet with BitSetOps[C]]: BuildFrom[C, Int, C] =
    new BuildFrom[C, Int, C] {
      def fromSpecificIterable(from: C)(it: Iterable[Int]): C = from.bitSetFactory.fromSpecific(it)
      def newBuilder(from: C): Builder[Int, C] = from.bitSetFactory.newBuilder()
    }
  /** String-to-String building when the elements are Chars. */
  implicit val buildFromString: BuildFrom[String, Char, String] =
    new BuildFrom[String, Char, String] {
      def fromSpecificIterable(from: String)(it: Iterable[Char]): String = Factory.stringFactory.fromSpecific(it)
      def newBuilder(from: String): Builder[Char, String] = Factory.stringFactory.newBuilder()
    }
  /** Arrays need a ClassTag for the element type of the result. */
  implicit def buildFromArray[A : ClassTag]: BuildFrom[Array[_], A, Array[A]] =
    new BuildFrom[Array[_], A, Array[A]] {
      def fromSpecificIterable(from: Array[_])(it: Iterable[A]): Array[A] = Factory.arrayFactory[A].fromSpecific(it)
      def newBuilder(from: Array[_]): Builder[A, Array[A]] = Factory.arrayFactory[A].newBuilder()
    }
  implicit def buildFromView[A, B]: BuildFrom[View[A], B, View[B]] =
    new BuildFrom[View[A], B, View[B]] {
      def fromSpecificIterable(from: View[A])(it: Iterable[B]): View[B] = View.from(it)
      def newBuilder(from: View[A]): Builder[B, View[B]] = View.newBuilder()
    }
}
trait BuildFromLowPriority1 extends BuildFromLowPriority2 {
  /** Build the source collection type from an Iterable with SortedOps */
  // Requires an Ordering on the target element type; ranked below the instances
  // in object BuildFrom but above the generic IterableOps fallback.
  implicit def buildFromSortedSetOps[CC[X] <: SortedSet[X] with SortedSetOps[X, CC, _], A0, A : Ordering]: BuildFrom[CC[A0], A, CC[A]] = new BuildFrom[CC[A0], A, CC[A]] {
    def newBuilder(from: CC[A0]): Builder[A, CC[A]] = from.sortedIterableFactory.newBuilder[A]()
    def fromSpecificIterable(from: CC[A0])(it: Iterable[A]): CC[A] = from.sortedIterableFactory.from(it)
  }
  /** Fallback for String sources when the element type is not Char:
    * builds an immutable.IndexedSeq instead of a String. */
  implicit def fallbackStringCanBuildFrom[A]: BuildFrom[String, A, immutable.IndexedSeq[A]] =
    new BuildFrom[String, A, immutable.IndexedSeq[A]] {
      def fromSpecificIterable(from: String)(it: Iterable[A]): immutable.IndexedSeq[A] = immutable.IndexedSeq.from(it)
      def newBuilder(from: String): Builder[A, immutable.IndexedSeq[A]] = immutable.IndexedSeq.newBuilder[A]
    }
}
trait BuildFromLowPriority2 {
  /** Build the source collection type from an IterableOps */
  // Lowest-priority generic instance: applies to any Iterable-based CC.
  implicit def buildFromIterableOps[CC[X] <: Iterable[X] with IterableOps[X, CC, _], A0, A]: BuildFrom[CC[A0], A, CC[A]] = new BuildFrom[CC[A0], A, CC[A]] {
    //TODO: Reuse a prototype instance
    def newBuilder(from: CC[A0]): Builder[A, CC[A]] = from.iterableFactory.newBuilder[A]()
    def fromSpecificIterable(from: CC[A0])(it: Iterable[A]): CC[A] = from.iterableFactory.from(it)
  }
  /** Iterators build Iterators; the source iterator is only used for its type. */
  implicit def buildFromIterator[A]: BuildFrom[Iterator[_], A, Iterator[A]] = new BuildFrom[Iterator[_], A, Iterator[A]] {
    def newBuilder(from: Iterator[_]): mutable.Builder[A, Iterator[A]] = Iterator.newBuilder()
    def fromSpecificIterable(from: Iterator[_])(it: Iterable[A]): Iterator[A] = Iterator.from(it)
  }
}
| rorygraves/perf_tester | corpus/scala-library/src/main/scala/collection/BuildFrom.scala | Scala | apache-2.0 | 5,207 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.File
import java.math.MathContext
import java.sql.Timestamp
import java.util.concurrent.atomic.AtomicBoolean
import org.apache.spark.{AccumulatorSuite, SparkException}
import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart}
import org.apache.spark.sql.catalyst.util.StringUtils
import org.apache.spark.sql.execution.aggregate
import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, CartesianProductExec, SortMergeJoinExec}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.{SharedSQLContext, TestSQLContext}
import org.apache.spark.sql.test.SQLTestData._
import org.apache.spark.sql.types._
class SQLQuerySuite extends QueryTest with SharedSQLContext {
import testImplicits._
  // Registers the shared test relations (testData, testData2, allNulls, ...)
  // imported from SQLTestData and used by the tests below.
  setupTestData()

  // CASE/COALESCE mixing numeric and string branches should widen results to string.
  test("SPARK-8010: promote numeric to string") {
    val df = Seq((1, 1)).toDF("key", "value")
    df.createOrReplaceTempView("src")
    val queryCaseWhen = sql("select case when true then 1.0 else '1' end from src ")
    val queryCoalesce = sql("select coalesce(null, 1, '1') from src ")
    checkAnswer(queryCaseWhen, Row("1.0") :: Nil)
    checkAnswer(queryCoalesce, Row("1") :: Nil)
  }

  // Registers throwaway UDFs, then checks SHOW FUNCTIONS listing and pattern filtering.
  test("show functions") {
    def getFunctions(pattern: String): Seq[Row] = {
      StringUtils.filterPattern(
        spark.sessionState.catalog.listFunctions("default").map(_._1.funcName), pattern)
        .map(Row(_))
    }
    def createFunction(names: Seq[String]): Unit = {
      names.foreach { name =>
        spark.udf.register(name, (arg1: Int, arg2: String) => arg2 + arg1)
      }
    }
    def dropFunction(names: Seq[String]): Unit = {
      names.foreach { name =>
        spark.sessionState.catalog.dropTempFunction(name, false)
      }
    }
    val functions = Array("ilog", "logi", "logii", "logiii", "crc32i", "cubei", "cume_disti",
      "isize", "ispace", "to_datei", "date_addi", "current_datei")
    createFunction(functions)
    checkAnswer(sql("SHOW functions"), getFunctions("*"))
    assert(sql("SHOW functions").collect().size > 200)
    Seq("^c*", "*e$", "log*", "*date*").foreach { pattern =>
      // For the pattern part, only '*' and '|' are allowed as wildcards.
      // For '*', we need to replace it to '.*'.
      checkAnswer(sql(s"SHOW FUNCTIONS '$pattern'"), getFunctions(pattern))
    }
    dropFunction(functions)
  }

  // DESCRIBE FUNCTION should print usage (and, with EXTENDED, examples);
  // the lookup is expected to be case-insensitive ("functioN Upper").
  test("describe functions") {
    checkKeywordsExist(sql("describe function extended upper"),
      "Function: upper",
      "Class: org.apache.spark.sql.catalyst.expressions.Upper",
      "Usage: upper(str) - Returns `str` with all characters changed to uppercase",
      "Extended Usage:",
      "Examples:",
      "> SELECT upper('SparkSql');",
      "SPARKSQL")
    checkKeywordsExist(sql("describe functioN Upper"),
      "Function: upper",
      "Class: org.apache.spark.sql.catalyst.expressions.Upper",
      "Usage: upper(str) - Returns `str` with all characters changed to uppercase")
    checkKeywordsNotExist(sql("describe functioN Upper"), "Extended Usage")
    checkKeywordsExist(sql("describe functioN abcadf"), "Function: abcadf not found.")
  }

  // Every registered function except a few grouping/window builtins must have
  // a real description (no "N/A.").
  test("SPARK-14415: All functions should have own descriptions") {
    for (f <- spark.sessionState.functionRegistry.listFunction()) {
      if (!Seq("cube", "grouping", "grouping_id", "rollup", "window").contains(f)) {
        checkKeywordsNotExist(sql(s"describe function `$f`"), "N/A.")
      }
    }
  }
  // Self cross-join of a cached table: grouping must still resolve columns.
  test("SPARK-6743: no columns from cache") {
    Seq(
      (83, 0, 38),
      (26, 0, 79),
      (43, 81, 24)
    ).toDF("a", "b", "c").createOrReplaceTempView("cachedData")
    spark.catalog.cacheTable("cachedData")
    withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
      checkAnswer(
        sql("SELECT t1.b FROM cachedData, cachedData t1 GROUP BY t1.b"),
        Row(0) :: Row(81) :: Nil)
    }
  }

  test("self join with aliases") {
    Seq(1, 2, 3).map(i => (i, i.toString)).toDF("int", "str").createOrReplaceTempView("df")
    checkAnswer(
      sql(
        """
          |SELECT x.str, COUNT(*)
          |FROM df x JOIN df y ON x.str = y.str
          |GROUP BY x.str
        """.stripMargin),
      Row("1", 1) :: Row("2", 1) :: Row("3", 1) :: Nil)
  }

  // Qualified star (r.*) should expand to only the right side's columns.
  test("support table.star") {
    checkAnswer(
      sql(
        """
          |SELECT r.*
          |FROM testData l join testData2 r on (l.key = r.a)
        """.stripMargin),
      Row(1, 1) :: Row(1, 2) :: Row(2, 1) :: Row(2, 2) :: Row(3, 1) :: Row(3, 2) :: Nil)
  }

  test("self join with alias in agg") {
    Seq(1, 2, 3)
      .map(i => (i, i.toString))
      .toDF("int", "str")
      .groupBy("str")
      .agg($"str", count("str").as("strCount"))
      .createOrReplaceTempView("df")
    checkAnswer(
      sql(
        """
          |SELECT x.str, SUM(x.strCount)
          |FROM df x JOIN df y ON x.str = y.str
          |GROUP BY x.str
        """.stripMargin),
      Row("1", 1) :: Row("2", 1) :: Row("3", 1) :: Nil)
  }

  // expr() should parse SQL expressions in both select and groupBy positions.
  test("SPARK-8668 expr function") {
    checkAnswer(Seq((1, "Bobby G."))
      .toDF("id", "name")
      .select(expr("length(name)"), expr("abs(id)")), Row(8, 1))
    checkAnswer(Seq((1, "building burrito tunnels"), (1, "major projects"))
      .toDF("id", "saying")
      .groupBy(expr("length(saying)"))
      .count(), Row(24, 1) :: Row(14, 1) :: Nil)
  }

  test("SPARK-4625 support SORT BY in SimpleSQLParser & DSL") {
    checkAnswer(
      sql("SELECT a FROM testData2 SORT BY a"),
      Seq(1, 1, 2, 2, 3, 3).map(Row(_))
    )
  }
  // A non-deterministic UDF column must be materialized once by the cache, so
  // repeated collect() calls see identical values.
  test("SPARK-7158 collect and take return different results") {
    import java.util.UUID
    val df = Seq(Tuple1(1), Tuple1(2), Tuple1(3)).toDF("index")
    // we expect the id is materialized once
    val idUDF = org.apache.spark.sql.functions.udf(() => UUID.randomUUID().toString)
    val dfWithId = df.withColumn("id", idUDF())
    // Make a new DataFrame (actually the same reference to the old one)
    val cached = dfWithId.cache()
    // Trigger the cache
    val d0 = dfWithId.collect()
    val d1 = cached.collect()
    val d2 = cached.collect()
    // Since the ID is only materialized once, then all of the records
    // should come from the cache, not by re-computing. Otherwise, the ID
    // will be different
    assert(d0.map(_(0)) === d2.map(_(0)))
    assert(d0.map(_(1)) === d2.map(_(1)))
    assert(d1.map(_(0)) === d2.map(_(0)))
    assert(d1.map(_(1)) === d2.map(_(1)))
  }

  // Grouping by a nested JSON field through a subquery alias.
  test("grouping on nested fields") {
    spark.read
      .json(Seq("""{"nested": {"attribute": 1}, "value": 2}""").toDS())
      .createOrReplaceTempView("rows")
    checkAnswer(
      sql(
        """
          |select attribute, sum(cnt)
          |from (
          |  select nested.attribute, count(*) as cnt
          |  from rows
          |  group by nested.attribute) a
          |group by attribute
        """.stripMargin),
      Row(1, 1) :: Nil)
  }

  // String column compared against int literals: IN should coerce the types.
  test("SPARK-6201 IN type conversion") {
    spark.read
      .json(Seq("{\"a\": \"1\"}}", "{\"a\": \"2\"}}", "{\"a\": \"3\"}}").toDS())
      .createOrReplaceTempView("d")
    checkAnswer(
      sql("select * from d where d.a in (1,2)"),
      Seq(Row("1"), Row("2")))
  }

  // A trailing empty line in the JSON input must not produce a row.
  test("SPARK-11226 Skip empty line in json file") {
    spark.read
      .json(Seq("{\"a\": \"1\"}}", "{\"a\": \"2\"}}", "{\"a\": \"3\"}}", "").toDS())
      .createOrReplaceTempView("d")
    checkAnswer(
      sql("select count(1) from d"),
      Seq(Row(3)))
  }

  test("SPARK-8828 sum should return null if all input values are null") {
    checkAnswer(
      sql("select sum(a), avg(a) from allNulls"),
      Seq(Row(null, null))
    )
  }
private def testCodeGen(sqlText: String, expectedResults: Seq[Row]): Unit = {
val df = sql(sqlText)
// First, check if we have GeneratedAggregate.
val hasGeneratedAgg = df.queryExecution.sparkPlan
.collect { case _: aggregate.HashAggregateExec => true }
.nonEmpty
if (!hasGeneratedAgg) {
fail(
s"""
|Codegen is enabled, but query $sqlText does not have HashAggregate in the plan.
|${df.queryExecution.simpleString}
""".stripMargin)
}
// Then, check results.
checkAnswer(df, expectedResults)
}
test("aggregation with codegen") {
// Prepare a table that we can group some rows.
spark.table("testData")
.union(spark.table("testData"))
.union(spark.table("testData"))
.createOrReplaceTempView("testData3x")
try {
// Just to group rows.
testCodeGen(
"SELECT key FROM testData3x GROUP BY key",
(1 to 100).map(Row(_)))
// COUNT
testCodeGen(
"SELECT key, count(value) FROM testData3x GROUP BY key",
(1 to 100).map(i => Row(i, 3)))
testCodeGen(
"SELECT count(key) FROM testData3x",
Row(300) :: Nil)
// COUNT DISTINCT ON int
testCodeGen(
"SELECT value, count(distinct key) FROM testData3x GROUP BY value",
(1 to 100).map(i => Row(i.toString, 1)))
testCodeGen(
"SELECT count(distinct key) FROM testData3x",
Row(100) :: Nil)
// SUM
testCodeGen(
"SELECT value, sum(key) FROM testData3x GROUP BY value",
(1 to 100).map(i => Row(i.toString, 3 * i)))
testCodeGen(
"SELECT sum(key), SUM(CAST(key as Double)) FROM testData3x",
Row(5050 * 3, 5050 * 3.0) :: Nil)
// AVERAGE
testCodeGen(
"SELECT value, avg(key) FROM testData3x GROUP BY value",
(1 to 100).map(i => Row(i.toString, i)))
testCodeGen(
"SELECT avg(key) FROM testData3x",
Row(50.5) :: Nil)
// MAX
testCodeGen(
"SELECT value, max(key) FROM testData3x GROUP BY value",
(1 to 100).map(i => Row(i.toString, i)))
testCodeGen(
"SELECT max(key) FROM testData3x",
Row(100) :: Nil)
// MIN
testCodeGen(
"SELECT value, min(key) FROM testData3x GROUP BY value",
(1 to 100).map(i => Row(i.toString, i)))
testCodeGen(
"SELECT min(key) FROM testData3x",
Row(1) :: Nil)
// Some combinations.
testCodeGen(
"""
|SELECT
| value,
| sum(key),
| max(key),
| min(key),
| avg(key),
| count(key),
| count(distinct key)
|FROM testData3x
|GROUP BY value
""".stripMargin,
(1 to 100).map(i => Row(i.toString, i*3, i, i, i, 3, 1)))
testCodeGen(
"SELECT max(key), min(key), avg(key), count(key), count(distinct key) FROM testData3x",
Row(100, 1, 50.5, 300, 100) :: Nil)
// Aggregate with Code generation handling all null values
testCodeGen(
"SELECT sum('a'), avg('a'), count(null) FROM testData",
Row(null, null, 0) :: Nil)
} finally {
spark.catalog.dropTempView("testData3x")
}
}
test("Add Parser of SQL COALESCE()") {
checkAnswer(
sql("""SELECT COALESCE(1, 2)"""),
Row(1))
checkAnswer(
sql("SELECT COALESCE(null, 1, 1.5)"),
Row(BigDecimal(1)))
checkAnswer(
sql("SELECT COALESCE(null, null, null)"),
Row(null))
}
test("SPARK-3176 Added Parser of SQL LAST()") {
checkAnswer(
sql("SELECT LAST(n) FROM lowerCaseData"),
Row(4))
}
test("SPARK-2041 column name equals tablename") {
checkAnswer(
sql("SELECT tableName FROM tableName"),
Row("test"))
}
test("SQRT") {
checkAnswer(
sql("SELECT SQRT(key) FROM testData"),
(1 to 100).map(x => Row(math.sqrt(x.toDouble))).toSeq
)
}
test("SQRT with automatic string casts") {
checkAnswer(
sql("SELECT SQRT(CAST(key AS STRING)) FROM testData"),
(1 to 100).map(x => Row(math.sqrt(x.toDouble))).toSeq
)
}
test("SPARK-2407 Added Parser of SQL SUBSTR()") {
checkAnswer(
sql("SELECT substr(tableName, 1, 2) FROM tableName"),
Row("te"))
checkAnswer(
sql("SELECT substr(tableName, 3) FROM tableName"),
Row("st"))
checkAnswer(
sql("SELECT substring(tableName, 1, 2) FROM tableName"),
Row("te"))
checkAnswer(
sql("SELECT substring(tableName, 3) FROM tableName"),
Row("st"))
}
test("SPARK-3173 Timestamp support in the parser") {
(0 to 3).map(i => Tuple1(new Timestamp(i))).toDF("time").createOrReplaceTempView("timestamps")
checkAnswer(sql(
"SELECT time FROM timestamps WHERE time='1969-12-31 16:00:00.0'"),
Row(java.sql.Timestamp.valueOf("1969-12-31 16:00:00")))
checkAnswer(sql(
"SELECT time FROM timestamps WHERE time=CAST('1969-12-31 16:00:00.001' AS TIMESTAMP)"),
Row(java.sql.Timestamp.valueOf("1969-12-31 16:00:00.001")))
checkAnswer(sql(
"SELECT time FROM timestamps WHERE time='1969-12-31 16:00:00.001'"),
Row(java.sql.Timestamp.valueOf("1969-12-31 16:00:00.001")))
checkAnswer(sql(
"SELECT time FROM timestamps WHERE '1969-12-31 16:00:00.001'=time"),
Row(java.sql.Timestamp.valueOf("1969-12-31 16:00:00.001")))
checkAnswer(sql(
"""SELECT time FROM timestamps WHERE time<'1969-12-31 16:00:00.003'
AND time>'1969-12-31 16:00:00.001'"""),
Row(java.sql.Timestamp.valueOf("1969-12-31 16:00:00.002")))
checkAnswer(sql(
"""
|SELECT time FROM timestamps
|WHERE time IN ('1969-12-31 16:00:00.001','1969-12-31 16:00:00.002')
""".stripMargin),
Seq(Row(java.sql.Timestamp.valueOf("1969-12-31 16:00:00.001")),
Row(java.sql.Timestamp.valueOf("1969-12-31 16:00:00.002"))))
checkAnswer(sql(
"SELECT time FROM timestamps WHERE time='123'"),
Nil)
}
test("left semi greater than predicate") {
withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
checkAnswer(
sql("SELECT * FROM testData2 x LEFT SEMI JOIN testData2 y ON x.a >= y.a + 2"),
Seq(Row(3, 1), Row(3, 2))
)
}
}
test("left semi greater than predicate and equal operator") {
checkAnswer(
sql("SELECT * FROM testData2 x LEFT SEMI JOIN testData2 y ON x.b = y.b and x.a >= y.a + 2"),
Seq(Row(3, 1), Row(3, 2))
)
checkAnswer(
sql("SELECT * FROM testData2 x LEFT SEMI JOIN testData2 y ON x.b = y.a and x.a >= y.b + 1"),
Seq(Row(2, 1), Row(2, 2), Row(3, 1), Row(3, 2))
)
}
test("select *") {
checkAnswer(
sql("SELECT * FROM testData"),
testData.collect().toSeq)
}
test("simple select") {
checkAnswer(
sql("SELECT value FROM testData WHERE key = 1"),
Row("1"))
}
  /**
   * Shared ORDER BY battery: two-column sorts in every ASC/DESC combination,
   * plus ordering driven by a second table's column, an array element, and a
   * map value. Invoked by the "external sorting" test below.
   */
  def sortTest(): Unit = {
    checkAnswer(
      sql("SELECT * FROM testData2 ORDER BY a ASC, b ASC"),
      Seq(Row(1, 1), Row(1, 2), Row(2, 1), Row(2, 2), Row(3, 1), Row(3, 2)))
    checkAnswer(
      sql("SELECT * FROM testData2 ORDER BY a ASC, b DESC"),
      Seq(Row(1, 2), Row(1, 1), Row(2, 2), Row(2, 1), Row(3, 2), Row(3, 1)))
    checkAnswer(
      sql("SELECT * FROM testData2 ORDER BY a DESC, b DESC"),
      Seq(Row(3, 2), Row(3, 1), Row(2, 2), Row(2, 1), Row(1, 2), Row(1, 1)))
    checkAnswer(
      sql("SELECT * FROM testData2 ORDER BY a DESC, b ASC"),
      Seq(Row(3, 1), Row(3, 2), Row(2, 1), Row(2, 2), Row(1, 1), Row(1, 2)))
    // Sort binaryData by column `a` and project only `b`.
    checkAnswer(
      sql("SELECT b FROM binaryData ORDER BY a ASC"),
      (1 to 5).map(Row(_)))
    checkAnswer(
      sql("SELECT b FROM binaryData ORDER BY a DESC"),
      (1 to 5).map(Row(_)).toSeq.reverse)
    // Order by an array element; expectation mirrors a local sort of the fixture.
    checkAnswer(
      sql("SELECT * FROM arrayData ORDER BY data[0] ASC"),
      arrayData.collect().sortBy(_.data(0)).map(Row.fromTuple).toSeq)
    checkAnswer(
      sql("SELECT * FROM arrayData ORDER BY data[0] DESC"),
      arrayData.collect().sortBy(_.data(0)).reverse.map(Row.fromTuple).toSeq)
    // Order by a map value looked up with key 1.
    checkAnswer(
      sql("SELECT * FROM mapData ORDER BY data[1] ASC"),
      mapData.collect().sortBy(_.data(1)).map(Row.fromTuple).toSeq)
    checkAnswer(
      sql("SELECT * FROM mapData ORDER BY data[1] DESC"),
      mapData.collect().sortBy(_.data(1)).reverse.map(Row.fromTuple).toSeq)
  }
  test("external sorting") {
    // Runs the full ORDER BY battery defined in sortTest().
    sortTest()
  }
test("negative in LIMIT or TABLESAMPLE") {
val expected = "The limit expression must be equal to or greater than 0, but got -1"
var e = intercept[AnalysisException] {
sql("SELECT * FROM testData TABLESAMPLE (-1 rows)")
}.getMessage
assert(e.contains(expected))
}
test("CTE feature") {
checkAnswer(
sql("with q1 as (select * from testData limit 10) select * from q1"),
testData.take(10).toSeq)
checkAnswer(
sql("""
|with q1 as (select * from testData where key= '5'),
|q2 as (select * from testData where key = '4')
|select * from q1 union all select * from q2""".stripMargin),
Row(5, "5") :: Row(4, "4") :: Nil)
}
test("Allow only a single WITH clause per query") {
intercept[AnalysisException] {
sql(
"with q1 as (select * from testData) with q2 as (select * from q1) select * from q2")
}
}
test("date row") {
checkAnswer(sql(
"""select cast("2015-01-28" as date) from testData limit 1"""),
Row(java.sql.Date.valueOf("2015-01-28"))
)
}
test("from follow multiple brackets") {
checkAnswer(sql(
"""
|select key from ((select * from testData)
| union all (select * from testData)) x limit 1
""".stripMargin),
Row(1)
)
checkAnswer(sql(
"select key from (select * from testData) x limit 1"),
Row(1)
)
checkAnswer(sql(
"""
|select key from
| (select * from testData union all select * from testData) x
| limit 1
""".stripMargin),
Row(1)
)
}
test("average") {
checkAnswer(
sql("SELECT AVG(a) FROM testData2"),
Row(2.0))
}
test("average overflow") {
checkAnswer(
sql("SELECT AVG(a),b FROM largeAndSmallInts group by b"),
Seq(Row(2147483645.0, 1), Row(2.0, 2)))
}
test("count") {
checkAnswer(
sql("SELECT COUNT(*) FROM testData2"),
Row(testData2.count()))
}
test("count distinct") {
checkAnswer(
sql("SELECT COUNT(DISTINCT b) FROM testData2"),
Row(2))
}
test("approximate count distinct") {
checkAnswer(
sql("SELECT APPROX_COUNT_DISTINCT(a) FROM testData2"),
Row(3))
}
test("approximate count distinct with user provided standard deviation") {
checkAnswer(
sql("SELECT APPROX_COUNT_DISTINCT(a, 0.04) FROM testData2"),
Row(3))
}
test("null count") {
checkAnswer(
sql("SELECT a, COUNT(b) FROM testData3 GROUP BY a"),
Seq(Row(1, 0), Row(2, 1)))
checkAnswer(
sql(
"SELECT COUNT(a), COUNT(b), COUNT(1), COUNT(DISTINCT a), COUNT(DISTINCT b) FROM testData3"),
Row(2, 1, 2, 2, 1))
}
test("count of empty table") {
withTempView("t") {
Seq.empty[(Int, Int)].toDF("a", "b").createOrReplaceTempView("t")
checkAnswer(
sql("select count(a) from t"),
Row(0))
}
}
test("inner join where, one match per row") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
checkAnswer(
sql("SELECT * FROM uppercasedata JOIN lowercasedata WHERE n = N"),
Seq(
Row(1, "A", 1, "a"),
Row(2, "B", 2, "b"),
Row(3, "C", 3, "c"),
Row(4, "D", 4, "d")))
}
}
test("inner join ON, one match per row") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
checkAnswer(
sql("SELECT * FROM uppercasedata JOIN lowercasedata ON n = N"),
Seq(
Row(1, "A", 1, "a"),
Row(2, "B", 2, "b"),
Row(3, "C", 3, "c"),
Row(4, "D", 4, "d")))
}
}
test("inner join, where, multiple matches") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
checkAnswer(
sql(
"""
|SELECT * FROM
| (SELECT * FROM testdata2 WHERE a = 1) x JOIN
| (SELECT * FROM testdata2 WHERE a = 1) y
|WHERE x.a = y.a""".stripMargin),
Row(1, 1, 1, 1) ::
Row(1, 1, 1, 2) ::
Row(1, 2, 1, 1) ::
Row(1, 2, 1, 2) :: Nil)
}
}
test("inner join, no matches") {
checkAnswer(
sql(
"""
|SELECT * FROM
| (SELECT * FROM testData2 WHERE a = 1) x JOIN
| (SELECT * FROM testData2 WHERE a = 2) y
|WHERE x.a = y.a""".stripMargin),
Nil)
}
test("big inner join, 4 matches per row") {
checkAnswer(
sql(
"""
|SELECT * FROM
| (SELECT * FROM testData UNION ALL
| SELECT * FROM testData UNION ALL
| SELECT * FROM testData UNION ALL
| SELECT * FROM testData) x JOIN
| (SELECT * FROM testData UNION ALL
| SELECT * FROM testData UNION ALL
| SELECT * FROM testData UNION ALL
| SELECT * FROM testData) y
|WHERE x.key = y.key""".stripMargin),
testData.rdd.flatMap(
row => Seq.fill(16)(Row.merge(row, row))).collect().toSeq)
}
test("cartesian product join") {
withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
checkAnswer(
testData3.join(testData3),
Row(1, null, 1, null) ::
Row(1, null, 2, 2) ::
Row(2, 2, 1, null) ::
Row(2, 2, 2, 2) :: Nil)
}
}
test("left outer join") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
checkAnswer(
sql("SELECT * FROM uppercasedata LEFT OUTER JOIN lowercasedata ON n = N"),
Row(1, "A", 1, "a") ::
Row(2, "B", 2, "b") ::
Row(3, "C", 3, "c") ::
Row(4, "D", 4, "d") ::
Row(5, "E", null, null) ::
Row(6, "F", null, null) :: Nil)
}
}
test("right outer join") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
checkAnswer(
sql("SELECT * FROM lowercasedata RIGHT OUTER JOIN uppercasedata ON n = N"),
Row(1, "a", 1, "A") ::
Row(2, "b", 2, "B") ::
Row(3, "c", 3, "C") ::
Row(4, "d", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
}
}
test("full outer join") {
checkAnswer(
sql(
"""
|SELECT * FROM
| (SELECT * FROM upperCaseData WHERE N <= 4) leftTable FULL OUTER JOIN
| (SELECT * FROM upperCaseData WHERE N >= 3) rightTable
| ON leftTable.N = rightTable.N
""".stripMargin),
Row(1, "A", null, null) ::
Row(2, "B", null, null) ::
Row(3, "C", 3, "C") ::
Row (4, "D", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
}
  test("SPARK-11111 null-safe join should not use cartesian product") {
    val df = sql("select count(*) from testData a join testData b on (a.key <=> b.key)")
    // The null-safe equality (<=>) condition must be planned as an equi-join:
    // no CartesianProductExec may appear in the physical plan...
    val cp = df.queryExecution.sparkPlan.collect {
      case cp: CartesianProductExec => cp
    }
    assert(cp.isEmpty, "should not use CartesianProduct for null-safe join")
    // ...and one of the equi-join operators must be used instead.
    val smj = df.queryExecution.sparkPlan.collect {
      case smj: SortMergeJoinExec => smj
      case j: BroadcastHashJoinExec => j
    }
    assert(smj.size > 0, "should use SortMergeJoin or BroadcastHashJoin")
    checkAnswer(df, Row(100) :: Nil)
  }
test("SPARK-3349 partitioning after limit") {
sql("SELECT DISTINCT n FROM lowerCaseData ORDER BY n DESC")
.limit(2)
.createOrReplaceTempView("subset1")
sql("SELECT DISTINCT n FROM lowerCaseData ORDER BY n ASC")
.limit(2)
.createOrReplaceTempView("subset2")
checkAnswer(
sql("SELECT * FROM lowerCaseData INNER JOIN subset1 ON subset1.n = lowerCaseData.n"),
Row(3, "c", 3) ::
Row(4, "d", 4) :: Nil)
checkAnswer(
sql("SELECT * FROM lowerCaseData INNER JOIN subset2 ON subset2.n = lowerCaseData.n"),
Row(1, "a", 1) ::
Row(2, "b", 2) :: Nil)
}
test("mixed-case keywords") {
checkAnswer(
sql(
"""
|SeleCT * from
| (select * from upperCaseData WherE N <= 4) leftTable fuLL OUtER joiN
| (sElEcT * FROM upperCaseData whERe N >= 3) rightTable
| oN leftTable.N = rightTable.N
""".stripMargin),
Row(1, "A", null, null) ::
Row(2, "B", null, null) ::
Row(3, "C", 3, "C") ::
Row(4, "D", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
}
test("select with table name as qualifier") {
checkAnswer(
sql("SELECT testData.value FROM testData WHERE testData.key = 1"),
Row("1"))
}
test("inner join ON with table name as qualifier") {
checkAnswer(
sql("SELECT * FROM upperCaseData JOIN lowerCaseData ON lowerCaseData.n = upperCaseData.N"),
Seq(
Row(1, "A", 1, "a"),
Row(2, "B", 2, "b"),
Row(3, "C", 3, "c"),
Row(4, "D", 4, "d")))
}
test("qualified select with inner join ON with table name as qualifier") {
checkAnswer(
sql("SELECT upperCaseData.N, upperCaseData.L FROM upperCaseData JOIN lowerCaseData " +
"ON lowerCaseData.n = upperCaseData.N"),
Seq(
Row(1, "A"),
Row(2, "B"),
Row(3, "C"),
Row(4, "D")))
}
test("system function upper()") {
checkAnswer(
sql("SELECT n,UPPER(l) FROM lowerCaseData"),
Seq(
Row(1, "A"),
Row(2, "B"),
Row(3, "C"),
Row(4, "D")))
checkAnswer(
sql("SELECT n, UPPER(s) FROM nullStrings"),
Seq(
Row(1, "ABC"),
Row(2, "ABC"),
Row(3, null)))
}
test("system function lower()") {
checkAnswer(
sql("SELECT N,LOWER(L) FROM upperCaseData"),
Seq(
Row(1, "a"),
Row(2, "b"),
Row(3, "c"),
Row(4, "d"),
Row(5, "e"),
Row(6, "f")))
checkAnswer(
sql("SELECT n, LOWER(s) FROM nullStrings"),
Seq(
Row(1, "abc"),
Row(2, "abc"),
Row(3, null)))
}
test("UNION") {
checkAnswer(
sql("SELECT * FROM lowerCaseData UNION SELECT * FROM upperCaseData"),
Row(1, "A") :: Row(1, "a") :: Row(2, "B") :: Row(2, "b") :: Row(3, "C") :: Row(3, "c") ::
Row(4, "D") :: Row(4, "d") :: Row(5, "E") :: Row(6, "F") :: Nil)
checkAnswer(
sql("SELECT * FROM lowerCaseData UNION SELECT * FROM lowerCaseData"),
Row(1, "a") :: Row(2, "b") :: Row(3, "c") :: Row(4, "d") :: Nil)
checkAnswer(
sql("SELECT * FROM lowerCaseData UNION ALL SELECT * FROM lowerCaseData"),
Row(1, "a") :: Row(1, "a") :: Row(2, "b") :: Row(2, "b") :: Row(3, "c") :: Row(3, "c") ::
Row(4, "d") :: Row(4, "d") :: Nil)
}
test("UNION with column mismatches") {
// Column name mismatches are allowed.
checkAnswer(
sql("SELECT n,l FROM lowerCaseData UNION SELECT N as x1, L as x2 FROM upperCaseData"),
Row(1, "A") :: Row(1, "a") :: Row(2, "B") :: Row(2, "b") :: Row(3, "C") :: Row(3, "c") ::
Row(4, "D") :: Row(4, "d") :: Row(5, "E") :: Row(6, "F") :: Nil)
// Column type mismatches are not allowed, forcing a type coercion.
checkAnswer(
sql("SELECT n FROM lowerCaseData UNION SELECT L FROM upperCaseData"),
("1" :: "2" :: "3" :: "4" :: "A" :: "B" :: "C" :: "D" :: "E" :: "F" :: Nil).map(Row(_)))
// Column type mismatches where a coercion is not possible, in this case between integer
// and array types, trigger a TreeNodeException.
intercept[AnalysisException] {
sql("SELECT data FROM arrayData UNION SELECT 1 FROM arrayData").collect()
}
}
test("EXCEPT") {
checkAnswer(
sql("SELECT * FROM lowerCaseData EXCEPT SELECT * FROM upperCaseData"),
Row(1, "a") ::
Row(2, "b") ::
Row(3, "c") ::
Row(4, "d") :: Nil)
checkAnswer(
sql("SELECT * FROM lowerCaseData EXCEPT SELECT * FROM lowerCaseData"), Nil)
checkAnswer(
sql("SELECT * FROM upperCaseData EXCEPT SELECT * FROM upperCaseData"), Nil)
}
test("MINUS") {
checkAnswer(
sql("SELECT * FROM lowerCaseData MINUS SELECT * FROM upperCaseData"),
Row(1, "a") :: Row(2, "b") :: Row(3, "c") :: Row(4, "d") :: Nil)
checkAnswer(
sql("SELECT * FROM lowerCaseData MINUS SELECT * FROM lowerCaseData"), Nil)
checkAnswer(
sql("SELECT * FROM upperCaseData MINUS SELECT * FROM upperCaseData"), Nil)
}
test("INTERSECT") {
checkAnswer(
sql("SELECT * FROM lowerCaseData INTERSECT SELECT * FROM lowerCaseData"),
Row(1, "a") ::
Row(2, "b") ::
Row(3, "c") ::
Row(4, "d") :: Nil)
checkAnswer(
sql("SELECT * FROM lowerCaseData INTERSECT SELECT * FROM upperCaseData"), Nil)
}
test("SET commands semantics using sql()") {
spark.sessionState.conf.clear()
val testKey = "test.key.0"
val testVal = "test.val.0"
val nonexistentKey = "nonexistent"
// "set" itself returns all config variables currently specified in SQLConf.
assert(sql("SET").collect().size === TestSQLContext.overrideConfs.size)
sql("SET").collect().foreach { row =>
val key = row.getString(0)
val value = row.getString(1)
assert(
TestSQLContext.overrideConfs.contains(key),
s"$key should exist in SQLConf.")
assert(
TestSQLContext.overrideConfs(key) === value,
s"The value of $key should be ${TestSQLContext.overrideConfs(key)} instead of $value.")
}
val overrideConfs = sql("SET").collect()
// "set key=val"
sql(s"SET $testKey=$testVal")
checkAnswer(
sql("SET"),
overrideConfs ++ Seq(Row(testKey, testVal))
)
sql(s"SET ${testKey + testKey}=${testVal + testVal}")
checkAnswer(
sql("set"),
overrideConfs ++ Seq(Row(testKey, testVal), Row(testKey + testKey, testVal + testVal))
)
// "set key"
checkAnswer(
sql(s"SET $testKey"),
Row(testKey, testVal)
)
checkAnswer(
sql(s"SET $nonexistentKey"),
Row(nonexistentKey, "<undefined>")
)
spark.sessionState.conf.clear()
}
test("SPARK-19218 SET command should show a result in a sorted order") {
val overrideConfs = sql("SET").collect()
sql(s"SET test.key3=1")
sql(s"SET test.key2=2")
sql(s"SET test.key1=3")
val result = sql("SET").collect()
assert(result ===
(overrideConfs ++ Seq(
Row("test.key1", "3"),
Row("test.key2", "2"),
Row("test.key3", "1"))).sortBy(_.getString(0))
)
spark.sessionState.conf.clear()
}
test("SPARK-19218 `SET -v` should not fail with null value configuration") {
import SQLConf._
val confEntry = buildConf("spark.test").doc("doc").stringConf.createWithDefault(null)
try {
val result = sql("SET -v").collect()
assert(result === result.sortBy(_.getString(0)))
} finally {
SQLConf.unregister(confEntry)
}
}
test("SET commands with illegal or inappropriate argument") {
spark.sessionState.conf.clear()
// Set negative mapred.reduce.tasks for automatically determining
// the number of reducers is not supported
intercept[IllegalArgumentException](sql(s"SET mapred.reduce.tasks=-1"))
intercept[IllegalArgumentException](sql(s"SET mapred.reduce.tasks=-01"))
intercept[IllegalArgumentException](sql(s"SET mapred.reduce.tasks=-2"))
spark.sessionState.conf.clear()
}
test("SET mapreduce.job.reduces automatically converted to spark.sql.shuffle.partitions") {
spark.sessionState.conf.clear()
val before = spark.conf.get(SQLConf.SHUFFLE_PARTITIONS.key).toInt
val newConf = before + 1
sql(s"SET mapreduce.job.reduces=${newConf.toString}")
val after = spark.conf.get(SQLConf.SHUFFLE_PARTITIONS.key).toInt
assert(before != after)
assert(newConf === after)
intercept[IllegalArgumentException](sql(s"SET mapreduce.job.reduces=-1"))
spark.sessionState.conf.clear()
}
test("apply schema") {
val schema1 = StructType(
StructField("f1", IntegerType, false) ::
StructField("f2", StringType, false) ::
StructField("f3", BooleanType, false) ::
StructField("f4", IntegerType, true) :: Nil)
val rowRDD1 = unparsedStrings.map { r =>
val values = r.split(",").map(_.trim)
val v4 = try values(3).toInt catch {
case _: NumberFormatException => null
}
Row(values(0).toInt, values(1), values(2).toBoolean, v4)
}
val df1 = spark.createDataFrame(rowRDD1, schema1)
df1.createOrReplaceTempView("applySchema1")
checkAnswer(
sql("SELECT * FROM applySchema1"),
Row(1, "A1", true, null) ::
Row(2, "B2", false, null) ::
Row(3, "C3", true, null) ::
Row(4, "D4", true, 2147483644) :: Nil)
checkAnswer(
sql("SELECT f1, f4 FROM applySchema1"),
Row(1, null) ::
Row(2, null) ::
Row(3, null) ::
Row(4, 2147483644) :: Nil)
val schema2 = StructType(
StructField("f1", StructType(
StructField("f11", IntegerType, false) ::
StructField("f12", BooleanType, false) :: Nil), false) ::
StructField("f2", MapType(StringType, IntegerType, true), false) :: Nil)
val rowRDD2 = unparsedStrings.map { r =>
val values = r.split(",").map(_.trim)
val v4 = try values(3).toInt catch {
case _: NumberFormatException => null
}
Row(Row(values(0).toInt, values(2).toBoolean), Map(values(1) -> v4))
}
val df2 = spark.createDataFrame(rowRDD2, schema2)
df2.createOrReplaceTempView("applySchema2")
checkAnswer(
sql("SELECT * FROM applySchema2"),
Row(Row(1, true), Map("A1" -> null)) ::
Row(Row(2, false), Map("B2" -> null)) ::
Row(Row(3, true), Map("C3" -> null)) ::
Row(Row(4, true), Map("D4" -> 2147483644)) :: Nil)
checkAnswer(
sql("SELECT f1.f11, f2['D4'] FROM applySchema2"),
Row(1, null) ::
Row(2, null) ::
Row(3, null) ::
Row(4, 2147483644) :: Nil)
// The value of a MapType column can be a mutable map.
val rowRDD3 = unparsedStrings.map { r =>
val values = r.split(",").map(_.trim)
val v4 = try values(3).toInt catch {
case _: NumberFormatException => null
}
Row(Row(values(0).toInt, values(2).toBoolean), scala.collection.mutable.Map(values(1) -> v4))
}
val df3 = spark.createDataFrame(rowRDD3, schema2)
df3.createOrReplaceTempView("applySchema3")
checkAnswer(
sql("SELECT f1.f11, f2['D4'] FROM applySchema3"),
Row(1, null) ::
Row(2, null) ::
Row(3, null) ::
Row(4, 2147483644) :: Nil)
}
test("SPARK-3423 BETWEEN") {
checkAnswer(
sql("SELECT key, value FROM testData WHERE key BETWEEN 5 and 7"),
Seq(Row(5, "5"), Row(6, "6"), Row(7, "7"))
)
checkAnswer(
sql("SELECT key, value FROM testData WHERE key BETWEEN 7 and 7"),
Row(7, "7")
)
checkAnswer(
sql("SELECT key, value FROM testData WHERE key BETWEEN 9 and 7"),
Nil
)
}
test("SPARK-17863: SELECT distinct does not work correctly if order by missing attribute") {
checkAnswer(
sql("""select distinct struct.a, struct.b
|from (
| select named_struct('a', 1, 'b', 2, 'c', 3) as struct
| union all
| select named_struct('a', 1, 'b', 2, 'c', 4) as struct) tmp
|order by a, b
|""".stripMargin),
Row(1, 2) :: Nil)
val error = intercept[AnalysisException] {
sql("""select distinct struct.a, struct.b
|from (
| select named_struct('a', 1, 'b', 2, 'c', 3) as struct
| union all
| select named_struct('a', 1, 'b', 2, 'c', 4) as struct) tmp
|order by struct.a, struct.b
|""".stripMargin)
}
assert(error.message contains "cannot resolve '`struct.a`' given input columns: [a, b]")
}
test("cast boolean to string") {
// TODO Ensure true/false string letter casing is consistent with Hive in all cases.
checkAnswer(
sql("SELECT CAST(TRUE AS STRING), CAST(FALSE AS STRING) FROM testData LIMIT 1"),
Row("true", "false"))
}
test("metadata is propagated correctly") {
val person: DataFrame = sql("SELECT * FROM person")
val schema = person.schema
val docKey = "doc"
val docValue = "first name"
val metadata = new MetadataBuilder()
.putString(docKey, docValue)
.build()
val schemaWithMeta = new StructType(Array(
schema("id"), schema("name").copy(metadata = metadata), schema("age")))
val personWithMeta = spark.createDataFrame(person.rdd, schemaWithMeta)
def validateMetadata(rdd: DataFrame): Unit = {
assert(rdd.schema("name").metadata.getString(docKey) == docValue)
}
personWithMeta.createOrReplaceTempView("personWithMeta")
validateMetadata(personWithMeta.select($"name"))
validateMetadata(personWithMeta.select($"name"))
validateMetadata(personWithMeta.select($"id", $"name"))
validateMetadata(sql("SELECT * FROM personWithMeta"))
validateMetadata(sql("SELECT id, name FROM personWithMeta"))
validateMetadata(sql("SELECT * FROM personWithMeta JOIN salary ON id = personId"))
validateMetadata(sql(
"SELECT name, salary FROM personWithMeta JOIN salary ON id = personId"))
}
test("SPARK-3371 Renaming a function expression with group by gives error") {
spark.udf.register("len", (s: String) => s.length)
checkAnswer(
sql("SELECT len(value) as temp FROM testData WHERE key = 1 group by len(value)"),
Row(1))
}
test("SPARK-3813 CASE a WHEN b THEN c [WHEN d THEN e]* [ELSE f] END") {
checkAnswer(
sql("SELECT CASE key WHEN 1 THEN 1 ELSE 0 END FROM testData WHERE key = 1 group by key"),
Row(1))
}
test("SPARK-3813 CASE WHEN a THEN b [WHEN c THEN d]* [ELSE e] END") {
checkAnswer(
sql("SELECT CASE WHEN key = 1 THEN 1 ELSE 2 END FROM testData WHERE key = 1 group by key"),
Row(1))
}
testQuietly(
"SPARK-16748: SparkExceptions during planning should not wrapped in TreeNodeException") {
intercept[SparkException] {
val df = spark.range(0, 5).map(x => (1 / x).toString).toDF("a").orderBy("a")
df.queryExecution.toRdd // force physical planning, but not execution of the plan
}
}
test("Multiple join") {
checkAnswer(
sql(
"""SELECT a.key, b.key, c.key
|FROM testData a
|JOIN testData b ON a.key = b.key
|JOIN testData c ON a.key = c.key
""".stripMargin),
(1 to 100).map(i => Row(i, i, i)))
}
test("SPARK-3483 Special chars in column names") {
val data = Seq("""{"key?number1": "value1", "key.number2": "value2"}""").toDS()
spark.read.json(data).createOrReplaceTempView("records")
sql("SELECT `key?number1`, `key.number2` FROM records")
}
test("SPARK-3814 Support Bitwise & operator") {
checkAnswer(sql("SELECT key&1 FROM testData WHERE key = 1 "), Row(1))
}
test("SPARK-3814 Support Bitwise | operator") {
checkAnswer(sql("SELECT key|0 FROM testData WHERE key = 1 "), Row(1))
}
test("SPARK-3814 Support Bitwise ^ operator") {
checkAnswer(sql("SELECT key^0 FROM testData WHERE key = 1 "), Row(1))
}
test("SPARK-3814 Support Bitwise ~ operator") {
checkAnswer(sql("SELECT ~key FROM testData WHERE key = 1 "), Row(-2))
}
test("SPARK-4120 Join of multiple tables does not work in SparkSQL") {
checkAnswer(
sql(
"""SELECT a.key, b.key, c.key
|FROM testData a,testData b,testData c
|where a.key = b.key and a.key = c.key
""".stripMargin),
(1 to 100).map(i => Row(i, i, i)))
}
test("SPARK-4154 Query does not work if it has 'not between' in Spark SQL and HQL") {
checkAnswer(sql("SELECT key FROM testData WHERE key not between 0 and 10 order by key"),
(11 to 100).map(i => Row(i)))
}
test("SPARK-4207 Query which has syntax like 'not like' is not working in Spark SQL") {
checkAnswer(sql("SELECT key FROM testData WHERE value not like '100%' order by key"),
(1 to 99).map(i => Row(i)))
}
test("SPARK-4322 Grouping field with struct field as sub expression") {
spark.read.json(Seq("""{"a": {"b": [{"c": 1}]}}""").toDS())
.createOrReplaceTempView("data")
checkAnswer(sql("SELECT a.b[0].c FROM data GROUP BY a.b[0].c"), Row(1))
spark.catalog.dropTempView("data")
spark.read.json(Seq("""{"a": {"b": 1}}""").toDS())
.createOrReplaceTempView("data")
checkAnswer(sql("SELECT a.b + 1 FROM data GROUP BY a.b + 1"), Row(2))
spark.catalog.dropTempView("data")
}
test("SPARK-4432 Fix attribute reference resolution error when using ORDER BY") {
checkAnswer(
sql("SELECT a + b FROM testData2 ORDER BY a"),
Seq(2, 3, 3, 4, 4, 5).map(Row(_))
)
}
test("oder by asc by default when not specify ascending and descending") {
checkAnswer(
sql("SELECT a, b FROM testData2 ORDER BY a desc, b"),
Seq(Row(3, 1), Row(3, 2), Row(2, 1), Row(2, 2), Row(1, 1), Row(1, 2))
)
}
test("Supporting relational operator '<=>' in Spark SQL") {
val nullCheckData1 = TestData(1, "1") :: TestData(2, null) :: Nil
val rdd1 = sparkContext.parallelize((0 to 1).map(i => nullCheckData1(i)))
rdd1.toDF().createOrReplaceTempView("nulldata1")
val nullCheckData2 = TestData(1, "1") :: TestData(2, null) :: Nil
val rdd2 = sparkContext.parallelize((0 to 1).map(i => nullCheckData2(i)))
rdd2.toDF().createOrReplaceTempView("nulldata2")
checkAnswer(sql("SELECT nulldata1.key FROM nulldata1 join " +
"nulldata2 on nulldata1.value <=> nulldata2.value"),
(1 to 2).map(i => Row(i)))
}
test("Multi-column COUNT(DISTINCT ...)") {
val data = TestData(1, "val_1") :: TestData(2, "val_2") :: Nil
val rdd = sparkContext.parallelize((0 to 1).map(i => data(i)))
rdd.toDF().createOrReplaceTempView("distinctData")
checkAnswer(sql("SELECT COUNT(DISTINCT key,value) FROM distinctData"), Row(2))
}
test("SPARK-4699 case sensitivity SQL query") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
val data = TestData(1, "val_1") :: TestData(2, "val_2") :: Nil
val rdd = sparkContext.parallelize((0 to 1).map(i => data(i)))
rdd.toDF().createOrReplaceTempView("testTable1")
checkAnswer(sql("SELECT VALUE FROM TESTTABLE1 where KEY = 1"), Row("val_1"))
}
}
test("SPARK-6145: ORDER BY test for nested fields") {
spark.read
.json(Seq("""{"a": {"b": 1, "a": {"a": 1}}, "c": [{"d": 1}]}""").toDS())
.createOrReplaceTempView("nestedOrder")
checkAnswer(sql("SELECT 1 FROM nestedOrder ORDER BY a.b"), Row(1))
checkAnswer(sql("SELECT a.b FROM nestedOrder ORDER BY a.b"), Row(1))
checkAnswer(sql("SELECT 1 FROM nestedOrder ORDER BY a.a.a"), Row(1))
checkAnswer(sql("SELECT a.a.a FROM nestedOrder ORDER BY a.a.a"), Row(1))
checkAnswer(sql("SELECT 1 FROM nestedOrder ORDER BY c[0].d"), Row(1))
checkAnswer(sql("SELECT c[0].d FROM nestedOrder ORDER BY c[0].d"), Row(1))
}
test("SPARK-6145: special cases") {
spark.read
.json(Seq("""{"a": {"b": [1]}, "b": [{"a": 1}], "_c0": {"a": 1}}""").toDS())
.createOrReplaceTempView("t")
checkAnswer(sql("SELECT a.b[0] FROM t ORDER BY _c0.a"), Row(1))
checkAnswer(sql("SELECT b[0].a FROM t ORDER BY _c0.a"), Row(1))
}
test("SPARK-6898: complete support for special chars in column names") {
spark.read
.json(Seq("""{"a": {"c.b": 1}, "b.$q": [{"a@!.q": 1}], "q.w": {"w.i&": [1]}}""").toDS())
.createOrReplaceTempView("t")
checkAnswer(sql("SELECT a.`c.b`, `b.$q`[0].`a@!.q`, `q.w`.`w.i&`[0] FROM t"), Row(1, 1, 1))
}
test("SPARK-6583 order by aggregated function") {
Seq("1" -> 3, "1" -> 4, "2" -> 7, "2" -> 8, "3" -> 5, "3" -> 6, "4" -> 1, "4" -> 2)
.toDF("a", "b").createOrReplaceTempView("orderByData")
checkAnswer(
sql(
"""
|SELECT a
|FROM orderByData
|GROUP BY a
|ORDER BY sum(b)
""".stripMargin),
Row("4") :: Row("1") :: Row("3") :: Row("2") :: Nil)
checkAnswer(
sql(
"""
|SELECT sum(b)
|FROM orderByData
|GROUP BY a
|ORDER BY sum(b)
""".stripMargin),
Row(3) :: Row(7) :: Row(11) :: Row(15) :: Nil)
checkAnswer(
sql(
"""
|SELECT sum(b)
|FROM orderByData
|GROUP BY a
|ORDER BY sum(b), max(b)
""".stripMargin),
Row(3) :: Row(7) :: Row(11) :: Row(15) :: Nil)
checkAnswer(
sql(
"""
|SELECT a, sum(b)
|FROM orderByData
|GROUP BY a
|ORDER BY sum(b)
""".stripMargin),
Row("4", 3) :: Row("1", 7) :: Row("3", 11) :: Row("2", 15) :: Nil)
checkAnswer(
sql(
"""
|SELECT a, sum(b)
|FROM orderByData
|GROUP BY a
|ORDER BY sum(b) + 1
""".stripMargin),
Row("4", 3) :: Row("1", 7) :: Row("3", 11) :: Row("2", 15) :: Nil)
checkAnswer(
sql(
"""
|SELECT count(*)
|FROM orderByData
|GROUP BY a
|ORDER BY count(*)
""".stripMargin),
Row(2) :: Row(2) :: Row(2) :: Row(2) :: Nil)
checkAnswer(
sql(
"""
|SELECT a
|FROM orderByData
|GROUP BY a
|ORDER BY a, count(*), sum(b)
""".stripMargin),
Row("1") :: Row("2") :: Row("3") :: Row("4") :: Nil)
}
test("SPARK-7952: fix the equality check between boolean and numeric types") {
withTempView("t") {
// numeric field i, boolean field j, result of i = j, result of i <=> j
Seq[(Integer, java.lang.Boolean, java.lang.Boolean, java.lang.Boolean)](
(1, true, true, true),
(0, false, true, true),
(2, true, false, false),
(2, false, false, false),
(null, true, null, false),
(null, false, null, false),
(0, null, null, false),
(1, null, null, false),
(null, null, null, true)
).toDF("i", "b", "r1", "r2").createOrReplaceTempView("t")
checkAnswer(sql("select i = b from t"), sql("select r1 from t"))
checkAnswer(sql("select i <=> b from t"), sql("select r2 from t"))
}
}
test("SPARK-7067: order by queries for complex ExtractValue chain") {
withTempView("t") {
spark.read
.json(Seq("""{"a": {"b": [{"c": 1}]}, "b": [{"d": 1}]}""").toDS())
.createOrReplaceTempView("t")
checkAnswer(sql("SELECT a.b FROM t ORDER BY b[0].d"), Row(Seq(Row(1))))
}
}
test("SPARK-8782: ORDER BY NULL") {
withTempView("t") {
Seq((1, 2), (1, 2)).toDF("a", "b").createOrReplaceTempView("t")
checkAnswer(sql("SELECT * FROM t ORDER BY NULL"), Seq(Row(1, 2), Row(1, 2)))
}
}
  test("SPARK-8837: use keyword in column name") {
    withTempView("t") {
      // Columns named after SQL keywords ("count", "sort") must be usable
      // without backtick quoting, both in DataFrame filter expressions and in
      // a SQL select list.
      val df = Seq(1 -> "a").toDF("count", "sort")
      checkAnswer(df.filter("count > 0"), Row(1, "a"))
      df.createOrReplaceTempView("t")
      checkAnswer(sql("select count, sort from t"), Row(1, "a"))
    }
  }
test("SPARK-8753: add interval type") {
import org.apache.spark.unsafe.types.CalendarInterval
val df = sql("select interval 3 years -3 month 7 week 123 microseconds")
checkAnswer(df, Row(new CalendarInterval(12 * 3 - 3, 7L * 1000 * 1000 * 3600 * 24 * 7 + 123 )))
withTempPath(f => {
// Currently we don't yet support saving out values of interval data type.
val e = intercept[AnalysisException] {
df.write.json(f.getCanonicalPath)
}
e.message.contains("Cannot save interval data type into external storage")
})
val e1 = intercept[AnalysisException] {
sql("select interval")
}
assert(e1.message.contains("at least one time unit should be given for interval literal"))
// Currently we don't yet support nanosecond
val e2 = intercept[AnalysisException] {
sql("select interval 23 nanosecond")
}
assert(e2.message.contains("No interval can be constructed"))
}
test("SPARK-8945: add and subtract expressions for interval type") {
import org.apache.spark.unsafe.types.CalendarInterval
import org.apache.spark.unsafe.types.CalendarInterval.MICROS_PER_WEEK
val df = sql("select interval 3 years -3 month 7 week 123 microseconds as i")
checkAnswer(df, Row(new CalendarInterval(12 * 3 - 3, 7L * MICROS_PER_WEEK + 123)))
checkAnswer(df.select(df("i") + new CalendarInterval(2, 123)),
Row(new CalendarInterval(12 * 3 - 3 + 2, 7L * MICROS_PER_WEEK + 123 + 123)))
checkAnswer(df.select(df("i") - new CalendarInterval(2, 123)),
Row(new CalendarInterval(12 * 3 - 3 - 2, 7L * MICROS_PER_WEEK + 123 - 123)))
// unary minus
checkAnswer(df.select(-df("i")),
Row(new CalendarInterval(-(12 * 3 - 3), -(7L * MICROS_PER_WEEK + 123))))
}
  test("aggregation with codegen updates peak execution memory") {
    // The codegen'd aggregation path must report its peak execution memory to
    // the task metrics; verifyPeakExecutionMemorySet fails if the metric was
    // never set while the enclosed query runs.
    AccumulatorSuite.verifyPeakExecutionMemorySet(sparkContext, "aggregation with codegen") {
      testCodeGen(
        "SELECT key, count(value) FROM testData GROUP BY key",
        (1 to 100).map(i => Row(i, 1)))
    }
  }
test("decimal precision with multiply/division") {
checkAnswer(sql("select 10.3 * 3.0"), Row(BigDecimal("30.90")))
checkAnswer(sql("select 10.3000 * 3.0"), Row(BigDecimal("30.90000")))
checkAnswer(sql("select 10.30000 * 30.0"), Row(BigDecimal("309.000000")))
checkAnswer(sql("select 10.300000000000000000 * 3.000000000000000000"),
Row(BigDecimal("30.900000000000000000000000000000000000", new MathContext(38))))
checkAnswer(sql("select 10.300000000000000000 * 3.0000000000000000000"),
Row(null))
checkAnswer(sql("select 10.3 / 3.0"), Row(BigDecimal("3.433333")))
checkAnswer(sql("select 10.3000 / 3.0"), Row(BigDecimal("3.4333333")))
checkAnswer(sql("select 10.30000 / 30.0"), Row(BigDecimal("0.343333333")))
checkAnswer(sql("select 10.300000000000000000 / 3.00000000000000000"),
Row(BigDecimal("3.433333333333333333333333333", new MathContext(38))))
checkAnswer(sql("select 10.3000000000000000000 / 3.00000000000000000"),
Row(BigDecimal("3.4333333333333333333333333333", new MathContext(38))))
}
test("SPARK-10215 Div of Decimal returns null") {
val d = Decimal(1.12321).toBigDecimal
val df = Seq((d, 1)).toDF("a", "b")
checkAnswer(
df.selectExpr("b * a / b"),
Seq(Row(d)))
checkAnswer(
df.selectExpr("b * a / b / b"),
Seq(Row(d)))
checkAnswer(
df.selectExpr("b * a + b"),
Seq(Row(BigDecimal(2.12321))))
checkAnswer(
df.selectExpr("b * a - b"),
Seq(Row(BigDecimal(0.12321))))
checkAnswer(
df.selectExpr("b * a * b"),
Seq(Row(d)))
}
test("precision smaller than scale") {
checkAnswer(sql("select 10.00"), Row(BigDecimal("10.00")))
checkAnswer(sql("select 1.00"), Row(BigDecimal("1.00")))
checkAnswer(sql("select 0.10"), Row(BigDecimal("0.10")))
checkAnswer(sql("select 0.01"), Row(BigDecimal("0.01")))
checkAnswer(sql("select 0.001"), Row(BigDecimal("0.001")))
checkAnswer(sql("select -0.01"), Row(BigDecimal("-0.01")))
checkAnswer(sql("select -0.001"), Row(BigDecimal("-0.001")))
}
  test("external sorting updates peak execution memory") {
    // The external (spill-capable) sort path must report its peak execution
    // memory to the task metrics while the ORDER BY query runs.
    AccumulatorSuite.verifyPeakExecutionMemorySet(sparkContext, "external sort") {
      sql("SELECT * FROM testData2 ORDER BY a ASC, b ASC").collect()
    }
  }
  test("SPARK-9511: error with table starting with number") {
    withTempView("1one") {
      // A view whose name begins with a digit must be usable unquoted in FROM
      // (this used to raise an error per the SPARK-9511 report in the title).
      sparkContext.parallelize(1 to 10).map(i => (i, i.toString))
        .toDF("num", "str")
        .createOrReplaceTempView("1one")
      checkAnswer(sql("select count(num) from 1one"), Row(10))
    }
  }
test("specifying database name for a temporary view is not allowed") {
withTempPath { dir =>
val path = dir.toURI.toString
val df =
sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str")
df
.write
.format("parquet")
.save(path)
// We don't support creating a temporary table while specifying a database
intercept[AnalysisException] {
spark.sql(
s"""
|CREATE TEMPORARY VIEW db.t
|USING parquet
|OPTIONS (
| path '$path'
|)
""".stripMargin)
}.getMessage
// If you use backticks to quote the name then it's OK.
spark.sql(
s"""
|CREATE TEMPORARY VIEW `db.t`
|USING parquet
|OPTIONS (
| path '$path'
|)
""".stripMargin)
checkAnswer(spark.table("`db.t`"), df)
}
}
  test("SPARK-10130 type coercion for IF should have children resolved first") {
    withTempView("src") {
      Seq((1, 1), (-1, 1)).toDF("key", "value").createOrReplaceTempView("src")
      // IF mixes the aliased attribute `a` with the literal 0; coercion must
      // not fire before the subquery alias is resolved.
      checkAnswer(
        sql("SELECT IF(a > 0, a, 0) FROM (SELECT key a FROM src) temp"), Seq(Row(1), Row(0)))
    }
  }
  test("SPARK-10389: order by non-attribute grouping expression on Aggregate") {
    withTempView("src") {
      Seq((1, 1), (-1, 1)).toDF("key", "value").createOrReplaceTempView("src")
      // ORDER BY may reference the grouping expression itself (key + 1), or an
      // expression built on top of it, even though neither appears in the
      // aggregate's output attributes.
      checkAnswer(sql("SELECT MAX(value) FROM src GROUP BY key + 1 ORDER BY key + 1"),
        Seq(Row(1), Row(1)))
      checkAnswer(sql("SELECT MAX(value) FROM src GROUP BY key + 1 ORDER BY (key + 1) * 2"),
        Seq(Row(1), Row(1)))
    }
  }
test("run sql directly on files") {
val df = spark.range(100).toDF()
withTempPath(f => {
df.write.json(f.getCanonicalPath)
checkAnswer(sql(s"select id from json.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select id from `org.apache.spark.sql.json`.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select a.id from json.`${f.getCanonicalPath}` as a"),
df)
})
var e = intercept[AnalysisException] {
sql("select * from in_valid_table")
}
assert(e.message.contains("Table or view not found"))
e = intercept[AnalysisException] {
sql("select * from no_db.no_table").show()
}
assert(e.message.contains("Table or view not found"))
e = intercept[AnalysisException] {
sql("select * from json.invalid_file")
}
assert(e.message.contains("Path does not exist"))
e = intercept[AnalysisException] {
sql(s"select id from `org.apache.spark.sql.hive.orc`.`file_path`")
}
assert(e.message.contains("The ORC data source must be used with Hive support enabled"))
e = intercept[AnalysisException] {
sql(s"select id from `com.databricks.spark.avro`.`file_path`")
}
assert(e.message.contains("Failed to find data source: com.databricks.spark.avro."))
// data source type is case insensitive
e = intercept[AnalysisException] {
sql(s"select id from Avro.`file_path`")
}
assert(e.message.contains("Failed to find data source: avro."))
e = intercept[AnalysisException] {
sql(s"select id from avro.`file_path`")
}
assert(e.message.contains("Failed to find data source: avro."))
e = intercept[AnalysisException] {
sql(s"select id from `org.apache.spark.sql.sources.HadoopFsRelationProvider`.`file_path`")
}
assert(e.message.contains("Table or view not found: " +
"`org.apache.spark.sql.sources.HadoopFsRelationProvider`.`file_path`"))
e = intercept[AnalysisException] {
sql(s"select id from `Jdbc`.`file_path`")
}
assert(e.message.contains("Unsupported data source type for direct query on files: Jdbc"))
e = intercept[AnalysisException] {
sql(s"select id from `org.apache.spark.sql.execution.datasources.jdbc`.`file_path`")
}
assert(e.message.contains("Unsupported data source type for direct query on files: " +
"org.apache.spark.sql.execution.datasources.jdbc"))
}
test("SortMergeJoin returns wrong results when using UnsafeRows") {
// This test is for the fix of https://issues.apache.org/jira/browse/SPARK-10737.
// This bug will be triggered when Tungsten is enabled and there are multiple
// SortMergeJoin operators executed in the same task.
val confs = SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "1" :: Nil
withSQLConf(confs: _*) {
val df1 = (1 to 50).map(i => (s"str_$i", i)).toDF("i", "j")
val df2 =
df1
.join(df1.select(df1("i")), "i")
.select(df1("i"), df1("j"))
val df3 = df2.withColumnRenamed("i", "i1").withColumnRenamed("j", "j1")
val df4 =
df2
.join(df3, df2("i") === df3("i1"))
.withColumn("diff", $"j" - $"j1")
.select(df2("i"), df2("j"), $"diff")
checkAnswer(
df4,
df1.withColumn("diff", lit(0)))
}
}
test("SPARK-11303: filter should not be pushed down into sample") {
val df = spark.range(100)
List(true, false).foreach { withReplacement =>
val sampled = df.sample(withReplacement, 0.1, 1)
val sampledOdd = sampled.filter("id % 2 != 0")
val sampledEven = sampled.filter("id % 2 = 0")
assert(sampled.count() == sampledOdd.count() + sampledEven.count())
}
}
test("Struct Star Expansion") {
val structDf = testData2.select("a", "b").as("record")
checkAnswer(
structDf.select($"record.a", $"record.b"),
Row(1, 1) :: Row(1, 2) :: Row(2, 1) :: Row(2, 2) :: Row(3, 1) :: Row(3, 2) :: Nil)
checkAnswer(
structDf.select($"record.*"),
Row(1, 1) :: Row(1, 2) :: Row(2, 1) :: Row(2, 2) :: Row(3, 1) :: Row(3, 2) :: Nil)
checkAnswer(
structDf.select($"record.*", $"record.*"),
Row(1, 1, 1, 1) :: Row(1, 2, 1, 2) :: Row(2, 1, 2, 1) :: Row(2, 2, 2, 2) ::
Row(3, 1, 3, 1) :: Row(3, 2, 3, 2) :: Nil)
checkAnswer(
sql("select struct(a, b) as r1, struct(b, a) as r2 from testData2").select($"r1.*", $"r2.*"),
Row(1, 1, 1, 1) :: Row(1, 2, 2, 1) :: Row(2, 1, 1, 2) :: Row(2, 2, 2, 2) ::
Row(3, 1, 1, 3) :: Row(3, 2, 2, 3) :: Nil)
// Try with a temporary view
sql("select struct(a, b) as record from testData2").createOrReplaceTempView("structTable")
checkAnswer(
sql("SELECT record.* FROM structTable"),
Row(1, 1) :: Row(1, 2) :: Row(2, 1) :: Row(2, 2) :: Row(3, 1) :: Row(3, 2) :: Nil)
checkAnswer(sql(
"""
| SELECT min(struct(record.*)) FROM
| (select struct(a,b) as record from testData2) tmp
""".stripMargin),
Row(Row(1, 1)) :: Nil)
// Try with an alias on the select list
checkAnswer(sql(
"""
| SELECT max(struct(record.*)) as r FROM
| (select struct(a,b) as record from testData2) tmp
""".stripMargin).select($"r.*"),
Row(3, 2) :: Nil)
// With GROUP BY
checkAnswer(sql(
"""
| SELECT min(struct(record.*)) FROM
| (select a as a, struct(a,b) as record from testData2) tmp
| GROUP BY a
""".stripMargin),
Row(Row(1, 1)) :: Row(Row(2, 1)) :: Row(Row(3, 1)) :: Nil)
// With GROUP BY and alias
checkAnswer(sql(
"""
| SELECT max(struct(record.*)) as r FROM
| (select a as a, struct(a,b) as record from testData2) tmp
| GROUP BY a
""".stripMargin).select($"r.*"),
Row(1, 2) :: Row(2, 2) :: Row(3, 2) :: Nil)
// With GROUP BY and alias and additional fields in the struct
checkAnswer(sql(
"""
| SELECT max(struct(a, record.*, b)) as r FROM
| (select a as a, b as b, struct(a,b) as record from testData2) tmp
| GROUP BY a
""".stripMargin).select($"r.*"),
Row(1, 1, 2, 2) :: Row(2, 2, 2, 2) :: Row(3, 3, 2, 2) :: Nil)
// Create a data set that contains nested structs.
val nestedStructData = sql(
"""
| SELECT struct(r1, r2) as record FROM
| (SELECT struct(a, b) as r1, struct(b, a) as r2 FROM testData2) tmp
""".stripMargin)
checkAnswer(nestedStructData.select($"record.*"),
Row(Row(1, 1), Row(1, 1)) :: Row(Row(1, 2), Row(2, 1)) :: Row(Row(2, 1), Row(1, 2)) ::
Row(Row(2, 2), Row(2, 2)) :: Row(Row(3, 1), Row(1, 3)) :: Row(Row(3, 2), Row(2, 3)) :: Nil)
checkAnswer(nestedStructData.select($"record.r1"),
Row(Row(1, 1)) :: Row(Row(1, 2)) :: Row(Row(2, 1)) :: Row(Row(2, 2)) ::
Row(Row(3, 1)) :: Row(Row(3, 2)) :: Nil)
checkAnswer(
nestedStructData.select($"record.r1.*"),
Row(1, 1) :: Row(1, 2) :: Row(2, 1) :: Row(2, 2) :: Row(3, 1) :: Row(3, 2) :: Nil)
// Try with a temporary view
withTempView("nestedStructTable") {
nestedStructData.createOrReplaceTempView("nestedStructTable")
checkAnswer(
sql("SELECT record.* FROM nestedStructTable"),
nestedStructData.select($"record.*"))
checkAnswer(
sql("SELECT record.r1 FROM nestedStructTable"),
nestedStructData.select($"record.r1"))
checkAnswer(
sql("SELECT record.r1.* FROM nestedStructTable"),
nestedStructData.select($"record.r1.*"))
// Try resolving something not there.
assert(intercept[AnalysisException](sql("SELECT abc.* FROM nestedStructTable"))
.getMessage.contains("cannot resolve"))
}
// Create paths with unusual characters
val specialCharacterPath = sql(
"""
| SELECT struct(`col$.a_`, `a.b.c.`) as `r&&b.c` FROM
| (SELECT struct(a, b) as `col$.a_`, struct(b, a) as `a.b.c.` FROM testData2) tmp
""".stripMargin)
withTempView("specialCharacterTable") {
specialCharacterPath.createOrReplaceTempView("specialCharacterTable")
checkAnswer(
specialCharacterPath.select($"`r&&b.c`.*"),
nestedStructData.select($"record.*"))
checkAnswer(
sql("SELECT `r&&b.c`.`col$.a_` FROM specialCharacterTable"),
nestedStructData.select($"record.r1"))
checkAnswer(
sql("SELECT `r&&b.c`.`a.b.c.` FROM specialCharacterTable"),
nestedStructData.select($"record.r2"))
checkAnswer(
sql("SELECT `r&&b.c`.`col$.a_`.* FROM specialCharacterTable"),
nestedStructData.select($"record.r1.*"))
}
// Try star expanding a scalar. This should fail.
assert(intercept[AnalysisException](sql("select a.* from testData2")).getMessage.contains(
"Can only star expand struct data types."))
}
test("Struct Star Expansion - Name conflict") {
// Create a data set that contains a naming conflict
val nameConflict = sql("SELECT struct(a, b) as nameConflict, a as a FROM testData2")
withTempView("nameConflict") {
nameConflict.createOrReplaceTempView("nameConflict")
// Unqualified should resolve to table.
checkAnswer(sql("SELECT nameConflict.* FROM nameConflict"),
Row(Row(1, 1), 1) :: Row(Row(1, 2), 1) :: Row(Row(2, 1), 2) :: Row(Row(2, 2), 2) ::
Row(Row(3, 1), 3) :: Row(Row(3, 2), 3) :: Nil)
// Qualify the struct type with the table name.
checkAnswer(sql("SELECT nameConflict.nameConflict.* FROM nameConflict"),
Row(1, 1) :: Row(1, 2) :: Row(2, 1) :: Row(2, 2) :: Row(3, 1) :: Row(3, 2) :: Nil)
}
}
test("Star Expansion - group by") {
withSQLConf("spark.sql.retainGroupColumns" -> "false") {
checkAnswer(
testData2.groupBy($"a", $"b").agg($"*"),
sql("SELECT * FROM testData2 group by a, b"))
}
}
test("Star Expansion - table with zero column") {
withTempView("temp_table_no_cols") {
val rddNoCols = sparkContext.parallelize(1 to 10).map(_ => Row.empty)
val dfNoCols = spark.createDataFrame(rddNoCols, StructType(Seq.empty))
dfNoCols.createTempView("temp_table_no_cols")
// ResolvedStar
checkAnswer(
dfNoCols,
dfNoCols.select(dfNoCols.col("*")))
// UnresolvedStar
checkAnswer(
dfNoCols,
sql("SELECT * FROM temp_table_no_cols"))
checkAnswer(
dfNoCols,
dfNoCols.select($"*"))
var e = intercept[AnalysisException] {
sql("SELECT a.* FROM temp_table_no_cols a")
}.getMessage
assert(e.contains("cannot resolve 'a.*' give input columns ''"))
e = intercept[AnalysisException] {
dfNoCols.select($"b.*")
}.getMessage
assert(e.contains("cannot resolve 'b.*' give input columns ''"))
}
}
test("Common subexpression elimination") {
// TODO: support subexpression elimination in whole stage codegen
withSQLConf("spark.sql.codegen.wholeStage" -> "false") {
// select from a table to prevent constant folding.
val df = sql("SELECT a, b from testData2 limit 1")
checkAnswer(df, Row(1, 1))
checkAnswer(df.selectExpr("a + 1", "a + 1"), Row(2, 2))
checkAnswer(df.selectExpr("a + 1", "a + 1 + 1"), Row(2, 3))
// This does not work because the expressions get grouped like (a + a) + 1
checkAnswer(df.selectExpr("a + 1", "a + a + 1"), Row(2, 3))
checkAnswer(df.selectExpr("a + 1", "a + (a + 1)"), Row(2, 3))
// Identity udf that tracks the number of times it is called.
val countAcc = sparkContext.longAccumulator("CallCount")
spark.udf.register("testUdf", (x: Int) => {
countAcc.add(1)
x
})
// Evaluates df, verifying it is equal to the expectedResult and the accumulator's value
// is correct.
def verifyCallCount(df: DataFrame, expectedResult: Row, expectedCount: Int): Unit = {
countAcc.setValue(0)
QueryTest.checkAnswer(
df, Seq(expectedResult), checkToRDD = false /* avoid duplicate exec */)
assert(countAcc.value == expectedCount)
}
verifyCallCount(df.selectExpr("testUdf(a)"), Row(1), 1)
verifyCallCount(df.selectExpr("testUdf(a)", "testUdf(a)"), Row(1, 1), 1)
verifyCallCount(df.selectExpr("testUdf(a + 1)", "testUdf(a + 1)"), Row(2, 2), 1)
verifyCallCount(df.selectExpr("testUdf(a + 1)", "testUdf(a)"), Row(2, 1), 2)
verifyCallCount(
df.selectExpr("testUdf(a + 1) + testUdf(a + 1)", "testUdf(a + 1)"), Row(4, 2), 1)
verifyCallCount(
df.selectExpr("testUdf(a + 1) + testUdf(1 + b)", "testUdf(a + 1)"), Row(4, 2), 2)
val testUdf = functions.udf((x: Int) => {
countAcc.add(1)
x
})
verifyCallCount(
df.groupBy().agg(sum(testUdf($"b") + testUdf($"b") + testUdf($"b"))), Row(3.0), 1)
verifyCallCount(
df.selectExpr("testUdf(a + 1) + testUdf(1 + a)", "testUdf(a + 1)"), Row(4, 2), 1)
// Try disabling it via configuration.
spark.conf.set("spark.sql.subexpressionElimination.enabled", "false")
verifyCallCount(df.selectExpr("testUdf(a)", "testUdf(a)"), Row(1, 1), 2)
spark.conf.set("spark.sql.subexpressionElimination.enabled", "true")
verifyCallCount(df.selectExpr("testUdf(a)", "testUdf(a)"), Row(1, 1), 1)
}
}
test("SPARK-10707: nullability should be correctly propagated through set operations (1)") {
// This test produced an incorrect result of 1 before the SPARK-10707 fix because of the
// NullPropagation rule: COUNT(v) got replaced with COUNT(1) because the output column of
// UNION was incorrectly considered non-nullable:
checkAnswer(
sql("""SELECT count(v) FROM (
| SELECT v FROM (
| SELECT 'foo' AS v UNION ALL
| SELECT NULL AS v
| ) my_union WHERE isnull(v)
|) my_subview""".stripMargin),
Seq(Row(0)))
}
test("SPARK-10707: nullability should be correctly propagated through set operations (2)") {
// This test uses RAND() to stop column pruning for Union and checks the resulting isnull
// value. This would produce an incorrect result before the fix in SPARK-10707 because the "v"
// column of the union was considered non-nullable.
checkAnswer(
sql(
"""
|SELECT a FROM (
| SELECT ISNULL(v) AS a, RAND() FROM (
| SELECT 'foo' AS v UNION ALL SELECT null AS v
| ) my_union
|) my_view
""".stripMargin),
Row(false) :: Row(true) :: Nil)
}
test("filter on a grouping column that is not presented in SELECT") {
checkAnswer(
sql("select count(1) from (select 1 as a) t group by a having a > 0"),
Row(1) :: Nil)
}
test("SPARK-13056: Null in map value causes NPE") {
val df = Seq(1 -> Map("abc" -> "somestring", "cba" -> null)).toDF("key", "value")
withTempView("maptest") {
df.createOrReplaceTempView("maptest")
// local optimization will by pass codegen code, so we should keep the filter `key=1`
checkAnswer(sql("SELECT value['abc'] FROM maptest where key = 1"), Row("somestring"))
checkAnswer(sql("SELECT value['cba'] FROM maptest where key = 1"), Row(null))
}
}
test("hash function") {
val df = Seq(1 -> "a", 2 -> "b").toDF("i", "j")
withTempView("tbl") {
df.createOrReplaceTempView("tbl")
checkAnswer(
df.select(hash($"i", $"j")),
sql("SELECT hash(i, j) from tbl")
)
}
}
test("join with using clause") {
val df1 = Seq(("r1c1", "r1c2", "t1r1c3"),
("r2c1", "r2c2", "t1r2c3"), ("r3c1x", "r3c2", "t1r3c3")).toDF("c1", "c2", "c3")
val df2 = Seq(("r1c1", "r1c2", "t2r1c3"),
("r2c1", "r2c2", "t2r2c3"), ("r3c1y", "r3c2", "t2r3c3")).toDF("c1", "c2", "c3")
val df3 = Seq((null, "r1c2", "t3r1c3"),
("r2c1", "r2c2", "t3r2c3"), ("r3c1y", "r3c2", "t3r3c3")).toDF("c1", "c2", "c3")
withTempView("t1", "t2", "t3") {
df1.createOrReplaceTempView("t1")
df2.createOrReplaceTempView("t2")
df3.createOrReplaceTempView("t3")
// inner join with one using column
checkAnswer(
sql("SELECT * FROM t1 join t2 using (c1)"),
Row("r1c1", "r1c2", "t1r1c3", "r1c2", "t2r1c3") ::
Row("r2c1", "r2c2", "t1r2c3", "r2c2", "t2r2c3") :: Nil)
// inner join with two using columns
checkAnswer(
sql("SELECT * FROM t1 join t2 using (c1, c2)"),
Row("r1c1", "r1c2", "t1r1c3", "t2r1c3") ::
Row("r2c1", "r2c2", "t1r2c3", "t2r2c3") :: Nil)
// Left outer join with one using column.
checkAnswer(
sql("SELECT * FROM t1 left join t2 using (c1)"),
Row("r1c1", "r1c2", "t1r1c3", "r1c2", "t2r1c3") ::
Row("r2c1", "r2c2", "t1r2c3", "r2c2", "t2r2c3") ::
Row("r3c1x", "r3c2", "t1r3c3", null, null) :: Nil)
// Right outer join with one using column.
checkAnswer(
sql("SELECT * FROM t1 right join t2 using (c1)"),
Row("r1c1", "r1c2", "t1r1c3", "r1c2", "t2r1c3") ::
Row("r2c1", "r2c2", "t1r2c3", "r2c2", "t2r2c3") ::
Row("r3c1y", null, null, "r3c2", "t2r3c3") :: Nil)
// Full outer join with one using column.
checkAnswer(
sql("SELECT * FROM t1 full outer join t2 using (c1)"),
Row("r1c1", "r1c2", "t1r1c3", "r1c2", "t2r1c3") ::
Row("r2c1", "r2c2", "t1r2c3", "r2c2", "t2r2c3") ::
Row("r3c1x", "r3c2", "t1r3c3", null, null) ::
Row("r3c1y", null,
null, "r3c2", "t2r3c3") :: Nil)
// Full outer join with null value in join column.
checkAnswer(
sql("SELECT * FROM t1 full outer join t3 using (c1)"),
Row("r1c1", "r1c2", "t1r1c3", null, null) ::
Row("r2c1", "r2c2", "t1r2c3", "r2c2", "t3r2c3") ::
Row("r3c1x", "r3c2", "t1r3c3", null, null) ::
Row("r3c1y", null, null, "r3c2", "t3r3c3") ::
Row(null, null, null, "r1c2", "t3r1c3") :: Nil)
// Self join with using columns.
checkAnswer(
sql("SELECT * FROM t1 join t1 using (c1)"),
Row("r1c1", "r1c2", "t1r1c3", "r1c2", "t1r1c3") ::
Row("r2c1", "r2c2", "t1r2c3", "r2c2", "t1r2c3") ::
Row("r3c1x", "r3c2", "t1r3c3", "r3c2", "t1r3c3") :: Nil)
}
}
test("SPARK-15327: fail to compile generated code with complex data structure") {
withTempDir{ dir =>
val json =
"""
|{"h": {"b": {"c": [{"e": "adfgd"}], "a": [{"e": "testing", "count": 3}],
|"b": [{"e": "test", "count": 1}]}}, "d": {"b": {"c": [{"e": "adfgd"}],
|"a": [{"e": "testing", "count": 3}], "b": [{"e": "test", "count": 1}]}},
|"c": {"b": {"c": [{"e": "adfgd"}], "a": [{"count": 3}],
|"b": [{"e": "test", "count": 1}]}}, "a": {"b": {"c": [{"e": "adfgd"}],
|"a": [{"count": 3}], "b": [{"e": "test", "count": 1}]}},
|"e": {"b": {"c": [{"e": "adfgd"}], "a": [{"e": "testing", "count": 3}],
|"b": [{"e": "test", "count": 1}]}}, "g": {"b": {"c": [{"e": "adfgd"}],
|"a": [{"e": "testing", "count": 3}], "b": [{"e": "test", "count": 1}]}},
|"f": {"b": {"c": [{"e": "adfgd"}], "a": [{"e": "testing", "count": 3}],
|"b": [{"e": "test", "count": 1}]}}, "b": {"b": {"c": [{"e": "adfgd"}],
|"a": [{"count": 3}], "b": [{"e": "test", "count": 1}]}}}'
|
""".stripMargin
spark.read.json(Seq(json).toDS()).write.mode("overwrite").parquet(dir.toString)
spark.read.parquet(dir.toString).collect()
}
}
test("data source table created in InMemoryCatalog should be able to read/write") {
withTable("tbl") {
sql("CREATE TABLE tbl(i INT, j STRING) USING parquet")
checkAnswer(sql("SELECT i, j FROM tbl"), Nil)
Seq(1 -> "a", 2 -> "b").toDF("i", "j").write.mode("overwrite").insertInto("tbl")
checkAnswer(sql("SELECT i, j FROM tbl"), Row(1, "a") :: Row(2, "b") :: Nil)
Seq(3 -> "c", 4 -> "d").toDF("i", "j").write.mode("append").saveAsTable("tbl")
checkAnswer(
sql("SELECT i, j FROM tbl"),
Row(1, "a") :: Row(2, "b") :: Row(3, "c") :: Row(4, "d") :: Nil)
}
}
test("Eliminate noop ordinal ORDER BY") {
withSQLConf(SQLConf.ORDER_BY_ORDINAL.key -> "true") {
val plan1 = sql("SELECT 1.0, 'abc', year(current_date()) ORDER BY 1, 2, 3")
val plan2 = sql("SELECT 1.0, 'abc', year(current_date())")
comparePlans(plan1.queryExecution.optimizedPlan, plan2.queryExecution.optimizedPlan)
}
}
test("check code injection is prevented") {
// The end of comment (*/) should be escaped.
var literal =
"""|*/
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
var expected =
"""|*/
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
checkAnswer(
sql(s"SELECT '$literal' AS DUMMY"),
Row(s"$expected") :: Nil)
// `\u002A` is `*` and `\u002F` is `/`
// so if the end of comment consists of those characters in queries, we need to escape them.
literal =
"""|\\u002A/
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
expected =
s"""|${"\\u002A/"}
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
checkAnswer(
sql(s"SELECT '$literal' AS DUMMY"),
Row(s"$expected") :: Nil)
literal =
"""|\\\\u002A/
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
expected =
"""|\\u002A/
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
checkAnswer(
sql(s"SELECT '$literal' AS DUMMY"),
Row(s"$expected") :: Nil)
literal =
"""|\\u002a/
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
expected =
s"""|${"\\u002a/"}
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
checkAnswer(
sql(s"SELECT '$literal' AS DUMMY"),
Row(s"$expected") :: Nil)
literal =
"""|\\\\u002a/
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
expected =
"""|\\u002a/
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
checkAnswer(
sql(s"SELECT '$literal' AS DUMMY"),
Row(s"$expected") :: Nil)
literal =
"""|*\\u002F
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
expected =
s"""|${"*\\u002F"}
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
checkAnswer(
sql(s"SELECT '$literal' AS DUMMY"),
Row(s"$expected") :: Nil)
literal =
"""|*\\\\u002F
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
expected =
"""|*\\u002F
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
checkAnswer(
sql(s"SELECT '$literal' AS DUMMY"),
Row(s"$expected") :: Nil)
literal =
"""|*\\u002f
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
expected =
s"""|${"*\\u002f"}
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
checkAnswer(
sql(s"SELECT '$literal' AS DUMMY"),
Row(s"$expected") :: Nil)
literal =
"""|*\\\\u002f
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
expected =
"""|*\\u002f
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
checkAnswer(
sql(s"SELECT '$literal' AS DUMMY"),
Row(s"$expected") :: Nil)
literal =
"""|\\u002A\\u002F
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
expected =
s"""|${"\\u002A\\u002F"}
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
checkAnswer(
sql(s"SELECT '$literal' AS DUMMY"),
Row(s"$expected") :: Nil)
literal =
"""|\\\\u002A\\u002F
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
expected =
s"""|${"\\\\u002A\\u002F"}
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
checkAnswer(
sql(s"SELECT '$literal' AS DUMMY"),
Row(s"$expected") :: Nil)
literal =
"""|\\u002A\\\\u002F
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
expected =
s"""|${"\\u002A\\\\u002F"}
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
checkAnswer(
sql(s"SELECT '$literal' AS DUMMY"),
Row(s"$expected") :: Nil)
literal =
"""|\\\\u002A\\\\u002F
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
expected =
"""|\\u002A\\u002F
|{
| new Object() {
| void f() { throw new RuntimeException("This exception is injected."); }
| }.f();
|}
|/*""".stripMargin
checkAnswer(
sql(s"SELECT '$literal' AS DUMMY"),
Row(s"$expected") :: Nil)
}
test("SPARK-15752 optimize metadata only query for datasource table") {
withSQLConf(SQLConf.OPTIMIZER_METADATA_ONLY.key -> "true") {
withTable("srcpart_15752") {
val data = (1 to 10).map(i => (i, s"data-$i", i % 2, if ((i % 2) == 0) "a" else "b"))
.toDF("col1", "col2", "partcol1", "partcol2")
data.write.partitionBy("partcol1", "partcol2").mode("append").saveAsTable("srcpart_15752")
checkAnswer(
sql("select partcol1 from srcpart_15752 group by partcol1"),
Row(0) :: Row(1) :: Nil)
checkAnswer(
sql("select partcol1 from srcpart_15752 where partcol1 = 1 group by partcol1"),
Row(1))
checkAnswer(
sql("select partcol1, count(distinct partcol2) from srcpart_15752 group by partcol1"),
Row(0, 1) :: Row(1, 1) :: Nil)
checkAnswer(
sql("select partcol1, count(distinct partcol2) from srcpart_15752 where partcol1 = 1 " +
"group by partcol1"),
Row(1, 1) :: Nil)
checkAnswer(sql("select distinct partcol1 from srcpart_15752"), Row(0) :: Row(1) :: Nil)
checkAnswer(sql("select distinct partcol1 from srcpart_15752 where partcol1 = 1"), Row(1))
checkAnswer(
sql("select distinct col from (select partcol1 + 1 as col from srcpart_15752 " +
"where partcol1 = 1) t"),
Row(2))
checkAnswer(sql("select max(partcol1) from srcpart_15752"), Row(1))
checkAnswer(sql("select max(partcol1) from srcpart_15752 where partcol1 = 1"), Row(1))
checkAnswer(sql("select max(partcol1) from (select partcol1 from srcpart_15752) t"), Row(1))
checkAnswer(
sql("select max(col) from (select partcol1 + 1 as col from srcpart_15752 " +
"where partcol1 = 1) t"),
Row(2))
}
}
}
test("SPARK-16975: Column-partition path starting '_' should be handled correctly") {
withTempDir { dir =>
val parquetDir = new File(dir, "parquet").getCanonicalPath
spark.range(10).withColumn("_col", $"id").write.partitionBy("_col").save(parquetDir)
spark.read.parquet(parquetDir)
}
}
test("SPARK-16644: Aggregate should not put aggregate expressions to constraints") {
withTable("tbl") {
sql("CREATE TABLE tbl(a INT, b INT) USING parquet")
checkAnswer(sql(
"""
|SELECT
| a,
| MAX(b) AS c1,
| b AS c2
|FROM tbl
|WHERE a = b
|GROUP BY a, b
|HAVING c1 = 1
""".stripMargin), Nil)
}
}
test("SPARK-16674: field names containing dots for both fields and partitioned fields") {
withTempPath { path =>
val data = (1 to 10).map(i => (i, s"data-$i", i % 2, if ((i % 2) == 0) "a" else "b"))
.toDF("col.1", "col.2", "part.col1", "part.col2")
data.write
.format("parquet")
.partitionBy("part.col1", "part.col2")
.save(path.getCanonicalPath)
val readBack = spark.read.format("parquet").load(path.getCanonicalPath)
checkAnswer(
readBack.selectExpr("`part.col1`", "`col.1`"),
data.selectExpr("`part.col1`", "`col.1`"))
}
}
test("SPARK-17515: CollectLimit.execute() should perform per-partition limits") {
val numRecordsRead = spark.sparkContext.longAccumulator
spark.range(1, 100, 1, numPartitions = 10).map { x =>
numRecordsRead.add(1)
x
}.limit(1).queryExecution.toRdd.count()
assert(numRecordsRead.value === 10)
}
test("CREATE TABLE USING should not fail if a same-name temp view exists") {
withTable("same_name") {
withTempView("same_name") {
spark.range(10).createTempView("same_name")
sql("CREATE TABLE same_name(i int) USING json")
checkAnswer(spark.table("same_name"), spark.range(10).toDF())
assert(spark.table("default.same_name").collect().isEmpty)
}
}
}
test("SPARK-18053: ARRAY equality is broken") {
withTable("array_tbl") {
spark.range(10).select(array($"id").as("arr")).write.saveAsTable("array_tbl")
assert(sql("SELECT * FROM array_tbl where arr = ARRAY(1L)").count == 1)
}
}
test("SPARK-19157: should be able to change spark.sql.runSQLOnFiles at runtime") {
withTempPath { path =>
Seq(1 -> "a").toDF("i", "j").write.parquet(path.getCanonicalPath)
val newSession = spark.newSession()
val originalValue = newSession.sessionState.conf.runSQLonFile
try {
newSession.sessionState.conf.setConf(SQLConf.RUN_SQL_ON_FILES, false)
intercept[AnalysisException] {
newSession.sql(s"SELECT i, j FROM parquet.`${path.getCanonicalPath}`")
}
newSession.sessionState.conf.setConf(SQLConf.RUN_SQL_ON_FILES, true)
checkAnswer(
newSession.sql(s"SELECT i, j FROM parquet.`${path.getCanonicalPath}`"),
Row(1, "a"))
} finally {
newSession.sessionState.conf.setConf(SQLConf.RUN_SQL_ON_FILES, originalValue)
}
}
}
test("should be able to resolve a persistent view") {
withTable("t1", "t2") {
withView("v1") {
sql("CREATE TABLE `t1` USING parquet AS SELECT * FROM VALUES(1, 1) AS t1(a, b)")
sql("CREATE TABLE `t2` USING parquet AS SELECT * FROM VALUES('a', 2, 1.0) AS t2(d, e, f)")
sql("CREATE VIEW `v1`(x, y) AS SELECT * FROM t1")
checkAnswer(spark.table("v1").orderBy("x"), Row(1, 1))
sql("ALTER VIEW `v1` AS SELECT * FROM t2")
checkAnswer(spark.table("v1").orderBy("f"), Row("a", 2, 1.0))
}
}
}
test("SPARK-19059: read file based table whose name starts with underscore") {
withTable("_tbl") {
sql("CREATE TABLE `_tbl`(i INT) USING parquet")
sql("INSERT INTO `_tbl` VALUES (1), (2), (3)")
checkAnswer( sql("SELECT * FROM `_tbl`"), Row(1) :: Row(2) :: Row(3) :: Nil)
}
}
  test("SPARK-19334: check code injection is prevented") {
    // The end of comment (*/) should be escaped.
    // The back-quoted field name below embeds Java source. If code generation spliced it
    // into generated code unescaped, the injected RuntimeException would fire; instead the
    // query must execute normally and return Row(1).
    val badQuery =
      """|SELECT inline(array(cast(struct(1) AS
         | struct<`=
         | new Object() {
         | {f();}
         | public void f() {throw new RuntimeException("This exception is injected.");}
         | public int x;
         | }.x
         | `:int>)))""".stripMargin.replaceAll("\n", "")
    checkAnswer(sql(badQuery), Row(1) :: Nil)
  }
  test("SPARK-19650: An action on a Command should not trigger a Spark job") {
    // Create a listener that checks if new jobs have started.
    val jobStarted = new AtomicBoolean(false)
    val listener = new SparkListener {
      override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
        jobStarted.set(true)
      }
    }
    // Make sure no spurious job starts are pending in the listener bus.
    sparkContext.listenerBus.waitUntilEmpty(500)
    sparkContext.addSparkListener(listener)
    try {
      // Execute the command.
      sql("show databases").head()
      // Make sure we have seen all events triggered by DataFrame.show()
      sparkContext.listenerBus.waitUntilEmpty(500)
    } finally {
      // Always detach the listener so later tests are unaffected.
      sparkContext.removeSparkListener(listener)
    }
    assert(!jobStarted.get(), "Command should not trigger a Spark job.")
  }
}
| jianran/spark | sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala | Scala | apache-2.0 | 89,213 |
/** Thrown when attempting to create a folder that already exists. */
class FolderExistsException extends Exception("Folder already exists")

/** Thrown when user-supplied input fails validation. */
class ValidationException extends Exception("Validation failed")

/** Thrown when a requested folder cannot be found. */
class FolderNotFoundException extends Exception("Folder not found")

/** Thrown when attempting to create a bookmark that already exists. */
class BookmarkExistsException extends Exception("Bookmark already exists")

/** Thrown when a requested bookmark cannot be found. */
class BookmarkNotFoundException extends Exception("Bookmark not found")
| jeroanan/SunshineRecorder | src/sunshine/Exceptions.scala | Scala | gpl-3.0 | 240 |
package info.glennengstrand.news.dao
import slick.jdbc.MySQLProfile.api._
/** Shared Slick database handle for the news-feed DAO layer. */
object MySqlDao {
  // Lazily initialised from the "mysql" configuration section; the connection
  // pool is only created on first use.
  lazy val db = Database.forConfig("mysql")
}
| gengstrand/clojure-news-feed | server/feed11/src/main/scala/info/glennengstrand/news/dao/MySqlDao.scala | Scala | epl-1.0 | 141 |
object Test {
  /** Conses 1 onto a five-tuple to build a six-tuple and prints the result. */
  def main(args: Array[String]): Unit = {
    val tail = (2, 3, 4, 5, 6)
    val t6: (Int, Int, Int, Int, Int, Int) = 1 *: tail
    println(t6)
  }
}
| lampepfl/dotty | tests/run/tuple-cons-2.scala | Scala | apache-2.0 | 140 |
/*
* Copyright 2017-2018 47 Degrees, LLC. <http://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package examples.todolist.service
import cats.Monad
import cats.implicits._
import freestyle.tagless._
import freestyle.tagless.effects.error.ErrorM
import examples.todolist.{Tag, TodoForm, TodoList}
import examples.todolist.persistence.AppRepository
/**
 * Aggregates the tag, todo-list and todo-item services so that a complete
 * [[TodoForm]] (tag + list + items) can be created, updated and removed as a
 * unit. Missing tags or lists are surfaced through the [[ErrorM]] effect.
 */
@module
trait AppService[F[_]] {

  implicit val M: Monad[F]

  val repo: AppRepository[F]
  val tagService: TagService[F]
  val todoItemService: TodoItemService[F]
  val todoListService: TodoListService[F]
  val error: ErrorM

  /**
   * Persists a new form: the tag first, then the list (linked to the tag's
   * generated id), then the items (linked to the list's generated id).
   * Returns the form updated with the generated ids.
   */
  def insert(form: TodoForm): F[TodoForm] = {
    for {
      tag <- tagService.insert(form.tag)
      t <- error.either[Tag](tag.toRight(new NoSuchElementException("Could not create Tag")))
      list <- todoListService.insert(form.list.copy(tagId = t.id))
      l <- error.either[TodoList](
        list.toRight(new NoSuchElementException("Could not create TodoList")))
      item <- todoItemService.batchedInsert(form.items.map(_.copy(todoListId = l.id)))
    } yield {
      // If any item insert returned None, fall back to the caller-supplied items.
      form.copy(list = l, tag = t, items = item.sequence getOrElse form.items)
    }
  }

  /**
   * Updates an existing form following the same tag -> list -> items order as
   * [[insert]].
   */
  def update(form: TodoForm): F[TodoForm] =
    for {
      tag <- tagService.update(form.tag)
      // NOTE: previously reported "Could not create Tag" on the update path.
      t <- error.either[Tag](tag.toRight(new NoSuchElementException("Could not update Tag")))
      list <- todoListService.update(form.list.copy(tagId = t.id))
      l <- error.either[TodoList](
        list.toRight(new NoSuchElementException("Could not update TodoList")))
      item <- todoItemService.batchedUpdate(form.items.map(_.copy(todoListId = l.id)))
    } yield {
      form.copy(list = l, tag = t, items = item.sequence getOrElse form.items)
    }

  /**
   * Deletes the form's items, list and tag, returning the total number of rows
   * removed. Fails with NoSuchElementException if any required id is absent.
   */
  def destroy(form: TodoForm): F[Int] = {
    val itemIds: Option[List[Int]] = form.items.map(_.id).sequence
    val listId: Option[Int] = form.list.id
    val tagId: Option[Int] = form.tag.id

    val program = for {
      a <- itemIds.map(todoItemService.batchedDestroy)
      b <- listId.map(todoListService.destroy)
      c <- tagId.map(tagService.destroy)
    } yield List(a, b, c)

    program
      .map(_.sequence.map(_.sum))
      .getOrElse(throw new NoSuchElementException("Could not delete"))
  }

  /** Clears all tags, lists and items; yields the number of rows removed. */
  val reset: F[Int] =
    for {
      tags <- tagService.reset
      lists <- todoListService.reset
      items <- todoItemService.reset
    } yield tags + lists + items

  /** Fetches every (list, tag) pair with its items, assembled into forms. */
  val list: F[List[TodoForm]] =
    repo.list.map(_.groupBy(x => (x._1, x._2)).map {
      case ((todoList, tag), list) =>
        TodoForm(todoList, tag, list.flatMap(_._3))
    }.toList)
}
| frees-io/freestyle | modules/examples/todolist-lib/src/main/scala/todo/service/AppService.scala | Scala | apache-2.0 | 3,127 |
package io.react2.scalata.parsers
import io.react2.scalata.plugins.Plugin
import io.react2.scalata.translation.Field
/**
 * A [[Plugin]] that produces a value for a translation [[Field]].
 *
 * @author dbalduini
 */
trait Parser extends Plugin {
  // Returns the parsed value for the given field; the concrete result type is
  // implementation-specific (hence Any).
  def parse(f: Field): Any
}
| React2/scalata | src/main/scala/io/react2/scalata/parsers/Parser.scala | Scala | apache-2.0 | 207 |
package com.shocktrade.server.dao.contest
import com.shocktrade.common.forms.ContestSearchForm
import com.shocktrade.common.models.contest.{ChatMessage, Participant}
import io.scalajs.npm.mongodb._
import io.scalajs.util.ScalaJsHelper._
import scala.concurrent.{ExecutionContext, Future}
import scala.scalajs.js
/**
* Contest DAO
* @author Lawrence Daniels <lawrence.daniels@gmail.com>
*/
@js.native
trait ContestDAO extends Collection
/**
* Contest DAO Companion
* @author Lawrence Daniels <lawrence.daniels@gmail.com>
*/
object ContestDAO {
/**
* Contest DAO Extensions
* @param dao the given [[ContestDAO Contest DAO]]
*/
implicit class ContestDAOExtensions(val dao: ContestDAO) {
@inline
def addChatMessage(contestID: String, message: ChatMessage)(implicit ec: ExecutionContext): js.Promise[FindAndModifyWriteOpResult] = {
dao.findOneAndUpdate(
filter = "_id" $eq contestID.$oid,
update = "messages" $addToSet message,
options = new FindAndUpdateOptions(returnOriginal = false, upsert = false))
}
@inline
def create(contest: ContestData): js.Promise[InsertWriteOpResult] = dao.insertOne(contest)
@inline
def findActiveContests()(implicit ec: ExecutionContext): js.Promise[js.Array[ContestData]] = {
dao.find[ContestData]("status" $eq "ACTIVE").toArray()
}
@inline
def findChatMessages(contestID: String)(implicit ec: ExecutionContext): Future[Option[js.Array[ChatMessage]]] = {
dao.findOneFuture[ContestData](
selector = "_id" $eq contestID.$oid,
fields = js.Array("messages")) map (_ map (_.messages getOrElse emptyArray))
}
@inline
def findOneByID(contestID: String)(implicit ec: ExecutionContext): Future[Option[ContestData]] = {
dao.findOneFuture[ContestData]("_id" $eq contestID.$oid)
}
@inline
def findByPlayer(playerID: String)(implicit ec: ExecutionContext): js.Promise[js.Array[ContestData]] = {
dao.find[ContestData]("participants._id" $eq playerID).toArray()
}
@inline
def findUnoccupied(playerID: String)(implicit ec: ExecutionContext): js.Promise[js.Array[ContestData]] = {
dao.find[ContestData]("participants" $not $elemMatch("_id" $eq playerID)).toArray()
}
@inline
def join(contestID: String, participant: Participant)(implicit ec: ExecutionContext): js.Promise[FindAndModifyWriteOpResult] = {
dao.findOneAndUpdate(filter = "_id" $eq contestID.$oid, update = "participants" $addToSet participant)
}
@inline
def search(form: ContestSearchForm): js.Promise[js.Array[ContestData]] = {
val query = doc(Seq(
form.activeOnly.toOption.flatMap(checked => if (checked) Some("status" $eq "ACTIVE") else None),
form.friendsOnly.toOption.flatMap(checked => if (checked) Some("friendsOnly" $eq true) else None),
form.perksAllowed.toOption.flatMap(checked => if (checked) Some("perksAllowed" $eq true) else None),
form.invitationOnly.toOption.flatMap(checked => if (checked) Some("invitationOnly" $eq true) else None),
(for (allowed <- form.levelCapAllowed.toOption; level <- form.levelCap.toOption) yield (allowed, level)) flatMap {
case (allowed, levelCap) => if (allowed) Some($or("levelCap" $exists false, "levelCap" $lte levelCap.toInt)) else None
},
form.perksAllowed.toOption.flatMap(checked => if (checked) Some("perksAllowed" $eq true) else None),
form.robotsAllowed.toOption.flatMap(checked => if (checked) Some("robotsAllowed" $eq true) else None)
).flatten: _*)
dao.find[ContestData](query).toArray()
}
@inline
def updateContest(contest: ContestData): js.Promise[UpdateWriteOpResultObject] = {
dao.updateOne(filter = "_id" $eq contest._id, update = contest, new UpdateOptions(upsert = false))
}
@inline
def updateContests(contests: Seq[ContestData]): js.Promise[BulkWriteOpResultObject] = {
dao.bulkWrite(
js.Array(contests map (contest =>
updateOne(filter = "_id" $eq contest._id, update = contest, upsert = false)
): _*)
)
}
}
  /**
   * Contest DAO Constructors
   * @param db the given [[Db database]]
   */
  implicit class ContestDAOConstructors(val db: Db) extends AnyVal {
    // Obtains the "Contests" collection, viewed through the ContestDAO facade trait.
    @inline
    def getContestDAO: ContestDAO = {
      db.collection("Contests").asInstanceOf[ContestDAO]
    }
  }
} | ldaniels528/shocktrade.js | app/server/dao/src/main/scala/com/shocktrade/server/dao/contest/ContestDAO.scala | Scala | apache-2.0 | 4,419 |
package ru.dgolubets.jsmoduleloader.api.amd
import javax.script.ScriptEngineManager
import jdk.nashorn.api.scripting.ScriptObjectMirror
import org.scalatest.{AsyncWordSpec, Matchers}
import ru.dgolubets.jsmoduleloader.api.ScriptModuleException
import ru.dgolubets.jsmoduleloader.api.readers.FileModuleReader
import ru.dgolubets.jsmoduleloader.internal.Resource
import scala.concurrent.Promise
// https://github.com/amdjs/amdjs-api/blob/master/AMD.md
class AmdLoaderSpec extends AsyncWordSpec with Matchers {
  // JSR-223 engine manager; created with a null ClassLoader (see ScriptEngineManager javadoc
  // for the lookup behaviour that implies — TODO confirm intent).
  val engineManager = new ScriptEngineManager(null)
  // Fresh loader reading AMD modules from the test-resources directory.
  def createLoader = AmdLoader(FileModuleReader("src/test/javascript/amd"))
  // Same as createLoader, but with /globals.js evaluated on the engine first.
  def createLoaderWithGlobals = {
    val loader = AmdLoader(FileModuleReader("src/test/javascript/amd"))
    loader.engine.eval(Resource.readString("/globals.js").get)
    loader
  }
  "AmdLoader" when {
    "created with it's own engine" should {
      "set globals" in {
        val loader = createLoader
        // The loader is expected to install a node-like environment on its engine.
        loader.engine.eval("typeof global === 'object'") shouldBe true
        loader.engine.eval("typeof console === 'object'") shouldBe true
        loader.engine.eval("typeof console.log === 'function'") shouldBe true
        loader.engine.eval("typeof console.debug === 'function'") shouldBe true
        loader.engine.eval("typeof console.warn === 'function'") shouldBe true
        loader.engine.eval("typeof console.error === 'function'") shouldBe true
      }
    }
    "created" should {
      "create 'require' function on the engine" in {
        val loader = createLoader
        loader.engine.eval("typeof require == 'function'") shouldBe true
      }
      "create 'define' function on the engine" in {
        val loader = createLoader
        loader.engine.eval("typeof define == 'function'") shouldBe true
      }
      "set define.amd property" in {
        val loader = createLoader
        loader.engine.eval("typeof define.amd === 'object'") shouldBe true
      }
    }
    "loads modules" should {
      "expose engine scope to modules for read access" in {
        val loader = createLoader
        val module = loader.requireAsync("core/readEngineScope")
        loader.engine.put("engineText", "some text")
        module.map(_ => succeed)
      }
    }
    "require is called in javascript" should {
      "load module" in {
        val loader = createLoaderWithGlobals
        // Completed from inside JavaScript via the injected `promise` binding.
        val promise = Promise[AnyRef]()
        loader.engine.put("promise", promise)
        loader.engine.eval("require(['definitions/object'], function(m){ promise.success(m); })")
        promise.future.map { m =>
          m shouldNot be(null)
          m shouldBe a[ScriptObjectMirror]
        }
      }
    }
    "require is called in scala" should {
      "return error for invalid file" in {
        val loader = createLoaderWithGlobals
        val module = loader.requireAsync("aModuleThatDoesNotExist")
        module.failed.map { m =>
          m shouldBe a[ScriptModuleException]
        }
      }
      // Registers one test that loads `file` and checks the module value's type and
      // an optional predicate over the loaded value.
      def load[T: Manifest](message: String, file: String, check: T => Boolean = { _: T => true }) = {
        s"load $message" in {
          val loader = createLoaderWithGlobals
          val module = loader.requireAsync(file)
          module.map { m =>
            m.value shouldBe a[T]
            assert(check(m.value.asInstanceOf[T]))
          }
        }
      }
      load[ScriptObjectMirror]("a module in simple object format", "definitions/object")
      load[ScriptObjectMirror]("a module in simple function format", "definitions/function")
      load[ScriptObjectMirror]("a module in simple function format with default arguments", "definitions/functionWithDefaultArgs")
      load[ScriptObjectMirror]("a module in function format with dependencies", "definitions/functionWithDependencies")
      load[ScriptObjectMirror]("a module in function format with dependencies that returns a function", "definitions/returnsFunction",
        m => m.isFunction)
      load[ScriptObjectMirror]("a module in function format with dependencies that returns an array", "definitions/returnsArray",
        m => m.isArray)
      load[String]("a module in function format with dependencies that returns a string", "definitions/returnsString")
      load[Integer]("a module in function format with dependencies that returns a number", "definitions/returnsNumber")
      load("simple circular dependant modules", "dependencies/circular/simple/A")
      load("circular dependant modules defined with 'exports'", "dependencies/circular/exports/A")
      load("modules with common dependency", "dependencies/common/simple/A")
      load("modules with common dependency defined with 'exports'", "dependencies/common/exports/A")
      load("module using local require", "dependencies/local/A")
      load("modules bundle", "bundles/ABC")
    }
  }
} | DGolubets/js-module-loader | src/test/scala/ru/dgolubets/jsmoduleloader/api/amd/AmdLoaderSpec.scala | Scala | mit | 4,830 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import com.google.common.base.Objects
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hive.metastore.Warehouse
import org.apache.hadoop.hive.metastore.api.FieldSchema
import org.apache.hadoop.hive.ql.metadata._
import org.apache.hadoop.hive.serde2.Deserializer
import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.analysis.{Catalog, MultiInstanceRelation, OverrideCatalog}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.catalyst.plans.logical
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.hive.client._
import org.apache.spark.sql.parquet.ParquetRelation2
import org.apache.spark.sql.sources.{CreateTableUsingAsSelect, LogicalRelation, Partition => ParquetPartition, PartitionSpec, ResolvedDataSource}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{AnalysisException, SQLContext, SaveMode, sources}
import org.apache.spark.util.Utils
/* Implicit conversions */
import scala.collection.JavaConversions._
private[hive] class HiveMetastoreCatalog(val client: ClientInterface, hive: HiveContext)
extends Catalog with Logging {
  // SQL configuration of the owning HiveContext.
  val conf = hive.conf
  /** Usages should lock on `this`. */
  protected[hive] lazy val hiveWarehouse = new Warehouse(hive.hiveconf)
  // TODO: Use this everywhere instead of tuples or databaseName, tableName,.
  /** A fully qualified identifier for a table (i.e., database.tableName) */
  case class QualifiedTableName(database: String, name: String) {
    // Lower-cases both parts, producing the canonical form used as a cache key.
    def toLowerCase: QualifiedTableName = QualifiedTableName(database.toLowerCase, name.toLowerCase)
  }
  /**
   * A cache of Spark SQL data source tables that have been accessed.
   * Entries are loaded on demand by reading the table's metastore properties
   * (provider, serialized schema, partition columns) and resolving a data source.
   */
  protected[hive] val cachedDataSourceTables: LoadingCache[QualifiedTableName, LogicalPlan] = {
    val cacheLoader = new CacheLoader[QualifiedTableName, LogicalPlan]() {
      override def load(in: QualifiedTableName): LogicalPlan = {
        logDebug(s"Creating new cached data source for $in")
        val table = client.getTable(in.database, in.name)
        // Reassembles a schema JSON string that was split across numbered
        // "spark.sql.sources.schema.part.N" properties; fails if a part is missing.
        def schemaStringFromParts: Option[String] = {
          table.properties.get("spark.sql.sources.schema.numParts").map { numParts =>
            val parts = (0 until numParts.toInt).map { index =>
              val part = table.properties.get(s"spark.sql.sources.schema.part.$index").orNull
              if (part == null) {
                throw new AnalysisException(
                  "Could not read schema from the metastore because it is corrupted " +
                    s"(missing part $index of the schema, $numParts parts are expected).")
              }
              part
            }
            // Stick all parts back to a single schema string.
            parts.mkString
          }
        }
        // Originally, we used spark.sql.sources.schema to store the schema of a data source table.
        // After SPARK-6024, we removed this flag.
        // Although we are not using spark.sql.sources.schema any more, we need to still support.
        val schemaString =
          table.properties.get("spark.sql.sources.schema").orElse(schemaStringFromParts)
        val userSpecifiedSchema =
          schemaString.map(s => DataType.fromJson(s).asInstanceOf[StructType])
        // We only need names at here since userSpecifiedSchema we loaded from the metastore
        // contains partition columns. We can always get datatypes of partitioning columns
        // from userSpecifiedSchema.
        val partitionColumns = table.partitionColumns.map(_.name)
        // It does not appear that the ql client for the metastore has a way to enumerate all the
        // SerDe properties directly...
        val options = table.serdeProperties
        val resolvedRelation =
          ResolvedDataSource(
            hive,
            userSpecifiedSchema,
            partitionColumns.toArray,
            table.properties("spark.sql.sources.provider"),
            options)
        LogicalRelation(resolvedRelation.relation)
      }
    }
    CacheBuilder.newBuilder().maximumSize(1000).build(cacheLoader)
  }
  /** Invalidates (without eagerly reloading) the cached plan of the given table. */
  override def refreshTable(databaseName: String, tableName: String): Unit = {
    // refreshTable does not eagerly reload the cache. It just invalidate the cache.
    // Next time when we use the table, it will be populated in the cache.
    // Since we also cache ParquetRelations converted from Hive Parquet tables and
    // adding converted ParquetRelations into the cache is not defined in the load function
    // of the cache (instead, we add the cache entry in convertToParquetRelation),
    // it is better at here to invalidate the cache to avoid confusing waring logs from the
    // cache loader (e.g. cannot find data source provider, which is only defined for
    // data source table.).
    invalidateTable(databaseName, tableName)
  }
  /** Drops the cache entry of the given table; the key is lower-cased to match cache keys. */
  def invalidateTable(databaseName: String, tableName: String): Unit = {
    cachedDataSourceTables.invalidate(QualifiedTableName(databaseName, tableName).toLowerCase)
  }
val caseSensitive: Boolean = false
  /**
   * Creates a data source table (a table created with USING clause) in Hive's metastore.
   * (Note: despite what an earlier comment claimed, this returns Unit, not a Boolean.)
   *
   * @param tableName           unqualified table name; stored in the "default" database
   * @param userSpecifiedSchema optional schema, serialized into table properties as JSON
   * @param partitionColumns    names of partitioning columns (must appear in the schema)
   * @param provider            data source provider class/short name
   * @param options             serde properties passed through to the data source
   * @param isExternal          when true the table is marked EXTERNAL in the metastore
   */
  def createDataSourceTable(
      tableName: String,
      userSpecifiedSchema: Option[StructType],
      partitionColumns: Array[String],
      provider: String,
      options: Map[String, String],
      isExternal: Boolean): Unit = {
    val (dbName, tblName) = processDatabaseAndTableName("default", tableName)
    val tableProperties = new scala.collection.mutable.HashMap[String, String]
    tableProperties.put("spark.sql.sources.provider", provider)

    // Saves optional user specified schema.  Serialized JSON schema string may be too long to be
    // stored into a single metastore SerDe property.  In this case, we split the JSON string and
    // store each part as a separate SerDe property.
    if (userSpecifiedSchema.isDefined) {
      val threshold = conf.schemaStringLengthThreshold
      val schemaJsonString = userSpecifiedSchema.get.json
      // Split the JSON string.
      val parts = schemaJsonString.grouped(threshold).toSeq
      tableProperties.put("spark.sql.sources.schema.numParts", parts.size.toString)
      parts.zipWithIndex.foreach { case (part, index) =>
        tableProperties.put(s"spark.sql.sources.schema.part.$index", part)
      }
    }

    val metastorePartitionColumns = userSpecifiedSchema.map { schema =>
      val fields = partitionColumns.map(col => schema(col))
      fields.map { field =>
        HiveColumn(
          name = field.name,
          hiveType = HiveMetastoreTypes.toMetastoreType(field.dataType),
          comment = "")
      }.toSeq
    }.getOrElse {
      if (partitionColumns.length > 0) {
        // The table does not have a specified schema, which means that the schema will be inferred
        // when we load the table. So, we are not expecting partition columns and we will discover
        // partitions when we load the table. However, if there are specified partition columns,
        // we simplily ignore them and provide a warning message..
        logWarning(
          s"The schema and partitions of table $tableName will be inferred when it is loaded. " +
            s"Specified partition columns (${partitionColumns.mkString(",")}) will be ignored.")
      }
      Seq.empty[HiveColumn]
    }

    val tableType = if (isExternal) {
      tableProperties.put("EXTERNAL", "TRUE")
      ExternalTable
    } else {
      tableProperties.put("EXTERNAL", "FALSE")
      ManagedTable
    }

    client.createTable(
      HiveTable(
        specifiedDatabase = Option(dbName),
        name = tblName,
        schema = Seq.empty,
        partitionColumns = metastorePartitionColumns,
        tableType = tableType,
        properties = tableProperties.toMap,
        serdeProperties = options))
  }
  /** Default warehouse location for a table: <current database location>/<lower-cased name>. */
  def hiveDefaultTableFilePath(tableName: String): String = {
    // Code based on: hiveWarehouse.getTablePath(currentDatabase, tableName)
    new Path(
      new Path(client.getDatabase(client.currentDatabase).location),
      tableName.toLowerCase).toString
  }
def tableExists(tableIdentifier: Seq[String]): Boolean = {
val tableIdent = processTableIdentifier(tableIdentifier)
val databaseName =
tableIdent
.lift(tableIdent.size - 2)
.getOrElse(client.currentDatabase)
val tblName = tableIdent.last
client.getTableOption(databaseName, tblName).isDefined
}
  /**
   * Resolves a table identifier to a logical plan: a cached data source relation for
   * tables created with USING, the expanded plan for Hive views, or a MetastoreRelation
   * for plain Hive tables. An alias, when given, wraps the result in a Subquery.
   */
  def lookupRelation(
      tableIdentifier: Seq[String],
      alias: Option[String]): LogicalPlan = {
    val tableIdent = processTableIdentifier(tableIdentifier)
    val databaseName = tableIdent.lift(tableIdent.size - 2).getOrElse(
      client.currentDatabase)
    val tblName = tableIdent.last
    val table = client.getTable(databaseName, tblName)

    if (table.properties.get("spark.sql.sources.provider").isDefined) {
      val dataSourceTable =
        cachedDataSourceTables(QualifiedTableName(databaseName, tblName).toLowerCase)
      // Then, if alias is specified, wrap the table with a Subquery using the alias.
      // Otherwise, wrap the table with a Subquery using the table name.
      val withAlias =
        alias.map(a => Subquery(a, dataSourceTable)).getOrElse(
          Subquery(tableIdent.last, dataSourceTable))

      withAlias
    } else if (table.tableType == VirtualView) {
      val viewText = table.viewText.getOrElse(sys.error("Invalid view without text."))
      alias match {
        // because hive use things like `_c0` to build the expanded text
        // currently we cannot support view from "create view v1(c1) as ..."
        case None => Subquery(table.name, HiveQl.createPlan(viewText))
        case Some(aliasText) => Subquery(aliasText, HiveQl.createPlan(viewText))
      }
    } else {
      MetastoreRelation(databaseName, tblName, alias)(table)(hive)
    }
  }
  /**
   * Converts a Hive Parquet MetastoreRelation into a native ParquetRelation2, reusing a
   * previously converted relation from the cache when its paths, schema and partition
   * spec still match the metastore's view of the table.
   */
  private def convertToParquetRelation(metastoreRelation: MetastoreRelation): LogicalRelation = {
    val metastoreSchema = StructType.fromAttributes(metastoreRelation.output)
    val mergeSchema = hive.convertMetastoreParquetWithSchemaMerging

    // NOTE: Instead of passing Metastore schema directly to `ParquetRelation2`, we have to
    // serialize the Metastore schema to JSON and pass it as a data source option because of the
    // evil case insensitivity issue, which is reconciled within `ParquetRelation2`.
    val parquetOptions = Map(
      ParquetRelation2.METASTORE_SCHEMA -> metastoreSchema.json,
      ParquetRelation2.MERGE_SCHEMA -> mergeSchema.toString)
    val tableIdentifier =
      QualifiedTableName(metastoreRelation.databaseName, metastoreRelation.tableName)

    // Returns the cached converted relation only when it is still consistent with the
    // metastore (same paths, compatible schema, same partition spec); otherwise evicts it.
    def getCached(
        tableIdentifier: QualifiedTableName,
        pathsInMetastore: Seq[String],
        schemaInMetastore: StructType,
        partitionSpecInMetastore: Option[PartitionSpec]): Option[LogicalRelation] = {
      cachedDataSourceTables.getIfPresent(tableIdentifier) match {
        case null => None // Cache miss
        case logical@LogicalRelation(parquetRelation: ParquetRelation2) =>
          // If we have the same paths, same schema, and same partition spec,
          // we will use the cached Parquet Relation.
          val useCached =
            parquetRelation.paths.toSet == pathsInMetastore.toSet &&
            logical.schema.sameType(metastoreSchema) &&
            parquetRelation.partitionSpec == partitionSpecInMetastore.getOrElse {
              PartitionSpec(StructType(Nil), Array.empty[sources.Partition])
            }

          if (useCached) {
            Some(logical)
          } else {
            // If the cached relation is not updated, we invalidate it right away.
            cachedDataSourceTables.invalidate(tableIdentifier)
            None
          }
        case other =>
          logWarning(
            s"${metastoreRelation.databaseName}.${metastoreRelation.tableName} should be stored " +
              s"as Parquet. However, we are getting a $other from the metastore cache. " +
              s"This cached entry will be invalidated.")
          cachedDataSourceTables.invalidate(tableIdentifier)
          None
      }
    }

    val result = if (metastoreRelation.hiveQlTable.isPartitioned) {
      val partitionSchema = StructType.fromAttributes(metastoreRelation.partitionKeys)
      val partitionColumnDataTypes = partitionSchema.map(_.dataType)
      val partitions = metastoreRelation.hiveQlPartitions.map { p =>
        val location = p.getLocation
        // Partition values come back as raw strings; cast them to the partition column types.
        val values = Row.fromSeq(p.getValues.zip(partitionColumnDataTypes).map {
          case (rawValue, dataType) => Cast(Literal(rawValue), dataType).eval(null)
        })
        ParquetPartition(values, location)
      }
      val partitionSpec = PartitionSpec(partitionSchema, partitions)
      val paths = partitions.map(_.path)

      val cached = getCached(tableIdentifier, paths, metastoreSchema, Some(partitionSpec))
      val parquetRelation = cached.getOrElse {
        val created = LogicalRelation(
          new ParquetRelation2(
            paths.toArray, None, Some(partitionSpec), parquetOptions)(hive))
        cachedDataSourceTables.put(tableIdentifier, created)
        created
      }

      parquetRelation
    } else {
      val paths = Seq(metastoreRelation.hiveQlTable.getDataLocation.toString)

      val cached = getCached(tableIdentifier, paths, metastoreSchema, None)
      val parquetRelation = cached.getOrElse {
        val created = LogicalRelation(
          new ParquetRelation2(paths.toArray, None, None, parquetOptions)(hive))
        cachedDataSourceTables.put(tableIdentifier, created)
        created
      }

      parquetRelation
    }

    result.newInstance()
  }
override def getTables(databaseName: Option[String]): Seq[(String, Boolean)] = {
val db = databaseName.getOrElse(client.currentDatabase)
client.listTables(db).map(tableName => (tableName, false))
}
protected def processDatabaseAndTableName(
databaseName: Option[String],
tableName: String): (Option[String], String) = {
if (!caseSensitive) {
(databaseName.map(_.toLowerCase), tableName.toLowerCase)
} else {
(databaseName, tableName)
}
}
protected def processDatabaseAndTableName(
databaseName: String,
tableName: String): (String, String) = {
if (!caseSensitive) {
(databaseName.toLowerCase, tableName.toLowerCase)
} else {
(databaseName, tableName)
}
}
  /**
   * When scanning or writing to non-partitioned Metastore Parquet tables, convert them to Parquet
   * data source relations for better performance.
   *
   * This rule can be considered as [[HiveStrategies.ParquetConversion]] done right.
   */
  object ParquetConversions extends Rule[LogicalPlan] {
    override def apply(plan: LogicalPlan): LogicalPlan = {
      if (!plan.resolved) {
        return plan
      }

      // Collects all `MetastoreRelation`s which should be replaced
      val toBeReplaced = plan.collect {
        // Write path
        case InsertIntoTable(relation: MetastoreRelation, _, _, _, _)
            // Inserting into partitioned table is not supported in Parquet data source (yet).
            if !relation.hiveQlTable.isPartitioned &&
              hive.convertMetastoreParquet &&
              conf.parquetUseDataSourceApi &&
              relation.tableDesc.getSerdeClassName.toLowerCase.contains("parquet") =>
          val parquetRelation = convertToParquetRelation(relation)
          val attributedRewrites = relation.output.zip(parquetRelation.output)
          (relation, parquetRelation, attributedRewrites)

        // Write path
        case InsertIntoHiveTable(relation: MetastoreRelation, _, _, _, _)
          // Inserting into partitioned table is not supported in Parquet data source (yet).
          if !relation.hiveQlTable.isPartitioned &&
            hive.convertMetastoreParquet &&
            conf.parquetUseDataSourceApi &&
            relation.tableDesc.getSerdeClassName.toLowerCase.contains("parquet") =>
          val parquetRelation = convertToParquetRelation(relation)
          val attributedRewrites = relation.output.zip(parquetRelation.output)
          (relation, parquetRelation, attributedRewrites)

        // Read path
        case p @ PhysicalOperation(_, _, relation: MetastoreRelation)
            if hive.convertMetastoreParquet &&
              conf.parquetUseDataSourceApi &&
              relation.tableDesc.getSerdeClassName.toLowerCase.contains("parquet") =>
          val parquetRelation = convertToParquetRelation(relation)
          val attributedRewrites = relation.output.zip(parquetRelation.output)
          (relation, parquetRelation, attributedRewrites)
      }

      val relationMap = toBeReplaced.map(r => (r._1, r._2)).toMap
      val attributedRewrites = AttributeMap(toBeReplaced.map(_._3).fold(Nil)(_ ++: _))

      // Replaces all `MetastoreRelation`s with corresponding `ParquetRelation2`s, and fixes
      // attribute IDs referenced in other nodes.
      plan.transformUp {
        case r: MetastoreRelation if relationMap.contains(r) =>
          val parquetRelation = relationMap(r)
          val alias = r.alias.getOrElse(r.tableName)
          Subquery(alias, parquetRelation)

        case InsertIntoTable(r: MetastoreRelation, partition, child, overwrite, ifNotExists)
          if relationMap.contains(r) =>
          val parquetRelation = relationMap(r)
          InsertIntoTable(parquetRelation, partition, child, overwrite, ifNotExists)

        case InsertIntoHiveTable(r: MetastoreRelation, partition, child, overwrite, ifNotExists)
          if relationMap.contains(r) =>
          val parquetRelation = relationMap(r)
          InsertIntoTable(parquetRelation, partition, child, overwrite, ifNotExists)

        case other => other.transformExpressions {
          case a: Attribute if a.resolved => attributedRewrites.getOrElse(a, a)
        }
      }
    }
  }
  /**
   * Creates any tables required for query execution.
   * For example, because of a CREATE TABLE X AS statement.
   */
  object CreateTables extends Rule[LogicalPlan] {
    def apply(plan: LogicalPlan): LogicalPlan = plan transform {
      // Wait until children are resolved.
      case p: LogicalPlan if !p.childrenResolved => p
      case p: LogicalPlan if p.resolved => p
      case p @ CreateTableAsSelect(table, child, allowExisting) =>
        // When the CTAS target declares no columns, derive them from the child query's output.
        val schema = if (table.schema.size > 0) {
          table.schema
        } else {
          child.output.map {
            attr => new HiveColumn(
              attr.name,
              HiveMetastoreTypes.toMetastoreType(attr.dataType), null)
          }
        }

        val desc = table.copy(schema = schema)

        if (hive.convertCTAS && table.serde.isEmpty) {
          // Do the conversion when spark.sql.hive.convertCTAS is true and the query
          // does not specify any storage format (file format and storage handler).
          if (table.specifiedDatabase.isDefined) {
            throw new AnalysisException(
              "Cannot specify database name in a CTAS statement " +
                "when spark.sql.hive.convertCTAS is set to true.")
          }

          val mode = if (allowExisting) SaveMode.Ignore else SaveMode.ErrorIfExists
          CreateTableUsingAsSelect(
            desc.name,
            hive.conf.defaultDataSourceName,
            temporary = false,
            Array.empty[String],
            mode,
            options = Map.empty[String, String],
            child
          )
        } else {
          val desc = if (table.serde.isEmpty) {
            // add default serde
            table.copy(
              serde = Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
          } else {
            table
          }

          val (dbName, tblName) =
            processDatabaseAndTableName(
              desc.specifiedDatabase.getOrElse(client.currentDatabase), desc.name)

          execution.CreateTableAsSelect(
            desc.copy(
              specifiedDatabase = Some(dbName),
              name = tblName),
            child,
            allowExisting)
        }
    }
  }
  /**
   * Casts input data to correct data types according to table definition before inserting into
   * that table.
   */
  object PreInsertionCasts extends Rule[LogicalPlan] {
    def apply(plan: LogicalPlan): LogicalPlan = plan.transform {
      // Wait until children are resolved.
      case p: LogicalPlan if !p.childrenResolved => p

      case p @ InsertIntoTable(table: MetastoreRelation, _, child, _, _) =>
        castChildOutput(p, table, child)
    }

    // Aligns the child's output types with the table's column types, inserting Casts
    // only when the types actually differ.
    def castChildOutput(p: InsertIntoTable, table: MetastoreRelation, child: LogicalPlan)
      : LogicalPlan = {
      val childOutputDataTypes = child.output.map(_.dataType)
      val numDynamicPartitions = p.partition.values.count(_.isEmpty)
      val tableOutputDataTypes =
        (table.attributes ++ table.partitionKeys.takeRight(numDynamicPartitions))
          .take(child.output.length).map(_.dataType)

      if (childOutputDataTypes == tableOutputDataTypes) {
        InsertIntoHiveTable(table, p.partition, p.child, p.overwrite, p.ifNotExists)
      } else if (childOutputDataTypes.size == tableOutputDataTypes.size &&
        childOutputDataTypes.zip(tableOutputDataTypes)
          .forall { case (left, right) => left.sameType(right) }) {
        // If both types ignoring nullability of ArrayType, MapType, StructType are the same,
        // use InsertIntoHiveTable instead of InsertIntoTable.
        InsertIntoHiveTable(table, p.partition, p.child, p.overwrite, p.ifNotExists)
      } else {
        // Only do the casting when child output data types differ from table output data types.
        val castedChildOutput = child.output.zip(table.output).map {
          case (input, output) if input.dataType != output.dataType =>
            Alias(Cast(input, output.dataType), input.name)()
          case (input, _) => input
        }

        p.copy(child = logical.Project(castedChildOutput, child))
      }
    }
  }
/**
 * UNIMPLEMENTED: It needs to be decided how we will persist in-memory tables to the metastore.
 * For now, if this functionality is desired mix in the in-memory [[OverrideCatalog]].
 */
override def registerTable(tableIdentifier: Seq[String], plan: LogicalPlan): Unit = {
// Intentionally unsupported for the metastore-backed catalog; see scaladoc above.
throw new UnsupportedOperationException
}
/**
 * UNIMPLEMENTED: It needs to be decided how we will persist in-memory tables to the metastore.
 * For now, if this functionality is desired mix in the in-memory [[OverrideCatalog]].
 */
override def unregisterTable(tableIdentifier: Seq[String]): Unit = {
// Intentionally unsupported for the metastore-backed catalog; see scaladoc above.
throw new UnsupportedOperationException
}
// No-op: the metastore-backed catalog holds no in-memory tables to unregister.
override def unregisterAllTables(): Unit = {}
}
/**
 * A logical plan representing insertion into Hive table.
 * This plan ignores nullability of ArrayType, MapType, StructType unlike InsertIntoTable
 * because Hive table doesn't have nullability for ARRAY, MAP, STRUCT types.
 */
private[hive] case class InsertIntoHiveTable(
table: MetastoreRelation,
partition: Map[String, Option[String]],
child: LogicalPlan,
overwrite: Boolean,
ifNotExists: Boolean)
extends LogicalPlan {
override def children: Seq[LogicalPlan] = child :: Nil
override def output: Seq[Attribute] = child.output
// Partition entries with no value are dynamic partitions, resolved at write time.
val numDynamicPartitions = partition.values.count(_.isEmpty)
// This is the expected schema of the table prepared to be inserted into,
// including dynamic partition columns.
val tableOutput = table.attributes ++ table.partitionKeys.takeRight(numDynamicPartitions)
// Resolved when every child column's type matches the corresponding table column,
// comparing with sameType (i.e. ignoring nullability of complex types).
override lazy val resolved: Boolean = childrenResolved && child.output.zip(tableOutput).forall {
case (childAttr, tableAttr) => childAttr.dataType.sameType(tableAttr.dataType)
}
}
/**
 * A leaf relation backed by a table in the Hive metastore. Wraps the catalog's
 * [[HiveTable]] description and materializes the Hive QL `Table`/`Partition`
 * objects and Catalyst attributes derived from it.
 */
private[hive] case class MetastoreRelation
(databaseName: String, tableName: String, alias: Option[String])
(val table: HiveTable)
(@transient sqlContext: SQLContext)
extends LeafNode with MultiInstanceRelation {
self: Product =>
// Only the first (non-curried) parameter list participates in case-class equality,
// so equals/hashCode are overridden to also account for `output`.
override def equals(other: Any): Boolean = other match {
case relation: MetastoreRelation =>
databaseName == relation.databaseName &&
tableName == relation.tableName &&
alias == relation.alias &&
output == relation.output
case _ => false
}
override def hashCode(): Int = {
Objects.hashCode(databaseName, tableName, alias, output)
}
// Hive QL view of this table, rebuilt from the catalog description.
@transient val hiveQlTable: Table = {
// We start by constructing an API table as Hive performs several important transformations
// internally when converting an API table to a QL table.
val tTable = new org.apache.hadoop.hive.metastore.api.Table()
tTable.setTableName(table.name)
tTable.setDbName(table.database)
val tableParameters = new java.util.HashMap[String, String]()
tTable.setParameters(tableParameters)
table.properties.foreach { case (k, v) => tableParameters.put(k, v) }
tTable.setTableType(table.tableType.name)
val sd = new org.apache.hadoop.hive.metastore.api.StorageDescriptor()
tTable.setSd(sd)
sd.setCols(table.schema.map(c => new FieldSchema(c.name, c.hiveType, c.comment)))
tTable.setPartitionKeys(
table.partitionColumns.map(c => new FieldSchema(c.name, c.hiveType, c.comment)))
// Storage settings are optional in the catalog description.
table.location.foreach(sd.setLocation)
table.inputFormat.foreach(sd.setInputFormat)
table.outputFormat.foreach(sd.setOutputFormat)
val serdeInfo = new org.apache.hadoop.hive.metastore.api.SerDeInfo
sd.setSerdeInfo(serdeInfo)
table.serde.foreach(serdeInfo.setSerializationLib)
val serdeParameters = new java.util.HashMap[String, String]()
serdeInfo.setParameters(serdeParameters)
table.serdeProperties.foreach { case (k, v) => serdeParameters.put(k, v) }
new Table(tTable)
}
// All partitions of this table as Hive QL Partition objects.
// NOTE(review): table.getAllPartitions contacts the metastore - this is eager and
// may be expensive for heavily partitioned tables; confirm before reuse.
@transient val hiveQlPartitions: Seq[Partition] = table.getAllPartitions.map { p =>
val tPartition = new org.apache.hadoop.hive.metastore.api.Partition
tPartition.setDbName(databaseName)
tPartition.setTableName(tableName)
tPartition.setValues(p.values)
val sd = new org.apache.hadoop.hive.metastore.api.StorageDescriptor()
tPartition.setSd(sd)
sd.setCols(table.schema.map(c => new FieldSchema(c.name, c.hiveType, c.comment)))
sd.setLocation(p.storage.location)
sd.setInputFormat(p.storage.inputFormat)
sd.setOutputFormat(p.storage.outputFormat)
val serdeInfo = new org.apache.hadoop.hive.metastore.api.SerDeInfo
sd.setSerdeInfo(serdeInfo)
serdeInfo.setSerializationLib(p.storage.serde)
val serdeParameters = new java.util.HashMap[String, String]()
serdeInfo.setParameters(serdeParameters)
// Partition-level serde properties override table-level ones (inserted last).
table.serdeProperties.foreach { case (k, v) => serdeParameters.put(k, v) }
p.storage.serdeProperties.foreach { case (k, v) => serdeParameters.put(k, v) }
new Partition(hiveQlTable, tPartition)
}
// Size estimate used by the planner (e.g. to pick broadcast joins).
@transient override lazy val statistics: Statistics = Statistics(
sizeInBytes = {
val totalSize = hiveQlTable.getParameters.get(HiveShim.getStatsSetupConstTotalSize)
val rawDataSize = hiveQlTable.getParameters.get(HiveShim.getStatsSetupConstRawDataSize)
// TODO: check if this estimate is valid for tables after partition pruning.
// NOTE: getting `totalSize` directly from params is kind of hacky, but this should be
// relatively cheap if parameters for the table are populated into the metastore. An
// alternative would be going through Hadoop's FileSystem API, which can be expensive if a lot
// of RPCs are involved. Besides `totalSize`, there are also `numFiles`, `numRows`,
// `rawDataSize` keys (see StatsSetupConst in Hive) that we can look at in the future.
BigInt(
// When table is external,`totalSize` is always zero, which will influence join strategy
// so when `totalSize` is zero, use `rawDataSize` instead
// if the size is still less than zero, we use default size
Option(totalSize).map(_.toLong).filter(_ > 0)
.getOrElse(Option(rawDataSize).map(_.toLong).filter(_ > 0)
.getOrElse(sqlContext.conf.defaultSizeInBytes)))
}
)
/** Only compare database and tablename, not alias. */
override def sameResult(plan: LogicalPlan): Boolean = {
plan match {
case mr: MetastoreRelation =>
mr.databaseName == databaseName && mr.tableName == tableName
case _ => false
}
}
// Descriptor used when scanning this table (deserializer + input/output formats).
val tableDesc = HiveShim.getTableDesc(
Class.forName(
hiveQlTable.getSerializationLib,
true,
Utils.getContextOrSparkClassLoader).asInstanceOf[Class[Deserializer]],
hiveQlTable.getInputFormatClass,
// The class of table should be org.apache.hadoop.hive.ql.metadata.Table because
// getOutputFormatClass will use HiveFileFormatUtils.getOutputFormatSubstitute to
// substitute some output formats, e.g. substituting SequenceFileOutputFormat to
// HiveSequenceFileOutputFormat.
hiveQlTable.getOutputFormatClass,
hiveQlTable.getMetadata
)
// Converts a Hive column description into a Catalyst attribute, qualified by
// the alias (if any) or the table name.
implicit class SchemaAttribute(f: HiveColumn) {
def toAttribute: AttributeReference = AttributeReference(
f.name,
HiveMetastoreTypes.toDataType(f.hiveType),
// Since data can be dumped in randomly with no validation, everything is nullable.
nullable = true
)(qualifiers = Seq(alias.getOrElse(tableName)))
}
/** PartitionKey attributes */
val partitionKeys = table.partitionColumns.map(_.toAttribute)
/** Non-partitionKey attributes */
val attributes = table.schema.map(_.toAttribute)
// Data columns first, then partition columns - the order callers rely on.
val output = attributes ++ partitionKeys
/** An attribute map that can be used to lookup original attributes based on expression id. */
val attributeMap = AttributeMap(output.map(o => (o, o)))
/** An attribute map for determining the ordinal for non-partition columns. */
val columnOrdinals = AttributeMap(attributes.zipWithIndex)
// Fresh copy with new expression ids (required by MultiInstanceRelation).
override def newInstance(): MetastoreRelation = {
MetastoreRelation(databaseName, tableName, alias)(table)(sqlContext)
}
}
/** Conversions between Hive metastore type strings and Catalyst [[DataType]]s. */
private[hive] object HiveMetastoreTypes {
  /** Parses a Hive metastore type string (e.g. "array<int>") into a Catalyst type. */
  def toDataType(metastoreType: String): DataType = DataTypeParser.parse(metastoreType)

  /** Renders a Catalyst type as the equivalent Hive metastore type string. */
  def toMetastoreType(dt: DataType): String = dt match {
    // Primitive types map one-to-one onto Hive type names.
    case BooleanType => "boolean"
    case ByteType => "tinyint"
    case ShortType => "smallint"
    case IntegerType => "int"
    case LongType => "bigint"
    case FloatType => "float"
    case DoubleType => "double"
    case StringType => "string"
    case BinaryType => "binary"
    case DateType => "date"
    case TimestampType => "timestamp"
    case NullType => "void"
    case d: DecimalType => HiveShim.decimalMetastoreString(d)
    // Complex types are rendered recursively.
    case ArrayType(elementType, _) => s"array<${toMetastoreType(elementType)}>"
    case MapType(keyType, valueType, _) =>
      s"map<${toMetastoreType(keyType)},${toMetastoreType(valueType)}>"
    case StructType(fields) =>
      s"struct<${fields.map(f => s"${f.name}:${toMetastoreType(f.dataType)}").mkString(",")}>"
    // A UDT is stored as its underlying SQL type.
    case udt: UserDefinedType[_] => toMetastoreType(udt.sqlType)
  }
}
| andrewor14/iolap | sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala | Scala | apache-2.0 | 32,071 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.mesos
import java.util.{List => JList}
import java.util.concurrent.CountDownLatch
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.util.control.NonFatal
import com.google.common.base.Splitter
import org.apache.mesos.{MesosSchedulerDriver, Protos, Scheduler, SchedulerDriver}
import org.apache.mesos.Protos.{TaskState => MesosTaskState, _}
import org.apache.mesos.Protos.FrameworkInfo.Capability
import org.apache.mesos.protobuf.{ByteString, GeneratedMessage}
import org.apache.spark.{SparkConf, SparkContext, SparkException}
import org.apache.spark.TaskState
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.util.Utils
/**
* Shared trait for implementing a Mesos Scheduler. This holds common state and helper
* methods and Mesos scheduler will use.
*/
trait MesosSchedulerUtils extends Logging {
// Lock used to wait for scheduler to be registered
private final val registerLatch = new CountDownLatch(1)
// Driver for talking to Mesos
protected var mesosDriver: SchedulerDriver = null
/**
 * Creates a new MesosSchedulerDriver that communicates to the Mesos master.
 *
 * @param masterUrl The url to connect to Mesos master
 * @param scheduler the scheduler class to receive scheduler callbacks
 * @param sparkUser User to impersonate with when running tasks
 * @param appName The framework name to display on the Mesos UI
 * @param conf Spark configuration
 * @param webuiUrl The WebUI url to link from Mesos UI
 * @param checkpoint Option to checkpoint tasks for failover
 * @param failoverTimeout Duration Mesos master expect scheduler to reconnect on disconnect
 * @param frameworkId The id of the new framework
 */
protected def createSchedulerDriver(
masterUrl: String,
scheduler: Scheduler,
sparkUser: String,
appName: String,
conf: SparkConf,
webuiUrl: Option[String] = None,
checkpoint: Option[Boolean] = None,
failoverTimeout: Option[Double] = None,
frameworkId: Option[String] = None): SchedulerDriver = {
val fwInfoBuilder = FrameworkInfo.newBuilder().setUser(sparkUser).setName(appName)
val credBuilder = Credential.newBuilder()
// Optional framework settings are applied only when the caller supplied them.
webuiUrl.foreach { url => fwInfoBuilder.setWebuiUrl(url) }
checkpoint.foreach { checkpoint => fwInfoBuilder.setCheckpoint(checkpoint) }
failoverTimeout.foreach { timeout => fwInfoBuilder.setFailoverTimeout(timeout) }
frameworkId.foreach { id =>
fwInfoBuilder.setId(FrameworkID.newBuilder().setValue(id).build())
}
// The principal is set on both the framework info and the credential, so
// authentication and the framework identity agree.
conf.getOption("spark.mesos.principal").foreach { principal =>
fwInfoBuilder.setPrincipal(principal)
credBuilder.setPrincipal(principal)
}
conf.getOption("spark.mesos.secret").foreach { secret =>
credBuilder.setSecret(secret)
}
// A secret without a principal is a configuration error; fail fast.
if (credBuilder.hasSecret && !fwInfoBuilder.hasPrincipal) {
throw new SparkException(
"spark.mesos.principal must be configured when spark.mesos.secret is set")
}
conf.getOption("spark.mesos.role").foreach { role =>
fwInfoBuilder.setRole(role)
}
val maxGpus = conf.getInt("spark.mesos.gpus.max", 0)
if (maxGpus > 0) {
// Without the GPU_RESOURCES capability Mesos will not offer GPUs to us.
fwInfoBuilder.addCapabilities(Capability.newBuilder().setType(Capability.Type.GPU_RESOURCES))
}
// Only pass a credential to the driver when one was actually configured.
if (credBuilder.hasPrincipal) {
new MesosSchedulerDriver(
scheduler, fwInfoBuilder.build(), masterUrl, credBuilder.build())
} else {
new MesosSchedulerDriver(scheduler, fwInfoBuilder.build(), masterUrl)
}
}
/**
 * Starts the MesosSchedulerDriver and stores the current running driver to this new instance.
 * This driver is expected to not be running.
 * This method returns only after the scheduler has registered with Mesos.
 */
def startScheduler(newDriver: SchedulerDriver): Unit = {
synchronized {
// A driver is already running: just wait until its registration completes.
if (mesosDriver != null) {
registerLatch.await()
return
}
// Written by the driver thread below, read here after the latch opens.
@volatile
var error: Option[Exception] = None
// We create a new thread that will block inside `mesosDriver.run`
// until the scheduler exists
new Thread(Utils.getFormattedClassName(this) + "-mesos-driver") {
setDaemon(true)
override def run() {
try {
mesosDriver = newDriver
// run() blocks for the driver's lifetime; DRIVER_ABORTED signals failure.
val ret = mesosDriver.run()
logInfo("driver.run() returned with code " + ret)
if (ret != null && ret.equals(Status.DRIVER_ABORTED)) {
error = Some(new SparkException("Error starting driver, DRIVER_ABORTED"))
markErr()
}
} catch {
case e: Exception =>
logError("driver.run() failed", e)
error = Some(e)
markErr()
}
}
}.start()
// Block until markRegistered() or markErr() opens the latch.
registerLatch.await()
// propagate any error to the calling thread. This ensures that SparkContext creation fails
// without leaving a broken context that won't be able to schedule any tasks
error.foreach(throw _)
}
}
/** Sums the scalar value of every resource named `name` in the offer. */
def getResource(res: JList[Resource], name: String): Double = {
  // A resource can appear several times in one offer (once per role plus the
  // wildcard role), so all matching entries contribute to the total.
  res.asScala.foldLeft(0.0) { (total, resource) =>
    if (resource.getName == name) total + resource.getScalar.getValue else total
  }
}
/**
 * Transforms a range resource to a list of ranges
 *
 * @param res the mesos resource list
 * @param name the name of the resource
 * @return the list of ranges returned
 */
protected def getRangeResource(res: JList[Resource], name: String): List[(Long, Long)] = {
  // A resource can appear multiple times in the offer (specific role and
  // wildcard), so ranges are collected across every matching entry.
  for {
    resource <- res.asScala.toList if resource.getName == name
    range <- resource.getRanges.getRangeList.asScala
  } yield (range.getBegin, range.getEnd)
}
/**
 * Signal that the scheduler has registered with Mesos.
 */
protected def markRegistered(): Unit = {
// Releases every thread blocked in startScheduler().
registerLatch.countDown()
}
// Signal that registration failed; wakes startScheduler() so it can surface the error.
protected def markErr(): Unit = {
registerLatch.countDown()
}
/** Builds a scalar Mesos resource, optionally tagged with a role. */
def createResource(name: String, amount: Double, role: Option[String] = None): Resource = {
  val scalar = Value.Scalar.newBuilder().setValue(amount).build()
  val builder = Resource.newBuilder()
    .setName(name)
    .setType(Value.Type.SCALAR)
    .setScalar(scalar)
  role.foreach(builder.setRole)
  builder.build()
}
/**
 * Partition the existing set of resources into two groups, those remaining to be
 * scheduled and those requested to be used for a new task.
 *
 * @param resources The full list of available resources
 * @param resourceName The name of the resource to take from the available resources
 * @param amountToUse The amount of resources to take from the available resources
 * @return The remaining resources list and the used resources list.
 */
def partitionResources(
    resources: JList[Resource],
    resourceName: String,
    amountToUse: Double): (List[Resource], List[Resource]) = {
  // Amount still to be satisfied; decremented as matching resources are consumed.
  var remain = amountToUse
  // `val` suffices: the buffer is mutated, the reference never reassigned.
  val requestedResources = new ArrayBuffer[Resource]
  val remainingResources = resources.asScala.map { r =>
    if (remain > 0 &&
      r.getType == Value.Type.SCALAR &&
      r.getScalar.getValue > 0.0 &&
      r.getName == resourceName) {
      val usage = Math.min(remain, r.getScalar.getValue)
      // Take the usable part under the same role...
      requestedResources += createResource(resourceName, usage, Some(r.getRole))
      remain -= usage
      // ...and hand back whatever is left of this resource entry.
      createResource(resourceName, r.getScalar.getValue - usage, Some(r.getRole))
    } else {
      r
    }
  }
  // Filter any resource that has depleted.
  val filteredResources =
    remainingResources.filter(r => r.getType != Value.Type.SCALAR || r.getScalar.getValue > 0.0)
  (filteredResources.toList, requestedResources.toList)
}
/** Helper method to get the key,value-set pair for a Mesos Attribute protobuf */
protected def getAttribute(attr: Attribute): (String, Set[String]) = {
  // Text attribute values are comma-separated lists of accepted values.
  val values = attr.getText.getValue.split(',').toSet
  attr.getName -> values
}
/**
 * Build a Mesos scalar resource protobuf object with no explicit role.
 * Delegates to the role-aware overload to avoid duplicating the builder logic.
 */
protected def createResource(resourceName: String, quantity: Double): Protos.Resource =
  createResource(resourceName, quantity, None)
/**
 * Converts the attributes from the resource offer into a Map of name -> Attribute Value
 * The attribute values are the mesos attribute types and they are
 *
 * @param offerAttributes the attributes offered
 * @return a map from attribute name to its typed protobuf payload
 */
protected def toAttributeMap(offerAttributes: JList[Attribute]): Map[String, GeneratedMessage] = {
offerAttributes.asScala.map { attr =>
// Unwrap the typed payload. NOTE(review): this match is non-exhaustive - an
// attribute with any other Value.Type would throw a MatchError here.
val attrValue = attr.getType match {
case Value.Type.SCALAR => attr.getScalar
case Value.Type.RANGES => attr.getRanges
case Value.Type.SET => attr.getSet
case Value.Type.TEXT => attr.getText
}
(attr.getName, attrValue)
}.toMap
}
/**
 * Match the requirements (if any) to the offer attributes.
 * if attribute requirements are not specified - return true
 * else if attribute is defined and no values are given, simple attribute presence is performed
 * else if attribute name and value is specified, subset match is performed on slave attributes
 *
 * @param slaveOfferConstraints parsed constraints (see parseConstraintString)
 * @param offerAttributes attributes of the offer, keyed by name (see toAttributeMap)
 */
def matchesAttributeRequirements(
slaveOfferConstraints: Map[String, Set[String]],
offerAttributes: Map[String, GeneratedMessage]): Boolean = {
// Every constraint must be satisfied; an empty constraint map is trivially true.
slaveOfferConstraints.forall {
// offer has the required attribute and subsumes the required values for that attribute
case (name, requiredValues) =>
offerAttributes.get(name) match {
// Constrained attribute missing from the offer: reject.
case None => false
case Some(_) if requiredValues.isEmpty => true // empty value matches presence
case Some(scalarValue: Value.Scalar) =>
// check if provided values is less than equal to the offered values
requiredValues.map(_.toDouble).exists(_ <= scalarValue.getValue)
case Some(rangeValue: Value.Range) =>
val offerRange = rangeValue.getBegin to rangeValue.getEnd
// Check if there is some required value that is between the ranges specified
// Note: We only support the ability to specify discrete values, in the future
// we may expand it to subsume ranges specified with a XX..YY value or something
// similar to that.
requiredValues.map(_.toLong).exists(offerRange.contains(_))
case Some(offeredValue: Value.Set) =>
// check if the specified required values is a subset of offered set
requiredValues.subsetOf(offeredValue.getItemList.asScala.toSet)
case Some(textValue: Value.Text) =>
// check if the specified value is equal, if multiple values are specified
// we succeed if any of them match.
requiredValues.contains(textValue.getValue)
}
}
}
/**
 * Parses the attributes constraints provided to spark and build a matching data struct:
 * Map[<attribute-name>, Set[values-to-match]]
 * The constraints are specified as ';' separated key-value pairs where keys and values
 * are separated by ':'. The ':' implies equality (for singular values) and "is one of" for
 * multiple values (comma separated). For example:
 * {{{
 * parseConstraintString("os:centos7;zone:us-east-1a,us-east-1b")
 * // would result in
 * <code>
 * Map(
 * "os" -> Set("centos7"),
 * "zone": -> Set("us-east-1a", "us-east-1b")
 * )
 * }}}
 *
 * Mesos documentation: http://mesos.apache.org/documentation/attributes-resources/
 * https://github.com/apache/mesos/blob/master/src/common/values.cpp
 * https://github.com/apache/mesos/blob/master/src/common/attributes.cpp
 *
 * @param constraintsVal constraints string consisting of ';' separated key-value pairs
 *                       (separated by ':')
 * @return Map of constraints to match resources offers.
 */
def parseConstraintString(constraintsVal: String): Map[String, Set[String]] = {
  /*
    Based on mesos docs:
    attributes : attribute ( ";" attribute )*
    attribute : labelString ":" ( labelString | "," )+
    labelString : [a-zA-Z0-9_/.-]
  */
  val splitter = Splitter.on(';').trimResults().withKeyValueSeparator(':')
  // kv splitter
  if (constraintsVal.isEmpty) {
    Map()
  } else {
    try {
      // Use a strict `map` rather than `mapValues`: `mapValues` returns a lazy view
      // that re-evaluates the function on every lookup and is not serializable.
      splitter.split(constraintsVal).asScala.toMap.map { case (k, v) =>
        if (v == null || v.isEmpty) {
          // Bare attribute name: presence-only constraint.
          k -> Set.empty[String]
        } else {
          k -> v.split(',').toSet
        }
      }
    } catch {
      case NonFatal(e) =>
        throw new IllegalArgumentException(s"Bad constraint string: $constraintsVal", e)
    }
  }
}
// These defaults copied from YARN
private val MEMORY_OVERHEAD_FRACTION = 0.10
private val MEMORY_OVERHEAD_MINIMUM = 384
/**
 * Return the amount of memory to allocate to each executor, taking into account
 * container overheads.
 *
 * @param sc SparkContext to use to get `spark.mesos.executor.memoryOverhead` value
 * @return executor memory plus overhead (in MB); the overhead is
 *         `spark.mesos.executor.memoryOverhead` when set, otherwise
 *         max(MEMORY_OVERHEAD_FRACTION * executorMemory, MEMORY_OVERHEAD_MINIMUM)
 */
def executorMemory(sc: SparkContext): Int = {
sc.conf.getInt("spark.mesos.executor.memoryOverhead",
math.max(MEMORY_OVERHEAD_FRACTION * sc.executorMemory, MEMORY_OVERHEAD_MINIMUM).toInt) +
sc.executorMemory
}
/** Adds each comma-separated URI in `uris` to the command's fetch list. */
def setupUris(uris: String, builder: CommandInfo.Builder): Unit = {
  for (uri <- uris.split(",")) {
    builder.addUris(CommandInfo.URI.newBuilder().setValue(uri.trim()))
  }
}
// Seconds to decline offers that fail the attribute constraints (default 120s).
protected def getRejectOfferDurationForUnmetConstraints(sc: SparkContext): Long = {
sc.conf.getTimeAsSeconds("spark.mesos.rejectOfferDurationForUnmetConstraints", "120s")
}
// Seconds to decline offers once the framework has reached its max cores (default 120s).
protected def getRejectOfferDurationForReachedMaxCores(sc: SparkContext): Long = {
sc.conf.getTimeAsSeconds("spark.mesos.rejectOfferDurationForReachedMaxCores", "120s")
}
/**
 * Checks executor ports if they are within some range of the offered list of ports ranges,
 *
 * @param conf the Spark Config
 * @param ports the list of ports to check
 * @return true if ports are within range false otherwise
 */
protected def checkPorts(conf: SparkConf, ports: List[(Long, Long)]): Boolean = {
  // True when `port` falls inside at least one offered [start, end] range.
  // Uses short-circuiting `&&` (the original's `&` evaluated both sides).
  def checkIfInRange(port: Long, ps: List[(Long, Long)]): Boolean = {
    ps.exists { case (rangeStart, rangeEnd) => rangeStart <= port && port <= rangeEnd }
  }

  val portsToCheck = nonZeroPortValuesFromConfig(conf)
  val withinRange = portsToCheck.forall(p => checkIfInRange(p, ports))
  // make sure we have enough ports to allocate per offer
  val enoughPorts =
    ports.map { case (rangeStart, rangeEnd) => rangeEnd - rangeStart + 1 }.sum >= portsToCheck.size
  enoughPorts && withinRange
}
/**
 * Partitions port resources.
 *
 * @param requestedPorts non-zero ports to assign
 * @param offeredResources the resources offered
 * @return resources left, port resources to be used.
 */
def partitionPortResources(requestedPorts: List[Long], offeredResources: List[Resource])
: (List[Resource], List[Resource]) = {
if (requestedPorts.isEmpty) {
// Nothing to reserve: every offered resource is left untouched.
(offeredResources, List[Resource]())
} else {
// partition port offers
val (resourcesWithoutPorts, portResources) = filterPortResources(offeredResources)
// Pair each requested port with the role of the offered range that covers it.
val portsAndRoles = requestedPorts.
map(x => (x, findPortAndGetAssignedRangeRole(x, portResources)))
val assignedPortResources = createResourcesFromPorts(portsAndRoles)
// ignore non-assigned port resources, they will be declined implicitly by mesos
// no need for splitting port resources.
(resourcesWithoutPorts, assignedPortResources)
}
}
// Port settings Spark manages explicitly and must therefore reserve from offers.
val managedPortNames = List("spark.executor.port", BLOCK_MANAGER_PORT.key)
/**
 * The values of the non-zero ports to be used by the executor process.
 * @param conf the spark config to use
 * @return the non-zero values of the ports
 */
def nonZeroPortValuesFromConfig(conf: SparkConf): List[Long] = {
// A configured value of 0 means "any port" and needs no explicit reservation.
managedPortNames.map(conf.getLong(_, 0)).filter( _ != 0)
}
/** Creates a mesos resource for a specific port number. */
private def createResourcesFromPorts(portsAndRoles: List[(Long, String)]): List[Resource] = {
  // Each port becomes a degenerate one-element range under its assigned role.
  for {
    (port, role) <- portsAndRoles
    resource <- createMesosPortResource(List((port, port)), Some(role))
  } yield resource
}
/** Helper to create mesos resources for specific port ranges. */
private def createMesosPortResource(
    ranges: List[(Long, Long)],
    role: Option[String] = None): List[Resource] = {
  ranges.map { case (begin, end) =>
    val range = Value.Range.newBuilder()
      .setBegin(begin)
      .setEnd(end)
    val resource = Resource.newBuilder()
      .setName("ports")
      .setType(Value.Type.RANGES)
      .setRanges(Value.Ranges.newBuilder().addRange(range))
    role.foreach(resource.setRole)
    resource.build()
  }
}
/**
 * Helper to assign a port to an offered range and get the latter's role
 * info to use it later on.
 */
private def findPortAndGetAssignedRangeRole(port: Long, portResources: List[Resource])
  : String = {
  // (role, list of offered [begin, end] ranges) per port resource.
  val rolesToRanges = portResources.map { resource =>
    (resource.getRole, resource.getRanges.getRangeList.asScala
      .map(r => (r.getBegin, r.getEnd)).toList)
  }
  // First role whose ranges cover the requested port.
  val assignedRole = rolesToRanges.collectFirst {
    case (role, rangeList) if rangeList.exists {
      case (rangeStart, rangeEnd) => rangeStart <= port && port <= rangeEnd
    } => role
  }
  // this is safe since we have previously checked about the ranges (see checkPorts method)
  assignedRole.get
}
/** Retrieves the port resources from a list of mesos offered resources */
private def filterPortResources(resources: List[Resource]): (List[Resource], List[Resource]) = {
  // Split on "is a port range resource", then return (everything else, ports).
  val (portResources, otherResources) = resources.partition { r =>
    r.getType == Value.Type.RANGES && r.getName == "ports"
  }
  (otherResources, portResources)
}
/**
 * spark.mesos.driver.frameworkId is set by the cluster dispatcher to correlate driver
 * submissions with frameworkIDs. However, this causes issues when a driver process launches
 * more than one framework (more than one SparkContext), because they all try to register with
 * the same frameworkID. To enforce that only the first driver registers with the configured
 * framework ID, the driver calls this method after the first registration.
 */
def unsetFrameworkID(sc: SparkContext) {
// Clear both the SparkConf entry and its JVM system-property copy.
sc.conf.remove("spark.mesos.driver.frameworkId")
System.clearProperty("spark.mesos.driver.frameworkId")
}
// Maps a Mesos task state onto Spark's coarser TaskState.
// NOTE(review): non-exhaustive - a Mesos state not listed here throws MatchError.
def mesosToTaskState(state: MesosTaskState): TaskState.TaskState = state match {
case MesosTaskState.TASK_STAGING | MesosTaskState.TASK_STARTING => TaskState.LAUNCHING
case MesosTaskState.TASK_RUNNING | MesosTaskState.TASK_KILLING => TaskState.RUNNING
case MesosTaskState.TASK_FINISHED => TaskState.FINISHED
case MesosTaskState.TASK_FAILED => TaskState.FAILED
case MesosTaskState.TASK_KILLED => TaskState.KILLED
case MesosTaskState.TASK_LOST | MesosTaskState.TASK_ERROR => TaskState.LOST
}
// Maps Spark's TaskState back to a Mesos task state. Not a strict inverse of
// mesosToTaskState: states that Mesos splits (e.g. STAGING/STARTING) collapse one way.
def taskStateToMesos(state: TaskState.TaskState): MesosTaskState = state match {
case TaskState.LAUNCHING => MesosTaskState.TASK_STARTING
case TaskState.RUNNING => MesosTaskState.TASK_RUNNING
case TaskState.FINISHED => MesosTaskState.TASK_FINISHED
case TaskState.FAILED => MesosTaskState.TASK_FAILED
case TaskState.KILLED => MesosTaskState.TASK_KILLED
case TaskState.LOST => MesosTaskState.TASK_LOST
}
}
| likithkailas/StreamingSystems | mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala | Scala | apache-2.0 | 20,677 |
package controllers
import dao.{AnnotationDao, AuthorityDao}
import database.{AnnotationDb, AnnotationTable}
import models.AnnotationLike.{Annotation, AnnotationAtom}
import models.{AnnotationLike, AnnotationProtocol, User}
import org.joda.time.DateTime
import play.api.libs.functional.syntax._
import play.api.libs.json.{Json, Reads, Writes, _}
import play.api.mvc.ControllerComponents
import security.LWMRole.{CourseAssistant, CourseEmployee, CourseManager, StudentRole}
import security.SecurityActionChain
import java.util.UUID
import javax.inject.Inject
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}
/** Query-attribute names and JSON codecs shared by [[AnnotationController]]. */
object AnnotationController {
// Query-string attribute names understood by makeTableFilter.
lazy val courseAttribute = "course"
lazy val labworkAttribute = "labwork"
lazy val studentAttribute = "student"
lazy val systemIdAttribute = "systemId"
lazy val authorAttribute = "author"
lazy val reportCardEntryAttribute = "reportCardEntry"
// JSON writer for a plain Annotation (author referenced by UUID).
implicit def annotationWrites: Writes[Annotation] = (
(JsPath \ "reportCardEntry").write[UUID] and
(JsPath \ "author").write[UUID] and
(JsPath \ "message").write[String] and
(JsPath \ "lastModified").write[DateTime](utils.date.DateTimeJsonFormatter.writeDateTime) and
(JsPath \ "id").write[UUID]
) (unlift(Annotation.unapply))
// JSON writer for the atomic variant (author embedded as a full User object).
implicit def annotationAtomWrites: Writes[AnnotationAtom] = (
(JsPath \ "reportCardEntry").write[UUID] and
(JsPath \ "author").write[User](User.writes) and
(JsPath \ "message").write[String] and
(JsPath \ "lastModified").write[DateTime](utils.date.DateTimeJsonFormatter.writeDateTime) and
(JsPath \ "id").write[UUID]
) (unlift(AnnotationAtom.unapply))
// Dispatches to the concrete writer based on the runtime representation.
implicit def annotationLikeWrites: Writes[AnnotationLike] = {
case normal: Annotation => Json.toJson(normal)(annotationWrites)
case atom: AnnotationAtom => Json.toJson(atom)(annotationAtomWrites)
}
// JSON reader for create/update request bodies.
implicit def protocolReads: Reads[AnnotationProtocol] = Json.reads[AnnotationProtocol]
}
/**
 * CRUD controller for annotations attached to report card entries.
 * Course-scoped routes are secured per course; students can only list
 * their own annotations via [[forStudent]].
 */
class AnnotationController @Inject()(
cc: ControllerComponents,
val abstractDao: AnnotationDao,
val authorityDao: AuthorityDao,
val securedAction: SecurityActionChain,
implicit val ctx: ExecutionContext
) extends AbstractCRUDController[AnnotationProtocol, AnnotationTable, AnnotationDb, AnnotationLike](cc) {
import AnnotationController._
override protected implicit val writes: Writes[AnnotationLike] = annotationLikeWrites
override protected implicit val reads: Reads[AnnotationProtocol] = protocolReads
// Converts a protocol object to a database row; a fresh UUID is generated on
// create, the existing one is reused on update.
override protected def toDbModel(protocol: AnnotationProtocol, existingId: Option[UUID]) = AnnotationDb(
protocol.reportCardEntry,
protocol.author,
protocol.message,
id = existingId getOrElse UUID.randomUUID
)
// Course-scoped routes are open to assistants, employees and managers of the course.
override protected def restrictedContext(restrictionId: String) = {
case _ => SecureBlock(restrictionId, List(CourseAssistant, CourseEmployee, CourseManager))
}
// Unscoped access: students may only list annotations (used by forStudent).
override protected def contextFrom = {
case GetAll => PartialSecureBlock(List(StudentRole))
}
// Lists the requesting student's own annotations for the given labwork.
// NOTE(review): assumes request.systemId carries the authenticated user's system id;
// confirm against SecurityActionChain.
def forStudent(labwork: String) = contextFrom(GetAll) asyncAction { request =>
import utils.Ops.OptionOps
for {
systemId <- Future.fromTry(request.systemId.toTry("No User ID found in request"))
annotations <- all(NonSecureBlock)(request.appending(systemIdAttribute -> Seq(systemId), labworkAttribute -> Seq(labwork)))
} yield annotations
}
// Course-scoped CRUD wrappers: security is enforced here, then the generic
// handler runs with NonSecureBlock and the course pinned as a filter.
def allFrom(course: String) = restrictedContext(course)(GetAll) asyncAction { request =>
all(NonSecureBlock)(request.appending(courseAttribute -> Seq(course)))
}
def countFrom(course: String) = restrictedContext(course)(GetAll) asyncAction { request =>
count(NonSecureBlock)(request.appending(courseAttribute -> Seq(course)))
}
def getFrom(course: String, id: String) = restrictedContext(course)(Get) asyncAction { request =>
get(id, NonSecureBlock)(request)
}
def createFrom(course: String) = restrictedContext(course)(Create) asyncAction { request =>
create(NonSecureBlock)(request)
}
def updateFrom(course: String, id: String) = restrictedContext(course)(Update) asyncAction { request =>
update(id, NonSecureBlock)(request)
}
def invalidateFrom(course: String, id: String) = restrictedContext(course)(Delete) asyncAction { request =>
invalidate(id, NonSecureBlock)(request)
}
// Maps a query-string attribute to a table filter; unknown attributes fail the request.
override protected def makeTableFilter(attribute: String, value: String) = {
import dao.helper.TableFilter._
(attribute, value) match {
case (`reportCardEntryAttribute`, reportCardEntry) => reportCardEntry.uuid map reportCardEntryFilter
case (`courseAttribute`, course) => course.uuid map courseByReportCardEntryFilter
case (`labworkAttribute`, labwork) => labwork.uuid map labworkByReportCardEntryFilter
case (`studentAttribute`, student) => student.uuid map userByReportCardEntryFilter
case (`systemIdAttribute`, systemId) => Success(systemIdByReportCardEntryFilter(systemId))
case (`authorAttribute`, author) => author.uuid map userFilter
case _ => Failure(new Throwable(s"Unknown attribute $attribute"))
}
}
}
| THK-ADV/lwm-reloaded | app/controllers/AnnotationController.scala | Scala | mit | 5,092 |
/*
* Copyright 2014β2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.contrib.matryoshka
import slamdata.Predef._
import scalaz._
/** Calculates the width of a typelevel union (coproduct). */
sealed abstract class UnionWidth[F[_]] {
  // Number of leaf functors making up the union represented by `F`.
  val width: Int
}
// Companion extends the instance hierarchy so instances are found via implicit scope.
object UnionWidth extends UWidthInstances
sealed abstract class UWidthInstances extends UWidthInstances0 {

  /** The width of a coproduct is the sum of the widths of its two branches. */
  implicit def coproductUWidth[F[_], G[_]](implicit F: UnionWidth[F], G: UnionWidth[G])
      : UnionWidth[Coproduct[F, G, ?]] =
    new UnionWidth[Coproduct[F, G, ?]] { val width: Int = F.width + G.width }
}
sealed abstract class UWidthInstances0 {

  /** Fallback instance: any single functor contributes a width of one. */
  implicit def defaultUWidth[F[_]]: UnionWidth[F] =
    new UnionWidth[F] { val width: Int = 1 }
}
| drostron/quasar | foundation/src/test/scala/quasar/contrib/matryoshka/UnionWidth.scala | Scala | apache-2.0 | 1,273 |
/*
* Copyright (c) 2015 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.config
import akka.actor.{ActorRef, ActorSystem}

import com.snowplowanalytics.actors.routes.ApiRouterActor
import com.snowplowanalytics.actors.routes.EventRoute
/**
 * Factory for [[ActorServiceSystem]] instances.
 */
object ActorServiceSystem {
  // Each call creates a fresh wrapper, and with it a new ActorSystem.
  def apply(): ActorServiceSystem = new ActorServiceSystem()
}
/**
 * Defines an actor system with the actors used by
 * the spray-events application.
 *
 * The [[ActorSystem]] is created eagerly on construction; the actors
 * themselves are created lazily, on first access.
 */
class ActorServiceSystem {
  // Explicit result type: implicit members should always declare their type
  // (required in Scala 3, and avoids inference surprises in Scala 2).
  implicit val system: ActorSystem = ActorSystem("event")

  // `apiRouterActor` depends on `eventRoute`, so accessing it also forces `eventRoute`.
  lazy val eventRoute: ActorRef = system.actorOf(EventRoute.props, "event-route")
  lazy val apiRouterActor: ActorRef = system.actorOf(ApiRouterActor.props(eventRoute), "api-router")
}
| snowplow/icebucket | 3-query-engines/scala-query-engine/src/main/scala/com/snowplowanalytics/config/ActorServiceSystem.scala | Scala | apache-2.0 | 1,357 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import scala.reflect.ClassTag
import org.apache.spark.{Logging, RangePartitioner}
/**
 * Extra functions available on RDDs of (key, value) pairs where the key is sortable through
 * an implicit conversion. Import `org.apache.spark.SparkContext._` at the top of your program to
 * use these functions. They will work with any key type that has a `scala.math.Ordered`
 * implementation.
 */
class OrderedRDDFunctions[K <% Ordered[K]: ClassTag,
                          V: ClassTag,
                          P <: Product2[K, V] : ClassTag](
    self: RDD[P])
  extends Logging with Serializable {

  /**
   * Sort the RDD by key, so that each partition contains a sorted range of the elements. Calling
   * `collect` or `save` on the resulting RDD will return or output an ordered list of records
   * (in the `save` case, they will be written to multiple `part-X` files in the filesystem, in
   * order of the keys).
   */
  def sortByKey(ascending: Boolean = true, numPartitions: Int = self.partitions.size): RDD[P] = {
    // Range-partition the records so partition i holds keys ordered before those in partition i + 1.
    val rangePartitioned = new ShuffledRDD[K, V, P](self, new RangePartitioner(numPartitions, self, ascending))
    rangePartitioned.mapPartitions({ iter =>
      // Pick the comparator once per partition, then sort the partition locally in memory.
      val lessThan: (P, P) => Boolean =
        if (ascending) (x, y) => x._1 < y._1
        else (x, y) => x._1 > y._1
      iter.toArray.sortWith(lessThan).iterator
    }, preservesPartitioning = true)
  }
}
| iiisthu/sparkSdn | core/src/main/scala/org/apache/spark/rdd/OrderedRDDFunctions.scala | Scala | apache-2.0 | 2,240 |
package org.scalaide.debug.internal.model
import org.eclipse.debug.core.model.IValue
import org.scalaide.debug.internal.ScalaDebugger
import org.scalaide.debug.internal.ScalaDebugPlugin
import org.eclipse.debug.core.model.IVariable
/**
 * An [[IValue]] that does not correspond to a real value in the debugged VM:
 * it pairs a reference-type name with a display string, and may carry child
 * variables presented as the value's fields.
 */
case class VirtualValue
  (refTypeName: String, valueString: String, fields: Seq[IVariable] = Seq.empty)
  (implicit debugTarget: ScalaDebugTarget)
    extends ScalaDebugElement(debugTarget)
    with IValue {

  // Virtual values never exist in the target VM, hence never "allocated".
  override def isAllocated(): Boolean = false
  override def getReferenceTypeName(): String = refTypeName
  override def getValueString(): String = valueString

  /** Returns a copy of this value with the given variables appended as fields. */
  def withFields(vars: IVariable*): VirtualValue =
    VirtualValue(refTypeName, valueString, fields ++ vars)

  override def getVariables(): Array[IVariable] = fields.toArray
  override def hasVariables(): Boolean = fields.nonEmpty
}
/**
 * An [[IVariable]] that does not correspond to a real variable in the debugged
 * VM; it simply binds a name and a reference-type name to a fixed [[IValue]].
 */
case class VirtualVariable
  (name: String, refTypeName: String, value: IValue)
  (implicit debugTarget: ScalaDebugTarget)
  extends ScalaDebugElement(debugTarget)
  with IVariable {
  override def getName(): String = name
  override def getReferenceTypeName(): String = refTypeName
  override def getValue(): IValue = value
  // Read-only snapshot: the value never changes after construction.
  override def hasValueChanged(): Boolean = false
  override def supportsValueModification(): Boolean = false
  // NOTE(review): ??? throws NotImplementedError if invoked; presumably unreachable
  // because supportsValueModification/verifyValue always answer false — confirm
  // against the Eclipse debug UI's calling contract.
  override def setValue(value: IValue): Unit = ???
  override def setValue(expr: String): Unit = ???
  override def verifyValue(value: IValue): Boolean = false
  override def verifyValue(expr: String): Boolean = false
}
| romanowski/scala-ide | org.scala-ide.sdt.debug/src/org/scalaide/debug/internal/model/VirtualValue.scala | Scala | bsd-3-clause | 1,516 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services.sift
import com.google.inject.name.Named
import common.FutureEx
import connectors.OnlineTestEmailClient
import factories.DateTimeFactory
import javax.inject.{Inject, Singleton}
import model.EvaluationResults.{Green, Red, Withdrawn}
import model.Exceptions.{ApplicationNotFound, NoResultsReturned, SiftResultsAlreadyExistsException, UnexpectedException}
import model.ProgressStatuses.SIFT_ENTERED
import model._
import model.command.{ApplicationForSift, ApplicationForSiftExpiry}
import model.exchange.sift.{SiftState, SiftTestGroupWithActiveTest}
import model.persisted.SchemeEvaluationResult
import model.persisted.sift.NotificationExpiringSift
import model.sift.{FixStuckUser, FixUserStuckInSiftEntered, SiftReminderNotice}
import org.joda.time.DateTime
import play.api.Logging
import reactivemongo.bson.BSONDocument
import repositories.application.GeneralApplicationRepository
import repositories.contactdetails.ContactDetailsRepository
import repositories.sift.ApplicationSiftRepository
import repositories.{CommonBSONDocuments, CurrentSchemeStatusHelper, SchemeRepository}
import services.allocation.CandidateAllocationService.CouldNotFindCandidateWithApplication
import services.onlinetesting.Exceptions.NoActiveTestException
import uk.gov.hmrc.http.HeaderCarrier
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.language.postfixOps
@Singleton
// scalastyle:off number.of.methods
/**
 * Service driving the SIFT phase of an application: progressing candidates into
 * sift, recording per-scheme sift results, handling expiry windows, reminder and
 * expiry emails, and providing a number of one-off "fix" operations for
 * candidates stuck in inconsistent states.
 */
class ApplicationSiftService @Inject() (applicationSiftRepo: ApplicationSiftRepository,
                                        applicationRepo: GeneralApplicationRepository,
                                        contactDetailsRepo: ContactDetailsRepository,
                                        schemeRepo: SchemeRepository,
                                        val dateTimeFactory: DateTimeFactory,
                                        @Named("CSREmailClient") emailClient: OnlineTestEmailClient // TODO: had to change the type
                                       ) extends CurrentSchemeStatusHelper with CommonBSONDocuments with Logging {

  // Number of days a candidate has to complete sift before the phase expires.
  val SiftExpiryWindowInDays: Int = 7

  def nextApplicationsReadyForSiftStage(batchSize: Int): Future[Seq[ApplicationForSift]] = {
    applicationSiftRepo.nextApplicationsForSiftStage(batchSize)
  }

  def nextApplicationForFirstReminder(timeInHours: Int): Future[Option[NotificationExpiringSift]] = {
    applicationSiftRepo.nextApplicationForFirstSiftReminder(timeInHours)
  }

  def nextApplicationForSecondReminder(timeInHours: Int): Future[Option[NotificationExpiringSift]] = {
    applicationSiftRepo.nextApplicationForSecondSiftReminder(timeInHours)
  }

  def nextApplicationsReadyForNumericTestsInvitation(batchSize: Int) : Future[Seq[NumericalTestApplication]] = {
    val numericalSchemeIds = schemeRepo.numericTestSiftRequirementSchemeIds
    applicationSiftRepo.nextApplicationsReadyForNumericTestsInvitation(batchSize, numericalSchemeIds)
  }

  // Emails a sift reminder to the candidate, then records the matching progress status.
  def sendReminderNotification(expiringSift: NotificationExpiringSift,
                               siftReminderNotice: SiftReminderNotice)(implicit hc: HeaderCarrier): Future[Unit] = {
    for {
      emailAddress <- contactDetailsRepo.find(expiringSift.userId).map(_.email)
      _ <- emailClient.sendSiftReminder(emailAddress, expiringSift.preferredName, siftReminderNotice.hoursBeforeReminder,
        siftReminderNotice.timeUnit, expiringSift.expiryDate)
      _ <- applicationRepo.addProgressStatusAndUpdateAppStatus(expiringSift.applicationId, siftReminderNotice.progressStatus)
    } yield {
      val msg = s"Sift reminder email sent to candidate whose applicationId = ${expiringSift.applicationId} " +
        s"${siftReminderNotice.hoursBeforeReminder} hours before expiry and candidate status updated " +
        s"to ${siftReminderNotice.progressStatus}"
      logger.info(msg)
    }
  }

  def isSiftExpired(applicationId: String): Future[Boolean] = {
    applicationSiftRepo.isSiftExpired(applicationId)
  }

  // Moves the next candidate who failed sift into FAILED_AT_SIFT; no-op when none found.
  def processNextApplicationFailedAtSift: Future[Unit] = applicationSiftRepo.nextApplicationFailedAtSift.flatMap(_.map { application =>
    applicationRepo.addProgressStatusAndUpdateAppStatus(application.applicationId, ProgressStatuses.FAILED_AT_SIFT)
  }.getOrElse(Future.successful(())))

  // True when at least one of the given schemes requires a sift form to be filled in.
  private def requiresForms(schemeIds: Seq[SchemeId]) = {
    schemeRepo.getSchemesForIds(schemeIds).exists(_.siftRequirement.contains(SiftRequirement.FORM))
  }

  // NOTE(review): the scheme list is currently ignored; all candidates enter at SIFT_ENTERED.
  def progressStatusForSiftStage(schemeList: Seq[SchemeId]): ProgressStatuses.ProgressStatus = ProgressStatuses.SIFT_ENTERED

  // Progresses each application into the sift stage serially, collecting
  // per-application success/failure into a SerialUpdateResult.
  def progressApplicationToSiftStage(applications: Seq[ApplicationForSift]): Future[SerialUpdateResult[ApplicationForSift]] = {
    val updates = FutureEx.traverseSerial(applications) { app =>
      val status = progressStatusForSiftStage(app.currentSchemeStatus.collect { case s if s.result == Green.toString => s.schemeId })
      FutureEx.futureToEither(
        app,
        applicationRepo.addProgressStatusAndUpdateAppStatus(app.applicationId, status)
      )
    }
    updates.map(SerialUpdateResult.fromEither)
  }

  // Stores now + SiftExpiryWindowInDays as the sift expiry date and returns it.
  def saveSiftExpiryDate(applicationId: String): Future[DateTime] = {
    val expiryDate = dateTimeFactory.nowLocalTimeZone.plusDays(SiftExpiryWindowInDays)
    applicationSiftRepo.saveSiftExpiryDate(applicationId, expiryDate).map(_ => expiryDate)
  }

  def fetchSiftExpiryDate(applicationId: String): Future[DateTime] = {
    applicationSiftRepo.findSiftExpiryDate(applicationId)
  }

  def findApplicationsReadyForSchemeSift(schemeId: SchemeId): Future[Seq[model.Candidate]] = {
    applicationSiftRepo.findApplicationsReadyForSchemeSift(schemeId)
  }

  // Records a sift result for one scheme; fails fast if that scheme was already sifted.
  // The update builder differs per application route (SdipFaststream vs everything else).
  def siftApplicationForScheme(applicationId: String, result: SchemeEvaluationResult): Future[Unit] = {
    applicationSiftRepo.siftResultsExistsForScheme(applicationId, result.schemeId).flatMap { siftResultsExists =>
      if(siftResultsExists) {
        throw SiftResultsAlreadyExistsException(s"Sift result already exists for appId $applicationId and scheme ${result.schemeId}")
      } else {
        applicationRepo.getApplicationRoute(applicationId).flatMap { route =>
          val updateFunction = route match {
            case ApplicationRoute.SdipFaststream => buildSiftSettableFields(result, sdipFaststreamSchemeFilter) _
            case _ => buildSiftSettableFields(result, schemeFilter) _
          }
          siftApplicationForScheme(applicationId, result, updateFunction)
        }
      }
    }
  }

  // Expires and notifies every candidate whose sift window has elapsed (batched).
  def processExpiredCandidates(batchSize: Int, gracePeriodInSecs: Int)(implicit hc: HeaderCarrier): Future[Unit] = {
    def processApplication(appForExpiry: ApplicationForSiftExpiry): Future[Unit] = {
      expireCandidate(appForExpiry)
        .flatMap(_ => notifyExpiredCandidate(appForExpiry.applicationId))
    }

    nextApplicationsForExpiry(batchSize, gracePeriodInSecs)
      .flatMap {
        case Nil =>
          logger.info("No applications found for SIFT expiry")
          Future.successful(())
        case applications: Seq[ApplicationForSiftExpiry] =>
          logger.info(s"${applications.size} application(s) found for SIFT expiry - appId(s): ${applications.map(_.applicationId)}")
          Future.sequence(applications.map(processApplication)).map(_ => ())
      }
  }

  private def nextApplicationsForExpiry(batchSize: Int, gracePeriodInSecs: Int): Future[Seq[ApplicationForSiftExpiry]] = {
    applicationSiftRepo.nextApplicationsForSiftExpiry(batchSize, gracePeriodInSecs)
  }

  def expireCandidate(appForExpiry: ApplicationForSiftExpiry): Future[Unit] = {
    applicationRepo
      .addProgressStatusAndUpdateAppStatus(appForExpiry.applicationId, ProgressStatuses.SIFT_EXPIRED)
      .map(_ => logger.info(s"Expired SIFT application: $appForExpiry"))
  }

  def expireCandidates(appsForExpiry: Seq[ApplicationForSiftExpiry]): Future[Unit] = {
    Future.sequence(appsForExpiry.map(app => expireCandidate(app))).map(_ => ())
  }

  // Returns the candidate's sift state; both the SIFT_ENTERED timestamp and a
  // sift test group (which carries the expiry date) must be present.
  def getSiftState(applicationId: String): Future[Option[SiftState]] = {
    for {
      progressStatusTimestamps <- applicationRepo.getProgressStatusTimestamps(applicationId)
      siftTestGroup <- applicationSiftRepo.getTestGroup(applicationId)
    } yield {
      val mappedStates = progressStatusTimestamps.toMap
      val siftEnteredDateOpt = mappedStates.get(SIFT_ENTERED.toString)
      // Both dates have to be present otherwise we return a None
      // testGroups.SIFT_PHASE.expirationDate is created as soon as the candidate moves into SIFT_ENTERED
      (siftEnteredDateOpt, siftTestGroup) match {
        case (Some(enteredDate), Some(testGroup)) =>
          Some(SiftState(siftEnteredDate = enteredDate, expirationDate = testGroup.expirationDate))
        case _ => None
      }
    }
  }

  // Resolves the sift test group with its single active test; throws when the
  // group has no tests or no test marked usedForResults.
  def getTestGroup(applicationId: String): Future[Option[SiftTestGroupWithActiveTest]] = {
    for {
      siftOpt <- applicationSiftRepo.getTestGroup(applicationId)
    } yield siftOpt.map { sift =>
      val test = sift.tests.getOrElse(throw UnexpectedException(s"No tests found for $applicationId in SIFT"))
        .find(_.usedForResults)
        .getOrElse(throw NoActiveTestException(s"No active sift test found for $applicationId"))
      SiftTestGroupWithActiveTest(
        sift.expirationDate,
        test
      )
    }
  }

  // Records the test start time for a Cubiks user and flags SIFT_TEST_STARTED.
  def markTestAsStarted(cubiksUserId: Int, startedTime: DateTime = dateTimeFactory.nowLocalTimeZone): Future[Unit] = {
    for {
      _ <- applicationSiftRepo.updateTestStartTime(cubiksUserId, startedTime)
      appId <- applicationSiftRepo.getApplicationIdForCubiksId(cubiksUserId)
      // NOTE(review): `-` below is a legal Scala identifier used as a binder;
      // `_` is the conventional wildcard and is likely what was intended.
      - <- applicationRepo.addProgressStatusAndUpdateAppStatus(appId, ProgressStatuses.SIFT_TEST_STARTED)
    } yield {}
  }

  // TODO: cubiks rename this without the 2
  def markTestAsStarted2(orderId: String, startedTime: DateTime = dateTimeFactory.nowLocalTimeZone): Future[Unit] = {
    for {
      _ <- applicationSiftRepo.updateTestStartTime(orderId, startedTime)
      appId <- applicationSiftRepo.getApplicationIdForOrderId(orderId)
      - <- applicationRepo.addProgressStatusAndUpdateAppStatus(appId, ProgressStatuses.SIFT_TEST_STARTED)
    } yield {}
  }

  // Emails the candidate that their sift phase has expired and records SIFT_EXPIRED_NOTIFIED.
  private def notifyExpiredCandidate(applicationId: String)(implicit hc: HeaderCarrier): Future[Unit] = {
    applicationRepo.find(applicationId).flatMap {
      case Some(candidate) =>
        for {
          contactDetails <- contactDetailsRepo.find(candidate.userId)
          _ <- emailClient.sendSiftExpired(contactDetails.email, candidate.name)
          - <- applicationRepo.addProgressStatusAndUpdateAppStatus(applicationId, ProgressStatuses.SIFT_EXPIRED_NOTIFIED)
        } yield ()
      case None => throw CouldNotFindCandidateWithApplication(applicationId)
    }
  }

  // NOTE(review): currently identical to schemeFilter below; presumably kept
  // separate to allow route-specific filtering for SdipFaststream — confirm
  // before consolidating.
  private def sdipFaststreamSchemeFilter: PartialFunction[SchemeEvaluationResult, SchemeId] = {
    case s if s.result != Withdrawn.toString && s.result != Red.toString => s.schemeId
  }

  // Keeps the schemes the candidate is still in the running for (not withdrawn, not failed).
  private def schemeFilter: PartialFunction[SchemeEvaluationResult, SchemeId] = {
    case s if s.result != Withdrawn.toString && s.result != Red.toString => s.schemeId
  }

  def sendSiftEnteredNotification(applicationId: String, siftExpiry: DateTime)(implicit hc: HeaderCarrier): Future[Unit] = {
    applicationRepo.find(applicationId).flatMap {
      case Some(candidate) => contactDetailsRepo.find(candidate.userId).flatMap { contactDetails =>
        emailClient.notifyCandidateSiftEnteredAdditionalQuestions(
          contactDetails.email, candidate.name, siftExpiry
        ).map(_ => ())
      }
      case None => throw CouldNotFindCandidateWithApplication(applicationId)
    }
  }

  // Loads the current state, builds the settable BSON fields via updateBuilder,
  // then applies the sift update. A missing sift evaluation is treated as empty.
  private def siftApplicationForScheme(applicationId: String, result: SchemeEvaluationResult,
    updateBuilder: (Seq[SchemeEvaluationResult], Seq[SchemeEvaluationResult]) => Seq[BSONDocument]
  ): Future[Unit] = {
    (for {
      currentSchemeStatus <- applicationRepo.getCurrentSchemeStatus(applicationId)
      currentSiftEvaluation <- applicationSiftRepo.getSiftEvaluations(applicationId).recover { case _ => Nil }
    } yield {
      val settableFields = updateBuilder(currentSchemeStatus, currentSiftEvaluation)
      applicationSiftRepo.siftApplicationForScheme(applicationId, result, settableFields)
    }) flatMap identity
  }

  // Builds the BSON updates for sifting one scheme: the recalculated scheme status
  // plus any progress statuses that become applicable, dropping empty documents.
  private def buildSiftSettableFields(result: SchemeEvaluationResult, schemeFilter: PartialFunction[SchemeEvaluationResult, SchemeId])
    (currentSchemeStatus: Seq[SchemeEvaluationResult], currentSiftEvaluation: Seq[SchemeEvaluationResult]
    ): Seq[BSONDocument] = {
    val newSchemeStatus = calculateCurrentSchemeStatus(currentSchemeStatus, result :: Nil)
    val candidatesGreenSchemes = currentSchemeStatus.collect { schemeFilter }
    val candidatesSiftableSchemes = schemeRepo.siftableAndEvaluationRequiredSchemeIds.filter(s => candidatesGreenSchemes.contains(s))
    val siftedSchemes = (currentSiftEvaluation.map(_.schemeId) :+ result.schemeId).distinct
    Seq(currentSchemeStatusBSON(newSchemeStatus),
      maybeSetProgressStatus(siftedSchemes.toSet, candidatesSiftableSchemes.toSet),
      maybeFailSdip(result),
      maybeSetSdipFaststreamProgressStatus(newSchemeStatus, siftedSchemes, candidatesSiftableSchemes)
    ).foldLeft(Seq.empty[BSONDocument]) { (acc, doc) =>
      doc match {
        // The stable-identifier pattern matches by equality, filtering out empty documents.
        case _ @BSONDocument.empty => acc
        case _ => acc :+ doc
      }
    }
  }

  // SIFT_COMPLETED once every siftable scheme the candidate holds has been sifted.
  private def maybeSetProgressStatus(siftedSchemes: Set[SchemeId], candidatesSiftableSchemes: Set[SchemeId]) = {
    if (candidatesSiftableSchemes subsetOf siftedSchemes) {
      progressStatusOnlyBSON(ProgressStatuses.SIFT_COMPLETED)
    } else {
      BSONDocument.empty
    }
  }

  // SDIP_FAILED_AT_SIFT when the sifted scheme is Sdip and the result is Red.
  private def maybeFailSdip(result: SchemeEvaluationResult) = {
    if (Scheme.isSdip(result.schemeId) && result.result == Red.toString) {
      progressStatusOnlyBSON(ProgressStatuses.SDIP_FAILED_AT_SIFT)
    } else {
      BSONDocument.empty
    }
  }

  def getSiftEvaluations(applicationId: String): Future[Seq[SchemeEvaluationResult]] = {
    applicationSiftRepo.getSiftEvaluations(applicationId)
  }

  // we need to consider that all siftable schemes have been sifted with a fail or the candidate has withdrawn from them
  // and sdip has been sifted with a pass
  private def maybeSetSdipFaststreamProgressStatus(newSchemeEvaluationResult: Seq[SchemeEvaluationResult],
    siftedSchemes: Seq[SchemeId], candidatesSiftableSchemes: Seq[SchemeId]) = {
    // Sdip has been sifted and it passed
    val SdipEvaluationResultPassed = SchemeEvaluationResult(Scheme.SdipId, Green.toString)
    val sdipNeededSiftEvaluation = candidatesSiftableSchemes.map(_.value).contains("Sdip")
    val sdipPassedSift = siftedSchemes.contains(Scheme.SdipId) && newSchemeEvaluationResult.contains(SdipEvaluationResultPassed)
    val schemesExcludingSdip = newSchemeEvaluationResult.filterNot( s => s.schemeId == Scheme.SdipId)
    val faststreamSchemesRedOrWithdrawn = schemesExcludingSdip.forall{ s =>
      s.result == Red.toString || s.result == Withdrawn.toString
    }
    if ((!sdipNeededSiftEvaluation || sdipPassedSift) && faststreamSchemesRedOrWithdrawn) {
      progressStatusOnlyBSON(ProgressStatuses.SIFT_FASTSTREAM_FAILED_SDIP_GREEN)
    } else {
      BSONDocument.empty
    }
  }

  // Recomputes the progress status a candidate should have, given their scheme
  // status and sift evaluation; empty document means no change needed.
  def findStuckUsersCalculateCorrectProgressStatus(currentSchemeStatus: Seq[SchemeEvaluationResult],
    currentSiftEvaluation: Seq[SchemeEvaluationResult]): BSONDocument = {
    val candidatesGreenSchemes = currentSchemeStatus.collect { schemeFilter }
    val candidatesSiftableSchemes = schemeRepo.siftableAndEvaluationRequiredSchemeIds.filter(s => candidatesGreenSchemes.contains(s))
    val siftedSchemes = currentSiftEvaluation.map(_.schemeId).distinct
    maybeSetProgressStatus(siftedSchemes.toSet, candidatesSiftableSchemes.toSet)
  }

  // Pairs each SIFT_READY candidate with whether they should already be SIFT_COMPLETED.
  def findUsersInSiftReadyWhoShouldHaveBeenCompleted: Future[Seq[(FixStuckUser, Boolean)]] = {
    applicationSiftRepo.findAllUsersInSiftReady.map(_.map { potentialStuckUser =>
      val result = findStuckUsersCalculateCorrectProgressStatus(
        potentialStuckUser.currentSchemeStatus,
        potentialStuckUser.currentSiftEvaluation
      )
      (potentialStuckUser, !result.isEmpty)
    })
  }

  def fixUserInSiftReadyWhoShouldHaveBeenCompleted(applicationId: String): Future[Unit] = {
    (for {
      usersToFix <- findUsersInSiftReadyWhoShouldHaveBeenCompleted
    } yield {
      if (usersToFix.exists { case (user, shouldBeMoved) => user.applicationId == applicationId && shouldBeMoved }) {
        applicationRepo.addProgressStatusAndUpdateAppStatus(applicationId, ProgressStatuses.SIFT_COMPLETED)
      } else {
        throw ApplicationNotFound(s"Application ID $applicationId is not available for fixing")
      }
    }).flatMap(identity)
  }

  def findUsersInSiftEnteredWhoShouldBeInSiftReadyWhoHaveFailedFormBasedSchemesInVideoPhase: Future[Seq[FixUserStuckInSiftEntered]] = {
    def includeUser(potentialStuckUser: FixUserStuckInSiftEntered): Boolean = {
      // we include the candidate if their green schemes are either numeric_test or generalist / human resources
      // and there must be at least one numeric_test
      val greenSchemes = potentialStuckUser.currentSchemeStatus.filter( s => s.result == Green.toString)
      val allSchemesApplicable = greenSchemes.forall { s =>
        schemeRepo.nonSiftableSchemeIds.contains(s.schemeId) || schemeRepo.numericTestSiftRequirementSchemeIds.contains(s.schemeId)
      }
      val atLeastOneNumericTestScheme = greenSchemes.exists( s => schemeRepo.numericTestSiftRequirementSchemeIds.contains(s.schemeId) )
      allSchemesApplicable && atLeastOneNumericTestScheme
    }
    applicationSiftRepo.findAllUsersInSiftEntered.map( _.filter ( potentialStuckUser => includeUser(potentialStuckUser) ))
  }

  def fixUserInSiftEnteredWhoShouldBeInSiftReadyWhoHasFailedFormBasedSchemesInVideoPhase(applicationId: String): Future[Unit] = {
    (for {
      usersToFix <- findUsersInSiftEnteredWhoShouldBeInSiftReadyWhoHaveFailedFormBasedSchemesInVideoPhase
    } yield {
      if (usersToFix.exists { user => user.applicationId == applicationId }) {
        applicationRepo.addProgressStatusAndUpdateAppStatus(applicationId, ProgressStatuses.SIFT_READY)
      } else {
        throw NoResultsReturned(s"Application ID $applicationId is not available for fixing")
      }
    }).flatMap(identity)
  }

  // candidates who are in sift_entered who have withdrawn from all form based schemes and are still in the running
  // for at least one numeric scheme
  def findUsersInSiftEnteredWhoShouldBeInSiftReadyAfterWithdrawingFromAllFormBasedSchemes: Future[Seq[FixUserStuckInSiftEntered]] = {
    def includeUser(potentialStuckUser: FixUserStuckInSiftEntered): Boolean = {
      // We include the candidate if their green schemes are either numeric_test or generalist / human resources
      // and there must be at least one numeric_test and they have form based schemes, which are all withdrawn
      val greenSchemes = potentialStuckUser.currentSchemeStatus.filter( s => s.result == Green.toString)
      // Remaining green schemes require a numeric test or generalist / human resources
      val allSchemesApplicable = greenSchemes.forall { s =>
        schemeRepo.nonSiftableSchemeIds.contains(s.schemeId) || schemeRepo.numericTestSiftRequirementSchemeIds.contains(s.schemeId)
      }
      // The candidate must still be in the running for at least one numeric test scheme
      val atLeastOneNumericTestScheme = greenSchemes.exists( s => schemeRepo.numericTestSiftRequirementSchemeIds.contains(s.schemeId) )
      // Must have form based schemes and be withdrawn from all of them
      val usersFormBasedSchemes = potentialStuckUser.currentSchemeStatus.filter { s =>
        schemeRepo.formMustBeFilledInSchemeIds.contains(s.schemeId)
      }
      val hasFormBasedSchemesAndAllWithdrawn = usersFormBasedSchemes.nonEmpty && usersFormBasedSchemes.forall( _.result == Withdrawn.toString )
      allSchemesApplicable && atLeastOneNumericTestScheme && hasFormBasedSchemesAndAllWithdrawn
    }
    applicationSiftRepo.findAllUsersInSiftEntered.map( _.filter ( potentialStuckUser => includeUser(potentialStuckUser) ))
  }

  def fixUserInSiftEnteredWhoShouldBeInSiftReadyAfterWithdrawingFromAllFormBasedSchemes(applicationId: String): Future[Unit] = {
    (for {
      usersToFix <- findUsersInSiftEnteredWhoShouldBeInSiftReadyAfterWithdrawingFromAllFormBasedSchemes
    } yield {
      if (usersToFix.exists { user => user.applicationId == applicationId }) {
        applicationRepo.addProgressStatusAndUpdateAppStatus(applicationId, ProgressStatuses.SIFT_READY)
      } else {
        throw ApplicationNotFound(s"Application ID $applicationId is not available for fixing")
      }
    }).flatMap(identity)
  }

  // Removes the sift evaluation and all failure progress statuses for a candidate
  // who was failed in error.
  def fixUserSiftedWithAFailByMistake(applicationId: String): Future[Unit] = {
    for {
      _ <- applicationSiftRepo.fixDataByRemovingSiftPhaseEvaluationAndFailureStatus(applicationId)
      _ <- applicationRepo.removeProgressStatuses(applicationId,
        List(ProgressStatuses.SIFT_FASTSTREAM_FAILED_SDIP_GREEN, ProgressStatuses.SIFT_COMPLETED, ProgressStatuses.FAILED_AT_SIFT,
          ProgressStatuses.SDIP_FAILED_AT_SIFT, ProgressStatuses.FAILED_AT_SIFT_NOTIFIED))
    } yield ()
  }

  // Keeps the sift evaluation but removes the failure statuses, resetting the
  // candidate back to application status SIFT.
  def fixUserSiftedWithAFailToSiftCompleted(applicationId: String): Future[Unit] = {
    for {
      _ <- applicationRepo.removeProgressStatuses(applicationId,
        List(ProgressStatuses.SIFT_FASTSTREAM_FAILED_SDIP_GREEN, ProgressStatuses.FAILED_AT_SIFT, ProgressStatuses.SDIP_FAILED_AT_SIFT,
          ProgressStatuses.FAILED_AT_SIFT_NOTIFIED))
      _ <- applicationRepo.updateApplicationStatusOnly(applicationId, ApplicationStatus.SIFT)
    } yield ()
  }

  // Pushes the sift expiry to now + extraDays for a wrongly-failed candidate.
  def extendSiftCandidateFailedByMistake(applicationId: String, extraDays: Int): Future[Unit] = {
    for {
      _ <- applicationSiftRepo.updateExpiryTime(applicationId, dateTimeFactory.nowLocalTimeZone.plusDays(extraDays))
    } yield ()
  }

  def removeEvaluation(applicationId: String): Future[Unit] = {
    for {
      _ <- applicationSiftRepo.removeEvaluation(applicationId)
    } yield ()
  }
}
// scalastyle:on number.of.methods
| hmrc/fset-faststream | app/services/sift/ApplicationSiftService.scala | Scala | apache-2.0 | 22,945 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.