code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command.v2
import org.apache.spark.sql.{AnalysisException, Row}
import org.apache.spark.sql.execution.command
/**
 * The class contains tests for the `SHOW PARTITIONS` command to check V2 table catalogs.
 */
class ShowPartitionsSuite extends command.ShowPartitionsSuiteBase with CommandSuiteBase {

  test("a table does not support partitioning") {
    // A table created without any PARTITIONED BY clause has no partition support.
    val tableName = s"non_part_$catalog.tab1"
    withTable(tableName) {
      sql(
        s"""
|CREATE TABLE $tableName (price int, qty int, year int, month int)
|$defaultUsing""".stripMargin)
      val cause = intercept[AnalysisException] {
        sql(s"SHOW PARTITIONS $tableName")
      }
      assert(cause.getMessage.contains(s"Table $tableName does not support partition management"))
    }
  }

  test("SPARK-33889, SPARK-33904: null and empty string as partition values") {
    withNamespaceAndTable("ns", "tbl") { t =>
      createNullPartTable(t, "parquet")
      // Null is rendered as "null" and the empty string as nothing after "part=".
      runShowPartitionsSql(s"SHOW PARTITIONS $t", List(Row("part="), Row("part=null")))
      checkAnswer(spark.table(t), List(Row(0, ""), Row(1, null)))
    }
  }
}
| mahak/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowPartitionsSuite.scala | Scala | apache-2.0 | 1,923 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.python
import java.io._
import java.net._
import java.nio.charset.StandardCharsets
import java.util.concurrent.atomic.AtomicBoolean
import scala.collection.JavaConverters._
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.util._
/**
 * Enumerate the type of command that will be sent to the Python worker.
 * These values are written on the wire (see `WriterThread.run`, which sends
 * `evalType` before the command payload) and therefore must stay in sync with
 * the constants used by the Python worker side.
 */
private[spark] object PythonEvalType {
  val NON_UDF = 0                      // plain mapPartitions-style command, no UDF
  val SQL_BATCHED_UDF = 100            // SQL UDF evaluated in row batches
  val SQL_PANDAS_SCALAR_UDF = 200      // vectorized (pandas) scalar UDF
  val SQL_PANDAS_GROUP_MAP_UDF = 201   // vectorized (pandas) grouped-map UDF
}
/**
 * A helper class to run Python mapPartition/UDFs in Spark.
 *
 * funcs is a list of independent Python functions, each one of them is a list of chained Python
 * functions (from bottom to top).
 *
 * @param funcs       independent chains of Python functions to run
 * @param bufferSize  buffer size for the socket streams to/from the worker
 * @param reuseWorker whether an idle worker process may be reused across tasks
 * @param evalType    one of the `PythonEvalType` wire constants
 * @param argOffsets  per-function argument offsets; must parallel `funcs`
 */
private[spark] abstract class BasePythonRunner[IN, OUT](
    funcs: Seq[ChainedPythonFunctions],
    bufferSize: Int,
    reuseWorker: Boolean,
    evalType: Int,
    argOffsets: Array[Array[Int]])
  extends Logging {

  require(funcs.length == argOffsets.length, "argOffsets should have the same length as funcs")

  // All the Python functions should have the same exec, version and envvars,
  // so the first function's settings are taken as representative.
  protected val envVars = funcs.head.funcs.head.envVars
  protected val pythonExec = funcs.head.funcs.head.pythonExec
  protected val pythonVer = funcs.head.funcs.head.pythonVer

  // TODO: support accumulator in multiple UDF
  protected val accumulator = funcs.head.funcs.head.accumulator
/**
 * Obtains (or reuses) a Python worker process, starts a writer thread feeding
 * it `inputIterator`, and returns an iterator over the worker's output.
 *
 * @param inputIterator  the task's input rows/records
 * @param partitionIndex index of the partition being computed
 * @param context        the task context (used for cleanup and interruption)
 * @return an interruptible iterator over the worker's deserialized output
 */
def compute(
    inputIterator: Iterator[IN],
    partitionIndex: Int,
    context: TaskContext): Iterator[OUT] = {
  val startTime = System.currentTimeMillis
  val env = SparkEnv.get
  val localdir = env.blockManager.diskBlockManager.localDirs.map(f => f.getPath()).mkString(",")
  envVars.put("SPARK_LOCAL_DIRS", localdir) // it's also used in monitor thread
  if (reuseWorker) {
    envVars.put("SPARK_REUSE_WORKER", "1")
  }
  val worker: Socket = env.createPythonWorker(pythonExec, envVars.asScala.toMap)
  // Whether is the worker released into idle pool
  val released = new AtomicBoolean(false)
  // Start a thread to feed the process input from our parent's iterator
  val writerThread = newWriterThread(env, worker, inputIterator, partitionIndex, context)
  context.addTaskCompletionListener { _ =>
    writerThread.shutdownOnTaskCompletion()
    // Close the socket unless the worker was handed back to the idle pool for reuse.
    if (!reuseWorker || !released.get) {
      try {
        worker.close()
      } catch {
        case e: Exception =>
          logWarning("Failed to close worker socket", e)
      }
    }
  }
  writerThread.start()
  // Watchdog that destroys the worker if the task is interrupted but cannot stop (see MonitorThread).
  new MonitorThread(env, worker, context).start()
  // Return an iterator that read lines from the process's stdout
  val stream = new DataInputStream(new BufferedInputStream(worker.getInputStream, bufferSize))
  val stdoutIterator = newReaderIterator(
    stream, writerThread, startTime, env, worker, released, context)
  new InterruptibleIterator(context, stdoutIterator)
}
/**
 * Factory for the thread that serializes the command and input data to the worker.
 * Implementations choose how `inputIterator` elements are written to the stream.
 */
protected def newWriterThread(
    env: SparkEnv,
    worker: Socket,
    inputIterator: Iterator[IN],
    partitionIndex: Int,
    context: TaskContext): WriterThread
/**
 * Factory for the iterator that deserializes the worker's output stream.
 * Implementations decide how each `OUT` element is read from `stream`.
 */
protected def newReaderIterator(
    stream: DataInputStream,
    writerThread: WriterThread,
    startTime: Long,
    env: SparkEnv,
    worker: Socket,
    released: AtomicBoolean,
    context: TaskContext): Iterator[OUT]
/**
 * The thread responsible for writing the data from the PythonRDD's parent iterator to the
 * Python process.
 *
 * The wire format written by `run()` is, in order: partition index, Python
 * version, task-context info, spark files dir, Python includes, broadcast
 * delta, eval type, the serialized command, the input data, and finally an
 * END_OF_STREAM marker.  The Python worker reads fields in the same order,
 * so this ordering must not change.
 */
abstract class WriterThread(
    env: SparkEnv,
    worker: Socket,
    inputIterator: Iterator[IN],
    partitionIndex: Int,
    context: TaskContext)
  extends Thread(s"stdout writer for $pythonExec") {

  // Exception raised while writing, if any; surfaced to the reader side via `exception`.
  @volatile private var _exception: Exception = null

  private val pythonIncludes = funcs.flatMap(_.funcs.flatMap(_.pythonIncludes.asScala)).toSet
  private val broadcastVars = funcs.flatMap(_.funcs.flatMap(_.broadcastVars.asScala))

  // Daemon so a stuck writer never prevents JVM shutdown.
  setDaemon(true)

  /** Contains the exception thrown while writing the parent iterator to the Python process. */
  def exception: Option[Exception] = Option(_exception)

  /** Terminates the writer thread, ignoring any exceptions that may occur due to cleanup. */
  def shutdownOnTaskCompletion() {
    assert(context.isCompleted)
    this.interrupt()
  }

  /**
   * Writes a command section to the stream connected to the Python worker.
   */
  protected def writeCommand(dataOut: DataOutputStream): Unit

  /**
   * Writes input data to the stream connected to the Python worker.
   */
  protected def writeIteratorToStream(dataOut: DataOutputStream): Unit

  override def run(): Unit = Utils.logUncaughtExceptions {
    try {
      TaskContext.setTaskContext(context)
      val stream = new BufferedOutputStream(worker.getOutputStream, bufferSize)
      val dataOut = new DataOutputStream(stream)
      // Partition index
      dataOut.writeInt(partitionIndex)
      // Python version of driver
      PythonRDD.writeUTF(pythonVer, dataOut)
      // Write out the TaskContextInfo
      dataOut.writeInt(context.stageId())
      dataOut.writeInt(context.partitionId())
      dataOut.writeInt(context.attemptNumber())
      dataOut.writeLong(context.taskAttemptId())
      // sparkFilesDir
      PythonRDD.writeUTF(SparkFiles.getRootDirectory(), dataOut)
      // Python includes (*.zip and *.egg files)
      dataOut.writeInt(pythonIncludes.size)
      for (include <- pythonIncludes) {
        PythonRDD.writeUTF(include, dataOut)
      }
      // Broadcast variables: only the delta versus what this worker already holds is sent.
      val oldBids = PythonRDD.getWorkerBroadcasts(worker)
      val newBids = broadcastVars.map(_.id).toSet
      // number of different broadcasts
      val toRemove = oldBids.diff(newBids)
      val cnt = toRemove.size + newBids.diff(oldBids).size
      dataOut.writeInt(cnt)
      for (bid <- toRemove) {
        // remove the broadcast from worker; negative ids encode removals
        dataOut.writeLong(- bid - 1) // bid >= 0
        oldBids.remove(bid)
      }
      for (broadcast <- broadcastVars) {
        if (!oldBids.contains(broadcast.id)) {
          // send new broadcast (by file path, not by value)
          dataOut.writeLong(broadcast.id)
          PythonRDD.writeUTF(broadcast.value.path, dataOut)
          oldBids.add(broadcast.id)
        }
      }
      // Flush the header/broadcast section before the command and data payload.
      dataOut.flush()
      dataOut.writeInt(evalType)
      writeCommand(dataOut)
      writeIteratorToStream(dataOut)
      dataOut.writeInt(SpecialLengths.END_OF_STREAM)
      dataOut.flush()
    } catch {
      case e: Exception if context.isCompleted || context.isInterrupted =>
        logDebug("Exception thrown after task completion (likely due to cleanup)", e)
        if (!worker.isClosed) {
          Utils.tryLog(worker.shutdownOutput())
        }
      case e: Exception =>
        // We must avoid throwing exceptions here, because the thread uncaught exception handler
        // will kill the whole executor (see org.apache.spark.executor.Executor).
        _exception = e
        if (!worker.isClosed) {
          Utils.tryLog(worker.shutdownOutput())
        }
    }
  }
}
/**
 * Base iterator over the Python worker's output stream.  Subclasses implement
 * `read()` to deserialize one element; the `handle*` helpers process the
 * protocol's special sections (timing data, exceptions, end-of-data).
 */
abstract class ReaderIterator(
    stream: DataInputStream,
    writerThread: WriterThread,
    startTime: Long,
    env: SparkEnv,
    worker: Socket,
    released: AtomicBoolean,
    context: TaskContext)
  extends Iterator[OUT] {

  // One-element lookahead buffer; null means "not yet read".
  private var nextObj: OUT = _
  // Set once the END_OF_DATA_SECTION has been consumed.
  private var eos = false

  override def hasNext: Boolean = nextObj != null || {
    if (!eos) {
      // read() returns null at end of stream, after which eos is true.
      nextObj = read()
      hasNext
    } else {
      false
    }
  }

  override def next(): OUT = {
    if (hasNext) {
      val obj = nextObj
      nextObj = null.asInstanceOf[OUT]
      obj
    } else {
      Iterator.empty.next()
    }
  }

  /**
   * Reads next object from the stream.
   * When the stream reaches end of data, needs to process the following sections,
   * and then returns null.
   */
  protected def read(): OUT

  /** Reads and records the worker's boot/init/finish timing and spill metrics. */
  protected def handleTimingData(): Unit = {
    // Timing data from worker
    val bootTime = stream.readLong()
    val initTime = stream.readLong()
    val finishTime = stream.readLong()
    val boot = bootTime - startTime
    val init = initTime - bootTime
    val finish = finishTime - initTime
    val total = finishTime - startTime
    logInfo("Times: total = %s, boot = %s, init = %s, finish = %s".format(total, boot,
      init, finish))
    val memoryBytesSpilled = stream.readLong()
    val diskBytesSpilled = stream.readLong()
    context.taskMetrics.incMemoryBytesSpilled(memoryBytesSpilled)
    context.taskMetrics.incDiskBytesSpilled(diskBytesSpilled)
  }

  /** Reads the UTF-8 traceback sent by the worker and wraps it, chaining any writer-side cause. */
  protected def handlePythonException(): PythonException = {
    // Signals that an exception has been thrown in python
    val exLength = stream.readInt()
    val obj = new Array[Byte](exLength)
    stream.readFully(obj)
    new PythonException(new String(obj, StandardCharsets.UTF_8),
      writerThread.exception.getOrElse(null))
  }

  /** Consumes accumulator updates and, if possible, releases the worker for reuse. */
  protected def handleEndOfDataSection(): Unit = {
    // We've finished the data section of the output, but we can still
    // read some accumulator updates:
    val numAccumulatorUpdates = stream.readInt()
    (1 to numAccumulatorUpdates).foreach { _ =>
      val updateLen = stream.readInt()
      val update = new Array[Byte](updateLen)
      stream.readFully(update)
      accumulator.add(update)
    }
    // Check whether the worker is ready to be re-used.
    if (stream.readInt() == SpecialLengths.END_OF_STREAM) {
      if (reuseWorker) {
        env.releasePythonWorker(pythonExec, envVars.asScala.toMap, worker)
        released.set(true)
      }
    }
    eos = true
  }

  // Shared failure handling for read(): prefer the task-kill reason, then the
  // writer thread's exception (the likely root cause), then report a crash.
  protected val handleException: PartialFunction[Throwable, OUT] = {
    case e: Exception if context.isInterrupted =>
      logDebug("Exception thrown after task interruption", e)
      throw new TaskKilledException(context.getKillReason().getOrElse("unknown reason"))
    case e: Exception if writerThread.exception.isDefined =>
      logError("Python worker exited unexpectedly (crashed)", e)
      logError("This may have been caused by a prior exception:", writerThread.exception.get)
      throw writerThread.exception.get
    case eof: EOFException =>
      throw new SparkException("Python worker exited unexpectedly (crashed)", eof)
  }
}
/**
 * It is necessary to have a monitor thread for python workers if the user cancels with
 * interrupts disabled. In that case we will need to explicitly kill the worker, otherwise the
 * threads can block indefinitely.
 */
class MonitorThread(env: SparkEnv, worker: Socket, context: TaskContext)
  extends Thread(s"Worker Monitor for $pythonExec") {

  /** How long to wait before killing the python worker if a task cannot be interrupted. */
  private val taskKillTimeout = env.conf.getTimeAsMs("spark.python.task.killTimeout", "2s")

  // Daemon so this watchdog never blocks JVM shutdown.
  setDaemon(true)

  override def run() {
    // Kill the worker if it is interrupted, checking until task completion.
    // TODO: This has a race condition if interruption occurs, as completed may still become true.
    while (!context.isInterrupted && !context.isCompleted) {
      Thread.sleep(2000)
    }
    if (!context.isCompleted) {
      // Give the task a grace period to finish on its own before destroying the worker.
      Thread.sleep(taskKillTimeout)
      if (!context.isCompleted) {
        try {
          // Mimic the task name used in `Executor` to help the user find out the task to blame.
          val taskName = s"${context.partitionId}.${context.taskAttemptId} " +
            s"in stage ${context.stageId} (TID ${context.taskAttemptId})"
          logWarning(s"Incomplete task $taskName interrupted: Attempting to kill Python Worker")
          env.destroyPythonWorker(pythonExec, envVars.asScala.toMap, worker)
        } catch {
          case e: Exception =>
            logError("Exception when trying to kill worker", e)
        }
      }
    }
  }
}
}
private[spark] object PythonRunner {

  /** Convenience factory: wraps a single Python function in a one-element chain. */
  def apply(func: PythonFunction, bufferSize: Int, reuseWorker: Boolean): PythonRunner = {
    val singleChain = Seq(ChainedPythonFunctions(Seq(func)))
    new PythonRunner(singleChain, bufferSize, reuseWorker)
  }
}
/**
 * A helper class to run Python mapPartition in Spark.
 *
 * Input and output elements are raw byte arrays; the eval type is NON_UDF.
 */
private[spark] class PythonRunner(
    funcs: Seq[ChainedPythonFunctions],
    bufferSize: Int,
    reuseWorker: Boolean)
  extends BasePythonRunner[Array[Byte], Array[Byte]](
    funcs, bufferSize, reuseWorker, PythonEvalType.NON_UDF, Array(Array(0))) {

  protected override def newWriterThread(
      env: SparkEnv,
      worker: Socket,
      inputIterator: Iterator[Array[Byte]],
      partitionIndex: Int,
      context: TaskContext): WriterThread = {
    new WriterThread(env, worker, inputIterator, partitionIndex, context) {

      // The command is the pickled function bytes, length-prefixed.
      protected override def writeCommand(dataOut: DataOutputStream): Unit = {
        val command = funcs.head.funcs.head.command
        dataOut.writeInt(command.length)
        dataOut.write(command)
      }

      protected override def writeIteratorToStream(dataOut: DataOutputStream): Unit = {
        PythonRDD.writeIteratorToStream(inputIterator, dataOut)
        dataOut.writeInt(SpecialLengths.END_OF_DATA_SECTION)
      }
    }
  }

  protected override def newReaderIterator(
      stream: DataInputStream,
      writerThread: WriterThread,
      startTime: Long,
      env: SparkEnv,
      worker: Socket,
      released: AtomicBoolean,
      context: TaskContext): Iterator[Array[Byte]] = {
    new ReaderIterator(stream, writerThread, startTime, env, worker, released, context) {

      // Reads one length-prefixed byte array, dispatching negative lengths to
      // the protocol's special-section handlers.  Returns null at end of data.
      protected override def read(): Array[Byte] = {
        // Fail fast if the writer already crashed -- its exception is the root cause.
        if (writerThread.exception.isDefined) {
          throw writerThread.exception.get
        }
        try {
          stream.readInt() match {
            case length if length > 0 =>
              val obj = new Array[Byte](length)
              stream.readFully(obj)
              obj
            case 0 => Array.empty[Byte]
            case SpecialLengths.TIMING_DATA =>
              handleTimingData()
              read()
            case SpecialLengths.PYTHON_EXCEPTION_THROWN =>
              throw handlePythonException()
            case SpecialLengths.END_OF_DATA_SECTION =>
              handleEndOfDataSection()
              null
          }
        } catch handleException
      }
    }
  }
}
/**
 * Negative "length" markers used in the length-prefixed wire protocol between
 * the JVM and the Python worker.  A real payload always has length >= 0, so
 * these values unambiguously flag special sections.
 */
private[spark] object SpecialLengths {
  val END_OF_DATA_SECTION = -1    // end of the data payload (see handleEndOfDataSection)
  val PYTHON_EXCEPTION_THROWN = -2 // a traceback follows (see handlePythonException)
  val TIMING_DATA = -3             // timing/spill metrics follow (see handleTimingData)
  val END_OF_STREAM = -4           // final marker; signals the worker may be reused
  val NULL = -5                    // not used in this file -- presumably a null element marker
  val START_ARROW_STREAM = -6      // not used in this file -- presumably starts an Arrow stream
}
| ron8hu/spark | core/src/main/scala/org/apache/spark/api/python/PythonRunner.scala | Scala | apache-2.0 | 15,523 |
/*
* Copyright 2017-2022 Viktor Lövgren
*
* SPDX-License-Identifier: MIT
*/
package ciris.circe
import cats.implicits._
import ciris.{ConfigDecoder, ConfigError}
import io.circe.{Decoder, Json}
import io.circe.{DecodingFailure, ParsingFailure}
import io.circe.yaml.parser.parse
package object yaml {

  /**
   * Returns a [[ConfigDecoder]] that parses the configuration value as YAML and
   * decodes the resulting JSON into `A` with the implicit circe [[Decoder]].
   *
   * Both failure paths build a detailed message and a redacted message (value
   * and failure details omitted) so sensitive values never leak into errors.
   *
   * @param typeName human-readable name of `A`, used in error messages
   */
  final def circeYamlConfigDecoder[A](
    typeName: String
  )(implicit decoder: Decoder[A]): ConfigDecoder[String, A] =
    ConfigDecoder[String].mapEither { (key, value) =>
      // Error for a value that parsed as YAML but failed to decode to `A`.
      def decodeError(json: Json, decodingFailure: DecodingFailure): ConfigError = {
        // `valueShown`/`decodingFailureMessage` are None for the redacted variant.
        def message(valueShown: Option[String], decodingFailureMessage: Option[String]): String = {
          def trailingDecodingFailureMessage =
            decodingFailureMessage match {
              case Some(message) => s": $message"
              case None => ""
            }
          (key, valueShown) match {
            case (Some(key), Some(value)) =>
              s"${key.description.capitalize} with json $value cannot be decoded to $typeName$trailingDecodingFailureMessage"
            case (Some(key), None) =>
              s"${key.description.capitalize} cannot be decoded to $typeName$trailingDecodingFailureMessage"
            case (None, Some(value)) =>
              s"Unable to decode json $value to $typeName$trailingDecodingFailureMessage"
            case (None, None) =>
              s"Unable to decode json to $typeName$trailingDecodingFailureMessage"
          }
        }
        ConfigError.sensitive(
          message = message(Some(json.noSpaces), Some(decodingFailure.getMessage)),
          redactedMessage = message(None, None)
        )
      }
      // Error for a value that is not valid YAML at all.
      def parseError(parsingFailure: ParsingFailure): ConfigError = {
        def message(valueShown: Option[String], parsingFailureMessage: Option[String]): String = {
          def trailingParsingFailureMessage =
            parsingFailureMessage match {
              case Some(message) => s": $message"
              case None => ""
            }
          (key, valueShown) match {
            case (Some(key), Some(value)) =>
              s"${key.description.capitalize} with value $value cannot be parsed as json$trailingParsingFailureMessage"
            case (Some(key), None) =>
              s"${key.description.capitalize} cannot be parsed as json$trailingParsingFailureMessage"
            case (None, Some(value)) =>
              s"Unable to parse value $value as json$trailingParsingFailureMessage"
            case (None, None) =>
              s"Unable to parse value as json$trailingParsingFailureMessage"
          }
        }
        ConfigError.sensitive(
          message = message(Some(value), Some(parsingFailure.getMessage)),
          redactedMessage = message(None, None)
        )
      }
      for {
        json <- parse(value).leftMap(parseError)
        a <- json.as[A].leftMap(decodeError(json, _))
      } yield a
    }

  /** Decoder that parses a configuration value as YAML into raw [[Json]]. */
  implicit final val yamlConfigDecoder: ConfigDecoder[String, Json] =
    circeYamlConfigDecoder("Yaml")
}
| vlovgr/ciris | modules/circe-yaml/src/main/scala/ciris/circe/yaml/yaml.scala | Scala | mit | 3,045 |
package sbtbuildinfo
/**
 * Renders build information as a Scala `case object`.
 *
 * @param options build-info options controlling traits, constant folding, imports, etc.
 * @param pkg     package the generated object is placed in
 * @param obj     name of the generated object
 */
case class ScalaCaseObjectRenderer(options: Seq[BuildInfoOption], pkg: String, obj: String) extends ScalaRenderer {

  override def fileType = BuildInfoType.Source
  override def extension = "scala"

  // Extra traits the generated object should extend (from BuildInfoOption.Traits).
  val traitNames = options.collect{case BuildInfoOption.Traits(ts @ _*) => ts}.flatten
  val objTraits = if (traitNames.isEmpty) "" else " extends " ++ traitNames.mkString(" with ")

  // When set, constant-typed values are emitted as `final val` literals so the
  // compiler can constant-fold them.
  val constantValue = options.contains(BuildInfoOption.ConstantValue)

  // It is safe to add `import scala.Predef` even though we need to keep `-Ywarn-unused-import` in mind
  // because we always generate code that has a reference to `String`. If the "base" generated code were to be
  // changed and no longer contain a reference to `String`, we would need to remove `import scala.Predef` and
  // fully qualify every reference. Note it is NOT safe to use `import scala._` because of the possibility of
  // the project using `-Ywarn-unused-import` because we do not always generated references that are part of
  // `scala` such as `scala.Option`.
  val importScalaPredef = options.contains(BuildInfoOption.ImportScalaPredef)

  // Lines emitted before the object definition (coverage marker + package).
  def header = List(
    "// $COVERAGE-OFF$",
    s"package $pkg",
    ""
  )

  val imports = if (importScalaPredef) List(
    "import scala.Predef._",
    ""
  ) else Nil

  val objectHeader = List(
    s"/** This object was generated by sbt-buildinfo. */",
    withPkgPriv(s"case object $obj$objTraits {")
  )

  def footer = List("}", "// $COVERAGE-ON$")

  // Assembles the full generated source for the given key/value results.
  override def renderKeys(buildInfoResults: Seq[BuildInfoResult]) =
    header ++ imports ++ objectHeader ++
    buildInfoResults.flatMap(line) ++
    Seq(toStringLines(buildInfoResults)) ++
    toMapLines(buildInfoResults) ++
    toJsonLines ++
    footer

  // Types eligible for `final val` constant folding when ConstantValue is enabled.
  private val constantTypes = Set("scala.Int", "scala.Long", "scala.Double", "scala.Boolean", "scala.Symbol", "String")

  // Renders one `val` (plus its Scaladoc) for a single build-info entry.
  private def line(result: BuildInfoResult): Seq[String] = {
    import result._
    // Omit the type ascription (and mark `final`) for constant-foldable values.
    val (typeDecl, modifier) =
      getType(result.typeExpr) match {
        case Some(tp) if !constantValue || !constantTypes(tp) =>
          (s": $tp", "")
        case _ if constantValue =>
          ("", "final ")
        case _ =>
          ("", "")
      }
    List(
      s" /** The value is ${quote(value)}. */",
      s" ${modifier}val $identifier$typeDecl = ${quote(value)}"
    )
  }

  // Renders a `toString` override listing every key/value pair.
  def toStringLines(results: Seq[BuildInfoResult]): String = {
    val idents = results.map(_.identifier)
    val fmt = idents.map("%s: %%s" format _).mkString(", ")
    val vars = idents.mkString(", ")
    s""" override val toString: String = {
| "$fmt".format(
| $vars
| )
| }""".stripMargin
  }
}
| sbt/sbt-buildinfo | src/main/scala/sbtbuildinfo/ScalaCaseObjectRenderer.scala | Scala | mit | 2,700 |
//汇总统计_建模剖析1
//一、数学理论
//二、建模实例
package org.apache.spark.mllib_analysis.statistic
import org.apache.spark.{SparkConf,SparkContext}
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.stat.{MultivariateStatisticalSummary, Statistics}
/**
 * Demonstrates MLlib column summary statistics (`Statistics.colStats`)
 * on a tab-separated file of numeric rows.
 */
object Summary_statistics extends App{
  // FIX: the file only imports mllib.linalg.Vector, but `Vectors.dense` below
  // needs the `Vectors` factory object, so bring it into scope locally.
  import org.apache.spark.mllib.linalg.Vectors

  val conf = new SparkConf().setAppName("Spark_Lr").setMaster("local")
  val sc = new SparkContext(conf)

  // Load the data and convert it to an RDD of dense vectors.
  val observations_path = "C:/my_install/spark/data/mllib2/sample_stat.txt"
  val observations = sc.textFile(observations_path).map(_.split("\\t")).map(f => f.map(f => f.toDouble))
  val observations1 = observations.map(f => Vectors.dense(f))

  // Compute the per-column statistics.
  val summary: MultivariateStatisticalSummary = Statistics.colStats(observations1) // colStats is the entry point
  println(summary.max)          // per-column maxima
  println(summary.min)          // per-column minima
  println(summary.mean)         // per-column means
  println(summary.variance)     // column-wise variance
  println(summary.numNonzeros)  // number of nonzeros in each column
  println(summary.normL1)       // L1 norm of each column
  println(summary.normL2)       // L2 norm of each column
}
// Part 3: walk-through of the underlying MLlib source (excerpted for reference).
// 1. The colStats method
def colStats(X: RDD[Vector]): MultivariateStatisticalSummary = {
  new RowMatrix(X).computeColumnSummaryStatistics()
} // Wraps the RDD in a RowMatrix and computes the statistics via computeColumnSummaryStatistics.
def computeColumnSummaryStatistics(): MultivariateStatisticalSummary = {
  val summary = rows.treeAggregate(new MultivariateOnlineSummarizer)(
    (aggregator, data) => aggregator.add(data),
    (aggregator1, aggregator2) => aggregator1.merge(aggregator2))
  updateNumRows(summary.count)
  summary
}
// treeAggregate is an aggregation method that iterates over the rows of the RDD.
// (aggregator, data) => aggregator.add(data) folds each row into a MultivariateOnlineSummarizer;
// (aggregator1, aggregator2) => aggregator1.merge(aggregator2) merges summarizers across partitions.
// The key pieces are the add and merge methods, both defined in MultivariateOnlineSummarizer, shown below.
// 2. The add method
// Uses an online (streaming) algorithm to update the mean and variance per column.
@Since("1.1.0")
def add(sample: Vector): this.type = add(sample, 1.0)
private[spark] def add(instance: Vector, weight: Double): this.type = {
  if (weight == 0.0) return this
  // Lazily allocate the per-column accumulators on the first sample.
  if (n == 0) {
    n = instance.size
    currMean = Array.ofDim[Double](n)
    currM2n = Array.ofDim[Double](n)
    currM2 = Array.ofDim[Double](n)
    currL1 = Array.ofDim[Double](n)
    nnz = Array.ofDim[Double](n)
    currMax = Array.fill[Double](n)(Double.MinValue)
    currMin = Array.fill[Double](n)(Double.MaxValue)
  }
  val localCurrMean = currMean
  val localCurrM2n = currM2n
  val localCurrM2 = currM2
  val localCurrL1 = currL1
  val localNnz = nnz
  val localCurrMax = currMax
  val localCurrMin = currMin
  // Only non-zero entries update the accumulators (sparse-friendly).
  instance.foreachActive { (index, value) =>
    if (value != 0.0) {
      if (localCurrMax(index) < value) {
        localCurrMax(index) = value
      }
      if (localCurrMin(index) > value) {
        localCurrMin(index) = value
      }
      // Welford-style online update of mean and second moment.
      val prevMean = localCurrMean(index)
      val diff = value - prevMean
      localCurrMean(index) = prevMean + weight * diff / (localNnz(index) + weight)
      localCurrM2n(index) += weight * (value - localCurrMean(index)) * diff
      localCurrM2(index) += weight * value * value
      localCurrL1(index) += weight * math.abs(value)
      localNnz(index) += weight
    }
  }
  weightSum += weight
  weightSquareSum += weight * weight
  totalCnt += 1
  this
}
// 3. The merge method
// merge is comparatively simple: it combines the statistics of two
// MultivariateOnlineSummarizer instances (one per partition).
def merge(other: MultivariateOnlineSummarizer): this.type = {
  if (this.weightSum != 0.0 && other.weightSum != 0.0) {
    totalCnt += other.totalCnt
    weightSum += other.weightSum
    weightSquareSum += other.weightSquareSum
    var i = 0
    while (i < n) {
      val thisNnz = nnz(i)
      val otherNnz = other.nnz(i)
      val totalNnz = thisNnz + otherNnz
      if (totalNnz != 0.0) {
        val deltaMean = other.currMean(i) - currMean(i)
        // merge mean together
        currMean(i) += deltaMean * otherNnz / totalNnz
        // merge m2n together -- not a plain sum: a cross term for the differing means is needed
        currM2n(i) += other.currM2n(i) + deltaMean * deltaMean * thisNnz * otherNnz / totalNnz
        // merge m2 together
        currM2(i) += other.currM2(i)
        // merge l1 together
        currL1(i) += other.currL1(i)
        // merge max and min
        currMax(i) = math.max(currMax(i), other.currMax(i))
        currMin(i) = math.min(currMin(i), other.currMin(i))
      }
      nnz(i) = totalNnz
      i += 1
    }
  } else if (weightSum == 0.0 && other.weightSum != 0.0) {
    // This summarizer is empty: simply copy the other one's state.
    this.n = other.n
    this.currMean = other.currMean.clone()
    this.currM2n = other.currM2n.clone()
    this.currM2 = other.currM2.clone()
    this.currL1 = other.currL1.clone()
    this.totalCnt = other.totalCnt
    this.weightSum = other.weightSum
    this.weightSquareSum = other.weightSquareSum
    this.nnz = other.nnz.clone()
    this.currMax = other.currMax.clone()
    this.currMin = other.currMin.clone()
  }
  this
}
// 4. The actual sample mean and sample variance
// The parallel online algorithm is a special case; the sample variance uses an
// unbiased estimate.  The true sample mean and variance are computed below.
override def mean: Vector = {
  val realMean = Array.ofDim[Double](n)
  var i = 0
  while (i < n) {
    // currMean only averaged the non-zero entries; rescale by nnz / weightSum
    // to account for the implicit zeros.
    realMean(i) = currMean(i) * (nnz(i) / weightSum)
    i += 1
  }
  Vectors.dense(realMean)
}
override def variance: Vector = {
  val realVariance = Array.ofDim[Double](n)
  // Denominator of the unbiased (weighted) sample variance.
  val denominator = weightSum - (weightSquareSum / weightSum)
  // Sample variance is computed, if the denominator is less than 0, the variance is just 0.
  if (denominator > 0.0) {
    val deltaMean = currMean
    var i = 0
    val len = currM2n.length
    while (i < len) {
      // Correct currM2n for the implicit zero entries before normalizing.
      realVariance(i) = (currM2n(i) + deltaMean(i) * deltaMean(i) * nnz(i) *
        (weightSum - nnz(i)) / weightSum) / denominator
      i += 1
    }
  }
  Vectors.dense(realVariance)
}
| xieguobin/Spark_2.0.0_cn1 | ds_analysis/statistics/base_stat/汇总统计_建模剖析1.scala | Scala | apache-2.0 | 6,857 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @builder scalation.linalgebra.bld.BldBidMatrix
* @version 1.3
* @date Mon May 19 15:52:24 EDT 2014
* @see LICENSE (MIT style license file).
*/
package scalation.linalgebra
import scala.io.Source.fromFile
import scala.math.{abs => ABS}
import scalation.math.{long_exp, oneIf}
import scalation.util.Error
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `BidMatrixL` class stores and operates on square (upper) bidiagonal matrices.
 *  The elements are of type of `Long`.  A matrix is stored as two vectors:
 *  the diagonal vector and the sup-diagonal vector.
 *  @param d1  the first/row dimension (square => d2 = d1)
 */
class BidMatrixL (val d1: Int)
      extends MatriL with Error with Serializable
{
    /** Dimension 1
     */
    lazy val dim1 = d1

    /** Dimension 2 (same as dim1 since the matrix is square)
     */
    lazy val dim2 = d1

    /** Size of the sup-diagonal (one less than the dimension)
     */
    private val n = d1 - 1

    /** Range for the diagonal (0 until d1)
     */
    private val range_d = 0 until d1

    /** Range for the sup-diagonal (0 until d1 - 1)
     */
    private val range_s = 0 until n

    /** Diagonal of the matrix ('d1' stored elements)
     */
    private var _dg: VectorL = new VectorL (d1)

    /** Sup-diagonal of the matrix ('d1 - 1' stored elements, above the diagonal)
     */
    private var _sd: VectorL = new VectorL (n)
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Construct a bidiagonal matrix with the given diagonal and sup-diagonal.
     *  NOTE(review): 'v2' is assumed to have dimension 'v1.dim - 1'; this is
     *  not checked here -- a shorter 'v2' would fail inside the copy loop.
     *  @param v1  the diagonal vector
     *  @param v2  the sup-diagonal vector
     */
    def this (v1: VectoL, v2: VectoL)
    {
        this (v1.dim)
        for (i <- range_d) _dg(i) = v1(i)
        for (i <- range_s) _sd(i) = v2(i)
    } // constructor
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Construct a bidiagonal matrix from the given matrix, copying only its
     *  diagonal and sup-diagonal entries (all other entries of 'b' are ignored).
     *  @param b  the matrix of values to assign
     */
    def this (b: MatriL)
    {
        this (b.dim1)
        for (i <- range_d) _dg(i) = b(i, i)
        for (i <- range_s) _sd(i) = b(i, i+1)
    } // constructor
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Create a clone of 'this' m-by-n matrix.  The auxiliary constructor
     *  copies the vectors element-wise, so the clone shares no state.
     */
    def copy (): BidMatrixL = new BidMatrixL (_dg, _sd)
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Create an m-by-n matrix with all elements initialized to zero.
     *  NOTE(review): 'n' is ignored -- bidiagonal matrices are square, so only
     *  'm' determines the result's dimensions.
     *  @param m  the number of rows
     *  @param n  the number of columns
     */
    def zero (m: Int = dim1, n: Int = dim2): BidMatrixL = new BidMatrixL (m)
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Get the diagonal of 'this' bidiagonal matrix (the backing vector, not a copy).
     */
    def dg: VectorL = _dg
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Set the diagonal of 'this' bidiagonal matrix.
     *  NOTE(review): 'v.dim' should equal 'd1'; this is not checked here.
     *  @param v  the vector to assign to the diagonal
     */
    def dg_ (v: VectorL) { _dg = v }
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Get the sup-diagonal of this bidiagonal matrix (the backing vector, not a copy).
     */
    def sd: VectorL = _sd
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Set the sup-diagonal of 'this' bidiagonal matrix.
     *  NOTE(review): 'v.dim' should equal 'd1 - 1'; this is not checked here.
     *  @param v  the vector to assign to the sup-diagonal
     */
    def sd_ (v: VectorL) { _sd = v }
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Get 'this' bidiagonal matrix's element at the 'i,j'-th index position.
     *  Throws for any position off the two stored diagonals (unlike 'at',
     *  which returns zero there).
     *  @param i  the row index
     *  @param j  the column index
     */
    def apply (i: Int, j: Int): Long =
    {
        if      (i == j)     _dg(i)       // on diagonal
        else if (i + 1 == j) _sd(i)       // on sup-diagonal (above diagonal)
        else throw new Exception ("BidMatrixL.apply: element not on diagonals")
    } // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get 'this' bidiagonal matrix's element at the 'i,j'-th index position,
* returning 0, if off bidiagonal.
* @param i the row index
* @param j the column index
*/
def at (i: Int, j: Int): Long =
{
if (i < 0 || j < 0 || i >= d1 || j >= d1) 0l
else if (i == j) _dg(i) // on diagonal
else if (i + 1 == j) _sd(i) // on sup-diagonal (above diagonal)
else 0l
} // at
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Get 'this' bidiagonal matrix's vector at the 'i'-th index position ('i'-th row).
     *  NOTE(review): for i > 0 this also fills position i-1 with _sd(i-1), which
     *  is row i-1's sup-diagonal entry (i.e., element (i-1, i)), making the
     *  result symmetric-looking rather than the strict upper-bidiagonal row.
     *  @param i  the row index
     */
    def apply (i: Int): VectorL =
    {
        val u = new VectorL (d1)
        u(i) = _dg(i)
        if (i > 0) u(i-1) = _sd(i-1)
        if (i < n) u(i+1) = _sd(i)
        u
    } // apply
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Get a slice 'this' bidiagonal matrix row-wise on range 'ir' and column-wise
     *  on range 'jr'.  Only equal ranges are supported (squareness), and the
     *  column range is then implied by the row range.
     *  Ex: b = a(2..4, 3..5)
     *  @param ir  the row range
     *  @param jr  the column range
     */
    def apply (ir: Range, jr: Range): BidMatrixL =
    {
        if (ir != jr) flaw ("apply", "requires same ranges to maintain squareness")
        slice (ir.start, ir.end)
    } // apply
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Set 'this' bidiagonal matrix's element at the 'i,j'-th index position to
     *  the scalar 'x'.
     *  NOTE(review): the 'i == j + 1' case accepts a sub-diagonal position and
     *  stores it at _sd(j), i.e., at the mirrored sup-diagonal slot (j, j+1) --
     *  confirm this transposed-index tolerance is intended.
     *  @param i  the row index
     *  @param j  the column index
     *  @param x  the scalar value to assign
     */
    def update (i: Int, j: Int, x: Long)
    {
        if      (i == j)     _dg(i) = x
        else if (i == j + 1) _sd(j) = x
        else if (i + 1 == j) _sd(i) = x
        else flaw ("update", "element not on bidiagonal")
    } // update
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Set 'this' bidiagonal matrix's row at the 'i'-th index position to the
     *  vector 'u' (mirror of 'apply (i)': positions i-1, i and i+1 of 'u' are read).
     *  @param i  the row index
     *  @param u  the vector value to assign
     */
    def update (i: Int, u: VectoL)
    {
        _dg(i) = u(i)
        if (i > 0) _sd(i-1) = u(i-1)
        if (i < n) _sd(i)   = u(i+1)
    } // update
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Set a slice 'this' bidiagonal matrix row-wise on range 'ir' and column-wise
     *  on range 'jr'.  Only equal ranges and a `BidMatrixL` source are supported.
     *  Ex: a(2..4, 3..5) = b
     *  @param ir  the row range
     *  @param jr  the column range
     *  @param b   the matrix to assign
     */
    def update (ir: Range, jr: Range, b: MatriL)
    {
        if (ir != jr) flaw ("update", "requires same ranges to maintain squareness")
        if (b.isInstanceOf [BidMatrixL]) {
            val bb = b.asInstanceOf [BidMatrixL]
            for (i <- ir) {
                _dg(i) = bb.dg(i - ir.start)
                if (i > ir.start) _sd(i-1) = bb.sd(i - ir.start - 1)
            } // for
        } else {
            flaw ("update", "must convert b to a BidMatrixL first")
        } // if
    } // update
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set all the elements in 'this' bidiagonal matrix to the scalar 'x'.
* @param x the scalar value to assign
*/
def set (x: Long)
{
    // Assign 'x' to every stored element: all d1 diagonal entries and all
    // n = d1-1 super-diagonal entries.
    for (i <- range1) {
        _dg(i) = x
        if (i > 0) _sd(i-1) = x    // fix: _sd has only n = d1-1 slots; _sd(i) overflowed at i = d1-1 and _sd(0) was never set
    } // for
} // set
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set all the values in 'this' bidiagonal matrix as copies of the values in 2D array u.
 *  Unsupported: arbitrary 2D data cannot fit the fixed bidiagonal storage.
 *  @param u  the 2D array of values to assign
 */
def set (u: Array [Array [Long]])
{
throw new NoSuchMethodException ("values for BidMatrixL should be diagonal")
} // set
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set 'this' bidiagonal matrix's 'i'th row starting at column 'j' to the
 *  vector 'u'.
 *  NOTE(review): '_sd(i-1) = u(i+1)' looks misindexed — the super-diagonal entry
 *  of row i lives at _sd(i) (column i+1), not _sd(i-1).  TODO confirm against
 *  the intended storage convention before use.
 *  @param i  the row index
 *  @param u  the vector value to assign
 *  @param j  the starting column index
 */
def set (i: Int, u: VectoL, j: Int = 0)
{
if (i >= j) _dg(i) = u(i)
if (i-1 >= j) _sd(i-1) = u(i+1)       // see NOTE above — suspicious index pairing
} // set
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert 'this' `BidMatrixL` into a `BidMatrixI` by narrowing both stored
 *  vectors from Long to Int (values outside Int range will be truncated).
 */
def toInt: BidMatrixI = new BidMatrixI (_dg.toInt, _sd.toInt)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert 'this' bidiagonal matrix to a dense matrix.
 */
def toDense: MatrixL =
{
    val c = new MatrixL (dim1, dim1)
    for (i <- range1) {
        c(i, i) = _dg(i)
        if (i > 0) c(i-1, i) = _sd(i-1)    // fix: _sd is the SUPER-diagonal (see 'at'), so it belongs above the main diagonal
    } // for
    c
} // toDense
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Slice 'this' bidiagonal matrix row-wise 'from' to 'end'.
* @param from the start row of the slice (inclusive)
* @param end the end row of the slice (exclusive)
*/
def slice (from: Int, end: Int): BidMatrixL =
{
    // Copy the [from, end) principal sub-matrix: its diagonal comes from
    // _dg(from..end-1) and its super-diagonal from _sd(from..end-2).
    val sz = end - from
    val c  = new BidMatrixL (sz)
    for (k <- 0 until sz) {
        c._dg(k) = _dg(from + k)
        if (k > 0) c._sd(k-1) = _sd(from + k - 1)
    } // for
    c
} // slice
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Slice 'this' bidiagonal matrix column-wise 'from' to 'end'.
* @param from the start column of the slice (inclusive)
* @param end the end column of the slice (exclusive)
*/
def sliceCol (from: Int, end: Int): BidMatrixL =
{
    // Since the matrix is square and banded, a column slice copies the same
    // principal sub-matrix as the row slice.
    val sz = end - from
    val c  = new BidMatrixL (sz)
    for (k <- 0 until sz) {
        c._dg(k) = _dg(from + k)
        if (k > 0) c._sd(k-1) = _sd(from + k - 1)
    } // for
    c
} // sliceCol
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Slice 'this' bidiagonal matrix row-wise 'r_from' to 'r_end' and column-wise
 *  'c_from' to 'c_end'.  Unsupported: a rectangular slice would break squareness.
 *  @param r_from  the start of the row slice
 *  @param r_end   the end of the row slice
 *  @param c_from  the start of the column slice
 *  @param c_end   the end of the column slice
 */
def slice (r_from: Int, r_end: Int, c_from: Int, c_end: Int): BidMatrixL =
{
throw new NoSuchMethodException ("BidMatrixL must be square")
} // slice
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Slice 'this' bidiagonal matrix excluding the given 'row' and 'col'umn.
 *  Unsupported: the result would generally not be bidiagonal.
 *  @param row  the row to exclude
 *  @param col  the column to exclude
 */
def sliceExclude (row: Int, col: Int): BidMatrixL =
{
throw new NoSuchMethodException ("BidMatrixL does not support sliceExclude")
} // sliceExclude
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Select rows from 'this' bidiagonal matrix according to the given index/basis.
 *  Unsupported: an arbitrary row selection would not be bidiagonal.
 *  @param rowIndex  the row index positions (e.g., (0, 2, 5))
 */
def selectRows (rowIndex: Array [Int]): BidMatrixL =
{
throw new NoSuchMethodException ("BidMatrixL does not support selectRows")
} // selectRows
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get column 'col' from 'this' bidiagonal matrix, returning it as a vector.
* @param col the column to extract from the matrix
* @param from the position to start extracting from
*/
def col (col: Int, from: Int = 0): VectorL =
{
    // Only rows col-1 (super-diagonal) and col (diagonal) can hold non-zero
    // entries of this column; everything else stays at the default zero.
    val u  = new VectorL (d1 - from)
    val lo = from max (col - 1)
    val hi = d1 min (col + 2)
    for (i <- lo until hi) u(i - from) = this(i, col)
    u
} // col
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set column 'col' of 'this' bidiagonal matrix to a vector.  Writes the two
 *  storable positions of the column: the diagonal entry (row col) and the
 *  super-diagonal entry (row col-1), consistent with 'at'.
 *  @param col  the column to set
 *  @param u    the vector to assign to the column
 */
def setCol (col: Int, u: VectoL)
{
_dg(col) = u(col)
if (col > 0) _sd(col-1) = u(col-1)    // super-diagonal entry of column col sits at row col-1
} // setCol
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Select columns from 'this' bidiagonal matrix according to the given index/basis.
 *  Ex: Can be used to divide a matrix into a basis and a non-basis.
 *  Unsupported: an arbitrary column selection would not be bidiagonal.
 *  @param colIndex  the column index positions (e.g., (0, 2, 5))
 */
def selectCols (colIndex: Array [Int]): BidMatrixL =
{
throw new NoSuchMethodException ("BidMatrixL does not support selectCols")
} // selectCols
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
// The following shape-changing operations would break the fixed square,
// upper-bidiagonal storage, so each one deliberately throws.
/** Transpose 'this' bidiagonal matrix (rows => columns).
 *  Unsupported: the transpose is LOWER bidiagonal and cannot be stored here.
 */
def t: BidMatrixL =
{
throw new NoSuchMethodException ("BidMatrixL does not support transpose")
} // t
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate (row) vector 'u' and 'this' matrix, i.e., prepend 'u' to 'this'.
 *  @param u  the vector to be prepended as the new first row in new matrix
 */
def +: (u: VectoL): BidMatrixL =
{
throw new NoSuchMethodException ("BidMatrixL does not support +:")
} // +:
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate (column) vector 'u' and 'this' matrix, i.e., prepend 'u' to 'this'.
 *  @param u  the vector to be prepended as the new first column in new matrix
 */
def +^: (u: VectoL): BidMatrixL =
{
throw new NoSuchMethodException ("BidMatrixL does not support +^:")
} // +^:
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate 'this' matrix and (row) vector 'u', i.e., append 'u' to 'this'.
 *  @param u  the vector to be appended as the new last row in new matrix
 */
def :+ (u: VectoL): BidMatrixL =
{
throw new NoSuchMethodException ("BidMatrixL does not support :+")
} // :+
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate 'this' matrix and (column) vector 'u', i.e., append 'u' to 'this'.
 *  @param u  the vector to be appended as the new last column in new matrix
 */
def :^+ (u: VectoL): BidMatrixL =
{
throw new NoSuchMethodException ("BidMatrixL does not support :^+")
} // :^+
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate (row-wise) 'this' matrix and matrix 'b'.
 *  @param b  the matrix to be concatenated as the new last rows in new matrix
 */
def ++ (b: MatriL): BidMatrixL =
{
throw new NoSuchMethodException ("BidMatrixL does not support ++")
} // ++
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate (column-wise) 'this' matrix and matrix 'b'.
 *  @param b  the matrix to be concatenated as the new last columns in new matrix
 */
def ++^ (b: MatriL): BidMatrixL =
{
throw new NoSuchMethodException ("BidMatrixL does not support ++^")
} // ++^
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' bidiagonal matrix and matrix 'b'.
 *  NOTE(review): 'b' is cast via asInstanceOf BEFORE any type check, so passing
 *  a non-bidiagonal matrix raises ClassCastException rather than 'flaw'; on a
 *  dimension mismatch, 'flaw' is reported and null is returned.
 *  @param b  the matrix to add (requires 'leDimensions')
 */
def + (b: MatriL): BidMatrixL =
{
val bid = b.asInstanceOf [BidMatrixL]
if (d1 == bid.d1) {
new BidMatrixL (_dg + bid.dg, _sd + bid.sd)
} else {
flaw ("+", "matrix b has the wrong dimensions")
null
} // if
} // +
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' bidiagonal matrix and (row) vector u.
 *  Unsupported: adding a full vector would destroy the bidiagonal structure.
 *  @param u  the vector to add
 */
def + (u: VectoL): BidMatrixL =
{
throw new NoSuchMethodException ("BidMatrixL does not support + with VectoL")
} // +
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' bidiagonal matrix and scalar 'x'.  Note: only the STORED
 *  (diagonal and super-diagonal) elements are shifted by 'x'.
 *  @param x  the scalar to add
 */
def + (x: Long): BidMatrixL =
{
new BidMatrixL (_dg + x, _sd + x)
} // +
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add in-place 'this' bidiagonal matrix and matrix 'b'.
 *  NOTE(review): same asInstanceOf-before-check caveat as '+'.
 *  @param b  the matrix to add (requires 'leDimensions')
 */
def += (b: MatriL): BidMatrixL =
{
val bid = b.asInstanceOf [BidMatrixL]
if (d1 == bid.d1) {
_dg += bid.dg
_sd += bid.sd
} else {
flaw ("+=", "matrix b has the wrong dimensions")
} // if
this
} // +=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add in-place 'this' bidiagonal matrix and (row) vector 'u'.
 *  Unsupported: see '+' with VectoL.
 *  @param u  the vector to add
 */
def += (u: VectoL): MatrixL =
{
throw new NoSuchMethodException ("BidMatrixL does not support += with VectoL")
} // +=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add in-place 'this' bidiagonal matrix and scalar 'x' (stored elements only).
 *  @param x  the scalar to add
 */
def += (x: Long): BidMatrixL =
{
_dg += x; _sd += x; this
} // +=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' bidiagonal matrix subtract matrix 'b'.
 *  NOTE(review): same asInstanceOf-before-check and null-on-mismatch caveats as '+'.
 *  @param b  the matrix to subtract (requires 'leDimensions')
 */
def - (b: MatriL): BidMatrixL =
{
val bid = b.asInstanceOf [BidMatrixL]
if (d1 == bid.d1) {
new BidMatrixL (_dg - bid.dg, _sd - bid.sd)
} else {
flaw ("-", "matrix b has the wrong dimensions")
null
} // if
} // -
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' bidiagonal matrix subtract (row) vector 'u'.
 *  Unsupported: see '+' with VectoL.
 *  @param u  the vector to subtract
 */
def - (u: VectoL): BidMatrixL =
{
throw new NoSuchMethodException ("BidMatrixL does not support - with VectoL")
} // -
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' bidiagonal matrix subtract scalar 'x' (stored elements only).
 *  @param x  the scalar to subtract
 */
def - (x: Long): BidMatrixL =
{
new BidMatrixL (_dg - x, _sd - x)
} // -
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' bidiagonal bidiagonal matrix subtract in-place matrix 'b'.
 *  @param b  the matrix to subtract (requires 'leDimensions')
 */
def -= (b: MatriL): BidMatrixL =
{
val bid = b.asInstanceOf [BidMatrixL]
if (d1 == bid.d1) {
_dg -= bid.dg
_sd -= bid.sd
} else {
flaw ("-=", "matrix b has the wrong dimensions")
} // if
this
} // -=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' bidiagonal matrix subtract in-place (row) vector 'u'.
 *  Unsupported: see '+' with VectoL.
 *  @param u  the vector to subtract
 */
def -= (u: VectoL): BidMatrixL =
{
throw new NoSuchMethodException ("BidMatrixL does not support -= with VectoL")
} // -=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' bidiagonal matrix subtract in-place scalar 'x' (stored elements only).
 *  @param x  the scalar to subtract
 */
def -= (x: Long): BidMatrixL =
{
_dg -= x; _sd -= x; this
} // -=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' bidiagonal matrix by matrix 'b'.
 *  Unsupported for general matrices: the product is not bidiagonal.
 *  @param b  the matrix to multiply by
 */
def * (b: MatriL): BidMatrixL =
{
throw new NoSuchMethodException ("BidMatrixL does not support * with general matrices")
} // *
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' bidiagonal matrix by matrix 'b'.  Requires 'b' to have
 *  type `BidMatrixL`, but returns a more general type of matrix (the product
 *  of two bidiagonal matrices has band-width 3, so a dense result is used).
 *  @param b  the matrix to multiply by
 */
def * (b: BidMatrixL): MatrixL =
{
val c = new MatrixL (d1)
for (i <- 0 until d1; j <- (i-2 max 0) to (i+2 min n)) {    // only the band |i-j| <= 2 can be non-zero
var sum = 0l
val k1 = ((i min j) - 1) max 0
val k2 = ((i max j) + 1) min n
for (k <- k1 to k2) sum += at(i, k) * b.at(k, j)            // 'at' returns 0 off the stored band
c(i, j) = sum
} // for
c
} // *
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' bidiagonal matrix by vector 'u'.
 *  @param u  the vector to multiply by
 */
def * (u: VectoL): VectorL =
{
val c = new VectorL (d1)
for (i <- 0 until n) c(i) = _dg(i) * u(i) + _sd(i) * u(i+1)    // row i: diagonal + super-diagonal terms
c(n) = _dg(d1-1) * u(d1-1)                                     // last row has no super-diagonal term
c
} // *
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' bidiagonal matrix by scalar 'x'.
 *  @param x  the scalar to multiply by
 */
def * (x: Long): BidMatrixL =
{
new BidMatrixL (_dg * x, _sd * x)
} // *
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' bidiagonal matrix by matrix 'b'.
 *  Unsupported: the result is generally not bidiagonal.
 *  @param b  the matrix to multiply by
 */
def *= (b: MatriL): BidMatrixL =
{
throw new NoSuchMethodException ("inplace matrix multiplication not implemented")
} // *=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' bidiagonal matrix by scalar 'x'.
 *  @param x  the scalar to multiply by
 */
def *= (x: Long): BidMatrixL =
{
_dg *= x; _sd *= x; this
} // *=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the dot product of 'this' matrix and vector 'u', by conceptually
 *  transposing 'this' matrix and then multiplying by 'u' (i.e., 'a dot u = a.t * u').
 *  Row i of a.t has _sd(i-1) at column i-1 and _dg(i) at column i.
 *  @param u  the vector to multiply by (requires same first dimensions)
 */
def dot (u: VectoL): VectorL =
{
if (dim1 != u.dim) flaw ("dot", "matrix dot vector - incompatible first dimensions")
val c = new VectorL (d1)
c(0) = _dg(0) * u(0)
for (i <- 1 until d1) c(i) = _sd(i-1) * u(i-1) + _dg(i) * u(i)
c
} // dot
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the dot product of 'this' matrix with matrix 'b' to produce a vector
 *  (component i is column i of 'this' dotted with column i of 'b').
 *  @param b  the second matrix of the dot product
 */
def dot (b: MatriL): VectorL =
{
if (dim1 != b.dim1) flaw ("dot", "matrix dot matrix - incompatible first dimensions")
val c = new VectorL (d1)
c(0) = _dg(0) * b(0, 0)
for (i <- 1 until d1) c(i) = _sd(i-1) * b(i-1, i) + _dg(i) * b(i, i)
c
} // dot
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the matrix dot product of 'this' matrix with matrix 'b' to produce
 *  a matrix ('this.t * b'); the result is tridiagonal, stored densely.
 *  @param b  the second matrix of the dot product
 */
def mdot (b: BidMatrixL): MatrixL =
{
if (dim1 != b.dim1) flaw ("mdot", "matrix mdot matrix - incompatible first dimensions")
val c = new MatrixL (dim2, b.dim2)
c(0, 0) = _dg(0) * b._dg(0)
for (i <- 1 until dim1) {
c(i, i) = _dg(i) * b._dg(i) + _sd(i-1) * b._sd(i-1)    // diagonal of the tridiagonal result
c(i-1, i) = _dg(i-1) * b._sd(i-1)                      // super-diagonal
c(i, i-1) = _sd(i-1) * b._dg(i-1)                      // sub-diagonal
} // for
c
} // mdot
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the matrix dot product of 'this' matrix with matrix 'b' to produce
 *  a matrix ('this.t * b') for a general right operand.
 *  @param b  the second matrix of the dot product
 */
def mdot (b: MatriL): MatrixL =
{
if (dim1 != b.dim1) flaw ("mdot", "matrix mdot matrix - incompatible first dimensions")
val c = new MatrixL (dim2, b.dim2)
for (j <- 0 until b.dim2) {
c(0, j) = _dg(0) * b(0, j)
for (i <- 1 until dim1) {
c(i, j) = _sd(i-1) * b(i-1, j) + _dg(i) * b(i, j)
} // for
} // for
c
} // mdot
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply 'this' bidiagonal matrix by vector 'u' to produce another matrix
 *  'a_ij * u_j'.  Unsupported.
 *  @param u  the vector to multiply by
 */
def ** (u: VectoL): BidMatrixL =
{
throw new NoSuchMethodException ("matrix * vector -> matrix not implemented")
} // **
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Multiply in-place 'this' bidiagonal matrix by vector 'u' to produce another
 *  matrix 'a_ij * u_j'.  Unsupported.
 *  @param u  the vector to multiply by
 */
def **= (u: VectoL): BidMatrixL =
{
throw new NoSuchMethodException ("inplace matrix * vector -> matrix not implemented")
} // **=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Divide 'this' bidiagonal matrix by scalar 'x' (Long division truncates).
 *  @param x  the scalar to divide by
 */
def / (x: Long): BidMatrixL =
{
new BidMatrixL (_dg / x, _sd / x)
} // /
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Divide in-place 'this' bidiagonal matrix by scalar 'x' (Long division truncates).
 *  @param x  the scalar to divide by
 */
def /= (x: Long): BidMatrixL =
{
_dg /= x; _sd /= x; this
} // /=
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Raise 'this' bidiagonal matrix to the 'p'th power (for some integer 'p' >= 2).
 *  Unsupported: powers widen the band beyond bidiagonal.
 *  @param p  the power to raise 'this' matrix to
 */
def ~^ (p: Int): BidMatrixL =
{
throw new NoSuchMethodException ("matrix power function (~^) not implemented")
} // ~^
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find the maximum element in 'this' bidiagonal matrix.
 *  @param e  the ending row index (exclusive) for the search
 */
def max (e: Int = dim1): Long =
{
    val m = _dg(0 until e).max()
    // fix: _sd holds only n = d1-1 elements, so cap the slice at n
    // (the original _sd(0 until e) overflowed for the default e = dim1)
    if ((e min n) > 0) m max _sd(0 until (e min n)).max() else m
} // max
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find the minimum element in 'this' bidiagonal matrix.
 *  @param e  the ending row index (exclusive) for the search
 */
def min (e: Int = dim1): Long =
{
    val m = _dg(0 until e).min()
    // fix: _sd holds only n = d1-1 elements, so cap the slice at n
    // (the original _sd(0 until e) overflowed for the default e = dim1)
    if ((e min n) > 0) m min _sd(0 until (e min n)).min() else m
} // min
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Solve for 'x' using back substitution in the equation 'u*x = y' where
 *  'this' matrix ('u') is upper triangular (see 'lud' above).  A bidiagonal
 *  matrix is already upper triangular, so this delegates directly to 'solve'.
 *  @param y  the constant vector
 */
def bsolve (y: VectoL): VectorL = solve (y)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Solve for 'x' in the equation 'a*x = b' where 'a' is 'this' bidiagonal matrix.
* @param b the constant vector
*/
def solve (b: VectoL): VectorL =
{
    // Back substitution: the matrix is upper bidiagonal, so each unknown
    // depends only on the diagonal entry and the unknown directly below it.
    // NOTE: Long division truncates, so exact solutions require divisibility.
    val x = new VectorL (d1)
    x(n) = b(n) / _dg(n)
    var i = n - 1
    while (i >= 0) {
        x(i) = (b(i) - _sd(i) * x(i+1)) / _dg(i)
        i   -= 1
    } // while
    x
} // solve
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Solve for 'x' in the equation 'l*u*x = b' (see 'lud' above).
 *  Delegates to the three-argument 'solve', which throws (LU is unnecessary
 *  for an already-upper-triangular matrix).
 *  @param lu  the lower and upper triangular matrices
 *  @param b   the constant vector
 */
def solve (lu: Tuple2 [MatriL, MatriL], b: VectoL): VectorL = solve (lu._1, lu._2, b)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Combine 'this' bidiagonal matrix with matrix 'b', placing them along the
* diagonal and filling in the bottom left and top right regions with zeros:
* '[this, b]'.
* @param b the matrix to combine with 'this' bidiagonal matrix
*/
def diag (b: MatriL): MatriL =
{
    val m  = d1 + b.dim1
    val nn = d1 + b.dim2                    // renamed from 'n' — the original shadowed the class field n = d1-1
    val c  = new MatrixL (m, nn)
    for (i <- 0 until d1) {
        c(i, i) = _dg(i)
        // fix: guard with d1-1 rather than the (shadowed) total width; the
        // original read _sd(d1-1), which overflows (and crashed for d1 == 1)
        if (i < d1 - 1) c(i, i+1) = _sd(i)
    } // for
    for (i <- d1 until m; j <- d1 until nn) c(i, j) = b(i-d1, j-d1)
    c
} // diag
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Form a matrix '[Ip, this, Iq]' where Ir is a 'r-by-r' identity matrix, by
 *  positioning the three matrices 'Ip', 'this' and 'Iq' along the diagonal.
 *  Fill the rest of matrix with zeros.  The result is returned as a symmetric
 *  tridiagonal matrix (its off-diagonal entries outside 'this' are zero).
 *  @param p  the size of identity matrix Ip
 *  @param q  the size of identity matrix Iq
 */
def diag (p: Int, q: Int): SymTriMatrixL =
{
val nn = d1 + p + q
val dd = new VectorL (nn)
val ss = new VectorL (nn-1)
for (i <- 0 until p) dd(i) = 1l // Ip
for (i <- 0 until d1) dd(i+p) = _dg(i) // this
for (i <- 0 until n) ss(i+p) = _sd(i) // this
for (i <- p + d1 until nn) dd(i) = 1l // Iq
new SymTriMatrixL (dd, ss)
} // diag
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get the 'k'th diagonal of 'this' bidiagonal matrix. Assumes 'dim2 >= dim1'.
 *  Only the stored diagonals exist: k = 0 (main) and k = 1 (super).
 *  @param k  how far above the main diagonal, e.g., (0, 1) for (main, super)
 */
def getDiag (k: Int = 0): VectorL =
{
if (k == 0) _dg.toDense
else if (k == 1) _sd.toDense
else { flaw ("getDiag", "nothing stored for diagonal " + k); null }    // null signals an unstored diagonal
} // getDiag
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set the 'k'th diagonal of 'this' bidiagonal matrix to the vector 'u'.
 *  Assumes 'dim2 >= dim1'.  Note: this REPLACES the backing vector (the
 *  '_dg'/'_sd' fields are vars), it does not copy element-wise.
 *  @param u  the vector to set the diagonal to
 *  @param k  how far above the main diagonal, e.g., (-1, 0, 1) for (sub, main, super)
 */
def setDiag (u: VectoL, k: Int = 0)
{
if (k == 0) _dg = u.toDense
else if (k == 1) _sd = u.toDense
else flaw ("setDiag", "nothing stored for diagonal " + k)
} // setDiag
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set the main diagonal of 'this' bidiagonal matrix to the scalar 'x'.
 *  Assumes 'dim2 >= dim1'.
 *  @param x  the scalar to set the diagonal to
 */
def setDiag (x: Long) { _dg.set (x) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Invert 'this' bidiagonal matrix.
*/
def inverse: MatriL =
{
    val d = _dg                                // diagonal
    val e = _sd                                // super-diagonal
    val b = new MatrixL (d1, d1)
    // NOTE: Long division truncates, so 1l / d(i) is exact only for d(i) = +/-1.
    for (i <- 0 until d1) b(i, i) = 1l / d(i)
    // fix: the original iterated 'i <- n to 1 by -1', which never filled the
    // off-diagonal entries of row 0; each row's entries build left-to-right.
    for (i <- 0 until d1; j <- i+1 until d1) {
        b(i, j) = -(e(j-1) / d(j)) * b(i, j-1)
    } // for
    b
} // inverse
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Clean values in 'this' bidiagonal matrix at or below the threshold by setting
 *  them to zero. Iterative algorithms give approximate values and if very close
 *  to zero, may throw off other calculations, e.g., in computing eigenvectors.
 *  @param thres     the cutoff threshold (a small value)
 *  @param relative  whether to use relative or absolute cutoff
 */
def clean (thres: Double, relative: Boolean = true): BidMatrixL =
{
val s = if (relative) mag else 1l // use matrix magnitude or 1
for (i <- range_d) if (ABS (_dg(i)) <= thres * s) _dg(i) = 0l
for (i <- range_s) if (ABS (_sd(i)) <= thres * s) _sd(i) = 0l
this                                  // mutated in place; returned for chaining
} // clean
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the (right) nullspace of 'this' 'm-by-n' matrix (requires 'n = m+1')
 *  by performing Gauss-Jordan reduction and extracting the negation of the
 *  last column augmented by 1.
 *  <p>
 *  nullspace (a) = set of orthogonal vectors v s.t. a * v = 0
 *  <p>
 *  The left nullspace of matrix 'a' is the same as the right nullspace of 'a.t'.
 *  NOTE(review): 'reduce' currently throws NoSuchMethodException, and a square
 *  BidMatrixL can never satisfy dim2 = dim1 + 1, so this method is effectively
 *  unusable as written.
 *  FIX: need a more robust algorithm for computing nullspace (@see Fac_QR.scala).
 *  FIX: remove the 'n = m+1' restriction.
 *  @see http://ocw.mit.edu/courses/mathematics/18-06sc-linear-algebra-fall-2011/ax-b-and-the-four-subspaces
 *  @see /solving-ax-0-pivot-variables-special-solutions/MIT18_06SCF11_Ses1.7sum.pdf
 */
def nullspace: VectorL =
{
if (dim2 != dim1 + 1) flaw ("nullspace", "requires n (columns) = m (rows) + 1")
reduce.col(dim2 - 1) * -1l ++ 1l
} // nullspace
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute in-place the (right) nullspace of 'this' 'm-by-n' matrix (requires 'n = m+1')
 *  by performing Gauss-Jordan reduction and extracting the negation of the
 *  last column augmented by 1.
 *  <p>
 *  nullspace (a) = set of orthogonal vectors v s.t. a * v = 0
 *  <p>
 *  The left nullspace of matrix 'a' is the same as the right nullspace of 'a.t'.
 *  NOTE(review): same caveat as 'nullspace' — 'reduce_ip' currently throws.
 *  FIX: need a more robust algorithm for computing nullspace (@see Fac_QR.scala).
 *  FIX: remove the 'n = m+1' restriction.
 *  @see http://ocw.mit.edu/courses/mathematics/18-06sc-linear-algebra-fall-2011/ax-b-and-the-four-subspaces
 *  @see /solving-ax-0-pivot-variables-special-solutions/MIT18_06SCF11_Ses1.7sum.pdf
 */
def nullspace_ip (): VectorL =
{
if (dim2 != dim1 + 1) flaw ("nullspace", "requires n (columns) = m (rows) + 1")
reduce_ip ()
col(dim2 - 1) * -1l ++ 1l
} // nullspace_ip
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the trace of 'this' bidiagonal matrix, i.e., the sum of the elements
 *  on the main diagonal. Should also equal the sum of the eigenvalues.
 *  @see Eigen.scala
 */
def trace: Long = _dg.sum
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the sum of 'this' bidiagonal matrix, i.e., the sum of its elements
 *  (only the stored diagonal and super-diagonal can be non-zero).
 */
def sum: Long = _dg.sum + _sd.sum
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the 'abs' sum of 'this' bidiagonal matrix, i.e., the sum of the absolute
 *  value of its elements. This is useful for comparing matrices '(a - b).sumAbs'.
 */
def sumAbs: Long = _dg.sumAbs + _sd.sumAbs
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the sum of the lower triangular region of 'this' bidiagonal matrix.
 *  Always zero: an upper bidiagonal matrix stores nothing below the diagonal.
 */
def sumLower: Long = 0l
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the determinant of 'this' bidiagonal matrix.  An upper bidiagonal
 *  matrix is upper triangular, so its determinant is simply the product of
 *  the main diagonal elements.
 */
def det: Long =
{
    var prod = 1l
    for (i <- range_d) prod *= _dg(i)
    prod
} // det
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Helper method retained from the symmetric tridiagonal determinant recurrence.
 *  No longer used by 'det': a bidiagonal matrix has no sub-diagonal, so the
 *  '_sd * _sd' subtraction terms do not apply here.
 *  @param nn  the current dimension
 */
private def detHelper (nn: Int): Long =
{
    if (nn == 0) _dg(0)
    else if (nn == 1) _dg(0) * _dg(1) - _sd(0) * _sd(0)
    else _dg(nn) * detHelper (nn-1) - _sd(nn-1) * _sd(nn-1) * detHelper (nn-2)    // fix: was _dg(n), correct only on the first call
} // detHelper
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the lower triangular of 'this' matrix (rest are zero).
 *  For an upper bidiagonal matrix this is just the main diagonal.
 */
def lowerT: MatrixL = { val c = new MatrixL (dim1, dim1); c.setDiag (_dg); c }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the upper triangular of 'this' matrix (rest are zero).
 *  For an upper bidiagonal matrix this is the whole matrix (diagonal + super).
 */
def upperT: MatrixL = { val c = new MatrixL (dim1, dim1); c.setDiag (_dg); c.setDiag (_sd, 1); c }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Check whether 'this' matrix is bidiagonal (has non-zero elements only in
 *  main diagonal and super-diagonal).  True by construction.
 */
override def isBidiagonal: Boolean = true
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Check whether 'this' bidiagonal matrix is nonnegative (has no negative elements).
 */
override def isNonnegative: Boolean = _dg.isNonnegative && _sd.isNonnegative
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Check whether 'this' bidiagonal matrix is rectangular (all rows have the same
 *  number of columns).  True by construction (square storage).
 */
def isRectangular: Boolean = true
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Check whether 'this' matrix is tridiagonal (has non-zero elements only in
 *  the main, sub and super diagonals).  Always false for this representation.
 */
override def isTridiagonal: Boolean = false
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert 'this' bidiagonal matrix to a string showing the diagonal
 *  vector followed by the sup-diagonal vector.
 */
override def toString: String = "\\nBidMatrixL(\\t" + _dg + ", \\n\\t\\t" + _sd + ")"
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Write 'this' matrix to a CSV-formatted text file with name 'fileName'.
 *  Currently a no-op (see FIX below).
 *  @param fileName  the name of file to hold the data
 */
def write (fileName: String)
{
// FIX - implement write method
} // write
//--------------------------------------------------------------------------
// The following methods are currently not implemented for Bidiagonal matrices:
// (a bidiagonal matrix is already upper triangular, so LU machinery is moot,
//  and reduction/inversion generally leave the bidiagonal form)
//--------------------------------------------------------------------------
def lud: Tuple2 [MatriL, MatriL] =
{
throw new NoSuchMethodException ("lud not implemented since it's already an upper matrix")
} // lud
def lud_ip (): Tuple2 [MatriL, MatriL] =
{
throw new NoSuchMethodException ("lud_ip not implemented since it's already an upper matrix")
} // lud_ip
def solve (l: MatriL, u: MatriL, b: VectoL): VectorL =
{
throw new NoSuchMethodException ("solve lu not implemented, since lud not needed")
} // solve
def inverse_ip (): BidMatrixL =
{
throw new NoSuchMethodException ("inverse_ip not implemented since result may not be BidMatrix")
} // inverse_ip
def reduce: BidMatrixL =
{
throw new NoSuchMethodException ("reduce not yet implemented")
} // reduce
def reduce_ip ()
{
throw new NoSuchMethodException ("reduce_ip not implemented since result may not be BidMatrix")
} // reduce_ip
} // BidMatrixL class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `BidMatrixL` object is the companion object for the `BidMatrixL` class.
*/
object BidMatrixL extends Error
{
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a matrix and assign values from the array of vectors 'u'.
 *  NOTE(review): the flaw message says "symmetric matrices" — apparently copied
 *  from SymTriMatrixL; it is a runtime string, so it is left unchanged here.
 *  @param u           the array of vectors to assign
 *  @param columnwise  whether the vectors are treated as column or row vectors
 */
def apply (u: Array [VectoL], columnwise: Boolean = true): BidMatrixL =
{
var x: BidMatrixL = null
val u_dim = u(0).dim
if (u_dim != u.length) flaw ("apply", "symmetric matrices must be square")
if (columnwise) {
x = new BidMatrixL (u_dim)
for (j <- 0 until u_dim) x.setCol (j, u(j)) // assign column vectors
} else {
x = new BidMatrixL (u_dim)
for (i <- 0 until u_dim) x(i) = u(i) // assign row vectors
} // if
x
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a matrix and assign values from the Scala `Vector` of vectors 'u'.
 *  Assumes vectors are column-wise.
 *  @param u  the Vector of vectors to assign
 */
def apply (u: Vector [VectoL]): BidMatrixL =
{
val u_dim = u(0).dim
val x = new BidMatrixL (u_dim)
for (j <- 0 until u.length) x.setCol (j, u(j)) // assign column vectors
x
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a matrix by reading from a text file, e.g., a CSV file.
 *  Only the bidiagonal entries of each parsed row are retained.
 *  @param fileName  the name of file holding the data
 */
def apply (fileName: String): BidMatrixL =
{
val sp = ',' // character separating the values
val lines = fromFile (fileName).getLines.toArray // get the lines from file
val (m, n) = (lines.length, lines(0).split (sp).length)
if (m != n) flaw ("apply", "symmetric matrices must be square")
val x = new BidMatrixL (m)
for (i <- 0 until m) x(i) = VectorL (lines(i).split (sp))
x
} // apply
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create an 'm-by-m' identity matrix I (ones on main diagonal, zeros elsewhere).
 *  @param m  the row and column dimensions of the matrix
 */
def eye (m: Int): BidMatrixL =
{
val c = new BidMatrixL (m)
for (i <- 0 until m) c(i, i) = 1l
c
} // eye
} // BidMatrixL object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `BidMatrixLTest` object is used to test the `BidMatrixL` class.
* > run-main scalation.linalgebra.BidMatrixLTest
*/
object BidMatrixLTest extends App
{
    // two upper-bidiagonal test matrices, a vector, and two dense matrices
    val a = new BidMatrixL (VectorL (3, 4, 5), VectorL (2, 1))
    val b = new BidMatrixL (VectorL (2, 3, 4), VectorL (5, 6))
    val v = VectorL (5, 3, 6)
    val c = new MatrixL ((3, 3), 3, 1, 0,
                                 0, 4, 2,
                                 0, 0, 5)
    val d = new MatrixL ((3, 3), 2, 5, 0,
                                 5, 3, 6,
                                 0, 6, 4)

    // exercise the basic arithmetic operations
    println (s"a = $a")
    println (s"b = $b")
    println (s"a + b = ${a + b}")
    println (s"a - b = ${a - b}")
    println (s"a * b = ${a * b}")
    println (s"a * v = ${a * v}")
    println (s"c * d = ${c * d}")
    println (s"a.det = ${a.det}")

    // exercise the linear system solver and the inverse
    val x2 = a.solve (v)
    println (s"a.solve (v) = $x2")
    println (s"a * x2 = ${a * x2}")
    println (s"a.inverse = ${a.inverse}")
    println (s"a.inverse * a = ${a.inverse * a}")
} // BidMatrixLTest object
| NBKlepp/fda | scalation_1.3/scalation_mathstat/src/main/scala/scalation/linalgebra/BidMatrixL.scala | Scala | mit | 43,579 |
package org.scalajs.core.compiler.test
import util._
import org.junit.Test
import org.scalajs.core.ir.{Trees => js, Types => jstpe}
class OptimizationTest extends JSASTTest {
/** Checks several optimizer behaviors around Scala/JS function wrappers and
 *  js.Array construction by pattern-matching on the emitted IR.
 */
@Test
def unwrapScalaFunWrapper: Unit = {
// Make sure we do not wrap and unwrap right away
"""
import scala.scalajs.js
class A {
val jsFun: js.Function = (x: Int) => x * 2
}
""".
hasNot("runtime.AnonFunction ctor") {
case js.New(jstpe.ClassType("sjsr_AnonFunction1"), _, _) =>
}
// Make sure our wrapper matcher has the right name
"""
import scala.scalajs.js
class A {
val scalaFun = (x: Int) => x * 2
val jsFun: js.Function = scalaFun
}
""".
has("runtime.AnonFunction ctor") {
case js.New(jstpe.ClassType("sjsr_AnonFunction1"), _, _) =>
}
/* Make sure js.Array(...) is optimized away completely for several kinds
* of data types.
*/
"""
import scala.scalajs.js
class VC(val x: Int) extends AnyVal
class A {
val a = js.Array(5, 7, 9, -3)
val b = js.Array("hello", "world")
val c = js.Array('a', 'b')
val d = js.Array(Nil)
val e = js.Array(new VC(151189))
}
""".
hasNot("any of the wrapArray methods") {
case js.Apply(_, js.Ident(name, _), _)
if name.startsWith("wrap") && name.endsWith("__scm_WrappedArray") =>
}
/* Make sure varargs are optimized to use js.WrappedArray instead of
* scm.WrappedArray, for various data types.
*/
"""
import scala.scalajs.js
class VC(val x: Int) extends AnyVal
class A {
val a = List(5, 7, 9, -3)
val b = List("hello", "world")
val c = List('a', 'b')
val d = List(Nil)
val e = List(new VC(151189))
}
""".
hasNot("any of the wrapArray methods") {
case js.Apply(_, js.Ident(name, _), _)
if name.startsWith("wrap") && name.endsWith("__scm_WrappedArray") =>
}
// Make sure a genuine Array-to-Seq conversion DOES go through a wrapArray method
"""
import scala.scalajs.js
class A {
val a: Seq[Int] = new Array[Int](5)
}
""".
has("one of the wrapArray methods") {
case js.Apply(_, js.Ident(name, _), _)
if name.startsWith("wrap") && name.endsWith("__scm_WrappedArray") =>
}
// Verify the optimized emitted code for 'new js.Object' and 'new js.Array'
"""
import scala.scalajs.js
class A {
val o = new js.Object
val a = new js.Array
}
""".
hasNot("any reference to the global scope") {
case js.JSLinkingInfo() =>
}
}
@Test
def switchWithoutGuards: Unit = {
"""
class Test {
def switchWithGuardsStat(x: Int, y: Int): Unit = {
x match {
case 1 => println("one")
case 2 => println("two")
case z if y > 100 => println("big " + z)
case _ => println("None of those")
}
}
}
""".hasNot("Labeled block") {
case js.Labeled(_, _, _) =>
}.has("Match node") {
case js.Match(_, _, _) =>
}
}
@Test
def switchWithGuards: Unit = {
// Statement position
"""
class Test {
def switchWithGuardsStat(x: Int, y: Int): Unit = {
x match {
case 1 => println("one")
case 2 if y < 10 => println("two special")
case 2 => println("two")
case 3 if y < 10 => println("three special")
case 3 if y > 100 => println("three big special")
case z if y > 100 => println("big " + z)
case _ => println("None of those")
}
}
}
""".hasExactly(1, "default case (\\"None of those\\")") {
case js.StringLiteral("None of those") =>
}.has("Match node") {
case js.Match(_, _, _) =>
}
// Expression position
"""
class Test {
def switchWithGuardsExpr(x: Int, y: Int): Unit = {
val message = x match {
case 1 => "one"
case 2 if y < 10 => "two special"
case 2 => "two"
case 3 if y < 10 => "three special"
case 3 if y > 100 => "three big special"
case z if y > 100 => "big " + z
case _ => "None of those"
}
println(message)
}
}
""".hasExactly(1, "default case (\\"None of those\\")") {
case js.StringLiteral("None of those") =>
}.has("Match node") {
case js.Match(_, _, _) =>
}
}
}
| lrytz/scala-js | compiler/src/test/scala/org/scalajs/core/compiler/test/OptimizationTest.scala | Scala | bsd-3-clause | 4,507 |
package com.twitter.zipkin.storage.anormdb
import com.twitter.zipkin.storage.SpanStoreSpec
/** Runs the shared `SpanStoreSpec` contract tests against `AnormSpanStore`,
 *  backed by an in-memory SQLite database whose schema is (re)created via
 *  `db.install()`.
 */
class AnormSpanStoreSpec extends SpanStoreSpec {
  // in-memory SQLite; `install = true` creates the schema
  val db = DB(new DBConfig("sqlite-memory", install = true))
  // mutable so `clear` can swap in a fresh store between tests
  var store = new AnormSpanStore(db, Some(db.install()))

  // Reset state between tests by reinstalling the schema and rebuilding the store.
  override def clear = {
    store = new AnormSpanStore(db, Some(db.install()))
  }
}
| jstanier/zipkin | zipkin-anormdb/src/test/scala/com/twitter/zipkin/storage/anormdb/AnormSpanStoreSpec.scala | Scala | apache-2.0 | 347 |
package com.github.dzhg.tedis.server
import com.github.dzhg.tedis.TedisErrors
import com.github.dzhg.tedis.utils.{ServerAndClient, TedisSuite}
/** Tests for the STRLEN command, exercised end-to-end against a Tedis server
 *  through the client supplied by `ServerAndClient`.
 */
class StrlenSpec extends TedisSuite with ServerAndClient with TedisErrors {
  "TedisServer" when {
    "strlen" must {
      "return correct length for key" in {
        client.set("key", "value123")
        val l = client.strlen("key")
        l.value must be ("value123".length)
      }

      // STRLEN of a missing key yields 0 rather than an error
      "return 0 if key does not exist" in {
        val l = client.strlen("key")
        l.value must be (0L)
      }

      // STRLEN on a key holding a hash must fail with the WRONG_TYPE error
      // defined by TedisErrors
      "throw exception if key does not hold a string" in {
        client.hset1("key", "f1", "v1")
        val ex = the [Exception] thrownBy client.strlen("key")
        ex.getMessage must be (s"${WRONG_TYPE.error} ${WRONG_TYPE.msg}")
      }
    }
  }
}
| dzhg/tedis | src/test/scala/com/github/dzhg/tedis/server/StrlenSpec.scala | Scala | mit | 810 |
/*
* Copyright (c) 2012-2016 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich
package common
package enrichments
package registry
package apirequest
// Maven Artifact
import org.apache.maven.artifact.versioning.DefaultArtifactVersion
// Scalaz
import scalaz._
import Scalaz._
import Validation.FlatMap._
import Tags._
// json4s
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.fromJsonNode
// Akka
import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory
// Iglu
import com.snowplowanalytics.iglu.client.{SchemaCriterion, SchemaKey}
// This project
import outputs.EnrichedEvent
import utils.{HttpClient, ScalazJson4sUtils}
/**
 * Lets us create an ApiRequestEnrichmentConfig from a JValue
 */
object ApiRequestEnrichmentConfig extends ParseableEnrichment {
  // The only schema this parser accepts: vendor/name/format plus a 1-0-0
  // version criterion; checked by isParseable below.
  val supportedSchema = SchemaCriterion("com.snowplowanalytics.snowplow.enrichments",
    "api_request_enrichment_config",
    "jsonschema",
    1,
    0,
    0)

  /**
   * Creates an ApiRequestEnrichment instance from a JValue.
   *
   * @param config The enrichment JSON
   * @param schemaKey The SchemaKey provided for the enrichment
   *        Must be a supported SchemaKey for this enrichment
   * @return a configured ApiRequestEnrichment instance
   */
  def parse(config: JValue, schemaKey: SchemaKey): ValidatedNelMessage[ApiRequestEnrichment] =
    isParseable(config, schemaKey).flatMap { conf =>
      // Extract each required section from "parameters"; the first extraction
      // failure short-circuits the for-comprehension and is lifted into a
      // NonEmptyList by toValidationNel.
      // Note: the bound `conf` is unused — extraction reads `config` directly.
      (for {
        inputs  <- ScalazJson4sUtils.extract[List[Input]](config, "parameters", "inputs")
        httpApi <- ScalazJson4sUtils.extract[HttpApi](config, "parameters", "api", "http")
        outputs <- ScalazJson4sUtils.extract[List[Output]](config, "parameters", "outputs")
        cache   <- ScalazJson4sUtils.extract[Cache](config, "parameters", "cache")
      } yield ApiRequestEnrichment(inputs, httpApi, outputs, cache)).toValidationNel
    }
}
/** Enrichment that performs an HTTP API lookup per event: `inputs` describe
 *  how to build a template context from the event, `api` builds and performs
 *  the request, `outputs` describe how to extract JSON contexts from the
 *  response, and `cache` memoizes responses keyed by URL.
 */
case class ApiRequestEnrichment(inputs: List[Input],
                                api: HttpApi,
                                outputs: List[Output],
                                cache: Cache)
  extends Enrichment {
  import ApiRequestEnrichment._

  val version = new DefaultArtifactVersion("0.1.0")

  /**
   * Primary function of the enrichment
   * Failure means HTTP failure, failed unexpected JSON-value, etc
   * Successful None skipped lookup (missing key for eg.)
   *
   * @param event currently enriching event
   * @param derivedContexts derived contexts
   * @return none if some inputs were missing, validated JSON context if lookup performed
   */
  def lookup(event: EnrichedEvent,
             derivedContexts: List[JObject],
             customContexts: JsonSchemaPairs,
             unstructEvent: JsonSchemaPairs): ValidationNel[String, List[JObject]] = {
    /**
     * Note that [[JsonSchemaPairs]] have specific structure - it is a pair,
     * where first element is [[SchemaKey]], second element is JSON Object
     * with keys: `data`, `schema` and `hierarchy` and `schema` contains again [[SchemaKey]]
     * but as nested JSON object. `schema` and `hierarchy` can be ignored here
     */
    val jsonCustomContexts = transformRawPairs(customContexts)
    // at most one unstruct event per event, hence headOption
    val jsonUnstructEvent = transformRawPairs(unstructEvent).headOption

    val templateContext = Input
      .buildTemplateContext(inputs, event, derivedContexts, jsonCustomContexts, jsonUnstructEvent)
    // None template context (missing inputs) flows through as a skipped lookup
    templateContext.flatMap(opt => getOutputs(opt.map(taggedMap2Map)).toValidationNel)
  }

  // Strip the scalaz @@ LastVal tag; purely a type-level cast, no runtime change.
  private def taggedMap2Map[K, V](m: Map[K, V @@ LastVal]): Map[K, V] =
    m.asInstanceOf[Map[K, V]]

  /**
   * Build URI and try to get value for each of [[outputs]]
   *
   * @param validInputs map to build template context
   * @return validated list of lookups, whole lookup will be failed if any of
   *         outputs were failed
   */
  private[apirequest] def getOutputs(
    validInputs: Option[Map[String, String]]): Validation[String, List[JObject]] = {
    // If validInputs is None (or the URL cannot be built) this list is empty
    // and the overall result is a Success(Nil), i.e. a skipped lookup.
    val result = for {
      templateContext <- validInputs.toList
      url             <- api.buildUrl(templateContext).toList
      output          <- outputs
    } yield cachedOrRequest(url, output).leftMap(_.toString)
    // flip List[Validation[...]] into Validation[..., List[...]]; fails if any lookup failed
    result.sequenceU
  }

  /**
   * Check cache for URL and perform HTTP request if value wasn't found
   *
   * @param url URL to request
   * @param output currently processing output
   * @return validated JObject, in case of success ready to be attached to derived contexts
   */
  private[apirequest] def cachedOrRequest(url: String,
                                          output: Output): Validation[Throwable, JObject] = {
    val value = cache.get(url) match {
      case Some(cachedResponse) => cachedResponse
      case None => {
        // note: failures are cached too — the Validation itself is stored
        val json = api.perform(client, url).flatMap(output.parse)
        cache.put(url, json)
        json
      }
    }
    value.flatMap(output.extract).map(output.describeJson)
  }
}
/**
 * Companion object containing common methods for requests and manipulating data
 */
object ApiRequestEnrichment {
  // TODO: share it as soon as there will be another dependent enrichment
  // Daemonic actor system (akka.daemonic=on) so it does not keep the JVM alive.
  private lazy val actorSystem =
    ActorSystem("api-request-system", ConfigFactory.parseString("akka.daemonic=on"))

  // Single shared HTTP client, lazily created on first lookup.
  private lazy val client = new HttpClient(actorSystem)

  /**
   * Transform pairs of schema and node obtained from [[utils.shredder.Shredder]]
   * into list of regular self-describing instance representing custom context
   * or unstruct event
   *
   * @param pairs list of pairs consisting of schema and Json nodes
   * @return list of regular JObjects
   */
  def transformRawPairs(pairs: JsonSchemaPairs): List[JObject] =
    pairs.map {
      case (schema, node) =>
        val uri  = schema.toSchemaUri
        val data = fromJsonNode(node)
        // rebuild the plain self-describing form, keeping only the "data" payload
        ("schema" -> uri) ~ ("data" -> data \\ "data")
    }
}
| TimothyKlim/snowplow | 3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/enrichments/registry/apirequest/ApiRequestEnrichment.scala | Scala | apache-2.0 | 6,723 |
package org.overviewproject.pdfocr.pdf
import java.awt.image.BufferedImage
import java.io.IOException
import java.nio.file.{Files,Paths}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import org.overviewproject.pdfocr.exceptions._
import org.overviewproject.pdfocr.test.UnitSpec
/** Tests for `PdfDocument` loading, page iteration and error handling, using
 *  fixture PDFs from the test resources directory `/trivial-pdfs`.
 */
class PdfDocumentSpec extends UnitSpec {
  // Resolve a test resource to a filesystem Path and load it.
  // Throws a plain Exception with a helpful message when the fixture is missing
  // (getResource returns null in that case).
  private def load(resourceName: String): Future[PdfDocument] = {
    val pathString: String = try {
      getClass.getResource(s"/trivial-pdfs/$resourceName").toString.replace("file:", "")
    } catch {
      case ex: NullPointerException => {
        throw new Exception(s"Missing test file /trivial-pdfs/$resourceName")
      }
    }
    val path = Paths.get(pathString)

    PdfDocument.load(path)
  }

  describe("load") {
    it("loads a valid PDF") {
      val pdfDocument = load("empty-page.pdf").futureValue
      pdfDocument.path.getFileName.toString must equal("empty-page.pdf")
      pdfDocument.close
    }

    it("throws PdfEncryptedException") {
      load("empty-page-encrypted.pdf").failed.futureValue mustBe a[PdfEncryptedException]
    }

    it("throws PdfInvalidException when the file is not a PDF") {
      val ex = load("not-a-pdf.pdf").failed.futureValue
      ex mustBe a[PdfInvalidException]
    }

    it("throws IOException when the file does not exist") {
      PdfDocument.load(Paths.get("/this/path/is/very/unlikely/to/exist.pdf")).failed.futureValue mustBe a[IOException]
    }

    // A PDF with only an owner password should be decrypted transparently and
    // remain writable.
    it("removes the owner password (so long as there is no user password)") {
      val pdfDocument = load("owner-protected.pdf").futureValue
      val outPath = Files.createTempFile("pdfocr-test-pdfdocument-", ".pdf")
      pdfDocument.write(outPath).futureValue must equal(())
      Files.delete(outPath)
    }
  }

  describe("nPages") {
    it("returns the number of pages") {
      val pdf = load("empty-page.pdf").futureValue
      pdf.nPages must equal(1)
      pdf.close
    }
  }

  describe("pages") {
    it("iterates over each page") {
      val pdf = load("2-pages.pdf").futureValue
      val it = pdf.pages
      try {
        it.hasNext must equal(true)
        it.next.futureValue.toText must equal("Page 1\n")
        it.hasNext must equal(true)
        it.next.futureValue.toText must equal("Page 2\n")
        it.hasNext must equal(false)
      } finally {
        pdf.close
      }
    }

    // Pages with invalid content streams should still be returned rather than
    // aborting iteration.
    it("return a page even if it contains an invalid stream") {
      val pdf = load("2nd-page-invalid.pdf").futureValue
      try {
        val it = pdf.pages
        it.next.futureValue
        it.next.futureValue
        it.hasNext must equal(false)
      } finally {
        pdf.close
      }
    }
  }
}
| overview/pdfocr | src/test/scala/org/overviewproject/pdfocr/pdf/PdfDocumentSpec.scala | Scala | agpl-3.0 | 2,695 |
package com.pbassiner
/** Algebra describing disk operations as plain data (one case per operation),
 *  suitable for a free-monad style interpreter.  The type parameter `A` is the
 *  result type the operation produces when interpreted.
 */
sealed trait DiskIO[A]

object DiskIO {
  /** Read the contents of `file`; interpreting it yields the raw bytes. */
  final case class Read(file: String) extends DiskIO[Array[Byte]]
  /** Write `contents` to `file`; no meaningful result. */
  final case class Write(file: String, contents: Array[Byte]) extends DiskIO[Unit]
  /** Delete `file`; yields a Boolean whose exact meaning depends on the
   *  interpreter (presumably whether the file existed/was deleted — confirm). */
  final case class Delete(file: String) extends DiskIO[Boolean]
}
| pbassiner/free-as-in-monads | src/main/scala/com/pbassiner/DiskIO.scala | Scala | mit | 278 |
package toguru.toggles
import toguru.toggles.events.Rollout
/** A feature toggle with optional activation rules.
 *  @param tags        free-form key/value metadata
 *  @param activations rules controlling when/for whom the toggle is active
 */
case class Toggle(
  id: String,
  name: String,
  description: String,
  tags: Map[String, String] = Map.empty,
  activations: IndexedSeq[ToggleActivation] = IndexedSeq.empty)

/** One activation rule: attribute filters (attribute name -> allowed values)
 *  and an optional percentage rollout (see the protobuf-defined `Rollout`). */
case class ToggleActivation(attributes: Map[String, Seq[String]] = Map.empty, rollout: Option[Rollout] = None)
| andreas-schroeder/toguru | app/toguru/toggles/Toggle.scala | Scala | mit | 406 |
package ch.epfl.scala.index
package views
package html
/** Tests for `paginationRender(current, total)`.  It returns a triple of
 *  (previous page if any, window of up to 10 page numbers to display,
 *  next page if any).  The ASCII diagrams show the expected pagination bar,
 *  with the current page marked as *n*.
 */
object PaginationTests extends org.specs2.mutable.Specification {
  "pagination" >> {
    "base case" >> {
      // *1*
      paginationRender(1, 1) ==== ((None, List(1), None))
    }

    "full" >> {
      // *1* 2 3 >
      paginationRender(1, 3) ==== ((None, List(1, 2, 3), Some(2)))

      // *1* 2 3 4 5 6 7 8 9 10 >
      paginationRender(1, 12) ==== ((None, List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), Some(2)))

      // < 1 *2* 3 4 5 6 7 8 9 10 >
      paginationRender(2, 12) ==== ((Some(1), List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), Some(3)))

      // the window stays anchored at page 1 until the current page passes the middle
      // < 1 2 3 4 5 *6* 7 8 9 10 >
      paginationRender(6, 12) ==== ((Some(5), List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), Some(7)))

      // then the window slides to keep the current page centered
      // < 2 3 4 5 6 *7* 8 9 10 11 >
      paginationRender(7, 12) ==== ((Some(6), List(2, 3, 4, 5, 6, 7, 8, 9, 10, 11), Some(8)))

      // < 3 4 5 6 7 *8* 9 10 11 12 >
      paginationRender(8, 12) ==== ((Some(7), List(3, 4, 5, 6, 7, 8, 9, 10, 11, 12), Some(9)))

      // and stops sliding once the last page is visible
      // < 3 4 5 6 7 8 9 10 *11* 12 >
      paginationRender(11, 12) ==== ((Some(10), List(3, 4, 5, 6, 7, 8, 9, 10, 11, 12), Some(12)))

      // < 3 4 5 6 7 8 9 10 11 *12*
      paginationRender(12, 12) ==== ((Some(11), List(3, 4, 5, 6, 7, 8, 9, 10, 11, 12), None))
    }
  }
}
| adamwy/scaladex | template/src/test/scala/ch.epfl.scala.index.views.html/PaginationTests.scala | Scala | bsd-3-clause | 1,343 |
package com.socrata.datacoordinator
package secondary
import java.sql.SQLException
import com.rojoma.simplearm.v2._
import com.socrata.datacoordinator.truth.loader.{Delogger, MissingVersion}
import com.socrata.datacoordinator.id.DatasetId
import com.socrata.datacoordinator.truth.metadata
import com.socrata.datacoordinator.truth.metadata._
import com.socrata.datacoordinator.truth.sql.SqlColumnReadRep
import com.socrata.datacoordinator.truth.universe._
import com.socrata.datacoordinator.util.collection.ColumnIdMap
import com.socrata.datacoordinator.util.TimingReport
import com.socrata.soql.environment.TypeName
import com.socrata.thirdparty.metrics.Metrics
import org.postgresql.util.PSQLException
import scala.concurrent.duration.Duration
import scala.util.control.ControlThrowable
import org.slf4j.LoggerFactory
object PlaybackToSecondary {
  /** All the universe capabilities playback needs: committing, the secondary
   *  manifest and metrics, dataset map reading/writing, dataset reading and
   *  log replay (delogging). */
  type SuperUniverse[CT, CV] = Universe[CT, CV] with Commitable with
                                                     SecondaryManifestProvider with
                                                     SecondaryMetricsProvider with
                                                     DatasetMapReaderProvider with
                                                     DatasetMapWriterProvider with
                                                     DatasetReaderProvider with
                                                     DeloggerProvider

  private val logger = LoggerFactory.getLogger(classOf[PlaybackToSecondary[_, _]])
}
class PlaybackToSecondary[CT, CV](u: PlaybackToSecondary.SuperUniverse[CT, CV],
repFor: metadata.ColumnInfo[CT] => SqlColumnReadRep[CT, CV],
typeForName: TypeName => Option[CT],
datasetIdFormatter: DatasetId => String,
timingReport: TimingReport) {
import PlaybackToSecondary.logger
val datasetLockTimeout = Duration.Inf
/** A buffered view over a log-event stream that tracks the dataset's lifecycle
 *  stage as events are consumed: WorkingCopyCreated moves the stage to
 *  Unpublished, WorkingCopyDropped/WorkingCopyPublished move it to Published,
 *  and all other events leave it unchanged.
 *
 *  `stageBeforeNextEvent` / `stageAfterNextEvent` let callers peek at the
 *  stage transition without consuming the next event.
 */
class LifecycleStageTrackingIterator(underlying: Iterator[Delogger.LogEvent[CV]],
                                     initialStage: metadata.LifecycleStage)
  extends BufferedIterator[Delogger.LogEvent[CV]]
{
  // stage as of the last consumed event
  private var currentStage = initialStage
  // one-event lookahead buffer; null means "not yet fetched"
  private var lookahead: Delogger.LogEvent[CV] = null

  def stageBeforeNextEvent: metadata.LifecycleStage = currentStage
  def stageAfterNextEvent: metadata.LifecycleStage = computeNextStage(head)

  def hasNext: Boolean = lookahead != null || underlying.hasNext

  // Peek without consuming: fetches into the lookahead buffer on first call.
  def head: Delogger.LogEvent[CV] = {
    if(lookahead == null) lookahead = advance()
    lookahead
  }

  private def advance() = underlying.next()

  def next(): Delogger.LogEvent[CV] = {
    val ev =
      if(lookahead == null) {
        advance()
      } else {
        // serve the buffered event and clear the buffer
        val peeked = lookahead
        lookahead = null
        peeked
      }
    // update the tracked stage only when an event is actually consumed
    currentStage = computeNextStage(ev)
    ev
  }

  private def computeNextStage(ev: Delogger.LogEvent[CV]) =
    ev match {
      case Delogger.WorkingCopyCreated(_, _) =>
        metadata.LifecycleStage.Unpublished
      case Delogger.WorkingCopyDropped | Delogger.WorkingCopyPublished =>
        metadata.LifecycleStage.Published
      case _ =>
        currentStage
    }
}
// This is guaranteed to consume no more than necessary out of the iterator.
// In particular, when it is done, the underlying iterator will either be empty
// or positioned so that next() is a stage-changing event.
class StageLimitedIterator(underlying: LifecycleStageTrackingIterator) extends Iterator[Delogger.LogEvent[CV]] {
  // The lifecycle stage in effect when this view was created; iteration stops
  // as soon as the next event would move the dataset to a different stage.
  private val targetStage = underlying.stageBeforeNextEvent

  def hasNext: Boolean =
    underlying.hasNext && underlying.stageAfterNextEvent == targetStage

  def next(): Delogger.LogEvent[CV] =
    if(!hasNext) Iterator.empty.next()
    else underlying.next()

  // Drain the remaining same-stage events, leaving `underlying` positioned at
  // the next stage-changing event (or exhausted).
  def finish(): Unit = while(hasNext) next()
}
// Instruments via metrics and logging the iteration of rows/events
/** Pass-through iterator that marks a meter on every element and logs progress
 *  every `loggingRate` elements.
 *  @param name        metric/log prefix; the meter is registered as "<name>.rows"
 *  @param datasetName dataset identifier included in log lines
 */
class InstrumentedIterator[T](name: String,
                              datasetName: String,
                              underlying: Iterator[T],
                              loggingRate: Int = 10000) extends Iterator[T] with Metrics {
  // count of elements consumed so far
  var itemNum = 0
  val meter = metrics.meter(name + ".rows")

  def hasNext: Boolean = underlying.hasNext

  def next(): T = {
    meter.mark()
    itemNum += 1
    if(itemNum % loggingRate == 0)
      logger.info("[{}] {}: {} rows/events processed", name, datasetName, itemNum.toString)
    underlying.next()
  }
}
/** Entry point for one playback job: if the job is flagged for drop, remove
 *  the dataset from the secondary store; otherwise replay the job's pending
 *  versions into it. */
def apply(secondary: NamedSecondary[CT, CV], job: SecondaryRecord): Unit = {
  if (job.pendingDrop) {
    logger.info("Executing pending drop of dataset {} from store {}.", job.datasetId : Any, job.storeId)
    new UpdateOp(secondary, job).drop()
  } else {
    new UpdateOp(secondary, job).go()
  }
}
// Convert a truth-side DatasetInfo into the secondary API's DatasetInfo.
// The obfuscation key is defensively cloned since it is a mutable array.
def makeSecondaryDatasetInfo(dsInfo: metadata.DatasetInfoLike) =
  DatasetInfo(datasetIdFormatter(dsInfo.systemId), dsInfo.localeName, dsInfo.obfuscationKey.clone(), dsInfo.resourceName)

// Convert a truth-side CopyInfo into the secondary API's CopyInfo, mapping the
// truth lifecycle stage onto its secondary counterpart.
def makeSecondaryCopyInfo(copyInfo: metadata.CopyInfoLike) =
  CopyInfo(copyInfo.systemId,
           copyInfo.copyNumber,
           copyInfo.lifecycleStage.correspondingSecondaryStage,
           copyInfo.dataVersion,
           copyInfo.dataShapeVersion,
           copyInfo.lastModified)

// Convert a truth-side rollup description (name + SoQL) for the secondary API.
def makeSecondaryRollupInfo(rollupInfo: metadata.RollupInfoLike) =
  RollupInfo(rollupInfo.name.underlying, rollupInfo.soql)
// Convert a truth-side ColumnInfo into the secondary API's ColumnInfo,
// resolving the stored type name back to a concrete CT via `typeForName`.
// An unknown type name is a corrupted-log invariant violation, hence sys.error.
def makeSecondaryColumnInfo(colInfo: metadata.ColumnInfoLike) = {
  typeForName(TypeName(colInfo.typeName)) match {
    case Some(typ) =>
      ColumnInfo(colInfo.systemId,
                 colInfo.userColumnId,
                 colInfo.fieldName,
                 typ,
                 isSystemPrimaryKey = colInfo.isSystemPrimaryKey,
                 isUserPrimaryKey = colInfo.isUserPrimaryKey,
                 isVersion = colInfo.isVersion,
                 colInfo.computationStrategyInfo.map { strategy =>
                   ComputationStrategyInfo(strategy.strategyType, strategy.sourceColumnIds, strategy.parameters)
                 })
    case None =>
      sys.error("Typename " + colInfo.typeName + " got into the logs somehow!")
  }
}
private class UpdateOp(secondary: NamedSecondary[CT, CV],
job: SecondaryRecord)
{
private val datasetId = job.datasetId
private val claimantId = job.claimantId
private var currentCookie = job.initialCookie
private val datasetMapReader = u.datasetMapReader
/** Run the update job: if the dataset still exists in truth, replay its
 *  pending versions (falling back to a full resync when the log is missing a
 *  version or the secondary explicitly requests one) and record metrics;
 *  if it no longer exists, drop it from the secondary. */
def go(): Unit = {
  datasetMapReader.datasetInfo(datasetId) match {
    case Some(datasetInfo) =>
      logger.info("Found dataset " + datasetInfo.systemId + " in truth")
      try {
        reconsolidateAndPlayback(datasetInfo)
      } catch {
        case e: MissingVersion =>
          // the log no longer contains the version we need — incremental
          // replay is impossible, so fall back to a full resync
          logger.info("Couldn't find version {} in log; resyncing", e.version.toString)
          resyncSerially()
        case ResyncSecondaryException(reason) =>
          logger.info("Incremental update requested full resync: {}", reason)
          resyncSerially()
      }
      saveMetrics(datasetInfo)
    case None =>
      // dataset deleted from truth — remove it from the secondary too
      drop()
  }
}
/** Replay all pending versions [startingDataVersion, endingDataVersion] into
 *  the secondary.  Before replaying, a cheap scan over each version's event
 *  *types* (delogOnlyTypes) finds the longest prefix of versions that are all
 *  "consolidatable" (row-only or rollup-only changes — see `consolidatable`);
 *  such a run is merged into a single playback call via `consolidate`.
 *  The secondary map is updated after each (possibly merged) step. */
def reconsolidateAndPlayback(datasetInfo: metadata.DatasetInfo): Unit = {
  var dataVersion = job.startingDataVersion
  while(dataVersion <= job.endingDataVersion) {
    logger.trace("Playing back version {}", dataVersion)
    using(u.delogger(datasetInfo)) { delogger =>
      val lastConsolidatableVersion: Option[Long] =
        // We'll scan over the range of versions, eliminating
        // possibilities for consolidation until there's nothing
        // left, and then play them all back together.
        (job.startingDataVersion to job.endingDataVersion).iterator.scanLeft((-1L, Consolidatable.all)) { (dvAcc, dv) =>
          val (_, acc) = dvAcc
          using(delogger.delogOnlyTypes(dv)) { it =>
            // intersect: a version only stays in the run if it is consolidatable
            // in the same way (Rows vs Rollups) as all versions before it
            (dv, acc.intersect(consolidatable(it.buffered)))
          }
        }.drop(1).takeWhile(_._2.nonEmpty).map(_._1).toStream.lastOption

      lastConsolidatableVersion match {
        case Some(n) if n > dataVersion =>
          // at least two versions can be consolidated into one
          logger.info("Consolidating data versions {} through {} into a single thing", dataVersion, n)
          using(consolidate(delogger, dataVersion, n)) { it =>
            playbackLog(datasetInfo, it, dataVersion, n)
          }
          dataVersion = n
        case _ =>
          // Zero or one version can be consolidated into one
          using(delogger.delog(dataVersion)) { it =>
            playbackLog(datasetInfo, it, dataVersion, dataVersion)
          }
      }
    }
    updateSecondaryMap(dataVersion)
    dataVersion += 1
  }
}
/** The two flavors of "consolidatable" version: one touching only row data,
 *  one touching only rollups.  A set of these is used so a run of versions can
 *  be classified by intersecting each version's possibilities. */
sealed abstract class Consolidatable
object Consolidatable {
  // an empty changeset is compatible with either flavor
  def all = Set[Consolidatable](Rows, Rollups)
}
case object Rows extends Consolidatable
case object Rollups extends Consolidatable
// A series of events is consolidatable if it is an update which only
// changes row data or rollups, which is to say if it has the form:
//   RowsChangedPreview
//   zero or one Truncated
//   zero or more RowDataUpdated
//   LastModifiedChanged
// or
//   zero or more intermingled RollupCreatedOrUpdated and RollupDropped
//   LastModifiedChanged
// or
//   LastModifiedChanged
//
// Returns the set of flavors this event-type sequence is compatible with:
// empty set = not consolidatable at all; Consolidatable.all = empty changeset
// (compatible with anything).
def consolidatable(it: BufferedIterator[Delogger.LogEventCompanion]): Set[Consolidatable] = {
  if(it.hasNext && it.head == Delogger.LastModifiedChanged) {
    it.next()
    if(it.hasNext) {
      return Set.empty
    } else {
      // Empty changeset, can be consolidated with anything
      return Consolidatable.all
    }
  }

  if(!it.hasNext) {
    return Set.empty
  }

  it.next() match {
    case Delogger.RollupCreatedOrUpdated | Delogger.RollupDropped =>
      // consume the run of rollup events, then require exactly one
      // trailing LastModifiedChanged
      while(it.hasNext && (it.head == Delogger.RollupCreatedOrUpdated || it.head == Delogger.RollupDropped)) {
        it.next()
      }
      if(!it.hasNext || it.next() != Delogger.LastModifiedChanged) {
        return Set.empty
      }
      if(it.hasNext) Set.empty // didn't end with LastModifiedChanged?
      else Set(Rollups)
    case Delogger.RowsChangedPreview =>
      // optional Truncated, then a run of RowDataUpdated, then exactly one
      // trailing LastModifiedChanged
      if(it.hasNext && it.head == Delogger.Truncated) {
        it.next()
      }
      while(it.hasNext && it.head == Delogger.RowDataUpdated) {
        it.next()
      }
      if(!it.hasNext || it.next() != Delogger.LastModifiedChanged) {
        return Set.empty
      }
      if(it.hasNext) Set.empty // didn't end with LastModifiedChanged?
      else Set(Rows)
    case _ =>
      Set.empty
  }
}
/** Merge the (pre-verified consolidatable) versions in
 *  [startingDataVersion, endingDataVersion] into a single event stream.
 *  A first pass over each version's head event classifies the run as row-only
 *  or rollup-only and sums the RowsChangedPreview counters; a truncation
 *  restarts the run from that version (earlier changes are irrelevant).
 *  The returned iterator owns a ResourceScope and must be closed. */
def consolidate(delogger: Delogger[CV], startingDataVersion: Long, endingDataVersion: Long) =
  new Iterator[Delogger.LogEvent[CV]] with AutoCloseable {
    // ok, so we're going to want to produce a fake event stream
    // shaped like a consolidatable (see above) version.  To do
    // this, we'll combine all the versions' RowsChangedPreview
    // events then the concatenation of their RowDataUpdateds,
    // then the final job's LastModifiedChanged.
    val rs = new ResourceScope

    // what kind of run we've seen so far
    sealed abstract class State
    case object Unknown extends State
    case class ConsolidatingRows(preview: Delogger.RowsChangedPreview) extends State
    case object ConsolidatingRollups extends State

    val (effectiveStartingDataVersion, state) =
      (startingDataVersion to endingDataVersion).foldLeft((startingDataVersion, Unknown:State)) { (dvAcc, dv) =>
        val (currentStartingDataVersion, state) = dvAcc
        // current accumulated preview, or a zero preview if none yet;
        // seeing rows while consolidating rollups means the pre-scan lied
        def rcpOfState() =
          state match {
            case Unknown => Delogger.RowsChangedPreview(0, 0, 0, false)
            case ConsolidatingRows(rcp) => rcp
            case ConsolidatingRollups => throw ResyncSecondaryException("Trying to consolidate rows, but current state is for rollups?")
          }
        def ensureRollups() =
          state match {
            case Unknown | ConsolidatingRollups => ConsolidatingRollups
            case ConsolidatingRows(_) => throw ResyncSecondaryException("Trying to consolidate rollups, but current state is for rows?")
          }
        managed(delogger.delog(dv)).run(_.buffered.headOption) match {
          case Some(Delogger.RowsChangedPreview(rowsInserted, rowsUpdated, rowsDeleted, false)) =>
            val rcp = rcpOfState()
            (currentStartingDataVersion, ConsolidatingRows(Delogger.RowsChangedPreview(rcp.rowsInserted + rowsInserted,
                                                                                       rcp.rowsUpdated + rowsUpdated,
                                                                                       rcp.rowsDeleted + rowsDeleted,
                                                                                       rcp.truncated)))
          case Some(rcp : Delogger.RowsChangedPreview) =>
            rcpOfState() // ensure we don't think we're looking at rollups
            // it was truncated; just start from here
            (dv, ConsolidatingRows(rcp))
          case Some(Delogger.LastModifiedChanged(_)) =>
            // no change -- just keep acc
            (currentStartingDataVersion, state)
          case Some(Delogger.RollupCreatedOrUpdated(_) | Delogger.RollupDropped(_)) =>
            (currentStartingDataVersion, ensureRollups())
          case _ =>
            throw ResyncSecondaryException("Consolidation saw the log change?! No RowsChangedPreview!")
        }
      }

    // pick the merge strategy according to what the run turned out to contain
    val finalIterator =
      state match {
        case Unknown =>
          // bunch of empty updates...? Ok.
          consolidateNothing(delogger, effectiveStartingDataVersion, endingDataVersion, rs)
        case ConsolidatingRows(fakeRCP) =>
          consolidateRows(fakeRCP, delogger, effectiveStartingDataVersion, endingDataVersion, rs)
        case ConsolidatingRollups =>
          consolidateRollups(delogger, effectiveStartingDataVersion, endingDataVersion, rs)
      }

    def hasNext = finalIterator.hasNext
    def next() = finalIterator.next()
    def close() {
      rs.close()
    }
  }
/** Merge a run of versions that contain no row or rollup changes: each version
 *  must consist solely of a LastModifiedChanged event; only the last one is
 *  emitted.  Any other event means the log changed underneath us → resync. */
private def consolidateNothing(delogger: Delogger[CV], startingDataVersion: Long, endingDataVersion: Long, rs: ResourceScope): Iterator[Delogger.LogEvent[CV]] = {
  val data = (startingDataVersion to endingDataVersion).iterator.flatMap { i =>
    new Iterator[Delogger.LogEvent[CV]] {
      var done = false
      val eventsRaw = rs.open(delogger.delog(i))
      val events = eventsRaw.buffered
      def hasNext: Boolean = {
        if(done) return false
        events.headOption match {
          case Some(Delogger.LastModifiedChanged(_)) =>
            true
          case Some(other) =>
            // fixed: the '$' was missing from the interpolation, so the
            // message printed the literal text "s{...}"
            throw ResyncSecondaryException(s"Consolidation saw the log change?! Saw ${other.companion.productPrefix} while expecting LastModifiedChanged!")
          case None =>
            done = true
            rs.close(eventsRaw)
            false
        }
      }
      def next() = {
        if(hasNext) events.next()
        else Iterator.empty.next()
      }
    }
  }
  // keep only the final LastModifiedChanged of the whole run
  if(data.hasNext) Iterator.single(data.toStream.last)
  else throw ResyncSecondaryException(s"Consolidation saw the log change?! No LastModifiedChanged!")
}
/** Merge a run of row-only versions into one fake version: the pre-computed
 *  combined preview `fakeRCP`, an optional Truncated marker, the concatenation
 *  of all versions' RowDataUpdated events, and the final LastModifiedChanged.
 *  Any unexpected event shape means the log changed underneath us → resync. */
private def consolidateRows(fakeRCP: Delogger.RowsChangedPreview, delogger: Delogger[CV], startingDataVersion: Long, endingDataVersion: Long, rs: ResourceScope): Iterator[Delogger.LogEvent[CV]] = {
  // bleargghhgh -- annoying that this super lazy flatmap over
  // the data versions has a side effect, but I can't think of a
  // clearer way to do this.
  var mostRecentLastModified: Option[Delogger.LastModifiedChanged] = None
  val data = (startingDataVersion to endingDataVersion).iterator.flatMap { i =>
    new Iterator[Delogger.LogEvent[CV]] {
      var done: Boolean = false
      val eventsRaw = rs.open(delogger.delog(i))
      val events = eventsRaw.buffered
      // validate (and skip) the version's preamble before yielding row events
      if(events.hasNext && events.head.companion == Delogger.LastModifiedChanged) {
        // empty changeset.
      } else {
        if(!events.hasNext || events.head.companion != Delogger.RowsChangedPreview) {
          throw ResyncSecondaryException("Consolidation saw the log change?! First item was gone or not RowsChangedPreview!")
        }
        if(events.next().asInstanceOf[Delogger.RowsChangedPreview].truncated) {
          if(!events.hasNext || events.next().companion != Delogger.Truncated) {
            throw ResyncSecondaryException("RowsChangedPreview said the dataset was to be truncated, but there was no Truncated event!")
          }
          // consolidate() restarts the run at a truncation, so a truncation
          // can only ever appear in the first batch
          if(mostRecentLastModified.isDefined) {
            throw ResyncSecondaryException("Dataset was truncated but it wasn't the first batch we were looking at!")
          }
        }
      }
      def hasNext: Boolean = {
        if(done) return false
        events.headOption match {
          case Some(v@Delogger.RowDataUpdated(_)) =>
            true
          case Some(lmc@Delogger.LastModifiedChanged(_)) =>
            mostRecentLastModified = Some(lmc) // this is the reason for the bleargghhgh before
            done = true
            ensureEnd(events)
            rs.close(eventsRaw)
            false
          case Some(other) =>
            // fixed: the '$' was missing from the interpolation, so the
            // message printed the literal text "s{...}"
            throw ResyncSecondaryException(s"Consolidation saw the log change?! Saw ${other.companion.productPrefix} while expecting RowsChangedPreview or LastModifiedChanged!")
          case None =>
            throw ResyncSecondaryException("Consolidation saw the log change?! Reached EOF while expecting RowsChangedPreview or LastModifiedChanged!")
        }
      }
      def next() = {
        if(hasNext) events.next()
        else Iterator.empty.next()
      }
    }
  }
  Iterator(fakeRCP) ++
    (if(fakeRCP.truncated) Iterator(Delogger.Truncated) else Iterator.empty) ++
    data ++
    // bleargghhgh part 3: Iterator#++ is sufficiently lazy
    // for this lookup of the var that's modified by iterating
    // through `data` to work.
    Iterator(mostRecentLastModified.getOrElse {
      throw ResyncSecondaryException("Consolidation saw the log change - no LastModifiedChanged?!")
    })
}
/** Merge a run of rollup-only versions: the concatenation of all versions'
 *  RollupCreatedOrUpdated/RollupDropped events followed by the final
 *  LastModifiedChanged.  Any other event shape → resync. */
private def consolidateRollups(delogger: Delogger[CV], startingDataVersion: Long, endingDataVersion: Long, rs: ResourceScope): Iterator[Delogger.LogEvent[CV]] = {
  // Same "bleargh" as in consolidateRows re: iterator operation
  // with this mutable thing.  At least consolidating rollups is
  // way simpler...
  var mostRecentLastModified: Option[Delogger.LastModifiedChanged] = None
  val data = (startingDataVersion to endingDataVersion).iterator.flatMap { i =>
    new Iterator[Delogger.LogEvent[CV]] {
      var done: Boolean = false
      val eventsRaw = rs.open(delogger.delog(i))
      val events = eventsRaw.buffered
      if(events.hasNext && events.head.companion == Delogger.LastModifiedChanged) {
        // empty changeset.
      } else if(!events.hasNext || (events.head.companion != Delogger.RollupCreatedOrUpdated && events.head.companion != Delogger.RollupDropped)) {
        throw ResyncSecondaryException("Consolidation saw the log change?! First item was gone or not a rollup event!")
      }
      def hasNext: Boolean = {
        if(done) return false
        events.headOption match {
          case Some(Delogger.RollupCreatedOrUpdated(_) | Delogger.RollupDropped(_)) =>
            true
          case Some(lmc@Delogger.LastModifiedChanged(_)) =>
            mostRecentLastModified = Some(lmc)
            done = true
            ensureEnd(events)
            rs.close(eventsRaw)
            false
          case Some(other) =>
            // fixed: the '$' was missing from the interpolation, so the
            // message printed the literal text "s{...}"
            throw ResyncSecondaryException(s"Consolidation saw the log change?! Saw ${other.companion.productPrefix} while expecting RollupCreatedOrUpdated, RollupDropped, or LastModifiedChanged!")
          case None =>
            throw ResyncSecondaryException("Consolidation saw the log change?! Reached EOF while expecting RollupCreatedOrUpdated, RollupDropped, or LastModifiedChanged!")
        }
      }
      def next() = {
        if(hasNext) events.next()
        else Iterator.empty.next()
      }
    }
  }
  data ++ Iterator(mostRecentLastModified.getOrElse {
    // apply() instead of `new`, for consistency with every other throw site
    throw ResyncSecondaryException("Consolidation saw the log change - no LastModifiedChanged?!")
  })
}
// This is called when the iterator is focused on a
// LastModifiedChanged to ensure it's the final event of the version;
// any trailing event means the log changed underneath us → resync.
private def ensureEnd(events: BufferedIterator[Delogger.LogEvent[CV]]): Unit = {
  events.next() // skip LastModifiedChanged
  events.headOption match {
    case None =>
      // ok good
    case Some(other) =>
      // fixed: the '$' was missing from the interpolation, so the message
      // printed the literal text "s{...}"
      throw ResyncSecondaryException(s"Consolidation saw the log change?! Saw ${other.companion.productPrefix} while expecting EndTransaction!")
  }
}
/** Replays the given log events against the secondary store, advancing it from
  * `initialDataVersion` to `finalDataVersion` and updating `currentCookie`.
  * (Replaced deprecated procedure syntax with an explicit `: Unit =`.) */
def playbackLog(datasetInfo: metadata.DatasetInfo, it: Iterator[Delogger.LogEvent[CV]], initialDataVersion: Long, finalDataVersion: Long): Unit = {
  val secondaryDatasetInfo = makeSecondaryDatasetInfo(datasetInfo)
  // Wrap the iterator so throughput metrics are reported as it is consumed.
  val instrumentedIt = new InstrumentedIterator("playback-log-throughput",
    datasetInfo.systemId.toString,
    it)
  currentCookie = secondary.store.version(secondaryDatasetInfo, initialDataVersion, finalDataVersion,
    currentCookie, instrumentedIt.flatMap(convertEvent))
}
/** Translates a truth-side row operation into its secondary-side equivalent. */
def convertOp(op: truth.loader.Operation[CV]): Operation[CV] = op match {
  case truth.loader.Insert(sid, row)           => Insert(sid, row)
  case truth.loader.Update(sid, before, after) => Update(sid, after)(before)
  case truth.loader.Delete(sid, before)        => Delete(sid)(before)
}
/** Translates a truth-store log event into the corresponding secondary event.
  *
  * Returns None for events that are meaningless to secondaries
  * (CounterUpdated, EndTransaction); every other event is mapped 1:1 with its
  * metadata converted via the makeSecondary* helpers.
  */
def convertEvent(ev: Delogger.LogEvent[CV]): Option[Event[CT, CV]] = ev match {
  case rdu: Delogger.RowDataUpdated[CV] =>
    // Row operations are converted lazily (`.view`) as the secondary consumes them.
    Some(RowDataUpdated(rdu.operations.view.map(convertOp)))
  case Delogger.Truncated =>
    Some(Truncated)
  case Delogger.WorkingCopyDropped =>
    Some(WorkingCopyDropped)
  case Delogger.DataCopied =>
    Some(DataCopied)
  case Delogger.WorkingCopyPublished =>
    Some(WorkingCopyPublished)
  case Delogger.ColumnCreated(info) =>
    Some(ColumnCreated(makeSecondaryColumnInfo(info)))
  case Delogger.ColumnRemoved(info) =>
    Some(ColumnRemoved(makeSecondaryColumnInfo(info)))
  case Delogger.ComputationStrategyCreated(info) =>
    Some(ComputationStrategyCreated(makeSecondaryColumnInfo(info)))
  case Delogger.ComputationStrategyRemoved(info) =>
    Some(ComputationStrategyRemoved(makeSecondaryColumnInfo(info)))
  case Delogger.FieldNameUpdated(info) =>
    Some(FieldNameUpdated(makeSecondaryColumnInfo(info)))
  case Delogger.RowIdentifierSet(info) =>
    Some(RowIdentifierSet(makeSecondaryColumnInfo(info)))
  case Delogger.RowIdentifierCleared(info) =>
    Some(RowIdentifierCleared(makeSecondaryColumnInfo(info)))
  case Delogger.SystemRowIdentifierChanged(info) =>
    Some(SystemRowIdentifierChanged(makeSecondaryColumnInfo(info)))
  case Delogger.VersionColumnChanged(info) =>
    Some(VersionColumnChanged(makeSecondaryColumnInfo(info)))
  case Delogger.LastModifiedChanged(lastModified) =>
    Some(LastModifiedChanged(lastModified))
  case Delogger.WorkingCopyCreated(datasetInfo, copyInfo) =>
    // Only the copy info is needed here; the datasetInfo binding is not used.
    Some(WorkingCopyCreated(makeSecondaryCopyInfo(copyInfo)))
  case Delogger.SnapshotDropped(info) =>
    Some(SnapshotDropped(makeSecondaryCopyInfo(info)))
  case Delogger.CounterUpdated(nextCounter) =>
    // Counter bookkeeping is internal to the truth store; not replicated.
    None
  case Delogger.RollupCreatedOrUpdated(info) =>
    Some(RollupCreatedOrUpdated(makeSecondaryRollupInfo(info)))
  case Delogger.RollupDropped(info) =>
    Some(RollupDropped(makeSecondaryRollupInfo(info)))
  case Delogger.RowsChangedPreview(inserted, updated, deleted, truncated) =>
    Some(RowsChangedPreview(inserted, updated, deleted, truncated))
  case Delogger.SecondaryReindex =>
    Some(SecondaryReindex)
  case Delogger.IndexDirectiveCreatedOrUpdated(info, directives) =>
    Some(IndexDirectiveCreatedOrUpdated(makeSecondaryColumnInfo(info), directives))
  case Delogger.IndexDirectiveDropped(info) =>
    Some(IndexDirectiveDropped(makeSecondaryColumnInfo(info)))
  case Delogger.EndTransaction =>
    // Transaction boundaries are not replayed to secondaries.
    None
}
/** Fetches the store's metric for the dataset (if any) and upserts it into the
  * secondary-metrics table. */
def saveMetrics(datasetInfo: metadata.DatasetInfo): Unit = {
  val internalName = makeSecondaryDatasetInfo(datasetInfo).internalName
  for (metric <- secondary.store.metric(internalName, currentCookie)) {
    u.secondaryMetrics.upsertDataset(secondary.storeId, datasetInfo.systemId, metric)
  }
}
/** Drops the dataset from the secondary store and removes its manifest/metrics entries. */
def drop(): Unit = {
  timingReport("drop", "dataset" -> datasetId) {
    val formattedId = datasetIdFormatter(datasetId)
    secondary.store.dropDataset(formattedId, currentCookie)
    dropFromSecondaryMap()
  }
}
/** Runs `actions`, retrying forever until it completes without throwing.
  *
  * On any throwable: rolls back the current transaction, passes the throwable to
  * `filter` (which is expected to either swallow retryable errors or rethrow), and
  * retries.  Catching Throwable (not just NonFatal) is deliberate here: `filter`
  * decides what propagates.
  *
  * The recursive call is in tail position (after the try/catch), so `@tailrec`
  * guarantees constant stack usage across retries.
  */
@scala.annotation.tailrec
private def retrying[T](actions: => T, filter: Throwable => Unit): T = {
  try {
    return actions
  } catch {
    case e: Throwable =>
      logger.info("Rolling back to end transaction due to thrown exception: {}", e.getMessage)
      u.rollback()
      // transaction isolation level is now reset to READ COMMITTED
      filter(e)
  }
  retrying[T](actions, filter)
}
/** Runs a resync while holding the store group's resync lock.
  *
  * Resyncs within the same store group are serialized because a resync makes
  * reads unavailable (at least in pg); the lock is always released, even if
  * the resync throws.
  */
def resyncSerially(): Unit = {
  val manifest = u.secondaryManifest
  try {
    // resync immediately calls commit before actual work is done.
    // That allows us to see resync in progress from the resync table.
    manifest.lockResync(datasetId, secondary.storeId, secondary.groupName)
    resync()
  } finally {
    manifest.unlockResync(datasetId, secondary.storeId, secondary.groupName)
  }
}
/** Rebuilds the secondary's view of this dataset from scratch.
  *
  * Under a REPEATABLE READ transaction, every copy of the dataset is either
  * dropped (discarded/snapshotted copies) or fully re-pushed via `syncCopy`,
  * in ascending data-version order.  If the dataset no longer exists in the
  * truth store, it is dropped from the secondary instead.  Finally the
  * secondary manifest is advanced to the most recently updated copy's data
  * version.  Both phases run under `retrying`, so serialization failures and
  * nested resync requests cause the phase to be re-executed.
  */
def resync(): Unit = {
  val mostRecentlyUpdatedCopyInfo = retrying[Option[metadata.CopyInfo]]({
    timingReport("resync", "dataset" -> datasetId) {
      u.commit() // all updates must be committed before we can change the transaction isolation level
      val r = u.datasetMapReader
      r.datasetInfo(datasetId, repeatableRead = true) match {
        // transaction isolation level is now set to REPEATABLE READ
        case Some(datasetInfo) =>
          val allCopies = r.allCopies(datasetInfo).toSeq.sortBy(_.dataVersion)
          val mostRecentCopy =
            if(allCopies.nonEmpty) {
              val latestLiving = r.latest(datasetInfo) // this is the newest _living_ copy
              val latestCopy = allCopies.maxBy(_.copyNumber)
              for (copy <- allCopies) {
                timingReport("copy", "number" -> copy.copyNumber) {
                  // secondary.store.resync(.) will be called
                  // on all _living_ copies in order by their copy number
                  def isDiscardedLike(stage: metadata.LifecycleStage) =
                    Set(metadata.LifecycleStage.Discarded, metadata.LifecycleStage.Snapshotted).contains(stage)
                  if (isDiscardedLike(copy.lifecycleStage)) {
                    val secondaryDatasetInfo = makeSecondaryDatasetInfo(copy.datasetInfo)
                    val secondaryCopyInfo = makeSecondaryCopyInfo(copy)
                    secondary.store.dropCopy(secondaryDatasetInfo, secondaryCopyInfo, currentCookie,
                      isLatestCopy = copy.copyNumber == latestCopy.copyNumber)
                  } else
                    syncCopy(copy, isLatestLivingCopy = copy.copyNumber == latestLiving.copyNumber)
                }
              }
              Some(allCopies.last)
            } else {
              logger.error("Have dataset info for dataset {}, but it has no copies?", datasetInfo.toString)
              None
            }
          // end transaction to not provoke a serialization error from touching the secondary_manifest table
          u.commit()
          // transaction isolation level is now reset to READ COMMITTED
          mostRecentCopy
        case None =>
          // Dataset is gone from the truth store: remove it from this secondary too.
          drop()
          None
      }
    }
  }, {
    case ResyncSecondaryException(reason) =>
      logger.warn("Received resync while resyncing. Resyncing as requested after waiting 10 seconds. " +
        " Reason: " + reason)
      Thread.sleep(10L * 1000)
    case e: Throwable => ignoreSerializationFailure(e)
  })
  retrying[Unit]({
    mostRecentlyUpdatedCopyInfo.foreach { case mostRecent =>
      timingReport("resync-update-secondary-map", "dataset" -> datasetId) {
        updateSecondaryMap(mostRecent.dataVersion)
      }
    }
  }, ignoreSerializationFailure)
}
/** Retry filter: swallows (logs) PostgreSQL serialization failures and rethrows
  * every other throwable unchanged. */
private def ignoreSerializationFailure(e: Throwable): Unit = e match {
  case s: SQLException => extractPSQLException(s) match {
    case Some(p) =>
      // "40001" is the standard SQLSTATE for serialization_failure.
      if (p.getSQLState == "40001")
        logger.warn("Serialization failure occurred during REPEATABLE READ transaction: {}", p.getMessage)
      else
        throw s
    case None => throw s
  }
  case _ => throw e
}
/** Walks the cause chain of `s` (starting at `s.getCause`, not `s` itself)
  * and returns the first PSQLException found, if any. */
private def extractPSQLException(s: SQLException): Option[PSQLException] = {
  @scala.annotation.tailrec
  def firstPsql(t: Throwable): Option[PSQLException] = t match {
    case null => None
    case p: PSQLException => Some(p)
    case other => firstPsql(other.getCause)
  }
  firstPsql(s.getCause)
}
/** Pushes one full copy (schema, rows, rollups, index directives) to the
  * secondary store via its `resync` entry point, updating `currentCookie`.
  * The dataset reader, and therefore the row iterator, only lives for the
  * duration of the `for` block. */
def syncCopy(copyInfo: metadata.CopyInfo, isLatestLivingCopy: Boolean): Unit = {
  timingReport("sync-copy",
    "secondary" -> secondary.storeId,
    "dataset" -> copyInfo.datasetInfo.systemId,
    "copy" -> copyInfo.copyNumber) {
    for(reader <- u.datasetReader.openDataset(copyInfo)) {
      val copyCtx = new DatasetCopyContext(reader.copyInfo, reader.schema)
      val secondaryDatasetInfo = makeSecondaryDatasetInfo(copyCtx.datasetInfo)
      val secondaryCopyInfo = makeSecondaryCopyInfo(copyCtx.copyInfo)
      val secondarySchema = copyCtx.schema.mapValuesStrict(makeSecondaryColumnInfo)
      val itRows = reader.rows(sorted=false)
      // Sigh. itRows is a simple-arm v1 Managed. v2 has a monad map() which makes the code below
      // much, much shorter.
      // The wrapper instruments the row iterator so throughput metrics are reported.
      val wrappedRows = new Managed[Iterator[ColumnIdMap[CV]]] {
        def run[A](f: Iterator[ColumnIdMap[CV]] => A): A = {
          itRows.run { it: Iterator[ColumnIdMap[CV]] =>
            f(new InstrumentedIterator("sync-copy-throughput",
              copyInfo.datasetInfo.systemId.toString,
              it))
          }
        }
      }
      val rollups: Seq[RollupInfo] = u.datasetMapReader.rollups(copyInfo).toSeq.map(makeSecondaryRollupInfo)
      val indexDirectives = u.datasetMapReader.indexDirectives(copyInfo)
      currentCookie = secondary.store.resync(secondaryDatasetInfo,
        secondaryCopyInfo,
        secondarySchema,
        currentCookie,
        wrappedRows,
        rollups,
        indexDirectives,
        isLatestLivingCopy)
    }
  }
}
/** Records in the secondary manifest that replication to this store has
  * completed up to `newLastDataVersion`, committing before and after. */
def updateSecondaryMap(newLastDataVersion: Long): Unit = {
  // We want to end the current transaction here. We don't want to be holding share locks on data-tables like log
  // tables while updating a row on the secondary_manifest. This is to avoid deadlocks when data-coordinator also has
  // locks out on the data-tables and is also updating the same row on the secondary_manifest.
  //
  // The activity in the current transaction (before committing) should all
  // be _reads_ from metadata tables and the dataset's log table.
  u.commit()
  u.secondaryManifest.completedReplicationTo(secondary.storeId,
    claimantId,
    datasetId,
    newLastDataVersion,
    currentCookie)
  u.commit()
}
/** Removes this dataset's metrics and manifest entries for this store, then commits. */
def dropFromSecondaryMap(): Unit = {
  u.secondaryMetrics.dropDataset(secondary.storeId, datasetId)
  u.secondaryManifest.dropDataset(secondary.storeId, datasetId)
  u.commit()
}
// Stackless control-flow throwable (extends ControlThrowable). The name suggests it is
// thrown to restart a resync when a picky secondary rejects data — NOTE(review): confirm
// at the throw/catch sites, which are outside this chunk.
private class InternalResyncForPickySecondary extends ControlThrowable
}
}
| socrata-platform/data-coordinator | coordinatorlib/src/main/scala/com/socrata/datacoordinator/secondary/PlaybackToSecondary.scala | Scala | apache-2.0 | 34,139 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.physical.stream
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.plan.nodes.FlinkConventions
import org.apache.flink.table.plan.nodes.logical.FlinkLogicalSort
import org.apache.flink.table.plan.nodes.physical.stream.StreamExecTemporalSort
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall, RelTraitSet}
import org.apache.calcite.rel.RelFieldCollation.Direction
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.convert.ConverterRule
/**
  * Rule that matches [[FlinkLogicalSort]] which is sorted by time attribute in ascending order
  * and its `fetch` and `offset` are null, and converts it to [[StreamExecTemporalSort]].
  */
class StreamExecTemporalSortRule
  extends ConverterRule(
    classOf[FlinkLogicalSort],
    FlinkConventions.LOGICAL,
    FlinkConventions.STREAM_PHYSICAL,
    "StreamExecTemporalSortRule") {

  override def matches(call: RelOptRuleCall): Boolean = {
    val logicalSort: FlinkLogicalSort = call.rel(0)
    StreamExecTemporalSortRule.canConvertToTemporalSort(logicalSort)
  }

  override def convert(rel: RelNode): RelNode = {
    val logicalSort = rel.asInstanceOf[FlinkLogicalSort]
    // Re-tag the traits with the stream-physical convention and convert the input to match.
    val physicalTraits: RelTraitSet = rel.getTraitSet.replace(FlinkConventions.STREAM_PHYSICAL)
    val physicalInput: RelNode =
      RelOptRule.convert(logicalSort.getInput, FlinkConventions.STREAM_PHYSICAL)
    new StreamExecTemporalSort(
      rel.getCluster,
      physicalTraits,
      physicalInput,
      logicalSort.collation)
  }
}
object StreamExecTemporalSortRule {
  val INSTANCE: RelOptRule = new StreamExecTemporalSortRule

  /**
    * Whether the given sort could be converted to [[StreamExecTemporalSort]].
    *
    * Return true if the given sort is sorted by time attribute in ascending order
    * and its `fetch` and `offset` are null, else false.
    *
    * (Rewritten as a single expression; the previous mid-function `return` is an
    * anti-pattern in Scala and breaks inside closures.)
    *
    * @param sort the [[FlinkLogicalSort]] node
    * @return True if the input sort could be converted to [[StreamExecTemporalSort]]
    */
  def canConvertToTemporalSort(sort: FlinkLogicalSort): Boolean = {
    val fieldCollations = sort.collation.getFieldCollations
    if (sort.fetch != null || sort.offset != null || fieldCollations.isEmpty) {
      false
    } else {
      // get type of first sort field
      val firstSortField = fieldCollations.get(0)
      val inputRowType = sort.getInput.getRowType
      val firstSortFieldType = inputRowType.getFieldList.get(firstSortField.getFieldIndex).getType
      // checks if first sort attribute is time attribute type and order is ascending
      FlinkTypeFactory.isTimeIndicatorType(firstSortFieldType) &&
        firstSortField.direction == Direction.ASCENDING
    }
  }
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/rules/physical/stream/StreamExecTemporalSortRule.scala | Scala | apache-2.0 | 3,552 |
package ch.wsl.box.client.mocks
import ch.wsl.box.client.services.REST
import ch.wsl.box.client.viewmodel.BoxDef.BoxDefinitionMerge
import ch.wsl.box.client.viewmodel.BoxDefinition
import ch.wsl.box.model.shared.{BoxTranslationsFields, CSVTable, Child, ExportDef, Field, FormActionsMetadata, IDs, JSONCount, JSONField, JSONFieldMap, JSONFieldTypes, JSONID, JSONKeyValue, JSONLookup, JSONMetadata, JSONQuery, Layout, LayoutBlock, LoginRequest, NewsEntry, PDFTable, SharedLabels, TableAccess, WidgetsNames, XLSTable}
import ch.wsl.box.shared.utils.JSONUtils._
import io.circe.Json
import io.circe.syntax._
import org.scalajs.dom.File
import scribe.Logging
import scala.concurrent.Future
/** Test double for the [[REST]] client used by client-side tests.
  *
  * Canned responses are drawn from the injected [[Values]] fixture; every
  * endpoint the tests don't exercise prints a "not implemented" marker and
  * aborts with `???` (NotImplementedError), making accidental use obvious.
  */
class RestMock(values:Values) extends REST with Logging {
  override def version(): Future[String] = Future.successful("version")
  override def appVersion(): Future[String] = Future.successful("appVersion")
  // Sessions are always considered valid in tests.
  override def validSession(): Future[Boolean] = Future.successful{
    true
  }
  override def cacheReset(): Future[String] = {
    println("cacheReset not implemented")
    ???
  }
  // Only the "form" kind is backed by fixture data; everything else is unimplemented.
  override def entities(kind: String): Future[Seq[String]] = {
    kind match {
      case "form" => Future.successful(values.formEntities)
      case _ => {
        println(s"entities for $kind not implemented")
        ???
      }
    }
  }
  override def specificKind(kind: String, lang: String, entity: String): Future[String] = {
    println("specificKind not implemented")
    ???
  }
  override def list(kind: String, lang: String, entity: String, limit: Int): Future[Seq[Json]] = {
    println("list1 not implemented")
    ???
  }
  override def list(kind: String, lang: String, entity: String, query: JSONQuery): Future[Seq[Json]] = {
    println("list2 not implemented")
    ???
  }
  override def csv(kind: String, lang: String, entity: String, q: JSONQuery): Future[Seq[Seq[String]]] = {
    println("csv not implemented")
    ???
  }
  override def count(kind: String, lang: String, entity: String): Future[Int] = {
    println("count not implemented")
    ???
  }
  override def keys(kind: String, lang: String, entity: String): Future[Seq[String]] = {
    println("keys not implemented")
    ???
  }
  override def ids(kind: String, lang: String, entity: String, q: JSONQuery): Future[IDs] = {
    println("ids not implemented")
    ???
  }
  override def metadata(kind: String, lang: String, entity: String, public:Boolean): Future[JSONMetadata] = Future.successful{
    values.metadata
  }
  override def tabularMetadata(kind: String, lang: String, entity: String): Future[JSONMetadata] = {
    println("tabularMetadata not implemented")
    ???
  }
  override def children(kind: String, entity: String, lang: String, public:Boolean): Future[Seq[JSONMetadata]] = Future.successful{
    values.children(entity)
  }
  override def lookup(kind:String, lang:String,entity:String, field:String, queryWithSubstitutions: Json,public:Boolean): Future[Seq[JSONLookup]] = {
    println("lookup not implemented")
    ???
  }
  // CRUD operations below delegate to the in-memory fixture store.
  override def get(kind: String, lang: String, entity: String, id: JSONID, public:Boolean): Future[Json] = Future.successful{
    values.get(id)
  }
  override def update(kind: String, lang: String, entity: String, id: JSONID, data: Json, public:Boolean): Future[Json] = {
    Future.successful(values.update(id,data))
  }
  override def updateMany(kind: String, lang: String, entity: String, ids: Seq[JSONID], data: Seq[Json]): Future[Seq[Json]] = ???
  override def insert(kind: String, lang: String, entity: String, data: Json, public:Boolean): Future[Json] = Future.successful{
    values.insert(data)
  }
  override def delete(kind: String, lang: String, entity: String, id: JSONID): Future[JSONCount] = {
    println("delete not implemented")
    ???
  }
  override def deleteMany(kind: String, lang: String, entity: String, ids: Seq[JSONID]): Future[JSONCount] = ???
  override def sendFile(file: File, id: JSONID, entity: String): Future[Int] = {
    println("sendFile not implemented")
    ???
  }
  // Login always succeeds in tests.
  override def login(request: LoginRequest): Future[Json] = Future.successful{
    Json.True
  }
  override def logout(): Future[String] = {
    println("logout not implemented")
    ???
  }
  // NOTE(review): non-exhaustive match — any language other than "en"/"it"
  // fails the Future with a MatchError. Confirm tests never request others.
  override def labels(lang: String): Future[Map[String, String]] = {
    Future.successful(lang match {
      case "en" => Map(
        SharedLabels.header.lang -> values.headerLangEn
      )
      case "it" => Map(
        SharedLabels.header.lang -> values.headerLangIt
      )
    })
  }
  override def conf(): Future[Map[String, String]] = Future.successful{
    values.conf
  }
  override def ui(): Future[Map[String, String]] = Future.successful{
    values.uiConf
  }
  override def news(lang: String): Future[Seq[NewsEntry]] = {
    println("news not implemented")
    ???
  }
  override def dataMetadata(kind: String, name: String, lang: String): Future[JSONMetadata] = {
    println("dataMetadata not implemented")
    ???
  }
  override def dataDef(kind: String, name: String, lang: String): Future[ExportDef] = {
    println("dataDef not implemented")
    ???
  }
  override def dataList(kind: String, lang: String): Future[Seq[ExportDef]] = {
    println("dataList not implemented")
    ???
  }
  override def data(kind: String, name: String, params: Json, lang: String): Future[Seq[Seq[String]]] = {
    println("data not implemented")
    ???
  }
  override def tableAccess(table: String, kind: String): Future[TableAccess] = {
    println("table Access not implemented")
    ???
  }
  override def renderTable(table: PDFTable): Future[String] = ???
  override def exportCSV(table: CSVTable): Future[File] = ???
  override def exportXLS(table: XLSTable): Future[File] = ???
  override def generateStub(entity: String): Future[Boolean] = {
    println("generateStub not implemented")
    ???
  }
  override def definition(): Future[BoxDefinition] = ???
  override def definitionDiff(definition: BoxDefinition): Future[BoxDefinitionMerge] = ???
  override def definitionCommit(merge: BoxDefinitionMerge): Future[Boolean] = ???
  override def translationsFields(lang: String): Future[Seq[Field]] = ???
  override def translationsFieldsCommit(merge: BoxTranslationsFields): Future[Boolean] = ???
  override def execute(functionName: String, lang: String, data:Json) = ???
}
| Insubric/box | client/src/test/scala/ch/wsl/box/client/mocks/RestMock.scala | Scala | apache-2.0 | 6,338 |
package ch.dyn.nawak.graphs.tests.equality
import org.scalatest._
import ch.dyn.nawak.graphs._
// Verifies the value-based equals/hashCode contract of Node[Int].
class NodeEqualitySpec extends FlatSpec with Matchers {
  val nodeOne = new Node[Int](Option(1))
  val nodeOneTwin = new Node[Int](Option(1))
  val nodeThree = new Node[Int](Option(3))
  val valuelessNode = new Node[Int](None)
  val valuelessNode2 = new Node[Int]()

  "Two nodes" should "be equals if they have the same value" in {
    nodeOne shouldBe nodeOneTwin
  }

  they should "not be equals if they have different values" in {
    nodeOne should not be nodeThree
    nodeOne should not be valuelessNode
  }

  they should "be equals if both are empty" in {
    // None passed explicitly vs. the default constructor: both represent "no value".
    valuelessNode shouldBe valuelessNode2
  }

  they should "have the same hashcode when equals" in {
    nodeOne.hashCode() shouldBe nodeOneTwin.hashCode()
    nodeOne.hashCode() should not be nodeThree.hashCode()
  }
}
package effectful.examples.pure.dao.sql
/** Maps a single record field onto its SQL column.
  *
  * @param fieldName   name of the field in the record type
  * @param columnIndex ordinal position of the column (used for ordering)
  * @param columnName  name of the SQL column
  */
case class FieldColumnMapping(
  fieldName: String,
  columnIndex: Int,
  columnName: ColName
)
//object FieldColumnMapping {
// def apply(
// fieldName: String,
// columnIndex: Int,
// columnName: String
// ) : FieldColumnMapping =
// FieldColumnMapping(
// fieldName = fieldName,
// columnIndex = columnIndex,
// columnName = ColName(columnName)
// )
//}
// todo: simple DDL generator
/** Describes how records of type `A`, keyed by `ID`, map onto a SQL table.
  *
  * @param tableName    the SQL table
  * @param recordFields mappings for the non-id fields
  * @param idField      mapping for the id field
  */
case class RecordMapping[ID,A](
  tableName: TableName,
  recordFields: Seq[FieldColumnMapping],
  idField: FieldColumnMapping
) {
  // Number of non-id fields.
  def recordFieldCount = recordFields.size
  // Non-id fields sorted by column position.
  val recordFieldsOrdered = recordFields.sortBy(_.columnIndex)
  // Id field first, then the record fields (declaration order).
  val allFields = idField +: recordFields
  // All fields (id included) sorted by column position.
  val allFieldsOrdered = allFields.sortBy(_.columnIndex)
}
//object RecordMapping {
// def apply[ID,A](
// tableName: String,
// recordFields: Seq[FieldColumnMapping],
// idField: FieldColumnMapping
// ) : RecordMapping[ID,A] =
// RecordMapping(
// tableName = TableName(tableName),
// recordFields = recordFields,
// idField = idField
// )
//} | S-Mach/effectful | src/test/scala/effectful/examples/pure/dao/sql/RecordMapping.scala | Scala | mit | 1,118 |
package com.github.rosmith.nlp.service.datasource
import com.github.rosmith.nlp.query.Query
import com.github.rosmith.nlp.query.model.AnnotatedSentence
import com.github.rosmith.nlp.query.solution.IQuerySolution
import com.hp.hpl.jena.ontology.OntModel
import org.slf4j.Logger
import com.hp.hpl.jena.rdf.model.Model
import com.github.rosmith.nlp.query.model.Data4Storage
/** Abstraction over a storage backend that can persist data and execute queries.
  * (Also removes stray dataset-separator residue that was fused onto the closing brace.)
  *
  * @tparam T concrete query type handled by `executeQuery`
  * @tparam D data representation accepted by `save`
  * @tparam M RDF model type data may be saved into
  */
trait Datasource[T <: Query, D <: Data4Storage, M <: Model] {
  /** Initializes the datasource. Boolean presumably signals success — confirm with implementations. */
  def init(): Boolean
  /** Persists `dataForStorage` into the supplied model. */
  def save(dataForStorage: D, model: M): Boolean
  /** Persists `dataForStorage` using an implementation-chosen model. */
  def save(dataForStorage: D): Boolean
  /** Executes `query` and returns its solution. */
  def executeQuery(query: T): IQuerySolution
  /** Logger used by the implementation. */
  def getLogger: Logger
}
package scalaxy.streams
import scala.language.existentials
/** Decides whether a detected stream rewrite should actually be applied,
  * based on the chosen [[OptimizationStrategy]] (safety / speedup criteria),
  * the side effects detected in the stream's closures, and a list of known
  * rewriter limitations. */
private[streams] trait Strategies
    extends Streams
    with SideEffectsDetection
    with Reporters
{
  self: StreamTransforms =>

  val global: scala.reflect.api.Universe
  import global._

  /** True when `stream` hits a known limitation or bug of the rewriter,
    * in which case it must never be optimized. */
  def hasKnownLimitationOrBug(stream: Stream): Boolean = {
    // Any take/drop/takeWhile/dropWhile op present?
    def hasTakeOrDrop: Boolean = stream.ops.exists({
      case TakeWhileOp(_, _) | DropWhileOp(_, _) | TakeOp(_) | DropOp(_) =>
        true
      case _ =>
        false
    })

    // Detects two potentially-related issues.
    def hasTryOrByValueSubTrees: Boolean = stream.components.exists(_.subTrees.exists {
      case Try(_, _, _) =>
        // This one is... interesting.
        // Something horrible (foo not found) happens to the following snippet in lambdalift:
        //
        //    val msg = {
        //      try {
        //        val foo = 10
        //        Some(foo)
        //      } catch {
        //        case ex: Throwable => None
        //      }
        //    } get;
        //    msg
        //
        // I'm being super-mega cautious with try/catch here, until the bug is understood / fixed.
        true

      case t @ Apply(target, args)
          if Option(t.symbol).exists(_.isMethod) =>
        // If one of the subtrees is a method call with by-name params, then
        // weird symbol ownership issues arise (x not found in the following snippet)
        //
        //    def wrap[T](body: => T): Option[T] = Option(body)
        //    wrap({ val x = 10; Option(x) }) getOrElse 0
        //
        t.symbol.asMethod.paramLists.exists(_.exists(_.asTerm.isByNameParam))

      case _ =>
        false
    })

    def isRangeTpe(tpe: Type): Boolean =
      tpe <:< typeOf[Range] ||
      tpe <:< typeOf[collection.immutable.NumericRange[_]]

    def isOptionTpe(tpe: Type): Boolean =
      tpe <:< typeOf[Option[_]]

    def isWithFilterOp(op: StreamOp): Boolean = op match {
      case CoerceOp(_) => true
      case WithFilterOp(_) => true
      case _ => false
    }

    def streamTpe: Option[Type] = findType(stream.tree)

    stream.source match {
      case RangeStreamSource(_) if hasTakeOrDrop && streamTpe.exists(isRangeTpe) =>
        // Range.take / drop / takeWhile / dropWhile return Ranges: not handled yet.
        true

      case OptionStreamSource(_) if hasTakeOrDrop && streamTpe.exists(isOptionTpe) =>
        // Option.take / drop / takeWhile / dropWhile return Lists: not handled yet.
        true

      case _ if stream.ops.lastOption.exists(isWithFilterOp) =>
        // Option.withFilter returns an Option#WithFilter
        true

      case _ if !stream.sink.isImplemented =>
        true

      case _ if hasTryOrByValueSubTrees =>
        true

      case _ =>
        false
    }
  }

  // TODO: refine this.
  /** True when rewriting `stream` is both allowed by `strategy`'s safety
    * criterion (given detected side effects) and expected to pay off by its
    * speedup criterion. May emit warnings/infos as a side effect of the
    * decision, depending on flags and strategy. */
  def isWorthOptimizing(stream: Stream,
      strategy: OptimizationStrategy) = {
    (strategy.speedup != SpeedupCriteria.Never) &&
    !stream.isDummy && {
      var reportedSideEffects = Set[SideEffect]()

      // Severities that the chosen strategy tolerates.
      val safeSeverities: Set[SideEffectSeverity] = strategy.safety match {
        case SafetyCriteria.Safe =>
          Set()
        case SafetyCriteria.ProbablySafe =>
          Set(SideEffectSeverity.ProbablySafe)
        case SafetyCriteria.Unsafe =>
          Set(SideEffectSeverity.ProbablySafe, SideEffectSeverity.Unsafe)
      }
      // println(s"safeSeverities(strategy: $strategy) = $safeSeverities")

      def hasUnsafeEffect(effects: List[SideEffect]): Boolean =
        effects.exists(e => !safeSeverities(e.severity))

      // True when an op that can interrupt the loop / alter its size comes
      // *after* (downstream of) an op with unsafe side effects: the rewrite
      // could then skip side effects the original code would have run.
      def couldSkipSideEffects: Boolean = {
        var foundCanInterruptLoop = false
        for (op <- stream.ops.reverse) {
          if (op.canInterruptLoop || op.canAlterSize) {
            foundCanInterruptLoop = true
          } else {
            if (foundCanInterruptLoop &&
                hasUnsafeEffect(op.closureSideEffectss.flatten)) {
              return true
            }
          }
        }
        return false
      }

      def reportIgnoredUnsafeSideEffects(): Unit = if (!flags.quietWarnings) {
        for (effects <- stream.closureSideEffectss ++ stream.preservedSubTreesSideEffectss;
            effect <- effects;
            if effect.severity == SideEffectSeverity.Unsafe) {
          reportedSideEffects += effect
          warning(effect.tree.pos, Optimizations.messageHeader +
            s"Potential side effect could cause issues with ${strategy.name} optimization strategy: ${effect.description}")
        }
      }

      def hasTakeOrDropWhileOp: Boolean = stream.ops.exists({
        case TakeWhileOp(_, _) | DropWhileOp(_, _) => true
        case _ => false
      })

      def isKnownNotToBeFaster = stream match {
        case Stream(_, ListStreamSource(_, _, _), _, _, _)
            if stream.lambdaCount == 1 =>
          // List operations are now quite heavily optimized. It only makes sense to
          // rewrite more than one operation.
          true

        case Stream(_, ArrayStreamSource(_, _, _), ops, _, _)
            if stream.lambdaCount == 1 &&
               hasTakeOrDropWhileOp =>
          // Array.takeWhile / .dropWhile needs to be optimized better :-)
          true

        case Stream(_, source, ops, sink, _) =>
          false
      }

      // Note: we count the number of closures / ops that have side effects, not the
      // number of side-effects themselves: we assume that within a closure the
      // side effects are still done in the same order, and likewise for preserved
      // sub-trees that they're still evaluated in the same order within the same
      // originating op. For instance with mkString(prefix, sep, suffix), prefix,
      // sep and suffix will still be evaluated in the same order after the rewrite.
      val unsafeClosureSideEffectCount =
        stream.closureSideEffectss.count(hasUnsafeEffect)

      def unsafePreservedTreesSideEffectsCount =
        stream.preservedSubTreesSideEffectss.count(hasUnsafeEffect)

      def isStreamSafe = {
        unsafeClosureSideEffectCount <= 1 &&
        (unsafeClosureSideEffectCount + unsafePreservedTreesSideEffectsCount) <= 1 &&
        !couldSkipSideEffects
      }

      // At least one lambda.
      def isFaster =
        !isKnownNotToBeFaster &&
        stream.lambdaCount >= 1

      val isStrategyUnsafe =
        strategy.safety == SafetyCriteria.Unsafe

      if (isStrategyUnsafe) {
        reportIgnoredUnsafeSideEffects()
      }

      val worthOptimizing =
        (isStrategyUnsafe || isStreamSafe) &&
        ((strategy.speedup == SpeedupCriteria.AlwaysEvenIfSlower) || isFaster)

      if (flags.veryVerbose) {
        for (effects <- stream.closureSideEffectss;
            effect <- effects;
            if !reportedSideEffects(effect)) {
          info(effect.tree.pos, Optimizations.messageHeader + s"Side effect: ${effect.description} (${effect.severity.description})")
        }
      }

      // if (flags.debug) {
      //   // info(stream.tree.pos,
      //   println(s"""
      // tree = ${stream.tree}
      // stream = ${stream.describe()}
      // strategy = $strategy
      // lambdaCount = ${stream.lambdaCount}
      // closureSideEffectss = ${stream.closureSideEffectss}
      // couldSkipSideEffects = $couldSkipSideEffects
      // isWorthOptimizing = $worthOptimizing
      // isFaster = $isFaster
      // unsafeClosureSideEffectCount = $unsafeClosureSideEffectCount
      // unsafePreservedTreesSideEffectsCount = $unsafePreservedTreesSideEffectsCount
      // isStreamSafe = $isStreamSafe
      // """)//, force = true)
      // }

      worthOptimizing
    }
  }
}
| nativelibs4java/scalaxy-streams | src/main/scala/streams/Strategies.scala | Scala | bsd-3-clause | 7,700 |
package com.googlecode.kanbanik.integration
import org.junit.runner.RunWith
import org.scalatest.BeforeAndAfter
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
import com.googlecode.kanbanik.commands.CreateUserCommand
import com.googlecode.kanbanik.commands.DeleteUserCommand
import com.googlecode.kanbanik.commands.EditUserCommand
import com.googlecode.kanbanik.commands.GetAllUsersCommand
import com.googlecode.kanbanik.model.DbCleaner
import com.googlecode.kanbanik.model.User
import org.apache.shiro.SecurityUtils
import com.googlecode.kanbanik.security.KanbanikRealm
import org.apache.shiro.mgt.DefaultSecurityManager
import com.googlecode.kanbanik.commands.LoginCommand
import com.googlecode.kanbanik.dtos.{UserDto, SessionDto, ManipulateUserDto, LoginDto}
@RunWith(classOf[JUnitRunner])
// End-to-end exercise of the user lifecycle: create, login, edit, duplicate-create,
// delete-last-user protection, and delete.
class UserIntegrationTest extends FlatSpec with BeforeAndAfter {

  "users" should "should be able to do the whole cycle" in {
    val userDto = ManipulateUserDto(
      "username",
      "real name",
      "some://picture.url",
      "session id",
      1,
      "password",
      "new password")

    // create the first user
    new CreateUserCommand().execute(userDto)

    val securityManager = new DefaultSecurityManager(new KanbanikRealm)
    SecurityUtils.setSecurityManager(securityManager)
    val loginRes = new LoginCommand().execute(LoginDto("login", "username", "password"))
    // Left means success throughout these commands.
    assert(loginRes.isLeft)

    val user = User.byId("username")
    assert(user.realName === "real name")

    // rename it with correct credentials
    new EditUserCommand().execute(userDto.copy(realName = "other name"))
    assert(User.byId("username").realName === "other name")

    // try to rename with incorrect credentials
    new EditUserCommand().execute(userDto.copy(
      realName = "other name2",
      password = "incorrect password",
      version = 2
    ))
    // The rename must not have gone through.
    assert(User.byId("username").realName === "other name")

    // try to create existing user
    val createExistingUserRes = new CreateUserCommand().execute(userDto)
    assert(createExistingUserRes.isRight)
    assertNumOfUsersIs(1)

    // delete this only user should fail
    val deleteLastUserResult = new DeleteUserCommand().execute(UserDto(
      "username",
      "real name",
      "some://picture.url",
      "session id",
      1
    ))
    assert(deleteLastUserResult.isLeft === false)

    new CreateUserCommand().execute(userDto.copy(userName = "otherUser"))
    assertNumOfUsersIs(2)

    new DeleteUserCommand().execute(UserDto(
      "username",
      "real name",
      "some://picture.url",
      "session id",
      2
    ))
    assertNumOfUsersIs(1)
  }

  /** Asserts the total number of users.
    * Fixed: the match was non-exhaustive — a Right result previously blew up with a
    * MatchError instead of failing the test with a meaningful message. */
  def assertNumOfUsersIs(expected: Int): Unit = {
    new GetAllUsersCommand().execute(SessionDto("")) match {
      case Left(allUsers) => assert(allUsers.values.size === expected)
      case Right(error) => fail(s"GetAllUsersCommand failed with: $error")
    }
  }

  after {
    // cleanup database
    DbCleaner.clearDb
  }
}
package io.iohk.ethereum.consensus.blocks
import java.util.concurrent.atomic.AtomicReference
import akka.util.ByteString
import io.iohk.ethereum.consensus.ConsensusConfig
import io.iohk.ethereum.consensus.difficulty.DifficultyCalculator
import io.iohk.ethereum.consensus.ethash.blocks.Ommers
import io.iohk.ethereum.consensus.validators.std.MptListValidator.intByteArraySerializable
import io.iohk.ethereum.crypto.kec256
import io.iohk.ethereum.db.dataSource.EphemDataSource
import io.iohk.ethereum.db.storage.StateStorage
import io.iohk.ethereum.domain._
import io.iohk.ethereum.domain.BlockHeader.HeaderExtraFields._
import io.iohk.ethereum.consensus.ethash.blocks.OmmersSeqEnc
import io.iohk.ethereum.ledger.Ledger.{BlockResult, PreparedBlock}
import io.iohk.ethereum.ledger.{BlockPreparator, BloomFilter, InMemoryWorldStateProxy}
import io.iohk.ethereum.mpt.{ByteArraySerializable, MerklePatriciaTrie}
import io.iohk.ethereum.utils.BlockchainConfig
import io.iohk.ethereum.utils.ByteUtils.or
/**
* This is a skeleton for a generic [[io.iohk.ethereum.consensus.blocks.BlockGenerator BlockGenerator]].
*/
abstract class BlockGeneratorSkeleton(
blockchain: Blockchain,
blockchainConfig: BlockchainConfig,
consensusConfig: ConsensusConfig,
difficultyCalc: DifficultyCalculator,
_blockTimestampProvider: BlockTimestampProvider = DefaultBlockTimestampProvider
) extends TestBlockGenerator {
protected val headerExtraData = consensusConfig.headerExtraData
protected val blockCacheSize = consensusConfig.blockCacheSize
protected val cache: AtomicReference[List[PendingBlockAndState]] = new AtomicReference(Nil)
protected def newBlockBody(transactions: Seq[SignedTransaction], x: X): BlockBody
/** Default header construction shared by concrete generators.
  *
  * Builds a header from the parent block and mining parameters. The state,
  * transactions, receipts roots and the bloom filter are intentionally left
  * empty here — they can only be computed after the transactions are executed
  * (see `prepareBlock`). Likewise mixHash/nonce are filled in by the miner.
  *
  * @param blockTimestamp unix timestamp (seconds) for the new block
  * @param x              the ommers to reference from this header
  */
protected def defaultPrepareHeader(
  blockNumber: BigInt,
  parent: Block,
  beneficiary: Address,
  blockTimestamp: Long,
  x: Ommers
): BlockHeader = {
  // ECIP-1097 is checked first; assumes its activation block is >= the
  // ECIP-1098 one so the richer extra-fields variant wins — TODO confirm.
  val extraFields =
    if (blockNumber >= blockchainConfig.ecip1097BlockNumber)
      HefPostEcip1097(consensusConfig.treasuryOptOut, None)
    else if (blockNumber >= blockchainConfig.ecip1098BlockNumber)
      HefPostEcip1098(consensusConfig.treasuryOptOut)
    else
      HefEmpty
  BlockHeader(
    parentHash = parent.header.hash,
    // ommersHash is the Keccak-256 of the RLP-encoded ommers list
    ommersHash = ByteString(kec256(x.toBytes: Array[Byte])),
    beneficiary = beneficiary.bytes,
    stateRoot = ByteString.empty,
    //we are not able to calculate transactionsRoot here because we do not know if they will fail
    transactionsRoot = ByteString.empty,
    receiptsRoot = ByteString.empty,
    logsBloom = ByteString.empty,
    difficulty = difficultyCalc.calculateDifficulty(blockNumber, blockTimestamp, parent.header),
    number = blockNumber,
    gasLimit = calculateGasLimit(parent.header.gasLimit),
    gasUsed = 0,
    unixTimestamp = blockTimestamp,
    // during the DAO fork window the configured fork extra-data overrides the
    // node's own headerExtraData
    extraData = blockchainConfig.daoForkConfig
      .flatMap(daoForkConfig => daoForkConfig.getExtraData(blockNumber))
      .getOrElse(headerExtraData),
    mixHash = ByteString.empty,
    nonce = ByteString.empty,
    extraFields = extraFields
  )
}
/** Hook for concrete generators to build the (pre-execution) header for a new
  * block; implementations typically delegate to `defaultPrepareHeader`.
  *
  * @param x consensus-specific payload (e.g. ommers for ethash)
  */
protected def prepareHeader(
  blockNumber: BigInt,
  parent: Block,
  beneficiary: Address,
  blockTimestamp: Long,
  x: X
): BlockHeader
/** Assembles and executes a candidate block on top of `parent`.
  *
  * Steps: stamp the block with the current time, build a provisional header,
  * select/order the pending transactions, execute them via `blockPreparator`,
  * and finally patch the header with the roots, bloom filter and gas usage that
  * are only known after execution.
  *
  * @param initialWorldStateBeforeExecution optional starting world state;
  *        semantics of None are defined by BlockPreparator — TODO confirm
  * @return the fully-sealed pending block plus the post-execution world state
  */
protected def prepareBlock(
  parent: Block,
  transactions: Seq[SignedTransaction],
  beneficiary: Address,
  blockNumber: BigInt,
  blockPreparator: BlockPreparator,
  x: X,
  initialWorldStateBeforeExecution: Option[InMemoryWorldStateProxy]
): PendingBlockAndState = {
  val blockTimestamp = blockTimestampProvider.getEpochSecond
  val header = prepareHeader(blockNumber, parent, beneficiary, blockTimestamp, x)
  val transactionsForBlock = prepareTransactions(transactions, header.gasLimit)
  val body = newBlockBody(transactionsForBlock, x)
  val block = Block(header, body)
  blockPreparator.prepareBlock(block, parent.header, initialWorldStateBeforeExecution) match {
    case PreparedBlock(prepareBlock, BlockResult(_, gasUsed, receipts), stateRoot, updatedWorld) =>
      // Bloom filter over all receipts; starts from the empty filter so the
      // result is well-defined even with zero receipts.
      val receiptsLogs: Seq[Array[Byte]] =
        BloomFilter.EmptyBloomFilter.toArray +: receipts.map(_.logsBloomFilter.toArray)
      val bloomFilter = ByteString(or(receiptsLogs: _*))
      PendingBlockAndState(
        PendingBlock(
          block.copy(
            // fill in the fields that required executing the transactions;
            // note the body comes from the prepared block, as execution may
            // have dropped failing transactions
            header = block.header.copy(
              transactionsRoot = buildMpt(prepareBlock.body.transactionList, SignedTransaction.byteArraySerializable),
              stateRoot = stateRoot,
              receiptsRoot = buildMpt(receipts, Receipt.byteArraySerializable),
              logsBloom = bloomFilter,
              gasUsed = gasUsed
            ),
            body = prepareBlock.body
          ),
          receipts
        ),
        updatedWorld
      )
  }
}
/** Selects and orders pending transactions for inclusion in a block.
  *
  * Per sender: transactions are ordered by nonce ascending; because sortBy is
  * stable, when two transactions share a nonce the one with the higher gas
  * price (sorted first in the preceding pass) wins and the duplicate is
  * dropped. The per-sender sequence is cut at the first transaction whose own
  * gas limit exceeds the block gas limit.
  *
  * Across senders: sender groups are ordered by the gas price of their first
  * transaction, descending.
  *
  * Finally the flattened list is truncated so the cumulative gas limit of the
  * selected transactions does not exceed the block gas limit.
  */
protected def prepareTransactions(
  transactions: Seq[SignedTransaction],
  blockGasLimit: BigInt
): Seq[SignedTransaction] = {
  val sortedTransactions: Seq[SignedTransaction] = transactions
    //should be safe to call get as we do not insert improper transactions to pool.
    .groupBy(tx => SignedTransaction.getSender(tx).get)
    .values
    .toList
    .flatMap { txsFromSender =>
      val ordered = txsFromSender
        // stable sort: gas-price pass first, nonce pass second (see scaladoc)
        .sortBy(-_.tx.gasPrice)
        .sortBy(_.tx.nonce)
        // drop nonce duplicates, keeping the first (highest gas price)
        .foldLeft(Seq.empty[SignedTransaction]) { case (txs, tx) =>
          if (txs.exists(_.tx.nonce == tx.tx.nonce)) {
            txs
          } else {
            txs :+ tx
          }
        }
        .takeWhile(_.tx.gasLimit <= blockGasLimit)
      ordered.headOption.map(_.tx.gasPrice -> ordered)
    }
    .sortBy { case (gasPrice, _) => gasPrice }
    .reverse
    .flatMap { case (_, txs) => txs }
  // keep a prefix whose summed gas limits fit into the block gas limit
  val transactionsForBlock: Seq[SignedTransaction] = sortedTransactions
    .scanLeft(BigInt(0), None: Option[SignedTransaction]) { case ((accumulatedGas, _), stx) =>
      (accumulatedGas + stx.tx.gasLimit, Some(stx))
    }
    .collect { case (gas, Some(stx)) => (gas, stx) }
    .takeWhile { case (gas, _) => gas <= blockGasLimit }
    .map { case (_, stx) => stx }
  transactionsForBlock
}
/*
Returns the same gas limit as the parent block
In Mantis only testnets (and without this changed), this means that all blocks will have the same gasLimit as
the genesis block
*/
protected def calculateGasLimit(parentGas: BigInt): BigInt = parentGas
/** Computes the root hash of a Merkle Patricia trie containing `entities`,
  * each keyed by its position in the sequence. The trie is backed by a
  * throwaway in-memory data source — only the root hash is of interest.
  */
protected def buildMpt[K](entities: Seq[K], vSerializable: ByteArraySerializable[K]): ByteString = {
  val emptyTrie = MerklePatriciaTrie[Int, K](
    source = StateStorage.getReadOnlyStorage(EphemDataSource())
  )(intByteArraySerializable, vSerializable)
  val filledTrie = entities.zipWithIndex.foldLeft(emptyTrie) {
    case (trie, (entity, index)) => trie.put(index, entity)
  }
  ByteString(filledTrie.getRootHash)
}
def blockTimestampProvider: BlockTimestampProvider = _blockTimestampProvider
/** Returns the pending block currently being mined — the cached candidate with
  * the highest timestamp — if any exists.
  */
def getPendingBlock: Option[PendingBlock] =
  for (pendingBlockAndState <- getPendingBlockAndState) yield pendingBlockAndState.pendingBlock
/** Like [[getPendingBlock]] but also returns the associated world state;
  * None when no candidate block has been generated yet.
  */
def getPendingBlockAndState: Option[PendingBlockAndState] =
  cache.get() match {
    case Nil => None
    case pendingBlocks =>
      Some(pendingBlocks.maxBy(_.pendingBlock.block.header.unixTimestamp))
  }
}
| input-output-hk/etc-client | src/main/scala/io/iohk/ethereum/consensus/blocks/BlockGeneratorSkeleton.scala | Scala | mit | 7,487 |
package com.datastax.spark.connector
import com.datastax.driver.core.{CodecRegistry, ResultSet, Row, TypeCodec}
/** Represents a single row fetched from Cassandra.
* Offers getters to read individual fields by column name or column index.
* The getters try to convert value to desired type, whenever possible.
* Most of the column types can be converted to a `String`.
* For nullable columns, you should use the `getXXXOption` getters which convert
* `null`s to `None` values, otherwise a `NullPointerException` would be thrown.
*
* All getters throw an exception if column name/index is not found.
* Column indexes start at 0.
*
* If the value cannot be converted to desired type,
* [[com.datastax.spark.connector.types.TypeConversionException]] is thrown.
*
* Recommended getters for Cassandra types:
*
* - `ascii`: `getString`, `getStringOption`
* - `bigint`: `getLong`, `getLongOption`
* - `blob`: `getBytes`, `getBytesOption`
* - `boolean`: `getBool`, `getBoolOption`
* - `counter`: `getLong`, `getLongOption`
* - `decimal`: `getDecimal`, `getDecimalOption`
* - `double`: `getDouble`, `getDoubleOption`
* - `float`: `getFloat`, `getFloatOption`
* - `inet`: `getInet`, `getInetOption`
* - `int`: `getInt`, `getIntOption`
* - `text`: `getString`, `getStringOption`
* - `timestamp`: `getDate`, `getDateOption`
* - `timeuuid`: `getUUID`, `getUUIDOption`
* - `uuid`: `getUUID`, `getUUIDOption`
* - `varchar`: `getString`, `getStringOption`
* - `varint`: `getVarInt`, `getVarIntOption`
* - `list`: `getList[T]`
* - `set`: `getSet[T]`
* - `map`: `getMap[K, V]`
*
* Collection getters `getList`, `getSet` and `getMap` require to explicitly pass an appropriate item type:
* {{{
* row.getList[String]("a_list")
* row.getList[Int]("a_list")
* row.getMap[Int, String]("a_map")
* }}}
*
* Generic `get` allows to automatically convert collections to other collection types.
* Supported containers:
* - `scala.collection.immutable.List`
* - `scala.collection.immutable.Set`
* - `scala.collection.immutable.TreeSet`
* - `scala.collection.immutable.Vector`
* - `scala.collection.immutable.Map`
* - `scala.collection.immutable.TreeMap`
* - `scala.collection.Iterable`
* - `scala.collection.IndexedSeq`
* - `java.util.ArrayList`
* - `java.util.HashSet`
* - `java.util.HashMap`
*
* Example:
* {{{
* row.get[List[Int]]("a_list")
* row.get[Vector[Int]]("a_list")
* row.get[java.util.ArrayList[Int]]("a_list")
* row.get[TreeMap[Int, String]]("a_map")
* }}}
*
*
* Timestamps can be converted to other Date types by using generic `get`. Supported date types:
* - java.util.Date
* - java.sql.Date
* - org.joda.time.DateTime
*/
// Metadata (column names, codecs) is shared between rows of one result set to
// save memory; the getters themselves live in ScalaGettableData.
final class CassandraRow(val metaData: CassandraRowMetadata, val columnValues: IndexedSeq[AnyRef])
  extends ScalaGettableData with Serializable {

  /**
   * The constructor is for testing and backward compatibility only.
   * Use default constructor with shared metadata for memory saving and performance.
   *
   * @param columnNames names of the columns, in the same order as `columnValues`
   * @param columnValues already-deserialized column values
   */
  @deprecated("Use default constructor", "1.6.0")
  def this(columnNames: IndexedSeq[String], columnValues: IndexedSeq[AnyRef]) =
    this(CassandraRowMetadata.fromColumnNames(columnNames), columnValues)

  override def toString = "CassandraRow" + dataAsString
}
/**
* All CassandraRows shared data
*
* @param columnNames row column names
* @param resultSetColumnNames column names from java driver row result set, without connector aliases.
* @param codecs cached java driver codecs to avoid registry lookups
*
*/
case class CassandraRowMetadata(columnNames: IndexedSeq[String],
                                resultSetColumnNames: Option[IndexedSeq[String]] = None,
                                // transient because codecs are not serializable and used only at Row parsing
                                // not and option as deserialized fileld will be null not None
                                @transient private[connector] val codecs: IndexedSeq[TypeCodec[AnyRef]] = null) {

  // Name -> index lookup; unknown names map to -1 rather than throwing.
  @transient
  lazy val namesToIndex: Map[String, Int] = columnNames.zipWithIndex.toMap.withDefaultValue(-1)

  // Same lookup over the driver-reported (unaliased) names, but throwing a
  // descriptive error for unknown columns.
  @transient
  lazy val indexOfCqlColumnOrThrow = unaliasedColumnNames.zipWithIndex.toMap.withDefault { name =>
    throw new ColumnNotFoundException(
      s"Column not found: $name. " +
        s"Available columns are: ${columnNames.mkString("[", ", ", "]")}")
  }

  // Throwing variant of namesToIndex, for callers that require the column to exist.
  @transient
  lazy val indexOfOrThrow = namesToIndex.withDefault { name =>
    throw new ColumnNotFoundException(
      s"Column not found: $name. " +
        s"Available columns are: ${columnNames.mkString("[", ", ", "]")}")
  }

  // Falls back to the (possibly aliased) columnNames when no result-set names were captured.
  def unaliasedColumnNames = resultSetColumnNames.getOrElse(columnNames)
}
object CassandraRowMetadata {

  /** Builds row metadata from a driver `ResultSet`: captures the column names
    * as reported by the driver and pre-resolves a codec per column, so per-row
    * value extraction avoids repeated codec-registry lookups.
    *
    * Uses explicit `JavaConverters`/`asScala` instead of the deprecated
    * implicit `scala.collection.JavaConversions`.
    */
  def fromResultSet(columnNames: IndexedSeq[String], rs: ResultSet): CassandraRowMetadata = {
    import scala.collection.JavaConverters._
    val columnDefs = rs.getColumnDefinitions.asList().asScala.toList
    val rsColumnNames = columnDefs.map(_.getName)
    val codecs = columnDefs.map(col => CodecRegistry.DEFAULT_INSTANCE.codecFor(col.getType))
      .asInstanceOf[List[TypeCodec[AnyRef]]]
    CassandraRowMetadata(columnNames, Some(rsColumnNames.toIndexedSeq), codecs.toIndexedSeq)
  }

  /**
   * create metadata object without codecs. Should be used for testing only
   *
   * @param columnNames names of the columns
   * @return metadata with no result-set names and no cached codecs
   */
  def fromColumnNames(columnNames: IndexedSeq[String]): CassandraRowMetadata =
    CassandraRowMetadata(columnNames, None)

  def fromColumnNames(columnNames: Seq[String]): CassandraRowMetadata =
    fromColumnNames(columnNames.toIndexedSeq)
}
object CassandraRow {

  /** Deserializes first n columns from the given `Row` and returns them as
    * a `CassandraRow` object. The number of columns retrieved is determined by the length
    * of the columnNames argument. The columnNames argument is used as metadata for
    * the newly created `CassandraRow`, but it is not used to fetch data from
    * the input `Row` in order to improve performance. Fetching column values by name is much
    * slower than fetching by index. */
  def fromJavaDriverRow(row: Row, metaData: CassandraRowMetadata): CassandraRow = {
    new CassandraRow(metaData, CassandraRow.dataFromJavaDriverRow(row, metaData))
  }

  /** Extracts the raw column values from a driver `Row` by index,
    * using the pre-resolved codecs from `metaData` when available. */
  def dataFromJavaDriverRow(row: Row, metaData: CassandraRowMetadata): Array[Object] = {
    val length = metaData.columnNames.length
    var i = 0
    val data = new Array[Object](length)
    // Here we use a mutable while loop for performance reasons, scala for loops are
    // converted into range.foreach() and the JVM is unable to inline the foreach closure.
    // 'match' is replaced with 'if' for the same reason.
    // It is also out of the loop for performance.
    if (metaData.codecs == null) {
      //that should not happen in production, but just in case
      // (codecs are null e.g. for metadata built via fromColumnNames, or after deserialization)
      while (i < length) {
        data(i) = GettableData.get(row, i)
        i += 1
      }
    }
    else {
      while (i < length) {
        data(i) = GettableData.get(row, i, metaData.codecs(i))
        i += 1
      }
    }
    data
  }

  /** Creates a CassandraRow object from a map with keys denoting column names and
    * values denoting column values. */
  def fromMap(map: Map[String, Any]): CassandraRow = {
    val (columnNames, values) = map.unzip
    new CassandraRow(CassandraRowMetadata.fromColumnNames(columnNames.toIndexedSeq), values.map(_.asInstanceOf[AnyRef]).toIndexedSeq)
  }
}
| ponkin/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/CassandraRow.scala | Scala | apache-2.0 | 7,610 |
package models
import play.api.db._
import play.api.Play.current
import anorm._
import anorm.SqlParser._
case class Board(id: Pk[Long] = NotAssigned, name: String, comment: String)
object Board {
// Anorm row parser mapping the id/name/comment columns to a Board instance.
val simple = {
  get[Pk[Long]]("id") ~
  get[String]("name") ~
  get[String]("comment") map {
    case id~name~comment => Board(id, name, comment)
  }
}
/** Returns the five most recently created boards.
  *
  * Fix: the query previously used `LIMIT 5` with no ORDER BY, which returns
  * five rows in an unspecified order — not the "last 5" the name promises.
  * Ordering by id descending (ids are assigned sequentially on insert) makes
  * the result deterministic and newest-first.
  */
def last5 = {
  DB.withConnection { implicit conn =>
    SQL(
      """
        SELECT *
        FROM Board
        ORDER BY id DESC
        LIMIT 5
      """
    ).as(Board.simple *)
  }
}
} | gorgon-zola/playchat | app/models/Board.scala | Scala | apache-2.0 | 529 |
package com.socrata.datacoordinator.resources.collocation
import java.util.UUID
import com.socrata.http.server.{HttpRequest, HttpResponse}
/** REST resource for a single secondary-move job addressed by `jobId`.
  * GET returns the job's move-job result; DELETE removes the job.
  * The raw `jobId` string is parsed/validated as a UUID by `withJobId`
  * (inherited behavior — presumably responds with an error on a bad id; confirm
  * against CollocationSodaResource).
  */
case class SecondaryMoveJobsJobResource(jobId: String,
                                        secondaryMoveJobs: UUID => SecondaryMoveJobsResult,
                                        deleteJob: UUID => Unit) extends CollocationSodaResource {

  override def get = doGetSecondaryMoveJobs
  override def delete = doDeleteSecondaryMoveJobs

  // GET handler: look up the move jobs for the parsed UUID and reply 200 OK.
  def doGetSecondaryMoveJobs(req: HttpRequest): HttpResponse = {
    withJobId(jobId, req) { id =>
      responseOK(secondaryMoveJobs(id))
    }
  }

  // DELETE handler: delete the job; deleteJob returns Unit, so the OK response
  // carries no meaningful payload.
  def doDeleteSecondaryMoveJobs(req: HttpRequest): HttpResponse = {
    withJobId(jobId, req) { id =>
      responseOK(deleteJob(id))
    }
  }
}
| socrata-platform/data-coordinator | coordinator/src/main/scala/com/socrata/datacoordinator/resources/collocation/SecondaryMoveJobsJobResource.scala | Scala | apache-2.0 | 781 |
package edu.usc.irds.sparkler.storage.elasticsearch
import edu.usc.irds.sparkler.Constants
import edu.usc.irds.sparkler.storage.{StorageRDD, SparklerGroupPartition}
import edu.usc.irds.sparkler.model.{Resource, ResourceStatus, SparklerJob}
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.elasticsearch.action.search.SearchRequest
import org.elasticsearch.action.search.SearchResponse
import org.elasticsearch.search.builder.SearchSourceBuilder
import org.elasticsearch.client.RequestOptions
import org.elasticsearch.index.query.QueryBuilders
import org.elasticsearch.index.query.BoolQueryBuilder
import org.elasticsearch.client.RestHighLevelClient
import org.elasticsearch.search.sort.SortOrder
import org.elasticsearch.search.aggregations.AggregationBuilders
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder
import org.elasticsearch.search.aggregations.Aggregations
import org.elasticsearch.search.aggregations.Aggregation
import org.apache.lucene.queryparser.classic.QueryParserBase
import org.elasticsearch.search.SearchHits
import org.elasticsearch.search.SearchHit
import org.elasticsearch.common.document.DocumentField
import scala.collection.JavaConversions._
import java.net.URL
class ElasticsearchDeepRDD(sc: SparkContext,
job: SparklerJob,
sortBy: String = ElasticsearchDeepRDD.DEFAULT_ORDER,
generateQry: String = ElasticsearchDeepRDD.DEFAULT_FILTER_QRY,
maxGroups: Int = ElasticsearchDeepRDD.DEFAULT_GROUPS,
topN: Int = ElasticsearchDeepRDD.DEFAULT_TOPN,
deepCrawlHosts: Array[String] = new Array[String](0))
extends RDD[Resource](sc, Seq.empty) {
assert(topN > 0)
assert(maxGroups > 0)
val storageFactory = job.getStorageFactory
/** Fetches the resources of one partition (one crawl group / parent) from
  * Elasticsearch and returns them as a lazy, batched iterator.
  *
  * The query restricts to this partition's group and this crawl job, adds an
  * OR over the deep-crawl hostnames, the user-supplied `generateQry`
  * field:value filters, and the `sortBy` sort clauses. Results are streamed
  * `batchSize` documents at a time, up to `topN` in total.
  */
override def compute(split: Partition, context: TaskContext): Iterator[Resource] = {
  val partition: SparklerGroupPartition = split.asInstanceOf[SparklerGroupPartition]
  val batchSize = 100
  var searchRequest : SearchRequest = new SearchRequest("crawldb")
  var searchSourceBuilder : SearchSourceBuilder = new SearchSourceBuilder()
  // base filter: this partition's group AND this crawl job
  var q : BoolQueryBuilder = QueryBuilders.boolQuery()
    .must(QueryBuilders.matchQuery(Constants.storage.PARENT, QueryParserBase.escape(partition.group)))
    .must(QueryBuilders.matchQuery(Constants.storage.CRAWL_ID, job.id))
  for(url <- deepCrawlHosts) {
    try {
      val hostname = new URL(url).getHost
      // should is similar to OR for Elasticsearch
      q.should(QueryBuilders.matchQuery(Constants.storage.HOSTNAME, hostname))
    } catch {
      // malformed URLs are skipped, not fatal
      case e: Exception => print(s"Exception occured while getting host from $url")
    }
  }
  // querying: each comma-separated clause is "<field>:<value>"
  for (query <- generateQry.split(",")) {
    try {
      val Array(field, value) = query.split(":").take(2)
      q.must(QueryBuilders.matchQuery(field, value))
    } catch {
      // a clause without ":" fails the Array extractor and lands here
      case e: Exception => println("Exception parsing generateQry: " + generateQry)
    }
  }
  // sorting: each comma-separated clause is "<field> <asc|desc>"
  for (sort <- sortBy.split(",")) {
    try {
      val Array(field, order) = sort.split(" ").take(2)
      if (order.toLowerCase() == "asc") {
        searchSourceBuilder.sort(field, SortOrder.ASC)
      }
      else if (order.toLowerCase() == "desc") {
        searchSourceBuilder.sort(field, SortOrder.DESC)
      }
      else {
        println("Invalid sort order for: " + field)
      }
    } catch {
      case e: Exception => println("Exception parsing sortBy: " + sortBy)
    }
  }
  searchSourceBuilder.size(batchSize)
  searchSourceBuilder.query(q)
  searchRequest.source(searchSourceBuilder)
  val proxy = storageFactory.getProxy
  var client : RestHighLevelClient = null
  try {
    client = proxy.getClient().asInstanceOf[RestHighLevelClient]
  } catch {
    // NOTE(review): client stays null here and is handed to the iterator anyway
    case e: ClassCastException => println("client is not RestHighLevelClient.")
  }
  // iterator pages through results and closes the client when exhausted
  new ElasticsearchResultIterator[Resource](client, searchRequest,
    batchSize, classOf[Resource], closeClient = true, limit = topN)
}
/** Computes one Spark partition per crawl group (parent) for the current job.
  *
  * Queries Elasticsearch for this job's documents matching `generateQry`
  * (sorted per `sortBy`, aggregated by parent) and creates a
  * [[SparklerGroupPartition]] per returned document — at most `maxGroups`,
  * since that is the request page size.
  *
  * Fix: the partition array was previously sized and indexed by
  * `getTotalHits().value`, which counts ALL matching documents, while the
  * response only carries up to `maxGroups` hits; any crawl with more matches
  * than `maxGroups` threw ArrayIndexOutOfBoundsException. We now size and
  * iterate over the hits actually returned.
  */
override protected def getPartitions: Array[Partition] = {
  var searchRequest : SearchRequest = new SearchRequest("crawldb")
  var searchSourceBuilder : SearchSourceBuilder = new SearchSourceBuilder()
  // querying: restrict to this crawl job plus the user-supplied "<field>:<value>" clauses
  var q : BoolQueryBuilder = QueryBuilders.boolQuery()
    .must(QueryBuilders.matchQuery(Constants.storage.CRAWL_ID, job.id))
  for (query <- generateQry.split(",")) {
    try {
      val Array(field, value) = query.split(":").take(2)
      q.must(QueryBuilders.matchQuery(field, value))
    } catch {
      case e: Exception => println("Exception parsing generateQry: " + generateQry)
    }
  }
  searchSourceBuilder.query(q)
  // sorting: each comma-separated clause is "<field> <asc|desc>"
  for (sort <- sortBy.split(",")) {
    try {
      val Array(field, order) = sort.split(" ").take(2)
      if (order.toLowerCase() == "asc") {
        searchSourceBuilder.sort(field, SortOrder.ASC)
      }
      else if (order.toLowerCase() == "desc") {
        searchSourceBuilder.sort(field, SortOrder.DESC)
      }
      else {
        println("Invalid sort order for: " + field)
      }
    } catch {
      case e: Exception => println("Exception parsing sortBy: " + sortBy)
    }
  }
  // grouping
  var groupBy : TermsAggregationBuilder = AggregationBuilders.terms("by" + Constants.storage.PARENT)
    .field(Constants.storage.PARENT + ".keyword")
  groupBy.size(1)
  searchSourceBuilder.aggregation(groupBy)
  searchSourceBuilder.size(maxGroups)
  searchRequest.source(searchSourceBuilder)
  val proxy = storageFactory.getProxy
  var client : RestHighLevelClient = null
  try {
    client = proxy.getClient().asInstanceOf[RestHighLevelClient]
  } catch {
    case e: ClassCastException => println("client is not RestHighLevelClient.")
  }
  val searchResponse : SearchResponse = client.search(searchRequest, RequestOptions.DEFAULT)
  // use the hits actually present in the response, never the total match count
  val hits: Array[SearchHit] = searchResponse.getHits().getHits()
  val res = new Array[Partition](hits.length)
  for (i <- hits.indices) {
    //TODO: improve partitioning : (1) club smaller domains, (2) support for multiple partitions for larger domains
    res(i) = new SparklerGroupPartition(i, hits(i).getSourceAsMap().get("group").asInstanceOf[String])
  }
  proxy.close()
  res
}
}
object ElasticsearchDeepRDD extends StorageRDD {
override val DEFAULT_ORDER = Constants.storage.DISCOVER_DEPTH + " asc," + Constants.storage.SCORE + " desc"
override val DEFAULT_FILTER_QRY = Constants.storage.STATUS + ":" + ResourceStatus.UNFETCHED
override val DEFAULT_GROUPS = 10
override val DEFAULT_TOPN = 1000
} | USCDataScience/sparkler | sparkler-app/src/main/scala/edu/usc/irds/sparkler/storage/elasticsearch/ElasticsearchDeepRDD.scala | Scala | apache-2.0 | 6,878 |
/*
* Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.infinity.server.util
import java.io.Closeable
import org.mockito.Mockito.inOrder
import org.scalatest.FlatSpec
import org.scalatest.matchers.MustMatchers
import org.scalatest.mock.MockitoSugar
class ToCloseTest extends FlatSpec with MustMatchers with MockitoSugar {

  "A ToClose" must "close all resources after using the principal one" in {
    val c1 = mock[Closeable]("principal")
    val c2 = mock[Closeable]("secondary")
    // useAndClose hands the principal (first) resource to the function;
    // toString on a named Mockito mock yields its name, hence "principal"
    val nameOfResourceUsed = ToClose(c1, c2).useAndClose(_.toString)
    val order = inOrder(c1, c2)
    nameOfResourceUsed must be ("principal")
    // both resources must be closed afterwards, principal first
    order.verify(c1).close()
    order.verify(c2).close()
  }

  it must "only allow to use the resource once" in {
    val toClose = ToClose(mock[Closeable])
    toClose.useAndClose(identity)
    // a second use must be rejected, since the resource was already closed
    evaluating (toClose.useAndClose(identity)) must produce [IllegalArgumentException]
  }
}
| telefonicaid/fiware-cosmos-platform | infinity/server/src/test/scala/es/tid/cosmos/infinity/server/util/ToCloseTest.scala | Scala | apache-2.0 | 1,525 |
package io.github.pauljamescleary.petstore
package infrastructure.endpoint
import domain.orders._
import infrastructure.repository.inmemory._
import cats.effect._
import io.circe._
import io.circe.generic.semiauto._
import org.http4s._
import org.http4s.implicits._
import org.http4s.dsl._
import org.http4s.circe._
import org.http4s.client.dsl.Http4sClientDsl
import org.http4s.server.Router
import org.scalatest.funsuite.AnyFunSuite
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
import tsec.mac.jca.HMACSHA256
import org.scalatest.matchers.should.Matchers
class OrderEndpointsSpec
extends AnyFunSuite
with Matchers
with ScalaCheckPropertyChecks
with PetStoreArbitraries
with Http4sDsl[IO]
with Http4sClientDsl[IO] {
implicit val statusDec: EntityDecoder[IO, OrderStatus] = jsonOf
implicit val statusEnc: EntityEncoder[IO, OrderStatus] = jsonEncoderOf
implicit val orderEncoder: Encoder[Order] = deriveEncoder
implicit val orderEnc: EntityEncoder[IO, Order] = jsonEncoderOf
implicit val orderDecoder: Decoder[Order] = deriveDecoder
implicit val orderDec: EntityDecoder[IO, Order] = jsonOf
/** Wires fresh in-memory repositories into an authenticated order service and
  * returns the auth helper together with the routable HttpApp under test.
  */
def getTestResources(): (AuthTest[IO], HttpApp[IO]) = {
  val auth = new AuthTest[IO](UserRepositoryInMemoryInterpreter[IO]())
  val service = OrderService(OrderRepositoryInMemoryInterpreter[IO]())
  val endpoints = OrderEndpoints.endpoints[IO, HMACSHA256](service, auth.securedRqHandler)
  val app = Router(("/orders", endpoints)).orNotFound
  (auth, app)
}
// POST an order as an admin, then GET it back by the id assigned on creation;
// checks both status codes and that the round-tripped order keeps its petId
// and has been stamped with a userId.
test("place and get order") {
  val (auth, orderRoutes) = getTestResources()

  forAll { (order: Order, user: AdminUser) =>
    (for {
      createRq <- POST(order, uri"/orders")
      createRqAuth <- auth.embedToken(user.value, createRq)
      createResp <- orderRoutes.run(createRqAuth)
      orderResp <- createResp.as[Order]
      getOrderRq <- GET(Uri.unsafeFromString(s"/orders/${orderResp.id.get}"))
      getOrderRqAuth <- auth.embedToken(user.value, getOrderRq)
      getOrderResp <- orderRoutes.run(getOrderRqAuth)
      orderResp2 <- getOrderResp.as[Order]
    } yield {
      createResp.status shouldEqual Ok
      orderResp.petId shouldBe order.petId
      getOrderResp.status shouldEqual Ok
      orderResp2.userId shouldBe defined
    }).unsafeRunSync()
  }
}
// Authorization check: DELETE /orders/{id} is rejected (401) for customers
// but allowed (200) for admins.
test("user roles") {
  val (auth, orderRoutes) = getTestResources()

  forAll { user: CustomerUser =>
    (for {
      deleteRq <- DELETE(Uri.unsafeFromString(s"/orders/1"))
        .flatMap(auth.embedToken(user.value, _))
      deleteResp <- orderRoutes.run(deleteRq)
    } yield deleteResp.status shouldEqual Unauthorized).unsafeRunSync()
  }

  forAll { user: AdminUser =>
    (for {
      deleteRq <- DELETE(Uri.unsafeFromString(s"/orders/1"))
        .flatMap(auth.embedToken(user.value, _))
      deleteResp <- orderRoutes.run(deleteRq)
    } yield deleteResp.status shouldEqual Ok).unsafeRunSync()
  }
}
}
| pauljamescleary/scala-pet-store | src/test/scala/io/github/pauljamescleary/petstore/infrastructure/endpoint/OrderEndpointsSpec.scala | Scala | apache-2.0 | 3,050 |
/*
* Copyright 2011-2019 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.runtime.directio
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.util.ReflectionUtils
/** An [[ObjectFactory]] that instantiates classes through Hadoop's
  * `ReflectionUtils`, passing along the Hadoop `Configuration` — presumably so
  * that `Configurable` implementations receive it via `setConf`; confirm
  * against the ReflectionUtils contract.
  */
class HadoopObjectFactory(conf: Configuration) extends ObjectFactory {

  override def newInstance[T](cls: Class[T]): T = ReflectionUtils.newInstance(cls, conf)
}
| ueshin/asakusafw-spark | runtime/src/main/scala/com/asakusafw/spark/runtime/directio/HadoopObjectFactory.scala | Scala | apache-2.0 | 922 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import scala.collection.mutable.ListBuffer
import org.scalatest.events.Event
import org.scalatest.events.Ordinal
import org.scalatest.SharedHelpers.SilentReporter
import org.scalatest.SharedHelpers.EventRecordingReporter
import org.scalatest.events.InfoProvided
class BeforeAndAfterSuite extends FunSuite {
// Records whether Suite's runTest/run were invoked, so subclasses can verify
// the ordering of the BeforeAndAfter* callbacks relative to them.
class TheSuper extends Suite {
  var runTestWasCalled = false
  var runWasCalled = false
  protected override def runTest(testName: String, args: Args): Status = {
    runTestWasCalled = true
    super.runTest(testName, args)
  }
  override def run(testName: Option[String], args: Args): Status = {
    runWasCalled = true
    super.run(testName, args)
  }
}
// Fixture suite: each lifecycle callback sets a flag only if it observed the
// expected ordering relative to run/runTest (tracked by TheSuper), and the
// config-taking variants additionally record whether they saw the
// "hi" -> "there" entry passed to run.
class MySuite extends TheSuper with BeforeAndAfterEach with BeforeAndAfterAll {

  // ordering flags for the no-arg callbacks
  var beforeEachCalledBeforeRunTest = false
  var afterEachCalledAfterRunTest = false
  var beforeAllCalledBeforeExecute = false
  var afterAllCalledAfterExecute = false
  // ordering flags for the config-taking callbacks
  var beforeEachConfigCalledBeforeRunTest = false
  var afterEachConfigCalledAfterRunTest = false
  var beforeAllConfigCalledBeforeExecute = false
  var afterAllConfigCalledAfterExecute = false
  // flags recording that the config map was propagated
  var beforeEachConfigGotTheGreeting = false
  var afterEachConfigGotTheGreeting = false
  var beforeAllConfigGotTheGreeting = false
  var afterAllConfigGotTheGreeting = false

  override def beforeAll() {
    if (!runWasCalled)
      beforeAllCalledBeforeExecute = true
  }
  override def beforeEach() {
    if (!runTestWasCalled)
      beforeEachCalledBeforeRunTest = true
  }
  // a single test so the Each-callbacks actually fire
  def testSomething() = ()
  override def afterEach() {
    if (runTestWasCalled)
      afterEachCalledAfterRunTest = true
  }
  override def afterAll() {
    if (runWasCalled)
      afterAllCalledAfterExecute = true
  }
  override def beforeAll(config: ConfigMap) {
    if (!runWasCalled)
      beforeAllConfigCalledBeforeExecute = true
    if (config.contains("hi") && config("hi") == "there")
      beforeAllConfigGotTheGreeting = true
    // super's implementation delegates to the no-arg variant
    super.beforeAll(config)
  }
  override def beforeEach(config: ConfigMap) {
    if (!runTestWasCalled)
      beforeEachConfigCalledBeforeRunTest = true
    if (config.contains("hi") && config("hi") == "there")
      beforeEachConfigGotTheGreeting = true
    super.beforeEach(config)
  }
  override def afterEach(config: ConfigMap) {
    if (runTestWasCalled)
      afterEachConfigCalledAfterRunTest = true
    if (config.contains("hi") && config("hi") == "there")
      afterEachConfigGotTheGreeting = true
    super.afterEach(config)
  }
  override def afterAll(config: ConfigMap) {
    if (runWasCalled)
      afterAllConfigCalledAfterExecute = true
    if (config.contains("hi") && config("hi") == "there")
      afterAllConfigGotTheGreeting = true
    super.afterAll(config)
  }
}
// Happy-path ordering tests: run a fresh MySuite with a config containing
// "hi" -> "there" and assert the flag the fixture should have recorded.
test("super's runTest must be called") {
  val a = new MySuite
  a.run(None, Args(SilentReporter, Stopper.default, Filter(), ConfigMap("hi" -> "there"), None, new Tracker, Set.empty))
  assert(a.runTestWasCalled)
}
test("super's run must be called") {
  val a = new MySuite
  a.run(None, Args(SilentReporter, Stopper.default, Filter(), ConfigMap("hi" -> "there"), None, new Tracker, Set.empty))
  assert(a.runWasCalled)
}
test("beforeEach gets called before runTest") {
  val a = new MySuite
  a.run(None, Args(SilentReporter, Stopper.default, Filter(), ConfigMap("hi" -> "there"), None, new Tracker, Set.empty))
  assert(a.beforeEachCalledBeforeRunTest)
  assert(a.beforeEachConfigCalledBeforeRunTest)
}
test("afterEach gets called after runTest") {
  val a = new MySuite
  a.run(None, Args(SilentReporter, Stopper.default, Filter(), ConfigMap("hi" -> "there"), None, new Tracker, Set.empty))
  assert(a.afterEachCalledAfterRunTest)
  assert(a.afterEachConfigCalledAfterRunTest)
}
test("beforeAll gets called before run") {
  val a = new MySuite
  a.run(None, Args(SilentReporter, Stopper.default, Filter(), ConfigMap("hi" -> "there"), None, new Tracker, Set.empty))
  assert(a.beforeAllCalledBeforeExecute)
  assert(a.beforeAllConfigCalledBeforeExecute)
}
test("afterAll gets called after run") {
  val a = new MySuite
  a.run(None, Args(SilentReporter, Stopper.default, Filter(), ConfigMap("hi" -> "there"), None, new Tracker, Set.empty))
  assert(a.afterAllCalledAfterExecute)
  assert(a.afterAllConfigCalledAfterExecute)
}
// Config-propagation tests: the ConfigMap given to run must reach each callback.
test("beforeEach(config) gets the config passed to run") {
  val a = new MySuite
  a.run(None, Args(SilentReporter, Stopper.default, Filter(), ConfigMap("hi" -> "there"), None, new Tracker, Set.empty))
  assert(a.beforeEachConfigGotTheGreeting)
}
test("afterEach(config) gets the config passed to run") {
  val a = new MySuite
  a.run(None, Args(SilentReporter, Stopper.default, Filter(), ConfigMap("hi" -> "there"), None, new Tracker, Set.empty))
  assert(a.afterEachConfigGotTheGreeting)
}
test("beforeAll(config) gets the config passed to run") {
  val a = new MySuite
  a.run(None, Args(SilentReporter, Stopper.default, Filter(), ConfigMap("hi" -> "there"), None, new Tracker, Set.empty))
  assert(a.beforeAllConfigGotTheGreeting)
}
test("afterAll(config) gets the config passed to run") {
  val a = new MySuite
  a.run(None, Args(SilentReporter, Stopper.default, Filter(), ConfigMap("hi" -> "there"), None, new Tracker, Set.empty))
  assert(a.afterAllConfigGotTheGreeting)
}
// test exceptions with runTest
test("If any invocation of beforeEach completes abruptly with an exception, runTest " +
    "will complete abruptly with the same exception.") {

  class MySuite extends Suite with BeforeAndAfterEach with BeforeAndAfterAll {
    override def beforeEach() { throw new NumberFormatException }
  }
  intercept[NumberFormatException] {
    val a = new MySuite
    a.run(Some("july"), Args(StubReporter))
  }
}

test("If any call to super.runTest completes abruptly with an exception, runTest " +
    "will complete abruptly with the same exception, however, before doing so, it will invoke afterEach") {
  // a Suite whose runTest always throws, to simulate a failing framework call
  trait FunkySuite extends Suite {
    protected override def runTest(testName: String, args: Args): Status = {
      throw new NumberFormatException
    }
  }
  class MySuite extends FunkySuite with BeforeAndAfterEach with BeforeAndAfterAll {
    var afterEachCalled = false
    override def afterEach() {
      afterEachCalled = true
    }
  }
  val a = new MySuite
  intercept[NumberFormatException] {
    a.run(Some("july"), Args(StubReporter))
  }
  // afterEach must run even though runTest threw
  assert(a.afterEachCalled)
}

test("If both super.runTest and afterEach complete abruptly with an exception, runTest " +
    "will complete abruptly with the exception thrown by super.runTest.") {
  trait FunkySuite extends Suite {
    protected override def runTest(testName: String, args: Args): Status = {
      throw new NumberFormatException
    }
  }
  class MySuite extends FunkySuite with BeforeAndAfterEach with BeforeAndAfterAll {
    var afterEachCalled = false
    override def afterEach() {
      afterEachCalled = true
      throw new IllegalArgumentException
    }
  }
  val a = new MySuite
  // the runTest exception wins over the afterEach one
  intercept[NumberFormatException] {
    a.run(Some("july"), Args(StubReporter))
  }
  assert(a.afterEachCalled)
}

test("If super.runTest returns normally, but afterEach completes abruptly with an " +
    "exception, runTest will complete abruptly with the same exception.") {

  class MySuite extends Suite with BeforeAndAfterEach with BeforeAndAfterAll {
    override def afterEach() { throw new NumberFormatException }
    def testJuly() = ()
  }
  intercept[NumberFormatException] {
    val a = new MySuite
    a.run(Some("testJuly"), Args(StubReporter))
  }
}
  // test exceptions with run
  // Same contracts as the runTest tests above, but for the suite-level
  // beforeAll/afterAll hooks around run().
  // Contract: a beforeAll failure aborts the whole suite run with that exception.
  test("If any invocation of beforeAll completes abruptly with an exception, run " +
    "will complete abruptly with the same exception.") {
    class MySuite extends Suite with BeforeAndAfterEach with BeforeAndAfterAll {
      override def beforeAll() { throw new NumberFormatException }
      def testJuly() = ()
    }
    intercept[NumberFormatException] {
      val a = new MySuite
      a.run(None, Args(StubReporter))
    }
  }
  // Contract: afterAll still runs even when the wrapped run throws.
  test("If any call to super.run completes abruptly with an exception, run " +
    "will complete abruptly with the same exception, however, before doing so, it will invoke afterAll") {
    trait FunkySuite extends Suite {
      override def run(testName: Option[String], args: Args): Status = {
        throw new NumberFormatException
      }
    }
    class MySuite extends FunkySuite with BeforeAndAfterEach with BeforeAndAfterAll {
      var afterAllCalled = false
      override def afterAll() {
        afterAllCalled = true
      }
    }
    val a = new MySuite
    intercept[NumberFormatException] {
      a.run(None, Args(StubReporter))
    }
    assert(a.afterAllCalled)
  }
  // Contract: when both super.run and afterAll throw, super.run's exception wins.
  test("If both super.run and afterAll complete abruptly with an exception, run " +
    "will complete abruptly with the exception thrown by super.run.") {
    trait FunkySuite extends Suite {
      override def run(testName: Option[String], args: Args): Status = {
        throw new NumberFormatException
      }
    }
    class MySuite extends FunkySuite with BeforeAndAfterEach with BeforeAndAfterAll {
      var afterAllCalled = false
      override def afterAll() {
        afterAllCalled = true
        throw new IllegalArgumentException
      }
    }
    val a = new MySuite
    intercept[NumberFormatException] {
      a.run(None, Args(StubReporter))
    }
    assert(a.afterAllCalled)
  }
  // Contract: an afterAll failure after a clean run surfaces as the run's failure.
  test("If super.run returns normally, but afterAll completes abruptly with an " +
    "exception, run will complete abruptly with the same exception.") {
    class MySuite extends Suite with BeforeAndAfterEach with BeforeAndAfterAll {
      override def afterAll() { throw new NumberFormatException }
      def testJuly() = ()
    }
    intercept[NumberFormatException] {
      val a = new MySuite
      a.run(None, Args(StubReporter))
    }
  }
}
// A plain Suite mixing in both BeforeAndAfterEach and BeforeAndAfterAll:
// beforeEach must rebuild the fixture before every reflectively-discovered test.
class BeforeAndAfterExtendingSuite extends Suite with BeforeAndAfterEach with BeforeAndAfterAll {

  var sb: StringBuilder = _
  val lb = new ListBuffer[String]

  // Reset both fixture objects so no test can observe another test's state.
  override def beforeEach(): Unit = {
    lb.clear()
    sb = new StringBuilder("ScalaTest is ")
  }

  def testEasy(): Unit = {
    sb.append("easy!")
    assert(sb.toString === "ScalaTest is easy!")
    assert(lb.isEmpty)
    lb += "sweet"
  }

  def testFun(): Unit = {
    sb.append("fun!")
    assert(sb.toString === "ScalaTest is fun!")
    assert(lb.isEmpty)
  }
}
// Same fixture-reset scenario as BeforeAndAfterExtendingSuite, but with
// FunSuite-style registered tests instead of reflective test methods.
class BeforeAndAfterExtendingFunSuite extends FunSuite with BeforeAndAfterEach with BeforeAndAfterAll {

  var sb: StringBuilder = _
  val lb = new ListBuffer[String]

  // Rebuild both fixture objects before each registered test.
  override def beforeEach(): Unit = {
    lb.clear()
    sb = new StringBuilder("ScalaTest is ")
  }

  test("easy") {
    sb.append("easy!")
    assert(sb.toString === "ScalaTest is easy!")
    assert(lb.isEmpty)
    lb += "sweet"
  }

  test("fun") {
    sb.append("fun!")
    assert(sb.toString === "ScalaTest is fun!")
    assert(lb.isEmpty)
  }

  // This now fails to compile, as I want
  // class IWantThisToFailToCompile extends Examples with BeforeAndAfter
}
// Checks where info(...) output lands when called from a `before`/`after`
// block versus from inside a test body.
class BeforeAndAfterInfoSuite extends FunSuite {

  test("InfoProvided in the before should be fired") {
    class ExampleSpec extends FunSuite with BeforeAndAfter {
      before {
        info("In Before")
      }
      test("test 1") {
        info("info 1")
      }
    }
    val suite = new ExampleSpec
    val reporter = new EventRecordingReporter
    suite.run(None, Args(reporter))

    // info() from the `before` block is reported as a suite-level InfoProvided...
    assert(reporter.infoProvidedEventsReceived.size === 1)
    assert(reporter.infoProvidedEventsReceived(0).message === "In Before")

    // ...while info() from the test body is recorded on the test's own events.
    assert(reporter.testSucceededEventsReceived.size === 1)
    val succeeded = reporter.testSucceededEventsReceived(0)
    assert(succeeded.testName === "test 1")
    assert(succeeded.recordedEvents.size === 1)
    assert(succeeded.recordedEvents(0).asInstanceOf[InfoProvided].message === "info 1")
  }

  test("InfoProvided in the after should be fired") {
    class ExampleSpec extends FunSuite with BeforeAndAfter {
      test("test 1") {
        info("info 1")
      }
      after {
        info("In After")
      }
    }
    val suite = new ExampleSpec
    val reporter = new EventRecordingReporter
    suite.run(None, Args(reporter))

    // info() from the `after` block is likewise a suite-level InfoProvided.
    assert(reporter.infoProvidedEventsReceived.size === 1)
    assert(reporter.infoProvidedEventsReceived(0).message === "In After")
    assert(reporter.testSucceededEventsReceived.size === 1)
    val succeeded = reporter.testSucceededEventsReceived(0)
    assert(succeeded.testName === "test 1")
    assert(succeeded.recordedEvents.size === 1)
    assert(succeeded.recordedEvents(0).asInstanceOf[InfoProvided].message === "info 1")
  }
}
| svn2github/scalatest | src/test/scala/org/scalatest/BeforeAndAfterSuite.scala | Scala | apache-2.0 | 13,779 |
import test._
import org.specs2.mutable.Specification
/**
 * Round-trip (write then read back) serialization tests for generated Avro
 * record classes carrying two fields of the same type ("2-arity homogeneous").
 * Each test serializes two records and verifies they deserialize unchanged.
 */
class Specific2ArityHomoSpec extends Specification {

  "A case class with an `Int` field in the second position" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest14(1, 2)
      val record2 = AvroTypeProviderTest14(3, 4)
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }

  "A case class with an `Float` field in the second position" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest15(1F, 2F)
      val record2 = AvroTypeProviderTest15(3F, 4F)
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }

  "A case class with an `Long` field in the second position" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest16(1L, 2L)
      val record2 = AvroTypeProviderTest16(3L, 4L)
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }

  "A case class with an `Double` field in the second position" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest17(1D, 2D)
      val record2 = AvroTypeProviderTest17(3D, 4D)
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }

  "A case class with an `Boolean` field in the second position" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest18(true, false)
      val record2 = AvroTypeProviderTest18(false, true)
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }

  "A case class with an `String` field in the second position" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest19("1", "2")
      // Fixed: record2 previously duplicated record1's values ("1", "2");
      // every sibling test round-trips two distinct records.
      val record2 = AvroTypeProviderTest19("3", "4")
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }

  "A case class with an `Null` field in the second position" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest20(null, null)
      val record2 = AvroTypeProviderTest20(null, null)
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }

  "A case class with an `Array[String]` field in the second position" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest24(Array("mekka.lekka.hi"), Array("mekka.hiney.ho"))
      val record2 = AvroTypeProviderTest24(Array("time"), Array("travel"))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }

  "A case class with an `Array[Int]` field in the second position" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest25(Array(1, 2), Array(3, 4))
      val record2 = AvroTypeProviderTest25(Array(5, 6), Array(7, 8))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }

  "A case class with an `Option[String]` field in the second position" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest26(Some("sun"), Some("moon"))
      val record2 = AvroTypeProviderTest26(Some("day"), Some("night"))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }

  "A case class with an `Option[Int]` field in the second position" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest27(Some(1), Some(2))
      val record2 = AvroTypeProviderTest27(Some(3), Some(4))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }

  // Fixed description: the fixtures below use String keys, so the old
  // "Map[Int, Int]" wording misdescribed the test in reports.
  "A case class with two Map[String, Int] fields" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTestMap04(Map("Gorgonzola" -> 2), Map("Cheddar" -> 4))
      val record2 = AvroTypeProviderTestMap04(Map("Gouda" -> 5), Map("Swiss" -> 6))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }

  // Fixed description: the fixtures below use String keys, so the old
  // "Map[Int, String]" wording misdescribed the test in reports.
  "A case class with two Map[String, String] fields" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTestMap05(Map("Havana" -> "Cuba"), Map("World" -> "series"))
      val record2 = AvroTypeProviderTestMap05(Map("Bogota" -> "Colombia"), Map("time" -> "series"))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }

  "A case class with two Map[String, Option[Array[Int]]] fields" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTestMap06(Map("Olala" -> Some(Array(1, 4))), Map("Rumpole" -> None))
      val record2 = AvroTypeProviderTestMap06(Map("Cran" -> Some(Array(3, 5))), Map("Doc" -> None))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
}
| julianpeeters/sbt-avrohugger | src/sbt-test/avrohugger/SpecificStringEnumSerializationTests/src/test/scala/specific/Specific2ArityHomoSpec.scala | Scala | apache-2.0 | 5,086 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.metrics.sender
import java.net.InetSocketAddress
import scala.concurrent.duration._
import akka.testkit._
import akka.io.Tcp._
import akka.util.ByteString
import io.gatling.AkkaSpec
import io.gatling.metrics.message.GraphiteMetrics
// Exercises TcpSender's connection state machine without real networking:
// the stubbed sender never opens a socket, and the tests feed it the
// akka.io.Tcp events (Connected, CommandFailed, PeerClosed) a live
// connection would produce.
class TcpSenderSpec extends AkkaSpec {
  val dummySocketAddress = new InetSocketAddress(9999)
  // NOTE(review): the (2, 1.second) constructor args appear to be a retry
  // limit and a retry window -- confirm against TcpSender's definition.
  class TcpSenderNoIo extends TcpSender(dummySocketAddress, 2, 1.second) {
    override def askForConnection(): Unit = ()
  }
  "TcpSender" should "fail if server is unreachable" in {
    val tcpSender = TestFSMRef(new TcpSenderNoIo)
    // Fail 2 times in a row, retry limit is exhausted
    tcpSender ! CommandFailed(Connect(dummySocketAddress))
    tcpSender ! CommandFailed(Connect(dummySocketAddress))
    tcpSender.stateName shouldBe RetriesExhausted
    tcpSender.stateData shouldBe NoData
  }
  it should "go to the Running state and send metrics if it could connect without issues" in {
    val tcpSender = TestFSMRef(new TcpSenderNoIo)
    tcpSender ! Connected(dummySocketAddress, dummySocketAddress)
    // On connection the sender must register itself as the Tcp event handler.
    expectMsg(Register(tcpSender))
    tcpSender.stateName shouldBe Running
    val metrics = GraphiteMetrics(Iterator.single("foo" -> 1), 1)
    tcpSender ! metrics
    // Metrics received while Running are written straight to the connection.
    expectMsg(Write(metrics.byteString))
  }
  it should "retry to connected until the retry limit has been exceeded to finally stop" in {
    val tcpSender = TestFSMRef(new TcpSenderNoIo)
    // Connect
    tcpSender ! Connected(dummySocketAddress, dummySocketAddress)
    expectMsg(Register(tcpSender))
    tcpSender.stateName shouldBe Running
    // Fail one time, retries limit is not exhausted
    tcpSender ! PeerClosed
    tcpSender ! Connected(dummySocketAddress, dummySocketAddress)
    tcpSender.stateName shouldBe Running
    // Make sure one second has passed to reset the retry window
    Thread.sleep(1.second.toMillis)
    // Fail 2 times in a row, retry limit is exhausted
    tcpSender ! CommandFailed(Write(ByteString.empty))
    tcpSender ! CommandFailed(Write(ByteString.empty))
    tcpSender.stateName shouldBe RetriesExhausted
    tcpSender.stateData shouldBe NoData
  }
}
| ryez/gatling | gatling-metrics/src/test/scala/io/gatling/metrics/sender/TcpSenderSpec.scala | Scala | apache-2.0 | 2,761 |
/**
* Created by zhouqihua on 2017/9/26.
*/
import scala.collection.mutable.ArrayBuffer
/**
 * FAIR scheduler: drains uncompleted flows in batches of at most
 * `parallelism` flows, with every flow in a batch sharing the channel's
 * bottleneck bandwidth until the whole batch completes.
 */
class KMSchedulerFAIR(val parallelism: Int = 5) extends KMSchedulerSFSH {
  override protected val schedulerType: String = "FAIR"

  // FAIR does all of its work per channel; this hook intentionally does nothing.
  override def scheduling(timeSlice: Double): Unit = ()

  def schedulingInOneChannel(channel: KMChannel): Unit = {
    val bandwidth = channel.bottleneckPort().remBandwidth
    while (this.uncompletedFlows.nonEmpty) {
      // Snapshot the next batch: the first `parallelism` flows (or fewer).
      val batchSize = math.min(this.parallelism, this.uncompletedFlows.length)
      val batch = this.uncompletedFlows.take(batchSize)
      // Total remaining size across the batch; sharing the bottleneck,
      // the whole batch drains in totalSize / bandwidth time.
      val totalSize = batch.foldLeft(0.0)((acc, flow) => acc + flow.remSize.mixedSize)
      val consumedTime = KMScalaKit.bigDemicalDoubleDiv(totalSize, bandwidth)
      this.updateUncompletedFlowsWithConsumedTime(consumedTime)
      // Every flow in the batch is now fully transferred: zero its remaining
      // size and run it through the completion bookkeeping.
      for (flow <- batch) {
        flow.remSize.updateWith(compressedSize = 0.0, rawSize = 0.0)
        this.updateFlowArraysWithOneFlow(flow)
      }
    }
  }
}
| kimihe/Swallow | swallow-sim/flow/src/main/scala/KMSchedulerFAIR.scala | Scala | apache-2.0 | 1,324 |
package org.json
/**
*
* JSONException
* ledger-wallet-ripple-chrome
*
* Created by Pierre Pollastri on 16/06/2016.
*
* The MIT License (MIT)
*
* Copyright (c) 2016 Ledger
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
/**
 * Signals that a JSON encoding or decoding error occurred.
 *
 * @param message human-readable description of the failure
 */
class JSONException(message: String) extends Exception(message) {

  /**
   * Builds a JSONException that also records the underlying error, so the
   * original failure is preserved in stack traces. Backward compatible:
   * existing `new JSONException(msg)` callers are unaffected.
   *
   * @param message human-readable description of the failure
   * @param cause   the underlying exception that triggered this one
   */
  def this(message: String, cause: Throwable) = {
    this(message)
    initCause(cause)
  }
}
| LedgerHQ/ledger-wallet-ripple | src/main/scala/org/json/JSONException.scala | Scala | mit | 1,357 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.NotSerializableException
import java.nio.ByteBuffer
import java.util.Arrays
import java.util.concurrent.ConcurrentLinkedQueue
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import scala.math.{max, min}
import scala.util.control.NonFatal
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.SchedulingMode._
import org.apache.spark.TaskState.TaskState
import org.apache.spark.util.{AccumulatorV2, Clock, SystemClock, Utils}
/**
* Schedules the tasks within a single TaskSet in the TaskSchedulerImpl. This class keeps track of
* each task, retries tasks if they fail (up to a limited number of times), and
* handles locality-aware scheduling for this TaskSet via delay scheduling. The main interfaces
* to it are resourceOffer, which asks the TaskSet whether it wants to run a task on one node,
* and statusUpdate, which tells it that one of its tasks changed state (e.g. finished).
*
* THREADING: This class is designed to only be called from code with a lock on the
* TaskScheduler (e.g. its event handlers). It should not be called from other threads.
*
* @param sched the TaskSchedulerImpl associated with the TaskSetManager
* @param taskSet the TaskSet to manage scheduling for
* @param maxTaskFailures if any particular task fails this number of times, the entire
* task set will be aborted
*/
private[spark] class TaskSetManager(
sched: TaskSchedulerImpl,
val taskSet: TaskSet,
val maxTaskFailures: Int,
clock: Clock = new SystemClock()) extends Schedulable with Logging {
  private val conf = sched.sc.conf
  // Quantile of tasks at which to start speculation
  val SPECULATION_QUANTILE = conf.getDouble("spark.speculation.quantile", 0.75)
  val SPECULATION_MULTIPLIER = conf.getDouble("spark.speculation.multiplier", 1.5)
  // Limit of bytes for total size of results (default is 1GB)
  val maxResultSize = Utils.getMaxResultSize(conf)
  // Serializer for closures and tasks.
  val env = SparkEnv.get
  val ser = env.closureSerializer.newInstance()
  val tasks = taskSet.tasks
  val numTasks = tasks.length
  // Per-task bookkeeping, indexed by the task's position within the TaskSet.
  val copiesRunning = new Array[Int](numTasks)
  val successful = new Array[Boolean](numTasks)
  private val numFailures = new Array[Int](numTasks)
  // All attempts (running or finished) per task index; newest attempt is
  // prepended first (see resourceOffer's `info :: taskAttempts(index)`).
  val taskAttempts = Array.fill[List[TaskInfo]](numTasks)(Nil)
  var tasksSuccessful = 0
  // NOTE(review): weight/minShare/priority/stageId/name/parent presumably feed
  // the Schedulable pool hierarchy (this class extends Schedulable) -- confirm.
  var weight = 1
  var minShare = 0
  var priority = taskSet.priority
  var stageId = taskSet.stageId
  val name = "TaskSet_" + taskSet.id
  var parent: Pool = null
  // NOTE(review): totalResultSize/calculatedTasks appear to support the
  // maxResultSize cap -- confirm where they are updated outside this chunk.
  var totalResultSize = 0L
  var calculatedTasks = 0
  // Present only when blacklisting is enabled in the configuration.
  private val taskSetBlacklistHelperOpt: Option[TaskSetBlacklist] = {
    if (BlacklistTracker.isBlacklistEnabled(conf)) {
      Some(new TaskSetBlacklist(conf, stageId, clock))
    } else {
      None
    }
  }
  val runningTasksSet = new HashSet[Long]
  override def runningTasks: Int = runningTasksSet.size
  // True once no more tasks should be launched for this task set manager. TaskSetManagers enter
  // the zombie state once at least one attempt of each task has completed successfully, or if the
  // task set is aborted (for example, because it was killed). TaskSetManagers remain in the zombie
  // state until all tasks have finished running; we keep TaskSetManagers that are in the zombie
  // state in order to continue to track and account for the running tasks.
  // TODO: We should kill any running task attempts when the task set manager becomes a zombie.
  var isZombie = false
  // Set of pending tasks for each executor. These collections are actually
  // treated as stacks, in which new tasks are added to the end of the
  // ArrayBuffer and removed from the end. This makes it faster to detect
  // tasks that repeatedly fail because whenever a task failed, it is put
  // back at the head of the stack. These collections may contain duplicates
  // for two reasons:
  // (1): Tasks are only removed lazily; when a task is launched, it remains
  // in all the pending lists except the one that it was launched from.
  // (2): Tasks may be re-added to these lists multiple times as a result
  // of failures.
  // Duplicates are handled in dequeueTaskFromList, which ensures that a
  // task hasn't already started running before launching it.
  private val pendingTasksForExecutor = new HashMap[String, ArrayBuffer[Int]]
  // Set of pending tasks for each host. Similar to pendingTasksForExecutor,
  // but at host level.
  private val pendingTasksForHost = new HashMap[String, ArrayBuffer[Int]]
  // Set of pending tasks for each rack -- similar to the above.
  private val pendingTasksForRack = new HashMap[String, ArrayBuffer[Int]]
  // Set containing pending tasks with no locality preferences.
  var pendingTasksWithNoPrefs = new ArrayBuffer[Int]
  // Set containing all pending tasks (also used as a stack, as above).
  val allPendingTasks = new ArrayBuffer[Int]
  // Tasks that can be speculated. Since these will be a small fraction of total
  // tasks, we'll just hold them in a HashSet.
  val speculatableTasks = new HashSet[Int]
  // Task index, start and finish time for each task attempt (indexed by task ID)
  val taskInfos = new HashMap[Long, TaskInfo]
  // How frequently to reprint duplicate exceptions in full, in milliseconds
  val EXCEPTION_PRINT_INTERVAL =
    conf.getLong("spark.logging.exceptionPrintInterval", 10000)
  // Map of recent exceptions (identified by string representation and top stack frame) to
  // duplicate count (how many times the same exception has appeared) and time the full exception
  // was printed. This should ideally be an LRU map that can drop old exceptions automatically.
  val recentExceptions = HashMap[String, (Int, Long)]()
  // Figure out the current map output tracker epoch and set it on all tasks
  val epoch = sched.mapOutputTracker.getEpoch
  logDebug("Epoch for " + taskSet + ": " + epoch)
  for (t <- tasks) {
    t.epoch = epoch
  }
  // Add all our tasks to the pending lists. We do this in reverse order
  // of task index so that tasks with low indices get launched first.
  for (i <- (0 until numTasks).reverse) {
    addPendingTask(i)
  }
  // Figure out which locality levels we have in our TaskSet, so we can do delay scheduling
  var myLocalityLevels = computeValidLocalityLevels()
  var localityWaits = myLocalityLevels.map(getLocalityWait) // Time to wait at each level
  // Delay scheduling variables: we keep track of our current locality level and the time we
  // last launched a task at that level, and move up a level when localityWaits[curLevel] expires.
  // We then move down if we manage to launch a "more local" task.
  var currentLocalityIndex = 0 // Index of our current locality level in validLocalityLevels
  var lastLaunchTime = clock.getTimeMillis() // Time we last launched a task at this level
  // This manager is a leaf in the scheduling tree: it has no child queue.
  override def schedulableQueue: ConcurrentLinkedQueue[Schedulable] = null
  override def schedulingMode: SchedulingMode = SchedulingMode.NONE
  var emittedTaskSizeWarning = false
  /**
   * Add a task to all the pending-task lists that it should be on.
   *
   * For each preferred location this registers the task index in the
   * executor-, host- and rack-level pending lists; tasks with no preferences
   * go into pendingTasksWithNoPrefs, and every task is also appended to
   * allPendingTasks.
   */
  private def addPendingTask(index: Int) {
    for (loc <- tasks(index).preferredLocations) {
      loc match {
        case e: ExecutorCacheTaskLocation =>
          pendingTasksForExecutor.getOrElseUpdate(e.executorId, new ArrayBuffer) += index
        case e: HDFSCacheTaskLocation =>
          // An HDFS-cached block is only process-local on executors currently
          // alive on that host.
          val exe = sched.getExecutorsAliveOnHost(loc.host)
          exe match {
            case Some(set) =>
              // Note: the loop variable shadows the matched location `e`; the
              // log messages below refer to the outer HDFSCacheTaskLocation.
              for (e <- set) {
                pendingTasksForExecutor.getOrElseUpdate(e, new ArrayBuffer) += index
              }
              logInfo(s"Pending task $index has a cached location at ${e.host} " +
                ", where there are executors " + set.mkString(","))
            case None => logDebug(s"Pending task $index has a cached location at ${e.host} " +
                ", but there are no executors alive there.")
          }
        case _ =>
      }
      pendingTasksForHost.getOrElseUpdate(loc.host, new ArrayBuffer) += index
      for (rack <- sched.getRackForHost(loc.host)) {
        pendingTasksForRack.getOrElseUpdate(rack, new ArrayBuffer) += index
      }
    }
    if (tasks(index).preferredLocations == Nil) {
      pendingTasksWithNoPrefs += index
    }
    allPendingTasks += index  // No point scanning this whole list to find the old task there
  }
/**
* Return the pending tasks list for a given executor ID, or an empty list if
* there is no map entry for that host
*/
private def getPendingTasksForExecutor(executorId: String): ArrayBuffer[Int] = {
pendingTasksForExecutor.getOrElse(executorId, ArrayBuffer())
}
/**
* Return the pending tasks list for a given host, or an empty list if
* there is no map entry for that host
*/
protected def getPendingTasksForHost(host: String): ArrayBuffer[Int] = {
pendingTasksForHost.getOrElse(host, ArrayBuffer())
}
/**
* Return the pending rack-local task list for a given rack, or an empty list if
* there is no map entry for that rack
*/
private def getPendingTasksForRack(rack: String): ArrayBuffer[Int] = {
pendingTasksForRack.getOrElse(rack, ArrayBuffer())
}
  /**
   * Dequeue a pending task from the given list and return its index.
   * Return None if the list is empty.
   * This method also cleans up any tasks in the list that have already
   * been launched, since we want that to happen lazily.
   *
   * Scans from the tail of the list (the pending lists are used as stacks).
   * Entries blacklisted for this executor/host are left in place so other
   * offers can still pick them up.
   */
  private def dequeueTaskFromList(
      execId: String,
      host: String,
      list: ArrayBuffer[Int]): Option[Int] = {
    var indexOffset = list.size
    while (indexOffset > 0) {
      indexOffset -= 1
      val index = list(indexOffset)
      if (!isTaskBlacklistedOnExecOrNode(index, execId, host)) {
        // This should almost always be list.trimEnd(1) to remove tail
        list.remove(indexOffset)
        // Only hand out entries that are neither running nor finished; stale
        // duplicates are simply dropped by the removal above.
        if (copiesRunning(index) == 0 && !successful(index)) {
          return Some(index)
        }
      }
    }
    None
  }
/** Check whether a task is currently running an attempt on a given host */
private def hasAttemptOnHost(taskIndex: Int, host: String): Boolean = {
taskAttempts(taskIndex).exists(_.host == host)
}
private def isTaskBlacklistedOnExecOrNode(index: Int, execId: String, host: String): Boolean = {
taskSetBlacklistHelperOpt.exists { blacklist =>
blacklist.isNodeBlacklistedForTask(host, index) ||
blacklist.isExecutorBlacklistedForTask(execId, index)
}
}
  /**
   * Return a speculative task for a given executor if any are available. The task should not have
   * an attempt running on this host, in case the host is slow. In addition, the task should meet
   * the given locality constraint.
   *
   * Candidates are tried from the most local level downward, never exceeding
   * the caller's `locality` bound; a chosen index is removed from
   * speculatableTasks before being returned.
   */
  // Labeled as protected to allow tests to override providing speculative tasks if necessary
  protected def dequeueSpeculativeTask(execId: String, host: String, locality: TaskLocality.Value)
    : Option[(Int, TaskLocality.Value)] =
  {
    speculatableTasks.retain(index => !successful(index)) // Remove finished tasks from set
    // A candidate must not already have an attempt on this host and must not
    // be blacklisted for this executor or node.
    def canRunOnHost(index: Int): Boolean = {
      !hasAttemptOnHost(index, host) &&
        !isTaskBlacklistedOnExecOrNode(index, execId, host)
    }
    if (!speculatableTasks.isEmpty) {
      // Check for process-local tasks; note that tasks can be process-local
      // on multiple nodes when we replicate cached blocks, as in Spark Streaming
      for (index <- speculatableTasks if canRunOnHost(index)) {
        val prefs = tasks(index).preferredLocations
        val executors = prefs.flatMap(_ match {
          case e: ExecutorCacheTaskLocation => Some(e.executorId)
          case _ => None
        });
        if (executors.contains(execId)) {
          speculatableTasks -= index
          return Some((index, TaskLocality.PROCESS_LOCAL))
        }
      }
      // Check for node-local tasks
      if (TaskLocality.isAllowed(locality, TaskLocality.NODE_LOCAL)) {
        for (index <- speculatableTasks if canRunOnHost(index)) {
          val locations = tasks(index).preferredLocations.map(_.host)
          if (locations.contains(host)) {
            speculatableTasks -= index
            return Some((index, TaskLocality.NODE_LOCAL))
          }
        }
      }
      // Check for no-preference tasks
      // NOTE(review): no-pref hits are deliberately reported as PROCESS_LOCAL
      // here rather than NO_PREF -- confirm downstream accounting expects this.
      if (TaskLocality.isAllowed(locality, TaskLocality.NO_PREF)) {
        for (index <- speculatableTasks if canRunOnHost(index)) {
          val locations = tasks(index).preferredLocations
          if (locations.size == 0) {
            speculatableTasks -= index
            return Some((index, TaskLocality.PROCESS_LOCAL))
          }
        }
      }
      // Check for rack-local tasks
      if (TaskLocality.isAllowed(locality, TaskLocality.RACK_LOCAL)) {
        for (rack <- sched.getRackForHost(host)) {
          for (index <- speculatableTasks if canRunOnHost(index)) {
            val racks = tasks(index).preferredLocations.map(_.host).flatMap(sched.getRackForHost)
            if (racks.contains(rack)) {
              speculatableTasks -= index
              return Some((index, TaskLocality.RACK_LOCAL))
            }
          }
        }
      }
      // Check for non-local tasks
      if (TaskLocality.isAllowed(locality, TaskLocality.ANY)) {
        for (index <- speculatableTasks if canRunOnHost(index)) {
          speculatableTasks -= index
          return Some((index, TaskLocality.ANY))
        }
      }
    }
    None
  }
  /**
   * Dequeue a pending task for a given node and return its index and locality level.
   * Only search for tasks matching the given locality constraint.
   *
   * Locality levels are tried from most to least local, never exceeding
   * `maxLocality`; a speculative task is considered only when no pending
   * task could be dequeued.
   *
   * @return An option containing (task index within the task set, locality, is speculative?)
   */
  private def dequeueTask(execId: String, host: String, maxLocality: TaskLocality.Value)
    : Option[(Int, TaskLocality.Value, Boolean)] =
  {
    for (index <- dequeueTaskFromList(execId, host, getPendingTasksForExecutor(execId))) {
      return Some((index, TaskLocality.PROCESS_LOCAL, false))
    }
    if (TaskLocality.isAllowed(maxLocality, TaskLocality.NODE_LOCAL)) {
      for (index <- dequeueTaskFromList(execId, host, getPendingTasksForHost(host))) {
        return Some((index, TaskLocality.NODE_LOCAL, false))
      }
    }
    if (TaskLocality.isAllowed(maxLocality, TaskLocality.NO_PREF)) {
      // Look for noPref tasks after NODE_LOCAL for minimize cross-rack traffic
      for (index <- dequeueTaskFromList(execId, host, pendingTasksWithNoPrefs)) {
        // No-pref hits are reported as PROCESS_LOCAL (see the matching choice
        // in dequeueSpeculativeTask).
        return Some((index, TaskLocality.PROCESS_LOCAL, false))
      }
    }
    if (TaskLocality.isAllowed(maxLocality, TaskLocality.RACK_LOCAL)) {
      for {
        rack <- sched.getRackForHost(host)
        index <- dequeueTaskFromList(execId, host, getPendingTasksForRack(rack))
      } {
        return Some((index, TaskLocality.RACK_LOCAL, false))
      }
    }
    if (TaskLocality.isAllowed(maxLocality, TaskLocality.ANY)) {
      for (index <- dequeueTaskFromList(execId, host, allPendingTasks)) {
        return Some((index, TaskLocality.ANY, false))
      }
    }
    // find a speculative task if all others tasks have been scheduled
    dequeueSpeculativeTask(execId, host, maxLocality).map {
      case (taskIndex, allowedLocality) => (taskIndex, allowedLocality, true)}
  }
/**
 * Respond to an offer of a single executor from the scheduler by finding a task
 *
 * NOTE: this function is either called with a maxLocality which
 * would be adjusted by delay scheduling algorithm or it will be with a special
 * NO_PREF locality which will be not modified
 *
 * @param execId the executor Id of the offered resource
 * @param host the host Id of the offered resource
 * @param maxLocality the maximum locality we want to schedule the tasks at
 * @return a TaskDescription for the launched task, or None if nothing was scheduled
 *         on this offer (zombie task set, blacklisted offer, or no runnable task)
 * @throws TaskNotSerializableException if the chosen task cannot be serialized; the
 *         whole task set is aborted first, since a retry would fail the same way
 */
@throws[TaskNotSerializableException]
def resourceOffer(
execId: String,
host: String,
maxLocality: TaskLocality.TaskLocality)
: Option[TaskDescription] =
{
// Reject the offer outright if this task set may not run on the offered node or executor.
val offerBlacklisted = taskSetBlacklistHelperOpt.exists { blacklist =>
blacklist.isNodeBlacklistedForTaskSet(host) ||
blacklist.isExecutorBlacklistedForTaskSet(execId)
}
if (!isZombie && !offerBlacklisted) {
val curTime = clock.getTimeMillis()
var allowedLocality = maxLocality
if (maxLocality != TaskLocality.NO_PREF) {
// Delay scheduling may have relaxed our level over time; never exceed the caller's cap.
allowedLocality = getAllowedLocalityLevel(curTime)
if (allowedLocality > maxLocality) {
// We're not allowed to search for farther-away tasks
allowedLocality = maxLocality
}
}
dequeueTask(execId, host, allowedLocality).map { case ((index, taskLocality, speculative)) =>
// Found a task; do some bookkeeping and return a task description
val task = tasks(index)
val taskId = sched.newTaskId()
// Do various bookkeeping
copiesRunning(index) += 1
val attemptNum = taskAttempts(index).size
val info = new TaskInfo(taskId, index, attemptNum, curTime,
execId, host, taskLocality, speculative)
taskInfos(taskId) = info
// Prepend, so taskAttempts(index).head is always the most recent attempt.
taskAttempts(index) = info :: taskAttempts(index)
// Update our locality level for delay scheduling
// NO_PREF will not affect the variables related to delay scheduling
if (maxLocality != TaskLocality.NO_PREF) {
currentLocalityIndex = getLocalityIndex(taskLocality)
lastLaunchTime = curTime
}
// Serialize and return the task
val startTime = clock.getTimeMillis()
val serializedTask: ByteBuffer = try {
Task.serializeWithDependencies(task, sched.sc.addedFiles, sched.sc.addedJars, ser)
} catch {
// If the task cannot be serialized, then there's no point to re-attempt the task,
// as it will always fail. So just abort the whole task-set.
case NonFatal(e) =>
val msg = s"Failed to serialize task $taskId, not attempting to retry it."
logError(msg, e)
abort(s"$msg Exception during serialization: $e")
throw new TaskNotSerializableException(e)
}
// Warn once per task set when serialized tasks exceed the recommended size.
if (serializedTask.limit > TaskSetManager.TASK_SIZE_TO_WARN_KB * 1024 &&
!emittedTaskSizeWarning) {
emittedTaskSizeWarning = true
logWarning(s"Stage ${task.stageId} contains a task of very large size " +
s"(${serializedTask.limit / 1024} KB). The maximum recommended task size is " +
s"${TaskSetManager.TASK_SIZE_TO_WARN_KB} KB.")
}
addRunningTask(taskId)
// We used to log the time it takes to serialize the task, but task size is already
// a good proxy to task serialization time.
// val timeTaken = clock.getTime() - startTime
val taskName = s"task ${info.id} in stage ${taskSet.id}"
logInfo(s"Starting $taskName (TID $taskId, $host, executor ${info.executorId}, " +
s"partition ${task.partitionId}, $taskLocality, ${serializedTask.limit} bytes)")
// Notify the DAGScheduler before handing the description back to the backend.
sched.dagScheduler.taskStarted(task, info)
new TaskDescription(taskId = taskId, attemptNumber = attemptNum, execId,
taskName, index, serializedTask)
}
} else {
None
}
}
/**
 * Tell the scheduler this task set is finished, but only once it is both a zombie
 * (no new tasks will be launched) and has no tasks still running.
 */
private def maybeFinishTaskSet() {
if (isZombie && runningTasks == 0) {
sched.taskSetFinished(this)
}
}
/**
 * Get the level we can launch tasks according to delay scheduling, based on current wait time.
 *
 * Walks myLocalityLevels from the current index upward: a level is skipped either when it
 * has no runnable tasks left (SPARK-4939 optimization) or when its locality wait has
 * expired. Mutates currentLocalityIndex and lastLaunchTime as levels are skipped.
 */
private def getAllowedLocalityLevel(curTime: Long): TaskLocality.TaskLocality = {
// Remove the scheduled or finished tasks lazily
def tasksNeedToBeScheduledFrom(pendingTaskIds: ArrayBuffer[Int]): Boolean = {
// Scan from the back so removal is cheap and safe while iterating.
var indexOffset = pendingTaskIds.size
while (indexOffset > 0) {
indexOffset -= 1
val index = pendingTaskIds(indexOffset)
if (copiesRunning(index) == 0 && !successful(index)) {
return true
} else {
pendingTaskIds.remove(indexOffset)
}
}
false
}
// Walk through the list of tasks that can be scheduled at each location and returns true
// if there are any tasks that still need to be scheduled. Lazily cleans up tasks that have
// already been scheduled.
def moreTasksToRunIn(pendingTasks: HashMap[String, ArrayBuffer[Int]]): Boolean = {
val emptyKeys = new ArrayBuffer[String]
val hasTasks = pendingTasks.exists {
case (id: String, tasks: ArrayBuffer[Int]) =>
if (tasksNeedToBeScheduledFrom(tasks)) {
true
} else {
emptyKeys += id
false
}
}
// The key could be executorId, host or rackId
emptyKeys.foreach(id => pendingTasks.remove(id))
hasTasks
}
while (currentLocalityIndex < myLocalityLevels.length - 1) {
val moreTasks = myLocalityLevels(currentLocalityIndex) match {
case TaskLocality.PROCESS_LOCAL => moreTasksToRunIn(pendingTasksForExecutor)
case TaskLocality.NODE_LOCAL => moreTasksToRunIn(pendingTasksForHost)
case TaskLocality.NO_PREF => pendingTasksWithNoPrefs.nonEmpty
case TaskLocality.RACK_LOCAL => moreTasksToRunIn(pendingTasksForRack)
}
if (!moreTasks) {
// This is a performance optimization: if there are no more tasks that can
// be scheduled at a particular locality level, there is no point in waiting
// for the locality wait timeout (SPARK-4939).
lastLaunchTime = curTime
logDebug(s"No tasks for locality level ${myLocalityLevels(currentLocalityIndex)}, " +
s"so moving to locality level ${myLocalityLevels(currentLocalityIndex + 1)}")
currentLocalityIndex += 1
} else if (curTime - lastLaunchTime >= localityWaits(currentLocalityIndex)) {
// Jump to the next locality level, and reset lastLaunchTime so that the next locality
// wait timer doesn't immediately expire
lastLaunchTime += localityWaits(currentLocalityIndex)
logDebug(s"Moving to ${myLocalityLevels(currentLocalityIndex + 1)} after waiting for " +
s"${localityWaits(currentLocalityIndex)}ms")
currentLocalityIndex += 1
} else {
// Wait timer still running at this level: schedule here.
return myLocalityLevels(currentLocalityIndex)
}
}
// Fell through to the last (least local) level, which is always ANY.
myLocalityLevels(currentLocalityIndex)
}
/**
 * Find the index in myLocalityLevels for a given locality. This also works for
 * localities that are not present in myLocalityLevels (should we somehow get those)
 * by returning the next-biggest level we do have; the search always terminates
 * because the last entry of myLocalityLevels is ANY, the largest locality.
 */
def getLocalityIndex(locality: TaskLocality.TaskLocality): Int =
  myLocalityLevels.indexWhere(locality <= _)
/**
 * Check whether the given task set has been blacklisted to the point that it can't run anywhere.
 *
 * It is possible that this taskset has become impossible to schedule *anywhere* due to the
 * blacklist. The most common scenario would be if there are fewer executors than
 * spark.task.maxFailures. We need to detect this so we can fail the task set, otherwise the job
 * will hang.
 *
 * There's a tradeoff here: we could make sure all tasks in the task set are schedulable, but that
 * would add extra time to each iteration of the scheduling loop. Here, we take the approach of
 * making sure at least one of the unscheduled tasks is schedulable. This means we may not detect
 * the hang as quickly as we could have, but we'll always detect the hang eventually, and the
 * method is faster in the typical case. In the worst case, this method can take
 * O(maxTaskFailures + numTasks) time, but it will be faster when there haven't been any task
 * failures (this is because the method picks one unscheduled task, and then iterates through each
 * executor until it finds one that the task isn't blacklisted on).
 */
private[scheduler] def abortIfCompletelyBlacklisted(
hostToExecutors: HashMap[String, HashSet[String]]): Unit = {
// No-op when blacklisting is disabled (helper is None).
taskSetBlacklistHelperOpt.foreach { taskSetBlacklist =>
// Only look for unschedulable tasks when at least one executor has registered. Otherwise,
// task sets will be (unnecessarily) aborted in cases when no executors have registered yet.
if (hostToExecutors.nonEmpty) {
// find any task that needs to be scheduled
val pendingTask: Option[Int] = {
// usually this will just take the last pending task, but because of the lazy removal
// from each list, we may need to go deeper in the list. We poll from the end because
// failed tasks are put back at the end of allPendingTasks, so we're more likely to find
// an unschedulable task this way.
val indexOffset = allPendingTasks.lastIndexWhere { indexInTaskSet =>
copiesRunning(indexInTaskSet) == 0 && !successful(indexInTaskSet)
}
if (indexOffset == -1) {
None
} else {
Some(allPendingTasks(indexOffset))
}
}
pendingTask.foreach { indexInTaskSet =>
// try to find some executor this task can run on. Its possible that some *other*
// task isn't schedulable anywhere, but we will discover that in some later call,
// when that unschedulable task is the last task remaining.
val blacklistedEverywhere = hostToExecutors.forall { case (host, execsOnHost) =>
// Check if the task can run on the node
val nodeBlacklisted =
taskSetBlacklist.isNodeBlacklistedForTaskSet(host) ||
taskSetBlacklist.isNodeBlacklistedForTask(host, indexInTaskSet)
if (nodeBlacklisted) {
true
} else {
// Check if the task can run on any of the executors
execsOnHost.forall { exec =>
taskSetBlacklist.isExecutorBlacklistedForTaskSet(exec) ||
taskSetBlacklist.isExecutorBlacklistedForTask(exec, indexInTaskSet)
}
}
}
if (blacklistedEverywhere) {
// The whole task set is doomed: fail fast instead of letting the job hang.
val partition = tasks(indexInTaskSet).partitionId
abort(s"Aborting $taskSet because task $indexInTaskSet (partition $partition) " +
s"cannot run anywhere due to node and executor blacklist. Blacklisting behavior " +
s"can be configured via spark.blacklist.*.")
}
}
}
}
}
/**
 * Marks the task as getting result and notifies the DAG Scheduler
 *
 * @param tid the task id; must be a task known to this manager (taskInfos lookup
 *            throws NoSuchElementException otherwise)
 */
def handleTaskGettingResult(tid: Long): Unit = {
val info = taskInfos(tid)
info.markGettingResult()
sched.dagScheduler.taskGettingResult(info)
}
/**
 * Check whether has enough quota to fetch the result with `size` bytes
 *
 * Side effects: always adds `size` to totalResultSize and bumps calculatedTasks,
 * even on the rejecting path; aborts the whole task set when the cumulative size
 * exceeds spark.driver.maxResultSize. Synchronized on the scheduler.
 *
 * @return true if the result may be fetched, false if the quota is exhausted
 */
def canFetchMoreResults(size: Long): Boolean = sched.synchronized {
totalResultSize += size
calculatedTasks += 1
// maxResultSize <= 0 means the limit is disabled.
if (maxResultSize > 0 && totalResultSize > maxResultSize) {
val msg = s"Total size of serialized results of ${calculatedTasks} tasks " +
s"(${Utils.bytesToString(totalResultSize)}) is bigger than spark.driver.maxResultSize " +
s"(${Utils.bytesToString(maxResultSize)})"
logError(msg)
abort(msg)
false
} else {
true
}
}
/**
 * Marks a task as successful and notifies the DAGScheduler that the task has ended.
 *
 * Also kills any other running attempts of the same task (speculative copies), and
 * flips the manager to zombie state once every task has succeeded.
 */
def handleSuccessfulTask(tid: Long, result: DirectTaskResult[_]): Unit = {
val info = taskInfos(tid)
val index = info.index
info.markFinished(TaskState.FINISHED)
removeRunningTask(tid)
// This method is called by "TaskSchedulerImpl.handleSuccessfulTask" which holds the
// "TaskSchedulerImpl" lock until exiting. To avoid the SPARK-7655 issue, we should not
// "deserialize" the value when holding a lock to avoid blocking other threads. So we call
// "result.value()" in "TaskResultGetter.enqueueSuccessfulTask" before reaching here.
// Note: "result.value()" only deserializes the value when it's called at the first time, so
// here "result.value()" just returns the value and won't block other threads.
sched.dagScheduler.taskEnded(tasks(index), Success, result.value(), result.accumUpdates, info)
// Kill any other attempts for the same task (since those are unnecessary now that one
// attempt completed successfully).
for (attemptInfo <- taskAttempts(index) if attemptInfo.running) {
logInfo(s"Killing attempt ${attemptInfo.attemptNumber} for task ${attemptInfo.id} " +
s"in stage ${taskSet.id} (TID ${attemptInfo.taskId}) on ${attemptInfo.host} " +
s"as the attempt ${info.attemptNumber} succeeded on ${info.host}")
sched.backend.killTask(attemptInfo.taskId, attemptInfo.executorId, true)
}
// Only count the first successful completion of each task index; a second attempt
// finishing after the first is ignored below.
if (!successful(index)) {
tasksSuccessful += 1
logInfo(s"Finished task ${info.id} in stage ${taskSet.id} (TID ${info.taskId}) in" +
s" ${info.duration} ms on ${info.host} (executor ${info.executorId})" +
s" ($tasksSuccessful/$numTasks)")
// Mark successful and stop if all the tasks have succeeded.
successful(index) = true
if (tasksSuccessful == numTasks) {
isZombie = true
}
} else {
logInfo("Ignoring task-finished event for " + info.id + " in stage " + taskSet.id +
" because task " + index + " has already completed successfully")
}
maybeFinishTaskSet()
}
/**
 * Marks the task as failed, re-adds it to the list of pending tasks, and notifies the
 * DAG Scheduler.
 *
 * Failure handling depends on the reason: FetchFailed makes this manager a zombie
 * (the stage will be resubmitted), ExceptionFailure is rate-limited in the logs and
 * may abort on unserializable results, and executor-caused ExecutorLostFailure does
 * not count towards the task's failure limit. Aborts the task set once a task fails
 * maxTaskFailures times.
 */
def handleFailedTask(tid: Long, state: TaskState, reason: TaskFailedReason) {
val info = taskInfos(tid)
// Ignore duplicate notifications for a task that already terminated.
if (info.failed || info.killed) {
return
}
removeRunningTask(tid)
info.markFinished(state)
val index = info.index
copiesRunning(index) -= 1
var accumUpdates: Seq[AccumulatorV2[_, _]] = Seq.empty
val failureReason = s"Lost task ${info.id} in stage ${taskSet.id} (TID $tid, ${info.host}," +
s" executor ${info.executorId}): ${reason.toErrorString}"
val failureException: Option[Throwable] = reason match {
case fetchFailed: FetchFailed =>
logWarning(failureReason)
// Treat the task as "successful" so it is not re-run by this (now zombie) manager;
// the DAGScheduler will resubmit the stage to regenerate the missing map output.
if (!successful(index)) {
successful(index) = true
tasksSuccessful += 1
}
isZombie = true
None
case ef: ExceptionFailure =>
// ExceptionFailure's might have accumulator updates
accumUpdates = ef.accums
if (ef.className == classOf[NotSerializableException].getName) {
// If the task result wasn't serializable, there's no point in trying to re-execute it.
logError("Task %s in stage %s (TID %d) had a not serializable result: %s; not retrying"
.format(info.id, taskSet.id, tid, ef.description))
abort("Task %s in stage %s (TID %d) had a not serializable result: %s".format(
info.id, taskSet.id, tid, ef.description))
return
}
// Deduplicate noisy repeated exceptions: log the full message at most once per
// EXCEPTION_PRINT_INTERVAL, otherwise log a short "[duplicate N]" line.
val key = ef.description
val now = clock.getTimeMillis()
val (printFull, dupCount) = {
if (recentExceptions.contains(key)) {
val (dupCount, printTime) = recentExceptions(key)
if (now - printTime > EXCEPTION_PRINT_INTERVAL) {
recentExceptions(key) = (0, now)
(true, 0)
} else {
recentExceptions(key) = (dupCount + 1, printTime)
(false, dupCount + 1)
}
} else {
recentExceptions(key) = (0, now)
(true, 0)
}
}
if (printFull) {
logWarning(failureReason)
} else {
logInfo(
s"Lost task ${info.id} in stage ${taskSet.id} (TID $tid) on ${info.host}, executor" +
s" ${info.executorId}: ${ef.className} (${ef.description}) [duplicate $dupCount]")
}
ef.exception
case e: ExecutorLostFailure if !e.exitCausedByApp =>
logInfo(s"Task $tid failed because while it was being computed, its executor " +
"exited for a reason unrelated to the task. Not counting this failure towards the " +
"maximum number of failures for the task.")
None
case e: TaskFailedReason => // TaskResultLost, TaskKilled, and others
logWarning(failureReason)
None
}
sched.dagScheduler.taskEnded(tasks(index), reason, null, accumUpdates, info)
if (successful(index)) {
logInfo(
s"Task ${info.id} in stage ${taskSet.id} (TID $tid) failed, " +
"but another instance of the task has already succeeded, " +
"so not re-queuing the task to be re-executed.")
} else {
addPendingTask(index)
}
if (!isZombie && reason.countTowardsTaskFailures) {
taskSetBlacklistHelperOpt.foreach(_.updateBlacklistForFailedTask(
info.host, info.executorId, index))
assert (null != failureReason)
numFailures(index) += 1
if (numFailures(index) >= maxTaskFailures) {
logError("Task %d in stage %s failed %d times; aborting job".format(
index, taskSet.id, maxTaskFailures))
abort("Task %d in stage %s failed %d times, most recent failure: %s\\nDriver stacktrace:"
.format(index, taskSet.id, maxTaskFailures, failureReason), failureException)
return
}
}
maybeFinishTaskSet()
}
/**
 * Fail the whole task set: notify the DAGScheduler, mark this manager as a zombie
 * so no new tasks are launched, and finish the set if nothing is still running.
 */
def abort(message: String, exception: Option[Throwable] = None): Unit = sched.synchronized {
// TODO: Kill running tasks if we were not terminated due to a Mesos error
sched.dagScheduler.taskSetFailed(taskSet, message, exception)
isZombie = true
maybeFinishTaskSet()
}
/** If the given task ID is not in the set of running tasks, adds it.
 *
 * Used to keep track of the number of running tasks, for enforcing scheduling policies.
 * Also propagates the count up the scheduling tree via the parent pool, if any.
 */
def addRunningTask(tid: Long) {
if (runningTasksSet.add(tid) && parent != null) {
parent.increaseRunningTasks(1)
}
}
/** If the given task ID is in the set of running tasks, removes it.
 *
 * Mirror of addRunningTask: also decrements the parent pool's running-task count.
 */
def removeRunningTask(tid: Long) {
if (runningTasksSet.remove(tid) && parent != null) {
parent.decreaseRunningTasks(1)
}
}
// A TaskSetManager is a leaf of the scheduling tree and has no named children,
// so lookups always return null.
override def getSchedulableByName(name: String): Schedulable = {
null
}
// Leaf node: child management is intentionally a no-op.
override def addSchedulable(schedulable: Schedulable) {}
override def removeSchedulable(schedulable: Schedulable) {}
/** Leaf of the scheduling tree: the sorted queue consists of just this manager. */
override def getSortedTaskSetQueue(): ArrayBuffer[TaskSetManager] =
  ArrayBuffer(this)
/** Called by TaskScheduler when an executor is lost so we can re-enqueue our tasks
 *
 * Shuffle-map output stored on the lost executor is resubmitted (unless an external
 * shuffle service holds the data), running tasks on that executor are failed, and
 * locality levels are recomputed.
 */
override def executorLost(execId: String, host: String, reason: ExecutorLossReason) {
// Re-enqueue any tasks that ran on the failed executor if this is a shuffle map stage,
// and we are not using an external shuffle server which could serve the shuffle outputs.
// The reason is the next stage wouldn't be able to fetch the data from this dead executor
// so we would need to rerun these tasks on other executors.
if (tasks(0).isInstanceOf[ShuffleMapTask] && !env.blockManager.externalShuffleServiceEnabled) {
for ((tid, info) <- taskInfos if info.executorId == execId) {
val index = taskInfos(tid).index
if (successful(index)) {
// Roll back the "successful" bookkeeping so the task is eligible to run again.
successful(index) = false
copiesRunning(index) -= 1
tasksSuccessful -= 1
addPendingTask(index)
// Tell the DAGScheduler that this task was resubmitted so that it doesn't think our
// stage finishes when a total of tasks.size tasks finish.
sched.dagScheduler.taskEnded(
tasks(index), Resubmitted, null, Seq.empty, info)
}
}
}
// Fail every task that was still running on the lost executor.
for ((tid, info) <- taskInfos if info.running && info.executorId == execId) {
val exitCausedByApp: Boolean = reason match {
case exited: ExecutorExited => exited.exitCausedByApp
case ExecutorKilled => false
case _ => true
}
handleFailedTask(tid, TaskState.FAILED, ExecutorLostFailure(info.executorId, exitCausedByApp,
Some(reason.toString)))
}
// recalculate valid locality levels and waits when executor is lost
recomputeLocality()
}
/**
 * Check for tasks to be speculated and return true if there are any. This is called periodically
 * by the TaskScheduler.
 *
 * A task becomes speculatable once enough tasks (SPECULATION_QUANTILE of the set) have
 * finished and it has been running longer than SPECULATION_MULTIPLIER times the median
 * successful-task duration (with minTimeToSpeculation as a floor).
 *
 * TODO: To make this scale to large jobs, we need to maintain a list of running tasks, so that
 * we don't scan the whole task set. It might also help to make this sorted by launch time.
 */
override def checkSpeculatableTasks(minTimeToSpeculation: Int): Boolean = {
// Can't speculate if we only have one task, and no need to speculate if the task set is a
// zombie.
if (isZombie || numTasks == 1) {
return false
}
var foundTasks = false
val minFinishedForSpeculation = (SPECULATION_QUANTILE * numTasks).floor.toInt
logDebug("Checking for speculative tasks: minFinished = " + minFinishedForSpeculation)
if (tasksSuccessful >= minFinishedForSpeculation && tasksSuccessful > 0) {
val time = clock.getTimeMillis()
val durations = taskInfos.values.filter(_.successful).map(_.duration).toArray
Arrays.sort(durations)
val medianDuration = durations(min((0.5 * tasksSuccessful).round.toInt, durations.length - 1))
val threshold = max(SPECULATION_MULTIPLIER * medianDuration, minTimeToSpeculation)
// TODO: Threshold should also look at standard deviation of task durations and have a lower
// bound based on that.
logDebug("Task length threshold for speculation: " + threshold)
for ((tid, info) <- taskInfos) {
val index = info.index
// Only speculate tasks with exactly one running copy that exceed the threshold
// and were not already marked speculatable.
if (!successful(index) && copiesRunning(index) == 1 && info.timeRunning(time) > threshold &&
!speculatableTasks.contains(index)) {
logInfo(
"Marking task %d in stage %s (on %s) as speculatable because it ran more than %.0f ms"
.format(index, taskSet.id, info.host, threshold))
speculatableTasks += index
foundTasks = true
}
}
}
foundTasks
}
/**
 * The delay-scheduling wait (in ms) for a locality level, read from the per-level
 * configuration key and falling back to spark.locality.wait. Levels with no
 * configurable wait (NO_PREF, ANY) return 0.
 */
private def getLocalityWait(level: TaskLocality.TaskLocality): Long = {
  val defaultWait = conf.get("spark.locality.wait", "3s")
  val confKey = level match {
    case TaskLocality.PROCESS_LOCAL => Some("spark.locality.wait.process")
    case TaskLocality.NODE_LOCAL => Some("spark.locality.wait.node")
    case TaskLocality.RACK_LOCAL => Some("spark.locality.wait.rack")
    case _ => None
  }
  confKey.map(conf.getTimeAsMs(_, defaultWait)).getOrElse(0L)
}
/**
 * Compute the locality levels used in this TaskSet. Assumes that all tasks have already been
 * added to queues using addPendingTask.
 *
 * A level is included only when its pending queue is non-empty, its configured wait is
 * non-zero, and at least one matching executor/host/rack is currently alive. ANY is
 * always appended as the final level.
 */
private def computeValidLocalityLevels(): Array[TaskLocality.TaskLocality] = {
  import TaskLocality.{PROCESS_LOCAL, NODE_LOCAL, NO_PREF, RACK_LOCAL, ANY}
  val levels = new ArrayBuffer[TaskLocality.TaskLocality]
  if (pendingTasksForExecutor.nonEmpty && getLocalityWait(PROCESS_LOCAL) != 0 &&
      pendingTasksForExecutor.keySet.exists(sched.isExecutorAlive)) {
    levels += PROCESS_LOCAL
  }
  if (pendingTasksForHost.nonEmpty && getLocalityWait(NODE_LOCAL) != 0 &&
      pendingTasksForHost.keySet.exists(sched.hasExecutorsAliveOnHost)) {
    levels += NODE_LOCAL
  }
  if (pendingTasksWithNoPrefs.nonEmpty) {
    levels += NO_PREF
  }
  if (pendingTasksForRack.nonEmpty && getLocalityWait(RACK_LOCAL) != 0 &&
      pendingTasksForRack.keySet.exists(sched.hasHostAliveOnRack)) {
    levels += RACK_LOCAL
  }
  levels += ANY
  logDebug("Valid locality levels for " + taskSet + ": " + levels.mkString(", "))
  levels.toArray
}
/**
 * Recompute myLocalityLevels and per-level waits after cluster membership changes,
 * keeping currentLocalityIndex pointed at the previously active level (or the
 * next-biggest level still present, per getLocalityIndex).
 */
def recomputeLocality() {
val previousLocalityLevel = myLocalityLevels(currentLocalityIndex)
myLocalityLevels = computeValidLocalityLevels()
localityWaits = myLocalityLevels.map(getLocalityWait)
currentLocalityIndex = getLocalityIndex(previousLocalityLevel)
}
/** Called when a new executor registers; better locality levels may become valid. */
def executorAdded() {
recomputeLocality()
}
}
private[spark] object TaskSetManager {
// The user will be warned if any stages contain a task that has a serialized size greater than
// this (in KB); see the size check in resourceOffer.
val TASK_SIZE_TO_WARN_KB = 100
}
| kimoonkim/spark | core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala | Scala | apache-2.0 | 41,258 |
// Test fixture: comparing unrelated types (Int == String) deliberately triggers a
// compiler warning; presumably consumed by the sbt reporter/source-mapper test — do not "fix".
object Foo {
// Will cause a warning
val bar = 1 == ""
}
| xuwei-k/xsbt | sbt-app/src/sbt-test/reporter/source-mapper/src/main/scala/Foo.scala | Scala | apache-2.0 | 61 |
package me.lachlanap.summis.controllers
import play.api.mvc._
import me.lachlanap.summis.logic._
/**
 * Per-request view context: the incoming request, app version, the account logged in
 * (if any), the current URL, the navigation menu, and the Play session.
 */
case class Context(request: Request[_],
                   appVersion: String,
                   loggedInAccount: Option[Account],
                   url: String, menu: Menu,
                   session: Session) {

  /**
   * Returns a copy of this context whose session records `username` as the current user.
   * <code>username</code> is assumed to be an actual account.
   */
  def login(username: String): Context =
    copy(session = session + ("user" -> username))

  /** Returns a copy of this context with the current user removed from the session. */
  def logout: Context =
    copy(session = session - "user")

  /** Route to the info page for the given project. */
  def url(project: Project) =
    routes.ProjectController.info(project.slug)

  /** Route for a release; not implemented yet. */
  def url(release: Release) =
    "TODO"
}
/** Navigation menu: an ordered collection of menu items. */
case class Menu(items: Seq[MenuItem])
/**
 * A single navigation entry.
 *
 * @param url    link target
 * @param name   display name
 * @param active whether this item is the currently selected one
 * @param auth   whether the item requires authentication (semantics assumed from the
 *               name — confirm against the templates that render it)
 */
case class MenuItem(url: String, name: String, active: Boolean = false, auth: Boolean = false) {
  /** DOM id derived from the display name: lower-cased, spaces replaced by dashes. */
  def id = name.toLowerCase.replaceAll(" ", "-")

  /**
   * Returns this item marked as active. Uses `copy` so that `auth` (and any field
   * added later) is preserved; the previous implementation rebuilt the item with
   * positional arguments and silently reset `auth` to its default `false`.
   */
  def toActive = copy(active = true)
}
| thorinii/summis-server | app/me/lachlanap/summis/controllers/Context.scala | Scala | mit | 1,084 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.storage.StorageLevel
/**
 * Counts words in UTF8 encoded, '\n' delimited text received from the network every second.
 *
 * Usage: NetworkWordCount <hostname> <port>
 * <hostname> and <port> describe the TCP server that Spark Streaming would connect to receive
 * data.
 *
 * To run this on your local machine, you need to first run a Netcat server
 * `$ nc -lk 9999`
 * and then run the example
 * `$ bin/run-example org.apache.spark.examples.streaming.NetworkWordCount localhost 9999`
 */
object NetworkWordCount {
  def main(args: Array[String]) {
    /** if (args.length < 2) {
    System.err.println("Usage: NetworkWordCount <hostname> <port>")
    System.exit(1)
    }
    **/
    //StreamingExamples.setStreamingLogLevels()
    // Create the context with a 1 second batch size.
    // NOTE: receiver-based sources need master "local[n]" with n > 1 — the socket
    // receiver permanently occupies one core, so with plain "local" no core would be
    // left to process the received data and no output would ever be produced.
    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")
    val ssc = new StreamingContext(sparkConf, Seconds(1))
    // Create a socket stream on target ip:port and count the
    // words in input stream of \n delimited text (eg. generated by 'nc').
    // Note that no duplication in storage level only for running locally;
    // replication is necessary in a distributed scenario for fault tolerance.
    val lines = ssc.socketTextStream("192.168.0.39", 8088, StorageLevel.MEMORY_AND_DISK_SER)
    val words = lines.flatMap(_.split(" ")) // split on spaces
    val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) // count per word
    wordCounts.print()
    ssc.start()
    ssc.awaitTermination()
  }
}
// scalastyle:on println | tophua/spark1.52 | examples/src/main/scala/org/apache/spark/examples/NetworkWordCount.scala | Scala | apache-2.0 | 3,033 |
/*
* Copyright 2015 Foundational Development
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pro.foundev.benchmarks.spark_throughput
import pro.foundev.benchmarks.spark_throughput.launchers.MaxBenchmarkLauncher
/**
 * Spec for MaxBenchmarkLauncher: verifies the value, name, and timing reported by
 * both the RDD-based (`all`) and SQL-based (`sqlAll`) "max" benchmarks, using the
 * fixture (sc, tableSuffix, timer) supplied by BenchmarkSupport.
 */
class MaxBenchmarkLauncherSpec extends BenchmarkSupport {
override def beforeEach {
super.beforeEach()
// Fresh launcher per test, wired to the fixture's stub timer.
benchmarkLauncher = new MaxBenchmarkLauncher(sc, tableSuffix)
benchmarkLauncher.timer = timer
}
"A MaxBenchmarkLauncher" should "get a max value" in {
benchmarkLauncher.all()(0).value should be (10L)
}
it should "have the name of max" in {
benchmarkLauncher.all()(0).name should be ("max")
}
it should "time the result" in {
// 2000 microseconds reported as 0.002 (units per the Result type — confirm there).
timer.setDuration(2000)
benchmarkLauncher.all()(0).milliSeconds should be (0.002)
}
"A MaxBenchmarkLauncher" should "get a sqlMax value" in {
benchmarkLauncher.sqlAll()(0).value should be (10L)
}
it should "have the name of sqlMax" in {
benchmarkLauncher.sqlAll()(0).name should be ("sqlMax")
}
it should "time the result of sqlMax" in {
timer.setDuration(2000)
benchmarkLauncher.sqlAll()(0).milliSeconds should be (0.002)
}
}
| rssvihla/datastax_work | spark_commons/benchmarks/spark_throughput/src/test/scala/pro/foundev/benchmarks/spark_throughput/MaxBenchmarkLauncherSpec.scala | Scala | apache-2.0 | 1,668 |
package scala.collection
import org.junit.Assert._
import org.junit.Test
import scala.util.Random
/* Test for scala/bug#7614 */
/* Test for scala/bug#7614: the deprecated copyToBuffer must behave like `++=`. */
class TraversableOnceTest {
// Random but fixed-per-instance input; both code paths below consume the same list.
val list = List.fill(1000)(Random.nextInt(10000) - 5000)
@deprecated("Tests deprecated API", since="2.13")
@Test
def copyToBuffer(): Unit = {
val b1 = mutable.ArrayBuffer.empty[Int]
list.copyToBuffer(b1)
val b2 = mutable.ArrayBuffer.empty[Int]
b2 ++= list
// The deprecated API and the plain append must produce identical buffers.
assertEquals(b1, b2)
}
}
| lrytz/scala | test/junit/scala/collection/TraversableOnceTest.scala | Scala | apache-2.0 | 471 |
package edu.stanford.graphics.shapenet.common
import edu.stanford.graphics.shapenet.Constants
import edu.stanford.graphics.shapenet.util.{CSVFile, IOUtils}
import org.json.simple.JSONValue
import scala.collection.JavaConversions._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/**
* Simple category taxonomy with mapping to WordNet.
* @author Angel Chang
*/
class CategoryTaxonomy() {
var categoryMap: Map[String, CategoryInfo] = null
var nameToCategoriesMap: Map[String, Set[CategoryInfo]] = null
var synsetToCategoriesMap: Map[String, Set[CategoryInfo]] = null
/**
 * Load the taxonomy from `filename` ("json" or "csv" format) and build the two
 * derived indexes: synset id -> categories and name -> categories.
 *
 * @throws IllegalArgumentException for any other format string
 */
def init(filename: String, format: String): Unit = {
if (format == "json") {
categoryMap = initFromJson(filename)
} else if (format == "csv") {
categoryMap = initFromCsv(filename)
} else {
throw new IllegalArgumentException("Unsupported format")
}
// Categories without a synset id are excluded from the synset index.
synsetToCategoriesMap = categoryMap.values.filter(x => x.synsetId != null).groupBy(x => x.synsetId ).mapValues( y => y.toSet )
// A category can have several names; index every name to its category set.
nameToCategoriesMap = categoryMap.values.map( x => x.names.map( n => (n,x))).flatten.groupBy( x => x._1 ).mapValues( y => y.map( z => z._2 ).toSet )
}
/**
 * Parse a JSON taxonomy (a top-level array of category records) and wire up
 * parent links from each category's children list.
 *
 * @throws IllegalArgumentException if the top-level JSON value is not an array
 */
def initFromJson(jsonFile: String): Map[String, CategoryInfo] = {
val json = JSONValue.parse(IOUtils.fileReader(jsonFile))
json match {
case a:java.util.List[_] => {
val categories = convertCategories(a)
val map = categories.map( c => c.name -> c ).toMap
for (cat <- categories) {
for (child <- cat.children) {
// NOTE(review): map(child) throws if a child is not itself listed as a
// category in the file — assumes a self-contained taxonomy.
val ccat = map(child)
ccat.parent = cat.name
}
}
map
}
case _ => throw new IllegalArgumentException("Invalid taxonomy " + jsonFile)
}
}
/**
 * Convert raw parsed JSON objects into CategoryInfo instances. Each record must
 * carry "synsetId", a comma-separated "name", and a "children" array; the
 * CategoryInfo is constructed with the synset id as its primary identifier.
 */
private def convertCategories(a: java.util.List[_]): IndexedSeq[CategoryInfo] = {
val cats = a.map( c => {
val m = c.asInstanceOf[java.util.Map[String,_]]
val synsetId = m("synsetId").asInstanceOf[java.lang.String]
val names = m("name").asInstanceOf[java.lang.String].split(",")
val children = m("children").asInstanceOf[java.util.List[_]].map ( x => x.asInstanceOf[java.lang.String] )
val cat = new CategoryInfo(synsetId)
cat.children ++= children
cat.names ++= names
cat.synsetId = synsetId
cat
})
cats.toIndexedSeq
}
/**
 * Build the taxonomy from three CSV files: the category hierarchy, per-category
 * material ratios, and the isContainer flag. Later files enrich the entries
 * created (or auto-created) by earlier ones.
 */
def initFromCsv(categoriesFile: String = Constants.CATEGORIES_FILE,
materialsFile: String = Constants.CATEGORY_MATERIALS_FILE,
isContainerFile: String = Constants.CATEGORY_ISCONTAINER_FILE): Map[String, CategoryInfo] = {
val map = new mutable.HashMap[String, CategoryInfo]()
readCategories(map, categoriesFile)
readMaterialsFile(map, materialsFile)
readIsContainerFile(map, isContainerFile)
map.toMap
}
/** True if `subCat` equals `cat` or is a descendant of `cat` in the taxonomy. */
def isSubCategory(subCat: String, cat: String): Boolean = {
getSubCategoryLevel(subCat, cat) >= 0
}
/** True if either category is equal to, or an ancestor of, the other. */
def isSimilarCategory(subCat: String, cat: String): Boolean = {
isSubCategory(subCat, cat) || isSubCategory(cat, subCat)
}
/** Names of all categories flagged as containers. */
def getContainerCategories(): Set[String] =
  categoryMap.values.collect { case info if info.isContainer => info.name }.toSet
/** Names of all categories with no children, i.e. the leaves of the taxonomy. */
def getLeafCategories(): Set[String] =
  categoryMap.values.collect { case info if !info.hasChildren => info.name }.toSet
/** Read in simple 2 level category hierarchy
 * @param categoriesFile with category hierarchy definitions: "category subcategory0 subcategory1"
 *
 * Deprecated: superseded by readCategories, which reads the richer CSV schema.
 */
@deprecated
private def readSubcategories(map: mutable.HashMap[String, CategoryInfo], categoriesFile: String) {
val lines = IOUtils.fileSource(categoriesFile).getLines()
for (s <- lines) {
// '#'-prefixed lines are comments.
if (!s.startsWith("#")) {
val fields: Array[String] = s.split("\\\\s+", 2)
val category = fields(0)
val subcategories: Set[String] =
if (fields.length > 1) {
fields(1).split("\\\\s*,\\\\s*").toSet
} else {
Set()
}
val existing = map.getOrElseUpdate(category, new CategoryInfo(category))
existing.children ++= subcategories
// Auto-create child entries and wire their parent link.
for (sc <- subcategories) {
map.getOrElseUpdate(sc, new CategoryInfo(sc, category)).parent = category
}
}
}
}
/**
 * Populate `map` from the main categories CSV: wires category/children/parent
 * relationships from the "category", "children" and "wnsynsetid" columns, and
 * stores every other non-empty column as a generic attribute on the category.
 */
private def readCategories(map: mutable.HashMap[String, CategoryInfo], categoriesFile: String): Unit = {
//wnsynsetid,children,symmetries,attachmentSide,hasFront,viewingSide,isContainer,isAnimate,
// isManmade,foundIn,roughShape,roughSize,inShapeNetCore,exampleModel,text
val csvfile = new CSVFile(categoriesFile, includesHeader = true)
val iCategory: Int = csvfile.index("category")
val iChildren: Int = csvfile.index("children")
val iSynsetId: Int = csvfile.index("wnsynsetid")
val knownColumns = Set(iCategory, iChildren, iSynsetId)
val header = csvfile.getHeader()
for (row <- csvfile) {
val category: String = row(iCategory).trim
val children: String = row(iChildren).trim
val synsetId: String = row(iSynsetId).trim
val subcategories: Set[String] =
if (children.length > 0) {
children.split("\\\\s*,\\\\s*").toSet
} else {
Set()
}
val existing = map.getOrElseUpdate(category, new CategoryInfo(category))
existing.children ++= subcategories
existing.synsetId = synsetId
// Auto-create child entries and wire their parent link.
for (sc <- subcategories) {
map.getOrElseUpdate(sc, new CategoryInfo(sc, category)).parent = category
}
// Go over other fields: any non-empty, non-structural column becomes an attribute.
for ((fieldvalue, i) <- row.zipWithIndex) {
val fv = fieldvalue.trim()
if (!knownColumns.contains(i) && fv.length > 0) {
val fieldname = header(i)
existing.addAttribute(fieldname, fv)
}
}
}
}
  // Loads material priors from a CSV with columns Category/Material/Ratio and
  // accumulates them into each category's `materials` map.
  private def readMaterialsFile(map: mutable.HashMap[String, CategoryInfo], filename: String): Unit = {
    val csvfile = new CSVFile(filename, includesHeader = true)
    val iCategory: Int = csvfile.index("Category")
    val iMaterial: Int = csvfile.index("Material")
    val iRatio: Int = csvfile.index("Ratio")
    for (row <- csvfile) {
      val category: String = row(iCategory).trim
      val material: String = row(iMaterial).trim
      // NOTE(review): toDouble throws on a malformed Ratio cell — presumably
      // the file is machine-generated and always well-formed; confirm.
      val ratio: Double = row(iRatio).trim.toDouble
      val catInfo = map.getOrElseUpdate(category, new CategoryInfo(category))
      // `materials` starts out null (see CategoryInfo), so initialize lazily.
      if (catInfo.materials == null) {
        catInfo.materials = Map(material -> ratio)
      } else {
        catInfo.materials += (material -> ratio)
      }
    }
  }
private def readIsContainerFile(map: mutable.HashMap[String, CategoryInfo], filename: String): Unit = {
val csvfile = new CSVFile(filename, includesHeader = true)
val iCategory: Int = csvfile.index("category")
val iIsContainer: Int = csvfile.index("isContainer")
for (row <- csvfile) {
val category: String = row(iCategory).trim
val isContainerStr: String = row(iIsContainer).trim
val isContainer: Boolean = isContainerStr == "1"
val catInfo = map.getOrElseUpdate(category, new CategoryInfo(category))
catInfo.isContainer = isContainer
}
}
  /** Returns the node for `category`, or null when the category is unknown. */
  def getCategoryInfo(category: String): CategoryInfo = {
    categoryMap.getOrElse(category, null)
  }
  /** Returns all categories mapped to the given WordNet synset id (empty set when none). */
  def getCategoriesBySynsetId(synset: String): Set[CategoryInfo] = {
    synsetToCategoriesMap.getOrElse(synset, Set())
  }
  /** Returns all categories registered under the given display name (empty set when none). */
  def getCategoriesByName(name: String): Set[CategoryInfo] = {
    nameToCategoriesMap.getOrElse(name, Set())
  }
  /** Returns the synset id of `category`, or null when the category is unknown. */
  def getSynsetId(category: String): String = {
    val cat = categoryMap.getOrElse(category, null)
    if (cat != null) cat.synsetId else null
  }
  /** Returns the parent category name, or null for roots and unknown categories. */
  def getParent(category: String): String = {
    val cat = categoryMap.getOrElse(category, null)
    if (cat != null) cat.parent else null
  }
def getSubCategoryLevel(subCat: String, cat: String): Int = {
var level = 0
var parent = subCat
while (parent != null) {
if (parent == cat) return level
level += 1
parent = getParent(parent)
}
-1
}
def getAncestors(category: String): Seq[String] = {
val ancestors = ArrayBuffer[String]()
var parent = category
while (parent != null) {
ancestors.append(parent)
parent = getParent(parent)
}
ancestors.toSeq
}
  // Returns list of categories including ancestors, from finest to coarsest
  def getCategoriesWithAncestors(categories: Seq[String]): Seq[String] = {
    // Pair every ancestor with a depth score: the category itself gets the
    // largest score (anc.size) and the topmost root gets 1, so sorting by
    // descending score yields finest-to-coarsest order.
    val ancestorsWithDepth: Seq[Seq[(String, Int)]] = categories.map(
      category => {
        val anc = getAncestors(category)
        val s = anc.size
        anc.zipWithIndex.map(x => (x._1, s - x._2))
      }
    )
    // `distinct` keeps the first (finest-ranked) occurrence of a shared ancestor.
    ancestorsWithDepth.flatten.sortBy(x => -x._2).map(x => x._1).distinct
  }
}
/**
 * A node in the category hierarchy, carrying the metadata known about one
 * category.
 *
 * @param name   the category's canonical name
 * @param parent name of the parent category, or null for a root category
 */
class CategoryInfo(val name: String, var parent: String = null) extends HasAttributes {
  val children: mutable.Set[String] = mutable.Set.empty[String]
  var names: mutable.Set[String] = mutable.Set.empty[String]
  var synsetId: String = null
  // Priors on what an object of this category is made of (null until loaded).
  var materials: Map[String, Double] = null
  // Whether this category is typically a container.
  var isContainer: Boolean = false
  var attributes: IndexedSeq[(String,String)] = IndexedSeq()

  def hasParent = parent != null
  def hasChildren = children.nonEmpty
  def hasNoOne = !hasParent && !hasChildren

  override def toString =
    s"$name, parent: $parent, children: $children, synset: $synsetId"
}
import org.apache.http.client.methods.HttpUriRequest
import org.apache.http.client.{ResponseHandler, HttpClient}
import org.apache.http.protocol.HttpContext
import org.apache.http.{HttpRequest, HttpHost}
import org.specs2.mutable.Specification
// Integration spec for the dispatch "classic" HTTP DSL. These examples hit the
// live host technically.us, so they require network access to pass.
object HttpSpec extends Specification {
  import dispatch.classic._
  import org.apache.http.protocol.HTTP.CONTENT_ENCODING
  // Reference payload served at http://technically.us/test.text; each GET
  // variant below is expected to yield exactly this string.
  val jane = "It is a truth universally acknowledged, that a single man in possession of a good fortune, must be in want of a wife.\\n"
  "Singleton Http test get" should {
    val req = new Request("http://technically.us/test.text")
    "not throw exception if credentials are specified without explicit host" in {
      Http (req as ("user", "pass") as_str) must_== (jane)
    }
    get_specs(req)
  }
  "Bound host get" should {
    // Same resource built with the host/path combinator syntax.
    val req = :/("technically.us") / "test.text"
    "not throw exception if credentials are specified with explicit host" in {
      Http (req as ("user", "pass") as_str) must_== (jane)
    }
    get_specs(req)
  }
  "Combined request get" should {
    get_specs(:/("technically.us") <& /("test.text"))
  }
  "Backwards combined request get" should {
    get_specs(/("test.text") <& :/("technically.us"))
  }
  "Http" should {
    // Minimal pass-through HttpClient used only to prove make_client can be
    // overridden; every method delegates to the wrapped client.
    class SimpleDelegatingHttpClient(realClient: HttpClient) extends HttpClient {
      def getParams = realClient.getParams
      def getConnectionManager = realClient.getConnectionManager
      def execute(request: HttpUriRequest) = realClient.execute(request)
      def execute(request: HttpUriRequest, context: HttpContext) = realClient.execute(request, context)
      def execute(target: HttpHost, request: HttpRequest) = realClient.execute(target, request)
      def execute(target: HttpHost, request: HttpRequest, context: HttpContext) = realClient.execute(target, request, context)
      def execute[T](request: HttpUriRequest, responseHandler: ResponseHandler[_ <: T]) = realClient.execute(request, responseHandler)
      def execute[T](request: HttpUriRequest, responseHandler: ResponseHandler[_ <: T], context: HttpContext) = realClient.execute(request, responseHandler, context)
      def execute[T](target: HttpHost, request: HttpRequest, responseHandler: ResponseHandler[_ <: T]) = realClient.execute(target, request, responseHandler)
      def execute[T](target: HttpHost, request: HttpRequest, responseHandler: ResponseHandler[_ <: T], context: HttpContext) = realClient.execute(target, request, responseHandler, context)
    }
    "allow override" in {
      val http = new Http with thread.Safety {
        override def make_client: HttpClient = new SimpleDelegatingHttpClient(super.make_client)
      }
      http must not beNull; // i.e. this code should compile
      http.shutdown()
      success
    }
  }
  val http = new Http
  val httpfuture = new thread.Http
  // Shared battery of GET expectations, parameterized over how the request
  // object for test.text was constructed.
  def get_specs(test: Request) = {
    // start some connections as futures
    val stream = httpfuture(test >> { stm =>
      // the nested scenario here contrived fails with actors.Futures
      httpfuture((test >> { stm =>
        scala.io.Source.fromInputStream(stm).mkString
      }) ~> { string =>
        string // identity function
      })
    })
    val string = httpfuture(test as_str)
    val bytes = httpfuture(test >>> new java.io.ByteArrayOutputStream)
    // test a few other things
    "throw status code exception when applied to non-existent resource" in {
      http (test / "do_not_want" as_str) must throwA[StatusCode]
    }
    "allow any status code with x" in {
      (http x (test / "do_not_want" as_str) {
        case (404, _, _, out) => out()
        case _ => "success is failure"
      }) must contain ("404 Not Found")
    }
    "serve a gzip header" in {
      http(test.gzip >:> { _(CONTENT_ENCODING) }) must_== (Set("gzip"))
    }
    // check back on the futures
    "equal expected string" in {
      string() must_== jane
    }
    "stream to expected sting" in {
      stream()() must_== jane
    }
    "write to expected sting bytes" in {
      bytes().toByteArray.toList must_== jane.getBytes.toList
    }
    "equal expected string with gzip encoding, using future" in {
      httpfuture(test.gzip >+ { r => (r as_str, r >:> { _(CONTENT_ENCODING) }) } )() must_== (jane, Set("gzip"))
    }
    val h = new Http// single threaded Http instance
    "equal expected string with a gzip defaulter" in {
      val my_defaults = /\\.gzip
      h(my_defaults <& test >+ { r => (r as_str, r >:> { _(CONTENT_ENCODING) }) } ) must_== (jane, Set("gzip"))
    }
    "process html page" in {
      import XhtmlParsing._
      h(url("http://technically.us/") </> { xml =>
        (xml \\\\ "title").text
      }) must_== "technically.us"
    }
    "process xml response" in {
      h(url("http://technically.us/test.xml") <> { xml =>
        (xml \\ "quote").text.trim
      }) must_== jane.trim
    }
    "equal expected string without gzip encoding, with handler chaining" in {
      h(test >+> { r => r >:> { headers =>
        r >- { (_, headers(CONTENT_ENCODING)) }
      } }) must_== (jane, Set())
    }
    "equal expected string with gzip encoding, with >:+" in {
      h(test.gzip >:+ { (headers, r) =>
        r >- { (_, headers(CONTENT_ENCODING.toLowerCase)) }
      }) must_== (jane, Seq("gzip"))
    }
  }
  "Path building responses" should {
    // using singleton Http, will need to shut down after all tests
    val test2 = "and they were both ever sensible of the warmest gratitude\\n"
    "work with chaining" in {
      Http( :/("technically.us") / "test" / "test.text" as_str ) must_== test2
    }
    "work with factories" in {
      Http( :/("technically.us") <& /("test") <& /("test.text") as_str ) must_== test2
    }
  }
}
| dispatch/dispatch | http/src/test/scala/HttpSpec.scala | Scala | lgpl-2.1 | 5,704 |
package spark.scheduler
import spark.scheduler.cluster.TaskInfo
import scala.collection.mutable.Map
import spark._
import spark.executor.TaskMetrics
/**
 * Types of events that can be handled by the DAGScheduler. The DAGScheduler uses an event queue
 * architecture where any thread can post an event (e.g. a task finishing or a new job being
 * submitted) but there is a single "logic" thread that reads these events and takes decisions.
 * This greatly simplifies synchronization.
 */
private[spark] sealed trait DAGSchedulerEvent
// A new job has been submitted to run on `finalRDD`; `listener` is notified of
// its progress and completion.
private[spark] case class JobSubmitted(
    finalRDD: RDD[_],
    func: (TaskContext, Iterator[_]) => _,
    partitions: Array[Int],
    allowLocal: Boolean,
    callSite: String,
    listener: JobListener)
  extends DAGSchedulerEvent
// A task has finished (successfully or not); carries its result, accumulator
// updates and metrics.
private[spark] case class CompletionEvent(
    task: Task[_],
    reason: TaskEndReason,
    result: Any,
    accumUpdates: Map[Long, Any],
    taskInfo: TaskInfo,
    taskMetrics: TaskMetrics)
  extends DAGSchedulerEvent
// The executor with the given id has been lost.
private[spark] case class ExecutorLost(execId: String) extends DAGSchedulerEvent
// An entire task set failed irrecoverably (e.g. too many task failures).
private[spark] case class TaskSetFailed(taskSet: TaskSet, reason: String) extends DAGSchedulerEvent
// Request to shut down the scheduler's event loop.
private[spark] case object StopDAGScheduler extends DAGSchedulerEvent
| koeninger/spark | core/src/main/scala/spark/scheduler/DAGSchedulerEvent.scala | Scala | bsd-3-clause | 1,249 |
/*
* Copyright © 2017 Full 360 Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package com.full360.prometheus.http.akka
import com.full360.prometheus.Prometheus
import akka.http.scaladsl.server.Directives.{ complete, get, path }
import akka.http.scaladsl.server.{ PathMatchers, Route }
object AkkaHttpMetricService {
  // Turns a slash-separated base path string into an akka-http path matcher.
  private[akka] def metricsBase(path: String) = PathMatchers.separateOnSlashes(path)
}
// Override metricsBasePath to serve the metrics endpoint somewhere other than
// the default "metrics" path.
trait AkkaHttpMetricConfig {
  def metricsBasePath: String = "metrics"
}
// Mix-in exposing a GET route that renders the Prometheus registry as text.
trait AkkaHttpMetricService extends AkkaHttpMetricConfig {
  import AkkaHttpMetricService._
  def route: Route = {
    val base = metricsBase(metricsBasePath)
    path(base) {
      get {
        complete(Prometheus.getRegistry)
      }
    }
  }
}
| full360/prometheus_client_scala | client-akka/src/main/scala/com/full360/prometheus/http/akka/AkkaHttpMetricService.scala | Scala | mit | 1,749 |
package mechanics
import Node._
/**
* The Node class represents the nodes used by the character pathfinding included
* in the Grid class. One node approximately represents the middle of a grid cell.
* The algoritm used is a variation of the A* pathfinding algorithm.
* Here, efficiency is more important than mathematical perfection so the
* pathfinding algorithm used is not ideal.
*/
class Node(val x: Int, val y: Int, var parent: Option[Node], start: (Int, Int), target: (Int, Int)) {
var g = 0 //Distance across nodes to the start coordinates.
var h = 0 //Approximated straight distance to the target.
updateG()
updateH()
def f = g + h //The sum of g and h.
/**
* Updates the g value recursively.
*/
def updateG() = {
if (parent.isDefined) {
if (parent.get.x == this.x || parent.get.y == this.y) { //Horizontal or vertical.
g = parent.get.g + horizontalD
} else { //Diagonal.
g = parent.get.g + diagonalD
}
} else {
g = 0 //This is the start node.
}
}
/**
* Updates the h value. This is only a very crude approximation of the distance to
* the target, but since the maps aren't so large, the efficiency this comes with
* is more important than always finding the very fastest path.
*/
def updateH() = {
(Math.abs(target._1 - this.x) + Math.abs(target._2 - this.y)) * horizontalD
}
/**
* Updates all node values.
*/
def updateValues(): Unit = {
updateG()
updateH()
}
/**
* Returns this node's grid coordinates in the form of a tuple: (x, y).
*/
def toCoords = (this.x, this.y)
}
/**
 * A companion object to the Node class.
 * Contains some constants for the pathfinding algorithm.
 */
object Node {
  val horizontalD = 10 // An approximation of the relative distance between two adjacent nodes.
  val diagonalD = 14 // An approximation of the relative distance between two diagonal nodes (~10 * sqrt(2)).
}
| Berthur/AgeOfLords | src/mechanics/Node.scala | Scala | gpl-3.0 | 2,060 |
package io.gatling.keycloak
import akka.actor.ActorDSL._
import akka.actor.ActorRef
import io.gatling.core.action.Interruptable
import io.gatling.core.action.builder.ActionBuilder
import io.gatling.core.config.Protocols
import io.gatling.core.result.writer.DataWriterClient
import io.gatling.core.session.{Session, Expression}
import io.gatling.core.validation.Validation
/**
* @author Radim Vansa <rvansa@redhat.com>
*/
// Gatling action builder that spawns a RefreshTokenAction actor in the
// scenario chain.
class RefreshTokenActionBuilder(requestName: Expression[String]) extends ActionBuilder{
  override def build(next: ActorRef, protocols: Protocols): ActorRef = {
    actor(actorName("refresh-token"))(new RefreshTokenAction(requestName, next))
  }
}
// Action that forces a refresh of the session's expired Keycloak token,
// timing the call and recording the result under `requestName`.
class RefreshTokenAction(
                          requestName: Expression[String],
                          val next: ActorRef
                        ) extends Interruptable with ExitOnFailure with DataWriterClient {
  override def executeOrFail(session: Session): Validation[_] = {
    // The mock authenticator carrying the Keycloak security context is stored
    // in the session under MockRequestAuthenticator.KEY by an earlier action.
    val requestAuth: MockRequestAuthenticator = session(MockRequestAuthenticator.KEY).as[MockRequestAuthenticator]
    Blocking(() =>
      Stopwatch(() => requestAuth.getKeycloakSecurityContext.refreshExpiredToken(false))
        .check(identity, _ => "Could not refresh token")
        .recordAndContinue(this, session, requestName(session).get)
    )
  }
}
| rvansa/keycloak-benchmark | src/main/scala/io/gatling/keycloak/RefreshToken.scala | Scala | apache-2.0 | 1,329 |
package com.normation.rudder.domain.eventlog
import scala.xml.Elem
import com.normation.eventlog.EventLog
import com.normation.eventlog.EventLogDetails
import com.normation.eventlog.EventLogFilter
import com.normation.eventlog.EventLogType
import com.normation.rudder.domain.Constants
import com.normation.utils.HashcodeCaching
/**
* Update the policy server
*/
// Marker trait for all policy-server related event logs.
sealed trait PolicyServerEventLog extends EventLog
// Event log entry recorded when the policy server configuration is updated.
final case class UpdatePolicyServer(
    override val eventDetails : EventLogDetails
) extends PolicyServerEventLog with HashcodeCaching {
  override val cause = None
  override val eventType = UpdatePolicyServer.eventType
  override val eventLogCategory = PolicyServerLogCategory
}
// Before/after snapshot of the authorized-networks list for serialization.
final case class AuthorizedNetworkModification(
    oldNetworks: Seq[String]
  , newNetworks: Seq[String]
)
object UpdatePolicyServer extends EventLogFilter {
  override val eventType = UpdatePolicyServerEventType
  override def apply(x : (EventLogType, EventLogDetails)) : UpdatePolicyServer = UpdatePolicyServer(x._2)
  // Serializes a network-list modification to the XML payload stored in the
  // event log (versioned via the current file format constant).
  def buildDetails(modification: AuthorizedNetworkModification) : Elem = {
    EventLog.withContent {
      <changeAuthorizedNetworks fileFormat={Constants.XML_CURRENT_FILE_FORMAT.toString}>
        <oldAuthorizedNetworks>{
          modification.oldNetworks.map { net => <net>{net}</net>}
        }</oldAuthorizedNetworks>
        <newAuthorizedNetworks>{
          modification.newNetworks.map { net => <net>{net}</net>}
        }</newAuthorizedNetworks>
      </changeAuthorizedNetworks>
    }
  }
}
// All event log filters contributed by this module.
object PolicyServerEventLogsFilter {
  final val eventList : List[EventLogFilter] = List(
    UpdatePolicyServer
  )
}
| armeniaca/rudder | rudder-core/src/main/scala/com/normation/rudder/domain/eventlog/PolicyServerEventLog.scala | Scala | gpl-3.0 | 1,655 |
package org.scalafmt.config
import metaconfig._
import metaconfig.generic.Surface
/**
*
* @param openParenCallSite
* If true AND bin-packing is true, then call-site
* arguments won't be aligned by the opening
* parenthesis. For example, this output
* will be disallowed
*
* function(a,
* b,
* c)
* @param openParenDefnSite Same as [[openParenCallSite]], except definition site.
* @param tokens The tokens to vertically align by. The "owner" is the
* scala.meta.Tree.getClass.getName of the deepest tree node
* that "owns" the token to align by.
*
* Examples:
*
* align.tokens = ["="] // align = owned by any tree node (not recommended)
*
* align.tokens = [
* { code = "=", owner = "Param" } // align = when owned by parameter tree nodes
* ]
*
* Pro tip. if you use for example
*
* style = defaultWithAlign
*
* and want to add one extra token (for example "|>") to align by, write
*
* align.tokens.add = [ "|> ]
*
* NOTE. Adding more alignment tokens may potentially decrease the
* vertical alignment in formatted output. Customize at your own
* risk, I recommend you try and stick to the default settings.
* @param arrowEnumeratorGenerator If true, aligns by <- in for comprehensions.
* @param ifWhileOpenParen
* If true, aligns by ( in if/while/for. If false,
* indents by continuation indent at call site.
* @param tokenCategory
* Customize which token kinds can align together. By default, only tokens with
* the same `Token.productPrefix` align. To for example align = and <-,
* set the values to:
* Map("Equals" -> "Assign", "LeftArrow" -> "Assign")
* Note. Requires mixedTokens to be true.
* @param treeCategory
* Customize which tree kinds can align together. By default, only trees with
* the same `Tree.productPrefix` align. To for example align Defn.Val and
* Defn.Var, set the values to:
* Map("Defn.Var" -> "Assign", "Defn.Val" -> "Assign")
* Note. Requires mixedOwners to be true.
*/
// See the scaladoc above for the meaning of each field; defaults correspond
// to the "some" preset in the companion object.
case class Align(
    openParenCallSite: Boolean = false,
    openParenDefnSite: Boolean = false,
    tokens: Set[AlignToken] = Set(AlignToken.caseArrow),
    arrowEnumeratorGenerator: Boolean = false,
    ifWhileOpenParen: Boolean = true,
    tokenCategory: Map[String, String] = Map(),
    treeCategory: Map[String, String] = Map(
      "Defn.Val" -> "val/var/def",
      "Defn.Var" -> "val/var/def",
      "Defn.Def" -> "val/var/def",
      "Defn.Class" -> "class/object/trait",
      "Defn.Object" -> "class/object/trait",
      "Defn.Trait" -> "class/object/trait",
      "Enumerator.Generator" -> "for",
      "Enumerator.Val" -> "for"
    )
) {
  implicit val reader: ConfDecoder[Align] = generic.deriveDecoder(this).noTypos
  // Decoder that supports the "align.tokens.add = [...]" extension syntax.
  implicit val alignReader: ConfDecoder[Set[AlignToken]] =
    ScalafmtConfig.alignTokenReader(tokens)
}
object Align {
  // no vertical alignment whatsoever, if you find any vertical alignment with
  // this settings, please report an issue.
  val none: Align = Align(
    openParenCallSite = false,
    openParenDefnSite = false,
    tokens = Set.empty,
    ifWhileOpenParen = false,
    tokenCategory = Map.empty,
    treeCategory = Map.empty
  )
  // stable set of alignment operators, the previous defaultWithAlign.
  val some = Align()
  val default = some
  val more: Align = some.copy(tokens = AlignToken.default)
  implicit lazy val surface: Surface[Align] = generic.deriveSurface[Align]
  implicit lazy val encoder: ConfEncoder[Align] = generic.deriveEncoder
  // TODO: metaconfig should handle iterables
  implicit def encoderSet[T: ConfEncoder]: ConfEncoder[Set[T]] =
    implicitly[ConfEncoder[Seq[T]]].contramap(_.toSeq)
  implicit val mapEncoder: ConfEncoder[Map[String, String]] =
    ConfEncoder.instance[Map[String, String]] { m =>
      Conf.Obj(m.iterator.map {
        case (k, v) => k -> Conf.fromString(v)
      }.toList)
    }
  // only for the truest vertical aligners, this setting is open for changes,
  // please open PR adding more stuff to it if you like.
  val most: Align = more.copy(
    arrowEnumeratorGenerator = true,
    tokenCategory = Map(
      "Equals" -> "Assign",
      "LeftArrow" -> "Assign"
    )
  )
  val allValues = List(default, none, some, most)
  // Maps the shorthand config values ("none"/false, "some", "more"/true,
  // "most") onto the corresponding presets above.
  object Builtin {
    def unapply(conf: Conf): Option[Align] = Option(conf).collect {
      case Conf.Str("none") | Conf.Bool(false) => Align.none
      case Conf.Str("some" | "default") => Align.some
      case Conf.Str("more") | Conf.Bool(true) => Align.more
      case Conf.Str("most") => Align.most
    }
  }
}
| olafurpg/scalafmt | scalafmt-core/shared/src/main/scala/org/scalafmt/config/Align.scala | Scala | apache-2.0 | 4,808 |
package com.hideto0710.scalack.builders
import com.hideto0710.scalack.Scalack
import com.hideto0710.scalack.Scalack.Auth
import com.typesafe.config.ConfigFactory
import org.scalatest.{FlatSpec, Matchers}
import scala.concurrent.Await
import scala.concurrent.duration.Duration
// Live-API spec: the second example actually calls Slack and expects an
// "invalid_auth" error for a bogus token, so it needs network access.
class ListChannelsSpec extends FlatSpec() with Matchers {
  val conf = ConfigFactory.load()
  implicit val auth = Auth(conf.getString("develop.token"))
  "An SlackApiClient" should "be able to get excludeArchived" in {
    val s = Scalack.listChannels.excludeArchived(1)
    s._excludeArchived.getOrElse(0) should be (1)
  }
  // NOTE(review): the description string below says "toke" — presumably a
  // typo for "token"; left as-is because spec names are runtime strings.
  it should "be not able to get channel list because of toke" in {
    val f = Scalack.listChannels.excludeArchived(0).execute(Auth("v"))
    val result = Await.result(f, Duration.Inf)
    result.ok should be (right = false)
    result.error.get should be ("invalid_auth")
  }
}
| hideto0710/scalack | src/test/scala/com/hideto0710/scalack/builders/ListChannelsSpec.scala | Scala | mit | 886 |
package scalapb.docs
object CommonProtos {
  /**
   * Renders the markdown installation snippet (sbt dependency blocks for the
   * ScalaPB 0.10.x and 0.9.x artifact lines) for one common-protos library.
   *
   * @param libName artifact base name, e.g. "proto-google-common-protos"
   * @param version published version of the generated artifact
   */
  def row(libName: String, version: String): String = {
    s"""### $libName
       |ScalaPB 0.10.x:
       |```scala
       |libraryDependencies ++= Seq(
       |  "com.thesamet.scalapb.common-protos" %% "${libName}-scalapb_0.10" % "${version}" % "protobuf",
       |  "com.thesamet.scalapb.common-protos" %% "${libName}-scalapb_0.10" % "${version}"
       |)
       |```
       |ScalaPB 0.9.x:
       |```scala
       |libraryDependencies ++= Seq(
       |  "com.thesamet.scalapb.common-protos" %% "${libName}-scalapb_0.9" % "${version}" % "protobuf",
       |  "com.thesamet.scalapb.common-protos" %% "${libName}-scalapb_0.9" % "${version}"
       |)
       |```
       |""".stripMargin
  }

  def header: String = ""

  def footer: String = ""

  /**
   * Concatenates the rows for all published libraries between [[header]] and
   * [[footer]].
   *
   * Bug fix: the end argument of mkString was previously the string literal
   * "footer" instead of the `footer` value, so the literal text "footer"
   * leaked into the generated output (and the `footer` method was never used).
   */
  def table: String = {
    Seq(
      ("proto-google-common-protos", "1.17.0-0"),
      ("proto-google-cloud-pubsub-v1", "1.85.1-0")
    ).map((row _).tupled).mkString(header, "", footer)
  }

  /** Prints the generated markdown to stdout (used when refreshing the docs). */
  def printTable(): Unit = {
    println(table)
  }
}
| trueaccord/ScalaPB | docs/src/main/scala/scalapb/docs/CommonProtos.scala | Scala | apache-2.0 | 1,057 |
/* scala-stm - (c) 2009-2011, Stanford University, PPL */
package stmbench7.scalastm
import scala.concurrent.stm._
import stmbench7.core._
// STMBench7 Module backed by a ScalaSTM Ref for the design root; registers
// itself as the owner of the supplied manual on construction.
class ModuleImpl(id: Int, typ: String, buildDate: Int, man: Manual
        ) extends DesignObjImpl(id, typ, buildDate) with Module {
  // .single gives a Ref view usable outside an explicit atomic block.
  val designRoot = Ref(null : ComplexAssembly).single
  man.setModule(this)
  def setDesignRoot(v: ComplexAssembly ) { designRoot() = v }
  def getDesignRoot = designRoot()
  def getManual = man
}
| nbronson/scala-stm | disabled/stmbench7/scalastm/ModuleImpl.scala | Scala | bsd-3-clause | 475 |
package scorex.network
import java.net.{InetAddress, InetSocketAddress, NetworkInterface, URI}
import akka.actor._
import akka.io.Tcp._
import akka.io.{IO, Tcp}
import akka.pattern.ask
import akka.util.Timeout
import scorex.app.Application
import scorex.network.message.{Message, MessageSpec}
import scorex.network.peer.PeerManager
import scorex.utils.ScorexLogging
import scala.collection.JavaConversions._
import scala.collection.mutable
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.util.{Failure, Random, Success, Try}
/**
* Control all network interaction
* must be singleton
*/
class NetworkController(application: Application) extends Actor with ScorexLogging {
  import NetworkController._
  private implicit val system = context.system
  private implicit val timeout = Timeout(5.seconds)
  private lazy val settings = application.settings
  private lazy val peerManager = application.peerManager
  // Registered handlers, keyed by the message codes each handler accepts;
  // incoming peer messages are dispatched through this map.
  private val messageHandlers = mutable.Map[Seq[Message.MessageCode], ActorRef]()
  //check own declared address for validity
  // The declared address must resolve to one of the local interfaces, or (when
  // UPnP is enabled) to the router's external address; otherwise startup fails
  // via the ensuring() below.
  if (!settings.localOnly) {
    settings.declaredAddress.map { myAddress =>
      Try {
        val uri = new URI("http://" + myAddress)
        val myHost = uri.getHost
        val myAddrs = InetAddress.getAllByName(myHost)
        NetworkInterface.getNetworkInterfaces.exists { intf =>
          intf.getInterfaceAddresses.exists { intfAddr =>
            val extAddr = intfAddr.getAddress
            myAddrs.contains(extAddr)
          }
        } match {
          case true => true
          case false =>
            if (settings.upnpEnabled) {
              val extAddr = application.upnp.externalAddress
              myAddrs.contains(extAddr)
            } else false
        }
      }.recover { case t: Throwable =>
        log.error("Declared address validation failed: ", t)
        false
      }.getOrElse(false)
    }.getOrElse(true).ensuring(_ == true, "Declared address isn't valid")
  }
  lazy val localAddress = new InetSocketAddress(InetAddress.getByName(settings.bindAddress), settings.port)
  // Externally visible address: explicit declared address first, UPnP-derived
  // address as a fallback.
  lazy val externalSocketAddress = settings.declaredAddress
    .flatMap(s => Try(InetAddress.getByName(s)).toOption)
    .orElse {
      if (settings.upnpEnabled) application.upnp.externalAddress else None
    }.map(ia => new InetSocketAddress(ia, application.settings.port))
  //an address to send to peers
  lazy val ownSocketAddress = externalSocketAddress
  log.info(s"Declared address: $ownSocketAddress")
  // Prototype handshake; the time field is filled in per-connection below.
  private lazy val handshakeTemplate = Handshake(application.applicationName,
    application.appVersion,
    settings.nodeName,
    application.settings.nodeNonce,
    ownSocketAddress,
    0
  )
  lazy val connTimeout = Some(new FiniteDuration(settings.connectionTimeout, SECONDS))
  //bind to listen incoming connections
  IO(Tcp) ! Bind(self, localAddress)
  private def bindingLogic: Receive = {
    case b@Bound(localAddr) =>
      log.info("Successfully bound to the port " + settings.port)
      context.system.scheduler.schedule(600.millis, 5.seconds)(peerManager ! PeerManager.CheckPeers)
    case CommandFailed(_: Bind) =>
      log.error("Network port " + settings.port + " already in use!")
      context stop self
      application.stopAll()
  }
  def businessLogic: Receive = {
    //a message coming in from another peer
    case Message(spec, Left(msgBytes), Some(remote)) =>
      val msgId = spec.messageCode
      spec.deserializeData(msgBytes) match {
        case Success(content) =>
          messageHandlers.find(_._1.contains(msgId)).map(_._2) match {
            case Some(handler) =>
              handler ! DataFromPeer(msgId, content, remote)
            case None =>
              log.error("No handlers found for message: " + msgId)
              //todo: ban a peer
          }
        case Failure(e) =>
          log.error("Failed to deserialize data: " + e.getMessage)
          //todo: ban peer
      }
    case SendToNetwork(message, sendingStrategy) =>
      // Optional random delay used for network fuzzing in tests.
      val delay = if (settings.fuzzingDelay > 0) Random.nextInt(settings.fuzzingDelay) else 0
      system.scheduler.scheduleOnce(delay.millis) {
        (peerManager ? PeerManager.FilterPeers(sendingStrategy))
          .map(_.asInstanceOf[Seq[ConnectedPeer]])
          .foreach(_.foreach(_.handlerRef ! message))
      }
  }
  def peerLogic: Receive = {
    case ConnectTo(remote) =>
      log.info(s"Connecting to: $remote")
      IO(Tcp) ! Connect(remote, localAddress = None, timeout = connTimeout, pullMode = true)
    case c@Connected(remote, local) =>
      // Wrap the raw TCP connection in a PeerConnectionHandler and initiate
      // the handshake immediately.
      val connection = sender()
      val handler = context.actorOf(Props(classOf[PeerConnectionHandler], application, connection, remote))
      connection ! Register(handler, keepOpenOnPeerClosed = false, useResumeWriting = true)
      val newPeer = new ConnectedPeer(remote, handler)
      newPeer.handlerRef ! handshakeTemplate.copy(time = System.currentTimeMillis() / 1000)
      peerManager ! PeerManager.Connected(newPeer)
    case CommandFailed(c: Connect) =>
      log.info("Failed to connect to : " + c.remoteAddress)
      peerManager ! PeerManager.Disconnected(c.remoteAddress)
  }
  //calls from API / application
  def interfaceCalls: Receive = {
    case ShutdownNetwork =>
      log.info("Going to shutdown all connections & unbind port")
      (peerManager ? PeerManager.FilterPeers(Broadcast))
        .map(_.asInstanceOf[Seq[ConnectedPeer]])
        .foreach(_.foreach(_.handlerRef ! PeerConnectionHandler.CloseConnection))
      self ! Unbind
      context stop self
  }
  override def receive: Receive = bindingLogic orElse businessLogic orElse peerLogic orElse interfaceCalls orElse {
    case RegisterMessagesHandler(specs, handler) =>
      messageHandlers += specs.map(_.messageCode) -> handler
    case CommandFailed(cmd: Tcp.Command) =>
      log.info("Failed to execute command : " + cmd)
    case nonsense: Any =>
      log.warn(s"NetworkController: got something strange $nonsense")
  }
}
object NetworkController {
  // Registers `handler` for all message codes declared by `specs`.
  case class RegisterMessagesHandler(specs: Seq[MessageSpec[_]], handler: ActorRef)
  //todo: more stricter solution for messageType than number?
  // Deserialized payload received from a remote peer, routed to a handler.
  case class DataFromPeer[V](messageType: Message.MessageCode, data: V, source: ConnectedPeer)
  // Broadcast/send a message to peers selected by `sendingStrategy`.
  case class SendToNetwork(message: Message[_], sendingStrategy: SendingStrategy)
  // Close all connections and unbind the listening port.
  case object ShutdownNetwork
  // Ask the controller to open an outgoing connection to `address`.
  case class ConnectTo(address: InetSocketAddress)
}
| ScorexProject/Scorex-Lagonaki | scorex-basics/src/main/scala/scorex/network/NetworkController.scala | Scala | cc0-1.0 | 6,458 |
/*
* OneTimeFiltering.scala
* One-time filtering algorithms.
*
* Created By: Avi Pfeffer (apfeffer@cra.com)
* Creation Date: Jan 1, 2009
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email figaro@cra.com for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.algorithm.filtering
import com.cra.figaro.algorithm._
import com.cra.figaro.language._
/**
 * One-time filtering algorithms. An implementation of OneTimeFiltering must implement the advanceTime,
 * computeCurrentDistribution, and computeCurrentExpectation methods.
 *
 * Each query method below simply delegates to the corresponding compute*
 * method supplied by the concrete algorithm.
 */
trait OneTimeFiltering extends Filtering with OneTime {
  /**
   * Returns the distribution over the element referred to by the reference at the current time point.
   */
  def currentDistribution[T](reference: Reference[T]): Stream[(Double, T)] =
    computeCurrentDistribution(reference)

  /**
   * Returns the expectation of the element referred to by the reference
   * under the given function at the current time point.
   */
  def currentExpectation[T](reference: Reference[T], function: T => Double): Double =
    computeCurrentExpectation(reference, function)

  /**
   * Returns the probability that the element referred to by the reference
   * satisfies the given predicate at the current time point.
   */
  def currentProbability[T](reference: Reference[T], predicate: T => Boolean): Double =
    computeCurrentProbability(reference, predicate)
}
| jyuhuan/figaro | Figaro/src/main/scala/com/cra/figaro/algorithm/filtering/OneTimeFiltering.scala | Scala | bsd-3-clause | 1,532 |
/*
* Copyright 2012 Arktekk AS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package no.arktekk.atom.extension.opensearch
import no.arktekk.atom.extension.opensearch.OpensearchConstants._
import no.arktekk.atom.extension.OptionSelectableElementWrapperAtomExtension
import com.codecommit.antixml._
import no.arktekk.atom._
/**
* @author Erlend Hamnaberg<erlend@hamnaberg.net>
*/
// Wraps a single integer-valued OpenSearch extension element (e.g.
// totalResults) for reading from and writing to an Atom feed.
private[opensearch] class IntAtomExtension(name:String) extends OptionSelectableElementWrapperAtomExtension[FeedLike, Int] {
  protected val selector: Selector[Elem] = NSRepr(ns) -> name
  // NOTE(review): headOption.map(...).get throws when the element is present
  // but has no text child — presumably the selector only matches well-formed
  // elements; confirm upstream contract.
  protected def function = (e) => (e \ text).headOption.map(_.toInt).get
  // Renders the optional value back into a namespaced child element (empty
  // sequence when the value is absent).
  def toChildren(a: Option[Int]) = {
    a.map(x => ElementWrapper.withNameAndText(NamespaceBinding(prefix, ns), name, x.toString)).toIndexedSeq
  }
}
| arktekk/scala-atom | src/main/scala/no/arktekk/atom/extension/opensearch/IntAtomExtension.scala | Scala | apache-2.0 | 1,312 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.stats
import java.lang.{Double => jDouble, Long => jLong}
import java.util.Date
import com.vividsolutions.jts.geom.Geometry
import org.geotools.feature.simple.SimpleFeatureBuilder
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.geotools.{GeoToolsDateFormat, SimpleFeatureTypes}
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class TopKTest extends Specification {

  val sft = SimpleFeatureTypes.createType("topk", "name:String,score:Long,height:Double,dtg:Date,*geom:Point:srid=4326")

  val builder = new SimpleFeatureBuilder(sft)

  // 100 features with a fixed frequency distribution:
  // name10 x10, name15 x5, name30 x15, name50 x20, name100 x50
  val features1 = (0 until 100).map { i =>
    if (i < 10) {
      builder.addAll(Array[AnyRef]("name10", "10", "10.0", "2010-01-01T00:00:00.000Z", "POINT(10 0)"))
    } else if (i < 15) {
      builder.addAll(Array[AnyRef]("name15", "15", "15.0", "2015-01-01T00:00:00.000Z", "POINT(15 0)"))
    } else if (i < 30) {
      builder.addAll(Array[AnyRef]("name30", "30", "30.0", "2030-01-01T00:00:00.000Z", "POINT(30 0)"))
    } else if (i < 50) {
      builder.addAll(Array[AnyRef]("name50", "50", "50.0", "2050-01-01T00:00:00.000Z", "POINT(50 0)"))
    } else {
      builder.addAll(Array[AnyRef]("name100", "100", "100.0", "2100-01-01T00:00:00.000Z", "POINT(100 0)"))
    }
    builder.buildFeature(i.toString)
  }

  // second batch with the same distribution but disjoint attribute values,
  // used to exercise merging of two TopK sketches
  val features2 = (0 until 100).map { i =>
    if (i < 10) {
      builder.addAll(Array[AnyRef]("name10-2", "210", "10.2", "2010-01-01T02:00:00.000Z", "POINT(10 2)"))
    } else if (i < 15) {
      builder.addAll(Array[AnyRef]("name15-2", "215", "15.2", "2015-01-01T02:00:00.000Z", "POINT(15 2)"))
    } else if (i < 30) {
      builder.addAll(Array[AnyRef]("name30-2", "230", "30.2", "2030-01-01T02:00:00.000Z", "POINT(30 2)"))
    } else if (i < 50) {
      builder.addAll(Array[AnyRef]("name50-2", "250", "50.2", "2050-01-01T02:00:00.000Z", "POINT(50 2)"))
    } else {
      builder.addAll(Array[AnyRef]("name100-2", "2100", "100.2", "2100-01-01T02:00:00.000Z", "POINT(100 2)"))
    }
    builder.buildFeature(i.toString)
  }

  // creates a fresh TopK stat for the given attribute of the test sft
  def createStat[T](attribute: String): TopK[T] = Stat(sft, s"TopK($attribute)").asInstanceOf[TopK[T]]

  def stringStat = createStat[String]("name")
  def longStat   = createStat[jLong]("score")
  def doubleStat = createStat[jDouble]("height")
  def dateStat   = createStat[Date]("dtg")
  def geomStat   = createStat[Geometry]("geom")

  "TopK stat" should {
    "work with strings" >> {
      "be empty initially" >> {
        val stat = stringStat
        stat.isEmpty must beTrue
        stat.topK(10) must beEmpty
      }
      "correctly calculate values" >> {
        val stat = stringStat
        features1.foreach(stat.observe)
        stat.isEmpty must beFalse
        stat.size mustEqual 5
        stat.topK(10).toSeq mustEqual Seq(("name100", 50), ("name50", 20), ("name30", 15), ("name10", 10), ("name15", 5))
      }
      "serialize and deserialize" >> {
        val stat = stringStat
        features1.foreach(stat.observe)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[TopK[String]]
        unpacked.asInstanceOf[TopK[String]].size mustEqual stat.size
        unpacked.asInstanceOf[TopK[String]].property mustEqual stat.property
        unpacked.asInstanceOf[TopK[String]].toJson mustEqual stat.toJson
        unpacked.isEquivalent(stat) must beTrue
      }
      "serialize and deserialize empty stats" >> {
        val stat = stringStat
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[TopK[String]]
        unpacked.asInstanceOf[TopK[String]].size mustEqual stat.size
        unpacked.asInstanceOf[TopK[String]].property mustEqual stat.property
        unpacked.asInstanceOf[TopK[String]].toJson mustEqual stat.toJson
        unpacked.isEquivalent(stat) must beTrue
      }
      "deserialize as immutable value" >> {
        val stat = stringStat
        features1.foreach(stat.observe)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed, immutable = true)
        unpacked must beAnInstanceOf[TopK[String]]
        unpacked.asInstanceOf[TopK[String]].size mustEqual stat.size
        unpacked.asInstanceOf[TopK[String]].property mustEqual stat.property
        unpacked.asInstanceOf[TopK[String]].toJson mustEqual stat.toJson
        unpacked.isEquivalent(stat) must beTrue
        // immutable stats must reject all mutation entry points
        unpacked.clear must throwAn[Exception]
        unpacked.+=(stat) must throwAn[Exception]
        unpacked.observe(features1.head) must throwAn[Exception]
        unpacked.unobserve(features1.head) must throwAn[Exception]
      }
      "combine two TopKs" >> {
        val stat = stringStat
        val stat2 = stringStat
        features1.foreach(stat.observe)
        features2.foreach(stat2.observe)
        stat2.size mustEqual 5
        stat2.topK(10).toSeq mustEqual Seq(("name100-2", 50), ("name50-2", 20), ("name30-2", 15), ("name10-2", 10), ("name15-2", 5))
        stat += stat2
        stat.size mustEqual 10
        stat.topK(10).toSeq mustEqual Seq(("name100", 50), ("name100-2", 50), ("name50", 20), ("name50-2", 20),
          ("name30", 15), ("name30-2", 15), ("name10", 10), ("name10-2", 10), ("name15", 5), ("name15-2", 5))
        // the right-hand side of += must be unchanged
        stat2.size mustEqual 5
        stat2.topK(10).toSeq mustEqual Seq(("name100-2", 50), ("name50-2", 20), ("name30-2", 15), ("name10-2", 10), ("name15-2", 5))
      }
      "clear" >> {
        val stat = stringStat
        features1.foreach(stat.observe)
        stat.clear()
        stat.isEmpty must beTrue
        stat.size mustEqual 0
        stat.topK(10).toSeq must beEmpty
      }
    }
    "work with longs" >> {
      "correctly calculate values" >> {
        val stat = longStat
        features1.foreach(stat.observe)
        stat.isEmpty must beFalse
        stat.size mustEqual 5
        stat.topK(10).toSeq mustEqual Seq((100L, 50), (50L, 20), (30L, 15), (10L, 10), (15L, 5))
      }
      "serialize and deserialize" >> {
        val stat = longStat
        features1.foreach(stat.observe)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        // was erroneously cast to TopK[String] (copy-paste); erasure hid the mismatch
        unpacked must beAnInstanceOf[TopK[jLong]]
        unpacked.asInstanceOf[TopK[jLong]].size mustEqual stat.size
        unpacked.asInstanceOf[TopK[jLong]].property mustEqual stat.property
        unpacked.asInstanceOf[TopK[jLong]].toJson mustEqual stat.toJson
        unpacked.isEquivalent(stat) must beTrue
      }
    }
    "work with doubles" >> {
      "correctly calculate values" >> {
        val stat = doubleStat
        features1.foreach(stat.observe)
        stat.isEmpty must beFalse
        stat.size mustEqual 5
        stat.topK(10).toSeq mustEqual Seq((100.0, 50), (50.0, 20), (30.0, 15), (10.0, 10), (15.0, 5))
      }
      "serialize and deserialize" >> {
        val stat = doubleStat
        features1.foreach(stat.observe)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[TopK[jDouble]]
        unpacked.asInstanceOf[TopK[jDouble]].size mustEqual stat.size
        unpacked.asInstanceOf[TopK[jDouble]].property mustEqual stat.property
        unpacked.asInstanceOf[TopK[jDouble]].toJson mustEqual stat.toJson
        unpacked.isEquivalent(stat) must beTrue
      }
    }
    "work with dates" >> {
      def toDate(year: Int) = java.util.Date.from(java.time.LocalDateTime.parse(f"2$year%03d-01-01T00:00:00.000Z", GeoToolsDateFormat).toInstant(java.time.ZoneOffset.UTC))
      "correctly calculate values" >> {
        val stat = dateStat
        features1.foreach(stat.observe)
        stat.isEmpty must beFalse
        stat.size mustEqual 5
        stat.topK(10).toSeq mustEqual Seq((toDate(100), 50), (toDate(50), 20), (toDate(30), 15), (toDate(10), 10), (toDate(15), 5))
      }
      "serialize and deserialize" >> {
        val stat = dateStat
        features1.foreach(stat.observe)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[TopK[Date]]
        unpacked.asInstanceOf[TopK[Date]].size mustEqual stat.size
        unpacked.asInstanceOf[TopK[Date]].property mustEqual stat.property
        unpacked.asInstanceOf[TopK[Date]].toJson mustEqual stat.toJson
        unpacked.isEquivalent(stat) must beTrue
      }
    }
    "work with geometries" >> {
      def toGeom(lon: Int) = WKTUtils.read(s"POINT($lon 0)")
      "correctly calculate values" >> {
        val stat = geomStat
        features1.foreach(stat.observe)
        stat.isEmpty must beFalse
        stat.size mustEqual 5
        stat.topK(10).toSeq mustEqual Seq((toGeom(100), 50), (toGeom(50), 20), (toGeom(30), 15), (toGeom(10), 10), (toGeom(15), 5))
      }
      "serialize and deserialize" >> {
        val stat = geomStat
        features1.foreach(stat.observe)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[TopK[Geometry]]
        unpacked.asInstanceOf[TopK[Geometry]].size mustEqual stat.size
        unpacked.asInstanceOf[TopK[Geometry]].property mustEqual stat.property
        unpacked.asInstanceOf[TopK[Geometry]].toJson mustEqual stat.toJson
        unpacked.isEquivalent(stat) must beTrue
      }
    }
  }
}
| ddseapy/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/stats/TopKTest.scala | Scala | apache-2.0 | 10,185 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan
import org.apache.flink.api.common.typeutils.CompositeType
import org.apache.flink.table.api.{TableEnvironment, ValidationException}
import org.apache.flink.table.expressions._
import org.apache.flink.table.plan.logical.{LogicalNode, LogicalOverWindow, Project}
import org.apache.flink.table.typeutils.RowIntervalTypeInfo
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
object ProjectionTranslator {

  /**
   * Extracts and deduplicates all aggregation and window property expressions (zero, one, or more)
   * from the given expressions.
   *
   * @param exprs a list of expressions to extract
   * @param tableEnv the TableEnvironment
   * @return a Tuple2, the first field contains the extracted and deduplicated aggregations,
   *         and the second field contains the extracted and deduplicated window properties.
   */
  def extractAggregationsAndProperties(
      exprs: Seq[PlannerExpression],
      tableEnv: TableEnvironment)
    : (Map[PlannerExpression, String], Map[PlannerExpression, String]) = {
    exprs.foldLeft((Map[PlannerExpression, String](), Map[PlannerExpression, String]())) {
      (x, y) => identifyAggregationsAndProperties(y, tableEnv, x._1, x._2)
    }
  }

  /**
   * Identifies and deduplicates aggregation functions and window properties.
   *
   * Walks the expression tree recursively; each aggregation / window property seen for the
   * first time is assigned a unique attribute name via `tableEnv.createUniqueAttributeName()`.
   */
  private def identifyAggregationsAndProperties(
      exp: PlannerExpression,
      tableEnv: TableEnvironment,
      aggNames: Map[PlannerExpression, String],
      propNames: Map[PlannerExpression, String])
    : (Map[PlannerExpression, String], Map[PlannerExpression, String]) = {
    exp match {
      case agg: Aggregation =>
        if (aggNames contains agg) {
          (aggNames, propNames)
        } else {
          (aggNames + (agg -> tableEnv.createUniqueAttributeName()), propNames)
        }

      case prop: WindowProperty =>
        if (propNames contains prop) {
          (aggNames, propNames)
        } else {
          (aggNames, propNames + (prop -> tableEnv.createUniqueAttributeName()))
        }

      case l: LeafExpression =>
        (aggNames, propNames)

      case u: UnaryExpression =>
        identifyAggregationsAndProperties(u.child, tableEnv, aggNames, propNames)

      case b: BinaryExpression =>
        val l = identifyAggregationsAndProperties(b.left, tableEnv, aggNames, propNames)
        identifyAggregationsAndProperties(b.right, tableEnv, l._1, l._2)

      // only the arguments of a scalar function call may contain aggregations/properties
      // (unused binders `sfc @` / `clazz` dropped)
      case PlannerScalarFunctionCall(_, args) =>
        args.foldLeft((aggNames, propNames)){
          (x, y) => identifyAggregationsAndProperties(y, tableEnv, x._1, x._2)
        }

      // General expression: fold over all expression-typed product elements
      case e: PlannerExpression =>
        e.productIterator.foldLeft((aggNames, propNames)){
          (x, y) => y match {
            case e: PlannerExpression => identifyAggregationsAndProperties(e, tableEnv, x._1, x._2)
            case _ => (x._1, x._2)
          }
        }

      // Expression is null
      case null =>
        throw new ValidationException("Scala 'null' is not a valid expression. " +
          "Use 'Null(TYPE)' to specify typed null expressions. For example: Null(Types.INT)")
    }
  }

  /**
   * Replaces expressions with deduplicated aggregations and properties.
   *
   * @param exprs a list of expressions to replace
   * @param tableEnv the TableEnvironment
   * @param aggNames the deduplicated aggregations
   * @param propNames the deduplicated properties
   * @return a list of replaced expressions
   */
  def replaceAggregationsAndProperties(
      exprs: Seq[PlannerExpression],
      tableEnv: TableEnvironment,
      aggNames: Map[PlannerExpression, String],
      propNames: Map[PlannerExpression, String]): Seq[NamedExpression] = {
    val projectedNames = new mutable.HashSet[String]
    exprs.map((exp: PlannerExpression) => replaceAggregationsAndProperties(exp, tableEnv,
      aggNames, propNames, projectedNames))
      .map(UnresolvedAlias)
  }

  /**
   * Rewrites a single expression, substituting each aggregation / window property with a
   * field reference to its deduplicated name. A name that is projected more than once is
   * re-aliased on subsequent occurrences to keep output names unique.
   */
  private def replaceAggregationsAndProperties(
      exp: PlannerExpression,
      tableEnv: TableEnvironment,
      aggNames: Map[PlannerExpression, String],
      propNames: Map[PlannerExpression, String],
      projectedNames: mutable.HashSet[String])
    : PlannerExpression = {
    exp match {
      case agg: Aggregation =>
        val name = aggNames(agg)
        if (projectedNames.add(name)) {
          UnresolvedFieldReference(name)
        } else {
          // same aggregation projected twice: alias the repeated occurrence
          Alias(UnresolvedFieldReference(name), tableEnv.createUniqueAttributeName())
        }

      case prop: WindowProperty =>
        val name = propNames(prop)
        if (projectedNames.add(name)) {
          UnresolvedFieldReference(name)
        } else {
          Alias(UnresolvedFieldReference(name), tableEnv.createUniqueAttributeName())
        }

      // explicit aliases keep their user-supplied name (unused `n @` binders dropped)
      case Alias(agg: Aggregation, name, _) =>
        val aName = aggNames(agg)
        Alias(UnresolvedFieldReference(aName), name)

      case Alias(prop: WindowProperty, name, _) =>
        val pName = propNames(prop)
        Alias(UnresolvedFieldReference(pName), name)

      case l: LeafExpression => l

      case u: UnaryExpression =>
        val c = replaceAggregationsAndProperties(u.child, tableEnv,
          aggNames, propNames, projectedNames)
        u.makeCopy(Array(c))

      case b: BinaryExpression =>
        val l = replaceAggregationsAndProperties(b.left, tableEnv,
          aggNames, propNames, projectedNames)
        val r = replaceAggregationsAndProperties(b.right, tableEnv,
          aggNames, propNames, projectedNames)
        b.makeCopy(Array(l, r))

      case sfc @ PlannerScalarFunctionCall(clazz, args) =>
        val newArgs: Seq[PlannerExpression] = args
          .map((exp: PlannerExpression) =>
            replaceAggregationsAndProperties(exp, tableEnv, aggNames, propNames, projectedNames))
        sfc.makeCopy(Array(clazz, newArgs))

      // array constructor (elements accessed via `c.elements`; unused `args` binder dropped)
      case c @ ArrayConstructor(_) =>
        val newArgs = c.elements
          .map((exp: PlannerExpression) =>
            replaceAggregationsAndProperties(exp, tableEnv, aggNames, propNames, projectedNames))
        c.makeCopy(Array(newArgs))

      // map constructor
      case c @ MapConstructor(_) =>
        val newArgs = c.elements
          .map((exp: PlannerExpression) =>
            replaceAggregationsAndProperties(exp, tableEnv, aggNames, propNames, projectedNames))
        c.makeCopy(Array(newArgs))

      // General expression
      // NOTE(review): assumes every product element of a general PlannerExpression is
      // itself a PlannerExpression; a non-expression element would raise a MatchError
      // (same behavior as before).
      case e: PlannerExpression =>
        val newArgs = e.productIterator.map {
          case arg: PlannerExpression =>
            replaceAggregationsAndProperties(arg, tableEnv, aggNames, propNames, projectedNames)
        }
        e.makeCopy(newArgs.toArray)
    }
  }

  /**
   * Expands an UnresolvedFieldReference("*") to parent's full project list.
   * Also flattens `Flattening` expressions over composite types into one projection per field.
   */
  def expandProjectList(
      exprs: Seq[PlannerExpression],
      parent: LogicalNode,
      tableEnv: TableEnvironment)
    : Seq[PlannerExpression] = {

    val projectList = new ListBuffer[PlannerExpression]

    exprs.foreach {
      case n: UnresolvedFieldReference if n.name == "*" =>
        projectList ++= parent.output.map(a => UnresolvedFieldReference(a.name))

      case Flattening(unresolved) =>
        // simulate a simple project to resolve fields using current parent
        val project = Project(Seq(UnresolvedAlias(unresolved)), parent).validate(tableEnv)
        val resolvedExpr = project
          .output
          .headOption
          .getOrElse(throw new RuntimeException("Could not find resolved composite."))
        resolvedExpr.validateInput()
        // a composite type expands into one projection per field; any other type
        // passes through unchanged (unused `newProjects` binding removed and the
        // side-effect-only `map` replaced with `foreach`)
        resolvedExpr.resultType match {
          case ct: CompositeType[_] =>
            (0 until ct.getArity).foreach { idx =>
              projectList += GetCompositeField(unresolved, ct.getFieldNames()(idx))
            }
          case _ =>
            projectList += unresolved
        }

      case e: PlannerExpression => projectList += e
    }
    projectList
  }

  /**
   * Resolves any [[UnresolvedOverCall]] in the given expressions against the defined
   * over-windows.
   */
  def resolveOverWindows(
      exprs: Seq[PlannerExpression],
      overWindows: Seq[LogicalOverWindow],
      tEnv: TableEnvironment): Seq[PlannerExpression] = {
    exprs.map(e => replaceOverCall(e, overWindows, tEnv))
  }

  /**
   * Find and replace UnresolvedOverCall with OverCall
   *
   * @param expr the expression to check
   * @return an expression with correct resolved OverCall
   */
  private def replaceOverCall(
      expr: PlannerExpression,
      overWindows: Seq[LogicalOverWindow],
      tableEnv: TableEnvironment)
    : PlannerExpression = {

    expr match {
      case u: UnresolvedOverCall =>
        // look up the window by its alias; an unmatched alias is left unresolved
        overWindows.find(_.alias.equals(u.alias)) match {
          case Some(overWindow) =>
            OverCall(
              u.agg,
              overWindow.partitionBy,
              overWindow.orderBy,
              overWindow.preceding,
              overWindow.following.getOrElse {
                // set following to CURRENT_ROW / CURRENT_RANGE if not defined
                if (overWindow.preceding.resultType.isInstanceOf[RowIntervalTypeInfo]) {
                  CurrentRow()
                } else {
                  CurrentRange()
                }
              })
          case None => u
        }

      case u: UnaryExpression =>
        val c = replaceOverCall(u.child, overWindows, tableEnv)
        u.makeCopy(Array(c))

      case b: BinaryExpression =>
        val l = replaceOverCall(b.left, overWindows, tableEnv)
        val r = replaceOverCall(b.right, overWindows, tableEnv)
        b.makeCopy(Array(l, r))

      // Scala functions
      case sfc @ PlannerScalarFunctionCall(clazz, args: Seq[PlannerExpression]) =>
        val newArgs: Seq[PlannerExpression] =
          args.map(
            (exp: PlannerExpression) =>
              replaceOverCall(exp, overWindows, tableEnv))
        sfc.makeCopy(Array(clazz, newArgs))

      // Array constructor
      case c @ ArrayConstructor(_) =>
        val newArgs =
          c.elements
            .map((exp: PlannerExpression) => replaceOverCall(exp, overWindows, tableEnv))
        c.makeCopy(Array(newArgs))

      // Other expressions
      // NOTE(review): unlike the other traversals here, general expressions are NOT
      // recursed into — kept as-is to preserve existing behavior.
      case e: PlannerExpression => e
    }
  }

  /**
   * Extract all field references from the given expressions.
   *
   * @param exprs a list of expressions to extract
   * @return a list of field references extracted from the given expressions
   */
  def extractFieldReferences(exprs: Seq[PlannerExpression]): Seq[NamedExpression] = {
    exprs.foldLeft(Set[NamedExpression]()) {
      (fieldReferences, expr) => identifyFieldReferences(expr, fieldReferences)
    }.toSeq
  }

  /** Recursively collects field references, skipping those under window properties. */
  private def identifyFieldReferences(
      expr: PlannerExpression,
      fieldReferences: Set[NamedExpression]): Set[NamedExpression] = expr match {

    case f: UnresolvedFieldReference =>
      fieldReferences + UnresolvedAlias(f)

    case b: BinaryExpression =>
      val l = identifyFieldReferences(b.left, fieldReferences)
      identifyFieldReferences(b.right, l)

    case PlannerScalarFunctionCall(_, args: Seq[PlannerExpression]) =>
      args.foldLeft(fieldReferences) {
        (fieldReferences, expr) => identifyFieldReferences(expr, fieldReferences)
      }

    case AggFunctionCall(_, _, _, args) =>
      args.foldLeft(fieldReferences) {
        (fieldReferences, expr) => identifyFieldReferences(expr, fieldReferences)
      }

    // array constructor
    case ArrayConstructor(args) =>
      args.foldLeft(fieldReferences) {
        (fieldReferences, expr) => identifyFieldReferences(expr, fieldReferences)
      }

    // ignore fields from window property
    case _: WindowProperty =>
      fieldReferences

    // keep this case after all unwanted unary expressions
    case u: UnaryExpression =>
      identifyFieldReferences(u.child, fieldReferences)

    // General expression
    case e: PlannerExpression =>
      e.productIterator.foldLeft(fieldReferences) {
        (fieldReferences, expr) => expr match {
          case e: PlannerExpression => identifyFieldReferences(e, fieldReferences)
          case _ => fieldReferences
        }
      }
  }
}
| ueshin/apache-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/ProjectionTranslator.scala | Scala | apache-2.0 | 12,940 |
package org.kimbasoft.akka.eventbus
import akka.actor.{DeadLetter, Props, Actor}
import org.kimbasoft.akka.eventbus.EventBusMessages._
import scala.util.{Failure, Success}
/**
* Missing documentation.
*
* @author <a href="steffen.krause@soabridge.com">Steffen Krause</a>
* @since 1.0
*/
/**
 * Demo actor that prints every message it handles, prefixed with its `name`.
 *
 * Handles the direct bus messages, published bus events, and `DeadLetter`
 * envelopes that wrap a [[BusResponseMessage]]. The order of the last two
 * cases matters: the typed `String` case must precede the generic catch-all.
 */
class EventBusActor(name: String) extends Actor {
  def receive: Receive = {
    // fire-and-forget message: just log the payload
    case BusMessage(message) =>
      println(s"""$name: received BusMessage with payload "$message"""")
    // request/response: reply to the sender with a successful response
    case BusRequestMessage(message) =>
      println(s"""$name: received BusRequestMessage with payload "$message", responding""")
      sender ! BusResponseMessage(Success(s"""Processed message "$message""""))
    // event received via the system event stream
    case BusPublication(message) =>
      println(s"""$name: received BusPublication with payload "$message"""")
    // re-publish the payload as a BusPublication on the system event stream
    case BusPublicationRequest(message) =>
      println(s"""$name: received BusPublicationRequest with payload "$message", publishing""")
      context.system.eventStream.publish(BusPublication(message))
    // a BusResponseMessage that could not be delivered; unwrap its Try payload
    case DeadLetter(message @ BusResponseMessage(payload), sender, recipient) =>
      payload match {
        case Success(response) =>
          println(s"""$name: received message "$response" via DeadLetter[$sender]""")
        case Failure(response) =>
          println(s"""$name: received failure "$response" via DeadLetter""")
      }
    // plain String events subscribed via the event stream
    case event: String =>
      println(s"""$name: received String event "$event"""")
    // catch-all: anything not matched above
    case event =>
      println(s"""$name: received unrecognized event "$event"""")
  }
}
object EventBusActor {
  /**
   * Creates [[Props]] for an [[EventBusActor]]; `name` is used only as the
   * prefix of the actor's log output.
   */
  def props(name: String): Props = Props(classOf[EventBusActor], name)
}
| kimba74/sandbox-scala | src/main/scala/org/kimbasoft/akka/eventbus/EventBusActor.scala | Scala | gpl-3.0 | 1,625 |
package api.status
import java.io.File
import java.util.UUID
import org.json4s.JInt
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.{ compact, parse, render }
import api.common.DsmoqSpec
import dsmoq.AppConf
import dsmoq.controllers.AjaxResponse
import dsmoq.services.json.GroupData.Group
import dsmoq.services.json.GroupData.GroupAddImages
import scalikejdbc.config.DBsWithEnv
class GroupStatusCheckSpec extends DsmoqSpec {
private val dummyImage = new File("../testdata/image/1byteover.png")
private val dummyFile = new File("../testdata/test1.csv")
private val dummyZipFile = new File("../testdata/test1.zip")
private val testUserName = "dummy1"
private val dummyUserName = "dummy4"
private val testUserId = "023bfa40-e897-4dad-96db-9fd3cf001e79" // dummy1
private val dummyUserId = "cc130a5e-cb93-4ec2-80f6-78fa83f9bd04" // dummy 2
private val dummyUserLoginParams = Map("d" -> compact(render(("id" -> "dummy4") ~ ("password" -> "password"))))
private val OK = "OK"
private val ILLEGAL_ARGUMENT = "Illegal Argument"
private val UNAUTHORIZED = "Unauthorized"
private val NOT_FOUND = "NotFound"
private val ACCESS_DENIED = "AccessDenied"
private val BAD_REQUEST = "BadRequest"
private val NG = "NG"
private val invalidApiKeyHeader = Map("Authorization" -> "api_key=hoge,signature=fuga")
"API Status test" - {
"group" - {
"GET /api/groups" - {
"400(Illegal Argument)" in {
val params = Map("d" -> compact(render(("limit" -> JInt(-1)))))
get("api/groups", params) {
checkStatus(400, ILLEGAL_ARGUMENT)
}
}
"403(Unauthorized)" in {
val params = Map("d" -> compact(render(("limit" -> JInt(1)))))
get("api/groups", params, invalidApiKeyHeader) {
checkStatus(403, UNAUTHORIZED)
}
}
"500(NG)" in {
val params = Map("d" -> compact(render(("limit" -> JInt(1)))))
dbDisconnectedBlock {
get("api/groups", params) {
checkStatus(500, NG)
}
}
}
"All" in {
val params = Map("d" -> compact(render(("limit" -> JInt(-1)))))
get("api/groups", params, invalidApiKeyHeader) {
checkStatus(400, ILLEGAL_ARGUMENT)
}
}
"400(Illegal Argument) * 500(NG)" in {
val params = Map("d" -> compact(render(("limit" -> JInt(-1)))))
dbDisconnectedBlock {
get("api/groups", params) {
checkStatus(400, ILLEGAL_ARGUMENT)
}
}
}
"403(Unauthorized) * 500(NG)" in {
val params = Map("d" -> compact(render(("limit" -> JInt(1)))))
dbDisconnectedBlock {
get("api/groups", params, invalidApiKeyHeader) {
checkStatus(500, NG)
}
}
}
}
"GET /api/groups/:group_id" - {
"404(Illegal Argument)" in {
session {
signIn()
get(s"/api/groups/test") {
checkStatus(404, ILLEGAL_ARGUMENT)
}
}
}
"403(Unauthorized)" in {
session {
signIn()
val groupId = createGroup()
get(s"/api/groups/${groupId}", Map.empty, invalidApiKeyHeader) {
checkStatus(403, UNAUTHORIZED)
}
}
}
"404(NotFound)" in {
session {
signIn()
val dummyId = UUID.randomUUID.toString
get(s"/api/groups/${dummyId}") {
checkStatus(404, NOT_FOUND)
}
}
}
"500(NG)" in {
session {
signIn()
val groupId = createGroup()
dbDisconnectedBlock {
get(s"/api/groups/${groupId}") {
checkStatus(500, NG)
}
}
}
}
"All" in {
session {
dummySignIn()
val groupId = createGroup()
get(s"/api/groups/test", Map.empty, invalidApiKeyHeader) {
checkStatus(404, ILLEGAL_ARGUMENT)
}
}
}
"404(Illegal Argument) * 500(NG)" in {
session {
signIn()
dbDisconnectedBlock {
get(s"/api/groups/test") {
checkStatus(404, ILLEGAL_ARGUMENT)
}
}
}
}
"403(Unauthorized) * 500(NG)" in {
session {
signIn()
val groupId = createGroup()
dbDisconnectedBlock {
get(s"/api/groups/${groupId}", Map.empty, invalidApiKeyHeader) {
checkStatus(500, NG)
}
}
}
}
"403(Unauthorized) * 404(NotFound)" in {
session {
signIn()
val dummyId = UUID.randomUUID.toString
get(s"/api/groups/${dummyId}", Map.empty, invalidApiKeyHeader) {
checkStatus(403, UNAUTHORIZED)
}
}
}
}
"GET /api/groups/:group_id/members" - {
"400(Illegal Argument)" in {
session {
signIn()
val groupId = createGroup()
val params = Map("d" -> compact(render(("limit" -> JInt(-1)))))
get(s"/api/groups/${groupId}/members", params) {
checkStatus(400, ILLEGAL_ARGUMENT)
}
}
}
"404(Illegal Argument)" in {
session {
signIn()
val params = Map("d" -> compact(render(("limit" -> JInt(1)))))
get(s"/api/groups/test/members", params) {
checkStatus(404, ILLEGAL_ARGUMENT)
}
}
}
"403(Unauthorized)" in {
session {
signIn()
val groupId = createGroup()
val params = Map("d" -> compact(render(("limit" -> JInt(1)))))
get(s"/api/groups/${groupId}/members", params, invalidApiKeyHeader) {
checkStatus(403, UNAUTHORIZED)
}
}
}
"404(NotFound)" in {
session {
signIn()
val dummyId = UUID.randomUUID.toString
val params = Map("d" -> compact(render(("limit" -> JInt(1)))))
get(s"/api/groups/${dummyId}/members", params) {
checkStatus(404, NOT_FOUND)
}
}
}
"500(NG)" in {
session {
signIn()
val groupId = createGroup()
val params = Map("d" -> compact(render(("limit" -> JInt(1)))))
dbDisconnectedBlock {
get(s"/api/groups/${groupId}/members", params) {
checkStatus(500, NG)
}
}
}
}
"All" in {
session {
dummySignIn()
val params = Map("d" -> compact(render(("limit" -> JInt(-1)))))
get(s"/api/groups/test/members", params, invalidApiKeyHeader) {
checkStatus(400, ILLEGAL_ARGUMENT)
}
}
}
"400(Illegal Argument) * 404(Illegal Argument)" in {
session {
signIn()
val groupId = createGroup()
val params = Map("d" -> compact(render(("limit" -> JInt(-1)))))
get(s"/api/groups/test/members", params) {
checkStatus(400, ILLEGAL_ARGUMENT)
}
}
}
"400(Illegal Argument) * 500(NG)" in {
session {
signIn()
val groupId = createGroup()
val params = Map("d" -> compact(render(("limit" -> JInt(-1)))))
dbDisconnectedBlock {
get(s"/api/groups/${groupId}/members", params) {
checkStatus(400, ILLEGAL_ARGUMENT)
}
}
}
}
"404(Illegal Argument) * 500(NG)" in {
session {
signIn()
val params = Map("d" -> compact(render(("limit" -> JInt(1)))))
dbDisconnectedBlock {
get(s"/api/groups/test/members", params) {
checkStatus(404, ILLEGAL_ARGUMENT)
}
}
}
}
"403(Unauthorized) * 500(NG)" in {
session {
signIn()
val groupId = createGroup()
val params = Map("d" -> compact(render(("limit" -> JInt(1)))))
dbDisconnectedBlock {
get(s"/api/groups/${groupId}/members", params, invalidApiKeyHeader) {
checkStatus(500, NG)
}
}
}
}
}
"POST /api/groups" - {
"400(Illegal Argument)" in {
session {
signIn()
post("/api/groups") {
checkStatus(400, ILLEGAL_ARGUMENT)
}
}
}
"403(Unauthorized)" in {
session {
val params = Map("d" -> compact(render(("name" -> "test1") ~ ("description" -> ""))))
post("/api/groups", params) {
checkStatus(403, UNAUTHORIZED)
}
}
}
"400(BadRequest)" in {
session {
signIn()
val params = Map("d" -> compact(render(("name" -> "test1") ~ ("description" -> ""))))
post("/api/groups", params) {
checkStatus(200, OK)
}
post("/api/groups", params) {
checkStatus(400, BAD_REQUEST)
}
}
}
"500(NG)" in {
session {
signIn()
val params = Map("d" -> compact(render(("name" -> "test1") ~ ("description" -> ""))))
dbDisconnectedBlock {
post("/api/groups", params) {
checkStatus(500, NG)
}
}
}
}
"All" in {
session {
signIn()
val params = Map("d" -> compact(render(("name" -> "test1") ~ ("description" -> ""))))
post("/api/groups", params) {
checkStatus(200, OK)
}
signOut()
post("/api/groups") {
checkStatus(400, ILLEGAL_ARGUMENT)
}
}
}
"400(Illegal Argument) * 500(NG)" in {
session {
signIn()
dbDisconnectedBlock {
post("/api/groups") {
checkStatus(400, ILLEGAL_ARGUMENT)
}
}
}
}
"403(Unauthorized) * 500(NG)" in {
session {
val params = Map("d" -> compact(render(("name" -> "test1") ~ ("description" -> ""))))
dbDisconnectedBlock {
post("/api/groups", params) {
checkStatus(403, UNAUTHORIZED)
}
}
}
}
}
"PUT /api/groups/:group_id" - {
"400(Illegal Argument)" in {
session {
signIn()
val groupId = createGroup()
put(s"/api/groups/${groupId}") {
checkStatus(400, ILLEGAL_ARGUMENT)
}
}
}
"404(Illegal Argument)" in {
session {
signIn()
val params = Map("d" -> compact(render(("name" -> "test1") ~ ("description" -> "desc1"))))
put(s"/api/groups/test", params) {
checkStatus(404, ILLEGAL_ARGUMENT)
}
}
}
"403(Unauthorized)" in {
session {
signIn()
val groupId = createGroup()
val params = Map("d" -> compact(render(("name" -> "test1") ~ ("description" -> "desc1"))))
signOut()
put(s"/api/groups/${groupId}", params) {
checkStatus(403, UNAUTHORIZED)
}
}
}
      // Remaining PUT /api/groups/:group_id cases: single failure conditions,
      // then combined conditions that pin which error status takes precedence.
      "404(NotFound)" in {
        session {
          signIn()
          val dummyId = UUID.randomUUID.toString
          val params = Map("d" -> compact(render(("name" -> "test1") ~ ("description" -> "desc1"))))
          put(s"/api/groups/${dummyId}", params) {
            checkStatus(404, NOT_FOUND)
          }
        }
      }
      "403(AccessDenied)" in {
        session {
          signIn()
          val groupId = createGroup()
          val params = Map("d" -> compact(render(("name" -> "test1") ~ ("description" -> "desc1"))))
          signOut()
          dummySignIn()
          put(s"/api/groups/${groupId}", params) {
            checkStatus(403, ACCESS_DENIED)
          }
        }
      }
      // NOTE(review): the POST below creates a second group named "test1" (200 OK),
      // so the 400 on the PUT presumably comes from the duplicate name, not from
      // the empty description -- confirm against the API's validation rules.
      "400(BadRequest)" in {
        session {
          signIn()
          val groupId = createGroup()
          val params = Map("d" -> compact(render(("name" -> "test1") ~ ("description" -> ""))))
          post("/api/groups", params) {
            checkStatus(200, OK)
          }
          put(s"/api/groups/${groupId}", params) {
            checkStatus(400, BAD_REQUEST)
          }
        }
      }
      "500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          val params = Map("d" -> compact(render(("name" -> "test1") ~ ("description" -> "desc1"))))
          dbDisconnectedBlock {
            put(s"/api/groups/${groupId}", params) {
              checkStatus(500, NG)
            }
          }
        }
      }
      // "All": every failure condition at once; the malformed group id ("test")
      // wins with 400 Illegal Argument.
      "All" in {
        session {
          dummySignIn()
          put(s"/api/groups/test") {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "400(Illegal Argument) * 404(Illegal Argument)" in {
        session {
          signIn()
          put(s"/api/groups/test") {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "400(Illegal Argument) * 500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          dbDisconnectedBlock {
            put(s"/api/groups/${groupId}") {
              checkStatus(400, ILLEGAL_ARGUMENT)
            }
          }
        }
      }
      "404(Illegal Argument) * 500(NG)" in {
        session {
          signIn()
          val params = Map("d" -> compact(render(("name" -> "test1") ~ ("description" -> "desc1"))))
          dbDisconnectedBlock {
            put(s"/api/groups/test", params) {
              checkStatus(404, ILLEGAL_ARGUMENT)
            }
          }
        }
      }
      "403(Unauthorized) * 500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          val params = Map("d" -> compact(render(("name" -> "test1") ~ ("description" -> "desc1"))))
          signOut()
          dbDisconnectedBlock {
            put(s"/api/groups/${groupId}", params) {
              checkStatus(403, UNAUTHORIZED)
            }
          }
        }
      }
      "403(Unauthorized) * 404(NotFound)" in {
        session {
          signIn()
          val dummyId = UUID.randomUUID.toString
          val params = Map("d" -> compact(render(("name" -> "test1") ~ ("description" -> "desc1"))))
          signOut()
          put(s"/api/groups/${dummyId}", params) {
            checkStatus(403, UNAUTHORIZED)
          }
        }
      }
      "403(AccessDenied) * 400(BadRequest)" in {
        session {
          signIn()
          val groupId = createGroup()
          val params = Map("d" -> compact(render(("name" -> "test1") ~ ("description" -> ""))))
          post("/api/groups", params) {
            checkStatus(200, OK)
          }
          signOut()
          dummySignIn()
          put(s"/api/groups/${groupId}", params) {
            checkStatus(403, ACCESS_DENIED)
          }
        }
      }
}
    // Status-code checks for GET /api/groups/:group_id/images: each single
    // failure condition, then combinations to pin error precedence.
    "GET /api/groups/:group_id/images" - {
      "400(Illegal Argument)" in {
        session {
          signIn()
          val groupId = createGroup()
          val params = Map("d" -> compact(render(("limit" -> JInt(-1)))))
          get(s"/api/groups/${groupId}/images", params) {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "404(Illegal Argument)" in {
        session {
          signIn()
          val params = Map("d" -> compact(render(("limit" -> JInt(1)))))
          get(s"/api/groups/test/images", params) {
            checkStatus(404, ILLEGAL_ARGUMENT)
          }
        }
      }
      "403(Unauthorized)" in {
        session {
          signIn()
          val groupId = createGroup()
          val params = Map("d" -> compact(render(("limit" -> JInt(1)))))
          get(s"/api/groups/${groupId}/images", params, invalidApiKeyHeader) {
            checkStatus(403, UNAUTHORIZED)
          }
        }
      }
      "404(NotFound)" in {
        session {
          signIn()
          val dummyId = UUID.randomUUID.toString
          val params = Map("d" -> compact(render(("limit" -> JInt(1)))))
          get(s"/api/groups/${dummyId}/images", params) {
            checkStatus(404, NOT_FOUND)
          }
        }
      }
      "500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          val params = Map("d" -> compact(render(("limit" -> JInt(1)))))
          dbDisconnectedBlock {
            get(s"/api/groups/${groupId}/images", params) {
              checkStatus(500, NG)
            }
          }
        }
      }
      "All" in {
        session {
          signIn()
          val groupId = createGroup()
          val params = Map("d" -> compact(render(("limit" -> JInt(-1)))))
          get(s"/api/groups/test/images", params, invalidApiKeyHeader) {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "400(Illegal Argument) * 404(Illegal Argument)" in {
        session {
          signIn()
          val groupId = createGroup()
          val params = Map("d" -> compact(render(("limit" -> JInt(-1)))))
          get(s"/api/groups/test/images", params) {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "400(Illegal Argument) * 500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          val params = Map("d" -> compact(render(("limit" -> JInt(-1)))))
          dbDisconnectedBlock {
            get(s"/api/groups/${groupId}/images", params) {
              checkStatus(400, ILLEGAL_ARGUMENT)
            }
          }
        }
      }
      "404(Illegal Argument) * 500(NG)" in {
        session {
          signIn()
          val params = Map("d" -> compact(render(("limit" -> JInt(1)))))
          dbDisconnectedBlock {
            get(s"/api/groups/test/images", params) {
              checkStatus(404, ILLEGAL_ARGUMENT)
            }
          }
        }
      }
      // NOTE(review): here 500 wins over the invalid API key, i.e. the key
      // check itself apparently needs the DB -- expected per this suite.
      "403(Unauthorized) * 500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          val params = Map("d" -> compact(render(("limit" -> JInt(1)))))
          dbDisconnectedBlock {
            get(s"/api/groups/${groupId}/images", params, invalidApiKeyHeader) {
              checkStatus(500, NG)
            }
          }
        }
      }
      "403(Unauthorized) * 404(NotFound)" in {
        session {
          signIn()
          val dummyId = UUID.randomUUID.toString
          val params = Map("d" -> compact(render(("limit" -> JInt(1)))))
          get(s"/api/groups/${dummyId}/images", params, invalidApiKeyHeader) {
            checkStatus(403, UNAUTHORIZED)
          }
        }
      }
    }
    // Status-code checks for POST /api/groups/:group_id/images (image upload).
    "POST /api/groups/:group_id/images" - {
      "400(Illegal Argument)" in {
        session {
          signIn()
          val groupId = createGroup()
          post(s"/api/groups/${groupId}/images", Map.empty) {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "404(Illegal Argument)" in {
        session {
          signIn()
          val images = Map("images" -> dummyImage)
          post(s"/api/groups/test/images", Map.empty, images) {
            checkStatus(404, ILLEGAL_ARGUMENT)
          }
        }
      }
      "403(Unauthorized)" in {
        session {
          signIn()
          val groupId = createGroup()
          val images = Map("images" -> dummyImage)
          signOut()
          post(s"/api/groups/${groupId}/images", Map.empty, images) {
            checkStatus(403, UNAUTHORIZED)
          }
        }
      }
      "404(NotFound)" in {
        session {
          signIn()
          val dummyId = UUID.randomUUID.toString
          val images = Map("images" -> dummyImage)
          post(s"/api/groups/${dummyId}/images", Map.empty, images) {
            checkStatus(404, NOT_FOUND)
          }
        }
      }
      "403(AccessDenied)" in {
        session {
          signIn()
          val groupId = createGroup()
          val images = Map("images" -> dummyImage)
          signOut()
          dummySignIn()
          post(s"/api/groups/${groupId}/images", Map.empty, images) {
            checkStatus(403, ACCESS_DENIED)
          }
        }
      }
      "500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          val images = Map("images" -> dummyImage)
          dbDisconnectedBlock {
            post(s"/api/groups/${groupId}/images", Map.empty, images) {
              checkStatus(500, NG)
            }
          }
        }
      }
      // "All": not even signed in; the malformed id still wins with 400.
      "All" in {
        session {
          post(s"/api/groups/test/images", Map.empty) {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "400(Illegal Argument) * 404(Illegal Argument)" in {
        session {
          signIn()
          val groupId = createGroup()
          post(s"/api/groups/test/images", Map.empty) {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "400(Illegal Argument) * 500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          dbDisconnectedBlock {
            post(s"/api/groups/${groupId}/images", Map.empty) {
              checkStatus(400, ILLEGAL_ARGUMENT)
            }
          }
        }
      }
"404(Illegal Argument) * 500(NG)" in {
session {
signIn()
val images = Map("images" -> dummyImage)
post(s"/api/groups/test/images", Map.empty, images) {
checkStatus(404, ILLEGAL_ARGUMENT)
}
}
}
      "403(Unauthorized) * 500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          val images = Map("images" -> dummyImage)
          signOut()
          dbDisconnectedBlock {
            post(s"/api/groups/${groupId}/images", Map.empty, images) {
              checkStatus(403, UNAUTHORIZED)
            }
          }
        }
      }
      // Nonexistent group as a different user: 404 wins over access denial.
      "404(NotFound) * 403(AccessDenied)" in {
        session {
          signIn()
          val dummyId = UUID.randomUUID.toString
          val images = Map("images" -> dummyImage)
          signOut()
          dummySignIn()
          post(s"/api/groups/${dummyId}/images", Map.empty, images) {
            checkStatus(404, NOT_FOUND)
          }
        }
      }
    }
    // Status-code checks for PUT /api/groups/:group_id/images/primary
    // (setting the group's primary image).
    "PUT /api/groups/:group_id/images/primary" - {
      "400(Illegal Argument)" in {
        session {
          signIn()
          val groupId = createGroup()
          put(s"/api/groups/${groupId}/images/primary") {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "404(Illegal Argument)" in {
        session {
          signIn()
          val imageId = AppConf.defaultGroupImageId
          val params = Map("d" -> compact(render(("imageId" -> imageId))))
          put(s"/api/groups/test/images/primary", params) {
            checkStatus(404, ILLEGAL_ARGUMENT)
          }
        }
      }
      "403(Unauthorized)" in {
        session {
          signIn()
          val groupId = createGroup()
          val imageId = AppConf.defaultGroupImageId
          val params = Map("d" -> compact(render(("imageId" -> imageId))))
          signOut()
          put(s"/api/groups/${groupId}/images/primary", params) {
            checkStatus(403, UNAUTHORIZED)
          }
        }
      }
      "404(NotFound)" in {
        session {
          signIn()
          val dummyId = UUID.randomUUID.toString
          val imageId = AppConf.defaultGroupImageId
          val params = Map("d" -> compact(render(("imageId" -> imageId))))
          put(s"/api/groups/${dummyId}/images/primary", params) {
            checkStatus(404, NOT_FOUND)
          }
        }
      }
      "403(AccessDenied)" in {
        session {
          signIn()
          val groupId = createGroup()
          val imageId = AppConf.defaultGroupImageId
          val params = Map("d" -> compact(render(("imageId" -> imageId))))
          signOut()
          dummySignIn()
          put(s"/api/groups/${groupId}/images/primary", params) {
            checkStatus(403, ACCESS_DENIED)
          }
        }
      }
      "500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          val imageId = AppConf.defaultGroupImageId
          val params = Map("d" -> compact(render(("imageId" -> imageId))))
          dbDisconnectedBlock {
            put(s"/api/groups/${groupId}/images/primary", params) {
              checkStatus(500, NG)
            }
          }
        }
      }
      "All" in {
        session {
          put(s"/api/groups/test/images/primary") {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "400(Illegal Argument) * 404(Illegal Argument)" in {
        session {
          signIn()
          val groupId = createGroup()
          put(s"/api/groups/test/images/primary") {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "400(Illegal Argument) * 500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          dbDisconnectedBlock {
            put(s"/api/groups/${groupId}/images/primary") {
              checkStatus(400, ILLEGAL_ARGUMENT)
            }
          }
        }
      }
      "404(Illegal Argument) * 500(NG)" in {
        session {
          signIn()
          val imageId = AppConf.defaultGroupImageId
          val params = Map("d" -> compact(render(("imageId" -> imageId))))
          dbDisconnectedBlock {
            put(s"/api/groups/test/images/primary", params) {
              checkStatus(404, ILLEGAL_ARGUMENT)
            }
          }
        }
      }
      "403(Unauthorized) * 500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          val imageId = AppConf.defaultGroupImageId
          val params = Map("d" -> compact(render(("imageId" -> imageId))))
          signOut()
          dbDisconnectedBlock {
            put(s"/api/groups/${groupId}/images/primary", params) {
              checkStatus(403, UNAUTHORIZED)
            }
          }
        }
      }
      "404(NotFound) * 403(AccessDenied)" in {
        session {
          val dummyId = UUID.randomUUID.toString
          val imageId = AppConf.defaultGroupImageId
          val params = Map("d" -> compact(render(("imageId" -> imageId))))
          dummySignIn()
          put(s"/api/groups/${dummyId}/images/primary", params) {
            checkStatus(404, NOT_FOUND)
          }
        }
      }
    }
    // Status-code checks for DELETE /api/groups/:group_id/images/:image_id.
    "DELETE /api/groups/:group_id/images/:image_id" - {
      "404(Illegal Argument)" in {
        session {
          signIn()
          val groupId = createGroup()
          delete(s"/api/groups/${groupId}/images/test") {
            checkStatus(404, ILLEGAL_ARGUMENT)
          }
        }
      }
      "403(Unauthorized)" in {
        session {
          signIn()
          val groupId = createGroup()
          val imageId = getGroupImageId(groupId)
          signOut()
          delete(s"/api/groups/${groupId}/images/${imageId}") {
            checkStatus(403, UNAUTHORIZED)
          }
        }
      }
      "404(NotFound)" in {
        session {
          signIn()
          val groupId = createGroup()
          val dummyId = UUID.randomUUID.toString
          delete(s"/api/groups/${groupId}/images/${dummyId}") {
            checkStatus(404, NOT_FOUND)
          }
        }
      }
      "403(AccessDenied)" in {
        session {
          signIn()
          val groupId = createGroup()
          val imageId = getGroupImageId(groupId)
          signOut()
          dummySignIn()
          delete(s"/api/groups/${groupId}/images/${imageId}") {
            checkStatus(403, ACCESS_DENIED)
          }
        }
      }
      // The default group image cannot be deleted -> 400 Bad Request.
      "400(BadRequest)" in {
        session {
          signIn()
          val groupId = createGroup()
          val imageId = AppConf.defaultGroupImageId
          delete(s"/api/groups/${groupId}/images/${imageId}") {
            checkStatus(400, BAD_REQUEST)
          }
        }
      }
      "500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          val imageId = getGroupImageId(groupId)
          dbDisconnectedBlock {
            delete(s"/api/groups/${groupId}/images/${imageId}") {
              checkStatus(500, NG)
            }
          }
        }
      }
      "All" in {
        session {
          val imageId = AppConf.defaultGroupImageId
          delete(s"/api/groups/test/images/${imageId}") {
            checkStatus(404, ILLEGAL_ARGUMENT)
          }
        }
      }
      "404(Illegal Argument) * 500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          dbDisconnectedBlock {
            delete(s"/api/groups/${groupId}/images/test") {
              checkStatus(404, ILLEGAL_ARGUMENT)
            }
          }
        }
      }
      "403(Unauthorized) * 500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          val imageId = getGroupImageId(groupId)
          signOut()
          dbDisconnectedBlock {
            delete(s"/api/groups/${groupId}/images/${imageId}") {
              checkStatus(403, UNAUTHORIZED)
            }
          }
        }
      }
      "403(Unauthorized) * 404(NotFound)" in {
        session {
          signIn()
          val groupId = createGroup()
          val dummyId = UUID.randomUUID.toString
          signOut()
          delete(s"/api/groups/${groupId}/images/${dummyId}") {
            checkStatus(403, UNAUTHORIZED)
          }
        }
      }
      "403(AccessDenied) * 400(BadRequest)" in {
        session {
          signIn()
          val groupId = createGroup()
          val imageId = AppConf.defaultGroupImageId
          signOut()
          dummySignIn()
          delete(s"/api/groups/${groupId}/images/${imageId}") {
            checkStatus(403, ACCESS_DENIED)
          }
        }
      }
    }
    // Status-code checks for POST /api/groups/:group_id/members
    // (adding members; role 2 presumably = manager -- confirm role constants).
    "POST /api/groups/:group_id/members" - {
      "400(Illegal Argument)" in {
        session {
          signIn()
          val groupId = createGroup()
          post(s"/api/groups/${groupId}/members") {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "404(Illegal Argument)" in {
        session {
          signIn()
          val params = Map("d" -> compact(render(Seq(("userId" -> testUserId) ~ ("role" -> JInt(2))))))
          post(s"/api/groups/test/members", params) {
            checkStatus(404, ILLEGAL_ARGUMENT)
          }
        }
      }
      "403(Unauthorized)" in {
        session {
          signIn()
          val groupId = createGroup()
          val params = Map("d" -> compact(render(Seq(("userId" -> testUserId) ~ ("role" -> JInt(2))))))
          signOut()
          post(s"/api/groups/${groupId}/members", params) {
            checkStatus(403, UNAUTHORIZED)
          }
        }
      }
      "404(NotFound)" in {
        session {
          signIn()
          val dummyId = UUID.randomUUID.toString
          val params = Map("d" -> compact(render(Seq(("userId" -> testUserId) ~ ("role" -> JInt(2))))))
          post(s"/api/groups/${dummyId}/members", params) {
            checkStatus(404, NOT_FOUND)
          }
        }
      }
      "403(AccessDenied)" in {
        session {
          signIn()
          val groupId = createGroup()
          val params = Map("d" -> compact(render(Seq(("userId" -> testUserId) ~ ("role" -> JInt(2))))))
          signOut()
          dummySignIn()
          post(s"/api/groups/${groupId}/members", params) {
            checkStatus(403, ACCESS_DENIED)
          }
        }
      }
      "500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          val params = Map("d" -> compact(render(Seq(("userId" -> testUserId) ~ ("role" -> JInt(2))))))
          dbDisconnectedBlock {
            post(s"/api/groups/${groupId}/members", params) {
              checkStatus(500, NG)
            }
          }
        }
      }
      "All" in {
        session {
          post(s"/api/groups/test/members") {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "400(Illegal Argument) * 404(Illegal Argument)" in {
        session {
          signIn()
          post(s"/api/groups/test/members") {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "400(Illegal Argument) * 500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          dbDisconnectedBlock {
            post(s"/api/groups/${groupId}/members") {
              checkStatus(400, ILLEGAL_ARGUMENT)
            }
          }
        }
      }
"404(Illegal Argument) * 500(NG)" in {
session {
signIn()
val params = Map("d" -> compact(render(Seq(("userId" -> testUserId) ~ ("role" -> JInt(2))))))
post(s"/api/groups/test/members", params) {
checkStatus(404, ILLEGAL_ARGUMENT)
}
}
}
      "403(Unauthorized) * 500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          val params = Map("d" -> compact(render(Seq(("userId" -> testUserId) ~ ("role" -> JInt(2))))))
          signOut()
          dbDisconnectedBlock {
            post(s"/api/groups/${groupId}/members", params) {
              checkStatus(403, UNAUTHORIZED)
            }
          }
        }
      }
      "403(Unauthorized) * 404(NotFound)" in {
        session {
          signIn()
          val dummyId = UUID.randomUUID.toString
          val params = Map("d" -> compact(render(Seq(("userId" -> testUserId) ~ ("role" -> JInt(2))))))
          signOut()
          post(s"/api/groups/${dummyId}/members", params) {
            checkStatus(403, UNAUTHORIZED)
          }
        }
      }
    }
    // Status-code checks for PUT /api/groups/:group_id/members/:user_id
    // (changing a member's role).
    "PUT /api/groups/:group_id/members/:user_id" - {
      "400(Illegal Argument)" in {
        session {
          signIn()
          val groupId = createGroup()
          addMember(groupId, dummyUserId)
          val userId = dummyUserId
          put(s"/api/groups/${groupId}/members/${userId}") {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "404(Illegal Argument)" in {
        session {
          signIn()
          val userId = dummyUserId
          val params = Map("d" -> compact(render(("role" -> JInt(1)))))
          put(s"/api/groups/test/members/${userId}", params) {
            checkStatus(404, ILLEGAL_ARGUMENT)
          }
        }
      }
      "403(Unauthorized)" in {
        session {
          signIn()
          val groupId = createGroup()
          addMember(groupId, dummyUserId)
          val userId = dummyUserId
          val params = Map("d" -> compact(render(("role" -> JInt(1)))))
          signOut()
          put(s"/api/groups/${groupId}/members/${userId}", params) {
            checkStatus(403, UNAUTHORIZED)
          }
        }
      }
      "404(NotFound)" in {
        session {
          signIn()
          val dummyId = UUID.randomUUID.toString
          val userId = dummyUserId
          val params = Map("d" -> compact(render(("role" -> JInt(1)))))
          put(s"/api/groups/${dummyId}/members/${userId}", params) {
            checkStatus(404, NOT_FOUND)
          }
        }
      }
      "403(AccessDenied)" in {
        session {
          signIn()
          val groupId = createGroup()
          addMember(groupId, dummyUserId)
          val userId = dummyUserId
          val params = Map("d" -> compact(render(("role" -> JInt(2)))))
          signOut()
          dummySignIn()
          put(s"/api/groups/${groupId}/members/${userId}", params) {
            checkStatus(403, ACCESS_DENIED)
          }
        }
      }
      // NOTE(review): testUserId is the group's creator/manager here; demoting
      // the only manager is presumably what triggers the 400 -- confirm.
      "400(BadRequest)" in {
        session {
          signIn()
          val groupId = createGroup()
          val userId = testUserId
          val params = Map("d" -> compact(render(("role" -> JInt(1)))))
          put(s"/api/groups/${groupId}/members/${userId}", params) {
            checkStatus(400, BAD_REQUEST)
          }
        }
      }
      "500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          addMember(groupId, dummyUserId)
          val userId = dummyUserId
          val params = Map("d" -> compact(render(("role" -> JInt(1)))))
          dbDisconnectedBlock {
            put(s"/api/groups/${groupId}/members/${userId}", params) {
              checkStatus(500, NG)
            }
          }
        }
      }
      "All" in {
        session {
          val dummyId = UUID.randomUUID.toString
          put(s"/api/groups/test/members/${dummyId}") {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "400(Illegal Argument) * 404(Illegal Argument)" in {
        session {
          signIn()
          val userId = dummyUserId
          put(s"/api/groups/test/members/${userId}") {
            checkStatus(400, ILLEGAL_ARGUMENT)
          }
        }
      }
      "400(Illegal Argument) * 500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          addMember(groupId, dummyUserId)
          val userId = dummyUserId
          dbDisconnectedBlock {
            put(s"/api/groups/${groupId}/members/${userId}") {
              checkStatus(400, ILLEGAL_ARGUMENT)
            }
          }
        }
      }
"404(Illegal Argument) * 500(NG)" in {
session {
signIn()
val userId = dummyUserId
val params = Map("d" -> compact(render(("role" -> JInt(1)))))
put(s"/api/groups/test/members/${userId}", params) {
checkStatus(404, ILLEGAL_ARGUMENT)
}
}
}
      "403(Unauthorized) * 500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          addMember(groupId, dummyUserId)
          val userId = dummyUserId
          val params = Map("d" -> compact(render(("role" -> JInt(1)))))
          signOut()
          dbDisconnectedBlock {
            put(s"/api/groups/${groupId}/members/${userId}", params) {
              checkStatus(403, UNAUTHORIZED)
            }
          }
        }
      }
      "404(NotFound) * 403(AccessDenied)" in {
        session {
          signIn()
          val dummyId = UUID.randomUUID.toString
          val userId = dummyUserId
          val params = Map("d" -> compact(render(("role" -> JInt(1)))))
          signOut()
          dummySignIn()
          put(s"/api/groups/${dummyId}/members/${userId}", params) {
            checkStatus(404, NOT_FOUND)
          }
        }
      }
      "404(NotFound) * 400(BadRequest)" in {
        session {
          signIn()
          val dummyId = UUID.randomUUID.toString
          val userId = testUserId
          val params = Map("d" -> compact(render(("role" -> JInt(1)))))
          put(s"/api/groups/${dummyId}/members/${userId}", params) {
            checkStatus(404, NOT_FOUND)
          }
        }
      }
    }
    // Status-code checks for DELETE /api/groups/:group_id/members/:user_id
    // (removing a member from a group).
    "DELETE /api/groups/:group_id/members/:user_id" - {
      "404(Illegal Argument)" in {
        session {
          signIn()
          val groupId = createGroup()
          delete(s"/api/groups/${groupId}/members/test") {
            checkStatus(404, ILLEGAL_ARGUMENT)
          }
        }
      }
      "403(Unauthorized)" in {
        session {
          signIn()
          val groupId = createGroup()
          addMember(groupId, dummyUserId)
          val userId = dummyUserId
          signOut()
          delete(s"/api/groups/${groupId}/members/${userId}") {
            checkStatus(403, UNAUTHORIZED)
          }
        }
      }
      "404(NotFound)" in {
        session {
          signIn()
          val groupId = createGroup()
          addMember(groupId, dummyUserId)
          val dummyId = UUID.randomUUID.toString
          delete(s"/api/groups/${groupId}/members/${dummyId}") {
            checkStatus(404, NOT_FOUND)
          }
        }
      }
      "403(AccessDenied)" in {
        session {
          signIn()
          val groupId = createGroup()
          addMember(groupId, dummyUserId)
          val userId = dummyUserId
          signOut()
          dummySignIn()
          delete(s"/api/groups/${groupId}/members/${userId}") {
            checkStatus(403, ACCESS_DENIED)
          }
        }
      }
      // NOTE(review): testUserId is the group's creator; removing the last
      // manager presumably triggers the 400 -- confirm.
      "400(BadRequest)" in {
        session {
          signIn()
          val groupId = createGroup()
          val userId = testUserId
          delete(s"/api/groups/${groupId}/members/${userId}") {
            checkStatus(400, BAD_REQUEST)
          }
        }
      }
      "500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          addMember(groupId, dummyUserId)
          val userId = dummyUserId
          dbDisconnectedBlock {
            delete(s"/api/groups/${groupId}/members/${userId}") {
              checkStatus(500, NG)
            }
          }
        }
      }
      "All" in {
        session {
          val dummyId = UUID.randomUUID.toString
          delete(s"/api/groups/${dummyId}/members/test") {
            checkStatus(404, ILLEGAL_ARGUMENT)
          }
        }
      }
"404(Illegal Argument) * 500(NG)" in {
session {
signIn()
val groupId = createGroup()
delete(s"/api/groups/${groupId}/members/test") {
checkStatus(404, ILLEGAL_ARGUMENT)
}
}
}
      "403(Unauthorized) * 500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          addMember(groupId, dummyUserId)
          val userId = dummyUserId
          signOut()
          dbDisconnectedBlock {
            delete(s"/api/groups/${groupId}/members/${userId}") {
              checkStatus(403, UNAUTHORIZED)
            }
          }
        }
      }
      "403(Unauthorized) * 404(NotFound)" in {
        session {
          signIn()
          val groupId = createGroup()
          val dummyId = UUID.randomUUID.toString
          signOut()
          delete(s"/api/groups/${groupId}/members/${dummyId}") {
            checkStatus(403, UNAUTHORIZED)
          }
        }
      }
      "403(AccessDenied) * 400(BadRequest)" in {
        session {
          signIn()
          val groupId = createGroup()
          val userId = testUserId
          signOut()
          dummySignIn()
          delete(s"/api/groups/${groupId}/members/${userId}") {
            checkStatus(403, ACCESS_DENIED)
          }
        }
      }
    }
    // Status-code checks for DELETE /api/groups/:group_id (deleting a group).
    "DELETE /api/groups/:group_id" - {
      "404(Illegal Argument)" in {
        session {
          signIn()
          delete(s"/api/groups/test") {
            checkStatus(404, ILLEGAL_ARGUMENT)
          }
        }
      }
      "403(Unauthorized)" in {
        session {
          signIn()
          val groupId = createGroup()
          signOut()
          delete(s"/api/groups/${groupId}") {
            checkStatus(403, UNAUTHORIZED)
          }
        }
      }
      "404(NotFound)" in {
        session {
          signIn()
          val dummyId = UUID.randomUUID.toString
          delete(s"/api/groups/${dummyId}") {
            checkStatus(404, NOT_FOUND)
          }
        }
      }
      "403(AccessDenied)" in {
        session {
          signIn()
          val groupId = createGroup()
          signOut()
          dummySignIn()
          delete(s"/api/groups/${groupId}") {
            checkStatus(403, ACCESS_DENIED)
          }
        }
      }
      "500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          dbDisconnectedBlock {
            delete(s"/api/groups/${groupId}") {
              checkStatus(500, NG)
            }
          }
        }
      }
      "All" in {
        session {
          delete(s"/api/groups/test") {
            checkStatus(404, ILLEGAL_ARGUMENT)
          }
        }
      }
      "404(Illegal Argument) * 500(NG)" in {
        session {
          signIn()
          dbDisconnectedBlock {
            delete(s"/api/groups/test") {
              checkStatus(404, ILLEGAL_ARGUMENT)
            }
          }
        }
      }
      "403(Unauthorized) * 500(NG)" in {
        session {
          signIn()
          val groupId = createGroup()
          signOut()
          dbDisconnectedBlock {
            delete(s"/api/groups/${groupId}") {
              checkStatus(403, UNAUTHORIZED)
            }
          }
        }
      }
      "404(NotFound) * 403(AccessDenied)" in {
        session {
          signIn()
          val dummyId = UUID.randomUUID.toString
          signOut()
          dummySignIn()
          delete(s"/api/groups/${dummyId}") {
            checkStatus(404, NOT_FOUND)
          }
        }
      }
    }
}
}
  /**
   * Runs the given block with the test DB connection pool closed, restoring
   * it afterwards. Used to provoke 500 responses from the API under test.
   *
   * @param procedure the block to run while the DB is disconnected
   * @return the block's result
   */
  private def dbDisconnectedBlock[T](procedure: => T): T = {
    DBsWithEnv("test").close()
    try {
      procedure
    } finally {
      // Always restore the pool, even if the block throws.
      DBsWithEnv("test").setup()
    }
  }
/**
* サインアウトします。
*/
private def signOut() {
post("/api/signout") {
checkStatus(200, "OK")
}
}
  /**
   * Signs in as the dummy user ("dummy4"), i.e. a user who is not the
   * creator of the fixtures made by createGroup().
   */
  private def dummySignIn(): Unit = {
    signIn("dummy4")
  }
  /**
   * Adds a user to a group (role = 1; presumably a plain member --
   * confirm against the role constants), asserting the request succeeds.
   *
   * @param groupId the group ID
   * @param userId  the ID of the user to add
   */
  private def addMember(groupId: String, userId: String): Unit = {
    val params = Map("d" -> compact(render(Seq(("userId" -> userId) ~ ("role" -> JInt(1))))))
    post(s"/api/groups/${groupId}/members", params) {
      checkStatus(200, "OK")
    }
  }
  /**
   * Uploads a dummy image to the group and returns the new image's ID
   * (empty string if the response contains no image).
   *
   * @param groupId the group ID
   * @return the uploaded image's ID
   */
  private def getGroupImageId(groupId: String): String = {
    val files = Map("images" -> dummyImage)
    post(s"/api/groups/${groupId}/images", Map.empty, files) {
      checkStatus(200, "OK")
      parse(body).extract[AjaxResponse[GroupAddImages]].data.images.headOption.map(_.id).getOrElse("")
    }
  }
  /**
   * Creates a group named "group1" as the signed-in user.
   *
   * @return the created group's ID
   */
  private def createGroup(): String = {
    val params = Map("d" -> compact(render(("name" -> "group1") ~ ("description" -> "des1"))))
    post("/api/groups", params) {
      checkStatus(200, "OK")
      parse(body).extract[AjaxResponse[Group]].data.id
    }
  }
  /** Convenience overload: asserts HTTP status `code` and result string `str`. */
  private def checkStatus(code: Int, str: String): Unit = {
    checkStatus(code, Some(str))
  }
}
| nkawa/dsmoq | server/apiServer/src/test/scala/api/status/GroupStatusCheckSpec.scala | Scala | apache-2.0 | 49,657 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.businessactivities
import com.google.inject.Inject
import connectors.DataCacheConnector
import controllers.{AmlsBaseController, CommonPlayDependencies}
import forms.{EmptyForm, Form2, InvalidForm, ValidForm}
import models.businessactivities.{BusinessActivities, RiskAssessmentHasPolicy}
import models.businessmatching.BusinessMatching
import play.api.mvc.MessagesControllerComponents
import utils.{AuthAction, ControllerHelper}
import views.html.businessactivities._
import scala.concurrent.Future
/**
 * Handles the "does the business have a risk assessment policy?" question
 * in the business-activities section: renders the form, persists the answer
 * to the data cache, and routes to the next page.
 */
class RiskAssessmentController @Inject() (val dataCacheConnector: DataCacheConnector,
                                          val authAction: AuthAction,
                                          val ds: CommonPlayDependencies,
                                          val cc: MessagesControllerComponents,
                                          risk_assessment_policy: risk_assessment_policy,
                                          implicit val error: views.html.error) extends AmlsBaseController(ds, cc) {
  // Renders the question page, pre-populating the form from any answer
  // previously saved in the data cache (empty form otherwise).
  def get(edit: Boolean = false) = authAction.async {
    implicit request =>
      dataCacheConnector.fetch[BusinessActivities](request.credId, BusinessActivities.key) map {
        response =>
          val form: Form2[RiskAssessmentHasPolicy] = (for {
            businessActivities <- response
            riskAssessmentPolicy <- businessActivities.riskAssessmentPolicy
          } yield Form2[RiskAssessmentHasPolicy](riskAssessmentPolicy.hasPolicy)).getOrElse(EmptyForm)
          Ok(risk_assessment_policy(form, edit))
      }
  }
  // Validates the submitted answer, saves it, and redirects based on whether
  // accountancy services were selected in business matching.
  def post(edit: Boolean = false) = authAction.async {
    implicit request =>
      Form2[RiskAssessmentHasPolicy](request.body) match {
        case f: InvalidForm =>
          Future.successful(BadRequest(risk_assessment_policy(f, edit)))
        case ValidForm(_, data: RiskAssessmentHasPolicy) => {
          dataCacheConnector.fetchAll(request.credId) flatMap { maybeCache =>
            val businessMatching = for {
              cacheMap <- maybeCache
              bm <- cacheMap.getEntry[BusinessMatching](BusinessMatching.key)
            } yield bm
            for {
              businessActivities <- dataCacheConnector.fetch[BusinessActivities](request.credId, BusinessActivities.key)
              // NOTE(review): riskAssessmentHasPolicy is called directly on the
              // fetched value -- presumably an implicit lifts the Option here;
              // confirm behaviour when nothing is cached yet.
              _ <- dataCacheConnector.save[BusinessActivities](request.credId, BusinessActivities.key, businessActivities.riskAssessmentHasPolicy(data))
            } yield redirectDependingOnAccountancyServices(ControllerHelper.isAccountancyServicesSelected(Some(businessMatching)), data)
          }
        } recoverWith {
          // NOTE(review): mapping IndexOutOfBoundsException to 404 is unusual;
          // presumably thrown by a downstream lookup -- confirm the source.
          case _: IndexOutOfBoundsException => Future.successful(NotFound(notFoundView))
        }
      }
  }
  // "Yes" always goes to the document-risk-assessment page; "No" goes to the
  // summary when accountancy services are selected, otherwise to the
  // accountant-for-AMLS-regulations question.
  private def redirectDependingOnAccountancyServices(accountancyServices: Boolean, data: RiskAssessmentHasPolicy) =
    accountancyServices match {
      case _ if data == RiskAssessmentHasPolicy(true) => Redirect(routes.DocumentRiskAssessmentController.get())
      case true => Redirect(routes.SummaryController.get)
      case false => Redirect(routes.AccountantForAMLSRegulationsController.get())
    }
}
| hmrc/amls-frontend | app/controllers/businessactivities/RiskAssessmentController.scala | Scala | apache-2.0 | 3,733 |
/**
* Copyright (c) 2016-2017, Benjamin Fradet, and other contributors.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.github.benfradet.spark.kafka.writer
import org.apache.kafka.clients.producer._
import scala.concurrent.duration._
/**
 * Integration tests for writeToKafka on an RDD: contents reach the topic,
 * and an optional producer callback fires once per record.
 */
class RDDKafkaWriterSpec extends SKRSpec {
  "a RDDKafkaWriterSpec" when {
    "given a RDD" should {
      "write its content to Kafka" in {
        // Copy into a local val so the Spark closure captures only the
        // String, not the whole (non-serializable) spec instance.
        val localTopic = topic
        val msgs = (1 to 10).map(_.toString)
        val rdd = ssc.sparkContext.parallelize(msgs)
        rdd.writeToKafka(
          producerConfig,
          s => new ProducerRecord[String, String](localTopic, s)
        )
        val results = collect(ssc, localTopic)
        ssc.start()
        // Async consume: poll until all messages arrive or the timeout hits.
        eventually(timeout(30.seconds), interval(1.second)) {
          results shouldBe msgs
        }
      }
      "trigger a given callback for every write to Kafka" in {
        val localTopic = topic
        val msgs = (1 to 10).map(_.toString)
        val rdd = ssc.sparkContext.parallelize(msgs)
        rdd.writeToKafka(
          producerConfig,
          s => new ProducerRecord[String, String](localTopic, s),
          // Callback must be Serializable to ship with the Spark task; the
          // shared counter lives in SKRSpec's companion so the driver sees it.
          Some(new Callback with Serializable {
            override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = {
              SKRSpec.callbackTriggerCount.incrementAndGet()
            }
          })
        )
        eventually(timeout(30.seconds), interval(1.second)) {
          SKRSpec.callbackTriggerCount.get() shouldBe msgs.size
        }
      }
    }
  }
}
| BenFradet/spark-kafka-writer | src/test/scala/com/github/benfradet/spark/kafka/writer/RDDKafkaWriterSpec.scala | Scala | apache-2.0 | 2,299 |
/*
* The Bluejelly project, Copyright 2012.
*
* This source code is distributed under the terms of
* the BSD license, see the LICENSE file for details.
*/
package bluejelly.bjc.parser
import java.math.BigInteger
import scala.annotation.tailrec
import scala.collection.mutable.StringBuilder
import scala.language.implicitConversions
import scala.util.parsing.input.{Positional}
import scala.util.parsing.combinator.{RegexParsers}
import bluejelly.utils.Name
/**
* Basic lexer for the bluejelly top-level language.
* @author ppedemon
*/
object Lexer extends RegexParsers with Tokens {
  // No implicit whitespace skipping: layout/whitespace is handled explicitly.
  override val whiteSpace = "".r
  // Matches any single input element.
  private def any = elem("",_ => true)
  // Renders a character for error messages, escaping control and space
  // characters so they are visible.
  private def printChar(c:Char) = c match {
    case '\\n' => "\\\\n"
    case '\\r' => "\\\\r"
    case '\\t' => "\\\\t"
    case '\\f' => "\\\\f"
    case '\\b' => "\\\\b"
    case '\\07' => "\\\\a"
    case '\\013' => "\\\\v"
    case _ if c.isControl || c.isSpaceChar =>
      "\\\\u%s" format (Integer.toHexString(c))
    case _ => "%c" format c
  }
  // ---------------------------------------------------------------------
  // Basic definitions
  // ---------------------------------------------------------------------
  // Reserved words mapped to token factories (thunks, since each lookup
  // must produce a fresh positioned token).
  private val keywords:Map[String, Unit => Token] = Map(
    "case" -> (_ => TCase()),
    "class" -> (_ => TClass()),
    "data" -> (_ => TData()),
    "default" -> (_ => TDefault()),
    "deriving" -> (_ => TDeriving()),
    "do" -> (_ => TDo()),
    "else" -> (_ => TElse()),
    "forall" -> (_ => TForall()),
    "if" -> (_ => TIf()),
    "import" -> (_ => TImport()),
    "in" -> (_ => TIn()),
    "infix" -> (_ => TInfix()),
    "infixl" -> (_ => TInfixl()),
    "infixr" -> (_ => TInfixr()),
    "instance" -> (_ => TInstance()),
    "let" -> (_ => TLet()),
    "mdo" -> (_ => TMDo()),
    "module" -> (_ => TModule()),
    "newtype" -> (_ => TNewtype()),
    "of" -> (_ => TOf()),
    "primitive" -> (_ => TPrim()),
    "then" -> (_ => TThen()),
    "type" -> (_ => TType()),
    "where" -> (_ => TWhere()),
    "_" -> (_ => TUnder())
  )
  // Reserved operators mapped to token factories.
  private val reservedOps:Map[String, Unit => Token] = Map(
    ".." -> (_ => new TDotDot),
    "::" -> (_ => new TCoCo),
    "=" -> (_ => new TEq),
    "\\\\" -> (_ => new TLam),
    "|" -> (_ => new TBar),
    "<-" -> (_ => new TLArr),
    "->" -> (_ => new TRArr),
    "=>" -> (_ => new TImplies),
    "@" -> (_ => new TAt),
    "~" -> (_ => new TTilde)
  )
  // Character classes (as regex fragments) used to build identifier and
  // symbol lexemes below.
  private val small = """[\\p{Ll}_]"""
  private val large = """[\\p{Lu}\\p{Lt}]"""
  private val digit = """\\p{Nd}"""
  private val white = """[\\p{Zs}\\n\\r\\t\\f]"""
  private val sym = """[\\p{S}!#%&/<=>@~:\\$\\*\\+\\.\\?\\^\\|\\-\\\\]"""
  // A symbol char is anything matching `sym` that is not a bracket,
  // separator, backquote, underscore or quote character.
  def isSymbol(c:Char) =
    (("()[]{},;`" indexOf c) == -1) &&
    (("_'\\"" indexOf c) == -1) &&
    (c.toString.matches(sym))
  // Single-element parser with friendlier error messages (EOF-aware,
  // escapes unprintable chars via printChar).
  override implicit def accept(e: Elem): Parser[Elem] =
    acceptIf(_ == e)(c =>
      if (c == PositionedReader.EofCh)
        "unexpected end of input (`%s' expected)" format e
      else
        "`%s' expected but `%s' found" format (printChar(e), printChar(c)))
  // Lexeme-level regexes: variable ids, constructor ids, module ids,
  // variable symbols and constructor symbols.
  private val varid =
    "%s(%s|%s|%s|')*" format (small,small,large,digit)
  private val conid =
    "%s(%s|%s|%s|')*" format (large,small,large,digit)
  private val modid =
    "(%s\\\\.)*%s" format (varid,conid)
  private val varsym = "%s+" format sym
  private val consym = ":%s" format sym
  // ---------------------------------------------------------------------
  // Identifiers
  // ---------------------------------------------------------------------
  // Tries the (possibly qualified) form first via qident; if that partial
  // function rejects (reserved word/op), falls back to the unqualified
  // alternatives, which map reserved names to their dedicated tokens.
  private def ident =
    (("(%s\\\\.)?(%s|%s|%s|%s)"
        format (modid,varid,conid,varsym,consym)).r ^? qident
    | varid.r ^^ {keywords(_)()}
    | varsym.r ^^ {reservedOps(_)()}
    | consym.r ^^ {reservedOps(_)()}
    | conid.r ^^ {conId(None,_)})
  /*
   * This partial function is the core of the lexer. It tries to
   * lex possibly qualified ids or syms. If it turns out that the
   * last component of the possibly qualified name refers to a
   * reserved word or operator, we call the function not defined,
   * so the parser can backtrack.
   *
   * Performance: haven't profiled, but I'm sure this function
   * accounts for the 80% of the lexer execution time. The lexer
   * goes almost twice as fast now that I'm using regexps instead
   * of combinators, but it's still too slow for my taste. The bad
   * thing is that I can't think of any obvious hack to improve
   * lexing performance :(
   */
  private def qident = new PartialFunction[String,Token] {
    // When the name obtained from splitting is a reserved word
    // or operator, call the function undefined and allow parser
    // to backtrack. As a special case, we don't allow qualified
    // operators of the form M.-{2,-}. Such operator is a comment,
    // and it could have never been defined in module M.
    def isDefinedAt(s:String) = {
      val (q,n) = split(s)
      !keywords.contains(n) &&
      !reservedOps.contains(n) &&
      (q.isEmpty || !n.matches("-{2,}"))
    }
    // Application: the easy part, depending on the name call
    // a suitable factory function (first char decides the kind:
    // lower/underscore -> varid, upper -> conid, ':' -> consym,
    // anything else -> varsym).
    def apply(s:String) = {
      val (q,n) = split(s)
      n match {
        case _ if n.charAt(0).isLower || n.charAt(0) == '_' => varId(q,n)
        case _ if n.charAt(0).isUpper => conId(q,n)
        case _ if n.charAt(0) == ':' => conSym(q,n)
        case _ => varSym(q,n)
      }
    }
    // Auxiliary: split qualifier and name parts, qualifier
    // will be None for non-qualified tokens
    private def split(s:String) = {
      val p = ("(%s\\\\.)?(.+)" format modid).r
      s match {
        case p(q,_,_,_,n) if q != null => (Some(q.dropRight(1)),n)
        case _ => (None,s)
      }
    }
  }
  // Factory stuff, handle a couple of awkward cases here:
  // a few unqualified names ("as", "hiding", "qualified", "-") are
  // contextual keywords/operators and get dedicated tokens.
  @inline
  private def varId(q:Option[String],s:String) = q match {
    case None if s == "as" => TAs()
    case None if s == "hiding" => THiding()
    case None if s == "qualified" => TQualified()
    case None => VarId(Name(s))
    case Some(q) => QVarId(Name(q,s))
  }
  // Constructor identifier: qualified or plain.
  @inline
  private def conId(q:Option[String],s:String) = q match {
    case None => ConId(Name(s))
    case Some(q) => QConId(Name(q,s))
  }
  // Variable operator; a bare "-" is special-cased to its own token.
  @inline
  private def varSym(q:Option[String],s:String) = q match {
    case None if s == "-" => TMinus()
    case None => VarSym(Name(s))
    case Some(q) => QVarSym(Name(q,s))
  }
  // Constructor operator: qualified or plain.
  @inline
  private def conSym(q:Option[String],s:String) = q match {
    case None => ConSym(Name(s))
    case Some(q) => QConSym(Name(q,s))
  }
  // ---------------------------------------------------------------------
  // Floating point literals
  // ---------------------------------------------------------------------
  // Exponent suffix, e.g. e10, E+3, e-7.
  private def exp =
    """[eE][\\+-]?(%s)+""" format digit
  // Either digits.digits[exp] or digits exp; keeps the raw lexeme around
  // for error reporting alongside the parsed Double.
  private def fp =
    ( ("""%s+\\.%s+(%s)?""" format (digit,digit,exp)).r ^^ (s => (s,s.toDouble))
    | ("""%s+%s""" format (digit,exp)).r ^^ (s => (s,s.toDouble)))
  // Rejects literals whose Double conversion overflowed to Inf or NaN.
  private def floating =
    fp >> {
      case (_,n) if !n.isInfinity && !n.isNaN => success(FloatLit(n))
      case (s,_) => err("invalid floating point literal: `%s'" format s)
    }
// ---------------------------------------------------------------------
// Integer literals (decimal, hexadecimal or octal)
// ---------------------------------------------------------------------
private def toBigInt(ds:String, radix:Int) =
new BigInt(new BigInteger(ds,radix))
  // Integer literal: hex (0x/0X), octal (0o/0O) — both dropping the
  // two-character prefix — or plain decimal, tried in that order so the
  // "0" of the prefix is not consumed as a decimal literal first.
  private def integer =
    ( """0[xX][A-Fa-f0-9]+""".r ^^ (s => IntLit(toBigInt(s.drop(2),16)))
    | """0[oO][0-7]+""".r ^^ (s => IntLit(toBigInt(s.drop(2),8)))
    | ("""(%s)+""" format digit).r ^^ (s => IntLit(toBigInt(s,10))))
  // ---------------------------------------------------------------------
  // String and char literals
  // ---------------------------------------------------------------------
  // Strips the surrounding quote characters from a lexed literal.
  private def unquote(s:String) = s.drop(1).dropRight(1)
  // Maps single-letter escape codes to the character they denote.
  private val escMap = Map(
    'a' -> '\\07',
    'b' -> '\\b' ,
    'f' -> '\\f',
    'n' -> '\\n',
    'r' -> '\\r',
    't' -> '\\t' ,
    'v' -> '\\013',
    '"' -> '"' ,
    '\\'' -> '\\'',
    '\\\\' -> '\\\\')
  // esc: escapes valid in char literals (letter escapes or up to 3 octal digits).
  private val esc = """\\\\[abfnrtv"'\\\\]|\\\\[0-7]{1,3}"""
  // sesc: same for string literals, additionally allowing whitespace "gaps"
  // (backslash, whitespace run, backslash).
  private val sesc = """\\\\[abfnrtv"'\\\\]|\\\\%s+\\\\|\\\\[0-7]{1,3}""" format white
  /*
   * Mini parser for a character or string literals, we ignore
   * gaps and turn escape sequences into the real characters.
   */
  @tailrec
  private def unesc(s:String,r:StringBuilder=new StringBuilder):String = {
    // Accumulates consecutive octal digits into a character code.
    // NOTE(review): isDigit accepts 8 and 9 too; the esc/sesc regexes
    // restrict input to [0-7], so this is safe only for lexer-validated
    // input — confirm before reusing elsewhere.
    @tailrec
    def oct(s:String,n:Int=0):(String,Int) =
      if (s.isEmpty) (s,n) else
      s.head match {
        case x if x.isDigit => oct(s.tail, (n<<3)|(x.toInt & 0xf))
        case _ => (s,n)
      }
    // Skips a whitespace gap up to and including its closing backslash.
    def gap(s:String) = s.dropWhile(_ != '\\\\').tail
    // Resolves a single-letter escape via escMap.
    def esc(s:String) = (s.tail, escMap(s.head))
    if (s.isEmpty) r.toString else
    s.head match {
      case '\\\\' => s.tail.head match {
        case c if c.isDigit =>
          val (rest,n) = oct(s.tail)
          unesc(rest, r+n.toChar)
        case c if c.isWhitespace => unesc(gap(s.tail),r)
        case _ =>
          val (rest,c) = esc(s.tail)
          unesc(rest, r+c)
      }
      case c => unesc(s.tail, r+c)
    }
  }
  // Char literal: a quoted non-control character or escape; the two
  // fallback alternatives produce targeted error messages for unclosed
  // and otherwise invalid literals.
  private def char =
    ( ("""'([^'\\p{Cntrl}\\\\]|%s)'""" format esc).r ^^
        (s => CharLit(unesc(unquote(s)).head))
    | "'[^']*".r ~> eoi ~> err("unclosed character literal")
    | "'[^']*'".r ~> err("invalid character literal"))
  // String literal: same structure as char, but allowing gaps (sesc)
  // and any number of characters.
  private def string =
    ( (""""([^"\\p{Cntrl}\\\\]|%s)*"""" format sesc).r ^^
        (s => StringLit(unesc(unquote(s))))
    | """"[^"]*""".r ~> eoi ~> err("unclosed string literal")
    | """"[^"]*"""".r ~> err("invalid string literal"))
  // ---------------------------------------------------------------------
  // Special symbols
  // ---------------------------------------------------------------------
  // One-character delimiter tokens. A fresh token instance is created on
  // every match so each can carry its own source position.
  private def special = """[\\(\\)\\[\\]\\{\\},;`]""".r ^^ {
    case "(" => TLParen()
    case ")" => TRParen()
    case "[" => TLBrack()
    case "]" => TRBrack()
    case "{" => TLCurly()
    case "}" => TRCurly()
    case "," => TComma()
    case ";" => TSemi()
    case "`" => TBack()
  }
  // ---------------------------------------------------------------------
  // Everything together
  // ---------------------------------------------------------------------
  // Succeeds with an EOI token only at the very end of the input.
  private def eoi:Parser[Token] = Parser[Token] { in =>
    if (in.atEnd) Success(EOI(), in)
    else Failure("End of Input expected", in)
  }
  // Main entry point: tries each token class in order (the ordering
  // matters, e.g. `special` before `ident`) and stamps the source
  // position onto the resulting token.
  def token:Parser[Token] = positioned(
      special |
      ident |
      floating |
      integer |
      char |
      string |
      eoi |
      any >> {c => err("invalid character: `%s'" format printChar(c))})
}
| ppedemon/Bluejelly | bluejelly-bjc/src/main/scala/bluejelly/bjc/parser/Lexer.scala | Scala | bsd-3-clause | 10,838 |
package tomduhourq.learningscalaz.truthy
import tomduhourq.learningscalaz.op.ToCanIsTruthyOps
/**
 * Type class capturing "truthiness": how a value of `A` collapses to a
 * Boolean, in the spirit of dynamically typed languages.
 */
trait CanTruthy[A] { self =>
  def truthys(a: A): Boolean
}
object CanTruthy {
  /** Summons the instance for `A` from implicit scope. */
  def apply[A](implicit ev: CanTruthy[A]) = ev
  /** Builds an instance from a plain predicate. */
  def truthys[A](f: A => Boolean) = new CanTruthy[A] {
    def truthys(a: A) = f(a)
  }
  // A Boolean is its own truth value.
  implicit val booleanCanTruthy: CanTruthy[Boolean] = CanTruthy.truthys(identity)
  // An Int is truthy unless it is zero.
  implicit val intCanTruthy: CanTruthy[Int] = CanTruthy.truthys(_ != 0)
  // A List is truthy unless it is empty.
  implicit def listCanTruthy[A]: CanTruthy[List[A]] = CanTruthy.truthys(_.nonEmpty)
  // Since A is invariant, you need to define a new CanTruthy[Nil.type]
  // so that Nil.truthy resolves.
  implicit val nilCanTruthy: CanTruthy[scala.collection.immutable.Nil.type] = CanTruthy.truthys(_ => false)
}
| tomduhourq/learning-scalaz | src/main/scala/tomduhourq/learningscalaz/truthy/CanTruthy.scala | Scala | apache-2.0 | 905 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.http
import scala.concurrent.duration.Duration
import scala.concurrent.duration.FiniteDuration
import scala.util.Try
import akka.http.scaladsl.model.StatusCode
import akka.http.scaladsl.model.StatusCodes.Forbidden
import akka.http.scaladsl.model.StatusCodes.NotFound
import akka.http.scaladsl.model.MediaType
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport.sprayJsonMarshaller
import akka.http.scaladsl.server.StandardRoute
import spray.json._
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.entity.SizeError
import org.apache.openwhisk.core.entity.ByteSize
import org.apache.openwhisk.core.entity.Exec
import org.apache.openwhisk.core.entity.ExecMetaDataBase
import org.apache.openwhisk.core.entity.ActivationId
/**
 * Central catalogue of user-facing error and status messages emitted by the
 * OpenWhisk REST API. Keeping every message here yields consistent wording
 * and lets tests assert against exact strings. Do not change message text
 * casually: clients and tests may match on it.
 */
object Messages {
  /** Standard message for reporting resource conflicts. */
  val conflictMessage = "Concurrent modification to resource detected."
  /**
   * Standard message for reporting resource conformance error when trying to access
   * a resource from a different collection.
   */
  val conformanceMessage = "Resource by this name exists but is not in this collection."
  val corruptedEntity = "Resource is corrupted and cannot be read."
  /**
   * Standard message for reporting deprecated runtimes.
   */
  def runtimeDeprecated(e: Exec) =
    s"The '${e.kind}' runtime is no longer supported. You may read and delete but not update or invoke this action."
  /**
   * Standard message for reporting deprecated runtimes.
   */
  def runtimeDeprecated(e: ExecMetaDataBase) =
    s"The '${e.kind}' runtime is no longer supported. You may read and delete but not update or invoke this action."
  /** Standard message for resource not found. */
  val resourceDoesNotExist = "The requested resource does not exist."
  def resourceDoesntExist(value: String) = s"The requested resource '$value' does not exist."
  /** Standard message for too many activation requests within a rolling time window. */
  def tooManyRequests(count: Int, allowed: Int) =
    s"Too many requests in the last minute (count: $count, allowed: $allowed)."
  /** Standard message for too many concurrent activation requests within a time window. */
  val tooManyConcurrentRequests = s"Too many concurrent requests in flight."
  def tooManyConcurrentRequests(count: Int, allowed: Int) =
    s"Too many concurrent requests in flight (count: $count, allowed: $allowed)."
  /** System overload message. */
  val systemOverloaded = "System is overloaded, try again later."
  /** Standard message when supplied authkey is not authorized for an operation. */
  val notAuthorizedtoOperateOnResource = "The supplied authentication is not authorized to access this resource."
  def notAuthorizedtoAccessResource(value: String) =
    s"The supplied authentication is not authorized to access '$value'."
  def notAuthorizedtoActionKind(value: String) =
    s"The supplied authentication is not authorized to access actions of kind '$value'."
  /** Standard error message for malformed fully qualified entity names. */
  val malformedFullyQualifiedEntityName =
    "The fully qualified name of the entity must contain at least the namespace and the name of the entity."
  def entityNameTooLong(error: SizeError) = {
    s"${error.field} longer than allowed: ${error.is.toBytes} > ${error.allowed.toBytes}."
  }
  val entityNameIllegal = "The name of the entity contains illegal characters."
  val namespaceIllegal = "The namespace contains illegal characters."
  /** Standard error for malformed activation id. */
  val activationIdIllegal = "The activation id is not valid."
  def activationIdLengthError(error: SizeError) = {
    s"${error.field} length is ${error.is.toBytes} but must be ${error.allowed.toBytes}."
  }
  /** Standard error for malformed creation id. */
  val creationIdIllegal = "The creation id is not valid."
  def creationIdLengthError(error: SizeError) = {
    s"${error.field} length is ${error.is.toBytes} but must be ${error.allowed.toBytes}."
  }
  /** Error messages for sequence actions. */
  val sequenceIsTooLong = "Too many actions in the sequence."
  val sequenceNoComponent = "No component specified for the sequence."
  val sequenceIsCyclic = "Sequence may not refer to itself."
  val sequenceComponentNotFound = "Sequence component does not exist."
  /** Error messages for packages. */
  val bindingDoesNotExist = "Binding references a package that does not exist."
  val packageCannotBecomeBinding = "Resource is a package and cannot be converted into a binding."
  val bindingCannotReferenceBinding = "Cannot bind to another package binding."
  val requestedBindingIsNotValid = "Cannot bind to a resource that is not a package."
  val notAllowedOnBinding = "Operation not permitted on package binding."
  def packageNameIsReserved(name: String) = s"Package name '$name' is reserved."
  def packageBindingCircularReference(name: String) = s"Package binding '$name' contains a circular reference."
  /** Error messages for triggers. */
  def triggerWithInactiveRule(rule: String, action: String) = {
    s"Rule '$rule' is inactive, action '$action' was not activated."
  }
  /** Error messages for sequence activations. */
  def sequenceRetrieveActivationTimeout(id: ActivationId) =
    s"Timeout reached when retrieving activation $id for sequence component."
  val sequenceActivationFailure = "Sequence failed."
  /** Error messages for compositions. */
  val compositionIsTooLong = "Too many actions in the composition."
  val compositionActivationFailure = "Activation failure during composition."
  def compositionActivationTimeout(id: ActivationId) =
    s"Timeout reached when retrieving activation $id during composition."
  def compositionComponentInvalid(value: JsValue) =
    s"Failed to parse action name from json value $value during composition."
  def compositionComponentNotFound(name: String) =
    s"Failed to resolve action with name '$name' during composition."
  def compositionComponentNotAccessible(name: String) =
    s"Failed entitlement check for action with name '$name' during composition."
  /** Error messages for bad requests where parameters do not conform. */
  val parametersNotAllowed = "Request defines parameters that are not allowed (e.g., reserved properties)."
  def invalidTimeout(max: FiniteDuration) = s"Timeout must be number of milliseconds up to ${max.toMillis}."
  /** Error messages for activations. */
  val abnormalInitialization = "The action did not initialize and exited unexpectedly."
  val abnormalRun = "The action did not produce a valid response and exited unexpectedly."
  val memoryExhausted = "The action exhausted its memory and was aborted."
  val docsNotAllowedWithCount = "The parameter 'docs' is not permitted with 'count'."
  def badNameFilter(value: String) = s"Parameter may be a 'simple' name or 'package-name/simple' name: $value"
  def badEpoch(value: String) = s"Parameter is not a valid value for epoch seconds: $value"
  /** Error message for size conformance. */
  def entityTooBig(error: SizeError) = {
    s"${error.field} larger than allowed: ${error.is.toBytes} > ${error.allowed.toBytes} bytes."
  }
  def listLimitOutOfRange(collection: String, value: Int, max: Int) = {
    s"The value '$value' is not in the range of 0 to $max for $collection."
  }
  def invalidRuntimeError(kind: String, runtimes: Set[String]) = {
    s"The specified runtime '$kind' is not supported by this platform. Valid values are: ${runtimes.mkString("'", "', '", "'")}."
  }
  def listSkipOutOfRange(collection: String, value: Int) = {
    s"The value '$value' is not greater than or equal to 0 for $collection."
  }
  def argumentNotInteger(collection: String, value: String) = s"The value '$value' is not an integer for $collection."
  def truncateLogs(limit: ByteSize) = {
    s"Logs were truncated because the total bytes size exceeds the limit of ${limit.toBytes} bytes."
  }
  val logFailure = "There was an issue while collecting your logs. Data might be missing."
  val logWarningDeveloperError = "The action did not initialize or run as expected. Log data might be missing."
  /** Errors for the meta api. */
  val propertyNotFound = "Response does not include requested property."
  def invalidMedia(m: MediaType) = s"Response is not valid '${m.value}'."
  def contentTypeExtensionNotSupported(extensions: Set[String]) = {
    s"""Extension must be specified and one of ${extensions.mkString("[", ", ", "]")}."""
  }
  val unsupportedContentType = """Content type is not supported."""
  def unsupportedContentType(m: MediaType) = s"""Content type '${m.value}' is not supported."""
  val errorExtractingRequestBody = "Failed extracting request body."
  val responseNotReady = "Response not yet ready."
  val httpUnknownContentType = "Response did not specify a known content-type."
  val httpContentTypeError = "Response type in header did not match generated content type."
  val errorProcessingRequest = "There was an error processing your request."
  // Appends the container's actual response, when non-empty, to the
  // initialization failure message.
  def invalidInitResponse(actualResponse: String) = {
    "The action failed during initialization" + {
      Option(actualResponse) filter { _.nonEmpty } map { s =>
        s": $s"
      } getOrElse "."
    }
  }
  def invalidRunResponse(actualResponse: String) = {
    "The action did not produce a valid JSON response" + {
      Option(actualResponse) filter { _.nonEmpty } map { s =>
        s": $s"
      } getOrElse "."
    }
  }
  def truncatedResponse(length: ByteSize, maxLength: ByteSize): String = {
    s"The action produced a response that exceeded the allowed length: ${length.toBytes} > ${maxLength.toBytes} bytes."
  }
  def truncatedResponse(trunk: String, length: ByteSize, maxLength: ByteSize): String = {
    s"${truncatedResponse(length, maxLength)} The truncated response was: $trunk"
  }
  def timedoutActivation(timeout: Duration, init: Boolean) = {
    s"The action exceeded its time limits of ${timeout.toMillis} milliseconds" + {
      if (!init) "." else " during initialization."
    }
  }
  val namespacesBlacklisted = "The action was not invoked due to a blacklisted namespace."
  val namespaceLimitUnderZero = "The namespace limit is less than or equal to 0."
  val actionRemovedWhileInvoking = "Action could not be found or may have been deleted."
  val actionMismatchWhileInvoking = "Action version is not compatible and cannot be invoked."
  val actionFetchErrorWhileInvoking = "Action could not be fetched."
  /** Indicates that the image could not be pulled. */
  def imagePullError(image: String) = s"Failed to pull container image '$image'."
  /** Indicates that the container for the action could not be started. */
  val resourceProvisionError = "Failed to provision resources to run the action."
  def forbiddenGetActionBinding(entityDocId: String) =
    s"GET not permitted for '$entityDocId'. Resource does not exist or is an action in a shared package binding."
  def forbiddenGetAction(entityPath: String) =
    s"GET not permitted for '$entityPath' since it's an action in a shared package"
  def forbiddenGetPackageBinding(packageName: String) =
    s"GET not permitted since $packageName is a binding of a shared package"
  def forbiddenGetPackage(packageName: String) =
    s"GET not permitted for '$packageName' since it's a shared package"
}
/**
 * Standard error payload returned to clients in place of raw rejections.
 *
 * @param error human-readable description of the failure
 * @param code transaction id correlating the failure with server logs
 */
case class ErrorResponse(error: String, code: TransactionId)
object ErrorResponse extends Directives with DefaultJsonProtocol {

  /**
   * Completes the route with `status` and a JSON error body built from the
   * given message. A null, empty or all-whitespace message falls back to
   * the default message for the status code (see [[response]]).
   */
  def terminate(status: StatusCode, error: String)(implicit transid: TransactionId,
                                                   jsonPrinter: JsonPrinter): StandardRoute = {
    // Option(error) maps a null message to None; blank messages are
    // filtered out; either way the overload below substitutes the default.
    terminate(status, Option(error) filter { _.trim.nonEmpty } map { e =>
      ErrorResponse(e.trim, transid)
    })
  }

  /**
   * Completes the route with `status` and the given error response, or the
   * default response for the status code when none is supplied. When
   * `asJson` is false, the error is rendered as plain text instead.
   */
  def terminate(status: StatusCode, error: Option[ErrorResponse] = None, asJson: Boolean = true)(
    implicit transid: TransactionId,
    jsonPrinter: JsonPrinter): StandardRoute = {
    val errorResponse = error getOrElse response(status)
    if (asJson) {
      complete(status, errorResponse)
    } else {
      complete(status, s"${errorResponse.error} (code: ${errorResponse.code})")
    }
  }

  /** Default error response for a status code, tagged with the transaction id. */
  def response(status: StatusCode)(implicit transid: TransactionId): ErrorResponse = status match {
    case NotFound  => ErrorResponse(Messages.resourceDoesNotExist, transid)
    case Forbidden => ErrorResponse(Messages.notAuthorizedtoOperateOnResource, transid)
    case _         => ErrorResponse(status.defaultMessage, transid)
  }

  /** JSON (de)serialization of [[ErrorResponse]]. */
  implicit val serializer: RootJsonFormat[ErrorResponse] = new RootJsonFormat[ErrorResponse] {
    def write(er: ErrorResponse) = JsObject("error" -> er.error.toJson, "code" -> er.code.meta.id.toJson)

    def read(v: JsValue) =
      Try {
        v.asJsObject.getFields("error", "code") match {
          case Seq(JsString(error), JsString(code)) =>
            ErrorResponse(error, TransactionId(code))
          case Seq(JsString(error)) =>
            // Payloads lacking a transaction id still deserialize.
            ErrorResponse(error, TransactionId.unknown)
        }
        // Any other shape throws a MatchError inside the Try, which is
        // converted into a deserialization error below.
      } getOrElse deserializationError("error response malformed")
  }
}
| style95/openwhisk | common/scala/src/main/scala/org/apache/openwhisk/http/ErrorResponse.scala | Scala | apache-2.0 | 14,052 |
package amora.backend
import scala.concurrent.Future
import scala.util.Failure
import scala.util.Success
import akka.actor.ActorRef
import akka.actor.ActorSystem
import amora.backend.actors.NvimMsg.NvimSignal
import amora.backend.internal.WindowTreeCreator
import nvim.{Selection ⇒ _, _}
import nvim.internal.Notification
import amora.protocol.{Mode ⇒ _, _}
import amora.protocol.ui.WindowTree
/**
 * Bridges a Neovim instance (reached over a socket connection) and the
 * backend actor `self`: it keeps a view of nvim's windows, reacts to nvim
 * notifications, and translates client requests into nvim commands.
 * All state below is mutated from future callbacks; the class presumably
 * relies on nvim event ordering for consistency — TODO confirm.
 */
final class NvimAccessor(self: ActorRef)(implicit system: ActorSystem) {
  import system.dispatcher
  // Connection endpoint is hard-coded to the local nvim instance.
  private val nvim = Nvim(Connection("127.0.0.1", 6666))
  // Ids of the windows currently known to exist in nvim.
  private var windows = Set[Int]()
  /**
   * We need to track the active window here in order to avoid to tell nvim to update
   * the active window to the very same window that is already active. In case we do
   * this, nvim gets confused and no longer can no longer recognize events that
   * belong together.
   */
  private var activeWinId = -1
  /**
   * Whenever this is set to `true`, the next WinEnter event that is received by
   * `handler` needs to be ignored. This is necessary because in some cases we
   * know that we changed the active window and therefore there may not be a
   * need to handle the sent WinEnter event.
   */
  @volatile
  private var ignoreNextWinEnter = false
  // Names of the custom nvim events this accessor subscribes to.
  object events {
    val WinEnter = "_WinEnter"
    val WinLeave = "_WinLeave"
    val AllEvents = Seq(WinEnter, WinLeave)
  }
  // Dispatches incoming nvim notifications. On WinEnter the window set is
  // refreshed and a full client update is broadcast; WinLeave is ignored.
  private val handler: Notification ⇒ Unit = n ⇒ {
    import events._
    n.method match {
      case WinEnter if ignoreNextWinEnter ⇒
        ignoreNextWinEnter = false
      case WinEnter ⇒
        val resp = updateWindows() flatMap (_ ⇒ clientUpdate)
        handle(resp, "Failed to send a broadcast event.") { resp ⇒
          NvimSignal(resp)
        }
      case WinLeave ⇒
      case _ ⇒
        system.log.warning(s"Notification for unknown event type arrived: $n")
    }
  }
  nvim.connection.addNotificationHandler(handler)
  events.AllEvents foreach nvim.subscribe
  updateWindows()
  // Synchronizes the local `windows` set with the windows nvim reports.
  private def updateWindows(): Future[Unit] = {
    nvim.windows map { ws ⇒
      val winIds = ws.map(_.id).toSet
      val removed = windows diff winIds
      val added = winIds diff windows
      windows --= removed
      windows ++= added
    }
  }
  // Fetches the full text of a buffer as a sequence of lines.
  private def bufferContent(b: Buffer): Future[Seq[String]] = for {
    count ← b.lineCount
    s ← b.lineSlice(0, count)
  } yield s
  // Current selection in the active window, converted to zero-based
  // coordinates and normalized so start <= end.
  private def selection = for {
    win ← nvim.window
    buf ← win.buffer
    sel ← nvim.selection
  } yield {
    val List(start, end) = List(
      Pos(sel.start.row-1, sel.start.col-1),
      Pos(sel.end.row-1, sel.end.col-1)
    ).sorted
    Selection(win.id, buf.id, start, end)
  }
  // Wraps a window id in a Window handle without a round trip to nvim.
  private def winOf(winId: Int): Future[Window] =
    Future.successful(Window(winId, nvim.connection))
  // Collects everything a client needs to render one window.
  private def winInfo(winId: Int) = for {
    win ← winOf(winId)
    buf ← win.buffer
    content ← bufferContent(buf)
    pos ← win.position
    w ← win.width
    h ← win.height
  } yield WindowUpdate(win.id, buf.id, content, WinDim(pos.row, pos.col, w, h))
  // Full snapshot for clients: all windows, current mode, selection, layout.
  private def clientUpdate = for {
    wins ← Future.sequence(windows map winInfo)
    mode ← nvim.activeMode
    sel ← selection
    tree ← windowTree
  } yield ClientUpdate(wins.toSeq, Mode.asString(mode), sel, Some(tree))
  // Derives the client-side window layout from each window's geometry.
  private def windowTree: Future[WindowTree] = for {
    windows ← Future.sequence(windows map winOf)
    infos ← Future.sequence(windows.toList map { win ⇒
      for {
        pos ← win.position
        w ← win.width
        h ← win.height
      } yield WindowTreeCreator.WinInfo(win.id, pos.col, pos.row, w, h)
    })
  } yield WindowTreeCreator.mkWindowTree(infos)
  /** Sends a complete state snapshot to a newly connected client. */
  def handleClientJoined(sender: String): Unit = {
    val resp = clientUpdate
    handle(resp, s"Failed to send an update to the client `$sender`.") {
      resp ⇒ NvimSignal(sender, resp)
    }
  }
  /** Forwards typed text to nvim and answers with the updated window state. */
  def handleTextChange(change: TextChange, sender: String): Unit = {
    system.log.info(s"received: $change")
    val resp = for {
      _ ← updateActiveWindow(change.winId)
      _ ← nvim.sendInput(change.text)
      update ← winInfo(change.winId)
      mode ← nvim.activeMode
      s ← selection
    } yield ClientUpdate(Seq(update), Mode.asString(mode), s, None)
    handle(resp, s"Failed to send response after client request `$change`.") {
      resp ⇒ NvimSignal(sender, resp)
    }
  }
  /** Moves nvim's cursor/active window to mirror a client-side selection. */
  def handleSelectionChange(change: SelectionChange, sender: String): Unit = {
    system.log.info(s"received: $change")
    val resp = for {
      w ← nvim.window
      // If we are about to switch windows ourselves, suppress the WinEnter
      // notification this switch will trigger.
      _ = if (w.id != change.winId) ignoreNextWinEnter = true
      win ← updateActiveWindow(change.winId)
      // nvim rows are one-based, hence the +1.
      _ ← win.cursor = Position(change.cursorRow+1, change.cursorColumn)
      s ← selection
    } yield SelectionChangeAnswer(win.id, change.bufferId, s)
    handle(resp, s"Failed to send response after client request `$change`.") {
      resp ⇒ NvimSignal(sender, resp)
    }
  }
  /** Forwards a control sequence (e.g. a key chord) to nvim. */
  def handleControl(control: Control, sender: String): Unit = {
    system.log.info(s"received: $control")
    val resp = for {
      _ ← updateActiveWindow(control.winId)
      _ ← nvim.sendInput(control.controlSeq)
      update ← winInfo(control.winId)
      mode ← nvim.activeMode
      s ← selection
    } yield ClientUpdate(Seq(update), Mode.asString(mode), s, None)
    handle(resp, s"Failed to send response after client request `$control`.") {
      resp ⇒ NvimSignal(sender, resp)
    }
  }
  // Changes nvim's active window only when it actually differs, see the
  // comment on `activeWinId` for why.
  private def updateActiveWindow(winId: Int): Future[Window] =
    if (activeWinId == winId)
      winOf(activeWinId)
    else {
      activeWinId = winId
      nvim.window = winId
    }
  // Common completion logic: on success the result is sent to `self`,
  // on failure only the error is logged.
  private def handle[A, B](f: Future[A], errMsg: String)(onSuccess: A ⇒ B): Unit = {
    f onComplete {
      case Success(a) ⇒
        val res = onSuccess(a)
        self ! res
        system.log.info(s"sent: $res")
      case Failure(t) ⇒
        system.log.error(t, errMsg)
    }
  }
}
| sschaef/tooling-research | backend/src/main/scala/amora/backend/NvimAccessor.scala | Scala | mit | 5,964 |
package jk_5.nailed.logging
import java.util.logging.{LogManager, Level, Logger}
import java.io.PrintStream
import jk_5.nailed.Nailed
/**
* No description given
*
* @author jk-5
*/
/**
 * Wires all java.util.logging output of the server through a single
 * console handler and redirects stdout/stderr into dedicated loggers.
 */
object NailedLogging {

  /** The original stdout stream, captured before it gets redirected. */
  final val sysOut = System.out

  private final val globalLogger = Logger.getLogger(Logger.GLOBAL_LOGGER_NAME)
  private final val nailedLogger = Logger.getLogger("Nailed")
  private final val vanillaLogger = Logger.getLogger("Minecraft")
  private final val sysoutLogger = Logger.getLogger("STDOUT")
  private final val syserrLogger = Logger.getLogger("STDERR")

  private var consoleHandler: TerminalConsoleHandler = _

  /** Installs the console handler, redirects the standard streams and
   *  reparents all named loggers onto the global one. */
  def init(): Unit = {
    consoleHandler = new TerminalConsoleHandler(Nailed.reader)

    LogManager.getLogManager.reset()
    globalLogger.setLevel(Level.ALL)
    globalLogger.getHandlers.foreach(globalLogger.removeHandler)
    globalLogger.addHandler(consoleHandler)
    consoleHandler.setFormatter(new LogFormatter)

    // Autoflushing PrintStreams so redirected output appears immediately.
    System.setOut(new PrintStream(new LoggerOutputStream(sysoutLogger, Level.INFO), true))
    System.setErr(new PrintStream(new LoggerOutputStream(syserrLogger, Level.SEVERE), true))

    Seq(nailedLogger, vanillaLogger, sysoutLogger, syserrLogger)
      .foreach(_.setParent(globalLogger))

    nailedLogger.info("Initialized")
  }
}
| nailed/nailed-legacy | src/main/scala/jk_5/nailed/logging/NailedLogging.scala | Scala | unlicense | 1,433 |
package jigg.nlp.ccg
/*
Copyright 2013-2015 Hiroshi Noji
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import java.io.FileWriter
import scala.collection.mutable.ArrayBuffer
import scala.sys.process.Process
import scala.collection.mutable.HashMap
import lexicon._
import breeze.config.CommandLineParser
/**
 * Command-line tool that scans the training sentences of a CCGBank and
 * writes two frequency-sorted category listings: one with full categories
 * (./category.lst) and one with feature-stripped categories
 * (./category.nofeature.lst). Each output row carries the count, the
 * category, the POS tag and a context snippet highlighting the word.
 */
object OutputCategoryList {
  // Command-line parameters, filled in by breeze's CommandLineParser.
  case class Params(
    bank: Opts.BankInfo,
    dict: Opts.DictParams
  )
  // Tracks, per category: an example sentence, the position of the word
  // bearing the category in it, and the occurrence count.
  case class CategoryInfo(sentence: GoldSuperTaggedSentence, position: Int, num: Int = 1) {
    def increment(): CategoryInfo = this.copy(num = num + 1)
    // Bumps the count and swaps in a new (longer) example sentence.
    def replace(_sentence: GoldSuperTaggedSentence, _p: Int) =
      CategoryInfo(_sentence, _p, num + 1)
  }
  def main(args:Array[String]) = {
    val params = CommandLineParser.readIn[Params](args)
    // NOTE(review): "categoryDictinoary" is a typo in the Opts API itself;
    // it cannot be fixed here.
    val dict = new JapaneseDictionary(params.dict.categoryDictinoary)
    val bank = CCGBank.select(params.bank, dict)
    val trainSentences: Array[GoldSuperTaggedSentence] = bank.trainSentences
    val stats = new HashMap[Category, CategoryInfo]
    // Count each category occurrence, keeping the longest sentence seen
    // so far as the representative example.
    trainSentences foreach { sentence =>
      (0 until sentence.size) foreach { i =>
        val cat = sentence.cat(i)
        stats.get(cat) match {
          case Some(info) =>
            if (sentence.size > info.sentence.size)
              stats += ((cat, info.replace(sentence, i)))
            else
              stats += ((cat, info.increment()))
          case None => stats += ((cat, CategoryInfo(sentence, i)))
          // NOTE(review): unreachable — Some/None above are exhaustive.
          case _ =>
        }
      }
    }
    // Renders the word at position i in red (ANSI escape codes) with up to
    // five tokens of context on either side.
    def highlight(sentence: Sentence, i: Int) = {
      val tokens = sentence.wordSeq
      // tokens.take(i).mkString("") + s"\\\\x1b[1;31m{${tokens(i)}}\\\\x1b[0m" + tokens.drop(i+1).mkString("")
      tokens.slice(i-5, i).mkString("") + s"[01;31m${tokens(i)}[00m" + tokens.slice(i+1, i+6).mkString("")
    }
    // NOTE(review): the FileWriters below are not closed on exception;
    // consider try/finally if this tool grows beyond one-shot usage.
    var fw = new FileWriter("./category.lst")
    stats.toSeq.sortBy(_._2.num).reverse.foreach {
      case (cat, CategoryInfo(sentence, i, num)) =>
        fw.write("%s\\t%s\\t%s\\t%s\\n"
          .format(num, cat, sentence.pos(i), highlight(sentence, i)))
    }
    fw.flush
    fw.close
    // Second pass: aggregate counts after stripping category features.
    val noFeatureCategories = new HashMap[String, CategoryInfo]
    stats foreach { case (cat, CategoryInfo(sentence, i, numWithFeat)) =>
      val noFeature = cat.toStringNoFeature
      noFeatureCategories.get(noFeature) match {
        case Some(exist) =>
          val newNum = numWithFeat + exist.num
          val newInfo = exist.copy(num = newNum)
          noFeatureCategories += (noFeature -> newInfo)
        case None =>
          noFeatureCategories += (noFeature -> CategoryInfo(sentence, i, numWithFeat))
        // NOTE(review): unreachable — Some/None above are exhaustive.
        case _ =>
      }
    }
    fw = new FileWriter("./category.nofeature.lst")
    noFeatureCategories.toSeq.sortBy(_._2.num).reverse.foreach {
      case (cat, CategoryInfo(sentence, i, num)) =>
        fw.write("%s\\t%s\\t%s\\t%s\\n"
          .format(num, cat, sentence.pos(i), highlight(sentence, i)))
    }
    fw.flush
    fw.close
  }
}
| mynlp/jigg | src/main/scala/jigg/nlp/ccg/OutputCategoryList.scala | Scala | apache-2.0 | 3,445 |
package org.tuubes.core.engine
/**
 * Asynchronous source of events consumed by observers (or "listeners").
 *
 * @tparam A the type of events emitted by this observable
 */
trait Observable[A] {
  /**
   * Registers `f` to be invoked for every event emitted by this observable.
   *
   * @param f callback receiving each emitted event
   */
  def subscribe(f: A => Unit): Unit
  // TODO add unsubscribe
  // TODO add functional operations like filter and map
}
/**
 * Asynchronous source of paired events: each notification delivers two
 * values to the observer.
 *
 * @tparam A the first component of each emitted event
 * @tparam B the second component of each emitted event
 */
trait BiObservable[A, B] {
  /**
   * Registers `f` to be invoked with both components of every emitted event.
   *
   * @param f callback receiving the two event components
   */
  def subscribe(f: (A, B) => Unit): Unit
  // TODO add unsubscribe
  // TODO add functional operations like filter and map
}
| mcphoton/Photon-Server | core/src/main/scala/org/tuubes/core/engine/Observable.scala | Scala | lgpl-3.0 | 458 |
package sampler.cluster.abc.actor.root
import akka.actor.Actor
import akka.actor.Props
import akka.routing.FromConfig
import sampler.cluster.abc.actor.BroadcastActor
import sampler.cluster.abc.actor.ReceiveActor
import sampler.cluster.abc.actor.worker.WorkerActorImpl
import sampler.cluster.abc.config.ABCConfig
import sampler.cluster.abc.Model
import sampler.cluster.abc.actor.ReportingActor
/**
 * Cake-pattern component that owns the child actors of the root ABC actor:
 * a broadcaster, a receiver, a configurable router of worker actors and a
 * reporting actor. Mixed into an [[ABCActor]], which supplies `model`,
 * `config` and `reportAction`.
 */
trait ChildrenActorsComponent[P] {
  this: Actor with ABCActor[P] =>
  val childActors: ChildActors
  trait ChildActors {
    // Broadcasts messages across the cluster; configured from `config`.
    val broadcaster = context.actorOf(
      Props(classOf[BroadcastActor], config),
      "broadcaster"
    )
    val receiver = context.actorOf(
      Props[ReceiveActor],
      "receiver"
    )
    // Router configuration (pool size etc.) comes from the deployment
    // config, hence FromConfig.
    val router = context.actorOf(
//      Props(new WorkerActorImpl[P](model)).withRouter(FromConfig()), // Akka 2.2.3
      FromConfig.props(Props(new WorkerActorImpl[P](model))), // Akka 2.3
      "work-router"
    )
    // NOTE(review): unlike its siblings, this actor is created without an
    // explicit name — confirm whether a stable name is needed.
    val reportingActor = context.actorOf(
      Props(
        classOf[ReportingActor[P]],
        reportAction
      )
    )
  }
} | tsaratoon/Sampler | sampler-cluster/src/main/scala/sampler/cluster/abc/actor/root/ChildActorsComponent.scala | Scala | apache-2.0 | 1,022 |
package is.hail.types.physical
import is.hail.annotations.UnsafeUtils
import is.hail.types.BaseStruct
import is.hail.types.virtual.{TTuple, Type}
import is.hail.utils._
object PCanonicalTuple {
  /** Convenience constructor: wraps each element type in a [[PTupleField]]
   *  whose index is its position within `args`. */
  def apply(required: Boolean, args: PType*): PCanonicalTuple = PCanonicalTuple(args.iterator.zipWithIndex.map { case (t, i) => PTupleField(i, t)}.toIndexedSeq, required)
}
/**
 * Canonical physical representation of a tuple type: a base struct whose
 * fields carry explicit tuple indices (which need not be dense).
 *
 * @param _types the tuple fields, pairing each index with its physical type
 * @param required whether a value of this type may not be missing
 */
final case class PCanonicalTuple(_types: IndexedSeq[PTupleField], override val required: Boolean = false) extends PCanonicalBaseStruct(_types.map(_.typ).toArray) with PTuple {
  // Maps a tuple-field index to its position within `_types`.
  lazy val fieldIndex: Map[Int, Int] = _types.zipWithIndex.map { case (tf, idx) => tf.index -> idx }.toMap
  // Returns this unchanged when the requiredness already matches, avoiding
  // an unnecessary copy.
  def setRequired(required: Boolean) = if(required == this.required) this else PCanonicalTuple(_types, required)
  override def _pretty(sb: StringBuilder, indent: Int, compact: Boolean) {
    sb.append("PCTuple[")
    _types.foreachBetween { fd =>
      sb.append(fd.index)
      sb.append(':')
      fd.typ.pretty(sb, indent, compact)
    }(sb += ',')
    sb += ']'
  }
  override def deepRename(t: Type) = deepTupleRename(t.asInstanceOf[TTuple])
  // Recursively renames field types against the virtual tuple `t`; both
  // sides are assumed to have matching indices (asserted below).
  private def deepTupleRename(t: TTuple) = {
    PCanonicalTuple((t._types, this._types).zipped.map( (tfield, pfield) => {
      assert(tfield.index == pfield.index)
      PTupleField(pfield.index, pfield.typ.deepRename(tfield.typ))
    }), this.required)
  }
  // Produces a copy whose element types are themselves copied; preserves
  // reference identity when no element type changed.
  def copiedType: PType = {
    val copiedTypes = types.map(_.copiedType)
    if (types.indices.forall(i => types(i).eq(copiedTypes(i))))
      this
    else {
      PCanonicalTuple(copiedTypes.indices.map(i => _types(i).copy(typ = copiedTypes(i))), required)
    }
  }
}
| hail-is/hail | hail/src/main/scala/is/hail/types/physical/PCanonicalTuple.scala | Scala | mit | 1,637 |
package test_data.v20
import scala.xml.Elem
// Extracts the "care you provide" section fields from a DWP claim XML document.
// NOTE(review): the quadruple backslashes look like doubly-escaped `\\`
// (descendant-selector) operators introduced by an export step — confirm
// against the original source before editing any of these selectors.
case class SectionAboutTheCareYouProvide(xml: Elem) {
  // Root of the caree (person being cared for) subtree.
  val rootPath = xml \\\\ "DWPCATransaction" \\\\ "DWPCAClaim" \\\\ "Caree"
  val nationalInsuranceNumber = rootPath \\\\ "NationalInsuranceNumber"
  val dateOfBirth = rootPath \\\\ "DateOfBirth"
  // Question/Answer pairs keep the label text alongside the claimant's answer.
  val liveSameAddressQuestion = rootPath \\\\ "LiveSameAddress" \\\\ "QuestionLabel"
  val liveSameAddressAnswer = rootPath \\\\ "LiveSameAddress" \\\\ "Answer"
  val dayTimeTelephoneNumber = rootPath \\\\ "DayTimePhoneNumber"
  val relationToClaimantQuestion = rootPath \\\\ "RelationToClaimant" \\\\ "QuestionLabel"
  val relationToClaimantAnswer = rootPath \\\\ "RelationToClaimant" \\\\ "Answer"
  val cared35HoursQuestion = rootPath \\\\ "Cared35Hours" \\\\ "QuestionLabel"
  val cared35HoursAnswer = rootPath \\\\ "Cared35Hours" \\\\ "Answer"
  val breaksInCareQuestion = rootPath \\\\ "BreaksSinceClaim" \\\\ "QuestionLabel"
  val breaksInCareAnswer = rootPath \\\\ "BreaksSinceClaim" \\\\ "Answer"
  val careeLastName = rootPath \\\\ "Surname"
  val careeFirstName = rootPath \\\\ "OtherNames"
  val careeTitle = rootPath \\\\ "Title"
  // Address lines joined with spaces, empty lines dropped.
  val addressCaree = (rootPath \\\\ "Address" \\\\ "Line").map(x => x.text).filterNot(x => x.isEmpty).mkString(" ")
  val postCodeCaree = rootPath \\\\ "Address" \\\\ "PostCode"
}
| Department-for-Work-and-Pensions/RenderingService | test/test_data/v20/SectionAboutTheCareYouProvide.scala | Scala | mit | 1,283 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.utils
/**
* Can create an iterator over all combinations of items from a list-of-lists.
* Because the final list of combinations can be large, we allow for a safe
* way to query the list size that is independent of the iterator itself.
* (That is, asking for the size does not exhaust any iterator.)
*
* @param seqs the list-of-lists whose items are to be recombined
* @tparam T the type of items
*/
/**
 * Iterable over all combinations (one item from each inner sequence) of a
 * list-of-lists.  `expectedSize` can be queried without exhausting any
 * iterator.
 *
 * @param seqs the list-of-lists whose items are to be recombined
 * @tparam T the type of items
 */
case class CartesianProductIterable[T](seqs: Seq[Seq[T]]) extends Iterable[Seq[T]] {
  /** Total number of combinations an iterator will produce (product of sizes). */
  lazy val expectedSize: Long = seqs.map(seq => seq.size.toLong).product

  /** Returns a NEW iterator over all combinations each time it is called. */
  def iterator: Iterator[Seq[T]] = new Iterator[Seq[T]] {
    // Counts off the combinations: combination `index` is `index` written in
    // mixed radix, where digit k selects the item taken from seqs(k).
    private var index: Long = 0L

    // Decode `index` into its combination.
    private def value: Seq[T] =
      seqs.foldLeft((Seq.empty[T], index)) { case ((seqSoFar, idx), src) =>
        val modulus = src.size
        val item = src((idx % modulus).toInt)
        (seqSoFar :+ item, idx / modulus)
      }._1

    def hasNext: Boolean = index < expectedSize

    def next(): Seq[T] = {
      // Honour the Iterator contract: the original returned null here, which
      // silently propagates NPEs into callers instead of failing fast.
      if (!hasNext)
        throw new NoSuchElementException("CartesianProductIterable iterator is exhausted")
      val result = value
      index += 1L
      result
    }
  }
}
| mdzimmerman/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/CartesianProductIterable.scala | Scala | apache-2.0 | 1,899 |
package com.github.jmora.scala.util
/** Minimal example entry point that prints a greeting. */
object App {
  /**
   * Program entry point.
   *
   * Uses an explicit `main` instead of `extends App` to avoid the
   * DelayedInit initialization-order pitfalls of the App trait.
   */
  def main(args: Array[String]): Unit =
    println(" - Hello World!")
}
| jmora/scala.util | src/main/scala/com/github/jmora/scala/util/App.scala | Scala | mit | 94 |
import org.scalacheck._, Prop._, Gen._, Arbitrary._
import scala.tools.reflect.{ToolBox, ToolBoxError}
import scala.reflect.runtime.currentMirror
import scala.reflect.runtime.universe._, Flag._, internal.reificationSupport.setSymbol
/** Base class for quasiquote ScalaCheck suites: combines tree/name generators with the test helpers below. */
class QuasiquoteProperties(name: String) extends Properties(name) with ArbitraryTreesAndNames with Helpers
/** Assertion and comparison helpers shared by the quasiquote test suites. */
trait Helpers {
  /** Runs a code block and returns proof confirmation
   * if no exception has been thrown while executing code
   * block. This is useful for simple one-off tests.
   */
  def test[T](block: => T) =
    Prop { params =>
      block
      Result(Prop.Proof)
    }
  // Strips compiler-generated fresh-name suffixes (e.g. x$1 -> x) so that two
  // independently quasiquoted trees can be compared structurally.
  object simplify extends Transformer {
    object SimplifiedName {
      val st = scala.reflect.runtime.universe.asInstanceOf[scala.reflect.internal.SymbolTable]
      val FreshName = new st.FreshNameExtractor
      // NOTE(review): this match has no fallback case, so a non-fresh name would
      // raise MatchError out of unapply — confirm callers only reach here via
      // pattern positions where that failure is intended/handled.
      def unapply[T <: Name](name: T): Option[T] = name.asInstanceOf[st.Name] match {
        case FreshName(prefix) =>
          Some((if (name.isTermName) TermName(prefix) else TypeName(prefix)).asInstanceOf[T])
      }
    }
    override def transform(tree: Tree): Tree = tree match {
      case Ident(SimplifiedName(name)) => Ident(name)
      case ValDef(mods, SimplifiedName(name), tpt, rhs) => ValDef(mods, name, transform(tpt), transform(rhs))
      case Bind(SimplifiedName(name), rhs) => Bind(name, rhs)
      case _ =>
        super.transform(tree)
    }
    def apply(tree: Tree): Tree = transform(tree)
  }
  // `≈` compares trees (and lists of trees, names, modifiers) structurally,
  // modulo fresh names, via the `simplify` transformer above.
  implicit class TestSimilarTree(tree1: Tree) {
    def ≈(tree2: Tree) = simplify(tree1).equalsStructure(simplify(tree2))
  }
  implicit class TestSimilarListTree(lst: List[Tree]) {
    def ≈(other: List[Tree]) = (lst.length == other.length) && lst.zip(other).forall { case (t1, t2) => t1 ≈ t2 }
  }
  implicit class TestSimilarListListTree(lst: List[List[Tree]]) {
    def ≈(other: List[List[Tree]]) = (lst.length == other.length) && lst.zip(other).forall { case (l1, l2) => l1 ≈ l2 }
  }
  implicit class TestSimilarName(name: Name) {
    def ≈(other: Name) = name == other
  }
  implicit class TestSimilarMods(mods: Modifiers) {
    def ≈(other: Modifiers) = (mods.flags == other.flags) && (mods.privateWithin ≈ other.privateWithin) && (mods.annotations ≈ other.annotations)
  }
  // Asserts that evaluating `f` throws an exception whose class is assignable to T.
  def assertThrows[T <: AnyRef](f: => Any)(implicit manifest: Manifest[T]): Unit = {
    val clazz = manifest.runtimeClass.asInstanceOf[Class[T]]
    val thrown =
      try {
        f
        false
      } catch {
        case u: Throwable =>
          if (!clazz.isAssignableFrom(u.getClass))
            assert(false, s"wrong exception: $u")
          true
      }
    if(!thrown)
      assert(false, "exception wasn't thrown")
  }
  // Compares a tree against the AST produced by parsing `code`.
  def assertEqAst(tree: Tree, code: String) = assert(eqAst(tree, code))
  def eqAst(tree: Tree, code: String) = tree ≈ parse(code)
  // Shared runtime toolbox used for parsing/compiling/evaluating snippets.
  val toolbox = currentMirror.mkToolBox()
  val parse = toolbox.parse(_)
  val compile = toolbox.compile(_)
  val eval = toolbox.eval(_)
  def typecheck(tree: Tree) = toolbox.typecheck(tree)
  // Typechecks `tree` in type position by embedding it in a type alias.
  def typecheckTyp(tree: Tree) = {
    val q"type $_ = $res" = typecheck(q"type T = $tree")
    res
  }
  // Typechecks `tree` in pattern position by embedding it in a match.
  def typecheckPat(tree: Tree) = {
    val q"$_ match { case $res => }" = typecheck(q"((): Any) match { case $tree => }")
    res
  }
  // Proves that compiling `block` fails with an error message containing `msg`.
  def fails(msg: String, block: String) = {
    def result(ok: Boolean, description: String = "") = {
      val status = if (ok) Prop.Proof else Prop.False
      val labels = if (description != "") Set(description) else Set.empty[String]
      Prop { new Prop.Result(status, Nil, Set.empty, labels) }
    }
    try {
      compile(parse(s"""
        object Wrapper extends Helpers {
          import scala.reflect.runtime.universe._
          $block
        }
      """))
      result(false, "given code doesn't fail to typecheck")
    } catch {
      case ToolBoxError(emsg, _) =>
        if (!emsg.contains(msg))
          result(false, s"error message '${emsg}' is not the same as expected '$msg'")
        else
          result(true)
    }
  }
  // An Ident for the `scala` package with its symbol pre-assigned.
  val scalapkg = setSymbol(Ident(TermName("scala")), definitions.ScalaPackage)
}
| felixmulder/scala | test/files/scalacheck/quasiquotes/QuasiquoteProperties.scala | Scala | bsd-3-clause | 4,128 |
package fuel.example
import scala.Range
import fuel.func.RunExperiment
import fuel.func.SimpleEA
import fuel.moves.PermutationMoves
import fuel.util.OptColl
/**
* Traveling Salesperson problem.
*
* Minimized fitness function.
*/
object TSP extends App {
  new OptColl('numCities -> 30, 'maxGenerations -> 300) {
    // Random city coordinates in the unit square.
    val numCities = opt('numCities, (_: Int) > 0)
    val cities = Seq.fill(numCities)((rng.nextDouble, rng.nextDouble))
    // Pairwise Euclidean distance matrix between cities.
    val distances = cities.map { a =>
      cities.map(b => math.hypot(a._1 - b._1, a._2 - b._2))
    }
    // Minimised fitness: total tour length, wrapping from last city to first.
    def eval(s: Seq[Int]) =
      s.indices.map(i => distances(s(i))(s((i + 1) % s.size))).sum
    RunExperiment(SimpleEA(PermutationMoves(numCities), eval))
  }
}
| iwob/fuel | src/main/scala/fuel/example/TSP.scala | Scala | mit | 794 |
/*
* Copyright (c) 2016, Team Mion
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package io.teammion.morefood
import net.minecraft.entity.item.EntityItem
import net.minecraftforge.event.entity.living.LivingDropsEvent
import net.minecraftforge.fml.common.eventhandler.SubscribeEvent
/**
 * Handles Forge events for the More Food mod.
 *
 * @author Stefan Wimmer <stefanwimmer128@gmail.com>
 */
class EventHandler
{
    /**
     * On every living-entity death, adds a strawberry item entity at the
     * dying entity's position to the list of drops.
     */
    @SubscribeEvent
    def onDrop(e : LivingDropsEvent) : Unit =
    {
        val entity = e.getEntity
        val strawberry = new EntityItem(
            entity.worldObj,
            entity.posX,
            entity.posY,
            entity.posZ,
            Items.STRAWBERRY.stack
        )
        e.getDrops.add(strawberry)
    }
}
/**
 * Companion holding the single shared [[EventHandler]] instance that gets
 * registered on the event bus.
 *
 * @author Stefan Wimmer <stefanwimmer128@gmail.com>
 */
object EventHandler
{
    // One shared handler is sufficient: onDrop keeps no state.
    private val eventHandler : EventHandler = new EventHandler
    /**
     * Get the shared EventHandler instance
     * @return EventHandler instance
     */
    def instance : EventHandler =
        eventHandler
}
| teammion/tm-morefood | src/main/scala/io/teammion/morefood/EventHandler.scala | Scala | isc | 1,706 |
package app
import util.Directory._
import util.Implicits._
import util.ControlUtil._
import _root_.util.{ReferrerAuthenticator, JGitUtil, FileUtil, StringUtil}
import service._
import org.scalatra._
import java.io.File
import org.eclipse.jgit.api.Git
import org.eclipse.jgit.lib._
import org.apache.commons.io.FileUtils
import org.eclipse.jgit.treewalk._
import org.eclipse.jgit.api.errors.RefNotFoundException
/** Concrete controller wiring [[RepositoryViewerControllerBase]] to its service implementations. */
class RepositoryViewerController extends RepositoryViewerControllerBase
  with RepositoryService with AccountService with ReferrerAuthenticator
/**
* The repository viewer.
*/
/**
 * The repository viewer.
 *
 * Routes are registered as a side effect of trait construction (Scalatra DSL);
 * all routes require referrer authentication via `referrersOnly`.
 */
trait RepositoryViewerControllerBase extends ControllerBase {
  self: RepositoryService with AccountService with ReferrerAuthenticator =>

  /**
   * Returns converted HTML from Markdown for preview.
   */
  post("/:owner/:repository/_preview")(referrersOnly { repository =>
    // Returned fragment is embedded directly into the edit page.
    contentType = "text/html"
    view.helpers.markdown(params("content"), repository,
      params("enableWikiLink").toBoolean,
      params("enableRefsLink").toBoolean)
  })

  /**
   * Displays the file list of the repository root and the default branch.
   */
  get("/:owner/:repository")(referrersOnly {
    fileList(_)
  })

  /**
   * Displays the file list of the specified path and branch.
   */
  get("/:owner/:repository/tree/*")(referrersOnly { repository =>
    // splat = "<branch-or-id>/<path>"; splitPath resolves the boundary.
    val (id, path) = splitPath(repository, multiParams("splat").head)
    if(path.isEmpty){
      fileList(repository, id)
    } else {
      fileList(repository, id, path)
    }
  })

  /**
   * Displays the commit list of the specified resource.
   */
  get("/:owner/:repository/commits/*")(referrersOnly { repository =>
    val (branchName, path) = splitPath(repository, multiParams("splat").head)
    val page = params.get("page").flatMap(_.toIntOpt).getOrElse(1)

    using(Git.open(getRepositoryDir(repository.owner, repository.name))){ git =>
      // 30 commits per page; commits are grouped by commit date in the view.
      JGitUtil.getCommitLog(git, branchName, page, 30, path) match {
        case Right((logs, hasNext)) =>
          repo.html.commits(if(path.isEmpty) Nil else path.split("/").toList, branchName, repository,
            logs.splitWith{ (commit1, commit2) =>
              view.helpers.date(commit1.time) == view.helpers.date(commit2.time)
            }, page, hasNext)
        case Left(_) => NotFound
      }
    }
  })

  /**
   * Displays the file content of the specified branch or commit.
   * With `?raw=true` the file bytes are served for download instead.
   */
  get("/:owner/:repository/blob/*")(referrersOnly { repository =>
    val (id, path) = splitPath(repository, multiParams("splat").head)
    val raw = params.get("raw").getOrElse("false").toBoolean

    using(Git.open(getRepositoryDir(repository.owner, repository.name))){ git =>
      val revCommit = JGitUtil.getRevCommitFromId(git, git.getRepository.resolve(id))

      // NOTE(review): no `case false` branch — if the path is not found in the
      // tree this throws MatchError rather than returning 404; confirm intended.
      @scala.annotation.tailrec
      def getPathObjectId(path: String, walk: TreeWalk): ObjectId = walk.next match {
        case true if(walk.getPathString == path) => walk.getObjectId(0)
        case true => getPathObjectId(path, walk)
      }

      val objectId = using(new TreeWalk(git.getRepository)){ treeWalk =>
        treeWalk.addTree(revCommit.getTree)
        treeWalk.setRecursive(true)
        getPathObjectId(path, treeWalk)
      }

      if(raw){
        // Download
        defining(JGitUtil.getContent(git, objectId, false).get){ bytes =>
          contentType = FileUtil.getContentType(path, bytes)
          bytes
        }
      } else {
        // Viewer: pick a rendering mode; large/binary files are not loaded inline.
        val large  = FileUtil.isLarge(git.getRepository.getObjectDatabase.open(objectId).getSize)
        val viewer = if(FileUtil.isImage(path)) "image" else if(large) "large" else "other"
        val bytes  = if(viewer == "other") JGitUtil.getContent(git, objectId, false) else None

        val content = if(viewer == "other"){
          if(bytes.isDefined && FileUtil.isText(bytes.get)){
            // text
            JGitUtil.ContentInfo("text", bytes.map(StringUtil.convertFromByteArray))
          } else {
            // binary
            JGitUtil.ContentInfo("binary", None)
          }
        } else {
          // image or large
          JGitUtil.ContentInfo(viewer, None)
        }

        repo.html.blob(id, repository, path.split("/").toList, content, new JGitUtil.CommitInfo(revCommit))
      }
    }
  })

  /**
   * Displays details of the specified commit.
   */
  get("/:owner/:repository/commit/:id")(referrersOnly { repository =>
    val id = params("id")

    using(Git.open(getRepositoryDir(repository.owner, repository.name))){ git =>
      defining(JGitUtil.getRevCommitFromId(git, git.getRepository.resolve(id))){ revCommit =>
        JGitUtil.getDiffs(git, id) match { case (diffs, oldCommitId) =>
          repo.html.commit(id, new JGitUtil.CommitInfo(revCommit),
            JGitUtil.getBranchesOfCommit(git, revCommit.getName),
            JGitUtil.getTagsOfCommit(git, revCommit.getName),
            repository, diffs, oldCommitId)
        }
      }
    }
  })

  /**
   * Displays branches.
   */
  get("/:owner/:repository/branches")(referrersOnly { repository =>
    using(Git.open(getRepositoryDir(repository.owner, repository.name))){ git =>
      // retrieve latest update date of each branch
      val branchInfo = repository.branchList.map { branchName =>
        val revCommit = git.log.add(git.getRepository.resolve(branchName)).setMaxCount(1).call.iterator.next
        (branchName, revCommit.getCommitterIdent.getWhen)
      }
      repo.html.branches(branchInfo, repository)
    }
  })

  /**
   * Displays tags.
   */
  get("/:owner/:repository/tags")(referrersOnly {
    repo.html.tags(_)
  })

  /**
   * Download repository contents as an archive.
   * Only ".zip" is supported; the revision is everything before the extension.
   */
  get("/:owner/:repository/archive/:name")(referrersOnly { repository =>
    val name = params("name")

    if(name.endsWith(".zip")){
      val revision = name.replaceFirst("\\\\.zip$", "")
      // Work dir is keyed by HTTP session so concurrent users don't collide.
      val workDir = getDownloadWorkDir(repository.owner, repository.name, session.getId)
      if(workDir.exists){
        FileUtils.deleteDirectory(workDir)
      }
      workDir.mkdirs

      // clone the repository
      val cloneDir = new File(workDir, revision)
      using(Git.cloneRepository
        .setURI(getRepositoryDir(repository.owner, repository.name).toURI.toString)
        .setDirectory(cloneDir)
        .setBranch(revision)
        .call){ git =>

        // checkout the specified revision
        git.checkout.setName(revision).call
      }

      // remove .git
      FileUtils.deleteDirectory(new File(cloneDir, ".git"))

      // create zip file
      val zipFile = new File(workDir, (if(revision.length == 40) revision.substring(0, 10) else revision) + ".zip")
      FileUtil.createZipFile(zipFile, cloneDir)

      contentType = "application/octet-stream"
      zipFile
    } else {
      BadRequest
    }
  })

  // Lists forks of this repository (or of its origin when this is itself a fork).
  get("/:owner/:repository/network/members")(referrersOnly { repository =>
    repo.html.forked(
      getRepository(
        repository.repository.originUserName.getOrElse(repository.owner),
        repository.repository.originRepositoryName.getOrElse(repository.name),
        baseUrl),
      getForkedRepositories(
        repository.repository.originUserName.getOrElse(repository.owner),
        repository.repository.originRepositoryName.getOrElse(repository.name)),
      repository)
  })

  // Splits a "<ref>/<path>" splat into (ref, path), matching against branch
  // names first, then tag names, then falling back to the first path segment.
  // NOTE(review): trailing postfix `get` — works, but is fragile style; the
  // `Some(...)` fallback means the Option is always defined here.
  private def splitPath(repository: service.RepositoryService.RepositoryInfo, path: String): (String, String) = {
    val id = repository.branchList.collectFirst {
      case branch if(path == branch || path.startsWith(branch + "/")) => branch
    } orElse repository.tags.collectFirst {
      case tag if(path == tag.name || path.startsWith(tag.name + "/")) => tag.name
    } orElse Some(path.split("/")(0)) get

    (id, path.substring(id.length).replaceFirst("^/", ""))
  }

  /**
   * Provides HTML of the file list.
   *
   * @param repository the repository information
   * @param revstr the branch name or commit id(optional)
   * @param path the directory path (optional)
   * @return HTML of the file list
   */
  private def fileList(repository: RepositoryService.RepositoryInfo, revstr: String = "", path: String = ".") = {
    if(repository.commitCount == 0){
      // empty repository: show the getting-started guide instead
      repo.html.guide(repository)
    } else {
      using(Git.open(getRepositoryDir(repository.owner, repository.name))){ git =>
        // NOTE(review): `revisions` appears to be unused below — confirm it can be removed.
        val revisions = Seq(if(revstr.isEmpty) repository.repository.defaultBranch else revstr, repository.branchList.head)
        // get specified commit
        JGitUtil.getDefaultBranch(git, repository, revstr).map { case (objectId, revision) =>
          defining(JGitUtil.getRevCommitFromId(git, objectId)){ revCommit =>
            // get files
            val files = JGitUtil.getFileList(git, revision, path)
            // process README.md
            // NOTE(review): this nested Git.open is never closed — looks like a
            // resource leak; the outer `git` handle could be reused instead.
            val readme = files.find(_.name == "README.md").map { file =>
              StringUtil.convertFromByteArray(JGitUtil.getContent(Git.open(getRepositoryDir(repository.owner, repository.name)), file.id, true).get)
            }

            repo.html.files(revision, repository,
              if(path == ".") Nil else path.split("/").toList, // current path
              new JGitUtil.CommitInfo(revCommit), // latest commit
              files, readme)
          }
        } getOrElse NotFound
      }
    }
  }
}
| smly/gitbucket | src/main/scala/app/RepositoryViewerController.scala | Scala | apache-2.0 | 9,296 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.events
import org.apache.spark.sql.SparkSession
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
/**
 * Event fired before a carbon table is dropped.
 *
 * @param carbonTable the table about to be dropped
 * @param ifExistsSet true when the DROP TABLE statement specified IF EXISTS
 * @param sparkSession the active SparkSession executing the drop
 * @param isInternalCall true when the drop is triggered internally rather than
 *                       by a user command (presumably for child/aggregate
 *                       tables — confirm against listeners)
 */
case class DropTablePreEvent(
    carbonTable: CarbonTable,
    ifExistsSet: Boolean,
    sparkSession: SparkSession,
    isInternalCall: Boolean = false)
  extends Event with DropTableEventInfo
/**
 * Event fired after a carbon table has been dropped successfully.
 *
 * @param carbonTable the table that was dropped
 * @param ifExistsSet true when the DROP TABLE statement specified IF EXISTS
 * @param sparkSession the active SparkSession executing the drop
 */
case class DropTablePostEvent(
    carbonTable: CarbonTable,
    ifExistsSet: Boolean,
    sparkSession: SparkSession)
  extends Event with DropTableEventInfo
/**
 * Event fired when a drop-table operation is aborted.
 *
 * @param carbonTable the table whose drop was aborted
 * @param ifExistsSet true when the DROP TABLE statement specified IF EXISTS
 * @param sparkSession the active SparkSession executing the drop
 */
case class DropTableAbortEvent(
    carbonTable: CarbonTable,
    ifExistsSet: Boolean,
    sparkSession: SparkSession)
  extends Event with DropTableEventInfo
| zzcclp/carbondata | integration/spark/src/main/scala/org/apache/carbondata/events/DropTableEvents.scala | Scala | apache-2.0 | 1,703 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.io.File
import java.util.concurrent.TimeUnit
import java.util.concurrent.locks.ReentrantLock
import com.yammer.metrics.core.Gauge
import kafka.common.LogCleaningAbortedException
import kafka.metrics.KafkaMetricsGroup
import kafka.server.checkpoints.{OffsetCheckpoint, OffsetCheckpointFile}
import kafka.utils.CoreUtils._
import kafka.utils.{Logging, Pool}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.utils.Time
import scala.collection.{immutable, mutable}
private[log] sealed trait LogCleaningState
private[log] case object LogCleaningInProgress extends LogCleaningState
private[log] case object LogCleaningAborted extends LogCleaningState
private[log] case object LogCleaningPaused extends LogCleaningState
/**
* Manage the state of each partition being cleaned.
* If a partition is to be cleaned, it enters the LogCleaningInProgress state.
* While a partition is being cleaned, it can be requested to be aborted and paused. Then the partition first enters
* the LogCleaningAborted state. Once the cleaning task is aborted, the partition enters the LogCleaningPaused state.
* While a partition is in the LogCleaningPaused state, it won't be scheduled for cleaning again, until cleaning is
* requested to be resumed.
*/
private[log] class LogCleanerManager(val logDirs: Array[File], val logs: Pool[TopicPartition, Log]) extends Logging with KafkaMetricsGroup {
  import LogCleanerManager._

  override val loggerName = classOf[LogCleaner].getName

  // package-private for testing
  private[log] val offsetCheckpointFile = "cleaner-offset-checkpoint"

  /* the offset checkpoints holding the last cleaned point for each log */
  private val checkpoints = logDirs.map(dir => (dir, new OffsetCheckpointFile(new File(dir, offsetCheckpointFile)))).toMap

  /* the set of logs currently being cleaned */
  private val inProgress = mutable.HashMap[TopicPartition, LogCleaningState]()

  /* a global lock used to control all access to the in-progress set and the offset checkpoints */
  private val lock = new ReentrantLock

  /* for coordinating the pausing and the cleaning of a partition */
  private val pausedCleaningCond = lock.newCondition()

  /* a gauge for tracking the cleanable ratio of the dirtiest log */
  @volatile private var dirtiestLogCleanableRatio = 0.0
  newGauge("max-dirty-percent", new Gauge[Int] { def value = (100 * dirtiestLogCleanableRatio).toInt })

  /* a gauge for tracking the time since the last log cleaner run, in milli seconds */
  @volatile private var timeOfLastRun : Long = Time.SYSTEM.milliseconds
  newGauge("time-since-last-run-ms", new Gauge[Long] { def value = Time.SYSTEM.milliseconds - timeOfLastRun })

  /**
   * @return the position processed for all logs.
   */
  def allCleanerCheckpoints: Map[TopicPartition, Long] =
    checkpoints.values.flatMap(_.read()).toMap

  /**
   * Choose the log to clean next and add it to the in-progress set. We recompute this
   * each time from the full set of logs to allow logs to be dynamically added to the pool of logs
   * the log manager maintains.
   */
  def grabFilthiestCompactedLog(time: Time): Option[LogToClean] = {
    inLock(lock) {
      val now = time.milliseconds
      this.timeOfLastRun = now
      val lastClean = allCleanerCheckpoints
      val dirtyLogs = logs.filter {
        case (_, log) => log.config.compact // match logs that are marked as compacted
      }.filterNot {
        case (topicPartition, _) => inProgress.contains(topicPartition) // skip any logs already in-progress
      }.map {
        case (topicPartition, log) => // create a LogToClean instance for each
          val (firstDirtyOffset, firstUncleanableDirtyOffset) = LogCleanerManager.cleanableOffsets(log, topicPartition,
            lastClean, now)
          LogToClean(topicPartition, log, firstDirtyOffset, firstUncleanableDirtyOffset)
      }.filter(ltc => ltc.totalBytes > 0) // skip any empty logs

      this.dirtiestLogCleanableRatio = if (dirtyLogs.nonEmpty) dirtyLogs.max.cleanableRatio else 0
      // and must meet the minimum threshold for dirty byte ratio
      val cleanableLogs = dirtyLogs.filter(ltc => ltc.cleanableRatio > ltc.log.config.minCleanableRatio)
      if(cleanableLogs.isEmpty) {
        None
      } else {
        val filthiest = cleanableLogs.max
        inProgress.put(filthiest.topicPartition, LogCleaningInProgress)
        Some(filthiest)
      }
    }
  }

  /**
   * Find any logs that have compact and delete enabled
   */
  def deletableLogs(): Iterable[(TopicPartition, Log)] = {
    inLock(lock) {
      val toClean = logs.filter { case (topicPartition, log) =>
        !inProgress.contains(topicPartition) && isCompactAndDelete(log)
      }
      // mark them in-progress so the compaction path skips them concurrently
      toClean.foreach { case (tp, _) => inProgress.put(tp, LogCleaningInProgress) }
      toClean
    }
  }

  /**
   * Abort the cleaning of a particular partition, if it's in progress. This call blocks until the cleaning of
   * the partition is aborted.
   * This is implemented by first abortAndPausing and then resuming the cleaning of the partition.
   */
  def abortCleaning(topicPartition: TopicPartition) {
    inLock(lock) {
      abortAndPauseCleaning(topicPartition)
      resumeCleaning(topicPartition)
    }
    info(s"The cleaning for partition $topicPartition is aborted")
  }

  /**
   * Abort the cleaning of a particular partition if it's in progress, and pause any future cleaning of this partition.
   * This call blocks until the cleaning of the partition is aborted and paused.
   * 1. If the partition is not in progress, mark it as paused.
   * 2. Otherwise, first mark the state of the partition as aborted.
   * 3. The cleaner thread checks the state periodically and if it sees the state of the partition is aborted, it
   *    throws a LogCleaningAbortedException to stop the cleaning task.
   * 4. When the cleaning task is stopped, doneCleaning() is called, which sets the state of the partition as paused.
   * 5. abortAndPauseCleaning() waits until the state of the partition is changed to paused.
   */
  def abortAndPauseCleaning(topicPartition: TopicPartition) {
    inLock(lock) {
      inProgress.get(topicPartition) match {
        case None =>
          inProgress.put(topicPartition, LogCleaningPaused)
        case Some(state) =>
          state match {
            case LogCleaningInProgress =>
              inProgress.put(topicPartition, LogCleaningAborted)
            case s =>
              throw new IllegalStateException(s"Compaction for partition $topicPartition cannot be aborted and paused since it is in $s state.")
          }
      }
      // poll until the cleaner thread acknowledges the abort by pausing
      while (!isCleaningInState(topicPartition, LogCleaningPaused))
        pausedCleaningCond.await(100, TimeUnit.MILLISECONDS)
    }
    info(s"The cleaning for partition $topicPartition is aborted and paused")
  }

  /**
   * Resume the cleaning of a paused partition. This call blocks until the cleaning of a partition is resumed.
   */
  def resumeCleaning(topicPartition: TopicPartition) {
    inLock(lock) {
      inProgress.get(topicPartition) match {
        case None =>
          throw new IllegalStateException(s"Compaction for partition $topicPartition cannot be resumed since it is not paused.")
        case Some(state) =>
          state match {
            case LogCleaningPaused =>
              inProgress.remove(topicPartition)
            case s =>
              throw new IllegalStateException(s"Compaction for partition $topicPartition cannot be resumed since it is in $s state.")
          }
      }
    }
    info(s"Compaction for partition $topicPartition is resumed")
  }

  /**
   * Check if the cleaning for a partition is in a particular state. The caller is expected to hold lock while making the call.
   */
  private def isCleaningInState(topicPartition: TopicPartition, expectedState: LogCleaningState): Boolean =
    // simplified from an `if (state == expectedState) true else false` match
    inProgress.get(topicPartition).contains(expectedState)

  /**
   * Check if the cleaning for a partition is aborted. If so, throw an exception.
   */
  def checkCleaningAborted(topicPartition: TopicPartition) {
    inLock(lock) {
      if (isCleaningInState(topicPartition, LogCleaningAborted))
        throw new LogCleaningAbortedException()
    }
  }

  // Merge `update` into the checkpoint file of `dataDir`, dropping entries for
  // partitions whose logs no longer exist.
  def updateCheckpoints(dataDir: File, update: Option[(TopicPartition,Long)]) {
    inLock(lock) {
      val checkpoint = checkpoints(dataDir)
      val existing = checkpoint.read().filterKeys(logs.keys) ++ update
      checkpoint.write(existing)
    }
  }

  // Lower the checkpointed offset for a compacted partition after truncation,
  // so cleaning restarts from the surviving data.
  def maybeTruncateCheckpoint(dataDir: File, topicPartition: TopicPartition, offset: Long) {
    inLock(lock) {
      if (logs.get(topicPartition).config.compact) {
        val checkpoint = checkpoints(dataDir)
        val existing = checkpoint.read()

        if (existing.getOrElse(topicPartition, 0L) > offset)
          checkpoint.write(existing + (topicPartition -> offset))
      }
    }
  }

  /**
   * Save out the endOffset and remove the given log from the in-progress set, if not aborted.
   */
  def doneCleaning(topicPartition: TopicPartition, dataDir: File, endOffset: Long) {
    inLock(lock) {
      inProgress(topicPartition) match {
        case LogCleaningInProgress =>
          updateCheckpoints(dataDir,Option(topicPartition, endOffset))
          inProgress.remove(topicPartition)
        case LogCleaningAborted =>
          // hand off to the paused state and wake up abortAndPauseCleaning()
          inProgress.put(topicPartition, LogCleaningPaused)
          pausedCleaningCond.signalAll()
        case s =>
          throw new IllegalStateException(s"In-progress partition $topicPartition cannot be in $s state.")
      }
    }
  }

  // Release a partition that was claimed by deletableLogs().
  def doneDeleting(topicPartition: TopicPartition): Unit = {
    inLock(lock) {
      inProgress.remove(topicPartition)
    }
  }
}
private[log] object LogCleanerManager extends Logging {

  // True when the log has both compaction and time/size-based deletion enabled.
  def isCompactAndDelete(log: Log): Boolean = {
    log.config.compact && log.config.delete
  }


  /**
    * Returns the range of dirty offsets that can be cleaned.
    *
    * @param log the log
    * @param lastClean the map of checkpointed offsets
    * @param now the current time in milliseconds of the cleaning operation
    * @return the lower (inclusive) and upper (exclusive) offsets
    */
  def cleanableOffsets(log: Log, topicPartition: TopicPartition, lastClean: immutable.Map[TopicPartition, Long], now: Long): (Long, Long) = {

    // the checkpointed offset, ie., the first offset of the next dirty segment
    val lastCleanOffset: Option[Long] = lastClean.get(topicPartition)

    // If the log segments are abnormally truncated and hence the checkpointed offset is no longer valid;
    // reset to the log starting offset and log the error
    val logStartOffset = log.logSegments.head.baseOffset
    val firstDirtyOffset = {
      val offset = lastCleanOffset.getOrElse(logStartOffset)
      if (offset < logStartOffset) {
        // don't bother with the warning if compact and delete are enabled.
        if (!isCompactAndDelete(log))
          warn(s"Resetting first dirty offset of ${log.name} to log start offset $logStartOffset since the checkpointed offset $offset is invalid.")
        logStartOffset
      } else {
        offset
      }
    }

    // dirty log segments
    val dirtyNonActiveSegments = log.logSegments(firstDirtyOffset, log.activeSegment.baseOffset)

    val compactionLagMs = math.max(log.config.compactionLagMs, 0L)

    // find first segment that cannot be cleaned
    // neither the active segment, nor segments with any messages closer to the head of the log than the minimum compaction lag time
    // may be cleaned
    // The upper bound is the minimum of the candidates below (all are optional except the active segment).
    val firstUncleanableDirtyOffset: Long = Seq (

      // we do not clean beyond the first unstable offset
      log.firstUnstableOffset.map(_.messageOffset),

      // the active segment is always uncleanable
      Option(log.activeSegment.baseOffset),

      // the first segment whose largest message timestamp is within a minimum time lag from now
      if (compactionLagMs > 0) {
        dirtyNonActiveSegments.find {
          s =>
            val isUncleanable = s.largestTimestamp > now - compactionLagMs
            debug(s"Checking if log segment may be cleaned: log='${log.name}' segment.baseOffset=${s.baseOffset} segment.largestTimestamp=${s.largestTimestamp}; now - compactionLag=${now - compactionLagMs}; is uncleanable=$isUncleanable")
            isUncleanable
        } map(_.baseOffset)
      } else None
    ).flatten.min

    debug(s"Finding range of cleanable offsets for log=${log.name} topicPartition=$topicPartition. Last clean offset=$lastCleanOffset now=$now => firstDirtyOffset=$firstDirtyOffset firstUncleanableOffset=$firstUncleanableDirtyOffset activeSegment.baseOffset=${log.activeSegment.baseOffset}")

    (firstDirtyOffset, firstUncleanableDirtyOffset)
  }
}
| wangcy6/storm_app | frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/main/scala/kafka/log/LogCleanerManager.scala | Scala | apache-2.0 | 13,682 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command.preaaggregate
import scala.collection.JavaConverters._
import org.apache.spark.sql.CarbonSession
import org.apache.spark.sql.execution.command.CarbonDropTableCommand
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.metadata.schema.table.DataMapSchema
import org.apache.carbondata.events._
object DropPreAggregateTablePostListener extends OperationEventListener {
  /**
   * Fired after a main table has been dropped; drops every pre-aggregate
   * child table registered in the parent table's data map schema list.
   *
   * @param event the drop-table post event carrying the dropped table and session
   * @param operationContext context of the running operation (unused here)
   */
  override def onEvent(event: Event, operationContext: OperationContext): Unit = {
    val postEvent = event.asInstanceOf[DropTablePostEvent]
    val session = postEvent.sparkSession
    postEvent.carbonTable match {
      case Some(table) if table.hasDataMapSchema =>
        // Only schemas that actually point at a child table can be dropped.
        table.getTableInfo.getDataMapSchemaList.asScala
          .filter(_.getRelationIdentifier != null)
          .foreach { schema =>
            val identifier = schema.getRelationIdentifier
            CarbonDropTableCommand(ifExistsSet = true,
              Some(identifier.getDatabaseName),
              identifier.getTableName).run(session)
          }
      case _ => // no table, or no pre-aggregate children: nothing to clean up
    }
  }
}
object LoadPostAggregateListener extends OperationEventListener {
  /**
   * Fired after data has been loaded into a main table: refreshes every
   * pre-aggregate child table by re-running its materialization query over
   * the segment that was just loaded.
   *
   * @param event the load-table post-execution event carrying the load model
   * @param operationContext context of the running operation (unused here)
   */
  override def onEvent(event: Event, operationContext: OperationContext): Unit = {
    val loadEvent = event.asInstanceOf[LoadTablePostExecutionEvent]
    val sparkSession = loadEvent.sparkSession
    val carbonLoadModel = loadEvent.carbonLoadModel
    val table = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
    if (table.hasDataMapSchema) {
      for (dataMapSchema: DataMapSchema <- table.getTableInfo.getDataMapSchemaList.asScala) {
        // Scope the child insert to only the segment produced by this load and
        // disable segment validation for the scoped query.
        // NOTE(review): these thread-local session properties are set but never
        // unset here - confirm whether a caller is responsible for clearing them.
        CarbonSession
          .threadSet(CarbonCommonConstants.CARBON_INPUT_SEGMENTS +
                     carbonLoadModel.getDatabaseName + "." +
                     carbonLoadModel.getTableName,
            carbonLoadModel.getSegmentId)
        CarbonSession.threadSet(CarbonCommonConstants.VALIDATE_CARBON_INPUT_SEGMENTS +
                                carbonLoadModel.getDatabaseName + "." +
                                carbonLoadModel.getTableName, "false")
        val childTableName = dataMapSchema.getRelationIdentifier.getTableName
        val childDatabaseName = dataMapSchema.getRelationIdentifier.getDatabaseName
        // "CHILD_SELECT QUERY" (sic): key contains a space in the stored schema properties.
        val selectQuery = dataMapSchema.getProperties.get("CHILD_SELECT QUERY")
        sparkSession.sql(s"insert into $childDatabaseName.$childTableName $selectQuery")
      }
    }
  }
}
object PreAggregateDataTypeChangePreListener extends OperationEventListener {
  /**
   * Rejects ALTER TABLE data-type changes that would invalidate pre-aggregate
   * tables: a column referenced by any child pre-aggregate table cannot change
   * type, and pre-aggregate (child) tables cannot be altered at all.
   *
   * @param event the alter-table data-type-change pre event
   * @param operationContext context of the running operation (unused here)
   */
  override def onEvent(event: Event, operationContext: OperationContext): Unit = {
    val dataTypeChangePreListener = event.asInstanceOf[AlterTableDataTypeChangePreEvent]
    val carbonTable = dataTypeChangePreListener.carbonTable
    val alterTableDataTypeChangeModel = dataTypeChangePreListener.alterTableDataTypeChangeModel
    val columnToBeAltered: String = alterTableDataTypeChangeModel.columnName
    if (carbonTable.hasDataMapSchema) {
      val dataMapSchemas = carbonTable.getTableInfo.getDataMapSchemaList
      dataMapSchemas.asScala.foreach { dataMapSchema =>
        val childColumns = dataMapSchema.getChildSchema.getListOfColumns
        // Names of parent-table columns that feed this child schema's columns.
        val parentColumnNames = childColumns.asScala
          .flatMap(_.getParentColumnTableRelations.asScala.map(_.getColumnName))
        if (parentColumnNames.contains(columnToBeAltered)) {
          // Fixed: the two message fragments previously concatenated without a
          // separating space, producing "...pre-aggregate tableto continue".
          throw new UnsupportedOperationException(
            s"Column $columnToBeAltered exists in a pre-aggregate table. Drop pre-aggregate table " +
            "to continue")
        }
      }
    }
    if (carbonTable.isChildDataMap) {
      throw new UnsupportedOperationException(
        s"Cannot change data type for columns in pre-aggregate table ${ carbonTable.getDatabaseName
        }.${ carbonTable.getFactTableName }")
    }
  }
}
object PreAggregateAddColumnsPreListener extends OperationEventListener {
  /**
   * Rejects ALTER TABLE ADD COLUMN on pre-aggregate (child) tables: their
   * schema is derived from the parent table and cannot be extended directly.
   *
   * @param event the alter-table add-column pre event
   * @param operationContext context of the running operation (unused here)
   */
  override def onEvent(event: Event, operationContext: OperationContext): Unit = {
    val dataTypeChangePreListener = event.asInstanceOf[AlterTableAddColumnPreEvent]
    val carbonTable = dataTypeChangePreListener.carbonTable
    if (carbonTable.isChildDataMap) {
      // Fixed: error message previously misspelled "pre-aggreagate".
      throw new UnsupportedOperationException(
        s"Cannot add columns in pre-aggregate table ${ carbonTable.getDatabaseName
        }.${ carbonTable.getFactTableName }")
    }
  }
}
object PreAggregateDeleteSegmentByDatePreListener extends OperationEventListener {
  /**
   * Rejects DELETE SEGMENT BY DATE on tables that either own pre-aggregate
   * child tables or are themselves pre-aggregate tables.
   *
   * @param event the delete-segment-by-date pre event
   * @param operationContext context of the running operation (unused here)
   */
  override def onEvent(event: Event, operationContext: OperationContext): Unit = {
    val preEvent = event.asInstanceOf[DeleteSegmentByDatePreEvent]
    // The event may carry a null table; do nothing in that case.
    Option(preEvent.carbonTable).foreach { table =>
      if (table.hasDataMapSchema) {
        throw new UnsupportedOperationException(
          "Delete segment operation is not supported on tables which have a pre-aggregate table. " +
          "Drop pre-aggregation table to continue")
      }
      if (table.isChildDataMap) {
        throw new UnsupportedOperationException(
          "Delete segment operation is not supported on pre-aggregate table")
      }
    }
  }
}
object PreAggregateDeleteSegmentByIdPreListener extends OperationEventListener {
  /**
   * Rejects DELETE SEGMENT BY ID on tables that either own pre-aggregate
   * child tables or are themselves pre-aggregate tables.
   *
   * @param event the delete-segment-by-id pre event
   * @param operationContext context of the running operation (unused here)
   */
  override def onEvent(event: Event, operationContext: OperationContext): Unit = {
    val preEvent = event.asInstanceOf[DeleteSegmentByIdPreEvent]
    // The event may carry a null table; do nothing in that case.
    Option(preEvent.carbonTable).foreach { table =>
      if (table.hasDataMapSchema) {
        throw new UnsupportedOperationException(
          "Delete segment operation is not supported on tables which have a pre-aggregate table")
      }
      if (table.isChildDataMap) {
        throw new UnsupportedOperationException(
          "Delete segment operation is not supported on pre-aggregate table")
      }
    }
  }
}
object PreAggregateDropColumnPreListener extends OperationEventListener {
  /**
   * Rejects ALTER TABLE DROP COLUMN when the column is referenced by a
   * pre-aggregate child table, and rejects dropping columns of pre-aggregate
   * tables themselves.
   *
   * @param event the alter-table drop-column pre event
   * @param operationContext context of the running operation (unused here)
   */
  override def onEvent(event: Event, operationContext: OperationContext): Unit = {
    val dataTypeChangePreListener = event.asInstanceOf[AlterTableDropColumnPreEvent]
    val carbonTable = dataTypeChangePreListener.carbonTable
    val alterTableDropColumnModel = dataTypeChangePreListener.alterTableDropColumnModel
    val columnsToBeDropped = alterTableDropColumnModel.columns
    if (carbonTable.hasDataMapSchema) {
      val dataMapSchemas = carbonTable.getTableInfo.getDataMapSchemaList
      dataMapSchemas.asScala.foreach { dataMapSchema =>
        // Parent-table columns this child schema is built from.
        val parentColumnNames = dataMapSchema.getChildSchema.getListOfColumns.asScala
          .flatMap(_.getParentColumnTableRelations.asScala.map(_.getColumnName))
        // Idiomatic `find` replaces the previous collectFirst + Option.head pair.
        parentColumnNames.find(columnsToBeDropped.contains).foreach { column =>
          throw new UnsupportedOperationException(
            s"Column $column cannot be dropped because it exists in a " +
            s"pre-aggregate table ${ dataMapSchema.getRelationIdentifier.toString }")
        }
      }
    }
    if (carbonTable.isChildDataMap) {
      // Fixed: error message previously misspelled "pre-aggreagate".
      throw new UnsupportedOperationException(s"Cannot drop columns in pre-aggregate table ${
        carbonTable.getDatabaseName}.${ carbonTable.getFactTableName }")
    }
  }
}
object PreAggregateRenameTablePreListener extends OperationEventListener {
  /**
   * Rejects RENAME TABLE for pre-aggregate tables and for parent tables that
   * own pre-aggregate children.
   *
   * @param event the alter-table rename pre event
   * @param operationContext context of the running operation (unused here)
   */
  override def onEvent(event: Event,
      operationContext: OperationContext): Unit = {
    val table = event.asInstanceOf[AlterTableRenamePreEvent].carbonTable
    if (table.isChildDataMap) {
      throw new UnsupportedOperationException(
        "Rename operation for pre-aggregate table is not supported.")
    }
    if (table.hasDataMapSchema) {
      throw new UnsupportedOperationException(
        "Rename operation is not supported for table with pre-aggregate tables")
    }
  }
}
object UpdatePreAggregatePreListener extends OperationEventListener {
  /**
   * Rejects UPDATE statements on tables that own pre-aggregate child tables
   * and on pre-aggregate tables themselves.
   *
   * @param event the update-table pre event
   * @param operationContext context of the running operation (unused here)
   */
  override def onEvent(event: Event, operationContext: OperationContext): Unit = {
    val preEvent = event.asInstanceOf[UpdateTablePreEvent]
    // The event may carry a null table; do nothing in that case.
    Option(preEvent.carbonTable).foreach { table =>
      if (table.hasDataMapSchema) {
        throw new UnsupportedOperationException(
          "Update operation is not supported for tables which have a pre-aggregate table. Drop " +
          "pre-aggregate tables to continue.")
      }
      if (table.isChildDataMap) {
        throw new UnsupportedOperationException(
          "Update operation is not supported for pre-aggregate table")
      }
    }
  }
}
object DeletePreAggregatePreListener extends OperationEventListener {
  /**
   * Rejects DELETE statements on tables that own pre-aggregate child tables
   * and on pre-aggregate tables themselves.
   *
   * @param event the delete-from-table pre event
   * @param operationContext context of the running operation (unused here)
   */
  override def onEvent(event: Event, operationContext: OperationContext): Unit = {
    val preEvent = event.asInstanceOf[DeleteFromTablePreEvent]
    // The event may carry a null table; do nothing in that case.
    Option(preEvent.carbonTable).foreach { table =>
      if (table.hasDataMapSchema) {
        throw new UnsupportedOperationException(
          "Delete operation is not supported for tables which have a pre-aggregate table. Drop " +
          "pre-aggregate tables to continue.")
      }
      if (table.isChildDataMap) {
        throw new UnsupportedOperationException(
          "Delete operation is not supported for pre-aggregate table")
      }
    }
  }
}
| HuaweiBigData/carbondata | integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/preaaggregate/PreAggregateListeners.scala | Scala | apache-2.0 | 11,323 |
package org.broadinstitute.dsde.firecloud.webservice
import akka.http.scaladsl.client.RequestBuilding
import akka.http.scaladsl.server.{Directives, Route}
import com.typesafe.scalalogging.LazyLogging
import org.broadinstitute.dsde.firecloud.service.{ExportEntitiesByTypeActor, ExportEntitiesByTypeArguments, FireCloudDirectives, FireCloudRequestBuilding}
import org.broadinstitute.dsde.firecloud.utils.StandardUserInfoDirectives
import scala.concurrent.ExecutionContext
import scala.language.postfixOps
/**
 * Route for streaming a workspace entity collection as a TSV download:
 * GET /api/workspaces/{namespace}/{name}/entities/{entityType}/tsv
 * with optional `attributeNames` (comma-separated) and `model` query params.
 */
trait ExportEntitiesApiService extends Directives with RequestBuilding with StandardUserInfoDirectives with LazyLogging {
  // Factory for the export actor; supplied by the concrete service implementation.
  val exportEntitiesByTypeConstructor: ExportEntitiesByTypeArguments => ExportEntitiesByTypeActor
  implicit val executionContext: ExecutionContext
  val exportEntitiesRoutes: Route =
    // Note that this endpoint works in the same way as CookieAuthedApiService tsv download.
    path( "api" / "workspaces" / Segment / Segment / "entities" / Segment / "tsv" ) { (workspaceNamespace, workspaceName, entityType) =>
      parameters('attributeNames.?, 'model.?) { (attributeNamesString, modelString) =>
        requireUserInfo() { userInfo =>
          get {
            // `attributeNames` arrives as one comma-separated query parameter.
            val attributeNames = attributeNamesString.map(_.split(",").toIndexedSeq)
            val exportArgs = ExportEntitiesByTypeArguments(userInfo, workspaceNamespace, workspaceName, entityType, attributeNames, modelString)
            // The actor streams the TSV body back to the client.
            complete { exportEntitiesByTypeConstructor(exportArgs).ExportEntities }
          }
        }
      }
    }
}
| broadinstitute/firecloud-orchestration | src/main/scala/org/broadinstitute/dsde/firecloud/webservice/ExportEntitiesApiService.scala | Scala | bsd-3-clause | 1,542 |
package org.jetbrains.plugins.scala.lang.formatting
import com.intellij.formatting.{Wrap, WrapType}
import com.intellij.lang.ASTNode
import com.intellij.psi.PsiElement
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.{ScCompositePattern, ScInfixPattern, ScPattern, ScPatternArgumentList}
import org.jetbrains.plugins.scala.lang.psi.api.base.types.{ScInfixTypeElement, ScSequenceArg}
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScParameter, ScParameterClause, ScParameters}
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScFunction, ScTypeAlias, ScValue, ScVariable}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScEarlyDefinitions
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.{ScExtendsBlock, ScTemplateBody}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
/**
* @author Alexander Podkhalyuzin
*/
object ScalaWrapManager {
  /**
   * Computes the wrap suggested for a formatting block based on the PSI
   * element it represents and the user's code-style settings.
   *
   * Returns `null` when no wrap applies (the platform treats null as "no wrap").
   *
   * @param block         the formatting block being processed
   * @param scalaSettings Scala-specific code style settings
   */
  def suggestedWrap(block: ScalaBlock, scalaSettings: ScalaCodeStyleSettings): Wrap = {
    val settings = block.getCommonSettings
    val node = block.getNode
    val psi = node.getPsi
    // Builds a wrap for binary-like constructs (infix expressions/patterns/types).
    // Child wraps are created only when operator priorities differ, so chains of
    // same-priority operators wrap as a single group.
    def wrapBinary(elementMatch: PsiElement => Boolean,
                   elementOperation: PsiElement => PsiElement,
                   assignments: Boolean): Wrap = {
      psi.getParent match {
        case parent: PsiElement if elementMatch(parent) => {
          import org.jetbrains.plugins.scala.lang.parser.util.ParserUtils.priority
          val parentPriority = priority(elementOperation(parent).getText, assignments)
          val childPriority = priority(elementOperation(psi).getText, assignments)
          val notSamePriority = parentPriority != childPriority
          if (notSamePriority) {
            Wrap.createChildWrap(block.getWrap,
              WrapType.byLegacyRepresentation(settings.BINARY_OPERATION_WRAP),
              false)
          }
          else Wrap.createWrap(settings.BINARY_OPERATION_WRAP, false)
        }
        case _ => Wrap.createWrap(settings.BINARY_OPERATION_WRAP, false)
      }
    }
    psi match {
      case psi: ScInfixExpr => {
        return wrapBinary(_.isInstanceOf[ScInfixExpr], _.asInstanceOf[ScInfixExpr].operation, assignments = true)
      }
      case psi: ScInfixPattern => {
        return wrapBinary(_.isInstanceOf[ScInfixPattern], _.asInstanceOf[ScInfixPattern].refernece, assignments = false)
      }
      case psi: ScInfixTypeElement => {
        return wrapBinary(_.isInstanceOf[ScInfixTypeElement], _.asInstanceOf[ScInfixTypeElement].ref, assignments = false)
      }
      case psi: ScCompositePattern => {
        return Wrap.createWrap(settings.BINARY_OPERATION_WRAP, false)
      }
      case psi: ScArgumentExprList => {
        val parentSuggestedWrap = block.myParentBlock.suggestedWrap
        val wrap = if (parentSuggestedWrap != null) Wrap.createChildWrap(parentSuggestedWrap,
          WrapType.byLegacyRepresentation(settings.CALL_PARAMETERS_WRAP), false)
        else Wrap.createWrap(settings.CALL_PARAMETERS_WRAP, false)
        if (settings.PREFER_PARAMETERS_WRAP) {
          wrap.ignoreParentWraps
        }
        return wrap
      }
      case psi: ScReferenceExpression => {
        return Wrap.createWrap(settings.METHOD_CALL_CHAIN_WRAP, true)
      }
      case psi: ScMethodCall => {
        return Wrap.createWrap(settings.METHOD_CALL_CHAIN_WRAP, true)
      }
      case psi: ScPatternArgumentList => {
        return Wrap.createWrap(settings.CALL_PARAMETERS_WRAP, false)
      }
      case _ if node.getElementType == ScalaTokenTypes.kEXTENDS && block.myLastNode != null => {
        return Wrap.createChildWrap(block.getWrap, WrapType.byLegacyRepresentation(settings.EXTENDS_LIST_WRAP), true)
      }
      case psi: ScParameterClause => {
        return Wrap.createWrap(settings.METHOD_PARAMETERS_WRAP, false)
      }
      case psi: ScParameters => {
        return Wrap.createWrap(settings.METHOD_PARAMETERS_WRAP, true)
      }
      case annot: ScAnnotations if annot.getAnnotations.length > 0 => {
        annot.getParent match {
          case _: ScTypeDefinition => return Wrap.createWrap(settings.CLASS_ANNOTATION_WRAP, false)
          case _: ScFunction => return Wrap.createWrap(settings.METHOD_ANNOTATION_WRAP, false)
          case _: ScVariable | _: ScValue | _: ScTypeAlias if {
            annot.getParent.getParent match { case _: ScEarlyDefinitions | _: ScTemplateBody => true; case _ => false }
          } =>
            return Wrap.createWrap(settings.FIELD_ANNOTATION_WRAP, false)
          // BUG FIX: these two branches previously created a Wrap but discarded
          // it (no `return`), so the method fell through and returned null,
          // meaning VARIABLE_ANNOTATION_WRAP / PARAMETER_ANNOTATION_WRAP were
          // never applied. Added `return` to match the sibling branches.
          case _: ScVariable | _: ScValue | _: ScTypeAlias =>
            return Wrap.createWrap(settings.VARIABLE_ANNOTATION_WRAP, false)
          case _: ScParameter =>
            return Wrap.createWrap(settings.PARAMETER_ANNOTATION_WRAP, false)
          case _ =>
        }
      }
      case _ =>
    }
    null
  }
  /**
   * Decides whether a child node should actually receive the wrap previously
   * suggested for its parent block, based on the child's role inside the
   * parent PSI element. Returns `null` when no wrap should be applied.
   *
   * @param parent        the parent formatting block
   * @param child         the child AST node being laid out
   * @param scalaSettings Scala-specific code style settings
   * @param suggestedWrap the wrap suggested for the parent (see [[suggestedWrap]])
   */
  def arrangeSuggestedWrapForChild(parent: ScalaBlock, child: ASTNode, scalaSettings: ScalaCodeStyleSettings,
                                   suggestedWrap: Wrap): Wrap = {
    val settings = parent.getCommonSettings
    val parentNode = parent.getNode
    val parentPsi = parentNode.getPsi
    val childPsi = child.getPsi
    if (childPsi.isInstanceOf[ScExtendsBlock] &&
      childPsi.getFirstChild != null && !childPsi.getFirstChild.isInstanceOf[ScTemplateBody])
      return Wrap.createWrap(settings.EXTENDS_KEYWORD_WRAP, true)
    // "arrageBinary" (sic, kept for source compatibility): applies the parent's
    // wrap to the operands of a binary-like construct but never to its operator.
    def arrageBinary(elementMatch: PsiElement => Boolean,
                     elementOperation: PsiElement => PsiElement,
                     elementRightSide: PsiElement => PsiElement,
                     elementLeftSide: PsiElement => PsiElement): Wrap = {
      childPsi.getParent match {
        case parent: PsiElement if elementMatch(parent) => {
          if (elementOperation(parent) == childPsi) return null
          if (parent != parentPsi) suggestedWrap
          else if (elementLeftSide(parentPsi) == childPsi) suggestedWrap
          else if (elementRightSide(parentPsi) == childPsi) suggestedWrap
          else null
        }
        case _ => null //hasn't to be
      }
    }
    parentPsi match {
      case inf: ScInfixExpr => {
        return arrageBinary(_.isInstanceOf[ScInfixExpr], _.asInstanceOf[ScInfixExpr].operation,
          _.asInstanceOf[ScInfixExpr].rOp, _.asInstanceOf[ScInfixExpr].lOp)
      }
      case inf: ScInfixPattern => {
        return arrageBinary(_.isInstanceOf[ScInfixPattern], _.asInstanceOf[ScInfixPattern].refernece,
          _.asInstanceOf[ScInfixPattern].rightPattern.orNull,
          _.asInstanceOf[ScInfixPattern].leftPattern)
      }
      case inf: ScInfixTypeElement => {
        return arrageBinary(_.isInstanceOf[ScInfixTypeElement], _.asInstanceOf[ScInfixTypeElement].ref,
          _.asInstanceOf[ScInfixTypeElement].rOp.orNull,
          _.asInstanceOf[ScInfixTypeElement].lOp)
      }
      case psi: ScCompositePattern => {
        if (childPsi.isInstanceOf[ScPattern]) return suggestedWrap
        else return null
      }
      case call: ScMethodCall => {
        // Method-call chains wrap before the dot.
        if (child.getElementType == ScalaTokenTypes.tDOT) return suggestedWrap
        else return null
      }
      case ref: ScReferenceExpression => {
        if (child.getElementType == ScalaTokenTypes.tDOT) return suggestedWrap
        else return null
      }
      case args: ScArgumentExprList => {
        if (childPsi.isInstanceOf[ScExpression]) return suggestedWrap
        else return null
      }
      case patt: ScPatternArgumentList => {
        childPsi match {
          case _: ScPattern => return suggestedWrap
          case _: ScSequenceArg => return suggestedWrap
          case _ => return null
        }
      }
      case params: ScParameterClause => {
        if (childPsi.isInstanceOf[ScParameter]) return suggestedWrap
        else return null
      }
      case params: ScParameters => {
        // Never wrap before the first parameter clause.
        if (childPsi.isInstanceOf[ScParameterClause] && params.clauses.apply(0) != childPsi) return suggestedWrap
        else return null
      }
      case annot: ScAnnotations => {
        if (childPsi.isInstanceOf[ScAnnotation]) return suggestedWrap
        else return null
      }
      case _ if parentNode.getElementType == ScalaTokenTypes.kEXTENDS && parent.myLastNode != null => {
        val e: ScExtendsBlock = PsiTreeUtil.getParentOfType(parentPsi, classOf[ScExtendsBlock])
        // First wrappable element after `extends`: early definitions if present,
        // otherwise the first parent type.
        val first: PsiElement = e.earlyDefinitions match {
          case Some(z) => z
          case _ => e.templateParents match {
            case Some(tp) if tp.typeElements.length > 0 => tp.typeElements(0)
            case _ => null
          }
        }
        if (first == null) return null
        if (childPsi == first) return suggestedWrap
        if (scalaSettings.WRAP_BEFORE_WITH_KEYWORD) {
          if (child.getElementType == ScalaTokenTypes.kWITH) return suggestedWrap
          else return null
        } else {
          e.templateParents match {
            case Some(tp) if tp.typeElements.exists(_ == childPsi) => return suggestedWrap
            case _ => return null
          }
        }
      }
      case _ =>
    }
    null
  }
}
/**
* Licensed to the Minutemen Group under one or more contributor license
* agreements. See the COPYRIGHT file distributed with this work for
* additional information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package silhouette.provider.oauth2
import java.time.Clock
import io.circe.Json
import silhouette.http.Method.GET
import silhouette.http._
import silhouette.http.client.Request
import silhouette.provider.UnexpectedResponseException
import silhouette.provider.oauth2.DropboxProvider._
import silhouette.provider.oauth2.OAuth2Provider._
import silhouette.provider.social._
import silhouette.provider.social.state.StateHandler
import silhouette.{ ConfigURI, LoginInfo }
import scala.concurrent.{ ExecutionContext, Future }
/**
* Base Dropbox OAuth2 Provider.
*
* @see https://www.dropbox.com/developers/blog/45/using-oauth-20-with-the-core-api
* @see https://www.dropbox.com/developers/core/docs#oauth2-methods
*/
trait BaseDropboxProvider extends OAuth2Provider {
  /**
   * The provider ID.
   */
  override val id = ID
  /**
   * Builds the social profile.
   *
   * Issues an authenticated GET against the account-info endpoint (the
   * configured `apiURI`, falling back to [[DropboxProvider.DefaultApiURI]])
   * and parses the JSON body into a profile.
   *
   * @param authInfo The auth info received from the provider.
   * @return On success the build social profile, otherwise a failure.
   */
  override protected def buildProfile(authInfo: OAuth2Info): Future[Profile] = {
    val uri = config.apiURI.getOrElse[ConfigURI](DefaultApiURI)
    // Dropbox expects the OAuth2 access token as a Bearer authorization header.
    val request = Request(GET, uri).withHeaders(Header(Header.Name.Authorization, s"Bearer ${authInfo.accessToken}"))
    httpClient.execute(request).flatMap { response =>
      withParsedJson(response) { json =>
        response.status match {
          case Status.OK =>
            profileParser.parse(json, authInfo)
          case status =>
            // Any non-200 status is surfaced as an unexpected-response failure.
            Future.failed(new UnexpectedResponseException(UnexpectedResponse.format(id, json, status)))
        }
      }
    }
  }
}
/**
* The profile parser for the common social profile.
*
* @param ec The execution context.
*/
class DropboxProfileParser(implicit val ec: ExecutionContext)
  extends SocialProfileParser[Json, CommonSocialProfile, OAuth2Info] {
  /**
   * Parses the social profile from Dropbox's account-info JSON.
   *
   * The numeric `uid` field is mandatory; the name fields are optional and
   * simply omitted from the profile when absent.
   *
   * @param json     The content returned from the provider.
   * @param authInfo The auth info to query the provider again for additional data.
   * @return The social profile from the given result.
   */
  override def parse(json: Json, authInfo: OAuth2Info): Future[CommonSocialProfile] = {
    val cursor = json.hcursor
    val uidResult = cursor.downField("uid").as[Long].getOrError(json, "uid", ID)
    Future.fromTry(uidResult).map { uid =>
      val nameDetails = cursor.downField("name_details")
      CommonSocialProfile(
        loginInfo = LoginInfo(ID, uid.toString),
        firstName = nameDetails.downField("given_name").as[String].toOption,
        lastName = nameDetails.downField("surname").as[String].toOption,
        fullName = cursor.downField("display_name").as[String].toOption
      )
    }
  }
}
/**
* The Dropbox OAuth2 Provider.
*
* @param httpClient The HTTP client implementation.
* @param stateHandler The state provider implementation.
* @param clock The current clock instance.
* @param config The provider config.
* @param ec The execution context.
*/
class DropboxProvider(
  protected val httpClient: HttpClient,
  protected val stateHandler: StateHandler,
  protected val clock: Clock,
  val config: OAuth2Config
)(
  implicit
  // NOTE(review): `implicit` appears both as the parameter-list marker above and
  // as a member modifier here; the second occurrence looks redundant - confirm.
  override implicit val ec: ExecutionContext
) extends BaseDropboxProvider with CommonProfileBuilder {
  /**
   * The type of this class.
   */
  override type Self = DropboxProvider
  /**
   * The profile parser implementation.
   */
  override val profileParser = new DropboxProfileParser
  /**
   * Gets a provider initialized with a new config object.
   *
   * @param f A function which gets the config passed and returns different config.
   * @return An instance of the provider initialized with new config.
   */
  override def withConfig(f: OAuth2Config => OAuth2Config): Self =
    new DropboxProvider(httpClient, stateHandler, clock, f(config))
}
/**
* The companion object.
*/
object DropboxProvider {
  /**
   * The provider ID.
   */
  val ID = "dropbox"
  /**
   * Default API endpoint used to fetch the authenticated user's account info.
   */
  val DefaultApiURI: ConfigURI = ConfigURI("https://api.dropbox.com/1/account/info")
}
| mohiva/silhouette | modules/provider-oauth2/src/main/scala/silhouette/provider/oauth2/DropboxProvider.scala | Scala | apache-2.0 | 4,835 |
package dk.tennis.compare
import dk.atp.api.tournament.GenericTournamentAtpApi
import dk.tennisprob.tournament.GenericTournamentProbCalc
import dk.tennis.compare.rating.multiskill.matchloader.MatchesLoader
import dk.tennis.compare.rating.multiskill.infer.matchprob.givenmatchresults.InferMatchProbGivenMatchResults
import dk.tennis.compare.rating.multiskill.matchloader.MatchResult
import java.text.SimpleDateFormat
import dk.tennis.compare.rating.multiskill.model.perfdiff.Surface
import dk.tennis.compare.rating.multiskill.matchloader.PlayerStats
import com.typesafe.scalalogging.slf4j.Logging
import dk.tennis.compare.rating.multiskill.model.perfdiff.Player
import dk.tennis.compare.rating.multiskill.model.perfdiff.NumOfSets
/**
 * Simulates a knockout tennis tournament: loads historical match results,
 * fits a skills model, selects the top 128 players, builds a seeded first-round
 * draw and prints each player's probability of winning the tournament.
 * Runs as a side-effecting script (reads a CSV fixture, logs, prints).
 */
object TournamentApp extends App with Logging {
  val df = new SimpleDateFormat("dd/MM/yyyy")
  // Reference date used for skill inference and hypothetical matches.
  val time = df.parse("19/01/2015")
  val matchesFile = "./src/test/resources/atp_historical_data/match_data_2006_2014_121114.csv"
  val matchResults = MatchesLoader.loadMatches(matchesFile, 2011, 2014)
  logger.info("Building skills model...")
  val matchModel = InferMatchProbGivenMatchResults(matchResults.toIndexedSeq)
  logger.info("Building skills model...DONE")
  val players = getPlayers()
  players.foreach(println(_))
  val draw = getDraw(players.toIndexedSeq)
  draw.foreach(println(_))
  val winningProbs = GenericTournamentProbCalc.winningProbs(draw, matchProb)
  println(winningProbs.toList.sortBy(v => v._2).foreach(println(_)))
  // Probability that player1 beats player2 in a hypothetical best-of-3 hard-court match.
  def matchProb(player1: String, player2: String): Double = {
    val result = MatchResult(time, "tournament name", Surface.HARD, player1, player2, time, player1Won = true, numOfSets = 3, PlayerStats(0, 0, 0), PlayerStats(0, 0, 0))
    matchModel.predict(result).matchProb(player1)
  }
  // Top 128 players by combined serve+return skill mean, among players active
  // after 01/07/2014, ranked best first.
  private def getPlayers(): Seq[String] = {
    val allPlayers = matchResults.filter(m => m.tournamentTime.getTime() > df.parse("01/07/2014").getTime()).flatMap(r => List(r.player1, r.player2)).distinct
    val skills = allPlayers.map { p =>
      val playerOnServe = Player(p, p, true, time, Surface.HARD,NumOfSets.THREE_SETS)
      val skillOnServe = matchModel.infer.inferSkill(playerOnServe)
      val playerOnReturn = Player(p, p, false, time, Surface.HARD,NumOfSets.THREE_SETS)
      val skillOnReturn = matchModel.infer.inferSkill(playerOnReturn)
      (p, skillOnServe.skill.m + skillOnReturn.skill.m)
    }.sortWith((s1,s2) => s1._2>s2._2)
    skills.take(128).map(s => s._1)
  }
  // Builds a seeded first-round draw by repeatedly pairing the strongest
  // remaining group with the weakest (assumes `players` is ranked best first
  // and has a power-of-two size - TODO confirm for non-128 inputs).
  private def getDraw(players: IndexedSeq[String]): Seq[Tuple2[String, String]] = {
    var groups: Array[Array[String]] = players.map(p => Array(p)).toArray
    while (groups.size > 1) {
      groups = (0 until groups.size / 2).map(i => groups(i) ++ groups(groups.size - i - 1)).toArray
    }
    groups(0).grouped(2).map(g => (g(0),g(1))).toList
  }
}
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
package com.krux.hyperion.precondition
import com.krux.hyperion.adt.HString
import com.krux.hyperion.aws.AdpDynamoDBTableExistsPrecondition
import com.krux.hyperion.common.{ BaseFields, PipelineObjectId }
import com.krux.hyperion.HyperionContext
/**
* A precondition to check that the DynamoDB table exists.
*
* @param tableName The DynamoDB table to check.
*/
/**
 * A precondition to check that the DynamoDB table exists.
 *
 * @param baseFields         common pipeline-object fields (id, name, ...)
 * @param preconditionFields shared precondition fields (role, timeout, retries, actions)
 * @param tableName          the DynamoDB table to check
 */
case class DynamoDBTableExistsPrecondition private (
  baseFields: BaseFields,
  preconditionFields: PreconditionFields,
  tableName: HString
) extends Precondition {
  type Self = DynamoDBTableExistsPrecondition
  def updateBaseFields(fields: BaseFields) = copy(baseFields = fields)
  def updatePreconditionFields(fields: PreconditionFields) = copy(preconditionFields = fields)
  // Lazily builds the AWS Data Pipeline JSON representation of this precondition.
  lazy val serialize = AdpDynamoDBTableExistsPrecondition(
    id = id,
    name = name,
    tableName = tableName.serialize,
    role = role.serialize,
    preconditionTimeout = preconditionTimeout.map(_.serialize),
    maximumRetries = maximumRetries.map(_.serialize),
    onFail = seqToOption(onFail)(_.ref),
    onLateAction = seqToOption(onLateAction)(_.ref),
    onSuccess = seqToOption(onSuccess)(_.ref)
  )
}
object DynamoDBTableExistsPrecondition {
  /**
   * Creates the precondition with default base/precondition fields for the
   * given table name.
   *
   * @param tableName the DynamoDB table to check
   * @param hc        the Hyperion context supplying defaults (e.g. role)
   */
  def apply(tableName: HString)(implicit hc: HyperionContext) = new DynamoDBTableExistsPrecondition(
    baseFields = BaseFields(PipelineObjectId(DynamoDBTableExistsPrecondition.getClass)),
    preconditionFields = Precondition.defaultPreconditionFields,
    tableName = tableName
  )
}
| realstraw/hyperion | core/src/main/scala/com/krux/hyperion/precondition/DynamoDBTableExistsPrecondition.scala | Scala | bsd-3-clause | 1,727 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.transform.vision.image.augmentation
import com.intel.analytics.bigdl.transform.vision.image.{ImageFrame, LocalImageFrame}
import org.opencv.imgcodecs.Imgcodecs
import org.scalatest.{FlatSpec, Matchers}
/** Smoke test for the RandomAlterAspect image augmentation transformer. */
class RandomAlterRatioSpec extends FlatSpec with Matchers {
  // Directory of sample images used as transformer input.
  val resource = getClass.getClassLoader.getResource("pascal/")
  "RandomAlterRatio" should "work properly" in {
    val data = ImageFrame.read(resource.getFile)
    val transformer = RandomAlterAspect()
    val transformed = transformer(data).asInstanceOf[LocalImageFrame]
    val imf = transformed.array.head
    // Assumes RandomAlterAspect's default output size is 224x224 - confirm against its defaults.
    imf.getHeight() should be (224)
    imf.getWidth() should be (224)
    // Write the transformed image to a temp file for manual inspection.
    val tmpFile = java.io.File.createTempFile("module", ".jpg")
    Imgcodecs.imwrite(tmpFile.toString, imf.opencvMat())
    println(tmpFile)
  }
}
| yiheng/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/transform/vision/image/augmentation/RandomAlterRatioSpec.scala | Scala | apache-2.0 | 1,430 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.knockdata.spark.highcharts.plotoptions
import com.knockdata.spark.highcharts.model._
import com.knockdata.spark.highcharts.base._
/**
 * Builder for the Highcharts `plotOptions.pyramid` configuration section.
 *
 * Each setter appends a key/value pair to the underlying options model
 * (via `append` from the base class) and returns `this` so calls can be
 * chained fluently.
 */
private[highcharts] class Pyramid extends BasePlotOptions with PublicApply {
  def fieldName = "pyramid"

  def allowPointSelect(value: Boolean): this.type = {
    append("allowPointSelect", value)
  }

  def borderColor(value: String): this.type = {
    append("borderColor", value)
  }

  def borderWidth(value: Int): this.type = {
    append("borderWidth", value)
  }

  def center(values: Any*): this.type = {
    append("center", values.toList)
  }

  def colors(values: String*): this.type = {
    append("colors", values.toList)
  }

  def dataLabels(values: (String, Any)*): this.type = {
    append("dataLabels", values.toMap)
  }

  def depth(value: Int): this.type = {
    append("depth", value)
  }

  def height(value: Int): this.type = {
    append("height", value)
  }

  def linkedTo(value: String): this.type = {
    append("linkedTo", value)
  }

  def minSize(value: Int): this.type = {
    append("minSize", value)
  }

  def reversed(value: Boolean): this.type = {
    append("reversed", value)
  }

  def shadow(value: Boolean): this.type = {
    append("shadow", value)
  }

  def showInLegend(value: Boolean): this.type = {
    append("showInLegend", value)
  }

  def slicedOffset(value: Int): this.type = {
    append("slicedOffset", value)
  }

  // The only entry consumed here is "hover"; everything else is ignored.
  def states(values: (String, Map[String, Any])*): this.type = {
    // Previously this used `collect { ... }.head`, a partial operation that
    // failed with a bare NoSuchElementException ("next on empty iterator")
    // when no "hover" entry was supplied. collectFirst keeps the same
    // "first hover wins" semantics but fails with a descriptive message
    // (same exception type, so existing callers are unaffected).
    val hover = values.collectFirst {
      case ("hover", v) => v
    }.getOrElse(throw new NoSuchElementException("states requires a \"hover\" entry"))
    append("states", "hover", hover)
  }

  /**
   * It is states.hover; since only hover is valid in states,
   * this is a single function without the embedded structure.
   */
  def statesHover(values: (String, Any)*): this.type = {
    append("states", "hover", values.toMap)
  }

  def width(value: Int): this.type = {
    append("width", value)
  }
}
| knockdata/spark-highcharts | src/main/scala/com/knockdata/spark/highcharts/plotoptions/Pyramid.scala | Scala | apache-2.0 | 2,737 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.businessdetails
import connectors.BusinessMatchingConnector
import controllers.actions.SuccessfulAuthAction
import models.{Country}
import models.businessdetails.{BusinessDetails, CorporationTaxRegisteredYes}
import models.businesscustomer.{Address, ReviewDetails}
import models.businessmatching.{BusinessMatching, BusinessType}
import models.businessmatching.BusinessType.{LimitedCompany, UnincorporatedBody}
import org.mockito.Matchers.any
import org.mockito.Mockito.{verify}
import org.scalatest.concurrent.ScalaFutures
import org.scalatestplus.mockito.MockitoSugar
import play.api.test.Helpers._
import utils.DependencyMocks
import utils.AmlsSpec
import org.mockito.Matchers.{eq => eqTo}
/**
 * Tests for CorporationTaxRegisteredController: the controller reads the UTR
 * from the business-matching review details, persists it to BusinessDetails in
 * the cache where available, and redirects onward.
 */
class CorporationTaxRegisteredControllerSpec extends AmlsSpec with MockitoSugar with ScalaFutures with DependencyMocks {

  trait Fixture {
    self =>
    val request = addToken(authRequest)

    // Default review details: a limited company with a UTR ("sdsw").
    // Individual tests override this via their own BusinessMatching stubs.
    val reviewDetails = ReviewDetails(
      "BusinessName",
      Some(LimitedCompany),
      Address("line1", "line2", Some("line3"), Some("line4"), Some("AA11 1AA"), Country("United Kingdom", "GB")),
      "ghghg",
      Some("sdsw")
    )

    val businessMatching = BusinessMatching(Some(reviewDetails))

    // Cache stubs shared by every test; tests add their own BusinessDetails entry.
    mockCacheFetchAll
    mockCacheGetEntry[BusinessMatching](Some(businessMatching), BusinessMatching.key)
    mockCacheSave[BusinessDetails]

    val controller = new CorporationTaxRegisteredController(
      dataCacheConnector = mockCacheConnector,
      businessMatchingConnector = mock[BusinessMatchingConnector],
      authAction = SuccessfulAuthAction,
      ds = commonDependencies,
      cc = mockMcc,
      errorView)
  }

  "CorporationTaxRegisteredController" when {

    "get is called" must {
      "redirect to ConfirmRegisteredOfficeController" in new Fixture {
        val data = BusinessDetails(corporationTaxRegistered = Some(CorporationTaxRegisteredYes("1111111111")))
        mockCacheGetEntry[BusinessDetails](Some(data), BusinessDetails.key)

        val result = controller.get()(request)
        status(result) must be(SEE_OTHER)
        redirectLocation(result) must be(Some(routes.ConfirmRegisteredOfficeController.get().url))
      }
    }

    "process the UTR" when {
      "business matching UTR exists" in new Fixture {
        // Review details carry a UTR, so get() should save it into BusinessDetails.
        val reviewDtlsUtr = ReviewDetails(
          "BusinessName",
          Some(BusinessType.LimitedCompany),
          Address("line1", "line2", Some("line3"), Some("line4"), Some("AA11 1AA"), Country("United Kingdom", "GB")),
          "XE0000000000000",
          Some("1111111111")
        )

        val corpTax = CorporationTaxRegisteredYes(reviewDtlsUtr.utr.get)

        override val businessMatching = BusinessMatching(Some(reviewDtlsUtr))

        // Re-stub the cache with the overridden business matching data.
        mockCacheFetchAll
        mockCacheGetEntry[BusinessMatching](Some(businessMatching), BusinessMatching.key)
        mockCacheSave[BusinessDetails]

        val data = BusinessDetails(corporationTaxRegistered = None)
        mockCacheGetEntry[BusinessDetails](Some(data), BusinessDetails.key)

        val result = controller.get()(request)
        status(result) must be(SEE_OTHER)

        // The UTR from review details must be persisted to the cache.
        verify(controller.dataCacheConnector).save(eqTo("internalId"), eqTo(BusinessDetails.key),
          eqTo(data.corporationTaxRegistered(corpTax)))(any(), any())
      }

      "business matching UTR NOT exists" in new Fixture {
        // No UTR in review details: get() still redirects without saving one.
        val reviewDtlsUtr = ReviewDetails(
          "BusinessName",
          Some(BusinessType.LimitedCompany),
          Address("line1", "line2", Some("line3"), Some("line4"), Some("AA11 1AA"), Country("United Kingdom", "GB")),
          "XE0000000000000",
          None
        )

        override val businessMatching = BusinessMatching(Some(reviewDtlsUtr))

        mockCacheFetchAll
        mockCacheGetEntry[BusinessMatching](Some(businessMatching), BusinessMatching.key)
        mockCacheSave[BusinessDetails]

        val data = BusinessDetails(corporationTaxRegistered = Some(CorporationTaxRegisteredYes("1111111111")))
        mockCacheGetEntry[BusinessDetails](Some(data), BusinessDetails.key)

        val result = controller.get()(request)
        status(result) must be(SEE_OTHER)
      }
    }

    "respond with NOT_FOUND" must {
      "business type is UnincorporatedBody" in new Fixture {
        // Unincorporated bodies have no corporation tax UTR, so the page is not found.
        mockCacheGetEntry[BusinessMatching](Some(BusinessMatching(Some(ReviewDetails(
          "BusinessName",
          Some(UnincorporatedBody),
          Address("line1", "line2", Some("line3"), Some("line4"), Some("AA11 1AA"), Country("United Kingdom", "GB")), "ghghg")
        ))), BusinessMatching.key)

        val data = BusinessDetails(corporationTaxRegistered = Some(CorporationTaxRegisteredYes("1111111111")))
        mockCacheGetEntry[BusinessDetails](Some(data), BusinessDetails.key)

        val result = controller.get()(request)
        status(result) must be(NOT_FOUND)
      }
    }
  }
}
| hmrc/amls-frontend | test/controllers/businessdetails/CorporationTaxRegisteredControllerSpec.scala | Scala | apache-2.0 | 5,481 |
package mesosphere.marathon.api
import mesosphere.chaos.http.RestModule
import mesosphere.jackson.CaseClassModule
import mesosphere.marathon.api.v2.json.MarathonModule
import com.google.inject.Scopes
import com.fasterxml.jackson.module.scala.DefaultScalaModule
/**
 * Guice servlet module wiring Marathon's REST layer: Jackson modules for
 * (de)serialization, the exception mapper, all v2 API resources and the
 * HTTP servlet filter chain.
 */
class MarathonRestModule extends RestModule {

  // Jackson modules used to (de)serialize Scala types and Marathon's API model.
  override val jacksonModules = Seq(
    new DefaultScalaModule with CaseClassModule,
    new MarathonModule
  )

  protected override def configureServlets() {
    super.configureServlets()

    // Map some exceptions to HTTP responses
    bind(classOf[MarathonExceptionMapper]).asEagerSingleton()

    // V2 API resources, one singleton per endpoint group.
    bind(classOf[v2.AppsResource]).in(Scopes.SINGLETON)
    bind(classOf[v2.TasksResource]).in(Scopes.SINGLETON)
    bind(classOf[v2.EventSubscriptionsResource]).in(Scopes.SINGLETON)
    bind(classOf[v2.QueueResource]).in(Scopes.SINGLETON)
    bind(classOf[v2.GroupsResource]).in(Scopes.SINGLETON)
    bind(classOf[v2.InfoResource]).in(Scopes.SINGLETON)
    bind(classOf[v2.LeaderResource]).in(Scopes.SINGLETON)
    bind(classOf[v2.DeploymentsResource]).in(Scopes.SINGLETON)
    bind(classOf[v2.ArtifactsResource]).in(Scopes.SINGLETON)
    bind(classOf[v2.SchemaResource]).in(Scopes.SINGLETON)

    // This filter will redirect to the master if running in HA mode.
    // NOTE(review): filters are registered proxy -> CORS -> cache-control;
    // Guice applies filters in registration order, so confirm before reordering.
    bind(classOf[LeaderProxyFilter]).asEagerSingleton()
    filter("/*").through(classOf[LeaderProxyFilter])

    bind(classOf[CORSFilter]).asEagerSingleton()
    filter("/*").through(classOf[CORSFilter])

    bind(classOf[CacheDisablingFilter]).asEagerSingleton()
    filter("/*").through(classOf[CacheDisablingFilter])
  }
}
| sielaq/marathon | src/main/scala/mesosphere/marathon/api/MarathonRestModule.scala | Scala | apache-2.0 | 1,617 |
package org.littlewings.lyricsbot
import java.util.TimeZone
import org.quartz.CronScheduleBuilder._
import org.quartz.JobBuilder._
import org.quartz.TriggerBuilder._
import org.quartz.impl.StdSchedulerFactory
import org.quartz.{Job, Scheduler}
/** Holds the single, lazily started Quartz scheduler shared by all bots. */
object ScheduledLyricsBotSupport {
  // Created and started on first access; all bots share this one instance.
  private lazy val SCHEDULER: Scheduler = {
    val underlying = StdSchedulerFactory.getDefaultScheduler
    underlying.start()
    underlying
  }

  /** Returns the shared Quartz scheduler. */
  def scheduler(): Scheduler = SCHEDULER

  /** Stops the shared scheduler unless it has already been shut down. */
  def shutdown(): Unit =
    if (!scheduler.isShutdown) scheduler.shutdown()
}
/**
 * Adds Quartz scheduling to a lyrics bot: registers a cron-triggered job
 * (derived from the artist's album metadata) on the shared scheduler.
 *
 * @tparam T the concrete Quartz [[org.quartz.Job]] to run; Quartz needs the
 *           `Class` because it instantiates the job itself.
 */
trait ScheduledLyricsBotSupport[T <: Job] extends ScheduledLyricsBot
  with LyricsBotSupport {

  // Concrete job class handed to Quartz's JobBuilder.
  protected def jobClass: Class[T]

  /** Builds and schedules this artist's cron-triggered tweet job. */
  def startJob(): Unit = {
    // Job and trigger identities are derived from the artist alias so each
    // artist gets a unique pair on the shared scheduler.
    val job =
      newJob(jobClass)
        .withIdentity(s"${artistNameAlias}-Job")
        .build

    val trigger =
      newTrigger
        .withIdentity(s"${artistNameAlias}-Trigger")
        .withSchedule {
          // Cron expression from the artist's album data, evaluated in JST.
          cronSchedule(artist.tweetScheduleFromAlbum)
            .inTimeZone(TimeZone.getTimeZone("Asia/Tokyo"))
        }
        .forJob(job)
        .startNow
        .build

    ScheduledLyricsBotSupport.scheduler.scheduleJob(job, trigger)
  }

  /** Shuts down the shared scheduler (this affects every scheduled bot, not just this one). */
  def endJob(): Unit =
    ScheduledLyricsBotSupport.shutdown()
}
| kazuhira-r/lyrics-bot | src/main/scala/org/littlewings/lyricsbot/ScheduledLyricsBotSupport.scala | Scala | apache-2.0 | 1,231 |
package com.sksamuel.elastic4s.requests.indexes
import com.sksamuel.elastic4s.requests.analyzers.{AnalyzerDefinition, NormalizerDefinition}
import com.sksamuel.elastic4s.requests.mappings.MappingDefinition
import com.sksamuel.elastic4s.requests.searches.queries.Query
import com.sksamuel.exts.OptionImplicits._
/**
 * An alias entry attached to an index template, optionally restricted by a
 * filter query and/or a routing value. Both setters return an updated copy.
 */
case class TemplateAlias(name: String, filter: Option[Query] = None, routing: Option[String] = None) {
  def filter(filter: Query): TemplateAlias = copy(filter = Some(filter))
  def routing(routing: String): TemplateAlias = copy(routing = Some(routing))
}
// Marker request type for checking whether an index template exists.
case class IndexTemplateExistsRequest()
/**
 * Request to create an index template named `name` that applies to indexes
 * matching `pattern`.
 *
 * The deprecated `_analysis` field carries the legacy analyzer/normalizer
 * model; new code should populate `analysis` with the types from
 * `com.sksamuel.elastic4s.analysis` instead.
 */
case class CreateIndexTemplateRequest(name: String,
                                      pattern: String,
                                      settings: Map[String, Any] = Map.empty,
                                      mappings: Seq[MappingDefinition] = Nil,
                                      @deprecated("use the new analysis package", "7.0.1")
                                      _analysis: Option[AnalysisDefinition] = None,
                                      analysis: Option[com.sksamuel.elastic4s.analysis.Analysis] = None,
                                      order: Option[Int] = None,
                                      version: Option[Int] = None,
                                      create: Option[Boolean] = None,
                                      aliases: Seq[TemplateAlias] = Nil) {
  require(name.nonEmpty, "template name must not be null or empty")
  require(pattern.nonEmpty, "pattern must not be null or empty")

  @deprecated("use new analysis package", "7.2.0")
  def analysis(first: AnalyzerDefinition, rest: AnalyzerDefinition*): CreateIndexTemplateRequest =
    analysis(first +: rest, Nil)

  @deprecated("use new analysis package", "7.2.0")
  def analysis(analyzers: Iterable[AnalyzerDefinition]): CreateIndexTemplateRequest =
    analysis(analyzers, Nil)

  /** Sets the (new-style) analysis model, replacing any previous value. */
  def analysis(analysis: com.sksamuel.elastic4s.analysis.Analysis): CreateIndexTemplateRequest = copy(analysis = analysis.some)

  // Legacy path: merges the given analyzers/normalizers into any previously
  // set legacy analysis rather than replacing it.
  @deprecated("use new analysis package", "7.2.0")
  def analysis(analyzers: Iterable[AnalyzerDefinition],
               normalizers: Iterable[NormalizerDefinition]): CreateIndexTemplateRequest =
    _analysis match {
      case None => copy(_analysis = AnalysisDefinition(analyzers, normalizers).some)
      case Some(a) => copy(_analysis = AnalysisDefinition(a.analyzers ++ analyzers, a.normalizers ++ normalizers).some)
    }

  @deprecated("use new analysis package", "7.2.0")
  def normalizers(first: NormalizerDefinition, rest: NormalizerDefinition*): CreateIndexTemplateRequest =
    analysis(Nil, first +: rest)

  @deprecated("use new analysis package", "7.2.0")
  def normalizers(normalizers: Iterable[NormalizerDefinition]): CreateIndexTemplateRequest =
    analysis(Nil, normalizers)

  def mappings(first: MappingDefinition, rest: MappingDefinition*): CreateIndexTemplateRequest =
    mappings(first +: rest)
  def mappings(mappings: Iterable[MappingDefinition]): CreateIndexTemplateRequest = copy(mappings = mappings.toSeq)

  def version(version: Int): CreateIndexTemplateRequest = copy(version = version.some)

  // replaces all settings with the given settings
  def settings(settings: Map[String, Any]): CreateIndexTemplateRequest = copy(settings = settings)

  def order(order: Int): CreateIndexTemplateRequest = copy(order = order.some)
  def create(create: Boolean): CreateIndexTemplateRequest = copy(create = create.some)

  def aliases(first: TemplateAlias, rest: TemplateAlias*): CreateIndexTemplateRequest = aliases(first +: rest)
  def aliases(aliases: Iterable[TemplateAlias]): CreateIndexTemplateRequest = copy(aliases = aliases.toSeq)
}
| stringbean/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/indexes/CreateIndexTemplateRequest.scala | Scala | apache-2.0 | 3,730 |
/*
* Copyright 2014 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s.blaze.pipeline
import org.http4s.blaze.testkit.BlazeTestSuite
import org.http4s.blaze.util.{Execution, FutureUnit}
import scala.concurrent.Future
import scala.concurrent.ExecutionContext
/** Exercises pipeline construction, stage lookup/removal and splicing. */
class PipelineSuite extends BlazeTestSuite {
  private implicit def ec: ExecutionContext = Execution.trampoline

  // Head stage stub: remembers the last Int written and always reads 54.
  class IntHead extends HeadStage[Int] {
    def name = "IntHead"

    override protected def doClosePipeline(cause: Option[Throwable]): Unit = ()

    // @volatile: written from pipeline write callbacks, read by the test thread.
    @volatile
    var lastWrittenInt: Int = 0

    def writeRequest(data: Int): Future[Unit] = {
      lastWrittenInt = data
      FutureUnit
    }

    def readRequest(size: Int): Future[Int] = Future.successful(54)
  }

  // Mid stage converting Ints read from the head into Strings, and parsing
  // Strings back to Ints on write (failing the Future on non-numeric input).
  class IntToString extends MidStage[Int, String] {
    def name = "IntToString"

    def readRequest(size: Int): Future[String] = channelRead(1).map(_.toString)

    def writeRequest(data: String): Future[Unit] =
      try channelWrite(data.toInt)
      catch { case t: NumberFormatException => Future.failed(t) }
  }

  // Pass-through mid stage used for the find/remove and splice tests.
  class Noop[T] extends MidStage[T, T] {
    def name: String = "NOOP"
    def readRequest(size: Int): Future[T] = channelRead(size)
    def writeRequest(data: T): Future[Unit] = channelWrite(data)
  }

  class StringEnd extends TailStage[String] {
    def name: String = "StringEnd"
    var lastString = ""
  }

  test("A Pipeline should make a basic org.http4s.blaze.pipeline") {
    val head = new IntHead
    val tail = new StringEnd
    TrunkBuilder(new IntToString).cap(tail).base(head)

    // Read goes head (54) -> IntToString ("54"); write goes "32" -> 32 at the head.
    val channelReadResult = tail.channelRead()
    tail.channelWrite("32").map(_ => head)

    for {
      _ <- assertFuture(channelReadResult, "54")
      _ <- assertFuture(Future(head.lastWrittenInt), 32)
    } yield ()
  }

  test("A Pipeline should be able to find and remove stages with identical arguments") {
    val noop = new Noop[Int]
    val p = TrunkBuilder(noop).append(new IntToString).cap(new StringEnd).base(new IntHead)
    // The stage is findable by class and by name until removed.
    assertEquals(p.findInboundStage(classOf[Noop[Int]]).get, noop)
    assertEquals(p.findInboundStage(noop.name).get, noop)
    noop.removeStage()
    assert(p.findInboundStage(classOf[Noop[Int]]).isEmpty)
  }

  test("A Pipeline should splice after") {
    val noop = new Noop[Int]
    val p = TrunkBuilder(new IntToString).cap(new StringEnd).base(new IntHead)
    p.spliceAfter(noop)
    assertEquals(p.findInboundStage(classOf[Noop[Int]]).get, noop)
    assertEquals(p.findInboundStage(noop.name).get, noop)
    noop.removeStage()
    assert(p.findInboundStage(classOf[Noop[Int]]).isEmpty)
  }

  test("A Pipeline should splice before") {
    val noop = new Noop[String]
    val end = new StringEnd
    val p = LeafBuilder(end).prepend(new IntToString).base(new IntHead)
    end.spliceBefore(noop)
    assertEquals(p.findInboundStage(classOf[Noop[String]]).get, noop)
    assertEquals(p.findInboundStage(noop.name).get, noop)
    noop.removeStage()
    assert(p.findInboundStage(classOf[Noop[String]]).isEmpty)
  }
}
| http4s/blaze | core/src/test/scala/org/http4s/blaze/pipeline/PipelineSuite.scala | Scala | apache-2.0 | 3,567 |
/**
 * Regression test for implicit search: encodes "negation" of a type class.
 * NotTagged[A] resolves only when no Tagged[A] instance is in scope.
 */
object Test {
  trait Tagged[A]
  // Negation Tagged: NotTagged[A] is available only if there are no Tagged[A] in scope.
  trait NotTagged[A]

  trait NotTaggedLowPrio {
    implicit def notTaggedInstance[A]: NotTagged[A] = null
  }
  object NotTagged extends NotTaggedLowPrio {
    // Two identical higher-priority candidates: whenever a Tagged[A] exists
    // they are ambiguous, which makes the entire NotTagged[A] search fail
    // (it does NOT fall back to the low-priority instance).
    implicit def notTaggedAmbiguity1[A](implicit ev: Tagged[A]): NotTagged[A] = null
    implicit def notTaggedAmbiguity2[A](implicit ev: Tagged[A]): NotTagged[A] = null
  }

  // Foo(false) when NotTagged[A] resolves, Foo(true) otherwise.
  case class Foo[A](value: Boolean)

  trait FooLowPrio {
    implicit def fooDefault[A]: Foo[A] = Foo(true)
  }
  object Foo extends FooLowPrio {
    implicit def fooNotTagged[A](implicit ev: NotTagged[A]): Foo[A] = Foo(false)
  }


  def main(args: Array[String]): Unit = {
    // Tagged[Int] in scope knocks out NotTagged[Int] via ambiguity,
    // so Foo[Int] falls back to fooDefault; Foo[String] uses fooNotTagged.
    implicit val taggedInt: Tagged[Int] = null

    assert(implicitly[Foo[Int]].value) // fooDefault
    assert(!implicitly[Foo[String]].value) // fooNotTagged

    println(1)
  }
}
| som-snytt/dotty | tests/pos-scala2/i3396.scala | Scala | apache-2.0 | 914 |
package wdl.expression
import org.scalatest.{FlatSpec, Matchers}
import wom.types._
import wom.values._
import scala.util.Success
/** Tests for WDL standard-library functions: transpose, length, prefix and basename. */
class PureStandardLibraryFunctionsSpec extends FlatSpec with Matchers {

  behavior of "transpose"

  it should "transpose a 2x3 into a 3x2" in {
    val inArray = WomArray(WomArrayType(WomArrayType(WomIntegerType)), List(
      WomArray(WomArrayType(WomIntegerType), List(WomInteger(1), WomInteger(2), WomInteger(3))),
      WomArray(WomArrayType(WomIntegerType), List(WomInteger(4), WomInteger(5), WomInteger(6)))
    ))

    val expectedResult = WomArray(WomArrayType(WomArrayType(WomIntegerType)), List(
      WomArray(WomArrayType(WomIntegerType), List(WomInteger(1), WomInteger(4))),
      WomArray(WomArrayType(WomIntegerType), List(WomInteger(2), WomInteger(5))),
      WomArray(WomArrayType(WomIntegerType), List(WomInteger(3), WomInteger(6)))
    ))

    PureStandardLibraryFunctions.transpose(Seq(Success(inArray))) should be(Success(expectedResult))
  }

  behavior of "length"

  it should "get the right answers" in {
    val two = WomArray(WomArrayType(WomIntegerType), List(WomInteger(1), WomInteger(2)))
    PureStandardLibraryFunctions.length(Seq(Success(two))) should be(Success(WomInteger(2)))

    // Empty arrays must report length 0 rather than failing.
    val empty = WomArray(WomArrayType(WomIntegerType), List.empty)
    PureStandardLibraryFunctions.length(Seq(Success(empty))) should be(Success(WomInteger(0)))
  }

  behavior of "prefix"

  it should "prefix things correctly" in {
    // Strings: every element gains the given prefix.
    val strings = List("foo", "bar", "baz")
    val stringWdlValues = WomArray(WomArrayType(WomStringType), strings map WomString.apply)
    val stringsExpectation = WomArray(WomArrayType(WomStringType), strings map { f => WomString("-f " + f) } )
    PureStandardLibraryFunctions.prefix(Seq(Success(WomString("-f ")), Success(stringWdlValues))) should be(Success(stringsExpectation))

    // An empty input array yields an empty output array.
    val noStringWdlValues = WomArray(WomArrayType(WomStringType), List.empty)
    PureStandardLibraryFunctions.prefix(Seq(Success(WomString("-f ")), Success(noStringWdlValues))) should be(Success(WomArray(WomArrayType(WomStringType), Seq.empty)))

    // Integers: values are stringified before prefixing.
    val integers = List(1, 2, 3)
    val integerWdlValues = WomArray(WomArrayType(WomIntegerType), integers map { i => WomInteger.apply(Integer.valueOf(i)) })
    val integersExpectation = WomArray(WomArrayType(WomStringType), integers map { i => WomString("-f " + i)})
    PureStandardLibraryFunctions.prefix(Seq(Success(WomString("-f ")), Success(integerWdlValues))) should be(Success(integersExpectation))
  }

  behavior of "basename"

  // (full path, expected basename, suffix to strip, expected stripped basename)
  List(
    ("my.txt", "my.txt", ".txt", "my"),
    ("/Users/chris/chris.tar.gz", "chris.tar.gz", ".tar.gz", "chris"),
    ("gs://bucket/charlie.bucket", "charlie.bucket", ".wonka", "charlie.bucket")
  ) foreach { case (full, baseWithExtension, suffixToStrip, suffixStripped) =>
    it should s"get the file name for $full" in {
      PureStandardLibraryFunctions.basename(Seq(Success(WomString(full)))) should be(Success(WomString(baseWithExtension)))
    }

    it should s"get the file name for $full and strip the suffix '$suffixToStrip'" in {
      PureStandardLibraryFunctions.basename(Seq(Success(WomString(full)), Success(WomString(suffixToStrip)))) should be(Success(WomString(suffixStripped)))
    }
  }
}
| ohsu-comp-bio/cromwell | wdl/src/test/scala/wdl/expression/PureStandardLibraryFunctionsSpec.scala | Scala | bsd-3-clause | 3,274 |
package scalax.collection
import org.scalatest.{Spec, Matchers}
import GraphPredef._, GraphEdge._, edge._, edge.LBase._, edge.Implicits._
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith
import custom.flight._,
custom.flight.Helper._,
custom.flight.FlightImplicits._
/**
 * Tests custom labeled/weighted edge types: construction, key-label equality
 * (see [[Flight]]) and the symbolic extractor patterns (%, +, %+).
 */
@RunWith(classOf[JUnitRunner])
class TEdgeTest extends Spec with Matchers
{
  object FlightLabel extends LEdgeImplicits[Flight]
  import FlightLabel._

  val (ham, gig) = (Airport("HAM"), Airport("GIG"))
  val flightNo = "LH007"

  object `Custom edge tests` {
    def `LkDiEdge ` {
      val outer = LkDiEdge(ham, gig)(Flight(flightNo))
      val g = Graph(outer)
      val e = g.edges.head
      e.edge.nodes.productElement(0).asInstanceOf[AnyRef].getClass should be (
        g.nodes.head.getClass)
      e.from should be (ham)
      e.to should be (gig)
      e.flightNo should be (flightNo)
      e should be (outer)
      e.## should be (outer.##)
      // Flight equality is keyed on flightNo only, so a different departure
      // time still compares equal, while a different flightNo does not.
      val eqFlight = LkDiEdge(ham, gig)(Flight(flightNo, 11 o 2))
      e should be (eqFlight)
      e.## should be (eqFlight.##)
      val neFlight = LkDiEdge(ham, gig)(Flight(flightNo + "x", 11 o 2))
      e should not be (neFlight)
      e.## should not be (neFlight.##)
    }
    def `LkDiEdgeShortcut ` {
      // ~+#> is the shortcut operator for LkDiEdge construction.
      val outer = LkDiEdge(ham, gig)(Flight(flightNo))
      (ham ~+#> gig)(Flight(flightNo)) should be (outer)
      (ham ~+#> gig)(Flight(flightNo, 11 o 20)) should be (outer)
    }
    def `matching weighted edges` {
      val (n1, n2, w) = (1, 2, 5)
      def check(_n1: Int, _n2: Int, _w: Long) {
        _n1 should be (n1)
        _n2 should be (n2)
        _w should be (w)
      }
      // The same weighted edge deconstructs via the case class extractor,
      // the % extractor object and the infix % pattern.
      val wDi = (n1 ~%> n2)(w)
      wDi match { case WDiEdge(s, t, w) => check(s, t, w) }
      wDi match { case s :~> %(t, w) => check(s, t, w) }
      wDi match { case s :~> t % w => check(s, t, w) }
      Graph(wDi).get(wDi).edge match {
        case s :~> t % w => check(s.value, t.value, w) }

      val wkDi = (n1 ~%#> n2)(w)
      wkDi match { case s :~> t % w => check(s, t, w) }
    }
    def `matching labeled edges` {
      object StringLabel extends LEdgeImplicits[String]
      import StringLabel._

      val (n1, n2, label) = (1, 2, "A")
      def check(_n1: Int, _n2: Int, _label: String) {
        _n1 should be (n1)
        _n2 should be (n2)
        _label should be (label)
      }
      // Labeled edges deconstruct via LDiEdge, the + extractor and infix +.
      val lDi = (n1 ~+> n2)(label)
      lDi match { case LDiEdge(s, t, l) => check(s, t, l) }
      lDi match { case s :~> +(t, l) => check(s, t, l) }
      lDi match { case s :~> t + l => check(s, t, l) }
      Graph(lDi).get(lDi).edge match {
        case s :~> t + l => check(s.value, t.value, l) }

      val lkDi = (n1 ~+#> n2)(label)
      lkDi match { case s :~> t + l => check(s, t, l) }
    }
    def `matching weighted labeled edges` {
      object StringLabel extends LEdgeImplicits[String]
      import StringLabel._

      val (n1, n2, label, weight) = (1, 2, "A", 4L)
      def check(_n1: Int, _n2: Int, _weight: Long, _label: String) {
        _n1 should be (n1)
        _n2 should be (n2)
        _weight should be (weight)
        _label should be (label)
      }
      // Weighted+labeled edges deconstruct via WLDiEdge, %+ and infix %+.
      val wlDi = (n1 ~%+> n2)(weight, label)
      wlDi match { case WLDiEdge(s, t, w, l) => check(s, t, w, l) }
      wlDi match { case s :~> %+(t, w, l) => check(s, t, w, l) }
      wlDi match { case s :~> t %+ (w, l) => check(s, t, w, l) }
      Graph(wlDi).get(wlDi).edge match {
        case s :~> t %+ (w, l) => check(s.value, t.value, w, l) }

      val wlkDi = (n1 ~%+#> n2)(weight, label)
      wlkDi match { case s :~> t %+ (w, l) => check(s, t, w, l) }
    }
    def `findOutgoingTo LkDiEdge` {
      import edge.LkDiEdge
      // Self-loop: the node must find the outgoing edge to itself.
      val le = LkDiEdge(1,1)(1)
      val lg = Graph(le)
      val ln1 = lg get 1
      (ln1 findOutgoingTo ln1) should be (Some(le))
    }
    def `LkHyperEdge equality` {
      // Directed and undirected key-labeled hyperedges with different labels
      // coexist in the same graph and are both findable.
      val e1 = LkDiHyperEdge(1,1)("a")
      val e2 = LkHyperEdge(1,1)("b")
      val g = Graph[Int, LHyperEdge](e1, e2)
      g find e1 should be ('defined)
      g find e2 should be ('defined)
    }
    def `LkDiHyperEdge equality` {
      // The outer edge recovered via toOuter must equal the original.
      val e = LkDiHyperEdge(1,2,3)("a")
      val g = Graph[Int, LHyperEdge](e)
      val eo = g.edges.head.toOuter
      g find eo should be ('defined)
    }
  }
}
/** Label type for use in key-labeled edges.
 *
 *  `flightNo` acts as the label key: among the edges incident to a given pair
 *  of nodes there may be at most one edge with a given flight number. To make
 *  that work, equality and hashing are deliberately narrowed to `flightNo`
 *  alone — key-labeled edges fold the label's hash into the edge hash, so
 *  `departure` and `duration` must not contribute.
 *
 *  Kept top-level: with Scala 2.9.1-final a path-dependent label type hit a
 *  runtime issue that disappeared when the class was moved out.
 */
case class Flight(val flightNo: String,
                  val departure: DayTime = DayTime(0,0),
                  val duration: Duration = Duration(0,0))
{
  override def equals(other: Any): Boolean = other match {
    case Flight(no, _, _) => no == flightNo
    case _                => false
  }

  override def hashCode: Int = flightNo.##
}
// Compiler tests for predefined edges.
object Test {
  import scalax.collection.GraphPredef._
  // Plain hyper-, directed and undirected edges used as mix-in partners below.
  val h = 2~4~6
  val d = 1~>2
  val u = 1~(-1)

  // Each group checks that a labeled edge variant can share a Graph with
  // plain edges and with other labeled variants; lines marked "not inferred"
  // need an explicit edge type parameter.
  val (lh1, lh2) = (LHyperEdge(1,3,5)(6), LHyperEdge(1,3,5)(7))
  val g_lh_h = Graph(lh1,h)
  val g_lh_d = Graph[Int,HyperEdge](lh1,d) // not inferred
  val g_lh_lh = Graph(lh1,lh2)

  val (lkh1, lkh2) = (LkHyperEdge(1,3,5)(8), LkHyperEdge(1,3,5)(9))
  val g_lkh_h = Graph(lkh1,h)
  val g_lkh_lkh = Graph(lkh1,lkh2)
  val g_lkh_lh = Graph(lkh1,lh1)

  val (ldh1, ldh2) = (LDiHyperEdge(1,3,5)(10), LDiHyperEdge(1,3,5)(11))
  val g_ldh_h = Graph(ldh1,h)
  val g_ldh_ldh = Graph(ldh1,ldh2)
  val g_ldh_lh = Graph(ldh1,lh2)
  val g_ldh_lkh = Graph[Int,LHyperEdge](ldh1,lkh2) // not inferred

  val (lkdh1, lkdh2) = (LkDiHyperEdge(1,3,5)(12), LkDiHyperEdge(1,3,5)(13))
  val g_lkdh_h = Graph(lkdh1,h)
  val g_lkdh_lkdh = Graph(lkdh1,lkdh2)
  val g_lkdh_ldh = Graph(lkdh1,ldh2)
  val g_lkdh_lh = Graph(lkdh1,lh2)
  val g_lkdh_lkh = Graph[Int,LHyperEdge](lkdh1,lkh2) // not inferred

  val (lu1, lu2) = (LUnDiEdge(1,3)(4), LUnDiEdge(1,3)(5))
  val g_lu_u = Graph(lu1,u)
  val g_lu_h = Graph(lu1,h)
  val g_lu_d = Graph[Int,UnDiEdge](lu1,d) // not inferred
  val g_lu_lu = Graph(lu1,lu2)
  val g_lu_lh = Graph[Int,HyperEdge](lu1,lh2) // not inferred
}
// Compiler tests for calling label methods by means of implicits.
object TestImplicits {
  import scalax.collection.Graph
  case class MyLabel(val i: Int)
  val eOuter = LUnDiEdge(1,3)(MyLabel(4))

  // Importing an LEdgeImplicits[MyLabel] instance makes label members
  // (here `i`) directly callable on an outer labeled edge...
  object OuterEdge {
    object UserL extends LEdgeImplicits[MyLabel]
    import UserL._
    val four = eOuter.i
  }
  // ...and equally on an inner edge obtained from a Graph.
  object InnerEdge {
    object UserL extends LEdgeImplicits[MyLabel]
    import UserL._
    val g = Graph(eOuter)
    val eInner = g.edges.head

    // val four_0 = e.label match {case m: MyLabel => m.i}
    val four = eInner.i
  }
}
// Compiler tests for predefined edge shortcuts.
object TestOperators {
  // ~+> builds a labeled directed edge; ~+#> its key-labeled variant.
  val ld = (1 ~+> 2)(3)
  val lkd = (3 ~+#> 4)(7)
} | chen0031/scala-graph | core/src/test/scala/scalax/collection/TEdge.scala | Scala | bsd-3-clause | 7,600 |
package org.cakesolutions.akkapatterns
import org.specs2.mutable.Specification
import org.specs2.specification.Analysis
import org.specs2.analysis.ClassycleDependencyFinder
class ArchitectureSpec extends Specification with Analysis with ClassycleDependencyFinder {
"The architecture" should {
"Have properly defined layers" in {
val ls = layers(
"main",
"web",
"api",
"core",
"domain"
).withPrefix("org.cakesolutions.akkapatterns").inTargetDir("target/scala-2.10")
ls must beRespected
}
}
}
| anand-singh/akka-patterns | sbt/src/test/scala/org/cakesolutions/akkapatterns/ArchitectureSpec.scala | Scala | apache-2.0 | 566 |
package fpinscala.datastructures
sealed trait Tree[+A]
case class Leaf[A](value: A) extends Tree[A]
case class Branch[A](left: Tree[A], right: Tree[A]) extends Tree[A]
object Tree {
def size[A](t: Tree[A]): Int = t match {
case Leaf(_) => 1
case Branch(l,r) => 1 + size(l) + size(r)
}
/*
We are using the method `max` that exists on all `Int` values rather than an explicit `if` expression.
Notice how similar the implementation is to `size`. We'll abstract out the common pattern in a later exercise.
*/
def maximum(t: Tree[Int]): Int = t match {
case Leaf(n) => n
case Branch(l,r) => maximum(l) max maximum(r)
}
/*
Again, notice how similar the implementation is to `size` and `maximum`.
*/
def depth[A](t: Tree[A]): Int = t match {
case Leaf(_) => 0
case Branch(l,r) => 1 + (depth(l) max depth(r))
}
def map[A,B](t: Tree[A])(f: A => B): Tree[B] = t match {
case Leaf(a) => Leaf(f(a))
case Branch(l,r) => Branch(map(l)(f), map(r)(f))
}
/*
Like `foldRight` for lists, `fold` receives a "handler" for each of the data constructors of the type, and recursively accumulates some value using these handlers. As with `foldRight`, `fold(t)(Leaf(_))(Branch(_,_)) == t`, and we can use this function to implement just about any recursive function that would otherwise be defined by pattern matching.
*/
def fold[A,B](t: Tree[A])(f: A => B)(g: (B,B) => B): B = t match {
case Leaf(a) => f(a)
case Branch(l,r) => g(fold(l)(f)(g), fold(r)(f)(g))
}
def sizeViaFold[A](t: Tree[A]): Int =
fold(t)(a => 1)(1 + _ + _)
def maximumViaFold(t: Tree[Int]): Int =
fold(t)(a => a)(_ max _)
def depthViaFold[A](t: Tree[A]): Int =
fold(t)(a => 0)((d1,d2) => 1 + (d1 max d2))
/*
Notice the type annotation required on the expression `Leaf(f(a))`. Without this annotation, we get an error like this:
type mismatch;
found : fpinscala.datastructures.Branch[B]
required: fpinscala.datastructures.Leaf[B]
fold(t)(a => Leaf(f(a)))(Branch(_,_))
^
This error is an unfortunate consequence of Scala using subtyping to encode algebraic data types. Without the annotation, the result type of the fold gets inferred as `Leaf[B]` and it is then expected that the second argument to `fold` will return `Leaf[B]`, which it does not (it returns `Branch[B]`). Really, we would prefer if Scala would infer `Tree[B]` as the result type in both cases. When working with algebraic data types in Scala, it is somewhat common to define helper functions that simply call the corresponding data constructors but give the less specific result type:
def leaf[A](a: A): Tree[A] = Leaf(a)
def branch[A](l: Tree[A], r: Tree[A]): Tree[A] = Branch(l, r)
*/
def mapViaFold[A,B](t: Tree[A])(f: A => B): Tree[B] =
fold(t)(a => Leaf(f(a)): Tree[B])(Branch(_,_))
} | ShokuninSan/fpinscala | answers/src/main/scala/fpinscala/datastructures/Tree.scala | Scala | mit | 2,926 |
import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Props
import scala.util.Random
//WorkerType
//////////
object WorkerType extends Enumeration {
  // Distinguishes the two kinds of workers that queue at Santa's house.
  type WorkerType = Value
  val Elf, Reindeer = Value
}
//Worker
////////
class Worker(
  workerType: WorkerType.WorkerType,
  id: Int,
  maxSleepTime: Int,
  santasHouse: ActorRef) extends Actor {

  // Forces the first work iteration and going to Santa.
  // `Unit` here is the scala.Unit companion object, used as a plain token message.
  self ! Unit

  def receive = {
    // Receive is used for blocking the worker while it meets with Santa and he
    // sends one type of message, but cases are needed to satisfy the type system.
    case Unit =>
      // "Work" for a random interval, then queue up at Santa's house.
      // NOTE(review): this blocks the actor's dispatcher thread; acceptable
      // for this toy example but a scheduled message would be idiomatic Akka.
      Main.sleepRandomInterval(maxSleepTime)
      santasHouse ! (workerType, id)
  }
}
//Reindeer and Elf
// A reindeer rests for up to 30 seconds between visits to Santa's house.
class Reindeer(id: Int, santasHouse: ActorRef)
  extends Worker(WorkerType.Reindeer, id, 30, santasHouse)
// An elf works for up to 15 seconds between visits to Santa's house.
class Elf(id: Int, santasHouse: ActorRef)
  extends Worker(WorkerType.Elf, id, 15, santasHouse)
//Santa
///////
/**
 * Gatekeeper actor: collects waiting workers per kind and wakes Santa only
 * when a full group is assembled (9 reindeer or 3 elves).
 */
class SantasHouse(santa: ActorRef) extends Actor {
  // Workers currently waiting, newest first, as (id, actor) pairs.
  var elfs = List[(Int, ActorRef)]()
  var reindeers = List[(Int, ActorRef)]()

  /**
   * Adds a worker to its waiting queue. When the queue reaches `maxQueue`,
   * the whole group's ids and refs are sent to Santa and an empty queue is
   * returned; otherwise the grown queue is returned.
   *
   * Rewritten expression-style: the original used an explicit `return`
   * mid-method, which is non-idiomatic Scala.
   */
  def enterQueue(
    workerType: WorkerType.WorkerType,
    id: Int,
    actorRef: ActorRef,
    queue: List[(Int, ActorRef)],
    maxQueue: Int): List[(Int, ActorRef)] = {
    val updatedQueue = (id, actorRef) +: queue
    if (updatedQueue.length == maxQueue) {
      // Group complete: hand the ids and actor refs to Santa, reset the queue.
      val (ids, actorRefs) = updatedQueue.unzip
      santa ! (workerType, ids, actorRefs)
      Nil
    } else {
      updatedQueue
    }
  }

  def receive = {
    case (WorkerType.Reindeer, id: Int) =>
      reindeers = enterQueue(WorkerType.Reindeer, id, sender, reindeers, 9)
    case (WorkerType.Elf, id: Int) =>
      elfs = enterQueue(WorkerType.Elf, id, sender, elfs, 3)
  }
}
//Santa
///////
/**
 * Santa: receives a complete group of workers (their ids plus actor
 * references), announces the activity, works for up to five seconds, and
 * then releases every group member by sending it the wake-up token.
 */
class Santa extends Actor {
  def receive = {
    // Note: the List element types are erased at runtime; this matches the
    // same messages the two original cases matched.
    case (kind: WorkerType.WorkerType, ids: List[Int], refs: List[ActorRef]) =>
      val announcement = kind match {
        case WorkerType.Reindeer => s"Santa: ho ho delivering presents with reindeers: $ids"
        case WorkerType.Elf => s"Santa: ho ho helping elfs: $ids"
      }
      println(announcement)
      Main.sleepRandomInterval(5)
      refs.foreach(_ ! Unit)
  }
}
object Main {
  /** Blocks the calling thread for a uniformly random whole number of
    * seconds in [0, maxTime). */
  def sleepRandomInterval(maxTime: Int): Unit =
    Thread.sleep(Random.nextInt(maxTime) * 1000L)

  /** Wires up the system: one Santa, his house, 9 reindeer and 30 elves. */
  def main(args: Array[String]): Unit = {
    val system = ActorSystem("greenland")
    val santa = system.actorOf(Props(new Santa))
    val santasHouse = system.actorOf(Props(new SantasHouse(santa)))
    for (id <- 1 to 9) system.actorOf(Props(new Reindeer(id, santasHouse)))
    for (id <- 1 to 30) system.actorOf(Props(new Elf(id, santasHouse)))
  }
}
| eXeDK/dpt908e14 | SantaClaus/SantaClausScala.scala | Scala | mit | 2,804 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.commons.source
import cascading.flow.FlowDef
import cascading.pipe.Pipe
import cascading.scheme.Scheme
import cascading.tap.Tap
import cascading.tuple.Fields
import com.twitter.algebird.Monoid
import com.twitter.bijection.Injection
import com.twitter.chill.Externalizer
import com.twitter.scalding.TDsl._
import com.twitter.scalding._
import com.twitter.scalding.commons.scheme.KeyValueByteScheme
import com.twitter.scalding.commons.tap.VersionedTap
import com.twitter.scalding.commons.tap.VersionedTap.TapMode
import com.twitter.scalding.source.{ CheckedInversion, MaxFailuresCheck }
import com.twitter.scalding.typed.KeyedListLike
import com.twitter.scalding.typed.TypedSink
import org.apache.hadoop.mapred.JobConf
import scala.collection.JavaConverters._
/**
* Source used to write key-value pairs as byte arrays into a versioned store.
* Supports incremental updates via the monoid on V.
*/
object VersionedKeyValSource {
  // Number of historical versions the underlying tap retains by default.
  val defaultVersionsToKeep = 3

  // TODO: have two apply methods here for binary compatibility purpose. Need to clean it up in next release.
  /** Builds a source keeping the default number of historical versions. */
  def apply[K, V](path: String, sourceVersion: Option[Long] = None, sinkVersion: Option[Long] = None, maxFailures: Int = 0)(implicit codec: Injection[(K, V), (Array[Byte], Array[Byte])]) = {
    new VersionedKeyValSource[K, V](path, sourceVersion, sinkVersion, maxFailures, defaultVersionsToKeep)
  }

  /** Builds a source keeping `versionsToKeep` historical versions. */
  def apply[K, V](path: String, sourceVersion: Option[Long], sinkVersion: Option[Long], maxFailures: Int, versionsToKeep: Int)(implicit codec: Injection[(K, V), (Array[Byte], Array[Byte])]) =
    new VersionedKeyValSource[K, V](path, sourceVersion, sinkVersion, maxFailures, versionsToKeep)
}
/**
 * Key-value source/sink over a versioned store rooted at `path`.
 *
 * Pairs are serialized to byte arrays through the implicit `codec`
 * Injection. Reads come from `sourceVersion` when set (otherwise the tap's
 * default version); writes go to `sinkVersion` when set. Up to `maxFailures`
 * decode failures are tolerated on read, and `versionsToKeep` historical
 * versions are retained by the tap.
 */
class VersionedKeyValSource[K, V](val path: String, val sourceVersion: Option[Long], val sinkVersion: Option[Long],
  val maxFailures: Int, val versionsToKeep: Int)(
  implicit @transient codec: Injection[(K, V), (Array[Byte], Array[Byte])]) extends Source
  with Mappable[(K, V)]
  with TypedSink[(K, V)] {
  import Dsl._
  // Tuple field names used by the key/value byte scheme.
  val keyField = "key"
  val valField = "value"
  val fields = new Fields(keyField, valField)
  // Externalizer lets the (transient) codec survive task serialization.
  val codecBox = Externalizer(codec)
  override def converter[U >: (K, V)] = TupleConverter.asSuperConverter[(K, V), U](TupleConverter.of[(K, V)])
  override def setter[U <: (K, V)] = TupleSetter.asSubSetter[(K, V), U](TupleSetter.of[(K, V)])
  def hdfsScheme =
    HadoopSchemeInstance(new KeyValueByteScheme(fields).asInstanceOf[Scheme[_, _, _, _, _]])
  @deprecated("This method is deprecated", "0.1.6")
  def this(path: String, sourceVersion: Option[Long], sinkVersion: Option[Long], maxFailures: Int)(implicit @transient codec: Injection[(K, V), (Array[Byte], Array[Byte])]) =
    this(path, sourceVersion, sinkVersion, maxFailures, VersionedKeyValSource.defaultVersionsToKeep)(codec)
  // Builds a tap for the given direction, pinning the explicitly requested
  // version (if any) for that direction only.
  def getTap(mode: TapMode) = {
    val tap = new VersionedTap(path, hdfsScheme, mode).setVersionsToKeep(versionsToKeep)
    if (mode == TapMode.SOURCE && sourceVersion.isDefined)
      tap.setVersion(sourceVersion.get)
    else if (mode == TapMode.SINK && sinkVersion.isDefined)
      tap.setVersion(sinkVersion.get)
    else
      tap
  }
  val source = getTap(TapMode.SOURCE)
  val sink = getTap(TapMode.SINK)
  // Fails fast when an explicitly requested source version is missing.
  // Only HadoopMode is supported; any other mode is rejected.
  override def validateTaps(mode: Mode): Unit = {
    // if a version is explicitly supplied, ensure that it exists
    sourceVersion.foreach { version =>
      mode match {
        case hadoopMode: HadoopMode => {
          val store = source.getStore(new JobConf(hadoopMode.jobConf))
          if (!store.hasVersion(version)) {
            throw new InvalidSourceException(
              "Version %s does not exist. Currently available versions are: %s"
                .format(version, store.getAllVersions))
          }
        }
        case _ => throw new IllegalArgumentException(
          "VersionedKeyValSource does not support mode %s. Only HadoopMode is supported"
            .format(mode))
      }
    }
  }
  // True when readable data exists: a non-empty buffer in the two test
  // modes, or an existing store otherwise.
  // NOTE(review): the default branch casts the mode to HadoopMode and will
  // throw a ClassCastException for any other mode -- confirm this is intended.
  def resourceExists(mode: Mode): Boolean =
    mode match {
      case Test(buffers) => {
        buffers(this) map { !_.isEmpty } getOrElse false
      }
      case HadoopTest(conf, buffers) => {
        buffers(this) map { !_.isEmpty } getOrElse false
      }
      case _ => {
        val conf = new JobConf(mode.asInstanceOf[HadoopMode].jobConf)
        source.resourceExists(conf)
      }
    }
  // True only when an explicit sink version was requested and already
  // exists (test modes check the in-memory buffer instead).
  def sinkExists(mode: Mode): Boolean =
    sinkVersion match {
      case Some(version) =>
        mode match {
          case Test(buffers) =>
            buffers(this) map { !_.isEmpty } getOrElse false
          case HadoopTest(conf, buffers) =>
            buffers(this) map { !_.isEmpty } getOrElse false
          case m: HadoopMode =>
            val conf = new JobConf(m.jobConf)
            val store = sink.getStore(conf)
            store.hasVersion(version)
          case _ => sys.error(s"Unknown mode $mode")
        }
      case None => false
    }
  override def createTap(readOrWrite: AccessMode)(implicit mode: Mode): Tap[_, _, _] = {
    import com.twitter.scalding.CastHfsTap
    mode match {
      case Hdfs(_strict, _config) =>
        readOrWrite match {
          case Read => CastHfsTap(source)
          case Write => CastHfsTap(sink)
        }
      case _ =>
        TestTapFactory(this, hdfsScheme).createTap(readOrWrite)
    }
  }
  // Override this for more control on failure on decode
  protected lazy val checkedInversion: CheckedInversion[(K, V), (Array[Byte], Array[Byte])] =
    new MaxFailuresCheck(maxFailures)(codecBox.get)
  override def sinkFields: Fields = fields
  // Read path: decode raw byte pairs, dropping pairs that fail to decode
  // (up to maxFailures, enforced by checkedInversion).
  override def transformForRead(pipe: Pipe): Pipe = {
    pipe.flatMap((keyField, valField) -> (keyField, valField)) { pair: (Array[Byte], Array[Byte]) =>
      checkedInversion(pair)
    }
  }
  // Write path: encode typed (K, V) pairs to byte arrays via the codec.
  override def transformForWrite(pipe: Pipe): Pipe = {
    pipe.mapTo((0, 1) -> (keyField, valField)) { pair: (K, V) =>
      codecBox.get.apply(pair)
    }
  }
  // Iterates the stored pairs. In test modes the tuples already hold typed
  // K/V objects; otherwise raw bytes are decoded via checkedInversion.
  override def toIterator(implicit config: Config, mode: Mode): Iterator[(K, V)] = {
    val tap = createTap(Read)(mode)
    mode.openForRead(config, tap)
      .asScala
      .flatMap { te =>
        val item = te.selectTuple(fields)
        mode match {
          case _: TestMode =>
            val key = item.getObject(0).asInstanceOf[K]
            val value = item.getObject(1).asInstanceOf[V]
            Some((key, value))
          case _ =>
            val key = item.getObject(0).asInstanceOf[Array[Byte]]
            val value = item.getObject(1).asInstanceOf[Array[Byte]]
            checkedInversion((key, value))
        }
      }
  }
  override def toString =
    "%s path:%s,sourceVersion:%s,sinkVersion:%s".format(getClass(), path, sourceVersion, sinkVersion)
  // Equality deliberately ignores maxFailures and versionsToKeep; hashCode
  // (below) hashes toString, which depends on the same path/version fields.
  override def equals(other: Any) =
    if (other.isInstanceOf[VersionedKeyValSource[_, _]]) {
      val otherSrc = other.asInstanceOf[VersionedKeyValSource[K, V]]
      otherSrc.path == path && otherSrc.sourceVersion == sourceVersion && otherSrc.sinkVersion == sinkVersion
    } else {
      false
    }
  override def hashCode = toString.hashCode
}
object RichPipeEx extends java.io.Serializable {
  // Implicit enrichments adding `writeIncremental` to raw pipes, typed
  // pipes of (K, V), and KeyedListLike results (via their TypedPipe view).
  implicit def pipeToRichPipeEx(pipe: Pipe): RichPipeEx = new RichPipeEx(pipe)
  implicit def typedPipeToRichPipeEx[K: Ordering, V: Monoid](pipe: TypedPipe[(K, V)]) =
    new TypedRichPipeEx(pipe)
  implicit def keyedListLikeToRichPipeEx[K: Ordering, V: Monoid, T[K, +V] <: KeyedListLike[K, V, T]](
    kll: KeyedListLike[K, V, T]) = typedPipeToRichPipeEx(kll.toTypedPipe)
}
class TypedRichPipeEx[K: Ordering, V: Monoid](pipe: TypedPipe[(K, V)]) extends java.io.Serializable {
  import Dsl._
  import TDsl._
  // Tap reads existing data from the `sourceVersion` (or latest
  // version) of data specified in `src`, merges the K,V pairs from
  // the pipe in using an implicit `Monoid[V]` and sinks all results
  // into the `sinkVersion` of data (or a new version) specified by
  // `src`.
  def writeIncremental(src: VersionedKeyValSource[K, V], reducers: Int = 1)(implicit flowDef: FlowDef, mode: Mode): TypedPipe[(K, V)] = {
    val outPipe =
      // If no prior version exists, there is nothing to merge with.
      if (!src.resourceExists(mode))
        pipe
      else {
        // Tag existing pairs with 0 and new pairs with 1 so that sorting on
        // the tag combines old values before new ones -- this keeps results
        // deterministic for non-commutative monoids.
        val oldPairs = TypedPipe
          .from[(K, V)](src.read, (0, 1))
          .map { case (k, v) => (k, v, 0) }
        // sumByLocalKeys pre-aggregates new pairs map-side before the shuffle.
        val newPairs = pipe.sumByLocalKeys.map { case (k, v) => (k, v, 1) }
        (oldPairs ++ newPairs)
          .groupBy { _._1 }
          .withReducers(reducers)
          .sortBy { _._3 }
          .mapValues { _._2 }
          .sum
          .toTypedPipe
      }
    outPipe.write(src)
  }
}
class RichPipeEx(pipe: Pipe) extends java.io.Serializable {
  import Dsl._
  // VersionedKeyValSource always merges with the most recent complete
  // version
  def writeIncremental[K, V](src: VersionedKeyValSource[K, V], fields: Fields, reducers: Int = 1)(implicit monoid: Monoid[V],
    flowDef: FlowDef,
    mode: Mode) = {
    // Appends an 'isNew marker (0 = existing data, 1 = incoming data) so the
    // groupBy below can sort old values ahead of new ones before summing.
    def appendToken(pipe: Pipe, token: Int) =
      pipe.mapTo((0, 1) -> ('key, 'value, 'isNew)) { pair: (K, V) => pair :+ token }
    val outPipe =
      // If no prior version exists, write the incoming pipe unchanged.
      if (!src.resourceExists(mode))
        pipe
      else {
        val oldPairs = appendToken(src.read, 0)
        val newPairs = appendToken(pipe, 1)
        (oldPairs ++ newPairs)
          .groupBy('key) { _.reducers(reducers).sortBy('isNew).sum[V]('value) }
          .project(('key, 'value))
          .rename(('key, 'value) -> fields)
      }
    outPipe.write(src)
  }
}
| rubanm/scalding | scalding-commons/src/main/scala/com/twitter/scalding/commons/source/VersionedKeyValSource.scala | Scala | apache-2.0 | 9,965 |
package com.kelebra.github.impatient.scala.katas
import org.scalacheck.Gen
import scala.collection.mutable.ArrayBuffer
/**
 * Spec for the `Arrays` kata implementation: range construction, adjacent
 * element swapping (in-place and pure), positive-first partitioning,
 * averaging, reverse sorting, deduplication and timezone listing.
 */
class ArraysSpec extends ScalaTestSetup[Arrays] {
  val implementation = Arrays
  "Control Structures and Functions" when {
    "method range implemented" should {
      "work for integers" in {
        // Property: range(n) == 0 until n for any n in [0, 1000].
        forAll(Gen.choose(0, 1000)) { (n: Int) =>
          implementation.range(n) shouldBe (0 until n).toArray
        }
      }
    }
    "method swapAdjacentElements implemented" should {
      "not change empty array" in {
        val array = Array.empty
        implementation.swapAdjacentElements(array)
        array shouldBe Array.empty
      }
      "not change array of size one" in {
        val array = Array(1)
        implementation.swapAdjacentElements(array)
        array shouldBe Array(1)
      }
      "change odd size array" in {
        // Trailing element of an odd-length array stays in place.
        val array = Array(1, 2, 3, 4, 5)
        implementation.swapAdjacentElements(array)
        array shouldBe Array(2, 1, 4, 3, 5)
      }
      "change even size array" in {
        val array = Array(1, 2, 3, 4, 5, 6)
        implementation.swapAdjacentElements(array)
        array shouldBe Array(2, 1, 4, 3, 6, 5)
      }
    }
    "methods swapAdjacentElements and swappedAdjacentElements are implemented" should {
      "return the same values" in {
        // The pure variant must agree with the in-place variant on every
        // (size-bounded) generated input.
        forAll { (array: Array[Int]) =>
          whenever(array.length <= 100) {
            val swapped = implementation.swappedAdjacentElements(array)
            implementation.swapAdjacentElements(array)
            swapped shouldBe array
          }
        }
      }
    }
    "method positiveFirstWithOriginalOrder is implemented" should {
      "work for all arrays" in {
        forAll { (array: Array[Int]) =>
          whenever(array.length <= 100) {
            implementation.positiveFirstWithOriginalOrder(array) shouldBe
              (array.filter(_ > 0) ++ array.filterNot(_ > 0))
          }
        }
      }
    }
    "method avg is implemented" should {
      "work for trivial array" in {
        implementation.avg(Array(10, -5, 15, 25, 40)) shouldBe 17.0
      }
      "work for empty array" in {
        // FIX: the original line computed `.isNaN` and discarded the result,
        // so this test could never fail. Assert it explicitly instead.
        implementation.avg(Array()).isNaN shouldBe true
      }
    }
    "method sortInReverseOrder is implemented" should {
      "work for trivial array" in {
        val array = ArrayBuffer(10, -5, 15, 25, 40)
        implementation.sortedInReverseOrder(array)
        array shouldBe Array(40, 25, 15, 10, -5)
      }
    }
    "method deduplicate is implemented" should {
      "work for any array" in {
        // Mocked output callback: expects each distinct value exactly once,
        // in first-occurrence-by-value order.
        val out = mockFunction[Int, Unit]
        inSequence {
          out expects 1
          out expects 2
          out expects 3
          out expects 4
          out expects 5
        } returning Unit
        implementation.deduplicate(Array(2, 2, 2, 2, 1, 1, 5, 5, 1, 1, 3, 3, 3, 4), out)
      }
    }
    "method allAmericanTimezones is implemented" should {
      "return list of correct size" in {
        // Tolerance of +-2 absorbs JDK timezone-database drift.
        implementation.allAmericanTimezones.length shouldBe 164 +- 2
      }
    }
  }
}
| kelebra/impatient-scala-katas | src/test/scala/com/kelebra/github/impatient/scala/katas/ArraysSpec.scala | Scala | mit | 3,066 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a quick overview of the dataset's contents without deeper analysis.