code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gr.ionio.p11blet
import java.util.Properties
import com.typesafe.config.{Config, ConfigFactory}
/** Loads database connection settings from the Typesafe configuration
  * (application.conf) and exposes them as JDBC-ready values. */
object Database {

  // Configuration is loaded once, when this object is first accessed.
  private final val config: Config = ConfigFactory.load()
  private final val user: String = config.getString("database.user")
  private final val pass: String = config.getString("database.pass")

  /** Target database name, read from the `database.db` key. */
  final val db: String = config.getString("database.db")

  /** JDBC connection properties carrying the configured credentials. */
  final val connectionProperties: Properties = initConnectionProperties

  /** Builds a fresh [[java.util.Properties]] holding the configured
    * user and password. */
  def initConnectionProperties: Properties = {
    val props = new Properties()
    props.put("user", user)
    props.put("password", pass)
    props
  }
}
| Panos-Bletsos/spark-cost-model-optimizer | testing-cost-model/src/main/scala/gr/ionio/p11blet/Database.scala | Scala | apache-2.0 | 1,485 |
import codemodels.incrementalparsers.javaip.{PapaCarloUnitSpec, JavaIP}
import name.lakhin.eliah.projects.papacarlo.syntax.Node
import name.lakhin.eliah.projects.papacarlo.test.utils.ErrorMonitor
import scala.collection.JavaConversions._
import java.io.File
/** Exercises the incremental Java parser against a set of fixture files. */
class javaip_codebase_test extends PapaCarloUnitSpec {

  /** Parses the named fixture under src/test/scala/codebase and fails the
    * enclosing test if the parser reports any error. */
  def parseWithoutErrors(name: String): Unit = {
    val filename = "src/test/scala/codebase/" + name + ".java.code"
    val code = scala.io.Source.fromFile(filename).mkString

    val lexer = JavaIP.lexer
    val syntax = JavaIP.syntax(lexer)
    val m = new ErrorMonitor(lexer, syntax)
    lexer.input(code)
    assert(0 == syntax.getErrors.size, m.getResult)
  }

  it should "parse typical compilation unit " in {
    val code = """
// This example is from the book _Java in a Nutshell_ by David Flanagan.
// Written by David Flanagan. Copyright (c) 1996 O'Reilly & Associates.
// You may study, use, modify, and distribute this example for any purpose.
// This example is provided WITHOUT WARRANTY either expressed or implied.
import java.applet.*;
import java.awt.*;
public class Scribble extends Applet {
}"""

    val lexer = JavaIP.lexer
    val syntax = JavaIP.syntax(lexer)

    // Collected by the merge callback below as the tree is built.
    var classes = Map[String, Node]()
    var imports = List[Node]()

    syntax.onNodeMerge.bind { node => {
      imports = node.getBranches("imports")

      val classNode = getBranch(node, "classDeclaration")
      classes += (getValue(classNode, "name") -> classNode)
    }}

    lexer.input(code)

    assert(2 == imports.size)
    // `imports` was never reassigned after parsing, so plain vals suffice.
    val import1 = imports.head
    val import2 = imports.tail.head
    assert(List("java", "applet", "*") == import1.getValues("part").reverse)
    assert(List("java", "awt", "*") == import2.getValues("part").reverse)

    assert(1 == classes.size)
    assert(classes contains "Scribble")
    assert(1 == classes("Scribble").getBranches("qualifiers").size)
  }

  it should "parse Scribble.java" in {
    parseWithoutErrors("Scribble")
  }

  it should "parse Animator.java" in {
    parseWithoutErrors("Animator")
  }

  it should "parse AllComponents.java" in {
    parseWithoutErrors("AllComponents")
  }

  it should "parse AbstactWekaProgrammingLanguageTokenizer.java" in {
    parseWithoutErrors("AbstactWekaProgrammingLanguageTokenizer")
  }

  it should "parse FileCopy.java.code" in {
    parseWithoutErrors("FileCopy")
  }

  it should "parse Mud.java.code" in {
    parseWithoutErrors("Mud")
  }

  it should "parse Soundmap.java.code" in {
    parseWithoutErrors("Soundmap")
  }

  it should "parse Learner.java.code" in {
    parseWithoutErrors("Learner")
  }

  it should "parse TestServerAuthModule.java.code" in {
    parseWithoutErrors("TestServerAuthModule")
  }

  it should "parse TestServlet.java.code" in {
    parseWithoutErrors("TestServlet")
  }

  it should "parse temp.java.code" in {
    parseWithoutErrors("temp")
  }

  /** Recursively walks `path`, applying `op` to every visible .java file. */
  def fetchFiles(path: String)(op: File => Boolean): Unit = {
    // listFiles returns null when `path` is not a readable directory;
    // guard so a bad root does not throw a NullPointerException.
    val entries = Option(new File(path).listFiles).getOrElse(Array.empty[File])
    for (file <- entries if !file.isHidden) {
      if (file.getName.endsWith(".java")) {
        op(file)
      }
      if (file.isDirectory)
        fetchFiles(file.getAbsolutePath)(op)
    }
  }

  // Counters accumulated across tryToParse invocations.
  var oks = 0
  var kos = 0

  /** Parses `f`, asserting the parser produced no errors. Read failures are
    * wrapped with the offending path while preserving the original exception
    * as the cause. Returns true when parsing succeeded without errors. */
  def tryToParse(f: File): Boolean = {
    val code: String =
      try {
        scala.io.Source.fromFile(f, "UTF-8").mkString
      } catch {
        // Chain the cause instead of discarding it, and wrap only the file
        // read: previously parser/assertion failures raised further below
        // were caught here too and mislabelled as "Problem reading".
        case e: Exception => throw new Exception("Problem reading " + f, e)
      }

    val lexer = JavaIP.lexer
    val syntax = JavaIP.syntax(lexer)
    val m = new ErrorMonitor(lexer, syntax)
    lexer.input(code)
    assert(0 == syntax.getErrors.size, "Failure on " + f.getCanonicalPath + ": " + m.getResult)

    if (0 == syntax.getErrors.size) {
      oks += 1
    } else {
      kos += 1
    }
    0 == syntax.getErrors.size
  }

  it should "parse ALL javaee7-samples" in {
    //fetchFiles(".")(tryToParse)
  }
}
| ftomassetti/JavaIncrementalParser | src/test/scala/javaip_codebase_test.scala | Scala | apache-2.0 | 4,069 |
/**
* Copyright (C) 2009-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka
import akka.actor.ActorSystem
import akka.actor.ExtendedActorSystem
import akka.actor.Actor
import akka.actor.Terminated
import akka.actor.ActorLogging
import akka.actor.Props
import akka.actor.ActorRef
import scala.util.control.NonFatal
/**
* Main class to start an [[akka.actor.ActorSystem]] with one
* top level application supervisor actor. It will shutdown
* the actor system when the top level actor is terminated.
*/
object Main {

  /**
   * Starts an ActorSystem named "Main", creates the user-supplied supervisor
   * actor under the name "app", and installs a Terminator that shuts the
   * system down when that actor stops.
   *
   * @param args one argument: the class of the application supervisor actor
   */
  def main(args: Array[String]): Unit = {
    if (args.length != 1) {
      println("you need to provide exactly one argument: the class of the application supervisor actor")
    } else {
      val system = ActorSystem("Main")
      try {
        val appClass = system.asInstanceOf[ExtendedActorSystem].dynamicAccess.getClassFor[Actor](args(0)).get
        val app = system.actorOf(Props(appClass), "app")
        // The terminator only needs to exist; its ActorRef was previously
        // bound to an unused local, so the binding has been dropped.
        system.actorOf(Props(classOf[Terminator], app), "app-terminator")
      } catch {
        // Tear the actor system down before propagating startup failures.
        case NonFatal(e) ⇒ system.terminate(); throw e
      }
    }
  }

  /** Watches `app` and terminates the actor system once it stops. */
  class Terminator(app: ActorRef) extends Actor with ActorLogging {
    context watch app
    def receive = {
      case Terminated(_) ⇒
        log.info("application supervisor has terminated, shutting down")
        context.system.terminate()
    }
  }
}
| rorygraves/perf_tester | corpus/akka/akka-actor/src/main/scala/akka/Main.scala | Scala | apache-2.0 | 1,474 |
package com.nicta
package rng
import scalaz._, Scalaz._, Validation._, effect._
import Rng._
// A balance: either an error message or an integer amount.
// NOTE(review): the `\\?/` operator comes from the project's Rng DSL
// (imported via Rng._) — presumably a disjunction-style combinator; confirm
// against the Rng sources.
case class Balance(amount: Balance.ErrorMessage \\?/ Int)

object Balance {
  // Alias documenting the error side of the disjunction.
  type ErrorMessage =
    String

  // Random generator producing either an error string or a positive amount,
  // wrapped into a Balance.
  val randomBalance =
    string(3) \\?/ positiveint map (Balance(_))

  // Renders the error message when present, otherwise shows the amount.
  implicit val ShowBalance: Show[Balance] =
    Show.shows(_.amount.swap valueOr (_.shows))
}
// An account number: either a list of digits or a positive int.
case class AccountNumber(value: List[Digit] \\/ Int)

object AccountNumber {
  // Random generator: ten digits or a positive int, wrapped into AccountNumber.
  val randomAccountNumber =
    digits(10) \\/ positiveint map (AccountNumber(_))

  // Shows whichever side of the disjunction is populated.
  implicit val ShowAccountNumber: Show[AccountNumber] =
    Show.shows(_.value fold (_.shows, _.shows))
}
// Closed set of account types.
sealed trait AccountType
case object Cheque extends AccountType
case object Savings extends AccountType
// Any other account type, tagged by a non-empty list of digits.
case class Other(n: NonEmptyList[Digit]) extends AccountType

object AccountType {
  // Random generator; the doubly-nested Option encodes the three cases:
  //   None               -> Cheque
  //   Some(None)         -> Savings
  //   Some(Some(digits)) -> Other(digits)
  val randomAccountType =
    digits1(10).option.option map {
      case None => Cheque
      case Some(None) => Savings
      case Some(Some(s)) => Other(s)
    }

  // Human-readable rendering of each account type.
  implicit val ShowAccountType: Show[AccountType] =
    Show.shows {
      case Cheque => "Cheque"
      case Savings => "Savings"
      case Other(x) => "Other(" + x.shows + ")"
    }
}
// An account aggregates a name, a balance, a number and an account type.
case class Account(name: String, balance: Balance, number: AccountNumber, tp: AccountType)

object Account {
  // Random generator composing the component generators.
  val randomAccount =
    for {
      n <- alphastring1(6)
      b <- Balance.randomBalance
      m <- AccountNumber.randomAccountNumber
      t <- AccountType.randomAccountType
    } yield Account(n, b, m, t)

  // Renders the account as "Account(name,balance,number,type)".
  implicit val ShowAccount: Show[Account] =
    Show.show(a =>
      ("Account(": Cord) ++
        a.name ++ "," ++
        a.balance.show ++ "," ++
        a.number.show ++ "," ++
        a.tp.show ++ ")"
    )
}
// Entry point: generates one random account and prints it
// inside scalaz's IO (SafeApp's runc).
object RunAccount extends SafeApp {
  override def runc = {
    val a = Account.randomAccount.run
    a map (_.println)
  }
}
| NICTA/rng | examples/src/main/scala/com/nicta/rng/Account.scala | Scala | bsd-3-clause | 1,828 |
/*
* Copyright 2015 Avira Operations GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.avira.ds.sparser
/** Instances of this class are produced by [[Parser]] classes and store an
* optional value and a list of parsing errors. A callback function (with side
* effects) can be called each time an error occurs.
*
* As the input is being parsed by a [[Parser]], the [[ParseResult]] collects a
* list of errors as [[ParseError]] objects. If the last error in the list is a
* fatal one, no value will be present in the [[ParseResult]]. If the value is
* available, errors might indicate warnings or the fact that the parsed value
* is partial.
*
* Parser users only have to deal with the value and the errors inside the
* parser. It might be helpful for them to do pattern matching on
* [[ParseResult]] case classes:
*
* {{{
* result match {
* case ParseResult.Success(value, input) => ???
* case ParseResult.Warning(value, errors, input) => ???
* case ParseResult.Failure(errors, input) => ???
* }
* }}}
*
* Parser developers, who are responsible to create new parsers, will
* additionally have to learn to operate with the `transform` method. Check
* [[com.avira.ds.sparser.samples.SamplePersonParser]] for an example.
*
* @param valueOption Optional value resulted from parsing
* @param errors A list of errors collected during parsing
* @param input If configured via [[ParserConf]] the result can track the input
* that lead to output result, feature that might be useful while debugging
* errors
* @param conf Parser configuration object which decides whether input should
* be collected or not
* @tparam I Type of the input accepted by the parser
* @tparam O Type of the value resulted from parsing
* @see [[ParseError]] and [[TransformResult]]
*/
sealed abstract class ParseResult[I, +O](
    val valueOption: Option[O],
    val errors: Seq[ParseError],
    val input: Option[I])(implicit conf: ParserConf) extends Serializable {

  import ParseResult._

  /** Returns result's value if available.
    *
    * @throws NoSuchElementException if the result does not contain any value
    * @return value if available
    */
  def get: O

  /** Whether the result has a value inside */
  lazy val hasValue: Boolean = valueOption.isDefined

  /** Whether the result has at least one error */
  lazy val hasErrors: Boolean = errors.nonEmpty

  /** Returns true if the result contains a value and there are no errors */
  def isSuccess: Boolean

  /** Returns true if the result contains a value, but there were also some errors */
  def isWarning: Boolean

  /** Returns true if the result contains no value because of at least one error */
  def isFailure: Boolean

  /** Main method used by parser developers to incrementally transform input
    * data into ''value''.
    *
    * This is basically a `flatMap` function which accepts a function
    * responsible to transform the wrapped value. But the accepted lambda
    * function does not directly return the new wrapped value as it happens
    * with a traditional `flatMap`. It returns instead a [[TransformResult]]
    * object which allows wrapping an error along with the optional value.
    * There are three sealed cases: [[TransformSuccess]], [[TransformWarning]]
    * and [[TransformFailure]]. Check [[TransformResult]] documentation to
    * learn more.
    */
  def transform[OO](f: O => TransformResult[OO]): ParseResult[I, OO] =
    // Without a value this is already a Failure and passes through unchanged;
    // the cast only rebinds the output type parameter (safe: no value of O
    // is held).
    valueOption.fold[ParseResult[I, OO]](this.asInstanceOf[ParseResult[I, OO]]) { v =>
      f(v) match {
        case TransformFailure(error) => Failure(errors, input).reportError(error)
        case TransformWarning(newValue, warning) => fillValueAndReportError(newValue, warning)
        case TransformSuccess(newValue) => fillValue(newValue)
      }
    }

  /** Create a new result with the passed value, while keeping errors and the callback.
    *
    * @param newValue New value
    * @tparam OO New type of the value
    * @return A new ParseResult
    */
  private[sparser] def fillValue[OO](newValue: OO): ParseResult[I, OO] =
    if (errors.isEmpty) {
      Success(newValue, input)
    } else {
      Warning(newValue, errors, input)
    }

  /** Create a new [[ParseResult.Failure]], appending `error` to the error
    * list; any current value is discarded. The configured error callback is
    * invoked as a side effect.
    *
    * @param error Error to be added to the list of tracked errors
    * @return A new ParseResult
    */
  private[sparser] def reportError(error: ParseError): ParseResult[I, O] = {
    conf.errorCallback(error)
    Failure(errors :+ error, input)
  }

  /** Reports `error` via the configured callback and returns a
    * [[ParseResult.Warning]] carrying `newValue` together with the previous
    * errors plus `error`.
    *
    * @param newValue New value
    * @param error Error to be added to the list of tracked errors
    * @tparam OO New type of the value
    * @return A new ParseResult
    */
  private[sparser] def fillValueAndReportError[OO](
      newValue: OO, error: ParseError): ParseResult[I, OO] = {
    conf.errorCallback(error)
    Warning(newValue, errors :+ error, input)
  }
}
/** Companion object of [[ParseResult]] which contains all concrete
* child case classes and other utility methods.
*/
object ParseResult {

  /** [[Parser]] result which contains a value and no errors */
  case class Success[I, O](
      value: O,
      override val input: Option[I] = None)(implicit conf: ParserConf)
    extends ParseResult(Some(value), Seq(), input)(conf) {

    override val get: O = value
    override lazy val hasValue: Boolean = true
    override lazy val hasErrors: Boolean = false
    override val isSuccess: Boolean = true
    override val isWarning: Boolean = false
    override val isFailure: Boolean = false
  }

  /** [[Parser]] result which contains a (potentially incomplete) value and at least one error */
  case class Warning[I, O](
      value: O,
      override val errors: Seq[ParseError],
      override val input: Option[I] = None)(implicit conf: ParserConf)
    extends ParseResult(Some(value), errors, input)(conf) {

    override val get: O = value
    override lazy val hasValue: Boolean = true
    override val isSuccess: Boolean = false
    override val isWarning: Boolean = true
    override val isFailure: Boolean = false
  }

  /** [[Parser]] result which doesn't contain a value and as a consequence
    * it has at least one error
    */
  case class Failure[I](
      override val errors: Seq[ParseError],
      override val input: Option[I] = None)(implicit conf: ParserConf)
    extends ParseResult[I, Nothing](None, errors, input)(conf) {

    // A Failure never holds a value, so `get` always throws.
    override def get: Nothing = throw new NoSuchElementException
    override lazy val hasValue: Boolean = false
    override lazy val isSuccess: Boolean = false
    override lazy val isWarning: Boolean = false
    override lazy val isFailure: Boolean = true
  }

  /** Used for pattern matching of generic results which have an optional
    * value, a list of errors and the input for which parsing was attempted.
    */
  def unapply[I, O](result: ParseResult[I, O]): Option[(Option[O], Seq[ParseError], Option[I])] =
    Some((result.valueOption, result.errors, result.input))
}
| Avira/sparser | core/src/main/scala/com/avira/ds/sparser/ParseResult.scala | Scala | apache-2.0 | 7,637 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.aggregate
import org.apache.spark.internal.Logging
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow}
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Expression, _}
import org.apache.spark.sql.catalyst.expressions.aggregate.ImperativeAggregate
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateMutableProjection
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
/**
 * A helper trait used to create specialized setter and getter for types supported by
 * [[org.apache.spark.sql.execution.UnsafeFixedWidthAggregationMap]]'s buffer.
 * (see UnsafeFixedWidthAggregationMap.supportsAggregationBufferSchema).
 */
sealed trait BufferSetterGetterUtils {

  /**
   * Creates one getter per field of `schema`. Each getter reads the slot at
   * the given ordinal using the accessor specialized for that field's data
   * type, returning null for null slots (primitive results are boxed via
   * the `Any` return type).
   */
  def createGetters(schema: StructType): Array[(InternalRow, Int) => Any] = {
    val dataTypes = schema.fields.map(_.dataType)
    val getters = new Array[(InternalRow, Int) => Any](dataTypes.length)

    var i = 0
    while (i < getters.length) {
      getters(i) = dataTypes(i) match {
        case NullType =>
          (row: InternalRow, ordinal: Int) => null

        case BooleanType =>
          (row: InternalRow, ordinal: Int) =>
            if (row.isNullAt(ordinal)) null else row.getBoolean(ordinal)

        case ByteType =>
          (row: InternalRow, ordinal: Int) =>
            if (row.isNullAt(ordinal)) null else row.getByte(ordinal)

        case ShortType =>
          (row: InternalRow, ordinal: Int) =>
            if (row.isNullAt(ordinal)) null else row.getShort(ordinal)

        case IntegerType =>
          (row: InternalRow, ordinal: Int) =>
            if (row.isNullAt(ordinal)) null else row.getInt(ordinal)

        case LongType =>
          (row: InternalRow, ordinal: Int) =>
            if (row.isNullAt(ordinal)) null else row.getLong(ordinal)

        case FloatType =>
          (row: InternalRow, ordinal: Int) =>
            if (row.isNullAt(ordinal)) null else row.getFloat(ordinal)

        case DoubleType =>
          (row: InternalRow, ordinal: Int) =>
            if (row.isNullAt(ordinal)) null else row.getDouble(ordinal)

        case dt: DecimalType =>
          // Capture precision/scale once rather than on every invocation.
          val precision = dt.precision
          val scale = dt.scale
          (row: InternalRow, ordinal: Int) =>
            if (row.isNullAt(ordinal)) null else row.getDecimal(ordinal, precision, scale)

        case DateType =>
          // Dates are read with the Int accessor.
          (row: InternalRow, ordinal: Int) =>
            if (row.isNullAt(ordinal)) null else row.getInt(ordinal)

        case TimestampType =>
          // Timestamps are read with the Long accessor.
          (row: InternalRow, ordinal: Int) =>
            if (row.isNullAt(ordinal)) null else row.getLong(ordinal)

        case other =>
          // Fallback: generic, non-specialized access.
          (row: InternalRow, ordinal: Int) =>
            if (row.isNullAt(ordinal)) null else row.get(ordinal, other)
      }

      i += 1
    }

    getters
  }

  /**
   * Creates one setter per field of `schema`. Each setter writes the given
   * (possibly null) value at the given ordinal using the mutator specialized
   * for that field's data type; null values null-out the slot (except for
   * decimals, see below).
   */
  def createSetters(schema: StructType): Array[((InternalRow, Int, Any) => Unit)] = {
    val dataTypes = schema.fields.map(_.dataType)
    val setters = new Array[(InternalRow, Int, Any) => Unit](dataTypes.length)

    var i = 0
    while (i < setters.length) {
      setters(i) = dataTypes(i) match {
        case NullType =>
          (row: InternalRow, ordinal: Int, value: Any) => row.setNullAt(ordinal)

        // Match the singleton like the other branches; the previous
        // `case b: BooleanType` type pattern bound an unused name.
        case BooleanType =>
          (row: InternalRow, ordinal: Int, value: Any) =>
            if (value != null) {
              row.setBoolean(ordinal, value.asInstanceOf[Boolean])
            } else {
              row.setNullAt(ordinal)
            }

        case ByteType =>
          (row: InternalRow, ordinal: Int, value: Any) =>
            if (value != null) {
              row.setByte(ordinal, value.asInstanceOf[Byte])
            } else {
              row.setNullAt(ordinal)
            }

        case ShortType =>
          (row: InternalRow, ordinal: Int, value: Any) =>
            if (value != null) {
              row.setShort(ordinal, value.asInstanceOf[Short])
            } else {
              row.setNullAt(ordinal)
            }

        case IntegerType =>
          (row: InternalRow, ordinal: Int, value: Any) =>
            if (value != null) {
              row.setInt(ordinal, value.asInstanceOf[Int])
            } else {
              row.setNullAt(ordinal)
            }

        case LongType =>
          (row: InternalRow, ordinal: Int, value: Any) =>
            if (value != null) {
              row.setLong(ordinal, value.asInstanceOf[Long])
            } else {
              row.setNullAt(ordinal)
            }

        case FloatType =>
          (row: InternalRow, ordinal: Int, value: Any) =>
            if (value != null) {
              row.setFloat(ordinal, value.asInstanceOf[Float])
            } else {
              row.setNullAt(ordinal)
            }

        case DoubleType =>
          (row: InternalRow, ordinal: Int, value: Any) =>
            if (value != null) {
              row.setDouble(ordinal, value.asInstanceOf[Double])
            } else {
              row.setNullAt(ordinal)
            }

        case dt: DecimalType =>
          val precision = dt.precision
          (row: InternalRow, ordinal: Int, value: Any) =>
            // To make it work with UnsafeRow, we cannot use setNullAt.
            // Please see the comment of UnsafeRow's setDecimal.
            row.setDecimal(ordinal, value.asInstanceOf[Decimal], precision)

        case DateType =>
          (row: InternalRow, ordinal: Int, value: Any) =>
            if (value != null) {
              row.setInt(ordinal, value.asInstanceOf[Int])
            } else {
              row.setNullAt(ordinal)
            }

        case TimestampType =>
          (row: InternalRow, ordinal: Int, value: Any) =>
            if (value != null) {
              row.setLong(ordinal, value.asInstanceOf[Long])
            } else {
              row.setNullAt(ordinal)
            }

        case other =>
          (row: InternalRow, ordinal: Int, value: Any) =>
            if (value != null) {
              row.update(ordinal, value)
            } else {
              row.setNullAt(ordinal)
            }
      }

      i += 1
    }

    setters
  }
}
/**
 * A Mutable [[Row]] representing a mutable aggregation buffer.
 *
 * `bufferOffset` is the ordinal of this buffer's first slot inside
 * `underlyingBuffer`; reads and writes are translated by that offset.
 */
private[aggregate] class MutableAggregationBufferImpl(
    schema: StructType,
    toCatalystConverters: Array[Any => Any],
    toScalaConverters: Array[Any => Any],
    bufferOffset: Int,
    var underlyingBuffer: InternalRow)
  extends MutableAggregationBuffer with BufferSetterGetterUtils {

  // Absolute ordinals of this buffer's slots in the underlying row.
  private[this] val offsets: Array[Int] = {
    val newOffsets = new Array[Int](length)
    var i = 0
    while (i < newOffsets.length) {
      newOffsets(i) = bufferOffset + i
      i += 1
    }
    newOffsets
  }

  // Type-specialized accessors derived from the buffer schema.
  private[this] val bufferValueGetters = createGetters(schema)
  private[this] val bufferValueSetters = createSetters(schema)

  override def length: Int = toCatalystConverters.length

  override def get(i: Int): Any = {
    if (i >= length || i < 0) {
      throw new IllegalArgumentException(
        s"Could not access ${i}th value in this buffer because it only has $length values.")
    }
    // Read with the specialized getter, then convert Catalyst -> Scala.
    toScalaConverters(i)(bufferValueGetters(i)(underlyingBuffer, offsets(i)))
  }

  def update(i: Int, value: Any): Unit = {
    if (i >= length || i < 0) {
      throw new IllegalArgumentException(
        s"Could not update ${i}th value in this buffer because it only has $length values.")
    }
    // Convert Scala -> Catalyst, then write with the specialized setter.
    bufferValueSetters(i)(underlyingBuffer, offsets(i), toCatalystConverters(i)(value))
  }

  // Because get method call specialized getter based on the schema, we cannot use the
  // default implementation of the isNullAt (which is get(i) == null).
  // We have to override it to call isNullAt of the underlyingBuffer.
  override def isNullAt(i: Int): Boolean = {
    underlyingBuffer.isNullAt(offsets(i))
  }

  // Note: the copy shares `underlyingBuffer` with the original.
  override def copy(): MutableAggregationBufferImpl = {
    new MutableAggregationBufferImpl(
      schema,
      toCatalystConverters,
      toScalaConverters,
      bufferOffset,
      underlyingBuffer)
  }
}
/**
 * A [[Row]] representing an immutable aggregation buffer.
 *
 * `bufferOffset` is the ordinal of this buffer's first slot inside
 * `underlyingInputBuffer`; reads are translated by that offset.
 */
private[aggregate] class InputAggregationBuffer(
    schema: StructType,
    toCatalystConverters: Array[Any => Any],
    toScalaConverters: Array[Any => Any],
    bufferOffset: Int,
    var underlyingInputBuffer: InternalRow)
  extends Row with BufferSetterGetterUtils {

  // Absolute ordinals of this buffer's slots in the underlying row.
  private[this] val offsets: Array[Int] = {
    val newOffsets = new Array[Int](length)
    var i = 0
    while (i < newOffsets.length) {
      newOffsets(i) = bufferOffset + i
      i += 1
    }
    newOffsets
  }

  // Type-specialized read accessors derived from the buffer schema.
  private[this] val bufferValueGetters = createGetters(schema)

  def getBufferOffset: Int = bufferOffset

  override def length: Int = toCatalystConverters.length

  override def get(i: Int): Any = {
    if (i >= length || i < 0) {
      throw new IllegalArgumentException(
        s"Could not access ${i}th value in this buffer because it only has $length values.")
    }
    // Read with the specialized getter, then convert Catalyst -> Scala.
    toScalaConverters(i)(bufferValueGetters(i)(underlyingInputBuffer, offsets(i)))
  }

  // Because get method call specialized getter based on the schema, we cannot use the
  // default implementation of the isNullAt (which is get(i) == null).
  // We have to override it to call isNullAt of the underlyingInputBuffer.
  override def isNullAt(i: Int): Boolean = {
    underlyingInputBuffer.isNullAt(offsets(i))
  }

  // Note: the copy shares `underlyingInputBuffer` with the original.
  override def copy(): InputAggregationBuffer = {
    new InputAggregationBuffer(
      schema,
      toCatalystConverters,
      toScalaConverters,
      bufferOffset,
      underlyingInputBuffer)
  }
}
/**
 * The internal wrapper used to hook a [[UserDefinedAggregateFunction]] `udaf` in the
 * internal aggregation code path.
 *
 * Values are converted between Catalyst's internal representation and the
 * Scala types the user's UDAF expects on every initialize/update/merge/eval
 * call, via converters built lazily from the UDAF's schemas.
 */
case class ScalaUDAF(
    children: Seq[Expression],
    udaf: UserDefinedAggregateFunction,
    mutableAggBufferOffset: Int = 0,
    inputAggBufferOffset: Int = 0)
  extends ImperativeAggregate
  with NonSQLExpression
  with Logging
  with ImplicitCastInputTypes
  with UserDefinedExpression {

  override def withNewMutableAggBufferOffset(newMutableAggBufferOffset: Int): ImperativeAggregate =
    copy(mutableAggBufferOffset = newMutableAggBufferOffset)

  override def withNewInputAggBufferOffset(newInputAggBufferOffset: Int): ImperativeAggregate =
    copy(inputAggBufferOffset = newInputAggBufferOffset)

  override def nullable: Boolean = true

  override def dataType: DataType = udaf.dataType

  override lazy val deterministic: Boolean = udaf.deterministic

  override val inputTypes: Seq[DataType] = udaf.inputSchema.map(_.dataType)

  override val aggBufferSchema: StructType = udaf.bufferSchema

  override val aggBufferAttributes: Seq[AttributeReference] = aggBufferSchema.toAttributes

  // Note: although this simply copies aggBufferAttributes, this common code can not be placed
  // in the superclass because that will lead to initialization ordering issues.
  override val inputAggBufferAttributes: Seq[AttributeReference] =
    aggBufferAttributes.map(_.newInstance())

  // Schema describing the UDAF's input expressions, one field per child.
  private[this] lazy val childrenSchema: StructType = {
    val inputFields = children.zipWithIndex.map {
      case (child, index) =>
        StructField(s"input$index", child.dataType, child.nullable, Metadata.empty)
    }
    StructType(inputFields)
  }

  // Projection evaluating the child expressions into a single input row.
  private lazy val inputProjection = {
    val inputAttributes = childrenSchema.toAttributes
    log.debug(
      s"Creating MutableProj: $children, inputSchema: $inputAttributes.")
    MutableProjection.create(children, inputAttributes)
  }

  // Converts a projected Catalyst input row to Scala values for the UDAF.
  private[this] lazy val inputToScalaConverters: Any => Any =
    CatalystTypeConverters.createToScalaConverter(childrenSchema)

  // Per-field Scala -> Catalyst converters for the aggregation buffer.
  private[this] lazy val bufferValuesToCatalystConverters: Array[Any => Any] = {
    aggBufferSchema.fields.map { field =>
      CatalystTypeConverters.createToCatalystConverter(field.dataType)
    }
  }

  // Per-field Catalyst -> Scala converters for the aggregation buffer.
  private[this] lazy val bufferValuesToScalaConverters: Array[Any => Any] = {
    aggBufferSchema.fields.map { field =>
      CatalystTypeConverters.createToScalaConverter(field.dataType)
    }
  }

  // Converts the UDAF's Scala result back to a Catalyst value.
  private[this] lazy val outputToCatalystConverter: Any => Any = {
    CatalystTypeConverters.createToCatalystConverter(dataType)
  }

  // This buffer is only used at executor side.
  private[this] lazy val inputAggregateBuffer: InputAggregationBuffer = {
    new InputAggregationBuffer(
      aggBufferSchema,
      bufferValuesToCatalystConverters,
      bufferValuesToScalaConverters,
      inputAggBufferOffset,
      null)
  }

  // This buffer is only used at executor side.
  private[this] lazy val mutableAggregateBuffer: MutableAggregationBufferImpl = {
    new MutableAggregationBufferImpl(
      aggBufferSchema,
      bufferValuesToCatalystConverters,
      bufferValuesToScalaConverters,
      mutableAggBufferOffset,
      null)
  }

  // This buffer is only used at executor side.
  private[this] lazy val evalAggregateBuffer: InputAggregationBuffer = {
    new InputAggregationBuffer(
      aggBufferSchema,
      bufferValuesToCatalystConverters,
      bufferValuesToScalaConverters,
      mutableAggBufferOffset,
      null)
  }

  override def initialize(buffer: InternalRow): Unit = {
    // Re-point the reusable wrapper at the new underlying buffer before
    // delegating to the user's UDAF.
    mutableAggregateBuffer.underlyingBuffer = buffer

    udaf.initialize(mutableAggregateBuffer)
  }

  override def update(buffer: InternalRow, input: InternalRow): Unit = {
    mutableAggregateBuffer.underlyingBuffer = buffer

    udaf.update(
      mutableAggregateBuffer,
      inputToScalaConverters(inputProjection(input)).asInstanceOf[Row])
  }

  override def merge(buffer1: InternalRow, buffer2: InternalRow): Unit = {
    mutableAggregateBuffer.underlyingBuffer = buffer1
    inputAggregateBuffer.underlyingInputBuffer = buffer2

    udaf.merge(mutableAggregateBuffer, inputAggregateBuffer)
  }

  override def eval(buffer: InternalRow): Any = {
    evalAggregateBuffer.underlyingInputBuffer = buffer

    outputToCatalystConverter(udaf.evaluate(evalAggregateBuffer))
  }

  override def toString: String = {
    s"""${udaf.getClass.getSimpleName}(${children.mkString(",")})"""
  }

  override def nodeName: String = udaf.getClass.getSimpleName
}
| pgandhi999/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/udaf.scala | Scala | apache-2.0 | 15,038 |
package uk.org.nbn.nbnv.importer.validation
import uk.org.nbn.nbnv.importer.records.NbnRecord
/** Validation rule NBNV-56: the record must carry a TaxonVersionKey. */
class Nbnv56Validator {
  // Delegates to the generic null-field check with this rule's code and
  // the human-readable field name.
  def validate(record: NbnRecord) =
    (new NullFieldValidator).validate("NBNV-56", record.taxonVersionKey, "TaxonVersionKey")
}
| JNCC-dev-team/nbn-importer | importer/src/main/scala/uk/org/nbn/nbnv/importer/validation/Nbnv56Validator.scala | Scala | apache-2.0 | 294 |
package com.github.ponkin.bloom
import org.apache.commons.lang3.StringUtils
import scala.util.Random
import org.scalatest.FunSuite // scalastyle:ignore funsuite
class PartitionedBloomFilterSuite extends FunSuite { // scalastyle:ignore funsuite

  // Tolerance when comparing observed vs. expected false-positive rates.
  private final val EPSILON = 0.01

  private final val numItems = 100000

  // Generates a random string of random length (0 to 511 characters;
  // may be empty, hence the isNotEmpty filtering below).
  private val itemGen: Random => String = { r =>
    r.nextString(r.nextInt(512))
  }

  test(s"accuracy - String") {
    // use a fixed seed to make the test predictable.
    val r = new Random(37)
    val fpp = 0.01
    val numInsertion = numItems / 10

    val allItems = Array.fill(numItems)(itemGen(r))

    val filter = PartitionedBloomFilter.builder()
      .withExpectedNumberOfItems(numInsertion)
      .withFalsePositiveRate(fpp)
      .build()

    // insert first `numInsertion` items.
    var inserted = allItems.take(numInsertion).filter(StringUtils.isNotEmpty)
    inserted.foreach(filter.put)

    // false negative is not allowed.
    assert(inserted.forall(filter.mightContain))

    // The number of inserted items doesn't exceed `expectedNumItems`, so the `expectedFpp`
    // should not be significantly higher
    // than the one we passed in to create this bloom filter.
    assert(filter.expectedFpp() - fpp < EPSILON)

    // Non-inserted items that still match are false positives.
    val errorCount = allItems.drop(numInsertion).count(filter.mightContain)

    // Also check the actual fpp is not significantly higher than we expected.
    val actualFpp = errorCount.toDouble / (numItems - numInsertion)
    assert(actualFpp - fpp < EPSILON)
  }

  test(s"mergeInPlace - String") {
    // use a fixed seed to make the test predictable.
    val r = new Random(37)

    val items1 = Array.fill(numItems / 2)(itemGen(r)).filter(StringUtils.isNotEmpty)
    val items2 = Array.fill(numItems / 2)(itemGen(r)).filter(StringUtils.isNotEmpty)

    val filter1 = PartitionedBloomFilter.builder()
      .withExpectedNumberOfItems(numItems)
      .build()
    items1.foreach(filter1.put)

    val filter2 = PartitionedBloomFilter.builder()
      .withExpectedNumberOfItems(numItems)
      .build()
    items2.foreach(filter2.put)

    filter1.mergeInPlace(filter2)

    // After merge, `filter1` has `numItems` items which doesn't exceed `expectedNumItems`,
    // so the `expectedFpp` should not be significantly higher than the default one.
    assert(filter1.expectedFpp() - Utils.DEFAULT_FPP < EPSILON)

    // The merged filter must report every item inserted into either side.
    items1.foreach(i => assert(filter1.mightContain(i)))
    items2.foreach(i => assert(filter1.mightContain(i)))
  }
}
| ponkin/bloom | core/src/test/scala/com/github/ponkin/bloom/PartitionedBloomFilterSuite.scala | Scala | apache-2.0 | 2,501 |
/*
Copyright 2011 Ben Biddington
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.coriander.unit.tests
import org.junit.Test
import org.junit.Assert._
import org.hamcrest.core.Is._
import org.hamcrest.core.IsEqual._
import org.coriander.oauth.core.Options
class OptionsCompanionTest extends TestBase {
@Test
def DEFAULT_has_standard_default_values {
  // The out-of-the-box options should follow the OAuth 1.0 conventions.
  val defaults = Options.DEFAULT
  assertThat(defaults.version, is(equalTo(1.0)))
  assertThat(defaults.signatureMethod, is(equalTo("HMAC-SHA1")))
}
} | ben-biddington/Coriander.OAuth | test/org/coriander/unit/tests/OptionsCompanionTest.scala | Scala | apache-2.0 | 1,000 |
package gie.utils
package object ImplicitPipe {
// Forward-pipe syntax on any value, as a zero-allocation value class.
implicit final class Pipe[T](val t:T) extends AnyVal{
  // Pipe through an implicit conversion first: `t |%> f` converts `t` to `X`
  // via the in-scope evidence `ev: T => X`, then applies `f`.
  @inline def |%>[X, U](f: X=>U)(implicit ev: T=>X) = f(t)
  // Plain forward pipe: `t |> f` is `f(t)`.
  @inline def |>[U](f: T=>U) = f(t)
}
} | igorge/ggdrive | src/main/scala/gie/utils/pipe.scala | Scala | gpl-2.0 | 211 |
package org.quantintel.ql.time.daycounters
import org.scalatest.{FlatSpec, Matchers}
/**
*
* Created by Paul Bernard on 8/2/14.
*
* 30/360
*
* 30-day months, end of month adjustments
*
* 1. 01/31/94 - 03/16/94
* 2. 01/01/93 - 02/21/93
* 3. 02/01/93 - 03/01/93
* 4. 02/01/92 - 03/01/92
* 5. 01/01/93 - 01/01/94
* 6. 01/15/93 - 02/01/93
* 7. 02/15/93 - 04/01/93
* 8. 03/31/93 - 04/30/93
* 9. 03/31/93 - 12/31/93
* 10. 03/15/93 - 06/15/93
* 11. 11/01/93 - 03/01/94
* 12. 12/31/93 - 02/01/94
* 13. 07/15/93 - 09/15/93
* 14. 08/21/93 - 04/11/94
* 15. 03/31/93 - 04/01/93
* 16. 12/15/93 - 12/31/93
* 17. 12/15/93 - 12/30/93
* 18. 01/15/07 - 02/15/07
* 19. 01/15/07 - 07/15/07
* 20. 09/30/07 - 03/31/08
* 21. 09/30/07 - 10/31/07
* 22. 09/30/07 - 09/30/08
* 23. 01/15/07 - 01/31/07
* 24. 01/31/07 - 02/28/07
* 25. 02/28/07 - 03/31/07
* 26. 08/31/06 - 02/28/07
* 27. 02/28/07 - 08/31/07
* 28. 02/14/07 - 02/28/07
* 29. 02/26/07 - 02/29/08
* 30. 02/29/08 - 02/28/09
* 31. 02/29/08 - 03/30/08
* 32. 02/29/08 - 03/31/08
* 33. 02/28/07 - 03/05/07
* 34. 10/31/07 - 11/28/07
* 35. 08/31/07 - 02/29/08
* 36. 02/29/08 - 08/31/08
* 37. 08/31/08 - 02/28/09
* 38. 02/28/09 - 08/31/09
*
**/
class Thirty360Test extends FlatSpec with Matchers {
import org.quantintel.lang.numeric._
import org.quantintel.ql.time.Date
import org.quantintel.ql.time.daycounters.Thirty360Convention.USA
"1. 01/31/92 - 03/16/92" should "be 0.127777778" in {
  // 30/360 US: a start date on the 31st is rolled back to the 30th, so the
  // count is 2 * 30 + (16 - 30) = 46 days, and 46/360 = 0.127777778.
  // (Label fixed: it previously said 1994 while the dates under test are 1992.)
  val d1 = Date(31, 1, 1992)
  val d2 = Date(16, 3, 1992)
  val yf: Double = Thirty360(USA).yearFraction(d1, d2)
  assert(yf.rounded(9) == 0.127777778)
}
"2. 01/01/93 - 02/21/93" should "be 0.138888889" in {
  // 30/360: 1 * 30 + (21 - 1) = 50 days; 50/360 = 0.138888889.
  // (Label fixed: it previously contained a "01//01" double-slash typo.)
  val d1 = new Date(1, 1, 1993)
  val d2 = new Date(21, 2, 1993)
  val yf: Double = Thirty360(USA).yearFraction(d1, d2)
  assert(yf.rounded(9) == 0.138888889)
}
"3. 02/01/93 - 03/01/93" should "be 0.083333333" in {
val d1 = new Date(1,2,1993)
val d2 = new Date(1, 3, 1993)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.083333333)
}
"4. 02/01/92 - 03/01/92" should "be 0.083333333" in {
  // One 30/360 month across February of a leap year: 30/360 = 0.083333333.
  // (Label fixed: it previously said 1996 while the dates under test are 1992.)
  val d1 = new Date(1, 2, 1992)
  val d2 = new Date(1, 3, 1992)
  val yf: Double = Thirty360(USA).yearFraction(d1, d2)
  assert(yf.rounded(9) == 0.083333333)
}
"5. 01/01/93 - 01/01/94" should "be 1.000000000" in {
val d1 = new Date(1,1, 1993)
val d2 = new Date(1,1, 1994)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 1.000000000)
}
"6. 01/15/93 - 02/01/93" should "be 0.044444444" in {
val d1 = new Date(15, 1, 1993)
val d2 = new Date(1, 2, 1993)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.044444444)
}
"7. 02/15/93 - 04/01/93" should "be 46" in {
val d1 = new Date(15, 2, 1993)
val d2 = new Date(1,4, 1993)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == (46/360d).rounded(9))
}
"8. 03/31/93 to 04/30/93" should "be 0.083333333" in {
val d1 = new Date(31, 3, 1993)
val d2 = new Date(30, 4, 1993)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.083333333)
}
"9. 03/31/93 - 12/31/93" should "be 0.750000000" in {
val d1 = new Date(31, 3, 1993)
val d2 = new Date(31, 12, 1993)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.750000000)
}
"10. 03/15/93 - 06/15/93" should "be 0.250000000" in {
val d1 = new Date(15, 3, 1993)
val d2 = new Date(15, 6, 1993)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.250000000)
}
"11. 11/01/93 - 03/01/94" should "be 0.333333333" in {
val d1 = new Date(1, 11, 1993)
val d2 = new Date(1, 3, 1994)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.333333333)
}
"12. 12/31/93 - 02/01/94" should "be 0.086111111" in {
val d1 = new Date(31, 12, 1993)
val d2 = new Date(1, 2, 1994)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.086111111)
}
"13. 07/15/93 - 09/15/93" should "be 0.166666667" in {
val d1 = new Date(15, 7, 1993)
val d2 = new Date(15, 9, 1993)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.166666667)
}
"14. 08/21/93 - 04/11/94" should "be 0.638888889" in {
val d1 = new Date(21, 8, 1993)
val d2 = new Date(11, 4, 1994)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.638888889)
}
"15. 03/31/93 - 04/01/93" should "be 0.002777778" in {
val d1 = new Date(31, 3, 1993)
val d2 = new Date(1, 4, 1993)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.002777778)
}
"16. 12/15/93 - 12/31/93" should "be 0.044444444" in {
val d1 = new Date(15, 12, 1993)
val d2 = new Date(31, 12, 1993)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.044444444)
}
"17. 12/15/93 - 12/30/93" should "be 0.041666667" in {
val d1 = new Date(15, 12, 1993)
val d2 = new Date(30, 12, 1993)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.041666667)
}
"18. 01/15/07 - 2/15/07" should "be 0.083333333" in {
val d1 = new Date(15, 1, 2007)
val d2 = new Date(15, 2, 2007)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.083333333)
}
"19. 01/15/07 - 07/15/07" should "be 0.500000000" in {
val d1 = new Date(15, 1, 2007)
val d2 = new Date(15, 7, 2007)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.500000000)
}
"20. 09/30/07 - 03/31/08" should "be 0.500000000" in {
val d1 = new Date(30, 9, 2007)
val d2 = new Date(31, 3, 2008)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.500000000)
}
"21. 09/30/07 - 10/31/07" should "be 0.083333333" in {
val d1 = new Date(30, 9, 2007)
val d2 = new Date(31, 10, 2007)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.083333333)
}
"22. 09/30/07 - 09/30/08" should "be 1.000000000" in {
val d1 = new Date(30, 9, 2007)
val d2 = new Date(30, 9, 2008)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 1.000000000)
}
"23. 01/15/07 - 01/31/07" should "be 0.044444444" in {
val d1 = new Date(15, 1, 2007)
val d2 = new Date(31, 1, 2007)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.044444444)
}
"24. 01/31/07 - 02/28/07" should "be 0.077777778" in {
val d1 = new Date(31, 1, 2007)
val d2 = new Date(28, 2, 2007)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.077777778)
}
"25. 02/28/07 - 03/31/07" should "be 0.091666667" in {
val d1 = new Date(28, 2,2007)
val d2 = new Date(31, 3, 2007)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.091666667)
}
"26. 08/31/06 - 02/28/07" should "be 0.494444444" in {
val d1 = new Date(31, 8, 2006)
val d2 = new Date(28, 2, 2007)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.494444444)
}
"27. 02/28/07 - 08/31/07" should "be 0.508333333" in {
  // 30/360 US without the February end-of-month roll: the count is
  // 6 * 30 + (31 - 28) = 183 days, giving 183/360 = 0.508333333, which is
  // what the assertion checks. (Label fixed: it previously claimed the
  // result should be 0.500000000, contradicting the assertion below.)
  val d1 = new Date(28, 2, 2007)
  val d2 = new Date(31, 8, 2007)
  val yf: Double = Thirty360(USA).yearFraction(d1, d2)
  assert(yf.rounded(9) == 0.508333333)
}
"28. 02/14/07 - 02/28/07" should "be 0.038888889" in {
val d1 = new Date(14, 2, 2007)
val d2 = new Date(28, 2, 2007)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.038888889)
}
"29. 02/26/07 - 02/29/08" should "be 1.008333333" in {
val d1 = new Date(26, 2, 2007)
val d2 = new Date(29, 2, 2008)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 1.008333333)
}
"30. 02/29/08 - 02/28/09" should "be 0.997222222" in {
val d1 = new Date(29, 2, 2008)
val d2 = new Date(28, 2, 2009)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.997222222)
}
"31. 02/29/08 - 03/30/08" should "be 0.086111111" in {
val d1 = new Date(29, 2, 2008)
val d2 = new Date(30, 3, 2008)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.086111111)
}
"32. 02/29/08 - 03/31/08" should "be 0.088888889" in {
val d1 = new Date(29, 2, 2008)
val d2 = new Date(31, 3, 2008)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.088888889)
}
"33. 02/28/07 - 03/05/07" should "be 0.019444444" in {
val d1 = new Date(28, 2, 2007)
val d2 = new Date(5, 3, 2007)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.019444444)
}
"34. 10/31/07 - 11/28/07" should "be 0.077777778" in {
val d1 = new Date(31, 10, 2007)
val d2 = new Date(28, 11, 2007)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.077777778)
}
"35. 08/31/07 - 02/29/08" should "be 0.497222222" in {
val d1 = new Date(31, 8, 2007)
val d2 = new Date(29, 2, 2008)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.497222222)
}
"36. 02/29/08 - 08/31/08" should "be 0.505555556" in {
val d1 = new Date(29, 2, 2008)
val d2 = new Date(31, 8, 2008)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.505555556)
}
"37. 08/31/08 - 02/28/09" should "be 0.494444444" in {
val d1 = new Date(31, 8, 2008)
val d2 = new Date(28, 2, 2009)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.494444444)
}
"38. 02/28/09 - 08/31/09" should "be 0.508333333" in {
val d1 = new Date(28, 2, 2009)
val d2 = new Date(31, 8, 2009)
val yf :Double = Thirty360(USA).yearFraction(d1, d2)
assert(yf.rounded(9) == 0.508333333)
}
}
| quantintel/spectrum | financial/src/test/scala/org/quantintel/ql/time/daycounters/Thirty360Test.scala | Scala | apache-2.0 | 10,374 |
package geek.lawsof.physics.lib.machine.metallic
import geek.lawsof.physics.lib.block.te.TileEntityBase
import geek.lawsof.physics.lib.materials.metals.MetalBlock
/**
* Created by anshuman on 23-07-2014.
*/
trait IMetallicMachine {
  self: TileEntityBase =>

  /**
   * Looks up the metal properties of the block occupying this tile entity's
   * coordinates.
   *
   * When the block implements [[IMetallicBlock]], returns the result of its
   * `props` for the block's metadata value; otherwise the expression yields
   * `()`. The `case _ => ()` branch reproduces the original else-less `if`,
   * so the inferred result type is unchanged for existing callers.
   */
  def properties = {
    // `getBlockAt` returns a pair whose _1 is the block and _2 its metadata.
    val block = coord.getBlockAt(worldObj)
    // Pattern match replaces the original isInstanceOf/asInstanceOf pair.
    block._1 match {
      case metallic: IMetallicBlock => metallic.props(block._2)
      case _ => ()
    }
  }
}
| GeckoTheGeek42/TheLawsOfPhysics | src/main/scala/geek/lawsof/physics/lib/machine/metallic/IMetallicMachine.scala | Scala | mit | 433 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import java.io.File
import java.net.URI
import java.util.Locale
import org.apache.hadoop.fs.Path
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SaveMode}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{FunctionRegistry, NoSuchPartitionException, NoSuchTableException, TempTableAlreadyExistsException}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATION
import org.apache.spark.sql.test.{SharedSQLContext, SQLTestUtils}
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
class InMemoryCatalogedDDLSuite extends DDLSuite with SharedSQLContext with BeforeAndAfterEach {
// Restores a pristine session between tests: the catalog is reset first, and
// the warehouse directory is wiped even if the reset itself fails.
override def afterEach(): Unit = {
  try {
    // drop all databases, tables and functions after each test
    spark.sessionState.catalog.reset()
  } finally {
    // Also delete the warehouse directory so files written by one test cannot
    // leak into the next, then let the superclass finish its own teardown.
    Utils.deleteRecursively(new File(spark.sessionState.conf.warehousePath))
    super.afterEach()
  }
}
// Builds the catalog entry used by the shared DDLSuite scenarios: an external
// parquet table at the catalog's default location, partitioned by (a, b).
// `isDataSource` is accepted for signature compatibility with other suites;
// this in-memory implementation does not use it.
protected override def generateTable(
    catalog: SessionCatalog,
    name: TableIdentifier,
    isDataSource: Boolean = true): CatalogTable = {
  val storage =
    CatalogStorageFormat.empty.copy(locationUri = Some(catalog.defaultTablePath(name)))
  // Column-level metadata attached to col1; lets tests verify that metadata
  // survives DDL commands.
  val metadata = new MetadataBuilder()
    .putString("key", "value")
    .build()
  CatalogTable(
    identifier = name,
    tableType = CatalogTableType.EXTERNAL,
    storage = storage,
    schema = new StructType()
      .add("col1", "int", nullable = true, metadata = metadata)
      .add("col2", "string")
      .add("a", "int")
      .add("b", "int"),
    provider = Some("parquet"),
    partitionColumnNames = Seq("a", "b"),
    createTime = 0L,
    // Partitions are tracked in the catalog, not inferred from the filesystem.
    tracksPartitionsInCatalog = true)
}
test("create a managed Hive source table") {
  // Only meaningful when the session uses the in-memory catalog; the Hive
  // catalog accepts this statement.
  assume(spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "in-memory")
  withTable("tbl") {
    // A CREATE TABLE without USING is Hive-style DDL and must be rejected.
    val message = intercept[AnalysisException] {
      sql("CREATE TABLE tbl (i INT, j STRING)")
    }.getMessage
    assert(message.contains("Hive support is required to CREATE Hive TABLE"))
  }
}
// CREATE EXTERNAL TABLE with Hive row-format syntax must fail against the
// in-memory catalog with a clear "Hive support required" message.
test("create an external Hive source table") {
  assume(spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "in-memory")
  withTempDir { tempDir =>
    val tabName = "tbl"
    withTable(tabName) {
      val e = intercept[AnalysisException] {
        sql(
          s"""
            |CREATE EXTERNAL TABLE $tabName (i INT, j STRING)
            |ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
            |LOCATION '${tempDir.toURI}'
          """.stripMargin)
      }.getMessage
      assert(e.contains("Hive support is required to CREATE Hive TABLE"))
    }
  }
}
// Hive-style CTAS (no USING clause) must be rejected by the in-memory catalog,
// both for a literal SELECT and for a SELECT from an existing table.
test("Create Hive Table As Select") {
  import testImplicits._
  withTable("t", "t1") {
    var e = intercept[AnalysisException] {
      sql("CREATE TABLE t SELECT 1 as a, 1 as b")
    }.getMessage
    assert(e.contains("Hive support is required to CREATE Hive TABLE (AS SELECT)"))

    // Same restriction when the source is a real (data source) table.
    spark.range(1).select('id as 'a, 'id as 'b).write.saveAsTable("t1")
    e = intercept[AnalysisException] {
      sql("CREATE TABLE t SELECT a, b from t1")
    }.getMessage
    assert(e.contains("Hive support is required to CREATE Hive TABLE (AS SELECT)"))
  }
}
}
abstract class DDLSuite extends QueryTest with SQLTestUtils {
// True when the session is backed by the Hive external catalog rather than the
// in-memory one; tests branch on this because the two catalogs raise different
// error messages for the same failure.
protected def isUsingHiveMetastore: Boolean = {
  spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "hive"
}
protected def generateTable(
catalog: SessionCatalog,
name: TableIdentifier,
isDataSource: Boolean = true): CatalogTable
// Matches an identifier wrapped in backticks, capturing the unescaped name.
private val escapedIdentifier = "`(.+)`".r

// Identity by default; presumably a hook for subclasses (e.g. the Hive suite)
// to erase catalog-specific fields before comparing tables — TODO confirm.
protected def normalizeCatalogTable(table: CatalogTable): CatalogTable = table

// Drops serde properties whose values differ between catalog implementations.
private def normalizeSerdeProp(props: Map[String, String]): Map[String, String] = {
  props.filterNot(p => Seq("serialization.format", "path").contains(p._1))
}

// Compares two catalog tables after normalization so incidental differences
// between catalog implementations do not fail the assertion.
private def checkCatalogTables(expected: CatalogTable, actual: CatalogTable): Unit = {
  assert(normalizeCatalogTable(actual) == normalizeCatalogTable(expected))
}
/**
 * Strip backticks, if any, from the string.
 */
private def cleanIdentifier(ident: String): String =
  // Regex#unapplySeq only succeeds on a full match, mirroring the original
  // pattern match against the `escapedIdentifier` extractor; plain
  // identifiers fall through unchanged.
  escapedIdentifier.unapplySeq(ident).flatMap(_.headOption).getOrElse(ident)
// Runs `query` and requires that analysis rejects it with an
// "operation not allowed" error.
private def assertUnsupported(query: String): Unit = {
  val error = intercept[AnalysisException](sql(query))
  assert(error.getMessage.toLowerCase(Locale.ROOT).contains("operation not allowed"))
}
// Evaluates `body`, demanding an AnalysisException only when the caller
// expects one; otherwise the body must run cleanly.
private def maybeWrapException[T](expectException: Boolean)(body: => T): Unit = {
  if (!expectException) body
  else intercept[AnalysisException](body)
}
// Registers a database directly in the catalog with an empty description and
// no properties; its location is the session's warehouse path as a URI.
private def createDatabase(catalog: SessionCatalog, name: String): Unit = {
  catalog.createDatabase(
    CatalogDatabase(
      name, "", CatalogUtils.stringToURI(spark.sessionState.conf.warehousePath), Map()),
    ignoreIfExists = false)
}
// Creates a table directly in the catalog, delegating the table definition to
// the suite-specific `generateTable` so each concrete suite (in-memory vs
// Hive) controls the table format.
private def createTable(
    catalog: SessionCatalog,
    name: TableIdentifier,
    isDataSource: Boolean = true): Unit = {
  catalog.createTable(generateTable(catalog, name, isDataSource), ignoreIfExists = false)
}
// Adds a single partition for `spec` to the given table; the partition carries
// an empty storage descriptor (no location, input/output format or serde).
private def createTablePartition(
    catalog: SessionCatalog,
    spec: TablePartitionSpec,
    tableName: TableIdentifier): Unit = {
  val part = CatalogTablePartition(
    spec, CatalogStorageFormat(None, None, None, None, false, Map()))
  catalog.createPartitions(tableName, Seq(part), ignoreIfExists = false)
}
// Expected on-disk location of a database: "<warehouse>/<name>.db". The
// warehouse path is qualified first so the scheme (e.g. "file:") is explicit.
private def getDBPath(dbName: String): URI = {
  val warehousePath = makeQualifiedPath(spark.sessionState.conf.warehousePath)
  new Path(CatalogUtils.URIToString(warehousePath), s"$dbName.db").toUri
}
// Each of the following delegates to a shared scenario (testSetLocation etc.,
// presumably defined later in this suite — not visible in this chunk), running
// it with `isDatasourceTable = true`, i.e. against a data source table rather
// than a Hive table.
test("alter table: set location (datasource table)") {
  testSetLocation(isDatasourceTable = true)
}

test("alter table: set properties (datasource table)") {
  testSetProperties(isDatasourceTable = true)
}

test("alter table: unset properties (datasource table)") {
  testUnsetProperties(isDatasourceTable = true)
}

test("alter table: set serde (datasource table)") {
  testSetSerde(isDatasourceTable = true)
}

test("alter table: set serde partition (datasource table)") {
  testSetSerdePartition(isDatasourceTable = true)
}

test("alter table: change column (datasource table)") {
  testChangeColumn(isDatasourceTable = true)
}

test("alter table: add partition (datasource table)") {
  testAddPartitions(isDatasourceTable = true)
}

test("alter table: drop partition (datasource table)") {
  testDropPartitions(isDatasourceTable = true)
}

test("alter table: rename partition (datasource table)") {
  testRenamePartitions(isDatasourceTable = true)
}

test("drop table - data source table") {
  testDropTable(isDatasourceTable = true)
}
test("the qualified path of a database is stored in the catalog") {
val catalog = spark.sessionState.catalog
withTempDir { tmpDir =>
val path = tmpDir.getCanonicalPath
// The generated temp path is not qualified.
assert(!path.startsWith("file:/"))
val uri = tmpDir.toURI
sql(s"CREATE DATABASE db1 LOCATION '$uri'")
val pathInCatalog = new Path(catalog.getDatabaseMetadata("db1").locationUri).toUri
assert("file" === pathInCatalog.getScheme)
val expectedPath = new Path(path).toUri
assert(expectedPath.getPath === pathInCatalog.getPath)
sql("DROP DATABASE db1")
}
}
test("Create Database using Default Warehouse Path") {
val catalog = spark.sessionState.catalog
val dbName = "db1"
try {
sql(s"CREATE DATABASE $dbName")
val db1 = catalog.getDatabaseMetadata(dbName)
assert(db1 == CatalogDatabase(
dbName,
"",
getDBPath(dbName),
Map.empty))
sql(s"DROP DATABASE $dbName CASCADE")
assert(!catalog.databaseExists(dbName))
} finally {
catalog.reset()
}
}
test("Create/Drop Database - location") {
val catalog = spark.sessionState.catalog
val databaseNames = Seq("db1", "`database`")
withTempDir { tmpDir =>
val path = new Path(tmpDir.getCanonicalPath).toUri
databaseNames.foreach { dbName =>
try {
val dbNameWithoutBackTicks = cleanIdentifier(dbName)
sql(s"CREATE DATABASE $dbName Location '$path'")
val db1 = catalog.getDatabaseMetadata(dbNameWithoutBackTicks)
val expPath = makeQualifiedPath(tmpDir.toString)
assert(db1 == CatalogDatabase(
dbNameWithoutBackTicks,
"",
expPath,
Map.empty))
sql(s"DROP DATABASE $dbName CASCADE")
assert(!catalog.databaseExists(dbNameWithoutBackTicks))
} finally {
catalog.reset()
}
}
}
}
test("Create Database - database already exists") {
val catalog = spark.sessionState.catalog
val databaseNames = Seq("db1", "`database`")
databaseNames.foreach { dbName =>
try {
val dbNameWithoutBackTicks = cleanIdentifier(dbName)
sql(s"CREATE DATABASE $dbName")
val db1 = catalog.getDatabaseMetadata(dbNameWithoutBackTicks)
assert(db1 == CatalogDatabase(
dbNameWithoutBackTicks,
"",
getDBPath(dbNameWithoutBackTicks),
Map.empty))
// TODO: HiveExternalCatalog should throw DatabaseAlreadyExistsException
val e = intercept[AnalysisException] {
sql(s"CREATE DATABASE $dbName")
}.getMessage
assert(e.contains(s"already exists"))
} finally {
catalog.reset()
}
}
}
private def checkSchemaInCreatedDataSourceTable(
path: File,
userSpecifiedSchema: Option[String],
userSpecifiedPartitionCols: Option[String],
expectedSchema: StructType,
expectedPartitionCols: Seq[String]): Unit = {
val tabName = "tab1"
withTable(tabName) {
val partitionClause =
userSpecifiedPartitionCols.map(p => s"PARTITIONED BY ($p)").getOrElse("")
val schemaClause = userSpecifiedSchema.map(s => s"($s)").getOrElse("")
val uri = path.toURI
val sqlCreateTable =
s"""
|CREATE TABLE $tabName $schemaClause
|USING parquet
|OPTIONS (
| path '$uri'
|)
|$partitionClause
""".stripMargin
if (userSpecifiedSchema.isEmpty && userSpecifiedPartitionCols.nonEmpty) {
val e = intercept[AnalysisException](sql(sqlCreateTable)).getMessage
assert(e.contains(
"not allowed to specify partition columns when the table schema is not defined"))
} else {
sql(sqlCreateTable)
val tableMetadata = spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName))
assert(expectedSchema == tableMetadata.schema)
assert(expectedPartitionCols == tableMetadata.partitionColumnNames)
}
}
}
test("Create partitioned data source table without user specified schema") {
import testImplicits._
val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str")
// Case 1: with partitioning columns but no schema: Option("inexistentColumns")
// Case 2: without schema and partitioning columns: None
Seq(Option("inexistentColumns"), None).foreach { partitionCols =>
withTempPath { pathToPartitionedTable =>
df.write.format("parquet").partitionBy("num")
.save(pathToPartitionedTable.getCanonicalPath)
checkSchemaInCreatedDataSourceTable(
pathToPartitionedTable,
userSpecifiedSchema = None,
userSpecifiedPartitionCols = partitionCols,
expectedSchema = new StructType().add("str", StringType).add("num", IntegerType),
expectedPartitionCols = Seq("num"))
}
}
}
test("Create partitioned data source table with user specified schema") {
import testImplicits._
val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str")
// Case 1: with partitioning columns but no schema: Option("num")
// Case 2: without schema and partitioning columns: None
Seq(Option("num"), None).foreach { partitionCols =>
withTempPath { pathToPartitionedTable =>
df.write.format("parquet").partitionBy("num")
.save(pathToPartitionedTable.getCanonicalPath)
checkSchemaInCreatedDataSourceTable(
pathToPartitionedTable,
userSpecifiedSchema = Option("num int, str string"),
userSpecifiedPartitionCols = partitionCols,
expectedSchema = new StructType().add("str", StringType).add("num", IntegerType),
expectedPartitionCols = partitionCols.map(Seq(_)).getOrElse(Seq.empty[String]))
}
}
}
test("Create non-partitioned data source table without user specified schema") {
import testImplicits._
val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str")
// Case 1: with partitioning columns but no schema: Option("inexistentColumns")
// Case 2: without schema and partitioning columns: None
Seq(Option("inexistentColumns"), None).foreach { partitionCols =>
withTempPath { pathToNonPartitionedTable =>
df.write.format("parquet").save(pathToNonPartitionedTable.getCanonicalPath)
checkSchemaInCreatedDataSourceTable(
pathToNonPartitionedTable,
userSpecifiedSchema = None,
userSpecifiedPartitionCols = partitionCols,
expectedSchema = new StructType().add("num", IntegerType).add("str", StringType),
expectedPartitionCols = Seq.empty[String])
}
}
}
test("Create non-partitioned data source table with user specified schema") {
import testImplicits._
val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str")
// Case 1: with partitioning columns but no schema: Option("inexistentColumns")
// Case 2: without schema and partitioning columns: None
Seq(Option("num"), None).foreach { partitionCols =>
withTempPath { pathToNonPartitionedTable =>
df.write.format("parquet").save(pathToNonPartitionedTable.getCanonicalPath)
checkSchemaInCreatedDataSourceTable(
pathToNonPartitionedTable,
userSpecifiedSchema = Option("num int, str string"),
userSpecifiedPartitionCols = partitionCols,
expectedSchema = if (partitionCols.isDefined) {
// we skipped inference, so the partition col is ordered at the end
new StructType().add("str", StringType).add("num", IntegerType)
} else {
// no inferred partitioning, so schema is in original order
new StructType().add("num", IntegerType).add("str", StringType)
},
expectedPartitionCols = partitionCols.map(Seq(_)).getOrElse(Seq.empty[String]))
}
}
}
test("create table - duplicate column names in the table definition") {
val e = intercept[AnalysisException] {
sql("CREATE TABLE tbl(a int, a string) USING json")
}
assert(e.message == "Found duplicate column(s) in table definition of `tbl`: a")
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
val e2 = intercept[AnalysisException] {
sql("CREATE TABLE tbl(a int, A string) USING json")
}
assert(e2.message == "Found duplicate column(s) in table definition of `tbl`: a")
}
}
test("create table - partition column names not in table definition") {
val e = intercept[AnalysisException] {
sql("CREATE TABLE tbl(a int, b string) USING json PARTITIONED BY (c)")
}
assert(e.message == "partition column c is not defined in table tbl, " +
"defined table columns are: a, b")
}
test("create table - bucket column names not in table definition") {
val e = intercept[AnalysisException] {
sql("CREATE TABLE tbl(a int, b string) USING json CLUSTERED BY (c) INTO 4 BUCKETS")
}
assert(e.message == "bucket column c is not defined in table tbl, " +
"defined table columns are: a, b")
}
test("create table - column repeated in partition columns") {
val e = intercept[AnalysisException] {
sql("CREATE TABLE tbl(a int) USING json PARTITIONED BY (a, a)")
}
assert(e.message == "Found duplicate column(s) in partition: a")
}
test("create table - column repeated in bucket columns") {
val e = intercept[AnalysisException] {
sql("CREATE TABLE tbl(a int) USING json CLUSTERED BY (a, a) INTO 4 BUCKETS")
}
assert(e.message == "Found duplicate column(s) in bucket: a")
}
test("Refresh table after changing the data source table partitioning") {
import testImplicits._
val tabName = "tab1"
val catalog = spark.sessionState.catalog
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString, i, i))
.toDF("col1", "col2", "col3", "col4")
df.write.format("json").partitionBy("col1", "col3").save(path)
val schema = new StructType()
.add("col2", StringType).add("col4", LongType)
.add("col1", IntegerType).add("col3", IntegerType)
val partitionCols = Seq("col1", "col3")
val uri = dir.toURI
withTable(tabName) {
spark.sql(
s"""
|CREATE TABLE $tabName
|USING json
|OPTIONS (
| path '$uri'
|)
""".stripMargin)
val tableMetadata = catalog.getTableMetadata(TableIdentifier(tabName))
assert(tableMetadata.schema == schema)
assert(tableMetadata.partitionColumnNames == partitionCols)
// Change the schema
val newDF = sparkContext.parallelize(1 to 10).map(i => (i, i.toString))
.toDF("newCol1", "newCol2")
newDF.write.format("json").partitionBy("newCol1").mode(SaveMode.Overwrite).save(path)
// No change on the schema
val tableMetadataBeforeRefresh = catalog.getTableMetadata(TableIdentifier(tabName))
assert(tableMetadataBeforeRefresh.schema == schema)
assert(tableMetadataBeforeRefresh.partitionColumnNames == partitionCols)
// Refresh does not affect the schema
spark.catalog.refreshTable(tabName)
val tableMetadataAfterRefresh = catalog.getTableMetadata(TableIdentifier(tabName))
assert(tableMetadataAfterRefresh.schema == schema)
assert(tableMetadataAfterRefresh.partitionColumnNames == partitionCols)
}
}
}
test("Alter/Describe Database") {
val catalog = spark.sessionState.catalog
val databaseNames = Seq("db1", "`database`")
databaseNames.foreach { dbName =>
try {
val dbNameWithoutBackTicks = cleanIdentifier(dbName)
val location = getDBPath(dbNameWithoutBackTicks)
sql(s"CREATE DATABASE $dbName")
checkAnswer(
sql(s"DESCRIBE DATABASE EXTENDED $dbName"),
Row("Database Name", dbNameWithoutBackTicks) ::
Row("Description", "") ::
Row("Location", CatalogUtils.URIToString(location)) ::
Row("Properties", "") :: Nil)
sql(s"ALTER DATABASE $dbName SET DBPROPERTIES ('a'='a', 'b'='b', 'c'='c')")
checkAnswer(
sql(s"DESCRIBE DATABASE EXTENDED $dbName"),
Row("Database Name", dbNameWithoutBackTicks) ::
Row("Description", "") ::
Row("Location", CatalogUtils.URIToString(location)) ::
Row("Properties", "((a,a), (b,b), (c,c))") :: Nil)
sql(s"ALTER DATABASE $dbName SET DBPROPERTIES ('d'='d')")
checkAnswer(
sql(s"DESCRIBE DATABASE EXTENDED $dbName"),
Row("Database Name", dbNameWithoutBackTicks) ::
Row("Description", "") ::
Row("Location", CatalogUtils.URIToString(location)) ::
Row("Properties", "((a,a), (b,b), (c,c), (d,d))") :: Nil)
} finally {
catalog.reset()
}
}
}
test("Drop/Alter/Describe Database - database does not exists") {
val databaseNames = Seq("db1", "`database`")
databaseNames.foreach { dbName =>
val dbNameWithoutBackTicks = cleanIdentifier(dbName)
assert(!spark.sessionState.catalog.databaseExists(dbNameWithoutBackTicks))
var message = intercept[AnalysisException] {
sql(s"DROP DATABASE $dbName")
}.getMessage
// TODO: Unify the exception.
if (isUsingHiveMetastore) {
assert(message.contains(s"NoSuchObjectException: $dbNameWithoutBackTicks"))
} else {
assert(message.contains(s"Database '$dbNameWithoutBackTicks' not found"))
}
message = intercept[AnalysisException] {
sql(s"ALTER DATABASE $dbName SET DBPROPERTIES ('d'='d')")
}.getMessage
assert(message.contains(s"Database '$dbNameWithoutBackTicks' not found"))
message = intercept[AnalysisException] {
sql(s"DESCRIBE DATABASE EXTENDED $dbName")
}.getMessage
assert(message.contains(s"Database '$dbNameWithoutBackTicks' not found"))
sql(s"DROP DATABASE IF EXISTS $dbName")
}
}
test("drop non-empty database in restrict mode") {
val catalog = spark.sessionState.catalog
val dbName = "db1"
sql(s"CREATE DATABASE $dbName")
// create a table in database
val tableIdent1 = TableIdentifier("tab1", Some(dbName))
createTable(catalog, tableIdent1)
// drop a non-empty database in Restrict mode
val message = intercept[AnalysisException] {
sql(s"DROP DATABASE $dbName RESTRICT")
}.getMessage
assert(message.contains(s"Database $dbName is not empty. One or more tables exist"))
catalog.dropTable(tableIdent1, ignoreIfNotExists = false, purge = false)
assert(catalog.listDatabases().contains(dbName))
sql(s"DROP DATABASE $dbName RESTRICT")
assert(!catalog.listDatabases().contains(dbName))
}
test("drop non-empty database in cascade mode") {
val catalog = spark.sessionState.catalog
val dbName = "db1"
sql(s"CREATE DATABASE $dbName")
// create a table in database
val tableIdent1 = TableIdentifier("tab1", Some(dbName))
createTable(catalog, tableIdent1)
// drop a non-empty database in CASCADE mode
assert(catalog.listTables(dbName).contains(tableIdent1))
assert(catalog.listDatabases().contains(dbName))
sql(s"DROP DATABASE $dbName CASCADE")
assert(!catalog.listDatabases().contains(dbName))
}
test("create table in default db") {
val catalog = spark.sessionState.catalog
val tableIdent1 = TableIdentifier("tab1", None)
createTable(catalog, tableIdent1)
val expectedTableIdent = tableIdent1.copy(database = Some("default"))
val expectedTable = generateTable(catalog, expectedTableIdent)
checkCatalogTables(expectedTable, catalog.getTableMetadata(tableIdent1))
}
test("create table in a specific db") {
val catalog = spark.sessionState.catalog
createDatabase(catalog, "dbx")
val tableIdent1 = TableIdentifier("tab1", Some("dbx"))
createTable(catalog, tableIdent1)
val expectedTable = generateTable(catalog, tableIdent1)
checkCatalogTables(expectedTable, catalog.getTableMetadata(tableIdent1))
}
test("create table using") {
val catalog = spark.sessionState.catalog
withTable("tbl") {
sql("CREATE TABLE tbl(a INT, b INT) USING parquet")
val table = catalog.getTableMetadata(TableIdentifier("tbl"))
assert(table.tableType == CatalogTableType.MANAGED)
assert(table.schema == new StructType().add("a", "int").add("b", "int"))
assert(table.provider == Some("parquet"))
}
}
// PARTITIONED BY columns are recorded in the catalog and reordered to the end
// of the table schema.
test("create table using - with partitioned by") {
val catalog = spark.sessionState.catalog
withTable("tbl") {
sql("CREATE TABLE tbl(a INT, b INT) USING parquet PARTITIONED BY (a)")
val table = catalog.getTableMetadata(TableIdentifier("tbl"))
assert(table.tableType == CatalogTableType.MANAGED)
assert(table.provider == Some("parquet"))
// a is ordered last since it is a user-specified partitioning column
assert(table.schema == new StructType().add("b", IntegerType).add("a", IntegerType))
assert(table.partitionColumnNames == Seq("a"))
}
}
// CLUSTERED BY / SORTED BY / INTO n BUCKETS is captured as a BucketSpec in the
// catalog metadata; bucketing does not reorder the schema.
test("create table using - with bucket") {
val catalog = spark.sessionState.catalog
withTable("tbl") {
sql("CREATE TABLE tbl(a INT, b INT) USING parquet " +
"CLUSTERED BY (a) SORTED BY (b) INTO 5 BUCKETS")
val table = catalog.getTableMetadata(TableIdentifier("tbl"))
assert(table.tableType == CatalogTableType.MANAGED)
assert(table.provider == Some("parquet"))
assert(table.schema == new StructType().add("a", IntegerType).add("b", IntegerType))
assert(table.bucketSpec == Some(BucketSpec(5, Seq("a"), Seq("b"))))
}
}
// A temporary view backed by a data source can be created and queried; creating a
// second view with the same name without OR REPLACE must fail.
test("create temporary view using") {
// When run under HiveCatalogedDDLSuite this would fail because the csv file path
// above starts with 'jar:', which is an illegal parameter for Path, so we copy the
// resource to a temp file via withResourceTempPath.
withResourceTempPath("test-data/cars.csv") { tmpFile =>
withView("testview") {
sql(s"CREATE OR REPLACE TEMPORARY VIEW testview (c1 String, c2 String) USING " +
"org.apache.spark.sql.execution.datasources.csv.CSVFileFormat " +
s"OPTIONS (PATH '${tmpFile.toURI}')")
checkAnswer(
sql("select c1, c2 from testview order by c1 limit 1"),
Row("1997", "Ford") :: Nil)
// Fails if creating a new view with the same name
intercept[TempTableAlreadyExistsException] {
sql(
s"""
|CREATE TEMPORARY VIEW testview
|USING org.apache.spark.sql.execution.datasources.csv.CSVFileFormat
|OPTIONS (PATH '${tmpFile.toURI}')
""".stripMargin)
}
}
}
}
// ALTER TABLE ... RENAME TO: with and without an explicit database qualifier on
// either side; renaming a missing table or across databases must fail.
test("alter table: rename") {
val catalog = spark.sessionState.catalog
val tableIdent1 = TableIdentifier("tab1", Some("dbx"))
val tableIdent2 = TableIdentifier("tab2", Some("dbx"))
createDatabase(catalog, "dbx")
createDatabase(catalog, "dby")
createTable(catalog, tableIdent1)
assert(catalog.listTables("dbx") == Seq(tableIdent1))
sql("ALTER TABLE dbx.tab1 RENAME TO dbx.tab2")
assert(catalog.listTables("dbx") == Seq(tableIdent2))
// The database in destination table name can be omitted, and we will use the database of source
// table for it.
sql("ALTER TABLE dbx.tab2 RENAME TO tab1")
assert(catalog.listTables("dbx") == Seq(tableIdent1))
catalog.setCurrentDatabase("dbx")
// rename without explicitly specifying database
sql("ALTER TABLE tab1 RENAME TO tab2")
assert(catalog.listTables("dbx") == Seq(tableIdent2))
// table to rename does not exist
intercept[AnalysisException] {
sql("ALTER TABLE dbx.does_not_exist RENAME TO dbx.tab2")
}
// destination database is different
intercept[AnalysisException] {
sql("ALTER TABLE dbx.tab1 RENAME TO dby.tab2")
}
}
// Renaming a cached table must move the cache with it: after the rename, a newly
// created table with the old name must not see the stale cached data.
test("alter table: rename cached table") {
import testImplicits._
sql("CREATE TABLE students (age INT, name STRING) USING parquet")
val df = (1 to 2).map { i => (i, i.toString) }.toDF("age", "name")
df.write.insertInto("students")
spark.catalog.cacheTable("students")
assume(spark.table("students").collect().toSeq == df.collect().toSeq, "bad test: wrong data")
assume(spark.catalog.isCached("students"), "bad test: table was not cached in the first place")
sql("ALTER TABLE students RENAME TO teachers")
sql("CREATE TABLE students (age INT, name STRING) USING parquet")
// Now we have both students and teachers.
// The cached data for the old students table should not be read by the new students table.
assert(!spark.catalog.isCached("students"))
assert(spark.catalog.isCached("teachers"))
assert(spark.table("students").collect().isEmpty)
assert(spark.table("teachers").collect().toSeq == df.collect().toSeq)
}
// Renaming a temporary table to a database-qualified name is rejected with a clear
// error, and the temp table is left untouched.
test("rename temporary table - destination table with database name") {
withTempView("tab1") {
sql(
"""
|CREATE TEMPORARY TABLE tab1
|USING org.apache.spark.sql.sources.DDLScanSource
|OPTIONS (
| From '1',
| To '10',
| Table 'test1'
|)
""".stripMargin)
val e = intercept[AnalysisException] {
sql("ALTER TABLE tab1 RENAME TO default.tab2")
}
assert(e.getMessage.contains(
"RENAME TEMPORARY TABLE from '`tab1`' to '`default`.`tab2`': " +
"cannot specify database name 'default' in the destination table"))
val catalog = spark.sessionState.catalog
assert(catalog.listTables("default") == Seq(TableIdentifier("tab1")))
}
}
// Both ALTER TABLE ... RENAME TO and ALTER VIEW ... RENAME TO work on a temporary
// view; after each rename the old name no longer resolves.
test("rename temporary table") {
withTempView("tab1", "tab2") {
spark.range(10).createOrReplaceTempView("tab1")
sql("ALTER TABLE tab1 RENAME TO tab2")
checkAnswer(spark.table("tab2"), spark.range(10).toDF())
intercept[NoSuchTableException] { spark.table("tab1") }
sql("ALTER VIEW tab2 RENAME TO tab1")
checkAnswer(spark.table("tab1"), spark.range(10).toDF())
intercept[NoSuchTableException] { spark.table("tab2") }
}
}
// Renaming a temporary table onto an existing temporary table name must fail and
// leave both tables in place.
test("rename temporary table - destination table already exists") {
withTempView("tab1", "tab2") {
sql(
"""
|CREATE TEMPORARY TABLE tab1
|USING org.apache.spark.sql.sources.DDLScanSource
|OPTIONS (
| From '1',
| To '10',
| Table 'test1'
|)
""".stripMargin)
sql(
"""
|CREATE TEMPORARY TABLE tab2
|USING org.apache.spark.sql.sources.DDLScanSource
|OPTIONS (
| From '1',
| To '10',
| Table 'test1'
|)
""".stripMargin)
val e = intercept[AnalysisException] {
sql("ALTER TABLE tab1 RENAME TO tab2")
}
assert(e.getMessage.contains(
"RENAME TEMPORARY TABLE from '`tab1`' to '`tab2`': destination table already exists"))
val catalog = spark.sessionState.catalog
assert(catalog.listTables("default") == Seq(TableIdentifier("tab1"), TableIdentifier("tab2")))
}
}
// Altering the bucketing of an existing table is not supported: every statement
// below is expected to be rejected as an unsupported operation.
test("alter table: bucketing is not supported") {
  val catalog = spark.sessionState.catalog
  val tableIdent = TableIdentifier("tab1", Some("dbx"))
  createDatabase(catalog, "dbx")
  createTable(catalog, tableIdent)
  Seq(
    "ALTER TABLE dbx.tab1 CLUSTERED BY (blood, lemon, grape) INTO 11 BUCKETS",
    "ALTER TABLE dbx.tab1 CLUSTERED BY (fuji) SORTED BY (grape) INTO 5 BUCKETS",
    "ALTER TABLE dbx.tab1 NOT CLUSTERED",
    "ALTER TABLE dbx.tab1 NOT SORTED"
  ).foreach(assertUnsupported)
}
// Hive's skewed-table DDL forms are not supported: every statement below is
// expected to be rejected as an unsupported operation.
test("alter table: skew is not supported") {
  val catalog = spark.sessionState.catalog
  val tableIdent = TableIdentifier("tab1", Some("dbx"))
  createDatabase(catalog, "dbx")
  createTable(catalog, tableIdent)
  Seq(
    "ALTER TABLE dbx.tab1 SKEWED BY (dt, country) ON " +
      "(('2008-08-08', 'us'), ('2009-09-09', 'uk'), ('2010-10-10', 'cn'))",
    "ALTER TABLE dbx.tab1 SKEWED BY (dt, country) ON " +
      "(('2008-08-08', 'us'), ('2009-09-09', 'uk')) STORED AS DIRECTORIES",
    "ALTER TABLE dbx.tab1 NOT SKEWED",
    "ALTER TABLE dbx.tab1 NOT STORED AS DIRECTORIES"
  ).foreach(assertUnsupported)
}
// Runs the shared RECOVER PARTITIONS scenario with a high parallel-listing
// threshold so the sequential listing code path is exercised.
test("alter table: recover partitions (sequential)") {
withSQLConf("spark.rdd.parallelListingThreshold" -> "10") {
testRecoverPartitions()
}
}
// Runs the shared RECOVER PARTITIONS scenario with a threshold of 1 so the
// parallel listing code path is exercised.
test("alter table: recover partition (parallel)") {
withSQLConf("spark.rdd.parallelListingThreshold" -> "1") {
testRecoverPartitions()
}
}
/**
 * Shared body for the RECOVER PARTITIONS tests. Creates a partitioned table, lays out a
 * mix of valid and invalid partition directories/files on the file system, then checks
 * that ALTER TABLE ... RECOVER PARTITIONS registers exactly the valid ones (including an
 * upper-cased directory, which exercises case-insensitive partition-column matching).
 */
protected def testRecoverPartitions(): Unit = {
  val catalog = spark.sessionState.catalog
  // table to alter does not exist
  intercept[AnalysisException] {
    sql("ALTER TABLE does_not_exist RECOVER PARTITIONS")
  }

  val tableIdent = TableIdentifier("tab1")
  createTable(catalog, tableIdent)
  val part1 = Map("a" -> "1", "b" -> "5")
  createTablePartition(catalog, part1, tableIdent)
  assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1))

  val part2 = Map("a" -> "2", "b" -> "6")
  val root = new Path(catalog.getTableMetadata(tableIdent).location)
  val fs = root.getFileSystem(spark.sparkContext.hadoopConfiguration)
  // valid partition layouts that RECOVER PARTITIONS should pick up
  fs.mkdirs(new Path(new Path(root, "a=1"), "b=5"))
  fs.createNewFile(new Path(new Path(root, "a=1/b=5"), "a.csv")) // file
  fs.createNewFile(new Path(new Path(root, "a=1/b=5"), "_SUCCESS")) // file
  fs.mkdirs(new Path(new Path(root, "A=2"), "B=6"))
  fs.createNewFile(new Path(new Path(root, "A=2/B=6"), "b.csv")) // file
  fs.createNewFile(new Path(new Path(root, "A=2/B=6"), "c.csv")) // file
  fs.createNewFile(new Path(new Path(root, "A=2/B=6"), ".hiddenFile")) // file
  fs.mkdirs(new Path(new Path(root, "A=2/B=6"), "_temporary"))
  // invalid layouts that RECOVER PARTITIONS must skip
  fs.mkdirs(new Path(new Path(root, "a"), "b")) // bad name
  fs.mkdirs(new Path(new Path(root, "b=1"), "a=1")) // wrong order
  fs.mkdirs(new Path(root, "a=4")) // not enough columns
  fs.createNewFile(new Path(new Path(root, "a=1"), "b=4")) // file
  fs.createNewFile(new Path(new Path(root, "a=1"), "_SUCCESS")) // _SUCCESS
  fs.mkdirs(new Path(new Path(root, "a=1"), "_temporary")) // _temporary
  fs.mkdirs(new Path(new Path(root, "a=1"), ".b=4")) // start with .

  try {
    sql("ALTER TABLE tab1 RECOVER PARTITIONS")
    assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
      Set(part1, part2))
    if (!isUsingHiveMetastore) {
      assert(catalog.getPartition(tableIdent, part1).parameters("numFiles") == "1")
      assert(catalog.getPartition(tableIdent, part2).parameters("numFiles") == "2")
    } else {
      // After ALTER TABLE, the statistics of the first partition is removed by Hive metastore
      assert(catalog.getPartition(tableIdent, part1).parameters.get("numFiles").isEmpty)
      assert(catalog.getPartition(tableIdent, part2).parameters("numFiles") == "2")
    }
  } finally {
    // clean up the table location even if the assertions above fail
    fs.delete(root, true)
  }
}
// ALTER VIEW ... ADD PARTITION must be rejected as unsupported.
test("alter table: add partition is not supported for views") {
assertUnsupported("ALTER VIEW dbx.tab1 ADD IF NOT EXISTS PARTITION (b='2')")
}
// ALTER VIEW ... DROP PARTITION must be rejected as unsupported.
test("alter table: drop partition is not supported for views") {
assertUnsupported("ALTER VIEW dbx.tab1 DROP IF EXISTS PARTITION (b='2')")
}
// SHOW DATABASES returns lower-cased names in sorted order, and SHOW DATABASES
// LIKE supports '*' wildcards and '|' alternation in its pattern.
test("show databases") {
  sql("CREATE DATABASE showdb2B")
  sql("CREATE DATABASE showdb1A")
  // check the result as well as its order
  checkDataset(sql("SHOW DATABASES"), Row("default"), Row("showdb1a"), Row("showdb2b"))
  // Each LIKE pattern paired with exactly the rows it is expected to match.
  val patternToExpected = Seq(
    "*db1A" -> Seq(Row("showdb1a")),
    "showdb1A" -> Seq(Row("showdb1a")),
    "*db1A|*db2B" -> Seq(Row("showdb1a"), Row("showdb2b")),
    "non-existentdb" -> Seq.empty[Row])
  patternToExpected.foreach { case (pattern, expected) =>
    checkAnswer(sql(s"SHOW DATABASES LIKE '$pattern'"), expected)
  }
}
// DROP VIEW removes a temporary view from the session catalog.
test("drop view - temporary view") {
val catalog = spark.sessionState.catalog
sql(
"""
|CREATE TEMPORARY VIEW tab1
|USING org.apache.spark.sql.sources.DDLScanSource
|OPTIONS (
| From '1',
| To '10',
| Table 'test1'
|)
""".stripMargin)
assert(catalog.listTables("default") == Seq(TableIdentifier("tab1")))
sql("DROP VIEW tab1")
assert(catalog.listTables("default") == Nil)
}
// Shared body for DROP TABLE tests: dropping removes the table, IF EXISTS makes a
// second drop a no-op, and dropping a missing table without IF EXISTS fails.
// `isDatasourceTable` selects the table flavor; InMemoryCatalog supports only
// data source tables, hence the guard below.
protected def testDropTable(isDatasourceTable: Boolean): Unit = {
if (!isUsingHiveMetastore) {
assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
}
val catalog = spark.sessionState.catalog
val tableIdent = TableIdentifier("tab1", Some("dbx"))
createDatabase(catalog, "dbx")
createTable(catalog, tableIdent, isDatasourceTable)
assert(catalog.listTables("dbx") == Seq(tableIdent))
sql("DROP TABLE dbx.tab1")
assert(catalog.listTables("dbx") == Nil)
sql("DROP TABLE IF EXISTS dbx.tab1")
intercept[AnalysisException] {
sql("DROP TABLE dbx.tab1")
}
}
// DROP VIEW on a table (not a view) must fail with a message pointing the user to
// DROP TABLE instead.
test("drop view") {
val catalog = spark.sessionState.catalog
val tableIdent = TableIdentifier("tab1", Some("dbx"))
createDatabase(catalog, "dbx")
createTable(catalog, tableIdent)
assert(catalog.listTables("dbx") == Seq(tableIdent))
val e = intercept[AnalysisException] {
sql("DROP VIEW dbx.tab1")
}
assert(
e.getMessage.contains("Cannot drop a table with DROP VIEW. Please use DROP TABLE instead"))
}
// Shared body for ALTER TABLE ... SET TBLPROPERTIES tests: setting properties
// merges with existing ones, works without a database qualifier once the current
// database is set, and fails for a missing table.
protected def testSetProperties(isDatasourceTable: Boolean): Unit = {
if (!isUsingHiveMetastore) {
assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
}
val catalog = spark.sessionState.catalog
val tableIdent = TableIdentifier("tab1", Some("dbx"))
createDatabase(catalog, "dbx")
createTable(catalog, tableIdent, isDatasourceTable)
// Reads the table's current properties; Hive adds extra bookkeeping properties,
// so they are normalized away when a Hive metastore is in use.
def getProps: Map[String, String] = {
if (isUsingHiveMetastore) {
normalizeCatalogTable(catalog.getTableMetadata(tableIdent)).properties
} else {
catalog.getTableMetadata(tableIdent).properties
}
}
assert(getProps.isEmpty)
// set table properties
sql("ALTER TABLE dbx.tab1 SET TBLPROPERTIES ('andrew' = 'or14', 'kor' = 'bel')")
assert(getProps == Map("andrew" -> "or14", "kor" -> "bel"))
// set table properties without explicitly specifying database
catalog.setCurrentDatabase("dbx")
sql("ALTER TABLE tab1 SET TBLPROPERTIES ('kor' = 'belle', 'kar' = 'bol')")
assert(getProps == Map("andrew" -> "or14", "kor" -> "belle", "kar" -> "bol"))
// table to alter does not exist
intercept[AnalysisException] {
sql("ALTER TABLE does_not_exist SET TBLPROPERTIES ('winner' = 'loser')")
}
}
// Shared body for ALTER TABLE ... UNSET TBLPROPERTIES tests: unsetting removes
// only the named keys, fails on a missing key unless IF EXISTS is given, and
// fails for a missing table.
protected def testUnsetProperties(isDatasourceTable: Boolean): Unit = {
if (!isUsingHiveMetastore) {
assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
}
val catalog = spark.sessionState.catalog
val tableIdent = TableIdentifier("tab1", Some("dbx"))
createDatabase(catalog, "dbx")
createTable(catalog, tableIdent, isDatasourceTable)
// Reads the table's current properties, normalized when a Hive metastore is in use.
def getProps: Map[String, String] = {
if (isUsingHiveMetastore) {
normalizeCatalogTable(catalog.getTableMetadata(tableIdent)).properties
} else {
catalog.getTableMetadata(tableIdent).properties
}
}
// unset table properties
sql("ALTER TABLE dbx.tab1 SET TBLPROPERTIES ('j' = 'am', 'p' = 'an', 'c' = 'lan', 'x' = 'y')")
sql("ALTER TABLE dbx.tab1 UNSET TBLPROPERTIES ('j')")
assert(getProps == Map("p" -> "an", "c" -> "lan", "x" -> "y"))
// unset table properties without explicitly specifying database
catalog.setCurrentDatabase("dbx")
sql("ALTER TABLE tab1 UNSET TBLPROPERTIES ('p')")
assert(getProps == Map("c" -> "lan", "x" -> "y"))
// table to alter does not exist
intercept[AnalysisException] {
sql("ALTER TABLE does_not_exist UNSET TBLPROPERTIES ('c' = 'lan')")
}
// property to unset does not exist
val e = intercept[AnalysisException] {
sql("ALTER TABLE tab1 UNSET TBLPROPERTIES ('c', 'xyz')")
}
assert(e.getMessage.contains("xyz"))
// property to unset does not exist, but "IF EXISTS" is specified
sql("ALTER TABLE tab1 UNSET TBLPROPERTIES IF EXISTS ('c', 'xyz')")
assert(getProps == Map("x" -> "y"))
}
// Shared body for ALTER TABLE ... SET LOCATION tests, covering table-level and
// partition-level locations, with and without a database qualifier, plus the
// failure cases for a missing table and a missing partition.
protected def testSetLocation(isDatasourceTable: Boolean): Unit = {
if (!isUsingHiveMetastore) {
assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
}
val catalog = spark.sessionState.catalog
val tableIdent = TableIdentifier("tab1", Some("dbx"))
val partSpec = Map("a" -> "1", "b" -> "2")
createDatabase(catalog, "dbx")
createTable(catalog, tableIdent, isDatasourceTable)
createTablePartition(catalog, partSpec, tableIdent)
assert(catalog.getTableMetadata(tableIdent).storage.locationUri.isDefined)
assert(normalizeSerdeProp(catalog.getTableMetadata(tableIdent).storage.properties).isEmpty)
assert(catalog.getPartition(tableIdent, partSpec).storage.locationUri.isDefined)
assert(
normalizeSerdeProp(catalog.getPartition(tableIdent, partSpec).storage.properties).isEmpty)
// Verify that the location is set to the expected string; when `spec` is given the
// partition's storage is checked, otherwise the table's.
def verifyLocation(expected: URI, spec: Option[TablePartitionSpec] = None): Unit = {
val storageFormat = spec
.map { s => catalog.getPartition(tableIdent, s).storage }
.getOrElse { catalog.getTableMetadata(tableIdent).storage }
// TODO(gatorsmile): fix the bug in alter table set location.
// if (isUsingHiveMetastore) {
//  assert(storageFormat.properties.get("path") === expected)
// }
assert(storageFormat.locationUri === Some(expected))
}
// set table location
sql("ALTER TABLE dbx.tab1 SET LOCATION '/path/to/your/lovely/heart'")
verifyLocation(new URI("/path/to/your/lovely/heart"))
// set table partition location
sql("ALTER TABLE dbx.tab1 PARTITION (a='1', b='2') SET LOCATION '/path/to/part/ways'")
verifyLocation(new URI("/path/to/part/ways"), Some(partSpec))
// set table location without explicitly specifying database
catalog.setCurrentDatabase("dbx")
sql("ALTER TABLE tab1 SET LOCATION '/swanky/steak/place'")
verifyLocation(new URI("/swanky/steak/place"))
// set table partition location without explicitly specifying database
sql("ALTER TABLE tab1 PARTITION (a='1', b='2') SET LOCATION 'vienna'")
verifyLocation(new URI("vienna"), Some(partSpec))
// table to alter does not exist
intercept[AnalysisException] {
sql("ALTER TABLE dbx.does_not_exist SET LOCATION '/mister/spark'")
}
// partition to alter does not exist
intercept[AnalysisException] {
sql("ALTER TABLE dbx.tab1 PARTITION (b='2') SET LOCATION '/mister/spark'")
}
}
// Shared body for ALTER TABLE ... SET SERDE / SET SERDEPROPERTIES tests at the
// table level. Changing the serde must fail on data source tables; serde
// properties can be set on either flavor.
protected def testSetSerde(isDatasourceTable: Boolean): Unit = {
if (!isUsingHiveMetastore) {
assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
}
val catalog = spark.sessionState.catalog
val tableIdent = TableIdentifier("tab1", Some("dbx"))
createDatabase(catalog, "dbx")
createTable(catalog, tableIdent, isDatasourceTable)
// Compares the table's serde properties with `expectedSerdeProps`, normalizing
// Hive-added bookkeeping properties when a Hive metastore is in use.
def checkSerdeProps(expectedSerdeProps: Map[String, String]): Unit = {
val serdeProp = catalog.getTableMetadata(tableIdent).storage.properties
if (isUsingHiveMetastore) {
assert(normalizeSerdeProp(serdeProp) == expectedSerdeProps)
} else {
assert(serdeProp == expectedSerdeProps)
}
}
// Hive assigns a default serde depending on the table flavor; InMemoryCatalog leaves it empty.
if (isUsingHiveMetastore) {
val expectedSerde = if (isDatasourceTable) {
"org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"
} else {
"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"
}
assert(catalog.getTableMetadata(tableIdent).storage.serde == Some(expectedSerde))
} else {
assert(catalog.getTableMetadata(tableIdent).storage.serde.isEmpty)
}
checkSerdeProps(Map.empty[String, String])
// set table serde and/or properties (should fail on datasource tables)
if (isDatasourceTable) {
val e1 = intercept[AnalysisException] {
sql("ALTER TABLE dbx.tab1 SET SERDE 'whatever'")
}
val e2 = intercept[AnalysisException] {
sql("ALTER TABLE dbx.tab1 SET SERDE 'org.apache.madoop' " +
"WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee')")
}
assert(e1.getMessage.contains("datasource"))
assert(e2.getMessage.contains("datasource"))
} else {
val newSerde = "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"
sql(s"ALTER TABLE dbx.tab1 SET SERDE '$newSerde'")
assert(catalog.getTableMetadata(tableIdent).storage.serde == Some(newSerde))
checkSerdeProps(Map.empty[String, String])
val serde2 = "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe"
sql(s"ALTER TABLE dbx.tab1 SET SERDE '$serde2' " +
"WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee')")
assert(catalog.getTableMetadata(tableIdent).storage.serde == Some(serde2))
checkSerdeProps(Map("k" -> "v", "kay" -> "vee"))
}
// set serde properties only
sql("ALTER TABLE dbx.tab1 SET SERDEPROPERTIES ('k' = 'vvv', 'kay' = 'vee')")
checkSerdeProps(Map("k" -> "vvv", "kay" -> "vee"))
// set things without explicitly specifying database
catalog.setCurrentDatabase("dbx")
sql("ALTER TABLE tab1 SET SERDEPROPERTIES ('kay' = 'veee')")
checkSerdeProps(Map("k" -> "vvv", "kay" -> "veee"))
// table to alter does not exist
intercept[AnalysisException] {
sql("ALTER TABLE does_not_exist SET SERDEPROPERTIES ('x' = 'y')")
}
}
// Shared body for ALTER TABLE ... PARTITION (...) SET SERDE / SET SERDEPROPERTIES
// tests. Mirrors testSetSerde but operates on a single partition's storage.
protected def testSetSerdePartition(isDatasourceTable: Boolean): Unit = {
if (!isUsingHiveMetastore) {
assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
}
val catalog = spark.sessionState.catalog
val tableIdent = TableIdentifier("tab1", Some("dbx"))
val spec = Map("a" -> "1", "b" -> "2")
createDatabase(catalog, "dbx")
createTable(catalog, tableIdent, isDatasourceTable)
createTablePartition(catalog, spec, tableIdent)
createTablePartition(catalog, Map("a" -> "1", "b" -> "3"), tableIdent)
createTablePartition(catalog, Map("a" -> "2", "b" -> "2"), tableIdent)
createTablePartition(catalog, Map("a" -> "2", "b" -> "3"), tableIdent)
// Compares the target partition's serde properties with `expectedSerdeProps`,
// normalizing Hive-added properties when a Hive metastore is in use.
def checkPartitionSerdeProps(expectedSerdeProps: Map[String, String]): Unit = {
val serdeProp = catalog.getPartition(tableIdent, spec).storage.properties
if (isUsingHiveMetastore) {
assert(normalizeSerdeProp(serdeProp) == expectedSerdeProps)
} else {
assert(serdeProp == expectedSerdeProps)
}
}
// Hive assigns a default serde depending on the table flavor; InMemoryCatalog leaves it empty.
if (isUsingHiveMetastore) {
val expectedSerde = if (isDatasourceTable) {
"org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"
} else {
"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"
}
assert(catalog.getPartition(tableIdent, spec).storage.serde == Some(expectedSerde))
} else {
assert(catalog.getPartition(tableIdent, spec).storage.serde.isEmpty)
}
checkPartitionSerdeProps(Map.empty[String, String])
// set table serde and/or properties (should fail on datasource tables)
if (isDatasourceTable) {
val e1 = intercept[AnalysisException] {
sql("ALTER TABLE dbx.tab1 PARTITION (a=1, b=2) SET SERDE 'whatever'")
}
val e2 = intercept[AnalysisException] {
sql("ALTER TABLE dbx.tab1 PARTITION (a=1, b=2) SET SERDE 'org.apache.madoop' " +
"WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee')")
}
assert(e1.getMessage.contains("datasource"))
assert(e2.getMessage.contains("datasource"))
} else {
sql("ALTER TABLE dbx.tab1 PARTITION (a=1, b=2) SET SERDE 'org.apache.jadoop'")
assert(catalog.getPartition(tableIdent, spec).storage.serde == Some("org.apache.jadoop"))
checkPartitionSerdeProps(Map.empty[String, String])
sql("ALTER TABLE dbx.tab1 PARTITION (a=1, b=2) SET SERDE 'org.apache.madoop' " +
"WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee')")
assert(catalog.getPartition(tableIdent, spec).storage.serde == Some("org.apache.madoop"))
checkPartitionSerdeProps(Map("k" -> "v", "kay" -> "vee"))
}
// set serde properties only
maybeWrapException(isDatasourceTable) {
sql("ALTER TABLE dbx.tab1 PARTITION (a=1, b=2) " +
"SET SERDEPROPERTIES ('k' = 'vvv', 'kay' = 'vee')")
checkPartitionSerdeProps(Map("k" -> "vvv", "kay" -> "vee"))
}
// set things without explicitly specifying database
catalog.setCurrentDatabase("dbx")
maybeWrapException(isDatasourceTable) {
sql("ALTER TABLE tab1 PARTITION (a=1, b=2) SET SERDEPROPERTIES ('kay' = 'veee')")
checkPartitionSerdeProps(Map("k" -> "vvv", "kay" -> "veee"))
}
// table to alter does not exist
intercept[AnalysisException] {
sql("ALTER TABLE does_not_exist PARTITION (a=1, b=2) SET SERDEPROPERTIES ('x' = 'y')")
}
}
// Shared body for ALTER TABLE ... ADD PARTITION tests: multiple partitions in one
// statement, an explicit LOCATION, IF NOT EXISTS semantics, failure on a missing
// table or an existing partition, and case-insensitive partition column names.
protected def testAddPartitions(isDatasourceTable: Boolean): Unit = {
if (!isUsingHiveMetastore) {
assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
}
val catalog = spark.sessionState.catalog
val tableIdent = TableIdentifier("tab1", Some("dbx"))
val part1 = Map("a" -> "1", "b" -> "5")
val part2 = Map("a" -> "2", "b" -> "6")
val part3 = Map("a" -> "3", "b" -> "7")
val part4 = Map("a" -> "4", "b" -> "8")
val part5 = Map("a" -> "9", "b" -> "9")
createDatabase(catalog, "dbx")
createTable(catalog, tableIdent, isDatasourceTable)
createTablePartition(catalog, part1, tableIdent)
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1))
// basic add partition
sql("ALTER TABLE dbx.tab1 ADD IF NOT EXISTS " +
"PARTITION (a='2', b='6') LOCATION 'paris' PARTITION (a='3', b='7')")
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part2, part3))
assert(catalog.getPartition(tableIdent, part1).storage.locationUri.isDefined)
// Hive qualifies the relative location against the table location; InMemoryCatalog keeps it as-is.
val partitionLocation = if (isUsingHiveMetastore) {
val tableLocation = catalog.getTableMetadata(tableIdent).storage.locationUri
assert(tableLocation.isDefined)
makeQualifiedPath(new Path(tableLocation.get.toString, "paris").toString)
} else {
new URI("paris")
}
assert(catalog.getPartition(tableIdent, part2).storage.locationUri == Option(partitionLocation))
assert(catalog.getPartition(tableIdent, part3).storage.locationUri.isDefined)
// add partitions without explicitly specifying database
catalog.setCurrentDatabase("dbx")
sql("ALTER TABLE tab1 ADD IF NOT EXISTS PARTITION (a='4', b='8')")
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
Set(part1, part2, part3, part4))
// table to alter does not exist
intercept[AnalysisException] {
sql("ALTER TABLE does_not_exist ADD IF NOT EXISTS PARTITION (a='4', b='9')")
}
// partition to add already exists
intercept[AnalysisException] {
sql("ALTER TABLE tab1 ADD PARTITION (a='4', b='8')")
}
// partition to add already exists when using IF NOT EXISTS
sql("ALTER TABLE tab1 ADD IF NOT EXISTS PARTITION (a='4', b='8')")
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
Set(part1, part2, part3, part4))
// partition spec in ADD PARTITION should be case insensitive by default
sql("ALTER TABLE tab1 ADD PARTITION (A='9', B='9')")
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
Set(part1, part2, part3, part4, part5))
}
// Shared body for ALTER TABLE ... DROP PARTITION tests: multiple partitions in one
// statement, IF EXISTS semantics, failure on a missing table or partition,
// case-insensitive column names, and int literals as partition values.
protected def testDropPartitions(isDatasourceTable: Boolean): Unit = {
if (!isUsingHiveMetastore) {
assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
}
val catalog = spark.sessionState.catalog
val tableIdent = TableIdentifier("tab1", Some("dbx"))
val part1 = Map("a" -> "1", "b" -> "5")
val part2 = Map("a" -> "2", "b" -> "6")
val part3 = Map("a" -> "3", "b" -> "7")
val part4 = Map("a" -> "4", "b" -> "8")
val part5 = Map("a" -> "9", "b" -> "9")
createDatabase(catalog, "dbx")
createTable(catalog, tableIdent, isDatasourceTable)
createTablePartition(catalog, part1, tableIdent)
createTablePartition(catalog, part2, tableIdent)
createTablePartition(catalog, part3, tableIdent)
createTablePartition(catalog, part4, tableIdent)
createTablePartition(catalog, part5, tableIdent)
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
Set(part1, part2, part3, part4, part5))
// basic drop partition
sql("ALTER TABLE dbx.tab1 DROP IF EXISTS PARTITION (a='4', b='8'), PARTITION (a='3', b='7')")
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part2, part5))
// drop partitions without explicitly specifying database
catalog.setCurrentDatabase("dbx")
sql("ALTER TABLE tab1 DROP IF EXISTS PARTITION (a='2', b ='6')")
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part5))
// table to alter does not exist
intercept[AnalysisException] {
sql("ALTER TABLE does_not_exist DROP IF EXISTS PARTITION (a='2')")
}
// partition to drop does not exist
intercept[AnalysisException] {
sql("ALTER TABLE tab1 DROP PARTITION (a='300')")
}
// partition to drop does not exist when using IF EXISTS
sql("ALTER TABLE tab1 DROP IF EXISTS PARTITION (a='300')")
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part5))
// partition spec in DROP PARTITION should be case insensitive by default
sql("ALTER TABLE tab1 DROP PARTITION (A='1', B='5')")
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part5))
// use int literal as partition value for int type partition column
sql("ALTER TABLE tab1 DROP PARTITION (a=9, b=9)")
assert(catalog.listPartitions(tableIdent).isEmpty)
}
// Shared body for ALTER TABLE ... PARTITION ... RENAME TO PARTITION tests:
// renames with and without a database qualifier, failure on a missing table or
// partition, and case-insensitive partition column names.
protected def testRenamePartitions(isDatasourceTable: Boolean): Unit = {
if (!isUsingHiveMetastore) {
assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
}
val catalog = spark.sessionState.catalog
val tableIdent = TableIdentifier("tab1", Some("dbx"))
val part1 = Map("a" -> "1", "b" -> "q")
val part2 = Map("a" -> "2", "b" -> "c")
val part3 = Map("a" -> "3", "b" -> "p")
createDatabase(catalog, "dbx")
createTable(catalog, tableIdent, isDatasourceTable)
createTablePartition(catalog, part1, tableIdent)
createTablePartition(catalog, part2, tableIdent)
createTablePartition(catalog, part3, tableIdent)
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part2, part3))
// basic rename partition
sql("ALTER TABLE dbx.tab1 PARTITION (a='1', b='q') RENAME TO PARTITION (a='100', b='p')")
sql("ALTER TABLE dbx.tab1 PARTITION (a='2', b='c') RENAME TO PARTITION (a='20', b='c')")
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
Set(Map("a" -> "100", "b" -> "p"), Map("a" -> "20", "b" -> "c"), Map("a" -> "3", "b" -> "p")))
// rename without explicitly specifying database
catalog.setCurrentDatabase("dbx")
sql("ALTER TABLE tab1 PARTITION (a='100', b='p') RENAME TO PARTITION (a='10', b='p')")
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
Set(Map("a" -> "10", "b" -> "p"), Map("a" -> "20", "b" -> "c"), Map("a" -> "3", "b" -> "p")))
// table to alter does not exist
intercept[NoSuchTableException] {
sql("ALTER TABLE does_not_exist PARTITION (c='3') RENAME TO PARTITION (c='333')")
}
// partition to rename does not exist
intercept[NoSuchPartitionException] {
sql("ALTER TABLE tab1 PARTITION (a='not_found', b='1') RENAME TO PARTITION (a='1', b='2')")
}
// partition spec in RENAME PARTITION should be case insensitive by default
sql("ALTER TABLE tab1 PARTITION (A='10', B='p') RENAME TO PARTITION (A='1', B='p')")
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
Set(Map("a" -> "1", "b" -> "p"), Map("a" -> "20", "b" -> "c"), Map("a" -> "3", "b" -> "p")))
}
// Shared body for ALTER TABLE ... CHANGE COLUMN tests: adding a comment must
// preserve the column's other metadata fields.
// NOTE(review): the final assertion expects a pre-existing "key" -> "value" entry
// in col1's metadata, which must be set up by createTable — confirm in the helper.
protected def testChangeColumn(isDatasourceTable: Boolean): Unit = {
if (!isUsingHiveMetastore) {
assert(isDatasourceTable, "InMemoryCatalog only supports data source tables")
}
val catalog = spark.sessionState.catalog
val resolver = spark.sessionState.conf.resolver
val tableIdent = TableIdentifier("tab1", Some("dbx"))
createDatabase(catalog, "dbx")
createTable(catalog, tableIdent, isDatasourceTable)
// Looks up a column by name using the session's resolver (honors case sensitivity)
// and returns its metadata, or empty metadata if the column is absent.
def getMetadata(colName: String): Metadata = {
val column = catalog.getTableMetadata(tableIdent).schema.fields.find { field =>
resolver(field.name, colName)
}
column.map(_.metadata).getOrElse(Metadata.empty)
}
// Ensure that change column will preserve other metadata fields.
sql("ALTER TABLE dbx.tab1 CHANGE COLUMN col1 col1 INT COMMENT 'this is col1'")
assert(getMetadata("col1").getString("key") == "value")
}
// Built-in (native) functions cannot be dropped, regardless of case sensitivity
// setting, argument casing, or backtick quoting.
test("drop build-in function") {
Seq("true", "false").foreach { caseSensitive =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive) {
// dropping a native function must fail however the name is spelled
var e = intercept[AnalysisException] {
sql("DROP TEMPORARY FUNCTION year")
}
assert(e.getMessage.contains("Cannot drop native function 'year'"))
e = intercept[AnalysisException] {
sql("DROP TEMPORARY FUNCTION YeAr")
}
assert(e.getMessage.contains("Cannot drop native function 'YeAr'"))
e = intercept[AnalysisException] {
sql("DROP TEMPORARY FUNCTION `YeAr`")
}
assert(e.getMessage.contains("Cannot drop native function 'YeAr'"))
}
}
}
// DESCRIBE FUNCTION output for regular functions, logical/arithmetic/comparison
// operators, a quoted function name, and EXTENDED mode with usage examples.
test("describe function") {
checkAnswer(
sql("DESCRIBE FUNCTION log"),
Row("Class: org.apache.spark.sql.catalyst.expressions.Logarithm") ::
Row("Function: log") ::
Row("Usage: log(base, expr) - Returns the logarithm of `expr` with `base`.") :: Nil
)
// predicate operator
checkAnswer(
sql("DESCRIBE FUNCTION or"),
Row("Class: org.apache.spark.sql.catalyst.expressions.Or") ::
Row("Function: or") ::
Row("Usage: expr1 or expr2 - Logical OR.") :: Nil
)
checkAnswer(
sql("DESCRIBE FUNCTION !"),
Row("Class: org.apache.spark.sql.catalyst.expressions.Not") ::
Row("Function: !") ::
Row("Usage: ! expr - Logical not.") :: Nil
)
// arithmetic operators
checkAnswer(
sql("DESCRIBE FUNCTION +"),
Row("Class: org.apache.spark.sql.catalyst.expressions.Add") ::
Row("Function: +") ::
Row("Usage: expr1 + expr2 - Returns `expr1`+`expr2`.") :: Nil
)
// comparison operators
checkAnswer(
sql("DESCRIBE FUNCTION <"),
Row("Class: org.apache.spark.sql.catalyst.expressions.LessThan") ::
Row("Function: <") ::
Row("Usage: expr1 < expr2 - Returns true if `expr1` is less than `expr2`.") :: Nil
)
// STRING
checkAnswer(
sql("DESCRIBE FUNCTION 'concat'"),
Row("Class: org.apache.spark.sql.catalyst.expressions.Concat") ::
Row("Function: concat") ::
Row("Usage: concat(str1, str2, ..., strN) - " +
"Returns the concatenation of str1, str2, ..., strN.") :: Nil
)
// extended mode
checkAnswer(
sql("DESCRIBE FUNCTION EXTENDED ^"),
Row("Class: org.apache.spark.sql.catalyst.expressions.BitwiseXor") ::
Row(
"""Extended Usage:
| Examples:
| > SELECT 3 ^ 5;
| 2
| """.stripMargin) ::
Row("Function: ^") ::
Row("Usage: expr1 ^ expr2 - Returns the result of " +
"bitwise exclusive OR of `expr1` and `expr2`.") :: Nil
)
}
// CREATE TABLE ... USING json without a schema must fail when there is no data to
// infer from, but succeed when a LOCATION with existing data is provided.
test("create a data source table without schema") {
import testImplicits._
withTempPath { tempDir =>
withTable("tab1", "tab2") {
(("a", "b") :: Nil).toDF().write.json(tempDir.getCanonicalPath)
val e = intercept[AnalysisException] { sql("CREATE TABLE tab1 USING json") }.getMessage
assert(e.contains("Unable to infer schema for JSON. It must be specified manually"))
sql(s"CREATE TABLE tab2 using json location '${tempDir.toURI}'")
checkAnswer(spark.table("tab2"), Row("a", "b"))
}
}
}
// Bucketing (CLUSTERED BY) cannot be combined with runtime schema inference: the
// CREATE TABLE must fail with a specific message.
test("create table using CLUSTERED BY without schema specification") {
import testImplicits._
withTempPath { tempDir =>
withTable("jsonTable") {
(("a", "b") :: Nil).toDF().write.json(tempDir.getCanonicalPath)
val e = intercept[AnalysisException] {
sql(
s"""
|CREATE TABLE jsonTable
|USING org.apache.spark.sql.json
|OPTIONS (
| path '${tempDir.getCanonicalPath}'
|)
|CLUSTERED BY (inexistentColumnA) SORTED BY (inexistentColumnB) INTO 2 BUCKETS
""".stripMargin)
}
assert(e.message == "Cannot specify bucketing information if the table schema is not " +
"specified when creating and will be inferred at runtime")
}
}
}
test("Create Data Source Table As Select") {
import testImplicits._
withTable("t", "t1", "t2") {
sql("CREATE TABLE t USING parquet SELECT 1 as a, 1 as b")
checkAnswer(spark.table("t"), Row(1, 1) :: Nil)
spark.range(1).select('id as 'a, 'id as 'b).write.saveAsTable("t1")
sql("CREATE TABLE t2 USING parquet SELECT a, b from t1")
checkAnswer(spark.table("t2"), spark.table("t1"))
}
}
test("drop current database") {
withDatabase("temp") {
sql("CREATE DATABASE temp")
sql("USE temp")
sql("DROP DATABASE temp")
val e = intercept[AnalysisException] {
sql("CREATE TABLE t (a INT, b INT) USING parquet")
}.getMessage
assert(e.contains("Database 'temp' not found"))
}
}
test("drop default database") {
val caseSensitiveOptions = if (isUsingHiveMetastore) Seq("false") else Seq("true", "false")
caseSensitiveOptions.foreach { caseSensitive =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive) {
var message = intercept[AnalysisException] {
sql("DROP DATABASE default")
}.getMessage
assert(message.contains("Can not drop default database"))
message = intercept[AnalysisException] {
sql("DROP DATABASE DeFault")
}.getMessage
if (caseSensitive == "true") {
assert(message.contains("Database 'DeFault' not found"))
} else {
assert(message.contains("Can not drop default database"))
}
}
}
}
test("truncate table - datasource table") {
import testImplicits._
val data = (1 to 10).map { i => (i, i) }.toDF("width", "length")
// Test both a Hive compatible and incompatible code path.
Seq("json", "parquet").foreach { format =>
withTable("rectangles") {
data.write.format(format).saveAsTable("rectangles")
assume(spark.table("rectangles").collect().nonEmpty,
"bad test; table was empty to begin with")
sql("TRUNCATE TABLE rectangles")
assert(spark.table("rectangles").collect().isEmpty)
// not supported since the table is not partitioned
assertUnsupported("TRUNCATE TABLE rectangles PARTITION (width=1)")
}
}
}
// TRUNCATE with full spec removes one partition, a partial spec removes all
// matching partitions, an unmatched partial spec is a no-op, an unmatched full
// spec raises NoSuchPartitionException, and a non-partition column is rejected.
test("truncate partitioned table - datasource table") {
import testImplicits._
val data = (1 to 10).map { i => (i % 3, i % 5, i) }.toDF("width", "length", "height")
withTable("partTable") {
data.write.partitionBy("width", "length").saveAsTable("partTable")
// supported since partitions are stored in the metastore
sql("TRUNCATE TABLE partTable PARTITION (width=1, length=1)")
assert(spark.table("partTable").filter($"width" === 1).collect().nonEmpty)
assert(spark.table("partTable").filter($"width" === 1 && $"length" === 1).collect().isEmpty)
}
withTable("partTable") {
data.write.partitionBy("width", "length").saveAsTable("partTable")
// support partial partition spec
sql("TRUNCATE TABLE partTable PARTITION (width=1)")
assert(spark.table("partTable").collect().nonEmpty)
assert(spark.table("partTable").filter($"width" === 1).collect().isEmpty)
}
withTable("partTable") {
data.write.partitionBy("width", "length").saveAsTable("partTable")
// do nothing if no partition is matched for the given partial partition spec
sql("TRUNCATE TABLE partTable PARTITION (width=100)")
assert(spark.table("partTable").count() == data.count())
// throw exception if no partition is matched for the given non-partial partition spec.
intercept[NoSuchPartitionException] {
sql("TRUNCATE TABLE partTable PARTITION (width=100, length=100)")
}
// throw exception if the column in partition spec is not a partition column.
val e = intercept[AnalysisException] {
sql("TRUNCATE TABLE partTable PARTITION (unknown=1)")
}
assert(e.message.contains("unknown is not a valid partition column"))
}
}
// A temporary view's explicit column list must match the arity of its SELECT.
test("create temporary view with mismatched schema") {
withTable("tab1") {
spark.range(10).write.saveAsTable("tab1")
withView("view1") {
val e = intercept[AnalysisException] {
sql("CREATE TEMPORARY VIEW view1 (col1, col3) AS SELECT * FROM tab1")
}.getMessage
assert(e.contains("the SELECT clause (num: `1`) does not match")
&& e.contains("CREATE VIEW (num: `2`)"))
}
}
}
// A temporary view with a matching explicit column list is queryable.
test("create temporary view with specified schema") {
withView("view1") {
sql("CREATE TEMPORARY VIEW view1 (col1, col2) AS SELECT 1, 2")
checkAnswer(
sql("SELECT * FROM view1"),
Row(1, 2) :: Nil
)
}
}
// Creating a temporary table with the same name as an existing temporary view
// must fail with TempTableAlreadyExistsException.
test("block creating duplicate temp table") {
withView("t_temp") {
sql("CREATE TEMPORARY VIEW t_temp AS SELECT 1, 2")
val e = intercept[TempTableAlreadyExistsException] {
sql("CREATE TEMPORARY TABLE t_temp (c3 int, c4 string) USING JSON")
}.getMessage
assert(e.contains("Temporary table 't_temp' already exists"))
}
}
// TRUNCATE is rejected on external tables and views; a temp view is not a
// table at all, so truncating it raises NoSuchTableException.
test("truncate table - external table, temporary table, view (not allowed)") {
import testImplicits._
withTempPath { tempDir =>
withTable("my_ext_tab") {
(("a", "b") :: Nil).toDF().write.parquet(tempDir.getCanonicalPath)
(1 to 10).map { i => (i, i) }.toDF("a", "b").createTempView("my_temp_tab")
sql(s"CREATE TABLE my_ext_tab using parquet LOCATION '${tempDir.toURI}'")
sql(s"CREATE VIEW my_view AS SELECT 1")
intercept[NoSuchTableException] {
sql("TRUNCATE TABLE my_temp_tab")
}
assertUnsupported("TRUNCATE TABLE my_ext_tab")
assertUnsupported("TRUNCATE TABLE my_view")
}
}
}
// A PARTITION clause in TRUNCATE is rejected for non-partitioned tables.
test("truncate table - non-partitioned table (not allowed)") {
withTable("my_tab") {
sql("CREATE TABLE my_tab (age INT, name STRING) using parquet")
sql("INSERT INTO my_tab values (10, 'a')")
assertUnsupported("TRUNCATE TABLE my_tab PARTITION (age=10)")
}
}
// Appending to an existing partitioned table must use the same partition
// columns in the same order; case differences are tolerated (partition columns
// are currently matched case-insensitively on write).
test("SPARK-16034 Partition columns should match when appending to existing data source tables") {
import testImplicits._
val df = Seq((1, 2, 3)).toDF("a", "b", "c")
withTable("partitionedTable") {
df.write.mode("overwrite").partitionBy("a", "b").saveAsTable("partitionedTable")
// Misses some partition columns
intercept[AnalysisException] {
df.write.mode("append").partitionBy("a").saveAsTable("partitionedTable")
}
// Wrong order
intercept[AnalysisException] {
df.write.mode("append").partitionBy("b", "a").saveAsTable("partitionedTable")
}
// Partition columns not specified
intercept[AnalysisException] {
df.write.mode("append").saveAsTable("partitionedTable")
}
assert(sql("select * from partitionedTable").collect().size == 1)
// Inserts new data successfully when partition columns are correctly specified in
// partitionBy(...).
// TODO: Right now, partition columns are always treated in a case-insensitive way.
// See the write method in DataSource.scala.
Seq((4, 5, 6)).toDF("a", "B", "c")
.write
.mode("append")
.partitionBy("a", "B")
.saveAsTable("partitionedTable")
Seq((7, 8, 9)).toDF("a", "b", "c")
.write
.mode("append")
.partitionBy("a", "b")
.saveAsTable("partitionedTable")
checkAnswer(
sql("select a, b, c from partitionedTable"),
Row(1, 2, 3) :: Row(4, 5, 6) :: Row(7, 8, 9) :: Nil
)
}
}
// SHOW [SYSTEM|ALL|USER] FUNCTIONS counts: registering a UDF bumps the "all"
// and unqualified counts by one and the "user" count from 0 to 1, while the
// built-in ("system") count stays fixed.
test("show functions") {
withUserDefinedFunction("add_one" -> true) {
val numFunctions = FunctionRegistry.functionSet.size.toLong
assert(sql("show functions").count() === numFunctions)
assert(sql("show system functions").count() === numFunctions)
assert(sql("show all functions").count() === numFunctions)
assert(sql("show user functions").count() === 0L)
spark.udf.register("add_one", (x: Long) => x + 1)
assert(sql("show functions").count() === numFunctions + 1L)
assert(sql("show system functions").count() === numFunctions)
assert(sql("show all functions").count() === numFunctions + 1L)
assert(sql("show user functions").count() === 1L)
}
}
// SHOW COLUMNS with a database qualifier in the table identifier that
// conflicts (by case) with the FROM clause database must fail when
// case-sensitive resolution is on.
test("show columns - negative test") {
// When case sensitivity is true, the user supplied database name in table identifier
// should match the supplied database name in case sensitive way.
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
withTempDatabase { db =>
val tabName = s"$db.showcolumn"
withTable(tabName) {
sql(s"CREATE TABLE $tabName(col1 int, col2 string) USING parquet ")
val message = intercept[AnalysisException] {
sql(s"SHOW COLUMNS IN $db.showcolumn FROM ${db.toUpperCase(Locale.ROOT)}")
}.getMessage
assert(message.contains("SHOW COLUMNS with conflicting databases"))
}
}
}
}
// Regression test for SPARK-18009: command results (here SHOW DATABASES) must
// be consumable through toLocalIterator, not only through collect().
test("SPARK-18009 calling toLocalIterator on commands") {
import scala.collection.JavaConverters._
val databasesDf = sql("show databases")
val materialized: Seq[Row] = databasesDf.toLocalIterator().asScala.toSeq
assert(materialized.nonEmpty)
}
// ALTER TABLE SET LOCATION on a managed table points the table at the new
// path without moving existing data; DROP TABLE then removes only the new
// (current) location, so the old path is cleaned up manually in `finally`.
test("SET LOCATION for managed table") {
withTable("tbl") {
withTempDir { dir =>
sql("CREATE TABLE tbl(i INT) USING parquet")
sql("INSERT INTO tbl SELECT 1")
checkAnswer(spark.table("tbl"), Row(1))
val defaultTablePath = spark.sessionState.catalog
.getTableMetadata(TableIdentifier("tbl")).storage.locationUri.get
try {
sql(s"ALTER TABLE tbl SET LOCATION '${dir.toURI}'")
spark.catalog.refreshTable("tbl")
// SET LOCATION won't move data from previous table path to new table path.
assert(spark.table("tbl").count() == 0)
// the previous table path should be still there.
assert(new File(defaultTablePath).exists())
sql("INSERT INTO tbl SELECT 2")
checkAnswer(spark.table("tbl"), Row(2))
// newly inserted data will go to the new table path.
assert(dir.listFiles().nonEmpty)
sql("DROP TABLE tbl")
// the new table path will be removed after DROP TABLE.
assert(!dir.exists())
} finally {
Utils.deleteRecursively(new File(defaultTablePath))
}
}
}
}
// INSERT INTO / INSERT OVERWRITE recreate a missing table directory, and the
// same holds after ALTER TABLE SET LOCATION to a not-yet-existing path.
test("insert data to a data source table which has a non-existing location should succeed") {
withTable("t") {
withTempDir { dir =>
spark.sql(
s"""
|CREATE TABLE t(a string, b int)
|USING parquet
|OPTIONS(path "${dir.toURI}")
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
dir.delete
assert(!dir.exists)
spark.sql("INSERT INTO TABLE t SELECT 'c', 1")
assert(dir.exists)
checkAnswer(spark.table("t"), Row("c", 1) :: Nil)
Utils.deleteRecursively(dir)
assert(!dir.exists)
spark.sql("INSERT OVERWRITE TABLE t SELECT 'c', 1")
assert(dir.exists)
checkAnswer(spark.table("t"), Row("c", 1) :: Nil)
val newDirFile = new File(dir, "x")
val newDir = newDirFile.toURI
spark.sql(s"ALTER TABLE t SET LOCATION '$newDir'")
spark.sessionState.catalog.refreshTable(TableIdentifier("t"))
val table1 = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table1.location == newDir)
assert(!newDirFile.exists)
spark.sql("INSERT INTO TABLE t SELECT 'c', 1")
assert(newDirFile.exists)
checkAnswer(spark.table("t"), Row("c", 1) :: Nil)
}
}
}
// INSERT OVERWRITE into a partition whose directory was deleted out-of-band
// must recreate the partition directory and write the new rows.
test("insert into a data source table with a non-existing partition location should succeed") {
withTable("t") {
withTempDir { dir =>
spark.sql(
s"""
|CREATE TABLE t(a int, b int, c int, d int)
|USING parquet
|PARTITIONED BY(a, b)
|LOCATION "${dir.toURI}"
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
spark.sql("INSERT INTO TABLE t PARTITION(a=1, b=2) SELECT 3, 4")
checkAnswer(spark.table("t"), Row(3, 4, 1, 2) :: Nil)
val partLoc = new File(s"${dir.getAbsolutePath}/a=1")
Utils.deleteRecursively(partLoc)
assert(!partLoc.exists())
// insert overwrite into a partition which location has been deleted.
spark.sql("INSERT OVERWRITE TABLE t PARTITION(a=1, b=2) SELECT 7, 8")
assert(partLoc.exists())
checkAnswer(spark.table("t"), Row(7, 8, 1, 2) :: Nil)
}
}
}
// Reading a table whose location directory is missing returns an empty result
// rather than failing, including after SET LOCATION to a non-existent path.
test("read data from a data source table which has a non-existing location should succeed") {
withTable("t") {
withTempDir { dir =>
spark.sql(
s"""
|CREATE TABLE t(a string, b int)
|USING parquet
|OPTIONS(path "${dir.toURI}")
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
dir.delete()
checkAnswer(spark.table("t"), Nil)
val newDirFile = new File(dir, "x")
val newDir = newDirFile.toURI
spark.sql(s"ALTER TABLE t SET LOCATION '$newDir'")
val table1 = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table1.location == newDir)
assert(!newDirFile.exists())
checkAnswer(spark.table("t"), Nil)
}
}
}
// Selecting from a partition whose directory was deleted out-of-band returns
// empty (after REFRESH TABLE) instead of failing.
test("read data from a data source table with non-existing partition location should succeed") {
withTable("t") {
withTempDir { dir =>
spark.sql(
s"""
|CREATE TABLE t(a int, b int, c int, d int)
|USING parquet
|PARTITIONED BY(a, b)
|LOCATION "${dir.toURI}"
""".stripMargin)
spark.sql("INSERT INTO TABLE t PARTITION(a=1, b=2) SELECT 3, 4")
checkAnswer(spark.table("t"), Row(3, 4, 1, 2) :: Nil)
// select from a partition which location has been deleted.
Utils.deleteRecursively(dir)
assert(!dir.exists())
spark.sql("REFRESH TABLE t")
checkAnswer(spark.sql("select * from t where a=1 and b=2"), Nil)
}
}
}
// CREATE TABLE ... LOCATION pointing at a non-existent path succeeds; the
// directory (and partition subdirectories) are created on first INSERT.
test("create datasource table with a non-existing location") {
withTable("t", "t1") {
withTempPath { dir =>
spark.sql(s"CREATE TABLE t(a int, b int) USING parquet LOCATION '${dir.toURI}'")
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
spark.sql("INSERT INTO TABLE t SELECT 1, 2")
assert(dir.exists())
checkAnswer(spark.table("t"), Row(1, 2))
}
// partition table
withTempPath { dir =>
spark.sql(
s"CREATE TABLE t1(a int, b int) USING parquet PARTITIONED BY(a) LOCATION '${dir.toURI}'")
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
spark.sql("INSERT INTO TABLE t1 PARTITION(a=1) SELECT 2")
val partDir = new File(dir, "a=1")
assert(partDir.exists())
checkAnswer(spark.table("t1"), Row(2, 1))
}
}
}
// Parameterized over whether the target directory exists: CTAS with an
// explicit LOCATION must work for both an empty existing directory and a
// non-existing one, for plain and partitioned tables.
Seq(true, false).foreach { shouldDelete =>
val tcName = if (shouldDelete) "non-existing" else "existed"
test(s"CTAS for external data source table with a $tcName location") {
withTable("t", "t1") {
withTempDir { dir =>
if (shouldDelete) dir.delete()
spark.sql(
s"""
|CREATE TABLE t
|USING parquet
|LOCATION '${dir.toURI}'
|AS SELECT 3 as a, 4 as b, 1 as c, 2 as d
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
checkAnswer(spark.table("t"), Row(3, 4, 1, 2))
}
// partition table
withTempDir { dir =>
if (shouldDelete) dir.delete()
spark.sql(
s"""
|CREATE TABLE t1
|USING parquet
|PARTITIONED BY(a, b)
|LOCATION '${dir.toURI}'
|AS SELECT 3 as a, 4 as b, 1 as c, 2 as d
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
// partition columns are moved to the end of the schema, hence Row(c, d, a, b)
val partDir = new File(dir, "a=3")
assert(partDir.exists())
checkAnswer(spark.table("t1"), Row(1, 2, 3, 4))
}
}
}
}
// Partition column *names* containing special characters (space, colon,
// percent, comma) must be escaped in the partition directory name.
Seq("a b", "a:b", "a%b", "a,b").foreach { specialChars =>
test(s"data source table:partition column name containing $specialChars") {
// On Windows, it looks colon in the file name is illegal by default. See
// https://support.microsoft.com/en-us/help/289627
assume(!Utils.isWindows || specialChars != "a:b")
withTable("t") {
withTempDir { dir =>
spark.sql(
s"""
|CREATE TABLE t(a string, `$specialChars` string)
|USING parquet
|PARTITIONED BY(`$specialChars`)
|LOCATION '${dir.toURI}'
""".stripMargin)
assert(dir.listFiles().isEmpty)
spark.sql(s"INSERT INTO TABLE t PARTITION(`$specialChars`=2) SELECT 1")
val partEscaped = s"${ExternalCatalogUtils.escapePathName(specialChars)}=2"
val partFile = new File(dir, partEscaped)
assert(partFile.listFiles().nonEmpty)
checkAnswer(spark.table("t"), Row("1", "2") :: Nil)
}
}
}
}
// Table LOCATION paths containing special characters must round-trip through
// the catalog unescaped, and partition *values* with '%'-sequences must be
// double-escaped on disk (e.g. "12:13%3A14" -> "12%3A13%253A14").
Seq("a b", "a:b", "a%b").foreach { specialChars =>
test(s"location uri contains $specialChars for datasource table") {
// On Windows, it looks colon in the file name is illegal by default. See
// https://support.microsoft.com/en-us/help/289627
assume(!Utils.isWindows || specialChars != "a:b")
withTable("t", "t1") {
withTempDir { dir =>
val loc = new File(dir, specialChars)
loc.mkdir()
// The parser does not recognize the backslashes on Windows as they are.
// These currently should be escaped.
val escapedLoc = loc.getAbsolutePath.replace("\\\\", "\\\\\\\\")
spark.sql(
s"""
|CREATE TABLE t(a string)
|USING parquet
|LOCATION '$escapedLoc'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location == makeQualifiedPath(loc.getAbsolutePath))
assert(new Path(table.location).toString.contains(specialChars))
assert(loc.listFiles().isEmpty)
spark.sql("INSERT INTO TABLE t SELECT 1")
assert(loc.listFiles().nonEmpty)
checkAnswer(spark.table("t"), Row("1") :: Nil)
}
withTempDir { dir =>
val loc = new File(dir, specialChars)
loc.mkdir()
// The parser does not recognize the backslashes on Windows as they are.
// These currently should be escaped.
val escapedLoc = loc.getAbsolutePath.replace("\\\\", "\\\\\\\\")
spark.sql(
s"""
|CREATE TABLE t1(a string, b string)
|USING parquet
|PARTITIONED BY(b)
|LOCATION '$escapedLoc'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
assert(table.location == makeQualifiedPath(loc.getAbsolutePath))
assert(new Path(table.location).toString.contains(specialChars))
assert(loc.listFiles().isEmpty)
spark.sql("INSERT INTO TABLE t1 PARTITION(b=2) SELECT 1")
val partFile = new File(loc, "b=2")
assert(partFile.listFiles().nonEmpty)
checkAnswer(spark.table("t1"), Row("1", "2") :: Nil)
spark.sql("INSERT INTO TABLE t1 PARTITION(b='2017-03-03 12:13%3A14') SELECT 1")
val partFile1 = new File(loc, "b=2017-03-03 12:13%3A14")
assert(!partFile1.exists())
if (!Utils.isWindows) {
// Actual path becomes "b=2017-03-03%2012%3A13%253A14" on Windows.
val partFile2 = new File(loc, "b=2017-03-03 12%3A13%253A14")
assert(partFile2.listFiles().nonEmpty)
checkAnswer(
spark.table("t1"), Row("1", "2") :: Row("1", "2017-03-03 12:13%3A14") :: Nil)
}
}
}
}
}
// Database LOCATION paths containing special characters: tables created in the
// database must land under that location.
Seq("a b", "a:b", "a%b").foreach { specialChars =>
test(s"location uri contains $specialChars for database") {
// On Windows, it looks colon in the file name is illegal by default. See
// https://support.microsoft.com/en-us/help/289627
assume(!Utils.isWindows || specialChars != "a:b")
withDatabase ("tmpdb") {
withTable("t") {
withTempDir { dir =>
val loc = new File(dir, specialChars)
// The parser does not recognize the backslashes on Windows as they are.
// These currently should be escaped.
val escapedLoc = loc.getAbsolutePath.replace("\\\\", "\\\\\\\\")
spark.sql(s"CREATE DATABASE tmpdb LOCATION '$escapedLoc'")
spark.sql("USE tmpdb")
import testImplicits._
Seq(1).toDF("a").write.saveAsTable("t")
val tblloc = new File(loc, "t")
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location == makeQualifiedPath(tblloc.getAbsolutePath))
assert(tblloc.listFiles().nonEmpty)
}
}
}
}
}
// Bare filesystem paths given as LOCATION must be stored fully qualified
// (with a "file:/" scheme) in the catalog.
test("the qualified path of a datasource table is stored in the catalog") {
withTable("t", "t1") {
withTempDir { dir =>
assert(!dir.getAbsolutePath.startsWith("file:/"))
// The parser does not recognize the backslashes on Windows as they are.
// These currently should be escaped.
val escapedDir = dir.getAbsolutePath.replace("\\\\", "\\\\\\\\")
spark.sql(
s"""
|CREATE TABLE t(a string)
|USING parquet
|LOCATION '$escapedDir'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location.toString.startsWith("file:/"))
}
withTempDir { dir =>
assert(!dir.getAbsolutePath.startsWith("file:/"))
// The parser does not recognize the backslashes on Windows as they are.
// These currently should be escaped.
val escapedDir = dir.getAbsolutePath.replace("\\\\", "\\\\\\\\")
spark.sql(
s"""
|CREATE TABLE t1(a string, b string)
|USING parquet
|PARTITIONED BY(b)
|LOCATION '$escapedDir'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
assert(table.location.toString.startsWith("file:/"))
}
}
}
// ALTER TABLE ADD COLUMNS is only supported for these native file formats;
// existing rows read back null for the added column.
val supportedNativeFileFormatsForAlterTableAddColumns = Seq("parquet", "json", "csv")
supportedNativeFileFormatsForAlterTableAddColumns.foreach { provider =>
test(s"alter datasource table add columns - $provider") {
withTable("t1") {
sql(s"CREATE TABLE t1 (c1 int) USING $provider")
sql("INSERT INTO t1 VALUES (1)")
sql("ALTER TABLE t1 ADD COLUMNS (c2 int)")
checkAnswer(
spark.table("t1"),
Seq(Row(1, null))
)
checkAnswer(
sql("SELECT * FROM t1 WHERE c2 is null"),
Seq(Row(1, null))
)
sql("INSERT INTO t1 VALUES (3, 2)")
checkAnswer(
sql("SELECT * FROM t1 WHERE c2 = 2"),
Seq(Row(3, 2))
)
}
}
}
// Same as above for partitioned tables: the added column is appended before
// the partition columns, and filtering by old/new columns still works.
supportedNativeFileFormatsForAlterTableAddColumns.foreach { provider =>
test(s"alter datasource table add columns - partitioned - $provider") {
withTable("t1") {
sql(s"CREATE TABLE t1 (c1 int, c2 int) USING $provider PARTITIONED BY (c2)")
sql("INSERT INTO t1 PARTITION(c2 = 2) VALUES (1)")
sql("ALTER TABLE t1 ADD COLUMNS (c3 int)")
checkAnswer(
spark.table("t1"),
Seq(Row(1, null, 2))
)
checkAnswer(
sql("SELECT * FROM t1 WHERE c3 is null"),
Seq(Row(1, null, 2))
)
sql("INSERT INTO t1 PARTITION(c2 =1) VALUES (2, 3)")
checkAnswer(
sql("SELECT * FROM t1 WHERE c3 = 3"),
Seq(Row(2, 3, 1))
)
checkAnswer(
sql("SELECT * FROM t1 WHERE c2 = 1"),
Seq(Row(2, 3, 1))
)
}
}
}
// ADD COLUMNS is rejected for the text data source.
test("alter datasource table add columns - text format not supported") {
withTable("t1") {
sql("CREATE TABLE t1 (c1 int) USING text")
val e = intercept[AnalysisException] {
sql("ALTER TABLE t1 ADD COLUMNS (c2 int)")
}.getMessage
assert(e.contains("ALTER ADD COLUMNS does not support datasource table with type"))
}
}
// ADD COLUMNS is rejected on a temporary view.
test("alter table add columns -- not support temp view") {
withTempView("tmp_v") {
sql("CREATE TEMPORARY VIEW tmp_v AS SELECT 1 AS c1, 2 AS c2")
val e = intercept[AnalysisException] {
sql("ALTER TABLE tmp_v ADD COLUMNS (c3 INT)")
}
assert(e.message.contains("ALTER ADD COLUMNS does not support views"))
}
}
// ADD COLUMNS is rejected on a persistent view.
test("alter table add columns -- not support view") {
withView("v1") {
sql("CREATE VIEW v1 AS SELECT 1 AS c1, 2 AS c2")
val e = intercept[AnalysisException] {
sql("ALTER TABLE v1 ADD COLUMNS (c3 INT)")
}
assert(e.message.contains("ALTER ADD COLUMNS does not support views"))
}
}
// ADD COLUMNS rejects a column name that already exists (default,
// case-insensitive resolution).
test("alter table add columns with existing column name") {
withTable("t1") {
sql("CREATE TABLE t1 (c1 int) USING PARQUET")
val e = intercept[AnalysisException] {
sql("ALTER TABLE t1 ADD COLUMNS (c1 string)")
}.getMessage
assert(e.contains("Found duplicate column(s)"))
}
}
// IF NOT EXISTS is not allowed when creating a TEMPORARY function.
test("create temporary function with if not exists") {
withUserDefinedFunction("func1" -> true) {
val sql1 =
"""
|CREATE TEMPORARY FUNCTION IF NOT EXISTS func1 as
|'com.matthewrathbone.example.SimpleUDFExample' USING JAR '/path/to/jar1',
|JAR '/path/to/jar2'
""".stripMargin
val e = intercept[AnalysisException] {
sql(sql1)
}.getMessage
assert(e.contains("It is not allowed to define a TEMPORARY function with IF NOT EXISTS"))
}
}
// OR REPLACE and IF NOT EXISTS are mutually exclusive in CREATE FUNCTION.
test("create function with both if not exists and replace") {
withUserDefinedFunction("func1" -> false) {
val sql1 =
"""
|CREATE OR REPLACE FUNCTION IF NOT EXISTS func1 as
|'com.matthewrathbone.example.SimpleUDFExample' USING JAR '/path/to/jar1',
|JAR '/path/to/jar2'
""".stripMargin
val e = intercept[AnalysisException] {
sql(sql1)
}.getMessage
assert(e.contains("CREATE FUNCTION with both IF NOT EXISTS and REPLACE is not allowed"))
}
}
// A TEMPORARY function cannot be qualified with a database name, even the
// current one.
test("create temporary function by specifying a database") {
val dbName = "mydb"
withDatabase(dbName) {
sql(s"CREATE DATABASE $dbName")
sql(s"USE $dbName")
withUserDefinedFunction("func1" -> true) {
val sql1 =
s"""
|CREATE TEMPORARY FUNCTION $dbName.func1 as
|'com.matthewrathbone.example.SimpleUDFExample' USING JAR '/path/to/jar1',
|JAR '/path/to/jar2'
""".stripMargin
val e = intercept[AnalysisException] {
sql(sql1)
}.getMessage
assert(e.contains(s"Specifying a database in CREATE TEMPORARY FUNCTION " +
s"is not allowed: '$dbName'"))
}
}
}
// Parameterized over spark.sql.caseSensitive: duplicate-column detection in
// ADD COLUMNS, and basic DDL under the Turkish locale (whose upper/lower-case
// mapping of 'i'/'I' is a classic source of identifier-normalization bugs).
Seq(true, false).foreach { caseSensitive =>
test(s"alter table add columns with existing column name - caseSensitive $caseSensitive") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> s"$caseSensitive") {
withTable("t1") {
sql("CREATE TABLE t1 (c1 int) USING PARQUET")
if (!caseSensitive) {
// "C1" collides with "c1" when resolution is case-insensitive.
val e = intercept[AnalysisException] {
sql("ALTER TABLE t1 ADD COLUMNS (C1 string)")
}.getMessage
assert(e.contains("Found duplicate column(s)"))
} else {
if (isUsingHiveMetastore) {
// hive catalog will still complains that c1 is duplicate column name because hive
// identifiers are case insensitive.
val e = intercept[AnalysisException] {
sql("ALTER TABLE t1 ADD COLUMNS (C1 string)")
}.getMessage
assert(e.contains("HiveException"))
} else {
sql("ALTER TABLE t1 ADD COLUMNS (C1 string)")
assert(spark.table("t1").schema
.equals(new StructType().add("c1", IntegerType).add("C1", StringType)))
}
}
}
}
}
test(s"basic DDL using locale tr - caseSensitive $caseSensitive") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> s"$caseSensitive") {
withLocale("tr") {
val dbName = "DaTaBaSe_I"
withDatabase(dbName) {
sql(s"CREATE DATABASE $dbName")
sql(s"USE $dbName")
val tabName = "tAb_I"
withTable(tabName) {
sql(s"CREATE TABLE $tabName(col_I int) USING PARQUET")
sql(s"INSERT OVERWRITE TABLE $tabName SELECT 1")
checkAnswer(sql(s"SELECT col_I FROM $tabName"), Row(1) :: Nil)
}
}
}
}
}
}
}
| aokolnychyi/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala | Scala | apache-2.0 | 91,863 |
/**
* Copyright © 2012 Gustav van der Merwe
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Marker trait for AST objects in this proof system; `sealed` restricts
// implementations to this file.
sealed trait AbstractSyntaxTree extends Objct
// Smart constructors for judgements over objects. NOTE(review): `Symbol`
// shadows `scala.Symbol` in any scope importing this package — consider a
// more specific name if that causes confusion.
// "ast x" — postfix judgement asserting x is an abstract syntax tree.
object Ast { def apply(ast: Objct) = Judgement("ast", List(ast), PostFix) }
// "sym x" — postfix judgement asserting x is a symbol.
object Symbol { def apply(sym: Objct) = Judgement("sym", List(sym), PostFix) }
// "a # b" — infix judgement asserting a and b are distinct.
object Distinct { def apply(a: Objct, b: Objct) = Judgement("#", List(a, b), InFix) }
| gvdm/proof-system | src/Syntax.scala | Scala | gpl-3.0 | 988 |
package org.jetbrains.plugins.scala.lang.typeConformance
package generated
// Generated data-driven suite: each testXxx() loads the corresponding fixture
// from the "compound/" folder and checks type conformance for compound types
// (A with B refinements). Only testAnonImplementation supplies inline code.
class TypeConformanceCompoundTest extends TypeConformanceTestBase {
//This class was generated by build script, please don't change this
override def folderPath: String = super.folderPath + "compound/"
def testAWithB() {doTest()}
def testAWithBWithC() {doTest()}
def testAWithBWithMissingB() {doTest()}
def testAWithBWithMissingDef() {doTest()}
def testAWithBWithMissingType() {doTest()}
def testAWithBWithTemplateDef() {doTest()}
def testAWithBWithTemplateDef1() {doTest()}
def testAWithBWithType() {doTest()}
def testAWithBWithTypeInC() {doTest()}
def testAdditionalTemplateBody() {doTest()}
def testBugScl1996() {doTest()}
def testWrongName() {doTest()}
def testnoAdditionalTemplateBody() {doTest()}
// Inline fixture: an anonymous `new Foo { ... }` must conform to `Foo`;
// checkEquivalence = true additionally requires type equivalence.
def testAnonImplementation(): Unit = doTest(
"""
|trait Foo {
| def foo: Unit
|}
|
|val newFoo: Foo = new Foo {
| override def foo: Unit = ()
|}
|//True
""".stripMargin, checkEquivalence = true)
} | loskutov/intellij-scala | test/org/jetbrains/plugins/scala/lang/typeConformance/generated/TypeConformanceCompoundTest.scala | Scala | apache-2.0 | 1,079 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.responsiblepeople.address
import com.google.inject.Inject
import connectors.DataCacheConnector
import controllers.{AmlsBaseController, CommonPlayDependencies}
import forms.{Form2, InvalidForm, ValidForm}
import models.responsiblepeople._
import play.api.mvc.{AnyContent, MessagesControllerComponents, Request}
import utils.{AuthAction, ControllerHelper, RepeatingSection}
import views.html.responsiblepeople.address.time_at_additional_extra_address
import scala.concurrent.Future
/**
 * Handles the "time at additional extra address" page for a responsible
 * person (the third address in their address history). GET renders the form
 * (pre-populated when a time-at-address is already stored), POST validates
 * and persists the answer, then routes onward.
 *
 * NOTE(review): the GET handler pattern-matches ResponsiblePerson
 * positionally across all 22 constructor fields — any change to the case
 * class arity silently breaks these matches; consider field-based extraction.
 */
class TimeAtAdditionalExtraAddressController @Inject() (val dataCacheConnector: DataCacheConnector,
authAction: AuthAction,
val ds: CommonPlayDependencies,
val cc: MessagesControllerComponents,
time_at_additional_extra_address: time_at_additional_extra_address,
implicit val error: views.html.error) extends AmlsBaseController(ds, cc) with RepeatingSection {
// Fallback used to render an empty form when no address history exists yet.
final val DefaultAddressHistory = ResponsiblePersonAddress(PersonAddressUK("", "", None, None, ""), None)
// Renders the form: pre-filled when a stored TimeAtAddress exists for the
// additional extra address, empty when the person exists without one,
// 404 when the indexed responsible person is missing.
def get(index: Int, edit: Boolean = false, flow: Option[String] = None) = authAction.async {
implicit request =>
getData[ResponsiblePerson](request.credId, index) map {
case Some(ResponsiblePerson(Some(personName), _, _, _, _, _, _, _, _, Some(ResponsiblePersonAddressHistory(_, _, Some(ResponsiblePersonAddress(_, Some(additionalExtraAddress))))), _, _, _, _, _, _, _, _, _, _, _, _)) =>
Ok(time_at_additional_extra_address(Form2[TimeAtAddress](additionalExtraAddress), edit, index, flow, personName.titleName))
case Some(ResponsiblePerson(Some(personName), _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _)) =>
Ok(time_at_additional_extra_address(Form2(DefaultAddressHistory), edit, index, flow, personName.titleName))
case _ => NotFound(notFoundView)
}
}
// Validates the submitted form; on success copies the new TimeAtAddress onto
// the existing additional extra address and persists. 404 when the person or
// address is missing; an out-of-range index is also mapped to 404.
def post(index: Int, edit: Boolean = false, flow: Option[String] = None) = authAction.async {
implicit request =>
(Form2[TimeAtAddress](request.body) match {
case f: InvalidForm =>
getData[ResponsiblePerson](request.credId, index) map { rp =>
BadRequest(time_at_additional_extra_address(f, edit, index, flow, ControllerHelper.rpTitleName(rp)))
}
case ValidForm(_, data) =>
getData[ResponsiblePerson](request.credId, index) flatMap { responsiblePerson =>
(for {
rp <- responsiblePerson
addressHistory <- rp.addressHistory
additionalExtraAddress <- addressHistory.additionalExtraAddress
} yield {
val additionalExtraAddressWithTime = additionalExtraAddress.copy(
timeAtAddress = Some(data)
)
updateAndRedirect(request.credId, additionalExtraAddressWithTime, index, edit, flow)
}) getOrElse Future.successful(NotFound(notFoundView))
}
}).recoverWith {
case _: IndexOutOfBoundsException => Future.successful(NotFound(notFoundView))
}
}
// Persists the updated additional extra address (creating the address history
// container if absent), then redirects: in edit mode back to the summary,
// otherwise on to the position-within-business question.
private def updateAndRedirect(credId: String, data: ResponsiblePersonAddress, index: Int, edit: Boolean, flow: Option[String])
(implicit request: Request[AnyContent]) = {
updateDataStrict[ResponsiblePerson](credId, index) { res =>
res.addressHistory(
res.addressHistory match {
case Some(a) => a.additionalExtraAddress(data)
case _ => ResponsiblePersonAddressHistory(additionalExtraAddress = Some(data))
}
)
} map { _ =>
edit match {
case true => Redirect(controllers.responsiblepeople.routes.DetailedAnswersController.get(index, flow))
case false => Redirect(controllers.responsiblepeople.routes.PositionWithinBusinessController.get(index, edit, flow))
}
}
}
} | hmrc/amls-frontend | app/controllers/responsiblepeople/address/TimeAtAdditionalExtraAddressController.scala | Scala | apache-2.0 | 4,648 |
/*******************************************************************************
Copyright (c) 2012-2014, KAIST, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.nodes_util
import kr.ac.kaist.jsaf.nodes._
import kr.ac.kaist.jsaf.nodes_util.{NodeUtil => NU}
import kr.ac.kaist.jsaf.nodes_util.{NodeFactory => NF}
import kr.ac.kaist.jsaf.nodes_util._
import kr.ac.kaist.jsaf.scala_src.useful.Lists._
import kr.ac.kaist.jsaf.scala_src.useful.Options._
import kr.ac.kaist.jsaf.useful.Useful
import edu.rice.cs.plt.tuple.{Option => JOption}
import _root_.java.lang.{Integer => JInt}
import _root_.java.lang.{Double => JDouble}
import _root_.java.util.{List => JList}
import _root_.java.io.BufferedWriter
import _root_.java.io.File
import _root_.java.math.BigInteger
import _root_.java.util.ArrayList
import _root_.java.util.Arrays
import _root_.java.util.Collections
import _root_.java.util.Set
import _root_.java.util.StringTokenizer
/**
 * Factory for IR (intermediate representation) nodes.
 *
 * Most `make*` overloads follow the same pattern: build the IR node with an
 * `IRSpanInfo` derived from `(fromSource, span)` and pass it through
 * `NF.putIr2ast` together with the originating AST node (presumably recording
 * the IR-to-AST mapping — see NodeFactory). Shorter overloads delegate to the
 * fullest overload, filling missing lists with empty Java lists.
 */
object IRFactory {
  // ---- dummy nodes: placeholders for internally generated code with no real AST/span ----
  val dummyAst = NF.makeNoOp(NF.makeSpanInfo(NF.makeSpan("dummyAST")), "dummyAST")
  // For use only when there is no hope of attaching a true span.
  def dummySpan(villain: String): Span = {
    val name = if (villain.length != 0) villain else "dummySpan"
    val sl = new SourceLocRats(name,0,0,0)
    new Span(sl,sl)
  }
  // Span info that is explicitly marked as NOT coming from source.
  def makeInfo(span: Span): IRSpanInfo = new IRSpanInfo(false, span)
  def dummyIRId(name: String): IRId = makeTId(dummyAst, dummySpan(name), name)
  def dummyIRId(id: Id): IRId = {
    val name = id.getText
    makeTId(dummyAst, dummySpan(name), name)
  }
  def dummyIRId(label: Label): IRId = {
    val name = label.getId.getText
    makeTId(dummyAst, dummySpan(name), name)
  }
  // An empty statement sequence attached to the given AST node.
  def dummyIRStmt(ast: ASTNode, span: Span): IRSeq =
    makeSeq(ast, span, Nil.asInstanceOf[List[IRStmt]])
  def dummyIRExpr(): IRExpr = makeTId(dummyAst, dummySpan("_"), "_")
  // A one-statement sequence carrying `msg` as a dummy identifier (used for diagnostics).
  def dummyIRStmt(ast: ASTNode, span: Span, msg: String): IRSeq =
    makeSeq(dummyAst, span, List(makeExprStmt(dummyAst, span, dummyIRId(msg), dummyIRExpr)))
  def makeSpanInfo(fromSource: Boolean, span: Span): IRSpanInfo =
    new IRSpanInfo(fromSource, span)

  // ---- functions: IRFunctional is the shared body record of declarations and expressions ----
  def makeFunctional(fromSource: Boolean, ast: ASTNode,
                     name: IRId, params: JList[IRId], args: JList[IRStmt],
                     fds: JList[IRFunDecl], vds: JList[IRVarStmt],
                     body: JList[IRStmt]): IRFunctional =
    NF.putIr2ast(new IRFunctional(fromSource, name, params, args, fds, vds, body), ast)
  def makeFunctional(fromSource: Boolean, ast: ASTNode,
                     name: IRId, params: JList[IRId], body: IRStmt): IRFunctional =
    makeFunctional(fromSource, ast, name, params, toJavaList(Nil), toJavaList(Nil),
                   toJavaList(Nil), toJavaList(List(body)))
  def makeFunctional(fromSource: Boolean, ast: ASTNode, name: IRId, params: JList[IRId],
                     body: JList[IRStmt]): IRFunctional =
    makeFunctional(fromSource, ast, name, params, toJavaList(Nil), toJavaList(Nil),
                   toJavaList(Nil), body)
  // Empty root, used by the disambiguator only (see span label).
  def makeRoot(): IRRoot =
    new IRRoot(makeSpanInfo(false, dummySpan("disambiguatorOnly")),
               toJavaList(Nil), toJavaList(Nil), toJavaList(Nil))
  def makeRoot(fromSource: Boolean, ast: ASTNode, span: Span, irs: JList[IRStmt]): IRRoot =
    makeRoot(fromSource, ast, span, toJavaList(Nil), toJavaList(Nil), irs)
  def makeRoot(fromSource: Boolean, ast: ASTNode, span: Span, fds: JList[IRFunDecl], vds: JList[IRVarStmt],
               irs: JList[IRStmt]): IRRoot =
    NF.putIr2ast(new IRRoot(makeSpanInfo(fromSource, span), fds, vds, irs), ast)
  def makeFunExpr(fromSource: Boolean, ast: ASTNode, span: Span, lhs: IRId, name: IRId,
                  params: JList[IRId], body: IRStmt): IRFunExpr =
    makeFunExpr(fromSource, ast, span, lhs, name, params, toJavaList(Nil), toJavaList(Nil),
                toJavaList(Nil), toJavaList(List(body)))
  def makeFunExpr(fromSource: Boolean, ast: ASTNode,
                  span: Span, lhs: IRId, name: IRId, params: JList[IRId], args: JList[IRStmt],
                  fds: JList[IRFunDecl], vds: JList[IRVarStmt], body: JList[IRStmt]): IRFunExpr =
    NF.putIr2ast(new IRFunExpr(makeSpanInfo(fromSource, span), lhs,
                 makeFunctional(fromSource, ast, name, params, args, fds, vds, body)), ast)

  // ---- statements and expressions ----
  def makeEval(fromSource: Boolean, ast: ASTNode, span: Span, lhs: IRId, arg: IRExpr) =
    NF.putIr2ast(new IREval(makeSpanInfo(fromSource, span), lhs, arg), ast)
  def makeUn(fromSource: Boolean, ast: ASTNode, span: Span, op: IROp, expr: IRExpr) =
    NF.putIr2ast(new IRUn(makeSpanInfo(fromSource, span), op, expr), ast)
  def makeDelete(fromSource: Boolean, ast: ASTNode, span: Span, lhs: IRId, expr: IRId) =
    NF.putIr2ast(new IRDelete(makeSpanInfo(fromSource, span), lhs, expr), ast)
  def makeDeleteProp(fromSource: Boolean, ast: ASTNode, span: Span, lhs: IRId, obj: IRId, index: IRExpr) =
    NF.putIr2ast(new IRDeleteProp(makeSpanInfo(fromSource, span), lhs, obj, index), ast)
  // Object literal; `proto` is the optional prototype identifier.
  def makeObject(fromSource: Boolean, ast: ASTNode, span: Span,
                 lhs: IRId, members: List[IRMember], proto: IRId): IRObject =
    makeObject(fromSource, ast, span, lhs, toJavaList(members), Some(proto))
  def makeObject(fromSource: Boolean, ast: ASTNode, span: Span, lhs: IRId, members: List[IRMember]): IRObject =
    makeObject(fromSource, ast, span, lhs, toJavaList(members), None)
  def makeObject(fromSource: Boolean, ast: ASTNode, span: Span,
                 lhs: IRId, members: JList[IRMember], proto: Option[IRId]): IRObject =
    NF.putIr2ast(new IRObject(makeSpanInfo(fromSource, span), lhs, members, proto), ast)
  // Array literal; None elements represent elisions (holes).
  def makeArray(fromSource: Boolean, ast: ASTNode, span: Span, lhs: IRId, elements: List[Option[IRExpr]]) : IRArray = {
    val new_elements = toJavaList(elements.map(toJavaOption(_)))
    makeArray(fromSource, ast, span, lhs, new_elements)
  }
  def makeArray(fromSource: Boolean, ast: ASTNode, span: Span, lhs: IRId, elements: JList[JOption[IRExpr]]) : IRArray =
    NF.putIr2ast(new IRArray(makeSpanInfo(fromSource, span), lhs, elements), ast)
  def makeArrayNumber(fromSource: Boolean, ast: ASTNode, span: Span, lhs: IRId, elements: JList[JDouble]) : IRStmt =
    NF.putIr2ast(new IRArrayNumber(makeSpanInfo(fromSource, span), lhs, elements), ast)
  def makeArgs(ast: ASTNode, span: Span, lhs: IRId, elements: List[Option[IRExpr]]) : IRArgs = {
    val new_elements = toJavaList(elements.map(toJavaOption(_)))
    makeArgs(ast, span, lhs, new_elements)
  }
  // Note: argument lists are always created with fromSource = false.
  def makeArgs(ast: ASTNode, span: Span, lhs: IRId, elements: JList[JOption[IRExpr]]) : IRArgs =
    NF.putIr2ast(new IRArgs(makeSpanInfo(false, span), lhs, elements), ast)
  def makeLoad(fromSource: Boolean, ast: ASTNode, span: Span, obj: IRId, index: IRExpr) =
    NF.putIr2ast(new IRLoad(makeSpanInfo(fromSource, span), obj, index), ast)
  // Internal (runtime-helper) calls are always created with fromSource = false.
  def makeInternalCall(ast: ASTNode, span: Span, lhs: IRId, fun: IRId, arg: IRExpr) : IRInternalCall =
    makeInternalCall(ast, span, lhs, fun, arg, None)
  def makeInternalCall(ast: ASTNode, span: Span, lhs: IRId, fun: IRId, arg1: IRId, arg2: IRId) : IRInternalCall =
    makeInternalCall(ast, span, lhs, fun, arg1, Some(arg2))
  def makeInternalCall(ast: ASTNode, span: Span, lhs: IRId, fun: IRId, arg1: IRExpr, arg2: Option[IRId]) : IRInternalCall =
    NF.putIr2ast(new IRInternalCall(makeSpanInfo(false, span), lhs, fun, arg1, toJavaOption(arg2)), ast)
  def makeCall(fromSource: Boolean, ast: ASTNode, span: Span, lhs: IRId, fun: IRId, thisB: IRId, args: IRId) : IRCall =
    NF.putIr2ast(new IRCall(makeSpanInfo(fromSource, span), lhs, fun, thisB, args), ast)
  def makeNew(fromSource: Boolean, ast: ASTNode, span: Span, lhs: IRId, fun: IRId, args: List[IRId]) : IRNew =
    makeNew(fromSource, ast, span, lhs, fun, toJavaList(args))
  def makeNew(fromSource: Boolean, ast: ASTNode, span: Span, lhs: IRId, fun: IRId, args: JList[IRId]) : IRNew =
    NF.putIr2ast(new IRNew(makeSpanInfo(fromSource, span), lhs, fun, args), ast)
  def makeBin(fromSource: Boolean, ast: ASTNode, span: Span, first: IRExpr, op: IROp, second: IRExpr) =
    NF.putIr2ast(new IRBin(makeSpanInfo(fromSource, span), first, op, second), ast)
  // Convenience: `lhs = obj[index]` as a statement.
  def makeLoadStmt(fromSource: Boolean, ast: ASTNode, span: Span, lhs: IRId, obj: IRId, index: IRExpr) =
    makeExprStmt(ast, span, lhs, makeLoad(fromSource, ast, span, obj, index))
  def makeExprStmt(ast: ASTNode, span: Span, lhs: IRId, right: IRExpr): IRExprStmt =
    makeExprStmt(ast, span, lhs, right, false)
  // `isRef = true` variant; see IRExprStmt for the meaning of the flag.
  def makeExprStmtIgnore(ast: ASTNode, span: Span, lhs: IRId, right: IRExpr): IRExprStmt =
    makeExprStmt(ast, span, lhs, right, true)
  def makeExprStmt(ast: ASTNode, span: Span, lhs: IRId, right: IRExpr, isRef: Boolean): IRExprStmt =
    NF.putIr2ast(new IRExprStmt(makeSpanInfo(false, span), lhs, right, isRef), ast)
  def makeFunDecl(fromSource: Boolean, ast: ASTNode, span: Span,
                  name: IRId, params: JList[IRId], body: IRStmt): IRFunDecl =
    makeFunDecl(fromSource, ast, span, name, params, toJavaList(Nil), toJavaList(Nil),
                toJavaList(Nil), toJavaList(List(body)))
  def makeFunDecl(fromSource: Boolean, ast: ASTNode,
                  span: Span, name: IRId, params: JList[IRId], args: JList[IRStmt],
                  fds: JList[IRFunDecl], vds: JList[IRVarStmt], body: JList[IRStmt]): IRFunDecl =
    NF.putIr2ast(new IRFunDecl(makeSpanInfo(fromSource, span),
                 makeFunctional(fromSource, ast, name, params, args, fds, vds, body)), ast)
  def makeBreak(fromSource: Boolean, ast: ASTNode, span: Span, label: IRId): IRBreak =
    NF.putIr2ast(new IRBreak(makeSpanInfo(fromSource, span), label), ast)
  def makeReturn(fromSource: Boolean, ast: ASTNode, span: Span, expr: JOption[IRExpr]) =
    NF.putIr2ast(new IRReturn(makeSpanInfo(fromSource, span), expr), ast)
  def makeLabelStmt(fromSource: Boolean, ast: ASTNode, span: Span, label: IRId, stmt: IRStmt): IRLabelStmt =
    NF.putIr2ast(new IRLabelStmt(makeSpanInfo(fromSource, span), label, stmt), ast)
  def makeWith(fromSource: Boolean, ast: ASTNode, span: Span, id: IRId, stmt: IRStmt) =
    NF.putIr2ast(new IRWith(makeSpanInfo(fromSource, span), id, stmt), ast)
  def makeThrow(fromSource: Boolean, ast: ASTNode, span: Span, expr: IRExpr) =
    NF.putIr2ast(new IRThrow(makeSpanInfo(fromSource, span), expr), ast)
  def makeVarStmt(fromSource: Boolean, ast: ASTNode, span: Span, lhs: IRId, fromParam: Boolean): IRVarStmt =
    NF.putIr2ast(new IRVarStmt(makeSpanInfo(fromSource, span), lhs, fromParam), ast)
  def makeIf(fromSource: Boolean, ast: ASTNode, span: Span, cond: IRExpr, trueB: IRStmt, falseB: JOption[IRStmt]) =
    NF.putIr2ast(new IRIf(makeSpanInfo(fromSource, span), cond, trueB, falseB), ast)
  def makeWhile(fromSource: Boolean, ast: ASTNode, span: Span, cond: IRExpr, body: IRStmt) =
    NF.putIr2ast(new IRWhile(makeSpanInfo(fromSource, span), cond, body), ast)
  def makeTry(fromSource: Boolean, ast: ASTNode, span: Span,
              body: IRStmt, name: JOption[IRId], catchB: JOption[IRStmt], finallyB: JOption[IRStmt]) =
    NF.putIr2ast(new IRTry(makeSpanInfo(fromSource, span), body, name, catchB, finallyB), ast)
  def makeStore(fromSource: Boolean, ast: ASTNode, span: Span, obj: IRId, index: IRExpr, rhs: IRExpr) =
    NF.putIr2ast(new IRStore(makeSpanInfo(fromSource, span), obj, index, rhs), ast)
  // Statement sequences are always created with fromSource = false.
  def makeSeq(ast: ASTNode, span: Span, first: IRStmt, second: IRStmt): IRSeq =
    makeSeq(ast, span, List(first, second))
  def makeSeq(ast: ASTNode, span: Span): IRSeq =
    makeSeq(ast, span, Nil)
  def makeSeq(ast: ASTNode, span: Span, stmt: IRStmt): IRSeq =
    makeSeq(ast, span, List(stmt))
  def makeSeq(ast: ASTNode, span: Span, stmts: List[IRStmt]): IRSeq =
    NF.putIr2ast(new IRSeq(makeSpanInfo(false, span), toJavaList(stmts)), ast)
  def makeStmtUnit(ast: ASTNode, span: Span): IRStmtUnit =
    makeStmtUnit(ast, span, Useful.list().asInstanceOf[JList[IRStmt]])
  def makeStmtUnit(ast: ASTNode, span: Span, stmt: IRStmt): IRStmtUnit =
    makeStmtUnit(ast, span, Useful.list(stmt))
  def makeStmtUnit(ast: ASTNode, span: Span, first: IRStmt, second: IRStmt): IRStmtUnit =
    makeStmtUnit(ast, span, Useful.list(first, second))
  def makeStmtUnit(ast: ASTNode, span: Span, stmts: List[IRStmt]): IRStmtUnit =
    makeStmtUnit(ast, span, toJavaList(stmts))
  // Note: unlike makeSeq, statement units are marked fromSource = true.
  def makeStmtUnit(ast: ASTNode, span: Span, stmts: JList[IRStmt]): IRStmtUnit =
    NF.putIr2ast(new IRStmtUnit(makeSpanInfo(true, span), stmts), ast)

  // ---- property getters / setters ----
  def makeGetProp(fromSource: Boolean, ast: ASTNode, span: Span, prop: IRId, body: IRStmt): IRGetProp =
    makeGetProp(fromSource, ast, span,
                makeFunctional(fromSource, ast, prop, toJavaList(Nil).asInstanceOf[JList[IRId]], body))
  // Note: this overload hard-codes fromSource = true for the inner functional.
  def makeGetProp(fromSource: Boolean, ast: ASTNode,
                  span: Span, name: IRId, params: JList[IRId], args: JList[IRStmt],
                  fds: JList[IRFunDecl], vds: JList[IRVarStmt],
                  body: JList[IRStmt]): IRGetProp =
    makeGetProp(fromSource, ast, span, makeFunctional(true, ast, name, params, args, fds, vds, body))
  def makeGetProp(fromSource: Boolean, ast: ASTNode, span: Span, functional: IRFunctional): IRGetProp =
    NF.putIr2ast(new IRGetProp(makeSpanInfo(fromSource, span), functional), ast)
  // Note: these overloads also hard-code fromSource = true for the inner functional.
  def makeSetProp(fromSource: Boolean, ast: ASTNode,
                  span: Span, name: IRId, params: JList[IRId], args: JList[IRStmt],
                  fds: JList[IRFunDecl], vds: JList[IRVarStmt],
                  body: JList[IRStmt]): IRSetProp =
    makeSetProp(fromSource, ast, span, makeFunctional(true, ast, name, params, args, fds, vds, body))
  def makeSetProp(fromSource: Boolean, ast: ASTNode, span: Span, prop: IRId, id: IRId, body: IRStmt): IRSetProp =
    makeSetProp(fromSource, ast, span, makeFunctional(true, ast, prop, toJavaList(List(id)), body))
  def makeSetProp(fromSource: Boolean, ast: ASTNode, span: Span, functional: IRFunctional) =
    NF.putIr2ast(new IRSetProp(makeSpanInfo(fromSource, span), functional), ast)
  def makeField(fromSource: Boolean, ast: ASTNode, span: Span, prop: IRId, expr: IRExpr) =
    NF.putIr2ast(new IRField(makeSpanInfo(fromSource, span), prop, expr), ast)

  // ---- literals ----
  val defaultSpan = NF.makeSpan("Default span for internally generated nodes")
  val defaultInfo = new IRSpanInfo(false, defaultSpan)
  // Span info that IS marked as coming from source, taken from the AST node itself.
  def trueInfo(ast: ASTNode) = NF.putIr2ast(new IRSpanInfo(true, ast.getInfo.getSpan), ast)
  def makeSourceInfo(fromSource: Boolean, ast: ASTNode) =
    if (fromSource) trueInfo(ast) else defaultInfo
  def makeBool(fromSource: Boolean, ast: ASTNode, bool: Boolean): IRBool =
    new IRBool(makeSourceInfo(fromSource, ast), bool)
  // Shared boolean literal instances for internally generated code.
  val trueV = makeBool(false, dummyAst, true)
  val falseV = makeBool(false, dummyAst, false)
  def makeNull(ast: ASTNode) = new IRNull(trueInfo(ast))
  def makeUndef(ast: ASTNode) = new IRUndef(trueInfo(ast))
  // `text` keeps the literal exactly as written in the source; `num` is its numeric value.
  def makeNumber(fromSource: Boolean, text: String, num: Double): IRNumber =
    makeNumber(fromSource, dummyAst, text, num)
  def makeNumber(fromSource: Boolean, ast: ASTNode, text: String, num: Double): IRNumber =
    new IRNumber(makeSourceInfo(fromSource, ast), text, num)
  val oneV = makeNumber(false, "1", 1)
  // Cached string literals for the single digits; note they all carry defaultInfo,
  // not the span of any particular use site.
  val zero = new IRString(defaultInfo, "0")
  val one = new IRString(defaultInfo, "1")
  val two = new IRString(defaultInfo, "2")
  val three = new IRString(defaultInfo, "3")
  val four = new IRString(defaultInfo, "4")
  val five = new IRString(defaultInfo, "5")
  val six = new IRString(defaultInfo, "6")
  val seven = new IRString(defaultInfo, "7")
  val eight = new IRString(defaultInfo, "8")
  val nine = new IRString(defaultInfo, "9")
  def makeString(str: String, ast: ASTNode): IRString = makeString(false, ast, str)
  // Returns the shared cached instance for single-digit strings, a fresh node otherwise.
  def makeString(fromSource: Boolean, ast: ASTNode, str1: String): IRString = {
    if(str1.equals("0")) zero
    else if(str1.equals("1")) one
    else if(str1.equals("2")) two
    else if(str1.equals("3")) three
    else if(str1.equals("4")) four
    else if(str1.equals("5")) five
    else if(str1.equals("6")) six
    else if(str1.equals("7")) seven
    else if(str1.equals("8")) eight
    else if(str1.equals("9")) nine
    else new IRString(makeSourceInfo(fromSource, ast), str1)
  }
  ////////////////////////////////////////////////////////////////////////////////////////
  def makeThis(ast: ASTNode, span: Span) =
    NF.putIr2ast(new IRThis(makeSpanInfo(true, span)), ast)
  // make a user id
  def makeUId(originalName: String, uniqueName: String, isGlobal: Boolean,
              ast: ASTNode, span: Span, isWith: Boolean): IRUserId =
    NF.putIr2ast(new IRUserId(makeSpanInfo(true, span), originalName, uniqueName, isGlobal, isWith), ast)
  // make a withRewriter-generated id
  def makeWId(originalName: String, uniqueName: String, isGlobal: Boolean,
              ast: ASTNode, span: Span): IRUserId =
    makeUId(originalName, uniqueName, isGlobal, ast, span, true)
  // make a non-global user id
  def makeNGId(uniqueName: String, ast: ASTNode, span: Span): IRUserId =
    makeUId(uniqueName, uniqueName, false, ast, span, false)
  def makeNGId(originalName: String, uniqueName: String, ast: ASTNode, span: Span): IRUserId =
    makeUId(originalName, uniqueName, false, ast, span, false)
  // make a global user id
  def makeGId(ast: ASTNode, uniqueName: String): IRUserId =
    makeUId(uniqueName, uniqueName, true, ast, ast.getInfo.getSpan, false)
  // make a global user id
  def makeGId(ast: ASTNode, originalName: String, uniqueName: String, span: Span): IRUserId =
    makeUId(originalName, uniqueName, true, ast, span, false)
  // make a non-global temporary id
  def makeTId(span: Span, uniqueName: String): IRTmpId =
    makeTId(span, uniqueName, false)
  def makeTId(span: Span, uniqueName: String, isGlobal: Boolean): IRTmpId =
    new IRTmpId(makeSpanInfo(false, span), uniqueName, uniqueName, isGlobal)
  def makeTId(ast: ASTNode, span: Span, uniqueName: String): IRTmpId =
    makeTId(false, ast, span, uniqueName, uniqueName, false)
  def makeTId(fromSource: Boolean, ast: ASTNode, span: Span, uniqueName: String): IRTmpId =
    makeTId(fromSource, ast, span, uniqueName, uniqueName, false)
  // make a temporary id
  def makeTId(ast: ASTNode, span: Span, uniqueName: String, isGlobal: Boolean): IRTmpId =
    makeTId(false, ast, span, uniqueName, uniqueName, isGlobal)
  def makeTId(ast: ASTNode, span: Span, originalName: String, uniqueName: String, isGlobal: Boolean): IRTmpId =
    makeTId(false, ast, span, originalName, uniqueName, isGlobal)
  def makeTId(fromSource: Boolean, ast: ASTNode, span: Span, originalName: String, uniqueName: String,
              isGlobal: Boolean): IRTmpId =
    NF.putIr2ast(new IRTmpId(makeSpanInfo(fromSource, span), originalName, uniqueName, isGlobal), ast)
  // kind == 0 means "derive the operator kind from its name".
  def makeOp(name: String, kind: Int = 0) = {
    new IROp(name, if(kind == 0) EJSOp.strToEJSOp(name) else kind)
  }
  def makeNoOp(ast: ASTNode, span: Span, desc: String) =
    NF.putIr2ast(new IRNoOp(makeSpanInfo(false, span), desc), ast)
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/nodes_util/IRFactory.scala | Scala | bsd-3-clause | 19,240 |
package rumms
package impl
import scutil.core.implicits.*
import scutil.jdk.implicits.*
import scutil.lang.*
import scutil.log.*
import scjson.ast.*
import scjson.ast.JsonNavigation.*
import scjson.codec.*
import scjson.converter.*
import scjson.converter.syntax.*
import scwebapp.*
import scwebapp.instances.*
import scwebapp.method.*
import scwebapp.status.*
import scwebapp.header.*
import scwebapp.data.MimeType
/** mount this with an url-pattern of <configuration.path>/STAR (where STAR is a literal "*") */
/**
 * HTTP endpoint implementing the rumms long-polling protocol:
 * GET  <path>/code  serves the configured client javascript,
 * POST <path>/hi    performs the version handshake and creates a conversation,
 * POST <path>/comm  exchanges message batches, parking the request in a
 *                   continuation until outgoing messages arrive or the TTL expires.
 */
final class RummsHandler(configuration:RummsConfiguration, context:RummsHandlerContext) extends Logging {
	import Constants.paths

	// Combined protocol/application version; the client must echo this on "hi".
	private val serverVersion =
			Constants.version.toString + "/" + configuration.version

	//------------------------------------------------------------------------------
	//## request handling

	// partialPlan with a catch-all 404 fallback.
	lazy val totalPlan:HttpHandler =
			partialPlan orAlways
			constant(HttpResponder.sync(EmptyStatus(NOT_FOUND)))

	lazy val partialPlan:HttpPHandler =
			subHandler(GET, paths.code, code) orElse
			subHandler(POST, paths.hi, hi) orElse
			subHandler(POST, paths.comm, comm)

	// Routes a request to `handler` when the full path matches <configuration.path><subPath>.
	// A matching path with the wrong method answers 405; handler exceptions become 500.
	// Expired conversations are reaped before every handled request.
	private def subHandler(method:HttpMethod, subPath:String, handler:HttpHandler):HttpPHandler =
		req => {
			(req.fullPathUTF8 exists (_ ==== configuration.path + subPath)) option {
				if (req.method.toOption == Some(method)) {
					try {
						context.expireConversations()
						handler(req)
					}
					catch { case e:Exception =>
						ERROR(e)
						HttpResponder.sync(EmptyStatus(INTERNAL_SERVER_ERROR))
					}
				}
				else HttpResponder.sync(EmptyStatus(METHOD_NOT_ALLOWED))
			}
		}

	//------------------------------------------------------------------------------
	//## code transfer

	/** send javascript code for client configuration */
	private def code(request:HttpRequest):HttpResponder = {
		val servletPrefix = request.contextPath + configuration.path
		ClientCode(clientCode(servletPrefix))
	}

	// Loads the bundled Client.js template and fills in the runtime parameters.
	private def clientCode(servletPrefix:String):String = {
		val resource = "rumms/Client.js"
		val raw = getClass.getClassLoader.classpathResourceOrError(resource).string(Constants.encoding)
		configure(raw, Map[String,JsonValue](
			"VERSION" -> JsonValue.fromString(serverVersion),
			"ENCODING" -> JsonValue.fromString(Constants.encoding.name),
			"CLIENT_TTL" -> JsonValue.fromLong(Constants.clientTTL.millis),
			"SERVLET_PREFIX" -> JsonValue.fromString(servletPrefix)
		))
	}

	/** patch raw code by replacing @{id} tags */
	private def configure(raw:String, params:Map[String,JsonValue]):String =
		params.foldLeft(raw){ (raw, param) =>
			val (key, value) = param
			val pattern = "@{" + key + "}"
			// values are spliced in as JSON literals, so strings arrive quoted
			val code = JsonCodec encodeShort value
			raw.replace(pattern, code)
		}

	//------------------------------------------------------------------------------
	//## message transfer

	private object MyWriters
	extends JsonWriters

	/** establish a new Conversation */
	// Body is the client's plain-text version string; on a match a fresh
	// conversation id is issued, otherwise the client is told to upgrade.
	private def hi(request:HttpRequest):HttpResponder = {
		// BETTER send Json data here
		val action:Action[HttpResponder] =
			for {
				clientVersion <- handleException(bodyString(request), Forbidden, "unreadable message")
			}
			yield (clientVersion == serverVersion).cata (
				Upgrade,
				Connected(context.createConversation())
			)
		action.log foreach { ERROR(_*) }
		action.responder
	}

	/** receive and send messages for a single Conversation */
	// Request body: JSON with conversation id, both continuation counters and a
	// batch of client messages. Answers immediately when anything is pending,
	// otherwise parks the response until output arrives or the continuation TTL fires.
	private def comm(request:HttpRequest):HttpResponder = {
		import MyWriters.given
		val action:Action[HttpResponder] =
			for {
				json <- handleException( bodyString(request), Forbidden, "unreadable message")
				data <- handleDecode( JsonCodec.decode(json), Forbidden, "invalid message")
				conversationId <- handleNone( (data / "conversation").string, Forbidden, "conversationId missing") map ConversationId.apply
				clientCont <- handleNone( (data / "clientCont").toLong, Forbidden, "clientCont missing")
				serverCont <- handleNone( (data / "serverCont").toLong, Forbidden, "serverCont missing")
				incoming <- handleNone( (data / "messages").arraySeq, Forbidden, "messages missing")
				conversation <- handleNone( context.findConversation(conversationId), Disconnected, "unknown conversation")
			}
			yield {
				conversation.touch()
				// tell the client it's alive
				conversation.handleHeartbeat()
				// give new messages to the client
				conversation.handleIncoming(incoming, clientCont)
				// Builds the JSON response echoing clientCont and carrying the outgoing batch.
				def compileResponse(batch:Batch):HttpResponse = {
					val json =
						jsonObject(
							"clientCont" -> clientCont,
							"serverCont" -> batch.serverCont,
							"messages" -> batch.messages
						)
					json match {
						case Validated.Valid(x) =>
							JsonOK(x)
						case Validated.Invalid(errors) =>
							ERROR("json creation failed", errors)
							EmptyStatus(INTERNAL_SERVER_ERROR)
					}
				}
				// maybe there already are new messages, if not, we have to wait
				val fromConversation = conversation fetchOutgoing serverCont
				if (fromConversation.messages.nonEmpty || incoming.nonEmpty) {
					HttpResponder.sync(compileResponse(fromConversation))
				}
				else {
					// park the request; on timeout re-fetch and answer with whatever is there
					val (responder, send) =
						HttpResponder.async(
							timeout = Constants.continuationTTL,
							timeoutResponse = thunk {
								compileResponse(conversation fetchOutgoing serverCont)
							},
							errorResponse = thunk {
								EmptyStatus(INTERNAL_SERVER_ERROR)
							}
						)
					// wake the parked request as soon as the conversation has output
					conversation onHasOutgoing thunk {
						send(compileResponse(conversation fetchOutgoing serverCont))
					}
					responder
				}
			}
		action.log foreach { ERROR(_*) }
		action.responder
	}

	// Reads the whole request body as text using the protocol encoding.
	private def bodyString(request:HttpRequest):Either[Exception,String] =
		Catch.exception in (request.body readString Constants.encoding)

	//------------------------------------------------------------------------------
	//## helper

	// Protocol status words sent as the first token of plain-text replies.
	private val CONNECTED_TEXT = "OK"
	private val DISCONNECTED_TEXT = "CONNECT"
	private val UPGRADED_TEXT = "VERSION"

	private val Forbidden:HttpResponder =
			HttpResponder.sync(EmptyStatus(FORBIDDEN))

	private def ClientCode(code:String):HttpResponder =
			HttpResponder.sync(StringOK(code, text_javascript))

	private def Connected(conversationId:ConversationId):HttpResponder =
			SendPlainTextCharset(CONNECTED_TEXT + " " + conversationId.value)

	private def Upgrade:HttpResponder =
			SendPlainTextCharset(UPGRADED_TEXT + " " + serverVersion)

	private val Disconnected:HttpResponder =
			SendPlainTextCharset(DISCONNECTED_TEXT)

	private def SendPlainTextCharset(s:String):HttpResponder =
			HttpResponder.sync(StringOK(s, text_plain))

	//------------------------------------------------------------------------------

	// 200 response carrying compact JSON; caching disabled for all protocol answers.
	private def JsonOK(json:JsonValue):HttpResponse =
			HttpResponse(
				OK, None,
				DisableCaching ++
				HeaderValues(
					ContentType(application_json)
				),
				HttpOutput.writeString(
					Charsets.utf_8,
					JsonCodec encodeShort json
				)
			)

	private def StringOK(text:String, contentType:MimeType):HttpResponse =
			HttpResponse(
				OK, None,
				DisableCaching ++
				HeaderValues(
					ContentType(contentType.addParameter("charset", Constants.encoding.name))
				),
				HttpOutput.writeString(Constants.encoding, text)
			)

	private def EmptyStatus(status:HttpStatus):HttpResponse =
			HttpResponse(
				status, None,
				DisableCaching,
				HttpOutput.empty
			)

	//------------------------------------------------------------------------------
	// Lift the various failure shapes into Action, pairing each failure with the
	// HTTP responder to answer with and a log message.

	private def handleDecode[T](result:Either[JsonDecodeFailure,T], responder:HttpResponder, text:String):Action[T] =
			Action(
				result leftMap { e => (responder, Problem.Plain(show"${text}: expected ${e.expectation} at ${e.offset}")) }
			)

	private def handleException[T](result:Either[Exception,T], responder:HttpResponder, text:String):Action[T] =
			Action(
				result leftMap { e => (responder, Problem.Exceptional(text, e)) }
			)

	private def handleNone[T](result:Option[T], responder:HttpResponder, text:String):Action[T] =
			Action(
				result toRight ((responder, Problem.Plain(text)))
			)
}
| ritschwumm/rumms | src/main/scala/rumms/impl/RummsHandler.scala | Scala | bsd-2-clause | 8,027 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.consumer
import kafka.api._
import kafka.network._
import kafka.utils._
import kafka.common.{ErrorMapping, TopicAndPartition}
/**
* A consumer of kafka messages
*/
@threadsafe
class SimpleConsumer(val host: String,
                     val port: Int,
                     val soTimeout: Int,
                     val bufferSize: Int,
                     val clientId: String) extends Logging {
  ConsumerConfig.validateClientId(clientId)
  // Serializes all connect/disconnect/send-receive sequences so one request's
  // response can never be interleaved with another thread's traffic.
  private val lock = new Object()
  private val blockingChannel = new BlockingChannel(host, port, bufferSize, BlockingChannel.UseDefaultBufferSize, soTimeout)
  val brokerInfo = "host_%s-port_%s".format(host, port)
  private val fetchRequestAndResponseStats = FetchRequestAndResponseStatsRegistry.getFetchRequestAndResponseStats(clientId)
  private var isClosed = false

  // NOTE(review): `close` here resolves to the public close() method, which also
  // sets isClosed = true under the lock. After the first connect(),
  // getOrMakeConnection() will therefore never auto-connect again (only the
  // explicit reconnect() retry path re-establishes the channel) — confirm intended.
  private def connect(): BlockingChannel = {
    close
    blockingChannel.connect()
    blockingChannel
  }

  private def disconnect() = {
    if(blockingChannel.isConnected) {
      debug("Disconnecting from " + host + ":" + port)
      blockingChannel.disconnect()
    }
  }

  private def reconnect() {
    disconnect()
    connect()
  }

  // Marks the consumer closed and tears down the channel; no further implicit reconnects.
  def close() {
    lock synchronized {
      disconnect()
      isClosed = true
    }
  }

  // Sends one request and blocks for its response, retrying exactly once on IOException
  // after a reconnect; a second IOException disconnects and is rethrown to the caller.
  private def sendRequest(request: RequestOrResponse): Receive = {
    lock synchronized {
      getOrMakeConnection()
      var response: Receive = null
      try {
        blockingChannel.send(request)
        response = blockingChannel.receive()
      } catch {
        case e : java.io.IOException =>
          info("Reconnect due to socket error: ", e)
          // retry once
          try {
            reconnect()
            blockingChannel.send(request)
            response = blockingChannel.receive()
          } catch {
            case ioe: java.io.IOException =>
              disconnect()
              throw ioe
          }
        // NOTE(review): bare rethrow — catches every other Throwable only to
        // rethrow it unchanged, so this arm is effectively a no-op.
        case e => throw e
      }
      response
    }
  }

  def send(request: TopicMetadataRequest): TopicMetadataResponse = {
    val response = sendRequest(request)
    TopicMetadataResponse.readFrom(response.buffer)
  }

  /**
   *  Fetch a set of messages from a topic.
   *
   *  @param request  specifies the topic name, topic partition, starting byte offset, maximum bytes to be fetched.
   *  @return a set of fetched messages
   */
  def fetch(request: FetchRequest): FetchResponse = {
    var response: Receive = null
    // timing is recorded both per-broker and aggregated over all brokers
    val specificTimer = fetchRequestAndResponseStats.getFetchRequestAndResponseStats(brokerInfo).requestTimer
    val aggregateTimer = fetchRequestAndResponseStats.getFetchRequestAndResponseAllBrokersStats.requestTimer
    aggregateTimer.time {
      specificTimer.time {
        response = sendRequest(request)
      }
    }
    val fetchResponse = FetchResponse.readFrom(response.buffer)
    val fetchedSize = fetchResponse.sizeInBytes
    fetchRequestAndResponseStats.getFetchRequestAndResponseStats(brokerInfo).requestSizeHist.update(fetchedSize)
    fetchRequestAndResponseStats.getFetchRequestAndResponseAllBrokersStats.requestSizeHist.update(fetchedSize)
    fetchResponse
  }

  /**
   *  Get a list of valid offsets (up to maxSize) before the given time.
   *  @param request a [[kafka.api.OffsetRequest]] object.
   *  @return a [[kafka.api.OffsetResponse]] object.
   */
  def getOffsetsBefore(request: OffsetRequest) = OffsetResponse.readFrom(sendRequest(request).buffer)

  private def getOrMakeConnection() {
    if(!isClosed && !blockingChannel.isConnected) {
      connect()
    }
  }

  /**
   * Get the earliest or latest offset of a given topic, partition.
   * @param topicAndPartition Topic and partition of which the offset is needed.
   * @param earliestOrLatest A value to indicate earliest or latest offset.
   * @param consumerId Id of the consumer which could be a consumer client, SimpleConsumerShell or a follower broker.
   * @return Requested offset.
   */
  def earliestOrLatestOffset(topicAndPartition: TopicAndPartition, earliestOrLatest: Long, consumerId: Int): Long = {
    val request = OffsetRequest(requestInfo = Map(topicAndPartition -> PartitionOffsetRequestInfo(earliestOrLatest, 1)),
                                clientId = clientId,
                                replicaId = consumerId)
    val partitionErrorAndOffset = getOffsetsBefore(request).partitionErrorAndOffsets(topicAndPartition)
    val offset = partitionErrorAndOffset.error match {
      case ErrorMapping.NoError => partitionErrorAndOffset.offsets.head
      case _ => throw ErrorMapping.exceptionFor(partitionErrorAndOffset.error)
    }
    offset
  }
}
| Digsolab/kafka | core/src/main/scala/kafka/consumer/SimpleConsumer.scala | Scala | apache-2.0 | 5,412 |
package recfun
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class CountChangeSuite extends FunSuite {
  import Main.countChange

  // Shared fixtures: Swiss franc denominations, once sorted and once shuffled,
  // to check that countChange is insensitive to coin ordering.
  private val chfSorted   = List(5, 10, 20, 50, 100, 200, 500)
  private val chfShuffled = List(500, 5, 50, 100, 20, 200, 10)

  test("countChange: example given in instructions") {
    assert(countChange(4, List(1, 2)) === 3)
  }

  test("countChange: sorted CHF") {
    assert(countChange(300, chfSorted) === 1022)
  }

  test("countChange: no pennies") {
    // 301 is not representable with coins of 5 and above
    assert(countChange(301, chfSorted) === 0)
  }

  test("countChange: unsorted CHF") {
    assert(countChange(300, chfShuffled) === 1022)
  }
}
| giovannidoni/Scala-course-1 | week1/src/test/scala/recfun/CountChangeSuite.scala | Scala | gpl-3.0 | 647 |
package de.kaufhof.hajobs
import java.util.UUID
import play.api.Logger
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.libs.json.Json.toJsFieldJsValueWrapper
import play.api.libs.json._
import play.api.mvc._
import scala.concurrent.Future
import scala.language.postfixOps
import scala.util.Try
import scala.util.control.NonFatal
class JobsController(jobManager: JobManager,
jobTypes: JobTypes,
// We don't have a reverse router yet, this needs to be supplied by the app
reverseRouter: {
def status(jobType: String, jobId: String): Call
}) extends Controller {
  private val logger = Logger(getClass)

  /** Builds the URL of the status endpoint for one job execution via the app-supplied reverse router. */
  private def statusUrl(jobType: JobType, jobId: UUID): String = {
    reverseRouter.status(jobType.name, jobId.toString).url
  }
/**
* Determines the latest job execution and redirects (temporarily, 307) to its status details url.
* If no job executions are found 404 is returned.
*/
def latest(jobTypeString: String): Action[AnyContent] = Action.async {
jobTypes(jobTypeString).map { jobType =>
val jobStatusFuture: Future[List[JobStatus]] = jobManager.allJobStatus(jobType, limit = 1)
jobStatusFuture.map(_.headOption).map {
case Some(j: JobStatus) =>
TemporaryRedirect(statusUrl(jobType, j.jobId))
case None => NotFound
}
}.getOrElse(Future.successful(NotFound))
}
/**
* Returns the status details for the given job type and job id.
*/
def status(jobTypeString: String, jobIdAsString: String): Action[AnyContent] = Action.async {
Try(UUID.fromString(jobIdAsString)).map { uuid =>
jobTypes(jobTypeString).map { jobType =>
val jobStatusFuture: Future[Option[JobStatus]] = jobManager.jobStatus(jobType, uuid)
jobStatusFuture.map {
case Some(j: JobStatus) => Ok(Json.toJson(j))
case None => NotFound
}
}.getOrElse(Future.successful(NotFound))
}.getOrElse(Future.successful(NotFound))
}
/**
* Returns the list of job executions for the given job type.
*/
def list(jobTypeString: String, limit: Int = 20): Action[AnyContent] = Action.async {
jobTypes(jobTypeString).map { jobType =>
val jobStatusFuture: Future[List[JobStatus]] = jobManager.allJobStatus(jobType, limit)
jobStatusFuture.map { jobs =>
Ok(Json.obj("jobs" -> jobs, "latest" -> jobs.headOption.map(job => statusUrl(jobType, job.jobId))))
}
}.getOrElse(Future.successful(NotFound))
}
/**
* Starts the execution of the given job type.
*/
def run(jobTypeString: String): Action[AnyContent] = Action.async { implicit request =>
jobTypes(jobTypeString).map { jobType =>
jobManager.triggerJob(jobType).map {
case Started(newJobId, None) =>
Created(Json.obj("status" -> "OK"))
.withHeaders(("Location", statusUrl(jobType, newJobId)))
case Started(newJobId, Some(message)) =>
Created(Json.obj("status" -> "OK", "message" -> message))
.withHeaders(("Location", statusUrl(jobType, newJobId)))
case LockedStatus(runningJobId) =>
val conflict = Conflict(Json.obj("status" -> "KO", "message" -> s"The job's already running"))
runningJobId.map(id =>
conflict.withHeaders(("Location", statusUrl(jobType, id)))
).getOrElse(
conflict
)
case Error(message) =>
InternalServerError(Json.obj("status" -> "KO", "message" -> s"$message"))
}.recover {
case NonFatal(e) =>
logger.error("Error when starting job", e)
InternalServerError(s"Error when starting job: $e")
}
}.getOrElse(Future.successful(NotFound))
}
} | MarcoPriebe/ha-jobs | ha-jobs-play/src/main/scala/de/kaufhof/hajobs/JobsController.scala | Scala | apache-2.0 | 3,815 |
package name.abhijitsarkar.scauth.model
case class TwitterUser(name: String, screenName: String, location: String, numFollowers: Long) | asarkar/akka | scauth/src/test/scala/name/abhijitsarkar/scauth/model/TwitterUser.scala | Scala | gpl-3.0 | 135 |
/*
* Copyright 2015 Andrew Gibson
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.andrewresearch
import org.bson.types.ObjectId
import scala.collection.immutable.ListMap
/**
* These are the data classes and objects required by the Analyser classes
* Note that Reflection is just a stub
* Created by Andrew Gibson on 15/07/15.
*/
// A tagged phrase within a sentence; `start`/`end` delimit its span in the
// sentence (offset unit is not established in this file — presumably character
// or token positions; confirm against the coder that produces them).
case class SentencePhrase(phraseType: String, phrase: String, start: Int, end: Int)
// One coded sentence: its index within the reflection, the raw sentence text,
// the tag sets assigned by the coder, self/others ratios, and tagged phrases.
case class CodedSentence(index: Int, sentence: String, metacognitionTags:Array[String], subTags:Array[String], phraseTags:Array[String], selfRatio:Double, othersRatio:Double, phrases:Array[SentencePhrase])
// Coding result for a single reflection; mutable state lives in the mixed-in
// GenericCoding / SentenceCoding traits.
case class ReflectionCoded(reflectionId:ObjectId,authorId:ObjectId) extends GenericCoding with SentenceCoding
object ReflectionCoded {
  // Factory: copies identifying fields from the reflection, then mutates the
  // fresh instance with the reference code and coded sentences.
  def apply(reflection: Reflection, referenceCode: String, codedSentences: Seq[CodedSentence]) = {
    val rc = new ReflectionCoded(reflection._id,reflection.authorId)
    rc.organisationDataId = reflection.organisationDataId
    rc.referenceCode = referenceCode
    rc.setCodedSentences(codedSentences)
    rc
  }
}
// Note: This is just a stub - a reflection may be more substantial than this
class Reflection {
  // Fresh ObjectId per instance; serves as the identity of this reflection.
  val _id:ObjectId = new ObjectId()
  var lastAnalysed:Long = _        // defaults to 0 until set (time semantics not shown in this file)
  var authorId:ObjectId = _
  var organisationDataId:ObjectId = _
}
// Identity and classification fields shared by all coding result types.
trait GenericCoding {
  // Fresh ObjectId generated when the mixing-in instance is constructed.
  val _id:ObjectId = new ObjectId()
  var organisationDataId:ObjectId = _
  var referenceCode:String = _
  var classification:String = _
}
// Mutable holder for per-sentence coding results plus aggregate tag counts
// derived from them in setCodedSentences.
trait SentenceCoding {
  var sentences:Array[String] = Array()
  var posTags:Array[Array[String]] = Array()
  var metaTags:Array[String] = Array()
  var codedSentences:Array[CodedSentence] = Array()
  var phraseTagsCount:Int = 0
  var subTagsCount:Int = 0
  var metaTagsCount:Int = 0
  var subTagDensity:Double = 0.0
  // Stores the coded sentences and recomputes the aggregates:
  // - phraseTagsCount: distinct phrase tags, taken only from sentences that
  //   actually contain phrases;
  // - metaTags: all metacognition tags concatenated (duplicates kept), while
  //   metaTagsCount counts the distinct ones;
  // - subTagsCount: distinct sub tags across all sentences.
  def setCodedSentences(codedSentences:Seq[CodedSentence]) = {
    phraseTagsCount = codedSentences.map(cs => if(cs.phrases.nonEmpty) cs.phraseTags.toSeq else Seq[String]()).foldLeft(Seq[String]())(_++_).distinct.length
    metaTags = codedSentences.map(cs => cs.metacognitionTags.toList).foldLeft(List[String]())(_++_).toArray
    metaTagsCount = metaTags.distinct.size
    val subTags = codedSentences.map(cs => cs.subTags.toList).foldLeft(List[String]())(_++_)
    subTagsCount = subTags.distinct.size
    // Density = total sub-tag occurrences / ln(sentence count). The `> 1`
    // guard avoids dividing by ln(1) == 0 and yields 0.0 for 0 or 1 sentences.
    subTagDensity = if(codedSentences.size > 1) ((subTags.size.toDouble) / Math.log(codedSentences.size)) else 0.0
    this.codedSentences = codedSentences.toArray
  }
  def getCodedSentences:Seq[CodedSentence] = this.codedSentences.toSeq
  def setSentences(sentences: Seq[String]) = {
    this.sentences = sentences.toArray
  }
  // Index -> sentence; ListMap preserves insertion (i.e. sentence) order.
  def getSentences:Map[Int,String] = ListMap(sentences.indices zip sentences:_*)
  def setPosTags(posTags:Seq[Array[String]]) = {
    this.posTags = posTags.toArray
  }
  // Index -> POS tags for that sentence, in sentence order.
  def getPosTags:Map[Int,Array[String]] = ListMap(posTags.indices zip posTags:_*)
}
| andrewresearch/metacognitive_reflection | src/main/scala/net/andrewresearch/Data.scala | Scala | apache-2.0 | 3,447 |
/* sbt -- Simple Build Tool
* Copyright 2008, 2009 Mark Harrah
*/
package sbt
import jline.console.ConsoleReader
import jline.console.history.{ FileHistory, MemoryHistory }
import java.io.{ File, InputStream, PrintWriter }
import complete.Parser
import java.util.concurrent.atomic.AtomicBoolean
/**
 * Base line reader built on JLine's ConsoleReader. Concrete subclasses supply
 * the configured `reader` and decide whether SIGCONT (resume after ctrl-z)
 * should be handled.
 */
abstract class JLine extends LineReader {
  protected[this] val handleCONT: Boolean
  protected[this] val reader: ConsoleReader
  // All reads go through JLine.withJLine, which synchronizes on the terminal
  // and initializes/restores it around the read.
  def readLine(prompt: String, mask: Option[Char] = None) = JLine.withJLine { unsynchronizedReadLine(prompt, mask) }
  // A null result signals end-of-input; otherwise the line is trimmed.
  private[this] def unsynchronizedReadLine(prompt: String, mask: Option[Char]) =
    readLineWithHistory(prompt, mask) match {
      case null => None
      case x => Some(x.trim)
    }
  // If history is file-backed, flush it after every read so entries survive
  // abnormal termination.
  private[this] def readLineWithHistory(prompt: String, mask: Option[Char]): String =
    reader.getHistory match {
      case fh: FileHistory =>
        try { readLineDirect(prompt, mask) }
        finally { fh.flush() }
      case _ => readLineDirect(prompt, mask)
    }
  // Optionally install a SIGCONT handler so the terminal is re-initialized and
  // the current line redrawn when the process is resumed from the background.
  private[this] def readLineDirect(prompt: String, mask: Option[Char]): String =
    if (handleCONT)
      Signals.withHandler(() => resume(), signal = Signals.CONT)(() => readLineDirectRaw(prompt, mask))
    else
      readLineDirectRaw(prompt, mask)
  private[this] def readLineDirectRaw(prompt: String, mask: Option[Char]): String =
    {
      val newprompt = handleMultilinePrompt(prompt)
      mask match {
        case Some(m) => reader.readLine(newprompt, m)
        case None => reader.readLine(newprompt)
      }
    }
  // JLine only supports single-line prompts: print all but the last prompt
  // line ourselves and hand only the final line to the reader.
  private[this] def handleMultilinePrompt(prompt: String): String = {
    val lines = """\\r?\\n""".r.split(prompt)
    lines.size match {
      case 0 | 1 => prompt
      case _ => reader.print(lines.init.mkString("\\n") + "\\n"); lines.last;
    }
  }
  // SIGCONT handler: reset and re-initialize the terminal, then redraw the
  // partially entered line.
  private[this] def resume() {
    jline.TerminalFactory.reset
    JLine.terminal.init
    reader.drawLine()
    reader.flush()
  }
}
/** Shared terminal management and ConsoleReader construction for all readers. */
private object JLine {
  private[this] val TerminalProperty = "jline.terminal"
  // Normalize the terminal system property once, at class-load time.
  fixTerminalProperty()
  // translate explicit class names to type in order to support
  // older Scala, since it shaded classes but not the system property
  private[sbt] def fixTerminalProperty() {
    val newValue = System.getProperty(TerminalProperty) match {
      case "jline.UnixTerminal" => "unix"
      case null if System.getProperty("sbt.cygwin") != null => "unix"
      case "jline.WindowsTerminal" => "windows"
      case "jline.AnsiWindowsTerminal" => "windows"
      case "jline.UnsupportedTerminal" => "none"
      case x => x
    }
    if (newValue != null) System.setProperty(TerminalProperty, newValue)
  }
  // When calling this, ensure that enableEcho has been or will be called.
  // TerminalFactory.get will initialize the terminal to disable echo.
  private def terminal = jline.TerminalFactory.get
  // Serializes terminal access: synchronize on this object, then on the
  // terminal instance itself.
  private def withTerminal[T](f: jline.Terminal => T): T =
    synchronized {
      val t = terminal
      t.synchronized { f(t) }
    }
  /**
   * For accessing the JLine Terminal object.
   * This ensures synchronized access as well as re-enabling echo after getting the Terminal.
   */
  def usingTerminal[T](f: jline.Terminal => T): T =
    withTerminal { t =>
      t.restore
      f(t)
    }
  def createReader(): ConsoleReader = createReader(None)
  // Builds a ConsoleReader whose history is kept in memory or, when a path is
  // given, persisted to that file.
  def createReader(historyPath: Option[File]): ConsoleReader =
    usingTerminal { t =>
      val cr = new ConsoleReader
      cr.setExpandEvents(false) // https://issues.scala-lang.org/browse/SI-7650
      cr.setBellEnabled(false)
      val h = historyPath match {
        case None => new MemoryHistory
        case Some(file) => new FileHistory(file)
      }
      h.setMaxSize(MaxHistorySize)
      cr.setHistory(h)
      cr
    }
  // Runs `action` with the terminal initialized, always restoring it afterwards.
  def withJLine[T](action: => T): T =
    withTerminal { t =>
      t.init
      try { action }
      finally { t.restore }
    }
  def simple(historyPath: Option[File], handleCONT: Boolean = HandleCONT): SimpleReader = new SimpleReader(historyPath, handleCONT)
  val MaxHistorySize = 500
  // SIGCONT handling can be disabled via -Dsbt.disable.cont=true and requires
  // platform support for the CONT signal.
  val HandleCONT = !java.lang.Boolean.getBoolean("sbt.disable.cont") && Signals.supported(Signals.CONT)
}
/** Minimal interface for reading one line of input, optionally masked (e.g. passwords). */
trait LineReader {
  def readLine(prompt: String, mask: Option[Char] = None): Option[String]
}
/** Reader with tab-completion driven by the given `complete` parser. */
final class FullReader(historyPath: Option[File], complete: Parser[_], val handleCONT: Boolean = JLine.HandleCONT) extends JLine {
  protected[this] val reader =
    {
      val cr = JLine.createReader(historyPath)
      // Wire the sbt completion parser into JLine's completion mechanism.
      sbt.complete.JLineCompletion.installCustomCompletor(cr, complete)
      cr
    }
}
/** Plain reader without completion; history is persisted only when `historyPath` is given. */
class SimpleReader private[sbt] (historyPath: Option[File], val handleCONT: Boolean) extends JLine {
  protected[this] val reader = JLine.createReader(historyPath)
}
/** Default simple reader: in-memory history only. */
object SimpleReader extends SimpleReader(None, JLine.HandleCONT)
| pdalpra/sbt | util/complete/src/main/scala/sbt/LineReader.scala | Scala | bsd-3-clause | 4,765 |
package actors
import akka.actor._
import akka.event.Logging
import akka.routing.SmallestMailboxRouter
import models._
/** Cake-pattern component providing an actor that sends SMS price alerts. */
trait NotificatorComponent{
  this:ComponentSystem =>
  // Two notifier actors behind a smallest-mailbox router share the sending load.
  val notificatorActorRef = system.actorOf(Props(new NotificatorActor).withRouter(
    SmallestMailboxRouter(nrOfInstances = 2)))
  class NotificatorActor extends Actor{
    val log = Logging(context.system, this)
    def receive={
      // On Notify: log the alert details, then send the SMS to the user.
      case Notify(user:User, cheapestPrice:CheapestPrice, searchRequest:SearchRequest, bestPrice:Float)=>{
        log.info("sending to phonenumber "+ user.phoneNumber + "for "+searchRequest.origin +" to "+ searchRequest.destination + " with "+
        "price "+cheapestPrice.price + " at "+cheapestPrice.deeplinkUrl + "with bestPrice "+bestPrice)
        //println("sending to phonenumber "+ user.phoneNumber + "for "+searchRequest.origin +" to "+ searchRequest.destination + " with "+
        //"price "+cheapestPrice.price + " at "+cheapestPrice.deeplinkUrl)
        controllers.Sms.sendPriceAlert(user.phoneNumber, cheapestPrice.price, bestPrice, cheapestPrice.deeplinkUrl)
      }
    }
  }
}
| soulofpeace/FareHound | app/actors/NotificatorComponent.scala | Scala | apache-2.0 | 1,113 |
package chapter.twentyone
import ExerciseSeven._
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith
@RunWith(classOf[JUnitRunner])
class ExerciseSevenSpec extends FlatSpec with Matchers {
  // Placeholder spec: registers a single behaviour with an empty name and no
  // assertions, so it always passes. TODO: add real checks for ExerciseSeven.
  "function" should "" in {
  }
}
| deekim/impatient-scala | src/test/scala/chapter/twentyone/ExerciseSevenSpec.scala | Scala | apache-2.0 | 269 |
package models.attribute
import controllers.helper.GoogleMapsHelper
import models.label._
import models.region.{Region, RegionTable}
import models.street.{OsmWayStreetEdgeTable}
import models.street.{StreetEdgeTable}
import models.utils.MyPostgresDriver.simple._
import play.api.Play.current
import play.api.db.slick
import play.api.libs.json.{JsObject, Json}
import play.extras.geojson
import scala.slick.lifted.{ForeignKeyQuery, ProvenShape, Tag}
import scala.language.postfixOps
/**
 * A clustered accessibility attribute: the outcome of clustering user labels
 * of one label type at a location (lat/lng) on a street edge within a region.
 * `clusteringThreshold` is the threshold used by the clustering session that
 * produced it (its exact semantics are defined by the clustering code, not
 * shown in this file). `severity` is optional.
 */
case class GlobalAttribute(globalAttributeId: Int,
                           globalClusteringSessionId: Int,
                           clusteringThreshold: Float,
                           labelTypeId: Int,
                           streetEdgeId: Int,
                           regionId: Int,
                           lat: Float, lng: Float,
                           severity: Option[Int],
                           temporary: Boolean)
/**
 * Projection of a global attribute for the public API: the attribute itself
 * plus summed validation counts of its underlying labels and street /
 * neighborhood context.
 */
case class GlobalAttributeForAPI(val globalAttributeId: Int,
                                 val labelType: String,
                                 val lat: Float, val lng: Float,
                                 val severity: Option[Int],
                                 val temporary: Boolean,
                                 val agreeCount: Int,
                                 val disagreeCount: Int,
                                 val notsureCount: Int,
                                 val streetEdgeId: Int,
                                 val osmStreetId: Int,
                                 val neighborhoodName: String) {
  /** Renders this attribute as a GeoJSON Feature with a point geometry at (lat, lng). */
  def toJSON: JsObject = {
    Json.obj(
      "type" -> "Feature",
      "geometry" -> geojson.Point(geojson.LatLng(lat.toDouble, lng.toDouble)),
      "properties" -> Json.obj(
        "attribute_id" -> globalAttributeId,
        "label_type" -> labelType,
        "street_edge_id" -> streetEdgeId,
        "osm_street_id" -> osmStreetId,
        "neighborhood" -> neighborhoodName,
        "severity" -> severity,
        "is_temporary" -> temporary,
        "agree_count" -> agreeCount,
        "disagree_count" -> disagreeCount,
        "notsure_count" -> notsureCount
      )
    )
  }
  // CSV-style row of this attribute's fields. Element types are mixed
  // (Int/String), so this is inferred as Array[Any]; missing severity is
  // rendered as "NA".
  val attributesToArray = Array(globalAttributeId, labelType, streetEdgeId, osmStreetId, neighborhoodName, lat.toString,
                                lng.toString, severity.getOrElse("NA").toString, temporary.toString,
                                agreeCount.toString, disagreeCount.toString, notsureCount.toString)
}
/**
 * A global attribute paired with one of the underlying labels that formed it,
 * including the label's Street View camera parameters, for the public API.
 */
case class GlobalAttributeWithLabelForAPI(val globalAttributeId: Int,
                                          val labelType: String,
                                          val attributeLatLng: (Float, Float),
                                          val attributeSeverity: Option[Int],
                                          val attributeTemporary: Boolean,
                                          val streetEdgeId: Int,
                                          val osmStreetId: Int,
                                          val neighborhoodName: String,
                                          val labelId: Int,
                                          val labelLatLng: (Float, Float),
                                          val gsvPanoramaId: String,
                                          val heading: Float,
                                          val pitch: Float,
                                          val zoom: Int,
                                          val canvasXY: (Int, Int),
                                          val canvasWidth: Int, val canvasHeight: Int,
                                          val agreeCount: Int,
                                          val disagreeCount: Int,
                                          val notsureCount: Int,
                                          val labelSeverity: Option[Int],
                                          val labelTemporary: Boolean) {
  // Static Street View image URL for this label's pano and camera parameters.
  // stripMargin + replaceAll collapses the multi-line template into one line;
  // YOUR_API_KEY / YOUR_SIGNATURE are placeholders for the API consumer to fill in.
  val gsvUrl = s"""https://maps.googleapis.com/maps/api/streetview?
                  |size=${canvasWidth}x${canvasHeight}
                  |&pano=${gsvPanoramaId}
                  |&heading=${heading}
                  |&pitch=${pitch}
                  |&fov=${GoogleMapsHelper.getFov(zoom)}
                  |&key=YOUR_API_KEY
                  |&signature=YOUR_SIGNATURE""".stripMargin.replaceAll("\\n", "")
  /** Renders a GeoJSON Feature with the attribute's point geometry, plus the
    * label's own point under "label_geometry". */
  def toJSON: JsObject = {
    Json.obj(
      "type" -> "Feature",
      "geometry" -> geojson.Point(geojson.LatLng(attributeLatLng._1.toDouble, attributeLatLng._2.toDouble)),
      "label_geometry" -> geojson.Point(geojson.LatLng(labelLatLng._1.toDouble, labelLatLng._2.toDouble)),
      "properties" -> Json.obj(
        "attribute_id" -> globalAttributeId,
        "label_type" -> labelType,
        "street_edge_id" -> streetEdgeId,
        "osm_street_id" -> osmStreetId,
        "neighborhood" -> neighborhoodName,
        "severity" -> attributeSeverity,
        "is_temporary" -> attributeTemporary,
        "label_id" -> labelId,
        "gsv_panorama_id" -> gsvPanoramaId,
        "heading" -> heading,
        "pitch" -> pitch,
        "zoom" -> zoom,
        "canvas_x" -> canvasXY._1,
        "canvas_y" -> canvasXY._2,
        "canvas_width" -> canvasWidth,
        "canvas_height" -> canvasHeight,
        "gsv_url" -> gsvUrl,
        "label_severity" -> labelSeverity,
        "label_is_temporary" -> labelTemporary,
        "agree_count" -> agreeCount,
        "disagree_count" -> disagreeCount,
        "notsure_count" -> notsureCount
      )
    )
  }
  // CSV-style row; the URL is quoted because it may contain commas/special chars.
  val attributesToArray = Array(globalAttributeId.toString, labelType, attributeSeverity.getOrElse("NA").toString,
                                attributeTemporary.toString, streetEdgeId.toString, osmStreetId.toString,
                                neighborhoodName, labelId.toString, gsvPanoramaId, attributeLatLng._1.toString,
                                attributeLatLng._2.toString, labelLatLng._1.toString, labelLatLng._2.toString,
                                heading.toString, pitch.toString, zoom.toString, canvasXY._1.toString,
                                canvasXY._2.toString, canvasWidth.toString, canvasHeight.toString, "\\"" + gsvUrl + "\\"",
                                labelSeverity.getOrElse("NA").toString, labelTemporary.toString, agreeCount.toString,
                                disagreeCount.toString, notsureCount.toString)
}
/** Slick table mapping for sidewalk.global_attribute. */
class GlobalAttributeTable(tag: Tag) extends Table[GlobalAttribute](tag, Some("sidewalk"), "global_attribute") {
  def globalAttributeId: Column[Int] = column[Int]("global_attribute_id", O.NotNull, O.PrimaryKey, O.AutoInc)
  def globalClusteringSessionId: Column[Int] = column[Int]("global_clustering_session_id", O.NotNull)
  def clusteringThreshold: Column[Float] = column[Float]("clustering_threshold", O.NotNull)
  def labelTypeId: Column[Int] = column[Int]("label_type_id", O.NotNull)
  def streetEdgeId: Column[Int] = column[Int]("street_edge_id", O.NotNull)
  def regionId: Column[Int] = column[Int]("region_id", O.NotNull)
  def lat: Column[Float] = column[Float]("lat", O.NotNull)
  def lng: Column[Float] = column[Float]("lng", O.NotNull)
  def severity: Column[Option[Int]] = column[Option[Int]]("severity")
  def temporary: Column[Boolean] = column[Boolean]("temporary", O.NotNull)
  // Bidirectional mapping between table rows and the GlobalAttribute case class.
  def * : ProvenShape[GlobalAttribute] = (globalAttributeId,
                                          globalClusteringSessionId,
                                          clusteringThreshold,
                                          labelTypeId,
                                          streetEdgeId,
                                          regionId,
                                          lat, lng,
                                          severity,
                                          temporary) <>
    ((GlobalAttribute.apply _).tupled, GlobalAttribute.unapply)
  // Foreign keys to label_type, region and global_clustering_session.
  def labelType: ForeignKeyQuery[LabelTypeTable, LabelType] =
    foreignKey("global_attribute_label_type_id_fkey", labelTypeId, TableQuery[LabelTypeTable])(_.labelTypeId)
  def region: ForeignKeyQuery[RegionTable, Region] =
    foreignKey("global_attribute_region_id_fkey", regionId, TableQuery[RegionTable])(_.regionId)
  def globalClusteringSession: ForeignKeyQuery[GlobalClusteringSessionTable, GlobalClusteringSession] =
    foreignKey("global_attribute_global_clustering_session_id_fkey", globalClusteringSessionId, TableQuery[GlobalClusteringSessionTable])(_.globalClusteringSessionId)
}
/**
 * Data access object for the GlobalAttributeTable table.
 */
object GlobalAttributeTable {
  val db: slick.Database = play.api.db.slick.DB
  val globalAttributes: TableQuery[GlobalAttributeTable] = TableQuery[GlobalAttributeTable]
  /** Returns every row of global_attribute. */
  def getAllGlobalAttributes: List[GlobalAttribute] = db.withTransaction { implicit session =>
    globalAttributes.list
  }
  // Parses an optional string to an optional Int.
  // NOTE(review): a None input yields Some(-1) (via the "-1" default) rather
  // than None, and any Exception is swallowed to None. The bounding-box
  // queries below only reach the comparison when `severity` is defined, so
  // this is harmless there — confirm before reusing elsewhere.
  def toInt(s: Option[String]): Option[Int] = {
    try {
      Some(s.getOrElse("-1").toInt)
    } catch {
      case e: Exception => None
    }
  }
  /**
   * Gets global attributes within a bounding box for the public API.
   */
  def getGlobalAttributesInBoundingBox(minLat: Float, minLng: Float, maxLat: Float, maxLng: Float, severity: Option[String]): List[GlobalAttributeForAPI] = db.withSession { implicit session =>
    // Sums the validations counts of the labels that make up each global attribute.
    val validationCounts = (for {
      _ga <- globalAttributes
      _gaua <- GlobalAttributeUserAttributeTable.globalAttributeUserAttributes if _ga.globalAttributeId === _gaua.globalAttributeId
      _ual <- UserAttributeLabelTable.userAttributeLabels if _gaua.userAttributeId === _ual.userAttributeId
      _l <- LabelTable.labels if _ual.labelId === _l.labelId
    } yield (_ga.globalAttributeId, _l.agreeCount, _l.disagreeCount, _l.notsureCount))
      .groupBy(_._1)
      .map { case (attrId, group) => (attrId, group.map(_._2).sum, group.map(_._3).sum, group.map(_._4).sum) }
    val attributes = for {
      _ga <- globalAttributes if _ga.lat > minLat && _ga.lat < maxLat && _ga.lng > minLng && _ga.lng < maxLng &&
        (_ga.severity.isEmpty && severity.getOrElse("") == "none" || severity.isEmpty || _ga.severity === toInt(severity))
      // The line above gets attributes with null severity if severity = "none", all attributes if severity is unspecified,
      // and attributes with the specified severity (e.g. severity = 3) otherwise.
      _vc <- validationCounts if _ga.globalAttributeId === _vc._1
      _lt <- LabelTypeTable.labelTypes if _ga.labelTypeId === _lt.labelTypeId
      _r <- RegionTable.regions if _ga.regionId === _r.regionId
      _osm <- OsmWayStreetEdgeTable.osmStreetTable if _ga.streetEdgeId === _osm.streetEdgeId
      // The generic "Problem" label type is excluded from the API output.
      if _lt.labelType =!= "Problem"
    } yield (
      _ga.globalAttributeId, _lt.labelType, _ga.lat, _ga.lng, _ga.severity, _ga.temporary,
      _vc._2.getOrElse(0), _vc._3.getOrElse(0), _vc._4.getOrElse(0), _ga.streetEdgeId, _osm.osmWayId, _r.description
    )
    attributes.list.map(GlobalAttributeForAPI.tupled)
  }
  /**
   * Gets global attributes within a bounding box with the labels that make up those attributes for the public API.
   */
  def getGlobalAttributesWithLabelsInBoundingBox(minLat: Float, minLng: Float, maxLat: Float, maxLng: Float, severity: Option[String]): List[GlobalAttributeWithLabelForAPI] = db.withSession { implicit session =>
    // Same bounding-box and severity filtering as above, but joined through to
    // each underlying label and its panorama/camera point data.
    val attributesWithLabels = for {
      _ga <- globalAttributes if _ga.lat > minLat && _ga.lat < maxLat && _ga.lng > minLng && _ga.lng < maxLng &&
        (_ga.severity.isEmpty && severity.getOrElse("") == "none" || severity.isEmpty || _ga.severity === toInt(severity))
      _lt <- LabelTypeTable.labelTypes if _ga.labelTypeId === _lt.labelTypeId
      _r <- RegionTable.regions if _ga.regionId === _r.regionId
      _gaua <- GlobalAttributeUserAttributeTable.globalAttributeUserAttributes if _ga.globalAttributeId === _gaua.globalAttributeId
      _ual <- UserAttributeLabelTable.userAttributeLabels if _gaua.userAttributeId === _ual.userAttributeId
      _l <- LabelTable.labels if _ual.labelId === _l.labelId
      _lp <- LabelTable.labelPoints if _l.labelId === _lp.labelId
      _osm <- OsmWayStreetEdgeTable.osmStreetTable if _ga.streetEdgeId === _osm.streetEdgeId
      if _lt.labelType =!= "Problem"
    } yield (
      _ga.globalAttributeId, _lt.labelType, (_ga.lat, _ga.lng), _ga.severity, _ga.temporary, _ga.streetEdgeId, _osm.osmWayId,
      _r.description, _l.labelId, (_lp.lat, _lp.lng), _l.gsvPanoramaId, _lp.heading, _lp.pitch, _lp.zoom,
      (_lp.canvasX, _lp.canvasY), _lp.canvasWidth, _lp.canvasHeight, _l.agreeCount, _l.disagreeCount, _l.notsureCount
    )
    // Left joins: label severity and temporariness are optional per-label rows.
    val withSeverity = for {
      (_l, _s) <- attributesWithLabels.leftJoin(LabelSeverityTable.labelSeverities).on(_._9 === _.labelId)
    } yield (_l._1, _l._2, _l._3, _l._4, _l._5, _l._6, _l._7, _l._8, _l._9, _l._10, _l._11, _l._12, _l._13, _l._14, _l._15, _l._16, _l._17, _l._18, _l._19, _l._20, _s.severity.?)
    val withTemporary = for {
      (_l, _t) <- withSeverity.leftJoin(LabelTemporarinessTable.labelTemporarinesses).on(_._9 === _.labelId)
    } yield (_l._1, _l._2, _l._3, _l._4, _l._5, _l._6, _l._7, _l._8, _l._9, _l._10, _l._11, _l._12, _l._13, _l._14, _l._15, _l._16, _l._17, _l._18, _l._19, _l._20, _l._21, _t.temporary.?)
    // Missing temporariness defaults to false; label point lat/lng are assumed
    // present (the .get calls would fail on NULLs — confirm DB constraints).
    withTemporary.list.map(a =>
      GlobalAttributeWithLabelForAPI(a._1, a._2, a._3, a._4, a._5, a._6, a._7, a._8, a._9, (a._10._1.get, a._10._2.get), a._11, a._12, a._13, a._14, a._15, a._16, a._17, a._18, a._19, a._20, a._21, a._22.getOrElse(false))
    )
  }
  /**
   * Counts the number of NoCurbRamp/SurfaceProb/Obstacle/NoSidewalk attribute counts in each region.
   */
  def selectNegativeAttributeCountsByRegion(): List[(Int, String, Int)] = db.withSession { implicit session =>
    // Label type ids 2, 3, 4, 7 are hard-coded; per the scaladoc they are the
    // four negative label types — confirm against the label_type table.
    globalAttributes
      .filter(_.labelTypeId inSet List(2, 3, 4, 7))
      .groupBy(a => (a.regionId, a.labelTypeId)).map { case ((rId, typeId), group) => (rId, typeId, group.length) }
      .list.map{ case (rId, typeId, count) => (rId, LabelTypeTable.labelTypeIdToLabelType(typeId), count) }
  }
  /** Total number of global attributes. */
  def countGlobalAttributes: Int = db.withTransaction { implicit session =>
    globalAttributes.length.run
  }
  /** Inserts a new attribute and returns its generated id. */
  def save(newSess: GlobalAttribute): Int = db.withTransaction { implicit session =>
    val newId: Int = (globalAttributes returning globalAttributes.map(_.globalAttributeId)) += newSess
    newId
  }
}
| ProjectSidewalk/SidewalkWebpage | app/models/attribute/GlobalAttributeTable.scala | Scala | mit | 14,403 |
object BKKCrypt {

  /**
   * Encrypts and returns the input string using the famous BKK cryptographic method.
   *
   * The BKK method is the identity function: the "encrypted" output is the
   * input, unchanged.
   *
   * @param input String to be encoded.
   * @return The encoded string (equal to `input`).
   */
  def encode(input: String): String = {
    input
  }
}
| moszinet/BKKCrypt | Scala/BKKCrypt.scala | Scala | mit | 240 |
/**
* Created by peter_v on 08/23/15.
*/
package csv
import java.util.UUID
import base._
import common._
import scala.io.BufferedSource
object FactsReader {
  // Reads basic Facts from a CSV-like source: one fact per line, 10 fields
  // separated by `separator`, the last field free of newlines.
  // Field order as actually parsed below (the previous comment listed
  // predicate/objectType/objectValue before at/from/to, which contradicted
  // the indices used in the code):
  //  0 timestamp
  //  1 uuid
  //  2 context
  //  3 subject
  //  4 at
  //  5 from
  //  6 to
  //  7 predicate
  //  8 objectType
  //  9 objectValue
  def reader(file: BufferedSource): FactWithStatusIterator = {
    file.getLines().map[FactWithStatus](line => {
      // limit = 10 keeps any separator characters inside the last field intact
      val elements: Array[String] = line.split(separator, 10)
      val timestampString = elements(0)
      val id = UUID.fromString(elements(1)) // fail fast for invalid UUID string
      val context = elements(2)
      val subject = elements(3)
      val at = elements(4)
      val from = elements(5)
      val to = elements(6)
      val predicate = elements(7)
      val objectType = elements(8)
      val objectValue = elements(9)
      val predicateObject = PredicateObject(
        predicate = predicate,
        objectValue = objectValue,
        objectType = objectType,
        factsAtOption = Some(at),
        from = OptionalTimestamp(from),
        to = OptionalTimestamp(to)
      )
      val fact = Fact(
        timestamp = timestampString,
        id = id,
        context = Context(context),
        subject = UUID.fromString(subject),
        predicateObject
      )
      (Some(fact), None) // errors not yet handled
    })
  }
}
| petervandenabeele/AllMyData | src/main/scala/csv/FactsReader.scala | Scala | mit | 1,447 |
/*
* Shapes.scala
* (SysSon)
*
* Copyright (c) 2013-2017 Institute of Electronic Music and Acoustics, Graz.
* Copyright (c) 2014-2019 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package at.iem.sysson
package gui
import java.awt.geom.Path2D
object Shapes {
  /** Draws a spreadsheet glyph: a 3x3 grid of squares.
    *
    * Emits exactly the same sequence of `moveTo`/`lineTo` segments as the
    * fully unrolled form: cells are visited row by row (top to bottom, left
    * to right), and each cell is traced from its top-left corner via the
    * bottom-left, bottom-right and top-right corners back to the start.
    * All coordinates are exactly representable in `Float`, so the sums below
    * reproduce the original literals bit-for-bit.
    */
  def Spreadsheet(p: Path2D): Unit = {
    val origins = Seq(4.0f, 12.84375f, 21.6875f) // top/left coordinate of each row/column
    val side    = 6.3125f                        // edge length of one cell
    for (y <- origins; x <- origins) {
      p.moveTo(x, y)
      p.lineTo(x, y + side)
      p.lineTo(x + side, y + side)
      p.lineTo(x + side, y)
      p.lineTo(x, y)
    }
  }
}
| iem-projects/sysson | src/main/scala/at/iem/sysson/gui/Shapes.scala | Scala | gpl-3.0 | 1,884 |
package recursion_analysis
import def_finder.DefFinder
import exceptions.ICE
import tir._
/** Detects whether a named function is recursive. */
object RecursionIdentifier {
  // Looks up the single definition of `name` in the program (failing if it is
  // not unique) and checks that definition.
  def hasRecursion(env: TTypeEnv, program: TProgram,
                   name: TNamedIdent): Boolean = {
    val (_, funDef) = DefFinder.getSingleDefOrFail(env, program, name)
    hasRecursion(name, funDef)
  }
  // Walks `dec` with a RecursionIdentifierWalk parameterized on `name`.
  def hasRecursion(name: TNamedIdent, dec: TDec): Boolean =
    new RecursionIdentifierWalk(name).apply((), dec)
}
| j-c-w/mlc | src/main/scala/recursion_analysis/RecursionIdentifier.scala | Scala | gpl-3.0 | 446 |
package com.crobox.clickhouse.dsl.language
import com.crobox.clickhouse.dsl._
/** Renders IP-address column functions into their ClickHouse SQL form. */
trait IPFunctionTokenizer {
  self: ClickhouseTokenizerModule =>
  // Each case wraps the tokenized inner column expression in the ClickHouse
  // function of the same name.
  def tokenizeIPFunction(col: IPFunction[_])(implicit ctx: TokenizeContext): String = col match {
    case IPv4NumToString(col: NumericCol[_]) => s"IPv4NumToString(${tokenizeColumn(col.column)})"
    case IPv4StringToNum(col: StringColMagnet[_]) => s"IPv4StringToNum(${tokenizeColumn(col.column)})"
    case IPv4NumToStringClassC(col: NumericCol[_]) => s"IPv4NumToStringClassC(${tokenizeColumn(col.column)})"
    case IPv6NumToString(col: StringColMagnet[_]) => s"IPv6NumToString(${tokenizeColumn(col.column)})"
    case IPv6StringToNum(col: StringColMagnet[_]) => s"IPv6StringToNum(${tokenizeColumn(col.column)})"
  }
}
| crobox/clickhouse-scala-client | dsl/src/main/scala/com.crobox.clickhouse/dsl/language/IPFunctionTokenizer.scala | Scala | lgpl-3.0 | 777 |
/* sbt -- Simple Build Tool
* Copyright 2009 Mark Harrah
*/
package xsbt.boot
import Pre._
import java.io.File
import java.net.URI
import scala.collection.immutable.List
/** Entry point: locates the project base directory for `config`, starting from `currentDirectory`. */
object Find { def apply(config: LaunchConfiguration, currentDirectory: File) = (new Find(config))(currentDirectory) }
class Find(config: LaunchConfiguration)
{
  import config.boot.search
  // Returns the configuration with its paths resolved against the selected
  // base directory, paired with that base directory.
  def apply(currentDirectory: File) =
    {
      val current = currentDirectory.getCanonicalFile
      assert(current.isDirectory)
      // All ancestors of `current` (ordered root first) that contain the
      // required project paths.
      lazy val fromRoot = path(current, Nil).filter(hasProject).map(_.getCanonicalFile)
      val found: Option[File] =
        search.tpe match
        {
          case Search.RootFirst => fromRoot.headOption
          case Search.Nearest => fromRoot.lastOption
          case Search.Only =>
            // 'only' accepts the current directory itself, a unique matching
            // ancestor, or no match (falls back to current); multiple matching
            // ancestors are ambiguous and abort the launcher.
            if(hasProject(current))
              Some(current)
            else
              fromRoot match
              {
                case Nil => Some(current)
                case head :: Nil => Some(head)
                case xs =>
                  System.err.println("Search method is 'only' and multiple ancestor directories match:\n\t" + fromRoot.mkString("\n\t"))
                  System.exit(1)
                  None
              }
          case _ => Some(current)
        }
      val baseDirectory = orElse(found, current)
      // Make the selected base directory the process working directory.
      System.setProperty("user.dir", baseDirectory.getAbsolutePath)
      (ResolvePaths(config, baseDirectory), baseDirectory)
    }
  // A directory qualifies when every configured search path exists beneath it.
  private def hasProject(f: File) = f.isDirectory && search.paths.forall(p => ResolvePaths(f, p).exists)
  // Ancestor chain of f (inclusive), ordered root first.
  private def path(f: File, acc: List[File]): List[File] = if(f eq null) acc else path(f.getParentFile, f :: acc)
}
/** Resolves relative paths in a LaunchConfiguration against a base directory. */
object ResolvePaths
{
  def apply(config: LaunchConfiguration, baseDirectory: File): LaunchConfiguration =
    config.map(f => apply(baseDirectory, f))
  // Absolute paths are returned as-is; relative ones are resolved against
  // `baseDirectory`.
  def apply(baseDirectory: File, f: File): File =
    if (f.isAbsolute) f
    else
    {
      assert(baseDirectory.isDirectory) // if base directory is not a directory, URI.resolve will not work properly
      // A URI with null scheme/authority/fragment percent-encodes characters
      // that are illegal in URIs (e.g. spaces), so resolve() works for any path.
      val uri = new URI(null, null, f.getPath, null)
      new File(baseDirectory.toURI.resolve(uri))
    }
}
package com.danielwestheide.kontextfrei
import scala.collection.immutable.Seq
import scala.reflect.ClassTag
/** Constructors for the distributed-collection abstraction: lift a local
  * sequence into a DCollection, or create an empty one. */
private[kontextfrei] trait DCollectionConstructors[DCollection[_]] {
  def unit[A: ClassTag](as: Seq[A]): DCollection[A]
  def empty[A: ClassTag]: DCollection[A]
}
| dwestheide/kontextfrei | core/src/main/scala/com/danielwestheide/kontextfrei/DCollectionConstructors.scala | Scala | apache-2.0 | 274 |
package org.mozartoz.bootcompiler.fastparse
import java.io.File
import org.mozartoz.bootcompiler.fastparse.Tokens.PreprocessorDirective
import org.mozartoz.bootcompiler.fastparse.Tokens.PreprocessorDirectiveWithArg
import org.mozartoz.bootcompiler.fastparse.Tokens.Token
import com.oracle.truffle.api.source.Source
import fastparse.core.Parsed
import scala.collection.mutable.ArrayBuffer
/**
 * Oz source preprocessor. Handles the directives \define, \undef, \ifdef,
 * \ifndef, \else, \endif and \insert, producing the expanded program text
 * together with a map from output offsets back to positions in the original
 * source(s).
 */
object Preprocessor {
  // Maps the half-open output range [fromOffset, toOffset) to `sourceOffset`
  // in `source`. `toOffset` is filled in after all entries are collected.
  case class SourceMap(fromOffset: Int, sourceOffset: Int, source: Source, toOffset: Int = 0) {
    override def toString = fromOffset + "-" + toOffset + " " + sourceOffset + " @ " + source.getName
    def in(pos: Int) = {
      fromOffset <= pos && pos < toOffset
    }
    def length = toOffset - fromOffset
  }
  /** Preprocesses `source`, returning the expanded text and its source map. */
  def preprocess(source: Source): (String, Seq[SourceMap]) = {
    val input = source.getCode
    val tokens = Parser.t(Parser.tokens(input), source.getPath)
    var defines: Set[String] = Set()
    // > 0 while inside a false \ifdef/\ifndef branch; counts nesting depth.
    var skipDepth = 0
    // Position in `input` up to which text has been handled.
    var offset = 0
    val buffer = new StringBuilder
    val sourceMap: ArrayBuffer[SourceMap] = new ArrayBuffer[SourceMap]
    // Copies input text [offset, until) verbatim into the output buffer.
    def capture(until: Int) {
      buffer ++= input.substring(offset, until)
    }
    // Continues reading the input at `pos` and records a new mapping entry.
    def restartAt(pos: Int) {
      offset = pos
      recordPosition
    }
    // Copies text preceding the token and drops the token itself.
    def ignore(token: Token) {
      capture(token.pB)
      restartAt(token.pE)
    }
    // Records "output offset buffer.length corresponds to input offset `offset`".
    def recordPosition {
      sourceMap += SourceMap(buffer.length, offset, source)
    }
    recordPosition
    for (elem <- tokens) {
      if (elem.isInstanceOf[Token]) {
        val token = elem.asInstanceOf[Token]
        if (skipDepth > 0) {
          // Skipping a false conditional branch: only track nesting and the
          // directive that ends the skip; no text is emitted.
          // NOTE(review): this match is not exhaustive — e.g. a \define inside
          // a skipped region, or an \else at depth > 1, would raise a
          // MatchError. Confirm whether the Parser guarantees those cannot occur.
          token match {
            case PreprocessorDirectiveWithArg("ifdef" | "ifndef", _) =>
              skipDepth += 1
            case PreprocessorDirective("else" | "endif") if skipDepth == 1 =>
              skipDepth = 0
              restartAt(token.pE)
            case PreprocessorDirective("endif") =>
              skipDepth -= 1
          }
        } else {
          token match {
            case PreprocessorDirectiveWithArg("define", name) =>
              defines += name
              ignore(token)
            case PreprocessorDirectiveWithArg("undef", name) =>
              defines -= name
              ignore(token)
            case PreprocessorDirectiveWithArg("ifdef", name) =>
              if (defines contains name) {
                // next
              } else {
                skipDepth = 1
              }
              ignore(token)
            case PreprocessorDirectiveWithArg("ifndef", name) =>
              if (!(defines contains name)) {
                // next
              } else {
                skipDepth = 1
              }
              ignore(token)
            case PreprocessorDirective("else") =>
              // Reached only when the preceding branch was taken, so the
              // \else branch must be skipped.
              skipDepth = 1
              ignore(token)
            case PreprocessorDirective("endif") =>
              ignore(token)
            case PreprocessorDirectiveWithArg("insert", fileName) =>
              // Recursively preprocess the inserted file and splice its text
              // and (offset-shifted) source map into the current output.
              val file = resolve(new File(source.getPath), fileName)
              capture(token.pB)
              val subSource = Source.newBuilder(file).name(file.getName).mimeType("application/x-oz").build
              val (out, map) = preprocess(subSource)
              sourceMap ++= map.map {
                case SourceMap(fromOffset, sourceOffset, source, toOffset) =>
                  SourceMap(buffer.length + fromOffset, sourceOffset, source)
              }
              buffer ++= out
              restartAt(token.pE)
          }
        }
      }
    }
    // Copy any trailing text after the last directive.
    capture(input.length)
    // Close each mapping range at the start of the next one; the final range
    // is left open-ended.
    for (i <- 0 until sourceMap.size - 1) {
      sourceMap(i) = sourceMap(i).copy(toOffset = sourceMap(i + 1).fromOffset)
    }
    sourceMap(sourceMap.size - 1) = sourceMap.last.copy(toOffset = Int.MaxValue)
    (buffer.toString, sourceMap)
  }
  // Resolves an \insert file name: absolute names are used as-is, relative
  // names are taken relative to the including file; if the literal name does
  // not exist, fileName + ".oz" is tried before falling back to the original.
  def resolve(currentFile: File, fileName: String) = {
    val file0 = if (new File(fileName).isAbsolute()) {
      new File(fileName)
    } else {
      new File(currentFile.getParentFile, fileName)
    }
    val file = {
      if (file0.exists()) file0
      else {
        val altFile = new File(currentFile.getParentFile, fileName + ".oz")
        if (altFile.exists()) altFile
        else file0
      }
    }
    file
  }
}
| mistasse/mozart-graal | bootcompiler/src/main/scala/org/mozartoz/bootcompiler/fastparse/Preprocessor.scala | Scala | bsd-2-clause | 4,217 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.jdbc
import java.sql.{Connection, Driver, DriverPropertyInfo, SQLFeatureNotSupportedException}
import java.util.Properties
/**
 * A wrapper for a JDBC Driver to work around SPARK-6913.
 *
 * The problem is in `java.sql.DriverManager` class that can't access drivers loaded by
 * Spark ClassLoader.
 * (The problem is in the `java.sql.DriverManager` class, which cannot access drivers
 * loaded by Spark's ClassLoader.)
 */
class DriverWrapper(val wrapped: Driver) extends Driver {
  // Every Driver method below simply delegates to the wrapped driver.
  override def acceptsURL(url: String): Boolean = wrapped.acceptsURL(url)
  override def jdbcCompliant(): Boolean = wrapped.jdbcCompliant()
  override def getPropertyInfo(url: String, info: Properties): Array[DriverPropertyInfo] = {
    wrapped.getPropertyInfo(url, info)
  }
  override def getMinorVersion: Int = wrapped.getMinorVersion
  // JDBC 4.1 (Java 7) addition. Declared without `override` — presumably so the
  // class compiles against both JDBC 4.0 and 4.1 class paths; confirm. Not
  // delegated: intentionally reports "not supported".
  def getParentLogger: java.util.logging.Logger = {
    throw new SQLFeatureNotSupportedException(
      s"${this.getClass.getName}.getParentLogger is not yet implemented.")
  }
  override def connect(url: String, info: Properties): Connection = wrapped.connect(url, info)
  override def getMajorVersion: Int = wrapped.getMajorVersion
}
| tophua/spark1.52 | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/DriverWrapper.scala | Scala | apache-2.0 | 1,994 |
package com.sksamuel.elastic4s.requests.searches.aggs
import com.sksamuel.elastic4s.requests.searches.aggs.pipeline.PipelineAgg
import com.sksamuel.elastic4s.ext.OptionImplicits._
/**
 * Immutable builder for the Elasticsearch "variable width histogram" bucket
 * aggregation on the given numeric `field`. Every setter returns an updated
 * copy; unset options are omitted from the request.
 */
case class VariableWidthAggregation(name: String,
                                    field: String,
                                    buckets: Option[Int] = None,
                                    shardSize: Option[Int] = None,
                                    initialBuffer: Option[Int] = None,
                                    missing: Option[Any] = None,
                                    pipelines: Seq[PipelineAgg] = Nil,
                                    subaggs: Seq[AbstractAggregation] = Nil,
                                    metadata: Map[String, AnyRef] = Map.empty)
  extends Aggregation {
  type T = VariableWidthAggregation
  // Target number of buckets to generate.
  def buckets(buckets: Int): T = copy(buckets = buckets.some)
  // Number of buckets collected per shard before merging.
  def shardSize(shardSize: Int): T = copy(shardSize = shardSize.some)
  // Number of values buffered per shard before clustering begins.
  def initialBuffer(initialBuffer: Int): T = copy(initialBuffer = initialBuffer.some)
  // Substitute value for documents missing the field.
  def missing(missing: Any): T = copy(missing = missing.some)
  override def subAggregations(aggs: Iterable[AbstractAggregation]): T = copy(subaggs = aggs.toSeq)
  override def metadata(map: Map[String, AnyRef]): T = copy(metadata = map)
}
| sksamuel/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/searches/aggs/VariableWidthAggregation.scala | Scala | apache-2.0 | 1,331 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools.scalasbt
/**
 * Deliberately non-Suite fixture: a plain class with test-like method names.
 * Presumably used by the sbt-framework/discovery tests to verify that classes
 * which are not ScalaTest suites are not picked up — confirm against the
 * tools test suite.
 */
class NotASuite {
  def testMethod1(): Unit = {
  }
  def testMethod2(): Unit = {
  }
  def testMethod3(): Unit = {
  }
}
| dotty-staging/scalatest | scalatest-test/src/test/scala/org/scalatest/tools/scalasbt/NotASuite.scala | Scala | apache-2.0 | 786 |
package com.ebay.neutrino.config
import java.net.InetSocketAddress
import com.ebay.neutrino.PoolResolver
import com.ebay.neutrino.config.Configuration._
import com.typesafe.config.Config
import io.netty.channel.ChannelHandler
import scala.concurrent.duration.Duration
/**
 * An individual listener/port/transport tuple.
 * Note that listener-interfaces are transport specific; we duplicate for ease of resolution.
 */
case class ListenerAddress(host: String, port: Int, protocol: Transport) extends VirtualAddress
{
  // Socket-address view of this address. Declared `lazy val`, so the
  // InetSocketAddress is created once on first access and cached thereafter
  // (this answers the old "does this cache?" question: yes).
  lazy val socketAddress = new InetSocketAddress(host, port)
}
/**
 * Representation of a VIP/Interface Address/Listener.
 *
 * Note that LBaaS models LoadBalancer with the address and Listener (VIP) as port/protocol
 * only LBMS models VIP as containing both address/port.
 *
 * Validation:
 * - Host: Needs to be an IP or DNS address, but is expensive.
 * @see http://stackoverflow.com/questions/106179/regular-expression-to-match-dns-hostname-or-ip-address
 * - Port: 0..65535
 *
 * @param addresses     listener addresses (host/port/transport tuples) to bind
 * @param protocol      transport protocol served by this listener
 * @param sourcePorts   listener ports plus any configured port aliases
 * @param handlers      channel handlers installed for this listener
 * @param poolResolvers resolvers used to map incoming requests to backend pools
 * @param channel       downstream channel settings
 * @param timeouts      timeout settings for this listener
 */
case class ListenerSettings(
  addresses: Seq[ListenerAddress],
  protocol: Transport,
  sourcePorts: Seq[Int],
  handlers: Seq[_ <: ChannelHandler],
  poolResolvers: Seq[_ <: PoolResolver],
  channel: ChannelSettings,
  timeouts: TimeoutSettings
)
object ListenerSettings {
  import com.ebay.neutrino.config.Configuration._
  // ListenerSettings factory type; this is the preferred construction
  /**
   * Builds listener settings from configuration. "host" and "port" may each be
   * a single value or a list; the bound addresses are the full host x port
   * cross-product. The returned instance additionally mixes in HasConfiguration
   * so the raw Config stays accessible to callers.
   */
  def apply(cfg: Config) = {
    // Build a set of addresses around
    val hosts = cfg getStringOrList "host"
    val ports = cfg getIntOrList "port"
    val alias = cfg getIntOrList "port-alias"
    val proto = cfg getProtocol "protocol"
    // Get the cross-product of host:port
    val addresses = for { host <- hosts; port <- ports } yield ListenerAddress(host, port, proto)
    new ListenerSettings(
      addresses,
      proto,
      ports ++ alias,
      cfg getClassInstances "pipeline-class",
      cfg getStringOrList "pool-resolver" map (PoolResolver(_)),
      cfg getOptionalConfig "channel-options" map (ChannelSettings(_)) getOrElse ChannelSettings.Default,
      cfg getTimeouts "timeout"
    ) with
      HasConfiguration { override val config: Config = cfg }
  }
  // Factory type; create with some defaults.
  /** Programmatic factory with defaults (HTTP, default channel/timeout settings). */
  def apply(
    addresses: Seq[ListenerAddress]=Seq(),
    protocol: Transport=Transport.HTTP,
    sourcePorts: Seq[Int]=Seq(),
    handlers: Seq[_ <: ChannelHandler]=Seq(),
    poolResolvers: Seq[_ <: PoolResolver]=Seq()): ListenerSettings =
  {
    ListenerSettings(addresses, protocol, sourcePorts, handlers, poolResolvers, ChannelSettings.Default, TimeoutSettings.Default)
  }
}
/**
 * Representation of downstream channel settings.
 *
 * @param forceKeepAlive when true, keep-alive is forced on downstream
 *                       connections — NOTE(review): inferred from the name and
 *                       config key; confirm against channel setup code
 * @param auditThreshold optional duration threshold for channel auditing;
 *                       None disables it (from config key "audit-threshold")
 */
case class ChannelSettings(
  forceKeepAlive: Boolean,
  auditThreshold: Option[Duration]
)
object ChannelSettings {
  // Defaults: keep-alive forced on, auditing disabled.
  val Default = ChannelSettings(true, None)
  /** Builds settings from keys "force-keepalive" and "audit-threshold". */
  def apply(cfg: Config) =
    new ChannelSettings(
      cfg getBoolean "force-keepalive",
      cfg getOptionalDuration "audit-threshold"
    )
}
} | eBay/Neutrino | src/main/scala/com/ebay/neutrino/config/ListenerAddress.scala | Scala | apache-2.0 | 3,232 |
/*
* Copyright (c) 2014, Brook 'redattack34' Heisler
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the ModularRayguns team nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.castlebravostudios.rayguns.items.misc
import com.castlebravostudios.rayguns.mod.Config
import com.castlebravostudios.rayguns.mod.ModularRayguns
import net.minecraft.item.Item
/**
 * "Radiant Dust" item singleton. On construction it registers itself on the
 * mod's creative tab and binds the "rayguns:radiant_dust" texture.
 */
object RadiantDust extends Item {
  setCreativeTab(ModularRayguns.raygunsTab)
  setUnlocalizedName("rayguns.RadiantDust")
  setTextureName("rayguns:radiant_dust")
}
/**
* algorithms-lab
*
* Copyright 2016 juanitodread
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.juanitodread.algorithmslab.sorting
/**
* @author juanitodread
* @version 1.0.0
*
* 8/2/16
*/
abstract class AbstractSort {
  // Default ascending comparators for common element types; each returns true
  // when x should be ordered before y.
  protected def sortInt = (x: Int, y: Int) => x < y
  protected def sortString = (x: String, y: String) => x < y
  protected def sortChar = (x: Char, y: Char) => x < y
  /**
   * Method to apply sort algorithm over array.
   *
   * @param elements Array of T
   * @param func Comparison function: returns true when its first argument must
   *             be ordered before its second in the sorted result
   *
   * @return A sorted array of T elements
   */
  def sort[T](elements: Array[T], func: (T, T) => Boolean): Array[T]
}
| juanitodread/algorithms-lab | algorithms-lab/scala/src/main/scala/org/juanitodread/algorithmslab/sorting/AbstractSort.scala | Scala | apache-2.0 | 1,218 |
/*
* Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.wegtam.tensei.agent.parsers.FileParsers.Email
import java.io.StringReader
import com.wegtam.tensei.adt.ElementReference
import com.wegtam.tensei.agent.DataTreeDocument.DataTreeDocumentMessages
import com.wegtam.tensei.agent.XmlActorSpec
import org.xml.sax.InputSource
/**
 * End-to-end FileParser tests for e-mail input: parses a sample message with
 * the matching DFASDL and compares both the produced structure and the
 * extracted data against expected XML fixtures on the classpath.
 */
class EmailTest extends XmlActorSpec {
  describe("FileParser") {
    describe("E-Mail") {
      describe("when given a simple email") {
        val dataFile = "/com/wegtam/tensei/agent/parsers/FileParsers/Email/email-01.txt"
        val dfasdlFile = "/com/wegtam/tensei/agent/parsers/FileParsers/Email/email-01.xml"
        it("should create the correct source structure") {
          val expectedFile =
            "/com/wegtam/tensei/agent/parsers/FileParsers/Email/email-01-expected-structure.xml"
          val r = prepareFileParserStructureComparison(dataFile, dfasdlFile, expectedFile)
          val expectedNodes = r._1
          val actualNodes = r._2
          actualNodes.size should be(expectedNodes.size)
          compareXmlStructureNodes(expectedNodes, actualNodes)
        }
        it("should extract the correct data") {
          val expectedDataXml = scala.io.Source
            .fromInputStream(
              getClass.getResourceAsStream(
                "/com/wegtam/tensei/agent/parsers/FileParsers/Email/email-01-expected-data.xml"
              )
            )
            .mkString
          val expectedDataTree =
            createTestDocumentBuilder().parse(new InputSource(new StringReader(expectedDataXml)))
          val dataTree = prepareFileParserDataComparison(dataFile, dfasdlFile)
          compareSimpleDataNodes(expectedDataTree, dataTree)
          // The fixture message is expected to contain exactly 23 header rows.
          dataTree ! DataTreeDocumentMessages.GetSequenceRowCount(
            ElementReference("MY-DFASDL", "headers")
          )
          val headerCount = expectMsgType[DataTreeDocumentMessages.SequenceRowCount]
          headerCount.rows.getOrElse(0L) should be(23L)
          compareChoiceInSequence("headers", expectedDataTree, dataTree)
        }
      }
    }
  }
}
| Tensei-Data/tensei-agent | src/test/scala/com/wegtam/tensei/agent/parsers/FileParsers/Email/EmailTest.scala | Scala | agpl-3.0 | 2,805 |
package com.cj.messagestreaming
import org.scalatest.{FlatSpec, Matchers}
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.language.postfixOps
/**
 * Unit tests for the Publication combinators — blocking, retrying, premap and
 * map. Each combinator is checked both for its transformation behaviour and
 * for propagating close() to the underlying publication.
 */
class PublicationTest extends FlatSpec with Matchers {
  private implicit lazy val ec = scala.concurrent.ExecutionContext.Implicits.global
  behavior of "blocking"
  it should "transform the publication into one that blocks" in {
    // given
    var sent: String = null
    val message1 = "foo"
    val message2 = "bar"
    // The send function completes asynchronously after a 1s delay; `blocking`
    // (with a longer timeout) should wait for that completion.
    val original = Publication(
      send = (s: String) => Future {
        Thread.sleep(1000); sent = s
      },
      onClose = ()
    )
    val transformed = Publication.blocking(responseTimeout = 2 seconds)(original)
    // when
    lazy val res1 = original(message1)
    lazy val res2 = transformed(message2)
    // then
    withClue("The original publication should not complete synchronously") {
      res1.isCompleted should be(false)
    }
    withClue("The original publication should not have had an outside effect") {
      sent should be(null)
    }
    withClue("The transformed publication should complete synchronously") {
      res2.isCompleted should be(true)
    }
    withClue("The transformed publication should have had an outside effect") {
      sent should be(message2)
    }
  }
  it should "close the original publication when closed" in {
    // given
    var closed: Boolean = false
    val original = Publication(
      send = (_: String) => Future.successful(true),
      onClose = closed = true
    )
    val transformed =
      Publication.blocking(responseTimeout = 0 seconds)(original)
    // when
    transformed.close()
    // then
    closed should be(true)
  }
  behavior of "retrying"
  it should "transform the publication into one that retries failures" in {
    // given
    var sent: String = null
    var tries: Int = 0
    val message = "foo"
    // The send function fails on every call except the tenth, which succeeds.
    val original = Publication(
      send = (v1: String) => {
        tries = tries + 1
        tries match {
          case 10 => sent = v1; Future.successful(())
          case _ => Future.failed(new RuntimeException)
        }
      },
      onClose = ()
    )
    val transformed = Publication.retrying(
      maxRetries = 100,
      initialDelay = 0 seconds,
      incrementDelay = identity,
      maxDelay = 0 seconds
    )(original)
    // when
    lazy val attempt1 = original(message)
    lazy val attempt2 = transformed(message)
    //then
    Await.ready(attempt1, Duration.Inf)
    withClue("The original publication should have tried once") {
      tries should be(1)
    }
    withClue("The original publication should have failed") {
      sent should be(null)
    }
    // attempt1 already consumed one try, so the retrying publication needs
    // nine more calls to reach the successful tenth.
    Await.ready(attempt2, Duration.Inf)
    withClue("the transformed publication should have tried 9 times") {
      tries should be(10)
    }
    withClue("The transformed publication should have succeeded") {
      sent should be(message)
    }
  }
  it should "close the original publication when closed" in {
    // given
    var closed: Boolean = false
    val original = Publication(
      send = (_: String) => Future.successful(true),
      onClose = closed = true
    )
    val transformed =
      Publication.retrying(maxRetries = 0)(original)
    // when
    transformed.close()
    // then
    closed should be(true)
  }
  behavior of "premap"
  it should "transform the publication's input using the provided callback" in {
    // given
    var sent: Any = null
    val original = Publication(
      (v1: String) => {
        sent = v1
        Future.successful(true)
      },
      onClose = ()
    )
    val callback = (n: Int) => n.toString
    val transformed = original.premap(callback)
    val message: Int = 5
    // when
    transformed(message)
    // then
    sent should be(callback(message))
  }
  it should "close the original publication when closed" in {
    // given
    var closed: Boolean = false
    val original = Publication(
      send = (_: String) => Future.successful(true),
      onClose = closed = true
    )
    val transformed = original.premap(identity[String])
    // when
    transformed.close()
    // then
    closed should be(true)
  }
  behavior of "map"
  it should "transform the publication's output using the provided callback" in {
    // given
    val receipt = true
    val original = Publication(
      send = (_: String) => Future.successful(receipt),
      onClose = ()
    )
    val callback = (p: Boolean) => if (p) 0 else 1
    val transformed = original.map(callback)
    val message: String = "5"
    // when
    val result = transformed(message)
    // then
    Await.result(result, Duration.Inf) should be(callback(receipt))
  }
  it should "close the original publication when closed" in {
    // given
    var closed: Boolean = false
    val original = Publication(
      send = (_: String) => Future.successful(true),
      onClose = closed = true
    )
    val transformed = original.map(identity[Boolean])
    // when
    transformed.close()
    // then
    closed should be(true)
  }
}
| cjdev/message-streaming | src/test/scala/com/cj/messagestreaming/PublicationTest.scala | Scala | mit | 5,067 |
/*
* This file is part of eCobertura.
*
* Copyright (c) 2010 Joachim Hofer
* All rights reserved.
*
* This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*/
package ecobertura.ui.launching.config.filters
import org.eclipse.swt.SWT
import org.eclipse.swt.events.SelectionEvent
import org.eclipse.swt.widgets._
import org.eclipse.swt.layout._
import org.eclipse.jface.viewers._
import ecobertura.core.data.filters._
import ecobertura.ui.util.layout.FormDataBuilder
import ecobertura.ui.util.Predef._
/** Factory for [[IncludeExcludeClassesGroupBuilder]] (constructor is private). */
object IncludeExcludeClassesGroupBuilder {
  def forParent(parent: Composite) = new IncludeExcludeClassesGroupBuilder(parent)
}
/**
 * Builds the "Classes to Include/Exclude" SWT group for the coverage launch
 * configuration: a class-filter table plus buttons for adding include/exclude
 * filters and removing the selected one. Filter changes are reported through
 * the registered FilterChangeListener.
 */
class IncludeExcludeClassesGroupBuilder private (parent: Composite) {
  // Widget references populated during build().
  private var includeExcludeGroup: Group = _
  private var includeExcludeTable: TableViewer = _
  private var tableHolder: Composite = _
  private var listener: FilterChangeListener = _
  /** Registers the change listener to notify on filter edits (fluent setter). */
  def withChangeListener(listener: FilterChangeListener) = {
    this.listener = listener
    this
  }
  /** Creates all widgets and returns the filter table viewer. */
  def build() = {
    includeExcludeGroup = initializeIncludeExcludeGroup()
    buildBasicIncludeExcludeGroupLayout(includeExcludeGroup)
    tableHolder = initializeIncludeExcludeTableHolder(includeExcludeGroup)
    includeExcludeTable = initializeIncludeExcludeTable(tableHolder, listener)
    initializeButtons()
    includeExcludeTable
  }
  // Titled group container using a FormLayout for its children.
  private def initializeIncludeExcludeGroup() = {
    val includeExcludeGroup = new Group(parent, SWT.NONE)
    includeExcludeGroup.setText("Classes to Include/Exclude:")
    includeExcludeGroup.setLayout(new FormLayout)
    includeExcludeGroup
  }
  // Make the group grab all available horizontal and vertical space.
  private def buildBasicIncludeExcludeGroupLayout(group: Composite) = {
    val gridData = new GridData
    gridData.grabExcessHorizontalSpace = true
    gridData.horizontalAlignment = SWT.FILL
    gridData.grabExcessVerticalSpace = true
    gridData.verticalAlignment = SWT.FILL
    group.setLayoutData(gridData)
  }
  // Composite hosting the filter table, anchored with 5% margins.
  private def initializeIncludeExcludeTableHolder(parent: Composite) = {
    val tableHolder = new Composite(parent, SWT.NONE)
    tableHolder.setLayout(new FillLayout)
    FormDataBuilder.forFormElement(tableHolder)
      .topAtPercent(0, 5).leftAtPercent(0, 5).bottomAtPercent(100, 5)
      .build
    tableHolder
  }
  private def initializeIncludeExcludeTable(parent: Composite, listener: FilterChangeListener) =
    ClassFilterTable.forParentAndFilters(parent).withChangeListener(listener).build()
  // NOTE(review): the three vals exist to chain each button's layout off the
  // previous one; `removeButton` itself is unused afterwards.
  private def initializeButtons() = {
    val addIncludeButton = initializeAddIncludeButton
    val addExcludeButton = initializeAddExcludeButton(addIncludeButton)
    val removeButton = initializeRemoveButton(addExcludeButton)
  }
  // "Add Include Filter": appends an include filter with pattern "*" and opens
  // the inline editor on it.
  private def initializeAddIncludeButton() = {
    val addIncludeButton = new Button(includeExcludeGroup, SWT.PUSH)
    addIncludeButton.setText("Add Include Filter")
    FormDataBuilder.forFormElement(addIncludeButton)
      .topAtPercent(0, 5).rightNeighborOf(tableHolder, 5).rightAtPercent(100, 5)
      .build
    addIncludeButton.addSelectionListener((_: SelectionEvent) => {
      addAndEditClassFilterPattern(ClassFilter(IncludeFilter, "*"))
    })
    addIncludeButton
  }
  // "Add Exclude Filter": same as above but with an exclude filter.
  private def initializeAddExcludeButton(addIncludeButton: Control) = {
    val addExcludeButton = new Button(includeExcludeGroup, SWT.PUSH)
    addExcludeButton.setText("Add Exclude Filter")
    FormDataBuilder.forFormElement(addExcludeButton)
      .bottomNeighborOf(addIncludeButton, 5).rightNeighborOf(tableHolder, 5)
      .rightAtPercent(100, 5).build
    addExcludeButton.addSelectionListener((_: SelectionEvent) => {
      addAndEditClassFilterPattern(ClassFilter(ExcludeFilter, "*"))
    })
    addExcludeButton
  }
  // Adds the filter to the table model, starts editing its pattern column and
  // notifies the change listener.
  private def addAndEditClassFilterPattern(classFilter: ClassFilter) = {
    val classFilters = includeExcludeTable.getInput.asInstanceOf[ClassFilters]
    classFilters.add(classFilter)
    includeExcludeTable.refresh()
    includeExcludeTable.editElement(classFilter, 1)
    listener.filtersChanged(includeExcludeTable)
  }
  // "Remove Filter": removes the currently selected filter from the model.
  private def initializeRemoveButton(addExcludeButton: Control) = {
    val removeButton = new Button(includeExcludeGroup, SWT.PUSH)
    removeButton.setText("Remove Filter")
    FormDataBuilder.forFormElement(removeButton)
      .bottomNeighborOf(addExcludeButton, 15).rightNeighborOf(tableHolder, 5)
      .rightAtPercent(100, 5).build
    removeButton.addSelectionListener((event: SelectionEvent) => {
      val selectedFilter = includeExcludeTable.getSelection.asInstanceOf[IStructuredSelection]
        .getFirstElement.asInstanceOf[ClassFilter]
      val classFilters = includeExcludeTable.getInput.asInstanceOf[ClassFilters]
      classFilters.remove(selectedFilter)
      includeExcludeTable.refresh()
      listener.filtersChanged(includeExcludeTable)
    })
    removeButton
  }
}
| jmhofer/eCobertura | ecobertura.ui/src/main/scala/ecobertura/ui/launching/config/filters/IncludeExcludeClassesGroupBuilder.scala | Scala | epl-1.0 | 5,011 |
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
/**
 * Local Spark job: counts, per key, the rows present in data1 but absent from
 * data2. Each input line is CSV; the second column is the key, the first the
 * value. Prints "key===>count" for every key.
 */
object Histogram {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("Histogram").setMaster("local")
    val sc: SparkContext = new SparkContext(conf)
    try {
      val dataset1: RDD[String] = sc.textFile("/home/hadoop/spark/scala/mllib/core/data1")
      val dataset2: RDD[String] = sc.textFile("/home/hadoop/spark/scala/mllib/core/data2")
      // Rows that occur in dataset1 but not in dataset2.
      val subRDD: RDD[String] = dataset1.subtract(dataset2)
      // Key on the second CSV column; split each line only once (the original
      // split every line twice).
      val keyValueRDD: RDD[(String, String)] = subRDD.map { line =>
        val fields = line.split(",")
        (fields(1), fields(0))
      }
      // countByKey materializes the (small) histogram on the driver.
      val hist = keyValueRDD.countByKey()
      for ((k, v) <- hist) {
        println(k + "===>" + v)
      }
    } finally {
      // Always release the SparkContext; the original leaked it.
      sc.stop()
    }
  }
}
| malli3131/SparkApps | histogram/Histogram.scala | Scala | apache-2.0 | 707 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.local
import java.nio.ByteBuffer
import scala.concurrent.duration._
import akka.actor.{Actor, ActorRef, Props}
import org.apache.spark.{Logging, SparkContext, SparkEnv, TaskState}
import org.apache.spark.TaskState.TaskState
import org.apache.spark.executor.{Executor, ExecutorBackend}
import org.apache.spark.scheduler.{SchedulerBackend, TaskSchedulerImpl, WorkerOffer}
import org.apache.spark.util.ActorLogReceive
// Internal messages exchanged between LocalBackend and LocalActor.
// The zero-argument messages are case objects: the call sites in this file
// reference them by bare name (e.g. `localActor ! ReviveOffers`), which for a
// `case class Foo()` sends the companion *object* rather than an instance.
// Case objects make that intent explicit and match the receive patterns
// (`case ReviveOffers =>`, `case StopExecutor =>`) unchanged.
private case object ReviveOffers
private case class StatusUpdate(taskId: Long, state: TaskState, serializedData: ByteBuffer)
private case class KillTask(taskId: Long, interruptThread: Boolean)
private case object StopExecutor
/**
 * Calls to LocalBackend are all serialized through LocalActor. Using an actor makes the calls on
 * LocalBackend asynchronous, which is necessary to prevent deadlock between LocalBackend
 * and the TaskSchedulerImpl.
 */
private[spark] class LocalActor(
    scheduler: TaskSchedulerImpl,
    executorBackend: LocalBackend,
    private val totalCores: Int)
  extends Actor with ActorLogReceive with Logging {
  import context.dispatcher   // to use Akka's scheduler.scheduleOnce()
  // Cores not currently occupied by a running task. Only mutated from actor
  // message handling, so no extra synchronization is needed.
  private var freeCores = totalCores
  private val localExecutorId = SparkContext.DRIVER_IDENTIFIER
  private val localExecutorHostname = "localhost"
  // The single in-process executor that actually runs the tasks.
  private val executor = new Executor(
    localExecutorId, localExecutorHostname, SparkEnv.get, isLocal = true)
  override def receiveWithLogging = {
    case ReviveOffers =>
      reviveOffers()
    case StatusUpdate(taskId, state, serializedData) =>
      scheduler.statusUpdate(taskId, state, serializedData)
      if (TaskState.isFinished(state)) {
        // A finished task frees its cores; try to schedule more work.
        freeCores += scheduler.CPUS_PER_TASK
        reviveOffers()
      }
    case KillTask(taskId, interruptThread) =>
      executor.killTask(taskId, interruptThread)
    case StopExecutor =>
      executor.stop()
  }
  // Offer the currently free cores to the scheduler and launch whatever tasks
  // it assigns to this (single) executor.
  def reviveOffers() {
    val offers = Seq(new WorkerOffer(localExecutorId, localExecutorHostname, freeCores))
    val tasks = scheduler.resourceOffers(offers).flatten
    for (task <- tasks) {
      freeCores -= scheduler.CPUS_PER_TASK
      executor.launchTask(executorBackend, taskId = task.taskId, attemptNumber = task.attemptNumber,
        task.name, task.serializedTask)
    }
    if (tasks.isEmpty && scheduler.activeTaskSets.nonEmpty) {
      // Try to reviveOffer after 1 second, because scheduler may wait for locality timeout
      context.system.scheduler.scheduleOnce(1000 millis, self, ReviveOffers)
    }
  }
}
/**
 * LocalBackend is used when running a local version of Spark where the executor, backend, and
 * master all run in the same JVM. It sits behind a TaskSchedulerImpl and handles launching tasks
 * on a single Executor (created by the LocalBackend) running locally.
 */
private[spark] class LocalBackend(scheduler: TaskSchedulerImpl, val totalCores: Int)
  extends SchedulerBackend with ExecutorBackend {
  private val appId = "local-" + System.currentTimeMillis
  // Null until start() is called; all public methods below assume start() ran first.
  var localActor: ActorRef = null
  override def start() {
    localActor = SparkEnv.get.actorSystem.actorOf(
      Props(new LocalActor(scheduler, this, totalCores)),
      "LocalBackendActor")
  }
  override def stop() {
    localActor ! StopExecutor
  }
  override def reviveOffers() {
    localActor ! ReviveOffers
  }
  override def defaultParallelism() =
    scheduler.conf.getInt("spark.default.parallelism", totalCores)
  override def killTask(taskId: Long, executorId: String, interruptThread: Boolean) {
    localActor ! KillTask(taskId, interruptThread)
  }
  // ExecutorBackend callback: forward task status changes to the actor so they
  // are serialized with all other scheduler interactions.
  override def statusUpdate(taskId: Long, state: TaskState, serializedData: ByteBuffer) {
    localActor ! StatusUpdate(taskId, state, serializedData)
  }
  override def applicationId(): String = appId
}
| Dax1n/spark-core | core/src/main/scala/org/apache/spark/scheduler/local/LocalBackend.scala | Scala | apache-2.0 | 4,585 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datastax.killrweather
import akka.pattern.pipe
import akka.actor.{ActorLogging, Actor, ActorRef}
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import com.datastax.spark.connector._
import org.joda.time.DateTime
/** For a given weather station id, retrieves the full station data. */
class WeatherStationActor(sc: SparkContext, settings: WeatherSettings)
  extends AggregationActor with ActorLogging {
  import settings.{CassandraKeyspace => keyspace }
  import settings.{CassandraTableRaw => rawtable}
  import settings.{CassandraTableStations => weatherstations}
  import WeatherEvent._
  import Weather._
  def receive : Actor.Receive = {
    case GetCurrentWeather(wsid, dt) => current(wsid, dt, sender)
    case GetWeatherStation(wsid) => weatherStation(wsid, sender)
  }
  /** Computes and sends the current weather conditions for a given weather station,
    * based on UTC time, to the `requester`.
    */
  def current(wsid: String, dt: Option[DateTime], requester: ActorRef): Unit = {
    // Falls back to "now" when no timestamp was supplied.
    val day = Day(wsid, dt getOrElse timestamp)
    // collectAsync keeps the actor non-blocking; the (optional) first row is
    // piped back to the requester as a Future result.
    sc.cassandraTable[RawWeatherData](keyspace, rawtable)
      .where("wsid = ? AND year = ? AND month = ? AND day = ?",
        wsid, day.year, day.month, day.day)
      .collectAsync.map(_.headOption) pipeTo requester
  }
  /** The reason we can not allow a `LIMIT 1` in the `where` function is that
    * the query is executed on each node, so the limit would applied in each
    * query invocation. You would probably receive about partitions_number * limit results.
    */
  def weatherStation(wsid: String, requester: ActorRef): Unit =
    sc.cassandraTable[Weather.WeatherStation](keyspace, weatherstations)
      .where("id = ?", wsid)
      .collectAsync.map(_.headOption) pipeTo requester
}
package io.vamp.pulse.notification
import akka.actor.{ Actor, ActorSystem }
import io.vamp.common.NamespaceProvider
import io.vamp.common.akka.{ CommonActorLogging, IoC }
import io.vamp.common.notification.{ ErrorNotification, Notification }
import io.vamp.common.util.TextUtil
import io.vamp.model.event.Event
import io.vamp.pulse.PulseActor
import io.vamp.pulse.PulseActor.Publish
/**
 * Mixin for actors that report failures both as a Pulse event and as a thrown
 * notification exception. Implementors supply the error-notification class and
 * the reportException hook.
 */
trait PulseFailureNotifier {
  this: Actor with CommonActorLogging with NamespaceProvider ⇒
  // Notification class instantiated (via its first constructor) for generic failures.
  def errorNotificationClass: Class[_ <: ErrorNotification]
  // Converts a notification into the exception ultimately thrown to the caller.
  def reportException(notification: Notification): Exception
  /**
   * Publishes a failure event to Pulse, then builds and returns the exception
   * for the given notification class. The class is constructed reflectively
   * with the failure as its single constructor argument.
   */
  def failure(failure: Any, `class`: Class[_ <: Notification] = errorNotificationClass): Exception = {
    publishFailureNotification(failure)
    reportException(`class`.getConstructors()(0).newInstance(failure.asInstanceOf[AnyRef]).asInstanceOf[Notification])
  }
  /** Builds the Pulse event describing the failure; tagged "<type>:ERROR". */
  protected def failureNotificationEvent(failure: Any): Event = {
    val event = Event(Event.defaultVersion, Set("info", s"$typeName${Event.tagDelimiter}ERROR"), failure match {
      case e: Exception ⇒ if (e.getCause != null) e.getCause.getClass.getSimpleName else e.getClass.getSimpleName
      case _ ⇒ ""
    })
    log.debug(s"Pulse failure notification event: ${event.tags}")
    event
  }
  /**
   * Best-effort publish of the failure event to the PulseActor. Any exception
   * raised while publishing is deliberately swallowed (printed/logged only) so
   * that failure reporting can never mask the original failure.
   */
  protected def publishFailureNotification(failure: Any): Unit = {
    try {
      implicit val actorSystem: ActorSystem = context.system
      IoC.actorFor[PulseActor].tell(Publish(failureNotificationEvent(failure)), Actor.noSender)
    }
    catch {
      case e: Exception ⇒
        failure match {
          case f: Exception ⇒ f.printStackTrace()
          case f ⇒ log.error(f.toString)
        }
        e.printStackTrace()
    }
  }
  // Snake-cased simple class name with a trailing "Actor" suffix removed;
  // used as the event tag prefix.
  def typeName: String = TextUtil.toSnakeCase(getClass.getSimpleName.replaceAll("Actor$", ""), dash = false)
}
| magneticio/vamp | pulse/src/main/scala/io/vamp/pulse/notification/PulseFailureNotifier.scala | Scala | apache-2.0 | 1,854 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.jms.request
import java.io.{ Serializable => JSerializable }
import com.softwaremill.quicklens.ModifyPimp
import io.gatling.core.action.builder.ActionBuilder
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.session.Expression
import io.gatling.core.session.ExpressionSuccessWrapper
import io.gatling.jms.JmsCheck
import io.gatling.jms.action.JmsReqReplyBuilder
import io.gatling.jms.action.JmsRequestSendBuilder
/** Entry point of the JMS request DSL: choose request/reply or fire-and-forget send. */
case class JmsRequestBuilderBase(requestName: String) {
  def reqreply(implicit configuration: GatlingConfiguration) = JmsRequestReplyBuilderQueue(requestName, configuration)
  def send(implicit configuration: GatlingConfiguration) = JmsRequestSendBuilderQueue(requestName, configuration)
}
/**
 * Builder for fire-and-forget sending: selects the target destination.
 */
case class JmsRequestSendBuilderQueue(
  requestName: String,
  configuration: GatlingConfiguration
) {
  def queue(name: String) = destination(JmsQueue(name))
  def destination(destination: JmsDestination) = JmsRequestSendBuilderMessage(requestName, destination, configuration)
}
/**
 * Builder for request/reply: selects the target destination
 * (reply destination defaults to a temporary queue).
 */
case class JmsRequestReplyBuilderQueue(
  requestName: String,
  configuration: GatlingConfiguration
) {
  def queue(name: String) = destination(JmsQueue(name))
  def destination(destination: JmsDestination) = JmsRequestReplyBuilderMessage(requestName, destination, JmsTemporaryQueue, None, configuration)
}
/**
 * Message builder for sending: chooses the JMS message payload type.
 */
case class JmsRequestSendBuilderMessage(
  requestName: String,
  destination: JmsDestination,
  configuration: GatlingConfiguration
) {
  def textMessage(text: Expression[String]) = message(TextJmsMessage(text))
  def bytesMessage(bytes: Expression[Array[Byte]]) = message(BytesJmsMessage(bytes))
  def objectMessage(o: Expression[JSerializable]) = message(ObjectJmsMessage(o))
  // Wraps the payload into attributes and defers ActionBuilder creation.
  private def message(mess: JmsMessage) =
    JmsSendRequestBuilder(JmsAttributes(requestName, destination, None, mess), JmsRequestSendBuilder.apply(_, configuration))
}
/**
 * Message builder for request/reply: configures the reply destination,
 * an optional message selector, and the JMS message payload type.
 */
case class JmsRequestReplyBuilderMessage(
  requestName: String,
  destination: JmsDestination,
  replyDest: JmsDestination,
  messageSelector: Option[String],
  configuration: GatlingConfiguration
) {
  /**
   * Add a reply queue; if not specified a dynamic (temporary) queue is used
   */
  def replyQueue(name: String) = replyDestination(JmsQueue(name))
  def replyDestination(destination: JmsDestination) = this.copy(replyDest = destination)
  /**
   * defines selector for reply destination that is used for responses
   */
  def selector(selector: String) = this.copy(messageSelector = Some(selector))
  def textMessage(text: Expression[String]) = message(TextJmsMessage(text))
  def bytesMessage(bytes: Expression[Array[Byte]]) = message(BytesJmsMessage(bytes))
  def mapMessage(map: Map[String, Any]): JmsReplyRequestBuilder = mapMessage(map.expressionSuccess)
  def mapMessage(map: Expression[Map[String, Any]]): JmsReplyRequestBuilder = message(MapJmsMessage(map))
  def objectMessage(o: Expression[JSerializable]) = message(ObjectJmsMessage(o))
  // Wraps the payload into attributes and defers ActionBuilder creation.
  private def message(mess: JmsMessage) =
    JmsReplyRequestBuilder(JmsAttributes(requestName, destination, messageSelector, mess), JmsReqReplyBuilder.apply(_, replyDest, configuration))
}
/** Final request/reply builder: optional properties, JMS type and checks. */
case class JmsReplyRequestBuilder(attributes: JmsAttributes, factory: JmsAttributes => ActionBuilder) {
  /**
   * Add JMS message properties (aka headers) to the outbound message
   */
  def property(key: Expression[String], value: Expression[Any]) = this.modify(_.attributes.messageProperties).using(_ + (key -> value))
  def jmsType(jmsType: Expression[String]) = this.modify(_.attributes.jmsType).setTo(Some(jmsType))
  /**
   * Add a check that will be performed on each received JMS response message
   * before giving Gatling an OK/KO response
   */
  def check(checks: JmsCheck*) = this.modify(_.attributes.checks).using(_ ::: checks.toList)
  def build(): ActionBuilder = factory(attributes)
}
/** Final fire-and-forget builder: optional properties and JMS type (no checks). */
case class JmsSendRequestBuilder(attributes: JmsAttributes, factory: JmsAttributes => ActionBuilder) {
  /**
   * Add JMS message properties (aka headers) to the outbound message
   */
  def property(key: Expression[String], value: Expression[Any]) = this.modify(_.attributes.messageProperties).using(_ + (key -> value))
  def jmsType(jmsType: Expression[String]) = this.modify(_.attributes.jmsType).setTo(Some(jmsType))
  def build(): ActionBuilder = factory(attributes)
}
| thkluge/gatling | gatling-jms/src/main/scala/io/gatling/jms/request/JmsRequestBuilder.scala | Scala | apache-2.0 | 5,127 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.iterators
import com.vividsolutions.jts.geom.Geometry
import org.geotools.data.Query
import org.geotools.factory.CommonFactoryFinder
import org.geotools.feature.simple.SimpleFeatureBuilder
import org.geotools.filter.text.ecql.ECQL
import org.joda.time.{DateTime, DateTimeZone}
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.index.{AttributeIndex, Z2Index, Z3Index}
import org.locationtech.geomesa.accumulo.{AccumuloFeatureIndexType, TestWithDataStore}
import org.locationtech.geomesa.index.utils.ExplainString
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
/** Verifies index/strategy selection and result correctness for attribute
  * queries (LIKE wildcards, transforms, bbox without temporal filter).
  */
class AttributeIndexFilteringIteratorTest extends Specification with TestWithDataStore {

  sequential

  override val spec = s"name:String:index=true,age:Integer:index=true,dtg:Date,*geom:Point:srid=4326"

  // 16 features: names a-d, each with ages 1-4 at points (45,45)..(48,48).
  val features = List("a", "b", "c", "d").flatMap { name =>
    List(1, 2, 3, 4).zip(List(45, 46, 47, 48)).map { case (i, lat) =>
      val sf = SimpleFeatureBuilder.build(sft, List(), name + i.toString)
      sf.setDefaultGeometry(WKTUtils.read(f"POINT($lat%d $lat%d)"))
      sf.setAttribute("dtg", new DateTime("2011-01-01T00:00:00Z", DateTimeZone.UTC).toDate)
      sf.setAttribute("age", i)
      sf.setAttribute("name", name)
      sf
    }
  }

  addFeatures(features)

  val ff = CommonFactoryFinder.getFilterFactory2

  // Asserts that exactly one query plan is produced and it uses `strategy`.
  def checkStrategies[T](query: Query, strategy: AccumuloFeatureIndexType) = {
    val out = new ExplainString
    val plan = ds.getQueryPlan(query)
    plan must haveLength(1)
    plan.head.filter.index mustEqual strategy
  }

  "AttributeIndexFilteringIterator" should {

    "handle like queries and choose correct strategies" in {
      // Try out wildcard queries using the % wildcard syntax.
      // Test single wildcard, trailing, leading, and both trailing & leading wildcards
      // % should return all features
      val wildCardQuery = new Query(sftName, ff.like(ff.property("name"),"%"))
      checkStrategies(wildCardQuery, AttributeIndex)
      SelfClosingIterator(fs.getFeatures(wildCardQuery)) must haveLength(16)

      forall(List("a", "b", "c", "d")) { letter =>
        // 4 features for this letter
        val leftWildCard = new Query(sftName, ff.like(ff.property("name"),s"%$letter"))
        checkStrategies(leftWildCard, Z3Index)
        SelfClosingIterator(fs.getFeatures(leftWildCard)) must haveLength(4)

        // Double wildcards should be full table scan
        val doubleWildCard = new Query(sftName, ff.like(ff.property("name"),s"%$letter%"))
        checkStrategies(doubleWildCard, Z3Index)
        SelfClosingIterator(fs.getFeatures(doubleWildCard)) must haveLength(4)

        // should return the 4 features for this letter
        val rightWildcard = new Query(sftName, ff.like(ff.property("name"),s"$letter%"))
        checkStrategies(rightWildcard, AttributeIndex)
        SelfClosingIterator(fs.getFeatures(rightWildcard)) must haveLength(4)
      }
    }

    "actually handle transforms properly and chose correct strategies for attribute indexing" in {
      // transform to only return the attribute geom - dropping dtg, age, and name
      val query = new Query(sftName, ECQL.toFilter("name = 'b'"), Array("geom"))
      checkStrategies(query, AttributeIndex)
      // full table scan
      val leftWildCard = new Query(sftName, ff.like(ff.property("name"), "%b"), Array("geom"))
      checkStrategies(leftWildCard, Z3Index)
      // full table scan
      val doubleWildCard = new Query(sftName, ff.like(ff.property("name"), "%b%"), Array("geom"))
      checkStrategies(doubleWildCard, Z3Index)
      val rightWildcard = new Query(sftName, ff.like(ff.property("name"), "b%"), Array("geom"))
      checkStrategies(rightWildcard, AttributeIndex)

      // each query must return only the geometry attribute for the 4 'b' features
      forall(List(query, leftWildCard, doubleWildCard, rightWildcard)) { query =>
        val features = SelfClosingIterator(fs.getFeatures(query)).toList
        features must haveLength(4)
        forall(features)(_.getAttribute(0) must beAnInstanceOf[Geometry])
        forall(features)(_.getAttributeCount mustEqual 1)
      }
    }

    "handle corner case with attr idx, bbox, and no temporal filter" in {
      val filter = ff.and(ECQL.toFilter("name = 'b'"), ECQL.toFilter("BBOX(geom, 30, 30, 50, 50)"))
      val query = new Query(sftName, filter, Array("geom"))
      ds.queryPlanner.strategyDecider.getFilterPlan(sft, Some(ds), filter, None, None).head.index mustEqual Z2Index
      val features = SelfClosingIterator(fs.getFeatures(query)).toList
      features must haveLength(4)
      forall(features)(_.getAttribute(0) must beAnInstanceOf[Geometry])
      forall(features)(_.getAttributeCount mustEqual 1)
    }
  }
}
| nagavallia/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/iterators/AttributeIndexFilteringIteratorTest.scala | Scala | apache-2.0 | 5,383 |
/*
* Copyright 2019 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.binders
import play.api.mvc.QueryStringBindable
import scala.util.{Success, Try}
/** A validated request origin; construction fails unless the value matches Origin.Allowed. */
case class Origin(origin: String){
  require(Origin.Allowed.pattern.matcher(origin).find())
}
object Origin {

  // Word characters, dots and dashes, 1-100 chars, anchored at both ends.
  // Note: the character class must be [\w.-]; the previous double-escaped
  // pattern placed a literal backslash in the class, which rejected ordinary
  // origins — including the "unknown" default below, making object
  // initialization itself throw.
  private val Allowed = "^[\\w\\.-]{1,100}$".r

  /** Fallback used whenever the origin parameter is missing or invalid. */
  val Default = Origin("unknown")

  /** Query-string binder for [[Origin]]: never fails, falling back to
    * [[Default]] when the parameter is absent or does not validate.
    */
  implicit def queryBinder(implicit stringBinder: QueryStringBindable[String]): QueryStringBindable[Origin] =
    new QueryStringBindable[Origin] {

      def bind(key: String, params: Map[String, Seq[String]]) = {
        val result = stringBinder.bind(key, params).map {
          case Right(s) => Try(Origin(s)) match {
            case Success(url) => Right(url)
            case _            => Right(Default) // validation failure -> default
          }
          case _ => Right(Default) // underlying binder failed -> default
        }
        // missing parameter -> default
        result.orElse(Some(Right(Default)))
      }

      def unbind(key: String, value: Origin) = stringBinder.unbind(key, value.origin)
    }
}
| nicf82/play-ui | src/main/scala/uk/gov/hmrc/play/binders/Origin.scala | Scala | apache-2.0 | 1,460 |
package circumflex
package web
import core._
import matchers._
import java.io.File
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.matchers.MustMatchers
import org.scalatest._
/** Top-level routes exercised by the spec: HTTP methods, URI decoding,
  * regexes, errors, redirects, subrouters and filters.
  */
class MockRouter extends Router {
  get("/") = "preved"
  post("/") = "preved from POST"
  get("/decode me") = "preved"
  get("/regex/(.*)".r) = "matched " + uri(1)
  put("/") = "this is a put route"
  get("/error") = sendError(503)
  get("/redirect") = sendRedirect("/decode me")
  sub("/sub") = new SubMockRouter
  // Mutable counter lets the spec observe that the filter ran before the route.
  var counter = 0
  filter("/filter") = {
    counter += 1
  }
  get("/filter") = counter.toString
  sub("/matching") = new MatchingMockRouter
}
/** Subrouter mounted under "/sub"; the "/?" pattern makes the trailing slash optional. */
class SubMockRouter extends Router {
  get("/?") = "preved"
}
/** Routes exercising the matching mechanism: named URI parameters, header
  * matchers, composite conditions, multi-valued params and guarded routes.
  */
class MatchingMockRouter extends Router {
  get("/uri/:name.:ext") = uri("name") + "->" + uri("ext")
  get("/uri/:name") = "preved, " + uri("name")
  get("/uri/*/one/:two/+.:three") = uri(1) + uri("two") + uri(3) + uri("three")
  get("/param").and(HOST(":host")) = "host is " + param("host")
  // Composite routes: most specific first; the first fully-matching one wins.
  get("/composite").and(ACCEPT("text/:format")).and(REFERER("localhost")) =
    "3 conditions met (" + param("format") + ")"
  get("/composite").and(ACCEPT("text/:format")) =
    "2 conditions met (" + param("format") + ")"
  get("/composite") = "1 condition met"
  get("/multiparam") = request.params.list("test").mkString(",")
  get("/multiparam/:test/:test") = param.list("test").mkString(",")
  get("/complex/:name")
    .and(param("name").startsWith("Ch")) = "You passed a complex route."
  // A guard of `false` must short-circuit: the block below must never run.
  get("/complex/:name")
    .and(false)
    .and({
      println("Unreachable code.")
      true
    }) = "You can't be there."
  get("/complex/:name") = "You failed to pass complex route using '" +
    param("name") + "'."
}
@RunWith(classOf[JUnitRunner])
/** End-to-end spec: boots MockApp against the routers above and asserts on
  * status codes and response bodies.
  */
class CircumflexWebSpec
  extends FreeSpec
  with MustMatchers
  with BeforeAndAfterAll {

  override def beforeAll() {
    cx("cx.router") = classOf[MockRouter]
    var rootPath = System.getProperty("user.dir")
    // Hack to get around different Maven execution locations
    if (!rootPath.endsWith("web")) {
      rootPath += (File.separator + "web")
    }
    cx("cx.webappRoot") = (rootPath + File.separatorChar + "src/test/webapp")
    MockApp.start()
  }

  override def afterAll() {
    MockApp.stop()
  }

  "Router" - {
    "return 404 by default on non-matched requests" in {
      MockApp.get("/this/does/not/match/any/routes").execute().statusCode must equal (404)
    }
    "decode URIs before matching" in {
      MockApp.get("/decode%20me").execute().content must equal ("preved")
    }
    "match requests by simple URI" in {
      MockApp.get("/").execute().content must equal("preved")
    }
    "match requests by method" in {
      MockApp.post("/").execute().content must equal("preved from POST")
    }
    "match requests by regex" in {
      MockApp.get("/regex/piu").execute().content must equal ("matched piu")
    }
    // `_method` overrides the transport-level HTTP method
    "interpret `_method` parameter as HTTP method" in {
      MockApp.get("/?_method=PUT").execute().content must equal ("this is a put route")
      MockApp.post("/")
        .setParam("_method", "PUT")
        .execute()
        .content must equal ("this is a put route")
    }
    "process subrouters" in {
      MockApp.get("/sub/").execute().content must equal ("preved")
    }
    "send errors" in {
      MockApp.get("/error").execute().statusCode must equal (503)
    }
    "send redirects" in {
      MockApp.get("/redirect").execute().content must equal ("preved")
    }
    "process filter directive" in {
      MockApp.get("/filter").execute().content must equal ("1")
    }
  }

  "Matching mechanism" - {
    "process named parameters from URI" in {
      MockApp.get("/matching/uri/Jack").execute().content must equal ("preved, Jack")
      MockApp.get("/matching/uri/file.txt").execute().content must equal ("file->txt")
      MockApp.get("/matching/uri/I/one/Love/Circum.flex").execute().content must equal ("ILoveCircumflex")
    }
    "process named parameters from current match results, delegating to request parameters on fail" in {
      MockApp.get("/matching/param")
        .setHeader("Host", "preved")
        .execute()
        .content must equal ("host is preved")
    }
    "match composite routes" in {
      MockApp.get("/matching/composite")
        .setHeader("Accept","text/html")
        .setHeader("Referer","localhost")
        .execute().content must equal ("3 conditions met (html)")
      MockApp.get("/matching/composite")
        .setHeader("Accept","text/plain")
        .execute().content must equal ("2 conditions met (plain)")
      MockApp.get("/matching/composite")
        .setHeader("Accept","application/xml")
        .setHeader("Referer","localhost")
        .execute().content must equal ("1 condition met")
    }
    "deal with multiple parameter values" in {
      MockApp.get("/matching/multiparam?test=one&test=two&test=three")
        .execute()
        .content must equal ("one,two,three")
      MockApp.get("/matching/multiparam/one/two?test=three&test=four&test=five")
        .execute()
        .content must equal ("one,two,three,four,five")
    }
    "deal with complex route contexts" in {
      MockApp.get("/matching/complex/Chris")
        .execute()
        .content must equal ("You passed a complex route.")
      MockApp.get("/matching/complex/Chuck")
        .execute()
        .content must equal ("You passed a complex route.")
      MockApp.get("/matching/complex/Joe")
        .execute()
        .content must equal ("You failed to pass complex route using 'Joe'.")
    }
  }
}
| inca/circumflex | web/src/test/scala/specs.scala | Scala | bsd-2-clause | 5,672 |
package models.daoapriori.apriori
import models.MyPostgresDriver.simple._
import models.DataRule.SetBarang
import models.daoapriori.DBTableDefinitions._
import models.daoapriori.SupKonDAOSlick
import play.api.db.slick._
import scala.concurrent.Future
/** Slick-backed DAO for Apriori itemsets ("set barang"): persistence,
  * per-collection lookups and support-based pruning.
  */
class SetBarangDAOSlick extends SetBarangDAO {
  import play.api.Play.current
  val supKonDAOSlick = new SupKonDAOSlick

  // Finds the itemset whose item list matches exactly, if any.
  def find(listBarang: List[Int]): Option[SetBarang] = {
    DB withSession { implicit session =>
      slickSetBarang.filter(_.daftar === listBarang).firstOption match {
        case Some(daftar) => Some(daftar)
        case None => None
      }
    }
  }

  // Support count for an itemset.
  // NOTE(review): any exception (including "row not found") is swallowed and
  // reported as support 1 — confirm this sentinel is intentional.
  def findSupport(listBarang: List[Int]): Int = {
    DB withSession { implicit session =>
      try {
        slickSetBarang.filter(_.daftar === listBarang).map(_.support).first.run
      } catch {
        case e: Exception => 1
      }
    }
  }

  // All itemsets belonging to collection (pass) `koleksi`.
  def findByKoleksi(koleksi: Int) = {
    DB withSession { implicit session =>
      slickSetBarang.filter(_.koleksi === koleksi).list
    }
  }

  // Persists a single itemset and returns it.
  def save(setBarang: SetBarang) = {
    DB withTransaction { implicit session =>
      Future.successful {
        val setbarang = SetBarang(setBarang.daftar, setBarang.koleksi, setBarang.support)
        slickSetBarang.insert(setbarang)
        setbarang
      }
    }
  }

  // Persists every itemset in the list and returns the list.
  def save(listSetBarang: List[SetBarang]) = {
    DB withTransaction { implicit session =>
      Future.successful {
        for (setBarang <- listSetBarang) save(setBarang)
        listSetBarang
      }
    }
  }

  // Returns a collection's itemsets sorted by item list.
  // NOTE(review): this read also prunes the collection as a side effect.
  def lihatKoleksi(koleksi: Int): List[SetBarang] = {
    DB withSession { implicit session =>
      prune(koleksi)
      slickSetBarang.filter(_.koleksi === koleksi).sortBy(_.daftar).list
    }
  }

  // Deletes ALL itemsets; returns the number of rows removed.
  def reset: Int = {
    DB withSession { implicit session =>
      slickSetBarang.delete
    }
  }

  // Removes itemsets of this collection whose support is below the minimum.
  def prune(koleksi: Int): Int = {
    DB withTransaction { implicit session =>
      val minimumSupport: Int = supKonDAOSlick.minimumSupport
      slickSetBarang.filter(_.support < minimumSupport).filter(_.koleksi === koleksi).delete
    }
  }

  // Distinct item lists of collection n-1 (the frequent sets feeding pass n).
  def listSetBarang(n: Int): List[List[Int]] = {
    DB withSession { implicit session =>
      val koleksiNMinusSatu: List[SetBarang] = lihatKoleksi(n - 1)
      val setBarangN = {
        for (koleksi <- koleksiNMinusSatu) yield koleksi.daftar
      }
      setBarangN.distinct
    }
  }

  // Highest collection number stored (the final Apriori pass).
  def koleksiFinal: Int = {
    DB withSession { implicit session =>
      slickSetBarang.map(_.koleksi).sortBy(_.desc).first
    }
  }

  // True when more than one collection is stored.
  def isJamak: Boolean = {
    DB withSession { implicit session =>
      val jamak = slickSetBarang.map(_.koleksi).list.distinct
      jamak.length > 1
    }
  }

  // True when the exact item list exists in any collection.
  def isAda(daftar: List[Int]): Boolean = {
    DB withSession { implicit session =>
      slickSetBarang.filter(_.daftar === daftar).exists.run
    }
  }
}
| ibnuda/hasembuh | app/models/daoapriori/apriori/SetBarangDAOSlick.scala | Scala | apache-2.0 | 2,635 |
package pep_075
object Wip {
  // http://en.wikipedia.org/wiki/Pythagorean_triple
  // For an integer right triangle with sides a < b < c and perimeter p:
  //   a*a + b*b = c*c,  p = a + b + c
  // Fixing p and the shortest side a gives b = p*(p - 2a) / (2*(p - a)),
  // so a yields a triangle exactly when that division is exact.
  // p is always even for such a triangle, and a < p / 3 since a < b < c.

  /** All shortest sides `a` completing an integer right triangle of
    * perimeter `p`; empty for odd `p`. Arithmetic is done in Long because
    * p*(p - 2a) overflows Int for perimeters near the intended 1,500,000
    * search bound.
    */
  private def shortestSides(p: Int): IndexedSeq[Int] =
    if (p % 2 != 0) IndexedSeq.empty
    else for {
      a <- 2 until p / 3
      if p.toLong * (p - 2 * a) % (2L * (p - a)) == 0
    } yield a

  /** Number of distinct integer right triangles with perimeter `p`. */
  def isPerimiterOfHowManyRightAngleTriangle(p: Int): Long =
    shortestSides(p).length

  // def solve(max: Int = 1500000) = (1 to max).par.map(isPerimiterOfHowManyRightAngleTriangle).count(_ == 1)

  /** True when exactly one integer right triangle has perimeter `p`. */
  def isPerimiterOfOnlyOneRightAngleTriangle(p: Int): Boolean = {
    val sides = shortestSides(p)
    sides.nonEmpty && sides.drop(1).isEmpty
  }

  // Euclid's parametrisation (d scales a primitive triple, gcd(a,b,c) = d):
  //   a = d * (m2 - n2)
  //   b = 2d * m * n
  //   c = d * (m2 + n2)
  //   0 < n < m
  //   a + b + c = 2 * m * (m + n) * d
}
| filippovitale/pe | pe-solution/src/main/scala/pep_075/Wip.scala | Scala | mit | 956 |
import org.apache.log4j.{Level, Logger}
import org.apache.spark.graphx._
import org.apache.spark.graphx.util.GraphGenerators
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
/** Iterative GraphX correlation-clustering driver (KDD'14-style peeling):
  * vertex attribute -100 = unclustered, -1 = tentatively active center,
  * any other value = assigned cluster id.
  */
object KDD14_MRV03Dimitris {
  def main(args: Array[String]) = {
    Logger.getLogger("org").setLevel(Level.WARN)
    Logger.getLogger("akka").setLevel(Level.WARN)
    val sc = new SparkContext(new SparkConf())
    // var graph: Graph[Int, Int] = GraphGenerators.rmatGraph(sc, requestedNumVertices = 1e4.toInt, numEdges = 1e4.toInt).mapVertices( (id, _) => -100.toInt )
    // NOTE(review): hard-coded local input path and partition count.
    val path = "/Users/dimitris/Documents/graphs/amazon.txt"
    val numParitions = 2
    val graphInit: Graph[(Int), Int] = GraphLoader.edgeListFile(sc, path, false, numParitions)
    //The following is needed for undirected (bi-directional edge) graphs
    val vertexRDDs: VertexRDD[Int] = graphInit.vertices
    var edgeRDDs: RDD[Edge[Int]] = graphInit.edges.reverse.union(graphInit.edges)
    val graph: Graph[(Int), Int] = Graph(vertexRDDs, edgeRDDs).mapVertices((id, _) => -100.toInt)
    var unclusterGraph: Graph[(Int), Int] = graph
    var activeSubgraph: Graph[(Int), Int] = null
    val epsilon: Double = 2
    var x: Int = 1
    var clusterUpdates: RDD[(org.apache.spark.graphx.VertexId, Int)] = null
    var randomSet: RDD[(org.apache.spark.graphx.VertexId, Int)] = null
    var newVertices: RDD[(org.apache.spark.graphx.VertexId, Int)] = null
    var hasFriends: RDD[(org.apache.spark.graphx.VertexId, Int)] = null
    // Degree of each still-unclustered vertex counting only unclustered neighbors.
    var maxDegree: VertexRDD[Int] = unclusterGraph.aggregateMessages[Int](
      triplet => {
        if (triplet.dstAttr == -100 & triplet.srcAttr == -100) {
          triplet.sendToDst(1)
        }
      }, _ + _).cache()
    var maxDeg: Int = if (maxDegree.count == 0) 0 else maxDegree.toArray.map(x => x._2).max
    // Peel until no unclustered vertex has an unclustered neighbor.
    while (maxDeg >= 1) {
      // Sample candidate centers with probability epsilon / maxDeg; resample on empty draw.
      randomSet = unclusterGraph.vertices.filter(v => v._2 == -100).sample(false, math.min(epsilon / maxDeg, 1), scala.util.Random.nextInt(1000))
      while (randomSet.count == 0) {
        randomSet = unclusterGraph.vertices.filter(v => v._2 == -100).sample(false, math.min(epsilon / maxDeg, 1), scala.util.Random.nextInt(1000))
      }
      // System.out.println(s"Cluster Centers ${randomSet.collect().toList}.")
      unclusterGraph = unclusterGraph.joinVertices(randomSet)((vId, attr, active) => -1).cache()
      //Turn-off active nodes that are friends
      // activeSubgraph = unclusterGraph.subgraph(vpred = (id, attr) => attr == -1).cache()
      // hasFriends = unclusterGraph.degrees.filter{case (id, u) => u > 0}.cache()
      hasFriends = unclusterGraph.aggregateMessages[Int](
        triplet => {
          if (triplet.dstAttr == -1 & triplet.srcAttr == -1) {
            triplet.sendToDst(1)
          }
        }, math.min(_, _)
      )
      unclusterGraph = unclusterGraph.joinVertices(hasFriends)((vId, attr, active) => -100).cache()
      // Each unclustered vertex adopts the smallest adjacent active-center id.
      clusterUpdates = unclusterGraph.aggregateMessages[Int](
        triplet => {
          if (triplet.dstAttr == -100 & triplet.srcAttr == -1) {
            triplet.sendToDst(triplet.srcId.toInt)
          }
        }, math.min(_, _)
      )
      // Surviving centers (-1) become their own cluster id.
      newVertices = unclusterGraph.vertices.leftJoin(clusterUpdates) {
        (id, oldValue, newValue) =>
          newValue match {
            case Some(x: Int) => x
            case None => {
              if (oldValue == -1) id.toInt; else oldValue;
            }
          }
      }
      unclusterGraph = unclusterGraph.joinVertices(newVertices)((vId, oldAttr, newAttr) => newAttr).cache()
      // Recompute the max unclustered degree for the next round.
      maxDegree = unclusterGraph.aggregateMessages[Int](
        triplet => {
          if (triplet.dstAttr == -100 & triplet.srcAttr == -100) {
            triplet.sendToDst(1)
          }
        }, _ + _
      ).cache()
      maxDeg = if (maxDegree.count == 0) 0 else maxDegree.toArray.map(x => x._2).max
      System.out.println(s"new maxDegree $maxDeg.")
      System.out.println(s"KDD14 finished iteration $x.")
      x = x + 1
    }
    //Take care of degree 0 nodes
    newVertices = unclusterGraph.subgraph(vpred = (vId, clusterID) => clusterID == -100).vertices
    newVertices = unclusterGraph.vertices.leftJoin(newVertices) {
      (id, oldValue, newValue) =>
        newValue match {
          case Some(x: Int) => id.toInt
          case None => oldValue;
        }
    }
    unclusterGraph = unclusterGraph.joinVertices(newVertices)((vId, oldAttr, newAttr) => newAttr).cache()
    // unclusterGraph = unclusterGraph.mapVertices((id,clusterID) => v == 1)
    // unclusterGraph.vertices.collect
    // // unhappy edges accross clusters
    // val unhappyFriends: Float = unclusterGraph.triplets.filter(e=> e.dstAttr != e.srcAttr).count/2
    // // compute cluster sizes
    // val clusterSizes: List[Float] = unclusterGraph.vertices.map(v=> v._2).countByValue.map(v => v._2).toList.map(_.toFloat)
    // // compute missing edges inside clusters
    // val tripletsWithSameID: Float = unclusterGraph.triplets.filter(e=> e.dstAttr == e.srcAttr).count/2
    // //Cost
    // val costClusterWild = (clusterSizes.map( x=> x*(x-1)/2).sum - tripletsWithSameID) + unhappyFriends
  }
}
| anadim/clusterWild | src/main/scalaPre20150324/KDD14_MRV03Dimitris.scala | Scala | apache-2.0 | 5,166 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.stats
import java.lang.{Double => jDouble, Float => jFloat, Long => jLong}
import java.util.Date
import com.clearspring.analytics.stream.cardinality.RegisterSet
import com.esotericsoftware.kryo.io.{Input, Output}
import com.vividsolutions.jts.geom.Geometry
import org.locationtech.geomesa.curve.TimePeriod
import org.locationtech.geomesa.utils.cache.{CacheKeyGenerator, SoftThreadLocal}
import org.locationtech.geomesa.utils.clearspring.{HyperLogLog, StreamSummary}
import org.locationtech.geomesa.utils.stats.MinMax.MinMaxDefaults
import org.locationtech.geomesa.utils.text.WKBUtils
import org.opengis.feature.simple.SimpleFeatureType
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
/**
 * Serialize and deserialize stats
 */
trait StatSerializer {

  def serialize(stat: Stat): Array[Byte]

  /** Deserializes the whole array; pass immutable = true for a read-only stat. */
  def deserialize(bytes: Array[Byte], immutable: Boolean = false): Stat =
    deserialize(bytes, 0, bytes.length, immutable)

  def deserialize(bytes: Array[Byte], offset: Int, length: Int, immutable: Boolean): Stat
}
object StatSerializer {

  // Serializer cache keyed by feature type; access is synchronized on the map.
  private val serializers = scala.collection.mutable.Map.empty[String, StatSerializer]

  /** Returns (and caches) the Kryo serializer for the given feature type. */
  def apply(sft: SimpleFeatureType): StatSerializer = serializers.synchronized {
    serializers.getOrElseUpdate(CacheKeyGenerator.cacheKey(sft), new KryoStatSerializer(sft))
  }
}
/**
 * Kryo implementation of stat serializer. Thread-safe.
 *
 * @param sft simple feature type
 */
class KryoStatSerializer(sft: SimpleFeatureType) extends StatSerializer {

  override def serialize(stat: Stat): Array[Byte] = {
    // Reuse a thread-local output buffer to avoid per-call allocation.
    val output = KryoStatSerializer.outputs.getOrElseUpdate(new Output(1024, -1))
    output.clear()
    KryoStatSerializer.write(output, sft, stat)
    output.toBytes
  }

  override def deserialize(bytes: Array[Byte], offset: Int, length: Int, immutable: Boolean): Stat = {
    // Reuse a thread-local input wrapper over the caller's byte range.
    val input = KryoStatSerializer.inputs.getOrElseUpdate(new Input)
    input.setBuffer(bytes, offset, length)
    KryoStatSerializer.read(input, sft, immutable)
  }
}
object KryoStatSerializer {

  // Thread-local Kryo buffers shared across serializer instances.
  private val inputs  = new SoftThreadLocal[Input]()
  private val outputs = new SoftThreadLocal[Output]()

  // bytes indicating the type of stat
  // V1-suffixed bytes are legacy encodings: read for compatibility, never written.
  private val SeqStatByte: Byte = 0
  private val CountByte: Byte = 1
  private val MinMaxByteV1: Byte = 2
  private val IteratorStackByte: Byte = 3
  private val EnumerationByte: Byte = 4
  private val HistogramByte: Byte = 5
  private val FrequencyByteV1: Byte = 6
  private val Z3HistogramByteV1: Byte = 7
  private val Z3FrequencyByteV1: Byte = 8
  private val TopKByteV1: Byte = 9
  private val FrequencyByte: Byte = 10
  private val Z3HistogramByte: Byte = 11
  private val Z3FrequencyByte: Byte = 12
  private val DescriptiveStatByte: Byte = 13
  private val GroupByByte: Byte = 14
  private val TopKByte: Byte = 15
  private val MinMaxByte: Byte = 16
  // Writes a one-byte type tag followed by the stat's payload; must stay in
  // sync with the dispatch in `read`.
  private def write(output: Output, sft: SimpleFeatureType, stat: Stat): Unit = {
    stat match {
      case s: CountStat           => output.writeByte(CountByte);           writeCount(output, s)
      case s: MinMax[_]           => output.writeByte(MinMaxByte);          writeMinMax(output, sft, s)
      case s: EnumerationStat[_]  => output.writeByte(EnumerationByte);     writeEnumeration(output, sft, s)
      case s: TopK[_]             => output.writeByte(TopKByte);            writeTopK(output, sft, s)
      case s: Histogram[_]        => output.writeByte(HistogramByte);       writeHistogram(output, sft, s)
      case s: Frequency[_]        => output.writeByte(FrequencyByte);       writeFrequency(output, sft, s)
      case s: Z3Histogram         => output.writeByte(Z3HistogramByte);     writeZ3Histogram(output, sft, s)
      case s: Z3Frequency         => output.writeByte(Z3FrequencyByte);     writeZ3Frequency(output, sft, s)
      case s: IteratorStackCount  => output.writeByte(IteratorStackByte);   writeIteratorStackCount(output, s)
      case s: SeqStat             => output.writeByte(SeqStatByte);         writeSeqStat(output, sft, s)
      case s: DescriptiveStats    => output.writeByte(DescriptiveStatByte); writeDescriptiveStats(output, sft, s)
      case s: GroupBy[_]          => output.writeByte(GroupByByte);         writeGroupBy(output, sft, s)
    }
  }
  // Dispatches on the leading type byte written by `write`; also accepts the
  // legacy V1 bytes, passing the serialization version to version-aware readers.
  private def read(input: Input, sft: SimpleFeatureType, immutable: Boolean): Stat = {
    input.readByte() match {
      case CountByte           => readCount(input, immutable)
      case MinMaxByte          => readMinMax(input, sft, immutable, 2)
      case EnumerationByte     => readEnumeration(input, sft, immutable)
      case TopKByte            => readTopK(input, sft, immutable, 2)
      case HistogramByte       => readHistogram(input, sft, immutable)
      case FrequencyByte       => readFrequency(input, sft, immutable, 2)
      case Z3HistogramByte     => readZ3Histogram(input, sft, immutable, 2)
      case Z3FrequencyByte     => readZ3Frequency(input, sft, immutable, 2)
      case IteratorStackByte   => readIteratorStackCount(input, immutable)
      case SeqStatByte         => readSeqStat(input, sft, immutable)
      case DescriptiveStatByte => readDescriptiveStat(input, sft, immutable)
      case GroupByByte         => readGroupBy(input, sft, immutable)
      case FrequencyByteV1     => readFrequency(input, sft, immutable, 1)
      case Z3HistogramByteV1   => readZ3Histogram(input, sft, immutable, 1)
      case Z3FrequencyByteV1   => readZ3Frequency(input, sft, immutable, 1)
      case MinMaxByteV1        => readMinMax(input, sft, immutable, 1)
      case TopKByteV1          => readTopK(input, sft, immutable, 1)
    }
  }
  // Layout: attribute index, example-stat string, group count, then for each
  // group its key (encoded by the attribute's binding) and nested stat.
  private def writeGroupBy(output: Output, sft: SimpleFeatureType, stat: GroupBy[_]): Unit = {
    output.writeInt(stat.attribute, true)
    output.writeString(stat.exampleStat)
    output.writeInt(stat.groupedStats.keys.size, true)
    stat.groupedStats.foreach { case (key, groupedStat) =>
      writer(output, sft.getDescriptor(stat.attribute).getType.getBinding)(key)
      write(output, sft, groupedStat)
    }
  }
private def readGroupBy(input: Input, sft: SimpleFeatureType, immutable: Boolean): GroupBy[_] = {
val attribute = input.readInt(true)
val exampleStat = input.readString()
val keyLength = input.readInt(true)
val binding = sft.getDescriptor(attribute).getType.getBinding
val classTag = ClassTag[Any](binding)
val stat = if (immutable) {
new GroupBy(attribute, exampleStat, sft)(classTag) with ImmutableStat
} else {
new GroupBy(attribute, exampleStat, sft)(classTag)
}
var i = 0
while (i < keyLength) {
val key = reader(input, sft.getDescriptor(attribute).getType.getBinding).apply()
val groupedStat = read(input, sft, immutable)
stat.groupedStats.put(key, groupedStat)
i += 1
}
stat
}
  // Serializes a DescriptiveStats: attribute count, attribute indices, then the eight
  // internal matrices (min, max, sum, mean, central moments m2n/m3n/m4n, co-moment c2)
  // as raw doubles, and finally the observation count. The field order is the wire
  // format and must match readDescriptiveStat exactly.
  private def writeDescriptiveStats(output: Output, sft: SimpleFeatureType, stat: DescriptiveStats): Unit = {
    val size = stat.size
    output.writeInt(size, true)
    stat.attributes.foreach(output.writeInt(_, true))
    // helper: dump a matrix's backing array as fixed-width doubles
    def writeArray(array: Array[Double]): Unit = for(v <- array) { output.writeDouble(v) }
    writeArray(stat._min.getMatrix.data)
    writeArray(stat._max.getMatrix.data)
    writeArray(stat._sum.getMatrix.data)
    writeArray(stat._mean.getMatrix.data)
    writeArray(stat._m2n.getMatrix.data)
    writeArray(stat._m3n.getMatrix.data)
    writeArray(stat._m4n.getMatrix.data)
    writeArray(stat._c2.getMatrix.data)
    output.writeLong(stat._count, true)
  }
  // Deserializes a DescriptiveStats; mirrors writeDescriptiveStats. The matrices of the
  // freshly constructed stat are filled in place, in the same order they were written.
  private def readDescriptiveStat(input: Input, sft: SimpleFeatureType, immutable: Boolean): DescriptiveStats = {
    val size = input.readInt(true)
    val attributes = for(_ <- 0 until size) yield input.readInt(true)
    val stats = if (immutable) {
      new DescriptiveStats(attributes) with ImmutableStat
    } else {
      new DescriptiveStats(attributes)
    }
    // helper: overwrite a matrix's backing array with doubles read from the stream
    def readArray(array: Array[Double]): Unit = for(i <- array.indices) { array(i) = input.readDouble }
    readArray(stats._min.getMatrix.data)
    readArray(stats._max.getMatrix.data)
    readArray(stats._sum.getMatrix.data)
    readArray(stats._mean.getMatrix.data)
    readArray(stats._m2n.getMatrix.data)
    readArray(stats._m3n.getMatrix.data)
    readArray(stats._m4n.getMatrix.data)
    readArray(stats._c2.getMatrix.data)
    stats._count = input.readLong(true)
    stats
  }
  // Serializes a SeqStat by writing each contained stat (with its type tag) back-to-back;
  // readSeqStat reads stats until the input is exhausted, so no count prefix is written.
  private def writeSeqStat(output: Output, sft: SimpleFeatureType, stat: SeqStat): Unit =
    stat.stats.foreach(write(output, sft, _))
private def readSeqStat(input: Input, sft: SimpleFeatureType, immutable: Boolean): SeqStat = {
val stats = ArrayBuffer.empty[Stat]
while (input.available() > 0) {
stats.append(read(input, sft, immutable))
}
if (immutable) {
new SeqStat(stats) with ImmutableStat
} else {
new SeqStat(stats)
}
}
private def writeCount(output: Output, stat: CountStat): Unit = output.writeLong(stat.counter, true)
private def readCount(input: Input, immutable: Boolean): CountStat = {
val stat = if (immutable) {
new CountStat with ImmutableStat
} else {
new CountStat
}
stat.counter = input.readLong(true)
stat
}
  // Serializes a MinMax: attribute index, HyperLogLog state (log2m, register count, raw
  // register ints), then the min and max values via the attribute's type-specific writer.
  // This is the "version 2" format consumed by readMinMax.
  private def writeMinMax(output: Output, sft: SimpleFeatureType, stat: MinMax[_]): Unit = {
    output.writeInt(stat.attribute, true)
    output.writeInt(stat.hpp.log2m, true)
    output.writeInt(stat.hpp.registerSet.size, true)
    stat.hpp.registerSet.rawBits.foreach(output.writeInt)
    val write = writer(output, sft.getDescriptor(stat.attribute).getType.getBinding)
    write(stat.minValue)
    write(stat.maxValue)
  }
  // Deserializes a MinMax; mirrors writeMinMax for version 2. Version 1 stored a
  // clearspring-serialized HyperLogLog blob instead, whose internal state (log2m and
  // register bits) is recovered via reflection since those fields are private.
  private def readMinMax(input: Input, sft: SimpleFeatureType, immutable: Boolean, version: Int): MinMax[_] = {
    val attribute = input.readInt(true)
    val hpp = if (version > 1) {
      val log2m = input.readInt(true)
      val size = input.readInt(true)
      val bytes = Array.fill(size)(input.readInt)
      HyperLogLog(log2m, bytes)
    } else {
      // legacy format: length-prefixed clearspring HLL serialization
      val hppBytes = Array.ofDim[Byte](input.readInt(true))
      input.read(hppBytes)
      val clearspring = com.clearspring.analytics.stream.cardinality.HyperLogLog.Builder.build(hppBytes)
      // use reflection to access private variables
      def getField[T](name: String): T = {
        val field = clearspring.getClass.getDeclaredField(name)
        field.setAccessible(true)
        field.get(clearspring).asInstanceOf[T]
      }
      val log2m = getField[Int]("log2m")
      val registerSet = getField[RegisterSet]("registerSet").bits
      HyperLogLog(log2m, registerSet)
    }
    val binding = sft.getDescriptor(attribute).getType.getBinding
    val read = reader(input, binding)
    val min = read()
    val max = read()
    val defaults = MinMaxDefaults[Any](binding)
    if (immutable) {
      new MinMax[Any](attribute, min, max, hpp)(defaults) with ImmutableStat
    } else {
      new MinMax[Any](attribute, min, max, hpp)(defaults)
    }
  }
  // Serializes an EnumerationStat: attribute index, entry count, then each
  // (value, count) pair using the attribute's type-specific writer.
  private def writeEnumeration(output: Output, sft: SimpleFeatureType, stat: EnumerationStat[_]): Unit = {
    output.writeInt(stat.attribute, true)
    output.writeInt(stat.enumeration.size, true)
    val write = writer(output, sft.getDescriptor(stat.attribute).getType.getBinding)
    stat.enumeration.foreach { case (key, count) => write(key); output.writeLong(count, true) }
  }
  // Deserializes an EnumerationStat; mirrors writeEnumeration (attribute, entry count,
  // value/count pairs written with the attribute's type-specific codec).
  private def readEnumeration(input: Input, sft: SimpleFeatureType, immutable: Boolean): EnumerationStat[_] = {
    val attribute = input.readInt(true)
    val size = input.readInt(true)
    val binding = sft.getDescriptor(attribute).getType.getBinding
    val read = reader(input, binding)
    val classTag = ClassTag[Any](binding)
    val stat = if (immutable) {
      new EnumerationStat[Any](attribute)(classTag) with ImmutableStat
    } else {
      new EnumerationStat[Any](attribute)(classTag)
    }
    var i = 0
    while (i < size) {
      stat.enumeration(read()) = input.readLong(true)
      i += 1
    }
    stat
  }
  // Serializes a TopK: attribute index, number of tracked items, then every
  // (item, count) pair (topK(Int.MaxValue) yields the full summary).
  private def writeTopK(output: Output, sft: SimpleFeatureType, stat: TopK[_]): Unit = {
    output.writeInt(stat.attribute, true)
    output.writeInt(stat.size, true)
    val write = writer(output, sft.getDescriptor(stat.attribute).getType.getBinding)
    stat.topK(Int.MaxValue).foreach { case (item, count) => write(item); output.writeLong(count, true) }
  }
  // Deserializes a TopK; mirrors writeTopK for version 2. Version 1 stored a
  // length-prefixed clearspring StreamSummary blob, which is replayed item-by-item
  // into the GeoMesa StreamSummary implementation.
  private def readTopK(input: Input, sft: SimpleFeatureType, immutable: Boolean, version: Int): TopK[_] = {
    val attribute = input.readInt(true)
    val binding = sft.getDescriptor(attribute).getType.getBinding
    val read = reader(input, binding)
    val summary = if (version > 1) {
      val size = input.readInt(true)
      val counters = (0 until size).map(_ => (read(), input.readLong(true)))
      StreamSummary[Any](TopK.StreamCapacity, counters)
    } else {
      import scala.collection.JavaConversions._
      val summaryBytes = input.readBytes(input.readInt(true))
      val clearspring = new com.clearspring.analytics.stream.StreamSummary[Any](summaryBytes)
      val geomesa = StreamSummary[Any](TopK.StreamCapacity)
      clearspring.topK(clearspring.size()).foreach(c => geomesa.offer(c.getItem, c.getCount))
      geomesa
    }
    if (immutable) {
      new TopK[Any](attribute, summary) with ImmutableStat
    } else {
      new TopK[Any](attribute, summary)
    }
  }
  // Serializes a Histogram: attribute index, bin count, lower/upper bounds (via the
  // attribute's type-specific writer), then the bin counts in the sparse encoding
  // of writeCountArray.
  private def writeHistogram(output: Output, sft: SimpleFeatureType, stat: Histogram[_]): Unit = {
    output.writeInt(stat.attribute, true)
    output.writeInt(stat.length, true)
    val write = writer(output, sft.getDescriptor(stat.attribute).getType.getBinding)
    write(stat.bounds._1)
    write(stat.bounds._2)
    writeCountArray(output, stat.bins.counts)
  }
  // Deserializes a Histogram; mirrors writeHistogram, filling the new stat's bin
  // counts in place from the sparse encoding.
  private def readHistogram(input: Input, sft: SimpleFeatureType, immutable: Boolean): Histogram[_] = {
    val attribute = input.readInt(true)
    val length = input.readInt(true)
    val binding = sft.getDescriptor(attribute).getType.getBinding
    val read = reader(input, binding)
    val min = read()
    val max = read()
    val defaults = MinMaxDefaults[Any](binding)
    val classTag = ClassTag[Any](binding)
    val stat = if (immutable) {
      new Histogram[Any](attribute, length, (min, max))(defaults, classTag) with ImmutableStat
    } else {
      new Histogram[Any](attribute, length, (min, max))(defaults, classTag)
    }
    readCountArray(input, stat.bins.counts)
    stat
  }
  // Serializes a Z3Histogram: geometry/date attribute indices, time period name, bin
  // length, then only the time-period bins that contain at least one non-zero count
  // (each as a short period key plus a sparse count array).
  private def writeZ3Histogram(output: Output, sft: SimpleFeatureType, stat: Z3Histogram): Unit = {
    output.writeInt(stat.geomIndex, true)
    output.writeInt(stat.dtgIndex, true)
    output.writeAscii(stat.period.toString)
    output.writeInt(stat.length, true)
    val bins = stat.binMap.filter(_._2.counts.exists(_ != 0L))
    output.writeInt(bins.size, true)
    bins.foreach { case (w, bin) =>
      output.writeShort(w)
      writeCountArray(output, bin.counts)
    }
  }
private def readZ3Histogram(input: Input, sft: SimpleFeatureType, immutable: Boolean, version: Int): Z3Histogram = {
val geomIndex = input.readInt(true)
val dtgIndex = input.readInt(true)
val period = if (version > 1) { TimePeriod.withName(input.readString()) } else { TimePeriod.Week }
val length = input.readInt(true)
val stat = if (immutable) {
new Z3Histogram(geomIndex, dtgIndex, period, length) with ImmutableStat
} else {
new Z3Histogram(geomIndex, dtgIndex, period, length)
}
val numWeeks = input.readInt(true)
var week = 0
while (week < numWeeks) {
val bins = stat.newBins
stat.binMap.put(input.readShort, bins)
readCountArray(input, bins.counts)
week += 1
}
stat
}
  // Serializes a Frequency: attribute/date indices, time period name, precision, the
  // count-min sketch parameters (eps, confidence), then only the non-empty sketches -
  // each as a short period key, its table rows in the sparse count encoding, and its size.
  private def writeFrequency(output: Output, sft: SimpleFeatureType, stat: Frequency[_]): Unit = {
    output.writeInt(stat.attribute, true)
    output.writeInt(stat.dtgIndex, true)
    output.writeAscii(stat.period.toString)
    output.writeInt(stat.precision, true)
    output.writeDouble(stat.eps)
    output.writeDouble(stat.confidence)
    val sketches = stat.sketchMap.filter(_._2.size > 0)
    output.writeInt(sketches.size, true)
    sketches.foreach { case (w, sketch) =>
      output.writeShort(w)
      var i = 0
      while (i < sketch.table.length) {
        writeCountArray(output, sketch.table(i))
        i += 1
      }
      output.writeLong(sketch.size, true)
    }
  }
  // Deserializes a Frequency; mirrors writeFrequency. Version 1 predates configurable
  // time periods and always used TimePeriod.Week.
  private def readFrequency(input: Input, sft: SimpleFeatureType, immutable: Boolean, version: Int): Frequency[_] = {
    val attribute = input.readInt(true)
    val dtgIndex = input.readInt(true)
    val period = if (version > 1) { TimePeriod.withName(input.readString()) } else { TimePeriod.Week }
    val precision = input.readInt(true)
    val eps = input.readDouble()
    val confidence = input.readDouble()
    val binding = sft.getDescriptor(attribute).getType.getBinding
    val stat = if (immutable) {
      new Frequency[Any](attribute, dtgIndex, period, precision, eps, confidence)(ClassTag[Any](binding)) with ImmutableStat
    } else {
      new Frequency[Any](attribute, dtgIndex, period, precision, eps, confidence)(ClassTag[Any](binding))
    }
    val sketchCount = input.readInt(true)
    var c = 0
    while (c < sketchCount) {
      // 'week' is the short time-period bin key (day/week/month/year per `period`)
      val week = input.readShort
      val sketch = stat.newSketch
      stat.sketchMap.put(week, sketch)
      var i = 0
      while (i < sketch.table.length) {
        readCountArray(input, sketch.table(i))
        i += 1
      }
      sketch._size = input.readLong(true)
      c += 1
    }
    stat
  }
  // Serializes a Z3Frequency; same wire layout as writeFrequency but keyed by
  // geometry/date indices instead of a single attribute.
  private def writeZ3Frequency(output: Output, sft: SimpleFeatureType, stat: Z3Frequency): Unit = {
    output.writeInt(stat.geomIndex, true)
    output.writeInt(stat.dtgIndex, true)
    output.writeAscii(stat.period.toString)
    output.writeInt(stat.precision, true)
    output.writeDouble(stat.eps)
    output.writeDouble(stat.confidence)
    val sketches = stat.sketches.filter(_._2.size > 0)
    output.writeInt(sketches.size, true)
    sketches.foreach { case (w, sketch) =>
      output.writeShort(w)
      var i = 0
      while (i < sketch.table.length) {
        writeCountArray(output, sketch.table(i))
        i += 1
      }
      output.writeLong(sketch.size, true)
    }
  }
  // Deserializes a Z3Frequency; mirrors writeZ3Frequency. Version 1 predates
  // configurable time periods and always used TimePeriod.Week.
  private def readZ3Frequency(input: Input, sft: SimpleFeatureType, immutable: Boolean, version: Int): Z3Frequency = {
    val geomIndex = input.readInt(true)
    val dtgIndex = input.readInt(true)
    val period = if (version > 1) { TimePeriod.withName(input.readString()) } else { TimePeriod.Week }
    val precision = input.readInt(true)
    val eps = input.readDouble()
    val confidence = input.readDouble()
    val stat = if (immutable) {
      new Z3Frequency(geomIndex, dtgIndex, period, precision, eps, confidence) with ImmutableStat
    } else {
      new Z3Frequency(geomIndex, dtgIndex, period, precision, eps, confidence)
    }
    val numSketches = input.readInt(true)
    var sketchCount = 0
    while (sketchCount < numSketches) {
      val sketch = stat.newSketch
      stat.sketches.put(input.readShort, sketch)
      var i = 0
      while (i < sketch.table.length) {
        readCountArray(input, sketch.table(i))
        i += 1
      }
      sketch._size = input.readLong(true)
      sketchCount += 1
    }
    stat
  }
  // Serializes an IteratorStackCount as a single variable-length long counter.
  private def writeIteratorStackCount(output: Output, stat: IteratorStackCount): Unit =
    output.writeLong(stat.counter, true)
private def readIteratorStackCount(input: Input, immutable: Boolean): IteratorStackCount = {
val stat = if (immutable) {
new IteratorStackCount() with ImmutableStat
} else {
new IteratorStackCount()
}
stat.counter = input.readLong(true)
stat
}
  // Writes a long array with a sparse encoding: a run of more than 4 zeros is collapsed
  // into a Long.MaxValue marker followed by the run length; runs of 1-4 zeros are written
  // as literal zeros. Non-zero counts are written as-is.
  // NOTE(review): a genuine count equal to Long.MaxValue would be misread as the sparse
  // marker by readCountArray - assumed to never occur in practice.
  private def writeCountArray(output: Output, counts: Array[Long]): Unit = {
    var i = 0
    while (i < counts.length) {
      val count = counts(i)
      if (count == 0) {
        // find the end of the zero run starting at i
        var nextNonZero = i + 1
        while (nextNonZero < counts.length && counts(nextNonZero) == 0) {
          nextNonZero += 1
        }
        val numZeros = nextNonZero - i
        if (numZeros > 4) {
          // write a max long as an indicator that we have sparse values, then write the number of zeros
          output.writeLong(Long.MaxValue, true)
          output.writeInt(numZeros, true)
        } else if (numZeros > 0) {
          (0 until numZeros).foreach(_ => output.writeLong(0L, true))
        }
        i = nextNonZero
      } else {
        output.writeLong(count, true)
        i += 1
      }
    }
  }
  // Fills a long array from the sparse encoding of writeCountArray: a Long.MaxValue
  // marker is followed by the number of zero entries to skip (the array is assumed to
  // be zero-initialized); any other value is a literal count.
  private def readCountArray(input: Input, counts: Array[Long]): Unit = {
    var i = 0
    while (i < counts.length) {
      val count = input.readLong(true)
      if (count == Long.MaxValue) {
        i += input.readInt(true) // skip sparsely written values
      } else {
        counts(i) = count
        i += 1
      }
    }
  }
  // Returns a type-specific serializer for attribute values: strings, var-length
  // ints/longs, floats, doubles, dates as epoch millis, and geometries as
  // length-prefixed WKB. Throws for any other binding. Must stay symmetric
  // with `reader`.
  private def writer(output: Output, binding: Class[_]): (Any) => Unit = {
    if (binding == classOf[String]) {
      (value) => output.writeString(value.asInstanceOf[String])
    } else if (binding == classOf[Integer]) {
      (value) => output.writeInt(value.asInstanceOf[Integer], true)
    } else if (binding == classOf[jLong]) {
      (value) => output.writeLong(value.asInstanceOf[jLong], true)
    } else if (binding == classOf[jFloat]) {
      (value) => output.writeFloat(value.asInstanceOf[jFloat])
    } else if (binding == classOf[jDouble]) {
      (value) => output.writeDouble(value.asInstanceOf[jDouble])
    } else if (classOf[Date].isAssignableFrom(binding)) {
      (value) => output.writeLong(value.asInstanceOf[Date].getTime, true)
    } else if (classOf[Geometry].isAssignableFrom(binding)) {
      (value) => {
        val b1 = WKBUtils.write(value.asInstanceOf[Geometry])
        output.writeInt(b1.length, true)
        output.write(b1)
      }
    } else {
      throw new Exception(s"Cannot serialize stat due to invalid type: $binding")
    }
  }
  // Returns a type-specific deserializer for attribute values; the exact inverse of
  // `writer` (strings, var-length ints/longs, floats, doubles, epoch-millis dates,
  // length-prefixed WKB geometries). Throws for any other binding.
  private def reader(input: Input, binding: Class[_]): () => Any = {
    if (binding == classOf[String]) {
      () => input.readString()
    } else if (binding == classOf[Integer]) {
      () => input.readInt(true)
    } else if (binding == classOf[jLong]) {
      () => input.readLong(true)
    } else if (binding == classOf[jFloat]) {
      () => input.readFloat()
    } else if (binding == classOf[jDouble]) {
      () => input.readDouble()
    } else if (classOf[Date].isAssignableFrom(binding)) {
      () => new Date(input.readLong(true))
    } else if (classOf[Geometry].isAssignableFrom(binding)) {
      () => {
        val b = Array.ofDim[Byte](input.readInt(true))
        input.read(b)
        WKBUtils.read(b)
      }
    } else {
      throw new Exception(s"Cannot deserialize stat due to invalid type: $binding")
    }
  }
}
| ronq/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/stats/StatSerializer.scala | Scala | apache-2.0 | 23,294 |
/**
* Created by Roni A. Koitermaa on 24.11.2015.
*/
package Game
import scala.collection.mutable
/**
 * Holds the game world state: the list of areas, the active area index, the player,
 * and the in-progress action. Creates the starting "Forest" area on construction.
 */
class World {
  var areas = mutable.Buffer[Area]()
  var currentArea = 0
  var player = new Player()
  var action = new Action()

  // bootstrap the initial area
  createNewArea("Forest", 0, "res/forest", "It's a forest")

  /** Registers a new area with the given name, id, resource path and description. */
  def createNewArea(name: String, id: Int, path: String, desc: String): Unit = {
    val area = new Area(name, id, path, desc)
    areas += area
  }

  /** Advances the current action for one update tick. */
  def update(game: Game): Unit = action.update(game)

  /** Renders the currently active area. */
  def draw(game: Game): Unit = areas(currentArea).draw(game)
}
| Ronin748/Herbert_Prelude | src/World.scala | Scala | gpl-2.0 | 562 |
package io.react2.scalata.translation
import io.react2.scalata.generators.Generator
/**
* @author dbalduini
*/
/** Base type for the named data-structure descriptions used in translation. */
sealed abstract class DataStructure {
  /** The structure's name. */
  def name: String
}

/** Root structure: a name, a repeat count, and its field generators. */
case class Root(name: String, repeat: Int, fields: List[FieldGen]) extends DataStructure {
  override def toString: String = s"Root fields: ${fields.mkString(", ")}"
}

/** A named field backed by a generator of [[Field]] values. */
case class FieldGen(name: String, gen: Generator[Field]) extends DataStructure {
  /** Draws one value from the generator, paired with this field's name. */
  def generate: (String, Field) = name -> gen.one
  override def toString: String = name
}
| React2/scalata | src/main/scala/io/react2/scalata/translation/DataStructure.scala | Scala | apache-2.0 | 518 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution.{BinaryExecNode, CodegenSupport,
ExternalAppendOnlyUnsafeRowArray, RowIterator, SparkPlan}
import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}
import org.apache.spark.util.collection.BitSet
/**
* Performs a sort merge join of two child relations.
*/
case class SortMergeJoinExec(
leftKeys: Seq[Expression],
rightKeys: Seq[Expression],
joinType: JoinType,
condition: Option[Expression],
left: SparkPlan,
right: SparkPlan) extends BinaryExecNode with CodegenSupport {
  // SQL metric surfaced in the Spark UI: total number of rows produced by this join.
  override lazy val metrics = Map(
    "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))
  // Output attributes per join type: outer joins mark the possibly-missing side's
  // columns nullable, existence joins append the `exists` flag, and semi/anti joins
  // emit only the left side. Unsupported join types fail fast.
  override def output: Seq[Attribute] = {
    joinType match {
      case _: InnerLike =>
        left.output ++ right.output
      case LeftOuter =>
        left.output ++ right.output.map(_.withNullability(true))
      case RightOuter =>
        left.output.map(_.withNullability(true)) ++ right.output
      case FullOuter =>
        (left.output ++ right.output).map(_.withNullability(true))
      case j: ExistenceJoin =>
        left.output :+ j.exists
      case LeftExistence(_) =>
        left.output
      case x =>
        throw new IllegalArgumentException(
          s"${getClass.getSimpleName} should not take $x as the JoinType")
    }
  }
  // Output partitioning: inner joins preserve both children's partitionings; outer
  // joins preserve the streamed side's; full outer joins lose partitioning guarantees
  // because null rows may appear on either side.
  override def outputPartitioning: Partitioning = joinType match {
    case _: InnerLike =>
      PartitioningCollection(Seq(left.outputPartitioning, right.outputPartitioning))
    // For left and right outer joins, the output is partitioned by the streamed input's join keys.
    case LeftOuter => left.outputPartitioning
    case RightOuter => right.outputPartitioning
    case FullOuter => UnknownPartitioning(left.outputPartitioning.numPartitions)
    case LeftExistence(_) => left.outputPartitioning
    case x =>
      throw new IllegalArgumentException(
        s"${getClass.getSimpleName} should not take $x as the JoinType")
  }
  // Both children must be hash-clustered on their join keys so matching keys land
  // in the same partition.
  override def requiredChildDistribution: Seq[Distribution] =
    ClusteredDistribution(leftKeys) :: ClusteredDistribution(rightKeys) :: Nil
  // Output ordering guarantees after the merge join, per join type. Inner joins can
  // promise both sides' key orderings (merged via sameOrderExpressions); outer joins
  // only the streamed side's; full outer joins none.
  override def outputOrdering: Seq[SortOrder] = joinType match {
    // For inner join, orders of both sides keys should be kept.
    case _: InnerLike =>
      val leftKeyOrdering = getKeyOrdering(leftKeys, left.outputOrdering)
      val rightKeyOrdering = getKeyOrdering(rightKeys, right.outputOrdering)
      leftKeyOrdering.zip(rightKeyOrdering).map { case (lKey, rKey) =>
        // Also add the right key and its `sameOrderExpressions`
        SortOrder(lKey.child, Ascending, lKey.sameOrderExpressions + rKey.child ++ rKey
          .sameOrderExpressions)
      }
    // For left and right outer joins, the output is ordered by the streamed input's join keys.
    case LeftOuter => getKeyOrdering(leftKeys, left.outputOrdering)
    case RightOuter => getKeyOrdering(rightKeys, right.outputOrdering)
    // There are null rows in both streams, so there is no order.
    case FullOuter => Nil
    case LeftExistence(_) => getKeyOrdering(leftKeys, left.outputOrdering)
    case x =>
      throw new IllegalArgumentException(
        s"${getClass.getSimpleName} should not take $x as the JoinType")
  }
  /**
   * The utility method to get output ordering for left or right side of the join.
   *
   * Returns the required ordering for left or right child if childOutputOrdering does not
   * satisfy the required ordering; otherwise, which means the child does not need to be sorted
   * again, returns the required ordering for this child with extra "sameOrderExpressions" from
   * the child's outputOrdering.
   */
  private def getKeyOrdering(keys: Seq[Expression], childOutputOrdering: Seq[SortOrder])
    : Seq[SortOrder] = {
    val requiredOrdering = requiredOrders(keys)
    if (SortOrder.orderingSatisfies(childOutputOrdering, requiredOrdering)) {
      keys.zip(childOutputOrdering).map { case (key, childOrder) =>
        // merge in the child's equivalent expressions, but avoid duplicating the key itself
        SortOrder(key, Ascending, childOrder.sameOrderExpressions + childOrder.child - key)
      }
    } else {
      requiredOrdering
    }
  }
  // Both children must arrive sorted ascending on their join keys for the merge scan.
  override def requiredChildOrdering: Seq[Seq[SortOrder]] =
    requiredOrders(leftKeys) :: requiredOrders(rightKeys) :: Nil
  // Ascending sort order for each join key.
  private def requiredOrders(keys: Seq[Expression]): Seq[SortOrder] = {
    // This must be ascending in order to agree with the `keyOrdering` defined in `doExecute()`.
    keys.map(SortOrder(_, Ascending))
  }
  // Projection extracting the left join keys from a left-side row.
  private def createLeftKeyGenerator(): Projection =
    UnsafeProjection.create(leftKeys, left.output)
  // Projection extracting the right join keys from a right-side row.
  private def createRightKeyGenerator(): Projection =
    UnsafeProjection.create(rightKeys, right.output)
private def getSpillThreshold: Int = {
sqlContext.conf.sortMergeJoinExecBufferSpillThreshold
}
private def getInMemoryThreshold: Int = {
sqlContext.conf.sortMergeJoinExecBufferInMemoryThreshold
}
  /**
   * Interpreted (non-codegen) execution path. Zips the two sorted children
   * partition-wise and drives a merge-join scanner appropriate for the join type;
   * each branch yields a RowIterator converted back to a Scala iterator.
   */
  protected override def doExecute(): RDD[InternalRow] = {
    val numOutputRows = longMetric("numOutputRows")
    val spillThreshold = getSpillThreshold
    val inMemoryThreshold = getInMemoryThreshold
    left.execute().zipPartitions(right.execute()) { (leftIter, rightIter) =>
      // Non-equi join condition, if any; defaults to always-true.
      val boundCondition: (InternalRow) => Boolean = {
        condition.map { cond =>
          newPredicate(cond, left.output ++ right.output).eval _
        }.getOrElse {
          (r: InternalRow) => true
        }
      }
      // An ordering that can be used to compare keys from both sides.
      val keyOrdering = newNaturalAscendingOrdering(leftKeys.map(_.dataType))
      val resultProj: InternalRow => InternalRow = UnsafeProjection.create(output, output)
      joinType match {
        // Inner: stream left, buffer right matches, emit pairs passing the condition.
        case _: InnerLike =>
          new RowIterator {
            private[this] var currentLeftRow: InternalRow = _
            private[this] var currentRightMatches: ExternalAppendOnlyUnsafeRowArray = _
            private[this] var rightMatchesIterator: Iterator[UnsafeRow] = null
            private[this] val smjScanner = new SortMergeJoinScanner(
              createLeftKeyGenerator(),
              createRightKeyGenerator(),
              keyOrdering,
              RowIterator.fromScala(leftIter),
              RowIterator.fromScala(rightIter),
              inMemoryThreshold,
              spillThreshold
            )
            private[this] val joinRow = new JoinedRow

            if (smjScanner.findNextInnerJoinRows()) {
              currentRightMatches = smjScanner.getBufferedMatches
              currentLeftRow = smjScanner.getStreamedRow
              rightMatchesIterator = currentRightMatches.generateIterator()
            }

            override def advanceNext(): Boolean = {
              while (rightMatchesIterator != null) {
                if (!rightMatchesIterator.hasNext) {
                  if (smjScanner.findNextInnerJoinRows()) {
                    currentRightMatches = smjScanner.getBufferedMatches
                    currentLeftRow = smjScanner.getStreamedRow
                    rightMatchesIterator = currentRightMatches.generateIterator()
                  } else {
                    currentRightMatches = null
                    currentLeftRow = null
                    rightMatchesIterator = null
                    return false
                  }
                }
                joinRow(currentLeftRow, rightMatchesIterator.next())
                if (boundCondition(joinRow)) {
                  numOutputRows += 1
                  return true
                }
              }
              false
            }

            override def getRow: InternalRow = resultProj(joinRow)
          }.toScala

        // Left outer: stream left, pad unmatched rows with right-side nulls.
        case LeftOuter =>
          val smjScanner = new SortMergeJoinScanner(
            streamedKeyGenerator = createLeftKeyGenerator(),
            bufferedKeyGenerator = createRightKeyGenerator(),
            keyOrdering,
            streamedIter = RowIterator.fromScala(leftIter),
            bufferedIter = RowIterator.fromScala(rightIter),
            inMemoryThreshold,
            spillThreshold
          )
          val rightNullRow = new GenericInternalRow(right.output.length)
          new LeftOuterIterator(
            smjScanner, rightNullRow, boundCondition, resultProj, numOutputRows).toScala

        // Right outer: stream right, pad unmatched rows with left-side nulls.
        case RightOuter =>
          val smjScanner = new SortMergeJoinScanner(
            streamedKeyGenerator = createRightKeyGenerator(),
            bufferedKeyGenerator = createLeftKeyGenerator(),
            keyOrdering,
            streamedIter = RowIterator.fromScala(rightIter),
            bufferedIter = RowIterator.fromScala(leftIter),
            inMemoryThreshold,
            spillThreshold
          )
          val leftNullRow = new GenericInternalRow(left.output.length)
          new RightOuterIterator(
            smjScanner, leftNullRow, boundCondition, resultProj, numOutputRows).toScala

        // Full outer: dedicated scanner that pads both sides with nulls.
        case FullOuter =>
          val leftNullRow = new GenericInternalRow(left.output.length)
          val rightNullRow = new GenericInternalRow(right.output.length)
          val smjScanner = new SortMergeFullOuterJoinScanner(
            leftKeyGenerator = createLeftKeyGenerator(),
            rightKeyGenerator = createRightKeyGenerator(),
            keyOrdering,
            leftIter = RowIterator.fromScala(leftIter),
            rightIter = RowIterator.fromScala(rightIter),
            boundCondition,
            leftNullRow,
            rightNullRow)

          new FullOuterIterator(
            smjScanner,
            resultProj,
            numOutputRows).toScala

        // Left semi: emit each left row at most once, when any right match satisfies the condition.
        case LeftSemi =>
          new RowIterator {
            private[this] var currentLeftRow: InternalRow = _
            private[this] val smjScanner = new SortMergeJoinScanner(
              createLeftKeyGenerator(),
              createRightKeyGenerator(),
              keyOrdering,
              RowIterator.fromScala(leftIter),
              RowIterator.fromScala(rightIter),
              inMemoryThreshold,
              spillThreshold
            )
            private[this] val joinRow = new JoinedRow

            override def advanceNext(): Boolean = {
              while (smjScanner.findNextInnerJoinRows()) {
                val currentRightMatches = smjScanner.getBufferedMatches
                currentLeftRow = smjScanner.getStreamedRow
                if (currentRightMatches != null && currentRightMatches.length > 0) {
                  val rightMatchesIterator = currentRightMatches.generateIterator()
                  while (rightMatchesIterator.hasNext) {
                    joinRow(currentLeftRow, rightMatchesIterator.next())
                    if (boundCondition(joinRow)) {
                      numOutputRows += 1
                      return true
                    }
                  }
                }
              }
              false
            }

            override def getRow: InternalRow = currentLeftRow
          }.toScala

        // Left anti: emit each left row that has NO right match satisfying the condition.
        case LeftAnti =>
          new RowIterator {
            private[this] var currentLeftRow: InternalRow = _
            private[this] val smjScanner = new SortMergeJoinScanner(
              createLeftKeyGenerator(),
              createRightKeyGenerator(),
              keyOrdering,
              RowIterator.fromScala(leftIter),
              RowIterator.fromScala(rightIter),
              inMemoryThreshold,
              spillThreshold
            )
            private[this] val joinRow = new JoinedRow

            override def advanceNext(): Boolean = {
              while (smjScanner.findNextOuterJoinRows()) {
                currentLeftRow = smjScanner.getStreamedRow
                val currentRightMatches = smjScanner.getBufferedMatches
                if (currentRightMatches == null || currentRightMatches.length == 0) {
                  numOutputRows += 1
                  return true
                }
                var found = false
                val rightMatchesIterator = currentRightMatches.generateIterator()
                while (!found && rightMatchesIterator.hasNext) {
                  joinRow(currentLeftRow, rightMatchesIterator.next())
                  if (boundCondition(joinRow)) {
                    found = true
                  }
                }
                if (!found) {
                  numOutputRows += 1
                  return true
                }
              }
              false
            }

            override def getRow: InternalRow = currentLeftRow
          }.toScala

        // Existence join: emit every left row plus a boolean "had a match" column.
        case j: ExistenceJoin =>
          new RowIterator {
            private[this] var currentLeftRow: InternalRow = _
            private[this] val result: InternalRow = new GenericInternalRow(Array[Any](null))
            private[this] val smjScanner = new SortMergeJoinScanner(
              createLeftKeyGenerator(),
              createRightKeyGenerator(),
              keyOrdering,
              RowIterator.fromScala(leftIter),
              RowIterator.fromScala(rightIter),
              inMemoryThreshold,
              spillThreshold
            )
            private[this] val joinRow = new JoinedRow

            override def advanceNext(): Boolean = {
              while (smjScanner.findNextOuterJoinRows()) {
                currentLeftRow = smjScanner.getStreamedRow
                val currentRightMatches = smjScanner.getBufferedMatches
                var found = false
                if (currentRightMatches != null && currentRightMatches.length > 0) {
                  val rightMatchesIterator = currentRightMatches.generateIterator()
                  while (!found && rightMatchesIterator.hasNext) {
                    joinRow(currentLeftRow, rightMatchesIterator.next())
                    if (boundCondition(joinRow)) {
                      found = true
                    }
                  }
                }
                result.setBoolean(0, found)
                numOutputRows += 1
                return true
              }
              false
            }

            override def getRow: InternalRow = resultProj(joinRow(currentLeftRow, result))
          }.toScala

        case x =>
          throw new IllegalArgumentException(
            s"SortMergeJoin should not take $x as the JoinType")
      }
    }
  }
  // Whole-stage code generation is only implemented for inner-like joins.
  override def supportCodegen: Boolean = {
    joinType.isInstanceOf[InnerLike]
  }
  // Codegen inputs: the executed RDDs of both children (left first).
  override def inputRDDs(): Seq[RDD[InternalRow]] = {
    left.execute() :: right.execute() :: Nil
  }
  // Generates codegen expressions evaluating the join keys against the given row
  // variable. Resets the codegen context's input row / current vars as a side effect.
  private def createJoinKey(
      ctx: CodegenContext,
      row: String,
      keys: Seq[Expression],
      input: Seq[Attribute]): Seq[ExprCode] = {
    ctx.INPUT_ROW = row
    ctx.currentVars = null
    keys.map(BindReferences.bindReference(_, input).genCode(ctx))
  }
  // Copies key values into buffered class-member state so they survive across
  // generated function calls (uses leftKeys' data types for the buffers).
  private def copyKeys(ctx: CodegenContext, vars: Seq[ExprCode]): Seq[ExprCode] = {
    vars.zipWithIndex.map { case (ev, i) =>
      ctx.addBufferedState(leftKeys(i).dataType, "value", ev.value)
    }
  }
  // Emits Java code comparing two key tuples lexicographically into the local `comp`:
  // each component is only compared while all previous components were equal.
  private def genComparison(ctx: CodegenContext, a: Seq[ExprCode], b: Seq[ExprCode]): String = {
    val comparisons = a.zip(b).zipWithIndex.map { case ((l, r), i) =>
      s"""
         |if (comp == 0) {
         |  comp = ${ctx.genComp(leftKeys(i).dataType, l.value, r.value)};
         |}
       """.stripMargin.trim
    }
    s"""
       |comp = 0;
       |${comparisons.mkString("\\n")}
     """.stripMargin
  }
/**
* Generate a function to scan both left and right to find a match, returns the term for
* matched one row from left side and buffered rows from right side.
*/
private def genScanner(ctx: CodegenContext): (String, String) = {
// Create class member for next row from both sides.
val leftRow = ctx.freshName("leftRow")
ctx.addMutableState("InternalRow", leftRow)
val rightRow = ctx.freshName("rightRow")
ctx.addMutableState("InternalRow", rightRow, s"$rightRow = null;")
// Create variables for join keys from both sides.
val leftKeyVars = createJoinKey(ctx, leftRow, leftKeys, left.output)
val leftAnyNull = leftKeyVars.map(_.isNull).mkString(" || ")
val rightKeyTmpVars = createJoinKey(ctx, rightRow, rightKeys, right.output)
val rightAnyNull = rightKeyTmpVars.map(_.isNull).mkString(" || ")
// Copy the right key as class members so they could be used in next function call.
val rightKeyVars = copyKeys(ctx, rightKeyTmpVars)
// A list to hold all matched rows from right side.
val matches = ctx.freshName("matches")
val clsName = classOf[ExternalAppendOnlyUnsafeRowArray].getName
val spillThreshold = getSpillThreshold
val inMemoryThreshold = getInMemoryThreshold
ctx.addMutableState(clsName, matches,
s"$matches = new $clsName($inMemoryThreshold, $spillThreshold);")
// Copy the left keys as class members so they could be used in next function call.
val matchedKeyVars = copyKeys(ctx, leftKeyVars)
ctx.addNewFunction("findNextInnerJoinRows",
s"""
|private boolean findNextInnerJoinRows(
| scala.collection.Iterator leftIter,
| scala.collection.Iterator rightIter) {
| $leftRow = null;
| int comp = 0;
| while ($leftRow == null) {
| if (!leftIter.hasNext()) return false;
| $leftRow = (InternalRow) leftIter.next();
| ${leftKeyVars.map(_.code).mkString("\\n")}
| if ($leftAnyNull) {
| $leftRow = null;
| continue;
| }
| if (!$matches.isEmpty()) {
| ${genComparison(ctx, leftKeyVars, matchedKeyVars)}
| if (comp == 0) {
| return true;
| }
| $matches.clear();
| }
|
| do {
| if ($rightRow == null) {
| if (!rightIter.hasNext()) {
| ${matchedKeyVars.map(_.code).mkString("\\n")}
| return !$matches.isEmpty();
| }
| $rightRow = (InternalRow) rightIter.next();
| ${rightKeyTmpVars.map(_.code).mkString("\\n")}
| if ($rightAnyNull) {
| $rightRow = null;
| continue;
| }
| ${rightKeyVars.map(_.code).mkString("\\n")}
| }
| ${genComparison(ctx, leftKeyVars, rightKeyVars)}
| if (comp > 0) {
| $rightRow = null;
| } else if (comp < 0) {
| if (!$matches.isEmpty()) {
| ${matchedKeyVars.map(_.code).mkString("\\n")}
| return true;
| }
| $leftRow = null;
| } else {
| $matches.add((UnsafeRow) $rightRow);
| $rightRow = null;;
| }
| } while ($leftRow != null);
| }
| return false; // unreachable
|}
""".stripMargin, inlineToOuterClass = true)
(leftRow, matches)
}
  /**
   * Creates variables and declarations for left part of result row.
   *
   * In order to defer the access after condition and also only access once in the loop,
   * the variables should be declared separately from accessing the columns, we can't use the
   * codegen of BoundReference here.
   */
  private def createLeftVars(ctx: CodegenContext, leftRow: String): (Seq[ExprCode], Seq[String]) = {
    ctx.INPUT_ROW = leftRow
    left.output.zipWithIndex.map { case (a, i) =>
      val value = ctx.freshName("value")
      val valueCode = ctx.getValue(leftRow, a.dataType, i.toString)
      val javaType = ctx.javaType(a.dataType)
      val defaultValue = ctx.defaultValue(a.dataType)
      if (a.nullable) {
        // nullable column: guard the value access behind an isNull check
        val isNull = ctx.freshName("isNull")
        val code =
          s"""
             |$isNull = $leftRow.isNullAt($i);
             |$value = $isNull ? $defaultValue : ($valueCode);
           """.stripMargin
        val leftVarsDecl =
          s"""
             |boolean $isNull = false;
             |$javaType $value = $defaultValue;
           """.stripMargin
        (ExprCode(code, isNull, value), leftVarsDecl)
      } else {
        // non-nullable column: direct assignment, isNull is statically false
        val code = s"$value = $valueCode;"
        val leftVarsDecl = s"""$javaType $value = $defaultValue;"""
        (ExprCode(code, "false", value), leftVarsDecl)
      }
    }.unzip
  }
/**
 * Creates the variables for right part of result row, using BoundReference, since the right
 * part are accessed inside the loop.
 *
 * @param ctx codegen context; INPUT_ROW is pointed at `rightRow` for the duration.
 * @param rightRow name of the generated Java variable holding the current right row.
 */
private def createRightVar(ctx: CodegenContext, rightRow: String): Seq[ExprCode] = {
  ctx.INPUT_ROW = rightRow
  for ((attr, ordinal) <- right.output.zipWithIndex)
    yield BoundReference(ordinal, attr.dataType, attr.nullable).genCode(ctx)
}
/**
 * Splits the given column variables into those referenced by the join condition and those
 * that are not, returning (code evaluated before the condition, code evaluated after it).
 *
 * Only a few columns are typically used by the condition, so columns that the condition
 * filters out never need to be materialized at all.
 */
private def splitVarsByCondition(
    attributes: Seq[Attribute],
    variables: Seq[ExprCode]): (String, String) = {
  condition match {
    case Some(cond) =>
      val refs = cond.references
      val (referenced, unreferenced) =
        attributes.zip(variables).partition { case (attr, _) => refs.contains(attr) }
      val beforeCond = evaluateVariables(referenced.map(_._2))
      val afterCond = evaluateVariables(unreferenced.map(_._2))
      (beforeCond, afterCond)
    case None =>
      // No condition: evaluate everything up front, nothing deferred.
      (evaluateVariables(variables), "")
  }
}
// The generated loop reassigns the underlying left/right row variables on every
// iteration, so consumers that buffer result rows must copy them first.
override def needCopyResult: Boolean = true
/**
 * Produces the whole-stage-codegen driver loop for the sort-merge inner join: repeatedly
 * finds the next (left row, buffered right matches) pair via the generated
 * findNextInnerJoinRows(), then emits one output row per buffered match that passes the
 * optional join condition.
 */
override def doProduce(ctx: CodegenContext): String = {
  // Expose the two child input iterators as mutable state on the generated class.
  val leftInput = ctx.freshName("leftInput")
  ctx.addMutableState("scala.collection.Iterator", leftInput, s"$leftInput = inputs[0];")
  val rightInput = ctx.freshName("rightInput")
  ctx.addMutableState("scala.collection.Iterator", rightInput, s"$rightInput = inputs[1];")

  val (leftRow, matches) = genScanner(ctx)

  // Create variables for row from both sides.
  val (leftVars, leftVarDecl) = createLeftVars(ctx, leftRow)
  val rightRow = ctx.freshName("rightRow")
  val rightVars = createRightVar(ctx, rightRow)

  val iterator = ctx.freshName("iterator")
  val numOutput = metricTerm(ctx, "numOutputRows")
  val (beforeLoop, condCheck) = if (condition.isDefined) {
    // Split the code of creating variables based on whether it's used by condition or not.
    val loaded = ctx.freshName("loaded")
    val (leftBefore, leftAfter) = splitVarsByCondition(left.output, leftVars)
    val (rightBefore, rightAfter) = splitVarsByCondition(right.output, rightVars)
    // Generate code for condition
    ctx.currentVars = leftVars ++ rightVars
    val cond = BindReferences.bindReference(condition.get, output).genCode(ctx)
    // evaluate the columns those used by condition before loop
    val before = s"""
       |boolean $loaded = false;
       |$leftBefore
     """.stripMargin
    // Columns NOT used by the condition are only loaded (once, guarded by $loaded)
    // after the condition has passed at least once for this left row.
    val checking = s"""
       |$rightBefore
       |${cond.code}
       |if (${cond.isNull} || !${cond.value}) continue;
       |if (!$loaded) {
       |  $loaded = true;
       |  $leftAfter
       |}
       |$rightAfter
     """.stripMargin
    (before, checking)
  } else {
    (evaluateVariables(leftVars), "")
  }

  s"""
     |while (findNextInnerJoinRows($leftInput, $rightInput)) {
     |  ${leftVarDecl.mkString("\\n")}
     |  ${beforeLoop.trim}
     |  scala.collection.Iterator<UnsafeRow> $iterator = $matches.generateIterator();
     |  while ($iterator.hasNext()) {
     |    InternalRow $rightRow = (InternalRow) $iterator.next();
     |    ${condCheck.trim}
     |    $numOutput.add(1);
     |    ${consume(ctx, leftVars ++ rightVars)}
     |  }
     |  if (shouldStop()) return;
     |}
   """.stripMargin
}
}
/**
* Helper class that is used to implement [[SortMergeJoinExec]].
*
* To perform an inner (outer) join, users of this class call [[findNextInnerJoinRows()]]
* ([[findNextOuterJoinRows()]]), which returns `true` if a result has been produced and `false`
* otherwise. If a result has been produced, then the caller may call [[getStreamedRow]] to return
* the matching row from the streamed input and may call [[getBufferedMatches]] to return the
* sequence of matching rows from the buffered input (in the case of an outer join, this will return
* an empty sequence if there are no matches from the buffered input). For efficiency, both of these
* methods return mutable objects which are re-used across calls to the `findNext*JoinRows()`
* methods.
*
* @param streamedKeyGenerator a projection that produces join keys from the streamed input.
* @param bufferedKeyGenerator a projection that produces join keys from the buffered input.
* @param keyOrdering an ordering which can be used to compare join keys.
* @param streamedIter an input whose rows will be streamed.
* @param bufferedIter an input whose rows will be buffered to construct sequences of rows that
* have the same join key.
* @param inMemoryThreshold Threshold for number of rows guaranteed to be held in memory by
* internal buffer
* @param spillThreshold Threshold for number of rows to be spilled by internal buffer
*/
private[joins] class SortMergeJoinScanner(
    streamedKeyGenerator: Projection,
    bufferedKeyGenerator: Projection,
    keyOrdering: Ordering[InternalRow],
    streamedIter: RowIterator,
    bufferedIter: RowIterator,
    inMemoryThreshold: Int,
    spillThreshold: Int) {
  // Current row from the streamed input, or null once that input is exhausted.
  private[this] var streamedRow: InternalRow = _
  // Join key projected from streamedRow; may contain nulls (null keys never match).
  private[this] var streamedRowKey: InternalRow = _
  // Current row from the buffered input, or null once that input is exhausted.
  private[this] var bufferedRow: InternalRow = _
  // Note: this is guaranteed to never have any null columns:
  private[this] var bufferedRowKey: InternalRow = _
  /**
   * The join key for the rows buffered in `bufferedMatches`, or null if `bufferedMatches` is empty
   */
  private[this] var matchJoinKey: InternalRow = _
  /** Buffered rows from the buffered side of the join. This is empty if there are no matches. */
  private[this] val bufferedMatches =
    new ExternalAppendOnlyUnsafeRowArray(inMemoryThreshold, spillThreshold)

  // Initialization (note: do _not_ want to advance streamed here).
  advancedBufferedToRowWithNullFreeJoinKey()

  // --- Public methods ---------------------------------------------------------------------------

  def getStreamedRow: InternalRow = streamedRow

  def getBufferedMatches: ExternalAppendOnlyUnsafeRowArray = bufferedMatches

  /**
   * Advances both input iterators, stopping when we have found rows with matching join keys.
   * @return true if matching rows have been found and false otherwise. If this returns true, then
   *         [[getStreamedRow]] and [[getBufferedMatches]] can be called to construct the join
   *         results.
   */
  final def findNextInnerJoinRows(): Boolean = {
    while (advancedStreamed() && streamedRowKey.anyNull) {
      // Advance the streamed side of the join until we find the next row whose join key contains
      // no nulls or we hit the end of the streamed iterator.
    }
    if (streamedRow == null) {
      // We have consumed the entire streamed iterator, so there can be no more matches.
      matchJoinKey = null
      bufferedMatches.clear()
      false
    } else if (matchJoinKey != null && keyOrdering.compare(streamedRowKey, matchJoinKey) == 0) {
      // The new streamed row has the same join key as the previous row, so return the same matches.
      true
    } else if (bufferedRow == null) {
      // The streamed row's join key does not match the current batch of buffered rows and there are
      // no more rows to read from the buffered iterator, so there can be no more matches.
      matchJoinKey = null
      bufferedMatches.clear()
      false
    } else {
      // Advance both the streamed and buffered iterators to find the next pair of matching rows.
      var comp = keyOrdering.compare(streamedRowKey, bufferedRowKey)
      do {
        if (streamedRowKey.anyNull) {
          advancedStreamed()
        } else {
          assert(!bufferedRowKey.anyNull)
          comp = keyOrdering.compare(streamedRowKey, bufferedRowKey)
          // Both inputs are sorted, so advance whichever side currently has the smaller key.
          if (comp > 0) advancedBufferedToRowWithNullFreeJoinKey()
          else if (comp < 0) advancedStreamed()
        }
      } while (streamedRow != null && bufferedRow != null && comp != 0)
      if (streamedRow == null || bufferedRow == null) {
        // We have either hit the end of one of the iterators, so there can be no more matches.
        matchJoinKey = null
        bufferedMatches.clear()
        false
      } else {
        // The streamed row's join key matches the current buffered row's join, so walk through the
        // buffered iterator to buffer the rest of the matching rows.
        assert(comp == 0)
        bufferMatchingRows()
        true
      }
    }
  }

  /**
   * Advances the streamed input iterator and buffers all rows from the buffered input that
   * have matching keys.
   * @return true if the streamed iterator returned a row, false otherwise. If this returns true,
   *         then [[getStreamedRow]] and [[getBufferedMatches]] can be called to produce the outer
   *         join results.
   */
  final def findNextOuterJoinRows(): Boolean = {
    if (!advancedStreamed()) {
      // We have consumed the entire streamed iterator, so there can be no more matches.
      matchJoinKey = null
      bufferedMatches.clear()
      false
    } else {
      if (matchJoinKey != null && keyOrdering.compare(streamedRowKey, matchJoinKey) == 0) {
        // Matches the current group, so do nothing.
      } else {
        // The streamed row does not match the current group.
        matchJoinKey = null
        bufferedMatches.clear()
        if (bufferedRow != null && !streamedRowKey.anyNull) {
          // The buffered iterator could still contain matching rows, so we'll need to walk through
          // it until we either find matches or pass where they would be found.
          var comp = 1
          do {
            comp = keyOrdering.compare(streamedRowKey, bufferedRowKey)
          } while (comp > 0 && advancedBufferedToRowWithNullFreeJoinKey())
          if (comp == 0) {
            // We have found matches, so buffer them (this updates matchJoinKey)
            bufferMatchingRows()
          } else {
            // We have overshot the position where the row would be found, hence no matches.
          }
        }
      }
      // If there is a streamed input then we always return true
      true
    }
  }

  // --- Private methods --------------------------------------------------------------------------

  /**
   * Advance the streamed iterator and compute the new row's join key.
   * @return true if the streamed iterator returned a row and false otherwise.
   */
  private def advancedStreamed(): Boolean = {
    if (streamedIter.advanceNext()) {
      streamedRow = streamedIter.getRow
      streamedRowKey = streamedKeyGenerator(streamedRow)
      true
    } else {
      streamedRow = null
      streamedRowKey = null
      false
    }
  }

  /**
   * Advance the buffered iterator until we find a row with join key that does not contain nulls.
   * @return true if the buffered iterator returned a row and false otherwise.
   */
  private def advancedBufferedToRowWithNullFreeJoinKey(): Boolean = {
    var foundRow: Boolean = false
    while (!foundRow && bufferedIter.advanceNext()) {
      bufferedRow = bufferedIter.getRow
      bufferedRowKey = bufferedKeyGenerator(bufferedRow)
      foundRow = !bufferedRowKey.anyNull
    }
    if (!foundRow) {
      bufferedRow = null
      bufferedRowKey = null
      false
    } else {
      true
    }
  }

  /**
   * Called when the streamed and buffered join keys match in order to buffer the matching rows.
   */
  private def bufferMatchingRows(): Unit = {
    assert(streamedRowKey != null)
    assert(!streamedRowKey.anyNull)
    assert(bufferedRowKey != null)
    assert(!bufferedRowKey.anyNull)
    assert(keyOrdering.compare(streamedRowKey, bufferedRowKey) == 0)
    // This join key may have been produced by a mutable projection, so we need to make a copy:
    matchJoinKey = streamedRowKey.copy()
    bufferedMatches.clear()
    do {
      bufferedMatches.add(bufferedRow.asInstanceOf[UnsafeRow])
      advancedBufferedToRowWithNullFreeJoinKey()
    } while (bufferedRow != null && keyOrdering.compare(streamedRowKey, bufferedRowKey) == 0)
  }
}
/**
 * An iterator for outputting rows in left outer join.
 */
private class LeftOuterIterator(
    smjScanner: SortMergeJoinScanner,
    rightNullRow: InternalRow,
    boundCondition: InternalRow => Boolean,
    resultProj: InternalRow => InternalRow,
    numOutputRows: SQLMetric)
  extends OneSideOuterIterator(
    smjScanner, rightNullRow, boundCondition, resultProj, numOutputRows) {

  // Left is the streamed side: stream rows fill the left half of the joined row;
  // buffered (right-side) rows — or the right null row — fill the right half.
  protected override def setStreamSideOutput(row: InternalRow): Unit = joinedRow.withLeft(row)
  protected override def setBufferedSideOutput(row: InternalRow): Unit = joinedRow.withRight(row)
}
/**
 * An iterator for outputting rows in right outer join.
 */
private class RightOuterIterator(
    smjScanner: SortMergeJoinScanner,
    leftNullRow: InternalRow,
    boundCondition: InternalRow => Boolean,
    resultProj: InternalRow => InternalRow,
    numOutputRows: SQLMetric)
  extends OneSideOuterIterator(smjScanner, leftNullRow, boundCondition, resultProj, numOutputRows) {

  // Right is the streamed side: stream rows fill the right half of the joined row;
  // buffered (left-side) rows — or the left null row — fill the left half.
  protected override def setStreamSideOutput(row: InternalRow): Unit = joinedRow.withRight(row)
  protected override def setBufferedSideOutput(row: InternalRow): Unit = joinedRow.withLeft(row)
}
/**
* An abstract iterator for sharing code between [[LeftOuterIterator]] and [[RightOuterIterator]].
*
* Each [[OneSideOuterIterator]] has a streamed side and a buffered side. Each row on the
* streamed side will output 0 or many rows, one for each matching row on the buffered side.
* If there are no matches, then the buffered side of the joined output will be a null row.
*
* In left outer join, the left is the streamed side and the right is the buffered side.
* In right outer join, the right is the streamed side and the left is the buffered side.
*
* @param smjScanner a scanner that streams rows and buffers any matching rows
* @param bufferedSideNullRow the default row to return when a streamed row has no matches
* @param boundCondition an additional filter condition for buffered rows
* @param resultProj how the output should be projected
* @param numOutputRows an accumulator metric for the number of rows output
*/
private abstract class OneSideOuterIterator(
    smjScanner: SortMergeJoinScanner,
    bufferedSideNullRow: InternalRow,
    boundCondition: InternalRow => Boolean,
    resultProj: InternalRow => InternalRow,
    numOutputRows: SQLMetric) extends RowIterator {

  // A row to store the joined result, reused many times
  protected[this] val joinedRow: JoinedRow = new JoinedRow()

  // Index of the buffered rows, reset to 0 whenever we advance to a new streamed row
  private[this] var rightMatchesIterator: Iterator[UnsafeRow] = null

  // This iterator is initialized lazily so there should be no matches initially
  assert(smjScanner.getBufferedMatches.length == 0)

  // Set output methods to be overridden by subclasses
  protected def setStreamSideOutput(row: InternalRow): Unit
  protected def setBufferedSideOutput(row: InternalRow): Unit

  /**
   * Advance to the next row on the stream side and populate the buffer with matches.
   * @return whether there are more rows in the stream to consume.
   */
  private def advanceStream(): Boolean = {
    rightMatchesIterator = null
    if (smjScanner.findNextOuterJoinRows()) {
      setStreamSideOutput(smjScanner.getStreamedRow)
      if (smjScanner.getBufferedMatches.isEmpty) {
        // There are no matching rows in the buffer, so return the null row
        setBufferedSideOutput(bufferedSideNullRow)
      } else {
        // Find the next row in the buffer that satisfied the bound condition
        if (!advanceBufferUntilBoundConditionSatisfied()) {
          // No buffered row passed the condition either, so null-join this stream row.
          setBufferedSideOutput(bufferedSideNullRow)
        }
      }
      true
    } else {
      // Stream has been exhausted
      false
    }
  }

  /**
   * Advance to the next row in the buffer that satisfies the bound condition.
   * @return whether there is such a row in the current buffer.
   */
  private def advanceBufferUntilBoundConditionSatisfied(): Boolean = {
    var foundMatch: Boolean = false
    if (rightMatchesIterator == null) {
      rightMatchesIterator = smjScanner.getBufferedMatches.generateIterator()
    }
    while (!foundMatch && rightMatchesIterator.hasNext) {
      setBufferedSideOutput(rightMatchesIterator.next())
      foundMatch = boundCondition(joinedRow)
    }
    foundMatch
  }

  override def advanceNext(): Boolean = {
    // First drain any remaining matches for the current streamed row, then advance the stream.
    val r = advanceBufferUntilBoundConditionSatisfied() || advanceStream()
    if (r) numOutputRows += 1
    r
  }

  override def getRow: InternalRow = resultProj(joinedRow)
}
/**
 * Scans two key-sorted inputs to produce rows for a full outer join. Rows whose join key
 * contains a null (or rows remaining once the other side is exhausted) are joined with the
 * corresponding null row; otherwise all rows from both sides sharing the next key are
 * buffered and cross-checked against `boundCondition`, null-joining any row that never
 * finds a valid partner.
 */
private class SortMergeFullOuterJoinScanner(
    leftKeyGenerator: Projection,
    rightKeyGenerator: Projection,
    keyOrdering: Ordering[InternalRow],
    leftIter: RowIterator,
    rightIter: RowIterator,
    boundCondition: InternalRow => Boolean,
    leftNullRow: InternalRow,
    rightNullRow: InternalRow) {
  // Reused output row, mutated in place by scanNextInBuffered() / advanceNext().
  private[this] val joinedRow: JoinedRow = new JoinedRow()
  private[this] var leftRow: InternalRow = _
  private[this] var leftRowKey: InternalRow = _
  private[this] var rightRow: InternalRow = _
  private[this] var rightRowKey: InternalRow = _

  // Cursors into the match buffers below.
  private[this] var leftIndex: Int = 0
  private[this] var rightIndex: Int = 0
  // Copies of the rows from each side that share the current matching key.
  private[this] val leftMatches: ArrayBuffer[InternalRow] = new ArrayBuffer[InternalRow]
  private[this] val rightMatches: ArrayBuffer[InternalRow] = new ArrayBuffer[InternalRow]
  // Track which buffered rows matched at least once, so unmatched rows are null-joined
  // exactly once.
  private[this] var leftMatched: BitSet = new BitSet(1)
  private[this] var rightMatched: BitSet = new BitSet(1)

  advancedLeft()
  advancedRight()

  // --- Private methods --------------------------------------------------------------------------

  /**
   * Advance the left iterator and compute the new row's join key.
   * @return true if the left iterator returned a row and false otherwise.
   */
  private def advancedLeft(): Boolean = {
    if (leftIter.advanceNext()) {
      leftRow = leftIter.getRow
      leftRowKey = leftKeyGenerator(leftRow)
      true
    } else {
      leftRow = null
      leftRowKey = null
      false
    }
  }

  /**
   * Advance the right iterator and compute the new row's join key.
   * @return true if the right iterator returned a row and false otherwise.
   */
  private def advancedRight(): Boolean = {
    if (rightIter.advanceNext()) {
      rightRow = rightIter.getRow
      rightRowKey = rightKeyGenerator(rightRow)
      true
    } else {
      rightRow = null
      rightRowKey = null
      false
    }
  }

  /**
   * Populate the left and right buffers with rows matching the provided key.
   * This consumes rows from both iterators until their keys are different from the matching key.
   */
  private def findMatchingRows(matchingKey: InternalRow): Unit = {
    leftMatches.clear()
    rightMatches.clear()
    leftIndex = 0
    rightIndex = 0

    while (leftRowKey != null && keyOrdering.compare(leftRowKey, matchingKey) == 0) {
      leftMatches += leftRow.copy()
      advancedLeft()
    }
    while (rightRowKey != null && keyOrdering.compare(rightRowKey, matchingKey) == 0) {
      rightMatches += rightRow.copy()
      advancedRight()
    }

    // Reuse the existing bitsets when large enough; otherwise allocate bigger ones.
    if (leftMatches.size <= leftMatched.capacity) {
      leftMatched.clearUntil(leftMatches.size)
    } else {
      leftMatched = new BitSet(leftMatches.size)
    }
    if (rightMatches.size <= rightMatched.capacity) {
      rightMatched.clearUntil(rightMatches.size)
    } else {
      rightMatched = new BitSet(rightMatches.size)
    }
  }

  /**
   * Scan the left and right buffers for the next valid match.
   *
   * Note: this method mutates `joinedRow` to point to the latest matching rows in the buffers.
   * If a left row has no valid matches on the right, or a right row has no valid matches on the
   * left, then the row is joined with the null row and the result is considered a valid match.
   *
   * @return true if a valid match is found, false otherwise.
   */
  private def scanNextInBuffered(): Boolean = {
    while (leftIndex < leftMatches.size) {
      while (rightIndex < rightMatches.size) {
        joinedRow(leftMatches(leftIndex), rightMatches(rightIndex))
        if (boundCondition(joinedRow)) {
          leftMatched.set(leftIndex)
          rightMatched.set(rightIndex)
          rightIndex += 1
          return true
        }
        rightIndex += 1
      }
      rightIndex = 0
      if (!leftMatched.get(leftIndex)) {
        // the left row has never matched any right row, join it with null row
        joinedRow(leftMatches(leftIndex), rightNullRow)
        leftIndex += 1
        return true
      }
      leftIndex += 1
    }

    while (rightIndex < rightMatches.size) {
      if (!rightMatched.get(rightIndex)) {
        // the right row has never matched any left row, join it with null row
        joinedRow(leftNullRow, rightMatches(rightIndex))
        rightIndex += 1
        return true
      }
      rightIndex += 1
    }

    // There are no more valid matches in the left and right buffers
    false
  }

  // --- Public methods --------------------------------------------------------------------------

  def getJoinedRow(): JoinedRow = joinedRow

  def advanceNext(): Boolean = {
    // If we already buffered some matching rows, use them directly
    if (leftIndex <= leftMatches.size || rightIndex <= rightMatches.size) {
      if (scanNextInBuffered()) {
        return true
      }
    }

    if (leftRow != null && (leftRowKey.anyNull || rightRow == null)) {
      // Left row cannot match anything (null key, or right side exhausted): null-join it.
      joinedRow(leftRow.copy(), rightNullRow)
      advancedLeft()
      true
    } else if (rightRow != null && (rightRowKey.anyNull || leftRow == null)) {
      // Right row cannot match anything (null key, or left side exhausted): null-join it.
      joinedRow(leftNullRow, rightRow.copy())
      advancedRight()
      true
    } else if (leftRow != null && rightRow != null) {
      // Both rows are present and neither have null values,
      // so we populate the buffers with rows matching the next key
      val comp = keyOrdering.compare(leftRowKey, rightRowKey)
      if (comp <= 0) {
        findMatchingRows(leftRowKey.copy())
      } else {
        findMatchingRows(rightRowKey.copy())
      }
      scanNextInBuffered()
      true
    } else {
      // Both iterators have been consumed
      false
    }
  }
}
/**
 * An iterator producing the rows of a full outer sort-merge join; all matching logic
 * lives in the scanner, which mutates the shared joined row in place.
 */
private class FullOuterIterator(
    smjScanner: SortMergeFullOuterJoinScanner,
    resultProj: InternalRow => InternalRow,
    numRows: SQLMetric) extends RowIterator {

  // Reused row owned by the scanner; updated by each advanceNext() call.
  private[this] val joinedRow: JoinedRow = smjScanner.getJoinedRow()

  override def advanceNext(): Boolean = {
    val r = smjScanner.advanceNext()
    if (r) numRows += 1
    r
  }

  override def getRow: InternalRow = resultProj(joinedRow)
}
| ericvandenbergfb/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoinExec.scala | Scala | apache-2.0 | 44,291 |
package com.olivergg.starttabs
import scala.scalajs.js.JSConverters.array2JSRichGenTrav
import scala.scalajs.js.JSApp
import scala.scalajs.js.annotation.JSExport
import com.greencatsoft.angularjs.Module
import com.olivergg.starttabs.controller.DashController
import com.olivergg.starttabs.controller.FriendsController
import com.olivergg.starttabs.controller.FriendDetailController
import com.olivergg.starttabs.controller.AccountController
import com.greencatsoft.angularjs.Angular
import com.olivergg.starttabs.service.BetterHttpServiceFactory
import com.olivergg.starttabs.controller.ChatsController
import com.olivergg.starttabs.controller.ChatDetailController
/**
* The main entry point of the application. The main method is called thanks to the Scala.js autogenerated launcher.
* See the "XXXXXX-launcher.js" file in Index.scala
*/
@JSExport("IonicStartTabsApp")
object IonicStartTabsApp extends JSApp {

  /** Bootstraps the AngularJS "starter" module and registers services/controllers. */
  override def main(): Unit = {
    println("start main")
    // angular.module is a global place for creating, registering and retrieving Angular modules
    // 'starter' is the name of this angular module example (also set in a <body> attribute in index.html)
    // the 2nd parameter is an array of 'requires'
    // 'starter.controllers' is defined in controllers below
    val module = Angular.module("starter", Array("ionic", "starter.controllers", "ngCordova"))
    module.run[PlatformInitializer]
    module.config[StateConfig]
    module.factory[BetterHttpServiceFactory]
    // NOTE(review): `controllers` ("starter.controllers") is declared as a dependency of
    // `module` above but nothing is ever registered on it — every controller below is
    // registered on `module` instead. Confirm whether these registrations were meant to
    // go through `controllers`.
    val controllers = Angular.module("starter.controllers", Array.empty[String])
    module.controller[DashController]
    module.controller[FriendsController]
    module.controller[FriendDetailController]
    module.controller[AccountController]
    module.controller[ChatsController]
    module.controller[ChatDetailController]
    println("end main")
  }
}
| olivergg/scalajs-ionic-starttabs | app-js/src/main/scala/com/olivergg/starttabs/IonicStartTabsApp.scala | Scala | gpl-2.0 | 1,861 |
package domino.bundle_watching
import org.osgi.framework.Bundle
/**
* Super class for bundle watcher events. The possible events are defined in the companion object.
*
* @param bundle Bundle affected by the state transition
* @param context Additional event data
*/
abstract sealed class BundleWatcherEvent(bundle: Bundle, context: BundleWatcherContext)
/**
 * Contains the possible bundle watcher events, mirroring the OSGi BundleTracker
 * callbacks (adding / modified / removed).
 */
object BundleWatcherEvent {

  /**
   * A bundle is being added to the BundleTracker.
   */
  case class AddingBundle(bundle: Bundle, context: BundleWatcherContext) extends BundleWatcherEvent(bundle, context)

  /**
   * A bundle tracked by the BundleTracker has been modified.
   */
  case class ModifiedBundle(bundle: Bundle, context: BundleWatcherContext) extends BundleWatcherEvent(bundle, context)

  /**
   * A bundle tracked by the BundleTracker has been removed.
   */
  case class RemovedBundle(bundle: Bundle, context: BundleWatcherContext) extends BundleWatcherEvent(bundle, context)
}
| helgoboss/domino | src/main/scala/domino/bundle_watching/BundleWatcherEvent.scala | Scala | mit | 1,017 |
package microtools.decorators
import akka.actor.Scheduler
import akka.pattern.after
import microtools.BusinessTry
import microtools.logging.{ContextAwareLogger, LoggingContext}
import microtools.models.Problem
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
/**
 * Mixin providing retry decorators for `Future`- and `BusinessTry`-returning blocks,
 * with a fixed delay between attempts (scheduled via the implicit [[Scheduler]]).
 */
trait Retries {
  def log: ContextAwareLogger

  /**
   * Decorates a `Future`-returning block with retry behaviour.
   *
   * On failure, `errorHandler` is consulted first with (error, remaining retries); if it
   * is not defined there and retries remain, the block is re-run after `delay`. Once
   * retries are exhausted the last error is logged and propagated as a failed Future.
   * Synchronously thrown exceptions are converted to failed Futures (see Retries.materialize).
   */
  def retryFuture[T](
      maxRetries: Int,
      delay: FiniteDuration,
      errorHandler: PartialFunction[(Throwable, Int), Future[T]] = PartialFunction.empty
  )(implicit ec: ExecutionContext, ctx: LoggingContext, s: Scheduler): FutureDecorator[T] =
    new FutureDecorator[T] {
      override def apply(block: => Future[T]): Future[T] = {
        Retries.materialize(block).recoverWith {
          case e if errorHandler.isDefinedAt((e, maxRetries)) =>
            errorHandler.apply((e, maxRetries))
          case e if maxRetries > 0 =>
            log.warn(s"Retrying on ${e.getMessage}")
            // Recurse with one fewer retry after the delay elapses.
            after(delay, s)(retryFuture(maxRetries - 1, delay, errorHandler).apply(block))
          case e: Throwable =>
            log.error("Retries exhausted", e)
            Future.failed(e)
        }
      }
    }

  /**
   * Decorates a `BusinessTry`-returning block with retry behaviour.
   *
   * Thrown errors follow the same rules as [[retryFuture]] (errorHandler, then retry,
   * then fail). Business problems are NOT retried here: a `Right(problem)` outcome is
   * passed to `problemHandler` with (problem, remaining retries), defaulting to a
   * plain failure when the handler is not defined for it.
   */
  def retryTry[T](
      maxRetries: Int,
      delay: FiniteDuration,
      problemHandler: PartialFunction[(Problem, Int), BusinessTry[T]] = PartialFunction.empty,
      errorHandler: PartialFunction[(Throwable, Int), BusinessTry[T]] = PartialFunction.empty
  )(
      implicit ec: ExecutionContext,
      ctx: LoggingContext,
      s: Scheduler
  ): TryDecorator[T] =
    new TryDecorator[T] {
      override def apply(block: => BusinessTry[T]): BusinessTry[T] = {
        BusinessTry.future(
          Retries
            .materialize(block.asFuture)
            .recoverWith {
              case e if errorHandler.isDefinedAt((e, maxRetries)) =>
                errorHandler.apply((e, maxRetries)).asFuture
              case e if maxRetries > 0 =>
                log.warn(s"Retrying on ${e.getMessage}")
                after(delay, s)(
                  retryTry(maxRetries - 1, delay, problemHandler, errorHandler)
                    .apply(block)
                    .asFuture
                )
              case e: Throwable =>
                log.error("Retries exhausted", e)
                Future.failed(e)
            }
            .map {
              case Left(success) => BusinessTry.success(success)
              case Right(problem) =>
                problemHandler.lift((problem, maxRetries)).getOrElse(BusinessTry.failure(problem))
            }
        )
      }
    }
}
/** Companion helpers shared by the retry decorators. */
object Retries {

  /**
   * Forces evaluation of a by-name `Future`, converting a synchronously thrown
   * non-fatal exception into a failed `Future` so callers have a single failure
   * channel to recover from. Fatal errors (OutOfMemoryError, etc.) still propagate.
   */
  def materialize[T](block: => Future[T]): Future[T] = {
    try {
      block
    } catch {
      case NonFatal(error) => Future.failed(error)
    }
  }
}
| 21re/play-micro-tools | src/main/scala/microtools/decorators/Retries.scala | Scala | mit | 2,752 |
package scalaoauth2.provider
import org.scalatest._
import org.scalatest.Matchers._
/** Unit tests for the authorization-code grant handler. */
class AuthorizationCodeSpec extends FlatSpec {

  it should "handle request" in {
    val authorizationCode = new AuthorizationCode(new MockClientCredentialFetcher())
    val request = AuthorizationRequest(Map(), Map("code" -> Seq("code1"), "redirect_uri" -> Seq("http://example.com/")))
    val grantHandlerResult = authorizationCode.handleRequest(request, new MockDataHandler() {

      override def findAuthInfoByCode(code: String): Option[AuthInfo[MockUser]] = Some(
        AuthInfo(user = MockUser(10000, "username"), clientId = "clientId1", scope = Some("all"), redirectUri = Some("http://example.com/"))
      )

      override def createAccessToken(authInfo: AuthInfo[MockUser]): AccessToken = AccessToken("token1", Some("refreshToken1"), Some("all"), Some(3600), new java.util.Date())

    })
    // The grant result must surface the token issued by the data handler unchanged.
    grantHandlerResult.tokenType should be ("Bearer")
    grantHandlerResult.accessToken should be ("token1")
    grantHandlerResult.expiresIn should be (Some(3600))
    grantHandlerResult.refreshToken should be (Some("refreshToken1"))
    grantHandlerResult.scope should be (Some("all"))
  }

  // Same flow, but the stored AuthInfo has no redirectUri; the request still carries a
  // redirect_uri parameter and the grant must succeed identically.
  it should "handle request if redirectUrl is none" in {
    val authorizationCode = new AuthorizationCode(new MockClientCredentialFetcher())
    val request = AuthorizationRequest(Map(), Map("code" -> Seq("code1"), "redirect_uri" -> Seq("http://example.com/")))
    val grantHandlerResult = authorizationCode.handleRequest(request, new MockDataHandler() {

      override def findAuthInfoByCode(code: String): Option[AuthInfo[MockUser]] = Some(
        AuthInfo(user = MockUser(10000, "username"), clientId = "clientId1", scope = Some("all"), redirectUri = None)
      )

      override def createAccessToken(authInfo: AuthInfo[MockUser]): AccessToken = AccessToken("token1", Some("refreshToken1"), Some("all"), Some(3600), new java.util.Date())

    })
    grantHandlerResult.tokenType should be ("Bearer")
    grantHandlerResult.accessToken should be ("token1")
    grantHandlerResult.expiresIn should be (Some(3600))
    grantHandlerResult.refreshToken should be (Some("refreshToken1"))
    grantHandlerResult.scope should be (Some("all"))
  }

  // Fixed client credentials used by every test case above.
  class MockClientCredentialFetcher extends ClientCredentialFetcher {
    override def fetch(request: AuthorizationRequest): Option[ClientCredential] = Some(ClientCredential("clientId1", "clientSecret1"))
  }
}
| centraldesktop/scala-oauth2-provider | scala-oauth2-core/src/test/scala/scalaoauth2/provider/AuthorizationCodeSpec.scala | Scala | mit | 2,455 |
package pbdirect
import shapeless.{:+:, CNil, Coproduct, Generic, Witness}
/**
 * Derives an ordered list of the case objects of a sealed trait (via shapeless),
 * giving a stable Int <-> value mapping determined by the implicit Ordering.
 */
object Enum {

  /** All values of T, sorted by the implicit Ordering — the order indices refer to. */
  def values[T](implicit v: Values[T], ord: Ordering[T]): Seq[T] = v.apply.sorted

  // Throws IndexOutOfBoundsException when index is not in [0, values.size).
  def fromInt[T](index: Int)(implicit v: Values[T], ord: Ordering[T]): T = values.apply(index)

  // Returns -1 if `a` is somehow absent (indexOf semantics).
  def toInt[T](a: T)(implicit v: Values[T], ord: Ordering[T]): Int = values.indexOf(a)

  /** Type class providing the (unsorted) list of all values of T. */
  trait Values[T] {
    def apply: List[T]
  }

  object Values {
    // Derive Values[A] by walking A's generic coproduct representation.
    implicit def values[A, Repr <: Coproduct](implicit gen: Generic.Aux[A, Repr], v: Aux[A, Repr]): Values[A] =
      new Values[A] { def apply = v.values }

    trait Aux[A, Repr] {
      def values: List[A]
    }

    object Aux {
      // Base case: the empty coproduct contributes no values.
      implicit def cnilAux[E]: Aux[E, CNil] = new Aux[E, CNil] { def values = Nil }

      // Inductive case: prepend the singleton value of the head branch (via Witness).
      implicit def cconsAux[E, V <: E, R <: Coproduct](implicit l: Witness.Aux[V], r: Aux[E, R]): Aux[E, V :+: R] =
        new Aux[E, V :+: R] { def values = l.value :: r.values }
    }
  }
}
| btlines/pbdirect | shared/src/main/scala/pbdirect/Enum.scala | Scala | mit | 932 |
import org.apache.spark._
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import scala.io.Source._
import org.apache.spark.mllib.classification.{LogisticRegressionWithLBFGS, LogisticRegressionModel}
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.util.MLUtils
/**
 * Trains and evaluates a binary logistic-regression query classifier with Spark MLlib.
 *
 * Loads LIBSVM-formatted data, splits it 60/40 into train/test, fits a model with
 * LBFGS, and prints the confusion matrix, precision, and recall for label 1.
 */
object QueryClassifier {
  def main(args: Array[String]) {
    val conf = new SparkConf().setAppName("abmQueryClassifier")
    // Create a Scala Spark Context.
    val sc = new SparkContext(conf)

    try {
      // Load the labeled feature vectors (alternative datasets previously used:
      // "cf_data", "test_data/breast-cancer_scale.txt").
      val data = MLUtils.loadLibSVMFile(sc, "lunch_data")

      // Split data into training (60%) and test (40%); fixed seed for reproducibility.
      val splits = data.randomSplit(Array(0.6, 0.4), seed = 11L)
      val training = splits(0).cache()
      val test = splits(1)

      // Run training algorithm to build the model.
      val model = new LogisticRegressionWithLBFGS()
        .setNumClasses(2)
        .run(training)

      // Compute (prediction, label) pairs on the held-out test set.
      val predictionAndLabels = test.map { case LabeledPoint(label, features) =>
        val prediction = model.predict(features)
        (prediction, label)
      }

      // Get evaluation metrics.
      val metrics = new MulticlassMetrics(predictionAndLabels)
      println(metrics.confusionMatrix)
      println("Precision = " + metrics.precision)
      // Recall for the positive class (label 1.0).
      println("Recall = " + metrics.recall(1))
    } finally {
      // Always release cluster resources, even if training/evaluation fails.
      sc.stop()
    }
  }
}
import sys.process.Process
import java.io.File
val sqlDir = new File("./sql/")
val host = "192.168.33.10"
val user = "locest"
val database = "locest"
val password = System.console.readPassword("%s", "Password:").mkString("")
println(password)
sqlDir.listFiles.foreach { file =>
println(file)
Process(s"psql -h ${host} -d ${database} -U ${user} -f ${file.getAbsolutePath}", None, "PGPASSWORD" -> password) !!
} | morikuni/locest | setup/area/scripts/exec_sql.scala | Scala | mit | 416 |
package uk.gov.gds.location.importer.model
/**
* Representation of various enum style address base data types
*/
object CodeLists {
object BlpuStateCode extends Enumeration {
type BlpuStateCode = Value
val underConstruction, inUse, unoccupied, noLongerExists, planningPermissionGranted = Value
def forId(id: String) = id match {
case "1" => Some(underConstruction)
case "2" => Some(inUse)
case "3" => Some(unoccupied)
case "4" => Some(noLongerExists)
case "6" => Some(planningPermissionGranted)
case _ => None
}
}
object LogicalStatusCode extends Enumeration {
type LogicalStatusCode = Value
val approved, alternative, provisional, historical = Value
def forId(id: String) = id match {
case "1" => Some(approved)
case "3" => Some(alternative)
case "6" => Some(provisional)
case "8" => Some(historical)
case _ => None
}
}
object StreetRecordTypeCode extends Enumeration {
type StreetRecordTypeCode = Value
val officiallyDesignated, streetDescription, numberedStreet, unofficialStreetDescription, descriptionForLLPG = Value
def forId(id: String) = id match {
case "1" => Some(officiallyDesignated)
case "2" => Some(streetDescription)
case "3" => Some(numberedStreet)
case "4" => Some(unofficialStreetDescription)
case "9" => Some(descriptionForLLPG)
case _ => None
}
def isUnofficialStreet(value: String) =
value.equals(this.unofficialStreetDescription.toString) || value.equals(this.descriptionForLLPG.toString)
def isDescription(value: String) =
value.equals(this.streetDescription.toString)
}
object StreetStateCode extends Enumeration {
type StreetStateCode = Value
val streetUnderConstruction, open, closed = Value
def forId(id: String) = id match {
case "1" => Some(streetUnderConstruction)
case "2" => Some(open)
case "4" => Some(closed)
case _ => None
}
}
object StreetSurfaceCode extends Enumeration {
type StreetSurfaceCode = Value
val metalled, unMetalled, mixed = Value
def forId(id: String) = id match {
case "1" => Some(metalled)
case "2" => Some(unMetalled)
case "3" => Some(mixed)
case _ => None
}
}
object StreetClassificationCode extends Enumeration {
type StreetClassificationCode = Value
val footpath, cycleway, allVehicles, restricted, bridleway = Value
def forId(id: String) = id match {
case "4" => Some(footpath)
case "6" => Some(cycleway)
case "8" => Some(allVehicles)
case "9" => Some(restricted)
case "10" => Some(bridleway)
case _ => None
}
}
}
| alphagov/location-data-importer | src/main/scala/uk/gov/gds/location/importer/model/CodeLists.scala | Scala | mit | 2,716 |
/*
* Copyright (C) 2014 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.cassandra.lucene.column
import com.stratio.cassandra.lucene.BaseScalaTest
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
/** Tests for [[Column]].
*
* @author Andres de la Pena `adelapena@stratio.com`
*/
@RunWith(classOf[JUnitRunner])
class ColumnsTest extends BaseScalaTest {
test("build empty") {
val columns = Columns()
columns.size shouldBe 0
columns.isEmpty shouldBe true
}
test("build with columns") {
val columns = Columns(Column("c1"), Column("c2"))
columns.size shouldBe 2
columns.isEmpty shouldBe false
}
test("foreach with mapper") {
val columns = Columns(
Column("c1"),
Column("c1").withUDTName("u1"),
Column("c1").withMapName("m1"),
Column("c1").withUDTName("u1").withMapName("m1"),
Column("c2"),
Column("c2").withUDTName("u1"),
Column("c2").withMapName("m1"),
Column("c2").withUDTName("u1").withMapName("m12"))
var cols1 = Columns()
columns.foreachWithMapper("c1")(c => cols1 += c)
cols1 shouldBe Columns(Column("c1"), Column("c1").withMapName("m1"))
var cols2 = Columns()
columns.foreachWithMapper("c1.u1")(c => cols2 += c)
cols2 shouldBe Columns(
Column("c1").withUDTName("u1"),
Column("c1").withUDTName("u1").withMapName("m1"))
}
test("value for field") {
val columns = Columns() +
Column("c1").withValue(1) +
Column("c1").withUDTName("u1").withValue(2) +
Column("c1").withMapName("m1").withValue(3) +
Column("c1").withUDTName("u1").withMapName("m1").withValue(4) +
Column("c2").withValue(5) +
Column("c2").withUDTName("u1").withValue(6) +
Column("c2").withMapName("m1").withValue(7) +
Column("c2").withUDTName("u1").withMapName("m1").withValue(8)
columns.valueForField("c1") shouldBe 1
columns.valueForField("c1.u1") shouldBe 2
columns.valueForField("c1$m1") shouldBe 3
columns.valueForField("c1.u1$m1") shouldBe 4
columns.valueForField("c2") shouldBe 5
columns.valueForField("c2.u1") shouldBe 6
columns.valueForField("c2$m1") shouldBe 7
columns.valueForField("c2.u1$m1") shouldBe 8
}
test("prepend column") {
Column("c1") :: Columns(Column("c2")) shouldBe Columns(Column("c1"), Column("c2"))
}
test("sum column") {
Columns(Column("c1")) + Column("c2") shouldBe Columns(Column("c1"), Column("c2"))
}
test("sum columns") {
Columns(Column("c1")) ++ Columns(Column("c2")) shouldBe Columns(Column("c1"), Column("c2"))
}
test("add column without value") {
Columns(Column("c1")).add("c2") shouldBe Columns(Column("c1"), Column("c2"))
}
test("add column with value") {
Columns(Column("c1")).add("c2", 1) shouldBe Columns(Column("c1"), Column("c2").withValue(1))
}
test("toString empty") {
Columns().toString shouldBe "Columns{}"
}
test("toString with columns") {
val columns = Columns(
Column("c1"),
Column("c2").withUDTName("u1").withMapName("m1").withValue(7))
columns.toString shouldBe "Columns{c1=None, c2.u1$m1=Some(7)}"
}
}
| adelapena/cassandra-lucene-index | plugin/src/test/scala/com/stratio/cassandra/lucene/column/ColumnsTest.scala | Scala | apache-2.0 | 3,710 |
package im.actor.server
import akka.actor.ActorSystem
import com.amazonaws.auth.EnvironmentVariableCredentialsProvider
import slick.driver.PostgresDriver.api.Database
import im.actor.server.util.{ FileStorageAdapter, S3StorageAdapterConfig, S3StorageAdapter }
trait ImplicitFileStorageAdapter {
protected implicit val system: ActorSystem
protected implicit val db: Database
protected implicit lazy val awsCredentials = new EnvironmentVariableCredentialsProvider()
protected implicit lazy val fsAdapterS3: S3StorageAdapter = new S3StorageAdapter(S3StorageAdapterConfig.load.get)
protected implicit lazy val fsAdapter: FileStorageAdapter = fsAdapterS3
}
| v2tmobile/actor-platform | actor-server/actor-tests/src/test/scala/im/actor/server/ImplicitFileStorageAdapter.scala | Scala | mit | 666 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt
package internal
import Def._
/**
* This trait injected to `Def` object to provide `sequential` functions for tasks.
*/
trait TaskSequential {
def sequential[B](last: Initialize[Task[B]]): Initialize[Task[B]] =
sequential(Nil, last)
def sequential[A0, B](
task0: Initialize[Task[A0]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(List(unitTask(task0)), last)
def sequential[A0, A1, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(List(unitTask(task0), unitTask(task1)), last)
def sequential[A0, A1, A2, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(List(unitTask(task0), unitTask(task1), unitTask(task2)), last)
def sequential[A0, A1, A2, A3, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(List(unitTask(task0), unitTask(task1), unitTask(task2), unitTask(task3)), last)
def sequential[A0, A1, A2, A3, A4, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(unitTask(task0), unitTask(task1), unitTask(task2), unitTask(task3), unitTask(task4)),
last
)
def sequential[A0, A1, A2, A3, A4, A5, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5)
),
last
)
def sequential[A0, A1, A2, A3, A4, A5, A6, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
task6: Initialize[Task[A6]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5),
unitTask(task6)
),
last
)
def sequential[A0, A1, A2, A3, A4, A5, A6, A7, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
task6: Initialize[Task[A6]],
task7: Initialize[Task[A7]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5),
unitTask(task6),
unitTask(task7)
),
last
)
def sequential[A0, A1, A2, A3, A4, A5, A6, A7, A8, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
task6: Initialize[Task[A6]],
task7: Initialize[Task[A7]],
task8: Initialize[Task[A8]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5),
unitTask(task6),
unitTask(task7),
unitTask(task8)
),
last
)
def sequential[A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
task6: Initialize[Task[A6]],
task7: Initialize[Task[A7]],
task8: Initialize[Task[A8]],
task9: Initialize[Task[A9]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5),
unitTask(task6),
unitTask(task7),
unitTask(task8),
unitTask(task9)
),
last
)
def sequential[A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
task6: Initialize[Task[A6]],
task7: Initialize[Task[A7]],
task8: Initialize[Task[A8]],
task9: Initialize[Task[A9]],
task10: Initialize[Task[A10]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5),
unitTask(task6),
unitTask(task7),
unitTask(task8),
unitTask(task9),
unitTask(task10)
),
last
)
def sequential[A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
task6: Initialize[Task[A6]],
task7: Initialize[Task[A7]],
task8: Initialize[Task[A8]],
task9: Initialize[Task[A9]],
task10: Initialize[Task[A10]],
task11: Initialize[Task[A11]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5),
unitTask(task6),
unitTask(task7),
unitTask(task8),
unitTask(task9),
unitTask(task10),
unitTask(task11)
),
last
)
def sequential[A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
task6: Initialize[Task[A6]],
task7: Initialize[Task[A7]],
task8: Initialize[Task[A8]],
task9: Initialize[Task[A9]],
task10: Initialize[Task[A10]],
task11: Initialize[Task[A11]],
task12: Initialize[Task[A12]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5),
unitTask(task6),
unitTask(task7),
unitTask(task8),
unitTask(task9),
unitTask(task10),
unitTask(task11),
unitTask(task12)
),
last
)
def sequential[A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
task6: Initialize[Task[A6]],
task7: Initialize[Task[A7]],
task8: Initialize[Task[A8]],
task9: Initialize[Task[A9]],
task10: Initialize[Task[A10]],
task11: Initialize[Task[A11]],
task12: Initialize[Task[A12]],
task13: Initialize[Task[A13]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5),
unitTask(task6),
unitTask(task7),
unitTask(task8),
unitTask(task9),
unitTask(task10),
unitTask(task11),
unitTask(task12),
unitTask(task13)
),
last
)
def sequential[A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
task6: Initialize[Task[A6]],
task7: Initialize[Task[A7]],
task8: Initialize[Task[A8]],
task9: Initialize[Task[A9]],
task10: Initialize[Task[A10]],
task11: Initialize[Task[A11]],
task12: Initialize[Task[A12]],
task13: Initialize[Task[A13]],
task14: Initialize[Task[A14]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5),
unitTask(task6),
unitTask(task7),
unitTask(task8),
unitTask(task9),
unitTask(task10),
unitTask(task11),
unitTask(task12),
unitTask(task13),
unitTask(task14)
),
last
)
def sequential[A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
task6: Initialize[Task[A6]],
task7: Initialize[Task[A7]],
task8: Initialize[Task[A8]],
task9: Initialize[Task[A9]],
task10: Initialize[Task[A10]],
task11: Initialize[Task[A11]],
task12: Initialize[Task[A12]],
task13: Initialize[Task[A13]],
task14: Initialize[Task[A14]],
task15: Initialize[Task[A15]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5),
unitTask(task6),
unitTask(task7),
unitTask(task8),
unitTask(task9),
unitTask(task10),
unitTask(task11),
unitTask(task12),
unitTask(task13),
unitTask(task14),
unitTask(task15)
),
last
)
def sequential[A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
task6: Initialize[Task[A6]],
task7: Initialize[Task[A7]],
task8: Initialize[Task[A8]],
task9: Initialize[Task[A9]],
task10: Initialize[Task[A10]],
task11: Initialize[Task[A11]],
task12: Initialize[Task[A12]],
task13: Initialize[Task[A13]],
task14: Initialize[Task[A14]],
task15: Initialize[Task[A15]],
task16: Initialize[Task[A16]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5),
unitTask(task6),
unitTask(task7),
unitTask(task8),
unitTask(task9),
unitTask(task10),
unitTask(task11),
unitTask(task12),
unitTask(task13),
unitTask(task14),
unitTask(task15),
unitTask(task16)
),
last
)
def sequential[A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, B](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
task6: Initialize[Task[A6]],
task7: Initialize[Task[A7]],
task8: Initialize[Task[A8]],
task9: Initialize[Task[A9]],
task10: Initialize[Task[A10]],
task11: Initialize[Task[A11]],
task12: Initialize[Task[A12]],
task13: Initialize[Task[A13]],
task14: Initialize[Task[A14]],
task15: Initialize[Task[A15]],
task16: Initialize[Task[A16]],
task17: Initialize[Task[A17]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5),
unitTask(task6),
unitTask(task7),
unitTask(task8),
unitTask(task9),
unitTask(task10),
unitTask(task11),
unitTask(task12),
unitTask(task13),
unitTask(task14),
unitTask(task15),
unitTask(task16),
unitTask(task17)
),
last
)
def sequential[
A0,
A1,
A2,
A3,
A4,
A5,
A6,
A7,
A8,
A9,
A10,
A11,
A12,
A13,
A14,
A15,
A16,
A17,
A18,
B
](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
task6: Initialize[Task[A6]],
task7: Initialize[Task[A7]],
task8: Initialize[Task[A8]],
task9: Initialize[Task[A9]],
task10: Initialize[Task[A10]],
task11: Initialize[Task[A11]],
task12: Initialize[Task[A12]],
task13: Initialize[Task[A13]],
task14: Initialize[Task[A14]],
task15: Initialize[Task[A15]],
task16: Initialize[Task[A16]],
task17: Initialize[Task[A17]],
task18: Initialize[Task[A18]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5),
unitTask(task6),
unitTask(task7),
unitTask(task8),
unitTask(task9),
unitTask(task10),
unitTask(task11),
unitTask(task12),
unitTask(task13),
unitTask(task14),
unitTask(task15),
unitTask(task16),
unitTask(task17),
unitTask(task18)
),
last
)
def sequential[
A0,
A1,
A2,
A3,
A4,
A5,
A6,
A7,
A8,
A9,
A10,
A11,
A12,
A13,
A14,
A15,
A16,
A17,
A18,
A19,
B
](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
task6: Initialize[Task[A6]],
task7: Initialize[Task[A7]],
task8: Initialize[Task[A8]],
task9: Initialize[Task[A9]],
task10: Initialize[Task[A10]],
task11: Initialize[Task[A11]],
task12: Initialize[Task[A12]],
task13: Initialize[Task[A13]],
task14: Initialize[Task[A14]],
task15: Initialize[Task[A15]],
task16: Initialize[Task[A16]],
task17: Initialize[Task[A17]],
task18: Initialize[Task[A18]],
task19: Initialize[Task[A19]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5),
unitTask(task6),
unitTask(task7),
unitTask(task8),
unitTask(task9),
unitTask(task10),
unitTask(task11),
unitTask(task12),
unitTask(task13),
unitTask(task14),
unitTask(task15),
unitTask(task16),
unitTask(task17),
unitTask(task18),
unitTask(task19)
),
last
)
def sequential[
A0,
A1,
A2,
A3,
A4,
A5,
A6,
A7,
A8,
A9,
A10,
A11,
A12,
A13,
A14,
A15,
A16,
A17,
A18,
A19,
A20,
B
](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
task6: Initialize[Task[A6]],
task7: Initialize[Task[A7]],
task8: Initialize[Task[A8]],
task9: Initialize[Task[A9]],
task10: Initialize[Task[A10]],
task11: Initialize[Task[A11]],
task12: Initialize[Task[A12]],
task13: Initialize[Task[A13]],
task14: Initialize[Task[A14]],
task15: Initialize[Task[A15]],
task16: Initialize[Task[A16]],
task17: Initialize[Task[A17]],
task18: Initialize[Task[A18]],
task19: Initialize[Task[A19]],
task20: Initialize[Task[A20]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5),
unitTask(task6),
unitTask(task7),
unitTask(task8),
unitTask(task9),
unitTask(task10),
unitTask(task11),
unitTask(task12),
unitTask(task13),
unitTask(task14),
unitTask(task15),
unitTask(task16),
unitTask(task17),
unitTask(task18),
unitTask(task19),
unitTask(task20)
),
last
)
def sequential[
A0,
A1,
A2,
A3,
A4,
A5,
A6,
A7,
A8,
A9,
A10,
A11,
A12,
A13,
A14,
A15,
A16,
A17,
A18,
A19,
A20,
A21,
B
](
task0: Initialize[Task[A0]],
task1: Initialize[Task[A1]],
task2: Initialize[Task[A2]],
task3: Initialize[Task[A3]],
task4: Initialize[Task[A4]],
task5: Initialize[Task[A5]],
task6: Initialize[Task[A6]],
task7: Initialize[Task[A7]],
task8: Initialize[Task[A8]],
task9: Initialize[Task[A9]],
task10: Initialize[Task[A10]],
task11: Initialize[Task[A11]],
task12: Initialize[Task[A12]],
task13: Initialize[Task[A13]],
task14: Initialize[Task[A14]],
task15: Initialize[Task[A15]],
task16: Initialize[Task[A16]],
task17: Initialize[Task[A17]],
task18: Initialize[Task[A18]],
task19: Initialize[Task[A19]],
task20: Initialize[Task[A20]],
task21: Initialize[Task[A21]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
sequential(
List(
unitTask(task0),
unitTask(task1),
unitTask(task2),
unitTask(task3),
unitTask(task4),
unitTask(task5),
unitTask(task6),
unitTask(task7),
unitTask(task8),
unitTask(task9),
unitTask(task10),
unitTask(task11),
unitTask(task12),
unitTask(task13),
unitTask(task14),
unitTask(task15),
unitTask(task16),
unitTask(task17),
unitTask(task18),
unitTask(task19),
unitTask(task20),
unitTask(task21)
),
last
)
def sequential[B](tasks: Seq[Initialize[Task[B]]]): Initialize[Task[B]] = {
val initTasks: Seq[Initialize[Task[B]]] = tasks.init
val lastTask: Initialize[Task[B]] = tasks.last
sequential(initTasks.map(unitTask), lastTask)
}
def sequential[B](
tasks: Seq[Initialize[Task[Unit]]],
last: Initialize[Task[B]]
): Initialize[Task[B]] =
tasks.toList match {
case Nil => Def.task { last.value }
case x :: xs =>
Def.taskDyn {
Def.unit(x.value)
sequential(xs, last)
}
}
private def unitTask[A](task: Initialize[Task[A]]): Initialize[Task[Unit]] =
Def.task {
Def.unit(task.value)
()
}
}
| xuwei-k/xsbt | main/src/main/scala/sbt/internal/TaskSequential.scala | Scala | apache-2.0 | 20,819 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.stat
import org.apache.commons.math3.distribution.NormalDistribution
import org.apache.spark.SparkFunSuite
import org.apache.spark.mllib.util.MLlibTestSparkContext
class KernelDensitySuite extends SparkFunSuite with MLlibTestSparkContext {
test("kernel density single sample") {
val rdd = sc.parallelize(Array(5.0))
val evaluationPoints = Array(5.0, 6.0)
val densities = new KernelDensity().setSample(rdd).setBandwidth(3.0).estimate(evaluationPoints)
val normal = new NormalDistribution(5.0, 3.0)
val acceptableErr = 1e-6
assert(math.abs(densities(0) - normal.density(5.0)) < acceptableErr)
assert(math.abs(densities(1) - normal.density(6.0)) < acceptableErr)
}
test("kernel density multiple samples") {
val rdd = sc.parallelize(Array(5.0, 10.0))
val evaluationPoints = Array(5.0, 6.0)
val densities = new KernelDensity().setSample(rdd).setBandwidth(3.0).estimate(evaluationPoints)
val normal1 = new NormalDistribution(5.0, 3.0)
val normal2 = new NormalDistribution(10.0, 3.0)
val acceptableErr = 1e-6
assert(math.abs(
densities(0) - (normal1.density(5.0) + normal2.density(5.0)) / 2) < acceptableErr)
assert(math.abs(
densities(1) - (normal1.density(6.0) + normal2.density(6.0)) / 2) < acceptableErr)
}
}
| wangyixiaohuihui/spark2-annotation | mllib/src/test/scala/org/apache/spark/mllib/stat/KernelDensitySuite.scala | Scala | apache-2.0 | 2,171 |
package akka.persistence.pg.testkit
import akka.persistence.CapabilityFlag
import akka.persistence.journal.JournalSpec
import akka.persistence.pg.journal.JournalTable
import akka.persistence.pg.util.{CreateTables, RecreateSchema}
import akka.persistence.pg.{PgConfig, PgExtension}
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Milliseconds, Second, Span}
class PgAsyncJournalSpec
extends JournalSpec(ConfigFactory.load("pg-application.conf"))
with JournalTable
with RecreateSchema
with ScalaFutures
with CreateTables
with PgConfig {
override implicit val patienceConfig = PatienceConfig(timeout = Span(1, Second), interval = Span(100, Milliseconds))
override lazy val pluginConfig = PgExtension(system).pluginConfig
import driver.api._
override def beforeAll(): Unit = {
pluginConfig.database
.run(
recreateSchema
.andThen(journals.schema.create)
)
.futureValue
super.beforeAll()
}
override protected def afterAll(): Unit = {
system.terminate()
system.whenTerminated.futureValue
()
}
override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = false
protected override def supportsSerialization: CapabilityFlag = false
}
| WegenenVerkeer/akka-persistence-postgresql | modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/testkit/PgAsyncJournalSpec.scala | Scala | mit | 1,316 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples
import scala.collection.mutable
import scala.util.Random
import org.apache.spark.sql.SparkSession
/**
* Transitive closure on a graph.
*/
object SparkTC {
val numEdges = 200
val numVertices = 100
val rand = new Random(42)
def generateGraph: Seq[(Int, Int)] = {
val edges: mutable.Set[(Int, Int)] = mutable.Set.empty
while (edges.size < numEdges) {
val from = rand.nextInt(numVertices)
val to = rand.nextInt(numVertices)
if (from != to) edges.+=((from, to))
}
edges.toSeq
}
def main(args: Array[String]): Unit = {
val spark = SparkSession
.builder
.appName("SparkTC")
.getOrCreate()
val slices = if (args.length > 0) args(0).toInt else 2
var tc = spark.sparkContext.parallelize(generateGraph, slices).cache()
// Linear transitive closure: each round grows paths by one edge,
// by joining the graph's edges with the already-discovered paths.
// e.g. join the path (y, z) from the TC with the edge (x, y) from
// the graph to obtain the path (x, z).
// Because join() joins on keys, the edges are stored in reversed order.
val edges = tc.map(x => (x._2, x._1))
// This join is iterated until a fixed point is reached.
var oldCount = 0L
var nextCount = tc.count()
do {
oldCount = nextCount
// Perform the join, obtaining an RDD of (y, (z, x)) pairs,
// then project the result to obtain the new (x, z) paths.
tc = tc.union(tc.join(edges).map(x => (x._2._2, x._2._1))).distinct().cache()
nextCount = tc.count()
} while (nextCount != oldCount)
println(s"TC has ${tc.count()} edges.")
spark.stop()
}
}
// scalastyle:on println
| lhfei/spark-in-action | spark-3.x/src/main/scala/org/apache/spark/examples/SparkTC.scala | Scala | apache-2.0 | 2,554 |
package com.twitter.finagle.httpx
import org.jboss.netty.handler.codec.http.{HttpHeaders,
CookieDecoder => NettyCookieDecoder, CookieEncoder => NettyCookieEncoder}
import scala.collection.mutable
import scala.collection.JavaConverters._
/**
* Adapt cookies of a Message to a mutable Map where cookies are indexed by
* their name. Requests use the Cookie header and Responses use the Set-Cookie
* header. If a cookie is added to the CookieMap, a header is automatically
* added to the Message. You can add the same cookie more than once. Use getAll
* to retrieve all of them, otherwise only the first one is returned. If a
* cookie is removed from the CookieMap, a header is automatically removed from
* the message.
*/
class CookieMap(message: Message)
  extends mutable.Map[String, Cookie]
  with mutable.MapLike[String, Cookie, CookieMap] {
  override def empty: CookieMap = new CookieMap(Request())

  // Cookies indexed by name. A name may map to several cookies, so values are
  // kept as a Seq: `get` returns the first, `getAll` returns all of them.
  private[this] val underlying = mutable.Map[String, Seq[Cookie]]()

  /** Check if there was a parse error. Invalid cookies are ignored. */
  def isValid = _isValid
  private[this] var _isValid = true

  // Requests carry cookies in the "Cookie" header; responses use "Set-Cookie".
  private[this] val cookieHeaderName =
    if (message.isRequest)
      HttpHeaders.Names.COOKIE
    else
      HttpHeaders.Names.SET_COOKIE

  // Decodes one cookie header value. A malformed header flags the map as
  // invalid and contributes no cookies.
  private[this] def decodeCookies(header: String): Iterable[Cookie] = {
    val decoder = new NettyCookieDecoder
    try {
      decoder.decode(header).asScala map { new Cookie(_) }
    } catch {
      case e: IllegalArgumentException =>
        _isValid = false
        Nil
    }
  }

  // Rewrites the message's cookie header(s) from the current map contents.
  // Invoked after every mutation so headers stay in sync with the map.
  protected def rewriteCookieHeaders() {
    // Clear all cookies - there may be more than one with this name.
    message.headers.remove(cookieHeaderName)

    // Add cookies back again
    if (message.isRequest) {
      // A request carries all of its cookies in a single Cookie header.
      val encoder = new NettyCookieEncoder(false)
      foreach { case (_, cookie) =>
        encoder.addCookie(cookie.underlying)
      }
      message.headers.set(cookieHeaderName, encoder.encode())
    } else {
      // A response needs one Set-Cookie header per cookie. A fresh encoder is
      // required for each cookie: reusing a single encoder accumulates the
      // cookies added so far, so later headers would repeat (or fail to
      // encode) the earlier ones.
      foreach { case (_, cookie) =>
        val encoder = new NettyCookieEncoder(true)
        encoder.addCookie(cookie.underlying)
        message.headers.add(cookieHeaderName, encoder.encode())
      }
    }
  }

  /** Iterate through all cookies. */
  def iterator: Iterator[(String, Cookie)] = {
    for {
      (name, cookies) <- underlying.iterator
      cookie <- cookies
    } yield (name, cookie)
  }

  /** Get first cookie with this name. */
  def get(key: String): Option[Cookie] = getAll(key).headOption

  /** Get the value of the first cookie with this name. */
  def getValue(key: String): Option[String] = get(key) map { _.value }

  /** Get all cookies with this name. */
  def getAll(key: String): Seq[Cookie] = underlying.getOrElse(key, Nil)

  /** Add cookie. Remove existing cookies with this name. */
  def +=(kv: (String, Cookie)) = {
    underlying(kv._1) = Seq(kv._2)
    rewriteCookieHeaders()
    this
  }

  /** Add cookie keyed by its own name. Remove existing cookies with this name. */
  def +=(cookie: Cookie): CookieMap = {
    this += ((cookie.name, cookie))
  }

  /** Delete all cookies with this name. */
  def -=(key: String) = {
    underlying -= key
    rewriteCookieHeaders()
    this
  }

  /** Add cookie. Keep existing cookies with this name. */
  def add(k: String, v: Cookie) {
    underlying(k) = underlying.getOrElse(k, Nil) :+ v
    rewriteCookieHeaders()
  }

  /** Add cookie keyed by its own name. Keep existing cookies with this name. */
  def add(cookie: Cookie) {
    add(cookie.name, cookie)
  }

  // Populate the map from any cookie headers already present on the message.
  for {
    cookieHeader <- message.headers.getAll(cookieHeaderName).asScala
    cookie <- decodeCookies(cookieHeader)
  } {
    add(cookie)
  }
}
| LithiumTD/finagle | finagle-httpx/src/main/scala/com/twitter/finagle/httpx/CookieMap.scala | Scala | apache-2.0 | 3,451 |
package com.arcusys.learn.liferay.update.version250
import com.arcusys.learn.liferay.LiferayClasses.LUpgradeProcess
import com.arcusys.learn.liferay.update.version250.slide.SlideTableComponent
import com.arcusys.valamis.persistence.common.SlickDBInfo
import com.arcusys.valamis.web.configuration.ioc.Configuration
import com.escalatesoft.subcut.inject.Injectable
class DBUpdater2419 extends LUpgradeProcess with Injectable with SlideTableComponent{
  implicit val bindingModule = Configuration

  /** Schema version this upgrade step brings the database to. */
  override def getThreshold = 2419

  lazy val dbInfo = inject[SlickDBInfo]
  lazy val driver = dbInfo.slickDriver
  lazy val db = dbInfo.databaseDef

  import driver.simple._

  /** Inserts the four built-in slide themes into the slide themes table. */
  override def doUpgrade(): Unit = {
    db.withSession { implicit session =>
      // Build the full theme list up front (no mutable accumulation needed).
      val themes = Seq(
        createSlideTheme("Black and White", "#000000", "ubuntu$20px$#ffffff"),
        createSlideTheme("Blue", "#48aadf", "roboto$20px$#ffffff"),
        createSlideTheme("Default White", "#ffffff", "ubuntu$20px$#000000"),
        createSlideTheme("Green", "#ffffff", "roboto$20px$#9cc83d")
      )
      themes.foreach { theme =>
        slideThemes returning slideThemes.map(_.id) insert theme
      }
    }
  }

  /**
   * Builds a default slide theme.
   *
   * @param title   display name of the theme
   * @param bgColor background color as a hex string
   * @param font    font spec encoded as "family$size$color"
   */
  private def createSlideTheme(title: String, bgColor: String, font: String): SlideTheme = {
    new SlideTheme(
      title = title,
      bgColor = Some(bgColor),
      font = Some(font),
      isDefault = true
    )
  }
}
| igor-borisov/valamis | learn-portlet/src/main/scala/com/arcusys/learn/liferay/update/version250/DBUpdater2419.scala | Scala | gpl-3.0 | 1,554 |
package skinny.controller
import skinny.engine.SkinnyEngineFilter
/**
 * SkinnyController as a Servlet for REST APIs.
 *
 * Mixes the common controller behavior (SkinnyControllerBase) into a
 * filter-based entry point (SkinnyEngineFilter); adds no members of its own.
 *
 * NOTICE: If you'd like to disable Set-Cookie header for session id, configure in web.xml
 */
trait SkinnyApiController
  extends SkinnyControllerBase
  with SkinnyEngineFilter | holycattle/skinny-framework | framework/src/main/scala/skinny/controller/SkinnyApiController.scala | Scala | mit | 300 |
package so.eval.languages
import so.eval.{ EvaluationRequest, SandboxedLanguage }
/** Sandboxed evaluation of C sources: compile everything with gcc, run `a.out`. */
case class C(evaluation: EvaluationRequest) extends SandboxedLanguage {
  val extension = "c"

  // The primary source plus every additional ".c" file supplied in the request.
  val allFiles = {
    val extraSources = evaluation.files
      .map(_.keys.filter(_.endsWith(".c")).toList)
      .getOrElse(List())
    filename :: extraSources
  }

  // Compile up front with warnings enabled; the produced a.out is executed.
  override val compileCommand = Some(Seq("gcc", "-Wall") ++ allFiles)
  val command = Seq("./a.out")
}
| eval-so/minibcs | src/main/scala/languages/C.scala | Scala | apache-2.0 | 402 |
/* - Coeus web framework -------------------------
*
* Licensed under the Apache License, Version 2.0.
*
* Author: Spiros Tzavellas
*/
package com.tzavellas.coeus.core.config
import scala.collection.mutable.{ Builder, ListBuffer }
import com.tzavellas.coeus.core.interception._
/**
* A trait to register <code>Interceptor</code> instances for a
* {@code DispatcherServlet}.
*/
trait InterceptorRegistry {

  /**
   * Holds all the registered interceptors.
   *
   * <p>Interceptors are called during request execution in the order in which
   * they are registered.</p>
   *
   * <p>{@link ThreadLocalInterceptor} and {@link FlashScopeInterceptor}
   * are registered by default because basic framework features depend on their
   * function.</p>
   */
  val interceptors: Builder[Interceptor, Seq[Interceptor]] = new ListBuffer

  // Framework-required interceptors, always installed first (Builder.+= returns
  // the builder itself, so the two registrations chain).
  interceptors += new ThreadLocalInterceptor += new FlashScopeInterceptor
}
| sptz45/coeus | src/main/scala/com/tzavellas/coeus/core/config/InterceptorRegistry.scala | Scala | apache-2.0 | 928 |
package foo

// NOTE(review): exercises a class and a package object sharing the name `Bar`
// within the same package `foo`; both bodies are intentionally empty.
class Bar { }

package object Bar { }
| felixmulder/scala | test/pending/pos/t4695/T_2.scala | Scala | bsd-3-clause | 50 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.cluster.Partition
import kafka.controller.StateChangeLogger
import kafka.log.Log
import kafka.server.checkpoints.OffsetCheckpoints
import kafka.server.metadata.{MetadataBrokers, MetadataPartition}
import kafka.utils.Implicits.MapExtensionMethods
import org.apache.kafka.common.{TopicPartition, Uuid}
import org.apache.kafka.common.errors.KafkaStorageException
import scala.collection.{Map, Set, mutable}
/**
 * Collaborators and callbacks that [[RaftReplicaChangeDelegate]] needs from
 * its host: state-change logging, the fetcher managers, partition state
 * bookkeeping (deferred/offline/online), delayed-request completion, shutdown
 * status, initial fetch offsets and broker configuration.
 */
trait RaftReplicaChangeDelegateHelper {
  def stateChangeLogger: StateChangeLogger
  def replicaFetcherManager: ReplicaFetcherManager
  def replicaAlterLogDirsManager: ReplicaAlterLogDirsManager
  def markDeferred(state: HostedPartition.Deferred): Unit
  def getLogDir(topicPartition: TopicPartition): Option[String]
  def error(msg: => String, e: => Throwable): Unit
  def markOffline(topicPartition: TopicPartition): Unit
  def markOnline(partition: Partition): Unit
  def completeDelayedFetchOrProduceRequests(topicPartition: TopicPartition): Unit
  def isShuttingDown: Boolean
  def initialFetchOffset(log: Log): Long
  def config: KafkaConfig
}
/**
 * Applies partition leadership/followership transitions derived from metadata
 * batches. All side effects (fetchers, logs, partition bookkeeping, logging)
 * are routed through `helper`.
 */
class RaftReplicaChangeDelegate(helper: RaftReplicaChangeDelegateHelper) {
  /**
   * Stops fetchers for the given partitions and marks each of them deferred
   * via the helper; idle fetcher threads are shut down afterwards.
   *
   * @param partitionsNewMap partitions to defer, mapped to whether each is new
   * @param metadataOffset   offset of the metadata batch driving this change
   */
  def makeDeferred(partitionsNewMap: Map[Partition, Boolean],
                   metadataOffset: Long): Unit = {
    val traceLoggingEnabled = helper.stateChangeLogger.isTraceEnabled
    if (traceLoggingEnabled)
      partitionsNewMap.forKeyValue { (partition, isNew) =>
        helper.stateChangeLogger.trace(s"Metadata batch $metadataOffset: starting the " +
          s"become-deferred transition for partition ${partition.topicPartition} isNew=$isNew")
      }
    // Stop fetchers for all the partitions
    helper.replicaFetcherManager.removeFetcherForPartitions(partitionsNewMap.keySet.map(_.topicPartition))
    helper.stateChangeLogger.info(s"Metadata batch $metadataOffset: as part of become-deferred request, " +
      s"stopped any fetchers for ${partitionsNewMap.size} partitions")
    // mark all the partitions as deferred
    partitionsNewMap.forKeyValue((partition, isNew) => helper.markDeferred(HostedPartition.Deferred(partition, isNew)))
    helper.replicaFetcherManager.shutdownIdleFetcherThreads()
    helper.replicaAlterLogDirsManager.shutdownIdleFetcherThreads()
    if (traceLoggingEnabled)
      partitionsNewMap.keys.foreach { partition =>
        helper.stateChangeLogger.trace(s"Completed batch $metadataOffset become-deferred " +
          s"transition for partition ${partition.topicPartition}")
      }
  }

  /**
   * Transitions the given partitions to leader: stops their fetchers, then
   * calls `Partition.makeLeader` for each. A per-partition
   * KafkaStorageException marks that partition offline; any other error
   * aborts the whole batch and is rethrown to the caller.
   *
   * @param prevPartitionsAlreadyExisting partition states that existed before this batch
   * @param partitionStates               partitions to lead, with their target state
   * @param highWatermarkCheckpoints      checkpointed high watermarks
   * @param metadataOffset                batch offset, or None when applying deferred batches
   * @param topicIds                      lookup from topic name to optional topic id
   * @return the partitions that actually became leader
   */
  def makeLeaders(prevPartitionsAlreadyExisting: Set[MetadataPartition],
                  partitionStates: Map[Partition, MetadataPartition],
                  highWatermarkCheckpoints: OffsetCheckpoints,
                  metadataOffset: Option[Long],
                  topicIds: String => Option[Uuid]): Set[Partition] = {
    val partitionsMadeLeaders = mutable.Set[Partition]()
    val traceLoggingEnabled = helper.stateChangeLogger.isTraceEnabled
    val deferredBatches = metadataOffset.isEmpty
    val topLevelLogPrefix = if (deferredBatches)
      "Metadata batch <multiple deferred>"
    else
      s"Metadata batch ${metadataOffset.get}"
    try {
      // First stop fetchers for all the partitions
      helper.replicaFetcherManager.removeFetcherForPartitions(partitionStates.keySet.map(_.topicPartition))
      helper.stateChangeLogger.info(s"$topLevelLogPrefix: stopped ${partitionStates.size} fetcher(s)")
      // Update the partition information to be the leader
      partitionStates.forKeyValue { (partition, state) =>
        val topicPartition = partition.topicPartition
        val partitionLogMsgPrefix = if (deferredBatches)
          s"Apply deferred leader partition $topicPartition"
        else
          s"Metadata batch ${metadataOffset.get} $topicPartition"
        try {
          val isrState = state.toLeaderAndIsrPartitionState(
            !prevPartitionsAlreadyExisting(state))
          if (partition.makeLeader(isrState, highWatermarkCheckpoints, topicIds(partition.topic))) {
            partitionsMadeLeaders += partition
            if (traceLoggingEnabled) {
              helper.stateChangeLogger.trace(s"$partitionLogMsgPrefix: completed the become-leader state change.")
            }
          } else {
            helper.stateChangeLogger.info(s"$partitionLogMsgPrefix: skipped the " +
              "become-leader state change since it is already the leader.")
          }
        } catch {
          case e: KafkaStorageException =>
            // Disk error: record it and take the partition offline, but keep
            // processing the rest of the batch.
            helper.stateChangeLogger.error(s"$partitionLogMsgPrefix: unable to make " +
              s"leader because the replica for the partition is offline due to disk error $e")
            val dirOpt = helper.getLogDir(topicPartition)
            helper.error(s"Error while making broker the leader for partition $partition in dir $dirOpt", e)
            helper.markOffline(topicPartition)
        }
      }
    } catch {
      case e: Throwable =>
        helper.stateChangeLogger.error(s"$topLevelLogPrefix: error while processing batch.", e)
        // Re-throw the exception for it to be caught in BrokerMetadataListener
        throw e
    }
    partitionsMadeLeaders
  }

  /**
   * Transitions the given partitions to follower. For each partition: if the
   * new leader is unknown to the metadata cache, only the local log is
   * created (so the high watermark is still checkpointed, see KAFKA-1647);
   * otherwise `Partition.makeFollower` is invoked. Partitions that changed
   * follower state get their fetchers restarted (unless the broker is
   * shutting down) and their delayed fetch/produce requests completed.
   *
   * @param prevPartitionsAlreadyExisting partition states that existed before this batch
   * @param currentBrokers                current broker metadata (alive and not)
   * @param partitionStates               partitions to follow, with their target state
   * @param highWatermarkCheckpoints      checkpointed high watermarks
   * @param metadataOffset                batch offset, or None when applying deferred batches
   * @param topicIds                      lookup from topic name to optional topic id
   * @return the partitions that actually became follower
   */
  def makeFollowers(prevPartitionsAlreadyExisting: Set[MetadataPartition],
                    currentBrokers: MetadataBrokers,
                    partitionStates: Map[Partition, MetadataPartition],
                    highWatermarkCheckpoints: OffsetCheckpoints,
                    metadataOffset: Option[Long],
                    topicIds: String => Option[Uuid]): Set[Partition] = {
    val traceLoggingEnabled = helper.stateChangeLogger.isTraceEnabled
    val deferredBatches = metadataOffset.isEmpty
    val topLevelLogPrefix = if (deferredBatches)
      "Metadata batch <multiple deferred>"
    else
      s"Metadata batch ${metadataOffset.get}"
    if (traceLoggingEnabled) {
      partitionStates.forKeyValue { (partition, state) =>
        val topicPartition = partition.topicPartition
        val partitionLogMsgPrefix = if (deferredBatches)
          s"Apply deferred follower partition $topicPartition"
        else
          s"Metadata batch ${metadataOffset.get} $topicPartition"
        helper.stateChangeLogger.trace(s"$partitionLogMsgPrefix: starting the " +
          s"become-follower transition with leader ${state.leaderId}")
      }
    }
    val partitionsMadeFollower: mutable.Set[Partition] = mutable.Set()
    // all brokers, including both alive and not
    val acceptableLeaderBrokerIds = currentBrokers.iterator().map(broker => broker.id).toSet
    val allBrokersByIdMap = currentBrokers.iterator().map(broker => broker.id -> broker).toMap
    try {
      partitionStates.forKeyValue { (partition, state) =>
        val topicPartition = partition.topicPartition
        val partitionLogMsgPrefix = if (deferredBatches)
          s"Apply deferred follower partition $topicPartition"
        else
          s"Metadata batch ${metadataOffset.get} $topicPartition"
        try {
          val isNew = !prevPartitionsAlreadyExisting(state)
          if (!acceptableLeaderBrokerIds.contains(state.leaderId)) {
            // The leader broker should always be present in the metadata cache.
            // If not, we should record the error message and abort the transition process for this partition
            helper.stateChangeLogger.error(s"$partitionLogMsgPrefix: cannot become follower " +
              s"since the new leader ${state.leaderId} is unavailable.")
            // Create the local replica even if the leader is unavailable. This is required to ensure that we include
            // the partition's high watermark in the checkpoint file (see KAFKA-1647)
            partition.createLogIfNotExists(isNew, isFutureReplica = false, highWatermarkCheckpoints, topicIds(partition.topic))
          } else {
            val isrState = state.toLeaderAndIsrPartitionState(isNew)
            if (partition.makeFollower(isrState, highWatermarkCheckpoints, topicIds(partition.topic))) {
              partitionsMadeFollower += partition
              if (traceLoggingEnabled) {
                helper.stateChangeLogger.trace(s"$partitionLogMsgPrefix: completed the " +
                  s"become-follower state change with new leader ${state.leaderId}.")
              }
            } else {
              helper.stateChangeLogger.info(s"$partitionLogMsgPrefix: skipped the " +
                s"become-follower state change since " +
                s"the new leader ${state.leaderId} is the same as the old leader.")
            }
          }
        } catch {
          case e: KafkaStorageException =>
            // Disk error: record it and take the partition offline, but keep
            // processing the rest of the batch.
            helper.stateChangeLogger.error(s"$partitionLogMsgPrefix: unable to complete the " +
              s"become-follower state change since the " +
              s"replica for the partition is offline due to disk error $e")
            val dirOpt = helper.getLogDir(partition.topicPartition)
            helper.error(s"Error while making broker the follower with leader ${state.leaderId} in dir $dirOpt", e)
            helper.markOffline(topicPartition)
        }
      }
      if (partitionsMadeFollower.nonEmpty) {
        helper.replicaFetcherManager.removeFetcherForPartitions(partitionsMadeFollower.map(_.topicPartition))
        helper.stateChangeLogger.info(s"$topLevelLogPrefix: stopped followers for ${partitionsMadeFollower.size} partitions")
        partitionsMadeFollower.foreach { partition =>
          helper.completeDelayedFetchOrProduceRequests(partition.topicPartition)
        }
        if (helper.isShuttingDown) {
          // Do not start fetchers on a broker that is shutting down.
          if (traceLoggingEnabled) {
            partitionsMadeFollower.foreach { partition =>
              val topicPartition = partition.topicPartition
              val partitionLogMsgPrefix = if (deferredBatches)
                s"Apply deferred follower partition $topicPartition"
              else
                s"Metadata batch ${metadataOffset.get} $topicPartition"
              helper.stateChangeLogger.trace(s"$partitionLogMsgPrefix: skipped the " +
                s"adding-fetcher step of the become-follower state for " +
                s"$topicPartition since we are shutting down.")
            }
          }
        } else {
          // we do not need to check if the leader exists again since this has been done at the beginning of this process
          val partitionsToMakeFollowerWithLeaderAndOffset = partitionsMadeFollower.map { partition =>
            val leader = allBrokersByIdMap(partition.leaderReplicaIdOpt.get).brokerEndPoint(helper.config.interBrokerListenerName)
            val log = partition.localLogOrException
            val fetchOffset = helper.initialFetchOffset(log)
            if (deferredBatches) {
              helper.markOnline(partition)
            }
            partition.topicPartition -> InitialFetchState(leader, partition.getLeaderEpoch, fetchOffset)
          }.toMap
          helper.replicaFetcherManager.addFetcherForPartitions(partitionsToMakeFollowerWithLeaderAndOffset)
        }
      }
    } catch {
      case e: Throwable =>
        helper.stateChangeLogger.error(s"$topLevelLogPrefix: error while processing batch", e)
        // Re-throw the exception for it to be caught in BrokerMetadataListener
        throw e
    }
    if (traceLoggingEnabled)
      partitionsMadeFollower.foreach { partition =>
        val topicPartition = partition.topicPartition
        val state = partitionStates(partition)
        val partitionLogMsgPrefix = if (deferredBatches)
          s"Apply deferred follower partition $topicPartition"
        else
          s"Metadata batch ${metadataOffset.get} $topicPartition"
        helper.stateChangeLogger.trace(s"$partitionLogMsgPrefix: completed become-follower " +
          s"transition for partition $topicPartition with new leader ${state.leaderId}")
      }
    partitionsMadeFollower
  }
}
| Chasego/kafka | core/src/main/scala/kafka/server/RaftReplicaChangeDelegate.scala | Scala | apache-2.0 | 12,671 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.3
* @date Sun May 17 15:07:22 EDT 2015
* @see LICENSE (MIT style license file)
* Also see Oracle Copyright below
*
* Translated to Scala from MoleculeSampleApp
* @see https://docs.oracle.com/javase/8/javafx/graphics-tutorial/sampleapp3d.htm
* @see https://docs.oracle.com/javase/8/javafx/graphics-tutorial/sampleapp3d-code.htm
*/
/*
* Copyright (c) 2013, 2014 Oracle and/or its affiliates.
* All rights reserved. Use is subject to license terms.
*
* This file is available and licensed under the following license:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the distribution.
* - Neither the name of Oracle nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES LOSS OF USE,
* DATA, OR PROFITS OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package scalation.moleculesampleapp
import javafx.application.Application
import javafx.event.EventHandler
import javafx.scene._
import javafx.scene.input.{KeyCode, KeyEvent, MouseEvent}
import javafx.scene.paint.Color
import javafx.scene.shape.{Box, Cylinder, Sphere}
import javafx.scene.transform.Rotate
import javafx.stage.Stage
import PerspectiveCamera._
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `MoleculeSampleApp` class illustrates the creation of 3D objects
* (a representation of a Hydrogen molecule) as well as how to rotate them using
* the mouse. It support the following keyboard commands as well:
* 'V' toggles the visibility of the Hydrogen molecule,
* 'X' toggles the visibility of the coordinate axes,
* 'Z' restores original locations.
*
* @author cmcastil (author of Java version)
*/
class MoleculeSampleApp extends Application
{
    // Scene graph roots and camera rigs (Xform adds translate/rotate state).
    val root           = new Group ()
    val axisGroup      = new Xform ()
    val moleculeGroup  = new Xform ()
    val world          = new Xform ()
    val cameraXform    = new Xform ()
    val cameraXform2   = new Xform ()
    val cameraXform3   = new Xform ()
    val camera         = PerspectiveCamera ()

    private val AXIS_LENGTH        = 250.0      // length of each coordinate axis box
    private val HYDROGEN_ANGLE     = 104.5      // H-O-H bond angle in degrees
    private val CONTROL_MULTIPLIER = 0.1        // mouse sensitivity with Ctrl held
    private val SHIFT_MULTIPLIER   = 10.0       // mouse sensitivity with Shift held
    private val MOUSE_SPEED        = 0.1
    private val ROTATION_SPEED     = 2.0
    private val TRACK_SPEED        = 0.3

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Build the coordinate axes (x, y and z) as thin colored boxes
     *  (red = x, green = y, blue = z); initially hidden.
     */
    private def buildAxes ()
    {
        println ("buildAxes ()")
        val redMaterial   = PhongMaterial (Color.DARKRED, Color.RED)
        val greenMaterial = PhongMaterial (Color.DARKGREEN, Color.GREEN)
        val blueMaterial  = PhongMaterial (Color.DARKBLUE, Color.BLUE)

        val xAxis = new Box (AXIS_LENGTH, 1, 1)
        val yAxis = new Box (1, AXIS_LENGTH, 1)
        val zAxis = new Box (1, 1, AXIS_LENGTH)

        xAxis.setMaterial (redMaterial)
        yAxis.setMaterial (greenMaterial)
        zAxis.setMaterial (blueMaterial)

        axisGroup.getChildren ().addAll (xAxis, yAxis, zAxis)
        axisGroup.setVisible (false)
        world.getChildren ().addAll (axisGroup)
    } // buildAxes

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Handle mouse events for moving the molecule/axes: primary button
     *  rotates, secondary zooms, middle button tracks/pans.
     *  @param scene  the current scene
     *  @param root   the root node of the scene
     */
    private def handleMouse (scene: Scene, root: Node)
    {
        var mPos = (0.0, 0.0)                 // current mouse position
        var mOld = (0.0, 0.0)                 // prior/old mouse position
        var mDel = (0.0, 0.0)                 // distance/delta mPos - mOld

        scene.setOnMousePressed (new EventHandler [MouseEvent] () {
            override def handle (me: MouseEvent)
            {
                mPos = (me.getSceneX (), me.getSceneY ())
                mOld = mPos
            } // handle
        })
        scene.setOnMouseDragged (new EventHandler [MouseEvent] () {
            @Override
            override def handle (me: MouseEvent)
            {
                mOld = mPos
                mPos = (me.getSceneX (), me.getSceneY ())
                mDel = (mPos._1 - mOld._1, mPos._2 - mOld._2)

                // Ctrl slows the motion down, Shift speeds it up.
                var modifier = 1.0
                if (me.isControlDown ()) {
                    modifier = CONTROL_MULTIPLIER
                } // if
                if (me.isShiftDown ()) {
                    modifier = SHIFT_MULTIPLIER
                } // if
                if (me.isPrimaryButtonDown ()) {
                    // rotate the camera rig around the x and y axes
                    cameraXform.getRy.setAngle (cameraXform.getRy.getAngle () - mDel._1 * MOUSE_SPEED * modifier * ROTATION_SPEED)
                    cameraXform.getRx.setAngle (cameraXform.getRx.getAngle () + mDel._2 * MOUSE_SPEED * modifier * ROTATION_SPEED)
                } else if (me.isSecondaryButtonDown ()) {
                    // zoom by translating the camera along z
                    val z = camera.getTranslateZ ()
                    val newZ = z + mDel._1 * MOUSE_SPEED * modifier
                    camera.setTranslateZ (newZ)
                } else if (me.isMiddleButtonDown ()) {
                    // track (pan) the secondary camera rig in x and y
                    cameraXform2.t.setX (cameraXform2.t.getX () + mDel._1 * MOUSE_SPEED * modifier * TRACK_SPEED)
                    cameraXform2.t.setY (cameraXform2.t.getY () + mDel._2 * MOUSE_SPEED * modifier * TRACK_SPEED)
                } // if
            } // handle
        })
    } // handleMouse

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Handle keyboard events: 'Z' restores the original camera position,
     *  'X' toggles the axes, 'V' toggles the molecule.
     *  @param scene  the current scene
     *  @param root   the root node of the scene
     */
    private def handleKeyboard (scene: Scene, root: Node)
    {
        scene.setOnKeyPressed (new EventHandler [KeyEvent] () {
            override def handle (event: KeyEvent)
            {
                event.getCode match {
                case KeyCode.Z => cameraXform2.t.setX (0.0)
                                  cameraXform2.t.setY (0.0)
                                  camera.setTranslateZ (CAMERA_INITIAL_DISTANCE)
                                  cameraXform.getRy.setAngle (CAMERA_INITIAL_Y_ANGLE)
                                  cameraXform.getRx.setAngle (CAMERA_INITIAL_X_ANGLE)
                case KeyCode.X => axisGroup.setVisible (! axisGroup.isVisible ())
                case KeyCode.V => moleculeGroup.setVisible (! moleculeGroup.isVisible ())
                case _         => println ("handleKeyboard: unrecognized key code")
                } // match
            } // handle
        })
    } // handleKeyboard

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Build the Hydrogen molecule using three spheres and two narrow cylinders.
     *  The molecule component hierarchy is as follows:
     *
     *  [*] moleculeXform
     *      [*] oxygenXform
     *          [*] oxygenSphere
     *      [*] hydrogen1SideXform
     *          [*] hydrogen1Xform
     *              [*] hydrogen1Sphere
     *          [*] bond1Cylinder
     *      [*] hydrogen2SideXform
     *          [*] hydrogen2Xform
     *              [*] hydrogen2Sphere
     *          [*] bond2Cylinder
     */
    private def buildMolecule ()
    {
        val redMaterial   = PhongMaterial (Color.DARKRED, Color.RED)
        val whiteMaterial = PhongMaterial (Color.WHITE, Color.LIGHTBLUE)
        val greyMaterial  = PhongMaterial (Color.DARKGREY, Color.GREY)

        val moleculeXform      = new Xform ()
        val oxygenXform        = new Xform ()
        val hydrogen1SideXform = new Xform ()
        val hydrogen1Xform     = new Xform ()
        val hydrogen2SideXform = new Xform ()
        val hydrogen2Xform     = new Xform ()

        val oxygenSphere = new Sphere (40.0)
        oxygenSphere.setMaterial (redMaterial)

        val hydrogen1Sphere = new Sphere (30.0)
        hydrogen1Sphere.setMaterial (whiteMaterial)
        hydrogen1Sphere.setTranslateX (0.0)

        val hydrogen2Sphere = new Sphere (30.0)
        hydrogen2Sphere.setMaterial (whiteMaterial)
        hydrogen2Sphere.setTranslateZ (0.0)

        // bonds are thin cylinders rotated to lie along the x axis
        val bond1Cylinder = new Cylinder (5, 100)
        bond1Cylinder.setMaterial (greyMaterial)
        bond1Cylinder.setTranslateX (50.0)
        bond1Cylinder.setRotationAxis (Rotate.Z_AXIS)
        bond1Cylinder.setRotate (90.0)

        val bond2Cylinder = new Cylinder (5, 100)
        bond2Cylinder.setMaterial (greyMaterial)
        bond2Cylinder.setTranslateX (50.0)
        bond2Cylinder.setRotationAxis (Rotate.Z_AXIS)
        bond2Cylinder.setRotate (90.0)

        moleculeXform.getChildren ().add (oxygenXform)
        moleculeXform.getChildren ().add (hydrogen1SideXform)
        moleculeXform.getChildren ().add (hydrogen2SideXform)
        oxygenXform.getChildren ().add (oxygenSphere)
        hydrogen1SideXform.getChildren ().add (hydrogen1Xform)
        hydrogen2SideXform.getChildren ().add (hydrogen2Xform)
        hydrogen1Xform.getChildren ().add (hydrogen1Sphere)
        hydrogen2Xform.getChildren ().add (hydrogen2Sphere)
        hydrogen1SideXform.getChildren ().add (bond1Cylinder)
        hydrogen2SideXform.getChildren ().add (bond2Cylinder)

        hydrogen1Xform.setTx (100.0)
        hydrogen2Xform.setTx (100.0)
        hydrogen2SideXform.setRy (HYDROGEN_ANGLE)

        moleculeGroup.getChildren ().add (moleculeXform)
        world.getChildren ().addAll (moleculeGroup)
    } // buildMolecule

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Start the javafx application: assemble the scene graph, wire up the
     *  camera and the input handlers, then show the primary stage.
     *  @param primaryStage  the primary stage
     */
    override def start (primaryStage: Stage)
    {
        // setUserAgentStylesheet (STYLESHEET_MODENA)
        println ("start ()")
        root.getChildren ().add (world)
        root.setDepthTest (DepthTest.ENABLE)

        // buildScene ()
        buildCamera (camera, cameraXform, cameraXform2, cameraXform3, root)
        buildAxes ()
        buildMolecule ()

        val scene = new Scene (root, 1024, 768, true)
        scene.setFill (Color.GREY)
        handleKeyboard (scene, world)
        handleMouse (scene, world)

        primaryStage.setTitle ("Molecule Sample Application")
        primaryStage.setScene (scene)
        primaryStage.show ()
        scene.setCamera (camera)
    } // start
} // MoleculeSampleApp class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `MoleculeSampleApp` object is used to launch the javafx application.
*/
object MoleculeSampleApp
{
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Entry point. Kept as a fallback for environments that cannot launch the
     *  application through deployment artifacts, e.g., IDEs with limited javafx
     *  support (NetBeans ignores main). Delegates to the JavaFX launcher, which
     *  constructs `MoleculeSampleApp` and invokes its `start` method.
     *  @param args  the command line arguments
     */
    def main (args: Array [String]): Unit =
    {
        javafx.application.Application.launch (classOf [MoleculeSampleApp], args: _*)
    } // main

} // MoleculeSampleApp object
| NBKlepp/fda | scalation_1.3/scalation_modeling/src/main/scala/scalation/moleculesampleapp/MoleculeScalaApp.scala | Scala | mit | 12,597 |
/*
* File Plugin.scala is part of JsonRecipes.
* JsonRecipes is opensource Minecraft mod(released under LGPLv3), created by anti344.
* Full licence information can be found in LICENCE and LICENCE.LESSER files in jar-file of the mod.
* Copyright © 2014, anti344
*/
package net.anti344.jsonrecipes.plugins
import net.anti344.jsonrecipes.impl.RecipeRegistry.doRegisterRecipeType
// A JsonRecipes plugin. Extends DelayedInit so that a concrete plugin's
// constructor body is NOT run at instantiation time: the compiler routes each
// initializer block through delayedInit, which captures it in `func`. The
// captured body is executed later by execute(), once the owning mod's id is
// known — so register() calls written in the plugin body see a non-null modid.
trait Plugin
  extends DelayedInit{
  // Id of the mod that owns this plugin; null until execute() is called.
  var modid: String = null
  // Captured (deferred) constructor body; run by execute().
  private var func: () => Unit = () => {}
  // Invoked by the compiler for each delayed initializer block of a subclass.
  def delayedInit(body: => Unit) =
    func = () => body
  // Runs the captured plugin body with the given mod id set.
  final def execute(modid: String) = {
    this.modid = modid
    func()
  }
  // Registers a recipe type, namespaced as "<modid>-<tpe>" when a mod id is set.
  final def register(tpe: String, handler: RecipeHandler[_]) =
    doRegisterRecipeType(if(modid != null)s"$modid-$tpe" else tpe, handler.getRecipeClass, handler)
} | mc-anti344/JsonRecipes | src/main/scala/net/anti344/jsonrecipes/plugins/Plugin.scala | Scala | gpl-3.0 | 792 |
package com.github.davidkellis.query
// see https://github.com/antlr/grammars-v4/blob/master/sqlite/SQLite.g4:312
/** A boolean condition over expressions; predicates compose with `and`/`or`. */
sealed trait Predicate {
  /** Disjunction of this predicate with `op`. */
  def or(op: Predicate): Predicate = Or(this, op)

  /** Conjunction of this predicate with `op`. */
  def and(op: Predicate): Predicate = And(this, op)
}

case class Eq(lhs: Expr, rhs: Expr) extends Predicate
case class Neq(lhs: Expr, rhs: Expr) extends Predicate
case class Gt(lhs: Expr, rhs: Expr) extends Predicate
case class Gte(lhs: Expr, rhs: Expr) extends Predicate
case class Lt(lhs: Expr, rhs: Expr) extends Predicate
case class Lte(lhs: Expr, rhs: Expr) extends Predicate
case class Between(originValue: Expr, min: Expr, max: Expr) extends Predicate
case class In(originValue: Expr, values: Seq[Expr]) extends Predicate
case class Or(lhs: Predicate, rhs: Predicate) extends Predicate
case class And(lhs: Predicate, rhs: Predicate) extends Predicate
case class All(predicates: Seq[Predicate]) extends Predicate
case class Any(predicates: Seq[Predicate]) extends Predicate
case class Not(p: Predicate) extends Predicate

/** A value-producing term: a field reference, a literal, or arithmetic on terms. */
sealed trait Expr {
  // --- comparison builders ---
  def eq(op: Expr): Predicate = Eq(this, op)
  def neq(op: Expr): Predicate = Neq(this, op)
  def gt(op: Expr): Predicate = Gt(this, op)
  def gte(op: Expr): Predicate = Gte(this, op)
  def lt(op: Expr): Predicate = Lt(this, op)
  def lte(op: Expr): Predicate = Lte(this, op)

  /** True when this expression lies within `bounds` = (min, max). */
  def between(bounds: (Expr, Expr)): Predicate = Between(this, bounds._1, bounds._2)

  /** True when this expression equals one of `valueSet`. */
  def in(valueSet: Seq[Expr]): Predicate = In(this, valueSet)

  // --- arithmetic builders ---
  def +(op: Expr): Expr = AdditionExpr(this, op)
  def -(op: Expr): Expr = SubtractionExpr(this, op)
  def *(op: Expr): Expr = MultiplicationExpr(this, op)
  def /(op: Expr): Expr = DivisionExpr(this, op)
  def %(op: Expr): Expr = ModulusExpr(this, op)
}

case class FieldExpr(name: String) extends Expr
case class StringExpr(v: String) extends Expr
case class IntExpr(v: Int) extends Expr
case class LongExpr(v: Long) extends Expr
case class FloatExpr(v: Float) extends Expr
case class DoubleExpr(v: Double) extends Expr
case class BoolExpr(v: Boolean) extends Expr
case class AdditionExpr(lhs: Expr, rhs: Expr) extends Expr
case class SubtractionExpr(lhs: Expr, rhs: Expr) extends Expr
case class MultiplicationExpr(lhs: Expr, rhs: Expr) extends Expr
case class DivisionExpr(lhs: Expr, rhs: Expr) extends Expr
case class ModulusExpr(lhs: Expr, rhs: Expr) extends Expr

/** Sort direction applied to a single expression. */
sealed trait Ordering
case class Asc(e: Expr) extends Ordering
case class Desc(e: Expr) extends Ordering

case class OrderBy(orderings: Seq[Ordering])
case class LimitAndSkip(limit: Int, skip: Int)

/** A query under construction; each setter returns `this` so calls chain. */
class Query() {
  var whereClause: Option[Predicate] = None
  var orderByClause: Option[OrderBy] = None
  var limitAndSkipClause: Option[LimitAndSkip] = None

  def where(p: Predicate): Query = {
    whereClause = Some(p)
    this
  }

  def orderBy(orderings: Seq[Ordering]) = {
    orderByClause = Some(OrderBy(orderings))
    this
  }

  def limitAndSkip(limit: Int, skip: Int) = {
    limitAndSkipClause = Some(LimitAndSkip(limit, skip))
    this
  }
}

/** Entry points for building queries and wrapping literal values. */
object dsl {
  def everything(): Query = new Query()
  def where(p: Predicate): Query = new Query().where(p)

  def all(predicates: Predicate*): Predicate = All(predicates)
  def any(predicates: Predicate*): Predicate = Any(predicates)

  def field(name: String): Expr = FieldExpr(name)

  // Literal wrappers, one overload per supported primitive type.
  def v(v: String): Expr = StringExpr(v)
  def v(v: Int): Expr = IntExpr(v)
  def v(v: Long): Expr = LongExpr(v)
  def v(v: Float): Expr = FloatExpr(v)
  def v(v: Double): Expr = DoubleExpr(v)
  def v(v: Boolean): Expr = BoolExpr(v)

  def orderBy(orderings: Ordering*): Query = new Query().orderBy(orderings)
  def limit(limit: Int): Query = new Query().limitAndSkip(limit, 0)
  def limitAndSkip(limit: Int, skip: Int): Query = new Query().limitAndSkip(limit, skip)
}
| davidkellis/query | src/main/scala/Query.scala | Scala | mit | 3,743 |
/** ********************************************************************************************
* Testing
* Version 0.1
*
* The primary distribution site is
*
* http://scalavcs.alanrodas.com
*
* Copyright 2014 alanrodas
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
* *********************************************************************************************/
package com.alanrodas.fronttier.parsers
import com.alanrodas.fronttier._
import com.alanrodas.fronttier.io._
import rapture.fs.FileUrl
import scala.xml.XML
/** [[ConfigParser]] for the attribute-based XML configuration format.
  *
  * Reads `fronttier.xml` from a directory; a file is recognised as this
  * format when its contents mention the `xmlattr.dtd` DTD. All values are
  * read from XML attributes (`@group`, `@name`, ...), hence the name.
  */
object XmlAttrConfigParser extends ConfigParser {
  def id: String = "xmlattr"
  def name: String = "XML Attribute"
  def fileName: String = "fronttier.xml"
  /** True when `fronttier.xml` exists under `path` and references xmlattr.dtd. */
  def existsFileAt(path : FileUrl) =
    (path / fileName exists) && (path / fileName contents).contains("xmlattr.dtd")
  /** Parses `path/fronttier.xml` into a [[Configuration]].
    * NOTE(review): assumes the file exists and is well-formed; callers are
    * presumably expected to check `existsFileAt` first — confirm.
    */
  def parseAt(path: FileUrl) = {
    val xml = XML.loadFile(path / fileName pathString)
    Configuration(
      // Top-level coordinates come from attributes on the root element.
      xml \\ "@group" text, xml \\ "@name" text, xml \\ "@version" text,
      xml \\ "repositories" \\ "repository" map { each =>
        Repository(each \\ "@type" text, each \\ "@url" text)
      },
      xml \\ "dependencies" \\ "dependency" map { each =>
        Dependency(each \\ "@group" text, each \\ "@name" text, each \\ "@version" text)
      },
      xml \\ "files" \\ "file" map { each => each \\ "@name" text }
    )
  }
}
} | alanrodas/Fronttier | core/src/main/scala/com/alanrodas/fronttier/parsers/XmlAttrConfigParser.scala | Scala | apache-2.0 | 1,863 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.ctrl.checkers
import cmwell.ctrl.utils.ProcUtil
import scala.concurrent._
import scala.concurrent.ExecutionContext.Implicits.global
/**
* Created by eli on 28/10/16.
*/
/** Checker that reports [[KafkaOk]] when a process matching "kafka.Kafka"
  * is found via [[ProcUtil.checkIfProcessRun]], and [[KafkaNotOk]] otherwise.
  * The process scan is a blocking call, so it is marked with `blocking` to
  * hint the execution context.
  */
object KafkaChecker extends Checker {
  override def check: Future[ComponentState] = Future {
    blocking {
      val processFound = ProcUtil.checkIfProcessRun("kafka.Kafka") > 0
      if (processFound) KafkaOk() else KafkaNotOk()
    }
  }
}
}
| nruppin/CM-Well | server/cmwell-controller/src/main/scala/cmwell/ctrl/checkers/KafkaChecker.scala | Scala | apache-2.0 | 1,033 |
package org.pgscala.converters
import scala.xml.Elem
/** Do not edit - generated in Builder / PGElemConverterBuilder.scala */
/** Converter between `Option[Elem]` and its PostgreSQL string form.
  *
  * `None` becomes a `null` string; a `null` or empty database value maps
  * back to `None`. Everything else is delegated to [[PGElemConverter]].
  */
object PGOptionElemConverter extends PGConverter[Option[Elem]] {
  val PGType = PGElemConverter.PGType
  def toPGString(oe: Option[Elem]): String =
    oe.map(PGElemConverter.toPGString).orNull
  def fromPGString(e: String): Option[Elem] =
    Option(e).filter(_.nonEmpty).map(PGElemConverter.fromPGString)
}
}
| melezov/pgscala | converters-scala/src/generated/scala/org/pgscala/converters/option/PGOptionElemConverter.scala | Scala | bsd-3-clause | 559 |
package mesosphere.marathon.core.launchqueue.impl
import mesosphere.marathon.Protos.MarathonTask
import mesosphere.marathon.core.base.{ Clock, ShutdownHooks }
import mesosphere.marathon.core.launchqueue.{ LaunchQueueConfig, LaunchQueueModule }
import mesosphere.marathon.core.leadership.AlwaysElectedLeadershipModule
import mesosphere.marathon.core.matcher.DummyOfferMatcherManager
import mesosphere.marathon.core.task.bus.TaskBusModule
import mesosphere.marathon.integration.setup.WaitTestSupport
import mesosphere.marathon.state.{ AppRepository, PathId }
import mesosphere.marathon.tasks.TaskFactory.CreatedTask
import mesosphere.marathon.tasks.{ TaskFactory, TaskIdUtil, TaskTracker }
import mesosphere.marathon.{ MarathonSpec, MarathonTestHelper }
import mesosphere.util.state.PersistentEntity
import org.apache.mesos.Protos.TaskID
import org.mockito.Mockito
import org.mockito.Mockito.{ when => call, _ }
import org.scalatest.{ BeforeAndAfter, GivenWhenThen }
import scala.concurrent.{ Future, Await }
import scala.concurrent.duration._
/** Integration-style test of [[LaunchQueueModule]].
  *
  * Wires the module with a mocked [[TaskTracker]], [[TaskFactory]] and
  * [[AppRepository]], then checks queue bookkeeping (list/count/add/purge)
  * and that offer matchers are (un)registered and consulted as expected.
  * `after` verifies that no unexpected mock interactions occurred.
  */
class LaunchQueueModuleTest extends MarathonSpec with BeforeAndAfter with GivenWhenThen {
  test("empty queue returns no results") {
    When("querying queue")
    val apps = taskQueue.list
    Then("no apps are returned")
    assert(apps.isEmpty)
  }
  test("An added queue item is returned in list") {
    Given("a task queue with one item")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    taskQueue.add(app)
    When("querying its contents")
    val list = taskQueue.list
    Then("we get back the added app")
    assert(list.size == 1)
    assert(list.head.app == app)
    assert(list.head.tasksLeftToLaunch == 1)
    assert(list.head.tasksLaunchedOrRunning == 0)
    assert(list.head.taskLaunchesInFlight == 0)
    verify(taskTracker).get(app.id)
  }
  test("An added queue item is reflected via count") {
    Given("a task queue with one item")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    taskQueue.add(app)
    When("querying its count")
    val count = taskQueue.count(app.id)
    Then("we get a count == 1")
    assert(count == 1)
    verify(taskTracker).get(app.id)
  }
  test("A purged queue item has a count of 0") {
    Given("a task queue with one item which is purged")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    taskQueue.add(app)
    taskQueue.purge(app.id)
    When("querying its count")
    val count = taskQueue.count(app.id)
    Then("we get a count == 0")
    assert(count == 0)
    verify(taskTracker).get(app.id)
  }
  test("A re-added queue item has a count of 1") {
    Given("a task queue with one item which is purged")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    taskQueue.add(app)
    taskQueue.purge(app.id)
    taskQueue.add(app)
    When("querying its count")
    val count = taskQueue.count(app.id)
    Then("we get a count == 1")
    assert(count == 1)
    // Two adds happened, so the tracker is consulted twice.
    verify(taskTracker, times(2)).get(app.id)
  }
  test("adding a queue item registers new offer matcher") {
    Given("An empty task tracker")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    When("Adding an app to the taskQueue")
    taskQueue.add(app)
    Then("A new offer matcher gets registered")
    // Registration is asynchronous, hence the polling wait.
    WaitTestSupport.waitUntil("registered as offer matcher", 1.second) {
      offerMatcherManager.offerMatchers.size == 1
    }
    verify(taskTracker).get(app.id)
  }
  test("purging a queue item UNregisters offer matcher") {
    Given("An app in the queue")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    taskQueue.add(app)
    When("The app is purged")
    taskQueue.purge(app.id)
    Then("No offer matchers remain registered")
    assert(offerMatcherManager.offerMatchers.isEmpty)
    verify(taskTracker).get(app.id)
  }
  test("an offer gets unsuccessfully matched against an item in the queue") {
    val offer = MarathonTestHelper.makeBasicOffer().build()
    Given("An app in the queue")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    taskQueue.add(app)
    WaitTestSupport.waitUntil("registered as offer matcher", 1.second) {
      offerMatcherManager.offerMatchers.size == 1
    }
    When("we ask for matching an offer")
    // The factory declines the offer, so no tasks may be launched.
    call(taskFactory.newTask(app, offer, Set.empty[MarathonTask])).thenReturn(None)
    val matchFuture = offerMatcherManager.offerMatchers.head.matchOffer(clock.now() + 3.seconds, offer)
    val matchedTasks = Await.result(matchFuture, 3.seconds)
    Then("the offer gets passed to the task factory and respects the answer")
    verify(taskFactory).newTask(app, offer, Set.empty[MarathonTask])
    assert(matchedTasks.offerId == offer.getId)
    assert(matchedTasks.tasks == Seq.empty)
    verify(taskTracker).get(app.id)
  }
  test("an offer gets successfully matched against an item in the queue") {
    val offer = MarathonTestHelper.makeBasicOffer().build()
    val taskId: TaskID = TaskIdUtil.newTaskId(app.id)
    val mesosTask = MarathonTestHelper.makeOneCPUTask("").setTaskId(taskId).build()
    val marathonTask = MarathonTask.newBuilder().setId(taskId.getValue).build()
    val createdTask = CreatedTask(mesosTask, marathonTask)
    Given("An app in the queue")
    call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
    call(taskFactory.newTask(app, offer, Set.empty[MarathonTask])).thenReturn(Some(createdTask))
    taskQueue.add(app)
    WaitTestSupport.waitUntil("registered as offer matcher", 1.second) {
      offerMatcherManager.offerMatchers.size == 1
    }
    When("we ask for matching an offer")
    val matchFuture = offerMatcherManager.offerMatchers.head.matchOffer(clock.now() + 3.seconds, offer)
    val matchedTasks = Await.result(matchFuture, 3.seconds)
    Then("the offer gets passed to the task factory and respects the answer")
    verify(taskFactory).newTask(app, offer, Set.empty[MarathonTask])
    assert(matchedTasks.offerId == offer.getId)
    assert(matchedTasks.tasks.map(_.taskInfo) == Seq(mesosTask))
    verify(taskTracker).get(app.id)
  }
  // ---- fixture: mocks and the module under test, rebuilt before each test ----
  private[this] val app = MarathonTestHelper.makeBasicApp().copy(id = PathId("/app"))
  private[this] var shutdownHooks: ShutdownHooks = _
  private[this] var clock: Clock = _
  private[this] var taskBusModule: TaskBusModule = _
  private[this] var offerMatcherManager: DummyOfferMatcherManager = _
  private[this] var appRepository: AppRepository = _
  private[this] var taskTracker: TaskTracker = _
  private[this] var taskFactory: TaskFactory = _
  private[this] var module: LaunchQueueModule = _
  private[this] def taskQueue = module.taskQueue
  before {
    shutdownHooks = ShutdownHooks()
    clock = Clock()
    taskBusModule = new TaskBusModule()
    offerMatcherManager = new DummyOfferMatcherManager()
    taskTracker = mock[TaskTracker]("taskTracker")
    taskFactory = mock[TaskFactory]("taskFactory")
    appRepository = mock[AppRepository]("appRepository")
    val config: LaunchQueueConfig = new LaunchQueueConfig {}
    config.afterInit()
    module = new LaunchQueueModule(
      config,
      AlwaysElectedLeadershipModule(shutdownHooks),
      clock,
      subOfferMatcherManager = offerMatcherManager,
      taskStatusObservables = taskBusModule.taskStatusObservables,
      maybeOfferReviver = None,
      appRepository,
      taskTracker,
      taskFactory
    )
  }
  after {
    // Each test must verify every interaction it caused.
    verifyNoMoreInteractions(appRepository)
    verifyNoMoreInteractions(taskTracker)
    verifyNoMoreInteractions(taskFactory)
    shutdownHooks.shutdown()
  }
}
| EasonYi/marathon | src/test/scala/mesosphere/marathon/core/launchqueue/impl/LaunchQueueModuleTest.scala | Scala | apache-2.0 | 7,565 |
package com.azavea.rasterfoundry
import geotrellis.vector._
/** Slippy-map (web map tile) coordinate math. */
object TileMath {

  /** Tile edge length in pixels. */
  val TILE_DIM = 256
  /** Number of raster bands used per tile (3; presumably RGB — confirm with callers). */
  val BAND_COUNT = 3

  /** Longitude/latitude of the upper-left corner of tile (`col`, `row`) at
    * `zoom`, using the standard slippy-map tiling scheme (2^zoom tiles per
    * axis, latitude via the inverse Gudermannian).
    */
  def getUpperLeft(zoom: Int, col: Int, row: Int): (Double, Double) = {
    val n = math.pow(2, zoom)
    val long = ((col / n) * 360.0) - 180.0
    val lat = math.toDegrees(math.atan(math.sinh(math.Pi * (1 - 2 * row / n))))
    (long, lat)
  }

  /** Geographic extent covered by tile (`col`, `row`) at `zoom`, built from
    * the upper-left corners of this tile and of its diagonal neighbour.
    */
  def getExtent(zoom: Int, col: Int, row: Int): Extent = {
    val (xmin, ymax) = getUpperLeft(zoom, col, row)
    val (xmax, ymin) = getUpperLeft(zoom, col + 1, row + 1)
    Extent(xmin, ymin, xmax, ymax)
  }
}
| kdeloach/raster-foundry-tiler | mosaic/src/main/scala/com/azavea/rasterfoundry/TileMath.scala | Scala | apache-2.0 | 555 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package cypher.cucumber.db
import java.io.{File => JFile}
import java.nio.file.{Path => JPath, Files, Paths}
import org.neo4j.graphdb.factory.{GraphDatabaseSettings, GraphDatabaseFactory}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.reflect.io.File
/** A database import script together with its inputs.
  *
  * @param script   Cypher statements (semicolon separated) to execute
  * @param params   parameter map passed to every statement
  * @param fileRoot base directory used to resolve file URLs (LOAD CSV root)
  */
case class ImportQuery(script: String, params: java.util.Map[String, Object], fileRoot: JPath)
/** Registry of known test databases and where they have been materialised.
  *
  * `archive` maps a database name to the import script/params bundled on the
  * classpath; `dbPaths` records, per name, the on-disk location filled in by
  * [[DatabaseFactory]] and read back by [[DatabaseLoader]].
  * NOTE(review): `dbPaths` is a shared mutable map with no synchronisation —
  * confirm that factories never run concurrently.
  */
object AvailableDatabase {
  final val archive = Map("cineast" -> importInto("/cypher/db/cineast/"))
  final val dbPaths : mutable.Map[String, JFile] = new mutable.HashMap
  private final val SCRIPT_FILENAME = "import.cyp"
  private final val PARAMS_FILENAME = "params.json"
  /** Loads the import script and its JSON params from classpath resources
    * under `path`; fails fast (assert) when either file is missing.
    */
  private def importInto(path: String): ImportQuery = {
    val basePath = Paths.get(getClass.getResource(path).toURI)
    val scriptPath = basePath.resolve(SCRIPT_FILENAME)
    val paramsPath = basePath.resolve(PARAMS_FILENAME)
    assert(Files.exists(scriptPath), scriptPath + " should exist")
    assert(Files.exists(paramsPath), paramsPath + " should exist")
    val script = File.apply(scriptPath.toFile).slurp()
    val content = File.apply(paramsPath.toFile).slurp()
    // NOTE(review): scala.util.parsing.json is deprecated (removed in newer
    // Scala versions); consider migrating to a maintained JSON library.
    val json = scala.util.parsing.json.JSON.parseFull(content)
    val params = json match {
      case Some(map: Map[_,_]) => map.asInstanceOf[Map[String,AnyRef]].asJava
      case _ => throw new IllegalStateException(s"Unable to parse json file containing params at $paramsPath")
    }
    ImportQuery(script, params, basePath)
  }
}
/** Builds (once) the named test database under `dbDir` and registers its
  * location in [[AvailableDatabase.dbPaths]].
  *
  * If the target directory already exists the import is skipped and only the
  * registration happens.
  */
case class DatabaseFactory(dbDir: JFile) extends ((String) => Unit) {
  override def apply(dbName: String): Unit = {
    val ImportQuery(script, params, fileRoot) = AvailableDatabase.archive(dbName)
    val dbPath = new JFile(dbDir, dbName)
    if (!dbPath.exists()) {
      // Embedded Neo4j instance, with LOAD CSV file resolution rooted at the
      // directory the import resources were loaded from.
      val graph = new GraphDatabaseFactory()
        .newEmbeddedDatabaseBuilder(dbPath)
        .setConfig(GraphDatabaseSettings.load_csv_file_url_root, fileRoot.toAbsolutePath.toString)
        .newGraphDatabase()
      // Run each non-empty statement of the script against the fresh database.
      // NOTE(review): shutdown() is not in a finally block, so a failing
      // statement leaks the embedded instance — confirm whether that matters
      // for the test harness.
      script.split(';').filter(_.trim.nonEmpty) foreach { q =>
        graph.execute(q.trim, params)
      }
      graph.shutdown()
    }
    AvailableDatabase.dbPaths += dbName -> dbPath.getAbsoluteFile
  }
}
/** Looks up a database location previously registered by [[DatabaseFactory]].
  * Throws NoSuchElementException when the name was never built/registered.
  */
object DatabaseLoader extends ((String) => JFile) {
  override def apply(dbName: String): JFile = AvailableDatabase.dbPaths(dbName)
}
| HuangLS/neo4j | community/cypher/compatibility-suite/src/test/scala/cypher/cucumber/db/DatabaseArchive.scala | Scala | apache-2.0 | 3,105 |
package me.lachlanap.oldtoby.server
import me.lachlanap.oldtoby.server.helpers.{Generators, ServerInterface}
import org.scalatest._
import org.scalatest.concurrent.ScalaFutures
/**
* Base test class for tests.
*/
/** Base class for server-facing specs.
  *
  * Resolves the server address from the ScalaTest `ConfigMap` (key "server",
  * defaulting to http://localhost:8273/) in `beforeAll`; `serverAddress` and
  * the lazily created `server` interface must therefore only be touched after
  * the suite has started.
  */
abstract class UnitSpec extends FlatSpec
with Matchers
with OptionValues
with EitherValues
with Inside
with Inspectors
with BeforeAndAfterAll
with ScalaFutures
with Generators {
  // Populated in beforeAll; None until the suite starts.
  private[this] var _serverAddress: Option[String] = None
  // Throws if accessed before beforeAll has run.
  def serverAddress = _serverAddress.get
  lazy val server = new ServerInterface(serverAddress)
  override def beforeAll(configMap: ConfigMap) = {
    _serverAddress = Some(configMap.getWithDefault[String]("server", "http://localhost:8273/"))
  }
  override def afterAll(configMap: ConfigMap) = {}
}
| thorinii/oldtoby-server | acceptance/src/test/scala/me/lachlanap/oldtoby/server/UnitSpec.scala | Scala | mit | 1,016 |
/*
* scala-swing (https://www.scala-lang.org)
*
* Copyright EPFL, Lightbend, Inc., contributors
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.swing.examples
import scala.swing._
/** Swing demo application showing the dialog facilities of scala-swing.
  *
  * Three tabs: file choosers, the simple modal message/confirmation dialogs,
  * and input/custom dialogs. Results of the modal dialogs are echoed into
  * `label` at the bottom of the frame.
  */
object Dialogs extends SimpleSwingApplication {
  import TabbedPane._
  // Shows the outcome of the last dialog interaction.
  lazy val label = new Label("No Result yet")
  lazy val tabs = new TabbedPane {
    // Tab 1: FileChooser in its open/save/custom flavours.
    pages += new Page("File", new GridBagPanel {
      grid =>
      import GridBagPanel._
      val buttonText = new TextField("Click Me")
      val c = new Constraints
      c.fill = Fill.Horizontal
      c.grid = (1, 1)
      val chooser = new FileChooser
      layout(new Button(Action("Open") {
        chooser.showOpenDialog(grid)
      })) = c
      c.grid = (1, 2)
      layout(new Button(Action("Save") {
        chooser.showSaveDialog(grid)
      })) = c
      c.grid = (1, 3)
      layout(new Button(Action("Custom") {
        // The text field supplies the approve-button caption.
        chooser.showDialog(grid, buttonText.text)
      })) = c
      c.grid = (2, 3)
      layout(new Label(" with Text ")) = c
      c.grid = (3, 3)
      c.ipadx = 50
      layout(buttonText) = c
      border = Swing.EmptyBorder(5, 5, 5, 5)
    })
    // Tab 2: message/confirmation/option dialogs; the chosen radio button
    // decides which dialog variant is shown.
    pages += new Page("Simple Modal Dialogs", new BorderPanel {
      import BorderPanel._
      val mutex = new ButtonGroup
      val ok = new RadioButton("OK (in the L&F's words)")
      val ynlf = new RadioButton("Yes/No (in the L&F's words)")
      val ynp = new RadioButton("Yes/No (in the programmer's words)")
      val yncp = new RadioButton("Yes/No/Cancel (in the programmer's words)")
      val radios = List(ok, ynlf, ynp, yncp)
      mutex.buttons ++= radios
      mutex.select(ok)
      val buttons = new BoxPanel(Orientation.Vertical) {
        contents ++= radios
      }
      layout(buttons) = Position.North
      layout(new Button(Action("Show It!") {
        import Dialog._
        mutex.selected.get match {
          case `ok` =>
            showMessage(buttons, "Eggs aren't supposed to be green.")
          case `ynlf` =>
            label.text = showConfirmation(buttons,
              "Would you like green eggs and ham?",
              "An Inane Question") match {
              case Result.Yes => "Ewww!"
              case Result.No => "Me neither!"
              case _ => "Come on -- tell me!"
            }
          case `ynp` =>
            val options = List("Yes, please",
              "No, thanks",
              "No eggs, no ham!")
            label.text = showOptions(buttons,
              "Would you like some green eggs to go with that ham?",
              "A Silly Question",
              entries = options,
              initial = 2) match {
              case Result.Yes => "You're kidding!"
              case Result.No => "I don't like them, either."
              case _ => "Come on -- 'fess up!"
            }
          case `yncp` =>
            val options = List("Yes, please",
              "No, thanks",
              "No eggs, no ham!")
            label.text = showOptions(buttons,
              message = "Would you like some green eggs to go with that ham?",
              title = "A Silly Question",
              entries = options,
              initial = 2) match {
              case Result.Yes => "Here you go: green eggs and ham!"
              case Result.No => "OK, just the ham, then."
              case Result.Cancel => "Well, I'm certainly not going to eat them!"
              case _ => "Please tell me what you want!"
            }
        }
      })) = Position.South
    })
    // Tab 3: input dialogs and hand-rolled (possibly undecorated/nested)
    // Dialog windows.
    pages += new Page("More Dialogs", new BorderPanel {
      import BorderPanel._
      val mutex = new ButtonGroup
      val pick = new RadioButton("Pick one of several choices")
      val enter = new RadioButton("Enter some text")
      val custom = new RadioButton("Custom")
      val customUndec = new RadioButton("Custom undecorated")
      val custom2 = new RadioButton("2 custom dialogs")
      val radios = List(pick, enter, custom, customUndec, custom2)
      mutex.buttons ++= radios
      mutex.select(pick)
      val buttons = new BoxPanel(Orientation.Vertical) {
        contents ++= radios
      }
      layout(buttons) = Position.North
      layout(new Button(Action("Show It!") {
        import Dialog._
        mutex.selected.get match {
          case `pick` =>
            val possibilities = List("ham", "spam", "yam")
            val s = showInput(buttons,
              "Complete the sentence:\\n\\"Green eggs and...\\"",
              "Customized Dialog",
              Message.Plain,
              Swing.EmptyIcon,
              possibilities, "ham")
            //If a string was returned, say so.
            label.text = if (s.isDefined && (s.get.length > 0))
              "Green eggs and... " + s.get + "!"
            else
              "Come on, finish the sentence!"
          case `enter` =>
            // Same as `pick`, but with free-form text entry (no choices).
            val s = showInput(buttons,
              "Complete the sentence:\\n\\"Green eggs and...\\"",
              "Customized Dialog",
              Message.Plain,
              Swing.EmptyIcon,
              Nil, "ham")
            //If a string was returned, say so.
            label.text = if (s.isDefined && (s.get.length > 0))
              "Green eggs and... " + s.get + "!"
            else
              "Come on, finish the sentence!"
          case `custom` =>
            val dialog = new Dialog(top)
            dialog.open()
            dialog.contents = Button("Close Me!") {
              dialog.close()
            }
          case `customUndec` =>
            val dialog = new Dialog with RichWindow.Undecorated
            dialog.open()
            dialog.contents = Button("Close Me!") {
              dialog.close()
            }
          case `custom2` =>
            // d1 owns d2, so closing d1 also disposes d2.
            val d1 = new Dialog
            val d2 = new Dialog(d1)
            d1.open()
            d2.open()
            d1.contents = Button("Close Me! I am the owner and will automatically close the other one") {
              d1.close()
            }
            d2.contents = Button("Close Me!") {
              d2.close()
            }
        }
      })) = Position.South
    })
  }
  // Tabs on top, result label pinned to the bottom.
  lazy val ui: Panel = new BorderPanel {
    layout(tabs) = BorderPanel.Position.Center
    layout(label) = BorderPanel.Position.South
  }
  lazy val top: Frame = new MainFrame {
    title = "Dialog Demo"
    contents = ui
  }
}
| scala/scala-swing | examples/src/main/scala/scala/swing/examples/Dialogs.scala | Scala | apache-2.0 | 6,595 |
package codebook.runtime.server
import akka.actor.{ActorRef, Props}
import codebook.runtime.protocol.Decoder
import codebook.runtime.test.AkkaTestkitSpecs2Support
import org.specs2.mutable.Specification
/** Verifies that handlers registered via `addHandlers` are actually wired
  * into the actor's state machine: an actor mixing in a handler trait for
  * the Running state must echo the "Hello" message back to the sender.
  */
class ApiHandlerSetterTest extends Specification {
  // Minimal state enumeration for the FSM under test.
  object TestStates extends Enumeration {
    val Running = Value
  }
  "addHandler" should {
    "normally success" in new AkkaTestkitSpecs2Support() {
      // Handler mixin: registers an echo handler for the Running state.
      trait TestHandler { _ : UserServiceActorBase[TestStates.Value,Int] =>
        val testHandlerAtRunning:StateFunction ={
          case Event("Hello",_) =>
            sender() ! "Hello"
            stay()
        }
        addHandlers(TestStates.Running,testHandlerAtRunning)
      }
      // Concrete actor: no real decoder/socket needed for this test.
      class UserServiceActor extends UserServiceActorBase[TestStates.Value,Int] with TestHandler {
        override def decoder: Decoder = null
        override def socket: ActorRef = null
        setupHandlers()
        startWith(TestStates.Running,0)
      }
      val act = system.actorOf(Props(new UserServiceActor))
      act ! "Hello"
      expectMsg("Hello")
    }
  }
}
| RustyRaven/CodebookRuntime | scala/src/test/scala/codebook/runtime/server/ApiHandlerSetterTest.scala | Scala | mit | 1,081 |
/*
* Copyright (c) 2014 Paul Bernard
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Spectrum Finance is based in part on:
* QuantLib. http://quantlib.org/
*
*/
package org.quantintel.ql.termstructures
import org.quantintel.ql.Settings
import org.quantintel.ql.math.interpolations.{DefaultExtraploator, Extrapolator}
import org.quantintel.ql.time.{Calendar, Date}
import org.quantintel.ql.time.daycounters.{Actual365, DayCounter}
import org.quantintel.ql.time.daycounters.Actual365Convention.ACT365F
import org.quantintel.ql.time.TimeUnit.DAYS
/**
* @author Paul Bernard
*/
/** Basic term-structure functionality (port of QuantLib's TermStructure).
  *
  * The reference date can be determined in three ways:
  *  - fixed, passed at construction time;
  *  - "moving": recomputed from the global evaluation date plus a settlement
  *    lag whenever the evaluation date changes (observer pattern);
  *  - left unset (day-counter-only constructor), for subclasses that manage
  *    it themselves.
  */
abstract class AbstractTermStructure extends TermStructure with Extrapolator {

  /** Day-count convention used to turn dates into year fractions. */
  var dayCounter : DayCounter = null
  /** False while a moving reference date is stale and must be recomputed. */
  var updated : Boolean = true
  /** Settlement lag in days (only meaningful for moving structures). */
  var settlementDays : Int = 0
  /** True when the reference date tracks the global evaluation date. */
  var moving : Boolean = false
  var m_referenceDate: Date = null
  var calendar: Calendar = null

  /** Day-counter-only constructor; the reference date is left unset. */
  protected def this(dc: DayCounter){
    this()
    this.dayCounter = dc
    this.calendar = null
    this.settlementDays = 0
    this.moving = false
    this.updated = true
    this.m_referenceDate = null
  }

  /** Term structure anchored to a fixed reference date. */
  protected def this(referenceDate: Date, calendar: Calendar, dc: DayCounter) {
    this()
    this.settlementDays = 0
    this.calendar = calendar
    this.dayCounter = dc
    this.moving = false
    this.updated = true
    this.m_referenceDate = referenceDate
  }

  /** Moving term structure: the reference date is the global evaluation date
    * advanced by the given settlement lag on `calendar`.
    * (The parameter name's typo is kept for source compatibility with any
    * named-argument callers.)
    */
  def this(settlememntDays: Int, calendar: Calendar, dc: DayCounter){
    this()
    // BUG FIX: the original read `this.settlementDays = settlementDays`,
    // which assigned the field to itself and silently discarded the
    // constructor argument, leaving the settlement lag at 0.
    this.settlementDays = settlememntDays
    this.calendar = calendar
    this.dayCounter = dc
    this.moving = true
    this.updated = false
    val today : Date = new Settings().evaluationDate
    // Re-anchor whenever the evaluation date changes.
    today.addObserver(this)
    this.m_referenceDate = calendar.advance(today, settlementDays, DAYS)
  }

  protected def this(settlementDays: Int, calendar: Calendar){
    this(settlementDays, calendar, Actual365(ACT365F))
  }

  /** Year fraction between the reference date and `date` under `dayCounter`. */
  override def timeFromReference(date: Date): Double = {
    dayCounter.yearFraction(referenceDate, date)
  }

  /** Latest time (years from the reference date) covered by this structure. */
  override def maxTime() : Double = timeFromReference(maxDate())

  /** The reference date, recomputed for moving structures when stale. */
  override def referenceDate() : Date = {
    if (!this.updated){
      // NOTE(review): `updated` is not set back to true here, so a moving
      // structure recomputes on every call until update() resets it —
      // confirm whether that is intended.
      val today: Date = new Settings().evaluationDate
      m_referenceDate = calendar.advance(today, settlementDays, DAYS)
    }
    m_referenceDate
  }

  /** Observer callback: invalidates a moving reference date and propagates. */
  override def update() {
    if(this.moving){
      updated = false
    }
    notifyObservers()
  }

  // Extrapolation flags are delegated to a default implementation.
  private val delegatedExtrapolator : DefaultExtraploator = new DefaultExtraploator()

  override def allowsExtrapolation(): Boolean = {
    delegatedExtrapolator.allowsExtrapolation()
  }

  override def disableExtrapolation(): Unit = {
    delegatedExtrapolator.disableExtrapolation()
  }

  override def enableExtrapolation(): Unit = {
    delegatedExtrapolator.enableExtrapolation()
  }
}
/** Companion holding shared message constants. */
object AbstractTermStructure {
  // Error message for abstract operations subclasses must implement.
  val THIS_METHOD_MUST_BE_OVERRIDDEN = "This method must be overridden"
}
| quantintel/spectrum | financial/src/main/scala/org/quantintel/ql/termstructures/AbstractTermStructure.scala | Scala | apache-2.0 | 3,375 |
package scala.in.programming.abstract_internal_control
/**
 * Demonstrates call-by-name parameters.
 * Not to be confused with named parameters.
 *
 * @author loustler
 * @since 02/26/2017 18:14
 */
object ByNameParameter {

  val assertionEnabled: Boolean = true

  /** Assertion taking an explicit nullary function.
    * The check closes over [[assertionEnabled]]; the predicate is evaluated
    * only when assertions are enabled (short-circuit `&&`).
    */
  def myAssert(predicate: () => Boolean) = {
    val failed = assertionEnabled && !predicate()
    if (failed) throw new AssertionError
  }

  /** Same as [[myAssert]] but with a by-name parameter, so call sites can
    * pass a plain boolean expression that is still evaluated lazily.
    */
  def myAssert2(predicate: => Boolean) = {
    if (assertionEnabled) {
      if (!predicate) throw new AssertionError
    }
  }

  /** Strict variant: unlike [[myAssert2]], the argument expression is always
    * evaluated at the call site before this method runs. With a plain
    * `Boolean` parameter, `myAssert3(x / 0 == 0)` throws ArithmeticException
    * even when assertions are disabled, whereas the by-name version would
    * never evaluate it. In other words [[myAssert2]] is lazy, this is strict.
    */
  def myAssert3(predicate: Boolean) = {
    if (assertionEnabled) {
      if (!predicate) throw new AssertionError
    }
  }

  def main(args: Array[String]): Unit = {
    myAssert(() => 5 > 3) // awkward: the caller must wrap the condition in a function
    myAssert2(5 > 3)      // reads naturally thanks to the by-name parameter
  }
}
| loustler/scala | src/main/scala/scala/in/programming/abstract_internal_control/ByNameParameter.scala | Scala | mit | 1,532 |
package ru.maizy.ambient7.mt8057agent.tests
/**
* Copyright (c) Nikita Kovaliov, maizy.ru, 2015-2017
* See LICENSE.txt for details.
*/
import java.text.SimpleDateFormat
import java.util.Date
import org.scalatest.{ FlatSpec, Matchers }
import ru.maizy.ambient7.mt8057agent.{ Event, Writer }
/** Common base for plain specs: FlatSpec style with should-matchers. */
abstract class AbstractBaseSpec extends FlatSpec with Matchers
/** Helpers for exercising [[Writer]] implementations by capturing what they
  * print to stdout and stderr.
  */
trait WritersTestUtils {
  // Fixed timestamp in nanoseconds, shared by the writer specs.
  val time = 1445785424583000000L
  val timeString = time.toString
  // Same instant rendered as "yyyy-MM-dd HH:mm:ss" (Date takes milliseconds).
  val formatedTime = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date(time / 1000000))
  /** Console output (stdout, stderr) produced while the writer handles `event`. */
  def checkWriterEvent(writer: Writer, event: Event): (String, String) =
    checkWriterOutput(writer)(_.write(event))
  /** Console output (stdout, stderr) produced by the writer's init hook. */
  def checkWriterInit(writer: Writer): (String, String) =
    checkWriterOutput(writer)(_.onInit())
  // Runs `f` with stdout/stderr redirected into buffers and returns both
  // captured streams as strings.
  private def checkWriterOutput(writer: Writer)(f: Writer => Unit): (String, String) = {
    val capturedOut = new java.io.ByteArrayOutputStream
    val capturedErr = new java.io.ByteArrayOutputStream
    Console.withOut(capturedOut) {
      Console.withErr(capturedErr) {
        f(writer)
      }
    }
    (capturedOut.toString, capturedErr.toString)
  }
}
| maizy/ambient7 | mt8057-agent/src/test/scala/ru/maizy/ambient7/mt8057agent/tests/AbstractBaseSpec.scala | Scala | apache-2.0 | 1,097 |
package java.util.regex
import scala.language.implicitConversions
import scala.annotation.switch
import scala.scalajs.js
/** Partial reimplementation of `java.util.regex.Matcher` on top of the
  * JavaScript `RegExp` engine (Scala.js).
  *
  * Mutable state falls into three groups, flagged below: configuration
  * (compiled regexp and input string), the last match result, and the
  * append position used by the replacement methods. Iteration relies on the
  * JS engine's `lastIndex` cursor on `regexp`.
  */
final class Matcher private[regex] (
    private var pattern0: Pattern, private var input0: CharSequence,
    private var regionStart0: Int, private var regionEnd0: Int)
    extends AnyRef with MatchResult {
  import Matcher._
  def pattern(): Pattern = pattern0
  // Configuration (updated manually)
  private var regexp = new js.RegExp(pattern0.jspattern, pattern0.jsflags)
  private var inputstr = input0.subSequence(regionStart0, regionEnd0).toString
  // Match result (updated by successful matches)
  private var lastMatch: js.RegExp.ExecResult = null
  private var lastMatchIsValid = false
  private var canStillFind = true
  // Append state (updated by replacement methods)
  private var appendPos: Int = 0
  // Lookup methods
  /** Attempts to match the entire region: finds the first match and rejects
    * it unless it spans the whole input.
    */
  def matches(): Boolean = {
    reset()
    find()
    // TODO this check is wrong with non-greedy patterns
    // Further, it might be wrong to just use ^$ delimiters for two reasons:
    // - They might already be there
    // - They might not behave as expected when newline characters are present
    if ((lastMatch ne null) && (start != 0 || end != inputstr.length))
      reset()
    lastMatch ne null
  }
  /** Like matches(), but only requires the match to start at position 0. */
  def lookingAt(): Boolean = {
    reset()
    find()
    if ((lastMatch ne null) && (start != 0))
      reset()
    lastMatch ne null
  }
  /** Finds the next match from the engine's current lastIndex cursor. */
  def find(): Boolean = if (canStillFind) {
    lastMatchIsValid = true
    lastMatch = regexp.exec(inputstr)
    if (lastMatch ne null) {
      // An empty match would otherwise loop forever; nudge the cursor.
      if (lastMatch(0).get.isEmpty)
        regexp.lastIndex += 1
    } else {
      canStillFind = false
    }
    lastMatch ne null
  } else false
  def find(start: Int): Boolean = {
    reset()
    regexp.lastIndex = start
    find()
  }
  // Replace methods
  /** Appends input up to the current match, then `replacement` with `$n`
    * group references expanded and `\`-escapes resolved.
    */
  def appendReplacement(sb: StringBuffer, replacement: String): Matcher = {
    sb.append(inputstr.substring(appendPos, start))
    @inline def isDigit(c: Char) = c >= '0' && c <= '9'
    val len = replacement.length
    var i = 0
    while (i < len) {
      replacement.charAt(i) match {
        case '$' =>
          // Consume the longest run of digits as the group number.
          i += 1
          val j = i
          while (i < len && isDigit(replacement.charAt(i)))
            i += 1
          val group = Integer.parseInt(replacement.substring(j, i))
          sb.append(this.group(group))
        case '\\\\' =>
          // Escaped character: copy the next char verbatim.
          i += 1
          if (i < len)
            sb.append(replacement.charAt(i))
          i += 1
        case c =>
          sb.append(c)
          i += 1
      }
    }
    appendPos = end
    this
  }
  /** Appends the remainder of the input after the last match. */
  def appendTail(sb: StringBuffer): StringBuffer = {
    sb.append(inputstr.substring(appendPos))
    appendPos = inputstr.length
    sb
  }
  def replaceFirst(replacement: String): String = {
    reset()
    if (find()) {
      val sb = new StringBuffer
      appendReplacement(sb, replacement)
      appendTail(sb)
      sb.toString
    } else {
      inputstr
    }
  }
  def replaceAll(replacement: String): String = {
    reset()
    val sb = new StringBuffer
    while (find()) {
      appendReplacement(sb, replacement)
    }
    appendTail(sb)
    sb.toString
  }
  // Reset methods
  /** Clears match, iteration and append state; rewinds the engine cursor. */
  def reset(): Matcher = {
    regexp.lastIndex = 0
    lastMatch = null
    lastMatchIsValid = false
    canStillFind = true
    appendPos = 0
    this
  }
  /** Resets against new input, dropping any region restriction. */
  def reset(input: CharSequence): Matcher = {
    regionStart0 = 0
    regionEnd0 = input.length()
    input0 = input
    inputstr = input0.toString
    reset()
  }
  /** Swaps in a new pattern while preserving the current cursor position. */
  def usePattern(pattern: Pattern): Matcher = {
    val prevLastIndex = regexp.lastIndex
    pattern0 = pattern
    regexp = new js.RegExp(pattern.jspattern, pattern.jsflags)
    regexp.lastIndex = prevLastIndex
    lastMatch = null
    this
  }
  // Query state methods - implementation of MatchResult
  private def ensureLastMatch: js.RegExp.ExecResult = {
    if (lastMatch == null)
      throw new IllegalStateException("No match available")
    lastMatch
  }
  def groupCount(): Int = ensureLastMatch.length-1
  def start(): Int = ensureLastMatch.index
  def end(): Int = start() + group().length
  def group(): String = ensureLastMatch(0).get
  def start(group: Int): Int = {
    if (group == 0) start()
    else {
      val last = ensureLastMatch
      // not provided by JS RegExp, so we make up something that at least
      // will have some sound behavior from scala.util.matching.Regex
      last(group).fold(-1) {
        groupStr => inputstr.indexOf(groupStr, last.index)
      }
    }
  }
  def end(group: Int): Int = {
    val s = start(group)
    if (s == -1) -1
    else s + this.group(group).length
  }
  def group(group: Int): String = ensureLastMatch(group).orNull
  // Seal the state
  /** Immutable snapshot of the current match state. */
  def toMatchResult(): MatchResult = new SealedResult(inputstr, lastMatch)
  // Other query state methods
  def hitEnd(): Boolean =
    lastMatchIsValid && (lastMatch == null || end() == inputstr.length)
  //def requireEnd(): Boolean // I don't understand the spec
  // Stub methods for region management
  def regionStart(): Int = regionStart0
  def regionEnd(): Int = regionEnd0
  // Regions are handled by constructing a fresh Matcher over the sub-region.
  def region(start: Int, end: Int): Matcher =
    new Matcher(pattern0, input0, start, end)
  def hasTransparentBounds(): Boolean = false
  //def useTransparentBounds(b: Boolean): Matcher
  def hasAnchoringBounds(): Boolean = true
  //def useAnchoringBounds(b: Boolean): Matcher
}
/** Companion utilities for [[Matcher]]. */
object Matcher {
  /** Escapes `\` and `$` in `s` so the result can be used as a literal
    * replacement string with `appendReplacement`/`replaceAll`.
    */
  def quoteReplacement(s: String): String = {
    // Accumulate into a StringBuilder: the original used immutable-String
    // concatenation in a loop, which is O(n^2) in the input length.
    val result = new StringBuilder
    var i = 0
    while (i < s.length) {
      val c = s.charAt(i)
      (c: @switch) match {
        case '\\\\' | '$' => result.append('\\\\').append(c)
        case _         => result.append(c)
      }
      i += 1
    }
    result.toString
  }

  /** Immutable snapshot of a match, returned by [[Matcher.toMatchResult]].
    * Mirrors the query methods of [[Matcher]] on a frozen ExecResult.
    */
  private final class SealedResult(inputstr: String,
      lastMatch: js.RegExp.ExecResult) extends MatchResult {
    def groupCount(): Int = ensureLastMatch.length-1
    def start(): Int = ensureLastMatch.index
    def end(): Int = start() + group().length
    def group(): String = ensureLastMatch(0).get
    def start(group: Int): Int = {
      if (group == 0) start()
      else {
        val last = ensureLastMatch
        // not provided by JS RegExp, so we make up something that at least
        // will have some sound behavior from scala.util.matching.Regex
        last(group).fold(-1) {
          groupStr => inputstr.indexOf(groupStr, last.index)
        }
      }
    }
    def end(group: Int): Int = {
      val s = start(group)
      if (s == -1) -1
      else s + this.group(group).length
    }
    def group(group: Int): String = ensureLastMatch(group).orNull
    private def ensureLastMatch: js.RegExp.ExecResult = {
      if (lastMatch == null)
        throw new IllegalStateException("No match available")
      lastMatch
    }
  }
}
| matthughes/scala-js | javalib/src/main/scala/java/util/regex/Matcher.scala | Scala | bsd-3-clause | 6,746 |
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.doperations.spark.wrappers.estimators
import io.deepsense.commons.utils.Version
import io.deepsense.deeplang.DOperation._
import io.deepsense.deeplang.documentation.SparkOperationDocumentation
import io.deepsense.deeplang.doperables.spark.wrappers.estimators.KMeans
import io.deepsense.deeplang.doperations.EstimatorAsFactory
/**
 * Operation that creates a k-means clustering estimator.
 *
 * Exposes the [[KMeans]] estimator as a factory operation and contributes
 * the location of the relevant Spark ML guide section.
 */
class CreateKMeans extends EstimatorAsFactory[KMeans]
    with SparkOperationDocumentation {

  override val id: Id = "2ecdd789-695d-4efa-98ad-63c80ae70f71"
  override val name: String = "K-Means"
  override val since: Version = Version(1, 0, 0)

  override val description: String =
    "Creates a k-means model. Note: Trained k-means model does not have any parameters."

  // Anchor within the Spark ML documentation guide for this algorithm.
  override protected[this] val docsGuideLocation =
    Some("ml-clustering.html#k-means")
}
| deepsense-io/seahorse-workflow-executor | deeplang/src/main/scala/io/deepsense/deeplang/doperations/spark/wrappers/estimators/CreateKMeans.scala | Scala | apache-2.0 | 1,422 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.hadoop
import com.twitter.zipkin.gen.{BinaryAnnotation, Constants, SpanServiceName, Annotation}
import com.twitter.scalding.{Tsv, DefaultDateRangeJob, Job, Args}
import com.twitter.zipkin.hadoop.sources.{Util, DailyPreprocessedSpanSource}
import java.nio.ByteBuffer
/**
* Finds traces that have 500 Internal Service Errors and finds the spans in those traces that have retries or timeouts
*/
/**
 * Scalding job that finds traces containing a "500 Internal Server Error"
 * binary annotation and, within those traces, the spans that carry finagle
 * retry or timeout annotations. Emits one TSV row per trace with the list
 * of services involved.
 */
class WhaleReport(args: Args) extends Job(args) with DefaultDateRangeJob {

  // Annotation values that mark a span as retried or timed out.
  val ERRORS = List("finagle.timeout", "finagle.retry")

  // One row per span: trace id, span id, service name, and both annotation lists.
  val spanInfo = DailyPreprocessedSpanSource()
    .read
    .mapTo(0 ->('trace_id, 'id, 'service, 'annotations, 'binary_annotations))
    { s: SpanServiceName => (s.trace_id, s.id, s.service_name, s.annotations.toList, s.binary_annotations.toList)
    }

  // Trace ids for which at least one span carries the 500-error binary annotation.
  val errorTraces = spanInfo
    .project('trace_id, 'binary_annotations)
    .filter('binary_annotations) {
      bal: List[BinaryAnnotation] =>
        bal.exists({ ba: BinaryAnnotation => {
          ba != null && ba.value != null && cleanString(ba.value) == WhaleReport.ERROR_MESSAGE
        }
        })
    }
    .project('trace_id)
    .rename('trace_id -> 'trace_id_1)

  // Spans with a retry/timeout annotation, restricted to the error traces,
  // grouped per trace into the list of services involved, written as TSV.
  val filtered = spanInfo
    .flatMap('annotations -> 'error) { al : List[Annotation] => { al.find { a : Annotation => ERRORS.contains(a.value) } } }
    .joinWithSmaller('trace_id -> 'trace_id_1, errorTraces)
    .discard('trace_id_1)
    .groupBy('trace_id) { _.toList[String]('service -> 'serviceList) }
    .write(Tsv(args("output")))

  // When converting from ByteBuffer to String some NUL ('\u0000') characters
  // seem to be passed along, so we strip them out. Uses a linear String.filter
  // instead of the previous quadratic `result += char` loop.
  private def cleanString(bb : ByteBuffer) : String =
    new String(Util.getArrayFromBuffer(bb)).filter(_ != '\u0000')
}
object WhaleReport {
  // HTTP status line used in binary annotations to flag failed server responses.
  val ERROR_MESSAGE = "500 Internal Server Error"
} | davidbernick/zipkin | zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/WhaleReport.scala | Scala | apache-2.0 | 2,569 |
package io.getquill.context.jdbc.oracle
import io.getquill.Spec
// Integration checks for the Oracle JDBC context: probing, non-batched
// actions, transaction semantics, and insert/update RETURNING clauses.
class JdbcContextSpec extends Spec {

  import testContext._

  // Entity with explicitly remapped column names (note that `s` is mapped
  // to column "a", diverging from the default naming).
  val badEntity = quote {
    querySchema[TestEntity]("TestEntity", _.s -> "a", _.i -> "i", _.l -> "l", _.o -> "o")
  }

  "probes sqls" in {
    // `probe` validates a statement against the database without running it.
    val p = testContext.probe("DELETE FROM TestEntity")
  }

  "run non-batched action" in {
    val insert = quote {
      qr1.insert(_.i -> lift(1))
    }
    // A single-row insert reports exactly one affected row.
    testContext.run(insert) mustEqual 1
  }

  "provides transaction support" - {
    "success" in {
      testContext.run(qr1.delete)
      testContext.transaction {
        testContext.run(qr1.insert(_.i -> 33))
      }
      // The committed insert is visible after the transaction block.
      testContext.run(qr1).map(_.i) mustEqual List(33)
    }
    "failure" in {
      testContext.run(qr1.delete)
      // An exception escaping the block must roll the insert back.
      intercept[IllegalStateException] {
        testContext.transaction {
          testContext.run(qr1.insert(_.i -> 33))
          throw new IllegalStateException
        }
      }
      testContext.run(qr1).isEmpty mustEqual true
    }
    "nested" in {
      testContext.run(qr1.delete)
      // Nested transaction blocks must commit as a single unit.
      testContext.transaction {
        testContext.transaction {
          testContext.run(qr1.insert(_.i -> 33))
        }
      }
      testContext.run(qr1).map(_.i) mustEqual List(33)
    }
    "prepare" in {
      // prepareParams reports the bound parameters as rendered literals.
      testContext.prepareParams(
        "select * from Person where name=? and age > ?", (ps, _) => (List("Sarah", 127), ps)
      ) mustEqual List("127", "'Sarah'")
    }
  }

  "insert returning" - {
    "with single column table" in {
      val inserted = testContext.run {
        qr4.insert(lift(TestEntity4(0))).returning(_.i)
      }
      // The returned key must identify the inserted row.
      testContext.run(qr4.filter(_.i == lift(inserted))).head.i mustBe inserted
    }
    "with multiple columns" in {
      testContext.run(qr1.delete)
      val inserted = testContext.run {
        qr1.insert(lift(TestEntity("foo", 1, 18L, Some(123), true))).returning(r => (r.i, r.s, r.o))
      }
      (1, "foo", Some(123)) mustBe inserted
    }
    "with multiple columns - case class" in {
      case class Return(id: Int, str: String, opt: Option[Int])
      testContext.run(qr1.delete)
      val inserted = testContext.run {
        qr1.insert(lift(TestEntity("foo", 1, 18L, Some(123), true))).returning(r => Return(r.i, r.s, r.o))
      }
      Return(1, "foo", Some(123)) mustBe inserted
    }
  }

  // This currently does not work with Oracle which needs a RETURNING clause as well as
  // explicit specification of which variables it is returning e.g:
  // Update MyTable Set Col1 = Value where primary key filters returning column1,column2... into variable1,variable2...
  "update returning" ignore {
    "with single column table" in {
      testContext.run(qr4.insert(lift(TestEntity4(8))))
      val updated = testContext.run {
        qr4.update(lift(TestEntity4(0))).returning(_.i)
      }
      testContext.run(qr4.filter(_.i == lift(updated))).head.i mustBe updated
    }
    "with multiple columns" in {
      testContext.run(qr1.delete)
      testContext.run(qr1.insert(lift(TestEntity("baz", 6, 42L, Some(456), true))))
      val updated = testContext.run {
        qr1.update(lift(TestEntity("foo", 1, 18L, Some(123), true))).returning(r => (r.i, r.s, r.o))
      }
      (1, "foo", Some(123)) mustBe updated
    }
    "with multiple columns - case class" in {
      case class Return(id: Int, str: String, opt: Option[Int])
      testContext.run(qr1.delete)
      testContext.run(qr1.insert(lift(TestEntity("baz", 6, 42L, Some(456), true))))
      val updated = testContext.run {
        qr1.update(lift(TestEntity("foo", 1, 18L, Some(123), true))).returning(r => Return(r.i, r.s, r.o))
      }
      Return(1, "foo", Some(123)) mustBe updated
    }
  }
}
| getquill/quill | quill-jdbc/src/test/scala/io/getquill/context/jdbc/oracle/JdbcContextSpec.scala | Scala | apache-2.0 | 3,700 |
package mesosphere.marathon.core.task.tracker.impl
import akka.actor.{ Actor, ActorLogging, Cancellable, Props }
import mesosphere.marathon.MarathonSchedulerDriverHolder
import mesosphere.marathon.tasks.TaskTracker
import mesosphere.mesos.protos.TaskID
import scala.concurrent.duration._
private[tracker] object KillOverdueStagedTasksActor {
  /** Props for the actor that periodically kills overdue staged tasks. */
  def props(taskTracker: TaskTracker, driverHolder: MarathonSchedulerDriverHolder): Props = {
    Props(new KillOverdueStagedTasksActor(taskTracker, driverHolder))
  }

  // Self-message that triggers one scan for overdue staged tasks.
  private[tracker] case object Check
}
/**
 * Periodically asks the task tracker for tasks that have been staged for too
 * long and instructs the Mesos driver to kill them.
 *
 * A recurring [[KillOverdueStagedTasksActor.Check]] tick is scheduled in
 * `preStart` (first after 30s, then every 5s) and cancelled in `postStop`.
 */
private class KillOverdueStagedTasksActor(taskTracker: TaskTracker, driverHolder: MarathonSchedulerDriverHolder)
    extends Actor with ActorLogging {

  // Handle of the recurring tick. None until preStart has run, so postStop
  // cannot NPE if the actor stops before initialization completed (the old
  // null-initialized `var checkTicker: Cancellable = _` would have).
  private[this] var checkTicker: Option[Cancellable] = None

  override def preStart(): Unit = {
    import context.dispatcher
    checkTicker = Some(
      context.system.scheduler.schedule(30.seconds, 5.seconds, self, KillOverdueStagedTasksActor.Check)
    )
  }

  override def postStop(): Unit = {
    // Stop the tick; safe even if preStart never completed.
    checkTicker.foreach(_.cancel())
    checkTicker = None
  }

  override def receive: Receive = {
    case KillOverdueStagedTasksActor.Check =>
      log.debug("checking for overdue tasks")
      // Without a registered driver there is no way to kill tasks; skip quietly.
      driverHolder.driver.foreach { driver =>
        taskTracker.checkStagedTasks.foreach { overdueTask =>
          import mesosphere.mesos.protos.Implicits._
          log.warning("Killing overdue task '{}'", overdueTask.getId)
          driver.killTask(TaskID(overdueTask.getId))
        }
      }
  }
}
| MrMarvin/marathon | src/main/scala/mesosphere/marathon/core/task/tracker/impl/KillOverdueStagedTasksActor.scala | Scala | apache-2.0 | 1,430 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and returns a sample of code snippets matching specific criteria, giving a quick overview of the dataset's contents.