code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package blended.activemq.client
import java.util.UUID
import akka.actor.ActorSystem
import akka.pattern.after
import blended.container.context.api.ContainerContext
import blended.jms.utils.{IdAwareConnectionFactory, JmsDestination}
import blended.streams.FlowHeaderConfig
import blended.streams.jms.{JmsEnvelopeHeader, JmsProducerSettings, JmsStreamSupport, MessageDestinationResolver}
import blended.streams.message.{FlowEnvelope, FlowEnvelopeLogger}
import blended.streams.processor.Collector
import blended.util.logging.{LogLevel, Logger}
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success}
class RoundtripConnectionVerifier(
  probeMsg : String => FlowEnvelope,
  verify : FlowEnvelope => Boolean,
  requestDest : JmsDestination,
  responseDest : JmsDestination,
  retryInterval : FiniteDuration = 1.second,
  receiveTimeout : FiniteDuration = 250.millis,
  timeToLive : FiniteDuration = 10.seconds
)(implicit system : ActorSystem) extends ConnectionVerifier
  with JmsStreamSupport
  with JmsEnvelopeHeader {

  private val log : Logger = Logger[RoundtripConnectionVerifier]

  // Completed exactly once with the overall verification outcome. All completions go
  // through trySuccess/tryFailure so that racing probe attempts cannot trigger an
  // IllegalStateException from a second complete().
  private val verified : Promise[Boolean] = Promise[Boolean]()

  /**
   * Start the verification roundtrip: send a probe message to the request destination
   * and wait for a correlated response on the response destination.
   *
   * @return a future with the verification result. If the probe itself throws before a
   *         retry could be scheduled, the future is failed instead of hanging forever.
   */
  override def verifyConnection(ctCtxt : ContainerContext)(cf: IdAwareConnectionFactory)(implicit eCtxt: ExecutionContext): Future[Boolean] = {
    Future {
      probe(ctCtxt)(cf)
    }.onComplete {
      // An exception escaping probe() means no retry was scheduled and the promise
      // would never complete -- fail it so callers are not stuck waiting.
      case Failure(t) => verified.tryFailure(t)
      case _ => // probe either completed the promise or scheduled a retry
    }
    verified.future
  }

  /**
   * Consume messages from [[responseDest]] carrying the probe's correlation id and
   * complete the promise with the result of [[verify]]. Schedules a retry when no
   * response arrives within [[receiveTimeout]] or receiving fails.
   */
  protected def waitForResponse(ctCtxt : ContainerContext)(cf : IdAwareConnectionFactory, id : String) : Unit = {
    implicit val eCtxt : ExecutionContext = system.dispatcher
    val headerConfig : FlowHeaderConfig = FlowHeaderConfig.create(ctCtxt)
    val collector : Collector[FlowEnvelope] = receiveMessages(
      headerCfg = headerConfig,
      cf = cf,
      dest = responseDest,
      log = FlowEnvelopeLogger.create(headerConfig, log),
      // Only pick up the response correlated to this particular probe message.
      selector = Some(s"JMSCorrelationID='$id'"),
      completeOn = Some(_.nonEmpty),
      timeout = Some(receiveTimeout),
      ackTimeout = 1.second
    )
    collector.result.onComplete {
      case Success(l) => l match {
        case Nil =>
          log.warn(s"No response received to verify connection [${cf.vendor}:${cf.provider}] with id [$id]")
          scheduleRetry(ctCtxt)(cf)
        case h :: _ =>
          val result : Boolean = verify(h)
          log.info(s"Verification result [$id] for client connection [${cf.vendor}:${cf.provider}] is [$result]")
          // trySuccess: a racing retry may already have completed the promise.
          verified.trySuccess(result)
      }
      case Failure(t) =>
        log.warn(s"Failed to receive verification response [$id] to verify connection [${cf.vendor}:${cf.provider}] : [${t.getMessage()}]")
        scheduleRetry(ctCtxt)(cf)
    }
  }

  /**
   * Send a single probe message (correlation id = random UUID, reply-to = response
   * destination) to [[requestDest]], then wait for the correlated response. A failed
   * send schedules a retry instead of giving up.
   */
  protected def probe(ctCtxt : ContainerContext)(cf: IdAwareConnectionFactory) : Unit = {
    val headerConfig : FlowHeaderConfig = FlowHeaderConfig.create(ctCtxt)
    val envLogger : FlowEnvelopeLogger = FlowEnvelopeLogger.create(headerConfig, log)
    val id : String = UUID.randomUUID().toString()
    val probeEnv : FlowEnvelope = probeMsg(id)
      .withHeader(corrIdHeader(headerConfig.prefix), id).get
      .withHeader(replyToHeader(headerConfig.prefix), responseDest.asString).get
    val pSettings : JmsProducerSettings = JmsProducerSettings(
      log = envLogger,
      headerCfg = headerConfig,
      connectionFactory = cf,
      jmsDestination = Some(requestDest),
      timeToLive = Some(timeToLive),
      destinationResolver = s => new MessageDestinationResolver(s),
      logLevel = _ => LogLevel.Debug
    )
    log.info(s"Running verification probe for connection [${cf.vendor}:${cf.provider}] with id [$id]")
    sendMessages(pSettings, envLogger, timeToLive * 2, probeEnv) match {
      case Success(s) =>
        log.info(s"Request message [$id] sent successfully to [${requestDest.asString}]")
        s.shutdown()
        waitForResponse(ctCtxt)(cf, id)
      case Failure(t) =>
        log.debug(s"Failed to send verification request [$id] to verify connection [${cf.vendor}:${cf.provider}] : [${t.getMessage()}]")
        scheduleRetry(ctCtxt)(cf)
    }
  }

  /** Run another probe after [[retryInterval]]; a crash inside the retried probe fails the promise. */
  private def scheduleRetry(ctCtxt : ContainerContext)(cf : IdAwareConnectionFactory) : Unit = {
    implicit val eCtxt : ExecutionContext = system.dispatcher
    after[Unit](retryInterval, system.scheduler) {
      log.debug(s"Scheduling retry to verify connection [${cf.vendor}:${cf.provider}] in [$retryInterval]")
      Future { probe(ctCtxt)(cf) }
    }.failed.foreach { t =>
      // Without this an exception thrown by a retried probe would leave the
      // verification future uncompleted forever.
      verified.tryFailure(t)
    }
  }
}
| woq-blended/blended | blended.activemq.client/src/main/scala/blended/activemq/client/RoundtripConnectionVerifier.scala | Scala | apache-2.0 | 4,487 |
/* Copyright 2017-19, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.implicits.ops
import org.platanios.tensorflow.api.ops.UntypedOp
trait ControlFlowImplicits {
  /** Enrichment adding control-flow-context queries to any [[UntypedOp]]. */
  implicit class ControlFlowOps(val op: UntypedOp) {
    /** Returns `true` if the provided op is within a cond statement. */
    def isInCond: Boolean =
      op.controlFlowContext.exists(_.condContext.isDefined)

    /** Returns `true` if the provided op is within a while loop statement. */
    def isInWhileLoop: Boolean =
      op.controlFlowContext.exists(_.whileLoopContext().isDefined)

    /** Returns `true` if the provided op is within an XLA control flow context. */
    def isInXLAContext: Boolean = {
      // The "_XlaCompile" attribute may simply be absent, which surfaces as an
      // IllegalArgumentException; treat that as "not compiled for XLA".
      val compiledForXla =
        try op.booleanAttribute("_XlaCompile")
        catch { case _: IllegalArgumentException => false }
      compiledForXla || op.controlFlowContext.exists(_.xlaContext.isDefined)
    }
  }
}
| eaplatanios/tensorflow_scala | modules/api/src/main/scala/org/platanios/tensorflow/api/implicits/ops/ControlFlowImplicits.scala | Scala | apache-2.0 | 1,559 |
package com.github.simy4.xpath.scala.xpath
/** Mixin enabling the `xpath"..."` string-interpolator syntax by implicitly
  * wrapping a [[StringContext]] into an [[XPathLiteral]]. */
trait ToXPathLiteral {
  implicit def toXPathLiteral(sc: StringContext): XPathLiteral = new XPathLiteral(sc)
}
| SimY4/xpath-to-xml | xpath-to-xml-scala/src/main/scala-2-/com/github/simy4/xpath/scala/xpath/ToXPathLiteral.scala | Scala | apache-2.0 | 155 |
package org.vitrivr.adampro.data.index
import org.apache.spark.sql.{DataFrame, SaveMode}
import org.vitrivr.adampro.config.AttributeNames
import org.vitrivr.adampro.data.entity.Entity.AttributeName
import org.vitrivr.adampro.distribution.partitioning.{PartitionMode, PartitionerChoice}
import org.vitrivr.adampro.distribution.partitioning.partitioner._
import org.vitrivr.adampro.process.SharedComponentContext
import org.vitrivr.adampro.utils.Logging
import org.vitrivr.adampro.utils.exception.GeneralAdamException
import scala.util.{Failure, Success, Try}
/**
* ADAMpro
*
* Ivan Giangreco
* July 2016
*/
object IndexPartitioner extends Logging {
  /**
   * Partitions the index data.
   *
   * @param index       index to repartition
   * @param nPartitions number of partitions
   * @param join        other dataframes to join on, on which the partitioning is performed
   * @param attribute   columns to partition on, if not specified the primary key is used
   * @param mode        partition mode (create new index, temporary index, or replace existing data)
   * @param partitioner which partitioner to use
   * @param options     options for partitioner; see each partitioner for details
   *                    (NOTE(review): not consumed in this method -- kept for API compatibility)
   * @return the resulting index in Success, or Failure on ANY error (partitioning,
   *         catalog or storage); previously write errors were thrown instead of
   *         being returned through the Try, which was inconsistent.
   */
  def apply(index: Index, nPartitions: Int, join: Option[DataFrame], attribute: Option[AttributeName], mode: PartitionMode.Value, partitioner: PartitionerChoice.Value = PartitionerChoice.SPARK, options: Map[String, String] = Map[String, String]())(implicit ac: SharedComponentContext): Try[Index] = Try {
    log.trace("repartitioning Index: " + index.indexname + " with partitioner " + partitioner)

    // join the index data with the entity data on the primary key
    val base = index.getData().get.join(index.entity.get.getData().get, index.pk.name)

    //TODO: possibly consider replication
    //http://stackoverflow.com/questions/31624622/is-there-a-way-to-change-the-replication-factor-of-rdds-in-spark
    //data.persist(StorageLevel.MEMORY_ONLY_2) new StorageLevel(...., N)
    val joined = join.map(df => base.join(df, index.pk.name)).getOrElse(base)

    // repartition; any exception here is captured by the surrounding Try
    val repartitioned = (partitioner match {
      case PartitionerChoice.SPARK => SparkPartitioner(joined, attribute, Some(index.indexname), nPartitions)
      case PartitionerChoice.RANDOM => RandomPartitioner(joined, attribute, Some(index.indexname), nPartitions)
      case PartitionerChoice.ECP => ECPPartitioner(joined, attribute, Some(index.indexname), nPartitions)
    }).select(index.pk.name, AttributeNames.featureIndexColumnName)

    mode match {
      case PartitionMode.CREATE_NEW =>
        // register and persist a brand-new index holding the repartitioned data
        val newName = Index.createIndexName(index.entityname, index.attribute, index.indextypename)
        ac.catalogManager.createIndex(newName, index.entityname, index.attribute, index.indextypename, index.metadata.get)
        Index.getStorage().get.create(newName, Seq()) //TODO: switch index to be an entity with specific fields
        val status = Index.getStorage().get.write(newName, repartitioned, Seq())
        if (status.isFailure) {
          throw status.failed.get
        }
        ac.cacheManager.invalidateIndex(newName)
        Index.load(newName).get
      case PartitionMode.CREATE_TEMP =>
        // keep the repartitioned data only in the cache, not in the catalog/storage
        val newName = Index.createIndexName(index.entityname, index.attribute, index.indextypename)
        val newIndex = index.shallowCopy(Some(newName))
        newIndex.setData(repartitioned)
        ac.cacheManager.put(newName, newIndex)
        newIndex
      case PartitionMode.REPLACE_EXISTING =>
        // overwrite the stored data of the existing index in place
        val status = Index.getStorage().get.write(index.indexname, repartitioned, Seq(), SaveMode.Overwrite)
        if (status.isFailure) {
          throw status.failed.get
        }
        ac.cacheManager.invalidateIndex(index.indexname)
        index
      case _ => throw new GeneralAdamException("partitioning mode unknown")
    }
  }
}
| dbisUnibas/ADAMpro | src/main/scala/org/vitrivr/adampro/data/index/IndexPartitioner.scala | Scala | mit | 3,849 |
package com.wmb.spark.experiments;
/**
* a simple app to test the scala develop environment configuration
*/
import scala.math.random
object SimpleScalaApp {
  /** Sums one million random doubles drawn uniformly from [0, 3) and prints the total. */
  def main(args: Array[String]): Unit = {
    val results = Iterator.fill(1000000)(random * 3).sum
    println("results: " + results)
  }
}
package com.tribbloids.spookystuff.parsing
import com.tribbloids.spookystuff.parsing.Pattern.{CharToken, EndOfStream, Token}
object ParsingRun {

  /** Wrapper around the raw parse result: per-rule (captured tokens, outcome, rule) triples. */
  case class ResultSeq(
      self: Seq[(Seq[Token], RuleOutcome[Any], Rule)]
  ) {

    // All exported values, in parse order.
    lazy val outputs: Seq[Any] = self.flatMap { case (_, outcome, _) => outcome.export }

    lazy val outputToString: String = outputs.mkString("\n")

    // Captured token text paired with the (optional) exported value of its rule.
    lazy val ioMap: Seq[(String, Option[Any])] = self.map {
      case (tokens, outcome, _) => Pattern.tokens2Str(tokens) -> outcome.export
    }

    lazy val ioMapToString: String = ioMap
      .map {
        case (k, Some(v)) => s"$k\t-> $v"
        case (k, None)    => k
      }
      .mkString("\n")

    override def toString: String = ioMapToString
  }
}
// only applies to interpolation parser, there should be many others
/**
* heavily stateful book-keeping object that tracks progress of parsing process
* WIHTOUT object creation overhead! 1 object for 1 complete parsing run!
* Resembles Fastparse 2.1.0 object with the same name
*/
case class ParsingRun(
    stream: Seq[Char],
    initialFState: FState,
    maxBacktracking: Int = 5 //TODO: enable it!
) {

  // Token stream: every input character wrapped as a token, terminated by EndOfStream.
  val input: Seq[Token] = stream.map(CharToken(_)) :+ EndOfStream

  val backtrackingMgr: BacktrackingManager = BacktrackingManager(input, initialFState -> PhaseVec.Eye)

  // Executes the (stateful) parse once and converts the manager's stack into a ResultSeq.
  lazy val run: ParsingRun.ResultSeq = {
    backtrackingMgr.run_!()
    ParsingRun.ResultSeq(
      backtrackingMgr.stack.map { layer =>
        val (rule, outcome) = layer.currentOutcome
        (layer.spanTokens, outcome, rule)
      }
    )
  }
}
| tribbloid/spookystuff | mldsl/src/main/scala/com/tribbloids/spookystuff/parsing/ParsingRun.scala | Scala | apache-2.0 | 1,606 |
package org.fayalite.util.img
import java.io.File
import javax.imageio.ImageIO
import com.github.sarxos.webcam.Webcam
object WebCamTestHelp {
  /**
   * Smoke-test helper: opens the default webcam, captures a single frame, writes it to
   * `hello-world.png` in the working directory, then keeps the JVM alive.
   *
   * Fixes over the previous version: the frame is captured once and reused (it was
   * captured twice, once discarded); the unused `getImageBytes` / `getWebcams` calls
   * and dead commented-out code are removed.
   */
  def main(args: Array[String]): Unit = {
    val webcam = Webcam.getDefault()
    webcam.open()
    val img = webcam.getImage
    ImageIO.write(img, "PNG", new File("hello-world.png"))
    // NOTE(review): keeps the JVM (and webcam driver) alive indefinitely, as before;
    // consider webcam.close() and a clean exit if this is not intentional.
    Thread.sleep(Long.MaxValue)
  }
}
| ryleg/fayalite | experimental/src/main/scala/org/fayalite/util/img/WebCamTestHelp.scala | Scala | mit | 529 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic.Equality
import org.scalactic.Uniformity
import org.scalactic.StringNormalizations._
import SharedHelpers._
import FailureMessages.decorateToStringValue
import Matchers._
class ListShouldContainOnlySpec extends Spec {
private def upperCase(value: Any): Any =
value match {
case l: List[_] => l.map(upperCase(_))
case s: String => s.toUpperCase
case c: Char => c.toString.toUpperCase.charAt(0)
case (s1: String, s2: String) => (s1.toUpperCase, s2.toUpperCase)
case e: java.util.Map.Entry[_, _] =>
(e.getKey, e.getValue) match {
case (k: String, v: String) => Entry(k.toUpperCase, v.toUpperCase)
case _ => value
}
case _ => value
}
  // Case-insensitive String equality used by the `decided by` / implicit-Equality
  // variants below: both sides are normalized via upperCase before comparison.
  val upperCaseStringEquality =
    new Equality[String] {
      def areEqual(a: String, b: Any): Boolean = upperCase(a) == upperCase(b)
    }
//ADDITIONAL//
  // Every syntax variant of `contain only` exercised against a single non-empty List.
  // NOTE: the assertions rely on `thisLineNumber - N` relative offsets, so the line
  // structure inside each test def must not be changed.
  object `a List` {
    val fumList: List[String] = List("fum", "foe", "fie", "fee")
    val toList: List[String] = List("you", "to", "birthday", "happy")
    // Positive form: the list must hold exactly the given elements (in any order).
    object `when used with contain only (..)` {
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        fumList should contain only ("fee", "fie", "foe", "fum")
        val e1 = intercept[TestFailedException] {
          fumList should contain only ("happy", "birthday", "to", "you")
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message.get should be (Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\\"happy\\", \\"birthday\\", \\"to\\", \\"you\\""))
      }
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        fumList should contain only ("FEE", "FIE", "FOE", "FUM")
        intercept[TestFailedException] {
          fumList should contain only ("fee", "fie", "foe")
        }
      }
      def `should use an explicitly provided Equality` {
        (fumList should contain only ("FEE", "FIE", "FOE", "FUM")) (decided by upperCaseStringEquality)
        intercept[TestFailedException] {
          (fumList should contain only ("fee", "fie", "foe")) (decided by upperCaseStringEquality)
        }
        intercept[TestFailedException] {
          fumList should contain only (" FEE ", " FIE ", " FOE ", " FUM ")
        }
        (fumList should contain only (" FEE ", " FIE ", " FOE ", " FUM ")) (after being lowerCased and trimmed)
      }
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          fumList should contain only ("fee", "fie", "foe", "fie", "fum")
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
      }
      def `should throw TestFailedException with friendly reminder when single GenTraversable argument is passed and failed` {
        val e1 = intercept[exceptions.TestFailedException] {
          fumList should contain only (Vector("happy", "birthday", "to", "you"))
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message.get should be (Resources("didNotContainOnlyElementsWithFriendlyReminder", decorateToStringValue(fumList), decorateToStringValue(Vector("happy", "birthday", "to", "you"))))
      }
    }
    // Same checks with the matcher wrapped in parentheses: should (contain only (..)).
    object `when used with (contain only (..))` {
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        fumList should (contain only ("fee", "fie", "foe", "fum"))
        val e1 = intercept[TestFailedException] {
          fumList should (contain only ("happy", "birthday", "to", "you"))
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message.get should be (Resources("didNotContainOnlyElements", decorateToStringValue(fumList), "\\"happy\\", \\"birthday\\", \\"to\\", \\"you\\""))
      }
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        fumList should (contain only ("FEE", "FIE", "FOE", "FUM"))
        intercept[TestFailedException] {
          fumList should (contain only ("fee", "fie", "foe"))
        }
      }
      def `should use an explicitly provided Equality` {
        (fumList should (contain only ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
        intercept[TestFailedException] {
          (fumList should (contain only ("fee", "fie", "foe"))) (decided by upperCaseStringEquality)
        }
        intercept[TestFailedException] {
          fumList should (contain only (" FEE ", " FIE ", " FOE ", " FUM "))
        }
        (fumList should (contain only (" FEE ", " FIE ", " FOE ", " FUM "))) (after being lowerCased and trimmed)
      }
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          fumList should (contain only ("fee", "fie", "foe", "fie", "fum"))
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
      }
      def `should throw TestFailedException with friendly reminder when single GenTraversable argument is passed and failed` {
        val e1 = intercept[exceptions.TestFailedException] {
          fumList should (contain only (Vector("happy", "birthday", "to", "you")))
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message.get should be (Resources("didNotContainOnlyElementsWithFriendlyReminder", decorateToStringValue(fumList), decorateToStringValue(Vector("happy", "birthday", "to", "you"))))
      }
    }
    // Negated form: should not contain only (..).
    object `when used with not contain only (..)` {
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        toList should not contain only ("fee", "fie", "foe", "fum")
        val e1 = intercept[TestFailedException] {
          toList should not contain only ("happy", "birthday", "to", "you")
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message.get should be (Resources("containedOnlyElements", decorateToStringValue(toList), "\\"happy\\", \\"birthday\\", \\"to\\", \\"you\\""))
      }
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        toList should not contain only ("happy", "birthday", "to")
        intercept[TestFailedException] {
          toList should not contain only ("HAPPY", "BIRTHDAY", "TO", "YOU")
        }
      }
      def `should use an explicitly provided Equality` {
        (toList should not contain only ("happy", "birthday", "to")) (decided by upperCaseStringEquality)
        intercept[TestFailedException] {
          (toList should not contain only ("HAPPY", "BIRTHDAY", "TO", "YOU")) (decided by upperCaseStringEquality)
        }
        toList should not contain only (" HAPPY ", " BIRTHDAY ", " TO ", " YOU ")
        intercept[TestFailedException] {
          (toList should not contain only (" HAPPY ", " BIRTHDAY ", " TO ", " YOU ")) (after being lowerCased and trimmed)
        }
      }
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          toList should not contain only ("fee", "fie", "foe", "fie", "fum")
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
      }
      def `should throw TestFailedException with friendly reminder when single GenTraversable argument is passed and failed` {
        val e1 = intercept[exceptions.TestFailedException] {
          Vector(Vector("happy", "birthday", "to", "you")) should not contain only (Vector("happy", "birthday", "to", "you"))
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message.get should be (Resources("containedOnlyElementsWithFriendlyReminder", decorateToStringValue(Vector(Vector("happy", "birthday", "to", "you"))), decorateToStringValue(Vector("happy", "birthday", "to", "you"))))
      }
    }
    // Negated, parenthesized form: should (not contain only (..)).
    object `when used with (not contain only (..))` {
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        toList should (not contain only ("HAPPY", "BIRTHDAY", "TO", "YOU"))
        val e1 = intercept[TestFailedException] {
          toList should (not contain only ("happy", "birthday", "to", "you"))
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message.get should be (Resources("containedOnlyElements", decorateToStringValue(toList), "\\"happy\\", \\"birthday\\", \\"to\\", \\"you\\""))
      }
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        toList should (not contain only ("NICE", "TO", "MEET", "YOU"))
        intercept[TestFailedException] {
          toList should (not contain only ("HAPPY", "BIRTHDAY", "TO", "YOU"))
        }
      }
      def `should use an explicitly provided Equality` {
        (toList should (not contain only ("NICE", "TO", "MEET", "YOU"))) (decided by upperCaseStringEquality)
        intercept[TestFailedException] {
          (toList should (not contain only ("HAPPY", "BIRTHDAY", "TO", "YOU"))) (decided by upperCaseStringEquality)
        }
        toList should (not contain only (" HAPPY ", " BIRTHDAY ", " TO ", " YOU "))
        intercept[TestFailedException] {
          (toList should (not contain only (" HAPPY ", " BIRTHDAY ", " TO ", " YOU "))) (after being lowerCased and trimmed)
        }
      }
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          toList should (not contain only ("fee", "fie", "foe", "fie", "fum"))
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
      }
      def `should throw TestFailedException with friendly reminder when single GenTraversable argument is passed and failed` {
        val e1 = intercept[exceptions.TestFailedException] {
          Vector(Vector("happy", "birthday", "to", "you")) should (not contain only (Vector("happy", "birthday", "to", "you")))
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message.get should be (Resources("containedOnlyElementsWithFriendlyReminder", decorateToStringValue(Vector(Vector("happy", "birthday", "to", "you"))), decorateToStringValue(Vector("happy", "birthday", "to", "you"))))
      }
    }
    // shouldNot variant without parentheses around the matcher.
    object `when used with shouldNot contain only (..)` {
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        toList shouldNot contain only ("fee", "fie", "foe", "fum")
        val e1 = intercept[TestFailedException] {
          toList shouldNot contain only ("happy", "birthday", "to", "you")
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message.get should be (Resources("containedOnlyElements", decorateToStringValue(toList), "\\"happy\\", \\"birthday\\", \\"to\\", \\"you\\""))
      }
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        toList shouldNot contain only ("happy", "birthday", "to")
        intercept[TestFailedException] {
          toList shouldNot contain only ("HAPPY", "BIRTHDAY", "TO", "YOU")
        }
      }
      def `should use an explicitly provided Equality` {
        (toList shouldNot contain only ("happy", "birthday", "to")) (decided by upperCaseStringEquality)
        intercept[TestFailedException] {
          (toList shouldNot contain only ("HAPPY", "BIRTHDAY", "TO", "YOU")) (decided by upperCaseStringEquality)
        }
        toList shouldNot contain only (" HAPPY ", " BIRTHDAY ", " TO ", " YOU ")
        intercept[TestFailedException] {
          (toList shouldNot contain only (" HAPPY ", " BIRTHDAY ", " TO ", " YOU ")) (after being lowerCased and trimmed)
        }
      }
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          toList shouldNot contain only ("fee", "fie", "foe", "fie", "fum")
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
      }
      def `should throw TestFailedException with friendly reminder when single GenTraversable argument is passed and failed` {
        val e1 = intercept[TestFailedException] {
          Vector(Vector("happy", "birthday", "to", "you")) shouldNot contain only (Vector("happy", "birthday", "to", "you"))
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message.get should be (Resources("containedOnlyElementsWithFriendlyReminder", decorateToStringValue(Vector(Vector("happy", "birthday", "to", "you"))), decorateToStringValue(Vector("happy", "birthday", "to", "you"))))
      }
    }
    // shouldNot variant with the matcher wrapped in parentheses.
    object `when used with shouldNot (contain only (..))` {
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        toList shouldNot (contain only ("HAPPY", "BIRTHDAY", "TO", "YOU"))
        val e1 = intercept[TestFailedException] {
          toList shouldNot (contain only ("happy", "birthday", "to", "you"))
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message.get should be (Resources("containedOnlyElements", decorateToStringValue(toList), "\\"happy\\", \\"birthday\\", \\"to\\", \\"you\\""))
      }
      def `should use the implicit Equality in scope` {
        implicit val ise = upperCaseStringEquality
        toList shouldNot (contain only ("NICE", "TO", "MEET", "YOU"))
        intercept[TestFailedException] {
          toList shouldNot (contain only ("HAPPY", "BIRTHDAY", "TO", "YOU"))
        }
      }
      def `should use an explicitly provided Equality` {
        (toList shouldNot (contain only ("NICE", "TO", "MEET", "YOU"))) (decided by upperCaseStringEquality)
        intercept[TestFailedException] {
          (toList shouldNot (contain only ("HAPPY", "BIRTHDAY", "TO", "YOU"))) (decided by upperCaseStringEquality)
        }
        toList shouldNot (contain only (" HAPPY ", " BIRTHDAY ", " TO ", " YOU "))
        intercept[TestFailedException] {
          (toList shouldNot (contain only (" HAPPY ", " BIRTHDAY ", " TO ", " YOU "))) (after being lowerCased and trimmed)
        }
      }
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          toList shouldNot (contain only ("fee", "fie", "foe", "fie", "fum"))
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
      }
      def `should throw TestFailedException with friendly reminder when single GenTraversable argument is passed and failed` {
        val e1 = intercept[TestFailedException] {
          Vector(Vector("happy", "birthday", "to", "you")) shouldNot (contain only (Vector("happy", "birthday", "to", "you")))
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message.get should be (Resources("containedOnlyElementsWithFriendlyReminder", decorateToStringValue(Vector(Vector("happy", "birthday", "to", "you"))), decorateToStringValue(Vector("happy", "birthday", "to", "you"))))
      }
    }
  }
object `a col of Lists` {
    // Fixtures for the inspector-shorthand (all/atLeast/atMost/no) variants below.
    val list1s: Vector[List[Int]] = Vector(List(3, 2, 1), List(3, 2, 1), List(3, 2, 1))
    val lists: Vector[List[Int]] = Vector(List(3, 2, 1), List(3, 2, 1), List(4, 3, 2))
    val listsNil: Vector[List[Int]] = Vector(List(3, 2, 1), List(3, 2, 1), Nil)
    val hiLists: Vector[List[String]] = Vector(List("hi", "he"), List("hi", "he"), List("hi", "he"))
    val toLists: Vector[List[String]] = Vector(List("to", "you"), List("to", "you"), List("to", "you"))
    // `contain only` combined with inspector shorthands (all/atLeast/atMost/no).
    // NOTE: the `thisLineNumber - N` offsets require the line structure inside each
    // def to stay exactly as-is.
    object `when used with contain only (..)` {
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        all (list1s) should contain only (1, 2, 3)
        atLeast (2, lists) should contain only (1, 2, 3)
        atMost (2, lists) should contain only (1, 2, 3)
        no (lists) should contain only (3, 4, 5)
        val e1 = intercept[TestFailedException] {
          all (lists) should contain only (1, 2, 3)
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some("'all' inspection failed, because: \\n" +
          " at index 2, " + decorateToStringValue(List(4, 3, 2)) + " did not contain only " + "(1, 2, 3)" + " (ListShouldContainOnlySpec.scala:" + (thisLineNumber - 5) + ") \\n" +
          "in " + decorateToStringValue(lists)))
        val e3 = intercept[TestFailedException] {
          all (lists) should contain only (1, 2, 3)
        }
        e3.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e3.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e3.message should be (Some("'all' inspection failed, because: \\n" +
          " at index 2, " + decorateToStringValue(List(4, 3, 2)) + " did not contain only " + "(1, 2, 3)" + " (ListShouldContainOnlySpec.scala:" + (thisLineNumber - 5) + ") \\n" +
          "in " + decorateToStringValue(lists)))
      }
      def `should use the implicit Equality in scope` {
        all (hiLists) should contain only ("he", "hi")
        intercept[TestFailedException] {
          all (hiLists) should contain only ("ho", "hi")
        }
        implicit val ise = upperCaseStringEquality
        all (hiLists) should contain only ("HE", "HI")
        intercept[TestFailedException] {
          all (hiLists) should contain only ("HO", "HI")
        }
      }
      def `should use an explicitly provided Equality` {
        (all (hiLists) should contain only ("HE", "HI")) (decided by upperCaseStringEquality)
        intercept[TestFailedException] {
          (all (hiLists) should contain only ("HO", "HI")) (decided by upperCaseStringEquality)
        }
        implicit val ise = upperCaseStringEquality
        (all (hiLists) should contain only ("he", "hi")) (decided by defaultEquality[String])
        intercept[TestFailedException] {
          (all (hiLists) should contain only ("ho", "hi")) (decided by defaultEquality[String])
        }
      }
      def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
        val e1 = intercept[exceptions.NotAllowedException] {
          all (list1s) should contain only (1, 2, 2, 3)
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some(Resources("onlyDuplicate")))
      }
      def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
        val e1 = intercept[TestFailedException] {
          all (Vector(Vector(3, 2, 1), Vector(3, 2, 1), Vector(4, 3, 2))) should contain only Vector(1, 2, 3)
        }
        e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some("'all' inspection failed, because: \\n" +
          " at index 0, " + decorateToStringValue(Vector(3, 2, 1)) + " did not contain only (" + decorateToStringValue(Vector(1, 2, 3)) + "), did you forget to say : _*" + " (ListShouldContainOnlySpec.scala:" + (thisLineNumber - 5) + ") \\n" +
          "in " + decorateToStringValue(Vector(Vector(3, 2, 1), Vector(3, 2, 1), Vector(4, 3, 2)))))
      }
    }
// Exercises the parenthesized logical-expression form `should (contain only (...))`.
// NOTE(review): assertions rely on `thisLineNumber - N` offsets, so comments in
// this object are only inserted where they shift intercept/assert groups uniformly.
object `when used with (contain only (..))` {
// Happy paths (all/atLeast/atMost/no) plus one failing case pinning the
// aggregated 'all' inspection failure message.
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (contain only (1, 2, 3))
atLeast (2, lists) should (contain only (1, 2, 3))
atMost (2, lists) should (contain only (1, 2, 3))
no (lists) should (contain only (3, 4, 5))
val e1 = intercept[TestFailedException] {
all (lists) should (contain only (1, 2, 3))
}
e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \\n" +
" at index 2, " + decorateToStringValue(List(4, 3, 2)) + " did not contain only " + "(1, 2, 3)" + " (ListShouldContainOnlySpec.scala:" + (thisLineNumber - 5) + ") \\n" +
"in " + decorateToStringValue(lists)))
// NOTE(review): e4 repeats e1 exactly (same input, same expectations) —
// presumably a copy-paste remnant of a variant form; verify against upstream.
val e4 = intercept[TestFailedException] {
all (lists) should (contain only (1, 2, 3))
}
e4.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
e4.failedCodeLineNumber.get should be (thisLineNumber - 3)
e4.message should be (Some("'all' inspection failed, because: \\n" +
" at index 2, " + decorateToStringValue(List(4, 3, 2)) + " did not contain only " + "(1, 2, 3)" + " (ListShouldContainOnlySpec.scala:" + (thisLineNumber - 5) + ") \\n" +
"in " + decorateToStringValue(lists)))
}
// An implicit Equality in scope changes which values are considered equal.
def `should use the implicit Equality in scope` {
all (hiLists) should (contain only ("he", "hi"))
intercept[TestFailedException] {
all (hiLists) should (contain only ("ho", "hi"))
}
implicit val ise = upperCaseStringEquality
all (hiLists) should (contain only ("HE", "HI"))
intercept[TestFailedException] {
all (hiLists) should (contain only ("HO", "HI"))
}
}
// `decided by` overrides any implicit Equality for this syntax form too.
def `should use an explicitly provided Equality` {
(all (hiLists) should (contain only ("HE", "HI"))) (decided by upperCaseStringEquality)
intercept[TestFailedException] {
(all (hiLists) should (contain only ("HO", "HI"))) (decided by upperCaseStringEquality)
}
implicit val ise = upperCaseStringEquality
(all (hiLists) should (contain only ("he", "hi"))) (decided by defaultEquality[String])
intercept[TestFailedException] {
(all (hiLists) should (contain only ("ho", "hi"))) (decided by defaultEquality[String])
}
}
// Duplicated RHS values are rejected with NotAllowedException.
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (contain only (1, 2, 2, 3))
}
e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources("onlyDuplicate")))
}
// Single GenTraversable RHS triggers the "did you forget to say : _*" reminder.
def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
val e1 = intercept[TestFailedException] {
all (Vector(Vector(3, 2, 1), Vector(3, 2, 1), Vector(4, 3, 2))) should (contain only Vector(1, 2, 3))
}
e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \\n" +
" at index 0, " + decorateToStringValue(Vector(3, 2, 1)) + " did not contain only (" + decorateToStringValue(Vector(1, 2, 3)) + "), did you forget to say : _*" + " (ListShouldContainOnlySpec.scala:" + (thisLineNumber - 5) + ") \\n" +
"in " + decorateToStringValue(Vector(Vector(3, 2, 1), Vector(3, 2, 1), Vector(4, 3, 2)))))
}
}
// Exercises the negated bare form `should not contain only (...)`.
// NOTE(review): `thisLineNumber - N` offsets constrain where comments can go;
// they are only inserted at whole-group boundaries.
object `when used with not contain only (..)` {
// A mismatching RHS passes; a matching one fails with the "contained only" message.
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (toLists) should not contain only ("fee", "fie", "foe", "fum")
val e1 = intercept[TestFailedException] {
all (toLists) should not contain only ("you", "to")
}
e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \\n" +
" at index 0, " + decorateToStringValue(List("to", "you")) + " contained only " + "(\\"you\\", \\"to\\")" + " (ListShouldContainOnlySpec.scala:" + (thisLineNumber - 5) + ") \\n" +
"in " + decorateToStringValue(toLists)))
}
// Implicit Equality changes which RHS values count as "contained".
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (toLists) should not contain only ("NICE", "MEET", "YOU")
intercept[TestFailedException] {
all (toLists) should not contain only ("YOU", "TO")
}
}
// `decided by` / `after being` explicitly override the Equality used.
def `should use an explicitly provided Equality` {
(all (toLists) should not contain only ("NICE", "MEET", "YOU")) (decided by upperCaseStringEquality)
intercept[TestFailedException] {
(all (toLists) should not contain only ("YOU", "TO")) (decided by upperCaseStringEquality)
}
all (toLists) should not contain only (" YOU ", " TO ")
intercept[TestFailedException] {
(all (toLists) should not contain only (" YOU ", " TO ")) (after being lowerCased and trimmed)
}
}
// Duplicated RHS values (repeated "fie") are rejected with NotAllowedException.
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (toLists) should not contain only ("fee", "fie", "foe", "fie", "fum")
}
e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources("onlyDuplicate")))
}
// Single GenTraversable RHS triggers the "did you forget to say : _*" reminder.
def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
val e1 = intercept[TestFailedException] {
all (Vector(Vector(Vector("you", "to")))) should not contain only (Vector("you", "to"))
}
e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \\n" +
" at index 0, " + decorateToStringValue(Vector(Vector("you", "to"))) + " contained only (" + decorateToStringValue(Vector("you", "to")) + "), did you forget to say : _*" + " (ListShouldContainOnlySpec.scala:" + (thisLineNumber - 5) + ") \\n" +
"in " + decorateToStringValue(Vector(Vector(Vector("you", "to"))))))
}
}
// Exercises the parenthesized negated form `should (not contain only (...))`.
// NOTE(review): comments only at whole-group boundaries to keep
// `thisLineNumber - N` offsets valid.
object `when used with (not contain only (..))` {
// A mismatching RHS passes; a matching one fails with the "contained only" message.
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (toLists) should (not contain only ("fee", "fie", "foe", "fum"))
val e1 = intercept[TestFailedException] {
all (toLists) should (not contain only ("you", "to"))
}
e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \\n" +
" at index 0, " + decorateToStringValue(List("to", "you")) + " contained only " + "(\\"you\\", \\"to\\")" + " (ListShouldContainOnlySpec.scala:" + (thisLineNumber - 5) + ") \\n" +
"in " + decorateToStringValue(toLists)))
}
// Implicit Equality changes which RHS values count as "contained".
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (toLists) should (not contain only ("NICE", "MEET", "YOU"))
intercept[TestFailedException] {
all (toLists) should (not contain only ("YOU", "TO"))
}
}
// `decided by` / `after being` explicitly override the Equality used.
def `should use an explicitly provided Equality` {
(all (toLists) should (not contain only ("NICE", "MEET", "YOU"))) (decided by upperCaseStringEquality)
intercept[TestFailedException] {
(all (toLists) should (not contain only ("YOU", "TO"))) (decided by upperCaseStringEquality)
}
all (toLists) should (not contain only (" YOU ", " TO "))
intercept[TestFailedException] {
(all (toLists) should (not contain only (" YOU ", " TO "))) (after being lowerCased and trimmed)
}
}
// Duplicated RHS values (repeated "fie") are rejected with NotAllowedException.
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (toLists) should (not contain only ("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources("onlyDuplicate")))
}
// Single GenTraversable RHS triggers the "did you forget to say : _*" reminder.
def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
val e1 = intercept[TestFailedException] {
all (Vector(Vector(Vector("you", "to")))) should (not contain only (Vector("you", "to")))
}
e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \\n" +
" at index 0, " + decorateToStringValue(Vector(Vector("you", "to"))) + " contained only (" + decorateToStringValue(Vector("you", "to")) + "), did you forget to say : _*" + " (ListShouldContainOnlySpec.scala:" + (thisLineNumber - 5) + ") \\n" +
"in " + decorateToStringValue(Vector(Vector(Vector("you", "to"))))))
}
}
// Exercises the `shouldNot contain only (...)` bare form; mirrors the
// `should not contain only` object above but via the shouldNot keyword.
// NOTE(review): comments only at whole-group boundaries (thisLineNumber offsets).
object `when used with shouldNot contain only (..)` {
// A mismatching RHS passes; a matching one fails with the "contained only" message.
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (toLists) shouldNot contain only ("fee", "fie", "foe", "fum")
val e1 = intercept[TestFailedException] {
all (toLists) shouldNot contain only ("you", "to")
}
e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \\n" +
" at index 0, " + decorateToStringValue(List("to", "you")) + " contained only " + "(\\"you\\", \\"to\\")" + " (ListShouldContainOnlySpec.scala:" + (thisLineNumber - 5) + ") \\n" +
"in " + decorateToStringValue(toLists)))
}
// Implicit Equality changes which RHS values count as "contained".
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (toLists) shouldNot contain only ("NICE", "MEET", "YOU")
intercept[TestFailedException] {
all (toLists) shouldNot contain only ("YOU", "TO")
}
}
// `decided by` / `after being` explicitly override the Equality used.
def `should use an explicitly provided Equality` {
(all (toLists) shouldNot contain only ("NICE", "MEET", "YOU")) (decided by upperCaseStringEquality)
intercept[TestFailedException] {
(all (toLists) shouldNot contain only ("YOU", "TO")) (decided by upperCaseStringEquality)
}
all (toLists) shouldNot contain only (" YOU ", " TO ")
intercept[TestFailedException] {
(all (toLists) shouldNot contain only (" YOU ", " TO ")) (after being lowerCased and trimmed)
}
}
// Duplicated RHS values (repeated "fie") are rejected with NotAllowedException.
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (toLists) shouldNot contain only ("fee", "fie", "foe", "fie", "fum")
}
e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources("onlyDuplicate")))
}
// Single GenTraversable RHS triggers the "did you forget to say : _*" reminder.
def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
val e1 = intercept[TestFailedException] {
all (Vector(Vector(Vector("to", "you")))) shouldNot contain only (Vector("to", "you"))
}
e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \\n" +
" at index 0, " + decorateToStringValue(Vector(Vector("to", "you"))) + " contained only (" + decorateToStringValue(Vector("to", "you")) + "), did you forget to say : _*" + " (ListShouldContainOnlySpec.scala:" + (thisLineNumber - 5) + ") \\n" +
"in " + decorateToStringValue(Vector(Vector(Vector("to", "you"))))))
}
}
// Exercises the parenthesized `shouldNot (contain only (...))` form — the last
// syntax variant covered by this spec.
// NOTE(review): comments only at whole-group boundaries (thisLineNumber offsets).
object `when used with shouldNot (contain only (..))` {
// A mismatching RHS passes; a matching one fails with the "contained only" message.
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (toLists) shouldNot (contain only ("fee", "fie", "foe", "fum"))
val e1 = intercept[TestFailedException] {
all (toLists) shouldNot (contain only ("you", "to"))
}
e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \\n" +
" at index 0, " + decorateToStringValue(List("to", "you")) + " contained only " + "(\\"you\\", \\"to\\")" + " (ListShouldContainOnlySpec.scala:" + (thisLineNumber - 5) + ") \\n" +
"in " + decorateToStringValue(toLists)))
}
// Implicit Equality changes which RHS values count as "contained".
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (toLists) shouldNot (contain only ("NICE", "MEET", "YOU"))
intercept[TestFailedException] {
all (toLists) shouldNot (contain only ("YOU", "TO"))
}
}
// `decided by` / `after being` explicitly override the Equality used.
def `should use an explicitly provided Equality` {
(all (toLists) shouldNot (contain only ("NICE", "MEET", "YOU"))) (decided by upperCaseStringEquality)
intercept[TestFailedException] {
(all (toLists) shouldNot (contain only ("YOU", "TO"))) (decided by upperCaseStringEquality)
}
all (toLists) shouldNot (contain only (" YOU ", " TO "))
intercept[TestFailedException] {
(all (toLists) shouldNot (contain only (" YOU ", " TO "))) (after being lowerCased and trimmed)
}
}
// Duplicated RHS values (repeated "fie") are rejected with NotAllowedException.
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (toLists) shouldNot (contain only ("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources("onlyDuplicate")))
}
// Single GenTraversable RHS triggers the "did you forget to say : _*" reminder.
def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
val e1 = intercept[TestFailedException] {
all (Vector(Vector(Vector("to", "you")))) shouldNot (contain only Vector("to", "you"))
}
e1.failedCodeFileName.get should be ("ListShouldContainOnlySpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \\n" +
" at index 0, " + decorateToStringValue(Vector(Vector("to", "you"))) + " contained only (" + decorateToStringValue(Vector("to", "you")) + "), did you forget to say : _*" + " (ListShouldContainOnlySpec.scala:" + (thisLineNumber - 5) + ") \\n" +
"in " + decorateToStringValue(Vector(Vector(Vector("to", "you"))))))
}
}
}
}
| travisbrown/scalatest | src/test/scala/org/scalatest/ListShouldContainOnlySpec.scala | Scala | apache-2.0 | 38,760 |
package chess
private[chess] case class Magic(mask: Long, factor: Long, offset: Int)
// Constant tables of fixed-shift magic numbers for sliding-piece (rook and
// bishop) attack generation, one entry per square a1..h8. These values are
// data, not code: altering any constant silently corrupts move generation,
// so this object is documentation-only.
private[chess] object Magic {
// Fixed shift white magics found by Volker Annuss.
// From: http://www.talkchess.com/forum/viewtopic.php?p=727500&t=64790
// Total number of slots in the single shared attack table that the rook and
// bishop offsets below index into.
val tableSize = 88772
// Rook magics, indexed by square (0 = a1 .. 63 = h8).
val rook = Array(
Magic(0x000101010101017eL, 0x00280077ffebfffeL, 26304),
Magic(0x000202020202027cL, 0x2004010201097fffL, 35520),
Magic(0x000404040404047aL, 0x0010020010053fffL, 38592),
Magic(0x0008080808080876L, 0x0040040008004002L, 8026),
Magic(0x001010101010106eL, 0x7fd00441ffffd003L, 22196),
Magic(0x002020202020205eL, 0x4020008887dffffeL, 80870),
Magic(0x004040404040403eL, 0x004000888847ffffL, 76747),
Magic(0x008080808080807eL, 0x006800fbff75fffdL, 30400),
Magic(0x0001010101017e00L, 0x000028010113ffffL, 11115),
Magic(0x0002020202027c00L, 0x0020040201fcffffL, 18205),
Magic(0x0004040404047a00L, 0x007fe80042ffffe8L, 53577),
Magic(0x0008080808087600L, 0x00001800217fffe8L, 62724),
Magic(0x0010101010106e00L, 0x00001800073fffe8L, 34282),
Magic(0x0020202020205e00L, 0x00001800e05fffe8L, 29196),
Magic(0x0040404040403e00L, 0x00001800602fffe8L, 23806),
Magic(0x0080808080807e00L, 0x000030002fffffa0L, 49481),
Magic(0x00010101017e0100L, 0x00300018010bffffL, 2410),
Magic(0x00020202027c0200L, 0x0003000c0085fffbL, 36498),
Magic(0x00040404047a0400L, 0x0004000802010008L, 24478),
Magic(0x0008080808760800L, 0x0004002020020004L, 10074),
Magic(0x00101010106e1000L, 0x0001002002002001L, 79315),
Magic(0x00202020205e2000L, 0x0001001000801040L, 51779),
Magic(0x00404040403e4000L, 0x0000004040008001L, 13586),
Magic(0x00808080807e8000L, 0x0000006800cdfff4L, 19323),
Magic(0x000101017e010100L, 0x0040200010080010L, 70612),
Magic(0x000202027c020200L, 0x0000080010040010L, 83652),
Magic(0x000404047a040400L, 0x0004010008020008L, 63110),
Magic(0x0008080876080800L, 0x0000040020200200L, 34496),
Magic(0x001010106e101000L, 0x0002008010100100L, 84966),
Magic(0x002020205e202000L, 0x0000008020010020L, 54341),
Magic(0x004040403e404000L, 0x0000008020200040L, 60421),
Magic(0x008080807e808000L, 0x0000820020004020L, 86402),
Magic(0x0001017e01010100L, 0x00fffd1800300030L, 50245),
Magic(0x0002027c02020200L, 0x007fff7fbfd40020L, 76622),
Magic(0x0004047a04040400L, 0x003fffbd00180018L, 84676),
Magic(0x0008087608080800L, 0x001fffde80180018L, 78757),
Magic(0x0010106e10101000L, 0x000fffe0bfe80018L, 37346),
Magic(0x0020205e20202000L, 0x0001000080202001L, 370),
Magic(0x0040403e40404000L, 0x0003fffbff980180L, 42182),
Magic(0x0080807e80808000L, 0x0001fffdff9000e0L, 45385),
Magic(0x00017e0101010100L, 0x00fffefeebffd800L, 61659),
Magic(0x00027c0202020200L, 0x007ffff7ffc01400L, 12790),
Magic(0x00047a0404040400L, 0x003fffbfe4ffe800L, 16762),
Magic(0x0008760808080800L, 0x001ffff01fc03000L, 0),
Magic(0x00106e1010101000L, 0x000fffe7f8bfe800L, 38380),
Magic(0x00205e2020202000L, 0x0007ffdfdf3ff808L, 11098),
Magic(0x00403e4040404000L, 0x0003fff85fffa804L, 21803),
Magic(0x00807e8080808000L, 0x0001fffd75ffa802L, 39189),
Magic(0x007e010101010100L, 0x00ffffd7ffebffd8L, 58628),
Magic(0x007c020202020200L, 0x007fff75ff7fbfd8L, 44116),
Magic(0x007a040404040400L, 0x003fff863fbf7fd8L, 78357),
Magic(0x0076080808080800L, 0x001fffbfdfd7ffd8L, 44481),
Magic(0x006e101010101000L, 0x000ffff810280028L, 64134),
Magic(0x005e202020202000L, 0x0007ffd7f7feffd8L, 41759),
Magic(0x003e404040404000L, 0x0003fffc0c480048L, 1394),
Magic(0x007e808080808000L, 0x0001ffffafd7ffd8L, 40910),
Magic(0x7e01010101010100L, 0x00ffffe4ffdfa3baL, 66516),
Magic(0x7c02020202020200L, 0x007fffef7ff3d3daL, 3897),
Magic(0x7a04040404040400L, 0x003fffbfdfeff7faL, 3930),
Magic(0x7608080808080800L, 0x001fffeff7fbfc22L, 72934),
Magic(0x6e10101010101000L, 0x0000020408001001L, 72662),
Magic(0x5e20202020202000L, 0x0007fffeffff77fdL, 56325),
Magic(0x3e40404040404000L, 0x0003ffffbf7dfeecL, 66501),
Magic(0x7e80808080808000L, 0x0001ffff9dffa333L, 14826)
)
// Bishop magics, indexed by square (0 = a1 .. 63 = h8).
val bishop = Array(
Magic(0x0040201008040200L, 0x007fbfbfbfbfbfffL, 5378),
Magic(0x0000402010080400L, 0x0000a060401007fcL, 4093),
Magic(0x0000004020100a00L, 0x0001004008020000L, 4314),
Magic(0x0000000040221400L, 0x0000806004000000L, 6587),
Magic(0x0000000002442800L, 0x0000100400000000L, 6491),
Magic(0x0000000204085000L, 0x000021c100b20000L, 6330),
Magic(0x0000020408102000L, 0x0000040041008000L, 5609),
Magic(0x0002040810204000L, 0x00000fb0203fff80L, 22236),
Magic(0x0020100804020000L, 0x0000040100401004L, 6106),
Magic(0x0040201008040000L, 0x0000020080200802L, 5625),
Magic(0x00004020100a0000L, 0x0000004010202000L, 16785),
Magic(0x0000004022140000L, 0x0000008060040000L, 16817),
Magic(0x0000000244280000L, 0x0000004402000000L, 6842),
Magic(0x0000020408500000L, 0x0000000801008000L, 7003),
Magic(0x0002040810200000L, 0x000007efe0bfff80L, 4197),
Magic(0x0004081020400000L, 0x0000000820820020L, 7356),
Magic(0x0010080402000200L, 0x0000400080808080L, 4602),
Magic(0x0020100804000400L, 0x00021f0100400808L, 4538),
Magic(0x004020100a000a00L, 0x00018000c06f3fffL, 29531),
Magic(0x0000402214001400L, 0x0000258200801000L, 45393),
Magic(0x0000024428002800L, 0x0000240080840000L, 12420),
Magic(0x0002040850005000L, 0x000018000c03fff8L, 15763),
Magic(0x0004081020002000L, 0x00000a5840208020L, 5050),
Magic(0x0008102040004000L, 0x0000020008208020L, 4346),
Magic(0x0008040200020400L, 0x0000804000810100L, 6074),
Magic(0x0010080400040800L, 0x0001011900802008L, 7866),
Magic(0x0020100a000a1000L, 0x0000804000810100L, 32139),
Magic(0x0040221400142200L, 0x000100403c0403ffL, 57673),
Magic(0x0002442800284400L, 0x00078402a8802000L, 55365),
Magic(0x0004085000500800L, 0x0000101000804400L, 15818),
Magic(0x0008102000201000L, 0x0000080800104100L, 5562),
Magic(0x0010204000402000L, 0x00004004c0082008L, 6390),
Magic(0x0004020002040800L, 0x0001010120008020L, 7930),
Magic(0x0008040004081000L, 0x000080809a004010L, 13329),
Magic(0x00100a000a102000L, 0x0007fefe08810010L, 7170),
Magic(0x0022140014224000L, 0x0003ff0f833fc080L, 27267),
Magic(0x0044280028440200L, 0x007fe08019003042L, 53787),
Magic(0x0008500050080400L, 0x003fffefea003000L, 5097),
Magic(0x0010200020100800L, 0x0000101010002080L, 6643),
Magic(0x0020400040201000L, 0x0000802005080804L, 6138),
Magic(0x0002000204081000L, 0x0000808080a80040L, 7418),
Magic(0x0004000408102000L, 0x0000104100200040L, 7898),
Magic(0x000a000a10204000L, 0x0003ffdf7f833fc0L, 42012),
Magic(0x0014001422400000L, 0x0000008840450020L, 57350),
Magic(0x0028002844020000L, 0x00007ffc80180030L, 22813),
Magic(0x0050005008040200L, 0x007fffdd80140028L, 56693),
Magic(0x0020002010080400L, 0x00020080200a0004L, 5818),
Magic(0x0040004020100800L, 0x0000101010100020L, 7098),
Magic(0x0000020408102000L, 0x0007ffdfc1805000L, 4451),
Magic(0x0000040810204000L, 0x0003ffefe0c02200L, 4709),
Magic(0x00000a1020400000L, 0x0000000820806000L, 4794),
Magic(0x0000142240000000L, 0x0000000008403000L, 13364),
Magic(0x0000284402000000L, 0x0000000100202000L, 4570),
Magic(0x0000500804020000L, 0x0000004040802000L, 4282),
Magic(0x0000201008040200L, 0x0004010040100400L, 14964),
Magic(0x0000402010080400L, 0x00006020601803f4L, 4026),
Magic(0x0002040810204000L, 0x0003ffdfdfc28048L, 4826),
Magic(0x0004081020400000L, 0x0000000820820020L, 7354),
Magic(0x000a102040000000L, 0x0000000008208060L, 4848),
Magic(0x0014224000000000L, 0x0000000000808020L, 15946),
Magic(0x0028440200000000L, 0x0000000001002020L, 14932),
Magic(0x0050080402000000L, 0x0000000401002008L, 16588),
Magic(0x0020100804020000L, 0x0000004040404040L, 6905),
Magic(0x0040201008040200L, 0x007fff9fdf7ff813L, 16076)
)
}
| niklasf/scalachess | src/main/scala/Magic.scala | Scala | mit | 7,950 |
/*
,i::,
:;;;;;;;
;:,,::;.
1ft1;::;1tL
t1;::;1,
:;::; _____ __ ___ __
fCLff ;:: tfLLC / ___/ / |/ /____ _ _____ / /_
CLft11 :,, i1tffLi \\__ \\ ____ / /|_/ // __ `// ___// __ \\
1t1i .;; .1tf ___/ //___// / / // /_/ // /__ / / / /
CLt1i :,: .1tfL. /____/ /_/ /_/ \\__,_/ \\___//_/ /_/
Lft1,:;: , 1tfL:
;it1i ,,,:::;;;::1tti s_mach.i18n
.t1i .,::;;; ;1tt Copyright (c) 2016 S-Mach, Inc.
Lft11ii;::;ii1tfL: Author: lance.gatlin@gmail.com
.L1 1tt1ttt,,Li
...1LLLL...
*/
package s_mach.i18n.impl
import java.util.{Locale, ResourceBundle}
import s_mach.i18n.messages._
import s_mach.string._
import MessageFormat.Interpolation._
// Loads a UTF-8 `messages` resource bundle for a locale and compiles each
// entry's java.text.MessageFormat pattern into the library's own
// MessageFormat representation (Literal / Choice / Interpolation).
// The parsing trick (fakeFormat + uniqueKey sentinels) is order-sensitive,
// so this object is documentation-only.
object DefaultUTF8Messages {
// A java.text.Format that performs no real formatting: `format` just appends
// the argument's toString. Installed over every sub-format in
// parseInterpolation so that argument positions can be recovered textually.
private val fakeFormat = new java.text.Format {
// Never called
def parseObject(source: String, pos: java.text.ParsePosition) = ???
def format(obj: scala.Any, toAppendTo: StringBuffer, pos: java.text.FieldPosition) =
toAppendTo.append(obj.toString)
}
// 24-char sentinel assumed not to occur in any real message text; injected as
// a fake argument value so arg positions can be located after formatting.
private val uniqueKey = "<>" * 12
// Matches a sentinel immediately followed by the decimal argument index.
private val parseRegex = s"$uniqueKey([0-9]+)".r
/**
 * Builds a [[Messages]] instance for `locale` from the resource bundle at
 * `fileBaseDir/fileBaseName` with extension `fileExt` (loaded via
 * UTF8ResourceBundleControl).
 */
def apply(
locale: Locale,
fileBaseDir: String = "conf",
fileBaseName: String = "messages",
fileExt: String = "txt"
) : Messages = {
// Dispatch on the number of format elements in the pattern:
// none -> plain literal; exactly one ChoiceFormat -> Choice;
// anything else -> positional interpolation.
def parseFormat(raw: String, fmt: java.text.MessageFormat) : MessageFormat = {
fmt.getFormats.length match {
case 0 => MessageFormat.Literal(raw)
case 1 if fmt.getFormats.head.isInstanceOf[java.text.ChoiceFormat] =>
parseChoice(fmt)
case argsCount => parseInterpolation(fmt,argsCount)
}
}
// Wraps the single-choice pattern as a function of the numeric argument,
// delegating the actual choice selection to java.text.MessageFormat.
def parseChoice(fmt: java.text.MessageFormat) : MessageFormat.Choice = {
MessageFormat.Choice({ n =>
fmt.format(
Array(n.underlying().doubleValue()).map(_.asInstanceOf[java.lang.Object])
)
})
}
// A located sentinel match: [start,end) span in the formatted string plus
// the argument index it encodes.
case class M(
start: Int,
end: Int,
argIdx: Int
)
// Recovers the literal/argument structure of a multi-argument pattern by
// formatting it with sentinel values and re-parsing the result.
def parseInterpolation(
fmt: java.text.MessageFormat,
argsCount: Int
) : MessageFormat.Interpolation = {
// Force all formats to simple string replacement
val formats = Array.fill[java.text.Format](argsCount)(fakeFormat)
fmt.setFormats(formats)
// Inject unique key and arg number as fake args to allow
// standardized parsing below
val parseable = fmt.format(
(0 until argsCount).map(i => s"$uniqueKey$i").toArray
)
// Parse simplified interpolation format
val ms = parseRegex.findAllMatchIn(parseable)
.map(m => M(m.start,m.end,m.group(1).toInt))
// Note: impossible for ms not to match at least once since there at
// least one arg at this point
val builder = Seq.newBuilder[Part]
// Walk matches left to right, emitting a Literal part for any text gap
// before each sentinel and a StringArg part for the sentinel itself.
val _lastIdx =
ms.foldLeft(0) { case (lastIdx,m) =>
if(lastIdx < m.start) {
builder += Part.Literal(parseable.substring(lastIdx, m.start))
}
builder += Part.StringArg(m.argIdx)
m.end
}
// Trailing literal after the last argument, if any.
if(_lastIdx != parseable.length) {
builder += Part.Literal(parseable.substring(_lastIdx))
}
MessageFormat.Interpolation(builder.result())
}
// Note: can't load multiple resources with same name without a base dir
// See https://stackoverflow.com/questions/6730580/how-to-read-several-resource-files-with-the-same-name-from-different-jars
require(fileBaseDir.length > 0,"Base directory must not be empty")
val control = new UTF8ResourceBundleControl(
fileExt = fileExt
)
val bundle = ResourceBundle.getBundle(s"${fileBaseDir.ensureSuffix("/")}$fileBaseName", locale, control)
// Compile every bundle key's raw pattern; keys become Symbols.
val keyToFormats =
bundle.getKeys.toStream.map { k =>
val raw = bundle.getString(k)
val fmt = new java.text.MessageFormat(raw)
Symbol(k) -> parseFormat(raw,fmt)
}
Messages(
locale = locale,
keyToFormats:_*
)
}
} | S-Mach/s_mach.i18n | src/main/scala/s_mach/i18n/impl/DefaultUTF8Messages.scala | Scala | mit | 4,074
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, AttributeSet, CurrentDate, CurrentTimestamp, MonotonicallyIncreasingID}
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.planning.ExtractEquiJoinKeys
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes
import org.apache.spark.sql.streaming.OutputMode
/**
* Analyzes the presence of unsupported operations in a logical plan.
*/
object UnsupportedOperationChecker {
/**
 * Validates that a batch (non-streaming) logical plan does not reference any
 * streaming source. Walks the plan bottom-up and raises an analysis error
 * (via `throwError`) on the first streaming operator encountered.
 *
 * @param plan the logical plan to validate
 */
def checkForBatch(plan: LogicalPlan): Unit = {
  plan.foreachUp { node =>
    if (node.isStreaming) {
      throwError("Queries with streaming sources must be executed with writeStream.start()")(node)
    }
  }
}
def checkForStreaming(plan: LogicalPlan, outputMode: OutputMode): Unit = {
if (!plan.isStreaming) {
throwError(
"Queries without streaming sources cannot be executed with writeStream.start()")(plan)
}
/** Collect all the streaming aggregates in a sub plan */
def collectStreamingAggregates(subplan: LogicalPlan): Seq[Aggregate] = {
subplan.collect { case a: Aggregate if a.isStreaming => a }
}
val mapGroupsWithStates = plan.collect {
case f: FlatMapGroupsWithState if f.isStreaming && f.isMapGroupsWithState => f
}
// Disallow multiple `mapGroupsWithState`s.
if (mapGroupsWithStates.size >= 2) {
throwError(
"Multiple mapGroupsWithStates are not supported on a streaming DataFrames/Datasets")(plan)
}
val flatMapGroupsWithStates = plan.collect {
case f: FlatMapGroupsWithState if f.isStreaming && !f.isMapGroupsWithState => f
}
// Disallow mixing `mapGroupsWithState`s and `flatMapGroupsWithState`s
if (mapGroupsWithStates.nonEmpty && flatMapGroupsWithStates.nonEmpty) {
throwError(
"Mixing mapGroupsWithStates and flatMapGroupsWithStates are not supported on a " +
"streaming DataFrames/Datasets")(plan)
}
// Only allow multiple `FlatMapGroupsWithState(Append)`s in append mode.
if (flatMapGroupsWithStates.size >= 2 && (
outputMode != InternalOutputModes.Append ||
flatMapGroupsWithStates.exists(_.outputMode != InternalOutputModes.Append)
)) {
throwError(
"Multiple flatMapGroupsWithStates are not supported when they are not all in append mode" +
" or the output mode is not append on a streaming DataFrames/Datasets")(plan)
}
// Disallow multiple streaming aggregations
val aggregates = collectStreamingAggregates(plan)
if (aggregates.size > 1) {
throwError(
"Multiple streaming aggregations are not supported with " +
"streaming DataFrames/Datasets")(plan)
}
// Disallow some output mode
outputMode match {
case InternalOutputModes.Append if aggregates.nonEmpty =>
val aggregate = aggregates.head
// Find any attributes that are associated with an eventTime watermark.
val watermarkAttributes = aggregate.groupingExpressions.collect {
case a: Attribute if a.metadata.contains(EventTimeWatermark.delayKey) => a
}
// We can append rows to the sink once the group is under the watermark. Without this
// watermark a group is never "finished" so we would never output anything.
if (watermarkAttributes.isEmpty) {
throwError(
s"$outputMode output mode not supported when there are streaming aggregations on " +
s"streaming DataFrames/DataSets without watermark")(plan)
}
case InternalOutputModes.Complete if aggregates.isEmpty =>
throwError(
s"$outputMode output mode not supported when there are no streaming aggregations on " +
s"streaming DataFrames/Datasets")(plan)
case _ =>
}
/**
* Whether the subplan will contain complete data or incremental data in every incremental
* execution. Some operations may be allowed only when the child logical plan gives complete
* data.
*/
def containsCompleteData(subplan: LogicalPlan): Boolean = {
val aggs = subplan.collect { case a@Aggregate(_, _, _) if a.isStreaming => a }
// Either the subplan has no streaming source, or it has aggregation with Complete mode
!subplan.isStreaming || (aggs.nonEmpty && outputMode == InternalOutputModes.Complete)
}
def checkUnsupportedExpressions(implicit operator: LogicalPlan): Unit = {
val unsupportedExprs = operator.expressions.flatMap(_.collect {
case m: MonotonicallyIncreasingID => m
}).distinct
if (unsupportedExprs.nonEmpty) {
throwError("Expression(s): " + unsupportedExprs.map(_.sql).mkString(", ") +
" is not supported with streaming DataFrames/Datasets")
}
}
plan.foreachUp { implicit subPlan =>
// Operations that cannot exists anywhere in a streaming plan
subPlan match {
case Aggregate(_, aggregateExpressions, child) =>
val distinctAggExprs = aggregateExpressions.flatMap { expr =>
expr.collect { case ae: AggregateExpression if ae.isDistinct => ae }
}
throwErrorIf(
child.isStreaming && distinctAggExprs.nonEmpty,
"Distinct aggregations are not supported on streaming DataFrames/Datasets. Consider " +
"using approx_count_distinct() instead.")
case _: Command =>
throwError("Commands like CreateTable*, AlterTable*, Show* are not supported with " +
"streaming DataFrames/Datasets")
case _: InsertIntoDir =>
throwError("InsertIntoDir is not supported with streaming DataFrames/Datasets")
// mapGroupsWithState and flatMapGroupsWithState
case m: FlatMapGroupsWithState if m.isStreaming =>
// Check compatibility with output modes and aggregations in query
val aggsAfterFlatMapGroups = collectStreamingAggregates(plan)
if (m.isMapGroupsWithState) { // check mapGroupsWithState
// allowed only in update query output mode and without aggregation
if (aggsAfterFlatMapGroups.nonEmpty) {
throwError(
"mapGroupsWithState is not supported with aggregation " +
"on a streaming DataFrame/Dataset")
} else if (outputMode != InternalOutputModes.Update) {
throwError(
"mapGroupsWithState is not supported with " +
s"$outputMode output mode on a streaming DataFrame/Dataset")
}
} else { // check latMapGroupsWithState
if (aggsAfterFlatMapGroups.isEmpty) {
// flatMapGroupsWithState without aggregation: operation's output mode must
// match query output mode
m.outputMode match {
case InternalOutputModes.Update if outputMode != InternalOutputModes.Update =>
throwError(
"flatMapGroupsWithState in update mode is not supported with " +
s"$outputMode output mode on a streaming DataFrame/Dataset")
case InternalOutputModes.Append if outputMode != InternalOutputModes.Append =>
throwError(
"flatMapGroupsWithState in append mode is not supported with " +
s"$outputMode output mode on a streaming DataFrame/Dataset")
case _ =>
}
} else {
// flatMapGroupsWithState with aggregation: update operation mode not allowed, and
// *groupsWithState after aggregation not allowed
if (m.outputMode == InternalOutputModes.Update) {
throwError(
"flatMapGroupsWithState in update mode is not supported with " +
"aggregation on a streaming DataFrame/Dataset")
} else if (collectStreamingAggregates(m).nonEmpty) {
throwError(
"flatMapGroupsWithState in append mode is not supported after " +
s"aggregation on a streaming DataFrame/Dataset")
}
}
}
// Check compatibility with timeout configs
if (m.timeout == EventTimeTimeout) {
// With event time timeout, watermark must be defined.
val watermarkAttributes = m.child.output.collect {
case a: Attribute if a.metadata.contains(EventTimeWatermark.delayKey) => a
}
if (watermarkAttributes.isEmpty) {
throwError(
"Watermark must be specified in the query using " +
"'[Dataset/DataFrame].withWatermark()' for using event-time timeout in a " +
"[map|flatMap]GroupsWithState. Event-time timeout not supported without " +
"watermark.")(plan)
}
}
case d: Deduplicate if collectStreamingAggregates(d).nonEmpty =>
throwError("dropDuplicates is not supported after aggregation on a " +
"streaming DataFrame/Dataset")
case Join(left, right, joinType, condition) =>
joinType match {
case _: InnerLike =>
if (left.isStreaming && right.isStreaming &&
outputMode != InternalOutputModes.Append) {
throwError("Inner join between two streaming DataFrames/Datasets is not supported" +
s" in ${outputMode} output mode, only in Append output mode")
}
case FullOuter =>
if (left.isStreaming || right.isStreaming) {
throwError("Full outer joins with streaming DataFrames/Datasets are not supported")
}
case LeftSemi | LeftAnti =>
if (right.isStreaming) {
throwError("Left semi/anti joins with a streaming DataFrame/Dataset " +
"on the right are not supported")
}
// We support streaming left outer joins with static on the right always, and with
// stream on both sides under the appropriate conditions.
case LeftOuter =>
if (!left.isStreaming && right.isStreaming) {
throwError("Left outer join with a streaming DataFrame/Dataset " +
"on the right and a static DataFrame/Dataset on the left is not supported")
} else if (left.isStreaming && right.isStreaming) {
val watermarkInJoinKeys = StreamingJoinHelper.isWatermarkInJoinKeys(subPlan)
val hasValidWatermarkRange =
StreamingJoinHelper.getStateValueWatermark(
left.outputSet, right.outputSet, condition, Some(1000000)).isDefined
if (!watermarkInJoinKeys && !hasValidWatermarkRange) {
throwError("Stream-stream outer join between two streaming DataFrame/Datasets " +
"is not supported without a watermark in the join keys, or a watermark on " +
"the nullable side and an appropriate range condition")
}
}
// We support streaming right outer joins with static on the left always, and with
// stream on both sides under the appropriate conditions.
case RightOuter =>
if (left.isStreaming && !right.isStreaming) {
throwError("Right outer join with a streaming DataFrame/Dataset on the left and " +
"a static DataFrame/DataSet on the right not supported")
} else if (left.isStreaming && right.isStreaming) {
val isWatermarkInJoinKeys = StreamingJoinHelper.isWatermarkInJoinKeys(subPlan)
// Check if the nullable side has a watermark, and there's a range condition which
// implies a state value watermark on the first side.
val hasValidWatermarkRange =
StreamingJoinHelper.getStateValueWatermark(
right.outputSet, left.outputSet, condition, Some(1000000)).isDefined
if (!isWatermarkInJoinKeys && !hasValidWatermarkRange) {
throwError("Stream-stream outer join between two streaming DataFrame/Datasets " +
"is not supported without a watermark in the join keys, or a watermark on " +
"the nullable side and an appropriate range condition")
}
}
case NaturalJoin(_) | UsingJoin(_, _) =>
// They should not appear in an analyzed plan.
case _ =>
throwError(s"Join type $joinType is not supported with streaming DataFrame/Dataset")
}
case c: CoGroup if c.children.exists(_.isStreaming) =>
throwError("CoGrouping with a streaming DataFrame/Dataset is not supported")
case u: Union if u.children.map(_.isStreaming).distinct.size == 2 =>
throwError("Union between streaming and batch DataFrames/Datasets is not supported")
case Except(left, right) if right.isStreaming =>
throwError("Except on a streaming DataFrame/Dataset on the right is not supported")
case Intersect(left, right) if left.isStreaming && right.isStreaming =>
throwError("Intersect between two streaming DataFrames/Datasets is not supported")
case GroupingSets(_, _, child, _) if child.isStreaming =>
throwError("GroupingSets is not supported on streaming DataFrames/Datasets")
case GlobalLimit(_, _) | LocalLimit(_, _) if subPlan.children.forall(_.isStreaming) =>
throwError("Limits are not supported on streaming DataFrames/Datasets")
case Sort(_, _, _) if !containsCompleteData(subPlan) =>
throwError("Sorting is not supported on streaming DataFrames/Datasets, unless it is on " +
"aggregated DataFrame/Dataset in Complete output mode")
case Sample(_, _, _, _, child) if child.isStreaming =>
throwError("Sampling is not supported on streaming DataFrames/Datasets")
case Window(_, _, _, child) if child.isStreaming =>
throwError("Non-time-based windows are not supported on streaming DataFrames/Datasets")
case ReturnAnswer(child) if child.isStreaming =>
throwError("Cannot return immediate result on streaming DataFrames/Dataset. Queries " +
"with streaming DataFrames/Datasets must be executed with writeStream.start().")
case _ =>
}
// Check if there are unsupported expressions in streaming query plan.
checkUnsupportedExpressions(subPlan)
}
}
def checkForContinuous(plan: LogicalPlan, outputMode: OutputMode): Unit = {
checkForStreaming(plan, outputMode)
plan.foreachUp { implicit subPlan =>
subPlan match {
case (_: Project | _: Filter | _: MapElements | _: MapPartitions |
_: DeserializeToObject | _: SerializeFromObject | _: SubqueryAlias |
_: TypedFilter) =>
case node if node.nodeName == "StreamingRelationV2" =>
case node =>
throwError(s"Continuous processing does not support ${node.nodeName} operations.")
}
subPlan.expressions.foreach { e =>
if (e.collectLeaves().exists {
case (_: CurrentTimestamp | _: CurrentDate) => true
case _ => false
}) {
throwError(s"Continuous processing does not support current time operations.")
}
}
}
}
private def throwErrorIf(
condition: Boolean,
msg: String)(implicit operator: LogicalPlan): Unit = {
if (condition) {
throwError(msg)
}
}
private def throwError(msg: String)(implicit operator: LogicalPlan): Nothing = {
throw new AnalysisException(
msg, operator.origin.line, operator.origin.startPosition, Some(operator))
}
}
| szhem/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/UnsupportedOperationChecker.scala | Scala | apache-2.0 | 16,995 |
package com.monsanto.arch.kamon.prometheus.converter
import java.util.concurrent.{ArrayBlockingQueue, ThreadPoolExecutor, TimeUnit}
import akka.kamon.instrumentation.AkkaDispatcherMetrics
import com.monsanto.arch.kamon.prometheus.KamonTestKit._
import com.monsanto.arch.kamon.prometheus.converter.SnapshotConverter.{KamonCategoryLabel, KamonNameLabel}
import com.monsanto.arch.kamon.prometheus.metric.PrometheusType.Counter
import com.monsanto.arch.kamon.prometheus.metric._
import com.monsanto.arch.kamon.prometheus.{PrometheusGen, PrometheusSettings}
import com.typesafe.config.ConfigFactory
import kamon.Kamon
import kamon.akka.{ActorMetrics, RouterMetrics}
import kamon.metric.SubscriptionsDispatcher.TickMetricSnapshot
import kamon.metric._
import kamon.metric.instrument.{InstrumentFactory, Memory, Time, UnitOfMeasurement}
import kamon.util.executors.{ForkJoinPoolMetrics, ThreadPoolExecutorMetrics}
import kamon.util.MilliTimestamp
import org.scalacheck.Gen
import org.scalatest.LoneElement._
import org.scalatest.Matchers._
import org.scalatest.prop.GeneratorDrivenPropertyChecks._
import org.scalatest.{Outcome, WordSpec}
import scala.concurrent.forkjoin.ForkJoinPool
/** Tests for the conversion of Kamon TickMetricSnapshot instances into our own MetricFamily instances. */
class SnapshotConverterSpec extends WordSpec {
  // ScalaTest `afterWord` helpers so spec sentences read naturally,
  // e.g. `"a snapshot converter" should handle { ... }`.
  def handle = afterWord("handle")
  def _have = afterWord("have")
  def are = afterWord("are")
  // Fresh converter per call, configured from the default reference configuration.
  def converter = new SnapshotConverter(new PrometheusSettings(ConfigFactory.defaultReference()))
override def withFixture(test: NoArgTest): Outcome = {
Kamon.start()
try super.withFixture(test)
finally clearEntities()
}
"a snapshot converter" should handle {
"empty ticks" in {
val tick = TickMetricSnapshot(start, end, Map.empty[Entity, EntitySnapshot])
val result = converter(tick)
result shouldBe Seq.empty[MetricFamily]
}
"counters" which _have {
"valid names and simple counts" in {
forAll(
PrometheusGen.metricName → "name",
PrometheusGen.count → "count"
) { (name: String, count: Int) ⇒
val entity = counter(name, count)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement shouldBe
MetricFamily(name, Counter, None, Seq(Metric(MetricValue.Counter(count), end,
Map(KamonCategoryLabel → SingleInstrumentEntityRecorder.Counter, KamonNameLabel → name))))
}
}
"names that need munging" in {
forAll(PrometheusGen.unicodeString → "name") { name ⇒
val entity = counter(name, 1)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe Mungers.asMetricName(name)
result.loneElement.metrics.loneElement.labels(KamonNameLabel) shouldBe name
}
}
"tags" in {
val name = "counter"
forAll(PrometheusGen.tags → "tags") { tags ⇒
val entity = counter(name, 2, tags)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.metrics.loneElement.labels shouldBe
tags + (KamonCategoryLabel → SingleInstrumentEntityRecorder.Counter, KamonNameLabel → name)
}
}
"tags that require munging" in {
val name = "counter"
val labelName = PrometheusGen.unicodeString.suchThat(str ⇒ Mungers.asLabelName(str) != str)
forAll(
labelName → "key",
PrometheusGen.unicodeString → "value"
) { (key: String, value: String) ⇒
val tags = Map(key → value)
val entity = counter(name, 1, tags)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.metrics.loneElement.labels shouldBe
Map(
Mungers.asLabelName(key) → value,
KamonCategoryLabel → SingleInstrumentEntityRecorder.Counter,
KamonNameLabel → name)
}
}
"multiple sets of tags" in {
val name = "counter"
val count1 = 1
val count2 = 2
forAll(PrometheusGen.tags → "tags1", PrometheusGen.tags → "tags2") { (tags1, tags2) ⇒
whenever(tags1 != tags2) {
val entity1 = counter(name, count1, tags1)
val entity2 = counter(name, count2, tags2)
val tick = snapshotOf(entity1, entity2)
val result = converter(tick)
val extraTags = Map(KamonCategoryLabel → SingleInstrumentEntityRecorder.Counter, KamonNameLabel → name)
result.loneElement.metrics shouldBe
Seq(
Metric(MetricValue.Counter(count1), end, tags1 ++ extraTags),
Metric(MetricValue.Counter(count2), end, tags2 ++ extraTags))
}
}
}
"units of measurement" which are {
import SnapshotConverterSpec.{Celsius, Joules}
"unknown" in {
val entity = counter("counter", 20, unitOfMeasurement = UnitOfMeasurement.Unknown)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "counter"
}
"nanoseconds" in {
val entity = counter("counter", 20, unitOfMeasurement = Time.Nanoseconds)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "counter_nanoseconds"
}
"microseconds" in {
val entity = counter("counter", 20, unitOfMeasurement = Time.Microseconds)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "counter_microseconds"
}
"milliseconds" in {
val entity = counter("counter", 20, unitOfMeasurement = Time.Milliseconds)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "counter_milliseconds"
}
"seconds" in {
val entity = counter("counter", 20, unitOfMeasurement = Time.Seconds)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "counter_seconds"
}
"bytes" in {
val entity = counter("counter", 20, unitOfMeasurement = Memory.Bytes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "counter_bytes"
}
"kilobytes" in {
val entity = counter("counter", 20, unitOfMeasurement = Memory.KiloBytes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "counter_kilobytes"
}
"megabytes" in {
val entity = counter("counter", 20, unitOfMeasurement = Memory.MegaBytes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "counter_megabytes"
}
"gigabytes" in {
val entity = counter("counter", 20, unitOfMeasurement = Memory.GigaBytes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "counter_gigabytes"
}
"hours (custom time type)" in {
val entity = counter("counter", 20, unitOfMeasurement = Time(3600, "h"))
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "counter_h"
}
"terabytes (custom memory type)" in {
val entity = counter("counter", 20, unitOfMeasurement = Time(1024E4, "Tb"))
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "counter_Tb"
}
"joules (custom type)" in {
val entity = counter("counter", 20, unitOfMeasurement = Joules)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "counter_J"
}
"celsius (mungeable custom type)" in {
val entity = counter("counter", 20, unitOfMeasurement = Celsius)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "counter__C"
}
}
}
"histograms" which _have {
"nothing special" in {
val name = "test_histogram"
val values = 1L.to(5)
val entity = histogram(name, values)
val tick = snapshotOf(entity)
val labels = Map(KamonCategoryLabel → SingleInstrumentEntityRecorder.Histogram, KamonNameLabel → name)
val metricValue = MetricValue.Histogram(
Seq(
MetricValue.Bucket(1, 1),
MetricValue.Bucket(2, 2),
MetricValue.Bucket(3, 3),
MetricValue.Bucket(4, 4),
MetricValue.Bucket(5, 5),
MetricValue.Bucket(Double.PositiveInfinity, 5)),
5,
15.0)
val result = converter(tick)
result.loneElement shouldBe
MetricFamily(name, PrometheusType.Histogram, None,
Seq(Metric(metricValue, end, labels)))
}
"valid names" in {
val value: Gen[Long] = PrometheusGen.chooseExponentially(1L, 1L, 3600000000000L)
/** Generates a list of values for a histogram. */
val values: Gen[Seq[Long]] = Gen.listOf(value)
forAll(
PrometheusGen.metricName → "name",
values → "values"
) { (name: String, values: Seq[Long]) ⇒
val entity = histogram(name, values)
val tick = snapshotOf(entity)
val expected = {
val value = MetricValue.Histogram(tick.metrics(entity).histogram("histogram").get)
val labels = Map(KamonCategoryLabel → SingleInstrumentEntityRecorder.Histogram, KamonNameLabel → name)
MetricFamily(name, PrometheusType.Histogram, None, Seq(Metric(value, end, labels)))
}
val result = converter(tick)
result.loneElement shouldBe expected
}
}
"names that require munging" in {
forAll(PrometheusGen.unicodeString → "name") { (name: String) ⇒
val entity = histogram(name, Seq.empty[Long])
val tick = snapshotOf(entity)
val result = converter(tick)
val mungedName = Mungers.asMetricName(name)
result.loneElement.name shouldBe mungedName
}
}
"non-help tags" in {
forAll(PrometheusGen.tags → "tags") { (tags: Map[String, String]) ⇒
val name = "tagged_histogram"
val entity = histogram(name, tags = tags)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.metrics.loneElement.labels shouldBe
(tags ++ Map(KamonCategoryLabel → SingleInstrumentEntityRecorder.Histogram, KamonNameLabel → name))
}
}
"non-help tags which require munging" in {
val labelName = PrometheusGen.unicodeString.suchThat(str ⇒ Mungers.asLabelName(str) != str)
forAll(
labelName → "label name",
PrometheusGen.unicodeString → "label value"
) { (key: String, value: String) ⇒
val name = "histogram"
val tags = Map(key → value)
val entity = histogram(name, tags = tags)
val tick = snapshotOf(entity)
val result = converter(tick)
val mungedTag = Map(Mungers.asLabelName(key) → value)
result.loneElement.metrics.loneElement.labels shouldBe
(mungedTag ++ Map(KamonCategoryLabel → SingleInstrumentEntityRecorder.Histogram, KamonNameLabel → name))
}
}
"multiple sets of tags" in {
type Tags = Map[String, String]
val name = "multi_tagged"
val extraLabels = Map(KamonCategoryLabel → SingleInstrumentEntityRecorder.Histogram, KamonNameLabel → name)
forAll(PrometheusGen.tags → "tags1", PrometheusGen.tags → "tags2") { (tags1: Tags, tags2: Tags) ⇒
whenever(tags1 != tags2) {
val entity1 = histogram(name, values = Seq(1L), tags = tags1)
val value1 = MetricValue.Histogram(
Seq(MetricValue.Bucket(1, 1), MetricValue.Bucket(Double.PositiveInfinity, 1)),
1,
1)
val entity2 = histogram(name, values = Seq(2L), tags = tags2)
val value2 = MetricValue.Histogram(
Seq(MetricValue.Bucket(2, 1), MetricValue.Bucket(Double.PositiveInfinity, 1)),
1,
2)
val tick = snapshotOf(entity1, entity2)
val result = converter(tick)
result.loneElement.metrics shouldBe
Seq(
Metric(value1, end, tags1 ++ extraLabels),
Metric(value2, end, tags2 ++ extraLabels))
}
}
}
"units of measurement" which are {
import SnapshotConverterSpec.{Celsius, Joules}
"unknown" in {
val entity = histogram("histogram", unitOfMeasurement = UnitOfMeasurement.Unknown)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "histogram"
}
"nanoseconds" in {
val entity = histogram("histogram", unitOfMeasurement = Time.Nanoseconds)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "histogram_nanoseconds"
}
"microseconds" in {
val entity = histogram("histogram", unitOfMeasurement = Time.Microseconds)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "histogram_microseconds"
}
"milliseconds" in {
val entity = histogram("histogram", unitOfMeasurement = Time.Milliseconds)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "histogram_milliseconds"
}
"seconds" in {
val entity = histogram("histogram", unitOfMeasurement = Time.Seconds)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "histogram_seconds"
}
"bytes" in {
val entity = histogram("histogram", unitOfMeasurement = Memory.Bytes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "histogram_bytes"
}
"kilobytes" in {
val entity = histogram("histogram", unitOfMeasurement = Memory.KiloBytes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "histogram_kilobytes"
}
"megabytes" in {
val entity = histogram("histogram", unitOfMeasurement = Memory.MegaBytes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "histogram_megabytes"
}
"gigabytes" in {
val entity = histogram("histogram", unitOfMeasurement = Memory.GigaBytes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "histogram_gigabytes"
}
"hours (custom time type)" in {
val entity = histogram("histogram", unitOfMeasurement = Time(3600, "h"))
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "histogram_h"
}
"terabytes (custom memory type)" in {
val entity = histogram("histogram", unitOfMeasurement = Time(1024E4, "Tb"))
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "histogram_Tb"
}
"joules (custom type)" in {
val entity = histogram("histogram", unitOfMeasurement = Joules)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "histogram_J"
}
"celsius (mungeable custom type)" in {
val entity = histogram("histogram", unitOfMeasurement = Celsius)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "histogram__C"
}
}
}
"min-max counters" which _have {
"nothing special" in {
val name = "test_min_max_counter"
val changes = Seq(5L, -1L, 2L, -3L, 6L, 20L, -10L)
val entity = minMaxCounter(name, changes)
val tick = snapshotOf(entity)
val value = MetricValue.Histogram(tick.metrics(entity).minMaxCounter("min-max-counter").get)
val labels = Map(KamonCategoryLabel → SingleInstrumentEntityRecorder.MinMaxCounter, KamonNameLabel → name)
val expected = MetricFamily(name, PrometheusType.Histogram, None, Seq(Metric(value, end, labels)))
val result = converter(tick)
result.loneElement shouldBe expected
}
"arbitrary values" in {
val name = "arbitrary_values"
forAll(PrometheusGen.minMaxCounterChanges → "changes") { (changes: Seq[Long]) ⇒
val entity = minMaxCounter(name, changes)
val tick = snapshotOf(entity)
val value = MetricValue.Histogram(tick.metrics(entity).minMaxCounter("min-max-counter").get)
val result = converter(tick)
result.loneElement.metrics.loneElement.value shouldBe value
}
}
"valid names" in {
val changes = Seq.empty[Long]
forAll(PrometheusGen.metricName → "name") { (name: String) ⇒
val entity = minMaxCounter(name, changes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe name
result.loneElement.metrics.loneElement.labels(KamonNameLabel) shouldBe name
}
}
"names that require munging" in {
val changes = Seq.empty[Long]
forAll(PrometheusGen.unicodeString → "name") { (name: String) ⇒
val entity = minMaxCounter(name, changes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe Mungers.asMetricName(name)
result.loneElement.metrics.loneElement.labels(KamonNameLabel) shouldBe name
}
}
"non-help tags" in {
val name = "non_help_tags"
val changes = Seq.empty[Long]
forAll(PrometheusGen.tags → "tags") { tags ⇒
val entity = minMaxCounter(name, changes = changes, tags = tags)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.metrics.loneElement.labels shouldBe
(tags ++ Map(KamonCategoryLabel → SingleInstrumentEntityRecorder.MinMaxCounter, KamonNameLabel → name))
}
}
"non-help tags which require munging" in {
val name = "mungeable_non_help_tags"
val changes = Seq.empty[Long]
val labelName = PrometheusGen.unicodeString.suchThat(str ⇒ Mungers.asLabelName(str) != str)
forAll(labelName → "label name", PrometheusGen.unicodeString → "label value") { (key, value) ⇒
val tags = Map(key → value)
val entity = minMaxCounter(name, tags = tags, changes = changes)
val tick = snapshotOf(entity)
val result = converter(tick)
val mungedTags = Map(Mungers.asLabelName(key) → value,
KamonCategoryLabel → SingleInstrumentEntityRecorder.MinMaxCounter,
KamonNameLabel → name)
result.loneElement.metrics.loneElement.labels shouldBe mungedTags
}
}
"multiple sets of tags" in {
val name = "multiple_sets_of_tags"
val commonLabels = Map(KamonCategoryLabel → SingleInstrumentEntityRecorder.MinMaxCounter, KamonNameLabel → name)
val changes = Seq.empty[Long]
forAll(PrometheusGen.tags → "tags1", PrometheusGen.tags → "tags2") { (tags1, tags2) ⇒
whenever(tags1 != tags2 && (tags1 ++ tags2).keys.forall(Metric.isValidLabelName)) {
val entity1 = minMaxCounter(name, tags = tags1, changes = changes)
val entity2 = minMaxCounter(name, tags = tags2, changes = changes)
val tick = snapshotOf(entity1, entity2)
val result = converter(tick)
result.loneElement.metrics.map(_.labels) shouldBe Seq(tags1 ++ commonLabels, tags2 ++ commonLabels)
}
}
}
"units of measurement" which are {
import SnapshotConverterSpec.{Celsius, Joules}
"unknown" in {
val entity = minMaxCounter("minMaxCounter", unitOfMeasurement = UnitOfMeasurement.Unknown)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "minMaxCounter"
}
"nanoseconds" in {
val entity = minMaxCounter("minMaxCounter", unitOfMeasurement = Time.Nanoseconds)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "minMaxCounter_nanoseconds"
}
"microseconds" in {
val entity = minMaxCounter("minMaxCounter", unitOfMeasurement = Time.Microseconds)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "minMaxCounter_microseconds"
}
"milliseconds" in {
val entity = minMaxCounter("minMaxCounter", unitOfMeasurement = Time.Milliseconds)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "minMaxCounter_milliseconds"
}
"seconds" in {
val entity = minMaxCounter("minMaxCounter", unitOfMeasurement = Time.Seconds)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "minMaxCounter_seconds"
}
"bytes" in {
val entity = minMaxCounter("minMaxCounter", unitOfMeasurement = Memory.Bytes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "minMaxCounter_bytes"
}
"kilobytes" in {
val entity = minMaxCounter("minMaxCounter", unitOfMeasurement = Memory.KiloBytes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "minMaxCounter_kilobytes"
}
"megabytes" in {
val entity = minMaxCounter("minMaxCounter", unitOfMeasurement = Memory.MegaBytes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "minMaxCounter_megabytes"
}
"gigabytes" in {
val entity = minMaxCounter("minMaxCounter", unitOfMeasurement = Memory.GigaBytes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "minMaxCounter_gigabytes"
}
"hours (custom time type)" in {
val entity = minMaxCounter("minMaxCounter", unitOfMeasurement = Time(3600, "h"))
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "minMaxCounter_h"
}
"terabytes (custom memory type)" in {
val entity = minMaxCounter("minMaxCounter", unitOfMeasurement = Time(1024E4, "Tb"))
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "minMaxCounter_Tb"
}
"joules (custom type)" in {
val entity = minMaxCounter("minMaxCounter", unitOfMeasurement = Joules)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "minMaxCounter_J"
}
"celsius (mungeable custom type)" in {
val entity = minMaxCounter("minMaxCounter", unitOfMeasurement = Celsius)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "minMaxCounter__C"
}
}
}
"gauges" which _have {
"nothing special" in {
val name = "gauge"
val values = Seq(1L, 3L, 6L, 10L)
val entity = gauge(name, values)
val labels = Map(KamonCategoryLabel → SingleInstrumentEntityRecorder.Gauge, KamonNameLabel → name)
val tick = snapshotOf(entity)
val result = converter(tick)
val value = MetricValue.Histogram(
Seq(
MetricValue.Bucket(1, 1),
MetricValue.Bucket(3, 2),
MetricValue.Bucket(6, 3),
MetricValue.Bucket(10, 4),
MetricValue.Bucket(Double.PositiveInfinity, 4) ),
4,
20)
result.loneElement shouldBe
MetricFamily(name, PrometheusType.Histogram, None, Seq(Metric(value, end, labels)))
}
"arbitrary values" in {
val name = "arbitrary_values"
forAll(PrometheusGen.instrumentLevels → "readings") { readings ⇒
val entity = gauge(name, readings)
val tick = snapshotOf(entity)
val value = MetricValue.Histogram(tick.metrics(entity).gauge("gauge").get)
val result = converter(tick)
result.loneElement.metrics.loneElement.value shouldBe value
}
}
"valid names" in {
val readings = Seq.empty[Long]
forAll(PrometheusGen.metricName → "name") { name ⇒
val entity = gauge(name, readings)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe name
result.loneElement.metrics.loneElement.labels(KamonNameLabel) shouldBe name
}
}
"names that require munging" in {
val readings = Seq.empty[Long]
forAll(PrometheusGen.unicodeString → "name") { name ⇒
val entity = gauge(name, readings)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe Mungers.asMetricName(name)
result.loneElement.metrics.loneElement.labels(KamonNameLabel) shouldBe name
}
}
"non-help tags" in {
val name = "gauge"
val readings = Seq.empty[Long]
forAll(PrometheusGen.tags → "tags") { tags ⇒
val entity = gauge(name, tags = tags, readings = readings)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.metrics.loneElement.labels shouldBe
(tags ++ Map(KamonCategoryLabel → SingleInstrumentEntityRecorder.Gauge, KamonNameLabel → name))
}
}
"non-help tags which require munging" in {
val name = "gauge"
val readings = Seq.empty[Long]
val labelName = PrometheusGen.unicodeString.suchThat(str ⇒ Mungers.asLabelName(str) != str)
forAll(labelName → "label name", PrometheusGen.unicodeString → "label value") { (key, value) ⇒
val tags = Map(key → value)
val entity = gauge(name, tags = tags, readings = readings)
val tick = snapshotOf(entity)
val result = converter(tick)
val mungedTags = Map(Mungers.asLabelName(key) → value,
KamonCategoryLabel → SingleInstrumentEntityRecorder.Gauge,
KamonNameLabel → name)
result.loneElement.metrics.loneElement.labels shouldBe mungedTags
}
}
"multiple sets of tags" in {
val name = "gauge"
val commonLabels = Map(KamonCategoryLabel → SingleInstrumentEntityRecorder.Gauge, KamonNameLabel → name)
val readings = Seq.empty[Long]
forAll(PrometheusGen.tags → "tags1", PrometheusGen.tags → "tags2") { (tags1, tags2) ⇒
whenever(tags1 != tags2 && (tags1 ++ tags2).keys.forall(Metric.isValidLabelName)) {
val entity1 = gauge(name, tags = tags1, readings = readings)
val entity2 = gauge(name, tags = tags2, readings = readings)
val tick = snapshotOf(entity1, entity2)
val result = converter(tick)
result.loneElement.metrics.map(_.labels) shouldBe Seq(tags1 ++ commonLabels, tags2 ++ commonLabels)
}
}
}
"units of measurement" which are {
import SnapshotConverterSpec.{Celsius, Joules}
"unknown" in {
val entity = gauge("gauge", unitOfMeasurement = UnitOfMeasurement.Unknown)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "gauge"
}
"nanoseconds" in {
val entity = gauge("gauge", unitOfMeasurement = Time.Nanoseconds)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "gauge_nanoseconds"
}
"microseconds" in {
val entity = gauge("gauge", unitOfMeasurement = Time.Microseconds)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "gauge_microseconds"
}
"milliseconds" in {
val entity = gauge("gauge", unitOfMeasurement = Time.Milliseconds)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "gauge_milliseconds"
}
"seconds" in {
val entity = gauge("gauge", unitOfMeasurement = Time.Seconds)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "gauge_seconds"
}
"bytes" in {
val entity = gauge("gauge", unitOfMeasurement = Memory.Bytes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "gauge_bytes"
}
"kilobytes" in {
val entity = gauge("gauge", unitOfMeasurement = Memory.KiloBytes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "gauge_kilobytes"
}
"megabytes" in {
val entity = gauge("gauge", unitOfMeasurement = Memory.MegaBytes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "gauge_megabytes"
}
"gigabytes" in {
val entity = gauge("gauge", unitOfMeasurement = Memory.GigaBytes)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "gauge_gigabytes"
}
"hours (custom time type)" in {
val entity = gauge("gauge", unitOfMeasurement = Time(3600, "h"))
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "gauge_h"
}
"terabytes (custom memory type)" in {
val entity = gauge("gauge", unitOfMeasurement = Time(1024E4, "Tb"))
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "gauge_Tb"
}
"joules (custom type)" in {
val entity = gauge("gauge", unitOfMeasurement = Joules)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "gauge_J"
}
"celsius (mungeable custom type)" in {
val entity = gauge("gauge", unitOfMeasurement = Celsius)
val tick = snapshotOf(entity)
val result = converter(tick)
result.loneElement.name shouldBe "gauge__C"
}
}
}
// A recorder that registers no instruments must convert to no metric families at all.
"empty custom metrics" in {
  val entityName = "test"
  class NoInstrumentMetrics(factory: InstrumentFactory) extends GenericEntityRecorder(factory)
  object NoInstrumentMetrics extends EntityRecorderFactory[NoInstrumentMetrics] {
    override val category: String = "empty"
    override def createRecorder(factory: InstrumentFactory): NoInstrumentMetrics =
      new NoInstrumentMetrics(factory)
  }
  Kamon.metrics.entity[NoInstrumentMetrics](NoInstrumentMetrics, entityName)
  val tick = snapshotOf(Entity(entityName, NoInstrumentMetrics.category, Map.empty))
  converter(tick) shouldBe empty
}
// Conversion of a custom recorder with two counters: each instrument becomes its own
// metric family, and entity tags are merged into every metric's label set.
"dual counter custom metrics" which _have {
  // Registers a DualCounter entity, bumps both counters, and returns its Entity handle.
  def dualCounter(name: String, count1: Long, count2: Long, tags: Map[String,String] = Map.empty): Entity = {
    val dualCounter = Kamon.metrics.entity(DualCounter, name, tags)
    dualCounter.count1.increment(count1)
    dualCounter.count2.increment(count2)
    Entity(name, DualCounter.category, tags)
  }
  "nothing special" in {
    val name = "test"
    val entity = dualCounter(name, 42, 1)
    val tick = snapshotOf(entity)
    val labels = Map(KamonCategoryLabel → DualCounter.category, KamonNameLabel → name)
    val result = converter(tick)
    result shouldBe
      Seq(
        MetricFamily("dual_counter_count_1", PrometheusType.Counter, None,
          Seq(Metric(MetricValue.Counter(42), end, labels))),
        MetricFamily("dual_counter_count_2", PrometheusType.Counter, None,
          Seq(Metric(MetricValue.Counter(1), end, labels))))
  }
  "arbitrary values" in {
    val name = "test"
    val labels = Map(KamonCategoryLabel → DualCounter.category, KamonNameLabel → name)
    forAll(PrometheusGen.count → "count1", PrometheusGen.count → "count2") { (count1, count2) ⇒
      val entity = dualCounter(name, count1, count2)
      val tick = snapshotOf(entity)
      val result = converter(tick)
      result shouldBe
        Seq(
          MetricFamily("dual_counter_count_1", PrometheusType.Counter, None,
            Seq(Metric(MetricValue.Counter(count1), end, labels))),
          MetricFamily("dual_counter_count_2", PrometheusType.Counter, None,
            Seq(Metric(MetricValue.Counter(count2), end, labels))))
    }
  }
  // Entity tags must appear on every emitted metric, alongside the common labels.
  "non-help tags" in {
    val name = "dual_counter"
    val commonLabels = Map(KamonCategoryLabel → DualCounter.category, KamonNameLabel → name)
    forAll(PrometheusGen.tags → "tags") { tags ⇒
      val entity = dualCounter(name, 1, 2, tags)
      val tick = snapshotOf(entity)
      val result = converter(tick)
      result should have size 2
      all(result.map(_.metrics.loneElement.labels)) shouldBe (tags ++ commonLabels)
    }
  }
  "non-help tags which require munging" in {
    val name = "dual_counter"
    // Only generate label names that the munger actually needs to rewrite.
    val labelName = PrometheusGen.unicodeString.suchThat(str ⇒ Mungers.asLabelName(str) != str)
    forAll(labelName → "label name", PrometheusGen.unicodeString → "label value") { (key, value) ⇒
      whenever(Mungers.asLabelName(key) != key) {
        val tags = Map(key → value)
        val entity = dualCounter(name, 1, 2, tags)
        val tick = snapshotOf(entity)
        val result = converter(tick)
        val mungedTags = Map(Mungers.asLabelName(key) → value,
          KamonCategoryLabel → DualCounter.category,
          KamonNameLabel → name)
        result should have size 2
        all(result.map(_.metrics.loneElement.labels)) shouldBe mungedTags
      }
    }
  }
  // Two entities with different tag sets produce two metrics per family, one per tag set.
  "multiple sets of tags" in {
    val name = "dual_counter"
    val commonLabels = Map(KamonCategoryLabel → DualCounter.category, KamonNameLabel → name)
    forAll(PrometheusGen.tags → "tags1", PrometheusGen.tags → "tags2") { (tags1, tags2) ⇒
      whenever(tags1 != tags2 && (tags1 ++ tags2).keys.forall(Metric.isValidLabelName)) {
        val entity1 = dualCounter(name, 1, 2, tags1)
        val entity2 = dualCounter(name, 3, 4, tags2)
        val tick = snapshotOf(entity1, entity2)
        val result = converter(tick)
        result should have size 2
        all(result.map(_.metrics.map(_.labels))) should
          contain theSameElementsAs Seq(tags1 ++ commonLabels, tags2 ++ commonLabels)
      }
    }
  }
}
// Conversion of a recorder mixing every instrument type: counter → counter family;
// histogram, min-max counter, and gauge → histogram families (with unit suffixes where set).
"smorgasbord metrics" which _have {
  // Builds a Smorgasbord entity and feeds each of its instruments the given data.
  def smorgasboard(name: String, count: Long, values: Seq[Long], changes: Seq[Long], readings: Seq[Long],
      tags: Map[String,String] = Map.empty): Entity = {
    val s = Kamon.metrics.entity(Smorgasbord, name, tags)
    s.aCounter.increment(count)
    values.foreach(s.aHistogram.record)
    // Apply min-max counter changes in chunks of five, refreshing between chunks so the
    // min/max tracking observes intermediate states.
    changes.grouped(5).foreach { changesChunk ⇒
      changesChunk.foreach(s.aMinMaxCounter.increment)
      s.aMinMaxCounter.refreshValues()
    }
    s.setGaugeReadings(readings)
    // NOTE(review): `aGage` presumably names the gauge member of Smorgasbord — the
    // spelling must match the recorder's field; confirm against the Smorgasbord definition.
    readings.foreach(_ ⇒ s.aGage.refreshValue())
    Entity(name, Smorgasbord.category, tags)
  }
  "nothing special" in {
    val name = "test_smorgasbord"
    val count = 3L
    val values = Seq(1L, 2L, 3L, 5L, 8L)
    val changes = Seq(1L, 2L, -1L)
    val readings = Seq(10L, 15L, 20L)
    val labels = Map(KamonCategoryLabel → Smorgasbord.category, KamonNameLabel → name)
    val entity = smorgasboard(name, count, values, changes, readings)
    val tick = snapshotOf(entity)
    val result = converter(tick)
    // Cumulative Prometheus buckets expected for the recorded histogram values.
    val histogramBuckets = Seq(
      MetricValue.Bucket(1, 1),
      MetricValue.Bucket(2, 2),
      MetricValue.Bucket(3, 3),
      MetricValue.Bucket(5, 4),
      MetricValue.Bucket(8, 5),
      MetricValue.Bucket(Double.PositiveInfinity, 5)
    )
    val minMaxCounterBuckets = Seq(
      MetricValue.Bucket(0, 1),
      MetricValue.Bucket(2, 5),
      MetricValue.Bucket(3, 6),
      MetricValue.Bucket(Double.PositiveInfinity, 6)
    )
    val gaugeBuckets = Seq(
      MetricValue.Bucket(10, 1),
      MetricValue.Bucket(15, 2),
      MetricValue.Bucket(20, 3),
      MetricValue.Bucket(Double.PositiveInfinity, 3)
    )
    result should contain theSameElementsAs
      Seq(
        MetricFamily("sm_rg_sbord_a_counter", PrometheusType.Counter, None,
          Seq(Metric(MetricValue.Counter(count), end, labels))),
        MetricFamily("sm_rg_sbord_a_histogram_milliseconds", PrometheusType.Histogram, None,
          Seq(Metric(MetricValue.Histogram(histogramBuckets, 5, 19), end, labels))),
        MetricFamily("sm_rg_sbord_a_min_max_counter_kilobytes", PrometheusType.Histogram, None,
          Seq(Metric(MetricValue.Histogram(minMaxCounterBuckets, 6, 11), end, labels))),
        MetricFamily("sm_rg_sbord_a_gauge", PrometheusType.Histogram, None,
          Seq(Metric(MetricValue.Histogram(gaugeBuckets, 3, 45), end, labels)))
      )
  }
  // Property test: expected values are derived from the tick snapshot itself, so this
  // checks the family naming/typing rather than recomputing bucket contents.
  "arbitrary values" in {
    val name = "test"
    val labels = Map(KamonCategoryLabel → Smorgasbord.category, KamonNameLabel → name)
    forAll(
      PrometheusGen.count → "counter value",
      PrometheusGen.instrumentLevels → "histogram values",
      PrometheusGen.minMaxCounterChanges → "min-max counter changes",
      PrometheusGen.instrumentLevels → "gauge readings"
    ) { (count, values, changes, readings) ⇒
      val entity = smorgasboard(name, count, values, changes, readings)
      val tick = snapshotOf(entity)
      val result = converter(tick)
      assert(tick.metrics(entity).histogram("a-histogram").isDefined)
      val histogramValue = MetricValue.Histogram(tick.metrics(entity).histogram("a-histogram").get)
      assert(tick.metrics(entity).minMaxCounter("a-min-max-counter").isDefined)
      val minMaxCounterValue = MetricValue.Histogram(tick.metrics(entity).minMaxCounter("a-min-max-counter").get)
      assert(tick.metrics(entity).gauge("a-gauge").isDefined)
      val gaugeValue = MetricValue.Histogram(tick.metrics(entity).gauge("a-gauge").get)
      result should contain theSameElementsAs
        Seq(
          MetricFamily("sm_rg_sbord_a_counter", PrometheusType.Counter, None,
            Seq(Metric(MetricValue.Counter(count), end, labels))),
          MetricFamily("sm_rg_sbord_a_histogram_milliseconds", PrometheusType.Histogram, None,
            Seq(Metric(histogramValue, end, labels))),
          MetricFamily("sm_rg_sbord_a_min_max_counter_kilobytes", PrometheusType.Histogram, None,
            Seq(Metric(minMaxCounterValue, end, labels))),
          MetricFamily("sm_rg_sbord_a_gauge", PrometheusType.Histogram, None,
            Seq(Metric(gaugeValue, end, labels)))
        )
    }
  }
  // Entity tags must appear on all four emitted metric families.
  "non-help tags" in {
    val name = "test"
    val commonLabels = Map(KamonCategoryLabel → Smorgasbord.category, KamonNameLabel → name)
    forAll(PrometheusGen.tags → "tags") { tags ⇒
      val entity = smorgasboard(name, 1, Seq.empty, Seq.empty, Seq.empty, tags)
      val tick = snapshotOf(entity)
      val result = converter(tick)
      result should have size 4
      all(result.map(_.metrics.loneElement.labels)) shouldBe (tags ++ commonLabels)
    }
  }
  "non-help tags which require munging" in {
    val name = "test"
    val commonLabels = Map(KamonCategoryLabel → Smorgasbord.category, KamonNameLabel → name)
    // Only generate label names that the munger actually needs to rewrite.
    val labelName = PrometheusGen.unicodeString.suchThat(str ⇒ Mungers.asLabelName(str) != str)
    forAll(labelName → "label name", PrometheusGen.unicodeString → "label value") { (key, value) ⇒
      whenever(Mungers.asLabelName(key) != key) {
        val tags = Map(key → value)
        val entity = smorgasboard(name, 1, Seq.empty, Seq.empty, Seq.empty, tags)
        val tick = snapshotOf(entity)
        val result = converter(tick)
        val mungedTags = Map(Mungers.asLabelName(key) → value) ++ commonLabels
        result should have size 4
        all(result.map(_.metrics.loneElement.labels)) shouldBe mungedTags
      }
    }
  }
  // Two entities with different tag sets produce one metric per tag set in each family.
  "multiple sets of tags" in {
    val name = "test"
    val commonLabels = Map(KamonCategoryLabel → Smorgasbord.category, KamonNameLabel → name)
    forAll(PrometheusGen.tags → "tags1", PrometheusGen.tags → "tags2") { (tags1, tags2) ⇒
      whenever(tags1 != tags2 && (tags1 ++ tags2).keys.forall(Metric.isValidLabelName)) {
        val entity1 = smorgasboard(name, 1, Seq(1L), Seq(1L), Seq(1L), tags1)
        val entity2 = smorgasboard(name, 2, Seq(2L), Seq(2L), Seq(2L), tags2)
        val tick = snapshotOf(entity1, entity2)
        val result = converter(tick)
        result should have size 4
        all(result.map(_.metrics.map(_.labels))) should
          contain theSameElementsAs Seq(tags1 ++ commonLabels, tags2 ++ commonLabels)
      }
    }
  }
}
// Conversion of Kamon's built-in Akka actor metrics: actor entities get an extra
// "actor_name" label and per-instrument help text from DefaultPostprocessor.
"Akka actor metrics" which _have {
  // Registers an ActorMetrics entity and feeds its four instruments the given data.
  def akkaActor(actorName: String, tags: Map[String,String] = Map.empty, timeInMailboxValues: Seq[Long] = Seq.empty,
      processingTimeValues: Seq[Long] = Seq.empty, mailboxSizeChanges: Seq[Long] = Seq.empty,
      errorsCount: Long = 0): Entity = {
    val m = Kamon.metrics.entity(ActorMetrics, actorName, tags)
    timeInMailboxValues.foreach(m.timeInMailbox.record)
    processingTimeValues.foreach(m.processingTime.record)
    m.errors.increment(errorsCount)
    // Apply mailbox-size changes in chunks of five, refreshing between chunks.
    mailboxSizeChanges.grouped(5).foreach { changesChunk ⇒
      changesChunk.foreach(m.mailboxSize.increment)
      m.mailboxSize.refreshValues()
    }
    Entity(actorName, ActorMetrics.category, tags)
  }
  "a single actor" in {
    val actorName = "actor-system/user/an-actor"
    val labels = Map(KamonCategoryLabel → "akka-actor", KamonNameLabel → actorName, "actor_name" → actorName)
    forAll(
      PrometheusGen.instrumentLevels → "time in mailbox values",
      PrometheusGen.instrumentLevels → "processing time values",
      PrometheusGen.minMaxCounterChanges → "mailbox size changes",
      PrometheusGen.count → "errors count"
    ) { (timeInMailboxValues, processingTimeValues, mailboxSizeChanges, errorsCount) ⇒
      import DefaultPostprocessor._
      val entity = akkaActor(actorName,
        timeInMailboxValues = timeInMailboxValues,
        processingTimeValues = processingTimeValues,
        mailboxSizeChanges = mailboxSizeChanges,
        errorsCount = errorsCount)
      val tick = snapshotOf(entity)
      val result = converter(tick)
      // Expected values are read back from the snapshot to avoid recomputing buckets.
      val timeInMailboxValue = MetricValue.Histogram(tick.metrics(entity).histogram("time-in-mailbox").get)
      val processingTimeValue = MetricValue.Histogram(tick.metrics(entity).histogram("processing-time").get)
      val mailboxSizeValue = MetricValue.Histogram(tick.metrics(entity).minMaxCounter("mailbox-size").get)
      val errorsValue = MetricValue.Counter(tick.metrics(entity).counter("errors").get.count)
      result should contain theSameElementsAs Seq(
        MetricFamily("akka_actor_time_in_mailbox_nanoseconds", PrometheusType.Histogram,
          Some(AkkaActorTimeInMailboxHelp), Seq(Metric(timeInMailboxValue, end, labels))),
        MetricFamily("akka_actor_processing_time_nanoseconds", PrometheusType.Histogram,
          Some(AkkaActorProcessingTimeHelp), Seq(Metric(processingTimeValue, end, labels))),
        MetricFamily("akka_actor_mailbox_size", PrometheusType.Histogram, Some(AkkaActorMailboxSizeHelp),
          Seq(Metric(mailboxSizeValue, end, labels))),
        MetricFamily("akka_actor_errors", PrometheusType.Counter, Some(AkkaActorErrorsHelp),
          Seq(Metric(errorsValue, end, labels)))
      )
    }
  }
  // Two actors: each metric family carries one metric per actor, labelled accordingly.
  "multiple actors" in {
    import MetricValue.{Bucket ⇒ B}
    val actorName1 = "actor-system/user/actor-1"
    val actorName2 = "actor-system/user/actor-2"
    val labels1 = Map(KamonCategoryLabel → "akka-actor", KamonNameLabel → actorName1, "actor_name" → actorName1)
    val labels2 = Map(KamonCategoryLabel → "akka-actor", KamonNameLabel → actorName2, "actor_name" → actorName2)
    val histogram1 = MetricValue.Histogram(Seq(B(1, 1), B(Double.PositiveInfinity, 1)), 1, 1)
    val histogram2 = MetricValue.Histogram(Seq(B(2, 1), B(Double.PositiveInfinity, 1)), 1, 2)
    val minMaxCount1 = MetricValue.Histogram(Seq(B(0, 1), B(1, 6), B(Double.PositiveInfinity, 6)), 6, 5)
    val minMaxCount2 = MetricValue.Histogram(Seq(B(0, 1), B(2, 6), B(Double.PositiveInfinity, 6)), 6, 10)
    val entity1 = akkaActor(actorName1,
      timeInMailboxValues = Seq(1L),
      processingTimeValues = Seq(1L),
      mailboxSizeChanges = Seq(1L),
      errorsCount = 1)
    val entity2 = akkaActor(actorName2,
      timeInMailboxValues = Seq(2L),
      processingTimeValues = Seq(2L),
      mailboxSizeChanges = Seq(2L),
      errorsCount = 2)
    val tick = snapshotOf(entity1, entity2)
    val result = converter(tick)
    val timeInMailboxMetricFamily = result.find(_.name == "akka_actor_time_in_mailbox_nanoseconds")
    timeInMailboxMetricFamily shouldBe defined
    timeInMailboxMetricFamily.get.metrics should contain theSameElementsAs Seq(
      Metric(histogram1, end, labels1),
      Metric(histogram2, end, labels2))
    val processingTimeMetricFamily = result.find(_.name == "akka_actor_processing_time_nanoseconds")
    processingTimeMetricFamily shouldBe defined
    processingTimeMetricFamily.get.metrics should contain theSameElementsAs Seq(
      Metric(histogram1, end, labels1),
      Metric(histogram2, end, labels2))
    val mailboxSizeMetricFamily = result.find(_.name == "akka_actor_mailbox_size")
    mailboxSizeMetricFamily shouldBe defined
    mailboxSizeMetricFamily.get.metrics should contain theSameElementsAs Seq(
      Metric(minMaxCount1, end, labels1),
      Metric(minMaxCount2, end, labels2))
    val errorsMetricFamily = result.find(_.name == "akka_actor_errors")
    errorsMetricFamily shouldBe defined
    errorsMetricFamily.get.metrics should contain theSameElementsAs Seq(
      Metric(MetricValue.Counter(1), end, labels1),
      Metric(MetricValue.Counter(2), end, labels2))
  }
}
// Conversion of Akka dispatcher metrics for both executor flavours. Dispatcher entities
// get a "dispatcher_name" label and per-instrument help text from DefaultPostprocessor.
"Akka dispatcher metrics" which _have {
  import DefaultPostprocessor._
  "a fork join pool" in {
    val dispatcherName = "test-system/fork-join-pool"
    val forkJoinPool = new ForkJoinPool()
    val tags: Map[String, String] = Map("dispatcher-type" → "fork-join-pool")
    Kamon.metrics.entity(ForkJoinPoolMetrics.factory(forkJoinPool, AkkaDispatcherMetrics.Category),
      dispatcherName, tags)
    val entity = Entity(dispatcherName, "akka-dispatcher", tags)
    val tick = snapshotOf(entity)
    val result = converter(tick)
    // The "dispatcher-type" tag key is munged to "dispatcher_type" in the labels.
    val labels = Map(
      "dispatcher_type" → "fork-join-pool",
      KamonCategoryLabel → "akka-dispatcher",
      KamonNameLabel → dispatcherName,
      "dispatcher_name" → dispatcherName)
    val resultMap = result.map(mf => (mf.name, mf)).toMap
    // The queued-submission-count family may be absent; fall back to a sentinel timestamp.
    // NOTE(review): catching Throwable swallows fatal errors; a NonFatal (or Option-based
    // lookup on resultMap) would be preferable — left unchanged in this doc-only pass.
    val submissionTimestamp = try {
      resultMap("akka_fork_join_pool_dispatcher_queued_submission_count").metrics(0).timestamp
    } catch {
      case t: Throwable => MilliTimestamp(-1)
    }
    result should contain theSameElementsAs
      Seq(
        MetricFamily(
          "akka_fork_join_pool_dispatcher_parallelism",
          PrometheusType.Histogram,
          Some(AkkaForkJoinPoolDispatcherParallelismHelp),
          Seq(Metric(MetricValue.Histogram(tick.metrics(entity).minMaxCounter("parallelism").get), end, labels))),
        MetricFamily(
          "akka_fork_join_pool_dispatcher_pool_size",
          PrometheusType.Histogram,
          Some(AkkaForkJoinPoolDispatcherPoolSizeHelp),
          Seq(Metric(MetricValue.Histogram(tick.metrics(entity).gauge("pool-size").get), end, labels))),
        MetricFamily(
          "akka_fork_join_pool_dispatcher_active_threads",
          PrometheusType.Histogram,
          Some(AkkaForkJoinPoolDispatcherActiveThreadsHelp),
          Seq(Metric(MetricValue.Histogram(tick.metrics(entity).gauge("active-threads").get), end, labels))),
        MetricFamily(
          "akka_fork_join_pool_dispatcher_running_threads",
          PrometheusType.Histogram,
          Some(AkkaForkJoinPoolDispatcherRunningThreadsHelp),
          Seq(Metric(MetricValue.Histogram(tick.metrics(entity).gauge("running-threads").get), end, labels))),
        MetricFamily(
          "akka_fork_join_pool_dispatcher_queued_task_count",
          PrometheusType.Histogram,
          Some(AkkaForkJoinPoolDispatcherQueuedTaskCountHelp),
          Seq(Metric(MetricValue.Histogram(tick.metrics(entity).gauge("queued-task-count").get), end, labels))),
        MetricFamily(
          "akka_fork_join_pool_dispatcher_queued_submission_count",
          PrometheusType.Histogram,
          None,
          List(Metric(MetricValue.Histogram(List(MetricValue.Bucket(Double.PositiveInfinity, 0)), 0 , 0.0),
            submissionTimestamp, labels)))
      )
  }
  "a thread pool executor" in {
    val dispatcherName = "test-system/thread-pool-executor"
    val threadPoolExecutor = new ThreadPoolExecutor(2, 4, 10, TimeUnit.SECONDS,
      new ArrayBlockingQueue[Runnable](10))
    val tags: Map[String, String] = Map("dispatcher-type" → "thread-pool-executor")
    Kamon.metrics.entity(ThreadPoolExecutorMetrics.factory(threadPoolExecutor, AkkaDispatcherMetrics.Category),
      dispatcherName, tags)
    val entity = Entity(dispatcherName, "akka-dispatcher", tags)
    val tick = snapshotOf(entity)
    val result = converter(tick)
    val labels = Map(
      "dispatcher_type" → "thread-pool-executor",
      KamonCategoryLabel → "akka-dispatcher",
      KamonNameLabel → dispatcherName,
      "dispatcher_name" → dispatcherName)
    result should contain theSameElementsAs
      Seq(
        MetricFamily(
          "akka_thread_pool_executor_dispatcher_core_pool_size",
          PrometheusType.Histogram,
          Some(AkkaThreadPoolExecutorDispatcherCorePoolSizeHelp),
          Seq(Metric(MetricValue.Histogram(tick.metrics(entity).gauge("core-pool-size").get), end, labels))),
        MetricFamily(
          "akka_thread_pool_executor_dispatcher_max_pool_size",
          PrometheusType.Histogram,
          Some(AkkaThreadPoolExecutorDispatcherMaxPoolSizeHelp),
          Seq(Metric(MetricValue.Histogram(tick.metrics(entity).gauge("max-pool-size").get), end, labels))),
        MetricFamily(
          "akka_thread_pool_executor_dispatcher_pool_size",
          PrometheusType.Histogram,
          Some(AkkaThreadPoolExecutorDispatcherPoolSizeHelp),
          Seq(Metric(MetricValue.Histogram(tick.metrics(entity).gauge("pool-size").get), end, labels))),
        MetricFamily(
          "akka_thread_pool_executor_dispatcher_active_threads",
          PrometheusType.Histogram,
          Some(AkkaThreadPoolExecutorDispatcherActiveThreadsHelp),
          Seq(Metric(MetricValue.Histogram(tick.metrics(entity).gauge("active-threads").get), end, labels))),
        MetricFamily(
          "akka_thread_pool_executor_dispatcher_processed_tasks",
          PrometheusType.Histogram,
          Some(AkkaThreadPoolExecutorDispatcherProcessedTasksHelp),
          Seq(Metric(MetricValue.Histogram(tick.metrics(entity).gauge("processed-tasks").get), end, labels))))
  }
}
// Conversion of Akka router metrics: routers get a "router_name" label, three histogram
// families (nanosecond-suffixed) and one counter family, each with help text.
"Akka router metrics" in {
  val routerName = "test/user/my-router"
  forAll(
    PrometheusGen.instrumentLevels → "routing times",
    PrometheusGen.instrumentLevels → "times in mailbox",
    PrometheusGen.instrumentLevels → "processing times",
    PrometheusGen.count → "errors count"
  ) { (routingTimes, timesInMailbox, processingTimes, errorsCount) ⇒
    import DefaultPostprocessor._
    val routerMetrics = Kamon.metrics.entity(RouterMetrics, routerName)
    routingTimes.foreach(routerMetrics.routingTime.record)
    timesInMailbox.foreach(routerMetrics.timeInMailbox.record)
    processingTimes.foreach(routerMetrics.processingTime.record)
    routerMetrics.errors.increment(errorsCount)
    val entity = Entity(routerName, RouterMetrics.category)
    val tick = snapshotOf(entity)
    val result = converter(tick)
    val labels = Map(
      KamonCategoryLabel → "akka-router",
      KamonNameLabel → routerName,
      "router_name" → routerName)
    result should contain theSameElementsAs
      Seq(
        MetricFamily(
          "akka_router_routing_time_nanoseconds",
          PrometheusType.Histogram,
          Some(AkkaRouterRoutingTimeHelp),
          Seq(Metric(MetricValue.Histogram(tick.metrics(entity).histogram("routing-time").get), end, labels))),
        MetricFamily(
          "akka_router_time_in_mailbox_nanoseconds",
          PrometheusType.Histogram,
          Some(AkkaRouterTimeInMailboxHelp),
          Seq(Metric(MetricValue.Histogram(tick.metrics(entity).histogram("time-in-mailbox").get), end, labels))),
        MetricFamily(
          "akka_router_processing_time_nanoseconds",
          PrometheusType.Histogram,
          Some(AkkaRouterProcessingTimeHelp),
          Seq(Metric(MetricValue.Histogram(tick.metrics(entity).histogram("processing-time").get), end, labels))),
        MetricFamily(
          "akka_router_errors",
          PrometheusType.Counter,
          Some(AkkaRouterErrorsHelp),
          Seq(Metric(MetricValue.Counter(tick.metrics(entity).counter("errors").get.count), end, labels))))
  }
}
// Converter settings: labels configured under kamon.prometheus.labels are added to
// every metric, with label-name munging applied where needed.
"its settings" when {
  "additional labels are specified" in {
    val name = "counter"
    val count = 42
    val entity = counter(name, count)
    val tick = snapshotOf(entity)
    // Build a converter whose settings add the extra label foo=bar.
    val config = ConfigFactory.parseString("kamon.prometheus.labels.foo=\\"bar\\"")
      .withFallback(ConfigFactory.load())
    val converter = new SnapshotConverter(new PrometheusSettings(config))
    val result = converter(tick)
    result.loneElement shouldBe
      MetricFamily(name, PrometheusType.Counter, None,
        Seq(Metric(MetricValue.Counter(count), end,
          Map("foo" → "bar",
            KamonCategoryLabel → SingleInstrumentEntityRecorder.Counter,
            KamonNameLabel → name))))
  }
  "additional labels that require munging are specified" in {
    val name = "counter"
    val count = 42
    val entity = counter(name, count)
    val tick = snapshotOf(entity)
    // "needs-munging" is not a valid label name and must become "needs_munging".
    val config = ConfigFactory.parseString("kamon.prometheus.labels.needs-munging=\\"bar\\"")
      .withFallback(ConfigFactory.load())
    val converter = new SnapshotConverter(new PrometheusSettings(config))
    val result = converter(tick)
    result.loneElement shouldBe
      MetricFamily(name, PrometheusType.Counter, None,
        Seq(Metric(MetricValue.Counter(count), end,
          Map("needs_munging" → "bar",
            KamonCategoryLabel → SingleInstrumentEntityRecorder.Counter,
            KamonNameLabel → name))))
  }
}
}
}
// Companion holding the custom units of measurement used by the conversion tests.
object SnapshotConverterSpec {
  // A custom unit whose label ("J") is already a valid metric-name suffix.
  case object Joules extends UnitOfMeasurement {
    override type U = Joules.type
    override val name = "energy"
    override val label = "J"
    override protected def canScale(toUnit: UnitOfMeasurement): Boolean = toUnit.isInstanceOf[Joules.type]
  }
  // A custom unit whose label ("°C") must be munged before use in a metric name.
  case object Celsius extends UnitOfMeasurement {
    override type U = this.type
    override val name = "temperature"
    override val label = "°C"
    override protected def canScale(toUnit: UnitOfMeasurement): Boolean = toUnit.isInstanceOf[Celsius.type]
  }
}
| MonsantoCo/kamon-prometheus | library/src/test/scala/com/monsanto/arch/kamon/prometheus/converter/SnapshotConverterSpec.scala | Scala | bsd-3-clause | 58,786 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import scala.collection.JavaConverters._
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
/**
 * Helpers for working with ZooKeeper via Apache Curator: client construction and
 * simple znode maintenance used by Spark's standalone-deploy HA support.
 */
private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  /**
   * Creates and starts a [[CuratorFramework]] client for the ZooKeeper ensemble named
   * by `zkUrlConf` in the Spark configuration, using exponential-backoff retries.
   *
   * @param conf Spark configuration holding the ZooKeeper connection string
   * @param zkUrlConf config key under which the ZooKeeper URL is stored
   * @return a started Curator client; the caller is responsible for closing it
   */
  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val zkUrl = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(zkUrl,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  /**
   * Creates `path` (and any missing parents) if it does not already exist.
   * A concurrent creation by another client between the existence check and the
   * create call is tolerated.
   */
  def mkdir(zk: CuratorFramework, path: String): Unit = {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        // Lost the race with another creator: the node now exists, which is what we wanted.
        case _: KeeperException.NodeExistsException => // do nothing
      }
    }
  }

  /**
   * Deletes `path` and its immediate children, if the path exists.
   *
   * NOTE(review): despite the name, only one level of children is removed; a child
   * with its own children would make the delete fail. This matches the original
   * behavior and its known callers — confirm before deepening the recursion.
   */
  def deleteRecursive(zk: CuratorFramework, path: String): Unit = {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
| sh-cho/cshSpark | deploy/SparkCuratorUtil.scala | Scala | apache-2.0 | 2,384 |
// Monoid instance for Product: the identity is 1 and combination is multiplication.
implicit object MonoidProduct extends Monoid[Product] {
  override def mempty = Product(1)
  override def mappend(x: Product, y: Product) = Product(x.value * y.value)
}
| grzegorzbalcerek/scala-exercises | Monoid/stepMonoidProduct.scala | Scala | bsd-2-clause | 169 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.logical
import org.apache.flink.table.expressions.PlannerExpression
/**
 * A class implementing this interface can resolve the expressions of its parameters and
 * return a new instance with resolved parameters. This is necessary if expressions are
 * nested in an unsupported structure. By default, the validation of a logical node can
 * resolve common structures like `Expression`, `Option[Expression]`,
 * `Traversable[Expression]`.
 *
 * See also [[LogicalNode.expressionPostOrderTransform(scala.PartialFunction)]].
 *
 * @tparam T class whose expression parameters need to be resolved
 */
trait Resolvable[T <: AnyRef] {

  /**
   * An implementing class can resolve its expressions by applying the given resolver
   * function on its parameters.
   *
   * @param resolver function that can resolve an expression
   * @return class with resolved expression parameters
   */
  def resolveExpressions(resolver: (PlannerExpression) => PlannerExpression): T
}
| ueshin/apache-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/logical/Resolvable.scala | Scala | apache-2.0 | 1,819 |
package com.landoop.streamreactor.hive.it
import org.apache.kafka.clients.producer.ProducerRecord
import scala.concurrent.Future
import scala.io.Source
import scala.util.Try
// Ad-hoc benchmark: creates a topic, installs a Hive ORC sink task for it, produces
// 10M JSON person records, and periodically prints the insert throughput seen via Hive.
object HiveOrcBenchmark extends App with PersonTestData with HiveTests {

  import scala.concurrent.ExecutionContext.Implicits.global

  val start = System.currentTimeMillis()

  val topic = createTopic()

  // Load the sink-task definition template and bind its placeholders to the fresh topic.
  val taskDef = Source.fromInputStream(getClass.getResourceAsStream("/hive_sink_task_no_partitions-orc.json")).getLines().mkString("\\n")
    .replace("{{TOPIC}}", topic)
    .replace("{{TABLE}}", topic)
    .replace("{{NAME}}", topic)

  postTask(taskDef)

  // Producer side: writes 10M records, flushing every 100k to bound buffered memory.
  Future {
    val producer = stringStringProducer()
    val count = 10000000 // 10mil
    for (k <- 0 until count) {
      producer.send(new ProducerRecord(topic, JacksonSupport.mapper.writeValueAsString(person)))
      if (k % 100000 == 0) {
        println(s"Flushing records [total=$count]")
        producer.flush()
      }
    }
    producer.flush()
    producer.close()
  }

  // Monitor side: polls Hive every ~2s and prints cumulative records/second.
  // NOTE(review): failures inside Try (e.g. table not yet created) are silently
  // discarded, and stopTask(topic) is unreachable because it follows an infinite
  // `while (true)` loop — confirm whether the task is meant to be stopped at all.
  Future {
    while (true) {
      Try {
        Thread.sleep(2000)
        withConn { conn =>
          val stmt = conn.createStatement
          val rs = stmt.executeQuery(s"select count(*) from $topic")
          rs.next()
          val total = rs.getLong(1)
          val time = System.currentTimeMillis() - start
          println(s"Total $total in ${time}ms which is ${total / (time / 1000)} records per second")
        }
      }
    }
    stopTask(topic)
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.expressions
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.table.expressions.utils.ExpressionTestBase
import org.apache.flink.types.Row
import org.junit.{Assert, Test}
/**
 * Verifies that parser keywords (such as `asc`) are recognised as suffix operations,
 * while identifiers that merely contain a keyword are parsed as plain function calls.
 */
class KeywordParseTest extends ExpressionTestBase {

  /** Parses an expression and returns the resulting call's function name. */
  private def parsedFunctionName(expression: String): String =
    ExpressionParser.parseExpression(expression).asInstanceOf[Call].functionName

  @Test
  def testKeyword(): Unit = {
    val expected = Asc(UnresolvedFieldReference("f0"))
    // The keyword works both with and without empty parentheses.
    Assert.assertEquals(ExpressionParser.parseExpression("f0.asc"), expected)
    Assert.assertEquals(ExpressionParser.parseExpression("f0.asc()"), expected)
  }

  @Test
  def testKeywordAsPrefixInFunctionName(): Unit = {
    Assert.assertEquals(parsedFunctionName("f0.ascii()"), "ASCII")
  }

  @Test
  def testKeywordAsInfixInFunctionName(): Unit = {
    Assert.assertEquals(parsedFunctionName("f0.iiascii()"), "IIASCII")
  }

  @Test
  def testKeywordAsSuffixInFunctionName(): Unit = {
    Assert.assertEquals(parsedFunctionName("f0.iiasc()"), "IIASC")
  }

  override def testData: Any = new Row(0)

  override def typeInfo: TypeInformation[Any] =
    new RowTypeInfo().asInstanceOf[TypeInformation[Any]]
}
| mylog00/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/KeywordParseTest.scala | Scala | apache-2.0 | 2,198 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources
import java.io.File
import java.net.URI
import scala.util.Random
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.catalog.BucketSpec
import org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.physical.HashPartitioning
import org.apache.spark.sql.execution.{DataSourceScanExec, SortExec}
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec
import org.apache.spark.sql.execution.datasources.BucketingUtils
import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec
import org.apache.spark.sql.execution.joins.SortMergeJoinExec
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATION
import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils}
import org.apache.spark.util.Utils
import org.apache.spark.util.collection.BitSet
// Runs the bucketed-read suite against the in-memory (non-Hive) catalog implementation.
class BucketedReadWithoutHiveSupportSuite extends BucketedReadSuite with SharedSparkSession {
  protected override def beforeAll(): Unit = {
    super.beforeAll()
    // Skip the whole suite unless the session's catalog implementation is "in-memory".
    assume(spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "in-memory")
  }
}
abstract class BucketedReadSuite extends QueryTest with SQLTestUtils {
import testImplicits._
protected override def beforeAll(): Unit = {
  super.beforeAll()
  // These tests rely on the legacy output-ordering guarantee of bucketed table scans.
  spark.sessionState.conf.setConf(SQLConf.LEGACY_BUCKETED_TABLE_SCAN_OUTPUT_ORDERING, true)
}

protected override def afterAll(): Unit = {
  // Restore the default so other suites are unaffected.
  spark.sessionState.conf.unsetConf(SQLConf.LEGACY_BUCKETED_TABLE_SCAN_OUTPUT_ORDERING)
  super.afterAll()
}

// Moduli used when generating the i and j columns of the test data.
private val maxI = 5
private val maxJ = 13
// 50 rows: i cycles 0..maxI-1, j cycles 0..maxJ-1, k is the row index as a string.
private lazy val df = (0 until 50).map(i => (i % maxI, i % maxJ, i.toString)).toDF("i", "j", "k")
// Like df, but column j is a nullable string drawn from a fixed pool (including nulls).
private lazy val nullDF = (for {
  i <- 0 to 50
  s <- Seq(null, "a", "b", "c", "d", "e", "f", null, "g")
} yield (i % maxI, s, i % maxJ)).toDF("i", "j", "k")

// number of buckets that doesn't yield empty buckets when bucketing on column j on df/nullDF
// empty buckets before filtering might hide bugs in pruning logic
private val NumBucketsForPruningDF = 7
private val NumBucketsForPruningNullDf = 5
  test("read bucketed data") {
    withTable("bucketed_table") {
      df.write
        .format("parquet")
        .partitionBy("i")
        .bucketBy(8, "j", "k")
        .saveAsTable("bucketed_table")

      // Read one random partition back and verify every RDD partition only holds rows
      // whose recomputed bucket id equals that partition's index.
      val bucketValue = Random.nextInt(maxI)
      val table = spark.table("bucketed_table").filter($"i" === bucketValue)
      val query = table.queryExecution
      val output = query.analyzed.output
      val rdd = query.toRdd

      assert(rdd.partitions.length == 8)

      val attrs = table.select("j", "k").queryExecution.analyzed.output
      val checkBucketId = rdd.mapPartitionsWithIndex((index, rows) => {
        // Recompute the bucket id the writer would have assigned to each row.
        val getBucketId = UnsafeProjection.create(
          HashPartitioning(attrs, 8).partitionIdExpression :: Nil,
          output)
        rows.map(row => getBucketId(row).getInt(0) -> index)
      })
      checkBucketId.collect().foreach(r => assert(r._1 == r._2))
    }
  }
  // To verify if the bucket pruning works, this function checks two conditions:
  //   1) Check if the pruned buckets (before filtering) are empty.
  //   2) Verify the final result is the same as the expected one
  private def checkPrunedAnswers(
      bucketSpec: BucketSpec,
      bucketValues: Seq[Integer],
      filterCondition: Column,
      originalDataFrame: DataFrame): Unit = {
    // This test verifies parts of the plan. Disable whole stage codegen.
    withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
      val bucketedDataFrame = spark.table("bucketed_table").select("i", "j", "k")
      val BucketSpec(numBuckets, bucketColumnNames, _) = bucketSpec
      // Limit: bucket pruning only works when the bucket column has one and only one column
      assert(bucketColumnNames.length == 1)
      val bucketColumnIndex = bucketedDataFrame.schema.fieldIndex(bucketColumnNames.head)
      val bucketColumn = bucketedDataFrame.schema.toAttributes(bucketColumnIndex)

      // Filter could hide the bug in bucket pruning. Thus, skipping all the filters
      val plan = bucketedDataFrame.filter(filterCondition).queryExecution.executedPlan
      val rdd = plan.find(_.isInstanceOf[DataSourceScanExec])
      assert(rdd.isDefined, plan)

      // if nothing should be pruned, skip the pruning test
      if (bucketValues.nonEmpty) {
        // Bucket ids that are allowed to survive the pruning filter.
        val matchedBuckets = new BitSet(numBuckets)
        bucketValues.foreach { value =>
          matchedBuckets.set(BucketingUtils.getBucketIdFromValue(bucketColumn, numBuckets, value))
        }

        val invalidBuckets = rdd.get.execute().mapPartitionsWithIndex { case (index, iter) =>
          // return indexes of partitions that should have been pruned and are not empty
          if (!matchedBuckets.get(index % numBuckets) && iter.nonEmpty) {
            Iterator(index)
          } else {
            Iterator()
          }
        }.collect()

        if (invalidBuckets.nonEmpty) {
          fail(s"Buckets ${invalidBuckets.mkString(",")} should have been pruned from:\\n$plan")
        }
      }

      // Regardless of pruning, the filtered result must match the original data frame.
      checkAnswer(
        bucketedDataFrame.filter(filterCondition).orderBy("i", "j", "k"),
        originalDataFrame.filter(filterCondition).orderBy("i", "j", "k"))
    }
  }
  // Each supported pushed-down predicate shape (EqualTo / EqualNullSafe / In / InSet)
  // should prune the scan down to exactly the matching buckets.
  test("read partitioning bucketed tables with bucket pruning filters") {
    withTable("bucketed_table") {
      val numBuckets = NumBucketsForPruningDF
      val bucketSpec = BucketSpec(numBuckets, Seq("j"), Nil)
      // json does not support predicate push-down, and thus json is used here
      df.write
        .format("json")
        .partitionBy("i")
        .bucketBy(numBuckets, "j")
        .saveAsTable("bucketed_table")

      val bucketValue = Random.nextInt(maxJ)
      // Case 1: EqualTo
      checkPrunedAnswers(
        bucketSpec,
        bucketValues = bucketValue :: Nil,
        filterCondition = $"j" === bucketValue,
        df)

      // Case 2: EqualNullSafe
      checkPrunedAnswers(
        bucketSpec,
        bucketValues = bucketValue :: Nil,
        filterCondition = $"j" <=> bucketValue,
        df)

      // Case 3: In
      checkPrunedAnswers(
        bucketSpec,
        bucketValues = Seq(bucketValue, bucketValue + 1, bucketValue + 2, bucketValue + 3),
        filterCondition = $"j".isin(bucketValue, bucketValue + 1, bucketValue + 2, bucketValue + 3),
        df)

      // Case 4: InSet
      val inSetExpr = expressions.InSet($"j".expr,
        Set(bucketValue, bucketValue + 1, bucketValue + 2, bucketValue + 3).map(lit(_).expr))
      checkPrunedAnswers(
        bucketSpec,
        bucketValues = Seq(bucketValue, bucketValue + 1, bucketValue + 2, bucketValue + 3),
        filterCondition = Column(inSetExpr),
        df)
    }
  }
  // Pruning must also work when the table is bucketed but NOT partitioned.
  test("read non-partitioning bucketed tables with bucket pruning filters") {
    withTable("bucketed_table") {
      val numBuckets = NumBucketsForPruningDF
      val bucketSpec = BucketSpec(numBuckets, Seq("j"), Nil)
      // json does not support predicate push-down, and thus json is used here
      df.write
        .format("json")
        .bucketBy(numBuckets, "j")
        .saveAsTable("bucketed_table")

      val bucketValue = Random.nextInt(maxJ)
      checkPrunedAnswers(
        bucketSpec,
        bucketValues = bucketValue :: Nil,
        filterCondition = $"j" === bucketValue,
        df)
    }
  }
  // Null bucket values: both `isNull` and the null-safe equality `<=> null`
  // must prune down to the single bucket that holds the nulls.
  test("read partitioning bucketed tables having null in bucketing key") {
    withTable("bucketed_table") {
      val numBuckets = NumBucketsForPruningNullDf
      val bucketSpec = BucketSpec(numBuckets, Seq("j"), Nil)
      // json does not support predicate push-down, and thus json is used here
      nullDF.write
        .format("json")
        .partitionBy("i")
        .bucketBy(numBuckets, "j")
        .saveAsTable("bucketed_table")

      // Case 1: isNull
      checkPrunedAnswers(
        bucketSpec,
        bucketValues = null :: Nil,
        filterCondition = $"j".isNull,
        nullDF)

      // Case 2: <=> null
      checkPrunedAnswers(
        bucketSpec,
        bucketValues = null :: Nil,
        filterCondition = $"j" <=> null,
        nullDF)
    }
  }
  // Pruning with AND/OR combinations: AND with a non-bucket predicate keeps the pruning;
  // OR of two bucket values prunes to both; OR with a non-bucket predicate disables pruning.
  test("read partitioning bucketed tables having composite filters") {
    withTable("bucketed_table") {
      val numBuckets = NumBucketsForPruningDF
      val bucketSpec = BucketSpec(numBuckets, Seq("j"), Nil)
      // json does not support predicate push-down, and thus json is used here
      df.write
        .format("json")
        .partitionBy("i")
        .bucketBy(numBuckets, "j")
        .saveAsTable("bucketed_table")

      val bucketValue = Random.nextInt(maxJ)
      checkPrunedAnswers(
        bucketSpec,
        bucketValues = bucketValue :: Nil,
        filterCondition = $"j" === bucketValue && $"k" > $"j",
        df)

      checkPrunedAnswers(
        bucketSpec,
        bucketValues = bucketValue :: Nil,
        filterCondition = $"j" === bucketValue && $"i" > bucketValue % 5,
        df)

      // check multiple bucket values OR condition
      checkPrunedAnswers(
        bucketSpec,
        bucketValues = Seq(bucketValue, bucketValue + 1),
        filterCondition = $"j" === bucketValue || $"j" === (bucketValue + 1),
        df)

      // check bucket value and none bucket value OR condition
      checkPrunedAnswers(
        bucketSpec,
        bucketValues = Nil,
        filterCondition = $"j" === bucketValue || $"i" === 0,
        df)

      // check AND condition in complex expression
      checkPrunedAnswers(
        bucketSpec,
        bucketValues = Seq(bucketValue),
        filterCondition = ($"i" === 0 || $"k" > $"j") && $"j" === bucketValue,
        df)
    }
  }
  test("read bucketed table without filters") {
    withTable("bucketed_table") {
      val numBuckets = NumBucketsForPruningDF
      val bucketSpec = BucketSpec(numBuckets, Seq("j"), Nil)
      // json does not support predicate push-down, and thus json is used here
      df.write
        .format("json")
        .bucketBy(numBuckets, "j")
        .saveAsTable("bucketed_table")

      val bucketedDataFrame = spark.table("bucketed_table").select("i", "j", "k")
      val plan = bucketedDataFrame.queryExecution.executedPlan
      val rdd = plan.find(_.isInstanceOf[DataSourceScanExec])
      assert(rdd.isDefined, plan)

      // Without a filter no bucket may be pruned: every partition must be non-empty
      // (the fixture was chosen so that no bucket is empty to begin with).
      val emptyBuckets = rdd.get.execute().mapPartitionsWithIndex { case (index, iter) =>
        // return indexes of empty partitions
        if (iter.isEmpty) {
          Iterator(index)
        } else {
          Iterator()
        }
      }.collect()
      if (emptyBuckets.nonEmpty) {
        fail(s"Buckets ${emptyBuckets.mkString(",")} should not have been pruned from:\\n$plan")
      }
      checkAnswer(
        bucketedDataFrame.orderBy("i", "j", "k"),
        df.orderBy("i", "j", "k"))
    }
  }
  // Fixture frames for the join tests; aliased so result columns can be disambiguated.
  private lazy val df1 =
    (0 until 50).map(i => (i % 5, i % 13, i.toString)).toDF("i", "j", "k").as("df1")
  private lazy val df2 =
    (0 until 50).map(i => (i % 7, i % 11, i.toString)).toDF("i", "j", "k").as("df2")

  // Per-side expectation for `testBucketing`: how the table is written (bucket spec,
  // partition count) and whether the join plan should contain a shuffle / sort on that side.
  case class BucketedTableTestSpec(
      bucketSpec: Option[BucketSpec],
      numPartitions: Int = 10,
      expectedShuffle: Boolean = true,
      expectedSort: Boolean = true)
  /**
   * A helper method to test the bucket read functionality using join. It will save `df1` and `df2`
   * to hive tables, bucketed or not, according to the given bucket specifics. Next we will join
   * these 2 tables, and firstly make sure the answer is correct, and then check if the shuffle
   * exists as the user expected according to the `shuffleLeft` and `shuffleRight`.
   */
  private def testBucketing(
      bucketedTableTestSpecLeft: BucketedTableTestSpec,
      bucketedTableTestSpecRight: BucketedTableTestSpec,
      joinType: String = "inner",
      joinCondition: (DataFrame, DataFrame) => Column): Unit = {
    val BucketedTableTestSpec(bucketSpecLeft, numPartitionsLeft, shuffleLeft, sortLeft) =
      bucketedTableTestSpecLeft
    val BucketedTableTestSpec(bucketSpecRight, numPartitionsRight, shuffleRight, sortRight) =
      bucketedTableTestSpecRight

    withTable("bucketed_table1", "bucketed_table2") {
      // Applies the optional bucket (and sort) spec to the writer.
      def withBucket(
          writer: DataFrameWriter[Row],
          bucketSpec: Option[BucketSpec]): DataFrameWriter[Row] = {
        bucketSpec.map { spec =>
          writer.bucketBy(
            spec.numBuckets,
            spec.bucketColumnNames.head,
            spec.bucketColumnNames.tail: _*)

          if (spec.sortColumnNames.nonEmpty) {
            writer.sortBy(
              spec.sortColumnNames.head,
              spec.sortColumnNames.tail: _*
            )
          } else {
            writer
          }
        }.getOrElse(writer)
      }

      withBucket(df1.repartition(numPartitionsLeft).write.format("parquet"), bucketSpecLeft)
        .saveAsTable("bucketed_table1")
      withBucket(df2.repartition(numPartitionsRight).write.format("parquet"), bucketSpecRight)
        .saveAsTable("bucketed_table2")

      withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "0",
        SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
        val t1 = spark.table("bucketed_table1")
        val t2 = spark.table("bucketed_table2")
        val joined = t1.join(t2, joinCondition(t1, t2), joinType)

        // First check the result is correct.
        checkAnswer(
          joined.sort("bucketed_table1.k", "bucketed_table2.k"),
          df1.join(df2, joinCondition(df1, df2), joinType).sort("df1.k", "df2.k"))

        // Under adaptive execution the SortMergeJoin sits inside the AQE plan wrapper.
        val joinOperator = if (joined.sqlContext.conf.adaptiveExecutionEnabled) {
          val executedPlan =
            joined.queryExecution.executedPlan.asInstanceOf[AdaptiveSparkPlanExec].executedPlan
          assert(executedPlan.isInstanceOf[SortMergeJoinExec])
          executedPlan.asInstanceOf[SortMergeJoinExec]
        } else {
          val executedPlan = joined.queryExecution.executedPlan
          assert(executedPlan.isInstanceOf[SortMergeJoinExec])
          executedPlan.asInstanceOf[SortMergeJoinExec]
        }

        // check existence of shuffle
        assert(
          joinOperator.left.find(_.isInstanceOf[ShuffleExchangeExec]).isDefined == shuffleLeft,
          s"expected shuffle in plan to be $shuffleLeft but found\\n${joinOperator.left}")
        assert(
          joinOperator.right.find(_.isInstanceOf[ShuffleExchangeExec]).isDefined == shuffleRight,
          s"expected shuffle in plan to be $shuffleRight but found\\n${joinOperator.right}")

        // check existence of sort
        assert(
          joinOperator.left.find(_.isInstanceOf[SortExec]).isDefined == sortLeft,
          s"expected sort in the left child to be $sortLeft but found\\n${joinOperator.left}")
        assert(
          joinOperator.right.find(_.isInstanceOf[SortExec]).isDefined == sortRight,
          s"expected sort in the right child to be $sortRight but found\\n${joinOperator.right}")
      }
    }
  }
private def joinCondition(joinCols: Seq[String]) (left: DataFrame, right: DataFrame): Column = {
joinCols.map(col => left(col) === right(col)).reduce(_ && _)
}
  // Both sides bucketed identically on the join keys: no shuffle on either side.
  test("avoid shuffle when join 2 bucketed tables") {
    val bucketSpec = Some(BucketSpec(8, Seq("i", "j"), Nil))
    val bucketedTableTestSpecLeft = BucketedTableTestSpec(bucketSpec, expectedShuffle = false)
    val bucketedTableTestSpecRight = BucketedTableTestSpec(bucketSpec, expectedShuffle = false)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpecLeft,
      bucketedTableTestSpecRight = bucketedTableTestSpecRight,
      joinCondition = joinCondition(Seq("i", "j"))
    )
  }

  // Enable it after fix https://issues.apache.org/jira/browse/SPARK-12704
  ignore("avoid shuffle when join keys are a super-set of bucket keys") {
    val bucketSpec = Some(BucketSpec(8, Seq("i"), Nil))
    val bucketedTableTestSpecLeft = BucketedTableTestSpec(bucketSpec, expectedShuffle = false)
    val bucketedTableTestSpecRight = BucketedTableTestSpec(bucketSpec, expectedShuffle = false)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpecLeft,
      bucketedTableTestSpecRight = bucketedTableTestSpecRight,
      joinCondition = joinCondition(Seq("i", "j"))
    )
  }

  // Only the non-bucketed side needs a shuffle.
  test("only shuffle one side when join bucketed table and non-bucketed table") {
    val bucketSpec = Some(BucketSpec(8, Seq("i", "j"), Nil))
    val bucketedTableTestSpecLeft = BucketedTableTestSpec(bucketSpec, expectedShuffle = false)
    val bucketedTableTestSpecRight = BucketedTableTestSpec(None, expectedShuffle = true)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpecLeft,
      bucketedTableTestSpecRight = bucketedTableTestSpecRight,
      joinCondition = joinCondition(Seq("i", "j"))
    )
  }

  // Mismatched bucket counts: the side with fewer buckets is re-shuffled.
  test("only shuffle one side when 2 bucketed tables have different bucket number") {
    val bucketSpecLeft = Some(BucketSpec(8, Seq("i", "j"), Nil))
    val bucketSpecRight = Some(BucketSpec(5, Seq("i", "j"), Nil))
    val bucketedTableTestSpecLeft = BucketedTableTestSpec(bucketSpecLeft, expectedShuffle = false)
    val bucketedTableTestSpecRight = BucketedTableTestSpec(bucketSpecRight, expectedShuffle = true)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpecLeft,
      bucketedTableTestSpecRight = bucketedTableTestSpecRight,
      joinCondition = joinCondition(Seq("i", "j"))
    )
  }

  // The side whose bucket keys don't match the join keys is re-shuffled.
  test("only shuffle one side when 2 bucketed tables have different bucket keys") {
    val bucketSpecLeft = Some(BucketSpec(8, Seq("i"), Nil))
    val bucketSpecRight = Some(BucketSpec(8, Seq("j"), Nil))
    val bucketedTableTestSpecLeft = BucketedTableTestSpec(bucketSpecLeft, expectedShuffle = false)
    val bucketedTableTestSpecRight = BucketedTableTestSpec(bucketSpecRight, expectedShuffle = true)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpecLeft,
      bucketedTableTestSpecRight = bucketedTableTestSpecRight,
      joinCondition = joinCondition(Seq("i"))
    )
  }

  test("shuffle when join keys are not equal to bucket keys") {
    val bucketSpec = Some(BucketSpec(8, Seq("i"), Nil))
    val bucketedTableTestSpecLeft = BucketedTableTestSpec(bucketSpec, expectedShuffle = true)
    val bucketedTableTestSpecRight = BucketedTableTestSpec(bucketSpec, expectedShuffle = true)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpecLeft,
      bucketedTableTestSpecRight = bucketedTableTestSpecRight,
      joinCondition = joinCondition(Seq("j"))
    )
  }

  // With bucketing disabled via conf, bucket metadata must be ignored entirely.
  test("shuffle when join 2 bucketed tables with bucketing disabled") {
    val bucketSpec = Some(BucketSpec(8, Seq("i", "j"), Nil))
    val bucketedTableTestSpecLeft = BucketedTableTestSpec(bucketSpec, expectedShuffle = true)
    val bucketedTableTestSpecRight = BucketedTableTestSpec(bucketSpec, expectedShuffle = true)
    withSQLConf(SQLConf.BUCKETING_ENABLED.key -> "false") {
      testBucketing(
        bucketedTableTestSpecLeft = bucketedTableTestSpecLeft,
        bucketedTableTestSpecRight = bucketedTableTestSpecRight,
        joinCondition = joinCondition(Seq("i", "j"))
      )
    }
  }
  test("check sort and shuffle when bucket and sort columns are join keys") {
    // In case of bucketing, its possible to have multiple files belonging to the
    // same bucket in a given relation. Each of these files are locally sorted
    // but those files combined together are not globally sorted. Given that,
    // the RDD partition will not be sorted even if the relation has sort columns set
    // Therefore, we still need to keep the Sort in both sides.
    val bucketSpec = Some(BucketSpec(8, Seq("i", "j"), Seq("i", "j")))

    // 50 partitions -> multiple files per bucket -> sort still required on that side;
    // 1 partition -> one file per bucket -> sort can be elided.
    val bucketedTableTestSpecLeft1 = BucketedTableTestSpec(
      bucketSpec, numPartitions = 50, expectedShuffle = false, expectedSort = true)
    val bucketedTableTestSpecRight1 = BucketedTableTestSpec(
      bucketSpec, numPartitions = 1, expectedShuffle = false, expectedSort = false)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpecLeft1,
      bucketedTableTestSpecRight = bucketedTableTestSpecRight1,
      joinCondition = joinCondition(Seq("i", "j"))
    )

    val bucketedTableTestSpecLeft2 = BucketedTableTestSpec(
      bucketSpec, numPartitions = 1, expectedShuffle = false, expectedSort = false)
    val bucketedTableTestSpecRight2 = BucketedTableTestSpec(
      bucketSpec, numPartitions = 50, expectedShuffle = false, expectedSort = true)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpecLeft2,
      bucketedTableTestSpecRight = bucketedTableTestSpecRight2,
      joinCondition = joinCondition(Seq("i", "j"))
    )

    val bucketedTableTestSpecLeft3 = BucketedTableTestSpec(
      bucketSpec, numPartitions = 50, expectedShuffle = false, expectedSort = true)
    val bucketedTableTestSpecRight3 = BucketedTableTestSpec(
      bucketSpec, numPartitions = 50, expectedShuffle = false, expectedSort = true)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpecLeft3,
      bucketedTableTestSpecRight = bucketedTableTestSpecRight3,
      joinCondition = joinCondition(Seq("i", "j"))
    )

    val bucketedTableTestSpecLeft4 = BucketedTableTestSpec(
      bucketSpec, numPartitions = 1, expectedShuffle = false, expectedSort = false)
    val bucketedTableTestSpecRight4 = BucketedTableTestSpec(
      bucketSpec, numPartitions = 1, expectedShuffle = false, expectedSort = false)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpecLeft4,
      bucketedTableTestSpecRight = bucketedTableTestSpecRight4,
      joinCondition = joinCondition(Seq("i", "j"))
    )
  }

  // Sorting by (join keys ++ extra columns) still satisfies the required ordering.
  test("avoid shuffle and sort when sort columns are a super set of join keys") {
    val bucketSpecLeft = Some(BucketSpec(8, Seq("i"), Seq("i", "j")))
    val bucketSpecRight = Some(BucketSpec(8, Seq("i"), Seq("i", "k")))
    val bucketedTableTestSpecLeft = BucketedTableTestSpec(
      bucketSpecLeft, numPartitions = 1, expectedShuffle = false, expectedSort = false)
    val bucketedTableTestSpecRight = BucketedTableTestSpec(
      bucketSpecRight, numPartitions = 1, expectedShuffle = false, expectedSort = false)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpecLeft,
      bucketedTableTestSpecRight = bucketedTableTestSpecRight,
      joinCondition = joinCondition(Seq("i"))
    )
  }

  test("only sort one side when sort columns are different") {
    val bucketSpecLeft = Some(BucketSpec(8, Seq("i", "j"), Seq("i", "j")))
    val bucketSpecRight = Some(BucketSpec(8, Seq("i", "j"), Seq("k")))
    val bucketedTableTestSpecLeft = BucketedTableTestSpec(
      bucketSpecLeft, numPartitions = 1, expectedShuffle = false, expectedSort = false)
    val bucketedTableTestSpecRight = BucketedTableTestSpec(
      bucketSpecRight, numPartitions = 1, expectedShuffle = false, expectedSort = true)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpecLeft,
      bucketedTableTestSpecRight = bucketedTableTestSpecRight,
      joinCondition = joinCondition(Seq("i", "j"))
    )
  }

  test("only sort one side when sort columns are same but their ordering is different") {
    val bucketSpecLeft = Some(BucketSpec(8, Seq("i", "j"), Seq("i", "j")))
    val bucketSpecRight = Some(BucketSpec(8, Seq("i", "j"), Seq("j", "i")))
    val bucketedTableTestSpecLeft = BucketedTableTestSpec(
      bucketSpecLeft, numPartitions = 1, expectedShuffle = false, expectedSort = false)
    val bucketedTableTestSpecRight = BucketedTableTestSpec(
      bucketSpecRight, numPartitions = 1, expectedShuffle = false, expectedSort = true)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpecLeft,
      bucketedTableTestSpecRight = bucketedTableTestSpecRight,
      joinCondition = joinCondition(Seq("i", "j"))
    )
  }
  // Aggregation on the bucket keys can reuse the table's hash partitioning.
  test("avoid shuffle when grouping keys are equal to bucket keys") {
    withTable("bucketed_table") {
      df1.write.format("parquet").bucketBy(8, "i", "j").saveAsTable("bucketed_table")
      val tbl = spark.table("bucketed_table")
      val agged = tbl.groupBy("i", "j").agg(max("k"))

      checkAnswer(
        agged.sort("i", "j"),
        df1.groupBy("i", "j").agg(max("k")).sort("i", "j"))

      assert(agged.queryExecution.executedPlan.find(_.isInstanceOf[ShuffleExchangeExec]).isEmpty)
    }
  }

  // Bucketing info must survive through a view (SubqueryAlias) wrapper.
  test("bucket join should work with SubqueryAlias plan") {
    withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "0") {
      withTable("t") {
        withView("v") {
          spark.range(20).selectExpr("id as i").write.bucketBy(8, "i").saveAsTable("t")
          sql("CREATE VIEW v AS SELECT * FROM t").collect()

          val plan = sql("SELECT * FROM t a JOIN v b ON a.i = b.i").queryExecution.executedPlan
          assert(plan.collect { case exchange: ShuffleExchangeExec => exchange }.isEmpty)
        }
      }
    }
  }

  // Grouping by (bucket keys ++ extras) is still partitioned compatibly: no shuffle.
  test("avoid shuffle when grouping keys are a super-set of bucket keys") {
    withTable("bucketed_table") {
      df1.write.format("parquet").bucketBy(8, "i").saveAsTable("bucketed_table")
      val tbl = spark.table("bucketed_table")
      val agged = tbl.groupBy("i", "j").agg(max("k"))

      checkAnswer(
        agged.sort("i", "j"),
        df1.groupBy("i", "j").agg(max("k")).sort("i", "j"))

      assert(agged.queryExecution.executedPlan.find(_.isInstanceOf[ShuffleExchangeExec]).isEmpty)
    }
  }

  test("SPARK-17698 Join predicates should not contain filter clauses") {
    val bucketSpec = Some(BucketSpec(8, Seq("i"), Seq("i")))
    val bucketedTableTestSpecLeft = BucketedTableTestSpec(
      bucketSpec, numPartitions = 1, expectedShuffle = false, expectedSort = false)
    val bucketedTableTestSpecRight = BucketedTableTestSpec(
      bucketSpec, numPartitions = 1, expectedShuffle = false, expectedSort = false)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpecLeft,
      bucketedTableTestSpecRight = bucketedTableTestSpecRight,
      joinType = "fullouter",
      joinCondition = (left: DataFrame, right: DataFrame) => {
        // Mix equi-join predicates with per-side filter clauses in one condition.
        val joinPredicates = Seq("i").map(col => left(col) === right(col)).reduce(_ && _)
        val filterLeft = left("i") === Literal("1")
        val filterRight = right("i") === Literal("1")
        joinPredicates && filterLeft && filterRight
      }
    )
  }
  test("SPARK-19122 Re-order join predicates if they match with the child's output partitioning") {
    val bucketedTableTestSpec = BucketedTableTestSpec(
      Some(BucketSpec(8, Seq("i", "j", "k"), Seq("i", "j", "k"))),
      numPartitions = 1,
      expectedShuffle = false,
      expectedSort = false)

    // If the set of join columns is equal to the set of bucketed + sort columns, then
    // the order of join keys in the query should not matter and there should not be any shuffle
    // and sort added in the query plan
    Seq(
      Seq("i", "j", "k"),
      Seq("i", "k", "j"),
      Seq("j", "k", "i"),
      Seq("j", "i", "k"),
      Seq("k", "j", "i"),
      Seq("k", "i", "j")
    ).foreach(joinKeys => {
      testBucketing(
        bucketedTableTestSpecLeft = bucketedTableTestSpec,
        bucketedTableTestSpecRight = bucketedTableTestSpec,
        joinCondition = joinCondition(joinKeys)
      )
    })
  }

  test("SPARK-19122 No re-ordering should happen if set of join columns != set of child's " +
    "partitioning columns") {

    // join predicates is a super set of child's partitioning columns
    val bucketedTableTestSpec1 =
      BucketedTableTestSpec(Some(BucketSpec(8, Seq("i", "j"), Seq("i", "j"))), numPartitions = 1)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpec1,
      bucketedTableTestSpecRight = bucketedTableTestSpec1,
      joinCondition = joinCondition(Seq("i", "j", "k"))
    )

    // child's partitioning columns is a super set of join predicates
    val bucketedTableTestSpec2 =
      BucketedTableTestSpec(Some(BucketSpec(8, Seq("i", "j", "k"), Seq("i", "j", "k"))),
        numPartitions = 1)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpec2,
      bucketedTableTestSpecRight = bucketedTableTestSpec2,
      joinCondition = joinCondition(Seq("i", "j"))
    )

    // set of child's partitioning columns != set join predicates (despite the lengths of the
    // sets are same)
    val bucketedTableTestSpec3 =
      BucketedTableTestSpec(Some(BucketSpec(8, Seq("i", "j"), Seq("i", "j"))), numPartitions = 1)
    testBucketing(
      bucketedTableTestSpecLeft = bucketedTableTestSpec3,
      bucketedTableTestSpecRight = bucketedTableTestSpec3,
      joinCondition = joinCondition(Seq("j", "k"))
    )
  }
  // Nested join where the inner join's output partitioning is not yet decided when
  // predicates are re-ordered; must still produce the correct answer.
  test("SPARK-22042 ReorderJoinPredicates can break when child's partitioning is not decided") {
    withTable("bucketed_table", "table1", "table2") {
      df.write.format("parquet").saveAsTable("table1")
      df.write.format("parquet").saveAsTable("table2")
      df.write.format("parquet").bucketBy(8, "j", "k").saveAsTable("bucketed_table")

      withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "0") {
        checkAnswer(
          sql("""
            |SELECT ab.i, ab.j, ab.k, c.i, c.j, c.k
            |FROM (
            |  SELECT a.i, a.j, a.k
            |  FROM bucketed_table a
            |  JOIN table1 b
            |  ON a.i = b.i
            |) ab
            |JOIN table2 c
            |ON ab.i = c.i
            """.stripMargin),
          sql("""
            |SELECT a.i, a.j, a.k, c.i, c.j, c.k
            |FROM bucketed_table a
            |JOIN table1 b
            |ON a.i = b.i
            |JOIN table2 c
            |ON a.i = c.i
            """.stripMargin))
      }
    }
  }

  // Replace the table's data files with non-bucketed files and expect a clear error.
  test("error if there exists any malformed bucket files") {
    withTable("bucketed_table") {
      df1.write.format("parquet").bucketBy(8, "i").saveAsTable("bucketed_table")
      val warehouseFilePath = new URI(spark.sessionState.conf.warehousePath).getPath
      val tableDir = new File(warehouseFilePath, "bucketed_table")
      Utils.deleteRecursively(tableDir)
      df1.write.parquet(tableDir.getAbsolutePath)

      val agged = spark.table("bucketed_table").groupBy("i").count()
      val error = intercept[Exception] {
        agged.count()
      }

      assert(error.getCause().toString contains "Invalid bucket file")
    }
  }

  test("disable bucketing when the output doesn't contain all bucketing columns") {
    withTable("bucketed_table") {
      df1.write.format("parquet").bucketBy(8, "i").saveAsTable("bucketed_table")

      checkAnswer(spark.table("bucketed_table").select("j"), df1.select("j"))

      checkAnswer(spark.table("bucketed_table").groupBy("j").agg(max("k")),
        df1.groupBy("j").agg(max("k")))
    }
  }

  //  A test with a partition where the number of files in the partition is
  //  large. tests for the condition where the serialization of such a task may result in a stack
  //  overflow if the files list is stored in a recursive data structure
  //  This test is ignored because it takes long to run (~3 min)
  ignore("SPARK-27100 stack overflow: read data with large partitions") {
    val nCount = 20000
    // reshuffle data so that many small files are created
    val nShufflePartitions = 10000
    // and with one table partition, should result in 10000 files in one partition
    val nPartitions = 1
    val nBuckets = 2
    val dfPartitioned = (0 until nCount)
      .map(i => (i % nPartitions, i % nBuckets, i.toString)).toDF("i", "j", "k")

    // non-bucketed tables. This part succeeds without the fix for SPARK-27100
    try {
      withTable("non_bucketed_table") {
        dfPartitioned.repartition(nShufflePartitions)
          .write
          .format("parquet")
          .partitionBy("i")
          .saveAsTable("non_bucketed_table")

        val table = spark.table("non_bucketed_table")
        val nValues = table.select("j", "k").count()
        assert(nValues == nCount)
      }
    } catch {
      case e: Exception => fail("Failed due to exception: " + e)
    }

    // bucketed tables. This fails without the fix for SPARK-27100
    try {
      withTable("bucketed_table") {
        dfPartitioned.repartition(nShufflePartitions)
          .write
          .format("parquet")
          .partitionBy("i")
          .bucketBy(nBuckets, "j")
          .saveAsTable("bucketed_table")

        val table = spark.table("bucketed_table")
        val nValues = table.select("j", "k").count()
        assert(nValues == nCount)
      }
    } catch {
      case e: Exception => fail("Failed due to exception: " + e)
    }
  }
  test("SPARK-29655 Read bucketed tables obeys spark.sql.shuffle.partitions") {
    withSQLConf(
      SQLConf.SHUFFLE_PARTITIONS.key -> "5",
      SQLConf.SHUFFLE_MAX_NUM_POSTSHUFFLE_PARTITIONS.key -> "7") {
      val bucketSpec = Some(BucketSpec(6, Seq("i", "j"), Nil))
      // The bucketed side must keep its bucket-derived partitioning (no shuffle) under
      // both regular and adaptive execution; only the non-bucketed side is shuffled.
      Seq(false, true).foreach { enableAdaptive =>
        withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> s"$enableAdaptive") {
          val bucketedTableTestSpecLeft = BucketedTableTestSpec(bucketSpec, expectedShuffle = false)
          val bucketedTableTestSpecRight = BucketedTableTestSpec(None, expectedShuffle = true)
          testBucketing(
            bucketedTableTestSpecLeft = bucketedTableTestSpecLeft,
            bucketedTableTestSpecRight = bucketedTableTestSpecRight,
            joinCondition = joinCondition(Seq("i", "j"))
          )
        }
      }
    }
  }
}
| darionyaphet/spark | sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala | Scala | apache-2.0 | 34,300 |
package com.twitter.finatra.http.integration.doeverything.main.domain
import javax.inject.Inject
import com.twitter.finagle.http.Request
import com.twitter.finatra.request.{JsonIgnoreBody, RouteParam}
/** Request whose `id` is bound from the route path and `name` from the request body. */
case class IdAndNameRequest(
  @RouteParam id: Long,
  name: String)

/** Request carrying the raw Finagle request alongside the route-supplied `id`. */
case class IdRequest(
  @RouteParam id: Long,
  @Inject request: Request)

/** The request body is ignored entirely; only the route parameter is bound. */
@JsonIgnoreBody
case class IdRequestIgnoringBody(
  @RouteParam id: Long)

/** Same shape as above but without @JsonIgnoreBody, so the body is still parsed. */
case class IdRequestNotIgnoringBody(
  @RouteParam id: Long)
| joecwu/finatra | http/src/test/scala/com/twitter/finatra/http/integration/doeverything/main/domain/IdAndNameRequest.scala | Scala | apache-2.0 | 485 |
package org.jetbrains.plugins.scala
package base
import java.io.File
import com.intellij.openapi.module.Module
import com.intellij.openapi.project.Project
import com.intellij.openapi.projectRoots.{JavaSdk, Sdk}
import com.intellij.openapi.roots._
import com.intellij.openapi.roots.libraries.Library
import com.intellij.openapi.util.io.FileUtil
import com.intellij.openapi.vfs.impl.VirtualFilePointerManagerImpl
import com.intellij.openapi.vfs.newvfs.impl.VfsRootAccess
import com.intellij.openapi.vfs.pointers.VirtualFilePointerManager
import com.intellij.openapi.vfs.{JarFileSystem, LocalFileSystem, VfsUtil, VirtualFile}
import com.intellij.testFramework.PsiTestUtil
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.SyntheticClasses
import org.jetbrains.plugins.scala.project._
import org.jetbrains.plugins.scala.project.template.Artifact
import org.jetbrains.plugins.scala.util.TestUtils
import org.jetbrains.plugins.scala.util.TestUtils.ScalaSdkVersion
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
/**
* Nikolay.Tropin
* 5/29/13
*/
class ScalaLibraryLoader(project: Project, module: Module, rootPath: String, isIncludeReflectLibrary: Boolean = false,
javaSdk: Option[Sdk] = None, additionalLibraries: Array[String] = Array.empty) {
private val addedLibraries = ArrayBuffer[Library]()
def loadScala(libVersion: TestUtils.ScalaSdkVersion) {
initScalaComponents()
addSyntheticClasses()
if (rootPath != null) {
FileUtil.createIfDoesntExist(new File(rootPath))
val testDataRoot: VirtualFile = LocalFileSystem.getInstance.refreshAndFindFileByPath(rootPath)
assert(testDataRoot != null)
PsiTestUtil.addSourceRoot(module, testDataRoot)
}
addScalaSdk(module, libVersion, isIncludeReflectLibrary)
additionalLibraries.foreach(name => addLibrary(module, CommonLibrary(name, libVersion)))
javaSdk.foreach { sdk =>
val rootModel = ModuleRootManager.getInstance(module).getModifiableModel
rootModel.setSdk(sdk)
inWriteAction(rootModel.commit())
}
}
def initScalaComponents(): Unit = {
ScalaLoader.loadScala()
}
def addSyntheticClasses(): Unit = {
val syntheticClasses: SyntheticClasses = project.getComponent(classOf[SyntheticClasses])
if (!syntheticClasses.isClassesRegistered) {
syntheticClasses.registerClasses()
}
}
def clean() {
if (rootPath != null) {
val testDataRoot: VirtualFile = LocalFileSystem.getInstance.refreshAndFindFileByPath(rootPath)
PsiTestUtil.removeSourceRoot(module, testDataRoot)
}
inWriteAction {
addedLibraries.foreach(module.detach)
}
}
def addScalaSdk(module: Module, sdkVersion: ScalaSdkVersion, loadReflect: Boolean) = {
val compilerPath = TestUtils.getScalaCompilerPath(sdkVersion)
val libraryPath = TestUtils.getScalaLibraryPath(sdkVersion)
val reflectPath = TestUtils.getScalaReflectPath(sdkVersion)
val scalaSdkJars = Seq(libraryPath, compilerPath) ++ (if (loadReflect) Seq(reflectPath) else Seq.empty)
val classRoots = scalaSdkJars.map(path => JarFileSystem.getInstance.refreshAndFindFileByPath(path + "!/")).asJava
val scalaLibrarySrc = TestUtils.getScalaLibrarySrc(sdkVersion)
val srcsRoots = Seq(JarFileSystem.getInstance.refreshAndFindFileByPath(scalaLibrarySrc + "!/")).asJava
val scalaSdkLib = PsiTestUtil.addProjectLibrary(module, "scala-sdk", classRoots, srcsRoots)
val languageLevel = Artifact.ScalaCompiler.versionOf(new File(compilerPath))
.flatMap(ScalaLanguageLevel.from).getOrElse(ScalaLanguageLevel.Default)
inWriteAction {
scalaSdkLib.convertToScalaSdkWith(languageLevel, scalaSdkJars.map(new File(_)))
module.attach(scalaSdkLib)
addedLibraries += scalaSdkLib
}
VirtualFilePointerManager.getInstance.asInstanceOf[VirtualFilePointerManagerImpl].storePointers()
}
// Convenience overload: unpacks a CommonLibrary descriptor into name/path.
private def addLibrary(module: Module, lib: CommonLibrary): Unit = addLibrary(module, lib.name, lib.path)
/** Attaches a single jar/directory at `mockLib` as a module-level library named
  * `libraryName`, skipping silently if a library with that name already exists.
  */
private def addLibrary(module: Module, libraryName: String, mockLib: String): Unit = {
  if (module.libraries.exists(_.getName == libraryName)) return
  VfsRootAccess.allowRootAccess(mockLib)
  val rootModel = ModuleRootManager.getInstance(module).getModifiableModel
  val libraryTable = rootModel.getModuleLibraryTable
  val library = libraryTable.createLibrary(libraryName)
  val libModel = library.getModifiableModel
  val libRoot: File = new File(mockLib)
  // Fail fast if the mock library path is missing from the test environment.
  assert(libRoot.exists)
  libModel.addRoot(VfsUtil.getUrlForLibraryRoot(libRoot), OrderRootType.CLASSES)
  inWriteAction {
    libModel.commit()
    rootModel.commit()
  }
  VirtualFilePointerManager.getInstance.asInstanceOf[VirtualFilePointerManagerImpl].storePointers()
}
}
object ScalaLibraryLoader {

  // Explicit helper for call sites that want the loader without any JDK attached.
  def getSdkNone: Option[Sdk] = None

  /** Builds a loader wired to the default mock JDK from the test data. */
  def withMockJdk(project: Project, module: Module, rootPath: String, isIncludeReflectLibrary: Boolean = false,
                  additionalLibraries: Array[String] = Array.empty): ScalaLibraryLoader = {
    val mockJdk = TestUtils.getDefaultJdk
    VfsRootAccess.allowRootAccess(mockJdk)
    val javaSdk = Some(JavaSdk.getInstance.createJdk("java sdk", mockJdk, false))
    new ScalaLibraryLoader(project, module, rootPath, isIncludeReflectLibrary, javaSdk, additionalLibraries)
  }
}
private object CommonLibrary {
  /** Resolves a known mock-library name to its jar path for the given Scala SDK
    * version; throws IllegalArgumentException for an unrecognized name.
    */
  def apply(name: String, version: TestUtils.ScalaSdkVersion): CommonLibrary = {
    val path = name match {
      case "scalaz"     => TestUtils.getMockScalazLib(version)
      case "slick"      => TestUtils.getMockSlickLib(version)
      case "spray"      => TestUtils.getMockSprayLib(version)
      case "cats"       => TestUtils.getCatsLib(version)
      case "specs2"     => TestUtils.getSpecs2Lib(version)
      case "scalacheck" => TestUtils.getScalacheckLib(version)
      case "postgresql" => TestUtils.getPostgresLib(version)
      case unknown      => throw new IllegalArgumentException(s"Unknown library: $unknown")
    }
    CommonLibrary(name, path)
  }
}
private case class CommonLibrary(name: String, path: String) | whorbowicz/intellij-scala | test/org/jetbrains/plugins/scala/base/ScalaLibraryLoader.scala | Scala | apache-2.0 | 6,281 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, CodeGenerator, ExprCode}
import org.apache.spark.sql.catalyst.util.TypeUtils
import org.apache.spark.sql.types._
/**
* An expression that is evaluated to the first non-null input.
*
* {{{
* coalesce(1, 2) => 1
* coalesce(null, 1, 2) => 1
* coalesce(null, null, 2) => 2
* coalesce(null, null, null) => null
* }}}
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(expr1, expr2, ...) - Returns the first non-null argument if exists. Otherwise, null.",
examples = """
Examples:
> SELECT _FUNC_(NULL, 1, NULL);
1
""")
// scalastyle:on line.size.limit
case class Coalesce(children: Seq[Expression]) extends Expression {

  /** Coalesce is nullable if all of its children are nullable, or if it has no children. */
  override def nullable: Boolean = children.forall(_.nullable)

  // Coalesce is foldable if all children are foldable.
  override def foldable: Boolean = children.forall(_.foldable)

  override def checkInputDataTypes(): TypeCheckResult = {
    // `isEmpty` instead of `length < 1`: same check, clearer intent.
    if (children.isEmpty) {
      TypeCheckResult.TypeCheckFailure(
        s"input to function $prettyName requires at least one argument")
    } else {
      // All children must share a single data type.
      TypeUtils.checkForSameTypeInputExpr(children.map(_.dataType), s"function $prettyName")
    }
  }

  // Safe only because checkInputDataTypes rejects the empty-children case.
  override def dataType: DataType = children.head.dataType

  /** Interpreted path: evaluate children left-to-right, stop at the first non-null. */
  override def eval(input: InternalRow): Any = {
    var result: Any = null
    val childIterator = children.iterator
    while (childIterator.hasNext && result == null) {
      result = childIterator.next().eval(input)
    }
    result
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    // isNull lives in a mutable field so split-out functions can update it.
    ev.isNull = ctx.addMutableState(CodeGenerator.JAVA_BOOLEAN, ev.isNull)

    // all the evals are meant to be in a do { ... } while (false); loop
    // so that `continue` acts as an early exit once a non-null value is found.
    val evals = children.map { e =>
      val eval = e.genCode(ctx)
      s"""
         |${eval.code}
         |if (!${eval.isNull}) {
         |  ${ev.isNull} = false;
         |  ${ev.value} = ${eval.value};
         |  continue;
         |}
       """.stripMargin
    }

    val resultType = CodeGenerator.javaType(dataType)
    // Large child lists are split into helper functions; each returns the
    // current value and the fold re-checks isNull to keep short-circuiting.
    val codes = ctx.splitExpressionsWithCurrentInputs(
      expressions = evals,
      funcName = "coalesce",
      returnType = resultType,
      makeSplitFunction = func =>
        s"""
           |$resultType ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
           |do {
           |  $func
           |} while (false);
           |return ${ev.value};
         """.stripMargin,
      foldFunctions = _.map { funcCall =>
        s"""
           |${ev.value} = $funcCall;
           |if (!${ev.isNull}) {
           |  continue;
           |}
         """.stripMargin
      }.mkString)

    ev.copy(code =
      s"""
         |${ev.isNull} = true;
         |$resultType ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
         |do {
         |  $codes
         |} while (false);
       """.stripMargin)
  }
}
@ExpressionDescription(
usage = "_FUNC_(expr1, expr2) - Returns `expr2` if `expr1` is null, or `expr1` otherwise.",
examples = """
Examples:
> SELECT _FUNC_(NULL, array('2'));
["2"]
""")
// RuntimeReplaceable: `child` is the expression actually executed —
// here Coalesce(left, right), built by the auxiliary constructor.
case class IfNull(left: Expression, right: Expression, child: Expression)
  extends RuntimeReplaceable {

  def this(left: Expression, right: Expression) = {
    this(left, right, Coalesce(Seq(left, right)))
  }

  // Only the user-facing arguments, not the synthetic replacement child.
  override def flatArguments: Iterator[Any] = Iterator(left, right)
  override def sql: String = s"$prettyName(${left.sql}, ${right.sql})"
}
@ExpressionDescription(
usage = "_FUNC_(expr1, expr2) - Returns null if `expr1` equals to `expr2`, or `expr1` otherwise.",
examples = """
Examples:
> SELECT _FUNC_(2, 2);
NULL
""")
// RuntimeReplaceable: rewritten to If(left = right, null, left) by the
// auxiliary constructor; the null literal keeps left's data type.
case class NullIf(left: Expression, right: Expression, child: Expression)
  extends RuntimeReplaceable {

  def this(left: Expression, right: Expression) = {
    this(left, right, If(EqualTo(left, right), Literal.create(null, left.dataType), left))
  }

  // Only the user-facing arguments, not the synthetic replacement child.
  override def flatArguments: Iterator[Any] = Iterator(left, right)
  override def sql: String = s"$prettyName(${left.sql}, ${right.sql})"
}
@ExpressionDescription(
usage = "_FUNC_(expr1, expr2) - Returns `expr2` if `expr1` is null, or `expr1` otherwise.",
examples = """
Examples:
> SELECT _FUNC_(NULL, array('2'));
["2"]
""")
// Alias of IfNull: also replaced by Coalesce(left, right) at runtime.
case class Nvl(left: Expression, right: Expression, child: Expression) extends RuntimeReplaceable {

  def this(left: Expression, right: Expression) = {
    this(left, right, Coalesce(Seq(left, right)))
  }

  // Only the user-facing arguments, not the synthetic replacement child.
  override def flatArguments: Iterator[Any] = Iterator(left, right)
  override def sql: String = s"$prettyName(${left.sql}, ${right.sql})"
}
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(expr1, expr2, expr3) - Returns `expr2` if `expr1` is not null, or `expr3` otherwise.",
examples = """
Examples:
> SELECT _FUNC_(NULL, 2, 1);
1
""")
// scalastyle:on line.size.limit
// RuntimeReplaceable: rewritten to If(IsNotNull(expr1), expr2, expr3).
case class Nvl2(expr1: Expression, expr2: Expression, expr3: Expression, child: Expression)
  extends RuntimeReplaceable {

  def this(expr1: Expression, expr2: Expression, expr3: Expression) = {
    this(expr1, expr2, expr3, If(IsNotNull(expr1), expr2, expr3))
  }

  // Only the user-facing arguments, not the synthetic replacement child.
  override def flatArguments: Iterator[Any] = Iterator(expr1, expr2, expr3)
  override def sql: String = s"$prettyName(${expr1.sql}, ${expr2.sql}, ${expr3.sql})"
}
/**
* Evaluates to `true` iff it's NaN.
*/
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns true if `expr` is NaN, or false otherwise.",
examples = """
Examples:
> SELECT _FUNC_(cast('NaN' as double));
true
""")
case class IsNaN(child: Expression) extends UnaryExpression
  with Predicate with ImplicitCastInputTypes {

  // Only floating-point inputs are sensible; other types get implicitly cast.
  override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(DoubleType, FloatType))

  // Never returns null: a null input evaluates to false.
  override def nullable: Boolean = false

  override def eval(input: InternalRow): Any = {
    val value = child.eval(input)
    if (value == null) {
      false
    } else {
      // Match is intentionally non-exhaustive: inputTypes restricts us to these two.
      child.dataType match {
        case DoubleType => value.asInstanceOf[Double].isNaN
        case FloatType => value.asInstanceOf[Float].isNaN
      }
    }
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val eval = child.genCode(ctx)
    child.dataType match {
      case DoubleType | FloatType =>
        // Double.isNaN covers the float case too: a float NaN widens to a double NaN.
        ev.copy(code = s"""
          ${eval.code}
          ${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
          ${ev.value} = !${eval.isNull} && Double.isNaN(${eval.value});""", isNull = "false")
    }
  }
}
/**
* An Expression evaluates to `left` iff it's not NaN, or evaluates to `right` otherwise.
* This Expression is useful for mapping NaN values to null.
*/
@ExpressionDescription(
usage = "_FUNC_(expr1, expr2) - Returns `expr1` if it's not NaN, or `expr2` otherwise.",
examples = """
Examples:
> SELECT _FUNC_(cast('NaN' as double), 123);
123.0
""")
case class NaNvl(left: Expression, right: Expression)
  extends BinaryExpression with ImplicitCastInputTypes {

  // Result type follows the left operand.
  override def dataType: DataType = left.dataType

  override def inputTypes: Seq[AbstractDataType] =
    Seq(TypeCollection(DoubleType, FloatType), TypeCollection(DoubleType, FloatType))

  override def eval(input: InternalRow): Any = {
    val value = left.eval(input)
    if (value == null) {
      // Null left short-circuits to null; right is not evaluated.
      null
    } else {
      // Non-exhaustive by design: inputTypes restricts to Double/Float.
      left.dataType match {
        case DoubleType =>
          if (!value.asInstanceOf[Double].isNaN) value else right.eval(input)
        case FloatType =>
          if (!value.asInstanceOf[Float].isNaN) value else right.eval(input)
      }
    }
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val leftGen = left.genCode(ctx)
    val rightGen = right.genCode(ctx)
    left.dataType match {
      case DoubleType | FloatType =>
        // rightGen.code is emitted inside the NaN branch only, preserving the
        // interpreted path's lazy evaluation of `right`.
        ev.copy(code = s"""
          ${leftGen.code}
          boolean ${ev.isNull} = false;
          ${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
          if (${leftGen.isNull}) {
            ${ev.isNull} = true;
          } else {
            if (!Double.isNaN(${leftGen.value})) {
              ${ev.value} = ${leftGen.value};
            } else {
              ${rightGen.code}
              if (${rightGen.isNull}) {
                ${ev.isNull} = true;
              } else {
                ${ev.value} = ${rightGen.value};
              }
            }
          }""")
    }
  }
}
/**
* An expression that is evaluated to true if the input is null.
*/
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns true if `expr` is null, or false otherwise.",
examples = """
Examples:
> SELECT _FUNC_(1);
false
""")
case class IsNull(child: Expression) extends UnaryExpression with Predicate {
  // Always yields true/false, never null.
  override def nullable: Boolean = false

  override def eval(input: InternalRow): Any = {
    child.eval(input) == null
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val eval = child.genCode(ctx)
    // Reuse the child's null flag directly as this expression's value.
    ExprCode(code = eval.code, isNull = "false", value = eval.isNull)
  }

  override def sql: String = s"(${child.sql} IS NULL)"
}
/**
* An expression that is evaluated to true if the input is not null.
*/
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns true if `expr` is not null, or false otherwise.",
examples = """
Examples:
> SELECT _FUNC_(1);
true
""")
case class IsNotNull(child: Expression) extends UnaryExpression with Predicate {
  // Always yields true/false, never null.
  override def nullable: Boolean = false

  override def eval(input: InternalRow): Any = {
    child.eval(input) != null
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val eval = child.genCode(ctx)
    // Value is simply the negation of the child's null flag.
    ExprCode(code = eval.code, isNull = "false", value = s"(!(${eval.isNull}))")
  }

  override def sql: String = s"(${child.sql} IS NOT NULL)"
}
/**
* A predicate that is evaluated to be true if there are at least `n` non-null and non-NaN values.
*/
case class AtLeastNNonNulls(n: Int, children: Seq[Expression]) extends Predicate {
  // Always yields true/false, never null.
  override def nullable: Boolean = false
  override def foldable: Boolean = children.forall(_.foldable)

  // Fix: `n` was previously emitted as the literal text "n" (missing '$' in the
  // interpolator) and the name did not match the class; now prints the real count.
  override def toString: String = s"AtLeastNNonNulls($n, ${children.mkString(",")})"

  private[this] val childrenArray = children.toArray

  /** Interpreted path: count non-null, non-NaN children, stopping as soon as
    * `n` are found.
    */
  override def eval(input: InternalRow): Boolean = {
    var numNonNulls = 0
    var i = 0
    while (i < childrenArray.length && numNonNulls < n) {
      val evalC = childrenArray(i).eval(input)
      if (evalC != null) {
        childrenArray(i).dataType match {
          case DoubleType =>
            if (!evalC.asInstanceOf[Double].isNaN) numNonNulls += 1
          case FloatType =>
            if (!evalC.asInstanceOf[Float].isNaN) numNonNulls += 1
          case _ => numNonNulls += 1
        }
      }
      i += 1
    }
    numNonNulls >= n
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val nonnull = ctx.freshName("nonnull")
    // all evals are meant to be inside a do { ... } while (false); loop
    // so `continue` short-circuits once the count has been reached.
    val evals = children.map { e =>
      val eval = e.genCode(ctx)
      e.dataType match {
        case DoubleType | FloatType =>
          // Floating-point children must also be non-NaN to count.
          s"""
             |if ($nonnull < $n) {
             |  ${eval.code}
             |  if (!${eval.isNull} && !Double.isNaN(${eval.value})) {
             |    $nonnull += 1;
             |  }
             |} else {
             |  continue;
             |}
           """.stripMargin
        case _ =>
          s"""
             |if ($nonnull < $n) {
             |  ${eval.code}
             |  if (!${eval.isNull}) {
             |    $nonnull += 1;
             |  }
             |} else {
             |  continue;
             |}
           """.stripMargin
      }
    }

    // Split functions thread the running count through as a parameter/return value.
    val codes = ctx.splitExpressionsWithCurrentInputs(
      expressions = evals,
      funcName = "atLeastNNonNulls",
      extraArguments = (CodeGenerator.JAVA_INT, nonnull) :: Nil,
      returnType = CodeGenerator.JAVA_INT,
      makeSplitFunction = body =>
        s"""
           |do {
           |  $body
           |} while (false);
           |return $nonnull;
         """.stripMargin,
      foldFunctions = _.map { funcCall =>
        s"""
           |$nonnull = $funcCall;
           |if ($nonnull >= $n) {
           |  continue;
           |}
         """.stripMargin
      }.mkString)

    ev.copy(code =
      s"""
         |${CodeGenerator.JAVA_INT} $nonnull = 0;
         |do {
         |  $codes
         |} while (false);
         |${CodeGenerator.JAVA_BOOLEAN} ${ev.value} = $nonnull >= $n;
       """.stripMargin, isNull = "false")
  }
}
| brad-kaiser/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/nullExpressions.scala | Scala | apache-2.0 | 13,756 |
package ru.avhaliullin.whatever.frontend
import java.io.File
import ru.avhaliullin.whatever.common.CompilationException
import ru.avhaliullin.whatever.frontend.sources.{SourceTree, SourceTreeNode}
import ru.avhaliullin.whatever.frontend.syntax.{Parser, SyntaxTree, Expression}
import scala.io.Source
/**
* @author avhaliullin
*/
object Frontend {
  /** Parses every file in the source tree into a [[SyntaxTree]].
    * Throws [[CompilationException]] at the first file that fails to parse.
    */
  def parseSources(sources: SourceTree[Unit]): SourceTree[SyntaxTree] = {
    val p = new Parser
    sources.map {
      srcFile =>
        // Fix: the BufferedSource was never closed before (file-handle leak,
        // especially on the throwing paths).
        val src = Source.fromFile(srcFile.file)
        try {
          p.parse(src.bufferedReader()) match {
            case p.Success(tree: SyntaxTree, _) => tree
            // Failure and Error were reported identically, so both are folded
            // into their common NoSuccess extractor.
            case p.NoSuccess(msg, rest) =>
              throw new CompilationException(srcFile.file, rest.pos, "Syntax error: " + msg)
          }
        } finally {
          src.close()
        }
    }
  }

  /** Convenience overload: discovers the sources under `sourcesRoot` first. */
  def parseSources(sourcesRoot: File): SourceTree[SyntaxTree] = {
    parseSources(SourceTreeNode.findSources(sourcesRoot))
  }
}
| avhaliullin/whatever-compiler | compiler/src/main/scala/ru/avhaliullin/whatever/frontend/Frontend.scala | Scala | mit | 1,038 |
package com.twitter.finatra.http.tests.routing
import com.twitter.finatra.http.internal.routing.PathPattern
import com.twitter.inject.Test
import java.net.URI
/** Exercises PathPattern's route matching: extraction returns Some(params) on a
  * match (named segments and the '*' wildcard become map entries) and None otherwise.
  */
class PathPatternTest extends Test {

  test("routes") {
    // Trailing slashes are significant — "/cars" and "/cars/" are distinct routes.
    PathPattern("/cars").extract("/cars") should equal(Some(Map()))
    PathPattern("/cars").extract("/cars/") should equal(None)
    PathPattern("/cars/").extract("/cars") should equal(None)
    PathPattern("/cars/").extract("/cars/") should equal(Some(Map()))
    PathPattern("/cars/?").extract("/cars") should equal(None)
    PathPattern("/cars/?").extract("/cars/") should equal(None)
    // ":*" directly after a constant captures the rest of the path (possibly empty).
    PathPattern("/cars:*").extract("/cars") should equal(Some(Map("*" -> "")))
    PathPattern("/cars:*").extract("/cars/") should equal(Some(Map("*" -> "/")))
    // Named segments bind exactly one path segment each.
    PathPattern("/cars/:id").extract("/cars/123") should equal(Some(Map("id" -> "123")))
    PathPattern("/cars/:id").extract("/cars/") should equal(None)
    PathPattern("/store/cars/:id").extract("/store/cars/123") should equal(Some(Map("id" -> "123")))
    PathPattern("/store/cars/:id").extract("/asdf/cars/123") should equal(None)
    PathPattern("/cars/:make/:model").extract("/cars/ford/explorer") should equal(
      Some(Map("make" -> "ford", "model" -> "explorer"))
    )
    PathPattern("/cars/:make/:model").extract("/cars/foo/ford/explorer") should equal(None)
    PathPattern("/cars/:make/:model").extract("/cars/1-1/2") should equal(
      Some(Map("make" -> "1-1", "model" -> "2"))
    )
    PathPattern("/cars/:make/:model").extract("/cars/ford/") should equal(None)
    PathPattern("/cars/:make/:model").extract("/cars/ford") should equal(None)
    PathPattern("/store/cars/:make/:model").extract("/store/cars/ford/explorer") should equal(
      Some(Map("make" -> "ford", "model" -> "explorer"))
    )
    PathPattern("/cars/:make/:model/:*").extract("/cars/ford/explorer/foo/bar") should equal(
      Some(Map("make" -> "ford", "model" -> "explorer", "*" -> "foo/bar"))
    )
  }

  test("non capture group syntax") {
    // Regex-style "(?:...)" groups are not interpreted as alternation.
    PathPattern("/(?:cars|boats)/:id").extract("/cars/123") should equal(None)
    PathPattern("/(?:cars|boats)/:id").extract("/boats/123") should equal(None)
  }

  test("capture group syntax is escaped and ignored") {
    PathPattern("/(cars|boats)/:id").extract("/boats/123") should equal(None)
  }

  test("routes w/ special '*' token") {
    // A lone ":*" matches everything after the prefix, including the empty string.
    PathPattern("/:*").extract("/") should equal(Some(Map("*" -> "")))
    PathPattern("/:*").extract("/abc") should equal(Some(Map("*" -> "abc")))
    PathPattern("/:*").extract("/abc/123") should equal(Some(Map("*" -> "abc/123")))
    PathPattern("/ui/:*").extract("/ui/") should equal(Some(Map("*" -> "")))
    PathPattern("/ui/:*").extract("/ui/abc") should equal(Some(Map("*" -> "abc")))
    PathPattern("/ui/:*").extract("/ui/abc/123") should equal(Some(Map("*" -> "abc/123")))
  }

  test("constant") {
    PathPattern("/cars/ford/explorer").extract("/cars/ford/explorer") should equal(Some(Map()))
  }

  test("unicode") {
    // Disabled (pending): unicode paths are not yet matched after ASCII-escaping.
    pending
    val path = "위키백과"
    val escapedUri = "/" + new URI(path).toASCIIString
    PathPattern("/" + path).extract(escapedUri).isDefined should equal(true)
  }

  test("url-encode") {
    // Captured segment values are URL-decoded ("%20" -> space).
    PathPattern("/cars/ford/:model").extract("/cars/ford/fusion%20hybrid") should equal(
      Some(Map("model" -> "fusion hybrid"))
    )
  }
}
| twitter/finatra | http-server/src/test/scala/com/twitter/finatra/http/tests/routing/PathPatternTest.scala | Scala | apache-2.0 | 3,335 |
package org.akkamon.core
import akka.actor.Actor
import akka.contrib.pattern.ReceivePipeline
import scala.reflect.runtime.universe
/** Base trait for instrumented actors: exposes the actor's name and a reflectively
  * resolved InstrumentExporter singleton chosen by Config.InstrumentExporter.
  */
trait ActorStack extends Actor with ReceivePipeline {

  // Last segment of the actor path.
  // NOTE(review): never reassigned here — looks like it could be a val; confirm
  // no subclass overwrites it before tightening.
  var actorName = self.path.name;

  // use the reflect module to get a reference to our object instance
  private val runtimeMirror = universe.runtimeMirror(getClass.getClassLoader)
  private val module = runtimeMirror.staticModule(Config.InstrumentExporter)

  // Singleton exporter instance, cast from the reflected module; resolved once
  // at actor construction time.
  val exporter = runtimeMirror.reflectModule(module).instance.asInstanceOf[InstrumentExporter]
}
| josdirksen/akka-mon | src/main/scala/org/akkamon/core/ActorStack.scala | Scala | mit | 548 |
package lila.ai
import scala.collection.JavaConversions._
import akka.actor._
import akka.pattern.pipe
import com.typesafe.config.{ Config => TypesafeConfig }
import lila.common.PimpedConfig._
/** AI module wiring: reads engine/actor settings from `c`, spawns the actor that
  * forwards Analyse requests to the client, and lazily builds client and server.
  */
final class Env(
    c: TypesafeConfig,
    uciMemo: lila.game.UciMemo,
    db: lila.db.Env,
    system: ActorSystem) {

  // Config keys read once at construction; imported below for direct use.
  private val settings = new {
    val Endpoint = c getString "endpoint"
    val CallbackUrl = c getString "callback_url"
    val ActorName = c getString "actor.name"
  }
  import settings._

  // NOTE(review): presumably marks this instance as serving AI requests only — confirm.
  val ServerOnly = c getBoolean "server-only"

  // Engine configuration (executable, resources, time limits).
  private val config = new Config(
    execPath = c getString "exec_path",
    hashSize = c getInt "hash_size",
    nbThreads = c getInt "threads",
    nbInstances = c getInt "instances",
    playMaxMoveTime = c duration "play.movetime",
    analyseMoveTime = c duration "analyse.movetime",
    playTimeout = c duration "play.timeout",
    analyseMaxPlies = c getInt "analyse.max_plies",
    debug = c getBoolean "debug")

  lazy val aiPerfApi = new AiPerfApi

  // Rating for a given AI difficulty level, if defined.
  def ratingOf(level: Int) = aiPerfApi.intRatings get level

  // api actor: receives hub Analyse messages and delegates them to the client.
  system.actorOf(Props(new Actor {
    def receive = {
      case lila.hub.actorApi.ai.Analyse(gameId, uciMoves, fen, requestedByHuman, variant) =>
        client.analyse(gameId, uciMoves, fen, requestedByHuman, actorApi.Variant(variant))
    }
  }), name = ActorName)

  lazy val client = new Client(
    config = config,
    endpoint = Endpoint,
    callbackUrl = CallbackUrl,
    uciMemo = uciMemo)

  lazy val server = new Server(
    config = config,
    queue = system.actorOf(Props(new Queue(config))),
    uciMemo = uciMemo)
}
object Env {

  // Singleton wired from the running Play application's "ai" config section.
  lazy val current = "[boot] ai" describes new Env(
    c = lila.common.PlayApp loadConfig "ai",
    uciMemo = lila.game.Env.current.uciMemo,
    db = lila.db.Env.current,
    system = lila.common.PlayApp.system)
}
| pavelo65/lila | modules/ai/src/main/Env.scala | Scala | mit | 1,859 |
package com.arcusys.learn.exceptions
// Domain exception for unauthorized access; `message` defaults to null,
// mirroring java.lang.Exception's no-message constructor behavior.
case class NotAuthorizedException(message: String = null) extends Exception(message)
| ViLPy/Valamis | learn-portlet/src/main/scala/com/arcusys/learn/exceptions/NotAuthorizedException.scala | Scala | lgpl-3.0 | 123 |
/*
* Copyright 2016 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.regex
/** Exposes everything provided by [[AllOps]] through `import kantan.regex.ops._`. */
package object ops extends AllOps
| nrinaudo/kantan.regex | core/shared/src/main/scala/kantan/regex/ops/package.scala | Scala | apache-2.0 | 656 |
package scorex.lagonaki.integration.api
import org.scalatest.{FunSuite, Matchers}
import play.api.libs.json.JsValue
import scorex.block.Block
import scorex.crypto.encode.Base58
import scorex.lagonaki.TransactionTestingCommons
/** Integration tests for the /transactions HTTP API, run against a live node
  * (GET.request issues real HTTP calls) using wallet accounts generated below.
  */
class TransactionsAPISpecification extends FunSuite with Matchers with TransactionTestingCommons {

  import scorex.lagonaki.TestingCommons._

  // Make sure the shared wallet has enough accounts for the tests.
  if (wallet.privateKeyAccounts().size < 10) wallet.generateNewAccounts(10)
  val addresses = accounts.map(_.address)
  val account = accounts.head
  val address = account.address

  test("/transactions/unconfirmed API route") {
    // Populate the unconfirmed pool, then check the endpoint reflects it 1:1.
    (1 to 20) foreach (i => genValidTransaction())
    val unconfirmed = transactionModule.utxStorage.all()
    unconfirmed.size should be > 0
    val tr = GET.request("/transactions/unconfirmed")
    (tr \\\\ "signature").toList.size shouldBe unconfirmed.size
  }

  test("/transactions/address/{address} API route") {
    addresses.foreach { a =>
      checkTransactionList(GET.request(s"/transactions/address/$a"))
    }
  }

  test("/transactions/address/{address}/limit/{limit} API route") {
    addresses.foreach { a =>
      val tr = GET.request(s"/transactions/address/$a/limit/2")
      // The limit parameter caps the number of returned transactions.
      (tr \\\\ "amount").toList.size should be <= 2
      checkTransactionList(tr)
    }
  }

  test("/transactions/info/{signature} API route") {
    // The genesis transaction is always present at height 1 with zero fee.
    val genesisTx = Block.genesis().transactions.head
    val tr = GET.request(s"/transactions/info/${Base58.encode(genesisTx.signature)}")
    (tr \\ "signature").as[String] shouldBe Base58.encode(genesisTx.signature)
    (tr \\ "type").as[Int] shouldBe 1
    (tr \\ "fee").as[Int] shouldBe 0
    (tr \\ "amount").as[Long] should be > 0L
    (tr \\ "height").as[Int] shouldBe 1
    (tr \\ "recipient").as[String] shouldBe genesisTx.recipient.address
  }

  // Sanity checks shared by the list endpoints: every returned transaction has
  // plausible amount/fee/type/timestamp fields and the expected keys present.
  def checkTransactionList(tr: JsValue): Unit = {
    (tr \\\\ "amount").toList.foreach(amount => amount.as[Long] should be > 0L)
    (tr \\\\ "fee").toList.foreach(amount => amount.as[Long] should be >= 0L)
    (tr \\\\ "type").toList.foreach(amount => amount.as[Int] should be >= 0)
    (tr \\\\ "timestamp").toList.foreach(amount => amount.as[Long] should be >= 0L)
    (tr \\\\ "signature").toList.size should be >= 0
    (tr \\\\ "sender").toList.size should be >= 0
    (tr \\\\ "recipient").toList.size should be >= 0
  }
} | ScorexProject/Scorex-Lagonaki | src/test/scala/scorex/lagonaki/integration/api/TransactionsAPISpecification.scala | Scala | cc0-1.0 | 2,301 |
package me.apidoc.swagger
import com.wordnik.swagger.{models => swagger}
import com.wordnik.swagger.models.properties.{ArrayProperty, Property, RefProperty}
/** A named swagger model definition together with the names of the other
  * definitions it references.
  */
private[swagger] case class MyDefinition(name: String, definition: swagger.Model) {

  /**
   * the list of types that this definition depends on
   */
  val dependencies: Seq[String] = modelDependencies(definition)

  /**
   * Returns a list of all the non primitive types that this model
   * depends on. Resolves references and inspects the properties of
   * all fields defined on this model.
   */
  private def modelDependencies(swaggerModel: swagger.Model): Seq[String] = {
    swaggerModel match {
      case m: swagger.ComposedModel => {
        // allOf composition: union of the dependencies of each part.
        Util.toArray(m.getAllOf).flatMap { modelDependencies(_) }
      }
      case m: swagger.RefModel => {
        Seq(m.getSimpleRef)
      }
      case m: swagger.ModelImpl => {
        // Inline model: collect non-primitive property types.
        Util.toMap(m.getProperties).values.flatMap { schemaType(_) }.toSeq
      }
      case _ => {
        Nil
      }
    }
  }

  /**
   * If the type of this property is a primitive, returns
   * None. Otherwise returns the name of the type.
   */
  private def schemaType(prop: Property): Option[String] = {
    prop match {
      case p: ArrayProperty => {
        // Arrays depend on their element type.
        schemaType(p.getItems)
      }
      case p: RefProperty => {
        Some(p.getSimpleRef)
      }
      case _ => {
        SchemaType.fromSwagger(prop.getType, Option(prop.getFormat)) match {
          case None => Some(prop.getType)
          case Some(_) => None // Primitive type - no need to resolve
        }
      }
    }
  }
}
/** Iterates swagger model definitions in dependency order: next() only returns a
  * definition once everything it depends on has already been returned.
  */
private[swagger] case class ModelSelector(
  swaggerDefinitions: Map[String, swagger.Model]
) {

  // All definitions wrapped with their resolved dependency name lists.
  private val definitions = swaggerDefinitions.map {
    case (name, definition) => MyDefinition(name, definition)
  }.toSeq

  // Names already handed out by next(). Fixes: was a `var` ListBuffer although the
  // reference is never reassigned — now a `val`; a Set also gives O(1) membership
  // checks instead of a linear scan per lookup. (Order was never observable.)
  private val completed = scala.collection.mutable.Set.empty[String]

  /** Definitions not yet returned, in original declaration order. */
  def remaining(): Seq[MyDefinition] = {
    definitions.filter(md => !completed.contains(md.name))
  }

  /** Returns the next definition whose dependencies are all completed, marking it
    * completed; None when nothing is selectable (done, dependency cycle, or a
    * dependency that is not defined here).
    */
  def next(): Option[MyDefinition] = {
    remaining().find { m =>
      m.dependencies.forall(depName => completed.contains(depName))
    }.map { md =>
      completed += md.name
      md
    }
  }
}
| Seanstoppable/apidoc | swagger/src/main/scala/me/apidoc/swagger/ModelSelector.scala | Scala | mit | 2,289 |
package com.sksamuel.elastic4s
import com.sksamuel.elastic4s.ElasticDsl._
import com.sksamuel.elastic4s.mappings.FieldType.NestedType
import org.scalatest.{ Matchers, FreeSpec }
/** Verifies that documents with nested-mapped fields are matched by nestedQuery
  * against an embedded Elasticsearch node (provided by ElasticSugar).
  */
class NestedQueryTest extends FreeSpec with Matchers with ElasticSugar {

  // Index with "actor" mapped as a nested type so its objects are indexed separately.
  client.execute {
    create index "nested" mappings {
      "show" as {
        "actor" typed NestedType
      }
    }
  }.await

  // One document with two nested actor objects.
  client.execute(
    index into "nested/show" fields (
      "name" -> "game of thrones",
      "actor" -> Seq(
        Map("name" -> "peter dinklage", "birthplace" -> "Morristown"),
        Map("name" -> "pedro pascal", "birthplace" -> "Santiago")
      )
    )
  ).await

  // Make the document visible to search before asserting.
  refresh("nested")
  blockUntilCount(1, "nested")

  "nested object" - {
    "should be searchable by nested field" in {
      // Term present in a nested actor name: one hit expected.
      val resp1 = client.execute {
        search in "nested/show" query nestedQuery("actor").query(termQuery("actor.name" -> "dinklage"))
      }.await
      resp1.getHits.totalHits() shouldEqual 1

      // Term absent from every nested actor: no hits.
      val resp2 = client.execute {
        search in "nested/show" query nestedQuery("actor").query(termQuery("actor.name" -> "simon"))
      }.await
      resp2.getHits.totalHits() shouldEqual 0
    }
  }
}
| l15k4/elastic4s | elastic4s-core/src/test/scala/com/sksamuel/elastic4s/NestedQueryTest.scala | Scala | apache-2.0 | 1,191 |
class Test {
// Auxiliary constructor containing a local method with a lambda.
// NOTE(review): per the repository path (test/pending/run/t3832) this is a pending
// compiler regression test — presumably exercising closure lifting out of a
// secondary constructor; the bodies are intentionally trivial.
def this(un: Int) = {
  this()
  def test(xs: List[Int]) = xs map (x => x)
  ()
}
} | felixmulder/scala | test/pending/run/t3832.scala | Scala | bsd-3-clause | 184 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc.transform.patmat
import scala.reflect.internal.util.StatisticsStatics
/** Translate typed Trees that represent pattern matches into the patternmatching IR, defined by TreeMakers.
*/
trait MatchTranslation {
self: PatternMatching =>
import global._
import definitions._
import treeInfo.{ Unapplied, unbind }
import CODE._
// Always map repeated params to sequences
// Assigns `info` to `sym`, converting repeated-parameter types (T*) to Seq[T]
// via repeatedToSeq; the change is logged through the patmat debug helper.
private def setVarInfo(sym: Symbol, info: Type) =
  sym setInfo debug.patmatResult(s"changing ${sym.defString} to")(repeatedToSeq(info))
trait MatchTranslator extends TreeMakers with TreeMakerWarnings {
import typer.context
def selectorPos: Position
/** A conservative approximation of which patterns do not discern anything.
* They are discarded during the translation.
*/
object WildcardPattern {
  // True when the pattern matches anything without binding useful information,
  // so the translator can drop it.
  def unapply(pat: Tree): Boolean = pat match {
    case Bind(nme.WILDCARD, WildcardPattern()) => true // don't skip when binding an interesting symbol!
    case Star(WildcardPattern()) => true
    case x: Ident => treeInfo.isVarPattern(x)
    // An alternative is a wildcard only if every branch is.
    case Alternative(ps) => ps forall unapply
    case EmptyTree => true
    case _ => false
  }
}
object PatternBoundToUnderscore {
  // True when the pattern is (recursively) bound to `_`, possibly through a
  // type ascription or alternatives.
  def unapply(pat: Tree): Boolean = pat match {
    case Bind(nme.WILDCARD, _) => true // don't skip when binding an interesting symbol!
    case Ident(nme.WILDCARD) => true
    case Alternative(ps) => ps forall unapply
    case Typed(PatternBoundToUnderscore(), _) => true
    case _ => false
  }
}
object SymbolBound {
  // Extracts (boundSymbol, subPattern) from a Bind node that already carries a
  // symbol; other trees don't match.
  def unapply(tree: Tree): Option[(Symbol, Tree)] = tree match {
    case Bind(_, expr) if tree.hasExistingSymbol => Some(tree.symbol -> expr)
    case _ => None
  }
}
final case class BoundTree(binder: Symbol, tree: Tree) {
// Lazily-built extractor-call view of this pattern tree.
private lazy val extractor = ExtractorCall(tree)

def pos = tree.pos
def tpe = binder.info.dealiasWiden // the type of the variable bound to the pattern
// Expected type of this pattern: sequence type for a Star, ascribed type for a
// type-bound pattern, otherwise the tree's own type.
def pt = unbound match {
  case Star(tpt) => this glbWith seqType(tpt.tpe)
  case TypeBound(tpe) => tpe
  case tree => tree.tpe
}
// Greatest lower bound of the binder's type and `other`, normalized.
def glbWith(other: Type) = glb(tpe :: other :: Nil).normalize
object SymbolAndTypeBound {
  // Extracts (symbol, ascribed type); a bare type ascription falls back to the
  // enclosing binder as the symbol.
  def unapply(tree: Tree): Option[(Symbol, Type)] = tree match {
    case SymbolBound(sym, TypeBound(tpe)) => Some(sym -> tpe)
    case TypeBound(tpe) => Some(binder -> tpe)
    case _ => None
  }
}
object TypeBound {
  // Matches a typed wildcard/ident pattern (`x: T`) whose tree has a type assigned.
  def unapply(tree: Tree): Option[Type] = tree match {
    case Typed(Ident(_), _) if tree.tpe != null => Some(tree.tpe)
    case _ => None
  }
}
// Re-wraps a subpattern with this node's binder.
private def rebindTo(pattern: Tree) = BoundTree(binder, pattern)
// Builds one translation step from tree makers plus the subpatterns still to translate.
private def step(treeMakers: TreeMaker*)(subpatterns: BoundTree*): TranslationStep = TranslationStep(treeMakers.toList, subpatterns.toList)
// Step that substitutes `binder` for `sub` and continues with the subpattern.
private def bindingStep(sub: Symbol, subpattern: Tree) = step(SubstOnlyTreeMaker(sub, binder))(rebindTo(subpattern))
// Step testing the scrutinee for equality with a constant/stable-id pattern.
private def equalityTestStep() = step(EqualityTestTreeMaker(binder, tree, pos))()
// Step testing the scrutinee against an expected type.
private def typeTestStep(sub: Symbol, subPt: Type) = step(TypeTestTreeMaker(sub, binder, subPt, glbWith(subPt))(pos))()
// Step trying each alternative pattern in turn.
private def alternativesStep(alts: List[Tree]) = step(AlternativesTreeMaker(binder, translatedAlts(alts), alts.head.pos))()
private def translatedAlts(alts: List[Tree]) = alts map (alt => rebindTo(alt).translate())
// Step producing no makers and no subpatterns (wildcards).
private def noStep() = step()()

private def unsupportedPatternMsg = sm"""
|unsupported pattern: ${tree.shortClass} / $this (this is a scalac bug.)
|""".trim
// example check: List[Int] <:< ::[Int]
// Builds the translation step for extractor/constructor patterns: optional type
// test, null check (when not implied), the extractor call itself, then recurses
// into the subpatterns after fixing up their binder types.
private def extractorStep(): TranslationStep = {
  import extractor.treeMakers

  // paramType = the type expected by the unapply
  // TODO: paramType may contain unbound type params (run/t2800, run/t3530)
  val (makers, unappBinder) = {
    val paramType = extractor.expectedExtractedType
    // Statically conforms to paramType
    if (tpe <:< paramType) {
      // enforce all extractor patterns to be non-null
      val nonNullTest = NonNullTestTreeMaker(binder, paramType, pos)
      val unappBinder = nonNullTest.nextBinder
      (nonNullTest :: treeMakers(unappBinder, pos), unappBinder)
    }
    else {
      // chain a type-testing extractor before the actual extractor call
      // it tests the type, checks the outer pointer and casts to the expected type
      // TODO: the outer check is mandated by the spec for case classes, but we do it for user-defined unapplies as well [SPEC]
      // (the prefix of the argument passed to the unapply must equal the prefix of the type of the binder)
      val typeTest = TypeTestTreeMaker(binder, binder, paramType, paramType)(pos, extractorArgTypeTest = true)
      val binderKnownNonNull = typeTest impliesBinderNonNull binder

      // skip null test if it's implied
      if (binderKnownNonNull) {
        val unappBinder = typeTest.nextBinder
        (typeTest :: treeMakers(unappBinder, pos), unappBinder)
      } else {
        val nonNullTest = NonNullTestTreeMaker(typeTest.nextBinder, paramType, pos)
        val unappBinder = nonNullTest.nextBinder
        (typeTest :: nonNullTest :: treeMakers(unappBinder, pos), unappBinder)
      }
    }
  }

  // Propagate the extracted component types onto the subpattern binders.
  foreach2(extractor.subBoundTrees, extractor.subPatTypes(unappBinder)) { (bt, pt) =>
    setVarInfo(bt.binder, pt)
  }

  step(makers: _*)(extractor.subBoundTrees: _*)
}
// Summary of translation cases. I moved the excerpts from the specification further below so all
// the logic can be seen at once.
//
// [1] skip wildcard trees -- no point in checking them
// [2] extractor and constructor patterns
// [3] replace subpatBinder by patBinder, as if the Bind was not there.
// It must be patBinder, as subpatBinder has the wrong info: even if the bind assumes a better type,
// this is not guaranteed until we cast
// [4] typed patterns - a typed pattern never has any subtrees
// must treat Typed and Bind together -- we need to know the patBinder of the Bind pattern to get at the actual type
// [5] literal and stable id patterns
// [6] pattern alternatives
// [7] symbol-less bind patterns - this happens in certain ill-formed programs, there'll be an error later
// don't fail here though (or should we?)
/** Dispatch on the shape of the pattern tree to the matching translation step
 *  (the bracketed case numbers are explained in the summary comment above).
 */
def nextStep(): TranslationStep = tree match {
  case WildcardPattern()                                        => noStep()
  case _: UnApply | _: Apply                                    => extractorStep()
  case SymbolAndTypeBound(sym, tpe)                             => typeTestStep(sym, tpe)
  case TypeBound(tpe)                                           => typeTestStep(binder, tpe)
  case SymbolBound(sym, expr)                                   => bindingStep(sym, expr)
  case Literal(Constant(_)) | Ident(_) | Select(_, _) | This(_) => equalityTestStep()
  case Alternative(alts)                                        => alternativesStep(alts)
  case _                                                        => reporter.error(pos, unsupportedPatternMsg) ; noStep()
}
// Translate this bound tree: take one step, then recursively translate its subpatterns.
def translate(): List[TreeMaker] = nextStep() merge (_.translate())
// --- string-rendering helpers, used by toString below ---
private def concreteType = tpe.upperBound
private def unbound = unbind(tree)
// show just `pt` when it fits inside the binder's type, otherwise show both
private def tpe_s = if (pt <:< concreteType) "" + pt else s"$pt (binder: $tpe)"
// render the pattern part, omitting wildcards
private def at_s = unbound match {
  case WildcardPattern() => ""
  case pat               => s" @ $pat"
}
override def toString = s"${binder.name}: $tpe_s$at_s"
}
// a list of TreeMakers that encode `patTree`, and a list of arguments for recursive invocations of `translatePattern` to encode its subpatterns
/** One step of pattern translation: the tree makers that encode the current
 *  pattern node, plus the bound subpatterns on which `translatePattern` must
 *  recurse to encode the rest.
 */
final case class TranslationStep(makers: List[TreeMaker], subpatterns: List[BoundTree]) {
  /** This step's makers followed by the makers obtained by translating each subpattern with `f`. */
  def merge(f: BoundTree => List[TreeMaker]): List[TreeMaker] =
    makers ++ subpatterns.flatMap(f)

  override def toString = subpatterns match {
    case Nil  => ""
    case subs => subs.mkString("(", ", ", ")")
  }
}
/** Implement a pattern match by turning its cases (including the implicit failure case)
* into the corresponding (monadic) extractors, and combining them with the `orElse` combinator.
*
* For `scrutinee match { case1 ... caseN }`, the resulting tree has the shape
* `runOrElse(scrutinee)(x => translateCase1(x).orElse(translateCase2(x)).....orElse(zero))`
*
* NOTE: the resulting tree is not type checked, nor are nested pattern matches transformed
* thus, you must typecheck the result (and that will in turn translate nested matches)
* this could probably optimized... (but note that the matchStrategy must be solved for each nested patternmatch)
*/
/** Implement `selector match { case1 ... caseN }` by translating each case into
 *  tree makers and combining them (see the class comment above for the resulting
 *  shape). A trailing synthetic default case is split off and passed through as
 *  `defaultOverride` instead of being translated like a user case.
 *
 *  NOTE: the result is untyped; the caller must typecheck it (which also
 *  translates nested matches).
 */
def translateMatch(match_ : Match): Tree = {
  val Match(selector, cases) = match_
  val (nonSyntheticCases, defaultOverride) = cases match {
    case init :+ last if treeInfo isSyntheticDefaultCase last => (init, Some(((scrut: Tree) => last.body)))
    case _                                                    => (cases, None)
  }
  if (!settings.XnoPatmatAnalysis) checkMatchVariablePatterns(nonSyntheticCases)
  // we don't transform after uncurry
  // (that would require more sophistication when generating trees,
  //  and the only place that emits Matches after typers is for exception handling anyway)
  if (phase.id >= currentRun.uncurryPhase.id)
    devWarning(s"running translateMatch past uncurry (at $phase) on $selector match $cases")
  debug.patmat("translating "+ cases.mkString("{", "\\n", "}"))
  val start = if (StatisticsStatics.areSomeColdStatsEnabled) statistics.startTimer(statistics.patmatNanos) else null
  val selectorTp = repeatedToSeq(elimAnonymousClass(selector.tpe.widen.withoutAnnotations))
  // when one of the internal cps-type-state annotations is present, strip all CPS annotations
  val origPt = removeCPSFromPt(match_.tpe)
  // relevant test cases: pos/existentials-harmful.scala, pos/gadt-gilles.scala, pos/t2683.scala, pos/virtpatmat_exist4.scala
  // pt is the skolemized version
  val pt = repeatedToSeq(origPt)
  // val packedPt = repeatedToSeq(typer.packedType(match_, context.owner))
  val selectorSym = freshSym(selector.pos, pureType(selectorTp)) setFlag treeInfo.SYNTH_CASE_FLAGS
  // pt = Any* occurs when compiling test/files/pos/annotDepMethType.scala
  val combined = combineCases(selector, selectorSym, nonSyntheticCases map translateCase(selectorSym, pt), pt, selectorPos, matchOwner, defaultOverride)
  if (StatisticsStatics.areSomeColdStatsEnabled) statistics.stopTimer(statistics.patmatNanos, start)
  combined
}
// return list of typed CaseDefs that are supported by the backend (typed/bind/wildcard)
// we don't have a global scrutinee -- the caught exception must be bound in each of the casedefs
// there's no need to check the scrutinee for null -- "throw null" becomes "throw new NullPointerException"
// try to simplify to a type-based switch, or fall back to a catch-all case that runs a normal pattern match
// unlike translateMatch, we type our result before returning it
// return list of typed CaseDefs that are supported by the backend (typed/bind/wildcard)
// we don't have a global scrutinee -- the caught exception must be bound in each of the casedefs
// there's no need to check the scrutinee for null -- "throw null" becomes "throw new NullPointerException"
// try to simplify to a type-based switch, or fall back to a catch-all case that runs a normal pattern match
// unlike translateMatch, we type our result before returning it
def translateTry(caseDefs: List[CaseDef], pt: Type, pos: Position): List[CaseDef] =
  // if they're already simple enough to be handled by the back-end, we're done
  if (caseDefs forall treeInfo.isCatchCase) {
    // well, we do need to look for unreachable cases
    if (!settings.XnoPatmatAnalysis) unreachableTypeSwitchCase(caseDefs).foreach(cd => reportUnreachable(cd.body.pos))
    caseDefs
  } else {
    val swatches = { // switch-catches
      // scala/bug#7459 must duplicate here as we haven't committed to switch emission, and just figuring out
      // if we can ends up mutating `caseDefs` down in the use of `substituteSymbols` in
      // `TypedSubstitution#Substitution`. That is called indirectly by `emitTypeSwitch`.
      val bindersAndCases = caseDefs.map(_.duplicate) map { caseDef =>
        // generate a fresh symbol for each case, hoping we'll end up emitting a type-switch (we don't have a global scrut there)
        // if we fail to emit a fine-grained switch, have to do translateCase again with a single scrutSym (TODO: uniformize substitution on treemakers so we can avoid this)
        val caseScrutSym = freshSym(caseDef.pat.pos, pureType(ThrowableTpe))
        (caseScrutSym, propagateSubstitution(translateCase(caseScrutSym, pt)(caseDef), EmptySubstitution))
      }
      for(cases <- emitTypeSwitch(bindersAndCases, pt).toList
        if cases forall treeInfo.isCatchCase; // must check again, since it's not guaranteed -- TODO: can we eliminate this? e.g., a type test could test for a trait or a non-trivial prefix, which are not handled by the back-end
        cse <- cases) yield fixerUpper(matchOwner, pos)(cse).asInstanceOf[CaseDef]
    }
    // fallback: a single catch-all case that binds the exception and runs a full
    // pattern match over it, rethrowing when no case matches
    val catches = if (swatches.nonEmpty) swatches else {
      val scrutSym = freshSym(caseDefs.head.pat.pos, pureType(ThrowableTpe))
      val casesNoSubstOnly = caseDefs map { caseDef => (propagateSubstitution(translateCase(scrutSym, pt)(caseDef), EmptySubstitution))}
      val exSym = freshSym(pos, pureType(ThrowableTpe), "ex")
      List(
        atPos(pos) {
          CaseDef(
            Bind(exSym, Ident(nme.WILDCARD)), // TODO: does this need fixing upping?
            EmptyTree,
            combineCasesNoSubstOnly(REF(exSym), scrutSym, casesNoSubstOnly, pt, selectorPos, matchOwner, Some(scrut => Throw(REF(exSym))))
          )
        })
    }
    typer.typedCases(catches, ThrowableTpe, WildcardType)
  }
/** The translation of `pat if guard => body` has two aspects:
* 1) the substitution due to the variables bound by patterns
* 2) the combination of the extractor calls using `flatMap`.
*
* 2) is easy -- it looks like: `translatePattern_1.flatMap(translatePattern_2....flatMap(translatePattern_N.flatMap(translateGuard.flatMap((x_i) => success(Xbody(x_i)))))...)`
* this must be right-leaning tree, as can be seen intuitively by considering the scope of bound variables:
* variables bound by pat_1 must be visible from the function inside the left-most flatMap right up to Xbody all the way on the right
* 1) is tricky because translatePattern_i determines the shape of translatePattern_i+1:
* zoom in on `translatePattern_1.flatMap(translatePattern_2)` for example -- it actually looks more like:
* `translatePattern_1(x_scrut).flatMap((x_1) => {y_i -> x_1._i}translatePattern_2)`
*
* `x_1` references the result (inside the monad) of the extractor corresponding to `pat_1`,
* this result holds the values for the constructor arguments, which translatePattern_1 has extracted
* from the object pointed to by `x_scrut`. The `y_i` are the symbols bound by `pat_1` (in order)
* in the scope of the remainder of the pattern, and they must thus be replaced by:
* - (for 1-ary unapply) x_1
* - (for n-ary unapply, n > 1) selection of the i'th tuple component of `x_1`
* - (for unapplySeq) x_1.apply(i)
*
* in the treemakers,
*
* Thus, the result type of `translatePattern_i`'s extractor must conform to `M[(T_1,..., T_n)]`.
*
* Operationally, phase 1) is a foldLeft, since we must consider the depth-first-flattening of
* the transformed patterns from left to right. For every pattern ast node, it produces a transformed ast and
* a function that will take care of binding and substitution of the next ast (to the right).
*
*/
/** Translate one `pat if guard => body` case into a chain of tree makers:
 *  pattern makers, then an optional guard maker, then the body maker.
 */
def translateCase(scrutSym: Symbol, pt: Type)(caseDef: CaseDef) = {
  val CaseDef(pattern, guard, body) = caseDef
  translatePattern(BoundTree(scrutSym, pattern)) ++ translateGuard(guard) :+ translateBody(body, pt)
}

/** Translate a pattern bound to a scrutinee symbol (entry point for recursion). */
def translatePattern(bound: BoundTree): List[TreeMaker] = bound.translate()

/** An empty guard (`EmptyTree`) contributes no makers; otherwise a single GuardTreeMaker. */
def translateGuard(guard: Tree): List[TreeMaker] =
  if (guard == EmptyTree) Nil
  else List(GuardTreeMaker(guard))

// TODO: 1) if we want to support a generalisation of Kotlin's patmat continue, must not hard-wire lifting into the monad (which is now done by codegen.one),
// so that user can generate failure when needed -- use implicit conversion to lift into monad on-demand?
// to enable this, probably need to move away from Option to a monad specific to pattern-match,
// so that we can return Option's from a match without ambiguity whether this indicates failure in the monad, or just some result in the monad
// 2) body.tpe is the type of the body after applying the substitution that represents the solution of GADT type inference
// need the explicit cast in case our substitutions in the body change the type to something that doesn't take GADT typing into account
def translateBody(body: Tree, matchPt: Type): TreeMaker =
  BodyTreeMaker(body, matchPt)
// Some notes from the specification
/*A constructor pattern is of the form c(p1, ..., pn) where n ≥ 0.
It consists of a stable identifier c, followed by element patterns p1, ..., pn.
The constructor c is a simple or qualified name which denotes a case class (§5.3.2).
If the case class is monomorphic, then it must conform to the expected type of the pattern,
and the formal parameter types of x’s primary constructor (§5.3) are taken as the expected
types of the element patterns p1, ..., pn.
If the case class is polymorphic, then its type parameters are instantiated so that the
instantiation of c conforms to the expected type of the pattern.
The instantiated formal parameter types of c’s primary constructor are then taken as the
expected types of the component patterns p1, ..., pn.
The pattern matches all objects created from constructor invocations c(v1, ..., vn)
where each element pattern pi matches the corresponding value vi .
A special case arises when c’s formal parameter types end in a repeated parameter.
This is further discussed in (§8.1.9).
**/
/* A typed pattern x : T consists of a pattern variable x and a type pattern T.
The type of x is the type pattern T, where each type variable and wildcard is replaced by a fresh, unknown type.
This pattern matches any value matched by the type pattern T (§8.2); it binds the variable name to that value.
*/
/* A pattern binder x@p consists of a pattern variable x and a pattern p.
The type of the variable x is the static type T of the pattern p.
This pattern matches any value v matched by the pattern p,
provided the run-time type of v is also an instance of T, <-- TODO! https://github.com/scala/bug/issues/1503
and it binds the variable name to that value.
*/
/* 8.1.4 Literal Patterns
A literal pattern L matches any value that is equal (in terms of ==) to the literal L.
The type of L must conform to the expected type of the pattern.
8.1.5 Stable Identifier Patterns (a stable identifier r (see §3.1))
The pattern matches any value v such that r == v (§12.1).
The type of r must conform to the expected type of the pattern.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// helper methods: they analyze types and trees in isolation, but they are not (directly) concerned with the structure of the overall translation
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** Factory dispatching on the pattern tree's shape: a real unapply/unapplySeq
 *  becomes an ExtractorCallRegular, a case-class constructor pattern an
 *  ExtractorCallProd. The match is intentionally non-exhaustive — any other
 *  tree shape throws a MatchError (callers are expected to only pass UnApply
 *  or Apply trees, cf. the dispatch in nextStep).
 */
object ExtractorCall {
  // TODO: check unargs == args
  def apply(tree: Tree): ExtractorCall = tree match {
    case UnApply(unfun@Unapplied(fun), args) => new ExtractorCallRegular(fun, args)(unfun) // extractor
    case Apply(fun, args)                    => new ExtractorCallProd(fun, args)           // case class
  }
}
/** Shared machinery for translating extractor/constructor patterns: subpattern
 *  binders, the trees that select subpattern values out of the extractor's
 *  result, and the optional length guard for unapplySeq patterns.
 */
abstract class ExtractorCall(fun: Tree, args: List[Tree]) extends ExtractorAlignment(fun, args)(context) {
  /** Create the TreeMaker that embodies this extractor call
   */
  def treeMakers(binder: Symbol, pos: Position): List[TreeMaker]

  // `subPatBinders` are the variables bound by this pattern in the following patterns
  // subPatBinders are replaced by references to the relevant part of the extractor's result (tuple component, seq element, the result as-is)
  // must set infos to `subPatTypes`, which are provided by extractor's result,
  // as b.info may be based on a Typed type ascription, which has not been taken into account yet by the translation
  // (it will later result in a type test when `tp` is not a subtype of `b.info`)
  // TODO: can we simplify this, together with the Bound case?
  def subPatBinders = subBoundTrees map (_.binder)
  // each argument, paired with its binder (an explicit bind's symbol, or a fresh one)
  lazy val subBoundTrees: List[BoundTree] = args map {
    case SymbolBound(sym, expr) => BoundTree(sym, expr)
    case tree                   => BoundTree(freshSym(tree.pos, prefix = "p"), tree)
  }
  // never store these in local variables (for PreserveSubPatBinders)
  lazy val ignoredSubPatBinders: Set[Symbol] = (subPatBinders zip args).collect { case (b, PatternBoundToUnderscore()) => b }.toSet
  // there are `productArity` non-seq elements in the tuple.
  protected def firstIndexingBinder = productArity
  protected def expectedLength = elementArity
  protected def lastIndexingBinder = nonStarArity - 1
  // selections _1 .. _n on the extractor result
  private def productElemsToN(binder: Symbol, n: Int): List[Tree] = if (n == 0) Nil else List.tabulate(n)(i => tupleSel(binder)(i + 1))
  // the first n indexed elements of the sequence part
  private def genTake(binder: Symbol, n: Int): List[Tree] = if (n == 0) Nil else List.tabulate(n)(codegen index seqTree(binder, forceImmutable = false))
  // the sequence part with its first n elements dropped (for a trailing wildcard-star)
  private def genDrop(binder: Symbol, n: Int): List[Tree] = codegen.drop(seqTree(binder, forceImmutable = false))(n) :: Nil
  // codegen.drop(seqTree(binder))(nbIndexingIndices)))).toList
  protected def seqTree(binder: Symbol, forceImmutable: Boolean) = tupleSel(binder)(firstIndexingBinder + 1)
  protected def tupleSel(binder: Symbol)(i: Int): Tree = codegen.tupleSel(binder)(i)

  // the trees that select the subpatterns on the extractor's result,
  // referenced by `binder`
  protected def subPatRefsSeq(binder: Symbol): List[Tree] = {
    def lastTrees: List[Tree] = {
      if (!isStar) Nil
      else if (expectedLength == 0) seqTree(binder, forceImmutable = true) :: Nil
      else genDrop(binder, expectedLength)
    }
    // this error-condition has already been checked by checkStarPatOK:
    // if(isSeq) assert(firstIndexingBinder + nbIndexingIndices + (if(lastIsStar) 1 else 0) == totalArity, "(resultInMonad, ts, subPatTypes, subPats)= "+(resultInMonad, ts, subPatTypes, subPats))
    // [1] there are `firstIndexingBinder` non-seq tuple elements preceding the Seq
    // [2] then we have to index the binder that represents the sequence for the remaining subpatterns, except for...
    // [3] the last one -- if the last subpattern is a sequence wildcard:
    //     drop the prefix (indexed by the refs on the preceding line), return the remainder
    ( productElemsToN(binder, firstIndexingBinder)
      ++ genTake(binder, expectedLength)
      ++ lastTrees
    ).toList
  }

  // the trees that select the subpatterns on the extractor's result, referenced by `binder`
  // require (nbSubPats > 0 && (!lastIsStar || isSeq))
  protected def subPatRefs(binder: Symbol): List[Tree] = {
    if (totalArity > 0 && isSeq) subPatRefsSeq(binder)
    else productElemsToN(binder, totalArity)
  }

  // `scala.math.signum(t1 - t2)` as a tree
  private def compareInts(t1: Tree, t2: Tree) =
    gen.mkMethodCall(termMember(ScalaPackage, "math"), TermName("signum"), Nil, (t1 INT_- t2) :: Nil)

  protected def lengthGuard(binder: Symbol): Option[Tree] =
    // no need to check unless it's an unapplySeq and the minimal length is non-trivially satisfied
    checkedLength map { expectedLength =>
      // `binder.lengthCompare(expectedLength)`
      // ...if binder has a lengthCompare method, otherwise
      // `scala.math.signum(binder.length - expectedLength)`
      def checkExpectedLength = {
        val tree = seqTree(binder, forceImmutable = false)
        val typedTree = typer.typed(tree)
        val lengthCompareSym = typedTree.tpe.member(nme.lengthCompare)
        if (lengthCompareSym == NoSymbol) compareInts(Select(typedTree, nme.length), LIT(expectedLength))
        else (typedTree DOT lengthCompareSym)(LIT(expectedLength))
      }
      // the comparison to perform
      // when the last subpattern is a wildcard-star the expectedLength is but a lower bound
      // (otherwise equality is required)
      def compareOp: (Tree, Tree) => Tree =
        if (isStar) _ INT_>= _
        else _ INT_== _
      // `if (binder != null && $checkExpectedLength [== | >=] 0) then else zero`
      (seqTree(binder, forceImmutable = false) ANY_!= NULL) AND compareOp(checkExpectedLength, ZERO)
    }

  def checkedLength: Option[Int] =
    // no need to check unless it's an unapplySeq and the minimal length is non-trivially satisfied
    if (!isSeq || expectedLength < starArity) None
    else Some(expectedLength)
}
// TODO: to be called when there's a def unapplyProd(x: T): U
// U must have N members _1,..., _N -- the _i are type checked, call their type Ti,
// for now only used for case classes -- pretending there's an unapplyProd that's the identity (and don't call it)
// TODO: to be called when there's a def unapplyProd(x: T): U
// U must have N members _1,..., _N -- the _i are type checked, call their type Ti,
// for now only used for case classes -- pretending there's an unapplyProd that's the identity (and don't call it)
class ExtractorCallProd(fun: Tree, args: List[Tree]) extends ExtractorCall(fun, args) {
  /** Create the TreeMaker that embodies this extractor call
   *
   *  `binder` has been casted to `paramType` if necessary
   */
  def treeMakers(binder: Symbol, pos: Position): List[TreeMaker] = {
    val paramAccessors = expectedExtractedType.typeSymbol.constrParamAccessors
    val numParams = paramAccessors.length
    // clamp to the last accessor so a trailing repeated param covers extra subpatterns
    def paramAccessorAt(subPatIndex: Int) = paramAccessors(math.min(subPatIndex, numParams - 1))
    // binders corresponding to mutable fields should be stored (scala/bug#5158, scala/bug#6070)
    // make an exception for classes under the scala package as they should be well-behaved,
    // to optimize matching on List
    val mutableBinders = (
      if (!binder.info.typeSymbol.hasTransOwner(ScalaPackageClass) &&
          (paramAccessors exists (x => x.isMutable || definitions.isRepeated(x)))) {
        subPatBinders.zipWithIndex.flatMap {
          case (binder, idx) =>
            val param = paramAccessorAt(idx)
            if (param.isMutable || (definitions.isRepeated(param) && !isStar)) binder :: Nil
            else Nil
        }
      } else Nil
    )
    // checks binder ne null before chaining to the next extractor
    ProductExtractorTreeMaker(binder, lengthGuard(binder))(subPatBinders, subPatRefs(binder), mutableBinders, ignoredSubPatBinders) :: Nil
  }

  // reference the (i-1)th case accessor if it exists, otherwise the (i-1)th tuple component
  override protected def tupleSel(binder: Symbol)(i: Int): Tree = {
    val accessors = expectedExtractedType.typeSymbol.caseFieldAccessors
    if (accessors isDefinedAt (i-1)) gen.mkAttributedStableRef(binder) DOT accessors(i-1)
    else codegen.tupleSel(binder)(i) // this won't type check for case classes, as they do not inherit ProductN
  }
}
/**
*
* @param fun reference to the unapply method
* @param args the subpatterns
* @param unapplyAppliedToDummy an application of the unapply method to the (dummy) unapply selector
*/
/** Translation of a genuine `unapply`/`unapplySeq` call.
 *
 *  @param fun reference to the unapply method
 *  @param args the subpatterns
 *  @param unapplyAppliedToDummy an application of the unapply method to the (dummy) unapply selector
 */
class ExtractorCallRegular(fun: Tree, args: List[Tree])(unapplyAppliedToDummy: Tree) extends ExtractorCall(fun, args) {
  override lazy val unapplySelector =
    unapplyAppliedToDummy match {
      case Apply(_, (dummy@Ident(nme.SELECTOR_DUMMY)) :: Nil) => dummy.symbol
      case _ => NoSymbol // if the unapply is applied to <unapply-selector>.toXXXX, we can't use the selector dummy's symbol
    }

  /** Create the TreeMaker that embodies this extractor call
   *
   *  `binder` has been casted to `paramType` if necessary
   *
   *  TODO: implement review feedback by @retronym:
   *    Passing the pair of values around suggests:
   *       case class Binder(sym: Symbol, knownNotNull: Boolean).
   *    Perhaps it hasn't reached critical mass, but it would already clean things up a touch.
   */
  def treeMakers(patBinderOrCasted: Symbol, pos: Position): List[TreeMaker] = {
    // the extractor call (applied to the binder bound by the flatMap corresponding
    // to the previous (i.e., enclosing/outer) pattern)
    val (extractorApply, needsSubst) = spliceApply(pos, patBinderOrCasted)
    // can't simplify this when subPatBinders.isEmpty, since UnitTpe is definitely
    // wrong when isSeq, and resultInMonad should always be correct since it comes
    // directly from the extractor's result type
    val binder = freshSym(pos, pureType(resultInMonad(patBinderOrCasted)))
    val potentiallyMutableBinders: Set[Symbol] =
      if (extractorApply.tpe.typeSymbol.isNonBottomSubClass(OptionClass) && !isSeq)
        Set.empty
      else
        // Ensures we capture unstable bound variables eagerly. These can arise under name based patmat or by indexing into mutable Seqs. See run t9003.scala
        subPatBinders.toSet
    // types may refer to the dummy symbol unapplySelector (in case of dependent method type for the unapply method)
    val extractorTreeMaker = ExtractorTreeMaker(extractorApply, lengthGuard(binder), binder)(
      subPatBinders,
      subPatRefs(binder),
      potentiallyMutableBinders,
      isBool,
      checkedLength,
      patBinderOrCasted,
      ignoredSubPatBinders
    )
    if (needsSubst)
      SubstOnlyTreeMaker(unapplySelector, patBinderOrCasted) :: extractorTreeMaker :: Nil
    else
      extractorTreeMaker :: Nil
  }

  // when the extractor result itself is the sequence, use the binder directly
  // (converting to an immutable Seq if required), otherwise fall back to tuple selection
  override protected def seqTree(binder: Symbol, forceImmutable: Boolean): Tree =
    if (firstIndexingBinder == 0) {
      val ref = REF(binder)
      if (forceImmutable && !binder.tpe.typeSymbol.isNonBottomSubClass(SeqClass)) Select(ref, nme.toSeq)
      else ref
    }
    else super.seqTree(binder, forceImmutable)

  // the trees that select the subpatterns on the extractor's result, referenced by `binder`
  // require (totalArity > 0 && (!lastIsStar || isSeq))
  override protected def subPatRefs(binder: Symbol): List[Tree] =
    if (isSingle) REF(binder) :: Nil // special case for extractors
    else super.subPatRefs(binder)

  /** Replace the dummy selector inside `unapplyAppliedToDummy` by a reference to
   *  `binder`, returning the spliced tree and whether a symbol substitution for
   *  the dummy is additionally required (dependent unapply result types).
   */
  protected def spliceApply(pos: Position, binder: Symbol): (Tree, Boolean) = {
    var needsSubst = false
    object splice extends Transformer {
      def binderRef(pos: Position): Tree =
        REF(binder) setPos pos
      override def transform(t: Tree) = t match {
        // duplicated with the extractor Unapplied
        case Apply(x, List(i @ Ident(nme.SELECTOR_DUMMY))) =>
          // in case the result type depended on the unapply's argument, plug in the new symbol
          val apply = treeCopy.Apply(t, x, binderRef(i.pos) :: Nil)
          val tpe = apply.tpe
          val substedTpe = tpe.substSym(List(i.symbol), List(binder))
          if (tpe ne substedTpe) {
            needsSubst = true
            apply.setType(substedTpe)
          }
          apply
        // scala/bug#7868 Account for numeric widening, e.g. <unapplySelector>.toInt
        case Apply(x, List(i @ (sel @ Select(Ident(nme.SELECTOR_DUMMY), name)))) =>
          // not substituting `binder` for `i.symbol`: widening conversion implies the binder could not be used as a path
          treeCopy.Apply(t, x, treeCopy.Select(sel, binderRef(i.pos), name) :: Nil)
        case _ =>
          super.transform(t)
      }
    }
    (atPos(pos)(splice transform unapplyAppliedToDummy), needsSubst)
  }
}
}
}
| martijnhoekstra/scala | src/compiler/scala/tools/nsc/transform/patmat/MatchTranslation.scala | Scala | apache-2.0 | 34,030 |
package aima.core.environment.map2d
/**
* @author Shawn Garner
*/
/**
 * A mutable directed graph whose vertices carry `Vertex` labels and whose
 * directed edges carry one `Edge` value per (from, to) pair.
 *
 * Insertion order is preserved: `vertexLabels` lists vertices in first-seen
 * order and `successors` lists targets in edge-insertion order (both rely on
 * `LinkedHashMap` iteration order).
 *
 * @author Shawn Garner
 */
final class LabeledGraph[Vertex, Edge] {
  import scala.collection.mutable

  // from-vertex -> (to-vertex -> edge); LinkedHashMap keeps insertion order.
  // TODO: get rid of mutability; ListMap should work
  val globalEdgeLookup = new mutable.LinkedHashMap[Vertex, mutable.LinkedHashMap[Vertex, Edge]]()
  // vertices in first-seen order. TODO: get rid of mutability
  val vertexLabelsList = new mutable.ArrayBuffer[Vertex]()

  /** Registers `v` as a vertex; no-op if it is already known. */
  def addVertex(v: Vertex): Unit = {
    checkForNewVertex(v)
    ()
  }

  /** Adds (or replaces) the directed edge `from -> to`, registering both endpoints. */
  def set(from: Vertex, to: Vertex, edge: Edge): Unit = {
    val localEdgeLookup = checkForNewVertex(from)
    localEdgeLookup.put(to, edge)
    checkForNewVertex(to)
    ()
  }

  /** Removes the directed edge `from -> to` if present; the vertices stay registered. */
  def remove(from: Vertex, to: Vertex): Unit =
    globalEdgeLookup.get(from).foreach(_.remove(to))

  /** The edge value on `from -> to`, if such an edge exists. */
  def get(from: Vertex, to: Vertex): Option[Edge] =
    globalEdgeLookup.get(from).flatMap(_.get(to))

  /** Direct successors of `v` in edge-insertion order; empty if `v` is unknown. */
  def successors(v: Vertex): List[Vertex] =
    globalEdgeLookup.get(v).toList.flatMap(_.keys.toList)

  /** All registered vertices in first-seen order. */
  def vertexLabels: List[Vertex] =
    vertexLabelsList.toList

  /** True if `v` has been registered as a vertex. */
  def isVertexLabel(v: Vertex): Boolean =
    globalEdgeLookup.contains(v)

  /** Removes all vertices and edges. */
  def clear(): Unit = {
    vertexLabelsList.clear()
    globalEdgeLookup.clear()
  }

  /** Returns `v`'s edge map, creating and registering it on first sight. */
  private def checkForNewVertex(v: Vertex): mutable.LinkedHashMap[Vertex, Edge] =
    globalEdgeLookup.getOrElseUpdate(v, {
      vertexLabelsList.append(v)
      new mutable.LinkedHashMap[Vertex, Edge]
    })
}
| aimacode/aima-scala | core/src/main/scala/aima/core/environment/map2d/LabeledGraph.scala | Scala | mit | 1,727 |
package de.twentyone.sbt
import java.nio.file.{CopyOption, Files, Path}
object BetterFiles {

  /** Moves `source` to `target`, skipping the move (and logging) when the target
   *  already exists with byte-identical content. When skipped, `source` is left
   *  in place untouched.
   *
   *  The `Files.exists(target)` guard fixes a crash in the original version:
   *  `Files.readAllBytes(target)` throws `NoSuchFileException` when the target
   *  does not exist yet, which made a plain first-time move impossible.
   */
  def move(logger: sbt.Logger)(source: Path, target: Path, copyOptions: CopyOption*): Unit = {
    if (Files.exists(target) && Files.readAllBytes(source).sameElements(Files.readAllBytes(target)))
      logger.info("\\t> Nothing to do")
    else
      Files.move(source, target, copyOptions: _*)
  }
}
| 21re/sbt-play-routes-formatter | src/main/scala/de/twentyone/sbt/BetterFiles.scala | Scala | mit | 372 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.optimization
import scala.collection.JavaConverters._
import scala.util.Random
import org.scalatest.Matchers
import org.apache.spark.SparkFunSuite
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression._
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.mllib.util.{MLUtils, MLlibTestSparkContext}
object ParallelizedSGDSuite {

  /** Java-friendly variant of [[generateGDInput]]. */
  def generateLogisticInputAsList(
      offset: Double,
      scale: Double,
      nPoints: Int,
      seed: Int): java.util.List[LabeledPoint] =
    generateGDInput(offset, scale, nPoints, seed).asJava

  // Generate input of the form Y = logistic(offset + scale * X)
  def generateGDInput(
      offset: Double,
      scale: Double,
      nPoints: Int,
      seed: Int): Seq[LabeledPoint] = {
    val gaussian = new Random(seed)
    val features = Array.fill[Double](nPoints)(gaussian.nextGaussian())
    val uniform = new Random(45)
    // logistic noise: log(u) - log(1 - u) for u ~ Uniform(0, 1)
    val noise = Array.fill[Double](nPoints) {
      val u = uniform.nextDouble()
      math.log(u) - math.log(1.0 - u)
    }
    (0 until nPoints).map { i =>
      val label = if (offset + scale * features(i) + noise(i) > 0) 1 else 0
      LabeledPoint(label, Vectors.dense(features(i)))
    }
  }
}
/** Exercises ParallelizedSGD on synthetic logistic-regression data:
 *  decreasing loss, L2 regularization effect, and convergence-tolerance stopping.
 */
class ParallelizedSGDSuite extends SparkFunSuite with MLlibTestSparkContext with Matchers {

  test("Assert the loss is decreasing.") {
    val nPoints = 10000
    val A = 2.0
    val B = -1.5
    val initialB = -1.0
    val initialWeights = Array(initialB)
    val gradient = new LogisticGradient()
    val updater = new SimpleSGDUpdater()
    val stepSize = 1.0
    val numIterations = 10
    val regParam = 0
    val miniBatchFrac = 1.0
    // Add an extra variable consisting of all 1.0's for the intercept.
    val testData = ParallelizedSGDSuite.generateGDInput(A, B, nPoints, 42)
    val data = testData.map { case LabeledPoint(label, features) =>
      label -> MLUtils.appendBias(features)
    }
    val dataRDD = sc.parallelize(data, 2).cache()
    val initialWeightsWithIntercept = Vectors.dense(initialWeights.toArray :+ 1.0)
    val (_, loss) = ParallelizedSGD.runParallelizedSGD(
      dataRDD,
      gradient,
      updater,
      stepSize,
      numIterations,
      regParam,
      miniBatchFrac,
      initialWeightsWithIntercept)
    // only the endpoints are compared; strict per-iteration monotonicity is not
    // required (see the commented-out 80%-decreasing check below)
    assert(loss.last - loss.head < 0, "loss isn't decreasing.")
    // val lossDiff = loss.init.zip(loss.tail).map { case (lhs, rhs) => lhs - rhs }
    // assert(lossDiff.count(_ > 0).toDouble / lossDiff.size > 0.8)
  }

  test("Test the loss and gradient of first iteration with regularization.") {
    val gradient = new LogisticGradient()
    val updater = new SquaredL2SGDUpdater()
    // Add an extra variable consisting of all 1.0's for the intercept.
    val numPartitions = 2
    val testData = ParallelizedSGDSuite.generateGDInput(2.0, -1.5, numPartitions, 42)
    val data = testData.map { case LabeledPoint(label, features) =>
      label -> Vectors.dense(1.0 +: features.toArray)
    }
    val dataRDD = sc.parallelize(data, numPartitions).cache()
    // Prepare non-zero weights
    val initialWeightsWithIntercept = Vectors.dense(1.0, 0.5)
    // run one iteration without and with L2 regularization and compare
    val regParam0 = 0
    val (newWeights0, loss0) = ParallelizedSGD.runParallelizedSGD(
      dataRDD, gradient, updater, 1, 1, regParam0, 1.0, initialWeightsWithIntercept)
    val regParam1 = 1
    val (newWeights1, loss1) = ParallelizedSGD.runParallelizedSGD(
      dataRDD, gradient, updater, 1, 1, regParam1, 1.0, initialWeightsWithIntercept)
    assert(
      loss1(0) ~= (loss0(0) + (math.pow(initialWeightsWithIntercept(0), 2) +
        math.pow(initialWeightsWithIntercept(1), 2)) / 2) absTol 1E-5,
      """For non-zero weights, the regVal should be \\frac{1}{2}\\sum_i w_i^2.""")
    assert(
      (newWeights1(0) ~= (newWeights0(0) - initialWeightsWithIntercept(0)) absTol 1E-5) &&
      (newWeights1(1) ~= (newWeights0(1) - initialWeightsWithIntercept(1)) absTol 1E-5),
      "The different between newWeights with/without regularization " +
        "should be initialWeightsWithIntercept.")
  }

  test("iteration should end with convergence tolerance") {
    val nPoints = 10000
    val A = 2.0
    val B = -1.5
    val initialB = -1.0
    val initialWeights = Array(initialB)
    val gradient = new LogisticGradient()
    val updater = new SimpleSGDUpdater()
    val stepSize = 1.0
    val numIterations = 10
    val regParam = 0
    val miniBatchFrac = 1.0
    // a loose tolerance so the optimizer stops before exhausting numIterations
    val convergenceTolerance = 5.0e-1
    // Add an extra variable consisting of all 1.0's for the intercept.
    val testData = ParallelizedSGDSuite.generateGDInput(A, B, nPoints, 42)
    val data = testData.map { case LabeledPoint(label, features) =>
      label -> MLUtils.appendBias(features)
    }
    val dataRDD = sc.parallelize(data, 2).cache()
    val initialWeightsWithIntercept = Vectors.dense(initialWeights.toArray :+ 1.0)
    val (_, loss) = ParallelizedSGD.runParallelizedSGD(
      dataRDD,
      gradient,
      updater,
      stepSize,
      numIterations,
      regParam,
      miniBatchFrac,
      initialWeightsWithIntercept,
      convergenceTolerance)
    assert(loss.length < numIterations, "convergenceTolerance failed to stop optimization early")
  }
}
| yu-iskw/spark-parallelized-sgd | src/test/scala/org/apache/spark/mllib/optimization/ParallelizedSGDSuite.scala | Scala | apache-2.0 | 6,065 |
package im.actor.server.cli
import scala.concurrent.Future
private[cli] trait BotHandlers {
  this: CliHandlers ⇒

  /** Asks the bot service to create a bot user and prints the issued token. */
  def createBot(rq: CreateBot): Future[Unit] =
    request(BotService, rq) map { resp ⇒
      println(s"Bot user created, token: ${resp.token}")
    }

  /** Fetches an existing bot's token from the bot service and prints it. */
  def getBotToken(rq: GetBotToken): Future[Unit] =
    request(BotService, rq) map { resp ⇒
      println(s"Bot token: ${resp.token}")
    }
}
| dfsilva/actor-platform | actor-server/actor-cli/src/main/scala/im/actor/server/cli/BotHandlers.scala | Scala | agpl-3.0 | 430 |
package io.youi.util
import io.youi.dom
import org.scalajs.dom.{document, html}
import reactify.Var
object Measurer {

  // Hidden, auto-sized <span> appended to the document body once (lazily) and
  // reused as a scratch area for every measurement.
  private lazy val container = {
    val span = dom.create[html.Span]("span")
    span.style.position = "absolute"
    span.style.visibility = "hidden"
    span.style.width = "auto"
    span.style.height = "auto"
    document.body.appendChild(span)
    span
  }

  /**
   * Renders `htmlString` off-screen with the requested width/height constraints,
   * then publishes the resulting bounding-box dimensions into `w` and `h`.
   */
  def measureHTML(htmlString: String, width: String, height: String, w: Var[Double], h: Var[Double]): Unit = {
    container.innerHTML = htmlString
    val element = container.firstElementChild.asInstanceOf[html.Element]
    element.style.width = width
    if (width != "auto") element.style.display = "block"
    element.style.height = height
    element.style.position = "static"
    val rect = element.getBoundingClientRect()
    // Clear the scratch area before publishing so the hidden span never keeps
    // stale content around.
    container.innerHTML = ""
    w @= rect.width
    h @= rect.height
  }
}
| outr/youi | gui/src/main/scala/io/youi/util/Measurer.scala | Scala | mit | 865 |
package ml.wolfe.nlp
/**
 * A typed key of an attribute
 * @tparam T the type of the attribute value.
 */
trait Key[+T] {
  // Render the key as the name of its implementing class.
  override def toString = getClass.getSimpleName
}

/**
 * Key for lemma attribute.
 */
case object Lemma extends Key[String]

/**
 * Typed map of attributes.
 */
trait Attributes {
  def get[T](key: Key[T]): Option[T]
  def apply[T](key: Key[T]) = get(key).get
  def add[T](key: Key[T], value: T): Attributes
  def keys: Iterable[Key[_]]

  /** Adds the value when present, otherwise returns this instance unchanged. */
  def addOpt[T](key: Key[T], opt: Option[T]): Attributes = opt.fold(this: Attributes)(add(key, _))

  override def toString = keys.map(k => s"$k -> ${apply(k)}").mkString(", ")
}
/**
 * Companion object to Attributes to provide builders etc.
 */
object Attributes {

  /** Immutable implementation backed by a plain Map; `add` returns a new instance. */
  class MapBasedAttributes(val map: Map[Key[_], Any]) extends Attributes {
    // The cast is safe because `add` only ever pairs a Key[T] with a value of type T.
    def get[T](key: Key[T]) = map.get(key).asInstanceOf[Option[T]]
    def add[T](key: Key[T], value: T) = new MapBasedAttributes(map + (key -> value))
    def keys = map.keys
  }

  /** Shared empty instance; the first `add` switches to the map-based implementation. */
  val empty = new Attributes {
    def get[T](key: Key[T]) = None
    def add[T](key: Key[T], value: T) = new MapBasedAttributes(Map(key -> value))
    def keys = Iterable.empty
  }

  /** Builds attributes from key/value pairs; later duplicates of a key win. */
  def apply(pairs: (Key[_], Any)*): Attributes = new MapBasedAttributes(pairs.toMap)
}
| wolfe-pack/wolfe | wolfe-nlp/src/main/scala/ml/wolfe/nlp/Attributes.scala | Scala | apache-2.0 | 1,313 |
package com.twitter.finagle.http2
import com.twitter.finagle.FailureFlags
import com.twitter.logging.{HasLogLevel, Level}
import java.net.SocketAddress
/**
 * Thrown when a connection that is already dead is assigned to `addr`.
 * Mixes in [[FailureFlags]] so flags can be propagated/copied, and logs at
 * DEBUG level (via [[HasLogLevel]]) since this condition is handled internally.
 */
private[http2] class DeadConnectionException(addr: SocketAddress, val flags: Long)
  extends Exception(s"assigned an already dead connection to address $addr")
  with FailureFlags[DeadConnectionException]
  with HasLogLevel {

  protected def copyWithFlags(newFlags: Long): DeadConnectionException =
    new DeadConnectionException(addr, newFlags)

  def logLevel: Level = Level.DEBUG
}
| twitter/finagle | finagle-http2/src/main/scala/com/twitter/finagle/http2/Exceptions.scala | Scala | apache-2.0 | 547 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.File
import java.util.Collections
import java.util.concurrent.{ExecutionException, TimeUnit}
import kafka.api.IntegrationTestHarness
import kafka.controller.{OfflineReplica, PartitionAndReplica}
import kafka.server.LogDirFailureTest._
import kafka.utils.{CoreUtils, Exit, TestUtils}
import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.{KafkaStorageException, NotLeaderForPartitionException}
import org.apache.kafka.common.utils.Utils
import org.junit.Assert.{assertEquals, assertFalse, assertTrue}
import org.junit.{Before, Test}
import org.scalatest.Assertions.fail
import scala.jdk.CollectionConverters._
/**
 * Test whether clients can produce and consume when there is log directory failure
 */
class LogDirFailureTest extends IntegrationTestHarness {

  val producerCount: Int = 1
  val consumerCount: Int = 1
  val brokerCount: Int = 2
  private val topic = "topic"
  private val partitionNum = 12
  // Multiple log dirs per broker so a single dir can fail while others stay online.
  override val logDirCount = 3

  this.serverConfig.setProperty(KafkaConfig.ReplicaHighWatermarkCheckpointIntervalMsProp, "60000")
  this.serverConfig.setProperty(KafkaConfig.NumReplicaFetchersProp, "1")

  @Before
  override def setUp(): Unit = {
    super.setUp()
    createTopic(topic, partitionNum, brokerCount)
  }

  @Test
  def testProduceErrorFromFailureOnLogRoll(): Unit = {
    testProduceErrorsFromLogDirFailureOnLeader(Roll)
  }

  @Test
  def testIOExceptionDuringLogRoll(): Unit = {
    testProduceAfterLogDirFailureOnLeader(Roll)
  }

  @Test
  // Broker should halt on any log directory failure if inter-broker protocol < 1.0
  def brokerWithOldInterBrokerProtocolShouldHaltOnLogDirFailure(): Unit = {
    // Capture the halt status code instead of actually terminating the JVM.
    @volatile var statusCodeOption: Option[Int] = None
    Exit.setHaltProcedure { (statusCode, _) =>
      statusCodeOption = Some(statusCode)
      throw new IllegalArgumentException
    }
    var server: KafkaServer = null
    try {
      val props = TestUtils.createBrokerConfig(brokerCount, zkConnect, logDirCount = 3)
      props.put(KafkaConfig.InterBrokerProtocolVersionProp, "0.11.0")
      props.put(KafkaConfig.LogMessageFormatVersionProp, "0.11.0")
      val kafkaConfig = KafkaConfig.fromProps(props)
      val logDir = new File(kafkaConfig.logDirs.head)
      // Make log directory of the partition on the leader broker inaccessible by replacing it with a file
      CoreUtils.swallow(Utils.delete(logDir), this)
      logDir.createNewFile()
      assertTrue(logDir.isFile)
      server = TestUtils.createServer(kafkaConfig)
      // Halt status code 1 is expected from the broker's fatal-exit path.
      TestUtils.waitUntilTrue(() => statusCodeOption.contains(1), "timed out waiting for broker to halt")
    } finally {
      Exit.resetHaltProcedure()
      if (server != null)
        TestUtils.shutdownServers(List(server))
    }
  }

  @Test
  def testProduceErrorFromFailureOnCheckpoint(): Unit = {
    testProduceErrorsFromLogDirFailureOnLeader(Checkpoint)
  }

  @Test
  def testIOExceptionDuringCheckpoint(): Unit = {
    testProduceAfterLogDirFailureOnLeader(Checkpoint)
  }

  @Test
  def testReplicaFetcherThreadAfterLogDirFailureOnFollower(): Unit = {
    this.producerConfig.setProperty(ProducerConfig.RETRIES_CONFIG, "0")
    val producer = createProducer()
    val partition = new TopicPartition(topic, 0)

    val partitionInfo = producer.partitionsFor(topic).asScala.find(_.partition() == 0).get
    val leaderServerId = partitionInfo.leader().id()
    val leaderServer = servers.find(_.config.brokerId == leaderServerId).get
    val followerServerId = partitionInfo.replicas().map(_.id()).find(_ != leaderServerId).get
    val followerServer = servers.find(_.config.brokerId == followerServerId).get

    // Simulate a log dir failure on the follower for partition 0 only.
    followerServer.replicaManager.markPartitionOffline(partition)
    // Send a message to another partition whose leader is the same as partition 0
    // so that ReplicaFetcherThread on the follower will get response from leader immediately
    val anotherPartitionWithTheSameLeader = (1 until partitionNum).find { i =>
      leaderServer.replicaManager.nonOfflinePartition(new TopicPartition(topic, i))
        .flatMap(_.leaderLogIfLocal).isDefined
    }.get
    val record = new ProducerRecord[Array[Byte], Array[Byte]](topic, anotherPartitionWithTheSameLeader, topic.getBytes, "message".getBytes)
    // When producer.send(...).get returns, it is guaranteed that ReplicaFetcherThread on the follower
    // has fetched from the leader and attempts to append to the offline replica.
    producer.send(record).get
    // The healthy partition must still be fully replicated (follower stays in ISR).
    assertEquals(brokerCount, leaderServer.replicaManager.nonOfflinePartition(new TopicPartition(topic, anotherPartitionWithTheSameLeader))
      .get.inSyncReplicaIds.size)

    followerServer.replicaManager.replicaFetcherManager.fetcherThreadMap.values.foreach { thread =>
      assertFalse("ReplicaFetcherThread should still be working if its partition count > 0", thread.isShutdownComplete)
    }
  }

  // Produces to a partition whose leader's log dir was just failed and asserts the
  // client surfaces the failure (retries disabled so the error is not masked).
  def testProduceErrorsFromLogDirFailureOnLeader(failureType: LogDirFailureType): Unit = {
    // Disable retries to allow exception to bubble up for validation
    this.producerConfig.setProperty(ProducerConfig.RETRIES_CONFIG, "0")
    val producer = createProducer()
    val partition = new TopicPartition(topic, 0)

    val record = new ProducerRecord(topic, 0, s"key".getBytes, s"value".getBytes)
    val leaderServerId = producer.partitionsFor(topic).asScala.find(_.partition() == 0).get.leader().id()
    val leaderServer = servers.find(_.config.brokerId == leaderServerId).get

    causeLogDirFailure(failureType, leaderServer, partition)

    // send() should fail due to either KafkaStorageException or NotLeaderForPartitionException
    try {
      producer.send(record).get(6000, TimeUnit.MILLISECONDS)
      fail("send() should fail with either KafkaStorageException or NotLeaderForPartitionException")
    } catch {
      case e: ExecutionException =>
        e.getCause match {
          case t: KafkaStorageException =>
          case t: NotLeaderForPartitionException => // This may happen if ProduceRequest version <= 3
          case t: Throwable => fail(s"send() should fail with either KafkaStorageException or NotLeaderForPartitionException instead of ${t.toString}")
        }
    }
  }

  // End-to-end recovery check: after failing the leader's log dir, leadership must
  // move, producing must succeed again, and the controller must record the offline replica.
  def testProduceAfterLogDirFailureOnLeader(failureType: LogDirFailureType): Unit = {
    val consumer = createConsumer()
    subscribeAndWaitForAssignment(topic, consumer)

    val producer = createProducer()
    val partition = new TopicPartition(topic, 0)
    val record = new ProducerRecord(topic, 0, s"key".getBytes, s"value".getBytes)

    val leaderServerId = producer.partitionsFor(topic).asScala.find(_.partition() == 0).get.leader().id()
    val leaderServer = servers.find(_.config.brokerId == leaderServerId).get

    // The first send() should succeed
    producer.send(record).get()
    TestUtils.consumeRecords(consumer, 1)

    causeLogDirFailure(failureType, leaderServer, partition)

    TestUtils.waitUntilTrue(() => {
      // ProduceResponse may contain KafkaStorageException and trigger metadata update
      producer.send(record)
      producer.partitionsFor(topic).asScala.find(_.partition() == 0).get.leader().id() != leaderServerId
    }, "Expected new leader for the partition", 6000L)

    // Block on send to ensure that new leader accepts a message.
    producer.send(record).get(6000L, TimeUnit.MILLISECONDS)

    // Consumer should receive some messages
    TestUtils.pollUntilAtLeastNumRecords(consumer, 1)

    // There should be no remaining LogDirEventNotification znode
    assertTrue(zkClient.getAllLogDirEventNotifications.isEmpty)

    // The controller should have marked the replica on the original leader as offline
    val controllerServer = servers.find(_.kafkaController.isActive).get
    val offlineReplicas = controllerServer.kafkaController.controllerContext.replicasInState(topic, OfflineReplica)
    assertTrue(offlineReplicas.contains(PartitionAndReplica(new TopicPartition(topic, 0), leaderServerId)))
  }

  // Corrupts the partition's log dir on disk, triggers an IOException via the chosen
  // mechanism, then waits until the broker marks the dir offline.
  private def causeLogDirFailure(failureType: LogDirFailureType,
                                 leaderServer: KafkaServer,
                                 partition: TopicPartition): Unit = {
    // Make log directory of the partition on the leader broker inaccessible by replacing it with a file
    val localLog = leaderServer.replicaManager.localLogOrException(partition)
    val logDir = localLog.dir.getParentFile
    CoreUtils.swallow(Utils.delete(logDir), this)
    logDir.createNewFile()
    assertTrue(logDir.isFile)

    if (failureType == Roll) {
      try {
        leaderServer.replicaManager.getLog(partition).get.roll()
        fail("Log rolling should fail with KafkaStorageException")
      } catch {
        case e: KafkaStorageException => // This is expected
      }
    } else if (failureType == Checkpoint) {
      leaderServer.replicaManager.checkpointHighWatermarks()
    }

    // Wait for ReplicaHighWatermarkCheckpoint to happen so that the log directory of the topic will be offline
    TestUtils.waitUntilTrue(() => !leaderServer.logManager.isLogDirOnline(logDir.getAbsolutePath), "Expected log directory offline", 3000L)
    assertTrue(leaderServer.replicaManager.localLog(partition).isEmpty)
  }

  private def subscribeAndWaitForAssignment(topic: String, consumer: KafkaConsumer[Array[Byte], Array[Byte]]): Unit = {
    consumer.subscribe(Collections.singletonList(topic))
    TestUtils.pollUntilTrue(consumer, () => !consumer.assignment.isEmpty, "Expected non-empty assignment")
  }
}
object LogDirFailureTest {
  /**
   * The mechanism used to surface an IOException on a failed log directory.
   * Sealed so pattern matches over the hierarchy are exhaustiveness-checked.
   */
  sealed trait LogDirFailureType
  /** Trigger the failure via an explicit log segment roll. */
  case object Roll extends LogDirFailureType
  /** Trigger the failure via a high-watermark checkpoint write. */
  case object Checkpoint extends LogDirFailureType
}
| sslavic/kafka | core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala | Scala | apache-2.0 | 10,564 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.charts.report
import java.nio.file.Path
import io.gatling.charts.component.ComponentLibrary
import io.gatling.charts.config.ChartsFiles.{ globalFile, menuFile }
import io.gatling.charts.template.{ MenuTemplate, PageTemplate }
import io.gatling.commons.stats.RequestStatsPath
import io.gatling.commons.util.ScanHelper.deepCopyPackageContent
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.config.GatlingFiles._
private[gatling] class ReportsGenerator(implicit configuration: GatlingConfiguration) {

  /**
   * Generates the full HTML report tree for a simulation run and returns the
   * path of the global report page. Fails fast when the log contains no requests.
   */
  def generateFor(reportsGenerationInputs: ReportsGenerationInputs): Path = {
    import reportsGenerationInputs._

    // Reports are meaningless without at least one request in the simulation log.
    val hasAtLeastOneRequest = logFileReader.statsPaths.exists(_.isInstanceOf[RequestStatsPath])
    if (!hasAtLeastOneRequest)
      throw new UnsupportedOperationException("There were no requests sent during the simulation, reports won't be generated")

    val reportGenerators =
      List(
        new AllSessionsReportGenerator(reportsGenerationInputs, ComponentLibrary.Instance),
        new GlobalReportGenerator(reportsGenerationInputs, ComponentLibrary.Instance),
        new RequestDetailsReportGenerator(reportsGenerationInputs, ComponentLibrary.Instance),
        new GroupDetailsReportGenerator(reportsGenerationInputs, ComponentLibrary.Instance)
      )

    // Static assets first, then the navigation menu, then the report pages.
    deepCopyPackageContent(GatlingAssetsStylePackage, styleDirectory(reportFolderName))
    deepCopyPackageContent(GatlingAssetsJsPackage, jsDirectory(reportFolderName))
    new TemplateWriter(menuFile(reportFolderName)).writeToFile(new MenuTemplate().getOutput)

    PageTemplate.setRunInfo(logFileReader.runMessage, logFileReader.runEnd)
    reportGenerators.foreach(_.generate())
    new StatsReportGenerator(reportsGenerationInputs, ComponentLibrary.Instance).generate()
    new AssertionsReportGenerator(reportsGenerationInputs, ComponentLibrary.Instance).generate()

    globalFile(reportFolderName)
  }
}
| GabrielPlassard/gatling | gatling-charts/src/main/scala/io/gatling/charts/report/ReportsGenerator.scala | Scala | apache-2.0 | 2,826 |
/*
* Copyright 2016 agido GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.pageobject.core.driver
import java.util
import java.util.logging.Level
import org.openqa.selenium.Capabilities
import org.openqa.selenium.logging.LogType
import org.openqa.selenium.remote.CommandExecutor
import org.openqa.selenium.remote.RemoteWebDriver
import org.openqa.selenium.remote.RemoteWebElement
import org.openqa.selenium.remote.Response
import org.pageobject.core.tools.Environment
import org.pageobject.core.tools.Logging
import org.pageobject.core.tools.Perf
import scala.collection.JavaConverters.asScalaBufferConverter
import scala.collection.JavaConverters.mapAsScalaMapConverter
import scala.collection.concurrent.TrieMap
import scala.collection.immutable
import scala.collection.mutable
import scala.util.Failure
import scala.util.Success
import scala.util.Try
/**
 * Helper object containing, per WebDriver command, the parameter names to dump
 * when a RemoteWebDriver command execution is logged.
 **/
object TracedRemoteWebDriver {
  // Command name -> ordered parameter keys worth printing. Commands missing from
  // this map fall back to dumping the whole parameter map.
  private val commandArguments: immutable.Map[String, Seq[String]] = immutable.Map(
    "newSession" -> Seq(),
    "setWindowPosition" -> Seq("x", "y"),
    "setWindowSize" -> Seq("width", "height"),
    "get" -> Seq("url"),
    "findElements" -> Seq("value", "using"),
    "findChildElements" -> Seq("id", "value", "using"),
    "isElementDisplayed" -> Seq("id"),
    "isElementSelected" -> Seq("id"),
    "getElementText" -> Seq("id"),
    "getElementTagName" -> Seq("id"),
    "clickElement" -> Seq("id"),
    "getElementAttribute" -> Seq("id", "name"),
    "sendKeysToElement" -> Seq("id", "value"),
    "clearElement" -> Seq("id"),
    "getTitle" -> Seq(),
    "getCurrentUrl" -> Seq(),
    "quit" -> Seq(),
    "close" -> Seq()
  )

  // Tracing can be disabled via the TRACE_REMOTE_WEB_DRIVER environment switch.
  lazy val enabled: Boolean = Environment.boolean("TRACE_REMOTE_WEB_DRIVER", default = true)
}
/**
 * Concrete [[RemoteWebDriver]] that logs all executed commands
 * (see [[TracedRemoteWebDriver]]).
 *
 * @param executor passed directly to selenium's RemoteWebDriver
 *
 * @param desiredCapabilities passed directly to selenium's RemoteWebDriver
 *
 */
class DefaultTracedRemoteWebDriver(executor: CommandExecutor,
                                   desiredCapabilities: Capabilities)
  extends RemoteWebDriver(executor, desiredCapabilities) with TracedRemoteWebDriver
trait TracedRemoteWebDriver extends RemoteWebDriver with Logging {
  // Remembers, per element id, the selector it was found with, so later log lines
  // can show a human-readable locator instead of a raw id. TrieMap: thread-safe.
  val idFoundBy: mutable.Map[String, String] = new TrieMap[String, String]()

  // Renders a command argument for logging (strings quoted, null made explicit).
  private def prettyPrint(what: Any): String = what match {
    case str: String => s""""$str""""
    // this is needed by sendKeysToElement
    case Array(str: CharSequence) => s""""$str""""
    case null => "(null)" // scalastyle:ignore null
    case _ => what.toString
  }

  // Formats "command(arg1, arg2, ...)", substituting known element ids with the
  // remembered locator strings from idFoundBy.
  private def formatCommand(driverCommand: String, parameters: mutable.Map[String, _ <: Any]): String = {
    val arguments = TracedRemoteWebDriver.commandArguments.get(driverCommand)
    val map: mutable.Map[String, _ <: Any] = if (parameters.contains("id")) {
      val id = parameters("id").toString
      parameters + ("id" -> idFoundBy.getOrElse(id, id))
    } else {
      parameters
    }
    s"$driverCommand(${arguments.fold(map.toString)(_.map(map(_)).map(prettyPrint).mkString(", "))})"
  }

  // Maximum number of element ids printed for a findElements result.
  private val elementIdsToShow = 10

  // Summarizes a findElements/findChildElements result and records each returned
  // element's locator in idFoundBy for later commands.
  private def formatElementIds(driverCommand: String, parameters: mutable.Map[String, _ <: Any], result: Response):
  String = {
    val all = result.getValue.asInstanceOf[util.ArrayList[RemoteWebElement]].asScala.map(_.getId)
    val (show, hide) = all.splitAt(elementIdsToShow)
    val ids = show.mkString(", ")
    val dots = hide.headOption.fold("")(_ => "...")
    val value = parameters.getOrElse("value", "")
    val using = parameters.getOrElse("using", "")
    val prefix = parameters.get("id")
      .map(_.toString)
      .map(id => idFoundBy.get(id).map(_ + " ").getOrElse(id))
      .getOrElse("")
    // NOTE(review): `prefix + using match {...}` parses as `(prefix + using) match {...}`,
    // so for child lookups (non-empty prefix) the named cases never match and the
    // prefix is not prepended to the stored value. Presumably
    // `prefix + (using match {...})` was intended — confirm.
    all.foreach(id => idFoundBy.put(id, prefix + using match {
      case "id" => s"$id <#$value>"
      case "class name" => s"$id <.$value>"
      case "css selector" => s"$id <$value>"
      case "name" => s"$id <[name = '$value']>"
      case _ => s"$id <$using($value)>"
    }))
    s"${show.size + hide.size} Elements: [$ids$dots]"
  }

  // Groups command names so formatResult can dispatch on them with a pattern match.
  class CommandGroup(commands: String*) {
    private val set = commands.toSet

    def unapply(command: String): Boolean = set(command)
  }

  object FormatResultElementIds extends CommandGroup(
    "findElements",
    "findChildElements"
  )

  object PrettyPrintResultValue extends CommandGroup(
    "executeScript",
    "isElementSelected",
    "isElementDisplayed",
    "getElementText",
    "getElementAttribute",
    "getElementTagName",
    "getTitle",
    "getCurrentUrl"
  )

  // Renders the command's result for logging; empty string for commands whose
  // results are not interesting.
  private def formatResult(driverCommand: String, parameters: mutable.Map[String, _ <: Any], result: Response): String = {
    driverCommand match {
      case FormatResultElementIds() => formatElementIds(driverCommand, parameters, result)
      case PrettyPrintResultValue() => prettyPrint(result.getValue)
      case _ => ""
    }
  }

  // Builds the final log line for a (possibly failed) command execution.
  private def format(driverCommand: String, parameters: mutable.Map[String, _ <: Any], result: Try[Response]): String = {
    result match {
      case Success(success) =>
        val formatted = formatResult(driverCommand, parameters, success)
        s"execute ${formatCommand(driverCommand, parameters)}${if (!formatted.isEmpty) s" = $formatted" else ""}"
      case Failure(th) => s"execute ${formatCommand(driverCommand, parameters)} exception ${th.getMessage}"
    }
  }

  // Intercepts every WebDriver command: optionally drains the browser console log,
  // then executes the command and logs it with its timing/result.
  override def execute(driverCommand: String, parameters: java.util.Map[String, _]): Response = {
    // "getLog" is excluded to avoid recursing while fetching browser logs below.
    if (!TracedRemoteWebDriver.enabled || driverCommand == "getLog") {
      super.execute(driverCommand, parameters)
    } else {
      if (this.isInstanceOf[TraceBrowserConsole] && Option(getSessionId).isDefined) {
        manage.logs.get(LogType.BROWSER).getAll.asScala.foreach(log => {
          def withoutNewLine: String = {
            val message = log.getMessage
            // NOTE(review): endsWith("\\n") tests for a literal backslash-n, yet only
            // one character is stripped — a plain "\n" was presumably intended; confirm.
            if (message.endsWith("\\n")) {
              message.substring(0, message.length - 1)
            } else {
              message
            }
          }
          val msg = s"Console: ${log.getLevel} $withoutNewLine"
          if (log.getLevel == Level.SEVERE) {
            warn(msg)
          } else {
            info(msg)
          }
        })
      }
      Perf.logResult(debug(_), format(driverCommand, parameters.asScala, _: Try[Response])) {
        super.execute(driverCommand, parameters)
      }
    }
  }
}
| agido/pageobject | core/src/main/scala/org/pageobject/core/driver/TracedRemoteWebDriver.scala | Scala | apache-2.0 | 7,052 |
package com.auginte.scarango.errors
import akka.http.scaladsl.model.HttpResponse
import com.auginte.scarango.Context
/**
 * Raised when the server answers with a 404 response (resource not found).
 *
 * @param httpResponse the raw HTTP response, kept so callers can inspect it
 */
case class NotFound(httpResponse: HttpResponse)(implicit context: Context) extends ScarangoException("Not found")(context)
| aurelijusb/scarango | src/main/scala/com/auginte/scarango/errors/NotFound.scala | Scala | apache-2.0 | 307 |
package im.actor.server.presences
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.time.{ Seconds, Span }
import im.actor.server.ActorSuite
class GroupPresenceManagerSpec extends ActorSuite {
  behavior of "GroupPresenceManager"

  it should "subscribe/unsubscribe to group presences" in e1

  import GroupPresenceManager._

  implicit val ec: ExecutionContext = system.dispatcher
  override implicit val patienceConfig = PatienceConfig(timeout = Span(5, Seconds))
  implicit val timeout: Timeout = Timeout(5.seconds)
  implicit val userPresenceRegion = PresenceManager.startRegion()
  implicit val region = GroupPresenceManager.startRegion()

  val probe = TestProbe()
  val userId = 1
  val groupId = 100

  def e1() = {
    whenReady(subscribe(groupId, probe.ref)) { _ ⇒ }
    GroupPresenceManager.notifyGroupUserAdded(groupId, userId)
    // Mark the user online; the subscriber should see the online count rise to 1.
    PresenceManager.presenceSetOnline(userId, 1000)
    probe.expectMsgPF() {
      case GroupPresenceState(100, 1) ⇒
    }
    // A second state update drops the count back to 0 — presumably when the
    // 1000 ms online timeout expires; confirm against PresenceManager semantics.
    probe.expectMsgPF() {
      case GroupPresenceState(100, 0) ⇒
    }
    // After unsubscribing, no further presence updates should reach the probe.
    whenReady(unsubscribe(groupId, probe.ref)) { _ ⇒ }
    probe.expectNoMsg()
  }
}
| Just-D/actor-platform | actor-server/actor-tests/src/test/scala/im/actor/server/presences/GroupPresenceManagerSpec.scala | Scala | mit | 1,242 |
package chat.tox.antox.wrapper
/**
 * Mutable view of a member of a group conversation.
 *
 * `name` is the peer's display name; `ignored` marks whether the local user is
 * ignoring this peer. Both are vars so they can be updated in place.
 */
class GroupPeer(var name: String,
                var ignored: Boolean) {
  // A peer renders as its display name.
  override def toString: String = name
}
| gale320/Antox | app/src/main/scala/chat/tox/antox/wrapper/GroupPeer.scala | Scala | gpl-3.0 | 148 |
/*
* bytefrog: a tracing framework for the JVM. For more information
* see http://code-pulse.com/bytefrog
*
* Copyright (C) 2014 Applied Visions - http://securedecisions.avi.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.secdec.bytefrog.clients.javafxui.views
import javafx.fxml.FXMLLoader
import javafx.scene.Parent
import javafx.scene.Scene
/** Trace complete view: loads the FXML layout and exposes its scene/controller.
 * @author robertf
 */
class TraceCompleteView {
  private val loader = new FXMLLoader(getClass.getResource("trace-complete-view.fxml"))

  // load must run before getController: the loader populates the controller as a
  // side effect of loading the FXML document.
  val scene = new Scene(loader.load.asInstanceOf[Parent])
  val controller = loader.getController[TraceCompleteController]

  // Convenience accessor for the controller's model.
  def model = controller.model
} | secdec/bytefrog-clients | javafx-ui/src/main/scala/com/secdec/bytefrog/clients/javafxui/views/TraceCompleteView.scala | Scala | apache-2.0 | 1,195 |
package temportalist.compression.main.common.block.tile
import net.minecraft.item.ItemStack
import net.minecraft.nbt.NBTTagCompound
import net.minecraft.tileentity.TileEntity
import temportalist.origin.api.common.helper.Names
import temportalist.origin.api.common.tile.ITileSaver
/**
 * Tile entity for a compressed block: holds the compressed item stack and the
 * compression size, and persists both through NBT.
 *
 * Created by TheTemportalist on 4/15/2016.
 *
 * @author TheTemportalist
 */
class TileCompressed extends TileEntity with ITileSaver {

  // May be null until setStack/readFromNBT has run (matches the original contract).
  private var itemStack: ItemStack = _
  private var size: Long = 0

  def setStack(state: ItemStack): Unit = {
    itemStack = state
    markDirty()
  }

  def getStack: ItemStack = itemStack

  def setSize(size: Long): Unit = {
    this.size = size
    markDirty()
  }

  def getSize: Long = size

  override def getUpdateTag: NBTTagCompound = writeToNBT(new NBTTagCompound)

  override def writeToNBT(nbt: NBTTagCompound): NBTTagCompound = {
    val tag = super.writeToNBT(nbt)
    // The stack is stored as its registry name (with id and metadata); skip when unset.
    Option(itemStack).foreach { stack =>
      tag.setString("stack", Names.getName(stack, hasID = true, hasMeta = true))
    }
    tag.setLong("size", size)
    tag
  }

  override def readFromNBT(nbt: NBTTagCompound): Unit = {
    super.readFromNBT(nbt)
    if (nbt.hasKey("stack")) {
      itemStack = Names.getItemStack(nbt.getString("stack"))
    }
    size = nbt.getLong("size")
  }
}
| TheTemportalist/Compression | src/main/scala/temportalist/compression/main/common/block/tile/TileCompressed.scala | Scala | apache-2.0 | 1,301 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle.scalariform
import org.scalastyle.PositionError
import org.scalastyle.ScalariformChecker
import org.scalastyle.ScalastyleError
import _root_.scalariform.lexer.Tokens.LBRACKET
import _root_.scalariform.lexer.Tokens.RBRACKET
import _root_.scalariform.parser.CompilationUnit
class NoWhitespaceBeforeLeftBracketChecker extends ScalariformChecker {
  val errorKey = "no.whitespace.before.left.bracket"

  /** Flags every '[' token preceded by whitespace; error reported at the previous token. */
  def verify(ast: CompilationUnit): List[ScalastyleError] =
    ast.tokens
      .sliding(2)
      .collect {
        case List(left, right) if right.tokenType == LBRACKET && charsBetweenTokens(left, right) > 0 =>
          PositionError(left.offset)
      }
      .toList
}
class NoWhitespaceAfterLeftBracketChecker extends ScalariformChecker {
  val errorKey = "no.whitespace.after.left.bracket"

  /** Flags every '[' token followed by whitespace; error reported at the bracket itself. */
  def verify(ast: CompilationUnit): List[ScalastyleError] =
    ast.tokens
      .sliding(2)
      .collect {
        case List(left, right) if left.tokenType == LBRACKET && charsBetweenTokens(left, right) > 0 =>
          PositionError(left.offset)
      }
      .toList
}
class NoWhitespaceBeforeRightBracketChecker extends ScalariformChecker {
  val errorKey = "no.whitespace.before.right.bracket"

  /** Flags every ']' token preceded by whitespace; error reported at the bracket itself. */
  def verify(ast: CompilationUnit): List[ScalastyleError] =
    ast.tokens
      .sliding(2)
      .collect {
        case List(left, right) if right.tokenType == RBRACKET && charsBetweenTokens(left, right) > 0 =>
          PositionError(right.offset)
      }
      .toList
}
| scalastyle/scalastyle | src/main/scala/org/scalastyle/scalariform/NoWhitespaceBracketChecker.scala | Scala | apache-2.0 | 2,248 |
package com.philipborg.mummu.math.noise
import scala.util.Random
import com.kurtspencer.math.noise.OpenSimplexNoise
/**
 * 4-dimensional OpenSimplex noise source.
 *
 * @param seed optional fixed seed; when absent, a random seed is drawn so each
 *             instance produces a different noise field
 */
class OSN4D(seed: Option[Long] = None) extends Noise4D {
  // Avoid Option.get: fold picks the seeded constructor when a seed is present,
  // and only draws a random seed otherwise (ifEmpty is by-name).
  private val osn = seed.fold(new OpenSimplexNoise(Random.nextLong))(new OpenSimplexNoise(_))

  /** Evaluates the noise function at (x, y, z, w). */
  def eval(x: Double, y: Double, z: Double, w: Double): Double = osn.eval(x, y, z, w)
} | philipborg/Mummu | src/main/scala/com/philipborg/mummu/math/noise/OSN4D.scala | Scala | agpl-3.0 | 379 |
import models._
import nozzle.modules.LoggingSupport._
import scalaz._
import Scalaz._
import scalaz.EitherT._
import nozzle.monadicctrl.DefaultErrorMonadicCtrl._
import scala.concurrent.ExecutionContext
// Module configuration: `aCampingName` is the name used for one of the returned campings.
case class CampingControllerConfig(aCampingName: String)
/** Controller interface for camping CRUD-style operations, all wrapped in the control-flow monad. */
trait CampingController {
  def getAll: FutureCtrlFlow[List[Camping]]
  def getByCoolnessAndSize(coolness: String, size: Option[Int]): FutureCtrlFlow[List[Camping]]
  def getById(id: Int): FutureCtrlFlow[Camping]
  def create(camping: Camping): FutureCtrlFlow[Camping]
}
/**
 * Example implementation of [[CampingController]] with hard-coded results;
 * dependencies (execution context, logger, config) are injected implicitly.
 */
class CampingControllerImpl(implicit
  executionContext: ExecutionContext,
  logger: ModuleLogger[CampingController],
  config: nozzle.config.Config[CampingControllerConfig]
) extends CampingController {
  val log = logger.get

  // Authorization/validation hook; currently a no-op that always succeeds.
  def checkCanCreate: FutureCtrlFlow[Unit] = ().point[FutureCtrlFlow]

  def getAll: FutureCtrlFlow[List[Camping]] = {
    log.info("getAll")
    for {
      _ <- checkCanCreate
      res <- List(
        Camping("Le Marze", 15),
        Camping("Sunset Camping", 22)).point[FutureCtrlFlow]
    } yield res
  }

  // NOTE(review): coolness and size are currently ignored — the result is a fixed
  // stub (one entry taken from configuration).
  def getByCoolnessAndSize(coolness: String, size: Option[Int]): FutureCtrlFlow[List[Camping]] = List(
    Camping(config.aCampingName, 15),
    Camping("Sunset Camping", 22)).point[FutureCtrlFlow]

  // NOTE(review): id is currently ignored; always returns the same stub camping.
  def getById(id: Int): FutureCtrlFlow[Camping] =
    Camping("Le Marze", 15).point[FutureCtrlFlow]

  // Echoes the created camping back, lifted into the control flow.
  def create(camping: Camping): FutureCtrlFlow[Camping] =
    camping.point[FutureCtrlFlow]
}
| utaal/nozzle | example/src/main/scala/CampingController.scala | Scala | mit | 1,473 |
package mesosphere.marathon.health
import akka.event.EventStream
import mesosphere.marathon.MarathonSpec
import mesosphere.marathon.Protos.HealthCheckDefinition.Protocol
import mesosphere.marathon.state.PathId.StringPathId
import mesosphere.marathon.tasks.TaskIdUtil
import org.apache.mesos.Protos
import scala.concurrent.Await
import scala.concurrent.duration._
/**
 * Tests for DelegatingHealthCheckManager: registering health checks and
 * turning task status updates into recorded health results.
 */
class DelegatingHealthCheckManagerTest extends MarathonSpec {
  var hcManager: DelegatingHealthCheckManager = _
  var taskIdUtil: TaskIdUtil = _
  // A fresh manager (with a mocked event bus) is created before every test.
  before {
    taskIdUtil = new TaskIdUtil
    hcManager = new DelegatingHealthCheckManager(mock[EventStream], taskIdUtil)
  }
  // Adding a health check makes it visible via list().
  test("Add") {
    val healthCheck = HealthCheck()
    hcManager.add("test".toRootPath, healthCheck)
    assert(hcManager.list("test".toRootPath).size == 1)
  }
  // An unhealthy update records lastFailure only; a subsequent healthy
  // update records a lastSuccess that is newer than the failure.
  test("Update") {
    val appId = "test".toRootPath
    val taskId = taskIdUtil.newTaskId(appId)
    val taskStatus = Protos.TaskStatus.newBuilder
      .setTaskId(taskId)
      .setState(Protos.TaskState.TASK_RUNNING)
      .setHealthy(false)
      .build
    val healthCheck = HealthCheck(protocol = Protocol.COMMAND)
    hcManager.add(appId, healthCheck)
    // Nothing has been reported yet, so the status is List(None).
    Await.result(hcManager.status(appId, taskId.getValue), 5.seconds) match {
      case List(None) =>
      case _ => fail()
    }
    hcManager.update(taskStatus.toBuilder.setHealthy(false).build, "")
    Await.result(hcManager.status(appId, taskId.getValue), 5.seconds) match {
      case List(Some(health)) =>
        assert(health.lastFailure.isDefined)
        assert(health.lastSuccess.isEmpty)
      case _ => fail()
    }
    hcManager.update(taskStatus.toBuilder.setHealthy(true).build, "")
    Await.result(hcManager.status(appId, taskId.getValue), 5.seconds) match {
      case List(Some(health)) =>
        assert(health.lastFailure.isDefined)
        assert(health.lastSuccess.isDefined)
        // NOTE(review): comparing Option values with ">" relies on an
        // Ordering/Ordered conversion being in scope — confirm.
        assert(health.lastSuccess > health.lastFailure)
      case _ => fail()
    }
  }
} | tnachen/marathon | src/test/scala/mesosphere/marathon/health/DelegatingHealthCheckManagerTest.scala | Scala | apache-2.0 | 1,972 |
/**
* This file is part of mycollab-esb.
*
* mycollab-esb is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* mycollab-esb is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with mycollab-esb. If not, see <http://www.gnu.org/licenses/>.
*/
package com.esofthead.mycollab.module.user.esb.impl
import java.util.Arrays
import com.esofthead.mycollab.cache.CleanCacheEvent
import com.esofthead.mycollab.module.GenericCommand
import com.esofthead.mycollab.module.billing.RegisterStatusConstants
import com.esofthead.mycollab.module.project.dao.ProjectMemberMapper
import com.esofthead.mycollab.module.project.domain.{ProjectMember, ProjectMemberExample}
import com.esofthead.mycollab.module.project.service.ProjectMemberService
import com.esofthead.mycollab.module.user.esb.DeleteUserEvent
import com.google.common.eventbus.{AllowConcurrentEvents, Subscribe}
import org.slf4j.{Logger, LoggerFactory}
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.stereotype.Component
/**
*
* @author MyCollab Ltd.
* @since 1.0
*
*/
/** Companion holding the shared logger for [[DeleteUserCommand]]. */
object DeleteUserCommand {
  private val LOG: Logger = LoggerFactory.getLogger(classOf[DeleteUserCommand])
}
@Component class DeleteUserCommand extends GenericCommand {
  @Autowired private val projectMemberMapper: ProjectMemberMapper = null

  /**
   * Marks every active/pending project membership of the removed user as
   * deleted, then posts a cache-eviction event for the affected account.
   */
  @AllowConcurrentEvents
  @Subscribe
  def execute(event: DeleteUserEvent): Unit = {
    // Pass the arguments individually so SLF4J fills both "{}" placeholders;
    // wrapping them in an Array passed the array as a single argument and
    // left the second placeholder unfilled.
    DeleteUserCommand.LOG.debug("Remove user {} with account id {}", event.username, event.accountid)
    val ex: ProjectMemberExample = new ProjectMemberExample
    ex.createCriteria.andStatusIn(Arrays.asList(RegisterStatusConstants.ACTIVE, RegisterStatusConstants.SENT_VERIFICATION_EMAIL,
      RegisterStatusConstants.VERIFICATING)).andSaccountidEqualTo(event.accountid).andUsernameEqualTo(event.username)
    val projectMember: ProjectMember = new ProjectMember
    projectMember.setStatus(RegisterStatusConstants.DELETE)
    projectMemberMapper.updateByExampleSelective(projectMember, ex)
    asyncEventBus.post(new CleanCacheEvent(event.accountid, Array(classOf[ProjectMemberService])))
  }
} | maduhu/mycollab | mycollab-esb/src/main/scala/com/esofthead/mycollab/module/user/esb/impl/DeleteUserCommand.scala | Scala | agpl-3.0 | 2,600 |
package org.clapper.peoplegen
import scala.util.{Failure, Success, Try}
object Main {

  object Constants {
    val Name = "peoplegen"
  }

  /** Entry point: parse the command line, generate people, write them out,
    * and exit with 0 on success or 1 on any failure.
    */
  def main(args: Array[String]): Unit = {
    // Chooses a message handler based on the verbosity flag.
    def getMessageHandler(params: Params) =
      if (params.verbose) VerboseMessageHandler else EmptyMessageHandler

    val t = for { params <- CommandLineParser.parseParams(args)
                  msg = getMessageHandler(params)
                  generator = new PeopleGenerator(params, msg)
                  people <- generator.generatePeople
                  writer = new MainPeopleWriter(params, msg)
                  _ <- writer.write(people) }
            yield ()

    // Map the outcome to an exit code. Unused pattern bindings replaced
    // with wildcards (the Success value is Unit, the CommandLineException
    // has already been reported by the parser).
    val rc = t match {
      case Success(_) =>
        0
      case Failure(_: CommandLineException) =>
        // Error already printed.
        1
      case Failure(e) =>
        e.printStackTrace(System.err)
        1
    }
    System.exit(rc)
  }
}
| bmc/namegen | src/main/scala/org/clapper/peoplegen/Main.scala | Scala | bsd-3-clause | 953 |
package com.larroy.slf4j.akka
import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import com.larroy.slf4j.{HasLogger, Logging}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
/** Mixin used by the spec to verify that a logger is available via HasLogger. */
trait Base extends HasLogger {
  /** Logs a debug line through the mixed-in logger. */
  def f(): Unit = {
    log.debug("Base.f called")
  }
}
// Instantiating D exercises the plain (non-actor) Logging mixin: the
// constructor body logs directly and through the inherited helper f().
case class D() extends Base with Logging {
  log.debug("D")
  f()
}
/** Echo actor: logs each message through the SLF4J adapter (directly and via
  * the Base.f helper) and then replies with the original message.
  */
class Act extends Actor with Base with ActorLoggingSlf4j {
  override def receive: Receive = {
    case message =>
      log.debug(message.toString)
      f()
      sender ! message
  }
}
/** Verifies that both the actor-based and the plain logging mixins operate. */
class ActorLoggingSlf4jSpec extends TestKit(ActorSystem("ActorLoggingSlf4jSpec"))
  with WordSpecLike with BeforeAndAfterAll with Matchers with ImplicitSender {

  override def afterAll: Unit = {
    TestKit.shutdownActorSystem(system)
  }

  "log messages" in {
    // Round-trip a message through the echo actor ...
    val echo = system.actorOf(Props[Act], "Act")
    val payload = "hi there"
    echo ! payload
    expectMsg(payload)
    // ... then construct D, which logs from its constructor.
    val instance = D()
  }
}
| larroy/SLF4J_Akka_logging_adapter | src/test/scala/com/larroy/slf4j/akka/ActorLoggingSlf4jSpec.scala | Scala | mit | 962 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.rest.kubernetes
import com.fasterxml.jackson.core.`type`.TypeReference
import com.fasterxml.jackson.module.scala.JsonScalaEnumeration
/**
 * Kind of Kubernetes entity that can own staged resources.
 * NOTE(review): scala.Enumeration is generally discouraged in favour of a
 * sealed ADT, but it is kept here because the Jackson (de)serialization via
 * JsonScalaEnumeration depends on it.
 */
object StagedResourcesOwnerType extends Enumeration {
  type OwnerType = Value
  // In more generic scenarios, we might want to be watching Deployments, etc.
  val Pod = Value
}
// Jackson type reference used to (de)serialize StagedResourcesOwnerType values.
class StagedResourcesOwnerTypeReference extends TypeReference[StagedResourcesOwnerType.type]
/**
 * Identifies the entity that owns a set of staged resources: its namespace,
 * the labels used to find it, and the kind of resource it is.
 */
case class StagedResourcesOwner(
    ownerNamespace: String,
    ownerLabels: Map[String, String],
    @JsonScalaEnumeration(classOf[StagedResourcesOwnerTypeReference])
    ownerType: StagedResourcesOwnerType.OwnerType)
| kimoonkim/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/rest/kubernetes/StagedResourcesOwner.scala | Scala | apache-2.0 | 1,466 |
object Prob21 {

  /** All positive divisors of x, each listed exactly once (unordered).
    *
    * Iterates only up to sqrt(x), adding both i and x / i for every divisor
    * i found. `distinct` removes the duplicate that the original version
    * produced when x is a perfect square (i == x / i), e.g. divisors(4)
    * previously contained 2 twice.
    */
  def divisors(x: Int): List[Int] = {
    require(x > 0)
    val upper = math.sqrt(x.toDouble).toInt
    (1 to upper).filter {
      i => x % i == 0
    }.flatMap {
      i => List(i, x / i)
    }.distinct.toList
  }

  /** d(x): the sum of proper divisors of x (all divisors except x itself). */
  def d(x: Int): Int = {
    divisors(x).sum - x
  }

  /** Project Euler problem 21: prints the sum of all amicable numbers < 10000. */
  def main(args: Array[String]) {
    assert(d(220) == 284)
    assert(d(284) == 220)
    val sum = (2 until 10000).filter { i =>
      val result = d(i)
      d(result) == i && result != i
    }.sum
    println(sum)
  }
}
| ponkotuy/ProjectEular | src/main/scala/Prob21.scala | Scala | mit | 502 |
/*
# Copyright 2016 Georges Lipka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package com.glipka.easyReactJS.reactRouter
import scala.scalajs.js
import com.glipka.easyReactJS.react._
import GlobalDefinition._
/** Scala.js facade for the react-router HistoryMixin; members are bound at
  * runtime by the JavaScript library, js.native is only a stub.
  */
@js.native
trait HistoryMixin extends js.Any{
  val history: History=js.native
} | glipka/Easy-React-With-ScalaJS | src/main/scala/com/glipka/easyReactJS/reactRouter/HistoryMixin.scala | Scala | apache-2.0 | 808 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.util.concurrent.ExecutionException
import java.util.concurrent.atomic.AtomicReference
import java.util.{Properties}
import kafka.common.TopicAndPartition
import kafka.integration.KafkaServerTestHarness
import kafka.server._
import kafka.utils._
import kafka.utils.Implicits._
import org.apache.kafka.clients.consumer._
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.{ClusterResource, ClusterResourceListener, TopicPartition}
import org.apache.kafka.test.{TestUtils => _, _}
import org.junit.Assert._
import org.junit.{Before, Test}
import scala.collection.JavaConverters._
import org.apache.kafka.test.TestUtils.isValidClusterId
import scala.collection.mutable.ArrayBuffer
/** The test cases here verify the following conditions.
* 1. The ProducerInterceptor receives the cluster id after the onSend() method is called and before onAcknowledgement() method is called.
* 2. The Serializer receives the cluster id before the serialize() method is called.
* 3. The producer MetricReporter receives the cluster id after send() method is called on KafkaProducer.
* 4. The ConsumerInterceptor receives the cluster id before the onConsume() method.
* 5. The Deserializer receives the cluster id before the deserialize() method is called.
* 6. The consumer MetricReporter receives the cluster id after poll() is called on KafkaConsumer.
* 7. The broker MetricReporter receives the cluster id after the broker startup is over.
* 8. The broker KafkaMetricReporter receives the cluster id after the broker startup is over.
* 9. All the components receive the same cluster id.
*/
/** Companion holding the mock metrics reporters registered by the test; each
  * captures the ClusterResource it receives through onUpdate into a static
  * AtomicReference so the test body can assert on it.
  */
object EndToEndClusterIdTest {
  object MockConsumerMetricsReporter {
    val CLUSTER_META = new AtomicReference[ClusterResource]
  }
  // Consumer-side reporter: records the cluster metadata it is given.
  class MockConsumerMetricsReporter extends MockMetricsReporter with ClusterResourceListener {
    override def onUpdate(clusterMetadata: ClusterResource) {
      MockConsumerMetricsReporter.CLUSTER_META.set(clusterMetadata)
    }
  }
  object MockProducerMetricsReporter {
    val CLUSTER_META = new AtomicReference[ClusterResource]
  }
  // Producer-side reporter: records the cluster metadata it is given.
  class MockProducerMetricsReporter extends MockMetricsReporter with ClusterResourceListener {
    override def onUpdate(clusterMetadata: ClusterResource) {
      MockProducerMetricsReporter.CLUSTER_META.set(clusterMetadata)
    }
  }
  object MockBrokerMetricsReporter {
    val CLUSTER_META = new AtomicReference[ClusterResource]
  }
  // Broker-side reporter: records the cluster metadata it is given.
  class MockBrokerMetricsReporter extends MockMetricsReporter with ClusterResourceListener {
    override def onUpdate(clusterMetadata: ClusterResource) {
      MockBrokerMetricsReporter.CLUSTER_META.set(clusterMetadata)
    }
  }
}
/** Integration test: produces and consumes one record and asserts that every
  * pluggable component (interceptors, serializers, metric reporters, broker)
  * observed the same, valid cluster id.
  */
class EndToEndClusterIdTest extends KafkaServerTestHarness {
  import EndToEndClusterIdTest._
  val producerCount = 1
  val consumerCount = 1
  val serverCount = 1
  lazy val producerConfig = new Properties
  lazy val consumerConfig = new Properties
  lazy val serverConfig = new Properties
  val numRecords = 1
  val topic = "e2etopic"
  val part = 0
  val tp = new TopicPartition(topic, part)
  val topicAndPartition = new TopicAndPartition(topic, part)
  // Register the broker-side mock reporter before the harness starts brokers.
  this.serverConfig.setProperty(KafkaConfig.MetricReporterClassesProp, "kafka.api.EndToEndClusterIdTest$MockBrokerMetricsReporter")
  override def generateConfigs = {
    val cfgs = TestUtils.createBrokerConfigs(serverCount, zkConnect, interBrokerSecurityProtocol = Some(securityProtocol),
      trustStoreFile = trustStoreFile, saslProperties = serverSaslProperties)
    cfgs.foreach(_ ++= serverConfig)
    cfgs.map(KafkaConfig.fromProps)
  }
  @Before
  override def setUp() {
    super.setUp
    MockDeserializer.resetStaticVariables
    // create the consumer offset topic
    TestUtils.createTopic(this.zkUtils, topic, 2, serverCount, this.servers)
  }
  @Test
  def testEndToEnd() {
    val appendStr = "mock"
    MockConsumerInterceptor.resetCounters()
    MockProducerInterceptor.resetCounters()
    // The broker reporter should already have a valid cluster id from startup.
    assertNotNull(MockBrokerMetricsReporter.CLUSTER_META)
    isValidClusterId(MockBrokerMetricsReporter.CLUSTER_META.get.clusterId)
    val producerProps = new Properties()
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
    producerProps.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, "org.apache.kafka.test.MockProducerInterceptor")
    producerProps.put("mock.interceptor.append", appendStr)
    producerProps.put(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, "kafka.api.EndToEndClusterIdTest$MockProducerMetricsReporter")
    val testProducer = new KafkaProducer(producerProps, new MockSerializer, new MockSerializer)
    // Send one record and make sure clusterId is set after send and before onAcknowledgement
    sendRecords(testProducer, 1, tp)
    assertNotEquals(MockProducerInterceptor.CLUSTER_ID_BEFORE_ON_ACKNOWLEDGEMENT, MockProducerInterceptor.NO_CLUSTER_ID)
    assertNotNull(MockProducerInterceptor.CLUSTER_META)
    assertEquals(MockProducerInterceptor.CLUSTER_ID_BEFORE_ON_ACKNOWLEDGEMENT.get.clusterId, MockProducerInterceptor.CLUSTER_META.get.clusterId)
    isValidClusterId(MockProducerInterceptor.CLUSTER_META.get.clusterId)
    // Make sure that serializer gets the cluster id before serialize method.
    assertNotEquals(MockSerializer.CLUSTER_ID_BEFORE_SERIALIZE, MockSerializer.NO_CLUSTER_ID)
    assertNotNull(MockSerializer.CLUSTER_META)
    isValidClusterId(MockSerializer.CLUSTER_META.get.clusterId)
    assertNotNull(MockProducerMetricsReporter.CLUSTER_META)
    isValidClusterId(MockProducerMetricsReporter.CLUSTER_META.get.clusterId)
    this.consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
    this.consumerConfig.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, "org.apache.kafka.test.MockConsumerInterceptor")
    this.consumerConfig.put(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, "kafka.api.EndToEndClusterIdTest$MockConsumerMetricsReporter")
    val testConsumer = new KafkaConsumer(this.consumerConfig, new MockDeserializer, new MockDeserializer)
    testConsumer.assign(List(tp).asJava)
    testConsumer.seek(tp, 0)
    // consume and verify that values are modified by interceptors
    consumeRecords(testConsumer, numRecords)
    // Check that cluster id is present after the first poll call.
    assertNotEquals(MockConsumerInterceptor.CLUSTER_ID_BEFORE_ON_CONSUME, MockConsumerInterceptor.NO_CLUSTER_ID)
    assertNotNull(MockConsumerInterceptor.CLUSTER_META)
    isValidClusterId(MockConsumerInterceptor.CLUSTER_META.get.clusterId)
    assertEquals(MockConsumerInterceptor.CLUSTER_ID_BEFORE_ON_CONSUME.get.clusterId, MockConsumerInterceptor.CLUSTER_META.get.clusterId)
    assertNotEquals(MockDeserializer.clusterIdBeforeDeserialize, MockDeserializer.noClusterId)
    assertNotNull(MockDeserializer.clusterMeta)
    isValidClusterId(MockDeserializer.clusterMeta.get.clusterId)
    assertEquals(MockDeserializer.clusterIdBeforeDeserialize.get.clusterId, MockDeserializer.clusterMeta.get.clusterId)
    assertNotNull(MockConsumerMetricsReporter.CLUSTER_META)
    isValidClusterId(MockConsumerMetricsReporter.CLUSTER_META.get.clusterId)
    // Make sure everyone receives the same cluster id.
    assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockSerializer.CLUSTER_META.get.clusterId)
    assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockProducerMetricsReporter.CLUSTER_META.get.clusterId)
    assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockConsumerInterceptor.CLUSTER_META.get.clusterId)
    assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockDeserializer.clusterMeta.get.clusterId)
    assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockConsumerMetricsReporter.CLUSTER_META.get.clusterId)
    assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockBrokerMetricsReporter.CLUSTER_META.get.clusterId)
    testConsumer.close()
    testProducer.close()
    MockConsumerInterceptor.resetCounters()
    MockProducerInterceptor.resetCounters()
  }
  // Sends numRecords records synchronously, rethrowing the underlying cause
  // of any send failure.
  private def sendRecords(producer: KafkaProducer[Array[Byte], Array[Byte]], numRecords: Int, tp: TopicPartition) {
    val futures = (0 until numRecords).map { i =>
      val record = new ProducerRecord(tp.topic(), tp.partition(), s"$i".getBytes, s"$i".getBytes)
      debug(s"Sending this record: $record")
      producer.send(record)
    }
    try {
      futures.foreach(_.get)
    } catch {
      case e: ExecutionException => throw e.getCause
    }
  }
  // Polls until numRecords records arrive (bounded by maxIters iterations)
  // and verifies topic, partition and consecutive offsets.
  private def consumeRecords(consumer: Consumer[Array[Byte], Array[Byte]],
                             numRecords: Int = 1,
                             startingOffset: Int = 0,
                             topic: String = topic,
                             part: Int = part) {
    val records = new ArrayBuffer[ConsumerRecord[Array[Byte], Array[Byte]]]()
    val maxIters = numRecords * 50
    var iters = 0
    while (records.size < numRecords) {
      for (record <- consumer.poll(50).asScala) {
        records += record
      }
      if (iters > maxIters)
        throw new IllegalStateException("Failed to consume the expected records after " + iters + " iterations.")
      iters += 1
    }
    for (i <- 0 until numRecords) {
      val record = records(i)
      val offset = startingOffset + i
      assertEquals(topic, record.topic)
      assertEquals(part, record.partition)
      assertEquals(offset.toLong, record.offset)
    }
  }
}
| zzwlstarby/mykafka | core/src/test/scala/integration/kafka/api/EndToEndClusterIdTest.scala | Scala | apache-2.0 | 10,310 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.metrics.servlet
import java.util.concurrent.{ScheduledExecutorService, Executors, TimeUnit}
import java.util.regex.Pattern
import javax.servlet._
import javax.servlet.http.{HttpSessionEvent, HttpSessionListener, HttpServletRequest, HttpServletResponse}
import com.codahale.metrics._
import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.LazyLogging
import org.locationtech.geomesa.metrics.config.MetricsConfig
/**
* Filter that will track request metrics, aggregated based on url patterns.
*/
/**
 * Servlet filter that records per-URL-pattern request metrics (active
 * requests, timers, response-code meters and optionally active sessions).
 * All mutable state is initialized once in init() before any request is
 * served.
 */
class AggregatedMetricsFilter extends Filter with LazyLogging {
  import AggregatedMetricsFilter.{LayerParameters, createMetrics}
  var reporters: Seq[ScheduledReporter] = Seq.empty
  var urlPatterns: Seq[Pattern] = null // urls to operate on
  var mappings: List[Pattern] = null // url patterns to match
  var metrics: (Int) => (Counter, Counter, Timer, Map[Int, Meter]) = null // metrics per url pattern
  var getUri: (HttpServletRequest) => String = null // uri to evaluate mappings against
  // callback that (optionally) tracks the request's session for mapping i
  var trackSession: (HttpServletRequest, Int, Counter) => Unit = null
  // scheduler for periodic expired-session cleanup; null unless sessions are tracked
  var executor: ScheduledExecutorService = null
  override def init(filterConfig: FilterConfig): Unit = {
    import AggregatedMetricsFilter.Params._
    try {
      // config can be passed in directly, or we use global typesafe config with defined path
      val configString = filterConfig.getInitParameter(ServletConfig)
      val config = if (configString != null && configString.trim.nonEmpty) {
        ConfigFactory.parseString(configString)
      } else {
        ConfigFactory.load().getConfig(ServletConfigPath)
      }
      val registry = new MetricRegistry
      reporters = MetricsConfig.reporters(config, registry, Some(Reporters))
      // metric name prefix can be set based on filter param, typesafe config, or fallback to class name
      val prefix = Option(filterConfig.getInitParameter(NamePrefix)).map(_.trim).filterNot(_.isEmpty).getOrElse {
        if (config.hasPath(NamePrefix)) config.getString(NamePrefix) else this.getClass.getName
      }
      // validate urls against the full url, or just the layer name (pulled from the request)
      // note: layer names will only be relevant for wfs/wms requests
      val mapByLayer = if (config.hasPath(MapByLayer)) config.getBoolean(MapByLayer) else false
      getUri = if (mapByLayer) {
        (r) => LayerParameters.map(r.getParameter).find(_ != null).getOrElse("unknown")
      } else {
        (r) => { val qs = r.getQueryString; if (qs.isEmpty) r.getRequestURI else s"${r.getRequestURI}?$qs" }
      }
      // filter for requests to handle - if not defined, handle all
      urlPatterns = if (config.hasPath(UrlPatterns)) {
        import scala.collection.JavaConversions._
        config.getStringList(UrlPatterns).map(Pattern.compile)
      } else {
        Seq(Pattern.compile(".*"))
      }
      // mappings that we group our metrics by - add a fallback option last so we always match something
      val regexAndNames = if (config.hasPath(RequestMappings)) {
        import scala.collection.JavaConversions._
        config.getConfigList(RequestMappings).map { c =>
          val name = c.getString("name")
          val regex = Pattern.compile(c.getString("regex"))
          (regex, name)
        }.toList ++ List((Pattern.compile(".*"), "other"))
      } else {
        List((Pattern.compile(".*"), "other"))
      }
      val sessionExpiration = if (config.hasPath(SessionRemoval)) config.getInt(SessionRemoval) else -1
      val trackSessions = sessionExpiration > 0
      // index of mappings and metrics are lined up
      mappings = regexAndNames.map(_._1)
      // lazily create the metrics as they are requested, otherwise you get a bunch of empty reports
      val array = Array.ofDim[(Counter, Counter, Timer, Map[Int, Meter])](mappings.length)
      metrics = (i) => {
        val metric = array(i)
        if (metric != null) {
          metric
        } else {
          val created = createMetrics(registry, s"$prefix.${regexAndNames(i)._2}", trackSessions)
          array(i) = created
          created
        }
      }
      // handle session expiration
      if (trackSessions) {
        executor = Executors.newSingleThreadScheduledExecutor()
        val mappedSessions = Array.fill(mappings.length)(scala.collection.mutable.Set.empty[String])
        val expiredSessions = scala.collection.mutable.Set.empty[String]
        AggregatedMetricsFilter.expiredSessions.put(this, expiredSessions)
        trackSession = (request, i, counter) => {
          // note: we are forcing creation of a session in order to track users
          val session = request.getSession(true).getId
          val sessions = mappedSessions(i)
          if (sessions.synchronized(sessions.add(session))) {
            counter.inc()
          }
        }
        // as the session listener is a separate class, we periodically poll the sessions that have
        // expired and decrement our session metric for each mapping
        val sessionRemoval = new Runnable {
          override def run(): Unit = {
            val expired = expiredSessions.synchronized {
              val copy = expiredSessions.toSeq
              expiredSessions.clear()
              copy
            }
            var i = 0
            while (i < mappedSessions.length) {
              val sessions = mappedSessions(i)
              val count = sessions.synchronized {
                val initial = sessions.size
                sessions --= expired
                initial - sessions.size
              }
              if (count > 0) {
                metrics(i)._2.dec(count)
              }
              i += 1
            }
          }
        }
        executor.scheduleWithFixedDelay(sessionRemoval, sessionExpiration, sessionExpiration, TimeUnit.SECONDS)
      } else {
        trackSession = (_, _, _) => {}
      }
    } catch {
      // initialization exceptions get swallowed - print them out to help debugging and re-throw
      case t: Throwable => t.printStackTrace(); throw t
    }
  }
  override def doFilter(req: ServletRequest, resp: ServletResponse, chain: FilterChain): Unit = {
    (req, resp) match {
      case (request: HttpServletRequest, response: HttpServletResponse) =>
        val uri = request.getRequestURI
        if (urlPatterns.exists(_.matcher(uri).matches())) {
          doFilter(request, response, chain)
        } else {
          logger.trace(s"Skipping metrics for request '$uri'")
          chain.doFilter(request, response)
        }
      case _ => throw new ServletException("UserMetricsFilter only supports HTTP requests")
    }
  }
  // Records metrics around the wrapped chain invocation for the first
  // mapping that matches the request (the trailing ".*" always matches).
  private def doFilter(request: HttpServletRequest, response: HttpServletResponse, chain: FilterChain): Unit = {
    val uri = getUri(request) // get the uri or layer name
    val i = mappings.indexWhere(_.matcher(uri).matches()) // we have a fallback matcher so we know i is valid
    val (activeRequests, activeSessions, timer, status) = metrics(i)
    val wrappedResponse = new StatusExposingServletResponse(response)
    activeRequests.inc()
    trackSession(request, i, activeSessions)
    val context = timer.time()
    try {
      chain.doFilter(request, wrappedResponse)
    } finally {
      context.stop()
      activeRequests.dec()
      status(wrappedResponse.status).mark()
    }
  }
  override def destroy(): Unit = {
    if (executor != null) {
      executor.shutdown()
    }
    reporters.foreach(_.stop())
  }
}
/**
 * Session listener that records destroyed session ids so each registered
 * [[AggregatedMetricsFilter]] can later decrement its session counters.
 */
class SessionMetricsListener extends HttpSessionListener {
  import AggregatedMetricsFilter.expiredSessions
  override def sessionCreated(se: HttpSessionEvent): Unit = {}
  override def sessionDestroyed(se: HttpSessionEvent): Unit = {
    val session = se.getSession.getId
    // broadcast to every filter instance; the sets are manually synchronized
    expiredSessions.values.foreach(sessions => sessions.synchronized(sessions.add(session)))
  }
}
/** Companion: configuration keys, response-code meter names, the shared
  * expired-session registry, and the metric-creation helper.
  */
object AggregatedMetricsFilter {
  // init-parameter / typesafe-config keys used by the filter
  object Params {
    val ServletConfig = "config"
    val ServletConfigPath = "geomesa.metrics.servlet"
    val NamePrefix = "name-prefix"
    val Reporters = "reporters"
    val RequestMappings = "request-mappings"
    val MapByLayer = "map-by-layer"
    val UrlPatterns = "url-patterns"
    val SessionRemoval = "session-removal-interval"
  }
  // request parameters that may carry the layer name (wfs/wms requests)
  val LayerParameters = Seq("typeNames", "typeName", "layers")
  val MeterNamesByStatus = Map(
    200 -> "responseCodes.ok",
    201 -> "responseCodes.created",
    204 -> "responseCodes.noContext",
    400 -> "responseCodes.badRequest",
    401 -> "responseCodes.unauthenticated",
    403 -> "responseCodes.unauthorized",
    404 -> "responseCodes.notFound",
    500 -> "responseCodes.serverError"
  ).withDefaultValue("responseCodes.other")
  // static variable to track our sessions - this allows interaction between the sessions listener and the filter
  // the values should be manually synchronized
  val expiredSessions =
    scala.collection.mutable.Map.empty[AggregatedMetricsFilter, scala.collection.mutable.Set[String]]
  // Builds the (activeRequests, activeSessions, timer, statusMeters) tuple
  // for one mapping; activeSessions is null when sessions are not tracked.
  // NOTE(review): mapValues produces a lazy view in Scala 2.12, so
  // registry.meter may be re-invoked on each lookup - harmless only because
  // MetricRegistry.meter is idempotent per name; confirm before changing.
  private def createMetrics(registry: MetricRegistry,
                            name: String,
                            trackSessions: Boolean): (Counter, Counter, Timer, Map[Int, Meter]) = {
    val requests = registry.counter(s"$name.activeRequests")
    val sessions = if (trackSessions) registry.counter(s"$name.activeSessions") else null.asInstanceOf[Counter]
    val timer = registry.timer(s"$name.requests")
    val status = MeterNamesByStatus.mapValues(status => registry.meter(s"$name.$status"))
      .withDefault(i => registry.meter(s"$name.${MeterNamesByStatus(i)}"))
    (requests, sessions, timer, status)
  }
}
| ddseapy/geomesa | geomesa-metrics/src/main/scala/org/locationtech/geomesa/metrics/servlet/AggregatedMetricsFilter.scala | Scala | apache-2.0 | 10,140 |
package com.arcusys.learn.liferay.helpers
import java.util.Locale
import com.arcusys.learn.liferay.LiferayClasses.LJournalArticle
trait JournalArticleHelpers {
  /** Flattens a journal article into a string-keyed map: id, group, version,
    * per-language locale display info, and per-language titles.
    */
  def getMap(article: LJournalArticle) = {
    val languageIds = article.getAvailableLanguageIds
    val locales = languageIds.map { languageId =>
      // language ids look like "en" or "en_US"; split into language/country
      val locale = languageId.split('_') match {
        case Array(language, country, _*) => new Locale(language, country)
        case Array(language)              => new Locale(language)
      }
      languageId -> Map("language" -> locale.getDisplayLanguage, "country" -> locale.getDisplayCountry)
    }.toMap
    val titles = languageIds.map(languageId => languageId -> article.getTitle(languageId)).toMap
    Map(
      "articleID" -> article.getArticleId,
      "groupID" -> article.getGroupId.toString,
      "version" -> article.getVersion.toString,
      "availableLocales" -> locales,
      "titles" -> titles)
  }
} | arcusys/Valamis | src/main/scala/com/arcusys/learn/liferay/helpers/JournalArticleHelpers.scala | Scala | gpl-3.0 | 805 |
package dao
import javax.inject.{Inject, Singleton}
import models.RelationshipOperationOutputEntity
import play.api.db.slick.{DatabaseConfigProvider, HasDatabaseConfigProvider}
import slick.driver.JdbcProfile
import scala.concurrent.Future
/**
 * Slick schema component for the RELATIONSHIP_OUTPUT table, which stores
 * key/value outputs of relationship operations. The composite primary key
 * covers everything but the value; a cascading foreign key ties each row to
 * its relationship operation.
 */
trait RelationshipOutputsComponent extends RelationshipOperationsComponent {
  self: HasDatabaseConfigProvider[JdbcProfile] =>
  import driver.api._
  private val RelationshipOperations = TableQuery[RelationshipOperationTable]
  class RelationshipOutputTable(tag: Tag) extends Table[RelationshipOperationOutputEntity](tag, "RELATIONSHIP_OUTPUT") {
    def pk = primaryKey("RELATIONSHIP_OUTPUT_PK", (sourceInstanceId, targetInstanceId, relationshipType, interfaceName, operationName, key))
    def sourceInstanceId = column[String]("SOURCE_INSTANCE_ID")
    def targetInstanceId = column[String]("TARGET_INSTANCE_ID")
    def relationshipType = column[String]("TYPE")
    def interfaceName = column[String]("INTERFACE")
    def operationName = column[String]("OPERATION")
    def key = column[String]("KEY")
    def value = column[String]("VALUE")
    // rows are updated/removed together with their parent relationship operation
    def relationshipInstance =
      foreignKey("RELATIONSHIP_OUTPUT_RELATIONSHIP_OPERATION_FK", (sourceInstanceId, targetInstanceId, relationshipType, interfaceName, operationName), RelationshipOperations)(relOp => (relOp.sourceInstanceId, relOp.targetInstanceId, relOp.relationshipType, relOp.interfaceName, relOp.operationName), onUpdate = ForeignKeyAction.Cascade, onDelete = ForeignKeyAction.Cascade)
    def * = (sourceInstanceId, targetInstanceId, relationshipType, interfaceName, operationName, key, value) <>(RelationshipOperationOutputEntity.tupled, RelationshipOperationOutputEntity.unapply)
  }
}
/** DAO over the RELATIONSHIP_OUTPUT table: lookups per relationship instance
  * and transactional replace-all of an operation's outputs.
  */
@Singleton()
class RelationshipOutputDAO @Inject()(protected val dbConfigProvider: DatabaseConfigProvider) extends RelationshipOutputsComponent with HasDatabaseConfigProvider[JdbcProfile] {
  import driver.api._
  private val RelationshipOutputs = TableQuery[RelationshipOutputTable]
  // Predicate matching all rows of one relationship instance (source, target, type).
  private def filterByRelationshipIdFunction(sourceInstanceId: String, targetInstanceId: String, relationshipType: String) = {
    relationshipOutput: RelationshipOutputTable => {
      relationshipOutput.sourceInstanceId === sourceInstanceId &&
        relationshipOutput.targetInstanceId === targetInstanceId &&
        relationshipOutput.relationshipType === relationshipType
    }
  }
  /** All output rows of one relationship instance. */
  def get(sourceInstanceId: String, targetInstanceId: String, relationshipType: String): Future[Seq[RelationshipOperationOutputEntity]] = {
    db.run(RelationshipOutputs.filter(filterByRelationshipIdFunction(sourceInstanceId, targetInstanceId, relationshipType)).result)
  }
  /** Distinct interface names that have outputs for the relationship. */
  def getInterfaces(sourceInstanceId: String, targetInstanceId: String, relationshipType: String) = {
    db.run(RelationshipOutputs.filter(filterByRelationshipIdFunction(sourceInstanceId, targetInstanceId, relationshipType)).map(_.interfaceName).distinct.result)
  }
  /** Distinct operation names of one interface that have outputs. */
  def getOperations(sourceInstanceId: String, targetInstanceId: String, relationshipType: String, interfaceName: String) = {
    db.run(RelationshipOutputs
      .filter(filterByRelationshipIdFunction(sourceInstanceId, targetInstanceId, relationshipType))
      .filter(_.interfaceName === interfaceName)
      .map(_.operationName).distinct.result
    )
  }
  /** Inserts or updates a single output row. */
  def save(relationshipOutputEntity: RelationshipOperationOutputEntity): Future[Int] = db.run(RelationshipOutputs insertOrUpdate relationshipOutputEntity)
  /** Atomically replaces all outputs of one operation: delete existing rows,
    * then insert the supplied key/value pairs, in a single transaction.
    */
  def saveAll(sourceInstanceId: String, targetInstanceId: String, relationshipType: String, interfaceName: String, operationName: String, outputs: Map[String, String]): Future[Option[Int]] = {
    val deleteAction = RelationshipOutputs
      .filter(filterByRelationshipIdFunction(sourceInstanceId, targetInstanceId, relationshipType))
      .filter { output => output.interfaceName === interfaceName && output.operationName === operationName }
      .delete
    val insertAction = RelationshipOutputs ++= outputs.map {
      case (key, value) => RelationshipOperationOutputEntity(sourceInstanceId, targetInstanceId, relationshipType, interfaceName, operationName, key, value)
    }
    db.run((deleteAction >> insertAction).transactionally)
  }
}
| vuminhkh/tosca-runtime | deployer/app/dao/RelationshipOutputDAO.scala | Scala | mit | 4,202 |
package org.scalamu.common.filtering
import scala.util.matching.Regex
object RegexFilter {
  /**
   * Builds a [[NameFilter]] from the given regexes. With no regexes at all,
   * falls back to [[AcceptAllFilter]] so that everything is accepted.
   */
  def apply(acceptSymbols: Regex*): NameFilter =
    if (acceptSymbols.isEmpty) AcceptAllFilter
    else new RegexFilter(acceptSymbols)
}
/** A [[NameFilter]] accepting names fully matched by at least one of the given regexes. */
class RegexFilter(acceptSymbols: Seq[Regex]) extends NameFilter {
  override def accepts: String => Boolean = { name =>
    // matches() requires the whole name to match, not just a substring.
    acceptSymbols.exists(regex => regex.pattern.matcher(name).matches())
  }
}
| sugakandrey/scalamu | common/src/main/scala/org/scalamu/common/filtering/RegexFilter.scala | Scala | gpl-3.0 | 411 |
package com.seanshubin.web.sync.domain
import scala.collection.mutable.ArrayBuffer
// Enumeration-style case class: `sealed abstract` means instances can only be the
// anonymous subclasses declared in the companion object below, giving a closed set
// of statuses while still getting case-class equality.
sealed abstract case class DownloadStatus(isError: Boolean, shouldLog: Boolean, name: String, description: String) {
// Side effect on construction: register this instance in the companion's registry,
// which backs DownloadStatus.values.
DownloadStatus.valuesBuffer += this
override def toString = name
}
object DownloadStatus {
// Registry filled as a side effect of each DownloadStatus constructor run
// (see `valuesBuffer += this` in the class body). It must be declared before
// the status vals below so it exists when they register themselves.
private val valuesBuffer = new ArrayBuffer[DownloadStatus]
// `lazy` defers the snapshot until after object initialization has completed,
// so all statuses below are already registered when `values` is first forced.
lazy val values = valuesBuffer.toSeq
val MissingFromLocalAndRemote = new DownloadStatus(isError = true, shouldLog = true, "gone", "missing from local and remote") {}
val PresentLocallyAndMissingFromRemote = new DownloadStatus(isError = true, shouldLog = true, "missing", "present locally, but missing from remote") {}
val MissingFromLocalAndPresentInRemote = new DownloadStatus(isError = false, shouldLog = true, "download", "was missing locally, downloaded") {}
val SameInLocalAndRemote = new DownloadStatus(isError = false, shouldLog = false, "same", "up to date, no action taken") {}
val DifferentInLocalAndRemote = new DownloadStatus(isError = false, shouldLog = true, "different", "different, downloaded") {}
// Looks up a status by its short name; None when the name is unknown.
def fromString(name: String): Option[DownloadStatus] = {
def isMatch(status: DownloadStatus) = status.name == name
values.find(isMatch)
}
// Comma-separated list of all valid short names, for error messages.
def validValuesString = values.map(_.name).mkString(", ")
}
| SeanShubin/web-sync | domain/src/main/scala/com/seanshubin/web/sync/domain/DownloadStatus.scala | Scala | unlicense | 1,301 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.coordinator
import java.util
import java.util.concurrent.atomic.AtomicReference
import org.apache.samza.{Partition, SamzaException}
import org.apache.samza.config._
import org.apache.samza.config.Config
import org.apache.samza.container.grouper.stream.SSPGrouperProxy
import org.apache.samza.container.grouper.stream.SystemStreamPartitionGrouperFactory
import org.apache.samza.container.grouper.task._
import org.apache.samza.coordinator.metadatastore.NamespaceAwareCoordinatorStreamStore
import org.apache.samza.coordinator.stream.messages.SetTaskContainerMapping
import org.apache.samza.coordinator.stream.messages.SetTaskModeMapping
import org.apache.samza.coordinator.stream.messages.SetTaskPartitionMapping
import org.apache.samza.container.LocalityManager
import org.apache.samza.container.TaskName
import org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore
import org.apache.samza.coordinator.server.HttpServer
import org.apache.samza.coordinator.server.JobServlet
import org.apache.samza.coordinator.stream.messages.SetContainerHostMapping
import org.apache.samza.job.model.ContainerModel
import org.apache.samza.job.model.JobModel
import org.apache.samza.job.model.TaskMode
import org.apache.samza.job.model.TaskModel
import org.apache.samza.metadatastore.MetadataStore
import org.apache.samza.metrics.MetricsRegistry
import org.apache.samza.metrics.MetricsRegistryMap
import org.apache.samza.runtime.LocationId
import org.apache.samza.system._
import org.apache.samza.util.ScalaJavaUtil.JavaOptionals
import org.apache.samza.util.{ConfigUtil, Logging, ReflectionUtil, Util}
import scala.collection.JavaConverters
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
/**
* Helper companion object that is responsible for wiring up a JobModelManager
* given a Config object.
*/
object JobModelManager extends Logging {
  val SOURCE = "JobModelManager"

  /**
   * a volatile value to store the current instantiated <code>JobModelManager</code>
   */
  @volatile var currentJobModelManager: JobModelManager = _
  val jobModelRef: AtomicReference[JobModel] = new AtomicReference[JobModel]()

  /**
   * Currently used only in the ApplicationMaster for yarn deployment model.
   * Does the following:
   * a) Reads the jobModel from coordinator stream using the job's configuration.
   * b) Recomputes the changelog partition mapping based on jobModel and job's configuration.
   * c) Builds JobModelManager using the jobModel read from coordinator stream.
   * @param config config from the coordinator stream.
   * @param changelogPartitionMapping changelog partition-to-task mapping of the samza job.
   * @param metricsRegistry the registry for reporting metrics.
   * @return the instantiated {@see JobModelManager}.
   */
  def apply(config: Config, changelogPartitionMapping: util.Map[TaskName, Integer],
    metadataStore: MetadataStore,
    metricsRegistry: MetricsRegistry = new MetricsRegistryMap()): JobModelManager = {

    // Instantiate the respective metadata store util classes which uses the same coordinator metadata store.
    val localityManager = new LocalityManager(new NamespaceAwareCoordinatorStreamStore(metadataStore, SetContainerHostMapping.TYPE))
    val taskAssignmentManager = new TaskAssignmentManager(new NamespaceAwareCoordinatorStreamStore(metadataStore, SetTaskContainerMapping.TYPE), new NamespaceAwareCoordinatorStreamStore(metadataStore, SetTaskModeMapping.TYPE))
    val taskPartitionAssignmentManager = new TaskPartitionAssignmentManager(new NamespaceAwareCoordinatorStreamStore(metadataStore, SetTaskPartitionMapping.TYPE))

    val systemAdmins = new SystemAdmins(config)
    try {
      systemAdmins.start()
      val streamMetadataCache = new StreamMetadataCache(systemAdmins, 0)
      val grouperMetadata: GrouperMetadata = getGrouperMetadata(config, localityManager, taskAssignmentManager, taskPartitionAssignmentManager)

      val jobModel: JobModel = readJobModel(config, changelogPartitionMapping, streamMetadataCache, grouperMetadata)
      jobModelRef.set(new JobModel(jobModel.getConfig, jobModel.getContainers, localityManager))

      updateTaskAssignments(jobModel, taskAssignmentManager, taskPartitionAssignmentManager, grouperMetadata)

      val server = new HttpServer
      server.addServlet("/", new JobServlet(jobModelRef))

      currentJobModelManager = new JobModelManager(jobModelRef.get(), server, localityManager)
      currentJobModelManager
    } finally {
      systemAdmins.stop()
      // Not closing coordinatorStreamStore, since {@code ClusterBasedJobCoordinator} uses it to read container locality through {@code JobModel}.
    }
  }

  /**
   * Builds the {@see GrouperMetadataImpl} for the samza job.
   * @param config represents the configurations defined by the user.
   * @param localityManager provides the processor to host mapping persisted to the metadata store.
   * @param taskAssignmentManager provides the processor to task assignments persisted to the metadata store.
   * @param taskPartitionAssignmentManager provides the task to partition assignments persisted to the metadata store.
   * @return the instantiated {@see GrouperMetadata}.
   */
  def getGrouperMetadata(config: Config, localityManager: LocalityManager, taskAssignmentManager: TaskAssignmentManager, taskPartitionAssignmentManager: TaskPartitionAssignmentManager) = {
    val processorLocality: util.Map[String, LocationId] = getProcessorLocality(config, localityManager)
    val taskModes: util.Map[TaskName, TaskMode] = taskAssignmentManager.readTaskModes()

    // We read the taskAssignment only for ActiveTasks, i.e., tasks that have no task-mode or have an active task mode
    val taskAssignment: util.Map[String, String] = taskAssignmentManager.readTaskAssignment().
      filterKeys(taskName => !taskModes.containsKey(new TaskName(taskName)) || taskModes.get(new TaskName(taskName)).eq(TaskMode.Active))

    val taskNameToProcessorId: util.Map[TaskName, String] = new util.HashMap[TaskName, String]()
    for ((taskName, processorId) <- taskAssignment) {
      taskNameToProcessorId.put(new TaskName(taskName), processorId)
    }

    val taskLocality: util.Map[TaskName, LocationId] = new util.HashMap[TaskName, LocationId]()
    for ((taskName, processorId) <- taskAssignment) {
      if (processorLocality.containsKey(processorId)) {
        taskLocality.put(new TaskName(taskName), processorLocality.get(processorId))
      }
    }

    val sspToTaskMapping: util.Map[SystemStreamPartition, util.List[String]] = taskPartitionAssignmentManager.readTaskPartitionAssignments()
    val taskPartitionAssignments: util.Map[TaskName, util.List[SystemStreamPartition]] = new util.HashMap[TaskName, util.List[SystemStreamPartition]]()
    // Task to partition assignments is stored as {@see SystemStreamPartition} to list of {@see TaskName} in
    // coordinator stream. This is done due to the 1 MB value size limit in a kafka topic. Conversion to
    // taskName to SystemStreamPartitions is done here to wire-in the data to {@see JobModel}.
    sspToTaskMapping foreach { case (systemStreamPartition: SystemStreamPartition, taskNames: util.List[String]) =>
      for (task <- taskNames) {
        val taskName: TaskName = new TaskName(task)
        // We read the partition assignments only for active-tasks, i.e., tasks that have no task-mode or have an active task mode
        if (!taskModes.containsKey(taskName) || taskModes.get(taskName).eq(TaskMode.Active)) {
          taskPartitionAssignments.putIfAbsent(taskName, new util.ArrayList[SystemStreamPartition]())
          taskPartitionAssignments.get(taskName).add(systemStreamPartition)
        }
      }
    }

    new GrouperMetadataImpl(processorLocality, taskLocality, taskPartitionAssignments, taskNameToProcessorId)
  }

  /**
   * Retrieves and returns the processor locality of a samza job using provided {@see Config} and {@see LocalityManager}.
   * @param config provides the configurations defined by the user. Required to connect to the storage layer.
   * @param localityManager provides the processor to host mapping persisted to the metadata store.
   * @return the processor locality.
   */
  def getProcessorLocality(config: Config, localityManager: LocalityManager) = {
    val containerToLocationId: util.Map[String, LocationId] = new util.HashMap[String, LocationId]()
    val existingContainerLocality = localityManager.readContainerLocality()

    for (containerId <- 0 until new JobConfig(config).getContainerCount) {
      val localityMapping = existingContainerLocality.get(containerId.toString)
      // To handle the case when the container count is increased between two different runs of a samza-yarn job,
      // set the locality of newly added containers to any_host.
      var locationId: LocationId = new LocationId("ANY_HOST")
      if (localityMapping != null && localityMapping.containsKey(SetContainerHostMapping.HOST_KEY)) {
        locationId = new LocationId(localityMapping.get(SetContainerHostMapping.HOST_KEY))
      }
      containerToLocationId.put(containerId.toString, locationId)
    }

    containerToLocationId
  }

  /**
   * This method does the following:
   * 1. Deletes the existing task assignments if the partition-task grouping has changed from the previous run of the job.
   * 2. Saves the newly generated task assignments to the storage layer through the {@param TaskAssignementManager}.
   *
   * @param jobModel represents the {@see JobModel} of the samza job.
   * @param taskAssignmentManager required to persist the processor to task assignments to the metadata store.
   * @param taskPartitionAssignmentManager required to persist the task to partition assignments to the metadata store.
   * @param grouperMetadata provides the historical metadata of the samza application.
   */
  def updateTaskAssignments(jobModel: JobModel,
    taskAssignmentManager: TaskAssignmentManager,
    taskPartitionAssignmentManager: TaskPartitionAssignmentManager,
    grouperMetadata: GrouperMetadata): Unit = {
    info("Storing the task assignments into metadata store.")
    val activeTaskNames: util.Set[String] = new util.HashSet[String]()
    val standbyTaskNames: util.Set[String] = new util.HashSet[String]()
    val systemStreamPartitions: util.Set[SystemStreamPartition] = new util.HashSet[SystemStreamPartition]()
    for (container <- jobModel.getContainers.values()) {
      for (taskModel <- container.getTasks.values()) {
        if(taskModel.getTaskMode.eq(TaskMode.Active)) {
          activeTaskNames.add(taskModel.getTaskName.getTaskName)
        }

        if(taskModel.getTaskMode.eq(TaskMode.Standby)) {
          standbyTaskNames.add(taskModel.getTaskName.getTaskName)
        }
        systemStreamPartitions.addAll(taskModel.getSystemStreamPartitions)
      }
    }

    val previousTaskToContainerId = grouperMetadata.getPreviousTaskToProcessorAssignment
    if (activeTaskNames.size() != previousTaskToContainerId.size()) {
      warn("Current task count %s does not match saved task count %s. Stateful jobs may observe misalignment of keys!"
        format (activeTaskNames.size(), previousTaskToContainerId.size()))
      // If the tasks changed, then the partition-task grouping is also likely changed and we can't handle that
      // without a much more complicated mapping. Further, the partition count may have changed, which means
      // input message keys are likely reshuffled w.r.t. partitions, so the local state may not contain necessary
      // data associated with the incoming keys. Warn the user and default to grouper
      // In this scenario the tasks may have been reduced, so we need to delete all the existing messages
      taskAssignmentManager.deleteTaskContainerMappings(previousTaskToContainerId.keys.map(taskName => taskName.getTaskName).asJava)
      taskPartitionAssignmentManager.delete(systemStreamPartitions)
    }

    // if the set of standby tasks has changed, e.g., when the replication-factor changed, or the active-tasks-set has
    // changed, we log a warning and delete the existing mapping for these tasks
    val previousStandbyTasks = taskAssignmentManager.readTaskModes().filter(x => x._2.eq(TaskMode.Standby))
    // Compare task *names* as strings: previousStandbyTasks is keyed by TaskName while
    // standbyTaskNames holds plain strings. The previous check used `eq` (reference
    // equality) between two freshly created sets, which was always false, so stale
    // standby-task mappings were never cleaned up.
    val previousStandbyTaskNames = previousStandbyTasks.keySet.map(_.getTaskName)
    if (standbyTaskNames.asScala != previousStandbyTaskNames) {
      info("The set of standby tasks has changed, current standby tasks %s, previous standby tasks %s" format (standbyTaskNames, previousStandbyTasks.keySet))
      taskAssignmentManager.deleteTaskContainerMappings(previousStandbyTasks.map(x => x._1.getTaskName).asJava)
    }

    // Task to partition assignments is stored as {@see SystemStreamPartition} to list of {@see TaskName} in
    // coordinator stream. This is done due to the 1 MB value size limit in a kafka topic. Conversion to
    // taskName to SystemStreamPartitions is done here to wire-in the data to {@see JobModel}.
    val sspToTaskNameMap: util.Map[SystemStreamPartition, util.List[String]] = new util.HashMap[SystemStreamPartition, util.List[String]]()
    val taskContainerMappings: util.Map[String, util.Map[String, TaskMode]] = new util.HashMap[String, util.Map[String, TaskMode]]()

    for (container <- jobModel.getContainers.values()) {
      for ((taskName, taskModel) <- container.getTasks) {
        taskContainerMappings.putIfAbsent(container.getId, new util.HashMap[String, TaskMode]())
        taskContainerMappings.get(container.getId).put(taskName.getTaskName, container.getTasks.get(taskName).getTaskMode)

        for (partition <- taskModel.getSystemStreamPartitions) {
          if (!sspToTaskNameMap.containsKey(partition)) {
            sspToTaskNameMap.put(partition, new util.ArrayList[String]())
          }
          sspToTaskNameMap.get(partition).add(taskName.getTaskName)
        }
      }
    }

    taskAssignmentManager.writeTaskContainerMappings(taskContainerMappings)

    taskPartitionAssignmentManager.writeTaskPartitionAssignments(sspToTaskNameMap)
  }

  /**
   * Computes the input system stream partitions of a samza job using the provided {@param config}
   * and {@param streamMetadataCache}.
   * @param config the configuration of the job.
   * @param streamMetadataCache to query the partition metadata of the input streams.
   * @return the input {@see SystemStreamPartition} of the samza job.
   */
  private def getInputStreamPartitions(config: Config, streamMetadataCache: StreamMetadataCache): Set[SystemStreamPartition] = {
    val taskConfig = new TaskConfig(config)
    // Expand regex input, if a regex-rewriter is defined in config
    val inputSystemStreams =
      JavaConverters.asScalaSetConverter(taskConfig.getInputStreams).asScala.toSet

    // Get the set of partitions for each SystemStream from the stream metadata
    streamMetadataCache
      .getStreamMetadata(inputSystemStreams, partitionsMetadataOnly = true)
      .flatMap {
        case (systemStream, metadata) =>
          metadata
            .getSystemStreamPartitionMetadata
            .asScala
            .keys
            .map(new SystemStreamPartition(systemStream, _))
      }.toSet
  }

  /**
   * Builds the input {@see SystemStreamPartition} based upon the {@param config} defined by the user.
   * @param config configuration to fetch the metadata of the input streams.
   * @param streamMetadataCache required to query the partition metadata of the input streams.
   * @return the input SystemStreamPartitions of the job.
   */
  private def getMatchedInputStreamPartitions(config: Config, streamMetadataCache: StreamMetadataCache):
  Set[SystemStreamPartition] = {
    val allSystemStreamPartitions = getInputStreamPartitions(config, streamMetadataCache)
    val jobConfig = new JobConfig(config)
    JavaOptionals.toRichOptional(jobConfig.getSSPMatcherClass).toOption match {
      case Some(sspMatcherClassName) =>
        val jfr = jobConfig.getSSPMatcherConfigJobFactoryRegex.r
        JavaOptionals.toRichOptional(jobConfig.getStreamJobFactoryClass).toOption match {
          case Some(jfr(_*)) =>
            info("before match: allSystemStreamPartitions.size = %s" format allSystemStreamPartitions.size)
            val sspMatcher = ReflectionUtil.getObj(sspMatcherClassName, classOf[SystemStreamPartitionMatcher])
            val matchedPartitions = sspMatcher.filter(allSystemStreamPartitions.asJava, config).asScala.toSet
            // Usually a small set hence ok to log at info level
            info("after match: matchedPartitions = %s" format matchedPartitions)
            matchedPartitions
          case _ => allSystemStreamPartitions
        }
      case _ => allSystemStreamPartitions
    }
  }

  /**
   * Finds the {@see SystemStreamPartitionGrouperFactory} from the {@param config}. Instantiates the {@see SystemStreamPartitionGrouper}
   * object through the factory.
   * @param config the configuration of the samza job.
   * @return the instantiated {@see SystemStreamPartitionGrouper}.
   */
  private def getSystemStreamPartitionGrouper(config: Config) = {
    val factoryString = new JobConfig(config).getSystemStreamPartitionGrouperFactory
    val factory = ReflectionUtil.getObj(factoryString, classOf[SystemStreamPartitionGrouperFactory])
    factory.getSystemStreamPartitionGrouper(config)
  }

  /**
   * Refresh Kafka topic list used as input streams if enabled {@link org.apache.samza.config.RegExTopicGenerator}
   * @param config Samza job config
   * @return refreshed config
   */
  private def refreshConfigByRegexTopicRewriter(config: Config): Config = {
    val jobConfig = new JobConfig(config)
    JavaOptionals.toRichOptional(jobConfig.getConfigRewriters).toOption match {
      case Some(rewriters) => rewriters.split(",").
        filter(rewriterName => JavaOptionals.toRichOptional(jobConfig.getConfigRewriterClass(rewriterName)).toOption
          .getOrElse(throw new SamzaException("Unable to find class config for config rewriter %s." format rewriterName))
          .equalsIgnoreCase(classOf[RegExTopicGenerator].getName)).
        foldLeft(config)(ConfigUtil.applyRewriter(_, _))
      case _ => config
    }
  }

  /**
   * Does the following:
   * 1. Fetches metadata of the input streams defined in configuration through {@param streamMetadataCache}.
   * 2. Applies the {@see SystemStreamPartitionGrouper}, {@see TaskNameGrouper} defined in the configuration
   * to build the {@see JobModel}.
   * @param originalConfig the configuration of the job.
   * @param changeLogPartitionMapping the task to changelog partition mapping of the job.
   * @param streamMetadataCache the cache that holds the partition metadata of the input streams.
   * @param grouperMetadata provides the historical metadata of the application.
   * @return the built {@see JobModel}.
   */
  def readJobModel(originalConfig: Config,
    changeLogPartitionMapping: util.Map[TaskName, Integer],
    streamMetadataCache: StreamMetadataCache,
    grouperMetadata: GrouperMetadata): JobModel = {
    // refresh config if enabled regex topic rewriter
    val config = refreshConfigByRegexTopicRewriter(originalConfig)

    val taskConfig = new TaskConfig(config)
    // Do grouping to fetch TaskName to SSP mapping
    val allSystemStreamPartitions = getMatchedInputStreamPartitions(config, streamMetadataCache)

    // processor list is required by some of the groupers. So, let's pass them as part of the config.
    // Copy the config and add the processor list to the config copy.
    val configMap = new util.HashMap[String, String](config)
    configMap.put(JobConfig.PROCESSOR_LIST, String.join(",", grouperMetadata.getProcessorLocality.keySet()))
    val grouper = getSystemStreamPartitionGrouper(new MapConfig(configMap))

    val jobConfig = new JobConfig(config)

    val groups: util.Map[TaskName, util.Set[SystemStreamPartition]] = if (jobConfig.isSSPGrouperProxyEnabled) {
      val sspGrouperProxy: SSPGrouperProxy = new SSPGrouperProxy(config, grouper)
      sspGrouperProxy.group(allSystemStreamPartitions, grouperMetadata)
    } else {
      warn("SSPGrouperProxy is disabled (%s = false). Stateful jobs may produce erroneous results if this is not enabled." format JobConfig.SSP_INPUT_EXPANSION_ENABLED)
      grouper.group(allSystemStreamPartitions)
    }
    info("SystemStreamPartitionGrouper %s has grouped the SystemStreamPartitions into %d tasks with the following taskNames: %s" format(grouper, groups.size(), groups))

    // If no mappings are present(first time the job is running) we return -1, this will allow 0 to be the first change
    // mapping.
    var maxChangelogPartitionId = changeLogPartitionMapping.asScala.values.map(_.toInt).toList.sorted.lastOption.getOrElse(-1)
    // Sort the groups prior to assigning the changelog mapping so that the mapping is reproducible and intuitive
    val sortedGroups = new util.TreeMap[TaskName, util.Set[SystemStreamPartition]](groups)

    // Assign all SystemStreamPartitions to TaskNames.
    val taskModels = {
      sortedGroups.asScala.map { case (taskName, systemStreamPartitions) =>
        val changelogPartition = Option(changeLogPartitionMapping.get(taskName)) match {
          case Some(changelogPartitionId) => new Partition(changelogPartitionId)
          case _ =>
            // If we've never seen this TaskName before, then assign it a
            // new changelog.
            maxChangelogPartitionId += 1
            info("New task %s is being assigned changelog partition %s." format(taskName, maxChangelogPartitionId))
            new Partition(maxChangelogPartitionId)
        }
        new TaskModel(taskName, systemStreamPartitions, changelogPartition)
      }.toSet
    }

    // Here is where we should put in a pluggable option for the
    // SSPTaskNameGrouper for locality, load-balancing, etc.
    val containerGrouperFactory =
      ReflectionUtil.getObj(taskConfig.getTaskNameGrouperFactory, classOf[TaskNameGrouperFactory])
    val standbyTasksEnabled = jobConfig.getStandbyTasksEnabled
    val standbyTaskReplicationFactor = jobConfig.getStandbyTaskReplicationFactor
    val taskNameGrouperProxy = new TaskNameGrouperProxy(containerGrouperFactory.build(config), standbyTasksEnabled, standbyTaskReplicationFactor)
    var containerModels: util.Set[ContainerModel] = null
    val isHostAffinityEnabled = new ClusterManagerConfig(config).getHostAffinityEnabled

    if(isHostAffinityEnabled) {
      containerModels = taskNameGrouperProxy.group(taskModels, grouperMetadata)
    } else {
      containerModels = taskNameGrouperProxy.group(taskModels, new util.ArrayList[String](grouperMetadata.getProcessorLocality.keySet()))
    }

    val containerMap = containerModels.asScala.map(containerModel => containerModel.getId -> containerModel).toMap

    new JobModel(config, containerMap.asJava)
  }
}
/**
* <p>JobModelManager is responsible for managing the lifecycle of a Samza job
* once it's been started. This includes starting and stopping containers,
* managing configuration, etc.</p>
*
* <p>Any new cluster manager that's integrated with Samza (YARN, Mesos, etc)
* must integrate with the job coordinator.</p>
*
* <p>This class' API is currently unstable, and likely to change. The
* responsibility is simply to propagate the job model, and HTTP
* server right now.</p>
*/
class JobModelManager(
  /**
   * The data model that describes the Samza job's containers and tasks.
   */
  val jobModel: JobModel,
  /**
   * HTTP server used to serve a Samza job's container model to SamzaContainers when they start up.
   */
  val server: HttpServer = null,
  /**
   * LocalityManager employed to read and write container and task locality information to metadata store.
   */
  val localityManager: LocalityManager = null) extends Logging {

  debug("Got job model: %s." format jobModel)

  /** Starts the embedded HTTP server serving the job model, if one was provided. */
  def start(): Unit = {
    if (server != null) {
      debug("Starting HTTP server.")
      server.start
      info("Started HTTP server: %s" format server.getUrl)
    }
  }

  /** Stops the HTTP server and closes the locality manager. */
  def stop(): Unit = {
    if (server != null) {
      debug("Stopping HTTP server.")
      server.stop
      info("Stopped HTTP server.")
      // NOTE(review): localityManager is only closed when a server exists (preserving the
      // original nesting). If a JobModelManager is ever constructed without a server, the
      // locality manager is never closed here — confirm this is intended.
      if (localityManager != null) {
        info("Stopping localityManager")
        localityManager.close()
        info("Stopped localityManager")
      }
    }
  }
}
| abhishekshivanna/samza | samza-core/src/main/scala/org/apache/samza/coordinator/JobModelManager.scala | Scala | apache-2.0 | 25,259 |
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.ast.sample
import laika.ast._
/** Test helper shortcuts for constructing `Paragraph` nodes. */
trait ParagraphCompanionShortcuts { self =>
// Builds a paragraph from the given span sequence.
def p (spans: Span*): Paragraph = Paragraph(spans.toList)
// Builds a paragraph containing the given text.
def p (text: String): Paragraph = Paragraph(text)
}
| planet42/Laika | core/shared/src/test/scala/laika/ast/sample/ParagraphCompanionShortcuts.scala | Scala | apache-2.0 | 829 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import kafka.controller.LeaderIsrAndControllerEpoch
import kafka.server.{KafkaConfig, ReplicaFetcherManager}
import kafka.api.LeaderAndIsr
import kafka.zk.ZooKeeperTestHarness
import org.apache.kafka.common.TopicPartition
import org.junit.Assert._
import org.junit.{Before, Test}
import org.easymock.EasyMock
class ReplicationUtilsTest extends ZooKeeperTestHarness {
// Fixture constants describing a single-partition topic with broker 1 as leader.
val topic = "my-topic-test"
val partitionId = 0
val brokerId = 1
val leaderEpoch = 1
val controllerEpoch = 1
val zkVersion = 1
// ZK node where this partition's leader/ISR state is stored.
val topicPath = "/brokers/topics/my-topic-test/partitions/0/state"
// JSON payload seeded into ZK in setUp(); mirrors the constants above.
val topicData = Json.encode(Map("controller_epoch" -> 1, "leader" -> 1,
"versions" -> 1, "leader_epoch" -> 1,"isr" -> List(1,2)))
// NOTE(review): the two payload variants below are not referenced by any test in this
// class — presumably kept for future mismatch scenarios; confirm before removing.
val topicDataVersionMismatch = Json.encode(Map("controller_epoch" -> 1, "leader" -> 1,
"versions" -> 2, "leader_epoch" -> 1,"isr" -> List(1,2)))
val topicDataMismatch = Json.encode(Map("controller_epoch" -> 1, "leader" -> 1,
"versions" -> 2, "leader_epoch" -> 2,"isr" -> List(1,2)))
// Expected decoded form of topicData, used by testGetLeaderIsrAndEpochForPartition.
val topicDataLeaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(LeaderAndIsr(1,leaderEpoch,List(1,2),0), controllerEpoch)
@Before
override def setUp() {
super.setUp()
// Seed the partition state node so the tests below can read and update it.
zkUtils.createPersistentPath(topicPath, topicData)
}
@Test
def testUpdateLeaderAndIsr() {
val configs = TestUtils.createBrokerConfigs(1, zkConnect).map(KafkaConfig.fromProps)
// Mock a log with a fixed end offset; used indirectly through the mocked managers.
val log = EasyMock.createMock(classOf[kafka.log.Log])
EasyMock.expect(log.logEndOffset).andReturn(20).anyTimes()
EasyMock.expect(log)
EasyMock.replay(log)
val logManager = EasyMock.createMock(classOf[kafka.log.LogManager])
EasyMock.expect(logManager.getLog(new TopicPartition(topic, partitionId), false)).andReturn(Some(log)).anyTimes()
EasyMock.replay(logManager)
// Replica manager wired to the mocked log manager and the real test ZK client.
val replicaManager = EasyMock.createMock(classOf[kafka.server.ReplicaManager])
EasyMock.expect(replicaManager.config).andReturn(configs.head)
EasyMock.expect(replicaManager.logManager).andReturn(logManager)
EasyMock.expect(replicaManager.replicaFetcherManager).andReturn(EasyMock.createMock(classOf[ReplicaFetcherManager]))
EasyMock.expect(replicaManager.zkClient).andReturn(zkClient)
EasyMock.replay(replicaManager)
zkUtils.makeSurePersistentPathExists(ZkUtils.IsrChangeNotificationPath)
val replicas = List(0,1)
// regular update
val newLeaderAndIsr1 = new LeaderAndIsr(brokerId, leaderEpoch, replicas, 0)
val (updateSucceeded1,newZkVersion1) = ReplicationUtils.updateLeaderAndIsr(zkClient,
"my-topic-test", partitionId, newLeaderAndIsr1, controllerEpoch, 0)
assertTrue(updateSucceeded1)
assertEquals(newZkVersion1, 1)
// mismatched zkVersion with the same data
val newLeaderAndIsr2 = new LeaderAndIsr(brokerId, leaderEpoch, replicas, zkVersion + 1)
val (updateSucceeded2,newZkVersion2) = ReplicationUtils.updateLeaderAndIsr(zkClient,
"my-topic-test", partitionId, newLeaderAndIsr2, controllerEpoch, zkVersion + 1)
assertTrue(updateSucceeded2)
// returns true with existing zkVersion
assertEquals(newZkVersion2,1)
// mismatched zkVersion and leaderEpoch
val newLeaderAndIsr3 = new LeaderAndIsr(brokerId, leaderEpoch + 1, replicas, zkVersion + 1)
val (updateSucceeded3,newZkVersion3) = ReplicationUtils.updateLeaderAndIsr(zkClient,
"my-topic-test", partitionId, newLeaderAndIsr3, controllerEpoch, zkVersion + 1)
assertFalse(updateSucceeded3)
assertEquals(newZkVersion3,-1)
}
@Test
def testGetLeaderIsrAndEpochForPartition() {
// Reads back the state seeded in setUp() and checks decode round-trip; an
// out-of-range partition id must yield None.
val leaderIsrAndControllerEpoch = ReplicationUtils.getLeaderIsrAndEpochForPartition(zkUtils, topic, partitionId)
assertEquals(topicDataLeaderIsrAndControllerEpoch, leaderIsrAndControllerEpoch.get)
assertEquals(None, ReplicationUtils.getLeaderIsrAndEpochForPartition(zkUtils, topic, partitionId + 1))
}
}
| themarkypantz/kafka | core/src/test/scala/unit/kafka/utils/ReplicationUtilsTest.scala | Scala | apache-2.0 | 4,675 |
package com.twitter.finagle.netty4.ssl.client
import com.twitter.finagle.Address
import com.twitter.finagle.ssl._
import com.twitter.finagle.ssl.client.SslClientConfiguration
import com.twitter.io.TempFile
import java.io.File
import java.net.InetSocketAddress
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
/**
 * Tests for `Netty4ClientEngineFactory`: peer-address handling, key and trust
 * credentials, cipher suites, protocols, and application protocols.
 *
 * Fix: the `intercept` blocks previously bound the factory result to an unused
 * `val engine`, which triggers unused-value warnings and was inconsistent with
 * the expired-cert test; the bindings are dropped (the expression is still
 * evaluated inside `intercept`, so behavior is unchanged).
 */
@RunWith(classOf[JUnitRunner])
class Netty4ClientEngineFactoryTest extends FunSuite {

  private[this] val address: Address = Address(new InetSocketAddress("localhost", 12345))
  // A non-inet address: the created engine should carry no peer information.
  private[this] val other: Address = Address.Failed(new Exception("testing"))

  // Force JDK version for tests, because the native engine could fail to load in different
  // environments
  private[this] val factory = Netty4ClientEngineFactory(forceJdk = true)

  test("default config with inet address creates client engine with peer") {
    val config = SslClientConfiguration()
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine.getUseClientMode())
    assert(sslEngine.getPeerHost() == "localhost")
    assert(sslEngine.getPeerPort() == 12345)
  }

  test("default config without inet address creates client engine without peer") {
    val config = SslClientConfiguration()
    val engine = factory(other, config)
    val sslEngine = engine.self

    assert(sslEngine.getUseClientMode())
    // With no peer available, JSSE reports a null host and port -1.
    assert(sslEngine.getPeerHost() == null)
    assert(sslEngine.getPeerPort() == -1)
  }

  test("config with good cert and key credentials succeeds") {
    val tempCertFile = TempFile.fromResourcePath("/ssl/certs/svc-test-client.cert.pem")
    // deleteOnExit is handled by TempFile

    val tempKeyFile = TempFile.fromResourcePath("/ssl/keys/svc-test-client-pkcs8.key.pem")
    // deleteOnExit is handled by TempFile

    val keyCredentials = KeyCredentials.CertAndKey(tempCertFile, tempKeyFile)
    val config = SslClientConfiguration(keyCredentials = keyCredentials)
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine != null)
  }

  test("config with bad cert or key credential fails") {
    // An empty temp file is not a parseable certificate.
    val tempCertFile = File.createTempFile("test", "crt")
    tempCertFile.deleteOnExit()

    val tempKeyFile = TempFile.fromResourcePath("/ssl/keys/svc-test-client-pkcs8.key.pem")
    // deleteOnExit is handled by TempFile

    val keyCredentials = KeyCredentials.CertAndKey(tempCertFile, tempKeyFile)
    val config = SslClientConfiguration(keyCredentials = keyCredentials)
    intercept[SslConfigurationException] {
      factory(address, config)
    }
  }

  test("config with expired cert and valid key credential fails") {
    val tempCertFile = TempFile.fromResourcePath("/ssl/certs/svc-test-client-expired.cert.pem")
    // deleteOnExit is handled by TempFile

    val tempKeyFile = TempFile.fromResourcePath("/ssl/keys/svc-test-client-pkcs8.key.pem")
    // deleteOnExit is handled by TempFile

    val keyCredentials = KeyCredentials.CertAndKey(tempCertFile, tempKeyFile)
    val config = SslClientConfiguration(keyCredentials = keyCredentials)
    intercept[SslConfigurationException] {
      factory(address, config)
    }
  }

  test("config with cert, key, and chain succeeds") {
    val tempCertFile = TempFile.fromResourcePath("/ssl/certs/svc-test-client.cert.pem")
    // deleteOnExit is handled by TempFile

    val tempKeyFile = TempFile.fromResourcePath("/ssl/keys/svc-test-client-pkcs8.key.pem")
    // deleteOnExit is handled by TempFile

    // This file contains multiple certificates
    val tempChainFile = TempFile.fromResourcePath("/ssl/certs/svc-test-chain.cert.pem")
    // deleteOnExit is handled by TempFile

    val keyCredentials = KeyCredentials.CertKeyAndChain(tempCertFile, tempKeyFile, tempChainFile)
    val config = SslClientConfiguration(keyCredentials = keyCredentials)
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine != null)
  }

  test("config with insecure trust credentials succeeds") {
    val config = SslClientConfiguration(trustCredentials = TrustCredentials.Insecure)
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine != null)
  }

  test("config with good trusted cert collection succeeds") {
    val tempCertFile = TempFile.fromResourcePath("/ssl/certs/svc-test-chain.cert.pem")
    // deleteOnExit is handled by TempFile

    val trustCredentials = TrustCredentials.CertCollection(tempCertFile)
    val config = SslClientConfiguration(trustCredentials = trustCredentials)
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine != null)
  }

  test("config with bad trusted cert collection fails") {
    // An empty temp file is not a parseable certificate collection.
    val tempCertFile = File.createTempFile("test", "crt")
    tempCertFile.deleteOnExit()

    val trustCredentials = TrustCredentials.CertCollection(tempCertFile)
    val config = SslClientConfiguration(trustCredentials = trustCredentials)
    intercept[IllegalArgumentException] {
      factory(address, config)
    }
  }

  test("config with good cipher suites succeeds") {
    val cipherSuites = CipherSuites.Enabled(Seq("TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384"))
    val config = SslClientConfiguration(cipherSuites = cipherSuites)
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine != null)
    // The engine must enable exactly the requested suite.
    val enabled = sslEngine.getEnabledCipherSuites()
    assert(enabled.length == 1)
    assert(enabled(0) == "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384")
  }

  test("config with bad cipher suites fails") {
    val cipherSuites = CipherSuites.Enabled(Seq("TLS_ECDHE_ECDSA_WITH_AES_102_CBC_SHA496"))
    val config = SslClientConfiguration(cipherSuites = cipherSuites)
    intercept[IllegalArgumentException] {
      factory(address, config)
    }
  }

  test("config with good enabled protocols succeeds") {
    val protocols = Protocols.Enabled(Seq("TLSv1.2"))
    val config = SslClientConfiguration(protocols = protocols)
    val engine = factory(address, config)
    val sslEngine = engine.self

    assert(sslEngine != null)
    // The engine must enable exactly the requested protocol.
    val enabled = sslEngine.getEnabledProtocols()
    assert(enabled.length == 1)
    assert(enabled(0) == "TLSv1.2")
  }

  test("config with bad enabled protocols fails") {
    val protocols = Protocols.Enabled(Seq("TLSv2.0"))
    val config = SslClientConfiguration(protocols = protocols)
    intercept[IllegalArgumentException] {
      factory(address, config)
    }
  }

  // application protocols are supported only by netty-tcnative, which is
  // not tested via these tests.
  test("config with any application protocols fails for JDK provider") {
    // tests are run against the JDK provider which does not support NPN_AND_ALPN
    val appProtocols = ApplicationProtocols.Supported(Seq("h2"))
    val config = SslClientConfiguration(applicationProtocols = appProtocols)
    intercept[RuntimeException] {
      factory(address, config)
    }
  }
}
| mkhq/finagle | finagle-netty4/src/test/scala/com/twitter/finagle/netty4/ssl/client/Netty4ClientEngineFactoryTest.scala | Scala | apache-2.0 | 7,009 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.csv
import java.io.{ByteArrayOutputStream, EOFException, File, FileOutputStream}
import java.nio.charset.{Charset, StandardCharsets, UnsupportedCharsetException}
import java.nio.file.{Files, StandardOpenOption}
import java.sql.{Date, Timestamp}
import java.text.SimpleDateFormat
import java.time.{Instant, LocalDate, LocalDateTime}
import java.util.Locale
import java.util.zip.GZIPOutputStream
import scala.collection.JavaConverters._
import scala.util.Properties
import com.univocity.parsers.common.TextParsingException
import org.apache.commons.lang3.time.FastDateFormat
import org.apache.hadoop.io.SequenceFile.CompressionType
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.spark.{SparkConf, SparkException, TestUtils}
import org.apache.spark.sql.{AnalysisException, Column, DataFrame, Encoders, QueryTest, Row}
import org.apache.spark.sql.catalyst.util.{DateTimeTestUtils, DateTimeUtils}
import org.apache.spark.sql.execution.datasources.CommonFileDataSourceSuite
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
abstract class CSVSuite
extends QueryTest
with SharedSparkSession
with TestCsvData
with CommonFileDataSourceSuite {
import testImplicits._
// Format name handed to the shared CommonFileDataSourceSuite machinery.
override protected def dataSourceFormat = "csv"
// Relative paths (under the test resource root) of the CSV fixture files used
// throughout this suite; they are resolved via testFile(...) at use sites.
private val carsFile = "test-data/cars.csv"
private val carsMalformedFile = "test-data/cars-malformed.csv"
private val carsFile8859 = "test-data/cars_iso-8859-1.csv"
private val carsTsvFile = "test-data/cars.tsv"
private val carsAltFile = "test-data/cars-alternative.csv"
private val carsMultiCharDelimitedFile = "test-data/cars-multichar-delim.csv"
private val carsMultiCharCrazyDelimitedFile = "test-data/cars-multichar-delim-crazy.csv"
private val carsUnbalancedQuotesFile = "test-data/cars-unbalanced-quotes.csv"
private val carsNullFile = "test-data/cars-null.csv"
private val carsEmptyValueFile = "test-data/cars-empty-value.csv"
private val carsBlankColName = "test-data/cars-blank-column-name.csv"
private val carsCrlf = "test-data/cars-crlf.csv"
private val emptyFile = "test-data/empty.csv"
private val commentsFile = "test-data/comments.csv"
private val disableCommentsFile = "test-data/disable_comments.csv"
private val boolFile = "test-data/bool.csv"
private val decimalFile = "test-data/decimal.csv"
private val simpleSparseFile = "test-data/simple_sparse.csv"
private val numbersFile = "test-data/numbers.csv"
private val datesFile = "test-data/dates.csv"
private val unescapedQuotesFile = "test-data/unescaped-quotes.csv"
private val valueMalformedFile = "test-data/value-malformed.csv"
private val badAfterGoodFile = "test-data/bad_after_good.csv"
private val malformedRowFile = "test-data/malformedRow.csv"
/**
 * Checks the schema and (optionally) the contents of a cars-fixture DataFrame.
 *
 * @param df          the loaded DataFrame under test
 * @param withHeader  whether the fixture was read with a header row
 * @param numCars     number of data rows expected in the fixture
 * @param numFields   number of columns expected in the schema
 * @param checkHeader also verify the column names
 * @param checkValues also verify the values of the year column
 * @param checkTypes  compare year values as Int (inferred schema) instead of String
 */
private def verifyCars(
    df: DataFrame,
    withHeader: Boolean,
    numCars: Int = 3,
    numFields: Int = 5,
    checkHeader: Boolean = true,
    checkValues: Boolean = true,
    checkTypes: Boolean = false): Unit = {
  // Without a header option, the header line is parsed as an extra data row.
  val expectedRowCount = if (withHeader) numCars else numCars + 1

  // schema
  assert(df.schema.fieldNames.length === numFields)
  assert(df.count === expectedRowCount)

  if (checkHeader) {
    val expectedNames =
      if (withHeader) Array("year", "make", "model", "comment", "blank")
      else Array("_c0", "_c1", "_c2", "_c3", "_c4")
    assert(df.schema.fieldNames === expectedNames)
  }

  if (checkValues) {
    val yearValues = List("2012", "1997", "2015")
    // Headerless reads surface the literal header cell "year" as the first row.
    val actualYears = if (!withHeader) "year" :: yearValues else yearValues
    val yearColumn = if (withHeader) "year" else "_c0"

    df.select(yearColumn).collect().zipWithIndex.foreach { case (row, idx) =>
      val expected =
        if (checkTypes) Row(actualYears(idx).toInt) else Row(actualYears(idx))
      assert(row === expected)
    }
  }
}
// Loads cars.csv via format("csv")/load without a header; columns get default
// _cN names and all values stay strings.
test("simple csv test") {
  val cars = spark
    .read
    .format("csv")
    .option("header", "false")
    .load(testFile(carsFile))

  verifyCars(cars, withHeader = false, checkTypes = false)
}

// Same load as above, but through the DataFrameReader.csv(path) shorthand.
test("simple csv test with calling another function to load") {
  val cars = spark
    .read
    .option("header", "false")
    .csv(testFile(carsFile))

  verifyCars(cars, withHeader = false, checkTypes = false)
}

// With header + inferSchema, column names come from the header row and the
// year column is compared as Int (checkTypes = true).
test("simple csv test with type inference") {
  val cars = spark
    .read
    .format("csv")
    .option("header", "true")
    .option("inferSchema", "true")
    .load(testFile(carsFile))

  verifyCars(cars, withHeader = true, checkTypes = true)
}

// Parses CSV from an in-memory Dataset[String] instead of a file path,
// both with and without treating the first line as a header.
test("simple csv test with string dataset") {
  val csvDataset = spark.read.text(testFile(carsFile)).as[String]
  val cars = spark.read
    .option("header", "true")
    .option("inferSchema", "true")
    .csv(csvDataset)

  verifyCars(cars, withHeader = true, checkTypes = true)

  val carsWithoutHeader = spark.read
    .option("header", "false")
    .csv(csvDataset)

  verifyCars(carsWithoutHeader, withHeader = false, checkTypes = false)
}
// Schema inference must type a column of boolean-looking values as BooleanType.
test("test inferring booleans") {
  val result = spark.read
    .format("csv")
    .option("header", "true")
    .option("inferSchema", "true")
    .load(testFile(boolFile))

  val expectedSchema = StructType(List(
    StructField("bool", BooleanType, nullable = true)))
  assert(result.schema === expectedSchema)
}

// Schema inference must distinguish DecimalType(20, 0), LongType and
// DoubleType columns in the decimal fixture (with '~' as comment marker).
test("test inferring decimals") {
  val result = spark.read
    .format("csv")
    .option("comment", "~")
    .option("header", "true")
    .option("inferSchema", "true")
    .load(testFile(decimalFile))
  val expectedSchema = StructType(List(
    StructField("decimal", DecimalType(20, 0), nullable = true),
    StructField("long", LongType, nullable = true),
    StructField("double", DoubleType, nullable = true)))
  assert(result.schema === expectedSchema)
}
test("test with alternative delimiter and quote") {
val cars = spark.read
.format("csv")
.options(Map("quote" -> "\\'", "delimiter" -> "|", "header" -> "true"))
.load(testFile(carsAltFile))
verifyCars(cars, withHeader = true)
}
test("test with tab delimiter and double quote") {
val cars = spark.read
.options(Map("quote" -> "\\"", "delimiter" -> """\\t""", "header" -> "true"))
.csv(testFile(carsTsvFile))
verifyCars(cars, numFields = 6, withHeader = true, checkHeader = false)
}
test("SPARK-24540: test with multiple character delimiter (comma space)") {
val cars = spark.read
.options(Map("quote" -> "\\'", "delimiter" -> ", ", "header" -> "true"))
.csv(testFile(carsMultiCharDelimitedFile))
verifyCars(cars, withHeader = true)
}
test("SPARK-24540: test with multiple (crazy) character delimiter") {
val cars = spark.read
.options(Map("quote" -> "\\'", "delimiter" -> """_/-\\\\_""", "header" -> "true"))
.csv(testFile(carsMultiCharCrazyDelimitedFile))
verifyCars(cars, withHeader = true)
// check all the other columns, besides year (which is covered by verifyCars)
val otherCols = cars.select("make", "model", "comment", "blank").collect()
val expectedOtherColVals = Seq(
("Tesla", "S", "No comment", null),
("Ford", "E350", "Go get one now they are going fast", null),
("Chevy", "Volt", null, null)
)
expectedOtherColVals.zipWithIndex.foreach { case (values, index) =>
val actualRow = otherCols(index)
values match {
case (make, model, comment, blank) =>
assert(make == actualRow.getString(0))
assert(model == actualRow.getString(1))
assert(comment == actualRow.getString(2))
assert(blank == actualRow.getString(3))
}
}
}
test("parse unescaped quotes with maxCharsPerColumn") {
val rows = spark.read
.format("csv")
.option("maxCharsPerColumn", "4")
.load(testFile(unescapedQuotesFile))
val expectedRows = Seq(Row("\\"a\\"b", "ccc", "ddd"), Row("ab", "cc\\"c", "ddd\\""))
checkAnswer(rows, expectedRows)
}
// A nonexistent charset name must fail with UnsupportedCharsetException,
// and the exception message must name the offending charset.
test("bad encoding name") {
  val badCharset = "1-9588-osi"
  val exception = intercept[UnsupportedCharsetException] {
    spark.read
      .format("csv")
      .option("charset", badCharset)
      .load(testFile(carsFile8859))
  }

  assert(exception.getMessage.contains(badCharset))
}
test("test different encoding") {
withView("carsTable") {
// scalastyle:off
spark.sql(
s"""
|CREATE TEMPORARY VIEW carsTable USING csv
|OPTIONS (path "${testFile(carsFile8859)}", header "true",
|charset "iso-8859-1", delimiter "þ")
""".stripMargin.replaceAll("\\n", " "))
// scalastyle:on
verifyCars(spark.table("carsTable"), withHeader = true)
}
}
test("crlf line separators in multiline mode") {
val cars = spark
.read
.format("csv")
.option("multiLine", "true")
.option("header", "true")
.load(testFile(carsCrlf))
verifyCars(cars, withHeader = true)
}
test("test aliases sep and encoding for delimiter and charset") {
// scalastyle:off
val cars = spark
.read
.format("csv")
.option("header", "true")
.option("encoding", "iso-8859-1")
.option("sep", "þ")
.load(testFile(carsFile8859))
// scalastyle:on
verifyCars(cars, withHeader = true)
}
test("DDL test with tab separated file") {
withView("carsTable") {
spark.sql(
s"""
|CREATE TEMPORARY VIEW carsTable USING csv
|OPTIONS (path "${testFile(carsTsvFile)}", header "true", delimiter "\\t")
""".stripMargin.replaceAll("\\n", " "))
verifyCars(spark.table("carsTable"), numFields = 6, withHeader = true, checkHeader = false)
}
}
test("DDL test parsing decimal type") {
withView("carsTable") {
spark.sql(
s"""
|CREATE TEMPORARY VIEW carsTable
|(yearMade double, makeName string, modelName string, priceTag decimal,
| comments string, grp string)
|USING csv
|OPTIONS (path "${testFile(carsTsvFile)}", header "true", delimiter "\\t")
""".stripMargin.replaceAll("\\n", " "))
assert(
spark.sql("SELECT makeName FROM carsTable where priceTag > 60000").collect().size === 1)
}
}
test("test for DROPMALFORMED parsing mode") {
withSQLConf(SQLConf.CSV_PARSER_COLUMN_PRUNING.key -> "false") {
Seq(false, true).foreach { multiLine =>
val cars = spark.read
.format("csv")
.option("multiLine", multiLine)
.options(Map("header" -> "true", "mode" -> "dropmalformed"))
.load(testFile(carsFile))
assert(cars.select("year").collect().size === 2)
}
}
}
test("test for blank column names on read and select columns") {
val cars = spark.read
.format("csv")
.options(Map("header" -> "true", "inferSchema" -> "true"))
.load(testFile(carsBlankColName))
assert(cars.select("customer").collect().size == 2)
assert(cars.select("_c0").collect().size == 2)
assert(cars.select("_c1").collect().size == 2)
}
test("test for FAILFAST parsing mode") {
Seq(false, true).foreach { multiLine =>
val exception = intercept[SparkException] {
spark.read
.format("csv")
.option("multiLine", multiLine)
.options(Map("header" -> "true", "mode" -> "failfast"))
.load(testFile(carsFile)).collect()
}
assert(exception.getMessage.contains("Malformed CSV record"))
}
}
test("test for tokens more than the fields in the schema") {
val cars = spark
.read
.format("csv")
.option("header", "false")
.option("comment", "~")
.load(testFile(carsMalformedFile))
verifyCars(cars, withHeader = false, checkTypes = false)
}
test("test with null quote character") {
val cars = spark.read
.format("csv")
.option("header", "true")
.option("quote", "")
.load(testFile(carsUnbalancedQuotesFile))
verifyCars(cars, withHeader = true, checkValues = false)
}
test("test with empty file and known schema") {
val result = spark.read
.format("csv")
.schema(StructType(List(StructField("column", StringType, false))))
.load(testFile(emptyFile))
assert(result.collect.size === 0)
assert(result.schema.fieldNames.size === 1)
}
test("DDL test with empty file") {
withView("carsTable") {
spark.sql(
s"""
|CREATE TEMPORARY VIEW carsTable
|(yearMade double, makeName string, modelName string, comments string, grp string)
|USING csv
|OPTIONS (path "${testFile(emptyFile)}", header "false")
""".stripMargin.replaceAll("\\n", " "))
assert(spark.sql("SELECT count(*) FROM carsTable").collect().head(0) === 0)
}
}
test("DDL test with schema") {
withView("carsTable") {
spark.sql(
s"""
|CREATE TEMPORARY VIEW carsTable
|(yearMade double, makeName string, modelName string, comments string, blank string)
|USING csv
|OPTIONS (path "${testFile(carsFile)}", header "true")
""".stripMargin.replaceAll("\\n", " "))
val cars = spark.table("carsTable")
verifyCars(cars, withHeader = true, checkHeader = false, checkValues = false)
assert(
cars.schema.fieldNames === Array("yearMade", "makeName", "modelName", "comments", "blank"))
}
}
test("save csv") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val cars = spark.read
.format("csv")
.option("header", "true")
.load(testFile(carsFile))
cars.coalesce(1).write
.option("header", "true")
.csv(csvDir)
val carsCopy = spark.read
.format("csv")
.option("header", "true")
.load(csvDir)
verifyCars(carsCopy, withHeader = true)
}
}
test("save csv with quote") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val cars = spark.read
.format("csv")
.option("header", "true")
.load(testFile(carsFile))
cars.coalesce(1).write
.format("csv")
.option("header", "true")
.option("quote", "\\"")
.save(csvDir)
val carsCopy = spark.read
.format("csv")
.option("header", "true")
.option("quote", "\\"")
.load(csvDir)
verifyCars(carsCopy, withHeader = true)
}
}
test("save csv with quoteAll enabled") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val data = Seq(("test \\"quote\\"", 123, "it \\"works\\"!", "\\"very\\" well"))
val df = spark.createDataFrame(data)
// escapeQuotes should be true by default
df.coalesce(1).write
.format("csv")
.option("quote", "\\"")
.option("escape", "\\"")
.option("quoteAll", "true")
.save(csvDir)
val results = spark.read
.format("text")
.load(csvDir)
.collect()
val expected = "\\"test \\"\\"quote\\"\\"\\",\\"123\\",\\"it \\"\\"works\\"\\"!\\",\\"\\"\\"very\\"\\" well\\""
assert(results.toSeq.map(_.toSeq) === Seq(Seq(expected)))
}
}
test("save csv with quote escaping enabled") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val data = Seq(("test \\"quote\\"", 123, "it \\"works\\"!", "\\"very\\" well"))
val df = spark.createDataFrame(data)
// escapeQuotes should be true by default
df.coalesce(1).write
.format("csv")
.option("quote", "\\"")
.option("escape", "\\"")
.save(csvDir)
val results = spark.read
.format("text")
.load(csvDir)
.collect()
val expected = "\\"test \\"\\"quote\\"\\"\\",123,\\"it \\"\\"works\\"\\"!\\",\\"\\"\\"very\\"\\" well\\""
assert(results.toSeq.map(_.toSeq) === Seq(Seq(expected)))
}
}
test("save csv with quote escaping disabled") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val data = Seq(("test \\"quote\\"", 123, "it \\"works\\"!", "\\"very\\" well"))
val df = spark.createDataFrame(data)
// escapeQuotes should be true by default
df.coalesce(1).write
.format("csv")
.option("quote", "\\"")
.option("escapeQuotes", "false")
.option("escape", "\\"")
.save(csvDir)
val results = spark.read
.format("text")
.load(csvDir)
.collect()
val expected = "test \\"quote\\",123,it \\"works\\"!,\\"\\"\\"very\\"\\" well\\""
assert(results.toSeq.map(_.toSeq) === Seq(Seq(expected)))
}
}
test("save csv with quote escaping, using charToEscapeQuoteEscaping option") {
withTempPath { path =>
// original text
val df1 = Seq(
"""You are "beautiful"""",
"""Yes, \\"in the inside"\\"""
).toDF()
// text written in CSV with following options:
// quote character: "
// escape character: \\
// character to escape quote escaping: #
val df2 = Seq(
""""You are \\"beautiful\\""""",
""""Yes, #\\\\"in the inside\\"#\\""""
).toDF()
df2.coalesce(1).write.text(path.getAbsolutePath)
val df3 = spark.read
.format("csv")
.option("quote", "\\"")
.option("escape", "\\\\")
.option("charToEscapeQuoteEscaping", "#")
.load(path.getAbsolutePath)
checkAnswer(df1, df3)
}
}
test("SPARK-19018: Save csv with custom charset") {
// scalastyle:off nonascii
val content = "µß áâä ÁÂÄ"
// scalastyle:on nonascii
Seq("iso-8859-1", "utf-8", "utf-16", "utf-32", "windows-1250").foreach { encoding =>
withTempPath { path =>
val csvDir = new File(path, "csv")
Seq(content).toDF().write
.option("encoding", encoding)
.csv(csvDir.getCanonicalPath)
csvDir.listFiles().filter(_.getName.endsWith("csv")).foreach({ csvFile =>
val readback = Files.readAllBytes(csvFile.toPath)
val expected = (content + Properties.lineSeparator).getBytes(Charset.forName(encoding))
assert(readback === expected)
})
}
}
}
test("SPARK-19018: error handling for unsupported charsets") {
val exception = intercept[SparkException] {
withTempPath { path =>
val csvDir = new File(path, "csv").getCanonicalPath
Seq("a,A,c,A,b,B").toDF().write
.option("encoding", "1-9588-osi")
.csv(csvDir)
}
}
assert(exception.getCause.getMessage.contains("1-9588-osi"))
}
test("commented lines in CSV data") {
Seq("false", "true").foreach { multiLine =>
val results = spark.read
.format("csv")
.options(Map("comment" -> "~", "header" -> "false", "multiLine" -> multiLine))
.load(testFile(commentsFile))
.collect()
val expected =
Seq(Seq("1", "2", "3", "4", "5.01", "2015-08-20 15:57:00"),
Seq("6", "7", "8", "9", "0", "2015-08-21 16:58:01"),
Seq("1", "2", "3", "4", "5", "2015-08-23 18:00:42"))
assert(results.toSeq.map(_.toSeq) === expected)
}
}
test("inferring schema with commented lines in CSV data") {
val results = spark.read
.format("csv")
.options(Map("comment" -> "~", "header" -> "false", "inferSchema" -> "true"))
.option("timestampFormat", "yyyy-MM-dd HH:mm:ss")
.load(testFile(commentsFile))
.collect()
val expected =
Seq(Seq(1, 2, 3, 4, 5.01D, Timestamp.valueOf("2015-08-20 15:57:00")),
Seq(6, 7, 8, 9, 0, Timestamp.valueOf("2015-08-21 16:58:01")),
Seq(1, 2, 3, 4, 5, Timestamp.valueOf("2015-08-23 18:00:42")))
assert(results.toSeq.map(_.toSeq) === expected)
}
test("inferring timestamp types via custom date format") {
val options = Map(
"header" -> "true",
"inferSchema" -> "true",
"timestampFormat" -> "dd/MM/yyyy HH:mm")
val results = spark.read
.format("csv")
.options(options)
.load(testFile(datesFile))
.select("date")
.collect()
val dateFormat = new SimpleDateFormat("dd/MM/yyyy HH:mm", Locale.US)
val expected =
Seq(Seq(new Timestamp(dateFormat.parse("26/08/2015 18:00").getTime)),
Seq(new Timestamp(dateFormat.parse("27/10/2014 18:30").getTime)),
Seq(new Timestamp(dateFormat.parse("28/01/2016 20:00").getTime)))
assert(results.toSeq.map(_.toSeq) === expected)
}
test("load date types via custom date format") {
val customSchema = new StructType(Array(StructField("date", DateType, true)))
val options = Map(
"header" -> "true",
"inferSchema" -> "false",
"dateFormat" -> "dd/MM/yyyy HH:mm")
val results = spark.read
.format("csv")
.options(options)
.option("timeZone", "UTC")
.schema(customSchema)
.load(testFile(datesFile))
.select("date")
.collect()
val dateFormat = new SimpleDateFormat("dd/MM/yyyy hh:mm", Locale.US)
val expected = Seq(
new Date(dateFormat.parse("26/08/2015 18:00").getTime),
new Date(dateFormat.parse("27/10/2014 18:30").getTime),
new Date(dateFormat.parse("28/01/2016 20:00").getTime))
val dates = results.toSeq.map(_.toSeq.head)
expected.zip(dates).foreach {
case (expectedDate, date) =>
// As it truncates the hours, minutes and etc., we only check
// if the dates (days, months and years) are the same via `toString()`.
assert(expectedDate.toString === date.toString)
}
}
test("setting comment to null disables comment support") {
val results = spark.read
.format("csv")
.options(Map("comment" -> "", "header" -> "false"))
.load(testFile(disableCommentsFile))
.collect()
val expected =
Seq(
Seq("#1", "2", "3"),
Seq("4", "5", "6"))
assert(results.toSeq.map(_.toSeq) === expected)
}
test("nullable fields with user defined null value of \\"null\\"") {
// year,make,model,comment,blank
val dataSchema = StructType(List(
StructField("year", IntegerType, nullable = true),
StructField("make", StringType, nullable = false),
StructField("model", StringType, nullable = false),
StructField("comment", StringType, nullable = true),
StructField("blank", StringType, nullable = true)))
val cars = spark.read
.format("csv")
.schema(dataSchema)
.options(Map("header" -> "true", "nullValue" -> "null"))
.load(testFile(carsNullFile))
verifyCars(cars, withHeader = true, checkValues = false)
val results = cars.collect()
assert(results(0).toSeq === Array(2012, "Tesla", "S", null, null))
assert(results(2).toSeq === Array(null, "Chevy", "Volt", null, null))
}
test("empty fields with user defined empty values") {
// year,make,model,comment,blank
val dataSchema = StructType(List(
StructField("year", IntegerType, nullable = true),
StructField("make", StringType, nullable = false),
StructField("model", StringType, nullable = false),
StructField("comment", StringType, nullable = true),
StructField("blank", StringType, nullable = true)))
val cars = spark.read
.format("csv")
.schema(dataSchema)
.option("header", "true")
.option("emptyValue", "empty")
.load(testFile(carsEmptyValueFile))
verifyCars(cars, withHeader = true, checkValues = false)
val results = cars.collect()
assert(results(0).toSeq === Array(2012, "Tesla", "S", "empty", "empty"))
assert(results(1).toSeq ===
Array(1997, "Ford", "E350", "Go get one now they are going fast", null))
assert(results(2).toSeq === Array(2015, "Chevy", "Volt", null, "empty"))
}
test("save csv with empty fields with user defined empty values") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
// year,make,model,comment,blank
val dataSchema = StructType(List(
StructField("year", IntegerType, nullable = true),
StructField("make", StringType, nullable = false),
StructField("model", StringType, nullable = false),
StructField("comment", StringType, nullable = true),
StructField("blank", StringType, nullable = true)))
val cars = spark.read
.format("csv")
.schema(dataSchema)
.option("header", "true")
.option("nullValue", "NULL")
.load(testFile(carsEmptyValueFile))
cars.coalesce(1).write
.format("csv")
.option("header", "true")
.option("emptyValue", "empty")
.option("nullValue", null)
.save(csvDir)
val carsCopy = spark.read
.format("csv")
.schema(dataSchema)
.option("header", "true")
.load(csvDir)
verifyCars(carsCopy, withHeader = true, checkValues = false)
val results = carsCopy.collect()
assert(results(0).toSeq === Array(2012, "Tesla", "S", "empty", "empty"))
assert(results(1).toSeq ===
Array(1997, "Ford", "E350", "Go get one now they are going fast", null))
assert(results(2).toSeq === Array(2015, "Chevy", "Volt", null, "empty"))
}
}
test("save csv with compression codec option") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val cars = spark.read
.format("csv")
.option("header", "true")
.load(testFile(carsFile))
cars.coalesce(1).write
.format("csv")
.option("header", "true")
.option("compression", "gZiP")
.save(csvDir)
val compressedFiles = new File(csvDir).listFiles()
assert(compressedFiles.exists(_.getName.endsWith(".csv.gz")))
val carsCopy = spark.read
.format("csv")
.option("header", "true")
.load(csvDir)
verifyCars(carsCopy, withHeader = true)
}
}
test("SPARK-13543 Write the output as uncompressed via option()") {
val extraOptions = Map(
"mapreduce.output.fileoutputformat.compress" -> "true",
"mapreduce.output.fileoutputformat.compress.type" -> CompressionType.BLOCK.toString,
"mapreduce.map.output.compress" -> "true",
"mapreduce.map.output.compress.codec" -> classOf[GzipCodec].getName
)
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val cars = spark.read
.format("csv")
.option("header", "true")
.options(extraOptions)
.load(testFile(carsFile))
cars.coalesce(1).write
.format("csv")
.option("header", "true")
.option("compression", "none")
.options(extraOptions)
.save(csvDir)
val compressedFiles = new File(csvDir).listFiles()
assert(compressedFiles.exists(!_.getName.endsWith(".csv.gz")))
val carsCopy = spark.read
.format("csv")
.option("header", "true")
.options(extraOptions)
.load(csvDir)
verifyCars(carsCopy, withHeader = true)
}
}
test("Schema inference correctly identifies the datatype when data is sparse.") {
val df = spark.read
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.load(testFile(simpleSparseFile))
assert(
df.schema.fields.map(field => field.dataType).sameElements(
Array(IntegerType, IntegerType, IntegerType, IntegerType)))
}
test("old csv data source name works") {
val cars = spark
.read
.format("com.databricks.spark.csv")
.option("header", "false")
.load(testFile(carsFile))
verifyCars(cars, withHeader = false, checkTypes = false)
}
test("nulls, NaNs and Infinity values can be parsed") {
val numbers = spark
.read
.format("csv")
.schema(StructType(List(
StructField("int", IntegerType, true),
StructField("long", LongType, true),
StructField("float", FloatType, true),
StructField("double", DoubleType, true)
)))
.options(Map(
"header" -> "true",
"mode" -> "DROPMALFORMED",
"nullValue" -> "--",
"nanValue" -> "NAN",
"negativeInf" -> "-INF",
"positiveInf" -> "INF"))
.load(testFile(numbersFile))
assert(numbers.count() == 8)
}
test("SPARK-15585 turn off quotations") {
val cars = spark.read
.format("csv")
.option("header", "true")
.option("quote", "")
.load(testFile(carsUnbalancedQuotesFile))
verifyCars(cars, withHeader = true, checkValues = false)
}
// Timestamps written without an explicit timestampFormat must serialize as ISO8601 strings.
test("Write timestamps correctly in ISO8601 format by default") {
  withTempDir { dir =>
    val outPath = s"${dir.getCanonicalPath}/iso8601timestamps.csv"
    val timestamps = spark.read
      .format("csv")
      .option("inferSchema", "true")
      .option("header", "true")
      .option("timestampFormat", "dd/MM/yyyy HH:mm")
      .load(testFile(datesFile))
    timestamps.write
      .format("csv")
      .option("header", "true")
      .save(outPath)
    // Read the output back as plain strings so the textual representation can be inspected.
    val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
    val readBack = spark.read
      .format("csv")
      .schema(stringSchema)
      .option("header", "true")
      .load(outPath)
    // Original local was misspelled "iso8501"; renamed for clarity (behavior unchanged).
    val iso8601Format = FastDateFormat.getInstance("yyyy-MM-dd'T'HH:mm:ss.SSSXXX", Locale.US)
    val expected = timestamps.collect().map { row =>
      Row(iso8601Format.format(row.toSeq.head))
    }
    checkAnswer(readBack, expected)
  }
}
// Dates written without an explicit dateFormat must serialize as ISO8601 (yyyy-MM-dd).
// Pinned to UTC so date formatting is deterministic across machines.
test("Write dates correctly in ISO8601 format by default") {
withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> "UTC") {
withTempDir { dir =>
val customSchema = new StructType(Array(StructField("date", DateType, true)))
val iso8601datesPath = s"${dir.getCanonicalPath}/iso8601dates.csv"
val dates = spark.read
.format("csv")
.schema(customSchema)
.option("header", "true")
.option("inferSchema", "false")
.option("dateFormat", "dd/MM/yyyy HH:mm")
.load(testFile(datesFile))
dates.write
.format("csv")
.option("header", "true")
.save(iso8601datesPath)
// This will load back the dates as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val iso8601dates = spark.read
.format("csv")
.schema(stringSchema)
.option("header", "true")
.load(iso8601datesPath)
// NOTE: local name "iso8501" is a long-standing typo for iso8601.
val iso8501 = FastDateFormat.getInstance("yyyy-MM-dd", Locale.US)
val expectedDates = dates.collect().map { r =>
// This should be ISO8601 formatted string.
Row(iso8501.format(r.toSeq.head))
}
checkAnswer(iso8601dates, expectedDates)
}
}
}
// Writing timestamps with defaults and reading them back (with inference) must be lossless.
test("Roundtrip in reading and writing timestamps") {
withTempDir { dir =>
val iso8601timestampsPath = s"${dir.getCanonicalPath}/iso8601timestamps.csv"
val timestamps = spark.read
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.load(testFile(datesFile))
timestamps.write
.format("csv")
.option("header", "true")
.save(iso8601timestampsPath)
val iso8601timestamps = spark.read
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.load(iso8601timestampsPath)
checkAnswer(iso8601timestamps, timestamps)
}
}
// A user-supplied dateFormat on write must control the serialized form of DateType columns.
test("Write dates correctly with dateFormat option") {
val customSchema = new StructType(Array(StructField("date", DateType, true)))
withTempDir { dir =>
// With dateFormat option.
val datesWithFormatPath = s"${dir.getCanonicalPath}/datesWithFormat.csv"
val datesWithFormat = spark.read
.format("csv")
.schema(customSchema)
.option("header", "true")
.option("dateFormat", "dd/MM/yyyy HH:mm")
.load(testFile(datesFile))
datesWithFormat.write
.format("csv")
.option("header", "true")
.option("dateFormat", "yyyy/MM/dd")
.save(datesWithFormatPath)
// This will load back the dates as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val stringDatesWithFormat = spark.read
.format("csv")
.schema(stringSchema)
.option("header", "true")
.load(datesWithFormatPath)
val expectedStringDatesWithFormat = Seq(
Row("2015/08/26"),
Row("2014/10/27"),
Row("2016/01/28"))
checkAnswer(stringDatesWithFormat, expectedStringDatesWithFormat)
}
}
// A user-supplied timestampFormat on write must control the serialized form of timestamps.
test("Write timestamps correctly with timestampFormat option") {
withTempDir { dir =>
// With timestampFormat option.
val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.csv"
val timestampsWithFormat = spark.read
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.option("timestampFormat", "dd/MM/yyyy HH:mm")
.load(testFile(datesFile))
timestampsWithFormat.write
.format("csv")
.option("header", "true")
.option("timestampFormat", "yyyy/MM/dd HH:mm")
.save(timestampsWithFormatPath)
// This will load back the timestamps as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val stringTimestampsWithFormat = spark.read
.format("csv")
.schema(stringSchema)
.option("header", "true")
.load(timestampsWithFormatPath)
val expectedStringTimestampsWithFormat = Seq(
Row("2015/08/26 18:00"),
Row("2014/10/27 18:30"),
Row("2016/01/28 20:00"))
checkAnswer(stringTimestampsWithFormat, expectedStringTimestampsWithFormat)
}
}
// The timeZone option must shift written timestamp text (expected rows below are the
// input times rendered in UTC), and reading back with the same options must be lossless.
test("Write timestamps correctly with timestampFormat option and timeZone option") {
withTempDir { dir =>
// With timestampFormat option and timeZone option.
val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.csv"
val timestampsWithFormat = spark.read
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.option("timestampFormat", "dd/MM/yyyy HH:mm")
.load(testFile(datesFile))
timestampsWithFormat.write
.format("csv")
.option("header", "true")
.option("timestampFormat", "yyyy/MM/dd HH:mm")
.option(DateTimeUtils.TIMEZONE_OPTION, "UTC")
.save(timestampsWithFormatPath)
// This will load back the timestamps as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val stringTimestampsWithFormat = spark.read
.format("csv")
.schema(stringSchema)
.option("header", "true")
.load(timestampsWithFormatPath)
val expectedStringTimestampsWithFormat = Seq(
Row("2015/08/27 01:00"),
Row("2014/10/28 01:30"),
Row("2016/01/29 04:00"))
checkAnswer(stringTimestampsWithFormat, expectedStringTimestampsWithFormat)
// Round-trip: reading with the same format and timeZone must recover the original rows.
val readBack = spark.read
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.option("timestampFormat", "yyyy/MM/dd HH:mm")
.option(DateTimeUtils.TIMEZONE_OPTION, "UTC")
.load(timestampsWithFormatPath)
checkAnswer(readBack, timestampsWithFormat)
}
}
// Duplicate header names get positional suffixes; with case sensitivity on, "a" and "A"
// are distinct, so only the exact duplicates ("a") are renamed.
test("load duplicated field names consistently with null or empty strings - case sensitive") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
withTempPath { path =>
Seq("a,a,c,A,b,B").toDF().write.text(path.getAbsolutePath)
val actualSchema = spark.read
.format("csv")
.option("header", true)
.load(path.getAbsolutePath)
.schema
val fields = Seq("a0", "a1", "c", "A", "b", "B").map(StructField(_, StringType, true))
val expectedSchema = StructType(fields)
assert(actualSchema == expectedSchema)
}
}
}
// With case sensitivity off, "a"/"A" and "b"/"B" collide, so all colliding columns
// receive positional suffixes (original casing preserved).
test("load duplicated field names consistently with null or empty strings - case insensitive") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
withTempPath { path =>
Seq("a,A,c,A,b,B").toDF().write.text(path.getAbsolutePath)
val actualSchema = spark.read
.format("csv")
.option("header", true)
.load(path.getAbsolutePath)
.schema
val fields = Seq("a0", "A1", "c", "A3", "b4", "B5").map(StructField(_, StringType, true))
val expectedSchema = StructType(fields)
assert(actualSchema == expectedSchema)
}
}
}
// A row with fewer tokens than the schema must be padded with nulls, not rejected.
test("load null when the schema is larger than parsed tokens ") {
withTempPath { path =>
Seq("1").toDF().write.text(path.getAbsolutePath)
val schema = StructType(
StructField("a", IntegerType, true) ::
StructField("b", IntegerType, true) :: Nil)
val df = spark.read
.schema(schema)
.option("header", "false")
.csv(path.getAbsolutePath)
checkAnswer(df, Row(1, null))
}
}
// Malformed rows must land in the configured columnNameOfCorruptRecord field
// (wherever that field sits in the schema), and the corrupt-record field itself
// must be a nullable string. Exercised in both single-line and multiLine modes.
test("SPARK-18699 put malformed records in a `columnNameOfCorruptRecord` field") {
Seq(false, true).foreach { multiLine =>
val schema = new StructType().add("a", IntegerType).add("b", DateType)
// We use `PERMISSIVE` mode by default if invalid string is given.
val df1 = spark
.read
.option("mode", "abcd")
.option("multiLine", multiLine)
.schema(schema)
.csv(testFile(valueMalformedFile))
checkAnswer(df1,
Row(0, null) ::
Row(1, java.sql.Date.valueOf("1983-08-04")) ::
Nil)
// If `schema` has `columnNameOfCorruptRecord`, it should handle corrupt records
val columnNameOfCorruptRecord = "_unparsed"
val schemaWithCorrField1 = schema.add(columnNameOfCorruptRecord, StringType)
val df2 = spark
.read
.option("mode", "Permissive")
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.option("multiLine", multiLine)
.schema(schemaWithCorrField1)
.csv(testFile(valueMalformedFile))
checkAnswer(df2,
Row(0, null, "0,2013-111_11 12:13:14") ::
Row(1, java.sql.Date.valueOf("1983-08-04"), null) ::
Nil)
// We put a `columnNameOfCorruptRecord` field in the middle of a schema
val schemaWithCorrField2 = new StructType()
.add("a", IntegerType)
.add(columnNameOfCorruptRecord, StringType)
.add("b", DateType)
val df3 = spark
.read
.option("mode", "permissive")
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.option("multiLine", multiLine)
.schema(schemaWithCorrField2)
.csv(testFile(valueMalformedFile))
checkAnswer(df3,
Row(0, "0,2013-111_11 12:13:14", null) ::
Row(1, null, java.sql.Date.valueOf("1983-08-04")) ::
Nil)
// Declaring the corrupt-record field as a non-string type must be rejected.
val errMsg = intercept[AnalysisException] {
spark
.read
.option("mode", "PERMISSIVE")
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.option("multiLine", multiLine)
.schema(schema.add(columnNameOfCorruptRecord, IntegerType))
.csv(testFile(valueMalformedFile))
.collect
}.getMessage
assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))
}
}
// A truncated gzip file must fail the read when IGNORE_CORRUPT_FILES is off and be
// silently skipped (empty result) when it is on.
test("Enabling/disabling ignoreCorruptFiles") {
val inputFile = File.createTempFile("input-", ".gz")
try {
// Create a corrupt gzip file
val byteOutput = new ByteArrayOutputStream()
val gzip = new GZIPOutputStream(byteOutput)
try {
gzip.write(Array[Byte](1, 2, 3, 4))
} finally {
gzip.close()
}
val bytes = byteOutput.toByteArray
val o = new FileOutputStream(inputFile)
try {
// It's corrupt since we only write half of bytes into the file.
o.write(bytes.take(bytes.length / 2))
} finally {
o.close()
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "false") {
val e = intercept[SparkException] {
spark.read.csv(inputFile.toURI.toString).collect()
}
assert(e.getCause.isInstanceOf[EOFException])
assert(e.getCause.getMessage === "Unexpected end of input stream")
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "true") {
assert(spark.read.csv(inputFile.toURI.toString).collect().isEmpty)
}
} finally {
inputFile.delete()
}
}
// multiLine mode must preserve embedded newlines both in quoted header names and in
// quoted field values.
test("SPARK-19610: Parse normal multi-line CSV files") {
val primitiveFieldAndType = Seq(
""""
|string","integer
|
|
|","long
|
|","bigInteger",double,boolean,null""".stripMargin,
""""this is a
|simple
|string.","
|
|10","
|21474836470","92233720368547758070","
|
|1.7976931348623157E308",true,""".stripMargin)
withTempPath { path =>
primitiveFieldAndType.toDF("value").coalesce(1).write.text(path.getAbsolutePath)
val df = spark.read
.option("header", true)
.option("multiLine", true)
.csv(path.getAbsolutePath)
// Check if headers have new lines in the names.
val actualFields = df.schema.fieldNames.toSeq
val expectedFields =
Seq("\\nstring", "integer\\n\\n\\n", "long\\n\\n", "bigInteger", "double", "boolean", "null")
assert(actualFields === expectedFields)
// Check if the rows have new lines in the values.
val expected = Row(
"this is a\\nsimple\\nstring.",
"\\n\\n10",
"\\n21474836470",
"92233720368547758070",
"\\n\\n1.7976931348623157E308",
"true",
null)
checkAnswer(df, expected)
}
}
// An empty input file yields an empty DataFrame with an empty schema in both parsing modes.
test("Empty file produces empty dataframe with empty schema") {
Seq(false, true).foreach { multiLine =>
val df = spark.read.format("csv")
.option("header", true)
.option("multiLine", multiLine)
.load(testFile(emptyFile))
assert(df.schema === spark.emptyDataFrame.schema)
checkAnswer(df, spark.emptyDataFrame)
}
}
// An empty string dataset yields an empty DataFrame; a user-defined schema is kept as-is.
test("Empty string dataset produces empty dataframe and keep user-defined schema") {
val df1 = spark.read.csv(spark.emptyDataset[String])
assert(df1.schema === spark.emptyDataFrame.schema)
checkAnswer(df1, spark.emptyDataFrame)
val schema = StructType(StructField("a", StringType) :: Nil)
val df2 = spark.read.schema(schema).csv(spark.emptyDataset[String])
assert(df2.schema === schema)
}
// Each leading/trailing whitespace-trimming flag must act independently on read.
test("ignoreLeadingWhiteSpace and ignoreTrailingWhiteSpace options - read") {
val input = " a,b , c "
// For reading, default of both `ignoreLeadingWhiteSpace` and`ignoreTrailingWhiteSpace`
// are `false`. So, these are excluded.
val combinations = Seq(
(true, true),
(false, true),
(true, false))
// Check if read rows ignore whitespaces as configured.
val expectedRows = Seq(
Row("a", "b", "c"),
Row(" a", "b", " c"),
Row("a", "b ", "c "))
combinations.zip(expectedRows)
.foreach { case ((ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace), expected) =>
val df = spark.read
.option("ignoreLeadingWhiteSpace", ignoreLeadingWhiteSpace)
.option("ignoreTrailingWhiteSpace", ignoreTrailingWhiteSpace)
.csv(Seq(input).toDS())
checkAnswer(df, expected)
}
}
// Each whitespace-trimming flag must act independently on write; verified by reading
// the raw written lines back as text.
test("SPARK-18579: ignoreLeadingWhiteSpace and ignoreTrailingWhiteSpace options - write") {
val df = Seq((" a", "b ", " c ")).toDF()
// For writing, default of both `ignoreLeadingWhiteSpace` and `ignoreTrailingWhiteSpace`
// are `true`. So, these are excluded.
val combinations = Seq(
(false, false),
(false, true),
(true, false))
// Check if written lines ignore each whitespaces as configured.
val expectedLines = Seq(
" a,b , c ",
" a,b, c",
"a,b ,c ")
combinations.zip(expectedLines)
.foreach { case ((ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace), expected) =>
withTempPath { path =>
df.write
.option("ignoreLeadingWhiteSpace", ignoreLeadingWhiteSpace)
.option("ignoreTrailingWhiteSpace", ignoreTrailingWhiteSpace)
.csv(path.getAbsolutePath)
// Read back the written lines.
val readBack = spark.read.text(path.getAbsolutePath)
checkAnswer(readBack, Row(expected))
}
}
}
// Unparsable numeric text: FAILFAST raises, DROPMALFORMED drops the row,
// PERMISSIVE yields null for the bad cell.
test("SPARK-21263: Invalid float and double are handled correctly in different modes") {
val exception = intercept[SparkException] {
spark.read.schema("a DOUBLE")
.option("mode", "FAILFAST")
.csv(Seq("10u12").toDS())
.collect()
}
assert(exception.getMessage.contains("""input string: "10u12""""))
val count = spark.read.schema("a FLOAT")
.option("mode", "DROPMALFORMED")
.csv(Seq("10u12").toDS())
.count()
assert(count == 0)
val results = spark.read.schema("a FLOAT")
.option("mode", "PERMISSIVE")
.csv(Seq("10u12").toDS())
checkAnswer(results, Row(null))
}
// When a row has fewer tokens than the schema, the corrupt-record column still
// captures the full raw input line.
test("SPARK-20978: Fill the malformed column when the number of tokens is less than schema") {
val df = spark.read
.schema("a string, b string, unparsed string")
.option("columnNameOfCorruptRecord", "unparsed")
.csv(Seq("a").toDS())
checkAnswer(df, Row("a", null, "a"))
}
// Selecting only the internal corrupt-record column from a file-based source is
// disallowed; caching the DataFrame first is the documented workaround.
test("SPARK-21610: Corrupt records are not handled properly when creating a dataframe " +
"from a file") {
val columnNameOfCorruptRecord = "_corrupt_record"
val schema = new StructType()
.add("a", IntegerType)
.add("b", DateType)
.add(columnNameOfCorruptRecord, StringType)
// negative cases
val msg = intercept[AnalysisException] {
spark
.read
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.schema(schema)
.csv(testFile(valueMalformedFile))
.select(columnNameOfCorruptRecord)
.collect()
}.getMessage
assert(msg.contains("only include the internal corrupt record column"))
// workaround: cache() materializes all columns, after which the corrupt-record
// column can be selected on its own.
val df = spark
.read
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.schema(schema)
.csv(testFile(valueMalformedFile))
.cache()
assert(df.filter($"_corrupt_record".isNotNull).count() == 1)
assert(df.filter($"_corrupt_record".isNull).count() == 1)
checkAnswer(
df.select(columnNameOfCorruptRecord),
Row("0,2013-111_11 12:13:14") :: Row(null) :: Nil
)
}
// samplingRatio < 1.0 must still infer the right schema, including when the "path"
// option duplicates the load path (SPARK-32621 regression).
test("SPARK-23846: schema inferring touches less data if samplingRatio < 1.0") {
// Set default values for the DataSource parameters to make sure
// that whole test file is mapped to only one partition. This will guarantee
// reliable sampling of the input file.
withSQLConf(
SQLConf.FILES_MAX_PARTITION_BYTES.key -> (128 * 1024 * 1024).toString,
SQLConf.FILES_OPEN_COST_IN_BYTES.key -> (4 * 1024 * 1024).toString
)(withTempPath { path =>
val ds = sampledTestData.coalesce(1)
ds.write.text(path.getAbsolutePath)
val readback1 = spark.read
.option("inferSchema", true).option("samplingRatio", 0.1)
.csv(path.getCanonicalPath)
assert(readback1.schema == new StructType().add("_c0", IntegerType))
withClue("SPARK-32621: 'path' option can cause issues while inferring schema") {
// During infer, "path" option gets added again to the paths that have already been listed.
// This results in reading more data than necessary and causes different schema to be
// inferred when sampling ratio is involved.
val readback2 = spark.read
.option("inferSchema", true).option("samplingRatio", 0.1)
.option("path", path.getCanonicalPath)
.format("csv")
.load
assert(readback2.schema == new StructType().add("_c0", IntegerType))
}
})
}
// Sampling a fraction of an in-memory string dataset must still infer the integer column.
test("SPARK-23846: usage of samplingRatio while parsing a dataset of strings") {
  val sampled = spark.read
    .option("samplingRatio", 0.1)
    .option("inferSchema", true)
    .csv(sampledTestData.coalesce(1))
  assert(sampled.schema == new StructType().add("_c0", IntegerType))
}
// samplingRatio must lie in (0, 1.0]: non-positive values are rejected with a clear
// message, and 1.0 samples everything.
test("SPARK-23846: samplingRatio is out of the range (0, 1.0]") {
val ds = spark.range(0, 100, 1, 1).map(_.toString)
val errorMsg0 = intercept[IllegalArgumentException] {
spark.read.option("inferSchema", true).option("samplingRatio", -1).csv(ds)
}.getMessage
assert(errorMsg0.contains("samplingRatio (-1.0) should be greater than 0"))
val errorMsg1 = intercept[IllegalArgumentException] {
spark.read.option("inferSchema", true).option("samplingRatio", 0).csv(ds)
}.getMessage
assert(errorMsg1.contains("samplingRatio (0.0) should be greater than 0"))
val sampled = spark.read.option("inferSchema", true).option("samplingRatio", 1.0).csv(ds)
assert(sampled.count() == ds.count())
}
// With a non-empty nullValue sentinel, empty strings round-trip as empty strings
// (not null); without it, the legacy behavior coerces empty strings to null.
test("SPARK-17916: An empty string should not be coerced to null when nullValue is passed.") {
val litNull: String = null
val df = Seq(
(1, "John Doe"),
(2, ""),
(3, "-"),
(4, litNull)
).toDF("id", "name")
// Checks for new behavior where an empty string is not coerced to null when `nullValue` is
// set to anything but an empty string literal.
withTempPath { path =>
df.write
.option("nullValue", "-")
.csv(path.getAbsolutePath)
val computed = spark.read
.option("nullValue", "-")
.schema(df.schema)
.csv(path.getAbsolutePath)
val expected = Seq(
(1, "John Doe"),
(2, ""),
(3, litNull),
(4, litNull)
).toDF("id", "name")
checkAnswer(computed, expected)
}
// Keeps the old behavior where an empty string is coerced to null when `nullValue` is
// not passed.
withTempPath { path =>
df.write
.csv(path.getAbsolutePath)
val computed = spark.read
.schema(df.schema)
.csv(path.getAbsolutePath)
val expected = Seq(
(1, "John Doe"),
(2, litNull),
(3, "-"),
(4, litNull)
).toDF("id", "name")
checkAnswer(computed, expected)
}
}
// With a non-empty emptyValue sentinel, empty (and null) cells are written as the
// sentinel and read back as the sentinel; without it, empty strings become null.
test("SPARK-25241: An empty string should not be coerced to null when emptyValue is passed.") {
val litNull: String = null
val df = Seq(
(1, "John Doe"),
(2, ""),
(3, "-"),
(4, litNull)
).toDF("id", "name")
// Checks for new behavior where a null is not coerced to an empty string when `emptyValue` is
// set to anything but an empty string literal.
withTempPath { path =>
df.write
.option("emptyValue", "-")
.csv(path.getAbsolutePath)
val computed = spark.read
.option("emptyValue", "-")
.schema(df.schema)
.csv(path.getAbsolutePath)
val expected = Seq(
(1, "John Doe"),
(2, "-"),
(3, "-"),
(4, "-")
).toDF("id", "name")
checkAnswer(computed, expected)
}
// Keeps the old behavior where an empty string is coerced to null when `emptyValue` is
// not passed.
withTempPath { path =>
df.write
.csv(path.getAbsolutePath)
val computed = spark.read
.schema(df.schema)
.csv(path.getAbsolutePath)
val expected = Seq(
(1, "John Doe"),
(2, litNull),
(3, "-"),
(4, litNull)
).toDF("id", "name")
checkAnswer(computed, expected)
}
}
// Comment lines (prefixed '#'), including those preceded by whitespace, are skipped
// while whitespace inside data values is preserved when trimming is disabled.
test("SPARK-24329: skip lines with comments, and one or multiple whitespaces") {
val schema = new StructType().add("colA", StringType)
val ds = spark
.read
.schema(schema)
.option("multiLine", false)
.option("header", true)
.option("comment", "#")
.option("ignoreLeadingWhiteSpace", false)
.option("ignoreTrailingWhiteSpace", false)
.csv(testFile("test-data/comments-whitespaces.csv"))
checkAnswer(ds, Seq(Row(""" "a" """)))
}
// Projecting a scattered subset of a wide schema must return the right columns.
test("SPARK-24244: Select a subset of all columns") {
withTempPath { path =>
import collection.JavaConverters._
val schema = new StructType()
.add("f1", IntegerType).add("f2", IntegerType).add("f3", IntegerType)
.add("f4", IntegerType).add("f5", IntegerType).add("f6", IntegerType)
.add("f7", IntegerType).add("f8", IntegerType).add("f9", IntegerType)
.add("f10", IntegerType).add("f11", IntegerType).add("f12", IntegerType)
.add("f13", IntegerType).add("f14", IntegerType).add("f15", IntegerType)
val odf = spark.createDataFrame(List(
Row(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),
Row(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15)
).asJava, schema)
odf.write.csv(path.getCanonicalPath)
val idf = spark.read
.schema(schema)
.csv(path.getCanonicalPath)
.select('f15, 'f10, 'f5)
assert(idf.count() == 2)
checkAnswer(idf, List(Row(15, 10, 5), Row(-15, -10, -5)))
}
}
// Shared body for the SPARK-23786 enforceSchema=false tests: with a header present,
// a user schema whose names, arity, or case differ from the file header must fail
// the read with a descriptive error. Parameterized on multiLine parsing mode.
def checkHeader(multiLine: Boolean): Unit = {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
withTempPath { path =>
val oschema = new StructType().add("f1", DoubleType).add("f2", DoubleType)
val odf = spark.createDataFrame(List(Row(1.0, 1234.5)).asJava, oschema)
odf.write.option("header", true).csv(path.getCanonicalPath)
// Same arity but swapped column names: mismatch error.
val ischema = new StructType().add("f2", DoubleType).add("f1", DoubleType)
val exception = intercept[SparkException] {
spark.read
.schema(ischema)
.option("multiLine", multiLine)
.option("header", true)
.option("enforceSchema", false)
.csv(path.getCanonicalPath)
.collect()
}
assert(exception.getMessage.contains("CSV header does not conform to the schema"))
// Fewer fields than the header: arity error.
val shortSchema = new StructType().add("f1", DoubleType)
val exceptionForShortSchema = intercept[SparkException] {
spark.read
.schema(shortSchema)
.option("multiLine", multiLine)
.option("header", true)
.option("enforceSchema", false)
.csv(path.getCanonicalPath)
.collect()
}
assert(exceptionForShortSchema.getMessage.contains(
"Number of column in CSV header is not equal to number of fields in the schema"))
// More fields than the header: arity error with sizes reported.
val longSchema = new StructType()
.add("f1", DoubleType)
.add("f2", DoubleType)
.add("f3", DoubleType)
val exceptionForLongSchema = intercept[SparkException] {
spark.read
.schema(longSchema)
.option("multiLine", multiLine)
.option("header", true)
.option("enforceSchema", false)
.csv(path.getCanonicalPath)
.collect()
}
assert(exceptionForLongSchema.getMessage.contains("Header length: 2, schema size: 3"))
// Case-only difference counts as a mismatch because CASE_SENSITIVE is on here.
val caseSensitiveSchema = new StructType().add("F1", DoubleType).add("f2", DoubleType)
val caseSensitiveException = intercept[SparkException] {
spark.read
.schema(caseSensitiveSchema)
.option("multiLine", multiLine)
.option("header", true)
.option("enforceSchema", false)
.csv(path.getCanonicalPath)
.collect()
}
assert(caseSensitiveException.getMessage.contains(
"CSV header does not conform to the schema"))
}
}
}
// Run the shared header-vs-schema checks in both parsing modes.
test(s"SPARK-23786: Checking column names against schema in the multiline mode") {
checkHeader(multiLine = true)
}
test(s"SPARK-23786: Checking column names against schema in the per-line mode") {
checkHeader(multiLine = false)
}
// With header=false there is no header to validate, so enforceSchema=false must not
// reject a schema whose names differ from positional defaults.
test("SPARK-23786: CSV header must not be checked if it doesn't exist") {
withTempPath { path =>
val oschema = new StructType().add("f1", DoubleType).add("f2", DoubleType)
val odf = spark.createDataFrame(List(Row(1.0, 1234.5)).asJava, oschema)
odf.write.option("header", false).csv(path.getCanonicalPath)
val ischema = new StructType().add("f2", DoubleType).add("f1", DoubleType)
val idf = spark.read
.schema(ischema)
.option("header", false)
.option("enforceSchema", false)
.csv(path.getCanonicalPath)
checkAnswer(idf, odf)
}
}
// With CASE_SENSITIVE off, header name "A" matches schema name "a".
test("SPARK-23786: Ignore column name case if spark.sql.caseSensitive is false") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
withTempPath { path =>
val oschema = new StructType().add("A", StringType)
val odf = spark.createDataFrame(List(Row("0")).asJava, oschema)
odf.write.option("header", true).csv(path.getCanonicalPath)
val ischema = new StructType().add("a", StringType)
val idf = spark.read.schema(ischema)
.option("header", true)
.option("enforceSchema", false)
.csv(path.getCanonicalPath)
checkAnswer(idf, odf)
}
}
}
// Header validation also applies when parsing an in-memory dataset of strings;
// here the failure surfaces eagerly as IllegalArgumentException rather than
// a SparkException at collect time.
test("SPARK-23786: check header on parsing of dataset of strings") {
val ds = Seq("columnA,columnB", "1.0,1000.0").toDS()
val ischema = new StructType().add("columnB", DoubleType).add("columnA", DoubleType)
val exception = intercept[IllegalArgumentException] {
spark.read.schema(ischema).option("header", true).option("enforceSchema", false).csv(ds)
}
assert(exception.getMessage.contains("CSV header does not conform to the schema"))
}
// enforceSchema=false with an inferred schema accepts matching headers; a user
// schema overrides inference, and a user schema whose names differ from the header fails.
test("SPARK-23786: enforce inferred schema") {
val expectedSchema = new StructType().add("_c0", DoubleType).add("_c1", StringType)
val withHeader = spark.read
.option("inferSchema", true)
.option("enforceSchema", false)
.option("header", true)
.csv(Seq("_c0,_c1", "1.0,a").toDS())
assert(withHeader.schema == expectedSchema)
checkAnswer(withHeader, Seq(Row(1.0, "a")))
// Ignore the inferSchema flag if an user sets a schema
val schema = new StructType().add("colA", DoubleType).add("colB", StringType)
val ds = spark.read
.option("inferSchema", true)
.option("enforceSchema", false)
.option("header", true)
.schema(schema)
.csv(Seq("colA,colB", "1.0,a").toDS())
assert(ds.schema == schema)
checkAnswer(ds, Seq(Row(1.0, "a")))
// Header "col1,col2" does not match the user schema names, so the read is rejected.
val exception = intercept[IllegalArgumentException] {
spark.read
.option("inferSchema", true)
.option("enforceSchema", false)
.option("header", true)
.schema(schema)
.csv(Seq("col1,col2", "1.0,a").toDS())
}
assert(exception.getMessage.contains("CSV header does not conform to the schema"))
}
// With enforceSchema=true a header/schema mismatch must not fail the read, but a
// warning must be logged; verified by capturing log output with a test appender.
test("SPARK-23786: warning should be printed if CSV header doesn't conform to schema") {
val testAppender1 = new LogAppender("CSV header matches to schema")
withLogAppender(testAppender1) {
val ds = Seq("columnA,columnB", "1.0,1000.0").toDS()
val ischema = new StructType().add("columnB", DoubleType).add("columnA", DoubleType)
spark.read.schema(ischema).option("header", true).option("enforceSchema", true).csv(ds)
}
assert(testAppender1.loggingEvents
.exists(msg => msg.getRenderedMessage.contains("CSV header does not conform to the schema")))
// Same expectation for a file-based read.
val testAppender2 = new LogAppender("CSV header matches to schema w/ enforceSchema")
withLogAppender(testAppender2) {
withTempPath { path =>
val oschema = new StructType().add("f1", DoubleType).add("f2", DoubleType)
val odf = spark.createDataFrame(List(Row(1.0, 1234.5)).asJava, oschema)
odf.write.option("header", true).csv(path.getCanonicalPath)
val ischema = new StructType().add("f2", DoubleType).add("f1", DoubleType)
spark.read
.schema(ischema)
.option("header", true)
.option("enforceSchema", true)
.csv(path.getCanonicalPath)
.collect()
}
}
assert(testAppender2.loggingEvents
.exists(msg => msg.getRenderedMessage.contains("CSV header does not conform to the schema")))
}
// Header validation must still work when column pruning narrows the parsed schema
// (single-column projection and count-only/empty projection), in both parsing modes.
test("SPARK-25134: check header on parsing of dataset with projection and column pruning") {
withSQLConf(SQLConf.CSV_PARSER_COLUMN_PRUNING.key -> "true") {
Seq(false, true).foreach { multiLine =>
withTempPath { path =>
val dir = path.getAbsolutePath
Seq(("a", "b")).toDF("columnA", "columnB").write
.format("csv")
.option("header", true)
.save(dir)
// schema with one column
checkAnswer(spark.read
.format("csv")
.option("header", true)
.option("enforceSchema", false)
.option("multiLine", multiLine)
.load(dir)
.select("columnA"),
Row("a"))
// empty schema
assert(spark.read
.format("csv")
.option("header", true)
.option("enforceSchema", false)
.option("multiLine", multiLine)
.load(dir)
.count() === 1L)
}
}
}
}
// When only partition columns are referenced, the CSV rows need not be parsed at all;
// the aggregate over partition values must still be correct.
test("SPARK-24645 skip parsing when columnPruning enabled and partitions scanned only") {
withSQLConf(SQLConf.CSV_PARSER_COLUMN_PRUNING.key -> "true") {
withTempPath { path =>
val dir = path.getAbsolutePath
spark.range(10).selectExpr("id % 2 AS p", "id").write.partitionBy("p").csv(dir)
checkAnswer(spark.read.csv(dir).selectExpr("sum(p)"), Row(5))
}
}
}
// With column pruning disabled, the full row is parsed and the required columns are
// projected afterwards; token-count/schema-size mismatches must still behave
// permissively (extra tokens dropped, missing tokens null-filled).
test("SPARK-24676 project required data from parsed data when columnPruning disabled") {
withSQLConf(SQLConf.CSV_PARSER_COLUMN_PRUNING.key -> "false") {
withTempPath { path =>
val dir = path.getAbsolutePath
spark.range(10).selectExpr("id % 2 AS p", "id AS c0", "id AS c1").write.partitionBy("p")
.option("header", "true").csv(dir)
val df1 = spark.read.option("header", true).csv(dir).selectExpr("sum(p)", "count(c0)")
checkAnswer(df1, Row(5, 10))
// empty required column case
val df2 = spark.read.option("header", true).csv(dir).selectExpr("sum(p)")
checkAnswer(df2, Row(5))
}
// the case where tokens length != parsedSchema length
withTempPath { path =>
val dir = path.getAbsolutePath
Seq("1,2").toDF().write.text(dir)
// more tokens
val df1 = spark.read.schema("c0 int").format("csv").option("mode", "permissive").load(dir)
checkAnswer(df1, Row(1))
// less tokens
val df2 = spark.read.schema("c0 int, c1 int, c2 int").format("csv")
.option("mode", "permissive").load(dir)
checkAnswer(df2, Row(1, 2, null))
}
}
}
// count() must not be inflated by malformed rows: in each two-row input exactly one
// row parses against the int schema, so each count is the valid-row count (2 across
// the mixed inputs: the valid "1" plus the row that still yields a record).
test("count() for malformed input") {
// Counts rows of `input` parsed under an int-typed single-column schema.
def countForMalformedCSV(expected: Long, input: Seq[String]): Unit = {
val schema = new StructType().add("a", IntegerType)
val strings = spark.createDataset(input)
val df = spark.read.schema(schema).option("header", false).csv(strings)
assert(df.count() == expected)
}
// Runs the count check over several valid/malformed row mixes.
def checkCount(expected: Long): Unit = {
val validRec = "1"
val inputs = Seq(
Seq("{-}", validRec),
Seq(validRec, "?"),
Seq("0xAC", validRec),
Seq(validRec, "0.314"),
Seq("\\\\\\\\\\\\", validRec)
)
inputs.foreach { input =>
countForMalformedCSV(expected, input)
}
}
checkCount(2)
// An empty-string dataset contributes no countable rows.
countForMalformedCSV(0, Seq(""))
}
// Binary/control-character garbage must parse to null rather than throwing an NPE,
// in both single-line and multiLine modes.
test("SPARK-25387: bad input should not cause NPE") {
val schema = StructType(StructField("a", IntegerType) :: Nil)
val input = spark.createDataset(Seq("\\u0001\\u0000\\u0001234"))
checkAnswer(spark.read.schema(schema).csv(input), Row(null))
checkAnswer(spark.read.option("multiLine", true).schema(schema).csv(input), Row(null))
assert(spark.read.schema(schema).csv(input).collect().toSet == Set(Row(null)))
}
// Same garbage input with a corrupt-record column configured: the raw text must be
// captured in that column instead of triggering an NPE.
test("SPARK-31261: bad csv input with `columnNameCorruptRecord` should not cause NPE") {
val schema = StructType(
StructField("a", IntegerType) :: StructField("_corrupt_record", StringType) :: Nil)
val input = spark.createDataset(Seq("\\u0001\\u0000\\u0001234"))
checkAnswer(
spark.read
.option("columnNameOfCorruptRecord", "_corrupt_record")
.schema(schema)
.csv(input),
Row(null, "\\u0001\\u0000\\u0001234"))
assert(spark.read.schema(schema).csv(input).collect().toSet ==
Set(Row(null, "\\u0001\\u0000\\u0001234")))
}
// Header/schema name checking only applies to user-supplied schemas; an inferred
// schema must never be compared against the first data row.
test("field names of inferred schema shouldn't compare to the first row") {
  val df = spark.read.option("enforceSchema", false).csv(Seq("1,2").toDS())
  checkAnswer(df, Row("1", "2"))
}
// A backslash delimiter (which needs escaping at the parser level) must work with a
// string schema, with inference, and with an explicit schema.
test("using the backward slash as the delimiter") {
val input = Seq("""abc\\1""").toDS()
val delimiter = """\\\\"""
checkAnswer(spark.read.option("delimiter", delimiter).csv(input), Row("abc", "1"))
checkAnswer(spark.read.option("inferSchema", true).option("delimiter", delimiter).csv(input),
Row("abc", 1))
val schema = new StructType().add("a", StringType).add("b", IntegerType)
checkAnswer(spark.read.schema(schema).option("delimiter", delimiter).csv(input), Row("abc", 1))
}
// The session-level spark.sql.columnNameOfCorruptRecord conf (not just the per-read
// option) must route malformed rows into the named column.
test("using spark.sql.columnNameOfCorruptRecord") {
withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
val csv = "\\""
val df = spark.read
.schema("a int, _unparsed string")
.csv(Seq(csv).toDS())
checkAnswer(df, Row(null, csv))
}
}
// Round-trip write/read in multiLine mode across a range of charsets, with and
// without a header row.
test("encoding in multiLine mode") {
val df = spark.range(3).toDF()
Seq("UTF-8", "ISO-8859-1", "CP1251", "US-ASCII", "UTF-16BE", "UTF-32LE").foreach { encoding =>
Seq(true, false).foreach { header =>
withTempPath { path =>
df.write
.option("encoding", encoding)
.option("header", header)
.csv(path.getCanonicalPath)
val readback = spark.read
.option("multiLine", true)
.option("encoding", encoding)
.option("inferSchema", true)
.option("header", header)
.csv(path.getCanonicalPath)
checkAnswer(readback, df)
}
}
}
}
// Without a lineSep option, CR, CRLF, and LF must all be recognized as record
// separators within the same file.
test("""Support line separator - default value \\r, \\r\\n and \\n""") {
val data = "\\"a\\",1\\r\\"c\\",2\\r\\n\\"d\\",3\\n"
withTempPath { path =>
Files.write(path.toPath, data.getBytes(StandardCharsets.UTF_8))
val df = spark.read.option("inferSchema", true).csv(path.getAbsolutePath)
val expectedSchema =
StructType(StructField("_c0", StringType) :: StructField("_c1", IntegerType) :: Nil)
checkAnswer(df, Seq(("a", 1), ("c", 2), ("d", 3)).toDF())
assert(df.schema === expectedSchema)
}
}
// Generates a test that exercises a custom lineSep in a given encoding for three
// paths: read (with/without trailing separator, both parsing modes, with either an
// inferred or an explicit schema), raw write output, and a full write/read round-trip.
def testLineSeparator(lineSep: String, encoding: String, inferSchema: Boolean, id: Int): Unit = {
test(s"Support line separator in ${encoding} #${id}") {
// Read
val data =
s""""a",1$lineSep
|c,2$lineSep"
|d",3""".stripMargin
val dataWithTrailingLineSep = s"$data$lineSep"
Seq(data, dataWithTrailingLineSep).foreach { lines =>
withTempPath { path =>
Files.write(path.toPath, lines.getBytes(encoding))
val schema = StructType(StructField("_c0", StringType)
:: StructField("_c1", LongType) :: Nil)
// Embedded newlines inside quoted fields survive because lineSep is custom.
val expected = Seq(("a", 1), ("\\nc", 2), ("\\nd", 3))
.toDF("_c0", "_c1")
Seq(false, true).foreach { multiLine =>
val reader = spark
.read
.option("lineSep", lineSep)
.option("multiLine", multiLine)
.option("encoding", encoding)
val df = if (inferSchema) {
reader.option("inferSchema", true).csv(path.getAbsolutePath)
} else {
reader.schema(schema).csv(path.getAbsolutePath)
}
checkAnswer(df, expected)
}
}
}
// Write: verify the raw bytes of the single output part file.
withTempPath { path =>
Seq("a", "b", "c").toDF("value").coalesce(1)
.write
.option("lineSep", lineSep)
.option("encoding", encoding)
.csv(path.getAbsolutePath)
val partFile = TestUtils.recursiveList(path).filter(f => f.getName.startsWith("part-")).head
val readBack = new String(Files.readAllBytes(partFile.toPath), encoding)
assert(
readBack === s"a${lineSep}b${lineSep}c${lineSep}")
}
// Roundtrip
withTempPath { path =>
val df = Seq("a", "b", "c").toDF()
df.write
.option("lineSep", lineSep)
.option("encoding", encoding)
.csv(path.getAbsolutePath)
val readBack = spark
.read
.option("lineSep", lineSep)
.option("encoding", encoding)
.csv(path.getAbsolutePath)
checkAnswer(df, readBack)
}
}
}
// scalastyle:off nonascii
List(
(0, "|", "UTF-8", false),
(1, "^", "UTF-16BE", true),
(2, ":", "ISO-8859-1", true),
(3, "!", "UTF-32LE", false),
(4, 0x1E.toChar.toString, "UTF-8", true),
(5, "아", "UTF-32BE", false),
(6, "у", "CP1251", true),
(8, "\\r", "UTF-16LE", true),
(9, "\\u000d", "UTF-32BE", false),
(10, "=", "US-ASCII", false),
(11, "$", "utf-32le", true)
).foreach { case (testNum, sep, encoding, inferSchema) =>
testLineSeparator(sep, encoding, inferSchema, testNum)
}
// scalastyle:on nonascii
// Invalid values for the "lineSep" option must be rejected with clear errors:
// empty strings and multi-character separators are both illegal.
test("lineSep restrictions") {
val errMsg1 = intercept[IllegalArgumentException] {
spark.read.option("lineSep", "").csv(testFile(carsFile)).collect
}.getMessage
assert(errMsg1.contains("'lineSep' cannot be an empty string"))
val errMsg2 = intercept[IllegalArgumentException] {
spark.read.option("lineSep", "123").csv(testFile(carsFile)).collect
}.getMessage
assert(errMsg2.contains("'lineSep' can contain only 1 character"))
}
// Writing an empty DataFrame with a header must still emit the header so the
// schema survives a read-back (no inference enabled on read).
test("SPARK-26208: write and read empty data to csv file with headers") {
withTempPath { path =>
val df1 = spark.range(10).repartition(2).filter(_ < 0).map(_.toString).toDF
// we have 2 partitions but they are both empty and will be filtered out upon writing
// thanks to SPARK-23271 one new empty partition will be inserted
df1.write.format("csv").option("header", true).save(path.getAbsolutePath)
val df2 = spark.read.format("csv").option("header", true).option("inferSchema", false)
.load(path.getAbsolutePath)
assert(df1.schema === df2.schema)
checkAnswer(df1, df2)
}
}
// A bad field in a later row must yield null, not silently reuse the value
// parsed from the previous (good) row.
test("Do not reuse last good value for bad input field") {
val schema = StructType(
StructField("col1", StringType) ::
StructField("col2", DateType) ::
Nil
)
val rows = spark.read
.schema(schema)
.format("csv")
.load(testFile(badAfterGoodFile))
val expectedRows = Seq(
Row("good record", java.sql.Date.valueOf("1999-08-01")),
Row("bad record", null))
checkAnswer(rows, expectedRows)
}
// "1,2" must not be inferred as a decimal with ',' as a grouping separator;
// it stays a string for backward compatibility.
test("SPARK-27512: Decimal type inference should not handle ',' for backward compatibility") {
assert(spark.read
.option("delimiter", "|")
.option("inferSchema", "true")
.csv(Seq("1,2").toDS).schema.head.dataType === StringType)
}
// enforceSchema=false combined with a corrupt-record column must still route
// unparsable rows into that column, on both v1 and v2 source paths.
test("SPARK-27873: disabling enforceSchema should not fail columnNameOfCorruptRecord") {
Seq("csv", "").foreach { reader =>
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> reader) {
withTempPath { path =>
val df = Seq(("0", "2013-111_11")).toDF("a", "b")
df.write
.option("header", "true")
.csv(path.getAbsolutePath)
val schema = StructType.fromDDL("a int, b date")
val columnNameOfCorruptRecord = "_unparsed"
val schemaWithCorrField = schema.add(columnNameOfCorruptRecord, StringType)
val readDF = spark
.read
.option("mode", "Permissive")
.option("header", "true")
.option("enforceSchema", false)
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.schema(schemaWithCorrField)
.csv(path.getAbsoluteFile.toString)
checkAnswer(readDF, Row(0, null, "0,2013-111_11") :: Nil)
}
}
}
}
// When a column exceeds maxCharsPerColumn, the resulting TextParsingException
// message must be truncated (indicated by "...") rather than echoing the
// whole oversized content.
test("SPARK-28431: prevent CSV datasource throw TextParsingException with large size message") {
withTempPath { path =>
val maxCharsPerCol = 10000
val str = "a" * (maxCharsPerCol + 1)
Files.write(
path.toPath,
str.getBytes(StandardCharsets.UTF_8),
StandardOpenOption.CREATE, StandardOpenOption.WRITE
)
val errMsg = intercept[TextParsingException] {
spark.read
.option("maxCharsPerColumn", maxCharsPerCol)
.csv(path.getAbsolutePath)
.count()
}.getMessage
assert(errMsg.contains("..."),
"expect the TextParsingException truncate the error content to be 1000 length.")
}
}
// With DROPMALFORMED, count() depends on column pruning: pruning skips full
// parsing so malformed rows are not dropped (4), without pruning they are (3).
test("SPARK-29101 test count with DROPMALFORMED mode") {
Seq((true, 4), (false, 3)).foreach { case (csvColumnPruning, expectedCount) =>
withSQLConf(SQLConf.CSV_PARSER_COLUMN_PRUNING.key -> csvColumnPruning.toString) {
val count = spark.read
.option("header", "true")
.option("mode", "DROPMALFORMED")
.csv(testFile(malformedRowFile))
.count()
assert(expectedCount == count)
}
}
}
// Microsecond-precision timestampFormat must be honored on read.
test("parse timestamp in microsecond precision") {
withTempPath { path =>
val t = "2019-11-14 20:35:30.123456"
Seq(t).toDF("t").write.text(path.getAbsolutePath)
val readback = spark.read
.schema("t timestamp")
.option("timestampFormat", "yyyy-MM-dd HH:mm:ss.SSSSSS")
.csv(path.getAbsolutePath)
checkAnswer(readback, Row(Timestamp.valueOf(t)))
}
}
// Microseconds must survive a full write/read round-trip as well.
test("Roundtrip in reading and writing timestamps in microsecond precision") {
withTempPath { path =>
val timestamp = Timestamp.valueOf("2019-11-18 11:56:00.123456")
Seq(timestamp).toDF("t")
.write
.option("timestampFormat", "yyyy-MM-dd HH:mm:ss.SSSSSS")
.csv(path.getAbsolutePath)
val readback = spark.read
.schema("t timestamp")
.option("timestampFormat", "yyyy-MM-dd HH:mm:ss.SSSSSS")
.csv(path.getAbsolutePath)
checkAnswer(readback, Row(timestamp))
}
}
// When partition-directory names overlap data columns (differing only in
// case), the partition values must win and case-insensitive selection works.
test("return correct results when data columns overlap with partition columns") {
withTempPath { path =>
val tablePath = new File(s"${path.getCanonicalPath}/cOl3=c/cOl1=a/cOl5=e")
val inputDF = Seq((1, 2, 3, 4, 5)).toDF("cOl1", "cOl2", "cOl3", "cOl4", "cOl5")
inputDF.write
.option("header", "true")
.csv(tablePath.getCanonicalPath)
val resultDF = spark.read
.option("header", "true")
.option("inferSchema", "true")
.csv(path.getCanonicalPath)
.select("CoL1", "Col2", "CoL5", "CoL3")
checkAnswer(resultDF, Row("a", 2, "e", "c"))
}
}
// Filter pushdown must return identical results across every combination of
// pushdown on/off, column pruning on/off, multiLine on/off, and parse mode.
test("filters push down") {
Seq(true, false).foreach { filterPushdown =>
Seq(true, false).foreach { columnPruning =>
withSQLConf(
SQLConf.CSV_FILTER_PUSHDOWN_ENABLED.key -> filterPushdown.toString,
SQLConf.CSV_PARSER_COLUMN_PRUNING.key -> columnPruning.toString) {
withTempPath { path =>
val t = "2019-12-17 00:01:02"
Seq(
"c0,c1,c2",
"abc,1,2019-11-14 20:35:30",
s"def,2,$t").toDF("data")
.repartition(1)
.write.text(path.getAbsolutePath)
Seq(true, false).foreach { multiLine =>
Seq("PERMISSIVE", "DROPMALFORMED", "FAILFAST").foreach { mode =>
val readback = spark.read
.option("mode", mode)
.option("header", true)
.option("timestampFormat", "yyyy-MM-dd HH:mm:ss")
.option("multiLine", multiLine)
.schema("c0 string, c1 integer, c2 timestamp")
.csv(path.getAbsolutePath)
.where($"c1" === 2)
.select($"c2")
// count() pushes down an empty required schema. This checks handling
// of a filter that refers to a field absent from that pruned schema.
assert(readback.count() === 1)
checkAnswer(readback, Row(Timestamp.valueOf(t)))
}
}
}
}
}
}
}
// In PERMISSIVE mode a malformed row keeps its raw text in the corrupt-record
// column; pushed-down filters must still evaluate correctly against it.
test("filters push down - malformed input in PERMISSIVE mode") {
val invalidTs = "2019-123_14 20:35:30"
val invalidRow = s"0,$invalidTs,999"
val validTs = "2019-12-14 20:35:30"
Seq(true, false).foreach { filterPushdown =>
withSQLConf(SQLConf.CSV_FILTER_PUSHDOWN_ENABLED.key -> filterPushdown.toString) {
withTempPath { path =>
Seq(
"c0,c1,c2",
invalidRow,
s"1,$validTs,999").toDF("data")
.repartition(1)
.write.text(path.getAbsolutePath)
// Helper: read with a corrupt-record column ("c3") and check the rows
// surviving the given filter condition.
def checkReadback(condition: Column, expected: Seq[Row]): Unit = {
val readback = spark.read
.option("mode", "PERMISSIVE")
.option("columnNameOfCorruptRecord", "c3")
.option("header", true)
.option("timestampFormat", "yyyy-MM-dd HH:mm:ss")
.schema("c0 integer, c1 timestamp, c2 integer, c3 string")
.csv(path.getAbsolutePath)
.where(condition)
.select($"c0", $"c1", $"c3")
checkAnswer(readback, expected)
}
checkReadback(
condition = $"c2" === 999,
expected = Seq(Row(0, null, invalidRow), Row(1, Timestamp.valueOf(validTs), null)))
checkReadback(
condition = $"c2" === 999 && $"c1" > "1970-01-01 00:00:00",
expected = Seq(Row(1, Timestamp.valueOf(validTs), null)))
}
}
}
}
// Rows that fail type conversion (missing/extra fields) must still be subject
// to pushed-down IS NULL filters rather than being skipped or mismatched.
test("SPARK-30530: apply filters to malformed rows") {
withSQLConf(SQLConf.CSV_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withTempPath { path =>
Seq(
"100.0,1.0,",
"200.0,,",
"300.0,3.0,",
"1.0,4.0,",
",4.0,",
"500.0,,",
",6.0,",
"-500.0,50.5").toDF("data")
.repartition(1)
.write.text(path.getAbsolutePath)
val schema = new StructType().add("floats", FloatType).add("more_floats", FloatType)
val readback = spark.read
.schema(schema)
.csv(path.getAbsolutePath)
.filter("floats is null")
checkAnswer(readback, Seq(Row(null, 4.0), Row(null, 6.0)))
}
}
}
// csv(Dataset[String]) must work even when the input column is not named
// "value" (here it is "a.text", which also contains a dot).
test("SPARK-30810: parses and convert a CSV Dataset having different column from 'value'") {
val ds = spark.range(2).selectExpr("concat('a,b,', id) AS `a.text`").as[String]
val csv = spark.read.option("header", true).option("inferSchema", true).csv(ds)
assert(csv.schema.fieldNames === Seq("a", "b", "0"))
checkAnswer(csv, Row("a", "b", 1))
}
// Loosely formatted date/timestamp strings (single-digit fields, trailing
// text) must still parse.
test("SPARK-30960: parse date/timestamp string with legacy format") {
val ds = Seq("2020-1-12 3:23:34.12, 2020-1-12 T").toDS()
val csv = spark.read.option("header", false).schema("t timestamp, d date").csv(ds)
checkAnswer(csv, Row(Timestamp.valueOf("2020-1-12 3:23:34.12"), Date.valueOf("2020-1-12")))
}
// The three LEGACY_TIME_PARSER_POLICY settings give three behaviors for a
// pattern only the legacy parser accepts: exception raises, legacy parses,
// corrected yields null.
test("exception mode for parsing date/timestamp string") {
val ds = Seq("2020-01-27T20:06:11.847-0800").toDS()
val csv = spark.read
.option("header", false)
.option("timestampFormat", "yyyy-MM-dd'T'HH:mm:ss.SSSz")
.schema("t timestamp").csv(ds)
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> "exception") {
val msg = intercept[SparkException] {
csv.collect()
}.getCause.getMessage
assert(msg.contains("Fail to parse"))
}
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> "legacy") {
checkAnswer(csv, Row(Timestamp.valueOf("2020-01-27 20:06:11.847")))
}
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> "corrected") {
checkAnswer(csv, Row(null))
}
}
// A column mixing integers and booleans must be inferred as StringType.
test("SPARK-32025: infer the schema from mixed-type values") {
withTempPath { path =>
Seq("col_mixed_types", "2012", "1997", "True").toDS.write.text(path.getCanonicalPath)
val df = spark.read.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.load(path.getCanonicalPath)
assert(df.schema.last == StructField("col_mixed_types", StringType, true))
}
}
// A leading NUL character must not be treated as a comment marker.
test("SPARK-32614: don't treat rows starting with null char as comment") {
withTempPath { path =>
Seq("\\u0000foo", "bar", "baz").toDS.write.text(path.getCanonicalPath)
val df = spark.read.format("csv")
.option("header", "false")
.option("inferSchema", "true")
.load(path.getCanonicalPath)
assert(df.count() == 3)
}
}
// Filter attribute names must resolve against the schema honoring the
// CASE_SENSITIVE setting: mismatched case works when insensitive, fails with
// an AnalysisException when sensitive.
test("case sensitivity of filters references") {
Seq(true, false).foreach { filterPushdown =>
withSQLConf(SQLConf.CSV_FILTER_PUSHDOWN_ENABLED.key -> filterPushdown.toString) {
withTempPath { path =>
Seq(
"""aaa,BBB""",
"""0,1""",
"""2,3""").toDF().repartition(1).write.text(path.getCanonicalPath)
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
val readback = spark.read.schema("aaa integer, BBB integer")
.option("header", true)
.csv(path.getCanonicalPath)
checkAnswer(readback, Seq(Row(2, 3), Row(0, 1)))
checkAnswer(readback.filter($"AAA" === 2 && $"bbb" === 3), Seq(Row(2, 3)))
}
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
val readback = spark.read.schema("aaa integer, BBB integer")
.option("header", true)
.csv(path.getCanonicalPath)
checkAnswer(readback, Seq(Row(2, 3), Row(0, 1)))
val errorMsg = intercept[AnalysisException] {
readback.filter($"AAA" === 2 && $"bbb" === 3).collect()
}.getMessage
assert(errorMsg.contains("cannot resolve 'AAA'"))
}
}
}
}
}
// Paths containing glob metacharacters must be readable once the
// metacharacters are escaped.
test("SPARK-32810: CSV data source should be able to read files with " +
"escaped glob metacharacter in the paths") {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
// test CSV writer / reader without specifying schema
val csvTableName = "[abc]"
spark.range(3).coalesce(1).write.csv(s"$basePath/$csvTableName")
// Escape every glob metacharacter in the directory name before reading.
val readback = spark.read
.csv(s"$basePath/${"""(\\[|\\]|\\{|\\})""".r.replaceAllIn(csvTableName, """\\\\$1""")}")
assert(readback.collect sameElements Array(Row("0"), Row("1"), Row("2")))
}
}
// unescapedQuoteHandling=STOP_AT_CLOSING_QUOTE must recover fields containing
// unescaped quotes and delimiters instead of mis-splitting them.
test("SPARK-33566: configure UnescapedQuoteHandling to parse " +
"unescaped quotes and unescaped delimiter data correctly") {
withTempPath { path =>
val dataPath = path.getCanonicalPath
val row1 = Row("""a,""b,c""", "xyz")
val row2 = Row("""a,b,c""", """x""yz""")
// Generate the test data, use `,` as delimiter and `"` as quotes, but they didn't escape.
Seq(
"""c1,c2""",
s""""${row1.getString(0)}","${row1.getString(1)}"""",
s""""${row2.getString(0)}","${row2.getString(1)}"""")
.toDF().repartition(1).write.text(dataPath)
// Without configure UnescapedQuoteHandling to STOP_AT_CLOSING_QUOTE,
// the result will be Row(""""a,""b""", """c""""), Row("""a,b,c""", """"x""yz"""")
val result = spark.read
.option("inferSchema", "true")
.option("header", "true")
.option("unescapedQuoteHandling", "STOP_AT_CLOSING_QUOTE")
.csv(dataPath).collect()
val exceptResults = Array(row1, row2)
assert(result.sameElements(exceptResults))
}
}
// A record longer than the parser's internal buffer must still count as one
// row when trailing whitespace trimming is enabled.
test("SPARK-34768: counting a long record with ignoreTrailingWhiteSpace set to true") {
val bufSize = 128
val line = "X" * (bufSize - 1) + "| |"
withTempPath { path =>
Seq(line).toDF.write.text(path.getAbsolutePath)
assert(spark.read.format("csv")
.option("delimiter", "|")
.option("ignoreTrailingWhiteSpace", "true").load(path.getAbsolutePath).count() == 1)
}
}
// A user-provided non-nullable schema is forced nullable so a missing field
// can be represented as null in every parse mode.
test("SPARK-35912: turn non-nullable schema into a nullable schema") {
val inputCSVString = """1,"""
val schema = StructType(Seq(
StructField("c1", IntegerType, nullable = false),
StructField("c2", IntegerType, nullable = false)))
val expected = schema.asNullable
Seq("DROPMALFORMED", "FAILFAST", "PERMISSIVE").foreach { mode =>
val csv = spark.createDataset(
spark.sparkContext.parallelize(inputCSVString:: Nil))(Encoders.STRING)
val df = spark.read
.option("mode", mode)
.schema(schema)
.csv(csv)
assert(df.schema == expected)
checkAnswer(df, Row(1, null) :: Nil)
}
}
// Without an explicit datetime pattern, parsing falls back to casting, which
// accepts partial dates and several timestamp shapes. Expected values differ
// for ts_ntz when the legacy parser policy is active.
test("SPARK-36536: use casting when datetime pattern is not set") {
def isLegacy: Boolean = {
spark.conf.get(SQLConf.LEGACY_TIME_PARSER_POLICY).toUpperCase(Locale.ROOT) ==
SQLConf.LegacyBehaviorPolicy.LEGACY.toString
}
withSQLConf(
SQLConf.DATETIME_JAVA8API_ENABLED.key -> "true",
SQLConf.SESSION_LOCAL_TIMEZONE.key -> DateTimeTestUtils.UTC.getId) {
withTempPath { path =>
Seq(
"""d,ts_ltz,ts_ntz""",
"""2021,2021,2021""",
"""2021-01,2021-01 ,2021-01""",
""" 2021-2-1,2021-3-02,2021-10-1""",
"""2021-8-18 00:00:00,2021-8-18 21:44:30Z,2021-8-18T21:44:30.123"""
).toDF().repartition(1).write.text(path.getCanonicalPath)
val readback = spark.read.schema("d date, ts_ltz timestamp_ltz, ts_ntz timestamp_ntz")
.option("header", true)
.csv(path.getCanonicalPath)
checkAnswer(
readback,
Seq(
Row(LocalDate.of(2021, 1, 1), Instant.parse("2021-01-01T00:00:00Z"),
if (isLegacy) null else LocalDateTime.of(2021, 1, 1, 0, 0, 0)),
Row(LocalDate.of(2021, 1, 1), Instant.parse("2021-01-01T00:00:00Z"),
if (isLegacy) null else LocalDateTime.of(2021, 1, 1, 0, 0, 0)),
Row(LocalDate.of(2021, 2, 1), Instant.parse("2021-03-02T00:00:00Z"),
if (isLegacy) null else LocalDateTime.of(2021, 10, 1, 0, 0, 0)),
Row(LocalDate.of(2021, 8, 18), Instant.parse("2021-08-18T21:44:30Z"),
if (isLegacy) null else LocalDateTime.of(2021, 8, 18, 21, 44, 30, 123000000))))
}
}
}
}
// Re-runs the whole CSV suite forcing the DataSource V1 code path.
class CSVv1Suite extends CSVSuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_LIST, "csv")
}
// Re-runs the whole CSV suite with an empty V1 source list, i.e. forcing the
// DataSource V2 code path.
class CSVv2Suite extends CSVSuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_LIST, "")
}
// Re-runs the whole CSV suite with the legacy date/time parser policy.
class CSVLegacyTimeParserSuite extends CSVSuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.LEGACY_TIME_PARSER_POLICY, "legacy")
}
| chuckchen/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala | Scala | apache-2.0 | 88,372 |
package org.camunda.feel.impl.builtin
import org.camunda.feel.impl.builtin.BuiltinFunction.builtinFunction
import org.camunda.feel.syntaxtree.{Val, ValBoolean, ValError, ValNull}
object BooleanBuiltinFunctions {

  /** Built-in boolean FEEL functions, keyed by function name. */
  def functions = Map(
    "not" -> List(notFunction),
    "is defined" -> List(isDefinedFunction)
  )

  // Three-valued negation: flips a boolean argument; any non-boolean
  // (including null or an error value) yields null.
  private def notFunction = builtinFunction(
    params = List("negand"),
    invoke = {
      case List(ValBoolean(negand)) => ValBoolean(!negand)
      case _                        => ValNull
    }
  )

  // A value counts as "defined" unless its evaluation produced an error.
  private def isDefinedFunction = builtinFunction(
    params = List("value"),
    invoke = {
      case List(_: ValError) => ValBoolean(false)
      case _                 => ValBoolean(true)
    }
  )
}
| camunda/feel-scala | src/main/scala/org/camunda/feel/impl/builtin/BooleanBuiltinFunctions.scala | Scala | apache-2.0 | 783 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.memory
import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable
import org.apache.spark.internal.Logging
/**
* Implements policies and bookkeeping for sharing an adjustable-sized pool of memory between tasks.
*
* Tries to ensure that each task gets a reasonable share of memory, instead of some task ramping up
* to a large amount first and then causing others to spill to disk repeatedly.
*
* If there are N tasks, it ensures that each task can acquire at least 1 / 2N of the memory
* before it has to spill, and at most 1 / N. Because N varies dynamically, we keep track of the
* set of active tasks and redo the calculations of 1 / 2N and 1 / N in waiting tasks whenever this
* set changes. This is all done by synchronizing access to mutable state and using wait() and
* notifyAll() to signal changes to callers. Prior to Spark 1.6, this arbitration of memory across
* tasks was performed by the ShuffleMemoryManager.
*
* @param lock a [[MemoryManager]] instance to synchronize on
* @param memoryMode the type of memory tracked by this pool (on- or off-heap)
*/
private[memory] class ExecutionMemoryPool(
lock: Object,
memoryMode: MemoryMode
) extends MemoryPool(lock) with Logging {
// Human-readable pool name, used only in log messages.
private[this] val poolName: String = memoryMode match {
case MemoryMode.ON_HEAP => "on-heap execution"
case MemoryMode.OFF_HEAP => "off-heap execution"
}
/**
* Map from taskAttemptId -> memory consumption in bytes
*/
@GuardedBy("lock")
private val memoryForTask = new mutable.HashMap[Long, Long]()
override def memoryUsed: Long = lock.synchronized {
memoryForTask.values.sum
}
/**
* Returns the memory consumption, in bytes, for the given task.
*/
def getMemoryUsageForTask(taskAttemptId: Long): Long = lock.synchronized {
memoryForTask.getOrElse(taskAttemptId, 0L)
}
/**
* Try to acquire up to `numBytes` of memory for the given task and return the number of bytes
* obtained, or 0 if none can be allocated.
*
* This call may block until there is enough free memory in some situations, to make sure each
* task has a chance to ramp up to at least 1 / 2N of the total memory pool (where N is the # of
* active tasks) before it is forced to spill. This can happen if the number of tasks increase
* but an older task had a lot of memory already.
*
* @param numBytes number of bytes to acquire
* @param taskAttemptId the task attempt acquiring memory
* @param maybeGrowPool a callback that potentially grows the size of this pool. It takes in
* one parameter (Long) that represents the desired amount of memory by
* which this pool should be expanded.
* @param computeMaxPoolSize a callback that returns the maximum allowable size of this pool
* at this given moment. This is not a field because the max pool
* size is variable in certain cases. For instance, in unified
* memory management, the execution pool can be expanded by evicting
* cached blocks, thereby shrinking the storage pool.
*
* @return the number of bytes granted to the task.
*/
private[memory] def acquireMemory(
numBytes: Long,
taskAttemptId: Long,
maybeGrowPool: Long => Unit = (additionalSpaceNeeded: Long) => (),
computeMaxPoolSize: () => Long = () => poolSize): Long = lock.synchronized {
assert(numBytes > 0, s"invalid number of bytes requested: $numBytes")
// TODO: clean up this clunky method signature
// Add this task to the taskMemory map just so we can keep an accurate count of the number
// of active tasks, to let other tasks ramp down their memory in calls to `acquireMemory`
if (!memoryForTask.contains(taskAttemptId)) {
memoryForTask(taskAttemptId) = 0L
// This will later cause waiting tasks to wake up and check numTasks again
lock.notifyAll()
}
// Keep looping until we're either sure that we don't want to grant this request (because this
// task would have more than 1 / numActiveTasks of the memory) or we have enough free
// memory to give it (we always let each task get at least 1 / (2 * numActiveTasks)).
// TODO: simplify this to limit each task to its own slot
while (true) {
val numActiveTasks = memoryForTask.keys.size
val curMem = memoryForTask(taskAttemptId)
// In every iteration of this loop, we should first try to reclaim any borrowed execution
// space from storage. This is necessary because of the potential race condition where new
// storage blocks may steal the free execution memory that this task was waiting for.
maybeGrowPool(numBytes - memoryFree)
// Maximum size the pool would have after potentially growing the pool.
// This is used to compute the upper bound of how much memory each task can occupy. This
// must take into account potential free memory as well as the amount this pool currently
// occupies. Otherwise, we may run into SPARK-12155 where, in unified memory management,
// we did not take into account space that could have been freed by evicting cached blocks.
val maxPoolSize = computeMaxPoolSize()
val maxMemoryPerTask = maxPoolSize / numActiveTasks
val minMemoryPerTask = poolSize / (2 * numActiveTasks)
// How much we can grant this task; keep its share within 0 <= X <= 1 / numActiveTasks
val maxToGrant = math.min(numBytes, math.max(0, maxMemoryPerTask - curMem))
// Only give it as much memory as is free, which might be none if it reached 1 / numTasks
val toGrant = math.min(maxToGrant, memoryFree)
// We want to let each task get at least 1 / (2 * numActiveTasks) before blocking;
// if we can't give it this much now, wait for other tasks to free up memory
// (this happens if older tasks allocated lots of memory before N grew)
if (toGrant < numBytes && curMem + toGrant < minMemoryPerTask) {
logInfo(s"TID $taskAttemptId waiting for at least 1/2N of $poolName pool to be free")
// Releases the monitor until another task calls notifyAll() in
// releaseMemory() or on first registration above.
lock.wait()
} else {
memoryForTask(taskAttemptId) += toGrant
return toGrant
}
}
0L // Never reached
}
/**
* Release `numBytes` of memory acquired by the given task.
*/
def releaseMemory(numBytes: Long, taskAttemptId: Long): Unit = lock.synchronized {
val curMem = memoryForTask.getOrElse(taskAttemptId, 0L)
// Clamp the release to what the task actually holds; releasing more than
// was acquired indicates a bookkeeping bug, so warn loudly.
val memoryToFree = if (curMem < numBytes) {
logWarning(
s"Internal error: release called on $numBytes bytes but task only has $curMem bytes " +
s"of memory from the $poolName pool")
curMem
} else {
numBytes
}
if (memoryForTask.contains(taskAttemptId)) {
memoryForTask(taskAttemptId) -= memoryToFree
// Drop the entry entirely once the task holds no memory, so it no longer
// counts toward numActiveTasks in acquireMemory().
if (memoryForTask(taskAttemptId) <= 0) {
memoryForTask.remove(taskAttemptId)
}
}
lock.notifyAll() // Notify waiters in acquireMemory() that memory has been freed
}
/**
* Release all memory for the given task and mark it as inactive (e.g. when a task ends).
* @return the number of bytes freed.
*/
def releaseAllMemoryForTask(taskAttemptId: Long): Long = lock.synchronized {
val numBytesToFree = getMemoryUsageForTask(taskAttemptId)
releaseMemory(numBytesToFree, taskAttemptId)
numBytesToFree
}
}
| goldmedal/spark | core/src/main/scala/org/apache/spark/memory/ExecutionMemoryPool.scala | Scala | apache-2.0 | 8,269 |
package pt.cnbc.wikimodels.client.snippet
import scala.xml.NodeSeq
import net.liftweb._
import http._
import SHtml._
import js._
import JsCmds._
import JE._
import util._
import Helpers._
// Lift snippet that renders an Ajax button which triggers a client-side
// jQuery popup when clicked.
class HelloWorld {
// Binds the snippet markup to an ajaxButton; the server-side callback
// returns raw JavaScript invoking $.popup.show on the client.
// NOTE(review): the popup body string 'A nice ' looks truncated — confirm
// the intended message text.
def button(in: NodeSeq) =
SHtml.ajaxButton(in,
() => JsRaw("$.popup.show('The title', 'A nice ');").cmd)
// Dead, commented-out alternative using $.blockUI, apparently kept from an
// earlier experiment; candidate for deletion once the popup approach is
// settled.
/*"+(
() => JsRaw("$.blockUI({ message: "+(
<h1>
Do you really want to destroy Rhode Island?
{
SHtml.ajaxButton("yes", () => {println("Rhode Island Destroyed"); JsRaw("$.unblockUI();").cmd})
}
<button onclick="$.unblockUI()">No</button>
</h1>).toString.encJs+
" });").cmd)*/
}
| alexmsmartins/WikiModels | wm_web_client/src/main/scala/pt/cnbc/wikimodels/client/snippet/HelloWorld.scala | Scala | mit | 716 |
package org.sparkpipe.util.io
import java.io.File
import org.sparkpipe.test.util.UnitTestSpec
/** Unit tests for local and Hadoop path handling in [[Paths]]. */
class PathsSpec extends UnitTestSpec {

  /** Valid local path specs must construct; malformed ones must be rejected. */
  test("Paths should create local path") {
    val cases = Seq(
      ("/home/temp/*.txt", true),
      ("home/temp/*.txt", true),
      ("file://home/temp/*.txt", true),
      ("file:///home/temp/*.txt", true),
      ("file:///home/temp/*.txt/", true),
      ("home/ano:ther/*.csv", false),
      ("file:/home/temp", false),
      ("//home/temp", false),
      ("home/{} path", false),
      ("file://home/file://test", false)
    )
    cases.foreach {
      case (spec, true) =>
        // `local` drops the "file://" scheme and any trailing slash.
        new LocalPath(spec).local should equal (spec.stripPrefix("file://").stripSuffix("/"))
      case (spec, false) =>
        intercept[IllegalArgumentException] {
          new LocalPath(spec)
        }
    }
  }

  /** Valid HDFS URIs must construct; wrong scheme/port forms must fail. */
  test("Paths should work for HDFS paths") {
    val cases = Seq(
      ("hdfs://host:50700/home/temp/*.csv", true),
      ("hdfs://anotherhost:80/home/temp/*.csv", true),
      ("hdfs://anotherhost:80/home/temp/*.csv/", true),
      ("hdfs://anotherhost:9/home/temp/*.csv", false),
      ("hdfs:/anotherhost:50700/home/temp/*.csv", false),
      ("file://host:50700/home/temp/*.csv", false),
      ("/home/temp/*.csv", false)
    )
    cases.foreach {
      case (spec, true) =>
        // `uri` preserves the full URI minus any trailing slash.
        new HadoopPath(spec).uri should equal (spec.stripSuffix("/"))
      case (spec, false) =>
        intercept[IllegalArgumentException] {
          new HadoopPath(spec)
        }
    }
  }

  /** `fromString` must dispatch to the correct file system implementation. */
  test("Paths should recognize FS correctly") {
    val cases = Seq(
      ("/home/temp/*.txt", true),
      ("home/temp/*.txt", true),
      ("file://home/temp/*.txt", true),
      ("hdfs://host:50700/home/temp/*.csv", false)
    )
    cases.foreach { case (spec, expectLocal) =>
      Paths.fromString(spec).isLocalFS should be (expectLocal)
    }
  }

  test("Paths should return correct absolute path") {
    val cases = Seq(
      ("/home/temp/*.txt", "/home/temp/*.txt"),
      ("home/temp/*.txt", new File("home/temp/*.txt").getAbsolutePath),
      ("file://home/temp/*.txt", new File("home/temp/*.txt").getAbsolutePath),
      ("file:///home/temp/*.txt", "/home/temp/*.txt"),
      ("file:///home/temp/*.txt/", "/home/temp/*.txt"),
      ("hdfs://host:50700/home/temp/*.csv", "/home/temp/*.csv"),
      ("hdfs://host:50700/home/temp/*.csv/", "/home/temp/*.csv")
    )
    cases.foreach { case (spec, expected) =>
      Paths.fromString(spec).absolute should equal (expected)
    }
  }

  test("Paths should return correct local path") {
    val cases = Seq(
      ("/home/temp/*.txt", "/home/temp/*.txt"),
      ("home/temp/*.txt", "home/temp/*.txt"),
      ("file://home/temp/*.txt", "home/temp/*.txt"),
      ("file:///home/temp/*.txt", "/home/temp/*.txt"),
      ("file:///home/temp/*.txt/", "/home/temp/*.txt"),
      ("hdfs://host:50700/home/temp/*.csv", "/home/temp/*.csv"),
      ("hdfs://host:50700/home/temp/*.csv/", "/home/temp/*.csv")
    )
    cases.foreach { case (spec, expected) =>
      Paths.fromString(spec).local should equal (expected)
    }
  }

  test("Paths should return correct URI") {
    val cases = Seq(
      ("/home/temp/*.txt", "file:///home/temp/*.txt"),
      ("home/temp/*.txt", "file://" + new File("home/temp/*.txt").getAbsolutePath),
      ("file://home/temp/*.txt", "file://" + new File("home/temp/*.txt").getAbsolutePath),
      ("file:///home/temp/*.txt", "file:///home/temp/*.txt"),
      ("file:///home/temp/*.txt/", "file:///home/temp/*.txt"),
      ("hdfs://host:50700/home/temp/*.csv", "hdfs://host:50700/home/temp/*.csv"),
      ("hdfs://host:50700/home/temp/*.csv/", "hdfs://host:50700/home/temp/*.csv")
    )
    cases.foreach { case (spec, expected) =>
      Paths.fromString(spec).uri should equal (expected)
    }
  }

  /** For HDFS paths the root is the scheme plus host:port authority. */
  test("Paths should return correct root URL for HDFS") {
    val cases = Seq(
      ("hdfs://host:50700/home/temp/*.csv", "hdfs://host:50700"),
      ("hdfs://anotherhost:8080/home/temp/*.csv", "hdfs://anotherhost:8080"),
      ("hdfs://another-host:80/home/temp/*.csv/", "hdfs://another-host:80"),
      ("hdfs://host1:9190/home/temp/*.csv", "hdfs://host1:9190")
    )
    cases.foreach { case (spec, expected) =>
      Paths.fromString(spec).root should equal (expected)
    }
  }
}
| sadikovi/sparkpipe | src/test/scala/org/sparkpipe/util/io/PathsSpec.scala | Scala | mit | 5,209 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import org.apache.kafka.common.config.ConfigException
import org.junit.Test
import junit.framework.Assert._
import org.scalatest.junit.JUnit3Suite
import kafka.utils.TestUtils
import kafka.message.GZIPCompressionCodec
import kafka.message.NoCompressionCodec
class KafkaConfigTest extends JUnit3Suite {
// log.retention.hours is converted to milliseconds.
@Test
def testLogRetentionTimeHoursProvided() {
val props = TestUtils.createBrokerConfig(0, 8181)
props.put("log.retention.hours", "1")
val cfg = KafkaConfig.fromProps(props)
assertEquals(60L * 60L * 1000L, cfg.logRetentionTimeMillis)
}
// log.retention.minutes is converted to milliseconds.
@Test
def testLogRetentionTimeMinutesProvided() {
val props = TestUtils.createBrokerConfig(0, 8181)
props.put("log.retention.minutes", "30")
val cfg = KafkaConfig.fromProps(props)
assertEquals(30 * 60L * 1000L, cfg.logRetentionTimeMillis)
}
// log.retention.ms is taken as-is.
@Test
def testLogRetentionTimeMsProvided() {
val props = TestUtils.createBrokerConfig(0, 8181)
props.put("log.retention.ms", "1800000")
val cfg = KafkaConfig.fromProps(props)
assertEquals(30 * 60L * 1000L, cfg.logRetentionTimeMillis)
}
// With no retention property set, the default is 7 days.
@Test
def testLogRetentionTimeNoConfigProvided() {
val props = TestUtils.createBrokerConfig(0, 8181)
val cfg = KafkaConfig.fromProps(props)
assertEquals(24 * 7 * 60L * 60L * 1000L, cfg.logRetentionTimeMillis)
}
// When both minutes and hours are given, minutes takes precedence.
@Test
def testLogRetentionTimeBothMinutesAndHoursProvided() {
val props = TestUtils.createBrokerConfig(0, 8181)
props.put("log.retention.minutes", "30")
props.put("log.retention.hours", "1")
val cfg = KafkaConfig.fromProps(props)
assertEquals( 30 * 60L * 1000L, cfg.logRetentionTimeMillis)
}
// When both ms and minutes are given, ms takes precedence.
@Test
def testLogRetentionTimeBothMinutesAndMsProvided() {
val props = TestUtils.createBrokerConfig(0, 8181)
props.put("log.retention.ms", "1800000")
props.put("log.retention.minutes", "10")
val cfg = KafkaConfig.fromProps(props)
assertEquals( 30 * 60L * 1000L, cfg.logRetentionTimeMillis)
}
// Advertised host/port default to the bound host.name and port.
@Test
def testAdvertiseDefaults() {
val port = 9999
val hostName = "fake-host"
val props = TestUtils.createBrokerConfig(0, port)
props.put("host.name", hostName)
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.advertisedHostName, hostName)
assertEquals(serverConfig.advertisedPort, port)
}
// Explicit advertised.host.name / advertised.port override the bound values.
@Test
def testAdvertiseConfigured() {
val port = 9999
val advertisedHostName = "routable-host"
val advertisedPort = 1234
val props = TestUtils.createBrokerConfig(0, port)
props.put("advertised.host.name", advertisedHostName)
props.put("advertised.port", advertisedPort.toString)
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.advertisedHostName, advertisedHostName)
assertEquals(serverConfig.advertisedPort, advertisedPort)
}
// Unclean leader election defaults to enabled in this version.
@Test
def testUncleanLeaderElectionDefault() {
val props = TestUtils.createBrokerConfig(0, 8181)
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.uncleanLeaderElectionEnable, true)
}
// Explicitly disabling unclean leader election is honored.
@Test
def testUncleanElectionDisabled() {
val props = TestUtils.createBrokerConfig(0, 8181)
props.put("unclean.leader.election.enable", String.valueOf(false))
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.uncleanLeaderElectionEnable, false)
}
@Test
def testUncleanElectionEnabled() {
val props = TestUtils.createBrokerConfig(0, 8181)
props.put("unclean.leader.election.enable", String.valueOf(true))
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.uncleanLeaderElectionEnable, true)
}
@Test
def testUncleanElectionInvalid() {
val props = TestUtils.createBrokerConfig(0, 8181)
props.put("unclean.leader.election.enable", "invalid")
intercept[ConfigException] {
KafkaConfig.fromProps(props)
}
}
@Test
def testLogRollTimeMsProvided() {
val props = TestUtils.createBrokerConfig(0, 8181)
props.put("log.roll.ms", "1800000")
val cfg = KafkaConfig.fromProps(props)
assertEquals(30 * 60L * 1000L, cfg.logRollTimeMillis)
}
@Test
def testLogRollTimeBothMsAndHoursProvided() {
val props = TestUtils.createBrokerConfig(0, 8181)
props.put("log.roll.ms", "1800000")
props.put("log.roll.hours", "1")
val cfg = KafkaConfig.fromProps(props)
assertEquals( 30 * 60L * 1000L, cfg.logRollTimeMillis)
}
@Test
def testLogRollTimeNoConfigProvided() {
val props = TestUtils.createBrokerConfig(0, 8181)
val cfg = KafkaConfig.fromProps(props)
assertEquals(24 * 7 * 60L * 60L * 1000L, cfg.logRollTimeMillis )
}
@Test
def testDefaultCompressionType() {
val props = TestUtils.createBrokerConfig(0, 8181)
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.compressionType, "producer")
}
@Test
def testValidCompressionType() {
val props = TestUtils.createBrokerConfig(0, 8181)
props.put("compression.type", "gzip")
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.compressionType, "gzip")
}
@Test
def testInvalidCompressionType() {
val props = TestUtils.createBrokerConfig(0, 8181)
props.put("compression.type", "abc")
intercept[IllegalArgumentException] {
KafkaConfig.fromProps(props)
}
}
}
| WillCh/cs286A | dataMover/kafka/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala | Scala | bsd-2-clause | 6,226 |
package cpup.mc.computers.content
import cpup.mc.computers.CPupComputers
import cpup.mc.lib.content.CPupContent
import cpw.mods.fml.common.event.FMLPreInitializationEvent
/** Registers the CPupComputers mod instance with the shared CPup content framework. */
object Content extends CPupContent[CPupComputers.type] {
  override def mod: CPupComputers.type = CPupComputers
}
| CoderPuppy/cpup-computers-mc | src/main/scala/cpup/mc/computers/content/Content.scala | Scala | mit | 266 |
package scalaoauth2.provider
import org.scalatest.OptionValues
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers._
import org.scalatest.concurrent.ScalaFutures
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
/**
 * Exercises the OAuth2 "password" grant handler, both when client
 * credentials are mandatory and when they are optional.
 */
class PasswordSpec extends AnyFlatSpec with ScalaFutures with OptionValues {

  val passwordClientCredReq = new Password()
  val passwordNoClientCredReq = new Password() {
    override def clientCredentialRequired = false
  }

  "Password when client credential required" should "handle request" in handlesRequest(
    passwordClientCredReq,
    Map(
      "client_id" -> Seq("clientId1"),
      "client_secret" -> Seq("clientSecret1")
    )
  )

  "Password when client credential not required" should "handle request" in handlesRequest(
    passwordNoClientCredReq,
    Map.empty
  )

  /** Runs a password grant through `password` and checks the issued token. */
  def handlesRequest(password: Password, params: Map[String, Seq[String]]) = {
    val grantParams = Map(
      "username" -> Seq("user"),
      "password" -> Seq("pass"),
      "scope" -> Seq("all")
    )
    val request = new AuthorizationRequest(Map(), params ++ grantParams)

    // Keep only a successfully parsed credential; drop parse failures.
    val clientCred = request.parseClientCredential.flatMap(_.toOption)

    val dataHandler = new MockDataHandler() {
      override def findUser(
          maybeClientCredential: Option[ClientCredential],
          request: AuthorizationRequest
      ): Future[Option[User]] =
        Future.successful(Some(MockUser(10000, "username")))

      override def createAccessToken(
          authInfo: AuthInfo[User]
      ): Future[AccessToken] =
        Future.successful(
          AccessToken(
            "token1",
            Some("refreshToken1"),
            Some("all"),
            Some(3600),
            new java.util.Date()
          )
        )
    }

    whenReady(password.handleRequest(clientCred, request, dataHandler)) { result =>
      result.tokenType should be("Bearer")
      result.accessToken should be("token1")
      result.expiresIn.value should (be <= 3600L and be > 3595L)
      result.refreshToken should be(Some("refreshToken1"))
      result.scope should be(Some("all"))
    }
  }
}
| nulab/scala-oauth2-provider | src/test/scala/scalaoauth2/provider/PasswordSpec.scala | Scala | mit | 2,257 |
package dialectic.micro
import org.scalacheck._
import org.scalacheck.Arbitrary._
import org.specs2._
import scalaz.{ @@, Equal, IList, Order, Tag }
import scalaz.scalacheck.ScalazProperties.semigroup
import scalaz.std.anyVal.intInstance
import scalaz.Tags.{ Conjunction, Disjunction }
// specs2 acceptance spec verifying that Goal's semigroup instances obey the
// scalaz semigroup laws under both Conjunction and Disjunction tagging.
class GoalTest extends Specification with ScalaCheck {
import GoalTestHelper._
def is =
s2"""
Goal
should have a lawful semigroup for conjunction ${conjSemigroupLaws[Int]}
should have a lawful semigroup for disjunction ${disjSemigroupLaws[Int]}
"""
// Law checks are polymorphic; the spec instantiates them at Int.
def conjSemigroupLaws[A : Arbitrary : Equal : Order] =
semigroup.laws[Goal[A] @@ Conjunction](Goal.conjunctionSemigroup, goalConjEqual[A], goalConjArbitrary[A])
def disjSemigroupLaws[A : Arbitrary : Equal : Order] =
semigroup.laws[Goal[A] @@ Disjunction](Goal.disjunctionSemigroup, goalDisjEqual[A], goalDisjArbitrary[A])
}
// ScalaCheck/scalaz instances used by GoalTest: Equal and Arbitrary for
// Goal[A], plus tagged variants for the Conjunction/Disjunction semigroups.
object GoalTestHelper {
import TermTestHelper._
// Goals are compared by the full stream of goal states they produce when
// run against an empty initial state.
implicit def goalEqual[A : Equal]: Equal[Goal[A]] =
new Equal[Goal[A]] {
def equal(a1: Goal[A], a2: Goal[A]): Boolean =
Equal[IList[GoalState[A]]].equal(a1.runEmpty.takeAll, a2.runEmpty.takeAll)
}
implicit def goalConjEqual[A : Equal]: Equal[Goal[A] @@ Conjunction] = goalEqual[A].contramap(Tag.unwrap)
implicit def goalDisjEqual[A : Equal]: Equal[Goal[A] @@ Disjunction] = goalEqual[A].contramap(Tag.unwrap)
implicit def goalArbitrary[A : Arbitrary : Order]: Arbitrary[Goal[A]] = {
val unifyGen = arbitrary[Term[A]].flatMap(l => arbitrary[Term[A]].map(r => l =#= r))
def conjGen: Gen[Goal[A]] = goalGen.flatMap(l => goalGen.map(r => l /\\ r))
def disjGen: Gen[Goal[A]] = goalGen.flatMap(l => goalGen.map(r => l \\/ r))
def freshGen: Gen[Goal[A]] = arbFunction1[Term[A], Goal[A]](Arbitrary(goalGen)).arbitrary.map(Goal.callFresh)
def goalGen = Gen.lzy(Gen.oneOf(unifyGen, conjGen, disjGen, freshGen))
// NOTE(review): only unifyGen is used below; goalGen and the compound
// generators above are defined but unreferenced by the returned Arbitrary —
// presumably to keep generated goals small/terminating. Confirm intentional.
Arbitrary(unifyGen)
}
implicit def goalConjArbitrary[A : Arbitrary : Order]: Arbitrary[Goal[A] @@ Conjunction] =
Arbitrary(goalArbitrary[A].arbitrary.map(Tag[Goal[A], Conjunction]))
implicit def goalDisjArbitrary[A : Arbitrary : Order]: Arbitrary[Goal[A] @@ Disjunction] =
Arbitrary(goalArbitrary[A].arbitrary.map(Tag[Goal[A], Disjunction]))
}
| adelbertc/dialectic | micro/src/test/scala/dialectic/micro/GoalTest.scala | Scala | bsd-3-clause | 2,276 |
package mesosphere.marathon.api.v2
import javax.servlet.http.{ HttpServletResponse, HttpServletRequest }
import javax.ws.rs._
import javax.ws.rs.core.{ Context, MediaType, Response }
import com.codahale.metrics.annotation.Timed
import mesosphere.marathon.api.v2.json.Formats._
import mesosphere.marathon.api.v2.json.V2AppDefinition
import mesosphere.marathon.api.{ AuthResource, MarathonMediaType }
import mesosphere.marathon.plugin.auth.{ Authorizer, Authenticator, ViewAppOrGroup }
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.state.Timestamp
import mesosphere.marathon.{ MarathonConf, MarathonSchedulerService }
import org.slf4j.LoggerFactory
@Produces(Array(MarathonMediaType.PREFERRED_APPLICATION_JSON))
@Consumes(Array(MediaType.APPLICATION_JSON))
class AppVersionsResource(service: MarathonSchedulerService,
                          val authenticator: Authenticator,
                          val authorizer: Authorizer,
                          val config: MarathonConf) extends AuthResource {

  val log = LoggerFactory.getLogger(getClass.getName)

  /**
   * Lists all known versions of the given app.
   * Returns 404 (via unknownApp) when the app has no versions.
   */
  @GET
  @Timed
  def index(@PathParam("appId") appId: String,
            @Context req: HttpServletRequest, @Context resp: HttpServletResponse): Response = {
    // Parse the path once; previously appId.toRootPath was computed twice.
    val id = appId.toRootPath
    doIfAuthorized(req, resp, ViewAppOrGroup, id) { implicit principal =>
      val versions = service.listAppVersions(id).toSeq
      if (versions.isEmpty) unknownApp(id)
      else ok(jsonObjString("versions" -> versions))
    }
  }

  /**
   * Returns the app definition at a specific version timestamp, or 404 if
   * that version does not exist.
   */
  @GET
  @Timed
  @Path("{version}")
  def show(@PathParam("appId") appId: String,
           @PathParam("version") version: String,
           @Context req: HttpServletRequest, @Context resp: HttpServletResponse): Response = {
    val id = appId.toRootPath
    doIfAuthorized(req, resp, ViewAppOrGroup, id) { implicit principal =>
      val timestamp = Timestamp(version)
      service.getApp(id, timestamp)
        .map(app => ok(jsonString(V2AppDefinition(app))))
        .getOrElse(unknownApp(id, Option(timestamp)))
    }
  }
}
| Kosta-Github/marathon | src/main/scala/mesosphere/marathon/api/v2/AppVersionsResource.scala | Scala | apache-2.0 | 2,068 |
package com.seanshubin.detangler.maven.plugin
import com.seanshubin.detangler.console.ConsoleApplication
import org.apache.maven.plugin.AbstractMojo
import org.apache.maven.plugins.annotations.{Mojo, Parameter}
// Maven goal `detangler:report`: runs the detangler console application
// against the configured detangler config file.
@Mojo(name = "report")
class ReportMojo extends AbstractMojo {
// Injected by Maven via reflection; must be a null-initialized var so the
// plugin framework can set it after construction.
@Parameter(defaultValue = "${detanglerConfig}")
var detanglerConfig: String = null
override def execute(): Unit = {
// Delegate to the standalone console application, passing the config path as argv.
ConsoleApplication.main(Array(detanglerConfig))
}
}
| SeanShubin/detangler | maven-plugin/src/main/scala/com/seanshubin/detangler/maven/plugin/ReportMojo.scala | Scala | unlicense | 457 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.scheduler
import scala.collection.mutable.ArrayBuffer
import org.scalatest.concurrent.Eventually._
import org.scalatest.time.SpanSugar._
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskStart, TaskLocality}
import org.apache.spark.scheduler.TaskLocality.TaskLocality
import org.apache.spark.storage.{StorageLevel, StreamBlockId}
import org.apache.spark.streaming._
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.receiver._
/** Testsuite for receiver scheduling */
class ReceiverTrackerSuite extends TestSuiteBase {
test("send rate update to receivers") {
withStreamingContext(new StreamingContext(conf, Milliseconds(100))) { ssc =>
// The listener bus must be running before the tracker is started.
ssc.scheduler.listenerBus.start(ssc.sc)
val newRateLimit = 100L
val inputDStream = new RateTestInputDStream(ssc)
val tracker = new ReceiverTracker(ssc)
tracker.start()
try {
// we wait until the Receiver has registered with the tracker,
// otherwise our rate update is lost
eventually(timeout(5 seconds)) {
assert(RateTestReceiver.getActive().nonEmpty)
}
// Verify that the rate of the block generator in the receiver get updated
val activeReceiver = RateTestReceiver.getActive().get
tracker.sendRateUpdate(inputDStream.id, newRateLimit)
eventually(timeout(5 seconds)) {
// Both the default and the manually created block generator must pick up the limit.
assert(activeReceiver.getDefaultBlockGeneratorRateLimit() === newRateLimit,
"default block generator did not receive rate update")
assert(activeReceiver.getCustomBlockGeneratorRateLimit() === newRateLimit,
"other block generator did not receive rate update")
}
} finally {
tracker.stop(false)
}
}
}
test("should restart receiver after stopping it") {
withStreamingContext(new StreamingContext(conf, Milliseconds(100))) { ssc =>
// Counts receiver start events delivered through the listener bus.
@volatile var startTimes = 0
ssc.addStreamingListener(new StreamingListener {
override def onReceiverStarted(receiverStarted: StreamingListenerReceiverStarted): Unit = {
startTimes += 1
}
})
val input = ssc.receiverStream(new StoppableReceiver)
val output = new TestOutputStream(input)
output.register()
ssc.start()
// Ask the receiver to stop itself; the tracker should then restart it.
StoppableReceiver.shouldStop = true
eventually(timeout(10 seconds), interval(10 millis)) {
// The receiver is stopped once, so if it's restarted, it should be started twice.
assert(startTimes === 2)
}
}
}
test("SPARK-11063: TaskSetManager should use Receiver RDD's preferredLocations") {
// Use ManualClock to prevent from starting batches so that we can make sure the only task is
// for starting the Receiver
val _conf = conf.clone.set("spark.streaming.clock", "org.apache.spark.util.ManualClock")
withStreamingContext(new StreamingContext(_conf, Milliseconds(100))) { ssc =>
// Captures the locality of the task that launches the receiver.
@volatile var receiverTaskLocality: TaskLocality = null
ssc.sparkContext.addSparkListener(new SparkListener {
override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = {
receiverTaskLocality = taskStart.taskInfo.taskLocality
}
})
val input = ssc.receiverStream(new TestReceiver)
val output = new TestOutputStream(input)
output.register()
ssc.start()
eventually(timeout(10 seconds), interval(10 millis)) {
// If preferredLocations is set correctly, receiverTaskLocality should be NODE_LOCAL
assert(receiverTaskLocality === TaskLocality.NODE_LOCAL)
}
}
}
}
/** An input DStream used for testing rate controlling. */
private[streaming] class RateTestInputDStream(@transient ssc_ : StreamingContext)
extends ReceiverInputDStream[Int](ssc_) {
override def getReceiver(): Receiver[Int] = new RateTestReceiver(id)
// Number of rate updates published by the rate controller (test observability).
@volatile
var publishedRates = 0
override val rateController: Option[RateController] = {
Some(new RateController(id, new ConstantEstimator(100)) {
// Count publications instead of forwarding them anywhere.
override def publish(rate: Long): Unit = {
publishedRates += 1
}
})
}
}
/** A receiver implementation for testing rate controlling */
private[streaming] class RateTestReceiver(receiverId: Int, host: Option[String] = None)
extends Receiver[Int](StorageLevel.MEMORY_ONLY) {
// Lazily created so the supervisor is guaranteed to exist when it is first
// forced (which happens in onStart). All listener callbacks are no-ops.
private lazy val customBlockGenerator = supervisor.createBlockGenerator(
new BlockGeneratorListener {
override def onPushBlock(blockId: StreamBlockId, arrayBuffer: ArrayBuffer[_]): Unit = {}
override def onError(message: String, throwable: Throwable): Unit = {}
override def onGenerateBlock(blockId: StreamBlockId): Unit = {}
override def onAddData(data: Any, metadata: Any): Unit = {}
}
)
setReceiverId(receiverId)
override def onStart(): Unit = {
// Force creation of the custom block generator, then expose this instance to tests.
customBlockGenerator
RateTestReceiver.registerReceiver(this)
}
override def onStop(): Unit = {
RateTestReceiver.deregisterReceiver()
}
override def preferredLocation: Option[String] = host
// Current rate limit of the receiver's default block generator.
def getDefaultBlockGeneratorRateLimit(): Long = {
supervisor.getCurrentRateLimit
}
// Current rate limit of the additional, manually created block generator.
def getCustomBlockGeneratorRateLimit(): Long = {
customBlockGenerator.getCurrentLimit
}
}
/**
 * Registry exposing the currently active [[RateTestReceiver]] instance to the
 * test code, which runs on a different thread than the receiver itself.
 */
private[streaming] object RateTestReceiver {

  // Written from the receiver lifecycle callbacks, read from the test thread.
  @volatile private var activeReceiver: RateTestReceiver = null

  def registerReceiver(receiver: RateTestReceiver): Unit =
    activeReceiver = receiver

  def deregisterReceiver(): Unit =
    activeReceiver = null

  // Option(...) maps the null "no active receiver" sentinel to None.
  def getActive(): Option[RateTestReceiver] = Option(activeReceiver)
}
/**
 * A custom receiver that could be stopped via StoppableReceiver.shouldStop
 */
class StoppableReceiver extends Receiver[Int](StorageLevel.MEMORY_ONLY) {
// NOTE(review): this is never assigned in onStart, so the join() in onStop is
// a no-op — confirm whether the started thread should be recorded here.
var receivingThreadOption: Option[Thread] = None
def onStart() {
// Poll the shared flag; once it flips, the receiver stops itself, which
// prompts the tracker to restart it.
val thread = new Thread() {
override def run() {
while (!StoppableReceiver.shouldStop) {
Thread.sleep(10)
}
StoppableReceiver.this.stop("stop")
}
}
thread.start()
}
def onStop() {
StoppableReceiver.shouldStop = true
receivingThreadOption.foreach(_.join())
// Reset it so as to restart it
StoppableReceiver.shouldStop = false
}
}
object StoppableReceiver {
// Shared flag: tests set this to true to make the receiver stop itself;
// onStop resets it to false so the restarted receiver keeps running.
@volatile var shouldStop = false
}
| practice-vishnoi/dev-spark-1 | streaming/src/test/scala/org/apache/spark/streaming/scheduler/ReceiverTrackerSuite.scala | Scala | apache-2.0 | 7,213 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
/**
 * Builds a map that is keyed by an Attribute's expression id. Using the expression id allows values
 * to be looked up even when the attributes used differ cosmetically (i.e., the capitalization
 * of the name, or the expected nullability).
 */
object AttributeMap {
def apply[A](kvs: Seq[(Attribute, A)]): AttributeMap[A] = {
// Key each (attribute, value) pair by the attribute's ExprId, keeping the
// original attribute alongside the value so iteration can recover it.
new AttributeMap(kvs.map(kv => (kv._1.exprId, kv)).toMap)
}
}
// A Map[Attribute, A] whose lookups compare attributes by ExprId only,
// ignoring cosmetic differences such as name capitalization or nullability.
class AttributeMap[A](baseMap: Map[ExprId, (Attribute, A)])
extends Map[Attribute, A] with Serializable {
override def get(k: Attribute): Option[A] = baseMap.get(k.exprId).map(_._2)
// Note: adding or removing a key returns a plain Map keyed by Attribute,
// which no longer has the ExprId-based lookup semantics of this class.
override def + [B1 >: A](kv: (Attribute, B1)): Map[Attribute, B1] = baseMap.values.toMap + kv
override def iterator: Iterator[(Attribute, A)] = baseMap.valuesIterator
override def -(key: Attribute): Map[Attribute, A] = baseMap.values.toMap - key
}
| tophua/spark1.52 | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/AttributeMap.scala | Scala | apache-2.0 | 1,886 |
package models
import io.circe._
import io.circe.syntax._
import utils.CirceCommonCodecs
/** Hand-written circe codecs for the API models. */
object ModelCodecs extends CirceCommonCodecs {

  object movie {
    implicit val decodeMovie: Decoder[Movie] =
      Decoder.forProduct6(
        "id", "imdbId", "movieTitle", "availableSeats", "screenId", "createdAt"
      )(Movie.apply)

    implicit val encodeMovie: Encoder[Movie] =
      Encoder.forProduct6(
        "id", "imdbId", "movieTitle", "availableSeats", "screenId", "createdAt"
      ) { movie =>
        (movie.id, movie.imdbId, movie.movieTitle, movie.availableSeats, movie.screenId, movie.createdAt)
      }
  }

  object reservationCounter {
    implicit val decodeReservationCounter: Decoder[ReservationCounter] =
      Decoder.forProduct2("availableSeats", "reservedSeats")(ReservationCounter.apply)

    implicit val encodeReservationCounter: Encoder[ReservationCounter] =
      Encoder.forProduct2("availableSeats", "reservedSeats") { counter =>
        (counter.availableSeats, counter.reservedSeats)
      }
  }
}
| ziyasal/Reserveon | src/main/scala/models/ModelCodecs.scala | Scala | mit | 1,054 |
package metaconfig.sconfig
import metaconfig.Conf
import metaconfig.ConfShow
import metaconfig.Generators.argConfShow
import org.scalacheck.Prop.forAll
/**
 * Property test: a parsed HOCON config, printed back to HOCON and re-parsed,
 * must yield an equivalent Conf (parse -> print -> parse roundtrip).
 */
class HoconPrinterRoundtripSuite extends munit.ScalaCheckSuite {

  /** Asserts that `conf` survives a parse -> printHocon -> parse roundtrip. */
  def assertRoundtrip(conf: String): Unit = {
    val a = Conf.parseString(conf).get
    val hocon = Conf.printHocon(a)
    val b = Conf.parseString(hocon).get
    // Removed the unused local `isEqual`; assertEquals does the comparison.
    assertEquals(a, b)
  }

  /** Registers `conf` as a skipped (ignored) test case. */
  def ignore(conf: String): Unit = super.test(conf.ignore) {}

  /** Registers a roundtrip test named after (a prefix of) the input. */
  def checkRoundtrip(conf: String): Unit =
    test(conf.take(100)) {
      assertRoundtrip(clue(conf))
    }

  // Presumably a known roundtrip failure — kept ignored.
  ignore(
    """
      |a.a = "d"
      |a.bc = 9
    """.stripMargin
  )

  checkRoundtrip(
    """
      |aa.bb = true
      |aa.d = 3
      |aa.aa = "cb"
    """.stripMargin
  )

  property("roundtrip") {
    forAll { conf: ConfShow => assertRoundtrip(conf.str) }
  }
}
| olafurpg/metaconfig | metaconfig-tests/jvm/src/test/scala/metaconfig/sconfig/HoconPrinterRoundtripSuite.scala | Scala | apache-2.0 | 878 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.data.model.util
import org.beangle.commons.collection.Collections
import org.beangle.data.model.Entity
import org.beangle.data.model.meta.{EntityType, Property}
object Populator {
// Accumulates attribute-level failures produced while populating an entity.
class CopyResult {
// Failed attribute name -> cause description.
var fails: Map[String, String] = Map.empty
def addFail(attr: String, cause: String): Unit = {
fails += (attr -> cause)
}
}
}
/**
 * Populator interface: copies values onto entity instances.
 * @author chaostone
 */
trait Populator {
/**
 * Populates `target` from `params`; per-attribute failures are reported in
 * the returned [[Populator.CopyResult]].
 */
// NOTE(review): the parameter named `EntityType` shadows the type of the same
// name; renaming would break named-argument callers, so it is only flagged here.
def populate(target: Entity[_], EntityType: EntityType, params: collection.Map[String, Any]): Populator.CopyResult
/**
 * Populates a single attribute with `value`; the Boolean presumably signals
 * success — confirm against the implementation.
 */
def populate(target: Entity[_], EntityType: EntityType, attr: String, value: Any): Boolean
/**
 * Initializes the property path `attr` on `target`, returning the resolved
 * value together with its Property metadata.
 */
def init(target: Entity[_], t: EntityType, attr: String): (Any, Property)
}
| beangle/data | model/src/main/scala/org/beangle/data/model/util/Populator.scala | Scala | lgpl-3.0 | 1,538 |
// Copyright © 2009, Esko Luontola. All Rights Reserved.
// This software is released under the MIT License. See LICENSE.txt
package net.orfjackal.bcd
import org.objectweb.asm._
import org.objectweb.asm.tree._
import org.specs._
// specs2 spec for the bytecode interpreter (MethodContext.execute): checks the
// simulated operand-stack effect of object, field and method instructions.
// Long and double values occupy two stack slots, per the JVM specification.
object interpretObjectFieldMethodSpec extends Specification {
"Operating objects" >> {
val ORIG_STACK_SIZE = 1
// Executes one instruction against a stack holding a single unknown value.
def exec(insn: AbstractInsnNode) = {
val stack = List(UnknownValue())
val c = new MethodContext(stack, Map.empty)
c.stack.size must_== ORIG_STACK_SIZE
c.execute(insn)
}
// NEW pushes a reference of the instantiated type.
"NEW" >> {
val c = exec(new TypeInsnNode(Opcodes.NEW, "java/lang/String"))
c.stack.size must_== ORIG_STACK_SIZE + 1
c.stack.head must_== KnownType(classOf[String])
}
// INSTANCEOF replaces the reference with a boolean result.
"INSTANCEOF" >> {
val c = exec(new TypeInsnNode(Opcodes.INSTANCEOF, "java/lang/String"))
c.stack.size must_== ORIG_STACK_SIZE
c.stack.head must_== KnownType(classOf[Boolean])
}
// MONITORENTER / MONITOREXIT pop the monitored object reference.
"MONITORENTER" >> {
val c = exec(new InsnNode(Opcodes.MONITORENTER))
c.stack.size must_== ORIG_STACK_SIZE - 1
}
"MONITOREXIT" >> {
val c = exec(new InsnNode(Opcodes.MONITOREXIT))
c.stack.size must_== ORIG_STACK_SIZE - 1
}
}
// GETFIELD pops the object reference and pushes the field value (two slots
// for long/double); GETSTATIC pops nothing.
"Getting values from fields" >> {
val ORIG_STACK_SIZE = 1
def exec(insn: AbstractInsnNode) = {
val stack = List(UnknownValue())
val c = new MethodContext(stack, Map.empty)
c.stack.size must_== ORIG_STACK_SIZE
c.execute(insn)
}
val ownerType = Type.getType(classOf[String])
val fieldName = "someField"
"GETFIELD object" >> {
val fieldType = Type.getType(classOf[String])
val c = exec(new FieldInsnNode(Opcodes.GETFIELD, ownerType.getInternalName, fieldName, fieldType.getDescriptor))
c.stack.size must_== ORIG_STACK_SIZE
c.stack.head must_== KnownType(classOf[String])
}
"GETFIELD array" >> {
val fieldType = Type.getType(classOf[Array[String]])
val c = exec(new FieldInsnNode(Opcodes.GETFIELD, ownerType.getInternalName, fieldName, fieldType.getDescriptor))
c.stack.size must_== ORIG_STACK_SIZE
c.stack.head must_== KnownType(classOf[Array[String]])
}
"GETFIELD int" >> {
val fieldType = Type.INT_TYPE
val c = exec(new FieldInsnNode(Opcodes.GETFIELD, ownerType.getInternalName, fieldName, fieldType.getDescriptor))
c.stack.size must_== ORIG_STACK_SIZE
c.stack.head must_== KnownType(classOf[Int])
}
"GETFIELD long" >> {
val fieldType = Type.LONG_TYPE
val c = exec(new FieldInsnNode(Opcodes.GETFIELD, ownerType.getInternalName, fieldName, fieldType.getDescriptor))
c.stack.size must_== ORIG_STACK_SIZE + 1
c.stack.take(2) must_== List(KnownType(classOf[Long]), KnownType(classOf[Long]))
}
"GETFIELD double" >> {
val fieldType = Type.DOUBLE_TYPE
val c = exec(new FieldInsnNode(Opcodes.GETFIELD, ownerType.getInternalName, fieldName, fieldType.getDescriptor))
c.stack.size must_== ORIG_STACK_SIZE + 1
c.stack.take(2) must_== List(KnownType(classOf[Double]), KnownType(classOf[Double]))
}
"GETSTATIC" >> {
val fieldType = Type.getType(classOf[String])
val c = exec(new FieldInsnNode(Opcodes.GETSTATIC, ownerType.getInternalName, fieldName, fieldType.getDescriptor))
c.stack.size must_== ORIG_STACK_SIZE + 1
c.stack.head must_== KnownType(classOf[String])
}
"GETSTATIC long/double" >> {
val fieldType = Type.LONG_TYPE
val c = exec(new FieldInsnNode(Opcodes.GETSTATIC, ownerType.getInternalName, fieldName, fieldType.getDescriptor))
c.stack.size must_== ORIG_STACK_SIZE + 2
c.stack.take(2) must_== List(KnownType(classOf[Long]), KnownType(classOf[Long]))
}
}
// PUTFIELD pops the object reference plus the value (two slots for
// long/double); PUTSTATIC pops only the value.
"Putting values to fields" >> {
val ORIG_STACK_SIZE = 3
def exec(insn: AbstractInsnNode) = {
val stack = List(UnknownValue(), UnknownValue(), UnknownValue())
val c = new MethodContext(stack, Map.empty)
c.stack.size must_== ORIG_STACK_SIZE
c.execute(insn)
}
val ownerType = Type.getType(classOf[String])
val fieldName = "someField"
"PUTFIELD" >> {
val fieldType = Type.getType(classOf[String])
val c = exec(new FieldInsnNode(Opcodes.PUTFIELD, ownerType.getInternalName, fieldName, fieldType.getDescriptor))
c.stack.size must_== ORIG_STACK_SIZE - 2
}
"PUTFIELD long/double" >> {
val fieldType = Type.getType(classOf[Long])
val c = exec(new FieldInsnNode(Opcodes.PUTFIELD, ownerType.getInternalName, fieldName, fieldType.getDescriptor))
c.stack.size must_== ORIG_STACK_SIZE - 3
}
"PUTSTATIC" >> {
val fieldType = Type.getType(classOf[String])
val c = exec(new FieldInsnNode(Opcodes.PUTSTATIC, ownerType.getInternalName, fieldName, fieldType.getDescriptor))
c.stack.size must_== ORIG_STACK_SIZE - 1
}
"PUTSTATIC long/double" >> {
val fieldType = Type.getType(classOf[Long])
val c = exec(new FieldInsnNode(Opcodes.PUTSTATIC, ownerType.getInternalName, fieldName, fieldType.getDescriptor))
c.stack.size must_== ORIG_STACK_SIZE - 2
}
}
// Invocations pop the arguments (and the receiver, except for INVOKESTATIC)
// and push the return value (two slots for long/double, nothing for void).
"Invoking methods" >> {
val ORIG_STACK_SIZE = 3
def exec(insn: AbstractInsnNode) = {
val stack = List(UnknownValue(), UnknownValue(), UnknownValue())
val c = new MethodContext(stack, Map.empty)
c.stack.size must_== ORIG_STACK_SIZE
c.execute(insn)
}
val ownerType = Type.getType(classOf[String])
val methodName = "someMethod"
"INVOKEVIRTUAL (0 args, return void)" >> {
val c = exec(new MethodInsnNode(Opcodes.INVOKEVIRTUAL, ownerType.getInternalName, methodName, "()V"))
c.stack.size must_== ORIG_STACK_SIZE - 1
}
"INVOKEVIRTUAL (1 int arg, return void)" >> {
val c = exec(new MethodInsnNode(Opcodes.INVOKEVIRTUAL, ownerType.getInternalName, methodName, "(I)V"))
c.stack.size must_== ORIG_STACK_SIZE - 2
}
"INVOKEVIRTUAL (1 long arg, return void)" >> {
val c = exec(new MethodInsnNode(Opcodes.INVOKEVIRTUAL, ownerType.getInternalName, methodName, "(J)V"))
c.stack.size must_== ORIG_STACK_SIZE - 3
}
"INVOKEVIRTUAL (2 int args, return void)" >> {
val c = exec(new MethodInsnNode(Opcodes.INVOKEVIRTUAL, ownerType.getInternalName, methodName, "(II)V"))
c.stack.size must_== ORIG_STACK_SIZE - 3
}
"INVOKEVIRTUAL (0 args, return int)" >> {
val c = exec(new MethodInsnNode(Opcodes.INVOKEVIRTUAL, ownerType.getInternalName, methodName, "()I"))
c.stack.size must_== ORIG_STACK_SIZE
c.stack.head must_== KnownType(classOf[Int])
}
"INVOKEVIRTUAL (0 args, return long)" >> {
val c = exec(new MethodInsnNode(Opcodes.INVOKEVIRTUAL, ownerType.getInternalName, methodName, "()J"))
c.stack.size must_== ORIG_STACK_SIZE + 1
c.stack.take(2) must_== List(KnownType(classOf[Long]), KnownType(classOf[Long]))
}
"INVOKESPECIAL (1 args, return void)" >> {
val c = exec(new MethodInsnNode(Opcodes.INVOKESPECIAL, ownerType.getInternalName, methodName, "(I)V"))
c.stack.size must_== ORIG_STACK_SIZE - 2
}
// INVOKESTATIC has no receiver, so only the argument is popped.
"INVOKESTATIC (1 args, return void)" >> {
val c = exec(new MethodInsnNode(Opcodes.INVOKESTATIC, ownerType.getInternalName, methodName, "(I)V"))
c.stack.size must_== ORIG_STACK_SIZE - 1
}
"INVOKEINTERFACE (1 args, return void)" >> {
val c = exec(new MethodInsnNode(Opcodes.INVOKEINTERFACE, ownerType.getInternalName, methodName, "(I)V"))
c.stack.size must_== ORIG_STACK_SIZE - 2
}
}
}
| orfjackal/bytecode-detective | src/test/scala/net/orfjackal/bcd/interpretObjectFieldMethodSpec.scala | Scala | mit | 7,529 |
package org.arnoldc
case class MethodInformation(returnsValue: Boolean, numberOfArguments: Integer)
| khodges42/DovahkiinC | src/main/scala/org/dovahkiinc/MethodInformation.scala | Scala | apache-2.0 | 101 |
package is.hail
import is.hail.stats._
import breeze.linalg.{Vector, DenseVector, max, sum}
import breeze.numerics._
import is.hail.utils._
// Experimental population-genetics helpers: filtering allele frequency (FAF)
// and EM-based haplotype frequency estimation.
package object experimental {
/**
 * Largest allele count consistent with allele frequency `af` at confidence
 * `ci`: the Poisson quantile of the expected count `an * af`.
 */
def findMaxAC(af: Double, an: Int, ci: Double = .95): Int = {
if (af == 0)
0
else {
val quantile_limit = ci // ci for one-sided, 1-(1-ci)/2 for two-sided
val max_ac = qpois(quantile_limit, an * af)
max_ac
}
}
/**
 * Filtering allele frequency: root-finds af such that
 * ac - 1 - qpois(ci, an * af) = 0, rounds the root, then steps up in
 * `precision` increments until the frequency supports `ac`, backing off one
 * step at the end.
 */
def calcFilterAlleleFreq(ac: Int, an: Int, ci: Double = .95, lower: Double = 1e-10, upper: Double = 2, tol: Double = 1e-7, precision: Double = 1e-6): Double = {
if (ac <= 1 || an == 0) // FAF should not be calculated on singletons
0.0
else {
// NOTE(review): `f` is never reassigned and could be a val.
var f = (af: Double) => ac.toDouble - 1 - qpois(ci, an.toDouble * af)
val root = uniroot(f, lower, upper, tol)
// Round the root to precision/100 to suppress floating-point noise.
val rounder = 1d / (precision / 100d)
var max_af = math.round(root.getOrElse(0.0) * rounder) / rounder
while (findMaxAC(max_af, an, ci) < ac) {
max_af += precision
}
max_af - precision
}
}
/** Overload that pins the search bounds, tolerance and precision to defaults. */
def calcFilterAlleleFreq(ac: Int, an: Int, ci: Double): Double = calcFilterAlleleFreq(ac, an, ci, lower = 1e-10, upper = 2, tol = 1e-7, precision = 1e-6)
/**
 * Expectation-maximisation estimate of the four haplotype counts
 * (AB, Ab, aB, ab) from the 9 biallelic two-site genotype counts.
 * NOTE(review): the all-homozygous-reference early return below yields 9
 * elements while the EM path yields 4 — confirm callers handle both shapes.
 */
def haplotypeFreqEM(gtCounts : IndexedSeq[Int]) : IndexedSeq[Double] = {
assert(gtCounts.size == 9, "haplotypeFreqEM requires genotype counts for the 9 possible genotype combinations.")
val _gtCounts = new DenseVector(gtCounts.toArray)
val nSamples = sum(_gtCounts)
//Needs some non-ref samples to compute
if(_gtCounts(0) >= nSamples){ return FastIndexedSeq(_gtCounts(0),0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0)}
val nHaplotypes = 2.0*nSamples.toDouble
/**
 * Constant quantities for each of the different haplotypes:
 * n.AB => 2*n.AABB + n.AaBB + n.AABb
 * n.Ab => 2*n.AAbb + n.Aabb + n.AABb
 * n.aB => 2*n.aaBB + n.AaBB + n.aaBb
 * n.ab => 2*n.aabb + n.aaBb + n.Aabb
 */
val const_counts = new DenseVector(Array[Double](
2.0*_gtCounts(0) + _gtCounts(1) + _gtCounts(3), //n.AB
2.0*_gtCounts(6) + _gtCounts(3) + _gtCounts(7), //n.Ab
2.0*_gtCounts(2) + _gtCounts(1) + _gtCounts(5), //n.aB
2.0*_gtCounts(8) + _gtCounts(5) + _gtCounts(7) //n.ab
))
//Initial estimate with AaBb contributing equally to each haplotype
var p_next = (const_counts +:+ new DenseVector(Array.fill[Double](4)(_gtCounts(4)/2.0))) /:/ nHaplotypes
var p_cur = p_next +:+ 1.0
//EM
// Iterate until the haplotype frequency estimates converge; only the
// double-heterozygote count (index 4) is redistributed each round.
while(max(abs(p_next -:- p_cur)) > 1e-7){
p_cur = p_next
p_next = (const_counts +:+
(new DenseVector(Array[Double](
p_cur(0)*p_cur(3), //n.AB
p_cur(1)*p_cur(2), //n.Ab
p_cur(1)*p_cur(2), //n.aB
p_cur(0)*p_cur(3) //n.ab
)) * (_gtCounts(4) / ((p_cur(0)*p_cur(3))+(p_cur(1)*p_cur(2)))))
) / nHaplotypes
}
// Scale frequencies back to haplotype counts.
return (p_next *:* nHaplotypes).toArray.toFastIndexedSeq
}
}
| hail-is/hail | hail/src/main/scala/is/hail/experimental/package.scala | Scala | mit | 2,912 |
/*
*************************************************************************************
* Copyright 2013 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.api
import com.normation.utils.HashcodeCaching
import org.joda.time.DateTime
/**
 * Unique identifier of an API account.
 * Wrapped in a dedicated type to avoid mixing it up with other string IDs.
 */
final case class ApiAccountId(value:String) extends HashcodeCaching
/**
 * Human-readable name of the principal, used in event logs to record
 * who performed actions.
 */
final case class ApiAccountName(value:String) extends HashcodeCaching
/**
 * The actual authentication token.
 * A token is defined with [0-9a-zA-Z]{n}, with n not small
 * (see ApiToken.tokenRegex for the accepted length range).
 */
final case class ApiToken(value: String) extends HashcodeCaching
object ApiToken {
  // Accepted tokens: 12 to 128 alphanumeric characters.
  val tokenRegex = """[0-9a-zA-Z]{12,128}""".r

  /**
   * Validate user input and build an ApiToken from it (after trimming).
   * Returns None when the trimmed value does not match `tokenRegex`.
   *
   * Bug fix: `tokenRegex` declares no capture group, so the previous
   * pattern `case tokenRegex(v)` could never match (Regex.unapplySeq with
   * one binder requires exactly one group) and this method always returned
   * None. A zero-binder pattern `tokenRegex()` matches the whole string.
   */
  def buildCheckValue(value: String) : Option[ApiToken] = value.trim match {
    case tokenRegex() => Some(ApiToken(value.trim))
    case _ => None
  }
}
/**
 * An API principal: identity, credentials and lifecycle metadata
 * for one programmatic account.
 */
final case class ApiAccount(
    id                 : ApiAccountId
    //Authentication token. It is a mandatory value, and can't be ""
    //If a token should be revoked, use isEnabled = false.
  , name               : ApiAccountName //used in event log to know who did actions.
  , token              : ApiToken
  , description        : String
    // Disabled accounts keep their token but are refused authentication.
  , isEnabled          : Boolean
  , creationDate       : DateTime
    // Tracks when the token was last (re)generated, independently of account creation.
  , tokenGenerationDate: DateTime
) extends HashcodeCaching
| Kegeruneku/rudder | rudder-core/src/main/scala/com/normation/rudder/api/Account.scala | Scala | agpl-3.0 | 2,920 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.s2graph.core.storage.hbase
import java.util
import java.util.Base64
import com.stumbleupon.async.{Callback, Deferred}
import com.typesafe.config.Config
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{ConnectionFactory, Durability}
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding
import org.apache.hadoop.hbase.regionserver.BloomType
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.security.UserGroupInformation
import org.apache.s2graph.core._
import org.apache.s2graph.core.mysqls.LabelMeta
import org.apache.s2graph.core.storage._
import org.apache.s2graph.core.storage.hbase.AsynchbaseStorage.{AsyncRPC, ScanWithRange}
import org.apache.s2graph.core.types.{HBaseType, VertexId}
import org.apache.s2graph.core.utils._
import org.hbase.async.FilterList.Operator.MUST_PASS_ALL
import org.hbase.async._
import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer
import scala.concurrent._
import scala.concurrent.duration.Duration
import scala.util.Try
import scala.util.hashing.MurmurHash3
object AsynchbaseStorage {
  val vertexCf = Serializable.vertexCf
  val edgeCf = Serializable.edgeCf
  val emptyKVs = new util.ArrayList[KeyValue]()

  // Patch asynchbase internals once, before any client is constructed.
  AsynchbasePatcher.init()

  /**
   * Build an asynchbase HBaseClient from a typesafe Config.
   *
   * Every "hbase.*" key of `config` is copied into the asynchbase config;
   * `overrideKv` pairs are applied afterwards and therefore win.
   * When "hbase.security.auth.enable" is true, the Kerberos system
   * properties are installed before the client configuration is built.
   * (The original code created the same `new org.hbase.async.Config()` in
   * both branches of the security check; the construction is now factored
   * out, with identical behavior.)
   */
  def makeClient(config: Config, overrideKv: (String, String)*) = {
    if (config.hasPath("hbase.security.auth.enable") && config.getBoolean("hbase.security.auth.enable")) {
      // Kerberos setup: system properties must be in place before the client starts.
      System.setProperty("java.security.krb5.conf", config.getString("java.security.krb5.conf"))
      System.setProperty("java.security.auth.login.config", config.getString("java.security.auth.login.config"))
    }
    val asyncConfig: org.hbase.async.Config = new org.hbase.async.Config()

    for (entry <- config.entrySet() if entry.getKey.contains("hbase")) {
      asyncConfig.overrideConfig(entry.getKey, entry.getValue.unwrapped().toString)
    }
    for ((k, v) <- overrideKv) {
      asyncConfig.overrideConfig(k, v)
    }

    val client = new HBaseClient(asyncConfig)
    logger.info(s"Asynchbase: ${client.getConfig.dumpConfiguration()}")
    client
  }

  /** A scanner plus the (offset, limit) row window to read from it. */
  case class ScanWithRange(scan: Scanner, offset: Int, limit: Int)

  /** Either a point GetRequest or a ranged scan. */
  type AsyncRPC = Either[GetRequest, ScanWithRange]
}
class AsynchbaseStorage(override val graph: S2Graph,
                        override val config: Config)(implicit ec: ExecutionContext)
  extends Storage[AsyncRPC, Deferred[StepResult]](graph, config) {

  import Extensions.DeferOps

  /**
   * Asynchbase client setup.
   * note that we need two client, one for bulk(withWait=false) and another for withWait=true
   */
  private val clientFlushInterval = config.getInt("hbase.rpcs.buffered_flush_interval").toString().toShort

  /**
   * since some runtime environment such as spark cluster has issue with guava version, that is used in Asynchbase.
   * to fix version conflict, make this as lazy val for clients that don't require hbase client.
   */
  lazy val client = AsynchbaseStorage.makeClient(config)
  lazy val clientWithFlush = AsynchbaseStorage.makeClient(config, "hbase.rpcs.buffered_flush_interval" -> "0")
  lazy val clients = Seq(client, clientWithFlush)

  // Reusable empty results, used as fallbacks when converting Deferreds to Futures.
  private val emptyKeyValues = new util.ArrayList[KeyValue]()
  private val emptyStepResult = new util.ArrayList[StepResult]()

  // Pick the buffered client (withWait=false) or the flush-immediately client (withWait=true).
  private def client(withWait: Boolean): HBaseClient = if (withWait) clientWithFlush else client

  import CanDefer._

  /** Future Cache to squash request */
  lazy private val futureCache = new DeferCache[StepResult, Deferred, Deferred](config, StepResult.Empty, "AsyncHbaseFutureCache", useMetric = true)

  /** Simple Vertex Cache */
  lazy private val vertexCache = new DeferCache[Seq[SKeyValue], Promise, Future](config, Seq.empty[SKeyValue])

  private val zkQuorum = config.getString("hbase.zookeeper.quorum")
  // Optional slave cluster: when present, DDL operations are mirrored to it.
  private val zkQuorumSlave =
    if (config.hasPath("hbase.slave.zookeeper.quorum")) Option(config.getString("hbase.slave.zookeeper.quorum"))
    else None

  /** v4 max next row size */
  private val v4_max_num_rows = 10000
  // Clamp a requested row count to the v4 scanner page-size cap.
  private def getV4MaxNumRows(limit : Int): Int = {
    if (limit < v4_max_num_rows) limit
    else v4_max_num_rows
  }

  /**
   * fire rpcs into proper hbase cluster using client and
   * return true on all mutation success. otherwise return false.
   */
  override def writeToStorage(cluster: String, kvs: Seq[SKeyValue], withWait: Boolean): Future[Boolean] = {
    if (kvs.isEmpty) Future.successful(true)
    else {
      val _client = client(withWait)
      val (increments, putAndDeletes) = kvs.partition(_.operation == SKeyValue.Increment)

      /** Asynchbase IncrementRequest does not implement HasQualifiers */
      val incrementsFutures = increments.map { kv =>
        val inc = new AtomicIncrementRequest(kv.table, kv.row, kv.cf, kv.qualifier, Bytes.toLong(kv.value))
        val defer = _client.atomicIncrement(inc)
        val future = defer.toFuture(Long.box(0)).map(_ => true).recover { case ex: Exception =>
          logger.error(s"mutation failed. $kv", ex)
          false
        }
        // Fire-and-forget when withWait is false: result is not awaited.
        if (withWait) future else Future.successful(true)
      }

      /** PutRequest and DeleteRequest accept byte[][] qualifiers/values. */
      // Group per (table, row, cf, op, ts) so each group becomes one batched RPC.
      val othersFutures = putAndDeletes.groupBy { kv =>
        (kv.table.toSeq, kv.row.toSeq, kv.cf.toSeq, kv.operation, kv.timestamp)
      }.map { case ((table, row, cf, operation, timestamp), groupedKeyValues) =>
        val durability = groupedKeyValues.head.durability
        val qualifiers = new ArrayBuffer[Array[Byte]]()
        val values = new ArrayBuffer[Array[Byte]]()

        groupedKeyValues.foreach { kv =>
          if (kv.qualifier != null) qualifiers += kv.qualifier
          if (kv.value != null) values += kv.value
        }

        val defer = operation match {
          case SKeyValue.Put =>
            val put = new PutRequest(table.toArray, row.toArray, cf.toArray, qualifiers.toArray, values.toArray, timestamp)
            put.setDurable(durability)
            _client.put(put)
          case SKeyValue.Delete =>
            // Empty qualifier list means "delete the whole row/cf at this timestamp".
            val delete =
              if (qualifiers.isEmpty)
                new DeleteRequest(table.toArray, row.toArray, cf.toArray, timestamp)
              else
                new DeleteRequest(table.toArray, row.toArray, cf.toArray, qualifiers.toArray, timestamp)
            delete.setDurable(durability)
            _client.delete(delete)
        }
        if (withWait) {
          defer.toFuture(new AnyRef()).map(_ => true).recover { case ex: Exception =>
            groupedKeyValues.foreach { kv => logger.error(s"mutation failed. $kv", ex) }
            false
          }
        } else Future.successful(true)
      }

      for {
        incrementRets <- Future.sequence(incrementsFutures)
        otherRets <- Future.sequence(othersFutures)
      } yield (incrementRets ++ otherRets).forall(identity)
    }
  }

  /** Run the RPC and convert the returned asynchbase KeyValues into SKeyValues. */
  private def fetchKeyValues(rpc: AsyncRPC): Future[Seq[SKeyValue]] = {
    val defer = fetchKeyValuesInner(rpc)
    defer.toFuture(emptyKeyValues).map { kvsArr =>
      kvsArr.map { kv =>
        implicitly[CanSKeyValue[KeyValue]].toSKeyValue(kv)
      }
    }
  }

  /** Fetch the raw KeyValues backing the snapshot edge of this query request. */
  override def fetchSnapshotEdgeKeyValues(queryRequest: QueryRequest): Future[Seq[SKeyValue]] = {
    val edge = toRequestEdge(queryRequest, Nil)
    val rpc = buildRequest(queryRequest, edge)
    fetchKeyValues(rpc)
  }

  /**
   * since HBase natively provide CheckAndSet on storage level, implementation becomes simple.
   * @param rpc: key value that is need to be stored on storage.
   * @param expectedOpt: last valid value for rpc's KeyValue.value from fetching.
   * @return return true if expected value matches and our rpc is successfully applied, otherwise false.
   *         note that when some other thread modified same cell and have different value on this KeyValue,
   *         then HBase atomically return false.
   */
  override def writeLock(rpc: SKeyValue, expectedOpt: Option[SKeyValue]): Future[Boolean] = {
    val put = new PutRequest(rpc.table, rpc.row, rpc.cf, rpc.qualifier, rpc.value, rpc.timestamp)
    // Absent expected value means "cell must not exist yet" (empty byte array).
    val expected = expectedOpt.map(_.value).getOrElse(Array.empty)
    client(withWait = true).compareAndSet(put, expected).map(true.booleanValue())(ret => ret.booleanValue()).toFuture(true)
  }

  /**
   * given queryRequest, build storage specific RPC Request.
   * In HBase case, we either build Scanner or GetRequest.
   *
   * IndexEdge layer:
   *    Tall schema(v4): use scanner.
   *    Wide schema(label's schema version in v1, v2, v3): use GetRequest with columnRangeFilter
   *                                                       when query is given with itnerval option.
   * SnapshotEdge layer:
   *    Tall schema(v3, v4): use GetRequest without column filter.
   *    Wide schema(label's schema version in v1, v2): use GetRequest with columnRangeFilter.
   * Vertex layer:
   *    all version: use GetRequest without column filter.
   * @param queryRequest
   * @return Scanner or GetRequest with proper setup with StartKey, EndKey, RangeFilter.
   */
  override def buildRequest(queryRequest: QueryRequest, edge: S2Edge): AsyncRPC = {
    import Serializable._
    val queryParam = queryRequest.queryParam
    val label = queryParam.label

    // Target vertex given -> snapshot edge lookup; otherwise index edge lookup.
    val serializer = if (queryParam.tgtVertexInnerIdOpt.isDefined) {
      val snapshotEdge = edge.toSnapshotEdge
      snapshotEdgeSerializer(snapshotEdge)
    } else {
      val indexEdge = edge.toIndexEdge(queryParam.labelOrderSeq)
      indexEdgeSerializer(indexEdge)
    }

    val rowKey = serializer.toRowKey
    val (minTs, maxTs) = queryParam.durationOpt.getOrElse((0L, Long.MaxValue))

    val (intervalMaxBytes, intervalMinBytes) = queryParam.buildInterval(Option(edge))

    label.schemaVersion match {
      case HBaseType.VERSION4 if queryParam.tgtVertexInnerIdOpt.isEmpty =>
        val scanner = AsynchbasePatcher.newScanner(client, label.hbaseTableName)
        scanner.setFamily(edgeCf)

        /*
         * TODO: remove this part.
         */
        val indexEdgeOpt = edge.edgesWithIndex.find(edgeWithIndex => edgeWithIndex.labelIndex.seq == queryParam.labelOrderSeq)
        val indexEdge = indexEdgeOpt.getOrElse(throw new RuntimeException(s"Can`t find index for query $queryParam"))

        val srcIdBytes = VertexId.toSourceVertexId(indexEdge.srcVertex.id).bytes
        val labelWithDirBytes = indexEdge.labelWithDir.bytes
        val labelIndexSeqWithIsInvertedBytes = StorageSerializable.labelOrderSeqWithIsInverted(indexEdge.labelIndexSeq, isInverted = false)
        val baseKey = Bytes.add(srcIdBytes, labelWithDirBytes, labelIndexSeqWithIsInvertedBytes)

        val (startKey, stopKey) =
          if (queryParam.intervalOpt.isDefined) {
            // interval is set.
            // A cursor (Base64-encoded row key) from a previous page overrides the computed start.
            val _startKey = queryParam.cursorOpt match {
              case Some(cursor) => Base64.getDecoder.decode(cursor)
              case None => Bytes.add(baseKey, intervalMaxBytes)
            }
            (_startKey , Bytes.add(baseKey, intervalMinBytes))
          } else {
            /**
             * note: since propsToBytes encode size of property map at first byte, we are sure about max value here
             */
            val _startKey = queryParam.cursorOpt match {
              case Some(cursor) => Base64.getDecoder.decode(cursor)
              case None => baseKey
            }
            (_startKey, Bytes.add(baseKey, Array.fill(1)(-1)))
          }

        scanner.setStartKey(startKey)
        scanner.setStopKey(stopKey)

        if (queryParam.limit == Int.MinValue) logger.debug(s"MinValue: $queryParam")

        scanner.setMaxVersions(1)
        // TODO: exclusive condition innerOffset with cursorOpt
        // With a cursor, offset was already consumed in previous pages.
        if (queryParam.cursorOpt.isDefined) {
          scanner.setMaxNumRows(getV4MaxNumRows(queryParam.limit))
        } else {
          scanner.setMaxNumRows(getV4MaxNumRows(queryParam.innerOffset + queryParam.innerLimit))
        }
        scanner.setMaxTimestamp(maxTs)
        scanner.setMinTimestamp(minTs)
        scanner.setRpcTimeout(queryParam.rpcTimeout)

        // SET option for this rpc properly.
        if (queryParam.cursorOpt.isDefined) Right(ScanWithRange(scanner, 0, queryParam.limit))
        else Right(ScanWithRange(scanner, 0, queryParam.innerOffset + queryParam.innerLimit))

      case _ =>
        // Wide schema (or snapshot edge): single-row GetRequest with filters.
        val get = if (queryParam.tgtVertexInnerIdOpt.isDefined) {
          new GetRequest(label.hbaseTableName.getBytes, rowKey, edgeCf, serializer.toQualifier)
        } else {
          new GetRequest(label.hbaseTableName.getBytes, rowKey, edgeCf)
        }

        get.maxVersions(1)
        get.setFailfast(true)
        get.setMinTimestamp(minTs)
        get.setMaxTimestamp(maxTs)
        get.setTimeout(queryParam.rpcTimeout)

        val pagination = new ColumnPaginationFilter(queryParam.limit, queryParam.offset)
        val columnRangeFilterOpt = queryParam.intervalOpt.map { interval =>
          new ColumnRangeFilter(intervalMaxBytes, true, intervalMinBytes, true)
        }
        get.setFilter(new FilterList(pagination +: columnRangeFilterOpt.toSeq, MUST_PASS_ALL))
        Left(get)
    }
  }

  /**
   * we are using future cache to squash requests into same key on storage.
   *
   * @param queryRequest
   * @param isInnerCall
   * @param parentEdges
   * @return we use Deferred here since it has much better performrance compared to scala.concurrent.Future.
   *         seems like map, flatMap on scala.concurrent.Future is slower than Deferred's addCallback
   */
  override def fetch(queryRequest: QueryRequest,
                     isInnerCall: Boolean,
                     parentEdges: Seq[EdgeWithScore]): Deferred[StepResult] = {

    def fetchInner(hbaseRpc: AsyncRPC): Deferred[StepResult] = {
      val prevStepScore = queryRequest.prevStepScore
      val fallbackFn: (Exception => StepResult) = { ex =>
        logger.error(s"fetchInner failed. fallback return. $hbaseRpc}", ex)
        StepResult.Failure
      }

      val queryParam = queryRequest.queryParam
      fetchKeyValuesInner(hbaseRpc).mapWithFallback(emptyKeyValues)(fallbackFn) { kvs =>
        // v4 rows were already range-limited by the scanner; others take all kvs.
        val (startOffset, len) = queryParam.label.schemaVersion match {
          case HBaseType.VERSION4 =>
            val offset = if (queryParam.cursorOpt.isDefined) 0 else queryParam.offset
            (offset, queryParam.limit)
          case _ => (0, kvs.length)
        }

        toEdges(kvs, queryRequest, prevStepScore, isInnerCall, parentEdges, startOffset, len)
      }
    }

    val queryParam = queryRequest.queryParam
    val cacheTTL = queryParam.cacheTTLInMillis
    /** with version 4, request's type is (Scanner, (Int, Int)). otherwise GetRequest. */
    val edge = toRequestEdge(queryRequest, parentEdges)
    val request = buildRequest(queryRequest, edge)

    // Cache key includes the interval bytes so different ranges do not collide.
    val (intervalMaxBytes, intervalMinBytes) = queryParam.buildInterval(Option(edge))
    val requestCacheKey = Bytes.add(toCacheKeyBytes(request), intervalMaxBytes, intervalMinBytes)

    if (cacheTTL <= 0) fetchInner(request)
    else {
      val cacheKeyBytes = Bytes.add(queryRequest.query.queryOption.cacheKeyBytes, requestCacheKey)

      //      val cacheKeyBytes = toCacheKeyBytes(request)
      val cacheKey = queryParam.toCacheKey(cacheKeyBytes)
      futureCache.getOrElseUpdate(cacheKey, cacheTTL)(fetchInner(request))
    }
  }

  /** Fan out one fetch per QueryRequest and collect the StepResults in order. */
  override def fetches(queryRequests: Seq[QueryRequest],
                       prevStepEdges: Map[VertexId, Seq[EdgeWithScore]]): Future[Seq[StepResult]] = {
    val defers: Seq[Deferred[StepResult]] = for {
      queryRequest <- queryRequests
    } yield {
      val queryOption = queryRequest.query.queryOption
      val queryParam = queryRequest.queryParam
      // Parent edges are only materialized when the query actually needs them.
      val shouldBuildParents = queryOption.returnTree || queryParam.whereHasParent
      val parentEdges = if (shouldBuildParents) prevStepEdges.getOrElse(queryRequest.vertex.id, Nil) else Nil
      fetch(queryRequest, isInnerCall = false, parentEdges)
    }

    val grouped: Deferred[util.ArrayList[StepResult]] = Deferred.groupInOrder(defers)
    grouped.map(emptyStepResult) { queryResults: util.ArrayList[StepResult] =>
      queryResults.toSeq
    }.toFuture(emptyStepResult)
  }

  /** Fetch the raw KeyValues for the vertex addressed by this QueryRequest. */
  def fetchVertexKeyValues(request: QueryRequest): Future[Seq[SKeyValue]] = {
    val edge = toRequestEdge(request, Nil)
    fetchKeyValues(buildRequest(request, edge))
  }

  /** Overload taking an already-built RPC. */
  def fetchVertexKeyValues(request: AsyncRPC): Future[Seq[SKeyValue]] = fetchKeyValues(request)

  /**
   * when withWait is given, we use client with flushInterval set to 0.
   * if we are not using this, then we are adding extra wait time as much as flushInterval in worst case.
   *
   * @param edges
   * @param withWait
   * @return
   */
  override def incrementCounts(edges: Seq[S2Edge], withWait: Boolean): Future[Seq[(Boolean, Long, Long)]] = {
    val _client = client(withWait)
    val defers: Seq[Deferred[(Boolean, Long, Long)]] = for {
      edge <- edges
    } yield {
      val futures: List[Deferred[(Boolean, Long, Long)]] = for {
        relEdge <- edge.relatedEdges
        edgeWithIndex <- relEdge.edgesWithIndexValid
      } yield {
        val countWithTs = edge.propertyValueInner(LabelMeta.count)
        val countVal = countWithTs.innerVal.toString().toLong
        val kv = buildIncrementsCountAsync(edgeWithIndex, countVal).head
        val request = new AtomicIncrementRequest(kv.table, kv.row, kv.cf, kv.qualifier, Bytes.toLong(kv.value))
        val fallbackFn: (Exception => (Boolean, Long, Long)) = { ex =>
          logger.error(s"mutation failed. $request", ex)
          (false, -1L, -1L)
        }
        // Result tuple: (success, value after increment, increment amount).
        val defer = _client.bufferAtomicIncrement(request).mapWithFallback(0L)(fallbackFn) { resultCount: java.lang.Long =>
          (true, resultCount.longValue(), countVal)
        }
        if (withWait) defer
        else Deferred.fromResult((true, -1L, -1L))
      }

      // NOTE(review): only the head result of each edge's related increments is kept — confirm intended.
      val grouped: Deferred[util.ArrayList[(Boolean, Long, Long)]] = Deferred.group(futures)
      grouped.map(new util.ArrayList[(Boolean, Long, Long)]()) { resultLs => resultLs.head }
    }

    val grouped: Deferred[util.ArrayList[(Boolean, Long, Long)]] = Deferred.groupInOrder(defers)
    grouped.toFuture(new util.ArrayList[(Boolean, Long, Long)]()).map(_.toSeq)
  }

  // Flush both clients, blocking up to a multiple of the configured flush interval.
  // NOTE(review): super.flush() is invoked once per client inside the loop — confirm intended.
  override def flush(): Unit = clients.foreach { client =>
    super.flush()
    val timeout = Duration((clientFlushInterval + 10) * 20, duration.MILLISECONDS)
    Await.result(client.flush().toFuture(new AnyRef), timeout)
  }

  // Create the table (pre-split into regionCount regions) on master and, if
  // configured, on the slave cluster as well. Existing tables are left untouched.
  override def createTable(_zkAddr: String,
                           tableName: String,
                           cfs: List[String],
                           regionMultiplier: Int,
                           ttl: Option[Int],
                           compressionAlgorithm: String,
                           replicationScopeOpt: Option[Int] = None,
                           totalRegionCount: Option[Int] = None): Unit = {
    /** TODO: Decide if we will allow each app server to connect to multiple hbase cluster */
    for {
      zkAddr <- Seq(zkQuorum) ++ zkQuorumSlave.toSeq
    } {
      logger.info(s"create table: $tableName on $zkAddr, $cfs, $regionMultiplier, $compressionAlgorithm")
      val admin = getAdmin(zkAddr)
      val regionCount = totalRegionCount.getOrElse(admin.getClusterStatus.getServersSize * regionMultiplier)
      try {
        if (!admin.tableExists(TableName.valueOf(tableName))) {
          val desc = new HTableDescriptor(TableName.valueOf(tableName))
          desc.setDurability(Durability.ASYNC_WAL)
          for (cf <- cfs) {
            val columnDesc = new HColumnDescriptor(cf)
              .setCompressionType(Algorithm.valueOf(compressionAlgorithm.toUpperCase))
              .setBloomFilterType(BloomType.ROW)
              .setDataBlockEncoding(DataBlockEncoding.FAST_DIFF)
              .setMaxVersions(1)
              .setTimeToLive(2147483647)
              .setMinVersions(0)
              .setBlocksize(32768)
              .setBlockCacheEnabled(true)
            if (ttl.isDefined) columnDesc.setTimeToLive(ttl.get)
            if (replicationScopeOpt.isDefined) columnDesc.setScope(replicationScopeOpt.get)
            desc.addFamily(columnDesc)
          }

          if (regionCount <= 1) admin.createTable(desc)
          else admin.createTable(desc, getStartKey(regionCount), getEndKey(regionCount), regionCount)
        } else {
          logger.info(s"$zkAddr, $tableName, $cfs already exist.")
        }
      } catch {
        case e: Throwable =>
          logger.error(s"$zkAddr, $tableName failed with $e", e)
          throw e
      } finally {
        admin.close()
        admin.getConnection.close()
      }
    }
  }

  /** Asynchbase implementation override default getVertices to use future Cache */
  override def getVertices(vertices: Seq[S2Vertex]): Future[Seq[S2Vertex]] = {
    def fromResult(kvs: Seq[SKeyValue],
                   version: String): Option[S2Vertex] = {
      if (kvs.isEmpty) None
      else vertexDeserializer.fromKeyValues(None, kvs, version, None)
      //        .map(S2Vertex(graph, _))
    }

    val futures = vertices.map { vertex =>
      val kvs = vertexSerializer(vertex).toKeyValues
      val get = new GetRequest(vertex.hbaseTableName.getBytes, kvs.head.row, Serializable.vertexCf)
      //      get.setTimeout(this.singleGetTimeout.toShort)
      get.setFailfast(true)
      get.maxVersions(1)

      // Cache keyed on the request's string form (MurmurHash3), TTL 10s.
      val cacheKey = MurmurHash3.stringHash(get.toString)
      vertexCache.getOrElseUpdate(cacheKey, cacheTTL = 10000)(fetchVertexKeyValues(Left(get))).map { kvs =>
        fromResult(kvs, vertex.serviceColumn.schemaVersion)
      }
    }

    Future.sequence(futures).map { result => result.toList.flatten }
  }

  /**
   * Paging callback for v4 scans: keeps calling scanner.nextRows() until
   * `limit` KeyValues past `offset` have been collected (or the scan ends),
   * then completes `defer` and closes the scanner.
   */
  class V4ResultHandler(scanner: Scanner, defer: Deferred[util.ArrayList[KeyValue]], offset: Int, limit : Int) extends Callback[Object, util.ArrayList[util.ArrayList[KeyValue]]] {
    val results = new util.ArrayList[KeyValue]()
    var offsetCount = 0

    override def call(kvsLs: util.ArrayList[util.ArrayList[KeyValue]]): Object = {
      try {
        if (kvsLs == null) {
          // End of scan: hand back whatever was accumulated.
          defer.callback(results)
          Try(scanner.close())
        } else {
          val curRet = new util.ArrayList[KeyValue]()

          kvsLs.foreach(curRet.addAll(_))
          val prevOffset = offsetCount
          offsetCount += curRet.size()

          // Drop rows still inside the offset window; keep the remainder.
          val nextRet = if(offsetCount > offset){
            if(prevOffset < offset ) {
              curRet.subList(offset - prevOffset, curRet.size())
            } else{
              curRet
            }
          } else{
            emptyKeyValues
          }

          val needCount = limit - results.size()
          if (needCount >= nextRet.size()) {
            results.addAll(nextRet)
          } else {
            results.addAll(nextRet.subList(0, needCount))
          }

          // Keep paging until the limit is reached.
          if (results.size() < limit) {
            scanner.nextRows().addCallback(this)
          } else {
            defer.callback(results)
            Try(scanner.close())
          }
        }
      } catch{
        case ex: Exception =>
          logger.error(s"fetchKeyValuesInner failed.", ex)
          defer.callback(ex)
          Try(scanner.close())
      }
    }
  }

  /**
   * Private Methods which is specific to Asynchbase implementation.
   */
  private def fetchKeyValuesInner(rpc: AsyncRPC): Deferred[util.ArrayList[KeyValue]] = {
    rpc match {
      case Left(get) => client.get(get)
      case Right(ScanWithRange(scanner, offset, limit)) =>
        val deferred = new Deferred[util.ArrayList[KeyValue]]()
        scanner.nextRows().addCallback(new V4ResultHandler(scanner, deferred, offset, limit))
        deferred
      case _ => Deferred.fromError(new RuntimeException(s"fetchKeyValues failed. $rpc"))
    }
  }

  // Derive a deterministic cache key from an RPC: row key for gets,
  // current scan position + range for scans.
  private def toCacheKeyBytes(hbaseRpc: AsyncRPC): Array[Byte] = {
    /** with version 4, request's type is (Scanner, (Int, Int)). otherwise GetRequest. */
    hbaseRpc match {
      case Left(getRequest) => getRequest.key
      case Right(ScanWithRange(scanner, offset, limit)) =>
        Bytes.add(scanner.getCurrentKey, Bytes.add(Bytes.toBytes(offset), Bytes.toBytes(limit)))
      case _ =>
        logger.error(s"toCacheKeyBytes failed. not supported class type. $hbaseRpc")
        throw new RuntimeException(s"toCacheKeyBytes: $hbaseRpc")
    }
  }

  // Build an HBase Admin for a Kerberos-secured cluster using keytab login.
  private def getSecureClusterAdmin(zkAddr: String) = {
    val jaas = config.getString("java.security.auth.login.config")
    val krb5Conf = config.getString("java.security.krb5.conf")
    val realm = config.getString("realm")
    val principal = config.getString("principal")
    val keytab = config.getString("keytab")

    System.setProperty("java.security.auth.login.config", jaas)
    System.setProperty("java.security.krb5.conf", krb5Conf)
    // System.setProperty("sun.security.krb5.debug", "true")
    // System.setProperty("sun.security.spnego.debug", "true")
    val conf = new Configuration(true)
    val hConf = HBaseConfiguration.create(conf)

    hConf.set("hbase.zookeeper.quorum", zkAddr)

    hConf.set("hadoop.security.authentication", "Kerberos")
    hConf.set("hbase.security.authentication", "Kerberos")
    hConf.set("hbase.master.kerberos.principal", "hbase/_HOST@" + realm)
    hConf.set("hbase.regionserver.kerberos.principal", "hbase/_HOST@" + realm)

    System.out.println("Connecting secure cluster, using keytab\n")
    UserGroupInformation.setConfiguration(hConf)
    UserGroupInformation.loginUserFromKeytab(principal, keytab)
    val currentUser = UserGroupInformation.getCurrentUser()
    System.out.println("current user : " + currentUser + "\n")

    // get table list
    val conn = ConnectionFactory.createConnection(hConf)
    conn.getAdmin
  }

  /**
   * following configuration need to come together to use secured hbase cluster.
   * 1. set hbase.security.auth.enable = true
   * 2. set file path to jaas file java.security.auth.login.config
   * 3. set file path to kerberos file java.security.krb5.conf
   * 4. set realm
   * 5. set principal
   * 6. set file path to keytab
   * @param zkAddr
   * @return
   */
  private def getAdmin(zkAddr: String) = {
    if (config.hasPath("hbase.security.auth.enable") && config.getBoolean("hbase.security.auth.enable")) {
      getSecureClusterAdmin(zkAddr)
    } else {
      val conf = HBaseConfiguration.create()
      conf.set("hbase.zookeeper.quorum", zkAddr)
      val conn = ConnectionFactory.createConnection(conf)
      conn.getAdmin
    }
  }

  // DDL helpers; each call builds a fresh Admin for the given quorum.
  private def enableTable(zkAddr: String, tableName: String) = {
    getAdmin(zkAddr).enableTable(TableName.valueOf(tableName))
  }

  private def disableTable(zkAddr: String, tableName: String) = {
    getAdmin(zkAddr).disableTable(TableName.valueOf(tableName))
  }

  private def dropTable(zkAddr: String, tableName: String) = {
    getAdmin(zkAddr).disableTable(TableName.valueOf(tableName))
    getAdmin(zkAddr).deleteTable(TableName.valueOf(tableName))
  }

  // Split keys for pre-splitting a table into regionCount regions.
  // NOTE(review): assumes row keys hash into [0, Int.MaxValue) — confirm against key encoding.
  private def getStartKey(regionCount: Int): Array[Byte] = {
    Bytes.toBytes((Int.MaxValue / regionCount))
  }

  private def getEndKey(regionCount: Int): Array[Byte] = {
    Bytes.toBytes((Int.MaxValue / regionCount * (regionCount - 1)))
  }
}
| daewon/incubator-s2graph | s2core/src/main/scala/org/apache/s2graph/core/storage/hbase/AsynchbaseStorage.scala | Scala | apache-2.0 | 28,300 |
package rewriting.rules
import ir._
import ir.ast._
import lift.arithmetic.SizeVar
import opencl.executor.{Execute, TestWithExecutor}
import opencl.ir._
import opencl.ir.pattern.ReduceSeq
import org.junit.Assert._
import org.junit.Test
import rewriting.{Lower, Rewrite}
// Companion object hooking this suite into the executor test harness
// (TestWithExecutor presumably manages executor setup/teardown — see that trait).
object TestInterchange extends TestWithExecutor
class TestInterchange {
// Symbolic array sizes used in the lambda type signatures.
private val N = SizeVar("N")
private val M = SizeVar("M")

// Randomized fixtures shared by all tests in this suite:
// a 256x256 matrix, a 256-element vector and a 16x16x16 cube of floats.
private val inputSize = 256
private val inputMatrix = Array.tabulate(inputSize, inputSize)((_, _) => util.Random.nextFloat())
private val inputArray = Array.tabulate(inputSize)(_ => util.Random.nextFloat())
private val input3DMatrix = Array.tabulate(16,16,16)((_, _, _) => util.Random.nextFloat())
/**
 * Check that lambda `g` (a rewritten form of `f`) is semantically equivalent
 * to `f`: both must type-check to the same type, and both, lowered to a
 * sequential implementation and executed on `inputs`, must produce the same
 * float results within a tolerance of 0.001.
 */
private def test(f: Lambda, g: Lambda, inputs: Any*): Unit = {
  TypeChecker(f)
  TypeChecker(g)
  assertEquals(f.body.t, g.body.t)

  val (resultF, _) = Execute()[Array[Float]](Lower.sequential(f), inputs: _*)
  val (resultG, _) = Execute()[Array[Float]](Lower.sequential(g), inputs: _*)

  assertArrayEquals(resultF, resultG, 0.001f)
}
@Test
def transposeBothSidesNotApplicable(): Unit = {
  // The rule must not fire when a Split sits between the two Maps.
  val g = fun(ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N),
    input => Map(Map(Map(plusOne)) o Split(2)) $ input
  )
  TypeChecker(g)
  assertFalse(InterchangeRules.transposeBothSides.rewrite.isDefinedAt(g.body))
}
@Test
def transposeBothSides(): Unit = {
  // Elementwise plusOne over a 2D array via nested Maps.
  val f = fun(ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N),
    input => Map(Map(plusOne)) $ input
  )
  // Apply transposeBothSides at node id 0 (the outer Map) and check
  // the rewritten expression produces the same result.
  val g = Rewrite.applyRuleAtId(f, 0, InterchangeRules.transposeBothSides)
  test(f, g, inputMatrix)
}
@Test
def mapMapTransposeWithZipInside(): Unit = {
  // Adds vector in2 to each row of in1; the Zip sits inside the inner Map.
  val f = fun(ArrayTypeWSWC(ArrayTypeWSWC(Float, M), N),
    ArrayTypeWSWC(Float, M),
    (in1, in2) => Map(fun(x => Map(fun(x => add(Get(x, 0), Get(x, 1)))) $ Zip(in2, x))) $ in1
  )
  // The rule is applied directly at the lambda body here (not by id).
  val g = Rewrite.applyRuleAt(f, f.body, InterchangeRules.mapMapTransposeZipInside)
  test(f, g, inputMatrix, inputArray)
}
@Test
def mapMapTransposeWithZipOutside(): Unit = {
  // Adds in2(i) to every element of row i; the Zip is outside the outer Map.
  val f = fun(ArrayTypeWSWC(ArrayTypeWSWC(Float, N), M),
    ArrayTypeWSWC(Float, M),
    (in1, in2) =>
      Map(fun(x =>
        Map(fun(y => add(y, Get(x, 1)))) $ Get(x, 0)
      )) $ Zip(in1, in2)
  )
  val g = Rewrite.applyRuleAtId(f, 0, InterchangeRules.mapMapTransposeZipOutside)
  test(f, g, inputMatrix, inputArray)
}
@Test
def mapReduceReduceNesting0(): Unit = {
  // Different f and init
  // PartRed uses mult/1.0f vs the outer ReduceSeq's add/0.0f, so the
  // mapReducePartialReduce rule must not be applicable.
  val f = fun(
    ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(Float, N), N), N),
    input => Map(ReduceSeq(add, 0.0f) o Join() o Map(PartRed(mult, 1.0f))) $ input
  )
  TypeChecker(f)
  assertFalse(InterchangeRules.mapReducePartialReduce.isDefinedAt(f.body))
}
@Test
def mapReduceReduceNesting1(): Unit = {
  // Different f and init
  // Same mismatch as nesting0, but with a full Reduce instead of PartRed.
  val f = fun(
    ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(Float, N), N), N),
    input => Map(ReduceSeq(add, 0.0f) o Join() o Map(Reduce(mult, 1.0f))) $ input
  )
  TypeChecker(f)
  assertFalse(InterchangeRules.mapReducePartialReduce.isDefinedAt(f.body))
}
@Test
def mapReduceReduceNesting2(): Unit = {
  // Different init
  // Same operator (add) but mismatched initial values (0.0f vs 1.0f).
  val f = fun(
    ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(Float, N), N), N),
    input => Map(ReduceSeq(add, 0.0f) o Join() o Map(Reduce(add, 1.0f))) $ input
  )
  TypeChecker(f)
  assertFalse(InterchangeRules.mapReducePartialReduce.isDefinedAt(f.body))
}
@Test
def mapReduceReduceNesting3(): Unit = {
  // Different init
  // Same operator (add) but mismatched initial values, PartRed variant.
  val f = fun(
    ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(Float, N), N), N),
    input => Map(ReduceSeq(add, 0.0f) o Join() o Map(PartRed(add, 1.0f))) $ input
  )
  TypeChecker(f)
  assertFalse(InterchangeRules.mapReducePartialReduce.isDefinedAt(f.body))
}
@Test
def nestPartialReduceInReduce(): Unit = {
  // Matching f and init on both reduce stages: the rule applies and the
  // rewritten program must compute the same result on the 16^3 cube.
  val f = fun(
    ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(Float, 16), 16), 16),
    a => Map( ReduceSeq(add, 0.0f) o Join() o Map(PartRed(fun((x, y) => add(x, y)), 0.0f)) ) $ a)
  val g = Rewrite.applyRuleAtId(f, 0, InterchangeRules.mapReducePartialReduce)
  test(f, g, input3DMatrix)
}
@Test
def mapReduceInterchange0(): Unit = {
  val small2DMatrix = Array.tabulate(16, 8)((_, _) => util.Random.nextFloat())
  val f = fun(ArrayTypeWSWC(ArrayTypeWSWC(Float, 8), 16),
    input => Map(Reduce(add, 0.0f)) $ input
  )
  val g = Rewrite.applyRuleAtId(f, 0, InterchangeRules.mapReduceInterchange)
  // After the interchange the Reduce must be the outermost pattern.
  assertTrue(g.body.asInstanceOf[FunCall].args.head.asInstanceOf[FunCall].f.isInstanceOf[Reduce])
  test(f, g, small2DMatrix)
}
@Test
def mapReduceInterchange1(): Unit = {
val small2DMatrix = Array.tabulate(16, 8)((_, _) => util.Random.nextFloat())
val f = fun(ArrayTypeWSWC(ArrayTypeWSWC(Float, 8), 16),
input => Map(ReduceSeq(add, 0.0f)) $ input
)
val g = Rewrite.applyRuleAtId(f, 0, InterchangeRules.mapReduceInterchange)
assertTrue(g.body.asInstanceOf[FunCall].args.head.asInstanceOf[FunCall].f.isInstanceOf[ReduceSeq])
test(f, g, small2DMatrix)
}
@Test
def mapReduceInterchangeWithZipOutside0(): Unit = {
// TODO: Reduce
}
@Test
def mapReduceInterchangeWithZipOutside1(): Unit = {
// TODO: ReduceSeq
}
// Hand-written expected rewrite: when both components of the zipped pair are
// used inside the inner map, the ZipInside transpose rule must project each
// component (Map(Get(0)), Map(Get(1))) and transpose before re-zipping.
// Note: the inner `fun(x => ...)` binders shadow the outer `x`.
@Test
def mapMapZipInsideUsedTwice(): Unit = {
val f = fun(
ArrayType(ArrayType(Float, N), M),
in1 =>
Map(fun(y =>
Map(fun(x =>
Map(fun(x =>
add(Get(x, 0), Get(x, 1))
)) $ Zip(Get(x, 0), Get(x, 1))
)) $ y
)) o Split(256) $ Zip(in1, in1)
)
// Expected result of applying mapMapTransposeZipInside, written by hand.
val g = fun(
ArrayType(ArrayType(Float, N), M),
in1 =>
Map(fun(y =>
TransposeW() o
Map(fun(x =>
Map(fun(x =>
add(Get(x, 0), Get(x, 1))
)) $ Zip(Get(x, 0), Get(x, 1))
)) $ Zip(Transpose() o Map(Get(0)) $ y, Transpose() o Map(Get(1)) $ y)
)) o Split(256) $ Zip(in1, in1)
)
test(f, g, inputMatrix)
}
// Same program as above, but the rewrite is produced by applying the rule
// at expression id 5 instead of being written by hand.
@Test
def mapMapZipInsideUsedTwiceUsingRule(): Unit = {
val f = fun(
ArrayType(ArrayType(Float, N), M),
in1 =>
Map(fun(y =>
Map(fun(x =>
Map(fun(x =>
add(Get(x, 0), Get(x, 1))
)) $ Zip(Get(x, 0), Get(x, 1))
)) $ y
)) o Split(256) $ Zip(in1, in1)
)
val g = Rewrite.applyRuleAtId(f, 5, InterchangeRules.mapMapTransposeZipInside)
test(f, g, inputMatrix)
}
// As mapMapZipInsideUsedTwice, but the inner Zip also carries an extra
// array argument that the rule must keep zipped on the outside.
@Test
def mapMapZipInsideUsedTwicePlusExtra(): Unit = {
val f = fun(
ArrayType(ArrayType(Float, N), M),
ArrayType(Float, N),
(matrix, array) =>
Map(fun(y =>
Map(fun(x =>
Map(fun(x =>
add(Get(x, 0), mult(Get(x, 1), Get(x, 2)))
)) $ Zip(Get(x, 0), Get(x, 1), array)
)) $ y
)) o Split(256) $ Zip(matrix, matrix)
)
// Expected rewrite: the extra array moves to the outer Zip, and the outer
// binder is renamed to outX to keep the inner pair access unambiguous.
val g = fun(
ArrayType(ArrayType(Float, N), M),
ArrayType(Float, N),
(matrix, array) =>
Map(fun(y =>
TransposeW() o
Map(fun(outX =>
Map(fun(x =>
add(Get(x, 0), mult(Get(x, 1), Get(outX, 2)))
)) $ Zip(Get(outX, 0), Get(outX, 1))
)) $ Zip(Transpose() o Map(Get(0)) $ y, Transpose() o Map(Get(1)) $ y, array)
)) o Split(256) $ Zip(matrix, matrix)
)
test(f, g, inputMatrix, inputArray)
}
// Rule-driven version of mapMapZipInsideUsedTwicePlusExtra: the rewrite is
// produced by applying mapMapTransposeZipInside at expression id 5.
@Test
def mapMapZipInsideUsedTwicePlusExtraUsingRule(): Unit = {
val f = fun(
ArrayType(ArrayType(Float, N), M),
ArrayType(Float, N),
(matrix, array) =>
Map(fun(y =>
Map(fun(x =>
Map(fun(x =>
add(Get(x, 0), mult(Get(x, 1), Get(x, 2)))
)) $ Zip(Get(x, 0), Get(x, 1), array)
)) $ y
)) o Split(256) $ Zip(matrix, matrix)
)
val g = Rewrite.applyRuleAtId(f, 5, InterchangeRules.mapMapTransposeZipInside)
test(f, g, inputMatrix, inputArray)
}
// The same row is zipped with a reordered (reversed) view of itself plus an
// extra array; the rule applied at id 3 must preserve the results.
@Test
def mapMapZipInsideUsedTwiceWithReorder(): Unit = {
val f = fun(
ArrayType(ArrayType(Float, N), M),
ArrayType(Float, N),
(matrix, array) =>
Map(fun(y =>
Map(fun(x =>
Map(fun(x =>
add(Get(x, 0), mult(Get(x, 1), Get(x, 2)))
)) $ Zip(x, Gather(reverse) $ x, array)
)) $ y
)) o Split(256) $ matrix
)
val g = Rewrite.applyRuleAtId(f, 3, InterchangeRules.mapMapTransposeZipInside)
test(f, g, inputMatrix, inputArray)
}
}
| lift-project/lift | src/test/rewriting/rules/TestInterchange.scala | Scala | mit | 8,548 |
import scala.util.Random
class Coin {

  /** Result of the most recent flip ("Heads" or "Tails"); empty before the first flip. */
  var coinOption: String = ""

  // One RNG per coin instead of allocating a fresh, time-seeded Random on
  // every call, which is wasteful and weakens randomness for rapid flips.
  private val rng = new Random

  /**
   * Flips the coin: stores the outcome in `coinOption`, prints it, and returns it.
   *
   * @return "Heads" or "Tails", chosen uniformly at random
   */
  def getCoinOption: String = {
    coinOption = if (rng.nextInt(2) == 0) "Heads" else "Tails"
    print(s"Coin got: $coinOption\n")
    coinOption
  }
}
| cirquit/Personal-Repository | Scala/coinflip-0.1/src/main/scala/Coin.scala | Scala | mit | 262 |
package vanadis.modules.examples.scalacalc.calcservices
/** Calculator service interface for the scalacalc example modules. */
trait Adder {
/** Combines the given ints (by name, presumably their sum — implementations live elsewhere). */
def add(args: Array[Int]) : Int
}
object Dict {

  // English -> Polish noun dictionary.
  private val nounTable = Map(
    "dog" -> "pies",
    "house" -> "dom"
  )

  // English -> Polish colour dictionary.
  private val colorTable = Map(
    "red" -> "czerwony",
    "green" -> "zielony"
  )

  /** Translates a noun, or None when the word is unknown. */
  def nouns(word: String): Option[String] = nounTable.get(word)

  /** Translates a colour name, or None when the word is unknown. */
  def colors(word: String): Option[String] = colorTable.get(word)

  /** Chained lookup over nouns and colors (see Chain.chain). */
  val translate = Chain.chain(nouns, colors)
}
object Chain {

  /**
   * Chains partial lookup functions: applies each function in order and
   * returns the first defined result.
   *
   * @param fs lookup functions, tried left to right
   * @return a function yielding the first Some produced, or None if every
   *         function returns None
   */
  def chain[A, B](fs: (A => Option[B])*): A => Option[B] =
    // Iterator keeps the search lazy: later functions are not evaluated
    // once a match has been found.
    a => fs.iterator.map(f => f(a)).collectFirst { case Some(b) => b }
}
| grzegorzbalcerek/scala-exercises | Chain/Chain.scala | Scala | bsd-2-clause | 435 |
package org.odfi.wsb.fwapp.module.semantic
/**
 * View helpers that render Semantic UI menus from (label -> target) maps.
 * String values become links; nested Map values become sub-menus.
 */
trait SemanticMenuView extends SemanticView {
/*
 * Renders a vertical menu. Each entry is produced by the recursive
 * makeItem helper below.
 */
def semanticVerticalRightPointingMenu(content: Map[String, Any]) = {
// Renders one (label, target) entry; recurses for nested Map values.
def makeItem(value: (String, Any)): Unit = {
value._2 match {
case link: String =>
"item" :: div {
"header" :: a(link)(text(value._1))
}
//"item" :: a(link)(text(value._1))
case sub: Map[_, _] =>
"item" :: div {
// Header
"header" :: value._1
// sub menu
"menu" :: div {
// Items
sub.foreach {
case (k, v) =>
makeItem(k.toString -> v)
}
}
}
case other =>
// Fallback: use the value's string form as the link target.
"item" :: a(value._2.toString)(text(value._1))
}
}
"ui fluid vertical menu" :: div {
content.foreach {
case v => makeItem(v)
}
}
}
/*
 * Renders a horizontal pointing menu from the same kind of map.
 */
def semanticHorizontalBottomPointingMenu(content: Map[String, Any]) = {
def makeItem(value: (String, Any)): Unit = {
value._2 match {
case link: String =>
"item" :: a(link)(text(value._1))
case sub: Map[_, _] =>
"ui menu" :: div {
sub.foreach {
// NOTE(review): unlike the vertical variant, this passes the whole
// (key, value) pair as both the label (via toString) and the value;
// confirm whether `makeItem(k.toString -> v)` was intended here.
case v => makeItem((v.toString, v))
}
}
case other =>
"item" :: a(value._2.toString)(text(value._1))
}
}
"ui fluid pointing menu" :: div {
content.foreach {
case v => makeItem(v)
}
}
}
}
package toplev
import scala.collection.mutable.{Map,HashMap,HashSet}
import exceptions._
/* This is a generic type environment. It provides
*
* Note that the map is inherently mutable.
*
* The parent stores the parent type environment,
* to allow for variable name overloading. Note
* that since typechecking has already occured, we
* can assume that all variables identifiers within
* a nesting level are unique.
*
* We require that this class is an instance of TypeEnvClass.
*/
abstract class GenericTypeEnv[TypeEnvClass <: GenericTypeEnv[TypeEnvClass,
From, To],
From <: GenericPrintable,
To <: GenericPrintable with GenericType[To]]
(var parent: Option[GenericTypeEnv[TypeEnvClass, From, To]]) {
/* Constructs a top-level environment (one with no parent). */
def this() = this(None)
/* Every type in this map can be used either as a type quantified
* here or a type quantified elsewhere.
*
* Types that are quantified at this level of the type environment
* are returned by typeCloning all of the subtypes in that type.
*/
private val map: Map[From, (To, Option[GenericTypeSet[To]])] =
HashMap[From, (To, Option[GenericTypeSet[To]])]()
/* Debug rendering of the innermost level only: "name: type" per binding. */
def prettyPrint = """
%s
""".format(map.map(pair => pair._1.prettyPrint + ": " +
pair._2._1.prettyPrint).mkString("\\n "))
/* True if `id` is bound here or in any ancestor environment. */
def hasType(id: From): Boolean =
map.contains(id) || parent.map(_.hasType(id)).getOrElse(false)
/* Searchs parents up unil (but not including) the passed
* bound. If the type if found, return true. Otherwise, return
* false. */
def hasTypeBetweenExclusive(bound: TypeEnvClass, id: From): Boolean =
if (!envInHierarchy(bound))
// We must not throw here as this may happen if the first env is a miss.
false
else if (bound == this) {
false
} else {
innermostHasType(id) || (parent match {
case Some(parentEnv) => parentEnv.hasTypeBetweenExclusive(bound, id)
case None =>
throw new ICE("""Error: Parent type env did not appear in the
|hierarchy""".stripMargin)
})
}
/* As above, but the bound environment itself is also searched. */
def hasTypeBetweenInclusive(bound: TypeEnvClass, id: From): Boolean =
if (!envInHierarchy(bound)) {
// We must not throw here as this may happen if the first env is a miss.
false
} else if (bound == this) {
innermostHasType(id)
} else {
innermostHasType(id) || (parent match {
case Some(parentEnv) => parentEnv.hasTypeBetweenInclusive(bound, id)
case None =>
throw new ICE("""Error: Parent type env did not appear in the
|hierarchy""".stripMargin)
})
}
/* This returns true if one of the parent environments or this envrionment
* is equal to the passed environment. False otherwise. */
def envInHierarchy(env: TypeEnvClass): Boolean = {
(env == this) || (parent match {
case None => false
case Some(parent) => parent.envInHierarchy(env)
})
}
/* This call is always safe by the assertion made in the constructor. */
def getSelf: TypeEnvClass = this.asInstanceOf[TypeEnvClass]
/* This seaches only this environment for the type. */
def innermostHasType(id: From): Boolean =
map.contains(id)
/* This searches only the top level environment for the type. */
def topLevelHasType(id: From): Boolean = parent match {
case Some(parentEnv) => parentEnv.topLevelHasType(id)
case None => innermostHasType(id)
}
/* Adds a binding at this level. When `qualified`, every type variable in
* `typ` is treated as forall-quantified here. */
def add(id: From, typ: To, qualified: Boolean): Unit =
if (qualified)
add(id, typ, Some(typ.getTypeVars()))
else
add(id, typ, None)
def add(id: From, typ: To,
qualifiedTypes: Option[GenericTypeSet[To]]): Unit = {
map(id) = (typ, qualifiedTypes)
}
/* addTopLevel is very much like 'add', but it adds the mapping to this
* map iff it has no parent. If it has a parent, the mapping is defered
* to the parent (and so on up the tree).
*/
def addTopLevel(id: From, typ: To, qualified: Boolean): Unit =
if (qualified)
addTopLevel(id, typ, Some(typ.getTypeVars()))
else
addTopLevel(id, typ, None)
def addTopLevel(id: From, typ: To,
qualifiedTypes: Option[GenericTypeSet[To]]): Unit = {
parent match {
case Some(parentEnv) => parentEnv.addTopLevel(id, typ, qualifiedTypes)
case None => add(id, typ, qualifiedTypes)
}
}
/* Given some other environment, this adds all the elements from the
* bottom level oof the other type env. */
def addBottomLevelEnv(other: TypeEnvClass) =
other.foreachInnermost {
case (otherID, (otherType, otherVars)) =>
add(otherID, otherType, otherVars)
}
/* The default qualified types is all the types or none of the
* types
*/
def updateId(id: From, newTyp: To, qualified: Boolean): Unit =
if (qualified)
updateId(id, newTyp, Some(newTyp.getTypeVars()))
else
updateId(id, newTyp, None)
/* This function automatically validates that the variable it is
* replacing is OK to replace with the one replacing it.
*
* Throw a runtime exception if that is not OK.
*
* This should be used unless you are really sure that it is OK.
*/
def updateId(id: From, newTyp: To,
quantifiedTypes: Option[GenericTypeSet[To]]): Unit = {
getOrFail(id).unify(newTyp)
updateIdNoValidate(id, newTyp, quantifiedTypes)
}
/* This attempts to update types in the parent if possible. */
def updateIdNoValidate(id: From, newTyp: To,
qualifiedTypes: Option[GenericTypeSet[To]]): Unit = {
if (map.contains(id))
map(id) = (newTyp, qualifiedTypes)
else
parent match {
case Some(parentEnv) =>
parentEnv.updateIdNoValidate(id, newTyp, qualifiedTypes)
case None => notFound(id)
}
}
/* Given some name X (in the map) and some name Y (not in the map),
* change the name of X to Y.
*
* This subsitution is done in the appropriate parent map.
*/
def swapNames(from: From, to: From): Unit = {
assert(hasType(from))
assert(!hasType(to))
if (map.contains(from)) {
map(to) = map(from)
map.remove(from)
} else {
// If this does not contain from, then we know the
// parent must as we have already asserted that
// the map has the right type.
parent.get.swapNames(from, to)
}
}
/* Given some identifier X, remove X from the map.
* If it is in the parent, remove it from there (recursively).
*
* Throws if x is not in the map. */
def remove(x: From): Unit = {
if (innermostHasType(x)) {
map.remove(x)
} else {
parent match {
case Some(parentEnv) => parentEnv.remove(x)
case None => notFound(x)
}
}
}
/* This returns a tuple of the type of the identifier and a set
* of the quantified variables. */
def getRaw(id: From): Option[(To, Option[GenericTypeSet[To]])] = {
val mapContents = map.get(id)
mapContents match {
case Some(pair) => Some(pair)
case None => // Try the parent
parent match {
case Some(parentEnv) => parentEnv.getRaw(id)
case None => None
}
}
}
/* As getRaw, but throws an ICE if the identifier is unbound. */
def getRawOrFail(id: From): (To, Option[GenericTypeSet[To]]) = {
getRaw(id).getOrElse(notFound(id))
}
/* This gets a value from the map and substitutes
* any quantified variables in for new variables.
*/
def get(id: From): Option[To] = {
getRaw(id) match {
case Some((typ, Some(qualifiedTypes))) =>
Some(typ.typeClone(qualifiedTypes))
case Some((typ, None)) => Some(typ)
// getRaw checks the parent, so this function does not have to.
case None => None
}
}
/* As get, but throws an ICE if the identifier is unbound. */
def getOrFail(id: From): To = {
get(id).getOrElse(notFound(id))
}
/* This returns all the unquantified types for some variable
* in the map.
*
* This is used for unification to unify only unquantified types.
*
* NOTE(review): unlike most accessors here, this looks up `id` only in the
* innermost map (and throws if absent here) rather than consulting the
* parent — presumably intentional for unification; confirm.
*/
def getUnquantifiedTypesFor(id: From): GenericTypeSet[To] = {
val (typ, quantified) = map(id)
quantified match {
case None => typ.getTypeVars()
case Some(quantified) => typ.getTypeVars() - quantified
}
}
/* This goes through all the atomics (INCLUDING QUANTIFIED ATOMICS)
* that are used. It is used to change ASTNumberType -> ASTIntType
* at the top level.
*
* It subsequently removes any forall quantified items
* from the quantified section that have been replaced.
*/
def specializeAtomicsMatching(f: (To => Boolean), sub: To): Unit = {
// This check is made not beause this method cannot support
// a sub with nested tyvars, but because it is significantly
// more complicated. There are subtle failure modes with the lowering
// pass if the quantifiedTypeVars aren't cleaned up.
if (!sub.getTypeVars().isEmpty) {
throw new ICE("""Cannot specialzeAtomics to poly types. Polytype used
| was %s. """.stripMargin.format(sub.prettyPrint))
}
// If this is too slow, we could adjust the function definition
// to only do the substituion once for any particular mapping.
// Then keep track of the mappings and only do the new mappings.
foreachInnermost({
case(name, (to, quantifiedTypes)) => {
val toVars = to.getTypeVars()
var substitutedTo = to
for (toVar <- toVars) {
if (f(toVar)) {
substitutedTo = substitutedTo.substituteFor(toVar, sub)
}
}
// Despite the rather general definition of this method,
// the target is NumType => ASTIntType (and the similar
// group operations)
val newTyVars = substitutedTo.getTypeVars()
updateIdNoValidate(name, substitutedTo,
quantifiedTypes.map(_.intersection(newTyVars)))
}
})
}
/* This gets a value from the map and substitutes
* any quantified variables in for new variables.
*/
def apply(id: From): Option[To] = get(id)
/* This returns the value from the map without
* substituting in for the quantified types.
*/
def getNoSubstitute(id: From): Option[To] =
if (map.contains(id))
map.get(id).map(_._1)
else
parent match {
case Some(parentEnv) => parentEnv.getNoSubstitute(id)
case None => None
}
def getNoSubstituteOrFail(id: From): To =
getNoSubstitute(id).getOrElse(notFound(id))
/* Copies the raw binding for `id` into the given environment. */
def insertInto(id: From, ttypeEnv: TypeEnvClass) = {
val (to, qualifiedTypes) = getRawOrFail(id)
ttypeEnv.add(id, to, qualifiedTypes)
}
private def notFound(id: From) =
throw new ICE(""" Error, type %s not found in
|the environment""".stripMargin.format(id.prettyPrint))
/* This iterates over all elements in the environment and it's parents. */
def foreachAll(f : (((From, (To, Option[GenericTypeSet[To]])))
=> Unit)): Unit = {
map.foreach(f)
parent.map(_.foreachAll(f))
}
/* This iterates over all elements in this environment (i.e. NOT
* the parent environments). */
def foreachInnermost(f: (((From, (To, Option[GenericTypeSet[To]])))
=> Unit)): Unit = {
map.foreach(f)
}
/* This is used for unification. It only iterates over elements that can
* be 'seen' from this environment. So, if 'x' is shadowing something
* in the parent, this will not iterate over the parent 'x'. */
def foreachUnshadowed(
f: (((From, (To, Option[GenericTypeSet[To]])) => Unit))): Unit = {
val seenSet = new HashSet[From]()
map.foreach{ case (name, value) => {
seenSet.add(name)
f(name, value)
}}
parent.map(_.foreachUnshadowed{ case (name, value) => {
if (!seenSet.contains(name)) {
f(name, value)
}
} })
}
}
| j-c-w/mlc | src/main/scala/GenericTypeEnv.scala | Scala | gpl-3.0 | 11,825 |
package io.udash.i18n
import com.avsystem.commons.serialization.GenCodec
import com.avsystem.commons.serialization.json.{JsonStringInput, JsonStringOutput}
import io.udash.rpc.JsonStr
import io.udash.testing.UdashSharedTest
import scala.concurrent.Future
class TranslationKeyTest extends UdashSharedTest {
import Utils._
implicit val lang = Lang("en")
// Stub provider: renders a key as "<key>:<arg1>,<arg2>,..." so the tests
// can assert exactly which key and arguments were used.
implicit val provider = new TranslationProvider {
override def translate(key: String, argv: Any*)(implicit lang: Lang): Future[Translated] = {
val sb = new StringBuilder
sb.append(key)
sb.append(":")
sb.append(argv.toSeq.map(_.toString).mkString(","))
Future.successful(Translated(sb.result()))
}
override protected def handleMixedPlaceholders(template: String): Unit = ()
}
// JSON round-trip helpers used by the serialization tests below.
def write[T: GenCodec](value: T): JsonStr =
JsonStr(JsonStringOutput.write(value))
def read[T: GenCodec](jsonStr: JsonStr): T =
JsonStringInput.read[T](jsonStr.json)
// One key per supported arity (0-9 plus variadic X), an untranslatable
// key, and a reduced key (a keyN with its arguments already applied).
val testKey0 = TranslationKey.key("test0")
val testKey1 = TranslationKey.key1[Int]("test1")
val testKey2 = TranslationKey.key2[Int, String]("test2")
val testKey3 = TranslationKey.key3[Int, String, Int]("test3")
val testKey4 = TranslationKey.key4[Int, String, Int, String]("test4")
val testKey5 = TranslationKey.key5[Int, String, Int, String, Int]("test5")
val testKey6 = TranslationKey.key6[Int, String, Int, String, Int, String]("test6")
val testKey7 = TranslationKey.key7[Int, String, Int, String, Int, String, Int]("test7")
val testKey8 = TranslationKey.key8[Int, String, Int, String, Int, String, Int, String]("test8")
val testKey9 = TranslationKey.key9[Int, String, Int, String, Int, String, Int, String, Int]("test9")
val testKeyX = TranslationKey.keyX("testX")
val testKeyU = TranslationKey.untranslatable("testUntranslatable")
val testKeyR = testKey3(1, "two", 5)
"Test template placeholders substitution" in {
// Local provider that exercises the real placeholder substitution
// (putArgs) instead of the stub defined above.
implicit val provider: TranslationProvider = new TranslationProvider {
override def translate(key: String, argv: Any*)(implicit lang: Lang): Future[Translated] = {
Future.successful(putArgs(key, argv: _*))
}
override protected def handleMixedPlaceholders(template: String): Unit = ()
}
//escape regex chars in replacement (actually: putArgs test)
val plainKey = TranslationKey.key1[String]("This is {}")
getTranslatedString(plainKey("plain string")) should be("This is plain string")
getTranslatedString(plainKey("${foo}")) should be("This is ${foo}")
getTranslatedString(plainKey("<([{\\\\^-=$!|]})?*+.>")) should be("This is <([{\\\\^-=$!|]})?*+.>") //regex special chars
//indexed template
val indexedKey = TranslationKey.key3[Int, Int, Int]("This is {2} {1} {0}")
getTranslatedString(indexedKey(1,2,3)) should be("This is 3 2 1")
//mixed templates are actually unhandled
val mixedKey = TranslationKey.key3[Int, Int, Int]("This is {2} {} {0}")
getTranslatedString(mixedKey(1,2,3)) should be("This is 3 {} 1")
}
"TranslationKey" should {
"obtain translation from provider" in {
getTranslatedString(testKey0) should be("test0:")
getTranslatedString(testKey1(1)) should be("test1:1")
getTranslatedString(testKey2(1, "2")) should be("test2:1,2")
getTranslatedString(testKey3(1, "2", 3)) should be("test3:1,2,3")
getTranslatedString(testKey4(1, "2", 3, "4")) should be("test4:1,2,3,4")
getTranslatedString(testKey5(1, "2", 3, "4", 5)) should be("test5:1,2,3,4,5")
getTranslatedString(testKey6(1, "2", 3, "4", 5, "6")) should be("test6:1,2,3,4,5,6")
getTranslatedString(testKey7(1, "2", 3, "4", 5, "6", 7)) should be("test7:1,2,3,4,5,6,7")
getTranslatedString(testKey8(1, "2", 3, "4", 5, "6", 7, "8")) should be("test8:1,2,3,4,5,6,7,8")
getTranslatedString(testKey9(1, "2", 3, "4", 5, "6", 7, "8", 9)) should be("test9:1,2,3,4,5,6,7,8,9")
getTranslatedString(testKeyX(1, 2, "3", 4.5)) should be("testX:1,2,3,4.5")
getTranslatedString(testKeyU) should be("testUntranslatable")
}
"compile only with valid types" in {
"testKey1(1)" should compile
"testKey1(\\"1\\")" shouldNot typeCheck
"testKey1(1.5)" shouldNot typeCheck
}
"serialize and deserialize TranslationKey0" in {
val serialized = write(testKey0)
val deserialized = read[TranslationKey0](serialized)
getTranslatedString(deserialized) should be(getTranslatedString(testKey0))
}
"serialize and deserialize Untranslatable" in {
val serialized = write(testKeyU)
val deserialized = read[TranslationKey0](serialized)
getTranslatedString(deserialized) should be(getTranslatedString(testKeyU))
}
// Applying arguments to a keyN yields a reduced key, which must survive a
// JSON round trip together with its already-bound arguments.
"serialize and deserialize reduced keys" in {
val serialized = write(testKey1(5))
val deserialized = read[TranslationKey0](serialized)
getTranslatedString(deserialized) should be(getTranslatedString(testKey1(5)))
val serialized2 = write(testKey5(1, "2", 3, "4", 5))
val deserialized2 = read[TranslationKey0](serialized2)
getTranslatedString(deserialized2) should be(getTranslatedString(testKey5(1, "2", 3, "4", 5)))
val serializedX = write(testKeyX((1, 2, "3", 4.5)))
val deserializedX = read[TranslationKey0](serializedX)
getTranslatedString(deserializedX) should be(getTranslatedString(testKeyX((1, 2, "3", 4.5))))
}
"have descriptive toString" in {
testKey0.toString should be("SimpleTranslationKey0(test0)")
testKey1.toString should be("TranslationKey1(test1)")
testKey2.toString should be("TranslationKey2(test2)")
testKey3.toString should be("TranslationKey3(test3)")
testKey4.toString should be("TranslationKey4(test4)")
testKey5.toString should be("TranslationKey5(test5)")
testKey6.toString should be("TranslationKey6(test6)")
testKey7.toString should be("TranslationKey7(test7)")
testKey8.toString should be("TranslationKey8(test8)")
testKey9.toString should be("TranslationKey9(test9)")
testKeyX.toString should be("TranslationKeyX(testX)")
testKeyU.toString should be("Untranslatable(testUntranslatable)")
testKeyR.toString should be("ReducedTranslationKey(test3,1,two,5)")
}
"have content-based equals and hashCode" in {
// Checks that `what` equals `same` (including hashCode) and differs
// from `notSame` in both respects.
def cmp(what: TranslationKey, same: TranslationKey, notSame: TranslationKey): Unit = {
what should be(same)
what.hashCode() should be(same.hashCode())
what shouldNot be(notSame)
what.hashCode() shouldNot be(notSame.hashCode())
}
cmp(testKey0, TranslationKey0("test0"), TranslationKey0("test1"))
cmp(testKey1, TranslationKey1("test1"), TranslationKey1("test2"))
cmp(testKeyU, TranslationKey.untranslatable("testUntranslatable"), TranslationKey.untranslatable("testUntranslatable2"))
cmp(testKeyR, testKey3(1, "two", 5), testKey3(1, "two", 6))
}
"not equal TranslationKeys of different types" in {
val ordinaryKey = TranslationKey.key("testUntranslatable")
testKeyU shouldNot be(ordinaryKey)
ordinaryKey shouldNot be(testKeyU)
}
}
}
| UdashFramework/udash-core | i18n/src/test/scala/io/udash/i18n/TranslationKeyTest.scala | Scala | apache-2.0 | 7,121 |
package nibbler
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterEach, Suite}
/**
 * Test mixin that provides a fresh local SparkContext per test:
 * created in beforeEach and stopped in afterEach.
 */
trait SparkContextAware extends BeforeAndAfterEach {
  this: Suite =>

  /** Minimal local configuration shared by all tests mixing in this trait. */
  private val configuration = new SparkConf().setAppName("test").setMaster("local")

  // Default-initialized (`_`, i.e. null) instead of an explicit `null`
  // literal; assigned in beforeEach before any test body runs.
  protected var sparkContext: SparkContext = _

  override protected def beforeEach(): Unit = {
    sparkContext = new SparkContext(configuration)
  }

  override protected def afterEach(): Unit = {
    // Stop the context so each test gets a fresh one.
    sparkContext.stop()
  }

  /** Reads the given file as an RDD of lines using the current test context. */
  protected def textFile(inputFilePath: String) = {
    sparkContext.textFile(inputFilePath)
  }
}
| pkoperek/nibbler | src/test/scala/nibbler/SparkContextAware.scala | Scala | gpl-3.0 | 603 |
package com.olvind.crud
package server
import slick.dbio.DBIO
trait dbOps extends executionContexts {
/**
 * Monadic wrapper around a DBIO yielding an XRes: composition with
 * flatMap short-circuits on the first non-success result.
 */
case class CrudDbOp[+T](res: DBIO[XRes[T]]){
// Maps the successful value, leaving failures untouched.
def map[U](f: T => U): CrudDbOp[U] =
CrudDbOp(res map (_ map f))
// Transforms the underlying database action directly.
def mapIO[U](f: DBIO[XRes[T]] => DBIO[XRes[U]]): CrudDbOp[U] =
CrudDbOp[U](f(res))
// Sequences a dependent operation; a non-success XRes is passed through
// unchanged (the cast is safe because failures carry no T).
def flatMap[U](f: T => CrudDbOp[U]): CrudDbOp[U] =
CrudDbOp[U](
res.flatMap {
case XSuccess(t) => f(t).res
case other => DBIO successful other.asInstanceOf[XRes[U]]
}
)
}
/** Constructors for lifting plain values, options, eithers and DBIOs. */
object CrudDbOp {
def apply[T](t: XRes[T]): CrudDbOp[T] =
CrudDbOp[T](DBIO successful t)
def success[T](t: T): CrudDbOp[T] =
CrudDbOp[T](DBIO successful XSuccess(t))
def failure[F <: XFail](f: F): CrudDbOp[Nothing] =
CrudDbOp[Nothing](DBIO successful f)
// Lifts an Option, using `left` as the failure when the value is absent.
def fromOpt[F <: XFail, T](ot: Option[T], left: => F): CrudDbOp[T] =
CrudDbOp[T](DBIO successful ot.fold[XRes[T]](left)(XSuccess.apply))
def fromEither[F <: XFail, T](et: Either[F, T]): CrudDbOp[T] =
CrudDbOp[T](DBIO successful et.fold(identity, XSuccess.apply))
def fromDbio[T](iot: DBIO[T]): CrudDbOp[T] =
CrudDbOp[T](iot map XSuccess.apply)
// Succeeds with Unit when `condition` holds, otherwise fails with `left`.
def require[F <: XFail](condition: Boolean, left: => F): CrudDbOp[Unit] =
CrudDbOp(DBIO successful (if (condition) XSuccess(()) else left))
}
}
| elacin/slick-crud | crud/jvm/src/main/scala/com/olvind/crud/server/dbOps.scala | Scala | apache-2.0 | 1,376 |
/**
* Licensed to the Minutemen Group under one or more contributor license
* agreements. See the COPYRIGHT file distributed with this work for
* additional information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package silhouette.authenticator.format
import io.circe.Json
import io.circe.jawn.decode
import silhouette.authenticator.format.JwtReads._
import silhouette.authenticator.{ Authenticator, AuthenticatorException, StatefulReads, StatelessReads }
import silhouette.crypto.Base64
import silhouette.{ LoginInfo, jwt }
import scala.concurrent.Future
import scala.util.{ Failure, Success, Try }
/**
 * A reads which transforms a JWT into an [[Authenticator]].
 *
 * Because of the fact that a JWT itself stores a complete serialized form of the authenticator, it's normally not
 * needed to use a backing store, because on subsequent requests the authenticator can be fully unserialized from the
 * JWT. But this has the disadvantage that a JWT cannot be easily invalidated. But with a backing store that creates a
 * mapping between the JWT and a stored instance, it's possible to invalidate the authenticators server side. Therefore
 * this reads can be used in a stateless and a stateful manner.
 *
 * @param jwtReads The underlying JWT reads implementation.
 */
final case class JwtReads(jwtReads: jwt.Reads) extends StatelessReads[String] with StatefulReads[String] {
/**
 * Transforms a JWT into an [[Authenticator]].
 *
 * @param jwt The JWT to transform.
 * @return An authenticator on success, an error on failure.
 */
override def read(jwt: String): Future[Authenticator] = Future.fromTry {
jwtReads.read(jwt).map { claims =>
// Non-registered claims carry the tags, fingerprint and payload.
val custom = Json.fromJsonObject(claims.custom)
Authenticator(
// The JWT ID claim is mandatory and becomes the authenticator ID.
id = claims.jwtID.getOrElse(throw new AuthenticatorException(MissingClaimValue.format("jwtID"))),
// The subject claim carries a Base64-encoded JSON LoginInfo; `.get`
// rethrows any parse failure, which Future.fromTry turns into a
// failed Future.
loginInfo = buildLoginInfo(Base64.decode(claims.subject
.getOrElse(throw new AuthenticatorException(MissingClaimValue.format("subject"))))).get,
touched = claims.issuedAt,
expires = claims.expirationTime,
// Missing or malformed custom fields fall back to defaults/None.
tags = custom.hcursor.downField("tags").as[Seq[String]].getOrElse(Seq()),
fingerprint = custom.hcursor.downField("fingerprint").as[String].toOption,
payload = custom.hcursor.downField("payload").focus
)
}
}
/**
 * Builds the login info from Json.
 *
 * @param str The string representation of the login info.
 * @return The login info on success, otherwise a failure.
 */
private def buildLoginInfo(str: String): Try[LoginInfo] = {
decode[LoginInfo](str) match {
case Left(error) =>
Failure(new AuthenticatorException(JsonParseError.format(str), Some(error.getCause)))
case Right(loginInfo) =>
Success(loginInfo)
}
}
}
/**
 * The companion object, holding the error message templates used by
 * [[JwtReads]] when a JWT cannot be transformed.
 */
object JwtReads {
// Formats: %s is the offending JSON / claim name / value respectively.
val JsonParseError = "Cannot parse Json: %s"
val UnexpectedJsonValue = "Unexpected Json value: %s; expected %s"
val MissingClaimValue = "Cannot get value for claim `%s` from JWT"
}
| mohiva/silhouette | modules/authenticator/src/main/scala/silhouette/authenticator/format/JwtReads.scala | Scala | apache-2.0 | 3,583 |
package com.landoop.streamreactor.connect.hive.parquet
import org.apache.parquet.hadoop.metadata.CompressionCodecName
/**
 * Options controlling how Parquet files are read.
 *
 * @param projection          column names to project; Nil presumably reads all columns — confirm against the reader
 * @param dictionaryFiltering whether dictionary filtering is enabled for reads
 */
case class ParquetSourceConfig(projection: Seq[String] = Nil,
dictionaryFiltering: Boolean = true)
/**
 * Options controlling how Parquet files are written.
 *
 * @param overwrite        whether existing output may be overwritten
 * @param compressionCodec compression codec for written data (defaults to Snappy)
 * @param validation       whether writer validation is enabled
 * @param enableDictionary whether dictionary encoding is enabled
 */
case class ParquetSinkConfig(overwrite: Boolean = false,
compressionCodec: CompressionCodecName = CompressionCodecName.SNAPPY,
validation: Boolean = true,
enableDictionary: Boolean = true)
| datamountaineer/stream-reactor | kafka-connect-hive/src/main/scala/com/landoop/streamreactor/connect/hive/parquet/config.scala | Scala | apache-2.0 | 527 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.oap.adapter
import org.scalacheck.{Prop, Properties}
/** Compatibility shim around ScalaCheck's Properties API. */
object PropertiesAdapter {
/**
 * The Properties has changed in the spark2.3 version.
 * Ignore it in the spark2.1, spark2.2 version
 *
 * Combines every Prop contained in the given Properties into a single
 * conjunction via Prop.all (the second tuple element is the Prop itself).
 */
def getProp(properties: Properties): Prop = Prop.all(properties.properties.map(_._2): _*)
}
| Intel-bigdata/OAP | oap-cache/oap/src/test/scala/org/apache/spark/sql/execution/datasources/oap/adapter/PropertiesAdapter.scala | Scala | apache-2.0 | 1,148 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of Scala code snippets that meet specific criteria, giving a basic overview of the dataset's contents without revealing deeper insights.