code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package com.sksamuel.elastic4s.validate
import com.sksamuel.elastic4s.{IndexesAndTypes, ProxyClients}
import com.sksamuel.exts.OptionImplicits._
import org.elasticsearch.action.admin.indices.validate.query.{ValidateQueryAction, ValidateQueryRequestBuilder}
import org.elasticsearch.index.query.QueryBuilder
/**
 * Definition of a validate-query request against the given indexes and types.
 *
 * @param indexesAndTypes the indexes and types the query is validated against; must not be null
 * @param query           the query to validate
 * @param rewrite         if set, requests that the explanation contains the rewritten query
 * @param explain         if set, requests a detailed explanation for invalid queries
 */
case class ValidateDefinition(indexesAndTypes: IndexesAndTypes,
                              query: QueryBuilder,
                              rewrite: Option[Boolean] = None,
                              explain: Option[Boolean] = None) {
  // Message previously read "value must not be null or empty", but only nullness is checked here.
  require(indexesAndTypes != null, "indexesAndTypes must not be null")

  /** Builds the underlying Elasticsearch request; the optional flags are only applied when set. */
  def builder = {
    val builder = new ValidateQueryRequestBuilder(ProxyClients.indices, ValidateQueryAction.INSTANCE)
      .setIndices(indexesAndTypes.indexes: _*)
      .setTypes(indexesAndTypes.types: _*)
      .setQuery(query)
    rewrite.foreach(builder.setRewrite)
    explain.foreach(builder.setExplain)
    builder
  }

  /** Returns a copy of this definition with the rewrite flag enabled/disabled. */
  def rewrite(rewrite: Boolean): ValidateDefinition = copy(rewrite = rewrite.some)

  /** Returns a copy of this definition with the explain flag enabled/disabled. */
  def explain(explain: Boolean): ValidateDefinition = copy(explain = explain.some)
}
| ulric260/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/validate/ValidateDefinition.scala | Scala | apache-2.0 | 1,121 |
/*
* Copyright 2012 The SIRIS Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* The SIRIS Project is a cooperation between Beuth University, Berlin and the
* HCI Group at the University of Würzburg. The project is funded by the German
* Federal Ministry of Education and Research (grant no. 17N4409).
*/
package simx.core.svaractor
import akka.actor._
import akka.remote.{DisassociatedEvent, RemoteScope}
import com.typesafe.config.{Config, ConfigFactory}
import simx.core.clustering.ClusterSubSystem
import simx.core.entity.description.SVal
import simx.core.entity.description.SVal.SValType
import simx.core.entity.typeconversion.ConvertedSVar
import simx.core.helper.{JVMTools, Loggable}
import simx.core.svaractor.TimedRingBuffer._
import simx.core.svaractor.handlersupport.Types.CPSRet
import scala.collection.mutable
import scala.ref.WeakReference
import scala.reflect.ClassTag
/**
* This is the companion object of the SVarActor, an actor that can handle
* State Variables.
*/
object SVarActor {
  /** The reference type used to address SVarActors (plain akka actor refs). */
  type Ref = akka.actor.ActorRef
  protected[core] type Address = akka.actor.Address

  /** Extracts the (possibly remote) address from an actor reference. */
  protected[core] def addressOf(ref : Ref) : Address = ref.path.address

  /** True iff the referenced actor lives in the local actor system. */
  protected[core] def isLocal(ref : Ref) : Boolean = SVarActor.addressOf(ref).hasLocalScope

  // Process-wide settings. The setters below require that they are changed
  // before the actor system is created lazily by getSystem.
  private var systemName = "SVarActor"
  private var hostname = System.getProperty("simx.remote.hostname", "")
  private val port = System.getProperty("simx.remote.port", "0")
  private var system : Option[ActorSystem] = None
  private var config : Option[Config] = None
  private var profiling = false

  //From http://doc.akka.io/docs/akka/2.3.11/general/configuration.html
  //If you are scheduling a lot of tasks you should consider increasing the ticks per wheel.
  //e.g. 'scheduler.ticks-per-wheel = 2048'
  // Assembles the akka configuration string for one of three modes:
  // remoting enabled, local profiling, or plain local operation.
  private def configString =
    if(isRemotingEnabled)
"""
akka {
scheduler {
tick-duration = """ + JVMTools.minTickDuration + """
implementation = simx.core.helper.JustDoItScheduler
}
actor {
provider = "akka.remote.RemoteActorRefProvider"
}
remote {
enabled-transports = ["akka.remote.netty.tcp"]
netty.tcp {
hostname = """" + hostname + """"
port = """ + port + """
}
}
}
"""
    else {
      if( profiling ) {
        "akka.scheduler.tick-duration=" + JVMTools.minTickDuration + "\\n" +
        "akka.actor.provider = \\"akka.actor.profiling.LocalProfilingActorRefProvider\\"" + "\\n" +
        "akka.scheduler.implementation = simx.core.helper.JustDoItScheduler"
      } else
"""
akka {
log-dead-letters = 1
scheduler.tick-duration=""" + JVMTools.minTickDuration + """
scheduler.implementation = simx.core.helper.JustDoItScheduler
}
"""
    }

  // An explicitly installed config (setSystemConfig) wins over the generated one;
  // akka's reference configuration fills any remaining gaps.
  private def getConfig : Config =
    config.getOrElse( ConfigFactory.parseString(configString).withFallback(ConfigFactory.load()))

  /**
   * Returns the default address of the running actor system.
   * Must only be called after the system has been instantiated.
   */
  protected def getAddress = {
    require(system.nonEmpty, "System has to be instantiated before calling getAddress function")
    // A throw-away akka extension is used to reach the provider's default address.
    class Ext(s: ExtendedActorSystem) extends Extension{ def getAddress = s.provider.getDefaultAddress }
    object ExtKey extends ExtensionKey[Ext]
    ExtKey.apply(getSystem).getAddress
  }

  /** Renders the path of the given actor reference, qualified with this system's address. */
  protected def getAddressOf(ref : SVarActor.Ref) =
    ref.path.toStringWithAddress(getAddress)

  // Lazily creates the actor system on first access, using the current settings.
  protected def getSystem = {
    if (system.isEmpty)
      system = Some(ActorSystem(systemName, getConfig))
    system.get
  }

  /** Subscribes the given actor to event-stream messages of class c. */
  protected def subscribeAkkaEvents[T](c : Class[T], actor : SVarActor.Ref){
    getSystem.eventStream.subscribe(actor, c)
  }

  /** Remoting is considered enabled iff a hostname was configured. */
  def isRemotingEnabled : Boolean =
    hostname.nonEmpty

  /** Sets the actor system name; must be called before the first actor is started. */
  def setSystemName(name : String){
    require(system.isEmpty, "SystemName may be defined before starting the first actor")
    systemName = name
  }

  /** Toggles the profiling actor-ref provider (only used by the non-remote configuration). */
  def setProfiling( profiling : Boolean ) {
    this.profiling = profiling
  }

  /** Sets the remoting hostname; must be called before the first actor is started. */
  def setHostName(hostName : String){
    require(system.isEmpty, "Hostname may be defined before starting the first actor")
    hostname = hostName
  }

  /** Installs a custom akka config (overrides the generated one); must be called before startup. */
  def setSystemConfig(cfg : Config){
    require(system.isEmpty, "Config may be defined before starting the first actor")
    config = Some(cfg)
  }

  /** The configured hostname, or None if remoting is disabled. */
  def getHostname : Option[String] =
    if (hostname.nonEmpty) Some(hostname) else None

  def getSystemName =
    systemName

  /** Creates a top-level actor from the given props; a random UUID-based name is used if none is given. */
  def createActor( props : Props, name : Option[String] ) : Ref = {
    val sys = getSystem
    if( name.isDefined )
      sys.actorOf(props, name.get )
    else
      sys.actorOf(props, props.actorClass().getCanonicalName + "-" + java.util.UUID.randomUUID())
  }

  /** Creates a top-level actor from a by-name constructor call. */
  def createActor[ T <: SVarActor : ClassTag ](ctor : => T, name : Option[String] = None ) : Ref =
    createActor(Props.apply(ctor), name)

  /** Shuts down the actor system if it has been created. */
  def shutdownSystem(){
    system.foreach(_.shutdown())
  }

  /** Asks the given actor to shut itself down via a Shutdown message. */
  def shutdownActor(ref : Ref)(implicit sender : Ref){
    ref ! Shutdown()
  }
}
/**
* This trait is an actor that can handle State Variables and fullfills the
* required notifications.
*/
trait SVarActor extends SVarActorBase with SVarFunctions with Loggable{
  //
  //
  //
  // inner classes
  //
  //

  // Subscribe to remoting disconnect events so that svar owners on dead nodes
  // can be cleaned up (see the DisassociatedEvent handler below).
  override def preStart() {
    subscribeAkkaEvents(classOf[DisassociatedEvent])
    super.preStart()
  }

  // Last-resort error handler for failed actor creation: print the error and kill the JVM.
  private def defaultErrorHandler( e : Exception ) {
    e.printStackTrace()
    System.exit( -100 )
  }

  /** Creates a child SVarActor without reacting to its creation acknowledgement. */
  final protected def spawnActor[T <: SVarActor : ClassTag]( constructorCall : => T, targetNode : Option[Symbol] = None ) =
    createActor(constructorCall, targetNode)(_ => {})()

  // Creates a child actor locally, or remotely if targetNode refers to a known cluster node,
  // then asks it for an ActorCreation acknowledgement before invoking the handler.
  // The new actor ref is returned immediately in either case.
  final private def _createActor(props : Props, targetNode : Option[Symbol] = None )
                                ( handler : SVarActor.Ref => Unit )
                                ( errorHandler : Exception => Unit = defaultErrorHandler ) = {
    val actor =
      if( targetNode.isEmpty || !ClusterSubSystem.getKnown.contains( targetNode.get ) ) {
        context.actorOf(props, props.actorClass().getCanonicalName + "-" + java.util.UUID.randomUUID())
      } else {
        // Deploy remotely on the interface/port registered for the target node.
        val (interface, port) = ClusterSubSystem.getKnown( targetNode.get )
        context.actorOf(
          props.withDeploy( Deploy( scope = RemoteScope( Address( "akka", SVarActor.systemName, interface, port ) ) ) ),
          props.actorClass().getCanonicalName + "-" + java.util.UUID.randomUUID()
        )
      }
    try {
      ask[Any](actor, ActorCreation){
        case ActorCreation => handler(actor)
      }
    } catch {
      case e : Exception =>
        errorHandler( e )
    }
    actor
  }

  /** Creates a child actor from props; the handler runs once creation is acknowledged. */
  final protected def createActor( props : Props )( handler : SVarActor.Ref => Unit )
                                 ( errorHandler : Exception => Unit) =
    _createActor(props)(handler)(errorHandler)

  /** Creates a child SVarActor from a by-name constructor call, optionally on a remote node. */
  final protected def createActor[T <: SVarActor : ClassTag]( constructorCall : => T, targetNode : Option[Symbol] = None )
                                 ( handler : SVarActor.Ref => Unit )
                                 ( errorHandler : Exception => Unit = defaultErrorHandler ) =
    _createActor(Props.apply(constructorCall), targetNode)(handler)(errorHandler)

  /** Subscribes this actor to event-stream messages of class c. */
  protected def subscribeAkkaEvents[T](c : Class[T]){
    SVarActor.subscribeAkkaEvents(c, self)
  }

  protected def getAddressOf(ref : SVarActor.Ref) : String =
    SVarActor.getAddressOf(ref)

  protected def getPort : Option[Int] =
    SVarActor.getAddress.port

  /**
   * This method sets a new value to the given State Variable. If the
   * State Variable is owned by the calling Actor the value gets written
   * immediately. If the state variable is owned by another actor, the other
   * actor get an advice to write the value, but it depends on it's own logic
   * if the value gets written or not.
   *
   * The method always returns immediately and never blocks.
   *
   * @param value The new value for the State Variable
   */
  override protected[svaractor] def set[T](sVar: SVar[T], value: T, at : Time): Boolean =
    set(sVar, value, at, forceUpdate = false)

  // Internal set: delegates to converted svars, writes locally for owned svars,
  // or forwards a WriteSVarMessage to the owner. Returns false for immutable particles.
  private[svaractor] def set[T](sVar : SVar[T], value: T, at : Time, forceUpdate : Boolean) = sVar match {
    case convertedSVar : ConvertedSVar[_,T] =>
      convertedSVar.set(value, at, forceUpdate)
    case _ if sVar.isMutable =>
      val currentOwner = owner(sVar)
      if (currentOwner isSameAs self)
        write(self, sVar, value, at, forceUpdate)
      else
        currentOwner ! WriteSVarMessage( self, sVar, value, at, forceUpdate )
      true
    case _ => false
  }

  /** Sends a change notification to a single observer. */
  protected[svaractor] def notifyObserver[T](observer : SVarActor.Ref, msg : NotifyWriteSVarMessage[T]){
    observer ! msg
  }

  // Retrieve the current value. The supplied handler is used only once.
  /**
   * This method reads the current value of the State Variable. It does not
   * block and returnd immediately. The given consumer function at the parameter
   * gets called when the value has been provided. The consumer is only valid
   * one time and gets deleted after the value has been provided.
   *
   * The given handler is processed in the current actor that is calling the
   * method.
   *
   * If the State Variable belongs to the current actor, the value can be read
   * immediately. In that case the consumer function is processed immediately
   * and the get method returns after the consumer function has been completed.
   *
   * @param consume A function that consumes the value of the State Variable.
   */
  protected[svaractor] def get[T : ClassTag ](stateVariable : StateParticle[T], at : Time = Now,
                                             accessMethod : AccessMethod = GetClosest)( consume: T => Unit ) {
    // Drop the timestamp from the content tuple before handing the value to the consumer.
    get(at, accessMethod, stateVariable)((x : ContentType[T]) => consume(x._1))
  }

  // Timestamped variant of get. For remotely owned svars a single-use handler is
  // registered that fires on the matching ValueOfSVarMessage (matched by svar id).
  protected[svaractor] def get[T : ClassTag ](at : Time, accessMethod : AccessMethod, stateVariable : StateParticle[T])
                                            ( consume: ContentType[T] => Unit ) {
    stateVariable match {
      case convertedSVar : ConvertedSVar[_, T] =>
        convertedSVar.get(at, accessMethod)(consume)
      case sval : SValType[T] =>
        sval.get(at, accessMethod)(consume)
      case sVar : SVar[T] =>
        if( owner( sVar ) isSameAs self )
          consume ( read( sVar, at, accessMethod ) )
        else {
          owner( sVar ) ! ReadSVarMessage( sVar, at, accessMethod )
          addSingleUseHandlerPF(new PartialFunction[ValueOfSVarMessage[T], Any@CPSRet] {
            override def toString(): String = "ValueOfSVarMessage handler, looking for sVar with id " + sVar.id
            override def isDefinedAt(x: ValueOfSVarMessage[T]): Boolean = x.sVar.id == sVar.id
            override def apply(v1: ValueOfSVarMessage[T]): Any@CPSRet = consume(v1.value)
          })
        }
    }
  }

  /**
   * Calling this method will observe the given state variable. Every time the
   * value of the State Variable gets changed the given handler messages gets
   * called. The handler message is running in the actor that called the
   * observe method.
   *
   * Only one handler can be registered at one time. If the method gets called
   * again within the the same actor the old handler gets replaced.
   *
   * An actor can observe the own state variable.
   *
   * A change of the value is only be notified if the value really change. E.g.
   * a State Variable contains the value 1 and a write operation with the value 1
   * is performed, no observers will be notified, because the value has not changed.
   *
   * @param handler The handler, that gets called when the value of the State Variable has changed.
   * @param ignoredWriters Value changes by SVarActors contained in this set are ignored.
   */
  protected[svaractor] def observe[T](sVar : SVar[T], ignoredWriters: Set[SVarActor.Ref] = Set())(handler: ContentType[T] => Unit) =
    sVar match {
      case convertedSVar : ConvertedSVar[_, T] =>
        convertedSVar.observe((v, t) => handler(v -> t), ignoredWriters)
      case _ =>
        // Register as observer (locally or with the remote owner), then store the local handler.
        if ( owner( sVar) isSameAs self )
          addObserver( sVar, self, ignoredWriters )
        else
          owner( sVar ) ! ObserveSVarMessage( sVar, self, ignoredWriters )
        addSVarObserveHandler(sVar)(handler)
    }

  /**
   * This method returns the last known owner of the state variable.
   *
   */
  final protected[svaractor] def owner[T](sVar : SVar[T] ) : Owner =
    sVar match {
      case convertedSVar : ConvertedSVar[_,_] =>
        owner( convertedSVar.wrappedSVar )
      case mutableSVar : SVar[T] =>
        getOrUpdateSVarOwner(sVar, mutableSVar.initialOwner)
      case _ => NoOwner
    }

  // Owner backed by a concrete actor ref; messages are forwarded via tell.
  private case class KnownOwner(owner : SVarActor.Ref) extends Owner(owner.tell){
    def isSameAs(that: SVarActor.Ref): Boolean = owner equals that
  }

  // Null-object owner: messages sent to it are silently dropped.
  private case object NoOwner extends Owner( (_, _) => {} ){
    def isSameAs(x : SVarActor.Ref) = false
  }

  /** Abstraction over an svar's owner, allowing messages to be sent without exposing the ref. */
  protected abstract class Owner( val tell : (Any, SVarActor.Ref) => Unit ){
    def !(msg: Any)(implicit sender : SVarActor.Ref){ tell(msg, sender) }
    def isSameAs(that : SVarActor.Ref) : Boolean
  }

  // Assign a new owner.
  /**
   * This method assigns a new owner to the state variable. It does not block
   * and returns immediately. An owner change can be rejected by the current
   * owner.
   *
   * @param newOwner The new owner of the State Variable. Must not be null.
   */
  final protected[svaractor] def owner[T](sVar : SVar[T], newOwner: SVarActor.Ref) {
    val currentOwner = owner( sVar )
    if(currentOwner isSameAs self)
      changeOwner(self, sVar, newOwner, bufferMode = data(sVar).getBufferSetting)
    else
      currentOwner ! ChangeOwnerOfSVarMessage( sVar, newOwner, data(sVar).getBufferSetting )
  }

  // Stop observing this SVar.
  /**
   * Calling this method stops observing the State Variable. The registred
   * handler gets removed and the current actor is not informed about changes
   * of the value any more.
   */
  protected[svaractor] def ignore[T](sVar : SVar[T] ) {
    sVar match {
      case convertedSVar : ConvertedSVar[_,_] =>
        ignore( convertedSVar.wrappedSVar )
      case _ =>
        owner( sVar ) ! IgnoreSVarMessage( sVar, self )
        removeSVarObserveHandlers( sVar )
    }
  }

  /**
   * Book-keeping for an in-progress ownership transfer of one svar: messages arriving
   * during the transfer are queued per sender and flushed once the change is accepted
   * and acknowledged.
   */
  protected class ChangeOwnerStatus(val svar : SVar[_], val newOwner : SVarActor.Ref) {
    private def clearQueueFor( actor : SVarActor.Ref )(implicit context : SVarActor) {
      heldMessages.get( actor ) match {
        case None =>
        case Some(queue) =>
          heldMessages -= actor
          // forward messages if the current actor was the sender, return to sender otherwise
          if (actor == context)
            queue.foreach( newOwner ! _ )
          else
            actor ! HeldMessagesMessage( svar, newOwner, queue.toList)(self)
      }
    }

    /** The queue held for the given actor, if any. */
    def apply( actor : SVarActor.Ref) : Option[mutable.Queue[SVarMessage]] =
      heldMessages.get(actor)

    // If the change is already accepted, the actor's queue can be flushed immediately;
    // otherwise remember the acknowledgement for later.
    def acknowledgeBy( actor : SVarActor.Ref ) =
      if (isAccepted) clearQueueFor( actor ) else heldAcknowledges += actor

    /** Queues a message that arrived during the transfer, keyed by its sender. */
    def pushMessage( msg : SVarMessage ) {
      val queue = heldMessages.getOrElse(msg.sender, mutable.Queue[SVarMessage]())
      queue += msg
      heldMessages.update(msg.sender, queue)
    }

    // Marks the transfer as accepted and flushes queues of all actors that
    // acknowledged while the transfer was still pending.
    def acceptChange() = {
      changeAccepted = true
      heldAcknowledges.foreach( a => clearQueueFor( a ) )
      heldAcknowledges.clear()
      acknowledgeBy(self)
    }

    def isAccepted = changeAccepted

    private val heldMessages = mutable.Map[SVarActor.Ref, mutable.Queue[SVarMessage]]()
    private val heldAcknowledges = mutable.Set[SVarActor.Ref]()
    private var changeAccepted = false
  }

  //
  //
  // variable initialization
  //
  //
  //! data of all svars owned by this actor
  private val data = mutable.HashMap[SVar[_],SVarDataImpl[_]]()
  //! the map which holds queues for each svar that is beeing transferred
  private val heldMessages = mutable.WeakHashMap[SVar[_], ChangeOwnerStatus]()
  //! the locally known owners
  private val sVarOwners = mutable.WeakHashMap[SVar[_], SVarActor.Ref]()
  //! addresses of remote systems that have disassociated (see DisassociatedEvent handler)
  protected val deadOwners = mutable.Set[SVarActor.Address]()
  //!
  protected var isRunning : Boolean = true
  //! locally registered observe handlers, one per svar (replaced on re-observe)
  protected val sVarObserveHandlers = mutable.WeakHashMap[SVar[_], ContentType[Any] => Unit]()

  //
  //
  // package private methods (at least most of them)
  //
  //
  //! !!! DO NOT USE THIS FUNCTION BUT THE SVARS OWNER() FUNCTION UNLESS YOU KNOW EXACTLY WHAT YOU ARE DOING !!!
  private[svaractor] def getLocallyKnownSVarOwner(svar: SVar[_]): Option[SVarActor.Ref] =
    sVarOwners.get(svar)

  // Looks up the locally known owner; if unknown, records newOwner unless its
  // node is already known to be dead (in which case NoOwner is returned).
  private def getOrUpdateSVarOwner(svar: SVar[_], newOwner : SVarActor.Ref) : Owner = sVarOwners.get(svar) match{
    case Some(owner) => KnownOwner(owner)
    case None =>
      if (deadOwners.contains(SVarActor.addressOf(newOwner))){
        NoOwner
      } else {
        sVarOwners.update(svar, newOwner)
        KnownOwner(newOwner)
      }
  }

  // Registers (or replaces) the observe handler for the svar. Note: the returned
  // UUID is freshly generated and not stored anywhere here.
  private[svaractor] def addSVarObserveHandler[T](svar : SVar[T])( handler : ContentType[T] => Unit ) : java.util.UUID = {
    sVarObserveHandlers.update(svar, handler.asInstanceOf[ContentType[Any] => Unit])
    java.util.UUID.randomUUID()
  }

  private[svaractor] def removeSVarObserveHandlers(svar : SVar[_]) =
    sVarObserveHandlers -= svar

  private[svaractor] def addSVarOwner(svar: SVar[_], owner : SVarActor.Ref) {
    sVarOwners += svar -> owner
  }

  // Performs or initiates an ownership transfer:
  // - if this actor is the new owner: take over the data (if not owned yet) and accept;
  // - if this actor is the current owner: offer the svar to the new owner and start
  //   queueing incoming messages until the transfer completes;
  // - otherwise the svar is unknown here and false is returned.
  private[svaractor] def changeOwner[T](sender : SVarActor.Ref,
                                        svar : SVar[T],
                                        newOwner : SVarActor.Ref,
                                        value : Option[(SVal.SValType[T], Time)] = None,
                                        bufferMode : BufferMode = TimedRingBuffer.defaultMode ) : Boolean = {
    if (self == newOwner){
      if (!this.isOwnerOf(svar)) value match {
        case Some((_data, timeStamp)) => insertSVar(svar, _data, timeStamp, bufferMode)
        case None => throw new Exception
      }
      sender ! AcceptSVarMessage(svar)
    }
    else if (isOwnerOf(svar)) heldMessages.get(svar) match {
      case Some(changeOwnerStatus) if changeOwnerStatus.isAccepted => owner(svar, newOwner ) //!!! is this correct? don't we have to tell "ownerHasChanged"?
      case Some(changeOwnerStatus) => changeOwnerStatus.pushMessage(ChangeOwnerOfSVarMessage( svar, newOwner, bufferMode))
      case None =>
        heldMessages(svar) = new ChangeOwnerStatus(svar, newOwner)
        newOwner ! BunchOfSimXMessagesMessages(
          OfferSVarMessage( getOriginal(svar), data(svar).readFull(Now, GetClosest), bufferMode ) :: moveObservers(svar, newOwner)
        )
    }
    //if nothing matched, the svar was unknown, so we return false
    else
      return false
    //if we got here, everything is ok, so we return true
    true
  }

  // Builds the ObserveSVarMessages needed to re-register the current observers with
  // the new owner. NOTE(review): the messages carry `self` rather than the observer
  // refs iterated over — confirm this is intended.
  protected def moveObservers[T](svar : SVar[T], newOwner : SVarActor.Ref) : List[SimXMessage] =
    data.get( svar ) match {
      case Some(_data) =>
        (for ( (observer, ignoredWriters) <- _data.getObservers) yield
          ObserveSVarMessage( svar, self, ignoredWriters)).toList
      case None => Nil
    }

  /** All owned svars that are observed by the given actor. */
  protected def getSVarsObservedBy( observer : SVarActor.Ref ) =
    data.filter( pair => pair._2.isObservedBy( observer) )

  // All mutable owned svars with at least one observer matching the address predicate,
  // paired with the first matching observer ref.
  private def getObservedBy( addressEquals : SVarActor.Ref => Boolean ) =
    data.filter( d => d._1.isMutable && d._2.getObservers.keys.exists( addressEquals ) ).map{
      tuple => tuple._1.asInstanceOf[SVar[_]] -> tuple._2.getObservers.find( x => addressEquals(x._1) ).get._1
    }

  /** The observers of the given svar (empty map if not owned here). */
  protected def getObserversOf( svar : SVar[_] ) = data.get(svar) match {
    case Some(svardata) => svardata.getObservers
    case None => Map[SVarActor.Ref, Set[SVarActor.Ref]]()
  }

  /**
   * Creates a new SVar.
   * The data of this SVar is stored.
   * The weak reference that is stored with the data is used detrmine, if
   * the data is not needed any more.
   */
  protected[svaractor] def createSVar[T]( value: SVal.SValType[T], timeStamp : Time, bufferMode : BufferMode ) = {
    val retVal = new SVarImpl(self, value.typedSemantics.classTag, value.typedSemantics.typeTag)
    insertSVar(retVal, value, timeStamp, bufferMode)
    retVal
  }

  // Stores data for a (mutable) svar and registers this actor as its owner.
  private def insertSVar[T]( sVar: SVar[T], value: SValType[T], timeStamp : Time, bufferSetting : BufferMode ) {
    if (sVar.isMutable){
      val dataSVar = new SVarDataImpl( value, timeStamp, new WeakReference( sVar ), bufferSetting )
      data += sVar -> dataSVar
      addSVarOwner(sVar, self)
    }
  }

  // private def removeSVar( svar : SVar[_] ) = {
  //   removeSVarOwner(svar)
  //   removeSVarData(svar)
  // }

  private def removeSVarData( sVar: SVar[_] ) =
    data -= sVar

  // Writes the value locally; if an ownership transfer is in progress the write is queued.
  // Observers are only notified when the value actually changes, unless forceUpdate is set.
  private[svaractor] final def write[T]( writer: SVarActor.Ref, sVar: SVar[T], value: T, at : Time, forceUpdate : Boolean) {
    heldMessages.get(sVar) match {
      case Some(changeOwnerStatus) => changeOwnerStatus.pushMessage( WriteSVarMessage( writer, sVar, value, at, forceUpdate) )
      case None =>
        if( forceUpdate || !read( sVar, at, GetClosest )._1.equals( value ) )
          data( sVar ).write( writer, value, at )
    }
  }

  // Reads the locally stored value; only valid for svars owned by this actor.
  private[svaractor] def read[T]( sVar: SVar[T], at : Time, accessMethod : AccessMethod ) : ContentType[T] = {
    data( sVar ).read(at, accessMethod).asInstanceOf[ContentType[T]]
  }

  // Adds an observer, or queues the request while an ownership transfer is in progress.
  private[svaractor] final def addObserver( sVar: SVar[_], a: SVarActor.Ref, ignoredWriters: Set[SVarActor.Ref] ) {
    heldMessages.get(sVar) match {
      case Some(changeOwnerStatus) => changeOwnerStatus.pushMessage( ObserveSVarMessage( sVar, self, ignoredWriters))
      case None => internalAddObserver( sVar, a, ignoredWriters )
    }
  }

  protected def internalAddObserver( sVar: SVar[_], a: SVarActor.Ref, ignoredWriters: Set[SVarActor.Ref] ) {
    data( sVar ).addObserver( a, ignoredWriters )
  }

  //
  //
  // private methods
  //
  //
  // Removes an observer, or queues the request while an ownership transfer is in progress.
  protected final def removeObserver( sVar: SVar[_], a: SVarActor.Ref ) {
    heldMessages.get(sVar) match {
      case Some(changeOwnerStatus) => changeOwnerStatus.pushMessage( IgnoreSVarMessage( sVar, self) )
      case None => if (data contains sVar) internalRemoveObserver(sVar, a)
    }
  }

  protected def internalRemoveObserver(sVar: SVar[_], a: SVarActor.Ref ) {
    data( sVar ).removeObserver( a )
  }

  // Recovers the original svar instance from the weak reference stored with its data.
  private def getOriginal[T](svar : SVar[T]) : SVar[T] = data.get(svar) match {
    case Some(_data) => _data.svar.get match {
      case Some(refToOrig) => refToOrig.asInstanceOf[SVar[T]]
      case _ => throw InvalidWeakRefException
    }
    case None => throw NotSVarOwnerException
  }

  private def isOwnerChangeInProgress( svar : SVar[_] ) : Boolean =
    heldMessages.contains(svar)

  // This actor owns a svar iff it holds data for it.
  private def isOwnerOf(svar : SVar[_]) : Boolean =
    data.contains(svar)

  private def updateSVarOwner(svar : SVar[_], newOwner : SVarActor.Ref) {
    sVarOwners.update(svar, newOwner)
  }

  // Common dispatch for messages that require ownership: queue during transfers,
  // handle if owned, otherwise redirect the sender to the last known owner.
  private def handleOwnerDependentMsg[T <: SVarHoldingMessage[_]]( handler : T => Unit )( msg : T ) {
    if ( isOwnerChangeInProgress(msg.sVar) )
      changeOwnerInProgessHandler( msg )
    else if ( isOwnerOf(msg.sVar) )
      handler( msg )
    else owner(msg.sVar) match {
      case KnownOwner(owner) => msg.sender ! SVarOwnerChangedMessage( msg.sVar, owner, msg )
      case _ =>
    }
  }

  private def createValueOfSVarMsg[T](sVar : SVar[T], at : Time, accessMethod : AccessMethod) : ValueOfSVarMessage[T] =
    ValueOfSVarMessage( sVar, read(sVar, at, accessMethod))

  //
  //
  // public methods
  //
  //
  /**
   *
   */
  def shutdown() {
    //isRunning = false
    //TODO: implement this
    context.stop(self)
  }

  // Combines all registered handlers; unmatched messages are wrapped in UnhandledMessage.
  final protected def handleMessage : PartialFunction[Any, Any] =
    handlersAsPF orElse { case msg => handlersupport.UnhandledMessage(msg, "no matching handler found") }

  //------------------------------------//
  //                                    //
  //     handler definition section     //
  //                                    //
  //------------------------------------//
  // Queues a message for a svar in transfer; first message from a sender additionally
  // informs that sender that a transfer is in progress.
  private def changeOwnerInProgessHandler( msg : SVarHoldingMessage[_]) {
    heldMessages.get(msg.sVar) match {
      case None => throw OwnerChangeNotInProgressException
      case Some(changeOwnerStatus) =>
        if (changeOwnerStatus(msg.sender).isEmpty)
          msg.sender ! SVarOwnerChangeInProgressMessage( msg.sVar, changeOwnerStatus.newOwner)
        changeOwnerStatus.pushMessage(msg)
        heldMessages.update(msg.sVar, changeOwnerStatus)
    }
  }

  // A svar is offered to this actor: take over its data and ownership.
  addHandler[OfferSVarMessage[Any]]{
    msg => changeOwner( msg.sender, msg.sVar, self, Some( msg.value ), msg.bufferMode )
  }

  addHandler[Shutdown]{
    msg => shutdown()
  }

  // Another actor tells us a transfer is in progress: mirror the transfer state and acknowledge.
  addHandler[SVarOwnerChangeInProgressMessage[_]]{ msg =>
    if(heldMessages.get(msg.sVar).isEmpty)
      heldMessages(msg.sVar) = new ChangeOwnerStatus(msg.sVar, msg.newOwner)
    msg.sender ! AcknowledgeMessage( SVarOwnerChangeInProgressMessage( msg.sVar, msg.newOwner))
  }

  addHandler[AcknowledgeMessage]{ msg =>
    Match(msg.refMessage){
      case SVarOwnerChangeInProgressMessage( svar, newOwner) => heldMessages.get(svar) match {
        case Some(changeOwnerStatus) => changeOwnerStatus.acknowledgeBy( msg.sender )
        case None =>
      }
    }
  }

  // The new owner accepted the svar: update the owner map and drop the local data.
  addHandler[AcceptSVarMessage[_]]{ msg =>
    updateSVarOwner(msg.sVar, msg.sender)
    removeSVarData(msg.sVar)
    heldMessages.get(msg.sVar) match {
      case Some(changeOwnerStatus) => changeOwnerStatus.acceptChange()
      case None =>
    }
  }

  // Messages that were queued at the old owner are replayed here against the new owner.
  addHandler[HeldMessagesMessage[_]]{ msg =>
    updateSVarOwner(msg.sVar, msg.newOwner)
    msg.msgs.foreach( applyHandlers( _ ) )
    //Handle msgs stored after the acknowledge was sent
    heldMessages.get(msg.sVar) collect {
      case changeOwnerStatus =>
        heldMessages.remove( msg.sVar )
        changeOwnerStatus(self).collect {
          case queue => queue.foreach( applyHandlers( _ ) )
        }
    }
  }

  addHandler[ChangeOwnerOfSVarMessage[_]]{ msg =>
    if (isOwnerChangeInProgress(msg.sVar))
      changeOwnerInProgessHandler( msg )
    else {
      val successful = changeOwner(msg.sender, msg.sVar, msg.newOwner, bufferMode = msg.bufferMode)
      // Transfer failed (svar unknown here): redirect to the last known owner if any.
      if (! successful) getLocallyKnownSVarOwner(msg.sVar) match {
        case Some(owner) => msg.sender ! SVarOwnerChangedMessage( msg.sVar, owner, msg)
        case _ => msg.sender ! UnknownSVarMessage(msg.sVar, msg)
      }
    }
  }

  addHandler[CreateSVarMessage[Any]] { msg =>
    msg.sender ! SVarCreatedMessage( SVarImpl( msg.value, msg.timeStamp ), msg )
  }

  addHandler[ReadSVarMessage[_]]{
    handleOwnerDependentMsg{ msg : ReadSVarMessage[_] => msg.sender ! createValueOfSVarMsg( msg.sVar, msg.at, msg.accessMethod ) }
  }

  addHandler[WriteSVarMessage[Any]]{
    handleOwnerDependentMsg( (msg : WriteSVarMessage[Any]) => write( msg.writer, msg.sVar, msg.value, msg.at, msg.forceUpdate ) )
  }

  addHandler[ObserveSVarMessage[_]]{
    handleOwnerDependentMsg( (msg : ObserveSVarMessage[_]) => addObserver(msg.sVar, msg.observer, msg.ignoredWriters) )
  }

  addHandler[IgnoreSVarMessage[_]]{
    handleOwnerDependentMsg( (msg : IgnoreSVarMessage[_]) => removeObserver(msg.sVar, msg.observer) )
  }

  addHandler[NotifyWriteSVarMessage[_]] {
    handleNotifyWrite
  }

  // Invokes the locally registered observe handler for the written svar, if any.
  private[svaractor] def handleNotifyWrite(msg : NotifyWriteSVarMessage[_]){
    sVarObserveHandlers get msg.sVar collect { case handler => handler( msg.value ) }
  }

  // The owner changed while our request was in flight: remember the new owner and retry there.
  addHandler[SVarOwnerChangedMessage[_]] { msg =>
    addSVarOwner(msg.sVar, msg.newOwner)
    msg.newOwner ! msg.originalMessage
  }

  //multimessage related functions
  addHandler[BunchOfSimXMessagesMessages]{
    handleBunchOfMessages
  }

  addHandler[AtomicUpdate]{
    handleAtomicUpdate
  }

  addHandler[AtomicSet]{
    handleAtomicSet
  }

  private[svaractor] def handleAtomicUpdate(msg : AtomicUpdate){
    handleBunchOfMessages(BunchOfSimXMessagesMessages(msg.msgs))
  }

  private[svaractor] def handleAtomicSet(msg : AtomicSet){
    handleBunchOfMessages(BunchOfSimXMessagesMessages(msg.msgs))
  }

  // Applies all registered handlers to each contained message in order.
  private def handleBunchOfMessages(msg : BunchOfSimXMessagesMessages){
    msg.msgs.foreach( applyHandlers )
  }

  // A remote system disconnected: drop its observers, mark its address dead,
  // and forget all svar owners located there.
  addHandler[DisassociatedEvent]{ msg =>
    def sameAsDisassociated(ref : SVarActor.Ref) = SVarActor.addressOf(ref) == msg.remoteAddress
    getObservedBy(sameAsDisassociated).foreach( tuple => removeObserver(tuple._1, tuple._2) )
    val toRemove = sVarOwners.filter{ tuple => sameAsDisassociated(tuple._2) }.keys
    deadOwners += msg.remoteAddress
    sVarOwners --= toRemove
  }

  /** Schedules the given job to run after `in` milliseconds. */
  protected def addJobIn( in : Long )( job : => Unit ) {
    addJobAt(System.currentTimeMillis() + in)(job)
  }

  // Internal message used to schedule a job from outside this actor's thread.
  private case class JobRequest(at : Long, job : () => Unit )

  addHandler[JobRequest]{
    request => addJobAt(request.at)(request.job())
  }

  /** Thread-safe variant of addJobIn: routes the request through this actor's mailbox. */
  def requestJobIn(in : Long)( job : => Unit) : Unit ={
    self ! JobRequest(System.currentTimeMillis() + in, () => job)
  }
}
trait SVarFunctions{
protected[svaractor] def set[T](sVar : SVar[T], value: T, at : Time) : Boolean
protected[svaractor] def get[T : ClassTag /*: TypeTag*/]( at : Time, accessMethod : AccessMethod, sVar : StateParticle[T])(consume: ContentType[T] => Unit)
protected[svaractor] def observe[T](sVar : SVar[T])( handler: ContentType[T] => Unit) : java.util.UUID = observe(sVar, Set[SVarActor.Ref]())(handler)
protected[svaractor] def observe[T](sVar : SVar[T], ignoredWriters: Set[SVarActor.Ref] = Set())(handler: ContentType[T] => Unit) : java.util.UUID
protected[svaractor] def ignore[T](sVar : SVar[T] )
protected[svaractor] def owner[T](sVar : SVar[T], newOwner: SVarActor.Ref)
// protected[svaractor] def owner[T](sVar : SVar[T] ) : SVarActor.Ref
} | simulator-x/core | src/simx/core/svaractor/SVarActor.scala | Scala | apache-2.0 | 30,318 |
/*
* Odessa State environmental University
* Copyright (C) 2014
*/
package ua.edu.odeku.ceem.mapRadar.utils.thread
/**
 * Trait intended for signalling a running process that it has to stop.
 * Created by Aleo on 23.03.14.
 */
trait StopProcess {
  // Cooperative stop flag: set to true to request that the process stops itself.
  var stopProcess = false
}
| aleo72/ww-ceem-radar | src/main/java/ua/edu/odeku/ceem/mapRadar/utils/thread/StopProcess.scala | Scala | apache-2.0 | 353 |
/*
* PipeOperatorSupport.scala
*
* Copyright (c) 2013 Lonnie Pryor III
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fulcrum.util
import fulcrum.code.{ Code, Macro }
import language.experimental.macros
import reflect.macros.Context
/**
* A view that enables the pipe operator `|>` on any value with no extra overhead.
*/
final class PipeOperatorSupport[I](val input: I) extends AnyVal {
  import PipeOperatorSupportMacros._

  /** Applies the specified function to the input value.
    * Implemented as a macro (see PipeOperatorSupportCls.pipe), so the call is
    * expanded at compile time; being a value class, the wrapper itself adds no
    * runtime allocation. */
  def |>[O](function: I => O): O = macro PipeOperatorSupportCls.pipe[I, O]
}
/**
* Definitions of the package macro logic.
*/
object PipeOperatorSupportMacros {
  /**
   * Implementations of the pipe operator support macro logic.
   */
  trait PipeOperatorSupportCls extends Macro with Code {
    import c.universe._

    /** Generates the pipe expansion: applies the function expression to the
      * `input` member of the PipeOperatorSupport prefix. */
    def pipe[I: WeakTypeTag, O: WeakTypeTag](function: Expr[I => O]): Expr[O] =
      $expr(function) $apply reify($prefix[PipeOperatorSupport[I]].splice.input)
  }

  /**
   * Expansions of the pipe operator support macro logic.
   */
  object PipeOperatorSupportCls {
    /** Macro entry point: bundles the context and delegates to the trait implementation. */
    def pipe[I: c.WeakTypeTag, O: c.WeakTypeTag](c: Context)(function: c.Expr[I => O]): c.Expr[O] =
      new Macro.Bundle[c.type](c) with PipeOperatorSupportCls {}.pipe[I, O](function)
  }
}
package org.jetbrains.plugins.scala
package console
import java.io.{IOException, OutputStream}
import com.intellij.openapi.actionSystem.{AnAction, AnActionEvent, CommonDataKeys}
import com.intellij.openapi.diagnostic.Logger
import com.intellij.openapi.editor.ex.EditorEx
import com.intellij.openapi.util.TextRange
/**
* @author Ksenia.Sautina
* @since 9/18/12
*/
/**
 * Action that sends the current contents of the Scala console input editor to the
 * console process, line by line, and records the input in the console history.
 */
class ScalaConsoleExecuteAction extends AnAction {

  /** Enables the action only when the focused editor belongs to a live Scala console. */
  override def update(e: AnActionEvent) {
    val editor = e.getData(CommonDataKeys.EDITOR)
    if (editor == null || !editor.isInstanceOf[EditorEx]) {
      e.getPresentation.setEnabled(false)
      return
    }
    val console = ScalaConsoleInfo.getConsole(editor)
    if (console == null) {
      e.getPresentation.setEnabled(false)
      return
    }
    val isEnabled: Boolean = !editor.asInstanceOf[EditorEx].isRendererMode &&
      !ScalaConsoleInfo.getProcessHandler(editor).isProcessTerminated
    e.getPresentation.setEnabled(isEnabled)
  }

  /** Clears the input editor, stores the text in history and pipes it to the process stdin. */
  def actionPerformed(e: AnActionEvent) {
    val editor = e.getData(CommonDataKeys.EDITOR)
    if (editor == null) {
      return
    }
    val console = ScalaConsoleInfo.getConsole(editor)
    val processHandler = ScalaConsoleInfo.getProcessHandler(editor)
    val model = ScalaConsoleInfo.getController(editor)
    // editor is known to be non-null here thanks to the early return above,
    // so only the console-related lookups need to be checked.
    if (console != null && processHandler != null && model != null) {
      val document = console.getEditorDocument
      val text = document.getText
      // Process input and add to history
      extensions.inWriteAction {
        val range: TextRange = new TextRange(0, document.getTextLength)
        editor.getSelectionModel.setSelection(range.getStartOffset, range.getEndOffset)
        console.addToHistory(range, console.getConsoleEditor, true)
        model.addToHistory(text)
        editor.getCaretModel.moveToOffset(0)
        editor.getDocument.setText("")
      }
      text.split('\\n').foreach(line => {
        if (line != "") {
          val outputStream: OutputStream = processHandler.getProcessInput
          try {
            val bytes: Array[Byte] = (line + "\\n").getBytes
            outputStream.write(bytes)
            outputStream.flush()
          }
          catch {
            // Use `_` so the action-event parameter `e` is not shadowed;
            // a dead console process is not worth reporting here.
            case _: IOException => //ignore
          }
        }
        console.textSent(line + "\\n")
      })
    } else {
      ScalaConsoleExecuteAction.LOG.info(new Throwable(s"Enter action in console failed: $editor, " +
        s"$console"))
    }
  }
}
object ScalaConsoleExecuteAction {
  // Note: inside the companion object `this.getClass` resolves to the object's
  // own class (ScalaConsoleExecuteAction$), which becomes the logger category.
  private val LOG = Logger.getInstance(this.getClass)
}
| triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/console/ScalaConsoleExecuteAction.scala | Scala | apache-2.0 | 2,560 |
package io.github.mandar2812.dynaml.models.svm
import breeze.linalg.{DenseMatrix, norm, DenseVector}
import breeze.numerics.sqrt
import org.apache.log4j.Logger
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.stat.Statistics
import org.apache.spark.rdd.RDD
import io.github.mandar2812.dynaml.models.KernelizedModel
import io.github.mandar2812.dynaml.kernels.{SVMKernel, GaussianDensityKernel}
import io.github.mandar2812.dynaml.prototype.{QuadraticRenyiEntropy, GreedyEntropySelector}
import org.apache.spark.storage.StorageLevel
import scala.util.Random
/**
* Implementation of the Fixed Size
* Kernel based LS SVM
*
* Fixed Size implies that the model
* chooses a subset of the original
* data to calculate a low rank approximation
* to the kernel matrix.
*
* Feature Extraction is done in the primal
* space using the Nystrom approximation.
*
* @author mandar2812
*/
abstract class KernelSparkModel(data: RDD[LabeledPoint], task: String)
  extends KernelizedModel[RDD[(Long, LabeledPoint)], RDD[LabeledPoint],
    DenseVector[Double], DenseVector[Double], Double, Int, Int](task)
  with Serializable {

  // Data set with a stable Long index attached to each record.
  override protected val g = LSSVMSparkModel.indexedRDD(data)

  // Working copy of `g`; replaced by the feature-mapped data in applyFeatureMap.
  protected var processed_g = g

  // Column-wise statistics (mean, variance, count) of the raw feature vectors.
  val colStats = Statistics.colStats(g.map(_._2.features))

  override protected val nPoints: Long = colStats.count

  override protected var hyper_parameters: List[String] = List("RegParam")

  override protected var current_state: Map[String, Double] = Map("RegParam" -> 1.0)

  // Dimensionality of the raw feature vectors.
  protected var featuredims: Int = g.first()._2.features.size

  // Dimensionality after the Nystrom feature map (+1 for the intercept column).
  protected var effectivedims: Int = featuredims + 1

  // Prototype (sub-sampled) points used for the low-rank kernel approximation.
  protected var prototypes: List[DenseVector[Double]] = List()

  val logger = Logger.getLogger(this.getClass)

  override def getXYEdges: RDD[LabeledPoint] = data

  def getRegParam: Double

  def setRegParam(l: Double): this.type

  /** Sets the mini-batch fraction used by the underlying optimizer.
    * Must lie in [0, 1].
    */
  def setMiniBatchFraction(f: Double): this.type = {
    assert(f <= 1.0 && f >= 0.0, "Mini Batch Fraction must be between 0 and 1.0")
    this.optimizer.setMiniBatchFraction(f)
    this
  }

  override def initParams() = DenseVector.ones[Double](effectivedims)

  /** Selects `M` prototype points via entropy-based greedy subset selection,
    * with a Gaussian density kernel whose bandwidth follows Silverman's rule.
    * No-op (keeps all points implicit) when M >= number of points.
    *
    * NOTE(review): `points` and `npoints` (lowercase) are presumably inherited
    * from KernelizedModel — confirm; `nPoints` above is a distinct member.
    */
  override def optimumSubset(M: Int): Unit = {
    points = (0L to this.npoints - 1).toList
    if (M < this.npoints) {
      logger.info("Calculating sample variance of the data set")
      //Get the original features of the data
      //Calculate the column means and variances
      val (mean, variance) = (DenseVector(colStats.mean.toArray),
        DenseVector(colStats.variance.toArray))
      //Use the adjusted value of the variance
      // NOTE(review): `:/=` is Breeze's in-place element-wise division; it
      // mutates `variance` and returns it, so `adjvarance` aliases `variance`.
      val adjvarance: DenseVector[Double] = variance :/= (npoints.toDouble - 1)
      val density = new GaussianDensityKernel

      logger.info("Using Silvermans rule of thumb to set bandwidth of density kernel")
      logger.info("Std Deviation of the data: " + adjvarance.toString())
      logger.info("norm: " + norm(adjvarance))
      density.setBandwidth(DenseVector.tabulate[Double](featuredims) {
        i => 1.06 * math.sqrt(adjvarance(i)) / math.pow(npoints, 0.2)
      })
      logger.info("Building low rank approximation to kernel matrix")

      prototypes = GreedyEntropySelector.subsetSelectionQRE(this.g,
        new QuadraticRenyiEntropy(density), M, 25, 0.0001)
    }
  }

  /** Builds the Nystrom feature map from the kernel matrix over the (scaled)
    * prototypes, keeping only eigenvectors that pass the Girolami criterion,
    * then resets the parameter vector to match the new dimensionality.
    */
  override def applyKernel(kernel: SVMKernel[DenseMatrix[Double]],
                           M: Int = math.sqrt(npoints).toInt): Unit = {

    if(M != this.prototypes.length) {
      this.optimumSubset(M)
    }

    // A wider feature vector than `featuredims` means a previous feature map
    // was already applied; drop that cached copy.
    if(this.processed_g.first()._2.features.size > featuredims) {
      this.processed_g.unpersist()
    }

    val (mean, variance) = (DenseVector(colStats.mean.toArray),
      DenseVector(colStats.variance.toArray))

    val scalingFunc = KernelSparkModel.scalePrototype(mean, variance) _
    val scaledPrototypes = prototypes map scalingFunc

    val kernelMatrix =
      kernel.buildKernelMatrix(scaledPrototypes, M)
    val decomposition = kernelMatrix.eigenDecomposition(M)

    var selectedEigenVectors: List[DenseMatrix[Double]] = List()
    var selectedEigenvalues: List[Double] = List()

    (0 until M).foreach((p) => {
      //Check the Girolami criterion
      // (1.u)^2 >= 2M/(1+M)
      //This increases parsimony
      val u = decomposition._2(::, p)
      if(math.pow(norm(u,1), 2.0) >= 2.0*M/(1.0+M.toDouble)) {
        selectedEigenvalues :+= decomposition._1(p)
        selectedEigenVectors :+= u.toDenseMatrix
      }
    })

    logger.info("Selected Components: "+selectedEigenvalues.length)
    effectivedims = selectedEigenvalues.length + 1

    val decomp = (DenseVector(selectedEigenvalues.toArray),
      DenseMatrix.vertcat(selectedEigenVectors:_*).t)

    this.featureMap = kernel.featureMapping(decomp)(scaledPrototypes)
    this.params = DenseVector.ones[Double](effectivedims)
  }

  /** Standardises every feature vector (subtract column means, divide by the
    * column standard deviations), pushes it through the feature map, appends
    * the intercept column and caches the result in `processed_g`.
    */
  override def applyFeatureMap: Unit = {
    // Broadcast the scaling statistics and the feature map to the executors.
    val meanb = g.context.broadcast(DenseVector(colStats.mean.toArray))
    val varianceb = g.context.broadcast(DenseVector(colStats.variance.toArray))
    val featureMapb = g.context.broadcast(featureMap)

    this.processed_g = g.map((point) => {
      val vec = DenseVector(point._2.features.toArray)
      val ans = vec - meanb.value
      // In-place element-wise division by the column standard deviations.
      ans :/= sqrt(varianceb.value)
      (point._1, new LabeledPoint(
        point._2.label,
        Vectors.dense(DenseVector.vertcat(
          featureMapb.value(ans),
          DenseVector(1.0))
          .toArray)
      ))
    }).cache()
  }

  /** Splits `processed_g` into (training, test) RDDs by the given record
    * indices and persists both splits.
    */
  override def trainTest(test: List[Long]) = {
    val training_data = this.processed_g.filter((keyValue) =>
      !test.contains(keyValue._1)).map(_._2)

    val test_data = this.processed_g.filter((keyValue) =>
      test.contains(keyValue._1)).map(_._2)

    training_data.persist(StorageLevel.MEMORY_AND_DISK)
    test_data.persist(StorageLevel.MEMORY_AND_DISK)

    (training_data, test_data)
  }
}
object KernelSparkModel {

  /** Standardises a prototype vector: subtracts the column means and then
    * divides element-wise by the column standard deviations.
    *
    * Curried so the statistics can be fixed once and the resulting function
    * mapped over a collection of prototypes.
    */
  def scalePrototype(mean: DenseVector[Double],
                     variance: DenseVector[Double])
                    (prototype: DenseVector[Double]): DenseVector[Double] = {
    val centered = prototype - mean
    val stdDeviation = sqrt(variance)
    centered / stdDeviation
  }
}
| twitwi/DynaML | src/main/scala/io/github/mandar2812/dynaml/models/svm/KernelSparkModel.scala | Scala | apache-2.0 | 6,130 |
/*
* Copyright 2017-2020 47 Degrees, LLC. <http://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package higherkindness.mu.rpc.benchmarks
import java.util.concurrent.TimeUnit
import cats.effect.IO
import higherkindness.mu.rpc.benchmarks.shared.Utils._
import higherkindness.mu.rpc.benchmarks.shared.models._
import higherkindness.mu.rpc.benchmarks.shared.protocols.PersonServicePB
import higherkindness.mu.rpc.benchmarks.shared._
import higherkindness.mu.rpc.protocol.Empty
import org.openjdk.jmh.annotations._
@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class ProtoBenchmark extends ServerRuntime {

  // Lazy so the gRPC client is only built once the server channel exists.
  lazy val client = PersonServicePB.unsafeClientFromChannel[IO](clientChannel)

  @Setup
  def setup(): Unit = startServer

  @TearDown
  def shutdown(): Unit = tearDown

  // Each benchmark runs the IO synchronously with a timeout; `.get` throws
  // if the call does not complete within `defaultTimeOut`, failing the run.

  @Benchmark
  def listPersons: PersonList =
    client.listPersons(Empty).unsafeRunTimed(defaultTimeOut).get

  @Benchmark
  def getPerson: Person =
    client.getPerson(PersonId("1")).unsafeRunTimed(defaultTimeOut).get

  @Benchmark
  def getPersonLinks: PersonLinkList =
    client.getPersonLinks(PersonId("1")).unsafeRunTimed(defaultTimeOut).get

  @Benchmark
  def createPerson: Person =
    client.createPerson(person).unsafeRunTimed(defaultTimeOut).get

  /** Measures a sequential composition of several RPCs in one IO program. */
  @Benchmark
  def programComposition: PersonAggregation =
    (for {
      personList <- client.listPersons(Empty)
      p1         <- client.getPerson(PersonId("1"))
      p2         <- client.getPerson(PersonId("2"))
      p3         <- client.getPerson(PersonId("3"))
      p4         <- client.getPerson(PersonId("4"))
      p1Links    <- client.getPersonLinks(PersonId(p1.id))
      p3Links    <- client.getPersonLinks(PersonId(p3.id))
      pNew       <- client.createPerson(person)
    } yield (p1, p2, p3, p4, p1Links, p3Links, personList.add(pNew)))
      .unsafeRunTimed(defaultTimeOut)
      .get
}
| frees-io/freestyle-rpc | benchmarks/shared/src/main/scala/higherkindness/mu/rpc/benchmarks/ProtoBenchmark.scala | Scala | apache-2.0 | 2,430 |
package views.html
import play.templates._
import play.templates.TemplateMagic._
import play.api.templates._
import play.api.templates.PlayMagic._
import models._
import controllers._
import play.api.i18n._
import play.api.mvc._
import play.api.data._
import views.html._
/* adminHome Template File */
// GENERATED CODE: compiled by the Play template engine from
// app/views/adminHome.scala.html — do not edit by hand; regenerate instead.
object adminHome extends BaseScalaTemplate[play.api.templates.HtmlFormat.Appendable,Format[play.api.templates.HtmlFormat.Appendable]](play.api.templates.HtmlFormat) with play.api.templates.Template1[RequestHeader,play.api.templates.HtmlFormat.Appendable] {

/* adminHome Template File */
// Renders the admin home page; the /*r.c*/ markers map back to positions
// in the original .scala.html template.
def apply/*2.2*/()(implicit request: RequestHeader):play.api.templates.HtmlFormat.Appendable = {
_display_ {
Seq[Any](format.raw/*2.37*/("""
"""),_display_(/*3.2*/main("Admin Home")/*3.20*/{_display_(Seq[Any](format.raw/*3.21*/("""
<div class="well">
<h1>Admin Home</h1>
<div class="alert alert-success">
<h3><a href="/addDevice">Add Device</a></h3>
<h3><a href="/removeDevice">Remove Device</a></h3>
</div>
</div>
<div class="alert alert-success">
<div id="messages"></div>
</div>
<script type="text/javascript">
$(function()"""),format.raw/*16.15*/("""{"""),format.raw/*16.16*/("""
var ws = new WebSocket(""""),_display_(/*17.29*/routes/*17.35*/.Application.events().webSocketURL()),format.raw/*17.71*/("""");
ws.onmessage = function(msg)"""),format.raw/*18.32*/("""{"""),format.raw/*18.33*/("""
$('<h4 />').text(msg.data).appendTo('#messages');
"""),format.raw/*20.4*/("""}"""),format.raw/*20.5*/("""
"""),format.raw/*21.3*/("""}"""),format.raw/*21.4*/("""
)
</script>
""")))}))}
}

// Framework entry points used by the Play runtime.
def render(request:RequestHeader): play.api.templates.HtmlFormat.Appendable = apply()(request)

def f:(() => (RequestHeader) => play.api.templates.HtmlFormat.Appendable) = () => (request) => apply()(request)

def ref: this.type = this

}
/*
-- GENERATED --
DATE: Tue Jul 01 16:19:28 IST 2014
SOURCE: /home/nagarjuna/FooService/app/views/adminHome.scala.html
HASH: d997d8c98d7cbb05889dd05f035d6e063a70699e
MATRIX: 617->31|746->66|773->68|799->86|837->87|1184->406|1213->407|1269->436|1284->442|1341->478|1404->513|1433->514|1518->572|1546->573|1576->576|1604->577
LINES: 19->2|22->2|23->3|23->3|23->3|36->16|36->16|37->17|37->17|37->17|38->18|38->18|40->20|40->20|41->21|41->21
-- GENERATED --
*/
| pamu/FooService | FooService2/target/scala-2.10/src_managed/main/views/html/adminHome.template.scala | Scala | apache-2.0 | 2,546 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.driver.cube
import java.sql.{Date, Timestamp}
import akka.event.slf4j.SLF4JLogging
import com.stratio.sparta.driver.factory.SparkContextFactory
import com.stratio.sparta.driver.trigger.TriggerWriter
import com.stratio.sparta.sdk._
import org.apache.spark.sql._
import org.apache.spark.streaming.dstream.DStream
import scala.util.{Failure, Success, Try}
/** Per-cube write configuration.
  *
  * @param outputs            names of the configured outputs the cube writes to
  * @param dateType           type used to materialise the event-time column
  * @param fixedMeasures      constant measure values appended to every row
  * @param isAutoCalculatedId whether a synthetic id column is prepended to rows
  */
case class CubeWriterOptions(outputs: Seq[String],
                             dateType: TypeOp.Value = TypeOp.Timestamp,
                             fixedMeasures: MeasuresValues = MeasuresValues(Map.empty),
                             isAutoCalculatedId: Boolean = false)
/** Writes a cube's aggregated (dimensions, measures) stream to the configured
  * outputs as DataFrame upserts, and fires the cube's triggers.
  */
case class CubeWriter(cube: Cube,
                      tableSchema: TableSchema,
                      options: CubeWriterOptions,
                      outputs: Seq[Output],
                      triggerOutputs: Seq[Output],
                      triggerSchemas: Seq[TableSchema])
  extends TriggerWriter with SLF4JLogging {

  // Options passed to every upsert: table name, auto-id flag and, when the
  // cube has a time dimension, its column name.
  val upsertOptions = tableSchema.timeDimension.fold(Map.empty[String, String]) { timeName =>
    Map(Output.TimeDimensionKey -> timeName)
  } ++ Map(Output.TableNameKey -> tableSchema.tableName,
    Output.IdAutoCalculatedKey -> tableSchema.isAutoCalculatedId.toString)

  /** Converts each micro-batch to rows, upserts it into every configured
    * output and runs the cube triggers. Empty batches are skipped.
    */
  def write(stream: DStream[(DimensionValuesTime, MeasuresValues)]): Unit = {
    stream.map { case (dimensionValuesTime, measuresValues) =>
      toRow(dimensionValuesTime, measuresValues)
    }.foreachRDD(rdd => {
      // take(1) is a cheap non-empty check that avoids counting the whole RDD.
      if (rdd.take(1).length > 0) {
        val sqlContext = SparkContextFactory.sparkSqlContextInstance
        val cubeDataFrame = sqlContext.createDataFrame(rdd, tableSchema.schema)
        options.outputs.foreach(outputName =>
          outputs.find(output => output.name == outputName) match {
            case Some(outputWriter) => Try(outputWriter.upsert(cubeDataFrame, upsertOptions)) match {
              case Success(_) =>
                log.debug(s"Data stored in ${tableSchema.tableName}")
              case Failure(e) =>
                // A failed upsert is logged but does not abort the other outputs.
                log.error(s"Something goes wrong. Table: ${tableSchema.tableName}")
                log.error(s"Schema. ${cubeDataFrame.schema}")
                log.error(s"Head element. ${cubeDataFrame.head}")
                log.error(s"Error message : ", e)
            }
            case None => log.error(s"The output in the cube : $outputName not match in the outputs")
          })
        writeTriggers(cubeDataFrame, cube.triggers, tableSchema.tableName, triggerSchemas, triggerOutputs)
      } else log.debug("Empty event received")
    })
  }

  /** Builds a Row: [optional synthetic id] ++ sorted dimension values
    * [++ event time when configured] ++ measures sorted by name
    * (fixed measures merged in).
    */
  def toRow(dimensionValuesT: DimensionValuesTime, measures: MeasuresValues): Row = {
    val measuresSorted = measuresValuesSorted(measures.values ++ options.fixedMeasures.values)
    val rowValues = dimensionValuesT.timeConfig match {
      case None =>
        val dimensionValues = dimensionValuesWithId(dimensionsValuesSorted(dimensionValuesT.dimensionValues))
        dimensionValues ++ measuresSorted
      case Some(timeConfig) =>
        val timeValue = Seq(timeFromDateType(timeConfig.eventTime, options.dateType))
        // The time dimension is dropped from the regular dimensions and
        // re-appended as a typed time value at the end.
        val dimFilteredByTime = filterDimensionsByTime(dimensionValuesT.dimensionValues, timeConfig.timeDimension)
        val dimensionValues = dimensionValuesWithId(dimensionsValuesSorted(dimFilteredByTime) ++ timeValue)
        val measuresValuesWithTime = measuresSorted

        dimensionValues ++ measuresValuesWithTime
    }
    Row.fromSeq(rowValues)
  }

  // Dimension values in their natural (Ordering-defined) dimension order.
  private def dimensionsValuesSorted(dimensionValues: Seq[DimensionValue]): Seq[Any] =
    dimensionValues.sorted.map(dimVal => dimVal.value)

  // Measure values sorted by measure name; absent values become SQL NULLs,
  // hence the deliberate getOrElse(null).
  private def measuresValuesSorted(measures: Map[String, Option[Any]]): Seq[Any] =
    measures.toSeq.sortWith(_._1 < _._1).map(measure => measure._2.getOrElse(null))

  // Prepends a synthetic id (the concatenated values) when configured.
  private def dimensionValuesWithId(values: Seq[Any]): Seq[Any] =
    if (options.isAutoCalculatedId) Seq(values.mkString(Output.Separator)) ++ values
    else values

  // Removes the time dimension from the ordinary dimension values.
  private def filterDimensionsByTime(dimensionValues: Seq[DimensionValue], timeDimension: String): Seq[DimensionValue] =
    dimensionValues.filter(dimensionValue => dimensionValue.dimension.name != timeDimension)

  // Materialises an epoch-millis timestamp as the configured column type.
  private def timeFromDateType[T](time: Long, dateType: TypeOp.Value): Any = {
    dateType match {
      case TypeOp.Date | TypeOp.DateTime => new Date(time)
      case TypeOp.Long => time
      case TypeOp.Timestamp => new Timestamp(time)
      case _ => time.toString
    }
  }
}
object CubeWriter {

  // Separator used in "name:value" fixed-measure configuration strings.
  final val FixedMeasureSeparator = ":"
  // Default for the auto-calculated-id option when none is configured.
  final val DefaultIsAutocalculatedId = false
} | danielcsant/sparta | driver/src/main/scala/com/stratio/sparta/driver/cube/CubeWriter.scala | Scala | apache-2.0 | 5,171 |
/*
* Copyright 2013-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.render
import laika.tree.Elements._
/** API for renderers that produce character output.
*
* @param out the function to use for writing character data
* @param render the function to use for rendering child elements
* @param root the root element to render
* @param indentItem the string to write for a single level of indentation
* @param newLine the new line character sequence
*
* @author Jens Halm
*/
class TextWriter (out: String => Unit,
                  render: Element => Unit,
                  root: Element,
                  indentItem: String = "  ",
                  newLine: String = "\\n") {

  /** Mutable indentation state: `current` always holds the newline sequence
    * followed by the active indentation prefix.
    */
  protected object Indent {

    var current = newLine

    // Push one indentation level.
    def >>> = { current += indentItem; this }

    // Pop one indentation level.
    def <<< = { current = current.dropRight(indentItem.length); this }

    // Emit a line break plus the current indentation.
    def write = { out(current); this }

    /** Runs `block` with the indentation temporarily set to `indent` columns,
      * but only when that is deeper than the current level (and indentation
      * is enabled at all); restores the previous level afterwards.
      */
    def indented (indent: Int, block: => Unit): Unit = {
      if (indent > (current.length - 1) && indentItem.nonEmpty) {
        val oldIndent = current
        current = "\\n" + (" " * indent)
        block
        current = oldIndent
      }
      else block
    }

  }

  // Stack of elements currently being rendered; the head is the active one.
  private var elementStack: List[Element] = List(root)

  // All ancestors of the element currently being rendered.
  protected def parents: List[Element] = elementStack.tail

  // Renders a child element, keeping `elementStack` consistent around the call.
  private def renderElement (element: Element): Unit = {
    elementStack = element :: elementStack
    render(element)
    elementStack = elementStack.tail
  }

  /** Executes the specified block while temporarily
   *  shifting the indentation level (if it is greater
   *  than the currently active one).
   */
  def indented (indent: Int)(block: => Unit): Unit = Indent.indented(indent, block)

  /** Writes a new line character sequence.
   */
  def <| : this.type = { Indent.write; this }

  /** Writes the specified string to the output, on the same line.
   */
  def << (str: String): this.type = { out(str); this }

  /** Writes the specified elements to the output,
   *  all on the same line.
   */
  def << (elements: Seq[Element]): this.type = { elements.foreach(renderElement); this }

  /** Writes the specified element to the output,
   *  on the same line.
   */
  def << (element: Element): this.type = { renderElement(element); this }

  /** Writes the specified string to the output,
   *  on a new line using the current level of indentation.
   */
  def <<| (str: String): this.type = { Indent.write; out(str); this }

  /** Writes the specified elements to the output,
   *  each of them on a new line using the current level of indentation.
   */
  def <<| (elements: Seq[Element]): this.type = { elements.foreach(e => { Indent.write; renderElement(e) }); this }

  /** Writes the specified element to the output,
   *  on a new line using the current level of indentation.
   */
  def <<| (element: Element): this.type = { Indent.write; renderElement(element); this }

  /** Writes the specified string to the output,
   *  on a new line and increasing indentation one level to the right.
   */
  def <<|> (str: String): this.type = {
    Indent.>>>.write
    out(str)
    Indent.<<<.write
    this
  }

  /** Writes the specified elements to the output,
   *  each of them on a new line with the indentation increased one level to the right.
   */
  def <<|> (elements: Seq[Element]): this.type = {
    Indent.>>>
    elements.foreach { e =>
      Indent.write
      renderElement(e)
    }
    Indent.<<<
    this
  }

  /** Writes the specified element to the output,
   *  on a new line and increasing indentation one level to the right.
   */
  def <<|> (element: Element): this.type = {
    Indent.>>>.write
    renderElement(element)
    Indent.<<<.write
    this
  }

}
| amuramatsu/Laika | core/src/main/scala/laika/render/TextWriter.scala | Scala | apache-2.0 | 4,362 |
package com.mesosphere.universe
// index.json schema for each package from Universe
case class UniverseIndexEntry(
  // Package name as listed in the Universe index.
  name: String,
  // Version currently selected as the package's default.
  currentVersion: PackageDetailsVersion,
  // All published package versions mapped to their release ordinal.
  versions: Map[PackageDetailsVersion, ReleaseVersion],
  description: String,
  // Presumably marks packages that install a Mesos framework — confirm
  // against the Universe index.json specification.
  framework: Boolean = false,
  tags: List[String], //TODO: pattern: "^[^\\\\s]+$"
  // Optional "featured/selected" flag from the index, absent when unset.
  selected: Option[Boolean] = None
)
| movicha/cosmos | cosmos-model/src/main/scala/com/mesosphere/universe/UniverseIndexEntry.scala | Scala | apache-2.0 | 373 |
// Regression test (t8611a): the pattern-matching reachability analysis must
// NOT report `case O.B` below as unreachable — A and B are distinct stable
// identifiers even though they share the refined type LK.
// Do not "simplify" this file; its exact shape is what the test exercises.
trait K
trait L

object O {
  type LK = K with L
  val A: LK = new K with L
  val B: LK = new K with L
}

object Test extends App {
  val scrut: O.LK = O.B
  scrut match {
    case O.A => ???
    case O.B => // spurious unreachable
  }
}
| som-snytt/dotty | tests/run/t8611a.scala | Scala | apache-2.0 | 238 |
package org.bigsolr.spark
import org.bigsolr.hadoop.SolrInputFormat
import org.bigsolr.hadoop.SolrRecord
import org.apache.hadoop.io.NullWritable
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.conf.Configuration
import scala.collection.JavaConversions._
import scala.language.existentials
import org.apache.spark.sql._
import org.apache.spark.sql.sources.{TableScan, PrunedFilteredScan, BaseRelation, Filter}
/** Spark SQL relation backed by a Solr collection, scanned through the
  * Hadoop [[SolrInputFormat]].
  *
  * @param query      Solr query string selecting the documents
  * @param serverUrl  Solr server URL
  * @param serverMode Solr deployment mode (e.g. standalone vs. cloud)
  * @param collection Solr collection to read
  * @param fields     comma-separated list of fields to expose as columns
  */
case class SolrRelation(
  query: String,
  serverUrl: String,
  serverMode: String,
  collection: String,
  fields: String
  )(@transient val sqlContext: SQLContext) extends PrunedFilteredScan {

  // Every configured field is exposed as a nullable string column.
  val schema = {
    StructType(fields.split(",").map(fieldName => StructField(fieldName, StringType, true)))
  }

  /** Scans Solr and projects each record onto the requested columns.
    *
    * Note: `filters` are not pushed down to Solr; Spark re-applies them on
    * top of the returned rows.
    */
  override def buildScan(requiredColumns: Array[String], filters: Array[Filter]) = {

    // Build the Hadoop job configuration consumed by SolrInputFormat.
    val conf = new Configuration()
    conf.set("solr.query", query)
    conf.set("solr.server.url", serverUrl)
    conf.set("solr.server.mode", serverMode)
    conf.set("solr.server.collection", collection)
    conf.set("solr.server.fields", fields)

    val rdds = sqlContext.sparkContext.newAPIHadoopRDD(
      conf,
      classOf[SolrInputFormat],
      classOf[NullWritable],
      classOf[SolrRecord]
    )

    rdds.map {
      case (_, record) =>
        // Project the Solr record onto exactly the columns Spark asked for,
        // in the requested order.
        Row.fromSeq(requiredColumns.map(field => record.getFieldValues(field).toString()).toSeq)
    }
  }
} | mrt/bigsolr | src/main/scala/org/bigsolr/spark/SolrRelation.scala | Scala | apache-2.0 | 1,617 |
/*
* Copyright (C) 2013 Alcatel-Lucent.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Licensed to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package molecule
package stream
package ochan
package immutable
import parsing._
/**
* Parse a ichan continuously
*
*/
/** Output-channel transformer that parses the written segments continuously.
  *
  * @param complexity cost estimate of the transformation
  * @param reset      pristine parser used to (re)start after a full parse
  * @param parser     current (possibly partially applied) parser state
  */
case class ParserT[A, B: Message](
    val complexity: Int,
    val reset: Parser[B, A],
    val parser: Parser[B, A])
  extends Transformer[A, B] { outer =>

  final def apply(ochan: OChan[A]): OChan[B] = new OChan[B] {

    def close(signal: Signal) =
      ochan.close(signal)

    def write(t: UThread, seg: Seg[B], sigOpt: Option[Signal], k: OChan[B] => Unit): Unit = {
      sigOpt match {
        // Final write: flush the parser, propagate either the parse failure
        // or the original termination signal downstream.
        case sig @ Some(signal) =>
          val (bs, failure) = parseFinish(reset, parser, seg, signal)
          failure match {
            case sig @ Some(fail) =>
              ochan.write(t, bs, sig, utils.NOOP)
              k(OChan(fail))
            case _ =>
              ochan.write(t, bs, sig, utils.NOOP)
              k(OChan(signal))
          }
        // Intermediate write: feed the segment into the parser.
        case _ =>
          if (seg.isEmpty)
            k(this)
          else
            parse(parser, seg, t, k)
      }
    }

    // invariant: tail never nil
    // Parses as much of `bs` as possible, writes the parsed output and
    // continues with either a fresh (`reset`) or suspended parser state;
    // on failure the remainder is poisoned and the channel closed.
    private[this] final def parse(parser: Parser[B, A], bs: Seg[B], t: UThread, k: OChan[B] => Unit): Unit = {
      val (as, result) = parsePartial(reset, parser, bs)
      result match {
        case None =>
          ochan.write(t, as, None, ochan => k(ochan.add(new ParserT(complexity, reset, reset))))
        case Some(Right(parser)) =>
          ochan.write(t, as, None, ochan => k(ochan.add(new ParserT(complexity, reset, parser))))
        case Some(Left((fail, rem))) =>
          rem.poison(fail)
          ochan.close(fail)
          k(OChan(fail))
      }
    }

    def add[C: Message](transformer: Transformer[B, C]): OChan[C] =
      transformer(this)
  }
}
| molecule-labs/molecule | molecule-core/src/main/scala/molecule/stream/ochan/immutable/ParserT.scala | Scala | apache-2.0 | 2,440 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.process.query
import org.geotools.data.collection.ListFeatureCollection
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.util.Random
@RunWith(classOf[JUnitRunner])
class RouteSearchProcessTest extends Specification {

  import scala.collection.JavaConversions._

  sequential

  // Fixed-seed Random makes the generated fixtures deterministic; note the
  // nextDouble call ORDER below also determines the values, so the feature
  // definitions must not be reordered.
  val r = new Random(-10)

  val routeSft = SimpleFeatureTypes.createType("route", "*geom:LineString:srid=4326")
  val sft = SimpleFeatureTypes.createType("tracks", "track:String,heading:Double,dtg:Date,*geom:Point:srid=4326")

  val process = new RouteSearchProcess

  // Single test route: a diagonal leg (40,40)->(40.5,40.5) followed by a
  // vertical leg (40.5,40.5)->(40.5,41).
  val routes = new ListFeatureCollection(routeSft,
    List(ScalaSimpleFeature.create(routeSft, "r0", "LINESTRING (40 40, 40.5 40.5, 40.5 41)")))

  // features along the lower angled part of the route, headed in the opposite direction
  val features0 = (0 until 10).map { i =>
    val sf = new ScalaSimpleFeature(sft, s"0$i")
    sf.setAttribute("track", "0")
    sf.setAttribute("heading", Double.box(217.3 + (r.nextDouble * 10) - 5))
    sf.setAttribute("dtg", s"2017-02-20T00:00:0$i.000Z")
    val route = (40.0 + (10 - i) * 0.05) - (r.nextDouble / 100) - 0.005
    sf.setAttribute("geom", s"POINT($route $route)")
    sf
  }

  // features along the upper vertical part of the route
  val features1 = (0 until 10).map { i =>
    val sf = new ScalaSimpleFeature(sft, s"1$i")
    sf.setAttribute("track", "1")
    sf.setAttribute("heading", Double.box((r.nextDouble * 10) - 5))
    sf.setAttribute("dtg", s"2017-02-20T00:01:0$i.000Z")
    sf.setAttribute("geom", s"POINT(${40.5 + (r.nextDouble / 100) - 0.005} ${40.5 + (i + 1) * 0.005})")
    sf
  }

  // features along the upper vertical part of the route, but with a heading off by 5-15 degrees
  val features2 = (0 until 10).map { i =>
    val sf = new ScalaSimpleFeature(sft, s"2$i")
    sf.setAttribute("track", "2")
    sf.setAttribute("heading", Double.box(10 + (r.nextDouble * 10) - 5))
    sf.setAttribute("dtg", s"2017-02-20T00:02:0$i.000Z")
    sf.setAttribute("geom", s"POINT(${40.5 + (r.nextDouble / 100) - 0.005} ${40.5 + (i + 1) * 0.005})")
    sf
  }

  // features headed along the upper vertical part of the route, but not close to the route
  val features3 = (0 until 10).map { i =>
    val sf = new ScalaSimpleFeature(sft, s"3$i")
    sf.setAttribute("track", "3")
    sf.setAttribute("heading", Double.box((r.nextDouble * 10) - 5))
    sf.setAttribute("dtg", s"2017-02-20T00:03:0$i.000Z")
    sf.setAttribute("geom", s"POINT(${40.7 + (r.nextDouble / 10) - 0.005} ${40.5 + (i + 1) * 0.005})")
    sf
  }

  val input = new ListFeatureCollection(sft, features0 ++ features1 ++ features2 ++ features3)

  "RouteSearch" should {
    // execute(features, routes, bufferSizeMeters, headingToleranceDegrees,
    //         routeGeomField, featureGeomField, bidirectional, headingField)
    "return features along a route" in {
      val collection = process.execute(input, routes, 1000.0, 5.0, null, null, false, "heading")
      val results = SelfClosingIterator(collection.features).toSeq
      results must containTheSameElementsAs(features1)
    }
    "return features along a route with a wider heading tolerance" in {
      val collection = process.execute(input, routes, 1000.0, 15.0, null, null, false, "heading")
      val results = SelfClosingIterator(collection.features).toSeq
      results must containTheSameElementsAs(features1 ++ features2)
    }
    "return features along a wide buffered route" in {
      val collection = process.execute(input, routes, 100000.0, 5.0, null, null, false, "heading")
      val results = SelfClosingIterator(collection.features).toSeq
      results must containTheSameElementsAs(features1 ++ features3)
    }
    "return features along a bidirectional route" in {
      val collection = process.execute(input, routes, 1000.0, 5.0, null, null, true, "heading")
      val results = SelfClosingIterator(collection.features).toSeq
      results must containTheSameElementsAs(features0 ++ features1)
    }
  }
}
| ronq/geomesa | geomesa-process/geomesa-process-vector/src/test/scala/org/locationtech/geomesa/process/query/RouteSearchProcessTest.scala | Scala | apache-2.0 | 4,591 |
package scaspell.service
import scaspell.api.Spellchecker
import com.twitter.finagle.Service
import com.twitter.finagle.httpx.{Response, Request}
case class GetLanguagesService(spellchecker: Spellchecker) extends Service[Request, Response] {

  import spray.json._
  import DefaultJsonProtocol._

  /** Answers with a 200 JSON response listing the available spellchecker
    * languages, optionally narrowed by the request's "filter" parameter.
    */
  override def apply(req: Request) = {
    val filter = req.params.get("filter")
    spellchecker.availableLanguages(filter) map { languages =>
      val response = Response()
      response.setStatusCode(200)
      response.setContentType("application/json")
      response.setContentString(languages.toJson.compactPrint)
      response
    }
  }
}
| KimStebel/scalpel | src/main/scala/scaspell/service/GetLanguagesService.scala | Scala | apache-2.0 | 627 |
package yuuto.enhancedinventories.block.base
import java.util.ArrayList
import net.minecraft.block.Block
import net.minecraft.block.material.Material
import net.minecraft.enchantment.EnchantmentHelper
import net.minecraft.entity.EntityLivingBase
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.item.Item
import net.minecraft.item.ItemStack
import net.minecraft.stats.StatList
import net.minecraft.tileentity.TileEntity
import net.minecraft.world.World
import yuuto.enhancedinventories.EnhancedInventories
import yuuto.enhancedinventories.materials.DecorationHelper
import yuuto.enhancedinventories.ref.ReferenceEI
import yuuto.enhancedinventories.tile.base.TileBaseEI
import yuuto.enhancedinventories.tile.traits.TDecorative
import yuuto.yuutolib.block.ModBlockContainer
import yuuto.yuutolib.block.traits.TBlockContainerInventory
import net.minecraft.util.MovingObjectPosition
abstract class BlockBaseEI(mat:Material, name:String) extends ModBlockContainer(mat, EnhancedInventories.tab, ReferenceEI.MOD_ID, name) with TBlockContainerInventory{
override def harvestBlock(world:World, player:EntityPlayer, x:Int, y:Int, z:Int, meta:Int)
{
}
override def onBlockHarvested(world:World, x:Int, y:Int, z:Int, meta:Int, player:EntityPlayer) {
if(player.capabilities.isCreativeMode)
return;
player.addStat(StatList.mineBlockStatArray(Block.getIdFromBlock(this)), 1);
player.addExhaustion(0.025F);
harvesters.set(player);
val i1:Int = EnchantmentHelper.getFortuneModifier(player);
this.dropBlockAsItem(world, x, y, z, meta, i1);
harvesters.set(null);
}
override def getDrops(world:World, x:Int, y:Int, z:Int, meta:Int, fortune:Int):ArrayList[ItemStack]={
val tile:TileEntity = world.getTileEntity(x, y, z);
if(tile != null && tile.isInstanceOf[TileBaseEI]){
val item:Item = getItemDropped(meta, world.rand, fortune);
if(item != null){
return getDrops(world, x, y, z, meta, fortune, new ItemStack(item), tile.asInstanceOf[TileBaseEI]);
}
}
return super.getDrops(world, x, y, z, meta, fortune);
}
  /**
   * Gets the list of drops from a tile entity, allowing easy addition of
   * upgrade drops and NBT data to the dropped stack.
   * @param world the world the block is broken in
   * @param x block x coordinate
   * @param y block y coordinate
   * @param z block z coordinate
   * @param meta block metadata of the broken block
   * @param fortune fortune enchantment level of the harvesting tool
   * @param blockStack the plain stack to enrich with the tile's NBT data
   * @param tile the tile entity whose state is written onto the stack
   * @return a single-element list containing the NBT-enriched stack
   */
  def getDrops(world:World, x:Int, y:Int, z:Int, meta:Int, fortune:Int, blockStack:ItemStack, tile:TileBaseEI):ArrayList[ItemStack]={
    // Let the tile entity attach its state (upgrades, inventory, ...) to the stack.
    val blockStack2 = tile.getItemStack(blockStack);
    blockStack2.stackSize = quantityDropped(meta, fortune, world.rand);
    val ret:ArrayList[ItemStack] = new ArrayList[ItemStack]();
    ret.add(blockStack2);
    return ret;
  }
  // Adds the tile entity's NBT data to the item stack produced by
  // middle-click "pick block", so the picked item matches the placed block.
  override def getPickBlock(target:MovingObjectPosition, world:World, x:Int, y:Int, z:Int):ItemStack={
    val item:Item = getItem(world, x, y, z);
    if (item == null)
    {
        return null;
    }
    //Block block = item instanceof ItemBlock && !isFlowerPot() ? Block.getBlockFromItem(item) : this;
    var ret:ItemStack = new ItemStack(item, 1, world.getBlockMetadata(x, y, z));
    val tile:TileEntity = world.getTileEntity(x, y, z);
    if(tile != null && tile.isInstanceOf[TileBaseEI]){
      // Enrich the stack with the tile's state before returning it.
      ret = tile.asInstanceOf[TileBaseEI].getItemStack(ret);
      return ret;
    }
    return ret;
  }
} | AnimeniacYuuto/EnhancedInventories | src/main/scala/yuuto/enhancedinventories/block/base/BlockBaseEI.scala | Scala | gpl-2.0 | 3,404 |
/**
* Copyright 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package noop.model;
/**
* A boolean literal, either true or false.
*
* @author Erik Soe Sorensen (eriksoe@gmail.com)
* @author tocman@gmail.com (Jeremie Lenfant-Engelmann)
*/
class BooleanLiteralExpression(val value: Boolean) extends Expression {

  // Double-dispatch entry point for the visitor pattern.
  def accept(visitor: Visitor) = {
    visitor.visit(this);
  }

  override def hashCode = value.hashCode;

  // Two boolean literals are equal iff they wrap the same boolean value.
  override def equals(other: Any) =
    other match {
      case that: BooleanLiteralExpression => value == that.value;
      case _ => false;
    }
}
| masterx2/noop | core/src/main/scala/noop/model/BooleanLiteralExpression.scala | Scala | apache-2.0 | 1,103 |
package com.wanghuanming.tfidf
import scala.io._
object test {

  def main(args: Array[String]) = {
    // Smoke-test keyword extraction: fetch each page, strip its markup,
    // then print the top five keywords found by TFIDF.
    val URLs = Array("http://wanghuanming.com/2014/12/thread-and-process",
      "http://wanghuanming.com/2014/12/mesos-deploy",
      "http://www.cnblogs.com/jasonkoo/articles/2834727.html")

    URLs.foreach { url =>
      println(url)
      val content = Source.fromURL(url).mkString
      TFIDF.getKeywords(stripTags(content), 5).foreach(println)
    }
  }

  // Removes <script> sections, remaining HTML tags, and all whitespace.
  private def stripTags(article: String) = {
    article
      .replaceAll("<script[\\\\s\\\\S]*</script>", "")
      .replaceAll("<[^>]*?>", "")
      .replaceAll("\\\\s", "")
  }
}
| HelloCodeMing/scala-tfidf | src/test/scala/com/wanghuanming/tfidf/test.scala | Scala | mit | 672 |
package core
import org.scalatest.{FunSpec, Matchers}
class ImportedResourcePathsSpec extends FunSpec with Matchers {

  // Verifies that a resource declared against a model imported from another
  // service still gets its path derived from the model name ("/users"),
  // not from the fully-qualified imported type name.
  it("generates appropriate path for resources from imported models") {
    // Service that owns the "user" model.
    val common = """
    {
      "name": "common",
      "namespace": "test.common.v0",
      "models": {
        "user": {
          "fields": [
            { "name": "id", "type": "string" }
          ]
        }
      }
    }
    """

    val uri = "http://localhost/test/common/0.0.1/service.json"
    // Service that imports "common" and exposes a resource on its user model.
    val user = s"""
    {
      "name": "user",
      "imports": [ { "uri": "$uri" } ],
      "resources": {
        "test.common.v0.models.user": {
          "operations": [
            { "method": "DELETE" }
          ]
        }
      }
    }
    """

    // Stub out the HTTP fetch of the imported service definition.
    val fetcher = MockServiceFetcher()
    fetcher.add(uri, TestHelper.serviceValidatorFromApiJson(common).service())

    val validator = TestHelper.serviceValidatorFromApiJson(user, fetcher = fetcher)
    validator.errors() should be(Nil)

    val userResource = validator.service().resources.head
    userResource.operations.map(_.path) should be(Seq("/users"))
  }

}
| apicollective/apibuilder | core/src/test/scala/core/ImportedResourcePathsSpec.scala | Scala | mit | 1,123 |
package artisanal.pickle.maker
import models._
import parser._
import org.specs2._
import mutable._
import specification._
import scala.reflect.internal.pickling.ByteCodecs
import scala.tools.scalap.scalax.rules.scalasig._
import com.novus.salat.annotations.util._
import scala.reflect.ScalaSignature
class ListIntSpec extends mutable.Specification {

  "a ScalaSig for case class MyRecord_ListInt(po: List[Int])" should {
    "have the correct string" in {
      // Hand-build a pickle signature and check it parses to the same
      // structure as the compiler-emitted @ScalaSignature annotation.
      val handMadeSig = new artisanal.pickle.maker.ScalaSig(List("case class"), List("models", "MyRecord_ListInt"), List(("po", "List[Int]")))
      val compilerParsed = SigParserHelper.parseByteCodeFromAnnotation(classOf[MyRecord_ListInt]).map(ScalaSigAttributeParsers.parse(_)).get
      val handMadeParsed = SigParserHelper.parseByteCodeFromMySig(handMadeSig).map(ScalaSigAttributeParsers.parse(_)).get
      compilerParsed.toString === handMadeParsed.toString
    }
  }
}
| julianpeeters/artisanal-pickle-maker | src/test/scala/singleValueMember/ListSpecs/List[Int]Spec.scala | Scala | apache-2.0 | 925 |
package io.eels.component.hive
import java.io.File
import io.eels.Row
import io.eels.datastream.DataStream
import io.eels.schema.{Field, StringType, StructType}
import org.scalatest.{FunSuite, Matchers}
class HiveAlignmentTest extends FunSuite with Matchers {

  import HiveConfig._

  val dbname = HiveTestUtils.createTestDatabase
  // Unique table name per run to avoid collisions between test executions.
  private val table = "align_test_" + System.currentTimeMillis()

  test("pad a row with nulls") {
    // Skip unless a local Hive/Hadoop configuration is available.
    assume(new File(s"$basePath/core-site.xml").exists)

    val schema = StructType(Field("a", StringType), Field("b", StringType, true))

    HiveTable(dbname, table).drop()
    HiveTable(dbname, table).create(schema)

    // Write rows that lack column "b"; the sink should pad it with null.
    DataStream.fromValues(schema.removeField("b"), Seq(Seq("a"))).to(HiveSink(dbname, table))
    HiveSource(dbname, table).toDataStream().collect shouldBe Vector(
      Row(schema, Vector("a", null))
    )
  }

  test("align a row with the hive metastore") {
    assume(new File(s"$basePath/core-site.xml").exists)

    HiveTable(dbname, table).drop()

    // correct schema
    val schema1 = StructType(Field("a", StringType), Field("b", StringType, true))
    DataStream.fromValues(schema1, Seq(Seq("a", "b"))).to(HiveSink(dbname, table).withCreateTable(true))

    // reversed schema, the row should be aligned
    val schema2 = StructType(Field("b", StringType), Field("a", StringType, true))
    DataStream.fromValues(schema2, Seq(Seq("b", "a"))).to(HiveSink(dbname, table))

    // Both rows should come back in metastore column order (a, b).
    HiveSource(dbname, table).toDataStream().collect shouldBe Vector(
      Row(schema1, Vector("a", "b")),
      Row(schema1, Vector("a", "b"))
    )
  }
}
| eel-lib/eel | eel-hive/src/test/scala/io/eels/component/hive/HiveAlignmentTest.scala | Scala | mit | 1,595 |
package com.emptyarray.scala.android.util
import android.os.AsyncTask
import scala.concurrent.ExecutionContext
/**
* Created by Weasel on 7/7/15.
*/
// Exposes Android's shared AsyncTask thread pool as a scala.concurrent
// ExecutionContext, so Futures can be scheduled on it without creating
// a separate executor.
object ThreadPoolExecutionContext {
  // Implicit so call sites can simply `import ThreadPoolExecutionContext._`.
  implicit val execContext = ExecutionContext.fromExecutor( AsyncTask.THREAD_POOL_EXECUTOR )
}
| emptyarray/scala-android-animation | src/main/scala/com/emptyarray/scala/android/util/ThreadPoolExecutionContext.scala | Scala | apache-2.0 | 285 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.validation
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.expressions.utils.Func0
import org.apache.flink.table.runtime.utils.JavaUserDefinedAggFunctions.OverAgg0
import org.apache.flink.table.utils.TableTestBase
import org.junit.Test
class UserDefinedFunctionValidationTest extends TableTestBase {

  // A scalar UDF called with a String argument while its only signature
  // takes an int must be rejected at validation time with a precise message.
  @Test
  def testScalarFunctionOperandTypeCheck(): Unit = {
    thrown.expect(classOf[ValidationException])
    thrown.expectMessage(
      "Given parameters of function 'func' do not match any signature. \n" +
        "Actual: (java.lang.String) \n" +
        "Expected: (int)")
    val util = streamTestUtil()
    util.addTable[(Int, String)]("t", 'a, 'b)
    util.tableEnv.registerFunction("func", Func0)
    // "n/a": the plan is never produced because validation fails first.
    util.verifySql("select func(b) from t", "n/a")
  }

  // An aggregate UDF whose accumulate signature is (acc, long, int) must
  // reject a call passing (String, Integer).
  @Test
  def testAggregateFunctionOperandTypeCheck(): Unit = {
    thrown.expect(classOf[ValidationException])
    thrown.expectMessage(
      "Given parameters of function do not match any signature. \n" +
        "Actual: (org.apache.flink.table.runtime.utils.JavaUserDefinedAggFunctions" +
        ".Accumulator0, java.lang.String, java.lang.Integer) \n" +
        "Expected: (org.apache.flink.table.runtime.utils.JavaUserDefinedAggFunctions" +
        ".Accumulator0, long, int)")
    val util = streamTestUtil()
    val agg = new OverAgg0
    util.addTable[(Int, String)]("t", 'a, 'b)
    util.tableEnv.registerFunction("agg", agg)
    util.verifySql("select agg(b, a) from t", "n/a")
  }
}
| tzulitai/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/validation/UserDefinedFunctionValidationTest.scala | Scala | apache-2.0 | 2,364 |
/*
* This file is part of Kiama.
*
* Copyright (C) 2011-2015 Anthony M Sloane, Macquarie University.
*
* Kiama is free software: you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your
* option) any later version.
*
* Kiama is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
* more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Kiama. (See files COPYING and COPYING.LESSER.) If not, see
* <http://www.gnu.org/licenses/>.
*/
package org.kiama
package example.oberon0
package L1.source
// Pretty-printer for the L1 Oberon-0 language level: adds IF and WHILE
// statements on top of the L0 printer, using Kiama's document combinators
// (<+> space-sep, <> concat, <@> line-sep).
trait SourcePrettyPrinter extends L0.source.SourcePrettyPrinter {

    import base.source.{Block, Expression, SourceNode}

    override def toDoc (n : SourceNode) : Doc =
        n match {
            case s : IfStatement =>
                ifToDoc (s)
            case s : WhileStatement =>
                "WHILE" <+> toDoc (s.cond) <+> "DO" <> semisep (s.block.stmts) <@> "END"
            case _ =>
                // Delegate everything else to the L0 printer.
                super.toDoc (n)
        }

    // Renders IF <cond> THEN ... [ELSIF ...]* [ELSE ...] END.
    def ifToDoc (s : IfStatement) : Doc = {

        // One ELSIF branch: (condition, block) pair on its own line.
        def elsifToDoc (ei : (Expression, Block)) : Doc =
            line <> "ELSIF" <+> toDoc (ei._1) <+> "THEN" <> semisep (ei._2.stmts)

        "IF" <+> toDoc (s.cond) <+> "THEN" <>
        semisep (s.block.stmts) <>
        hcat (s.elsifs map elsifToDoc) <>
        s.optelse.map (b => line <> "ELSE" <> semisep (b.stmts)).getOrElse (empty) <@>
        "END"
    }

}
| joaoraf/kiama | library/src/org/kiama/example/oberon0/L1/source/SourcePrettyPrinter.scala | Scala | gpl-3.0 | 1,730 |
//
// MessagePack for Java
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package org.msgpack.core
import java.io.{FileInputStream, FileOutputStream, File, ByteArrayOutputStream}
import org.msgpack.core.buffer.{ChannelBufferOutput, MessageBufferOutput, OutputStreamBufferOutput}
import xerial.core.io.IOUtil
import scala.util.Random
import org.msgpack.value.ValueFactory
/**
*
*/
// Unit tests for MessagePacker: reset semantics over different buffer
// outputs (streams, channels), buffer-boundary handling, and byte counting.
class MessagePackerTest extends MessagePackSpec {

  val msgpack = MessagePack.DEFAULT

  // Unpacks `packed` as a sequence of ints and asserts it equals `answer`.
  def verifyIntSeq(answer:Array[Int], packed:Array[Byte]) {
    val unpacker = msgpack.newUnpacker(packed)
    val b = Array.newBuilder[Int]
    while(unpacker.hasNext) {
      b += unpacker.unpackInt()
    }
    val result = b.result
    result.size shouldBe answer.size
    result shouldBe answer
  }

  // Temp file helpers; files are removed on JVM exit.
  def createTempFile = {
    val f = File.createTempFile("msgpackTest", "msgpack")
    f.deleteOnExit
    f
  }

  def createTempFileWithOutputStream = {
    val f = createTempFile
    val out = new FileOutputStream(f)
    (f, out)
  }

  def createTempFileWithChannel = {
    val (f, out) = createTempFileWithOutputStream
    val ch = out.getChannel
    (f, ch)
  }

  "MessagePacker" should {

    // A packer must be reusable after reset() with a fresh buffer output.
    "reset the internal states" in {
      val intSeq = (0 until 100).map(i => Random.nextInt).toArray
      val b = new ByteArrayOutputStream
      val packer = msgpack.newPacker(b)
      intSeq foreach packer.packInt
      packer.close
      verifyIntSeq(intSeq, b.toByteArray)

      val intSeq2 = intSeq.reverse
      val b2 = new ByteArrayOutputStream
      packer.reset(new OutputStreamBufferOutput(b2))
      intSeq2 foreach packer.packInt
      packer.close
      verifyIntSeq(intSeq2, b2.toByteArray)

      val intSeq3 = intSeq2.sorted
      val b3 = new ByteArrayOutputStream
      packer.reset(new OutputStreamBufferOutput(b3))
      intSeq3 foreach packer.packInt
      packer.close
      verifyIntSeq(intSeq3, b3.toByteArray)
    }

    // Reusing one OutputStreamBufferOutput (resetting its stream) should be
    // no slower on average than allocating a new buffer output per message.
    "improve the performance via reset method" taggedAs("reset") in {
      val N = 1000
      val t = time("packer", repeat = 10) {
        block("no-buffer-reset") {
          val out = new ByteArrayOutputStream
          IOUtil.withResource(msgpack.newPacker(out)) { packer =>
            for (i <- 0 until N) {
              val outputStream = new ByteArrayOutputStream()
              packer.reset(new OutputStreamBufferOutput(outputStream))
              packer.packInt(0)
              packer.flush()
            }
          }
        }

        block("buffer-reset") {
          val out = new ByteArrayOutputStream
          IOUtil.withResource(msgpack.newPacker(out)) { packer =>
            val bufferOut = new OutputStreamBufferOutput(new ByteArrayOutputStream())
            for (i <- 0 until N) {
              val outputStream = new ByteArrayOutputStream()
              bufferOut.reset(outputStream)
              packer.reset(bufferOut)
              packer.packInt(0)
              packer.flush()
            }
          }
        }
      }

      t("buffer-reset").averageWithoutMinMax should be <= t("no-buffer-reset").averageWithoutMinMax
    }

    // Regression test: packing a string array larger than the internal packer
    // buffer must not fail (https://github.com/msgpack/msgpack-java/issues/154).
    "pack larger string array than byte buf" taggedAs ("larger-string-array-than-byte-buf") in {
      // Based on https://github.com/msgpack/msgpack-java/issues/154
      // TODO: Refactor this test code to fit other ones.
      def test(bufferSize: Int, stringSize: Int): Boolean = {
        val msgpack = new MessagePack(new MessagePack.ConfigBuilder().packerBufferSize(bufferSize).build)
        val str = "a" * stringSize
        val rawString = ValueFactory.newRawString(str.getBytes("UTF-8"))
        val array = ValueFactory.newArray(rawString)

        val out = new ByteArrayOutputStream()
        val packer = msgpack.newPacker(out)
        packer.packValue(array)
        packer.close()
        out.toByteArray
        true
      }

      // (bufferSize, stringSize) pairs straddling the buffer boundary.
      val testCases = List(
        32 -> 30,
        33 -> 31,
        32 -> 31,
        34 -> 32
      )
      testCases.foreach{
        case (bufferSize, stringSize) => test(bufferSize, stringSize)
      }
    }

    // reset() with a new OutputStreamBufferOutput must redirect subsequent
    // writes to the new stream, including across flush boundaries.
    "reset OutputStreamBufferOutput" in {
      val (f0, out0) = createTempFileWithOutputStream
      val packer = MessagePack.newDefaultPacker(out0)
      packer.packInt(99)
      packer.close

      val up0 = MessagePack.newDefaultUnpacker(new FileInputStream(f0))
      up0.unpackInt shouldBe 99
      up0.hasNext shouldBe false
      up0.close

      val (f1, out1) = createTempFileWithOutputStream
      packer.reset(new OutputStreamBufferOutput(out1))
      packer.packInt(99)
      packer.flush
      packer.reset(new OutputStreamBufferOutput(out1))
      packer.packString("hello")
      packer.close

      val up1 = MessagePack.newDefaultUnpacker(new FileInputStream(f1))
      up1.unpackInt shouldBe 99
      up1.unpackString shouldBe "hello"
      up1.hasNext shouldBe false
      up1.close
    }

    // Same as above but writing through a FileChannel-backed output.
    "reset ChannelBufferOutput" in {
      val (f0, out0) = createTempFileWithChannel
      val packer = MessagePack.newDefaultPacker(out0)
      packer.packInt(99)
      packer.close

      val up0 = MessagePack.newDefaultUnpacker(new FileInputStream(f0))
      up0.unpackInt shouldBe 99
      up0.hasNext shouldBe false
      up0.close

      val (f1, out1) = createTempFileWithChannel
      packer.reset(new ChannelBufferOutput(out1))
      packer.packInt(99)
      packer.flush
      packer.reset(new ChannelBufferOutput(out1))
      packer.packString("hello")
      packer.close

      val up1 = MessagePack.newDefaultUnpacker(new FileInputStream(f1))
      up1.unpackInt shouldBe 99
      up1.unpackString shouldBe "hello"
      up1.hasNext shouldBe false
      up1.close
    }
  }

  // getTotalWrittenBytes must match the number of bytes actually emitted.
  "compute totalWrittenBytes" in {
    val out = new ByteArrayOutputStream
    val packerTotalWrittenBytes = IOUtil.withResource(msgpack.newPacker(out)) { packer =>
      packer.packByte(0) // 1
        .packBoolean(true) // 1
        .packShort(12) // 1
        .packInt(1024) // 3
        .packLong(Long.MaxValue) // 5
        .packString("foobar") // 7
        .flush()
      packer.getTotalWrittenBytes
    }

    out.toByteArray.length shouldBe packerTotalWrittenBytes
  }
}
| xerial/msgpack-java | msgpack-core/src/test/scala/org/msgpack/core/MessagePackerTest.scala | Scala | apache-2.0 | 6,669 |
package org.jetbrains.plugins.scala.lang.psi.types.api
import java.util.concurrent.ConcurrentMap
import com.intellij.util.containers.ContainerUtil
import org.jetbrains.plugins.scala.extensions.TraversableExt
import org.jetbrains.plugins.scala.lang.psi.types.api.ParameterizedType.substitutorCache
import org.jetbrains.plugins.scala.lang.psi.types.{ScSubstitutor, ScType}
import scala.collection.immutable.HashSet
/**
* @author adkozlov
*/
/**
 * A type applied to type arguments, e.g. `List[Int]`: a designator
 * (`List`) plus the argument types (`Int`).
 */
trait ParameterizedType extends TypeInTypeSystem with ValueType {
  val designator: ScType
  val typeArguments: Seq[ScType]

  // Cached view of substitutorInner; the cache is a weak concurrent map, so a
  // racing duplicate computation is harmless (both produce the same value).
  def substitutor = Option(substitutorCache.get(this)).getOrElse {
    val result = substitutorInner
    substitutorCache.put(this, result)
    result
  }

  protected def substitutorInner: ScSubstitutor

  override def removeAbstracts = ParameterizedType(designator.removeAbstracts,
    typeArguments.map(_.removeAbstracts))

  // Recursively applies `update` to this type and its components; `visited`
  // breaks cycles in recursive type definitions.
  override def recursiveUpdate(update: ScType => (Boolean, ScType), visited: HashSet[ScType]): ScType = {
    if (visited.contains(this)) {
      // Already seen: apply the update once more but do not recurse.
      return update(this) match {
        case (true, res) => res
        case _ => this
      }
    }
    val newVisited = visited + this
    update(this) match {
      case (true, res) => res
      case _ =>
        ParameterizedType(designator.recursiveUpdate(update, newVisited),
          typeArguments.map(_.recursiveUpdate(update, newVisited)))
    }
  }

  // Depth = max(designator depth, deepest argument + 1).
  override def typeDepth = {
    val result = designator.typeDepth
    typeArguments.map(_.typeDepth) match {
      case Seq() => result //todo: shouldn't be possible
      case seq => result.max(seq.max + 1)
    }
  }

  // A parameterized type is final when its designator is final and no type
  // parameter is variant (no co-/contravariance to exploit in subtyping).
  override def isFinalType: Boolean = designator.isFinalType && typeArguments.filterBy(classOf[TypeParameterType])
    .forall(_.isInvariant)
}
object ParameterizedType {
  /**
   * Weak concurrent cache from a parameterized type to its substitutor;
   * entries disappear when the type is no longer referenced.
   */
  val substitutorCache: ConcurrentMap[ParameterizedType, ScSubstitutor] =
    ContainerUtil.createConcurrentWeakMap[ParameterizedType, ScSubstitutor]()

  /** Builds a parameterized type through the type system's factory. */
  def apply(designator: ScType, typeArguments: Seq[ScType])
           (implicit typeSystem: TypeSystem) = typeSystem.parameterizedType(designator, typeArguments)

  /**
   * Extractor yielding (designator, typeArguments).
   * Note: the tuple is constructed explicitly — the original relied on
   * deprecated auto-tupling of `Some(a, b)`.
   */
  def unapply(parameterized: ParameterizedType): Option[(ScType, Seq[ScType])] =
    Some((parameterized.designator, parameterized.typeArguments))
}
| whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/types/api/ParameterizedType.scala | Scala | apache-2.0 | 2,258 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.cluster
import akka.actor.ExtendedActorSystem
import akka.serialization.SerializerWithStringManifest
import akka.serialization.BaseSerializer
import com.lightbend.lagom.internal.cluster.ClusterDistribution.EnsureActive
import com.lightbend.lagom.internal.cluster.protobuf.msg.{ ClusterMessages => cm }
/**
 * Akka manifest-based serializer for Lagom cluster messages. Currently only
 * EnsureActive is supported; it is carried as its protobuf representation.
 */
private[lagom] class ClusterMessageSerializer(val system: ExtendedActorSystem)
  extends SerializerWithStringManifest
  with BaseSerializer {

  // Wire manifest identifying an EnsureActive payload.
  val EnsureActiveManifest = "E"

  override def manifest(obj: AnyRef): String = obj match {
    case _: EnsureActive => EnsureActiveManifest
    case _ =>
      throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass} in [${getClass.getName}]")
  }

  def toBinary(obj: AnyRef): Array[Byte] = obj match {
    case ea: EnsureActive =>
      // EnsureActive -> protobuf -> bytes.
      cm.EnsureActive.newBuilder().setEntityId(ea.entityId).build().toByteArray
    case _ =>
      throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass} in [${getClass.getName}]")
  }

  override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = manifest match {
    case EnsureActiveManifest =>
      // bytes -> protobuf -> EnsureActive.
      EnsureActive(cm.EnsureActive.parseFrom(bytes).getEntityId)
    case _ =>
      throw new IllegalArgumentException(
        s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]"
      )
  }
}
| ignasi35/lagom | cluster/core/src/main/scala/com/lightbend/lagom/internal/cluster/ClusterMessageSerializer.scala | Scala | apache-2.0 | 1,863 |
package clustering.metrics.indexes
import scala.collection.mutable.ListBuffer
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.mllib.stat.Statistics
import org.apache.spark.rdd.RDD.doubleRDDToDoubleRDDFunctions
import org.apache.spark.sql.DataFrame
import clustering.metrics.Spark
import clustering.metrics.ClusteringIndexes
import clustering.metrics.ClusteringIndexes.ResultIndex
import clustering.metrics.ClusteringIndexes.TuplaModelos
object IndexKL {
/**
* ***************************************************************************
* *
* KL INDEX (Krzanowski and Lai) - 1988 *
* *
* Index = |DIFFq / DIFFq+1| *
* *
* DIFFq = (pow((q - 1), 2/p) * Wq-1) - (pow(q, 2/p) * Wq) *
* *
* The maximum value of the index indicates the best solution *
* *
* *
* ***************************************************************************
*/
  /**
   * Computes the KL index for every clustering model in `modelTuples`
   * (k-means, bisecting k-means, GMM) and returns the ranked results.
   * Lower Wq = within-cluster dispersion; the KL index compares successive
   * DIFFq values and the maximum indicates the best k.
   */
  def calculate(modelTuples: List[TuplaModelos], vectorData: DataFrame) = {
    println(s"KL INDEX -> ${modelTuples.map(_.k)}")

    import Spark.spark.implicits._

    // p = dimensionality of the feature vectors (used in the 2/p exponent).
    val p = vectorData.head().getAs[org.apache.spark.ml.linalg.Vector]("features").size.toDouble

    // (k, Wq) accumulators per algorithm; the GMM one also tracks the
    // requested k alongside the number of non-empty clusters actually found.
    val WqByKKmeans: ListBuffer[Tuple2[Int, Double]] = ListBuffer[Tuple2[Int, Double]]()
    val WqByKBisectingKmeans: ListBuffer[Tuple2[Int, Double]] = ListBuffer[Tuple2[Int, Double]]()
    val WqByKGMM: ListBuffer[Tuple3[Int, Double, Int]] = ListBuffer[Tuple3[Int, Double, Int]]()

    for (modelsK <- modelTuples if (modelsK.k > 1)) {
      val k = modelsK.k

      // The KL index can only be used for k greater than 2, since it needs
      // the model for the previous k as well.
      if (k > 2) {
        println(s"CALCULANDO KL INDEX PARA k = $k")
        val modelKMeans = modelsK.modelKMeans
        val modelBisectingKMeans = modelsK.modelBisectingKMeans
        val modelGMM = modelsK.modelGMM

        // KMEANS
        if (modelKMeans != null) {
          // Wq = WSSSE (Within Set Sum of Squared Errors)
          val Wq = modelKMeans._1.computeCost(vectorData)
          WqByKKmeans += Tuple2(k, Wq)
        }

        // BISECTING KMEANS
        if (modelBisectingKMeans != null) {
          // Wq = WSSSE (Within Set Sum of Squared Errors)
          val Wq = modelBisectingKMeans._1.computeCost(vectorData)
          WqByKBisectingKmeans += Tuple2(k, Wq)
        }

        // GAUSSIAN MIXTURES (GMM)
        if (modelGMM != null) {
          val clusteredData = modelGMM._2
          var numClustersFinales = 0
          var Wq = 0.0
          for (cluster <- 0 to k - 1) {
            val clusterData = clusteredData.where("prediction =" + cluster)
            val numObjetosCluster = clusterData.count()
            if (numObjetosCluster > 0) {
              numClustersFinales = numClustersFinales + 1
              // NOTE(review): `centroide` is computed but unused below — Wq is
              // accumulated from MaxProb instead; confirm this is intentional.
              val centroide = modelGMM._1.gaussians(cluster).mean
              Wq = Wq + clusterData.map(x => x.getAs[Double]("MaxProb")).rdd.sum
            }
          }
          WqByKGMM += Tuple3(numClustersFinales, Wq, k)
        }
      }
    }

    val listResultFinal = ListBuffer.empty[ResultIndex]

    if (!WqByKKmeans.isEmpty) {
      // Seed the k=1 case (whole-dataset dispersion) when it was requested.
      if (modelTuples.map(_.k).sorted.head == 1) {
        val features = vectorData.rdd.map(x => org.apache.spark.mllib.linalg.Vectors.dense(x.getAs[org.apache.spark.ml.linalg.Vector]("features").toArray))
        val centroideDataSet = Statistics.colStats(features).mean
        val Wq = vectorData.map(x => Vectors.sqdist(centroideDataSet.asML, x.getAs[org.apache.spark.ml.linalg.Vector]("features"))).rdd.sum()
        WqByKKmeans += Tuple2(1, Wq)
      }

      // DIFFq = (q-1)^(2/p) * Wq-1  -  q^(2/p) * Wq, over consecutive k.
      val DIFFsq = WqByKKmeans.sortBy(x => x._1).sliding(2).map(x => {
        val qActual = x(1)._1
        val WqActual = x(1)._2
        val qAnterior = x(0)._1
        val WqAnterior = x(0)._2
        val resAnterior = (math.pow((qAnterior), (2 / p))) * WqAnterior
        val resActual = (math.pow(qActual, (2 / p))) * WqActual
        val DIFFq = resAnterior - resActual
        (qActual, DIFFq)
      }).toList

      // KL(q) = |DIFFq / DIFFq+1|, again over consecutive entries.
      val KLIndexes = DIFFsq.sortBy(_._1).sliding(2).map(x => {
        val DIFFq1 = x(0)._2
        val DIFFq2 = x(1)._2
        val KLIndex = math.abs(DIFFq1 / DIFFq2)
        (x(0)._1, KLIndex)
      }).toList

      // Rank ascending by index value; higher rank (points) = better k.
      val result = KLIndexes.sortBy(x => x._2)
      var points = 0
      for (result_value <- result) {
        listResultFinal += ResultIndex(ClusteringIndexes.METHOD_KMEANS, ClusteringIndexes.INDEX_KL, result_value._2, points, result_value._1, result_value._1)
        points = points + 1
      }
    }

    if (!WqByKBisectingKmeans.isEmpty) {
      // Same pipeline as above, for bisecting k-means.
      if (modelTuples.map(_.k).sorted.head == 1) {
        val features = vectorData.rdd.map(x => org.apache.spark.mllib.linalg.Vectors.dense(x.getAs[org.apache.spark.ml.linalg.Vector]("features").toArray))
        val centroideDataSet = Statistics.colStats(features).mean
        val Wq = vectorData.map(x => Vectors.sqdist(centroideDataSet.asML, x.getAs[org.apache.spark.ml.linalg.Vector]("features"))).rdd.sum()
        WqByKBisectingKmeans += Tuple2(1, Wq)
      }

      val DIFFsq = WqByKBisectingKmeans.sortBy(x => x._1).sliding(2).map(x => {
        val qActual = x(1)._1
        val WqActual = x(1)._2
        val qAnterior = x(0)._1
        val WqAnterior = x(0)._2
        val resAnterior = (math.pow((qAnterior), (2 / p))) * WqAnterior
        val resActual = (math.pow(qActual, (2 / p))) * WqActual
        val DIFFq = resAnterior - resActual
        (qActual, DIFFq)
      }).toList

      val KLIndexes = DIFFsq.sortBy(x => x._1).sliding(2).map(x => {
        val DIFFq1 = x(0)._2
        val DIFFq2 = x(1)._2
        val KLIndex = math.abs(DIFFq1 / DIFFq2)
        (x(0)._1, KLIndex)
      }).toList

      val result = KLIndexes.sortBy(x => x._2)
      var points = 0
      for (result_value <- result) {
        listResultFinal += ResultIndex(ClusteringIndexes.METHOD_BISECTING_KMEANS, ClusteringIndexes.INDEX_KL, result_value._2, points, result_value._1, result_value._1)
        points = points + 1
      }
    }

    if (!WqByKGMM.isEmpty) {
      // Same pipeline for GMM; tuples carry (finalClusters, Wq, requestedK).
      if (modelTuples.map(_.k).sorted.head == 1) {
        val features = vectorData.rdd.map(x => org.apache.spark.mllib.linalg.Vectors.dense(x.getAs[org.apache.spark.ml.linalg.Vector]("features").toArray))
        val centroideDataSet = Statistics.colStats(features).mean
        val Wq = vectorData.map(x => Vectors.sqdist(centroideDataSet.asML, x.getAs[org.apache.spark.ml.linalg.Vector]("features"))).rdd.sum()
        WqByKGMM += Tuple3(1, Wq, 1)
      }

      val DIFFsq = WqByKGMM.sortBy(x => x._1).sliding(2).map(x => {
        val qActual = x(1)._1
        val WqActual = x(1)._2
        val qAnterior = x(0)._1
        val WqAnterior = x(0)._2
        val resAnterior = (math.pow((qAnterior), (2 / p))) * WqAnterior
        val resActual = (math.pow(qActual, (2 / p))) * WqActual
        val DIFFq = resAnterior - resActual
        (qActual, DIFFq, x(1)._3)
      }).toList

      val KLIndexes = DIFFsq.sortBy(x => x._1).sliding(2).map(x => {
        val DIFFq1 = x(0)._2
        val DIFFq2 = x(1)._2
        val KLIndex = math.abs(DIFFq1 / DIFFq2)
        (x(0)._1, KLIndex, x(0)._3)
      }).toList

      val result = KLIndexes.sortBy(x => x._2)
      var points = 0
      for (result_value <- result) {
        listResultFinal += ResultIndex(ClusteringIndexes.METHOD_GMM, ClusteringIndexes.INDEX_KL, result_value._2, points, result_value._3, result_value._1)
        points = points + 1
      }
    }

    listResultFinal
  }
} | DanielTizon/ClusteringMetrics | src/main/scala/clustering/metrics/indexes/IndexKL.scala | Scala | apache-2.0 | 7,852 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import scala.collection.mutable
import scala.concurrent.duration._
import org.mockito.Mockito.{mock, when}
import org.scalatest.{BeforeAndAfterAll, PrivateMethodTester}
import org.scalatest.concurrent.Eventually._
import org.apache.spark._
import org.apache.spark.deploy.DeployMessages.{MasterStateResponse, RequestMasterState}
import org.apache.spark.deploy.master.ApplicationInfo
import org.apache.spark.deploy.master.Master
import org.apache.spark.deploy.worker.Worker
import org.apache.spark.rpc.{RpcAddress, RpcEndpointRef, RpcEnv}
import org.apache.spark.scheduler.TaskSchedulerImpl
import org.apache.spark.scheduler.cluster._
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.RegisterExecutor
/**
* End-to-end tests for dynamic allocation in standalone mode.
*/
class StandaloneDynamicAllocationSuite
extends SparkFunSuite
with LocalSparkContext
with BeforeAndAfterAll
with PrivateMethodTester {
private val numWorkers = 2
private val conf = new SparkConf()
private val securityManager = new SecurityManager(conf)
private var masterRpcEnv: RpcEnv = null
private var workerRpcEnvs: Seq[RpcEnv] = null
private var master: Master = null
private var workers: Seq[Worker] = null
/**
* Start the local cluster.
* Note: local-cluster mode is insufficient because we want a reference to the Master.
*/
override def beforeAll(): Unit = {
super.beforeAll()
masterRpcEnv = RpcEnv.create(Master.SYSTEM_NAME, "localhost", 0, conf, securityManager)
workerRpcEnvs = (0 until numWorkers).map { i =>
RpcEnv.create(Worker.SYSTEM_NAME + i, "localhost", 0, conf, securityManager)
}
master = makeMaster()
workers = makeWorkers(10, 2048)
// Wait until all workers register with master successfully
eventually(timeout(60.seconds), interval(10.millis)) {
assert(getMasterState.workers.size === numWorkers)
}
}
override def afterAll(): Unit = {
try {
masterRpcEnv.shutdown()
workerRpcEnvs.foreach(_.shutdown())
master.stop()
workers.foreach(_.stop())
masterRpcEnv = null
workerRpcEnvs = null
master = null
workers = null
} finally {
super.afterAll()
}
}
test("dynamic allocation default behavior") {
sc = new SparkContext(appConf)
val appId = sc.applicationId
eventually(timeout(10.seconds), interval(10.millis)) {
val apps = getApplications()
assert(apps.size === 1)
assert(apps.head.id === appId)
assert(apps.head.executors.size === 2)
assert(apps.head.getExecutorLimit === Int.MaxValue)
}
// kill all executors
assert(killAllExecutors(sc))
var apps = getApplications()
assert(apps.head.executors.size === 0)
assert(apps.head.getExecutorLimit === 0)
// request 1
assert(sc.requestExecutors(1))
apps = getApplications()
assert(apps.head.executors.size === 1)
assert(apps.head.getExecutorLimit === 1)
// request 1 more
assert(sc.requestExecutors(1))
apps = getApplications()
assert(apps.head.executors.size === 2)
assert(apps.head.getExecutorLimit === 2)
// request 1 more; this one won't go through
assert(sc.requestExecutors(1))
apps = getApplications()
assert(apps.head.executors.size === 2)
assert(apps.head.getExecutorLimit === 3)
// kill all existing executors; we should end up with 3 - 2 = 1 executor
assert(killAllExecutors(sc))
apps = getApplications()
assert(apps.head.executors.size === 1)
assert(apps.head.getExecutorLimit === 1)
// kill all executors again; this time we'll have 1 - 1 = 0 executors left
assert(killAllExecutors(sc))
apps = getApplications()
assert(apps.head.executors.size === 0)
assert(apps.head.getExecutorLimit === 0)
// request many more; this increases the limit well beyond the cluster capacity
assert(sc.requestExecutors(1000))
apps = getApplications()
assert(apps.head.executors.size === 2)
assert(apps.head.getExecutorLimit === 1000)
}
  // Exercises executor request/kill accounting when spark.cores.max can be absorbed
  // by a single worker, so a scaled-down app may relaunch one fat executor instead
  // of spreading cores across workers.
  test("dynamic allocation with max cores <= cores per worker") {
    sc = new SparkContext(appConf.set("spark.cores.max", "8"))
    val appId = sc.applicationId
    // Wait for the app to register with the Master and receive its initial executors.
    eventually(timeout(10.seconds), interval(10.millis)) {
      val apps = getApplications()
      assert(apps.size === 1)
      assert(apps.head.id === appId)
      assert(apps.head.executors.size === 2)
      assert(apps.head.executors.values.map(_.cores).toArray === Array(4, 4))
      assert(apps.head.getExecutorLimit === Int.MaxValue)
    }
    // kill all executors
    assert(killAllExecutors(sc))
    var apps = getApplications()
    assert(apps.head.executors.size === 0)
    assert(apps.head.getExecutorLimit === 0)
    // request 1
    assert(sc.requestExecutors(1))
    apps = getApplications()
    assert(apps.head.executors.size === 1)
    // NOTE(review): assumes each worker has at least 8 cores (worker setup is outside
    // this view) -- the single executor soaks up the entire core budget.
    assert(apps.head.executors.values.head.cores === 8)
    assert(apps.head.getExecutorLimit === 1)
    // request 1 more; this one won't go through because we're already at max cores.
    // This highlights a limitation of using dynamic allocation with max cores WITHOUT
    // setting cores per executor: once an application scales down and then scales back
    // up, its executors may not be spread out anymore!
    assert(sc.requestExecutors(1))
    apps = getApplications()
    assert(apps.head.executors.size === 1)
    assert(apps.head.getExecutorLimit === 2)
    // request 1 more; this one also won't go through for the same reason
    assert(sc.requestExecutors(1))
    apps = getApplications()
    assert(apps.head.executors.size === 1)
    assert(apps.head.getExecutorLimit === 3)
    // kill all existing executors; we should end up with 3 - 1 = 2 executor
    // Note: we scheduled these executors together, so their cores should be evenly distributed
    assert(killAllExecutors(sc))
    apps = getApplications()
    assert(apps.head.executors.size === 2)
    assert(apps.head.executors.values.map(_.cores).toArray === Array(4, 4))
    assert(apps.head.getExecutorLimit === 2)
    // kill all executors again; this time we'll have 1 - 1 = 0 executors left
    assert(killAllExecutors(sc))
    apps = getApplications()
    assert(apps.head.executors.size === 0)
    assert(apps.head.getExecutorLimit === 0)
    // request many more; this increases the limit well beyond the cluster capacity
    assert(sc.requestExecutors(1000))
    apps = getApplications()
    assert(apps.head.executors.size === 2)
    assert(apps.head.executors.values.map(_.cores).toArray === Array(4, 4))
    assert(apps.head.getExecutorLimit === 1000)
  }
  // Same accounting as above, but with a core budget larger than any one worker,
  // so one-by-one scheduling produces unevenly sized executors (10 + 6 = 16).
  test("dynamic allocation with max cores > cores per worker") {
    sc = new SparkContext(appConf.set("spark.cores.max", "16"))
    val appId = sc.applicationId
    // Wait for the app to register with the Master and receive its initial executors.
    eventually(timeout(10.seconds), interval(10.millis)) {
      val apps = getApplications()
      assert(apps.size === 1)
      assert(apps.head.id === appId)
      assert(apps.head.executors.size === 2)
      assert(apps.head.executors.values.map(_.cores).toArray === Array(8, 8))
      assert(apps.head.getExecutorLimit === Int.MaxValue)
    }
    // kill all executors
    assert(killAllExecutors(sc))
    var apps = getApplications()
    assert(apps.head.executors.size === 0)
    assert(apps.head.getExecutorLimit === 0)
    // request 1
    assert(sc.requestExecutors(1))
    apps = getApplications()
    assert(apps.head.executors.size === 1)
    // NOTE(review): 10 cores implies a worker-size of 10 here -- confirm against the
    // worker setup (outside this view).
    assert(apps.head.executors.values.head.cores === 10)
    assert(apps.head.getExecutorLimit === 1)
    // request 1 more
    // Note: the cores are not evenly distributed because we scheduled these executors 1 by 1
    assert(sc.requestExecutors(1))
    apps = getApplications()
    assert(apps.head.executors.size === 2)
    assert(apps.head.executors.values.map(_.cores).toSet === Set(10, 6))
    assert(apps.head.getExecutorLimit === 2)
    // request 1 more; this one won't go through
    assert(sc.requestExecutors(1))
    apps = getApplications()
    assert(apps.head.executors.size === 2)
    assert(apps.head.getExecutorLimit === 3)
    // kill all existing executors; we should end up with 3 - 2 = 1 executor
    assert(killAllExecutors(sc))
    apps = getApplications()
    assert(apps.head.executors.size === 1)
    assert(apps.head.executors.values.head.cores === 10)
    assert(apps.head.getExecutorLimit === 1)
    // kill all executors again; this time we'll have 1 - 1 = 0 executors left
    assert(killAllExecutors(sc))
    apps = getApplications()
    assert(apps.head.executors.size === 0)
    assert(apps.head.getExecutorLimit === 0)
    // request many more; this increases the limit well beyond the cluster capacity
    assert(sc.requestExecutors(1000))
    apps = getApplications()
    assert(apps.head.executors.size === 2)
    assert(apps.head.executors.values.map(_.cores).toArray === Array(8, 8))
    assert(apps.head.getExecutorLimit === 1000)
  }
  // With a fixed executor size (2 cores) and no max-cores cap, the cluster saturates
  // at 10 executors; the executor limit may exceed what can actually be scheduled.
  test("dynamic allocation with cores per executor") {
    sc = new SparkContext(appConf.set("spark.executor.cores", "2"))
    val appId = sc.applicationId
    // Wait for the app to register with the Master and receive its initial executors.
    eventually(timeout(10.seconds), interval(10.millis)) {
      val apps = getApplications()
      assert(apps.size === 1)
      assert(apps.head.id === appId)
      assert(apps.head.executors.size === 10) // 20 cores total
      assert(apps.head.getExecutorLimit === Int.MaxValue)
    }
    // kill all executors
    assert(killAllExecutors(sc))
    var apps = getApplications()
    assert(apps.head.executors.size === 0)
    assert(apps.head.getExecutorLimit === 0)
    // request 1
    assert(sc.requestExecutors(1))
    apps = getApplications()
    assert(apps.head.executors.size === 1)
    assert(apps.head.getExecutorLimit === 1)
    // request 3 more
    assert(sc.requestExecutors(3))
    apps = getApplications()
    assert(apps.head.executors.size === 4)
    assert(apps.head.getExecutorLimit === 4)
    // request 10 more; only 6 will go through
    assert(sc.requestExecutors(10))
    apps = getApplications()
    assert(apps.head.executors.size === 10)
    assert(apps.head.getExecutorLimit === 14)
    // kill 2 executors; we should get 2 back immediately
    assert(killNExecutors(sc, 2))
    apps = getApplications()
    assert(apps.head.executors.size === 10)
    assert(apps.head.getExecutorLimit === 12)
    // kill 4 executors; we should end up with 12 - 4 = 8 executors
    assert(killNExecutors(sc, 4))
    apps = getApplications()
    assert(apps.head.executors.size === 8)
    assert(apps.head.getExecutorLimit === 8)
    // kill all executors; this time we'll have 8 - 8 = 0 executors left
    assert(killAllExecutors(sc))
    apps = getApplications()
    assert(apps.head.executors.size === 0)
    assert(apps.head.getExecutorLimit === 0)
    // request many more; this increases the limit well beyond the cluster capacity
    assert(sc.requestExecutors(1000))
    apps = getApplications()
    assert(apps.head.executors.size === 10)
    assert(apps.head.getExecutorLimit === 1000)
  }
  // Combines a fixed executor size (2 cores) with a total core cap (8), so at most
  // 4 executors ever run concurrently even while the requested limit climbs higher.
  test("dynamic allocation with cores per executor AND max cores") {
    sc = new SparkContext(appConf
      .set("spark.executor.cores", "2")
      .set("spark.cores.max", "8"))
    val appId = sc.applicationId
    // Wait for the app to register with the Master and receive its initial executors.
    eventually(timeout(10.seconds), interval(10.millis)) {
      val apps = getApplications()
      assert(apps.size === 1)
      assert(apps.head.id === appId)
      assert(apps.head.executors.size === 4) // 8 cores total
      assert(apps.head.getExecutorLimit === Int.MaxValue)
    }
    // kill all executors
    assert(killAllExecutors(sc))
    var apps = getApplications()
    assert(apps.head.executors.size === 0)
    assert(apps.head.getExecutorLimit === 0)
    // request 1
    assert(sc.requestExecutors(1))
    apps = getApplications()
    assert(apps.head.executors.size === 1)
    assert(apps.head.getExecutorLimit === 1)
    // request 3 more
    assert(sc.requestExecutors(3))
    apps = getApplications()
    assert(apps.head.executors.size === 4)
    assert(apps.head.getExecutorLimit === 4)
    // request 10 more; none will go through
    assert(sc.requestExecutors(10))
    apps = getApplications()
    assert(apps.head.executors.size === 4)
    assert(apps.head.getExecutorLimit === 14)
    // kill all executors; 4 executors will be launched immediately
    assert(killAllExecutors(sc))
    apps = getApplications()
    assert(apps.head.executors.size === 4)
    assert(apps.head.getExecutorLimit === 10)
    // ... and again
    assert(killAllExecutors(sc))
    apps = getApplications()
    assert(apps.head.executors.size === 4)
    assert(apps.head.getExecutorLimit === 6)
    // ... and again; now we end up with 6 - 4 = 2 executors left
    assert(killAllExecutors(sc))
    apps = getApplications()
    assert(apps.head.executors.size === 2)
    assert(apps.head.getExecutorLimit === 2)
    // ... and again; this time we have 2 - 2 = 0 executors left
    assert(killAllExecutors(sc))
    apps = getApplications()
    assert(apps.head.executors.size === 0)
    assert(apps.head.getExecutorLimit === 0)
    // request many more; this increases the limit well beyond the cluster capacity
    assert(sc.requestExecutors(1000))
    apps = getApplications()
    assert(apps.head.executors.size === 4)
    assert(apps.head.getExecutorLimit === 1000)
  }
  // Regression test for SPARK-9795: a repeated kill of the same executor must be a
  // no-op, in particular the executor limit must only be decremented once.
  test("kill the same executor twice (SPARK-9795)") {
    sc = new SparkContext(appConf)
    val appId = sc.applicationId
    sc.requestExecutors(2)
    eventually(timeout(10.seconds), interval(10.millis)) {
      val apps = getApplications()
      assert(apps.size === 1)
      assert(apps.head.id === appId)
      assert(apps.head.executors.size === 2)
      assert(apps.head.getExecutorLimit === 2)
    }
    // sync executors between the Master and the driver, needed because
    // the driver refuses to kill executors it does not know about
    syncExecutors(sc)
    // kill the same executor twice
    val executors = getExecutorIds(sc)
    assert(executors.size === 2)
    assert(sc.killExecutor(executors.head))
    assert(!sc.killExecutor(executors.head))
    val apps = getApplications()
    assert(apps.head.executors.size === 1)
    // The limit should not be lowered twice
    assert(apps.head.getExecutorLimit === 1)
  }
  // Regression test for SPARK-10515: an executor killed via killAndReplaceExecutor
  // must be replaced (app stays at 2 executors with a different id set), and the
  // replaced id can no longer be killed a second time.
  test("the pending replacement executors should not be lost (SPARK-10515)") {
    sc = new SparkContext(appConf)
    val appId = sc.applicationId
    sc.requestExecutors(2)
    eventually(timeout(10.seconds), interval(10.millis)) {
      val apps = getApplications()
      assert(apps.size === 1)
      assert(apps.head.id === appId)
      assert(apps.head.executors.size === 2)
      assert(apps.head.getExecutorLimit === 2)
    }
    // sync executors between the Master and the driver, needed because
    // the driver refuses to kill executors it does not know about
    syncExecutors(sc)
    val executors = getExecutorIds(sc)
    val executorIdsBefore = executors.toSet
    assert(executors.size === 2)
    // kill and replace an executor
    assert(sc.killAndReplaceExecutor(executors.head))
    eventually(timeout(10.seconds), interval(10.millis)) {
      val apps = getApplications()
      assert(apps.head.executors.size === 2)
      val executorIdsAfter = getExecutorIds(sc).toSet
      // make sure the executor was killed and replaced
      assert(executorIdsBefore != executorIdsAfter)
    }
    // kill old executor (which is killedAndReplaced) should fail
    assert(!sc.killExecutor(executors.head))
    // refresh executors list
    val newExecutors = getExecutorIds(sc)
    syncExecutors(sc)
    // kill newly created executor and do not replace it
    assert(sc.killExecutor(newExecutors(1)))
    val apps = getApplications()
    assert(apps.head.executors.size === 1)
    assert(apps.head.getExecutorLimit === 1)
  }
  // Regression test for SPARK-9552: a non-forced kill must be refused while the
  // executor has running tasks; a forced kill must still go through.
  test("disable force kill for busy executors (SPARK-9552)") {
    sc = new SparkContext(appConf)
    val appId = sc.applicationId
    eventually(timeout(10.seconds), interval(10.millis)) {
      val apps = getApplications()
      assert(apps.size === 1)
      assert(apps.head.id === appId)
      assert(apps.head.executors.size === 2)
      assert(apps.head.getExecutorLimit === Int.MaxValue)
    }
    var apps = getApplications()
    // sync executors between the Master and the driver, needed because
    // the driver refuses to kill executors it does not know about
    syncExecutors(sc)
    val executors = getExecutorIds(sc)
    assert(executors.size === 2)
    // simulate running a task on the executor
    // (reach into TaskSchedulerImpl's private task-tracking map via reflection)
    val getMap =
      PrivateMethod[mutable.HashMap[String, mutable.HashSet[Long]]]('executorIdToRunningTaskIds)
    val taskScheduler = sc.taskScheduler.asInstanceOf[TaskSchedulerImpl]
    val executorIdToRunningTaskIds = taskScheduler invokePrivate getMap()
    executorIdToRunningTaskIds(executors.head) = mutable.HashSet(1L)
    // kill the busy executor without force; this should fail
    assert(killExecutor(sc, executors.head, force = false).isEmpty)
    apps = getApplications()
    assert(apps.head.executors.size === 2)
    // force kill busy executor
    assert(killExecutor(sc, executors.head, force = true).nonEmpty)
    apps = getApplications()
    // kill executor successfully
    assert(apps.head.executors.size === 1)
  }
test("initial executor limit") {
val initialExecutorLimit = 1
val myConf = appConf
.set("spark.dynamicAllocation.enabled", "true")
.set("spark.shuffle.service.enabled", "true")
.set("spark.dynamicAllocation.initialExecutors", initialExecutorLimit.toString)
sc = new SparkContext(myConf)
val appId = sc.applicationId
eventually(timeout(10.seconds), interval(10.millis)) {
val apps = getApplications()
assert(apps.size === 1)
assert(apps.head.id === appId)
assert(apps.head.executors.size === initialExecutorLimit)
assert(apps.head.getExecutorLimit === initialExecutorLimit)
}
}
// ===============================
// | Utility methods for testing |
// ===============================
/** Return a SparkConf for applications that want to talk to our Master. */
private def appConf: SparkConf = {
new SparkConf()
.setMaster(masterRpcEnv.address.toSparkURL)
.setAppName("test")
.set("spark.executor.memory", "256m")
}
  /** Make a master to which our application will send executor requests. */
  private def makeMaster(): Master = {
    // Register the Master as an RPC endpoint so apps/workers can reach it by name.
    val master = new Master(masterRpcEnv, masterRpcEnv.address, 0, securityManager, conf)
    masterRpcEnv.setupEndpoint(Master.ENDPOINT_NAME, master)
    master
  }
  /** Make a few workers that talk to our master.
   *
   * @param cores  cores advertised by each worker
   * @param memory memory (MB) advertised by each worker
   */
  private def makeWorkers(cores: Int, memory: Int): Seq[Worker] = {
    (0 until numWorkers).map { i =>
      val rpcEnv = workerRpcEnvs(i)
      val worker = new Worker(rpcEnv, 0, cores, memory, Array(masterRpcEnv.address),
        Worker.ENDPOINT_NAME, null, conf, securityManager)
      rpcEnv.setupEndpoint(Worker.ENDPOINT_NAME, worker)
      worker
    }
  }
  /** Get the Master state (synchronous ask against the master endpoint). */
  private def getMasterState: MasterStateResponse = {
    master.self.askWithRetry[MasterStateResponse](RequestMasterState)
  }
  /** Get the applications that are active from Master */
  private def getApplications(): Seq[ApplicationInfo] = {
    getMasterState.activeApps
  }
  /** Kill all executors belonging to this application (delegates to [[killNExecutors]]). */
  private def killAllExecutors(sc: SparkContext): Boolean = {
    killNExecutors(sc, Int.MaxValue)
  }
  /** Kill N executors belonging to this application. Syncs Master/driver views first. */
  private def killNExecutors(sc: SparkContext, n: Int): Boolean = {
    syncExecutors(sc)
    sc.killExecutors(getExecutorIds(sc).take(n))
  }
  /** Kill the given executor, specifying whether to force kill it.
   *
   * Returns the ids accepted for killing by the scheduler backend; empty means the
   * request was refused (e.g. the executor is busy and force = false).
   */
  private def killExecutor(sc: SparkContext, executorId: String, force: Boolean): Seq[String] = {
    syncExecutors(sc)
    sc.schedulerBackend match {
      case b: CoarseGrainedSchedulerBackend =>
        b.killExecutors(Seq(executorId), replace = false, force)
      case _ => fail("expected coarse grained scheduler")
    }
  }
  /**
   * Return a list of executor IDs belonging to this application.
   *
   * Note that we must use the executor IDs according to the Master, which has the most
   * updated view. We cannot rely on the executor IDs according to the driver because we
   * don't wait for executors to register. Otherwise the tests will take much longer to run.
   */
  private def getExecutorIds(sc: SparkContext): Seq[String] = {
    val app = getApplications().find(_.id == sc.applicationId)
    assert(app.isDefined)
    // Although executors is transient, master is in the same process so the message won't be
    // serialized and it's safe here.
    app.get.executors.keys.map(_.toString).toSeq
  }
  /**
   * Sync executor IDs between the driver and the Master.
   *
   * This allows us to avoid waiting for new executors to register with the driver before
   * we submit a request to kill them. This must be called before each kill request.
   */
  private def syncExecutors(sc: SparkContext): Unit = {
    // Executors the driver already knows about (minus the driver's own block manager).
    val driverExecutors = sc.getExecutorStorageStatus
      .map(_.blockManagerId.executorId)
      .filter { _ != SparkContext.DRIVER_IDENTIFIER}
    val masterExecutors = getExecutorIds(sc)
    // Executors the Master has but the driver hasn't seen yet.
    val missingExecutors = masterExecutors.toSet.diff(driverExecutors.toSet).toSeq.sorted
    missingExecutors.foreach { id =>
      // Fake an executor registration so the driver knows about us
      val endpointRef = mock(classOf[RpcEndpointRef])
      val mockAddress = mock(classOf[RpcAddress])
      when(endpointRef.address).thenReturn(mockAddress)
      val message = RegisterExecutor(id, endpointRef, "localhost", 10, Map.empty)
      val backend = sc.schedulerBackend.asInstanceOf[CoarseGrainedSchedulerBackend]
      backend.driverEndpoint.askWithRetry[Boolean](message)
    }
  }
}
| spark0001/spark2.1.1 | core/src/test/scala/org/apache/spark/deploy/StandaloneDynamicAllocationSuite.scala | Scala | apache-2.0 | 22,612 |
package pl.abankowski.musicbrainz.client.service
import scala.concurrent.Future
import pl.abankowski.musicbrainz.client.dto.LabelId
import pl.abankowski.musicbrainz.client.dto.LabelInfo
import pl.abankowski.musicbrainz.client.dto.ResourceResult
import pl.abankowski.musicbrainz.client.dto.ResourceResult
import pl.abankowski.musicbrainz.client.query._
/** Asynchronous read and search access to MusicBrainz label resources. */
trait LabelService {
  /** Fetch a single label by id; None presumably when no label matches -- implementation-defined. */
  def get(id: LabelId): Future[Option[LabelInfo]]
  /** Execute a label search query and return the wrapped result set. */
  def search(query: Query): Future[ResourceResult[LabelInfo]]
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.status.api.v1
import java.lang.{Long => JLong}
import java.util.Date
import scala.xml.{NodeSeq, Text}
import com.fasterxml.jackson.annotation.JsonIgnoreProperties
import com.fasterxml.jackson.core.{JsonGenerator, JsonParser}
import com.fasterxml.jackson.core.`type`.TypeReference
import com.fasterxml.jackson.databind.{DeserializationContext, JsonDeserializer, JsonSerializer, SerializerProvider}
import com.fasterxml.jackson.databind.annotation.{JsonDeserialize, JsonSerialize}
import org.apache.spark.JobExecutionStatus
import org.apache.spark.executor.ExecutorMetrics
import org.apache.spark.metrics.ExecutorMetricType
/** A Spark application as exposed through the status REST API (api/v1), with its attempts. */
case class ApplicationInfo private[spark](
    id: String,
    name: String,
    coresGranted: Option[Int],
    maxCores: Option[Int],
    coresPerExecutor: Option[Int],
    memoryPerExecutorMB: Option[Int],
    attempts: Seq[ApplicationAttemptInfo])
// The epoch fields are ignored on deserialization but still serialized via the
// getters below (allowGetters), so JSON output carries both Date and epoch forms.
@JsonIgnoreProperties(
  value = Array("startTimeEpoch", "endTimeEpoch", "lastUpdatedEpoch"),
  allowGetters = true)
case class ApplicationAttemptInfo private[spark](
    attemptId: Option[String],
    startTime: Date,
    endTime: Date,
    lastUpdated: Date,
    duration: Long,
    sparkUser: String,
    completed: Boolean = false,
    appSparkVersion: String) {
  // Epoch-millis accessors; picked up by Jackson as startTimeEpoch etc.
  def getStartTimeEpoch: Long = startTime.getTime
  def getEndTimeEpoch: Long = endTime.getTime
  def getLastUpdatedEpoch: Long = lastUpdated.getTime
}
/** Per-executor aggregates for one stage; keyed by executor id in [[StageData.executorSummary]]. */
class ExecutorStageSummary private[spark](
    val taskTime : Long,
    val failedTasks : Int,
    val succeededTasks : Int,
    val killedTasks : Int,
    val inputBytes : Long,
    val inputRecords : Long,
    val outputBytes : Long,
    val outputRecords : Long,
    val shuffleRead : Long,
    val shuffleReadRecords : Long,
    val shuffleWrite : Long,
    val shuffleWriteRecords : Long,
    val memoryBytesSpilled : Long,
    val diskBytesSpilled : Long,
    val isBlacklistedForStage: Boolean)
/** Status-API view of a single executor (lifetime totals, state, logs and memory metrics). */
class ExecutorSummary private[spark](
    val id: String,
    val hostPort: String,
    val isActive: Boolean,
    val rddBlocks: Int,
    val memoryUsed: Long,
    val diskUsed: Long,
    val totalCores: Int,
    val maxTasks: Int,
    val activeTasks: Int,
    val failedTasks: Int,
    val completedTasks: Int,
    val totalTasks: Int,
    val totalDuration: Long,
    val totalGCTime: Long,
    val totalInputBytes: Long,
    val totalShuffleRead: Long,
    val totalShuffleWrite: Long,
    val isBlacklisted: Boolean,
    val maxMemory: Long,
    val addTime: Date,
    val removeTime: Option[Date],
    val removeReason: Option[String],
    val executorLogs: Map[String, String],
    val memoryMetrics: Option[MemoryMetrics],
    val blacklistedInStages: Set[Int],
    // Serialized as a flat metric-name -> value map; see the custom (de)serializers below.
    @JsonSerialize(using = classOf[ExecutorMetricsJsonSerializer])
    @JsonDeserialize(using = classOf[ExecutorMetricsJsonDeserializer])
    val peakMemoryMetrics: Option[ExecutorMetrics])
/** On-heap/off-heap storage memory usage and totals for an executor. */
class MemoryMetrics private[spark](
    val usedOnHeapStorageMemory: Long,
    val usedOffHeapStorageMemory: Long,
    val totalOnHeapStorageMemory: Long,
    val totalOffHeapStorageMemory: Long)
/** deserializer for peakMemoryMetrics: convert map to ExecutorMetrics */
private[spark] class ExecutorMetricsJsonDeserializer
    extends JsonDeserializer[Option[ExecutorMetrics]] {
  override def deserialize(
      jsonParser: JsonParser,
      deserializationContext: DeserializationContext): Option[ExecutorMetrics] = {
    // Read the JSON object as an optional name -> value map, then wrap it.
    val metricsMap = jsonParser.readValueAs[Option[Map[String, Long]]](
      new TypeReference[Option[Map[String, java.lang.Long]]] {})
    metricsMap.map(metrics => new ExecutorMetrics(metrics))
  }
}
/** serializer for peakMemoryMetrics: convert ExecutorMetrics to map with metric name as key */
private[spark] class ExecutorMetricsJsonSerializer
    extends JsonSerializer[Option[ExecutorMetrics]] {
  override def serialize(
      metrics: Option[ExecutorMetrics],
      jsonGenerator: JsonGenerator,
      serializerProvider: SerializerProvider): Unit = {
    metrics.foreach { m: ExecutorMetrics =>
      // Expand every known metric (from ExecutorMetricType.metricToOffset) into the map.
      val metricsMap = ExecutorMetricType.metricToOffset.map { case (metric, _) =>
        metric -> m.getMetricValue(metric)
      }
      jsonGenerator.writeObject(metricsMap)
    }
  }
  // Omit the field entirely when there are no metrics, instead of writing null.
  override def isEmpty(provider: SerializerProvider, value: Option[ExecutorMetrics]): Boolean =
    value.isEmpty
}
/** Status-API view of a job: identity, lifecycle timestamps, and task/stage counters. */
class JobData private[spark](
    val jobId: Int,
    val name: String,
    val description: Option[String],
    val submissionTime: Option[Date],
    val completionTime: Option[Date],
    val stageIds: Seq[Int],
    val jobGroup: Option[String],
    val status: JobExecutionStatus,
    val numTasks: Int,
    val numActiveTasks: Int,
    val numCompletedTasks: Int,
    val numSkippedTasks: Int,
    val numFailedTasks: Int,
    val numKilledTasks: Int,
    val numCompletedIndices: Int,
    val numActiveStages: Int,
    val numCompletedStages: Int,
    val numSkippedStages: Int,
    val numFailedStages: Int,
    val killedTasksSummary: Map[String, Int])
/** Storage status of one cached RDD, with optional per-executor and per-partition detail. */
class RDDStorageInfo private[spark](
    val id: Int,
    val name: String,
    val numPartitions: Int,
    val numCachedPartitions: Int,
    val storageLevel: String,
    val memoryUsed: Long,
    val diskUsed: Long,
    val dataDistribution: Option[Seq[RDDDataDistribution]],
    val partitions: Option[Seq[RDDPartitionInfo]])
/** How one RDD's cached data is distributed on a single executor (address = host:port). */
class RDDDataDistribution private[spark](
    val address: String,
    val memoryUsed: Long,
    val memoryRemaining: Long,
    val diskUsed: Long,
    // contentAs = JLong tells Jackson to box into java.lang.Long inside the Option,
    // avoiding primitive-erasure issues on deserialization.
    @JsonDeserialize(contentAs = classOf[JLong])
    val onHeapMemoryUsed: Option[Long],
    @JsonDeserialize(contentAs = classOf[JLong])
    val offHeapMemoryUsed: Option[Long],
    @JsonDeserialize(contentAs = classOf[JLong])
    val onHeapMemoryRemaining: Option[Long],
    @JsonDeserialize(contentAs = classOf[JLong])
    val offHeapMemoryRemaining: Option[Long])
/** Storage details for a single cached RDD partition and the executors holding it. */
class RDDPartitionInfo private[spark](
    val blockName: String,
    val storageLevel: String,
    val memoryUsed: Long,
    val diskUsed: Long,
    val executors: Seq[String])
/**
 * Status-API view of one stage attempt: counters, timings, I/O/shuffle totals and,
 * optionally, per-task and per-executor breakdowns.
 */
class StageData private[spark](
    val status: StageStatus,
    val stageId: Int,
    val attemptId: Int,
    val numTasks: Int,
    val numActiveTasks: Int,
    val numCompleteTasks: Int,
    val numFailedTasks: Int,
    val numKilledTasks: Int,
    val numCompletedIndices: Int,
    val executorRunTime: Long,
    val executorCpuTime: Long,
    val submissionTime: Option[Date],
    val firstTaskLaunchedTime: Option[Date],
    val completionTime: Option[Date],
    val failureReason: Option[String],
    val inputBytes: Long,
    val inputRecords: Long,
    val outputBytes: Long,
    val outputRecords: Long,
    val shuffleReadBytes: Long,
    val shuffleReadRecords: Long,
    val shuffleWriteBytes: Long,
    val shuffleWriteRecords: Long,
    val memoryBytesSpilled: Long,
    val diskBytesSpilled: Long,
    val name: String,
    val description: Option[String],
    val details: String,
    val schedulingPool: String,
    val rddIds: Seq[Int],
    val accumulatorUpdates: Seq[AccumulableInfo],
    val tasks: Option[Map[Long, TaskData]],
    val executorSummary: Option[Map[String, ExecutorStageSummary]],
    val killedTasksSummary: Map[String, Int])
/** Status-API view of a single task attempt, including optional metrics and error info. */
class TaskData private[spark](
    val taskId: Long,
    val index: Int,
    val attempt: Int,
    val launchTime: Date,
    val resultFetchStart: Option[Date],
    @JsonDeserialize(contentAs = classOf[JLong])
    val duration: Option[Long],
    val executorId: String,
    val host: String,
    val status: String,
    val taskLocality: String,
    val speculative: Boolean,
    val accumulatorUpdates: Seq[AccumulableInfo],
    val errorMessage: Option[String] = None,
    val taskMetrics: Option[TaskMetrics] = None,
    val executorLogs: Map[String, String],
    val schedulerDelay: Long,
    val gettingResultTime: Long)
/** Runtime metrics for one task, grouped into input/output/shuffle sub-metrics. */
class TaskMetrics private[spark](
    val executorDeserializeTime: Long,
    val executorDeserializeCpuTime: Long,
    val executorRunTime: Long,
    val executorCpuTime: Long,
    val resultSize: Long,
    val jvmGcTime: Long,
    val resultSerializationTime: Long,
    val memoryBytesSpilled: Long,
    val diskBytesSpilled: Long,
    val peakExecutionMemory: Long,
    val inputMetrics: InputMetrics,
    val outputMetrics: OutputMetrics,
    val shuffleReadMetrics: ShuffleReadMetrics,
    val shuffleWriteMetrics: ShuffleWriteMetrics)
/** Bytes/records read by a task from its input source. */
class InputMetrics private[spark](
    val bytesRead: Long,
    val recordsRead: Long)
/** Bytes/records written by a task to its output sink. */
class OutputMetrics private[spark](
    val bytesWritten: Long,
    val recordsWritten: Long)
/** Shuffle-read side metrics for one task (remote vs local fetches). */
class ShuffleReadMetrics private[spark](
    val remoteBlocksFetched: Long,
    val localBlocksFetched: Long,
    val fetchWaitTime: Long,
    val remoteBytesRead: Long,
    val remoteBytesReadToDisk: Long,
    val localBytesRead: Long,
    val recordsRead: Long)
/** Shuffle-write side metrics for one task. */
class ShuffleWriteMetrics private[spark](
    val bytesWritten: Long,
    val writeTime: Long,
    val recordsWritten: Long)
/**
 * Quantile distributions of task metrics across a stage; each IndexedSeq is aligned
 * with [[quantiles]] (one value per requested quantile).
 */
class TaskMetricDistributions private[spark](
    val quantiles: IndexedSeq[Double],
    val executorDeserializeTime: IndexedSeq[Double],
    val executorDeserializeCpuTime: IndexedSeq[Double],
    val executorRunTime: IndexedSeq[Double],
    val executorCpuTime: IndexedSeq[Double],
    val resultSize: IndexedSeq[Double],
    val jvmGcTime: IndexedSeq[Double],
    val resultSerializationTime: IndexedSeq[Double],
    val gettingResultTime: IndexedSeq[Double],
    val schedulerDelay: IndexedSeq[Double],
    val peakExecutionMemory: IndexedSeq[Double],
    val memoryBytesSpilled: IndexedSeq[Double],
    val diskBytesSpilled: IndexedSeq[Double],
    val inputMetrics: InputMetricDistributions,
    val outputMetrics: OutputMetricDistributions,
    val shuffleReadMetrics: ShuffleReadMetricDistributions,
    val shuffleWriteMetrics: ShuffleWriteMetricDistributions)
/** Quantile distributions of input metrics (aligned with TaskMetricDistributions.quantiles). */
class InputMetricDistributions private[spark](
    val bytesRead: IndexedSeq[Double],
    val recordsRead: IndexedSeq[Double])
/** Quantile distributions of output metrics (aligned with TaskMetricDistributions.quantiles). */
class OutputMetricDistributions private[spark](
    val bytesWritten: IndexedSeq[Double],
    val recordsWritten: IndexedSeq[Double])
/** Quantile distributions of shuffle-read metrics (aligned with TaskMetricDistributions.quantiles). */
class ShuffleReadMetricDistributions private[spark](
    val readBytes: IndexedSeq[Double],
    val readRecords: IndexedSeq[Double],
    val remoteBlocksFetched: IndexedSeq[Double],
    val localBlocksFetched: IndexedSeq[Double],
    val fetchWaitTime: IndexedSeq[Double],
    val remoteBytesRead: IndexedSeq[Double],
    val remoteBytesReadToDisk: IndexedSeq[Double],
    val totalBlocksFetched: IndexedSeq[Double])
/** Quantile distributions of shuffle-write metrics (aligned with TaskMetricDistributions.quantiles). */
class ShuffleWriteMetricDistributions private[spark](
    val writeBytes: IndexedSeq[Double],
    val writeRecords: IndexedSeq[Double],
    val writeTime: IndexedSeq[Double])
/** One accumulator's id/name, latest update (if any) and current value, as strings. */
class AccumulableInfo private[spark](
    val id: Long,
    val name: String,
    val update: Option[String],
    val value: String)
/** Spark version string wrapper returned by the version endpoint. */
class VersionInfo private[spark](
    val spark: String)
/** Application environment: JVM runtime info plus Spark/Hadoop/system properties and classpath. */
class ApplicationEnvironmentInfo private[spark] (
    val runtime: RuntimeInfo,
    val sparkProperties: Seq[(String, String)],
    val hadoopProperties: Seq[(String, String)],
    val systemProperties: Seq[(String, String)],
    val classpathEntries: Seq[(String, String)])
/** JVM and Scala version details for the application's runtime. */
class RuntimeInfo private[spark](
    val javaVersion: String,
    val javaHome: String,
    val scalaVersion: String)
/**
 * A stack trace held as its individual frame strings.
 *
 * `toString` concatenates the frames verbatim; `html` renders them separated by
 * `<br/>` elements for display in the UI.
 */
case class StackTrace(elems: Seq[String]) {
  override def toString: String = elems.mkString

  /** Render the frames as XML nodes, inserting a line break before every frame but the first. */
  def html: NodeSeq =
    elems.foldLeft(NodeSeq.Empty) {
      case (acc, elem) if acc.isEmpty => acc :+ Text(elem)
      case (acc, elem) => acc :+ <br /> :+ Text(elem)
    }

  /** Join the frames with the given start, separator and end strings. */
  def mkString(start: String, sep: String, end: String): String =
    elems.mkString(start, sep, end)
}
/**
 * Snapshot of one JVM thread for the thread-dump endpoint: identity, state, stack
 * trace and the lock/thread (if any) it is blocked on plus the locks it holds.
 *
 * Note: case-class constructor parameters are already public vals, so the previously
 * redundant `val` modifiers have been dropped (no change to the generated API).
 */
case class ThreadStackTrace(
    threadId: Long,
    threadName: String,
    threadState: Thread.State,
    stackTrace: StackTrace,
    blockedByThreadId: Option[Long],
    blockedByLock: String,
    holdingLocks: Seq[String])
| hhbyyh/spark | core/src/main/scala/org/apache/spark/status/api/v1/api.scala | Scala | apache-2.0 | 12,731 |
package mesosphere.marathon.upgrade
import akka.actor.{ ActorSystem, Props }
import akka.testkit.{ TestActorRef, TestKit }
import com.codahale.metrics.MetricRegistry
import mesosphere.marathon.{ MarathonConf, SchedulerActions, TaskUpgradeCanceledException }
import mesosphere.marathon.event.{ HealthStatusChanged, MesosStatusUpdateEvent }
import mesosphere.marathon.state.AppDefinition
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.tasks.{ TaskTracker, TaskQueue }
import org.apache.mesos.SchedulerDriver
import org.apache.mesos.state.InMemoryState
import org.mockito.Mockito.{ spy, times, verify }
import org.scalatest.mock.MockitoSugar
import org.scalatest.{ BeforeAndAfterAll, FunSuiteLike, Matchers }
import scala.concurrent.duration._
import scala.concurrent.{ Await, Promise }
/**
 * Tests for TaskStartActor: queueing the requested number of tasks, completing the
 * supplied promise once all instances are running (or healthy, when health checks
 * are enabled), retrying failed tasks, and failing the promise on cancellation.
 */
class TaskStartActorTest
    extends TestKit(ActorSystem("System"))
    with FunSuiteLike
    with Matchers
    with MockitoSugar
    with BeforeAndAfterAll {
  // Shut the test ActorSystem down after the suite so threads don't leak.
  override protected def afterAll(): Unit = {
    super.afterAll()
    system.shutdown()
  }
  // All 5 instances reach TASK_RUNNING -> the promise completes and the actor stops.
  test("Start success") {
    val driver = mock[SchedulerDriver]
    val scheduler = mock[SchedulerActions]
    val taskQueue = new TaskQueue
    val registry = new MetricRegistry
    val taskTracker = new TaskTracker(new InMemoryState, mock[MarathonConf], registry)
    val promise = Promise[Unit]()
    val app = AppDefinition("myApp".toPath, instances = 5)
    val ref = TestActorRef(Props(
      classOf[TaskStartActor],
      driver,
      scheduler,
      taskQueue,
      taskTracker,
      system.eventStream,
      app,
      app.instances,
      false,
      promise))
    watch(ref)
    awaitCond(taskQueue.count(app) == 5, 3.seconds)
    for ((task, i) <- taskQueue.removeAll().zipWithIndex)
      system.eventStream.publish(MesosStatusUpdateEvent("", s"task-$i", "TASK_RUNNING", "", app.id, "", Nil, app.version.toString))
    Await.result(promise.future, 3.seconds) should be(())
    expectTerminated(ref)
  }
  // Zero instances: nothing to wait for, the promise must complete immediately.
  // NOTE(review): promise type differs from the Unit-typed one above; erasure makes
  // both work through Props, but Promise[Unit] would be the consistent choice.
  test("Start success with no instances to start") {
    val driver = mock[SchedulerDriver]
    val scheduler = mock[SchedulerActions]
    val taskQueue = new TaskQueue
    val registry = new MetricRegistry
    val taskTracker = new TaskTracker(new InMemoryState, mock[MarathonConf], registry)
    val promise = Promise[Boolean]()
    val app = AppDefinition("myApp".toPath, instances = 0)
    val ref = TestActorRef(Props(
      classOf[TaskStartActor],
      driver,
      scheduler,
      taskQueue,
      taskTracker,
      system.eventStream,
      app,
      app.instances,
      false,
      promise))
    watch(ref)
    Await.result(promise.future, 3.seconds) should be(())
    expectTerminated(ref)
  }
  // With health checks enabled, the actor waits for healthy events rather than
  // TASK_RUNNING status updates.
  test("Start with health checks") {
    val driver = mock[SchedulerDriver]
    val scheduler = mock[SchedulerActions]
    val taskQueue = new TaskQueue
    val registry = new MetricRegistry
    val taskTracker = new TaskTracker(new InMemoryState, mock[MarathonConf], registry)
    val promise = Promise[Boolean]()
    val app = AppDefinition("myApp".toPath, instances = 5)
    val ref = TestActorRef(Props(
      classOf[TaskStartActor],
      driver,
      scheduler,
      taskQueue,
      taskTracker,
      system.eventStream,
      app,
      app.instances,
      true,
      promise))
    watch(ref)
    awaitCond(taskQueue.count(app) == 5, 3.seconds)
    for ((_, i) <- taskQueue.removeAll().zipWithIndex)
      system.eventStream.publish(HealthStatusChanged(app.id, s"task_${i}", app.version.toString, true))
    Await.result(promise.future, 3.seconds) should be(())
    expectTerminated(ref)
  }
  // Health checks + zero instances: still completes immediately.
  test("Start with health checks with no instances to start") {
    val driver = mock[SchedulerDriver]
    val scheduler = mock[SchedulerActions]
    val taskQueue = new TaskQueue
    val registry = new MetricRegistry
    val taskTracker = new TaskTracker(new InMemoryState, mock[MarathonConf], registry)
    val promise = Promise[Boolean]()
    val app = AppDefinition("myApp".toPath, instances = 0)
    val ref = TestActorRef(Props(
      classOf[TaskStartActor],
      driver,
      scheduler,
      taskQueue,
      taskTracker,
      system.eventStream,
      app,
      app.instances,
      true,
      promise))
    watch(ref)
    Await.result(promise.future, 3.seconds) should be(())
    expectTerminated(ref)
  }
  // Stopping the actor before completion must fail the promise with
  // TaskUpgradeCanceledException.
  test("Cancelled") {
    val driver = mock[SchedulerDriver]
    val scheduler = mock[SchedulerActions]
    val taskQueue = new TaskQueue
    val registry = new MetricRegistry
    val taskTracker = new TaskTracker(new InMemoryState, mock[MarathonConf], registry)
    val promise = Promise[Boolean]()
    val app = AppDefinition("myApp".toPath, instances = 5)
    val ref = system.actorOf(Props(
      classOf[TaskStartActor],
      driver,
      scheduler,
      taskQueue,
      taskTracker,
      system.eventStream,
      app,
      app.instances,
      false,
      promise))
    watch(ref)
    system.stop(ref)
    intercept[TaskUpgradeCanceledException] {
      Await.result(promise.future, 5.seconds)
    }.getMessage should equal("The task upgrade has been cancelled")
    expectTerminated(ref)
  }
  // A TASK_FAILED update must cause the actor to re-queue the task (spy verifies a
  // second TaskQueue.add) before a subsequent TASK_RUNNING completes the promise.
  test("Task fails to start") {
    val driver = mock[SchedulerDriver]
    val scheduler = mock[SchedulerActions]
    val taskQueue = spy(new TaskQueue)
    val registry = new MetricRegistry
    val taskTracker = new TaskTracker(new InMemoryState, mock[MarathonConf], registry)
    val promise = Promise[Unit]()
    val app = AppDefinition("myApp".toPath, instances = 1)
    val ref = TestActorRef(Props(
      classOf[TaskStartActor],
      driver,
      scheduler,
      taskQueue,
      taskTracker,
      system.eventStream,
      app,
      app.instances,
      false,
      promise))
    watch(ref)
    awaitCond(taskQueue.count(app) == 1, 3.seconds)
    for (task <- taskQueue.removeAll())
      system.eventStream.publish(MesosStatusUpdateEvent("", "", "TASK_FAILED", "", app.id, "", Nil, app.version.toString))
    awaitCond(taskQueue.count(app) == 1, 3.seconds)
    verify(taskQueue, times(2)).add(app)
    for (task <- taskQueue.removeAll())
      system.eventStream.publish(MesosStatusUpdateEvent("", "", "TASK_RUNNING", "", app.id, "", Nil, app.version.toString))
    Await.result(promise.future, 3.seconds) should be(())
    expectTerminated(ref)
  }
}
| sttts/marathon | src/test/scala/mesosphere/marathon/upgrade/TaskStartActorTest.scala | Scala | apache-2.0 | 6,340 |
package actors.persistent.staffing
import actors.persistent.Sizes.oneMegaByte
import actors.acking.AckingReceiver.StreamCompleted
import actors.persistent.{PersistentDrtActor, RecoveryActorLike}
import akka.actor.Scheduler
import akka.persistence._
import akka.stream.scaladsl.SourceQueueWithComplete
import drt.shared.CrunchApi.MillisSinceEpoch
import drt.shared.Terminals.Terminal
import drt.shared.{FixedPointAssignments, MilliDate, SDateLike, StaffAssignment}
import org.slf4j.{Logger, LoggerFactory}
import scalapb.GeneratedMessage
import server.protobuf.messages.FixedPointMessage.{FixedPointMessage, FixedPointsMessage, FixedPointsStateSnapshotMessage}
import services.OfferHandler
import scala.concurrent.ExecutionContext.Implicits.global
// Command: replace the full set of fixed-point staff assignments.
case class SetFixedPoints(newFixedPoints: Seq[StaffAssignment])

// Ack returned to the sender once SetFixedPoints has been applied (and,
// when the state actually changed, persisted).
case class SetFixedPointsAck(newFixedPoints: Seq[StaffAssignment])
/**
 * Concrete fixed-points actor. On top of the persistence behaviour of
 * [[FixedPointsActorBase]] it pushes every updated set of fixed points to a
 * list of stream subscribers.
 *
 * @param now supplier of the current time, injectable for testing
 */
class FixedPointsActor(val now: () => SDateLike) extends FixedPointsActorBase(now) {
  // Queues notified whenever the fixed points change. Only mutated from the
  // actor's message-processing thread.
  var subscribers: List[SourceQueueWithComplete[FixedPointAssignments]] = List()

  implicit val scheduler: Scheduler = this.context.system.scheduler

  // Offer the new state to every subscriber, retrying up to 5 times per queue.
  override def onUpdateState(fixedPoints: FixedPointAssignments): Unit = {
    log.info(s"Telling subscribers ($subscribers) about updated fixed points: $fixedPoints")
    subscribers.foreach(s => OfferHandler.offerWithRetries(s, fixedPoints, 5))
  }

  // Handles subscriber registration; composed in front of the base behaviour.
  val subsReceive: Receive = {
    case AddFixedPointSubscribers(newSubscribers) =>
      subscribers = newSubscribers.foldLeft(subscribers) {
        case (soFar, newSub) =>
          log.info(s"Adding fixed points subscriber $newSub")
          newSub :: soFar
      }
  }

  override def receiveCommand: Receive = {
    subsReceive orElse super.receiveCommand
  }
}
/**
 * Persistent actor holding the current set of fixed-point staff assignments.
 * State is recovered from a snapshot plus journalled FixedPointsMessages and
 * every accepted change is persisted. Subclasses decide what happens on a
 * live update via onUpdateState.
 */
abstract class FixedPointsActorBase(now: () => SDateLike) extends RecoveryActorLike with PersistentDrtActor[FixedPointAssignments] {
  val log: Logger = LoggerFactory.getLogger(getClass)

  override def persistenceId = "fixedPoints-store"

  override val snapshotBytesThreshold: Int = oneMegaByte
  // Snapshot every 250 persisted events.
  override val maybeSnapshotInterval: Option[Int] = Option(250)
  override val recoveryStartMillis: MillisSinceEpoch = now().millisSinceEpoch

  // Current fixed points; replaced wholesale on every update.
  var state: FixedPointAssignments = initialState

  def initialState: FixedPointAssignments = FixedPointAssignments.empty

  import FixedPointsMessageParser._

  // Snapshot payload: the full state serialised with the current timestamp.
  override def stateToMessage: GeneratedMessage = FixedPointsStateSnapshotMessage(fixedPointsToFixedPointsMessages(state, now()))

  def updateState(fixedPoints: FixedPointAssignments): Unit = state = fixedPoints

  // Hook for subclasses; called only for live updates, not during recovery.
  def onUpdateState(data: FixedPointAssignments): Unit

  def processSnapshotMessage: PartialFunction[Any, Unit] = {
    case snapshot: FixedPointsStateSnapshotMessage =>
      log.info(s"Processing a snapshot message")
      state = fixedPointMessagesToFixedPoints(snapshot.fixedPoints)
  }

  // Journal replay: each message carries a complete replacement state.
  def processRecoveryMessage: PartialFunction[Any, Unit] = {
    case fixedPointsMessage: FixedPointsMessage =>
      val fp = fixedPointMessagesToFixedPoints(fixedPointsMessage.fixedPoints)
      updateState(fp)
  }

  def receiveCommand: Receive = {
    case GetState =>
      log.debug(s"GetState received")
      sender() ! state

    case SetFixedPoints(fixedPointStaffAssignments) =>
      if (fixedPointStaffAssignments != state) {
        log.info(s"Replacing fixed points state with $fixedPointStaffAssignments")
        updateState(FixedPointAssignments(fixedPointStaffAssignments))
        onUpdateState(FixedPointAssignments(fixedPointStaffAssignments))

        val createdAt = now()
        val fixedPointsMessage = FixedPointsMessage(fixedPointsToFixedPointsMessages(state, createdAt), Option(createdAt.millisSinceEpoch))
        // Ack is sent to the original sender once persistence completes.
        persistAndMaybeSnapshotWithAck(fixedPointsMessage, Option(sender(), SetFixedPointsAck(fixedPointStaffAssignments)))
      } else {
        // Unchanged state: ack immediately without touching the journal.
        log.info(s"No change. Nothing to persist")
        sender() ! SetFixedPointsAck(fixedPointStaffAssignments)
      }

    case SaveSnapshotSuccess(md) =>
      log.info(s"Save snapshot success: $md")
      ackIfRequired()

    case SaveSnapshotFailure(md, cause) =>
      log.error(s"Save snapshot failure: $md", cause)

    case SaveSnapshot =>
      log.info(s"Received request to snapshot")
      takeSnapshot(stateToMessage)

    case StreamCompleted => log.warn("Received shutdown")

    case unexpected => log.info(s"unhandled message: $unexpected")
  }
}
/**
 * Conversions between domain fixed-point staff assignments and their
 * protobuf wire representation.
 */
object FixedPointsMessageParser {
  val log: Logger = LoggerFactory.getLogger(getClass)

  /** Serialise one assignment, stamping the message with `createdAt`. */
  def staffAssignmentToMessage(assignment: StaffAssignment, createdAt: SDateLike): FixedPointMessage = {
    val stamp = Option(createdAt.millisSinceEpoch)
    FixedPointMessage(
      name = Option(assignment.name),
      terminalName = Option(assignment.terminal.toString),
      numberOfStaff = Option(assignment.numberOfStaff.toString),
      startTimestamp = Option(assignment.startDt.millisSinceEpoch),
      endTimestamp = Option(assignment.endDt.millisSinceEpoch),
      createdAt = stamp
    )
  }

  /** Deserialise one message, using neutral defaults for absent fields. */
  def fixedPointMessageToStaffAssignment(fixedPointMessage: FixedPointMessage): StaffAssignment = {
    val staffCount = fixedPointMessage.numberOfStaff.getOrElse("0").toInt
    StaffAssignment(
      name = fixedPointMessage.name.getOrElse(""),
      terminal = Terminal(fixedPointMessage.terminalName.getOrElse("")),
      startDt = MilliDate(fixedPointMessage.startTimestamp.getOrElse(0L)),
      endDt = MilliDate(fixedPointMessage.endTimestamp.getOrElse(0L)),
      numberOfStaff = staffCount,
      createdBy = None
    )
  }

  /** Serialise a whole set of assignments with a shared creation time. */
  def fixedPointsToFixedPointsMessages(fixedPointStaffAssignments: FixedPointAssignments, createdAt: SDateLike): Seq[FixedPointMessage] =
    fixedPointStaffAssignments.assignments.map(staffAssignmentToMessage(_, createdAt))

  /** Deserialise a whole set of messages back into domain assignments. */
  def fixedPointMessagesToFixedPoints(fixedPointMessages: Seq[FixedPointMessage]): FixedPointAssignments =
    FixedPointAssignments(fixedPointMessages.map(fixedPointMessageToStaffAssignment))
}
| UKHomeOffice/drt-scalajs-spa-exploration | server/src/main/scala/actors/persistent/staffing/FixedPointsActorBase.scala | Scala | apache-2.0 | 5,834 |
/*
* This file is part of Kiama.
*
* Copyright (C) 2011-2015 Anthony M Sloane, Macquarie University.
*
* Kiama is free software: you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your
* option) any later version.
*
* Kiama is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
* more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Kiama. (See files COPYING and COPYING.LESSER.) If not, see
* <http://www.gnu.org/licenses/>.
*/
package org.kiama
package util
import org.kiama.output.PrettyPrinter
import org.scalatest.prop.Checkers
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FunSuiteLike}
import scala.util.parsing.combinator.RegexParsers
/**
* General test support.
*/
trait Tests extends FunSuiteLike with BeforeAndAfter with BeforeAndAfterAll with Checkers {

    import Comparison.{optsame, same, samecollection}
    import Messaging.Messages
    import org.scalatest.Tag

    /**
     * ScalaTest by default only shows the unqualified class name when
     * it displays the name of the suite. If a suite class name is
     * used in more than one package we can't tell them apart. Here
     * we override the name that is printed so that we get a project
     * relative source file name as well.
     *
     * This definition assumes that the test suite resides in the
     * library project, that the name of the suite class is the same
     * as the basename of the file and that the file is located in
     * the folder given by the package name.
     */
    override def suiteName = {
        val filename = getClass.getName.replaceAllLiterally (".", "/")
        s"${super.suiteName} in src/$filename"
    }

    /**
     * Fail a test with a message about finding something and expecting
     * something else.
     */
    def failExpectedTest[T] (expected : T, found : T, description : String = "") {
        fail ("expected %s'%s', not '%s'".format (description, expected, found))
    }

    /**
     * Analogous to ScalaTest's `assertResult` but it uses `same` to compare
     * the two values instead of equality.
     */
    def assertSame (expected : Any) (actual : Any) {
        if (!same (expected, actual)) {
            failExpectedTest (expected, actual, "same object as ")
        }
    }

    /**
     * Analogous to ScalaTest's `assertResult` but it uses `same` to compare
     * the two values instead of equality.
     */
    def assertNotSame (expected : Any) (actual : Any) {
        if (same (expected, actual)) {
            failExpectedTest (expected, actual, "not same object as ")
        }
    }

    /**
     * Analogous to ScalaTest's `assertResult` but it uses `optsame` to compare
     * the two values instead of equality.
     */
    def assertOptSame (expected : Any) (actual : Any) {
        if (!optsame (expected, actual)) {
            failExpectedTest (expected, actual, "same object as ")
        }
    }

    /**
     * Analogous to ScalaTest's `assertResult` but it uses `optsame` to compare
     * the two values instead of equality.
     */
    def assertNotOptSame (expected : Any) (actual : Any) {
        if (optsame (expected, actual)) {
            failExpectedTest (expected, actual, "not same object as ")
        }
    }

    /**
     * Analogous to ScalaTest's `assertResult` but it uses `samecollection` to compare
     * two collections instead of equality.
     */
    def assertSameCollection (expected : Any) (actual : Any) {
        if (!samecollection (expected, actual)) {
            failExpectedTest (expected, actual, "same collection as ")
        }
    }

    /**
     * Analogous to ScalaTest's `assertResult` but it uses `samecollection` to compare
     * two collections instead of equality.
     */
    def assertNotSameCollection (expected : Any) (actual : Any) {
        if (samecollection (expected, actual)) {
            failExpectedTest (expected, actual, "not same collection as ")
        }
    }

    /**
     * Assert that the `received` list of messsages has recorded the `expected`
     * messages in the same order.
     */
    def assertMessages (received : Messages, expected : Message*) {
        assert (received.size === expected.size, "wrong number of messages produced")
        // foreach, not map: the traversal is purely for its assertion side
        // effects and the mapped collection would just be discarded.
        received.zip (expected).zipWithIndex.foreach {
            case ((rec, exp), i) =>
                assertMessage (rec, i, exp)
        }
    }

    /**
     * Assert that a `received` message at the given zero-based `index` conforms
     * to an expected one in that it reports the same message label at the same
     * position.
     */
    def assertMessage (received : Message, index : Int, expected : Message) {
        assertResult (expected.label, s"wrong text in message $index") (received.label)
        assertResult (expected.line, s"wrong line number in message $index") (received.line)
        assertResult (expected.column, s"wrong column number in message $index") (received.column)
    }

    /**
     * A ScalaTest tag that enables us to focus attention on particular tests
     * rather than running all of them each time. Add this as an argument to
     * the particular test methods that you want to focus on. Then you can
     * use an sbt command such as "test-only *RewriterTests -- -n FocusTest"
     * to run just the tests in that suite with this tag.
     */
    object FocusTest extends Tag ("FocusTest")

}
/**
* Useful test routines for RegexParsers.
*/
trait RegexParserTests extends Tests {

    self : RegexParsers =>

    /**
     * Fail a test with a message about reaching the end of the input.
     */
    def failInputEnd (in : Input) {
        fail (s"input remaining at ${in.pos}")
    }

    /**
     * Fail a test with a message detailing a parse error.
     */
    def failParseError (error : Error) {
        fail (s"parse error: $error")
    }

    /**
     * Fail a test with a message detailing a parse failure.
     */
    def failParseFailure (failure : Failure) {
        // Fixed typo in diagnostic ("faiure" -> "failure").
        fail (s"parse failure: $failure")
    }

    /**
     * Parse a string and if the parse succeeds, pass the result of the parse
     * to a function for further processing or checking. `str` is the string to
     * be parsed and `parser` is the parser to parse it with. `func` accepts the
     * result value of the parse and returns whatever it likes which is returned
     * from `assertParseCheck`. Fail if the parse succeeds but doesn't consume
     * all of `str` or if the parse fails.
     */
    def assertParseCheck[T,U] (str : String, parser : Parser[T]) (func : T => U) : U =
        parseAll (parser, str) match {
            case Success (value, in) if in.atEnd =>
                func (value)
            case Success (_, in) =>
                fail (s"extraneous input at ${in.pos}: $str")
            case f =>
                fail (s"parse failure: $f")
        }

    /**
     * Assert that a parsing operation should be performed correctly.
     * Try to parse `str` as a `T` using `parser`, which is expected
     * to succeed and to produce the `expected` value. Fail if `p` doesn't
     * produce the expected value or if `parser` doesn't consume all of the
     * input.
     */
    def assertParseOk[T] (str : String, parser : Parser[T], expected : T) {
        assertParseCheck (str, parser) {
            result =>
                if (expected != result)
                    failExpectedTest (expected, result)
        }
    }

    /**
     * Assert that a parsing operation should not result in success.
     * Try to parse `str` as a `T` using `parser`, which is expected
     * to not succeed, giving either a fatal error or failure (as specified
     * by the `iserr` parameter, which defaults to failure). Fail the test
     * if the parsing operation succeeds. Furthermore, fail the test if it
     * fails, but the error or failure is not indicated at the given `line`
     * and `column` location or doesn't contain the given message `msg`.
     */
    def assertParseError[T] (str : String, parser : Parser[T], line : Int,
                             column : Int, msg : String, iserr : Boolean = false) {
        parseAll (parser, str) match {
            case Success (r, _) =>
                fail ("expected to find parse error in %s but it succeeded with %s".format (str, r))
            case e : NoSuccess =>
                if (iserr && e.isInstanceOf[Failure])
                    fail ("got parse failure when expecting parse error")
                else if (!iserr && e.isInstanceOf[Error])
                    // && instead of non-short-circuit & (same result for Booleans,
                    // but & was almost certainly a typo).
                    fail ("got parse error when expecting parse failure")
                assertResult (msg, "wrong message in error") (e.msg)
                assertResult (line, "wrong line number in error") (e.next.pos.line)
                assertResult (column, "wrong column number in error") (e.next.pos.column)
        }
    }

    /**
     * Parse a string and if the parse succeeds, return the result of the parse.
     * `str` is the string to be parsed and `parser` is the parser to parse it
     * with.
     */
    def assertParseReturn[T] (str : String, parser : Parser[T]) : T =
        assertParseCheck (str, parser) (identity)

}
/**
* Useful test routines for transformers.
*/
trait TransformerTests extends RegexParserTests {

    self : RegexParsers =>

    /**
     * Parse `str` with `parser` (which must consume all of the input),
     * apply the transformation `trans` to the parse result, and fail the
     * test unless the transformed value equals `expected`.
     */
    def assertTransformOk[T] (str : String, parser : Parser[T], trans : T => T, expected : T) {
        assertParseCheck (str, parser) { parsed =>
            val actual = trans (parsed)
            if (actual == expected) () else failExpectedTest (expected, actual)
        }
    }

}
/**
* Useful test routines for pretty-printers.
*/
trait PrettyPrinterTests extends Tests {

    import org.kiama.output.PrettyPrinterTypes.{Document, Layout, Link, Links}

    /**
     * Fail unless the pretty-printed `document` has exactly the layout
     * `expected`.
     */
    def assertLayout (expected : Layout) (document : Document) {
        assertResult (expected) (document.layout)
    }

    /**
     * Fail unless `document` links every value in `expected` to the
     * corresponding range.
     */
    def assertLinks (expected : List[(AnyRef,Range)]) (document : Document) {
        expected.foreach {
            case (value, range) =>
                assertLink (range) (document.links, value)
        }
    }

    /**
     * Fail unless `links` maps `value` (compared by reference identity)
     * to the range `expected`.
     */
    def assertLink (expected : Range) (links : Links, value : AnyRef) {
        links.collectFirst {
            case Link (k, v) if k eq value =>
                v
        } match {
            case Some (r) =>
                assertResult (expected, s"for value $value") (r)
            case None =>
                fail (s"link for $value not found")
        }
    }

}
| joaoraf/kiama | library/src/org/kiama/util/Tests.scala | Scala | gpl-3.0 | 11,473 |
package streams
import common._
/**
* This component implements a parser to define terrains from a
* graphical ASCII representation.
*
* When mixing in that component, a level can be defined by
* defining the field `level` in the following form:
*
* val level =
* """------
* |--ST--
* |--oo--
* |--oo--
* |------""".stripMargin
*
* - The `-` character denotes parts which are outside the terrain
* - `o` denotes fields which are part of the terrain
* - `S` denotes the start position of the block (which is also considered
inside the terrain)
* - `T` denotes the final position of the block (which is also considered
inside the terrain)
*
* In this example, the first and last lines could be omitted, and
* also the columns that consist of `-` characters only.
*/
trait StringParserTerrain extends GameDef {

  /**
   * The ASCII drawing of the terrain, supplied by concrete levels.
   * '-' marks cells outside the terrain, 'o' cells inside it, and 'S'/'T'
   * the start and target positions (both considered inside the terrain).
   */
  val level: String

  /**
   * Build the terrain predicate for a parsed level. A position belongs to
   * the terrain exactly when it lies inside the bounds of `levelVector`
   * and the character stored there is not '-'.
   */
  def terrainFunction(levelVector: Vector[Vector[Char]]): Pos => Boolean = { pos =>
    val rowInRange = pos.x >= 0 && pos.x < levelVector.length
    rowInRange &&
      pos.y >= 0 && pos.y < levelVector(pos.x).length &&
      levelVector(pos.x)(pos.y) != '-'
  }

  /**
   * Locate the (unique) occurrence of character `c` in the terrain
   * described by `levelVector` and return its position.
   */
  def findChar(c: Char, levelVector: Vector[Vector[Char]]): Pos = {
    val x = levelVector.indexWhere(_.contains(c))
    val y = levelVector(x).indexOf(c)
    Pos(x, y)
  }

  // The level string split into a grid of characters, one inner vector per line.
  private lazy val vector: Vector[Vector[Char]] =
    level.split("\n").toVector.map(_.toVector)

  lazy val terrain: Terrain = terrainFunction(vector)
  lazy val startPos: Pos = findChar('S', vector)
  lazy val goal: Pos = findChar('T', vector)

}
| shouya/thinking-dumps | progfun2/week2-bloxorz/src/main/scala/streams/StringParserTerrain.scala | Scala | mit | 2,638 |
package com.tomogle.magnetpattern
import org.scalatest.{FlatSpec, Matchers}
class PreMagnetPatternTest extends FlatSpec with Matchers {

  behavior of "PreMagnetPatternTest"

  // Each example mixes in PreMagnetPattern so preMagnetFunction is in scope;
  // presumably it is overloaded per argument type (String/Int/Long) — the
  // overload-based "pre magnet" style this test demonstrates. TODO confirm
  // against PreMagnetPattern's definition.
  it should "concatenate 'xyz' to an input String" in new PreMagnetPattern {
    val result = preMagnetFunction("initial")
    result shouldBe "initialxyz"
  }

  it should "increment and convert an int to a long" in new PreMagnetPattern {
    val result = preMagnetFunction(1)
    result should equal(2L)
  }

  it should "add 100 to a long" in new PreMagnetPattern {
    val result = preMagnetFunction(20L)
    result shouldBe 120L
  }
}
| tom-ogle/scala-scratch-code | src/test/scala/com/tomogle/magnetpattern/PreMagnetPatternTest.scala | Scala | mit | 618 |
package com.plasmaconduit.trytoobservable
import rx.lang.scala.Observable
import scala.util.{Failure, Success, Try}
/** Bridges `scala.util.Try` results into Rx streams. */
object TryToObservable {

  /**
   * Lift a Try into an Observable: a Success becomes a single-element
   * stream, a Failure becomes a stream that terminates with that error.
   */
  def apply[A](value: Try[A]): Observable[A] =
    value match {
      case Success(result) => Observable.just(result)
      case Failure(cause)  => Observable.error(cause)
    }
}
| plasmaconduit/try-to-observable | src/main/scala/com/plasmaconduit/trytoobservable/TryToObservable.scala | Scala | mit | 322 |
package scala.reflect.internal
import scala.tools.nsc.symtab.SymbolTableForUnitTesting
class SubstMapTest {
  object symbolTable extends SymbolTableForUnitTesting
  import symbolTable._

  // compile-test for https://github.com/scala/community-build/pull/1413
  // Instantiating an anonymous SubstMap subclass only needs to compile
  // (checks that the protected toType hook can still be overridden);
  // no runtime assertion is made.
  new SubstMap[String](Nil, Nil) {
    protected def toType(fromtp: Type, tp: String) = fromtp
  }
}
| scala/scala | test/junit/scala/reflect/internal/SubstMapTest.scala | Scala | apache-2.0 | 363 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600a.v3
import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtOptionalInteger}
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
import uk.gov.hmrc.ct.ct600.v3.calculations.LoansToParticipatorsCalculator
import uk.gov.hmrc.ct.ct600a.v3.retriever.CT600ABoxRetriever
// Box A35 on form CT600A (v3): the amount of loans released or written off
// after the end of the accounting period but earlier than nine months and
// one day after the end of the period.
case class A35(value: Option[Int]) extends CtBoxIdentifier(name = "A35 - Amount released or written off after the end of the period but earlier than nine months and one day after the end of the period") with CtOptionalInteger

object A35 extends LoansToParticipatorsCalculator {
  // Derives A35 from CP2 (NOTE(review): cp2 appears to be the accounting
  // period end date — confirm) and the declared loans to participators.
  def calculate(fieldValueRetriever: CT600ABoxRetriever, computationsBoxRetriever: ComputationsBoxRetriever): A35 = {
    calculateA35(computationsBoxRetriever.cp2(), fieldValueRetriever.loansToParticipators())
  }
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600a/v3/A35.scala | Scala | apache-2.0 | 1,418 |
package ch.uzh.ifi.pdeboer.pplib.process
import ch.uzh.ifi.pdeboer.pplib.process.entities.ProcessStub
import org.junit.{Assert, Test}
/**
* Created by pdeboer on 11/11/14.
*/
class ProcessStubTest {
  // Verifies ProcessStub's equality contract: equality requires the same
  // runtime class AND the same parameter map.
  @Test
  def testEqualsAndHashCode(): Unit = {
    // Same concrete class, same (empty) params => equal.
    Assert.assertEquals(new TestProcessStubA(), new TestProcessStubA())
    // A subclass with an identical body is still a different runtime class => not equal.
    Assert.assertFalse(new TestProcessStubA().equals(new TestProcessStubAB()))
    Assert.assertFalse(new TestProcessStubA().equals(new TestProcessStubB()))
    // Same class but different params => not equal.
    Assert.assertFalse(new TestProcessStubA(Map("str" -> "a")).equals(new TestProcessStubA(Map("str" -> "b"))))
  }

  // Minimal result type produced by the stub processes below.
  private class A(val a: String = "")

  private class TestProcessStubA(params: Map[String, Any] = Map.empty[String, Any]) extends ProcessStub[String, A](params) {
    override protected def run(data: String): A = {
      new A(data)
    }
  }

  // Subclass of TestProcessStubA with a deliberately identical run body;
  // exists only to check that equals distinguishes subclasses.
  private class TestProcessStubAB(params: Map[String, Any] = Map.empty[String, Any]) extends TestProcessStubA(params) {
    override protected def run(data: String): A = {
      new A(data)
    }
  }

  private class TestProcessStubB(params: Map[String, Any] = Map.empty[String, Any]) extends ProcessStub[String, A](params) {
    override protected def run(data: String): A = {
      new A(data + "B")
    }
  }
}
| uzh/PPLib | src/test/scala/ch/uzh/ifi/pdeboer/pplib/process/ProcessStubTest.scala | Scala | mit | 1,218 |
package com.twitter.finagle.netty4.http
import com.twitter.finagle.http.{Request, Response}
import com.twitter.finagle.netty4.http.handler.UnpoolHttpHandler
import io.netty.buffer.{ByteBuf, Unpooled}
import io.netty.handler.codec.http._
import java.net.InetSocketAddress
import io.netty.channel.embedded.EmbeddedChannel
import java.nio.charset.{StandardCharsets => Charsets}
/**
* Utilities for encoding/decoding [[Request]]s and [[Response]]s to/from Strings
* and byte arrays using Netty4 as the underlying implementation.
*/
/**
 * Utilities for encoding/decoding [[Request]]s and [[Response]]s to/from Strings
 * and byte arrays using Netty4 as the underlying implementation.
 *
 * Each helper runs the relevant Netty codec handlers inside a fresh
 * `EmbeddedChannel`, so no real network I/O takes place.
 */
private[finagle] object Netty4HttpCodec {

  /** Encode a [[Request]] to a String. */
  def encodeRequestToString(request: Request): String = {
    // Use the shared Charsets.UTF_8 constant for consistency with the rest
    // of this object (previously a bare "UTF-8" string literal).
    new String(encodeRequestToBytes(request), Charsets.UTF_8)
  }

  /** Encode a [[Request]] to a byte array */
  def encodeRequestToBytes(request: Request): Array[Byte] = {
    val ch = new EmbeddedChannel(
      new HttpRequestEncoder
    )
    ch.writeOneOutbound(Bijections.finagle.requestToNetty(request))
    ch.flushOutbound()

    // Gather all outbound buffers into one composite before copying out.
    val acc = ch.alloc.compositeBuffer()
    try {
      while (!ch.outboundMessages.isEmpty) {
        acc.addComponent(true, ch.readOutbound[ByteBuf]())
      }
      val out = new Array[Byte](acc.readableBytes)
      acc.readBytes(out)
      out
    } finally {
      acc.release()
      ch.finishAndReleaseAll()
    }
  }

  /** Decode a [[Request]] from a String */
  def decodeStringToRequest(s: String): Request = {
    decodeBytesToRequest(s.getBytes(Charsets.UTF_8))
  }

  /** Decode a [[Request]] from a byte array */
  def decodeBytesToRequest(b: Array[Byte]): Request = {
    val ch = new EmbeddedChannel(
      new HttpRequestDecoder(Int.MaxValue, Int.MaxValue, Int.MaxValue),
      new HttpObjectAggregator(Int.MaxValue),
      UnpoolHttpHandler // Make sure nothing is ref-counted
    )
    try {
      ch.writeInbound(Unpooled.wrappedBuffer(b))
      ch.flushInbound()
      // Should be exactly one message or the input data was likely malformed
      // Note: we perform the assert before reading so that the if it fails any
      // messages are still freed in the finally block
      assert(ch.inboundMessages.size == 1)
      val nettyReq = ch.readInbound[FullHttpRequest]()
      Bijections.netty.fullRequestToFinagle(nettyReq, new InetSocketAddress(0))
    } finally {
      ch.finishAndReleaseAll()
    }
  }

  /** Encode a [[Response]] to a String */
  def encodeResponseToString(response: Response): String = {
    val ch = new EmbeddedChannel(
      new HttpResponseEncoder
    )
    val acc = ch.alloc().compositeBuffer()
    try {
      ch.writeOutbound(Bijections.finagle.fullResponseToNetty(response))
      ch.flushOutbound()
      while (!ch.outboundMessages.isEmpty) {
        acc.addComponent(true, ch.readOutbound[ByteBuf]())
      }
      acc.toString(Charsets.UTF_8)
    } finally {
      acc.release()
      ch.finishAndReleaseAll()
    }
  }

  /** Decode a [[Response]] from a String */
  def decodeStringToResponse(s: String): Response = {
    decodeBytesToResponse(s.getBytes(Charsets.UTF_8))
  }

  /** Decode a [[Response]] from a byte array */
  def decodeBytesToResponse(b: Array[Byte]): Response = {
    val ch = new EmbeddedChannel(
      new HttpResponseDecoder(Int.MaxValue, Int.MaxValue, Int.MaxValue),
      new HttpObjectAggregator(Int.MaxValue),
      UnpoolHttpHandler // Make sure nothing is ref-counted
    )
    try {
      ch.writeInbound(Unpooled.wrappedBuffer(b))
      ch.flushInbound()
      // Should be exactly one message or the message was likely malformed
      // Note: we perform the assert before reading so that the if it fails any
      // messages are still freed in the finally block
      assert(ch.inboundMessages.size == 1)
      val resp = ch.readInbound[FullHttpResponse]()
      Bijections.netty.fullResponseToFinagle(resp)
    } finally {
      ch.finishAndReleaseAll()
    }
  }
}
| luciferous/finagle | finagle-netty4-http/src/main/scala/com/twitter/finagle/netty4/http/Netty4HttpCodec.scala | Scala | apache-2.0 | 3,886 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.testhelpers.viewSpecshelper.registration
/**
 * Expected message strings for the IHT "before you start registration"
 * checklist page, used by view specs. Values mirror the page content
 * top-to-bottom; the string literals must stay byte-identical to the
 * rendered page text.
 */
trait RegistrationChecklistMessages {

  // Page title and introduction.
  val pageIhtRegistrationChecklistTitle = "Before you start registration"
  val pageIhtRegistrationChecklistLabel1 = "Only start once you have everything you will need, because there is no way to save an incomplete registration."
  val pageIhtRegistrationChecklistLabel2 = "To register the estate you will need to sign in to your Government Gateway account (or you will need to create one). You may also be asked some questions to confirm who you are. Details about yourself you may need include:"

  // Bullet list: applicant's own identity details.
  val ihtRegistrationChecklistYourNino = "your National Insurance number"
  val ihtRegistrationChecklist2FA = "mobile or landline phone number for 2-step verification"
  val ihtRegistrationChecklistPassport = "valid UK passport details"
  val ihtRegistrationChecklistPayslip = "recent payslips or last P60"
  val ihtRegistrationChecklistTaxCredit = "tax credits or pension payment amounts"

  // Section: details needed about the deceased.
  val ihtRegistrationDetailsNeededTitle = "Details you will need to register the estate"
  val ihtRegistrationDetailsNeededLabel1 = "You will not be able to change these details later on."
  val ihtRegistrationDetailsNeededLabel2 = "For the person who has died you will need:"
  val ihtRegistrationDetailsNeededOname = "official name"
  val ihtRegistrationChecklistDateOfBirth = "date of birth"
  val pageIhtRegistrationChecklistDeceasedLabel3 = "date of death (needs to be on or before 31 December 2021)"
  val ihtNationalInsuranceNo = "National Insurance number"
  val pageIhtRegistrationChecklistDeceasedLabel5 = "last known contact address"
  val pageIhtRegistrationChecklistDeceasedLabel7 = "relationship status"

  // Progressive-disclosure ("reveal") link texts.
  val pageIhtRegistrationChecklistRevealTextDied = "How to find the details of the person who has died"
  val pageIhtRegistrationChecklistRevealTextExecutors = "How to find the details of executors or administrators"

  // Guidance paragraphs inside the reveals.
  val ihtRegistrationDetailsNeededLabel3 = "The person’s first and last name will be on the death certificate. Their last known contact address is where they had their post sent before they died. This could be to a home that they owned or where they last lived."
  val ihtRegistrationDetailsNeededLabel4 = "If the person did not have a National Insurance number, you will need to submit an IHT205 paper form instead."
  val ihtRegistrationDetailsNeededLabel5 = "The person who has died’s relationship status is their actual situation when they died, not what they, or someone else, might have thought it to be. If the person who has died was married at some time in their life and then divorced, you should select divorced."

  // Section: details needed about executors/administrators.
  val ihtRegistrationExecutorLabel1 = "For all the executors or administrators listed on your probate application, you will need their:"
  val ihtRegistrationExecutorAddress = "address"
  val ihtRegistrationChecklistPhoneNoLowerCaseInitial = "phone number"
  val ihtRegistrationExecutorLabel2 = "All names should be as they appear on a passport or other official document. They might be different to the names that you know people by."
  val ihtRegistrationExecutorLabel3 = "The phone number is so that HMRC can contact this person. This might happen after you have finished the estate report and it is being reviewed."

  // Page footer controls.
  val pageIhtRegistrationChecklistContinueButton = "Continue"
  val pageIhtRegistrationChecklistLeaveLink = "Leave this page and start your application from the beginning"
  val pageIhtRegistrationChecklistSaveLink = "Save the link to this page or add it to your favourites."
}
| hmrc/iht-frontend | test/iht/testhelpers/viewSpecshelper/registration/RegistrationChecklistMessages.scala | Scala | apache-2.0 | 4,577 |
// create a polyhedron that is sub-divided a set amount of times
// create polyhedron, subdivide, and output
/**
 * Command-line entry point: builds a polyhedron, subdivides it the
 * requested number of times, and prints its HTML representation.
 */
object CreatePoly {

  /**
   * Expects one argument: the number of subdivision iterations.
   * Prints a usage message instead of crashing when the argument is
   * missing or not an integer.
   */
  def main(args: Array[String]): Unit = {
    // Previously args.head.toInt threw NoSuchElementException on no args and
    // NumberFormatException on non-numeric input; validate up front instead.
    args.headOption.flatMap(s => scala.util.Try(s.toInt).toOption) match {
      case Some(iterations) =>
        val p = new Polyhedron()
        for (_ <- 0 until iterations) {
          p.subdivide
        }
        println(p.html())
      case None =>
        Console.err.println("usage: CreatePoly <iterations>")
    }
  }
}
| mckennapsean/code-examples | Scala/CreatePoly/CreatePoly.scala | Scala | mit | 325 |
package pirate.spec
import org.specs2._
// Base class for pirate test suites: specs2 Specification with ScalaCheck
// property-testing support mixed in.
abstract class Spec extends Specification with ScalaCheck
| etorreborre/pirate | src/test/scala/pirate.spec/Spec.scala | Scala | bsd-3-clause | 100 |
package com.szadowsz.gospel.core.data.util
import java.{util => ju}
import com.szadowsz.gospel.core.Prolog
import com.szadowsz.gospel.core.data.Term
import com.szadowsz.gospel.core.engine.EngineRunner
/**
 * A blocking, thread-safe FIFO of Prolog terms. Consumers retrieve the first
 * queued term that unifies with a given template, optionally blocking until
 * one arrives (presumably a Linda-style message queue for tuProlog —
 * NOTE(review): confirm against callers).
 *
 * All access is serialised on this object's intrinsic monitor, which is also
 * used for wait/notifyAll signalling between producers and consumers.
 */
class TermQueue {
  // Backing store; guarded by this object's monitor.
  private val _queue = new ju.LinkedList[Term]

  /** Blocking take: wait until a term unifying with `t` exists, then remove it. */
  def get(t: Term, engine: Prolog, er: EngineRunner): Boolean = {
    synchronized{searchLoop(t, engine, true, true, er)}
  }

  /**
   * Search for a term unifying with `t`; when `block` is true, wait on the
   * monitor and retry after each notify. When `remove` is true the matched
   * term is taken off the queue.
   * NOTE(review): an InterruptedException is swallowed here and the thread's
   * interrupt status is not restored; the loop just exits returning false.
   */
  private def searchLoop(t: Term, engine: Prolog, block: Boolean, remove: Boolean, er: EngineRunner): Boolean = {
    synchronized {
      var found: Boolean = false
      var interrupted = false
      do {
        found = search(t, engine, remove)
        if (found)
          return true
        // Mark the runner idle before blocking on the monitor.
        er.setSolving(false)
        try {
          wait()
        } catch {
          case e: InterruptedException => {
            interrupted = true
          }
        }
      } while (block && !interrupted)
      false
    }
  }

  /**
   * Linear scan for the first queued term that unifies with `t`; removes it
   * when `remove` is true. Returns false when nothing unifies.
   */
  private def search(t: Term, engine: Prolog, remove: Boolean): Boolean = {
    synchronized {
      var found: Boolean = false
      var msg: Term = null
      val it : ju.ListIterator[Term]= _queue.listIterator
      while (!found) {
        if (it.hasNext) {
          msg = it.next
        }
        else {
          return false
        }
        found = engine.unify(t, msg)
      }
      if (remove) {
        _queue.remove(msg)
      }
      return true
    }
  }

  /** Non-blocking probe: does a unifying term exist? (Queue unchanged.) */
  def peek(t: Term, engine: Prolog): Boolean = {
    synchronized{return search(t, engine, false)}
  }

  /** Non-blocking removal of the first unifying term, if any. */
  def remove(t: Term, engine: Prolog): Boolean = {
    synchronized{return search(t, engine, true)}
  }

  // Blocking probe: waits for a unifying term but leaves it on the queue.
  // NOTE(review): this overload shadows Object.wait by name.
  def wait(t: Term, engine: Prolog, er: EngineRunner): Boolean = {
    synchronized{return searchLoop(t, engine, true, false, er)}
  }

  /** Append a term and wake every waiting consumer. */
  def store(t: Term) {
    synchronized{_queue.addLast(t)
    notifyAll()}
  }

  def size: Int = {
    synchronized{return _queue.size}
  }

  def clear() {
    synchronized{_queue.clear()}
  }
} | zakski/project-soisceal | scala-core/src/main/scala/com/szadowsz/gospel/core/data/util/TermQueue.scala | Scala | lgpl-3.0 | 1,932 |
package server.routes
import akka.http.scaladsl.model.StatusCodes.{NotFound, OK}
import akka.http.scaladsl.server.Route
import com.ketilovre.server.handlers.GreetHandler
import com.ketilovre.server.routes.GreetRoute
import helpers.{BaseSpec, RouteSpec}
/**
 * Route-level tests for [[GreetRoute]], driven through akka-http's route
 * test kit against a mocked [[GreetHandler]].
 */
class GreetRouteSpec extends BaseSpec with RouteSpec {

  val handler = mock[GreetHandler]

  // Route.seal converts rejections into plain HTTP responses (e.g. 404),
  // which is what the assertions below check against.
  val route = Route.seal(new GreetRoute(handler).route)

  "HelloRoute" should {

    "reject if the name parameter is missing" in {
      Get("/greet") ~> route ~> check {
        status mustEqual NotFound
      }
    }

    // Property-based: for any generated name the route either 404s (empty
    // name means no path segment) or echoes whatever the handler returns.
    "reply with a greeting if a name is passed" in prop { name: String =>
      handler.hello(name) returns name
      Get(s"/greet/$name") ~> route ~> check {
        if (name.isEmpty) {
          status mustEqual NotFound
        } else {
          status mustEqual OK
          entityAs[String] mustEqual name
        }
      }
    }
  }
}
| ketilovre/akka-http-macwire | src/test/scala/server/routes/GreetRouteSpec.scala | Scala | mit | 915 |
import scala.tools.partest._
import java.io.{Console => _, _}
/**
 * Partest DirectTest: compiles a snippet containing an anonymous function
 * with -Xprint:uncurry and -Ydelambdafy:inline, so the post-uncurry trees
 * (with the inline lambda translation) are captured for the check file.
 */
object Test extends DirectTest {

  // Print trees after the uncurry phase and force inline delambdafication.
  override def extraSettings: String = "-usejavacp -Xprint:uncurry -Ydelambdafy:inline -d " + testOutput.path

  // The source under compilation: a local anonymous function value.
  override def code = """class Foo {
    |  def bar = {
    |    val f = {x: Int => x + 1}
    |  }
    |}
    |""".stripMargin.trim

  // Route compiler messages to stdout so partest can diff them.
  override def show(): Unit = {
    Console.withErr(System.out) {
      compile()
    }
  }
}
| yusuke2255/dotty | tests/pending/run/delambdafy_uncurry_inline.scala | Scala | bsd-3-clause | 536 |
package org.eso.ias.asce.test.transfer
import java.util.Properties
import org.eso.ias.asce.transfer.{IasIO, IasioInfo, ScalaTransferExecutor, TransferFunctionSetting}
import org.eso.ias.logging.IASLogger
import org.eso.ias.types.{Alarm, IASTypes}
/**
* A transfer function to test if the component correctly detects
* and inhibit misbehaving (slow) TFs.
*
* @param asceId: the ID of the ASCE
* @param asceRunningId: the runningID of the ASCE
* @param validityTimeFrame: The time frame (msec) to invalidate monitor points
* @param props: the user defined properties
* @see TransferExecutor
*/
/**
 * A transfer function that can be made artificially slow, used to check that
 * the ASCE detects and inhibits misbehaving (slow) TFs.
 *
 * When the single boolean input is true, [[eval]] sleeps longer than the
 * maximum tolerable TF execution time before producing the output.
 *
 * @param cEleId the ID of the ASCE
 * @param cEleRunningId the runningID of the ASCE
 * @param validityTimeFrame the time frame (msec) to invalidate monitor points
 * @param props the user defined properties
 * @see TransferExecutor
 */
class SimulatedSlowTF(
    cEleId: String,
    cEleRunningId: String,
    validityTimeFrame: Long,
    props: Properties) extends ScalaTransferExecutor[Alarm](cEleId, cEleRunningId, validityTimeFrame, props) {

  /** Sleep duration (msec) chosen to exceed the tolerated TF execution time. */
  val timeToWait: Long = TransferFunctionSetting.MaxTolerableTFTime + 100

  /**
   * Initialize the TF: checks that there is exactly one boolean input.
   *
   * @param inputsInfo The IDs and types of the inputs
   * @param outputInfo The Id and type of the output
   */
  override def initialize(inputsInfo: Set[IasioInfo], outputInfo: IasioInfo): Unit = {
    require(inputsInfo.size == 1)
    require(inputsInfo.head.iasioType == IASTypes.BOOLEAN)
    SimulatedSlowTF.logger.info("Scala TF intialized")
  }

  /** Shut down: only logs. */
  def shutdown() {
    SimulatedSlowTF.logger.info("Scala TF shut down")
  }

  /**
   * Produces the output, simulating slowness.
   *
   * If the boolean input is true, the TF sleeps past the tolerated execution
   * time and sets the alarm; if false, it returns immediately with CLEARED.
   *
   * @param compInputs the inputs to the ASCE
   * @param actualOutput the actual output of the ASCE
   * @return the computed value to set as output of the ASCE
   */
  override def eval(compInputs: Map[String, IasIO[_]], actualOutput: IasIO[Alarm]): IasIO[Alarm] = {
    assert(compInputs.size == 1)
    val theInput = compInputs.values.head.asInstanceOf[IasIO[Boolean]]
    assert(theInput.value.isDefined)
    if (theInput.value.head) {
      SimulatedSlowTF.logger.info("Waiting to trigger slowness...")
      Thread.sleep(timeToWait)
      SimulatedSlowTF.logger.info("Waked up")
      actualOutput.updateValue(Alarm.getSetDefault)
    } else {
      actualOutput.updateValue(Alarm.CLEARED)
    }
  }
}
/** Companion holding resources shared by all instances of the TF. */
object SimulatedSlowTF {
  /** The logger */
  private val logger = IASLogger.getLogger(this.getClass)
}
| IntegratedAlarmSystem-Group/ias | CompElement/src/test/scala/org/eso/ias/asce/test/transfer/SimulatedSlowTF.scala | Scala | lgpl-3.0 | 2,629 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.projections
import org.bdgenomics.formats.avro.Fragment
/**
* Enumeration of Fragment field names for predicates and projections.
*/
object FragmentField extends FieldEnumeration(Fragment.SCHEMA$) {
  // One SchemaValue per field of the Avro Fragment schema, usable when
  // building predicates and projections over Fragment records.
  val readName, instrument, runId, fragmentSize, alignments = SchemaValue
}
| massie/adam | adam-core/src/main/scala/org/bdgenomics/adam/projections/FragmentField.scala | Scala | apache-2.0 | 1,102 |
package demo
package components
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.html_<^._
import scala.scalajs.js
/**
 * A GitHub user as rendered by [[GithubUser]].
 *
 * Identity is defined by the login name only, compared case-insensitively;
 * the remaining fields are presentation data.
 *
 * @param login the GitHub login (identity of the user)
 * @param html_url link to the user's GitHub profile page
 * @param avatar_url URL of the user's avatar image
 * @param time timestamp associated with this record
 */
case class Github(login: String = "",
                  html_url: String = "",
                  avatar_url: String = "",
                  time: Double = 0) {
  override def equals(obj: Any): Boolean = obj match {
    case that: Github => that.login.equalsIgnoreCase(this.login)
    case _ => false
  }

  // Fix: the original overrode equals without hashCode, so two instances that
  // compared equal (same login, different case or other fields) could hash
  // differently, breaking HashMap/HashSet usage. Hash only the lower-cased
  // login; GitHub logins are ASCII, so this agrees with equalsIgnoreCase.
  override def hashCode: Int = login.toLowerCase.hashCode
}
/** scalajs-react component rendering a single GitHub user as a linked
  * avatar plus login name. */
object GithubUser {

  // Inline styles applied to the rendered anchor, image and name span.
  object Styles {
    val userGroup = Seq(^.display := "inline-block",
                        ^.textAlign := "center",
                        ^.textDecoration := "none",
                        ^.color := "black").toTagMod
    val userIcon = Seq(^.margin := "10px",
                       ^.display := "block",
                       ^.width := "100px",
                       ^.height := "100px",
                       ^.borderRadius := "50%").toTagMod
    val userName = Seq(^.fontSize := "18px", ^.fontWeight := "500").toTagMod
  }

  // Renders an <a> wrapping the avatar <img> and the login name, linking to
  // the user's profile page.
  case class Backend($ : BackendScope[Props, _]) {
    def render(P: Props) = {
      <.a(Styles.userGroup, ^.href := P.user.html_url)(
        <.img(Styles.userIcon, ^.src := P.user.avatar_url),
        <.span(Styles.userName)(P.user.login)
      )
    }
  }

  val component = ScalaComponent
    .builder[Props]("GithubUser")
    .renderBackend[Backend]
    .build

  // Component props: the user to display.
  case class Props(user: Github)

  /** Builds an unmounted GithubUser component for the given user. */
  def apply(user: Github) = component(Props(user))()
}
| rleibman/scalajs-react-components | demo/src/main/scala/demo/components/GithubUser.scala | Scala | apache-2.0 | 1,472 |
package com.pommedeterresautee.twoborange3.Common
import android.app.Activity
import android.support.v4.widget.DrawerLayout
import android.support.v4.app.{FragmentActivity, ActionBarDrawerToggle}
import android.support.v4.view.GravityCompat
import android.content.Intent
import android.view.{MenuItem, Gravity, ViewGroup, View}
import android.os.Bundle
import android.widget.{TextView, BaseAdapter, AdapterView, ListView}
import android.widget.AdapterView.OnItemClickListener
import android.util.TypedValue
import com.pommedeterresautee.twoborange3.{FONT, InterfaceFunctions, R}
import com.pommedeterresautee.twoborange3.Section.Terminal.TerminalActivity
import com.pommedeterresautee.twoborange3.Section.InstallScript.InstallScriptActivity
/**
* Trait to add a side menu to an Activity
*/
trait SideMenu extends FragmentActivity {

  /** One entry of the navigation drawer: label and icon resource IDs, the
    * Intent to launch, and whether the current Activity finishes afterwards. */
  case class SideMenuItem(mTitle: Int, mIcon: Int, mIntent: Intent, mShouldFinishCurrentActivity: Boolean = true)

  // Drawer widgets; initialized lazily in onStart once the views exist.
  private var mDrawerLayout: DrawerLayout = _
  private var mDrawerList: ListView = _
  private var mDrawerToggle: ActionBarDrawerToggle = _
  // The drawer entries, built in onCreate.
  private var mMenuList: List[SideMenuItem] = _

  override def onCreate(savedInstanceState: Bundle) {
    super.onCreate(savedInstanceState)
    //Init side menu content
    mMenuList = List(SideMenuItem(R.string.terminal_activity_title, R.drawable.ic_drawer, new Intent(this, classOf[TerminalActivity])), SideMenuItem(R.string.script_installation_activity_title, R.drawable.ic_drawer, new Intent(this, classOf[InstallScriptActivity])))
    // Enable the action-bar home button so the drawer toggle is reachable.
    getActionBar.setDisplayHomeAsUpEnabled(true)
    getActionBar.setHomeButtonEnabled(true)
  }
  // Wires up the drawer the first time the Activity becomes visible; the
  // content view must contain R.id.drawer_layout and R.id.sidemenu_drawer.
  override def onStart() {
    super.onStart()
    if (mDrawerLayout == null) {
      mDrawerLayout = findViewById(R.id.drawer_layout).asInstanceOf[DrawerLayout]
      mDrawerLayout.setDrawerShadow(R.drawable.drawer_shadow, GravityCompat.START)
      mDrawerList = findViewById(R.id.sidemenu_drawer).asInstanceOf[ListView]
      mDrawerList.setAdapter(new SideMenuAdapter())
      mDrawerList.setOnItemClickListener(new DrawerItemClickListener())
      mDrawerToggle = new CustomActionBarDrawerToggle(this, mDrawerLayout,
        R.drawable.ic_drawer, R.string.open, R.string.close)
      mDrawerLayout.setDrawerListener(mDrawerToggle)
    }
    // Keep the hamburger/arrow indicator in sync with the drawer state.
    mDrawerToggle.syncState()
  }

  /** Drawer toggle that refreshes the options menu whenever the drawer
    * opens or closes. */
  class CustomActionBarDrawerToggle(var activity: Activity, var drawerLayout: DrawerLayout, var drawerImageRes: Int, var openDrawerContentDescRes: Int, var closeDrawerContentDescRes: Int) extends ActionBarDrawerToggle(activity, drawerLayout, drawerImageRes, openDrawerContentDescRes, closeDrawerContentDescRes) {
    override def onDrawerClosed(view: View) = invalidateOptionsMenu()
    override def onDrawerOpened(drawerView: View) = invalidateOptionsMenu()
  }
  // Let the drawer toggle consume the home button first; every other menu
  // item falls through to the default handling.
  override def onOptionsItemSelected(item: MenuItem): Boolean = {
    if (mDrawerToggle.onOptionsItemSelected(item)) {
      return true
    }
    super.onOptionsItemSelected(item)
  }

  /** Starts the Activity bound to the clicked drawer entry, optionally
    * finishing the current one (suppressing the transition animation). */
  class DrawerItemClickListener extends OnItemClickListener {
    def onItemClick(parent: AdapterView[_], view: View, position: Int, id: Long): Unit = {
      val item: SideMenuItem = mMenuList(position)
      startActivity(item.mIntent)
      if (item.mShouldFinishCurrentActivity) {
        finish()
        overridePendingTransition(0, 0)
      } else {
        // Keep this Activity; just retract the drawer.
        mDrawerLayout.closeDrawers()
      }
    }
  }
  /** Adapter rendering each SideMenuItem as a single styled TextView with a
    * leading icon. */
  class SideMenuAdapter extends BaseAdapter {
    override def getCount: Int = mMenuList.size
    override def getItem(position: Int): Object = mMenuList(position)
    override def getItemId(position: Int) = position
    // NOTE(review): convertView is ignored, so a fresh TextView is built on
    // every call — wasteful for long lists, harmless for this short menu.
    override def getView(position: Int, convertView: View, parent: ViewGroup): View = {
      val item: SideMenuItem = mMenuList(position)
//
//      new SLinearLayout {
//        SButton(item.mTitle).textSize(18 sp).height(64 dip).gravity(Gravity.CENTER_VERTICAL).compoundDrawablePadding(5 dip).textColor(R.color.white).typeface(FONT.LIGHT.getTypeFace(ctx)).<<.wrap.>>
//      }.padding(16 dip, 9 dip, 0 dip, 9 dip)
      // Convert dp dimensions to device pixels for this screen density.
      val paddingLeft = InterfaceFunctions.Dp2Px(16, SideMenu.this)
      val paddingTop = InterfaceFunctions.Dp2Px(9, SideMenu.this)
      val iconPadding = InterfaceFunctions.Dp2Px(5, SideMenu.this)
      val tvHeight = InterfaceFunctions.Dp2Px(64, SideMenu.this)
      val tv = new TextView(SideMenu.this)
      tv.setText(item.mTitle)
      tv.setTextSize(TypedValue.COMPLEX_UNIT_SP, 18)
      tv.setPadding(paddingLeft, paddingTop, 0, paddingTop)
      tv.setHeight(tvHeight)
      tv.setGravity(Gravity.CENTER_VERTICAL)
      tv.setCompoundDrawablePadding(iconPadding)
      tv.setTextColor(getResources.getColor(R.color.white))
      tv.setTypeface(FONT.LIGHT.getTypeFace(tv.getContext))
      tv.setCompoundDrawablesWithIntrinsicBounds(item.mIcon, 0, 0, 0)
      tv
    }
  }
} | pommedeterresautee/obackup_scala | src/main/scala/com/pommedeterresautee/twoborange3/Common/SideMenu.scala | Scala | gpl-2.0 | 4,796 |
// import akka.http.scaladsl.marshalling.{Marshaller, _}
// import akka.http.scaladsl.model.MediaType
// import akka.http.scaladsl.model.MediaTypes._
// import play.twirl.api.{Html, Txt, Xml}
// package object provingground {
//
// /** Twirl marshallers for Xml, Html and Txt mediatypes */
// implicit val twirlHtmlMarshaller = twirlMarshaller[Html](`text/html`)
// implicit val twirlTxtMarshaller = twirlMarshaller[Txt](`text/plain`)
// implicit val twirlXmlMarshaller = twirlMarshaller[Xml](`text/xml`)
//
// def twirlMarshaller[A](contentType: MediaType): ToEntityMarshaller[A] = {
// Marshaller.StringMarshaller.wrap(contentType)(_.toString)
// }
// }
| siddhartha-gadgil/ProvingGround | server/src/main/scala/provingground/interface/package.scala | Scala | mit | 675 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.chill
import com.esotericsoftware.kryo.io.ByteBufferInputStream
import _root_.java.io.{InputStream, Serializable}
import _root_.java.nio.ByteBuffer
import scala.util.control.Exception.allCatch
import scala.reflect._
/**
* Enrichment pattern to add methods to Kryo objects TODO: make this a value-class in scala 2.10 This also
* follows the builder pattern to allow easily chaining this calls
*/
class RichKryo(val k: Kryo) extends RichKryoCompat {

  /** True when klass already has a registration in the wrapped Kryo. */
  def alreadyRegistered(klass: Class[_]): Boolean =
    k.getClassResolver.getRegistration(klass) != null

  /** Type-parameter variant of [[alreadyRegistered]]. */
  def alreadyRegistered[T](implicit cmf: ClassTag[T]): Boolean = alreadyRegistered(cmf.runtimeClass)

  /** Installs kser as the default serializer for T and its subclasses;
    * returns the wrapped Kryo for chaining. */
  def forSubclass[T](kser: KSerializer[T])(implicit cmf: ClassTag[T]): Kryo = {
    k.addDefaultSerializer(cmf.runtimeClass, kser)
    k
  }

  /** Registers kser for exactly the class T; returns the wrapped Kryo. */
  def forClass[T](kser: KSerializer[T])(implicit cmf: ClassTag[T]): Kryo = {
    k.register(cmf.runtimeClass, kser)
    k
  }

  /**
   * Use Java serialization, which is very slow. avoid this if possible, but for very rare classes it is
   * probably fine
   */
  def javaForClass[T <: Serializable](implicit cmf: ClassTag[T]): Kryo = {
    k.register(cmf.runtimeClass, new com.esotericsoftware.kryo.serializers.JavaSerializer)
    k
  }

  /**
   * Use Java serialization, which is very slow. avoid this if possible, but for very rare classes it is
   * probably fine
   */
  def javaForSubclass[T <: Serializable](implicit cmf: ClassTag[T]): Kryo = {
    k.addDefaultSerializer(cmf.runtimeClass, new com.esotericsoftware.kryo.serializers.JavaSerializer)
    k
  }

  /** Registers every class of the collection that is not yet registered. */
  def registerClasses(klasses: TraversableOnce[Class[_]]): Kryo = {
    klasses.foreach { klass: Class[_] =>
      if (!alreadyRegistered(ClassTag(klass)))
        k.register(klass)
    }
    k
  }

  /**
   * Populate the wrapped Kryo instance with this registrar
   */
  def populateFrom(reg: IKryoRegistrar): Kryo = {
    reg(k)
    k
  }

  /** Reads one object (class tag + payload) from the stream; None when the
    * read throws for any reason. */
  def fromInputStream(s: InputStream): Option[AnyRef] = {
    // Can't reuse Input and call Input#setInputStream everytime
    val streamInput = new Input(s)
    allCatch.opt(k.readClassAndObject(streamInput))
  }

  /** Buffer variant of [[fromInputStream]]. */
  def fromByteBuffer(b: ByteBuffer): Option[AnyRef] =
    fromInputStream(new ByteBufferInputStream(b))
}
| twitter/chill | chill-scala/src/main/scala/com/twitter/chill/RichKryo.scala | Scala | apache-2.0 | 2,817 |
package com.ignition.frame
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
import com.ignition.types.{ RichStructType, date, fieldToRichStruct, int, string }
@RunWith(classOf[JUnitRunner])
class DebugOutputSpec extends FrameFlowSpecification {

  // Fixed UUID so the expected console output below is deterministic.
  private val newid = "39abc670-5386-11e5-b7ab-d61480493bf3"

  "DebugOutput" should {

    // Feeds a 4-row grid into DebugOutput, captures stdout, and checks the
    // title, header row, type row, and data rows of the printed table.
    "output frame data with names, types and title" in {
      val grid = DataGrid(string("id") ~ string("name") ~ int("weight") ~ date("dob")) rows (
        (newid, "john", 155, javaDate(1980, 5, 2)),
        (newid, "jane", 190, javaDate(1982, 4, 25)),
        (newid, "jake", 160, javaDate(1974, 11, 3)),
        (newid, "josh", 120, javaDate(1995, 1, 10)))
      val debug = DebugOutput() showNames true showTypes true title "Summary"
      grid --> debug
      val baos = new java.io.ByteArrayOutputStream
      Console.withOut(baos) { debug.output }
      val lines = baos.toString.split("\\\\r?\\\\n")
      lines.length === 10
      lines(0) === "Summary"
      lines(2) === "| id| name| weight| dob|"
      lines(3) === "| string|string|integer| date|"
      lines(5) === "|39abc670-5386-11e5-b7ab-d61480493bf3| john| 155|1980-05-02|"
      lines(6) === "|39abc670-5386-11e5-b7ab-d61480493bf3| jane| 190|1982-04-25|"
      lines(7) === "|39abc670-5386-11e5-b7ab-d61480493bf3| jake| 160|1974-11-03|"
      lines(8) === "|39abc670-5386-11e5-b7ab-d61480493bf3| josh| 120|1995-01-10|"
    }
"output frame data long lines" in {
val grid = DataGrid(string("id") ~ string("name") ~ int("weight") ~ date("dob")) rows (
(newid, "johnjohnjohnjohnjohnjohn", 111222333, javaDate(1980, 5, 2)),
(newid, "janejanejanejanejanejane", 444555666, javaDate(1982, 4, 25)),
(newid, "jakejakejakejakejakejake", 777888999, javaDate(1974, 11, 3)),
(newid, "joshjoshjoshjoshjoshjosh", 111000000, javaDate(1995, 1, 10)))
val debug = DebugOutput() showNames false showTypes false noTitle () maxWidth (50)
grid --> debug
val baos = new java.io.ByteArrayOutputStream
Console.withOut(baos) { debug.output }
val lines = baos.toString.split("\\\\r?\\\\n")
lines.length === 6
lines(1) === "|39abc670-5386-11e5-|johnjohnjohn|1112|1980-|"
lines(2) === "|39abc670-5386-11e5-|janejanejane|4445|1982-|"
lines(3) === "|39abc670-5386-11e5-|jakejakejake|7778|1974-|"
lines(4) === "|39abc670-5386-11e5-|joshjoshjosh|1110|1995-|"
}
"save to/load from xml" in {
val d1 = DebugOutput() showNames false showTypes false noTitle () unlimitedWidth ()
d1.toXml must ==/(<debug-output names="false" types="false"/>)
DebugOutput.fromXml(d1.toXml) === d1
val d2 = DebugOutput() showNames true showTypes true title "debug" maxWidth 100
d2.toXml must ==/(
<debug-output names="true" types="true" max-width="100">
<title>debug</title>
</debug-output>)
DebugOutput.fromXml(d2.toXml) === d2
}
"save to/load from json" in {
import org.json4s.JsonDSL._
val d1 = DebugOutput() showNames false showTypes false noTitle () unlimitedWidth ()
d1.toJson === ("tag" -> "debug-output") ~ ("names" -> false) ~ ("types" -> false) ~
("title" -> jNone) ~ ("maxWidth" -> jNone)
DebugOutput.fromJson(d1.toJson) === d1
val d2 = DebugOutput() showNames true showTypes true title "debug" maxWidth 100
d2.toJson === ("tag" -> "debug-output") ~ ("names" -> true) ~ ("types" -> true) ~
("title" -> "debug") ~ ("maxWidth" -> 100)
DebugOutput.fromJson(d2.toJson) === d2
}
}
} | uralian/ignition | src/test/scala/com/ignition/frame/DebugOutputSpec.scala | Scala | apache-2.0 | 3,694 |
/*
* Copyright 2001-2011 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.concurrent
import org.scalatest._
import matchers.ShouldMatchers
import Thread.State._
import java.util.concurrent.atomic.AtomicBoolean
class ConductorSuite extends FunSuite with ShouldMatchers with SharedHelpers { // NOTE: tests below assert exact source line numbers — never insert or delete lines in this file
  val baseLineNumber = 25 // anchor: expected failure positions are baseLineNumber + offset
  test("if conduct is called twice, the second time it throws an NotAllowedException") { // conduct() is one-shot
    val conductor = new Conductor
    conductor.conduct()
    val caught = intercept[NotAllowedException] { conductor.conduct() }
    caught.getMessage should be ("A Conductor's conduct method can only be invoked once.")
    caught.failedCodeFileNameAndLineNumberString match {
      case Some(s) => s should equal ("ConductorSuite.scala:" + (baseLineNumber + 5))
      case None => fail("Didn't produce a file name and line number string: ", caught)
    }
  }
test("if conduct has not been called, conductingHasBegun should return false"){
val conductor = new Conductor
conductor.conductingHasBegun should be (false)
}
test("if conduct has been called, conductingHasBegun should return true") {
val conductor = new Conductor
conductor.conduct
conductor.conductingHasBegun should be (true)
}
test("if thread {} is called after the test has been conducted, it throws an NotAllowedException" +
"with a detail message that explains the problem") {
val conductor = new Conductor
conductor.conduct
val caught =
intercept[NotAllowedException] {
conductor.thread("name") { 1 should be (1) }
}
caught.getMessage should be ("Cannot invoke the thread method on Conductor after its multi-threaded test has completed.")
caught.failedCodeFileNameAndLineNumberString match {
case Some(s) => s should equal ("ConductorSuite.scala:" + (baseLineNumber + 30))
case None => fail("Didn't produce a file name and line number string: ", caught)
}
}
test("if thread(String) {} is called after the test has been conducted, it throws NotAllowedException" +
"with a detail message that explains the problem"){
val conductor = new Conductor
conductor.conduct
val caught =
intercept[NotAllowedException] {
conductor.thread("name") { 1 should be (1) }
}
caught.getMessage should be ("Cannot invoke the thread method on Conductor after its multi-threaded test has completed.")
caught.failedCodeFileNameAndLineNumberString match {
case Some(s) => s should equal ("ConductorSuite.scala:" + (baseLineNumber + 45))
case None => fail("Didn't produce a file name and line number string: ", caught)
}
}
test("if whenFinished is called twice on the same conductor, a NotAllowedException is thrown that explains it " +
"can only be called once") {
val conductor = new Conductor
conductor.whenFinished { 1 should be (1) }
val caught =
intercept[NotAllowedException] {
conductor.whenFinished { 1 should be (1) }
}
caught.getMessage should be ("Cannot invoke whenFinished after conduct (which is called by whenFinished) has been invoked.")
caught.failedCodeFileNameAndLineNumberString match {
case Some(s) => s should equal ("ConductorSuite.scala:" + (baseLineNumber + 60))
case None => fail("Didn't produce a file name and line number string: ", caught)
}
}
test("if thread(String) is called twice with the same String name, the second invocation results " +
"in an IllegalArgumentException that explains each thread in a multi-threaded test " +
"must have a unique name") {
val conductor = new Conductor
conductor.thread("Fiesta del Mar") { 1 should be (1) }
val caught =
intercept[NotAllowedException] {
conductor.thread("Fiesta del Mar") { 2 should be (2) }
}
caught.getMessage should be ("Cannot register two threads with the same name. Duplicate name: Fiesta del Mar.")
caught.failedCodeFileNameAndLineNumberString match {
case Some(s) => s should equal ("ConductorSuite.scala:" + (baseLineNumber + 77))
case None => fail("Didn't produce a file name and line number string: ", caught)
}
}
test("waitForBeat throws NotAllowedException if is called with zero or a negative number") {
val conductor = new Conductor
val caught =
intercept[NotAllowedException] {
conductor.waitForBeat(0)
}
caught.failedCodeFileNameAndLineNumberString match {
case Some(s) => s should equal ("ConductorSuite.scala:" + (baseLineNumber + 90))
case None => fail("Didn't produce a file name and line number string: ", caught)
}
caught.getMessage should be ("A Conductor starts at beat zero, so you can't wait for beat zero.")
val caught2 =
intercept[NotAllowedException] {
conductor.waitForBeat(-1)
}
caught2.getMessage should be ("A Conductor starts at beat zero, so you can only wait for a beat greater than zero.")
caught2.failedCodeFileNameAndLineNumberString match {
case Some(s) => s should equal ("ConductorSuite.scala:" + (baseLineNumber + 99))
case None => fail("Didn't produce a file name and line number string: ", caught)
}
}
test("If a non-positive number is passed to conduct for clockPeriod, it will throw NotAllowedException") {
val conductor = new Conductor
val caught =
intercept[NotAllowedException] {
conductor.conduct(0, 100)
}
caught.getMessage should be ("The clockPeriod passed to conduct must be greater than zero. Value passed was: 0.")
caught.failedCodeFileNameAndLineNumberString match {
case Some(s) => s should equal ("ConductorSuite.scala:" + (baseLineNumber + 112))
case None => fail("Didn't produce a file name and line number string: ", caught)
}
val caught2 =
intercept[NotAllowedException] {
conductor.conduct(-1, 100)
}
caught2.failedCodeFileNameAndLineNumberString match {
case Some(s) => s should equal ("ConductorSuite.scala:" + (baseLineNumber + 121))
case None => fail("Didn't produce a file name and line number string: ", caught)
}
caught2.getMessage should be ("The clockPeriod passed to conduct must be greater than zero. Value passed was: -1.")
}
test("If a non-positive number is passed to conduct for runLimit, it will throw NotAllowedException") {
val conductor = new Conductor
val caught =
intercept[NotAllowedException] {
conductor.conduct(100, 0)
}
caught.getMessage should be ("The timeout passed to conduct must be greater than zero. Value passed was: 0.")
caught.failedCodeFileNameAndLineNumberString match {
case Some(s) => s should equal ("ConductorSuite.scala:" + (baseLineNumber + 134))
case None => fail("Didn't produce a file name and line number string: ", caught)
}
val caught2 =
intercept[NotAllowedException] {
conductor.conduct(100, -1)
}
caught2.failedCodeFileNameAndLineNumberString match {
case Some(s) => s should equal ("ConductorSuite.scala:" + (baseLineNumber + 143))
case None => fail("Didn't produce a file name and line number string: ", caught)
}
caught2.getMessage should be ("The timeout passed to conduct must be greater than zero. Value passed was: -1.")
}
test("withConductorFrozen executes the passed function once") {
val conductor = new Conductor
var functionExecutionCount = 0
conductor.withConductorFrozen { // Function will be executed by the calling thread
functionExecutionCount += 1
}
functionExecutionCount should be (1)
}
test("first exception thrown is reported") {
val e = new RuntimeException("howdy")
class MySuite extends FunSuite {
test("this will fail") {
val conductor = new Conductor
import conductor._
thread {
waitForBeat(1)
}
thread {
throw e
()
}
conductor.conduct()
}
}
val a = new MySuite
val rep = new EventRecordingReporter
a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
val tf = rep.testFailedEventsReceived
tf.size should be === 1
tf.head.throwable should be ('defined)
tf.head.throwable.get should be theSameInstanceAs e
}
test("whenFinished can only be called by thread that created Conductor.") {
val conductor = new Conductor
import conductor._
thread {
intercept[NotAllowedException] {
whenFinished { 1 should be (1) }
}.getMessage should be ("whenFinished can only be called by the thread that created Conductor.")
}
whenFinished { 1 should be (1) }
}
test("isConductorFrozen returns true if the conductor is frozen, false otherwise") {
val conductor = new Conductor
import conductor._
conductor.isConductorFrozen should be (false)
withConductorFrozen {
conductor.isConductorFrozen should be (true)
}
}
test("the beat method returns the correct value") {
val conductor = new Conductor
import conductor._
beat should equal (0)
thread {
beat should equal (0)
waitForBeat(1)
beat should equal (1)
waitForBeat(2)
beat should equal (2)
}
whenFinished {
beat should equal (2)
}
}
test("if I wait for a beat that's lower than the current beat, I just keep going") {
val conductor = new Conductor
import conductor._
beat should equal (0)
thread {
beat should equal (0)
waitForBeat(1)
beat should equal (1)
waitForBeat(1) // This should also work
beat should equal (1)
waitForBeat(2)
beat should equal (2)
waitForBeat(1) // This should also work
beat should equal (2)
}
whenFinished {
beat should equal (2)
}
}
  class Forevermore { // helper whose monitor is never notified: callers block forever
    def waitForever() {
      synchronized {
        wait()
      }
    }
  }
  test("deadlock is detected") { // two permanently blocked threads -> conductor aborts with a deadlock message
    val conductor = new Conductor
    import conductor._
    val monitor = new Forevermore
    thread {
      monitor.waitForever()
    }
    thread {
      monitor.waitForever()
    }
    val caught =
      intercept[RuntimeException] {
        conduct()
      }
    caught.getMessage should be ("Test aborted because of suspected deadlock. No progress has been made (the beat did not advance) for 50 clock periods (500 ms).")
  }
test("other threads are killed when one thread throws an exception") {
val conductor = new Conductor
import conductor._
val monitor = new Forevermore
val threadWasKilled = new AtomicBoolean()
thread {
try {
monitor.waitForever()
}
catch {
case t: ThreadDeath =>
threadWasKilled.set(true)
throw t
}
}
thread {
waitForBeat(1)
fail()
()
}
intercept[RuntimeException] {
conduct()
}
threadWasKilled.get should be (true)
}
test("runaway threads will cause a test to be timed out") {
val conductor = new Conductor
import conductor._
class Counter {
@volatile var count = 0
}
val counter = new Counter
thread {
while (true)
counter.count += 1
}
thread {
while (true)
counter.count -= 1
}
val caught =
intercept[RuntimeException] {
conduct(10, 1)
}
caught.getMessage should be ("Test timed out because threads existed that were runnable while no progress was made (the beat did not advance) for 1 seconds.")
}
}
| yyuu/scalatest | src/test/scala/org/scalatest/concurrent/ConductorSuite.scala | Scala | apache-2.0 | 12,107 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.stream.sql
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.runtime.utils.{StreamITCase, StreamingWithStateTestBase}
import org.apache.flink.types.Row
import org.junit.Assert.assertEquals
import org.junit._
import java.sql.Timestamp
import scala.collection.mutable
class TemporalJoinITCase extends StreamingWithStateTestBase {

  // Reset the shared sink's collected results before each test.
  @Before
  def clear(): Unit = {
    StreamITCase.clear
  }

  /**
   * Because of nature of the processing time, we can not (or at least it is not that easy)
   * validate the result here. Instead of that, here we are just testing whether there are no
   * exceptions in a full blown ITCase. Actual correctness is tested in unit tests.
   */
  @Test
  def testProcessTimeInnerJoin(): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val settings = EnvironmentSettings.newInstance().useOldPlanner().build
    val tEnv = StreamTableEnvironment.create(env, settings)
    env.setStateBackend(getStateBackend)
    env.setParallelism(1)
    env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime)

    // Joins each order to the rate valid at the order's processing time.
    val sqlQuery =
      """
        |SELECT
        |  o.amount * r.rate AS amount
        |FROM
        |  Orders AS o,
        |  LATERAL TABLE (Rates(o.proctime)) AS r
        |WHERE r.currency = o.currency
        |""".stripMargin

    val ordersData = new mutable.MutableList[(Long, String)]
    ordersData.+=((2L, "Euro"))
    ordersData.+=((1L, "US Dollar"))
    ordersData.+=((50L, "Yen"))
    ordersData.+=((3L, "Euro"))
    ordersData.+=((5L, "US Dollar"))

    // Rate updates; later entries supersede earlier ones for the same currency.
    val ratesHistoryData = new mutable.MutableList[(String, Long)]
    ratesHistoryData.+=(("US Dollar", 102L))
    ratesHistoryData.+=(("Euro", 114L))
    ratesHistoryData.+=(("Yen", 1L))
    ratesHistoryData.+=(("Euro", 116L))
    ratesHistoryData.+=(("Euro", 119L))

    val orders = env
      .fromCollection(ordersData)
      .toTable(tEnv, 'amount, 'currency, 'proctime.proctime)
    val ratesHistory = env
      .fromCollection(ratesHistoryData)
      .toTable(tEnv, 'currency, 'rate, 'proctime.proctime)

    tEnv.registerTable("Orders", orders)
    tEnv.registerTable("RatesHistory", ratesHistory)
    // Temporal table function over the rates history, keyed by currency.
    tEnv.registerFunction(
      "Rates",
      ratesHistory.createTemporalTableFunction('proctime, 'currency))

    val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row]
    result.addSink(new StreamITCase.StringSink[Row])
    env.execute()
  }
@Test
def testEventTimeInnerJoin(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
val settings = EnvironmentSettings.newInstance().useOldPlanner().build
val tEnv = StreamTableEnvironment.create(env, settings)
env.setStateBackend(getStateBackend)
env.setParallelism(1)
val sqlQuery =
"""
|SELECT
| o.amount * r.rate AS amount
|FROM
| Orders AS o,
| LATERAL TABLE (Rates(o.rowtime)) AS r
|WHERE r.currency = o.currency
|""".stripMargin
val ordersData = new mutable.MutableList[(Long, String, Timestamp)]
ordersData.+=((2L, "Euro", new Timestamp(2L)))
ordersData.+=((1L, "US Dollar", new Timestamp(3L)))
ordersData.+=((50L, "Yen", new Timestamp(4L)))
ordersData.+=((3L, "Euro", new Timestamp(5L)))
val ratesHistoryData = new mutable.MutableList[(String, Long, Timestamp)]
ratesHistoryData.+=(("US Dollar", 102L, new Timestamp(1L)))
ratesHistoryData.+=(("Euro", 114L, new Timestamp(1L)))
ratesHistoryData.+=(("Yen", 1L, new Timestamp(1L)))
ratesHistoryData.+=(("Euro", 116L, new Timestamp(5L)))
ratesHistoryData.+=(("Euro", 119L, new Timestamp(7L)))
var expectedOutput = new mutable.HashSet[String]()
expectedOutput += (2 * 114).toString
expectedOutput += (3 * 116).toString
val orders = env
.fromCollection(ordersData)
.assignTimestampsAndWatermarks(new TimestampExtractor[(Long, String, Timestamp)]())
.toTable(tEnv, 'amount, 'currency, 'rowtime.rowtime)
val ratesHistory = env
.fromCollection(ratesHistoryData)
.assignTimestampsAndWatermarks(new TimestampExtractor[(String, Long, Timestamp)]())
.toTable(tEnv, 'currency, 'rate, 'rowtime.rowtime)
tEnv.registerTable("Orders", orders)
tEnv.registerTable("RatesHistory", ratesHistory)
tEnv.registerTable("FilteredRatesHistory", tEnv.scan("RatesHistory").filter('rate > 110L))
tEnv.registerFunction(
"Rates",
tEnv.scan("FilteredRatesHistory").createTemporalTableFunction('rowtime, 'currency))
tEnv.registerTable("TemporalJoinResult", tEnv.sqlQuery(sqlQuery))
// Scan from registered table to test for interplay between
// LogicalCorrelateToTemporalTableJoinRule and TableScanRule
val result = tEnv.scan("TemporalJoinResult").toAppendStream[Row]
result.addSink(new StreamITCase.StringSink[Row])
env.execute()
assertEquals(expectedOutput, StreamITCase.testResults.toSet)
}
@Test
def testNestedTemporalJoin(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
val settings = EnvironmentSettings.newInstance().useOldPlanner().build
val tEnv = StreamTableEnvironment.create(env, settings)
env.setStateBackend(getStateBackend)
val sqlQuery =
"""
|SELECT
| o.orderId,
| (o.amount * p.price * r.rate) as total_price
|FROM
| Orders AS o,
| LATERAL TABLE (Prices(o.rowtime)) AS p,
| LATERAL TABLE (Rates(o.rowtime)) AS r
|WHERE
| o.productId = p.productId AND
| r.currency = p.currency
|""".stripMargin
val ordersData = new mutable.MutableList[(Long, String, Long, Timestamp)]
ordersData.+=((1L, "A1", 2L, new Timestamp(2L)))
ordersData.+=((2L, "A2", 1L, new Timestamp(3L)))
ordersData.+=((3L, "A4", 50L, new Timestamp(4L)))
ordersData.+=((4L, "A1", 3L, new Timestamp(5L)))
val orders = env
.fromCollection(ordersData)
.assignTimestampsAndWatermarks(new TimestampExtractor[(Long, String, Long, Timestamp)]())
.toTable(tEnv, 'orderId, 'productId, 'amount, 'rowtime.rowtime)
val ratesHistoryData = new mutable.MutableList[(String, Long, Timestamp)]
ratesHistoryData.+=(("US Dollar", 102L, new Timestamp(1L)))
ratesHistoryData.+=(("Euro", 114L, new Timestamp(1L)))
ratesHistoryData.+=(("Yen", 1L, new Timestamp(1L)))
ratesHistoryData.+=(("Euro", 116L, new Timestamp(5L)))
ratesHistoryData.+=(("Euro", 119L, new Timestamp(7L)))
val ratesHistory = env
.fromCollection(ratesHistoryData)
.assignTimestampsAndWatermarks(new TimestampExtractor[(String, Long, Timestamp)]())
.toTable(tEnv, 'currency, 'rate, 'rowtime.rowtime)
val pricesHistoryData = new mutable.MutableList[(String, String, Double, Timestamp)]
pricesHistoryData.+=(("A2", "US Dollar", 10.2D, new Timestamp(1L)))
pricesHistoryData.+=(("A1", "Euro", 11.4D, new Timestamp(1L)))
pricesHistoryData.+=(("A4", "Yen", 1D, new Timestamp(1L)))
pricesHistoryData.+=(("A1", "Euro", 11.6D, new Timestamp(5L)))
pricesHistoryData.+=(("A1", "Euro", 11.9D, new Timestamp(7L)))
val pricesHistory = env
.fromCollection(pricesHistoryData)
.assignTimestampsAndWatermarks(new TimestampExtractor[(String, String, Double, Timestamp)]())
.toTable(tEnv, 'productId, 'currency, 'price, 'rowtime.rowtime)
tEnv.createTemporaryView("Orders", orders)
tEnv.createTemporaryView("RatesHistory", ratesHistory)
tEnv.registerFunction(
"Rates",
ratesHistory.createTemporalTableFunction($"rowtime", $"currency"))
tEnv.registerFunction(
"Prices",
pricesHistory.createTemporalTableFunction($"rowtime", $"productId"))
tEnv.createTemporaryView("TemporalJoinResult", tEnv.sqlQuery(sqlQuery))
// Scan from registered table to test for interplay between
// LogicalCorrelateToTemporalTableJoinRule and TableScanRule
val result = tEnv.from("TemporalJoinResult").toAppendStream[Row]
result.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = List(
s"1,${2 * 114 * 11.4}",
s"2,${1 * 102 * 10.2}",
s"3,${50 * 1 * 1.0}",
s"4,${3 * 116 * 11.6}")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
}
/**
 * Assigns event-time timestamps (allowing up to 10 seconds of out-of-orderness)
 * by reading the trailing `java.sql.Timestamp` field of a 3- or 4-element tuple.
 */
class TimestampExtractor[T <: Product]
  extends BoundedOutOfOrdernessTimestampExtractor[T](Time.seconds(10)) {
  override def extractTimestamp(element: T): Long = {
    // Only tuples of arity 3 or 4 with a Timestamp in the last position are accepted.
    val maybeTimestamp = element match {
      case (_, _, ts: Timestamp) => Some(ts)
      case (_, _, _, ts: Timestamp) => Some(ts)
      case _ => None
    }
    maybeTimestamp match {
      case Some(ts) => ts.getTime
      case None => throw new IllegalArgumentException(
        "Expected the last element in a tuple to be of a Timestamp type.")
    }
  }
}
| rmetzger/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/runtime/stream/sql/TemporalJoinITCase.scala | Scala | apache-2.0 | 9,875 |
package rules
import models.{APILimit, APILimitTable, Work, WorkType}
import org.joda.time.DateTime
import play.api.Play
import play.api.Play.current
object APILimitRules {
  // Application configuration, resolved lazily on first use.
  private lazy val config = Play.configuration

  /**
   * Returns true when at least one of the Twitter API endpoints needed for this
   * work item has exhausted its current 15-minute rate-limit window.
   */
  def isLimited(work: Work): Boolean = {
    // Placeholder used when no window has ever been recorded for an endpoint;
    // its start lies in the past, so a fresh window is created below.
    val expiredWindow = APILimit(-1, "", DateTime.now().minusDays(1), 0)
    val endpoints = work.workType match {
      case WorkType.Tweet => Seq("statuses/user_timeline")
      case WorkType.UserProfile => Seq("friends/list", "followers/list")
    }
    // For every endpoint, decide whether another request is still allowed inside
    // the active 15-minute window, opening a new window when the old one expired.
    val withinLimit = endpoints.map { endpoint =>
      val maxRequests = config.getInt(s"dtc.limit.$endpoint").getOrElse(0)
      val latest = APILimitTable.getLatestWindow(endpoint).getOrElse(expiredWindow)
      val window =
        if (latest.windowStart.plusMinutes(15).isAfterNow) latest
        else APILimitTable.create(APILimit(-1, endpoint, DateTime.now, 0))
      window.windowStart.plusMinutes(15).isAfterNow && window.requests < maxRequests
    }
    withinLimit.contains(false)
  }

  /**
   * Runs `block` while counting one additional request against the endpoint's
   * latest window; yields None when no window exists or `block` returns None.
   */
  def withAPILimit[A](endpoint: String)(block: => Option[A]): Option[A] = {
    APILimitTable.getLatestWindow(endpoint).flatMap { window =>
      APILimitTable.update(window.copy(requests = window.requests + 1))
      block
    }
  }
}
} | rtfpessoa/distributed-twitter-crawler | app/rules/APILimitRules.scala | Scala | mit | 1,338 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.tools.export
import com.beust.jcommander.Parameters
import org.locationtech.geomesa.fs.data.FileSystemDataStore
import org.locationtech.geomesa.fs.data.FileSystemDataStoreFactory.FileSystemDataStoreParams
import org.locationtech.geomesa.fs.tools.FsDataStoreCommand.{FsDistributedCommand, FsParams}
import org.locationtech.geomesa.fs.tools.export.FsExportCommand.OptionalQueryThreads
import org.locationtech.geomesa.fs.tools.export.FsPlaybackCommand.FsPlaybackParams
import org.locationtech.geomesa.tools.export.PlaybackCommand
import org.locationtech.geomesa.tools.export.PlaybackCommand.PlaybackParams
/** Playback command wired to the FileSystem data store, executed in distributed mode. */
class FsPlaybackCommand extends PlaybackCommand[FileSystemDataStore] with FsDistributedCommand {

  override val params = new FsPlaybackParams

  override def connection: Map[String, String] = {
    // Forward the requested query-thread count to the data store's read-threads option.
    val readThreads = FileSystemDataStoreParams.ReadThreadsParam.getName -> params.threads.toString
    super.connection + readThreads
  }
}
object FsPlaybackCommand {
  // JCommander parameter container: combines the generic playback options with
  // the file-system connection parameters and an optional query-thread count.
  @Parameters(commandDescription = "Playback features from a GeoMesa data store, based on the feature date")
  class FsPlaybackParams extends PlaybackParams with FsParams with OptionalQueryThreads
}
| aheyne/geomesa | geomesa-fs/geomesa-fs-tools/src/main/scala/org/locationtech/geomesa/fs/tools/export/FsPlaybackCommand.scala | Scala | apache-2.0 | 1,631 |
package org.jetbrains.plugins.scala.lang.scaladoc.psi.api
import _root_.org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
import com.intellij.psi.PsiElement
import com.intellij.psi.javadoc.PsiDocTag
/**
 * A Scaladoc tag element (e.g. `@param`, `@return`) in the Scala PSI tree.
 *
 * User: Alexander Podkhalyuzin
 * Date: 22.07.2008
 */
trait ScDocTag extends ScalaPsiElement with PsiDocTag {
  /** Returns the raw comment-data text attached to this tag. */
  def getCommentDataText(): String

  /** Renders the tag's full text, mapping each child element through `handler`. */
  def getAllText(handler: PsiElement => String): String

  /** Renders the tag's full text using each element's own text. */
  def getAllText: String = getAllText(_.getText)
}
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.planner.logical.idp
import org.neo4j.cypher.internal.compiler.v2_3.planner.logical.Solvable
import org.neo4j.cypher.internal.compiler.v2_3.planner.logical.plans.LogicalPlan
import scala.collection.{Map, mutable}
/**
 * Mutable lookup table used during IDP planning: maps the set of solved query
 * parts to the best logical plan found for that set so far. Also usable as a
 * function `Set[Solvable] => Option[LogicalPlan]`.
 */
class IDPPlanTable extends (Set[Solvable] => Option[LogicalPlan]) {
  private val table = new mutable.HashMap[Set[Solvable], LogicalPlan]()

  /** The final plan; only valid once planning has reduced the table to one entry. */
  def singleRemainingPlan: LogicalPlan = {
    assert(table.size == 1, "Expected a single plan to be left in the plan table")
    table.head._2
  }

  def apply(solved: Set[Solvable]): Option[LogicalPlan] = table.get(solved)

  def put(solved: Set[Solvable], plan: LogicalPlan): Unit = {
    table.put(solved, plan)
  }

  /** Removes every entry whose key shares at least one solvable with `solvables`. */
  def removeAllTracesOf(solvables: Set[Solvable]) = {
    table.retain {
      case (k, _) => (k intersect solvables).isEmpty
    }
  }

  def contains(solved: Set[Solvable]): Boolean = table.contains(solved)

  /**
   * Snapshot of all entries whose key contains exactly `k` solvables.
   * Materialized with `toMap` because `filterKeys` alone returns a lazy view over
   * the mutable table, which would reflect (or break on) later mutations such as
   * [[removeAllTracesOf]].
   */
  def plansOfSize(k: Int): Map[Set[Solvable], LogicalPlan] = table.filterKeys(_.size == k).toMap

  def keySet: Set[Set[Solvable]] = table.keySet.toSet
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/planner/logical/idp/IDPPlanTable.scala | Scala | apache-2.0 | 1,858 |
import sbt._
// Loads the sbt-jasmine plugin into the build, which lets Build.scala import
// SbtJasminePlugin and configure the Sbt-Jasmine settings.
object Plugins extends Build {
  // Git source of the plugin, pinned to tag 0.7.
  private val jasminePlugin = uri("git://github.com/mresposito/sbt-jasmine-plugin.git#0.7")

  lazy val plugins = Project("plugins", file(".")).dependsOn(jasminePlugin)
}
| mresposito/gitEvolve | project/project/Plugins.scala | Scala | apache-2.0 | 383 |
/*
* Copyright 2018 TWO SIGMA OPEN SOURCE, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twosigma.beakerx.scala.spark
import java.util.{HashMap, Map}
import com.twosigma.beakerx.mimetype.MIMEContainer
import com.twosigma.beakerx.scala.table.TableDisplay
import jupyter.{Displayer, Displayers}
/**
 * Registers a BeakerX Jupyter displayer for Flint's
 * `com.twosigma.flint.timeseries.TimeSeriesRDD` so that evaluating such an RDD
 * in a notebook renders a preview table instead of plain text.
 */
object TimeSeriesRDDDisplayer {

  /** Hooks the TimeSeriesRDD displayer into the global jupyter [[Displayers]] registry. */
  def register(): Unit = {
    Displayers.register(classOf[com.twosigma.flint.timeseries.TimeSeriesRDD], new Displayer[com.twosigma.flint.timeseries.TimeSeriesRDD]() {
      override def display(ds: com.twosigma.flint.timeseries.TimeSeriesRDD): Map[String, String] = new HashMap[String, String]() {
        // Render the table as a side effect and report only a HIDDEN MIME entry,
        // so the frontend does not additionally print a textual representation.
        displayTimeSeriesRDD(ds)
        put(MIMEContainer.MIME.HIDDEN, "")
      }
    })
  }

  /** Displays the first `rows` rows (default 20) of `sRdd` as a BeakerX table. */
  def displayTimeSeriesRDD(sRdd: com.twosigma.flint.timeseries.TimeSeriesRDD, rows: Integer = 20): Unit = {
    val t: TableDisplay = preview(sRdd, rows)
    t.display()
  }

  /** Builds a [[TableDisplay]] from the first `num` rows of the RDD, one column-name -> value map per row. */
  private def preview(tsRdd: com.twosigma.flint.timeseries.TimeSeriesRDD, num: Integer = 20): TableDisplay = {
    val cols = tsRdd.schema.fieldNames
    val rows = tsRdd.toDF.take(num)
    val listOfMaps = rows.map { r => (cols zip r.toSeq).toMap }
    new TableDisplay(listOfMaps)
  }

  /** Scala source injected into the REPL to add a `display` extension method on TimeSeriesRDD. */
  def implicitCodeAsString(): String = {
    "implicit class TimeSeriesRDDOps(tsd: com.twosigma.flint.timeseries.TimeSeriesRDD) {\\n" +
      " def display(rows: Int = 20) = {\\n" +
      " com.twosigma.beakerx.scala.spark.TimeSeriesRDDDisplayer.displayTimeSeriesRDD(tsd,rows)" +
      " }\\n" +
      "}\\n"
  }
}
| twosigma/beaker-notebook | kernel/sparkex/src/main/scala/com.twosigma.beakerx.scala.spark/TimeSeriesRDDDisplayer.scala | Scala | apache-2.0 | 2,077 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules
import org.apache.calcite.rel.core.RelFactories
import org.apache.calcite.rel.rules._
import org.apache.calcite.tools.{RuleSet, RuleSets}
import org.apache.flink.table.plan.rules.common._
import org.apache.flink.table.plan.rules.logical._
import org.apache.flink.table.plan.rules.dataSet._
import org.apache.flink.table.plan.rules.datastream._
import org.apache.flink.table.plan.nodes.logical._
/**
 * Collections of Calcite [[RuleSet]]s applied by the Flink Table API planner in
 * its successive phases: sub-query rewriting, table-reference resolution,
 * logical optimization, and the normalization / optimization / decoration
 * phases of the DataSet (batch) and DataStream (streaming) translations.
 */
object FlinkRuleSets {
  /**
   * Convert sub-queries before query decorrelation.
   */
  val TABLE_SUBQUERY_RULES: RuleSet = RuleSets.ofList(
    SubQueryRemoveRule.FILTER,
    SubQueryRemoveRule.PROJECT,
    SubQueryRemoveRule.JOIN)
  /**
   * Convert table references before query decorrelation.
   */
  val TABLE_REF_RULES: RuleSet = RuleSets.ofList(
    TableScanRule.INSTANCE,
    EnumerableToLogicalTableScan.INSTANCE)
  /**
   * Rules for the logical optimization phase; the final group converts the plan
   * into Flink logical RelNodes.
   */
  val LOGICAL_OPT_RULES: RuleSet = RuleSets.ofList(
    // push a filter into a join
    FilterJoinRule.FILTER_ON_JOIN,
    // push filter into the children of a join
    FilterJoinRule.JOIN,
    // push filter through an aggregation
    FilterAggregateTransposeRule.INSTANCE,
    // aggregation and projection rules
    AggregateProjectMergeRule.INSTANCE,
    AggregateProjectPullUpConstantsRule.INSTANCE,
    // push a projection past a filter or vice versa
    ProjectFilterTransposeRule.INSTANCE,
    FilterProjectTransposeRule.INSTANCE,
    // push a projection to the children of a join
    // push all expressions to handle the time indicator correctly
    new ProjectJoinTransposeRule(PushProjector.ExprCondition.FALSE, RelFactories.LOGICAL_BUILDER),
    // merge projections
    ProjectMergeRule.INSTANCE,
    // remove identity project
    ProjectRemoveRule.INSTANCE,
    // reorder sort and projection
    SortProjectTransposeRule.INSTANCE,
    ProjectSortTransposeRule.INSTANCE,
    // join rules
    JoinPushExpressionsRule.INSTANCE,
    // remove union with only a single child
    UnionEliminatorRule.INSTANCE,
    // convert non-all union into all-union + distinct
    UnionToDistinctRule.INSTANCE,
    // remove aggregation if it does not aggregate and input is already distinct
    AggregateRemoveRule.INSTANCE,
    // push aggregate through join
    AggregateJoinTransposeRule.EXTENDED,
    // aggregate union rule
    AggregateUnionAggregateRule.INSTANCE,
    // expand distinct aggregate to normal aggregate with groupby
    AggregateExpandDistinctAggregatesRule.JOIN,
    // reduce aggregate functions like AVG, STDDEV_POP etc.
    AggregateReduceFunctionsRule.INSTANCE,
    // remove unnecessary sort rule
    SortRemoveRule.INSTANCE,
    // prune empty results rules
    PruneEmptyRules.AGGREGATE_INSTANCE,
    PruneEmptyRules.FILTER_INSTANCE,
    PruneEmptyRules.JOIN_LEFT_INSTANCE,
    PruneEmptyRules.JOIN_RIGHT_INSTANCE,
    PruneEmptyRules.PROJECT_INSTANCE,
    PruneEmptyRules.SORT_INSTANCE,
    PruneEmptyRules.UNION_INSTANCE,
    // calc rules
    FilterCalcMergeRule.INSTANCE,
    ProjectCalcMergeRule.INSTANCE,
    FilterToCalcRule.INSTANCE,
    ProjectToCalcRule.INSTANCE,
    CalcMergeRule.INSTANCE,
    // scan optimization
    PushProjectIntoTableSourceScanRule.INSTANCE,
    PushFilterIntoTableSourceScanRule.INSTANCE,
    // unnest rule
    LogicalUnnestRule.INSTANCE,
    // translate to flink logical rel nodes
    FlinkLogicalAggregate.CONVERTER,
    FlinkLogicalWindowAggregate.CONVERTER,
    FlinkLogicalOverWindow.CONVERTER,
    FlinkLogicalCalc.CONVERTER,
    FlinkLogicalCorrelate.CONVERTER,
    FlinkLogicalIntersect.CONVERTER,
    FlinkLogicalJoin.CONVERTER,
    FlinkLogicalMinus.CONVERTER,
    FlinkLogicalSort.CONVERTER,
    FlinkLogicalUnion.CONVERTER,
    FlinkLogicalValues.CONVERTER,
    FlinkLogicalTableSourceScan.CONVERTER,
    FlinkLogicalTableFunctionScan.CONVERTER,
    FlinkLogicalNativeTableScan.CONVERTER
  )
  /**
   * RuleSet to normalize plans for batch / DataSet execution
   */
  val DATASET_NORM_RULES: RuleSet = RuleSets.ofList(
    // simplify expressions rules
    ReduceExpressionsRule.FILTER_INSTANCE,
    ReduceExpressionsRule.PROJECT_INSTANCE,
    ReduceExpressionsRule.CALC_INSTANCE,
    ReduceExpressionsRule.JOIN_INSTANCE,
    ProjectToWindowRule.PROJECT,
    // Transform window to LogicalWindowAggregate
    DataSetLogicalWindowAggregateRule.INSTANCE,
    WindowStartEndPropertiesRule.INSTANCE,
    WindowStartEndPropertiesHavingRule.INSTANCE
  )
  /**
   * RuleSet to optimize plans for batch / DataSet execution
   */
  val DATASET_OPT_RULES: RuleSet = RuleSets.ofList(
    // translate to Flink DataSet nodes
    DataSetWindowAggregateRule.INSTANCE,
    DataSetAggregateRule.INSTANCE,
    DataSetAggregateWithNullValuesRule.INSTANCE,
    DataSetDistinctRule.INSTANCE,
    DataSetCalcRule.INSTANCE,
    DataSetJoinRule.INSTANCE,
    DataSetSingleRowJoinRule.INSTANCE,
    DataSetScanRule.INSTANCE,
    DataSetUnionRule.INSTANCE,
    DataSetIntersectRule.INSTANCE,
    DataSetMinusRule.INSTANCE,
    DataSetSortRule.INSTANCE,
    DataSetValuesRule.INSTANCE,
    DataSetCorrelateRule.INSTANCE,
    BatchTableSourceScanRule.INSTANCE
  )
  /**
   * RuleSet to normalize plans for stream / DataStream execution
   */
  val DATASTREAM_NORM_RULES: RuleSet = RuleSets.ofList(
    // Transform window to LogicalWindowAggregate
    DataStreamLogicalWindowAggregateRule.INSTANCE,
    WindowStartEndPropertiesRule.INSTANCE,
    WindowStartEndPropertiesHavingRule.INSTANCE,
    // simplify expressions rules
    ReduceExpressionsRule.FILTER_INSTANCE,
    ReduceExpressionsRule.PROJECT_INSTANCE,
    ReduceExpressionsRule.CALC_INSTANCE,
    ProjectToWindowRule.PROJECT
  )
  /**
   * RuleSet to optimize plans for stream / DataStream execution
   */
  val DATASTREAM_OPT_RULES: RuleSet = RuleSets.ofList(
    // translate to DataStream nodes
    DataStreamSortRule.INSTANCE,
    DataStreamGroupAggregateRule.INSTANCE,
    DataStreamOverAggregateRule.INSTANCE,
    DataStreamGroupWindowAggregateRule.INSTANCE,
    DataStreamCalcRule.INSTANCE,
    DataStreamScanRule.INSTANCE,
    DataStreamUnionRule.INSTANCE,
    DataStreamValuesRule.INSTANCE,
    DataStreamCorrelateRule.INSTANCE,
    DataStreamWindowJoinRule.INSTANCE,
    StreamTableSourceScanRule.INSTANCE
  )
  /**
   * RuleSet to decorate plans for stream / DataStream execution
   */
  val DATASTREAM_DECO_RULES: RuleSet = RuleSets.ofList(
    // retraction rules
    DataStreamRetractionRules.DEFAULT_RETRACTION_INSTANCE,
    DataStreamRetractionRules.UPDATES_AS_RETRACTION_INSTANCE,
    DataStreamRetractionRules.ACCMODE_INSTANCE
  )
}
| PangZhi/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/rules/FlinkRuleSets.scala | Scala | apache-2.0 | 7,377 |
package de.kalass.batchmonads.base.impl
// Pairs one input value with the BatchOperation that will process it.
// Declared as a case class, so that we get a good equals implementation for free.
private[base] case class BaseOperation[I, A](value: I, creator: BatchOperation[I, A]) extends Operation[A] {
}
/**
* Copyright 2013 Alex Jones
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
* @author unclealex72
*
*/
package dates
import java.time.ZonedDateTime
/**
 * A date parser that delegates to a sequence of underlying parsers, returning the
 * first date any of them parses or finds.
 * @author alex
 *
 */
class ChainingDateParser(
  /**
   * The sequence of date parsers that will be used to find or parse dates.
   */
  dateParsers: Seq[DateParser]) extends DateParser {

  override def parse(str: String) = traverse(_.parse(str))

  override def find(str: String) = traverse(_.find(str))

  /**
   * Apply `f` to each parser in turn and return the first date produced.
   */
  protected def traverse(f: DateParser => Traversable[ZonedDateTime]): Option[ZonedDateTime] = {
    dateParsers.foldLeft(Option.empty[ZonedDateTime]) { (found, parser) =>
      // orElse takes a by-name argument, so parsers after the first hit are never invoked.
      found.orElse(f(parser).headOption)
    }
  }
}
} | unclealex72/west-ham-calendar | app/dates/ChainingDateParser.scala | Scala | apache-2.0 | 1,611 |
package fr.eurecom.dsg.treelib.core
/**
 * Aggregate statistics for one value of one feature in the data set.
 * @param index Index of the feature in the whole data set, based zero
 * @param xValue Value of the current feature
 * @param yValue Sum of the Y (target, predicted) values associated with this feature value
 * @param yValuePower2 Sum of the squared Y values (used for variance computation)
 * @param frequency Frequency of this value
 */
class FeatureValueAggregate(var index: Int, var xValue: Any,
    var yValue: Double, var yValuePower2: Double,
    var frequency: Int) extends Serializable {

  /**
   * Merges partial aggregates into this instance, in place.
   * @param yAcc sum of Y values to add
   * @param yp2Acc sum of squared Y values to add
   * @param frequencyAcc number of additional occurrences
   */
  def updateByAggregate(yAcc: Double, yp2Acc: Double, frequencyAcc: Int) = {
    this.yValue += yAcc
    this.yValuePower2 += yp2Acc
    this.frequency += frequencyAcc
  }

  /**
   * Sum two FeatureValueAggregates: a new instance carrying this aggregate's
   * index and xValue, with yValue, yValuePower2 and frequency summed.
   */
  def +(that: FeatureValueAggregate) = {
    new FeatureValueAggregate(this.index, this.xValue,
      this.yValue + that.yValue,
      this.yValuePower2 + that.yValuePower2,
      this.frequency + that.frequency)
  }

  // Note: colon after "yValue" fixed for consistency with the other fields.
  override def toString() = "Feature(index:" + index + " | xValue:" + xValue +
    " | yValue:" + yValue + " | frequency:" + frequency + ")"
}
| bigfootproject/treelib | src/main/scala/fr/eurecom/dsg/treelib/core/FeatureValueAggregate.scala | Scala | apache-2.0 | 1,354 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.jdbc
import java.math.BigDecimal
import java.sql.{Date, DriverManager, SQLException, Timestamp}
import java.util.{Calendar, GregorianCalendar, Properties}
import scala.collection.JavaConverters._
import org.h2.jdbc.JdbcSQLException
import org.scalatest.{BeforeAndAfter, PrivateMethodTester}
import org.apache.spark.SparkException
import org.apache.spark.sql.{AnalysisException, DataFrame, QueryTest, Row}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeTestUtils}
import org.apache.spark.sql.execution.{DataSourceScanExec, ExtendedMode}
import org.apache.spark.sql.execution.command.{ExplainCommand, ShowCreateTableCommand}
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JDBCPartition, JDBCRDD, JDBCRelation, JdbcUtils}
import org.apache.spark.sql.execution.metric.InputOutputMetricsHelper
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources._
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
class JDBCSuite extends QueryTest
with BeforeAndAfter with PrivateMethodTester with SharedSparkSession {
import testImplicits._
val url = "jdbc:h2:mem:testdb0"
val urlWithUserAndPass = "jdbc:h2:mem:testdb0;user=testUser;password=testPass"
var conn: java.sql.Connection = null
val testBytes = Array[Byte](99.toByte, 134.toByte, 135.toByte, 200.toByte, 205.toByte)
val testH2Dialect = new JdbcDialect {
override def canHandle(url: String): Boolean = url.startsWith("jdbc:h2")
override def getCatalystType(
sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] =
Some(StringType)
}
val testH2DialectTinyInt = new JdbcDialect {
override def canHandle(url: String): Boolean = url.startsWith("jdbc:h2")
override def getCatalystType(
sqlType: Int,
typeName: String,
size: Int,
md: MetadataBuilder): Option[DataType] = {
sqlType match {
case java.sql.Types.TINYINT => Some(ByteType)
case _ => None
}
}
}
override def beforeAll(): Unit = {
super.beforeAll()
Utils.classForName("org.h2.Driver")
// Extra properties that will be specified for our database. We need these to test
// usage of parameters from OPTIONS clause in queries.
val properties = new Properties()
properties.setProperty("user", "testUser")
properties.setProperty("password", "testPass")
properties.setProperty("rowId", "false")
conn = DriverManager.getConnection(url, properties)
conn.prepareStatement("create schema test").executeUpdate()
conn.prepareStatement(
"create table test.people (name TEXT(32) NOT NULL, theid INTEGER NOT NULL)").executeUpdate()
conn.prepareStatement("insert into test.people values ('fred', 1)").executeUpdate()
conn.prepareStatement("insert into test.people values ('mary', 2)").executeUpdate()
conn.prepareStatement(
"insert into test.people values ('joe ''foo'' \\"bar\\"', 3)").executeUpdate()
conn.commit()
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW foobar
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', dbtable 'TEST.PEOPLE', user 'testUser', password 'testPass')
""".stripMargin.replaceAll("\\n", " "))
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW fetchtwo
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', dbtable 'TEST.PEOPLE', user 'testUser', password 'testPass',
| ${JDBCOptions.JDBC_BATCH_FETCH_SIZE} '2')
""".stripMargin.replaceAll("\\n", " "))
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW parts
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', dbtable 'TEST.PEOPLE', user 'testUser', password 'testPass',
| partitionColumn 'THEID', lowerBound '1', upperBound '4', numPartitions '3')
""".stripMargin.replaceAll("\\n", " "))
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW partsoverflow
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', dbtable 'TEST.PEOPLE', user 'testUser', password 'testPass',
| partitionColumn 'THEID', lowerBound '-9223372036854775808',
| upperBound '9223372036854775807', numPartitions '3')
""".stripMargin.replaceAll("\\n", " "))
conn.prepareStatement("create table test.inttypes (a INT, b BOOLEAN, c TINYINT, "
+ "d SMALLINT, e BIGINT)").executeUpdate()
conn.prepareStatement("insert into test.inttypes values (1, false, 3, 4, 1234567890123)"
).executeUpdate()
conn.prepareStatement("insert into test.inttypes values (null, null, null, null, null)"
).executeUpdate()
conn.commit()
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW inttypes
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', dbtable 'TEST.INTTYPES', user 'testUser', password 'testPass')
""".stripMargin.replaceAll("\\n", " "))
conn.prepareStatement("create table test.strtypes (a BINARY(20), b VARCHAR(20), "
+ "c VARCHAR_IGNORECASE(20), d CHAR(20), e BLOB, f CLOB)").executeUpdate()
val stmt = conn.prepareStatement("insert into test.strtypes values (?, ?, ?, ?, ?, ?)")
stmt.setBytes(1, testBytes)
stmt.setString(2, "Sensitive")
stmt.setString(3, "Insensitive")
stmt.setString(4, "Twenty-byte CHAR")
stmt.setBytes(5, testBytes)
stmt.setString(6, "I am a clob!")
stmt.executeUpdate()
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW strtypes
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', dbtable 'TEST.STRTYPES', user 'testUser', password 'testPass')
""".stripMargin.replaceAll("\\n", " "))
conn.prepareStatement("create table test.timetypes (a TIME, b DATE, c TIMESTAMP)"
).executeUpdate()
conn.prepareStatement("insert into test.timetypes values ('12:34:56', "
+ "'1996-01-01', '2002-02-20 11:22:33.543543543')").executeUpdate()
conn.prepareStatement("insert into test.timetypes values ('12:34:56', "
+ "null, '2002-02-20 11:22:33.543543543')").executeUpdate()
conn.commit()
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW timetypes
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', dbtable 'TEST.TIMETYPES', user 'testUser', password 'testPass')
""".stripMargin.replaceAll("\\n", " "))
conn.prepareStatement("CREATE TABLE test.timezone (tz TIMESTAMP WITH TIME ZONE) " +
"AS SELECT '1999-01-08 04:05:06.543543543 GMT-08:00'")
.executeUpdate()
conn.commit()
conn.prepareStatement("CREATE TABLE test.array (ar ARRAY) " +
"AS SELECT '(1, 2, 3)'")
.executeUpdate()
conn.commit()
conn.prepareStatement("create table test.flttypes (a DOUBLE, b REAL, c DECIMAL(38, 18))"
).executeUpdate()
conn.prepareStatement("insert into test.flttypes values ("
+ "1.0000000000000002220446049250313080847263336181640625, "
+ "1.00000011920928955078125, "
+ "123456789012345.543215432154321)").executeUpdate()
conn.commit()
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW flttypes
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', dbtable 'TEST.FLTTYPES', user 'testUser', password 'testPass')
""".stripMargin.replaceAll("\\n", " "))
conn.prepareStatement(
s"""
|create table test.nulltypes (a INT, b BOOLEAN, c TINYINT, d BINARY(20), e VARCHAR(20),
|f VARCHAR_IGNORECASE(20), g CHAR(20), h BLOB, i CLOB, j TIME, k DATE, l TIMESTAMP,
|m DOUBLE, n REAL, o DECIMAL(38, 18))
""".stripMargin.replaceAll("\\n", " ")).executeUpdate()
conn.prepareStatement("insert into test.nulltypes values ("
+ "null, null, null, null, null, null, null, null, null, "
+ "null, null, null, null, null, null)").executeUpdate()
conn.commit()
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW nulltypes
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', dbtable 'TEST.NULLTYPES', user 'testUser', password 'testPass')
""".stripMargin.replaceAll("\\n", " "))
conn.prepareStatement(
"create table test.emp(name TEXT(32) NOT NULL," +
" theid INTEGER, \\"Dept\\" INTEGER)").executeUpdate()
conn.prepareStatement(
"insert into test.emp values ('fred', 1, 10)").executeUpdate()
conn.prepareStatement(
"insert into test.emp values ('mary', 2, null)").executeUpdate()
conn.prepareStatement(
"insert into test.emp values ('joe ''foo'' \\"bar\\"', 3, 30)").executeUpdate()
conn.prepareStatement(
"insert into test.emp values ('kathy', null, null)").executeUpdate()
conn.commit()
conn.prepareStatement(
"create table test.seq(id INTEGER)").executeUpdate()
(0 to 6).foreach { value =>
conn.prepareStatement(
s"insert into test.seq values ($value)").executeUpdate()
}
conn.prepareStatement(
"insert into test.seq values (null)").executeUpdate()
conn.commit()
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW nullparts
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', dbtable 'TEST.EMP', user 'testUser', password 'testPass',
|partitionColumn '"Dept"', lowerBound '1', upperBound '4', numPartitions '3')
""".stripMargin.replaceAll("\\n", " "))
conn.prepareStatement(
"""create table test."mixedCaseCols" ("Name" TEXT(32), "Id" INTEGER NOT NULL)""")
.executeUpdate()
conn.prepareStatement("""insert into test."mixedCaseCols" values ('fred', 1)""").executeUpdate()
conn.prepareStatement("""insert into test."mixedCaseCols" values ('mary', 2)""").executeUpdate()
conn.prepareStatement("""insert into test."mixedCaseCols" values (null, 3)""").executeUpdate()
conn.commit()
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW mixedCaseCols
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', dbtable 'TEST."mixedCaseCols"', user 'testUser', password 'testPass')
""".stripMargin.replaceAll("\\n", " "))
conn.prepareStatement("CREATE TABLE test.partition (THEID INTEGER, `THE ID` INTEGER) " +
"AS SELECT 1, 1")
.executeUpdate()
conn.commit()
conn.prepareStatement("CREATE TABLE test.datetime (d DATE, t TIMESTAMP)").executeUpdate()
conn.prepareStatement(
"INSERT INTO test.datetime VALUES ('2018-07-06', '2018-07-06 05:50:00.0')").executeUpdate()
conn.prepareStatement(
"INSERT INTO test.datetime VALUES ('2018-07-06', '2018-07-06 08:10:08.0')").executeUpdate()
conn.prepareStatement(
"INSERT INTO test.datetime VALUES ('2018-07-08', '2018-07-08 13:32:01.0')").executeUpdate()
conn.prepareStatement(
"INSERT INTO test.datetime VALUES ('2018-07-12', '2018-07-12 09:51:15.0')").executeUpdate()
conn.commit()
// Untested: IDENTITY, OTHER, UUID, ARRAY, and GEOMETRY types.
}
override def afterAll(): Unit = {
  // Close the shared H2 connection, but make sure the parent suite's
  // teardown always runs even if the close itself throws — otherwise a
  // failed close would leak the shared test session.
  try {
    conn.close()
  } finally {
    super.afterAll()
  }
}
// Check whether the tables are fetched in the expected degree of parallelism.
// Collects every JDBCRelation in the analyzed plan; exactly one must exist,
// and it must have been split into `expectedNumPartitions` parts.
def checkNumPartitions(df: DataFrame, expectedNumPartitions: Int): Unit = {
  val jdbcRelations = df.queryExecution.analyzed.collect {
    case LogicalRelation(relation: JDBCRelation, _, _, _) => relation
  }
  assert(jdbcRelations.size == 1)
  val numParts = jdbcRelations.head.parts.length
  assert(numParts == expectedNumPartitions,
    s"Expecting a JDBCRelation with $expectedNumPartitions partitions, but got:`$jdbcRelations`")
}
// Asserts that predicates were pushed down to the JDBC source: the physical
// plan must be a whole-stage-codegen node whose child is a scan over the
// JDBCRelation, with no remaining Spark-side Filter operator. Returns `df`
// unchanged so the check can be chained inline.
private def checkPushdown(df: DataFrame): DataFrame = {
  val plan = df.queryExecution.executedPlan
  assert(plan.isInstanceOf[org.apache.spark.sql.execution.WholeStageCodegenExec])
  val codegen = plan.asInstanceOf[org.apache.spark.sql.execution.WholeStageCodegenExec]
  assert(codegen.child.isInstanceOf[org.apache.spark.sql.execution.DataSourceScanExec])
  assert(codegen.child.asInstanceOf[DataSourceScanExec].nodeName.contains("JDBCRelation"))
  df
}
// Asserts that predicates were NOT pushed down: JDBCRDD could not compile
// them into SQL, so the physical plan must still contain a Spark-side
// FilterExec under the codegen node. Returns `df` unchanged for chaining.
private def checkNotPushdown(df: DataFrame): DataFrame = {
  val plan = df.queryExecution.executedPlan
  assert(plan.isInstanceOf[org.apache.spark.sql.execution.WholeStageCodegenExec])
  val codegen = plan.asInstanceOf[org.apache.spark.sql.execution.WholeStageCodegenExec]
  assert(codegen.child.isInstanceOf[org.apache.spark.sql.execution.FilterExec])
  df
}
test("SELECT *") {
  // An unfiltered scan of the foobar view should return every row.
  val rows = sql("SELECT * FROM foobar").collect()
  assert(rows.length === 3)
}
test("SELECT * WHERE (simple predicates)") {
assert(checkPushdown(sql("SELECT * FROM foobar WHERE THEID < 1")).collect().size == 0)
assert(checkPushdown(sql("SELECT * FROM foobar WHERE THEID != 2")).collect().size == 2)
assert(checkPushdown(sql("SELECT * FROM foobar WHERE THEID = 1")).collect().size == 1)
assert(checkPushdown(sql("SELECT * FROM foobar WHERE NAME = 'fred'")).collect().size == 1)
assert(checkPushdown(sql("SELECT * FROM foobar WHERE NAME <=> 'fred'")).collect().size == 1)
assert(checkPushdown(sql("SELECT * FROM foobar WHERE NAME > 'fred'")).collect().size == 2)
assert(checkPushdown(sql("SELECT * FROM foobar WHERE NAME != 'fred'")).collect().size == 2)
assert(checkPushdown(sql("SELECT * FROM foobar WHERE NAME IN ('mary', 'fred')"))
.collect().size == 2)
assert(checkPushdown(sql("SELECT * FROM foobar WHERE NAME NOT IN ('fred')"))
.collect().size == 2)
assert(checkPushdown(sql("SELECT * FROM foobar WHERE THEID = 1 OR NAME = 'mary'"))
.collect().size == 2)
assert(checkPushdown(sql("SELECT * FROM foobar WHERE THEID = 1 OR NAME = 'mary' "
+ "AND THEID = 2")).collect().size == 2)
assert(checkPushdown(sql("SELECT * FROM foobar WHERE NAME LIKE 'fr%'")).collect().size == 1)
assert(checkPushdown(sql("SELECT * FROM foobar WHERE NAME LIKE '%ed'")).collect().size == 1)
assert(checkPushdown(sql("SELECT * FROM foobar WHERE NAME LIKE '%re%'")).collect().size == 1)
assert(checkPushdown(sql("SELECT * FROM nulltypes WHERE A IS NULL")).collect().size == 1)
assert(checkPushdown(sql("SELECT * FROM nulltypes WHERE A IS NOT NULL")).collect().size == 0)
// This is a test to reflect discussion in SPARK-12218.
// The older versions of spark have this kind of bugs in parquet data source.
val df1 = sql("SELECT * FROM foobar WHERE NOT (THEID != 2) OR NOT (NAME != 'mary')")
assert(df1.collect.toSet === Set(Row("mary", 2)))
// SPARK-22548: Incorrect nested AND expression pushed down to JDBC data source
val df2 = sql("SELECT * FROM foobar " +
"WHERE (THEID > 0 AND TRIM(NAME) = 'mary') OR (NAME = 'fred')")
assert(df2.collect.toSet === Set(Row("fred", 1), Row("mary", 2)))
assert(checkNotPushdown(sql("SELECT * FROM foobar WHERE (THEID + 1) < 2")).collect().size == 0)
assert(checkNotPushdown(sql("SELECT * FROM foobar WHERE (THEID + 2) != 4")).collect().size == 2)
}
test("SELECT COUNT(1) WHERE (predicates)") {
// Check if an answer is correct when Filter is removed from operations such as count() which
// does not require any columns. In some data sources, e.g., Parquet, `requiredColumns` in
// org.apache.spark.sql.sources.interfaces is not given in logical plans, but some filters
// are applied for columns with Filter producing wrong results. On the other hand, JDBCRDD
// correctly handles this case by assigning `requiredColumns` properly. See PR 10427 for more
// discussions.
assert(sql("SELECT COUNT(1) FROM foobar WHERE NAME = 'mary'").collect.toSet === Set(Row(1)))
}
test("SELECT * WHERE (quoted strings)") {
assert(sql("select * from foobar").where('NAME === "joe 'foo' \\"bar\\"").collect().size === 1)
}
test("SELECT first field") {
  // Projecting a single string column must return all values intact,
  // including the name containing embedded quotes.
  val names = sql("SELECT NAME FROM foobar").collect().map(_.getString(0)).sorted
  assert(names.length === 3)
  assert(names(0) === "fred")
  assert(names(1) === "joe 'foo' \"bar\"")
  assert(names(2) === "mary")
}
test("SELECT first field when fetchsize is two") {
val names = sql("SELECT NAME FROM fetchtwo").collect().map(x => x.getString(0)).sortWith(_ < _)
assert(names.size === 3)
assert(names(0).equals("fred"))
assert(names(1).equals("joe 'foo' \\"bar\\""))
assert(names(2).equals("mary"))
}
test("SELECT second field") {
  // Projecting the integer column must return exactly the ids 1, 2, 3.
  val ids = sql("SELECT THEID FROM foobar").collect().map(_.getInt(0)).sorted
  assert(ids.length === 3)
  assert(ids(0) === 1)
  assert(ids(1) === 2)
  assert(ids(2) === 3)
}
test("SELECT second field when fetchsize is two") {
val ids = sql("SELECT THEID FROM fetchtwo").collect().map(x => x.getInt(0)).sortWith(_ < _)
assert(ids.size === 3)
assert(ids(0) === 1)
assert(ids(1) === 2)
assert(ids(2) === 3)
}
test("SELECT * partitioned") {
val df = sql("SELECT * FROM parts")
checkNumPartitions(df, expectedNumPartitions = 3)
assert(df.collect().length == 3)
}
test("SELECT WHERE (simple predicates) partitioned") {
val df1 = sql("SELECT * FROM parts WHERE THEID < 1")
checkNumPartitions(df1, expectedNumPartitions = 3)
assert(df1.collect().length === 0)
val df2 = sql("SELECT * FROM parts WHERE THEID != 2")
checkNumPartitions(df2, expectedNumPartitions = 3)
assert(df2.collect().length === 2)
val df3 = sql("SELECT THEID FROM parts WHERE THEID = 1")
checkNumPartitions(df3, expectedNumPartitions = 3)
assert(df3.collect().length === 1)
}
test("SELECT second field partitioned") {
val ids = sql("SELECT THEID FROM parts").collect().map(x => x.getInt(0)).sortWith(_ < _)
assert(ids.size === 3)
assert(ids(0) === 1)
assert(ids(1) === 2)
assert(ids(2) === 3)
}
test("overflow of partition bound difference does not give negative stride") {
val df = sql("SELECT * FROM partsoverflow")
checkNumPartitions(df, expectedNumPartitions = 3)
assert(df.collect().length == 3)
}
test("Register JDBC query with renamed fields") {
// Regression test for bug SPARK-7345
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW renamed
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', dbtable '(select NAME as NAME1, NAME as NAME2 from TEST.PEOPLE)',
|user 'testUser', password 'testPass')
""".stripMargin.replaceAll("\\n", " "))
val df = sql("SELECT * FROM renamed")
assert(df.schema.fields.size == 2)
assert(df.schema.fields(0).name == "NAME1")
assert(df.schema.fields(1).name == "NAME2")
}
test("Basic API") {
  // DataFrameReader.jdbc with default options should read the whole table.
  val df = spark.read.jdbc(urlWithUserAndPass, "TEST.PEOPLE", new Properties())
  assert(df.collect().length === 3)
}
test("Missing partition columns") {
withView("tempPeople") {
val e = intercept[IllegalArgumentException] {
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW tempPeople
|USING org.apache.spark.sql.jdbc
|OPTIONS (
| url 'jdbc:h2:mem:testdb0;user=testUser;password=testPass',
| dbtable 'TEST.PEOPLE',
| lowerBound '0',
| upperBound '52',
| numPartitions '53',
| fetchSize '10000' )
""".stripMargin.replaceAll("\\n", " "))
}.getMessage
assert(e.contains("When reading JDBC data sources, users need to specify all or none " +
"for the following options: 'partitionColumn', 'lowerBound', 'upperBound', and " +
"'numPartitions'"))
}
}
test("Basic API with FetchSize") {
(0 to 4).foreach { size =>
val properties = new Properties()
properties.setProperty(JDBCOptions.JDBC_BATCH_FETCH_SIZE, size.toString)
assert(spark.read.jdbc(
urlWithUserAndPass, "TEST.PEOPLE", properties).collect().length === 3)
}
}
test("Partitioning via JDBCPartitioningInfo API") {
val df = spark.read.jdbc(urlWithUserAndPass, "TEST.PEOPLE", "THEID", 0, 4, 3, new Properties())
checkNumPartitions(df, expectedNumPartitions = 3)
assert(df.collect().length === 3)
}
test("Partitioning via list-of-where-clauses API") {
val parts = Array[String]("THEID < 2", "THEID >= 2")
val df = spark.read.jdbc(urlWithUserAndPass, "TEST.PEOPLE", parts, new Properties())
checkNumPartitions(df, expectedNumPartitions = 2)
assert(df.collect().length === 3)
}
test("Partitioning on column that might have null values.") {
val df = spark.read.jdbc(urlWithUserAndPass, "TEST.EMP", "theid", 0, 4, 3, new Properties())
checkNumPartitions(df, expectedNumPartitions = 3)
assert(df.collect().length === 4)
val df2 = spark.read.jdbc(urlWithUserAndPass, "TEST.EMP", "THEID", 0, 4, 3, new Properties())
checkNumPartitions(df2, expectedNumPartitions = 3)
assert(df2.collect().length === 4)
// partitioning on a nullable quoted column
assert(
spark.read.jdbc(urlWithUserAndPass, "TEST.EMP", """"Dept"""", 0, 4, 3, new Properties())
.collect().length === 4)
}
test("Partitioning on column where numPartitions is zero") {
val res = spark.read.jdbc(
url = urlWithUserAndPass,
table = "TEST.seq",
columnName = "id",
lowerBound = 0,
upperBound = 4,
numPartitions = 0,
connectionProperties = new Properties()
)
checkNumPartitions(res, expectedNumPartitions = 1)
assert(res.count() === 8)
}
test("Partitioning on column where numPartitions are more than the number of total rows") {
val res = spark.read.jdbc(
url = urlWithUserAndPass,
table = "TEST.seq",
columnName = "id",
lowerBound = 1,
upperBound = 5,
numPartitions = 10,
connectionProperties = new Properties()
)
checkNumPartitions(res, expectedNumPartitions = 4)
assert(res.count() === 8)
}
test("Partitioning on column where lowerBound is equal to upperBound") {
val res = spark.read.jdbc(
url = urlWithUserAndPass,
table = "TEST.seq",
columnName = "id",
lowerBound = 5,
upperBound = 5,
numPartitions = 4,
connectionProperties = new Properties()
)
checkNumPartitions(res, expectedNumPartitions = 1)
assert(res.count() === 8)
}
test("Partitioning on column where lowerBound is larger than upperBound") {
val e = intercept[IllegalArgumentException] {
spark.read.jdbc(
url = urlWithUserAndPass,
table = "TEST.seq",
columnName = "id",
lowerBound = 5,
upperBound = 1,
numPartitions = 3,
connectionProperties = new Properties()
)
}.getMessage
assert(e.contains("Operation not allowed: the lower bound of partitioning column " +
"is larger than the upper bound. Lower bound: 5; Upper bound: 1"))
}
test("SELECT * on partitioned table with a nullable partition column") {
val df = sql("SELECT * FROM nullparts")
checkNumPartitions(df, expectedNumPartitions = 3)
assert(df.collect().length == 4)
}
test("H2 integral types") {
val rows = sql("SELECT * FROM inttypes WHERE A IS NOT NULL").collect()
assert(rows.length === 1)
assert(rows(0).getInt(0) === 1)
assert(rows(0).getBoolean(1) === false)
assert(rows(0).getInt(2) === 3)
assert(rows(0).getInt(3) === 4)
assert(rows(0).getLong(4) === 1234567890123L)
}
test("H2 null entries") {
val rows = sql("SELECT * FROM inttypes WHERE A IS NULL").collect()
assert(rows.length === 1)
assert(rows(0).isNullAt(0))
assert(rows(0).isNullAt(1))
assert(rows(0).isNullAt(2))
assert(rows(0).isNullAt(3))
assert(rows(0).isNullAt(4))
}
test("H2 string types") {
val rows = sql("SELECT * FROM strtypes").collect()
assert(rows(0).getAs[Array[Byte]](0).sameElements(testBytes))
assert(rows(0).getString(1).equals("Sensitive"))
assert(rows(0).getString(2).equals("Insensitive"))
assert(rows(0).getString(3).equals("Twenty-byte CHAR"))
assert(rows(0).getAs[Array[Byte]](4).sameElements(testBytes))
assert(rows(0).getString(5).equals("I am a clob!"))
}
test("H2 time types") {
val rows = sql("SELECT * FROM timetypes").collect()
val cal = new GregorianCalendar(java.util.Locale.ROOT)
cal.setTime(rows(0).getAs[java.sql.Timestamp](0))
assert(cal.get(Calendar.HOUR_OF_DAY) === 12)
assert(cal.get(Calendar.MINUTE) === 34)
assert(cal.get(Calendar.SECOND) === 56)
cal.setTime(rows(0).getAs[java.sql.Timestamp](1))
assert(cal.get(Calendar.YEAR) === 1996)
assert(cal.get(Calendar.MONTH) === 0)
assert(cal.get(Calendar.DAY_OF_MONTH) === 1)
cal.setTime(rows(0).getAs[java.sql.Timestamp](2))
assert(cal.get(Calendar.YEAR) === 2002)
assert(cal.get(Calendar.MONTH) === 1)
assert(cal.get(Calendar.DAY_OF_MONTH) === 20)
assert(cal.get(Calendar.HOUR) === 11)
assert(cal.get(Calendar.MINUTE) === 22)
assert(cal.get(Calendar.SECOND) === 33)
assert(rows(0).getAs[java.sql.Timestamp](2).getNanos === 543543000)
}
test("test DATE types") {
val rows = spark.read.jdbc(
urlWithUserAndPass, "TEST.TIMETYPES", new Properties()).collect()
val cachedRows = spark.read.jdbc(urlWithUserAndPass, "TEST.TIMETYPES", new Properties())
.cache().collect()
assert(rows(0).getAs[java.sql.Date](1) === java.sql.Date.valueOf("1996-01-01"))
assert(rows(1).getAs[java.sql.Date](1) === null)
assert(cachedRows(0).getAs[java.sql.Date](1) === java.sql.Date.valueOf("1996-01-01"))
}
test("test DATE types in cache") {
withTempView("mycached_date") {
val rows = spark.read.jdbc(urlWithUserAndPass, "TEST.TIMETYPES", new Properties()).collect()
spark.read.jdbc(urlWithUserAndPass, "TEST.TIMETYPES", new Properties())
.cache().createOrReplaceTempView("mycached_date")
val cachedRows = sql("select * from mycached_date").collect()
assert(rows(0).getAs[java.sql.Date](1) === java.sql.Date.valueOf("1996-01-01"))
assert(cachedRows(0).getAs[java.sql.Date](1) === java.sql.Date.valueOf("1996-01-01"))
}
}
test("test types for null value") {
val rows = spark.read.jdbc(
urlWithUserAndPass, "TEST.NULLTYPES", new Properties()).collect()
assert((0 to 14).forall(i => rows(0).isNullAt(i)))
}
test("H2 floating-point types") {
val rows = sql("SELECT * FROM flttypes").collect()
assert(rows(0).getDouble(0) === 1.00000000000000022)
assert(rows(0).getDouble(1) === 1.00000011920928955)
assert(rows(0).getAs[BigDecimal](2) ===
new BigDecimal("123456789012345.543215432154321000"))
assert(rows(0).schema.fields(2).dataType === DecimalType(38, 18))
val result = sql("SELECT C FROM flttypes where C > C - 1").collect()
assert(result(0).getAs[BigDecimal](0) ===
new BigDecimal("123456789012345.543215432154321000"))
}
test("SQL query as table name") {
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW hack
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', dbtable '(SELECT B, B*B FROM TEST.FLTTYPES)',
| user 'testUser', password 'testPass')
""".stripMargin.replaceAll("\\n", " "))
val rows = sql("SELECT * FROM hack").collect()
assert(rows(0).getDouble(0) === 1.00000011920928955) // Yes, I meant ==.
// For some reason, H2 computes this square incorrectly...
assert(math.abs(rows(0).getDouble(1) - 1.00000023841859331) < 1e-12)
}
test("Pass extra properties via OPTIONS") {
// We set rowId to false during setup, which means that _ROWID_ column should be absent from
// all tables. If rowId is true (default), the query below doesn't throw an exception.
intercept[JdbcSQLException] {
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW abc
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', dbtable '(SELECT _ROWID_ FROM test.people)',
| user 'testUser', password 'testPass')
""".stripMargin.replaceAll("\\n", " "))
}
}
test("Remap types via JdbcDialects") {
JdbcDialects.registerDialect(testH2Dialect)
val df = spark.read.jdbc(urlWithUserAndPass, "TEST.PEOPLE", new Properties())
assert(df.schema.filter(_.dataType != org.apache.spark.sql.types.StringType).isEmpty)
val rows = df.collect()
assert(rows(0).get(0).isInstanceOf[String])
assert(rows(0).get(1).isInstanceOf[String])
JdbcDialects.unregisterDialect(testH2Dialect)
}
test("Map TINYINT to ByteType via JdbcDialects") {
JdbcDialects.registerDialect(testH2DialectTinyInt)
val df = spark.read.jdbc(urlWithUserAndPass, "test.inttypes", new Properties())
val rows = df.collect()
assert(rows.length === 2)
assert(rows(0).get(2).isInstanceOf[Byte])
assert(rows(0).getByte(2) === 3)
assert(rows(1).isNullAt(2))
JdbcDialects.unregisterDialect(testH2DialectTinyInt)
}
test("Default jdbc dialect registration") {
  // Every bundled dialect should be resolvable from its JDBC URL prefix;
  // an unrecognized URL falls back to the no-op dialect.
  val expectations = Seq(
    "jdbc:mysql://127.0.0.1/db" -> MySQLDialect,
    "jdbc:postgresql://127.0.0.1/db" -> PostgresDialect,
    "jdbc:db2://127.0.0.1/db" -> DB2Dialect,
    "jdbc:sqlserver://127.0.0.1/db" -> MsSqlServerDialect,
    "jdbc:derby:db" -> DerbyDialect,
    "test.invalid" -> NoopDialect)
  expectations.foreach { case (jdbcUrl, expectedDialect) =>
    assert(JdbcDialects.get(jdbcUrl) == expectedDialect)
  }
}
test("quote column names by jdbc dialect") {
  // MySQL quotes identifiers with backticks; Postgres and Derby use
  // double quotes.
  val mysqlDialect = JdbcDialects.get("jdbc:mysql://127.0.0.1/db")
  val postgresDialect = JdbcDialects.get("jdbc:postgresql://127.0.0.1/db")
  val derbyDialect = JdbcDialects.get("jdbc:derby:db")
  val columns = Seq("abc", "key")
  assert(columns.map(mysqlDialect.quoteIdentifier) === Seq("`abc`", "`key`"))
  assert(columns.map(postgresDialect.quoteIdentifier) === Seq(""""abc"""", """"key""""))
  assert(columns.map(derbyDialect.quoteIdentifier) === Seq(""""abc"""", """"key""""))
}
test("compile filters") {
  // Exercises JDBCRDD's private `compileFilter` method (via ScalaTest's
  // PrivateMethodTester), which translates Catalyst source Filters into SQL
  // WHERE-clause fragments for the generic "jdbc:" dialect.
  val compileFilter = PrivateMethod[Option[String]](Symbol("compileFilter"))
  // Returns the compiled SQL fragment, or "" when the filter is not
  // compilable (i.e. cannot be pushed down).
  def doCompileFilter(f: Filter): String =
    JDBCRDD invokePrivate compileFilter(f, JdbcDialects.get("jdbc:")) getOrElse("")
  assert(doCompileFilter(EqualTo("col0", 3)) === """"col0" = 3""")
  assert(doCompileFilter(Not(EqualTo("col1", "abc"))) === """(NOT ("col1" = 'abc'))""")
  assert(doCompileFilter(And(EqualTo("col0", 0), EqualTo("col1", "def")))
    === """("col0" = 0) AND ("col1" = 'def')""")
  assert(doCompileFilter(Or(EqualTo("col0", 2), EqualTo("col1", "ghi")))
    === """("col0" = 2) OR ("col1" = 'ghi')""")
  assert(doCompileFilter(LessThan("col0", 5)) === """"col0" < 5""")
  // Timestamp and Date values must be rendered as quoted SQL literals.
  assert(doCompileFilter(LessThan("col3",
    Timestamp.valueOf("1995-11-21 00:00:00.0"))) === """"col3" < '1995-11-21 00:00:00.0'""")
  assert(doCompileFilter(LessThan("col4", Date.valueOf("1983-08-04")))
    === """"col4" < '1983-08-04'""")
  assert(doCompileFilter(LessThanOrEqual("col0", 5)) === """"col0" <= 5""")
  assert(doCompileFilter(GreaterThan("col0", 3)) === """"col0" > 3""")
  assert(doCompileFilter(GreaterThanOrEqual("col0", 3)) === """"col0" >= 3""")
  assert(doCompileFilter(In("col1", Array("jkl"))) === """"col1" IN ('jkl')""")
  // An empty IN list compiles to a CASE expression so that NULL input still
  // yields NULL (matching SQL three-valued logic) rather than plain FALSE.
  assert(doCompileFilter(In("col1", Array.empty)) ===
    """CASE WHEN "col1" IS NULL THEN NULL ELSE FALSE END""")
  assert(doCompileFilter(Not(In("col1", Array("mno", "pqr"))))
    === """(NOT ("col1" IN ('mno', 'pqr')))""")
  assert(doCompileFilter(IsNull("col1")) === """"col1" IS NULL""")
  assert(doCompileFilter(IsNotNull("col1")) === """"col1" IS NOT NULL""")
  // EqualNullSafe (<=>) expands to an explicit NULL-safe comparison.
  assert(doCompileFilter(And(EqualNullSafe("col0", "abc"), EqualTo("col1", "def")))
    === """((NOT ("col0" != 'abc' OR "col0" IS NULL OR 'abc' IS NULL) """
      + """OR ("col0" IS NULL AND 'abc' IS NULL))) AND ("col1" = 'def')""")
}
test("Dialect unregister") {
JdbcDialects.unregisterDialect(H2Dialect)
try {
JdbcDialects.registerDialect(testH2Dialect)
JdbcDialects.unregisterDialect(testH2Dialect)
assert(JdbcDialects.get(urlWithUserAndPass) == NoopDialect)
} finally {
JdbcDialects.registerDialect(H2Dialect)
}
}
test("Aggregated dialects") {
val agg = new AggregatedDialect(List(new JdbcDialect {
override def canHandle(url: String) : Boolean = url.startsWith("jdbc:h2:")
override def getCatalystType(
sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] =
if (sqlType % 2 == 0) {
Some(LongType)
} else {
None
}
override def quoteIdentifier(colName: String): String = {
s"My $colName quoteIdentifier"
}
override def getTableExistsQuery(table: String): String = {
s"My $table Table"
}
override def getSchemaQuery(table: String): String = {
s"My $table Schema"
}
override def isCascadingTruncateTable(): Option[Boolean] = Some(true)
}, testH2Dialect))
assert(agg.canHandle("jdbc:h2:xxx"))
assert(!agg.canHandle("jdbc:h2"))
assert(agg.getCatalystType(0, "", 1, null) === Some(LongType))
assert(agg.getCatalystType(1, "", 1, null) === Some(StringType))
assert(agg.isCascadingTruncateTable() === Some(true))
assert(agg.quoteIdentifier ("Dummy") === "My Dummy quoteIdentifier")
assert(agg.getTableExistsQuery ("Dummy") === "My Dummy Table")
assert(agg.getSchemaQuery ("Dummy") === "My Dummy Schema")
}
test("Aggregated dialects: isCascadingTruncateTable") {
def genDialect(cascadingTruncateTable: Option[Boolean]): JdbcDialect = new JdbcDialect {
override def canHandle(url: String): Boolean = true
override def getCatalystType(
sqlType: Int,
typeName: String,
size: Int,
md: MetadataBuilder): Option[DataType] = None
override def isCascadingTruncateTable(): Option[Boolean] = cascadingTruncateTable
}
def testDialects(cascadings: List[Option[Boolean]], expected: Option[Boolean]): Unit = {
val dialects = cascadings.map(genDialect(_))
val agg = new AggregatedDialect(dialects)
assert(agg.isCascadingTruncateTable() === expected)
}
testDialects(List(Some(true), Some(false), None), Some(true))
testDialects(List(Some(true), Some(true), None), Some(true))
testDialects(List(Some(false), Some(false), None), None)
testDialects(List(Some(true), Some(true)), Some(true))
testDialects(List(Some(false), Some(false)), Some(false))
testDialects(List(None, None), None)
}
test("DB2Dialect type mapping") {
val db2Dialect = JdbcDialects.get("jdbc:db2://127.0.0.1/db")
assert(db2Dialect.getJDBCType(StringType).map(_.databaseTypeDefinition).get == "CLOB")
assert(db2Dialect.getJDBCType(BooleanType).map(_.databaseTypeDefinition).get == "CHAR(1)")
assert(db2Dialect.getJDBCType(ShortType).map(_.databaseTypeDefinition).get == "SMALLINT")
assert(db2Dialect.getJDBCType(ByteType).map(_.databaseTypeDefinition).get == "SMALLINT")
// test db2 dialect mappings on read
assert(db2Dialect.getCatalystType(java.sql.Types.REAL, "REAL", 1, null) == Option(FloatType))
assert(db2Dialect.getCatalystType(java.sql.Types.OTHER, "DECFLOAT", 1, null) ==
Option(DecimalType(38, 18)))
assert(db2Dialect.getCatalystType(java.sql.Types.OTHER, "XML", 1, null) == Option(StringType))
assert(db2Dialect.getCatalystType(java.sql.Types.OTHER, "TIMESTAMP WITH TIME ZONE", 1, null) ==
Option(TimestampType))
}
test("PostgresDialect type mapping") {
val Postgres = JdbcDialects.get("jdbc:postgresql://127.0.0.1/db")
val md = new MetadataBuilder().putLong("scale", 0)
assert(Postgres.getCatalystType(java.sql.Types.OTHER, "json", 1, null) === Some(StringType))
assert(Postgres.getCatalystType(java.sql.Types.OTHER, "jsonb", 1, null) === Some(StringType))
assert(Postgres.getCatalystType(java.sql.Types.ARRAY, "_numeric", 0, md) ==
Some(ArrayType(DecimalType.SYSTEM_DEFAULT)))
assert(Postgres.getCatalystType(java.sql.Types.ARRAY, "_bpchar", 64, md) ==
Some(ArrayType(StringType)))
assert(Postgres.getJDBCType(FloatType).map(_.databaseTypeDefinition).get == "FLOAT4")
assert(Postgres.getJDBCType(DoubleType).map(_.databaseTypeDefinition).get == "FLOAT8")
assert(Postgres.getJDBCType(ByteType).map(_.databaseTypeDefinition).get == "SMALLINT")
}
test("DerbyDialect jdbc type mapping") {
val derbyDialect = JdbcDialects.get("jdbc:derby:db")
assert(derbyDialect.getJDBCType(StringType).map(_.databaseTypeDefinition).get == "CLOB")
assert(derbyDialect.getJDBCType(ByteType).map(_.databaseTypeDefinition).get == "SMALLINT")
assert(derbyDialect.getJDBCType(BooleanType).map(_.databaseTypeDefinition).get == "BOOLEAN")
}
test("OracleDialect jdbc type mapping") {
val oracleDialect = JdbcDialects.get("jdbc:oracle")
val metadata = new MetadataBuilder().putString("name", "test_column").putLong("scale", -127)
assert(oracleDialect.getCatalystType(java.sql.Types.NUMERIC, "float", 1, metadata) ==
Some(DecimalType(DecimalType.MAX_PRECISION, 10)))
assert(oracleDialect.getCatalystType(java.sql.Types.NUMERIC, "numeric", 0, null) ==
Some(DecimalType(DecimalType.MAX_PRECISION, 10)))
assert(oracleDialect.getCatalystType(OracleDialect.BINARY_FLOAT, "BINARY_FLOAT", 0, null) ==
Some(FloatType))
assert(oracleDialect.getCatalystType(OracleDialect.BINARY_DOUBLE, "BINARY_DOUBLE", 0, null) ==
Some(DoubleType))
assert(oracleDialect.getCatalystType(OracleDialect.TIMESTAMPTZ, "TIMESTAMP", 0, null) ==
Some(TimestampType))
}
test("MsSqlServerDialect jdbc type mapping") {
val msSqlServerDialect = JdbcDialects.get("jdbc:sqlserver")
assert(msSqlServerDialect.getJDBCType(TimestampType).map(_.databaseTypeDefinition).get ==
"DATETIME")
assert(msSqlServerDialect.getJDBCType(StringType).map(_.databaseTypeDefinition).get ==
"NVARCHAR(MAX)")
assert(msSqlServerDialect.getJDBCType(BooleanType).map(_.databaseTypeDefinition).get ==
"BIT")
assert(msSqlServerDialect.getJDBCType(BinaryType).map(_.databaseTypeDefinition).get ==
"VARBINARY(MAX)")
Seq(true, false).foreach { flag =>
withSQLConf(SQLConf.LEGACY_MSSQLSERVER_NUMERIC_MAPPING_ENABLED.key -> s"$flag") {
if (SQLConf.get.legacyMsSqlServerNumericMappingEnabled) {
assert(msSqlServerDialect.getJDBCType(ShortType).map(_.databaseTypeDefinition).isEmpty)
} else {
assert(msSqlServerDialect.getJDBCType(ShortType).map(_.databaseTypeDefinition).get ==
"SMALLINT")
}
}
}
}
test("SPARK-28152 MsSqlServerDialect catalyst type mapping") {
val msSqlServerDialect = JdbcDialects.get("jdbc:sqlserver")
val metadata = new MetadataBuilder().putLong("scale", 1)
Seq(true, false).foreach { flag =>
withSQLConf(SQLConf.LEGACY_MSSQLSERVER_NUMERIC_MAPPING_ENABLED.key -> s"$flag") {
if (SQLConf.get.legacyMsSqlServerNumericMappingEnabled) {
assert(msSqlServerDialect.getCatalystType(java.sql.Types.SMALLINT, "SMALLINT", 1,
metadata).isEmpty)
assert(msSqlServerDialect.getCatalystType(java.sql.Types.REAL, "REAL", 1,
metadata).isEmpty)
} else {
assert(msSqlServerDialect.getCatalystType(java.sql.Types.SMALLINT, "SMALLINT", 1,
metadata).get == ShortType)
assert(msSqlServerDialect.getCatalystType(java.sql.Types.REAL, "REAL", 1,
metadata).get == FloatType)
}
}
}
}
test("table exists query by jdbc dialect") {
val MySQL = JdbcDialects.get("jdbc:mysql://127.0.0.1/db")
val Postgres = JdbcDialects.get("jdbc:postgresql://127.0.0.1/db")
val db2 = JdbcDialects.get("jdbc:db2://127.0.0.1/db")
val h2 = JdbcDialects.get(url)
val derby = JdbcDialects.get("jdbc:derby:db")
val table = "weblogs"
val defaultQuery = s"SELECT * FROM $table WHERE 1=0"
val limitQuery = s"SELECT 1 FROM $table LIMIT 1"
assert(MySQL.getTableExistsQuery(table) == limitQuery)
assert(Postgres.getTableExistsQuery(table) == limitQuery)
assert(db2.getTableExistsQuery(table) == defaultQuery)
assert(h2.getTableExistsQuery(table) == defaultQuery)
assert(derby.getTableExistsQuery(table) == defaultQuery)
}
test("truncate table query by jdbc dialect") {
val mysql = JdbcDialects.get("jdbc:mysql://127.0.0.1/db")
val postgres = JdbcDialects.get("jdbc:postgresql://127.0.0.1/db")
val db2 = JdbcDialects.get("jdbc:db2://127.0.0.1/db")
val h2 = JdbcDialects.get(url)
val derby = JdbcDialects.get("jdbc:derby:db")
val oracle = JdbcDialects.get("jdbc:oracle://127.0.0.1/db")
val teradata = JdbcDialects.get("jdbc:teradata://127.0.0.1/db")
val table = "weblogs"
val defaultQuery = s"TRUNCATE TABLE $table"
val postgresQuery = s"TRUNCATE TABLE ONLY $table"
val teradataQuery = s"DELETE FROM $table ALL"
Seq(mysql, db2, h2, derby).foreach{ dialect =>
assert(dialect.getTruncateQuery(table, Some(true)) == defaultQuery)
}
assert(postgres.getTruncateQuery(table) == postgresQuery)
assert(oracle.getTruncateQuery(table) == defaultQuery)
assert(teradata.getTruncateQuery(table) == teradataQuery)
}
test("SPARK-22880: Truncate table with CASCADE by jdbc dialect") {
// cascade in a truncate should only be applied for databases that support this,
// even if the parameter is passed.
val mysql = JdbcDialects.get("jdbc:mysql://127.0.0.1/db")
val postgres = JdbcDialects.get("jdbc:postgresql://127.0.0.1/db")
val db2 = JdbcDialects.get("jdbc:db2://127.0.0.1/db")
val h2 = JdbcDialects.get(url)
val derby = JdbcDialects.get("jdbc:derby:db")
val oracle = JdbcDialects.get("jdbc:oracle://127.0.0.1/db")
val teradata = JdbcDialects.get("jdbc:teradata://127.0.0.1/db")
val table = "weblogs"
val defaultQuery = s"TRUNCATE TABLE $table"
val postgresQuery = s"TRUNCATE TABLE ONLY $table CASCADE"
val oracleQuery = s"TRUNCATE TABLE $table CASCADE"
val teradataQuery = s"DELETE FROM $table ALL"
Seq(mysql, db2, h2, derby).foreach{ dialect =>
assert(dialect.getTruncateQuery(table, Some(true)) == defaultQuery)
}
assert(postgres.getTruncateQuery(table, Some(true)) == postgresQuery)
assert(oracle.getTruncateQuery(table, Some(true)) == oracleQuery)
assert(teradata.getTruncateQuery(table, Some(true)) == teradataQuery)
}
test("Test DataFrame.where for Date and Timestamp") {
  // Regression test for SPARK-11788: Date and Timestamp literals used in a
  // `where` clause must produce valid JDBC predicates.
  // (Removed a stray trailing semicolon — not idiomatic Scala.)
  val timestamp = java.sql.Timestamp.valueOf("2001-02-20 11:22:33.543543")
  val date = java.sql.Date.valueOf("1995-01-01")
  val jdbcDf = spark.read.jdbc(urlWithUserAndPass, "TEST.TIMETYPES", new Properties())
  val rows = jdbcDf.where($"B" > date && $"C" > timestamp).collect()
  assert(rows(0).getAs[java.sql.Date](1) === java.sql.Date.valueOf("1996-01-01"))
  assert(rows(0).getAs[java.sql.Timestamp](2)
    === java.sql.Timestamp.valueOf("2002-02-20 11:22:33.543543"))
}
test("test credentials in the properties are not in plan output") {
val df = sql("SELECT * FROM parts")
val explain = ExplainCommand(df.queryExecution.logical, ExtendedMode)
spark.sessionState.executePlan(explain).executedPlan.executeCollect().foreach {
r => assert(!List("testPass", "testUser").exists(r.toString.contains))
}
// test the JdbcRelation toString output
df.queryExecution.analyzed.collect {
case r: LogicalRelation =>
assert(r.relation.toString == "JDBCRelation(TEST.PEOPLE) [numPartitions=3]")
}
}
test("test credentials in the connection url are not in the plan output") {
val df = spark.read.jdbc(urlWithUserAndPass, "TEST.PEOPLE", new Properties())
val explain = ExplainCommand(df.queryExecution.logical, ExtendedMode)
spark.sessionState.executePlan(explain).executedPlan.executeCollect().foreach {
r => assert(!List("testPass", "testUser").exists(r.toString.contains))
}
}
test("hide credentials in create and describe a persistent/temp table") {
val password = "testPass"
val tableName = "tab1"
Seq("TABLE", "TEMPORARY VIEW").foreach { tableType =>
withTable(tableName) {
val df = sql(
s"""
|CREATE $tableType $tableName
|USING org.apache.spark.sql.jdbc
|OPTIONS (
| url '$urlWithUserAndPass',
| dbtable 'TEST.PEOPLE',
| user 'testUser',
| password '$password')
""".stripMargin)
val explain = ExplainCommand(df.queryExecution.logical, ExtendedMode)
spark.sessionState.executePlan(explain).executedPlan.executeCollect().foreach { r =>
assert(!r.toString.contains(password))
}
sql(s"DESC FORMATTED $tableName").collect().foreach { r =>
assert(!r.toString().contains(password))
}
}
}
}
test("Hide credentials in show create table") {
val userName = "testUser"
val password = "testPass"
val tableName = "tab1"
val dbTable = "TEST.PEOPLE"
withTable(tableName) {
sql(
s"""
|CREATE TABLE $tableName
|USING org.apache.spark.sql.jdbc
|OPTIONS (
| url '$urlWithUserAndPass',
| dbtable '$dbTable',
| user '$userName',
| password '$password')
""".stripMargin)
val show = ShowCreateTableCommand(TableIdentifier(tableName))
spark.sessionState.executePlan(show).executedPlan.executeCollect().foreach { r =>
assert(!r.toString.contains(password))
assert(r.toString.contains(dbTable))
assert(r.toString.contains(userName))
}
sql(s"SHOW CREATE TABLE $tableName").collect().foreach { r =>
assert(!r.toString.contains(password))
assert(r.toString.contains(dbTable))
assert(r.toString.contains(userName))
}
withSQLConf(SQLConf.SQL_OPTIONS_REDACTION_PATTERN.key -> "(?i)dbtable|user") {
spark.sessionState.executePlan(show).executedPlan.executeCollect().foreach { r =>
assert(!r.toString.contains(password))
assert(!r.toString.contains(dbTable))
assert(!r.toString.contains(userName))
}
}
}
}
test("Replace CatalogUtils.maskCredentials with SQLConf.get.redactOptions") {
val password = "testPass"
val tableName = "tab1"
withTable(tableName) {
sql(
s"""
|CREATE TABLE $tableName
|USING org.apache.spark.sql.jdbc
|OPTIONS (
| url '$urlWithUserAndPass',
| dbtable 'TEST.PEOPLE',
| user 'testUser',
| password '$password')
""".stripMargin)
val storageProps = sql(s"DESC FORMATTED $tableName")
.filter("col_name = 'Storage Properties'")
.select("data_type").collect()
assert(storageProps.length === 1)
storageProps.foreach { r =>
assert(r.getString(0).contains(s"url=${Utils.REDACTION_REPLACEMENT_TEXT}"))
assert(r.getString(0).contains(s"password=${Utils.REDACTION_REPLACEMENT_TEXT}"))
}
val information = sql(s"SHOW TABLE EXTENDED LIKE '$tableName'")
.select("information").collect()
assert(information.length === 1)
information.foreach { r =>
assert(r.getString(0).contains(s"url=${Utils.REDACTION_REPLACEMENT_TEXT}"))
assert(r.getString(0).contains(s"password=${Utils.REDACTION_REPLACEMENT_TEXT}"))
}
val createTabStmt = sql(s"SHOW CREATE TABLE $tableName")
.select("createtab_stmt").collect()
assert(createTabStmt.length === 1)
createTabStmt.foreach { r =>
assert(r.getString(0).contains(s"`url` '${Utils.REDACTION_REPLACEMENT_TEXT}'"))
assert(r.getString(0).contains(s"`password` '${Utils.REDACTION_REPLACEMENT_TEXT}'"))
}
}
}
test("SPARK 12941: The data type mapping for StringType to Oracle") {
  // The Oracle dialect is expected to map StringType to VARCHAR2(255).
  val oracleDialect = JdbcDialects.get("jdbc:oracle://127.0.0.1/db")
  val mapped = oracleDialect.getJDBCType(StringType).map(_.databaseTypeDefinition)
  assert(mapped.get == "VARCHAR2(255)")
}
test("SPARK-16625: General data types to be mapped to Oracle") {
def getJdbcType(dialect: JdbcDialect, dt: DataType): String = {
dialect.getJDBCType(dt).orElse(JdbcUtils.getCommonJDBCType(dt)).
map(_.databaseTypeDefinition).get
}
val oracleDialect = JdbcDialects.get("jdbc:oracle://127.0.0.1/db")
assert(getJdbcType(oracleDialect, BooleanType) == "NUMBER(1)")
assert(getJdbcType(oracleDialect, IntegerType) == "NUMBER(10)")
assert(getJdbcType(oracleDialect, LongType) == "NUMBER(19)")
assert(getJdbcType(oracleDialect, FloatType) == "NUMBER(19, 4)")
assert(getJdbcType(oracleDialect, DoubleType) == "NUMBER(19, 4)")
assert(getJdbcType(oracleDialect, ByteType) == "NUMBER(3)")
assert(getJdbcType(oracleDialect, ShortType) == "NUMBER(5)")
assert(getJdbcType(oracleDialect, StringType) == "VARCHAR2(255)")
assert(getJdbcType(oracleDialect, BinaryType) == "BLOB")
assert(getJdbcType(oracleDialect, DateType) == "DATE")
assert(getJdbcType(oracleDialect, TimestampType) == "TIMESTAMP")
}
  /** Runs `sqlString` and asserts that the query returns no rows. */
  private def assertEmptyQuery(sqlString: String): Unit = {
    assert(sql(sqlString).collect().isEmpty)
  }
test("SPARK-15916: JDBC filter operator push down should respect operator precedence") {
val TRUE = "NAME != 'non_exists'"
val FALSE1 = "THEID > 1000000000"
val FALSE2 = "THEID < -1000000000"
assertEmptyQuery(s"SELECT * FROM foobar WHERE ($TRUE OR $FALSE1) AND $FALSE2")
assertEmptyQuery(s"SELECT * FROM foobar WHERE $FALSE1 AND ($FALSE2 OR $TRUE)")
// Tests JDBCPartition whereClause clause push down.
withTempView("tempFrame") {
val jdbcPartitionWhereClause = s"$FALSE1 OR $TRUE"
val df = spark.read.jdbc(
urlWithUserAndPass,
"TEST.PEOPLE",
predicates = Array[String](jdbcPartitionWhereClause),
new Properties())
df.createOrReplaceTempView("tempFrame")
assertEmptyQuery(s"SELECT * FROM tempFrame where $FALSE2")
}
}
test("SPARK-16387: Reserved SQL words are not escaped by JDBC writer") {
val df = spark.createDataset(Seq("a", "b", "c")).toDF("order")
val schema = JdbcUtils.schemaString(
df.schema,
df.sqlContext.conf.caseSensitiveAnalysis,
"jdbc:mysql://localhost:3306/temp")
assert(schema.contains("`order` TEXT"))
}
test("SPARK-18141: Predicates on quoted column names in the jdbc data source") {
assert(sql("SELECT * FROM mixedCaseCols WHERE Id < 1").collect().size == 0)
assert(sql("SELECT * FROM mixedCaseCols WHERE Id <= 1").collect().size == 1)
assert(sql("SELECT * FROM mixedCaseCols WHERE Id > 1").collect().size == 2)
assert(sql("SELECT * FROM mixedCaseCols WHERE Id >= 1").collect().size == 3)
assert(sql("SELECT * FROM mixedCaseCols WHERE Id = 1").collect().size == 1)
assert(sql("SELECT * FROM mixedCaseCols WHERE Id != 2").collect().size == 2)
assert(sql("SELECT * FROM mixedCaseCols WHERE Id <=> 2").collect().size == 1)
assert(sql("SELECT * FROM mixedCaseCols WHERE Name LIKE 'fr%'").collect().size == 1)
assert(sql("SELECT * FROM mixedCaseCols WHERE Name LIKE '%ed'").collect().size == 1)
assert(sql("SELECT * FROM mixedCaseCols WHERE Name LIKE '%re%'").collect().size == 1)
assert(sql("SELECT * FROM mixedCaseCols WHERE Name IS NULL").collect().size == 1)
assert(sql("SELECT * FROM mixedCaseCols WHERE Name IS NOT NULL").collect().size == 2)
assert(sql("SELECT * FROM mixedCaseCols").filter($"Name".isin()).collect().size == 0)
assert(sql("SELECT * FROM mixedCaseCols WHERE Name IN ('mary', 'fred')").collect().size == 2)
assert(sql("SELECT * FROM mixedCaseCols WHERE Name NOT IN ('fred')").collect().size == 1)
assert(sql("SELECT * FROM mixedCaseCols WHERE Id = 1 OR Name = 'mary'").collect().size == 2)
assert(sql("SELECT * FROM mixedCaseCols WHERE Name = 'mary' AND Id = 2").collect().size == 1)
}
test("SPARK-18419: Fix `asConnectionProperties` to filter case-insensitively") {
val parameters = Map(
"url" -> "jdbc:mysql://localhost:3306/temp",
"dbtable" -> "t1",
"numPartitions" -> "10")
assert(new JDBCOptions(parameters).asConnectionProperties.isEmpty)
assert(new JDBCOptions(CaseInsensitiveMap(parameters)).asConnectionProperties.isEmpty)
}
test("SPARK-16848: jdbc API throws an exception for user specified schema") {
val schema = StructType(Seq(
StructField("name", StringType, false), StructField("theid", IntegerType, false)))
val parts = Array[String]("THEID < 2", "THEID >= 2")
val e1 = intercept[AnalysisException] {
spark.read.schema(schema).jdbc(urlWithUserAndPass, "TEST.PEOPLE", parts, new Properties())
}.getMessage
assert(e1.contains("User specified schema not supported with `jdbc`"))
val e2 = intercept[AnalysisException] {
spark.read.schema(schema).jdbc(urlWithUserAndPass, "TEST.PEOPLE", new Properties())
}.getMessage
assert(e2.contains("User specified schema not supported with `jdbc`"))
}
test("jdbc API support custom schema") {
val parts = Array[String]("THEID < 2", "THEID >= 2")
val customSchema = "NAME STRING, THEID INT"
val props = new Properties()
props.put("customSchema", customSchema)
val df = spark.read.jdbc(urlWithUserAndPass, "TEST.PEOPLE", parts, props)
assert(df.schema.size === 2)
assert(df.schema === CatalystSqlParser.parseTableSchema(customSchema))
assert(df.count() === 3)
}
test("jdbc API custom schema DDL-like strings.") {
withTempView("people_view") {
val customSchema = "NAME STRING, THEID INT"
sql(
s"""
|CREATE TEMPORARY VIEW people_view
|USING org.apache.spark.sql.jdbc
|OPTIONS (uRl '$url', DbTaBlE 'TEST.PEOPLE', User 'testUser', PassWord 'testPass',
|customSchema '$customSchema')
""".stripMargin.replaceAll("\\n", " "))
val df = sql("select * from people_view")
assert(df.schema.length === 2)
assert(df.schema === CatalystSqlParser.parseTableSchema(customSchema))
assert(df.count() === 3)
}
}
test("SPARK-15648: teradataDialect StringType data mapping") {
val teradataDialect = JdbcDialects.get("jdbc:teradata://127.0.0.1/db")
assert(teradataDialect.getJDBCType(StringType).
map(_.databaseTypeDefinition).get == "VARCHAR(255)")
}
test("SPARK-15648: teradataDialect BooleanType data mapping") {
val teradataDialect = JdbcDialects.get("jdbc:teradata://127.0.0.1/db")
assert(teradataDialect.getJDBCType(BooleanType).
map(_.databaseTypeDefinition).get == "CHAR(1)")
}
test("Checking metrics correctness with JDBC") {
val foobarCnt = spark.table("foobar").count()
val res = InputOutputMetricsHelper.run(sql("SELECT * FROM foobar").toDF())
assert(res === (foobarCnt, 0L, foobarCnt) :: Nil)
}
test("unsupported types") {
var e = intercept[SQLException] {
spark.read.jdbc(urlWithUserAndPass, "TEST.TIMEZONE", new Properties()).collect()
}.getMessage
assert(e.contains("Unsupported type TIMESTAMP_WITH_TIMEZONE"))
e = intercept[SQLException] {
spark.read.jdbc(urlWithUserAndPass, "TEST.ARRAY", new Properties()).collect()
}.getMessage
assert(e.contains("Unsupported type ARRAY"))
}
test("SPARK-19318: Connection properties keys should be case-sensitive.") {
def testJdbcOptions(options: JDBCOptions): Unit = {
// Spark JDBC data source options are case-insensitive
assert(options.tableOrQuery == "t1")
// When we convert it to properties, it should be case-sensitive.
assert(options.asProperties.size == 3)
assert(options.asProperties.get("customkey") == null)
assert(options.asProperties.get("customKey") == "a-value")
assert(options.asConnectionProperties.size == 1)
assert(options.asConnectionProperties.get("customkey") == null)
assert(options.asConnectionProperties.get("customKey") == "a-value")
}
val parameters = Map("url" -> url, "dbTAblE" -> "t1", "customKey" -> "a-value")
testJdbcOptions(new JDBCOptions(parameters))
testJdbcOptions(new JDBCOptions(CaseInsensitiveMap(parameters)))
// test add/remove key-value from the case-insensitive map
var modifiedParameters =
(CaseInsensitiveMap(Map.empty) ++ parameters).asInstanceOf[Map[String, String]]
testJdbcOptions(new JDBCOptions(modifiedParameters))
modifiedParameters -= "dbtable"
assert(modifiedParameters.get("dbTAblE").isEmpty)
modifiedParameters -= "customkey"
assert(modifiedParameters.get("customKey").isEmpty)
modifiedParameters += ("customKey" -> "a-value")
modifiedParameters += ("dbTable" -> "t1")
testJdbcOptions(new JDBCOptions(modifiedParameters))
assert ((modifiedParameters -- parameters.keys).size == 0)
}
test("SPARK-19318: jdbc data source options should be treated case-insensitive.") {
val df = spark.read.format("jdbc")
.option("Url", urlWithUserAndPass)
.option("DbTaBle", "TEST.PEOPLE")
.load()
assert(df.count() == 3)
withTempView("people_view") {
sql(
s"""
|CREATE TEMPORARY VIEW people_view
|USING org.apache.spark.sql.jdbc
|OPTIONS (uRl '$url', DbTaBlE 'TEST.PEOPLE', User 'testUser', PassWord 'testPass')
""".stripMargin.replaceAll("\\n", " "))
assert(sql("select * from people_view").count() == 3)
}
}
test("SPARK-21519: option sessionInitStatement, run SQL to initialize the database session.") {
val initSQL1 = "SET @MYTESTVAR 21519"
val df1 = spark.read.format("jdbc")
.option("url", urlWithUserAndPass)
.option("dbtable", "(SELECT NVL(@MYTESTVAR, -1))")
.option("sessionInitStatement", initSQL1)
.load()
assert(df1.collect() === Array(Row(21519)))
val initSQL2 = "SET SCHEMA DUMMY"
val df2 = spark.read.format("jdbc")
.option("url", urlWithUserAndPass)
.option("dbtable", "TEST.PEOPLE")
.option("sessionInitStatement", initSQL2)
.load()
val e = intercept[SparkException] {df2.collect()}.getMessage
assert(e.contains("""Schema "DUMMY" not found"""))
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW test_sessionInitStatement
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$urlWithUserAndPass',
|dbtable '(SELECT NVL(@MYTESTVAR1, -1), NVL(@MYTESTVAR2, -1))',
|sessionInitStatement 'SET @MYTESTVAR1 21519; SET @MYTESTVAR2 1234')
""".stripMargin)
val df3 = sql("SELECT * FROM test_sessionInitStatement")
assert(df3.collect() === Array(Row(21519, 1234)))
}
test("jdbc data source shouldn't have unnecessary metadata in its schema") {
val schema = StructType(Seq(
StructField("NAME", StringType, true), StructField("THEID", IntegerType, true)))
val df = spark.read.format("jdbc")
.option("Url", urlWithUserAndPass)
.option("DbTaBle", "TEST.PEOPLE")
.load()
assert(df.schema === schema)
withTempView("people_view") {
sql(
s"""
|CREATE TEMPORARY VIEW people_view
|USING org.apache.spark.sql.jdbc
|OPTIONS (uRl '$url', DbTaBlE 'TEST.PEOPLE', User 'testUser', PassWord 'testPass')
""".stripMargin.replaceAll("\\n", " "))
assert(sql("select * from people_view").schema === schema)
}
}
test("SPARK-23856 Spark jdbc setQueryTimeout option") {
val numJoins = 100
val longRunningQuery =
s"SELECT t0.NAME AS c0, ${(1 to numJoins).map(i => s"t$i.NAME AS c$i").mkString(", ")} " +
s"FROM test.people t0 ${(1 to numJoins).map(i => s"join test.people t$i").mkString(" ")}"
val df = spark.read.format("jdbc")
.option("Url", urlWithUserAndPass)
.option("dbtable", s"($longRunningQuery)")
.option("queryTimeout", 1)
.load()
val errMsg = intercept[SparkException] {
df.collect()
}.getMessage
assert(errMsg.contains("Statement was canceled or the session timed out"))
}
test("SPARK-24327 verify and normalize a partition column based on a JDBC resolved schema") {
def testJdbcParitionColumn(partColName: String, expectedColumnName: String): Unit = {
val df = spark.read.format("jdbc")
.option("url", urlWithUserAndPass)
.option("dbtable", "TEST.PARTITION")
.option("partitionColumn", partColName)
.option("lowerBound", 1)
.option("upperBound", 4)
.option("numPartitions", 3)
.load()
val quotedPrtColName = testH2Dialect.quoteIdentifier(expectedColumnName)
df.logicalPlan match {
case LogicalRelation(JDBCRelation(_, parts, _), _, _, _) =>
val whereClauses = parts.map(_.asInstanceOf[JDBCPartition].whereClause).toSet
assert(whereClauses === Set(
s"$quotedPrtColName < 2 or $quotedPrtColName is null",
s"$quotedPrtColName >= 2 AND $quotedPrtColName < 3",
s"$quotedPrtColName >= 3"))
}
}
testJdbcParitionColumn("THEID", "THEID")
testJdbcParitionColumn("\\"THEID\\"", "THEID")
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
testJdbcParitionColumn("ThEiD", "THEID")
}
testJdbcParitionColumn("THE ID", "THE ID")
def testIncorrectJdbcPartitionColumn(partColName: String): Unit = {
val errMsg = intercept[AnalysisException] {
testJdbcParitionColumn(partColName, "THEID")
}.getMessage
assert(errMsg.contains(s"User-defined partition column $partColName not found " +
"in the JDBC relation:"))
}
testIncorrectJdbcPartitionColumn("NoExistingColumn")
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
testIncorrectJdbcPartitionColumn(testH2Dialect.quoteIdentifier("ThEiD"))
}
}
test("query JDBC option - negative tests") {
val query = "SELECT * FROM test.people WHERE theid = 1"
// load path
val e1 = intercept[RuntimeException] {
val df = spark.read.format("jdbc")
.option("Url", urlWithUserAndPass)
.option("query", query)
.option("dbtable", "test.people")
.load()
}.getMessage
assert(e1.contains("Both 'dbtable' and 'query' can not be specified at the same time."))
// jdbc api path
val properties = new Properties()
properties.setProperty(JDBCOptions.JDBC_QUERY_STRING, query)
val e2 = intercept[RuntimeException] {
spark.read.jdbc(urlWithUserAndPass, "TEST.PEOPLE", properties).collect()
}.getMessage
assert(e2.contains("Both 'dbtable' and 'query' can not be specified at the same time."))
val e3 = intercept[RuntimeException] {
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW queryOption
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', query '$query', dbtable 'TEST.PEOPLE',
| user 'testUser', password 'testPass')
""".stripMargin.replaceAll("\\n", " "))
}.getMessage
assert(e3.contains("Both 'dbtable' and 'query' can not be specified at the same time."))
val e4 = intercept[RuntimeException] {
val df = spark.read.format("jdbc")
.option("Url", urlWithUserAndPass)
.option("query", "")
.load()
}.getMessage
assert(e4.contains("Option `query` can not be empty."))
// Option query and partitioncolumn are not allowed together.
val expectedErrorMsg =
s"""
|Options 'query' and 'partitionColumn' can not be specified together.
|Please define the query using `dbtable` option instead and make sure to qualify
|the partition columns using the supplied subquery alias to resolve any ambiguity.
|Example :
|spark.read.format("jdbc")
| .option("url", jdbcUrl)
| .option("dbtable", "(select c1, c2 from t1) as subq")
| .option("partitionColumn", "c1")
| .option("lowerBound", "1")
| .option("upperBound", "100")
| .option("numPartitions", "3")
| .load()
""".stripMargin
val e5 = intercept[RuntimeException] {
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW queryOption
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', query '$query', user 'testUser', password 'testPass',
| partitionColumn 'THEID', lowerBound '1', upperBound '4', numPartitions '3')
""".stripMargin.replaceAll("\\n", " "))
}.getMessage
assert(e5.contains(expectedErrorMsg))
}
test("query JDBC option") {
val query = "SELECT name, theid FROM test.people WHERE theid = 1"
// query option to pass on the query string.
val df = spark.read.format("jdbc")
.option("Url", urlWithUserAndPass)
.option("query", query)
.load()
checkAnswer(
df,
Row("fred", 1) :: Nil)
// query option in the create table path.
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW queryOption
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$url', query '$query', user 'testUser', password 'testPass')
""".stripMargin.replaceAll("\\n", " "))
checkAnswer(
sql("select name, theid from queryOption"),
Row("fred", 1) :: Nil)
}
test("SPARK-22814 support date/timestamp types in partitionColumn") {
val expectedResult = Seq(
("2018-07-06", "2018-07-06 05:50:00.0"),
("2018-07-06", "2018-07-06 08:10:08.0"),
("2018-07-08", "2018-07-08 13:32:01.0"),
("2018-07-12", "2018-07-12 09:51:15.0")
).map { case (date, timestamp) =>
Row(Date.valueOf(date), Timestamp.valueOf(timestamp))
}
// DateType partition column
val df1 = spark.read.format("jdbc")
.option("url", urlWithUserAndPass)
.option("dbtable", "TEST.DATETIME")
.option("partitionColumn", "d")
.option("lowerBound", "2018-07-06")
.option("upperBound", "2018-07-20")
.option("numPartitions", 3)
.load()
df1.logicalPlan match {
case LogicalRelation(JDBCRelation(_, parts, _), _, _, _) =>
val whereClauses = parts.map(_.asInstanceOf[JDBCPartition].whereClause).toSet
assert(whereClauses === Set(
""""D" < '2018-07-10' or "D" is null""",
""""D" >= '2018-07-10' AND "D" < '2018-07-14'""",
""""D" >= '2018-07-14'"""))
}
checkAnswer(df1, expectedResult)
// TimestampType partition column
val df2 = spark.read.format("jdbc")
.option("url", urlWithUserAndPass)
.option("dbtable", "TEST.DATETIME")
.option("partitionColumn", "t")
.option("lowerBound", "2018-07-04 03:30:00.0")
.option("upperBound", "2018-07-27 14:11:05.0")
.option("numPartitions", 2)
.load()
df2.logicalPlan match {
case LogicalRelation(JDBCRelation(_, parts, _), _, _, _) =>
val whereClauses = parts.map(_.asInstanceOf[JDBCPartition].whereClause).toSet
assert(whereClauses === Set(
""""T" < '2018-07-15 20:50:32.5' or "T" is null""",
""""T" >= '2018-07-15 20:50:32.5'"""))
}
checkAnswer(df2, expectedResult)
}
test("throws an exception for unsupported partition column types") {
val errMsg = intercept[AnalysisException] {
spark.read.format("jdbc")
.option("url", urlWithUserAndPass)
.option("dbtable", "TEST.PEOPLE")
.option("partitionColumn", "name")
.option("lowerBound", "aaa")
.option("upperBound", "zzz")
.option("numPartitions", 2)
.load()
}.getMessage
assert(errMsg.contains(
"Partition column type should be numeric, date, or timestamp, but string found."))
}
test("SPARK-24288: Enable preventing predicate pushdown") {
val table = "test.people"
val df = spark.read.format("jdbc")
.option("Url", urlWithUserAndPass)
.option("dbTable", table)
.option("pushDownPredicate", false)
.load()
.filter("theid = 1")
.select("name", "theid")
checkAnswer(
checkNotPushdown(df),
Row("fred", 1) :: Nil)
// pushDownPredicate option in the create table path.
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW predicateOption
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$urlWithUserAndPass', dbTable '$table', pushDownPredicate 'false')
""".stripMargin.replaceAll("\\n", " "))
checkAnswer(
checkNotPushdown(sql("SELECT name, theid FROM predicateOption WHERE theid = 1")),
Row("fred", 1) :: Nil)
}
test("SPARK-26383 throw IllegalArgumentException if wrong kind of driver to the given url") {
val e = intercept[IllegalArgumentException] {
val opts = Map(
"url" -> "jdbc:mysql://localhost/db",
"dbtable" -> "table",
"driver" -> "org.postgresql.Driver"
)
spark.read.format("jdbc").options(opts).load
}.getMessage
assert(e.contains("The driver could not open a JDBC connection. " +
"Check the URL: jdbc:mysql://localhost/db"))
}
test("support casting patterns for lower/upper bounds of TimestampType") {
DateTimeTestUtils.outstandingTimezonesIds.foreach { timeZone =>
withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> timeZone) {
Seq(
("1972-07-04 03:30:00", "1972-07-15 20:50:32.5", "1972-07-27 14:11:05"),
("2019-01-20 12:00:00.502", "2019-01-20 12:00:00.751", "2019-01-20 12:00:01.000"),
("2019-01-20T00:00:00.123456", "2019-01-20 00:05:00.123456",
"2019-01-20T00:10:00.123456"),
("1500-01-20T00:00:00.123456", "1500-01-20 00:05:00.123456", "1500-01-20T00:10:00.123456")
).foreach { case (lower, middle, upper) =>
val df = spark.read.format("jdbc")
.option("url", urlWithUserAndPass)
.option("dbtable", "TEST.DATETIME")
.option("partitionColumn", "t")
.option("lowerBound", lower)
.option("upperBound", upper)
.option("numPartitions", 2)
.load()
df.logicalPlan match {
case lr: LogicalRelation if lr.relation.isInstanceOf[JDBCRelation] =>
val jdbcRelation = lr.relation.asInstanceOf[JDBCRelation]
val whereClauses = jdbcRelation.parts.map(_.asInstanceOf[JDBCPartition].whereClause)
assert(whereClauses.toSet === Set(
s""""T" < '$middle' or "T" is null""",
s""""T" >= '$middle'"""))
}
}
}
}
}
test("Add exception when isolationLevel is Illegal") {
val e = intercept[IllegalArgumentException] {
spark.read.format("jdbc")
.option("Url", urlWithUserAndPass)
.option("dbTable", "test.people")
.option("isolationLevel", "test")
.load()
}.getMessage
assert(e.contains(
"Invalid value `test` for parameter `isolationLevel`. This can be " +
"`NONE`, `READ_UNCOMMITTED`, `READ_COMMITTED`, `REPEATABLE_READ` or `SERIALIZABLE`."))
}
test("SPARK-28552: Case-insensitive database URLs in JdbcDialect") {
assert(JdbcDialects.get("jdbc:mysql://localhost/db") === MySQLDialect)
assert(JdbcDialects.get("jdbc:MySQL://localhost/db") === MySQLDialect)
assert(JdbcDialects.get("jdbc:postgresql://localhost/db") === PostgresDialect)
assert(JdbcDialects.get("jdbc:postGresql://localhost/db") === PostgresDialect)
assert(JdbcDialects.get("jdbc:db2://localhost/db") === DB2Dialect)
assert(JdbcDialects.get("jdbc:DB2://localhost/db") === DB2Dialect)
assert(JdbcDialects.get("jdbc:sqlserver://localhost/db") === MsSqlServerDialect)
assert(JdbcDialects.get("jdbc:sqlServer://localhost/db") === MsSqlServerDialect)
assert(JdbcDialects.get("jdbc:derby://localhost/db") === DerbyDialect)
assert(JdbcDialects.get("jdbc:derBy://localhost/db") === DerbyDialect)
assert(JdbcDialects.get("jdbc:oracle://localhost/db") === OracleDialect)
assert(JdbcDialects.get("jdbc:Oracle://localhost/db") === OracleDialect)
assert(JdbcDialects.get("jdbc:teradata://localhost/db") === TeradataDialect)
assert(JdbcDialects.get("jdbc:Teradata://localhost/db") === TeradataDialect)
}
test("SQLContext.jdbc (deprecated)") {
val sqlContext = spark.sqlContext
var jdbcDF = sqlContext.jdbc(urlWithUserAndPass, "TEST.PEOPLE")
checkAnswer(jdbcDF, Row("fred", 1) :: Row("mary", 2) :: Row ("joe 'foo' \\"bar\\"", 3) :: Nil)
jdbcDF = sqlContext.jdbc(urlWithUserAndPass, "TEST.PEOPLE", "THEID", 0, 4, 3)
checkNumPartitions(jdbcDF, 3)
checkAnswer(jdbcDF, Row("fred", 1) :: Row("mary", 2) :: Row ("joe 'foo' \\"bar\\"", 3) :: Nil)
val parts = Array[String]("THEID = 2")
jdbcDF = sqlContext.jdbc(urlWithUserAndPass, "TEST.PEOPLE", parts)
checkAnswer(jdbcDF, Row("mary", 2) :: Nil)
}
  // Precedence checks for JDBCOptions: connection Properties override extraOptions
  // (case-insensitively), and explicit constructor arguments override both.
  test("SPARK-32364: JDBCOption constructor") {
    val extraOptions = CaseInsensitiveMap[String](Map("UrL" -> "url1", "dBTable" -> "table1"))
    val connectionProperties = new Properties()
    connectionProperties.put("url", "url2")
    connectionProperties.put("dbtable", "table2")
    // connection property should override the options in extraOptions
    val params = extraOptions ++ connectionProperties.asScala
    assert(params.size == 2)
    assert(params.get("uRl").contains("url2"))
    assert(params.get("DbtaBle").contains("table2"))
    // JDBCOptions constructor parameter should overwrite the existing conf
    val options = new JDBCOptions(url, "table3", params)
    assert(options.asProperties.size == 2)
    assert(options.asProperties.get("url") == url)
    assert(options.asProperties.get("dbtable") == "table3")
  }
}
| shuangshuangwang/spark | sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala | Scala | apache-2.0 | 74,984 |
package unit.models.service
import models.daos.{ContributionDAO, RepositoryDAO, UserDAO}
import models.services.KarmaService
import models.{Contribution, Repository, User}
import org.junit.runner._
import org.specs2.mock.Mockito
import org.specs2.mutable._
import org.specs2.runner._
@RunWith(classOf[JUnitRunner])
class KarmaServiceSpec extends Specification with Mockito {

  // The DAOs are never exercised directly by these cases, so bare mocks suffice.
  val userDAO = mock[UserDAO]
  val repositoryDAO = mock[RepositoryDAO]
  val contributionDAO = mock[ContributionDAO]
  val karmaService = new KarmaService(userDAO, repositoryDAO, contributionDAO)

  /** Builds a repository mock stubbed with the given score and line counts. */
  private def stubRepository(score: Int, added: Int, removed: Int): Repository = {
    val repository = mock[Repository]
    repository.score returns score
    repository.addedLines returns added
    repository.removedLines returns removed
    repository
  }

  /** Builds a contribution mock stubbed with the given line counts. */
  private def stubContribution(added: Int, removed: Int): Contribution = {
    val contribution = mock[Contribution]
    contribution.addedLines returns added
    contribution.removedLines returns removed
    contribution
  }

  "karmaService#calculateKarma" should {
    "if user has no contributions karma should be 0" in {
      val user = mock[User]
      karmaService.calculateKarma(user, Seq.empty[(Repository, Contribution)]) shouldEqual 0
    }
    "if user has 1 contribution karma should be related to that" in {
      val user = mock[User]
      val contribution = stubContribution(added = 10, removed = 10)
      val repository = stubRepository(score = 4, added = 10, removed = 20)
      karmaService.calculateKarma(user, Seq(repository -> contribution)) shouldEqual (((10.0+10)/30) *16).toInt
    }
    "if user has 2 contribution karma should be related to that" in {
      val user = mock[User]
      val contribution = stubContribution(added = 10, removed = 10)
      // Two distinct repository mocks, mirroring the single-contribution setup twice.
      val contributions = Seq.fill(2)(stubRepository(score = 4, added = 10, removed = 20) -> contribution)
      karmaService.calculateKarma(user, contributions) shouldEqual (2*(((10.0+10)/30)*16)).toInt
    }
  }
}
| gitlinks/gitrank-web | test/unit/models/service/KarmaServiceSpec.scala | Scala | apache-2.0 | 2,042 |
/*
* Copyright 2012 The SIRIS Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* The SIRIS Project is a cooperation between Beuth University, Berlin and the
* HCI Group at the University of Würzburg. The project is funded by the German
* Federal Ministry of Education and Research (grant no. 17N4409).
*/
package simx.components.sound
import simx.core.component.Component
import simx.core.worldinterface.eventhandling.EventDescription
import simx.core.ontology.Symbols
import scala.reflect.ClassTag
import simx.core.entity.component.ComponentAspect
/**
* User: dwiebusch
* Date: 12.04.11
* Time: 19:08
*/
/** Companion exposing the ontology symbol under which sound components register. */
object SoundComponent{
  // Shared ontology type for all sound components.
  def componentType = simx.core.ontology.Symbols.sound
}
/** Base class for sound components; always registers with the sound component type. */
abstract class SoundComponent(name : Symbol) extends Component(name, SoundComponent.componentType)
/** Component aspect describing how to instantiate a concrete sound component of type T. */
abstract class SoundComponentAspect[T <: Component : ClassTag](name : Symbol, args : Any*)
  extends ComponentAspect[T](SoundComponent.componentType, name, args)
object SoundEvents{
val sound = new EventDescription(Symbols.sound)
} | simulator-x/lwjgl-sound | src/simx/components/sound/SoundComponent.scala | Scala | apache-2.0 | 1,581 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.physical.batch
import org.apache.flink.table.plan.nodes.FlinkConventions
import org.apache.flink.table.plan.nodes.logical.FlinkLogicalCalc
import org.apache.flink.table.plan.nodes.physical.batch.BatchExecCalc
import org.apache.calcite.plan.RelOptRule
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.convert.ConverterRule
/**
* Rule that converts [[FlinkLogicalCalc]] to [[BatchExecCalc]].
*/
class BatchExecCalcRule
  extends ConverterRule(
    classOf[FlinkLogicalCalc],
    FlinkConventions.LOGICAL,
    FlinkConventions.BATCH_PHYSICAL,
    "BatchExecCalcRule") {

  /** Translates a logical calc node into its batch physical counterpart. */
  def convert(rel: RelNode): RelNode = {
    val logicalCalc = rel.asInstanceOf[FlinkLogicalCalc]
    // Re-tag the trait set with the batch physical convention and convert the
    // input subtree to the same convention before building the physical node.
    val physicalTraits = rel.getTraitSet.replace(FlinkConventions.BATCH_PHYSICAL)
    val physicalInput = RelOptRule.convert(logicalCalc.getInput, FlinkConventions.BATCH_PHYSICAL)
    new BatchExecCalc(
      rel.getCluster,
      physicalTraits,
      physicalInput,
      logicalCalc.getProgram,
      rel.getRowType)
  }
}
/** Singleton instance of [[BatchExecCalcRule]] for registration in planner rule sets. */
object BatchExecCalcRule {
  val INSTANCE: RelOptRule = new BatchExecCalcRule
}
| ueshin/apache-flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/rules/physical/batch/BatchExecCalcRule.scala | Scala | apache-2.0 | 1,906 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
package ast.parser
import scala.tools.nsc.util.{CharArrayReader, CharArrayReaderData}
import scala.reflect.internal.util._
import scala.reflect.internal.Chars._
import Tokens._
import scala.annotation.{switch, tailrec}
import scala.collection.mutable, mutable.{ListBuffer, ArrayBuffer}
import scala.tools.nsc.ast.parser.xml.Utility.isNameStart
import java.lang.StringBuilder
/** Factory and convenience operations for the scanner's literal character buffer.
 *  The buffer is a `java.lang.StringBuilder` kept near `TargetCapacity` so that a
 *  single huge literal does not pin a large backing array for the scanner's lifetime.
 */
object Cbuf {
  final val TargetCapacity = 256

  /** Allocates a fresh buffer with the standard initial capacity. */
  def create(): StringBuilder = new StringBuilder(TargetCapacity)

  implicit class StringBuilderOps(val sb: StringBuilder) extends AnyVal {
    /** Empties the buffer; if it grew past `TargetCapacity`, shrinks the backing
     *  array back down before resetting the length to zero.
     */
    def clear(): Unit = {
      val oversized = sb.capacity() > TargetCapacity
      if (oversized) {
        sb.setLength(TargetCapacity)
        sb.trimToSize()
      }
      sb.setLength(0)
    }

    /** Copies the buffer contents into a newly allocated character array. */
    def toCharArray: Array[Char] = {
      val len = sb.length()
      val out = new Array[Char](len)
      sb.getChars(0, len, out, 0)
      out
    }

    def isEmpty = sb.length() == 0
    def last = sb.charAt(sb.length() - 1)
  }
}
import Cbuf.StringBuilderOps
/** See Parsers.scala / ParsersCommon for some explanation of ScannersCommon.
*/
/** Pieces shared by the Scala and Java scanners: token data contracts, error
 *  reporting hooks, Scaladoc hooks, and the keyword lookup-table builder.
 */
trait ScannersCommon {
  val global : Global
  import global._

  /** Offset into source character array */
  type Offset = Int
  type Token = Int

  trait CommonTokenData {
    def token: Token
    def name: TermName
  }

  trait ScannerCommon extends CommonTokenData {
    // things to fill in, in addition to buf, decodeUni which come from CharArrayReader
    def error(off: Offset, msg: String): Unit
    def incompleteInputError(off: Offset, msg: String): Unit
    def deprecationWarning(off: Offset, msg: String, since: String): Unit
  }

  // Hooks for ScaladocUnitScanner and ScaladocJavaUnitScanner
  trait DocScanner {
    protected def beginDocComment(prefix: String): Unit = {}
    protected def processCommentChar(): Unit = {}
    protected def finishDocComment(): Unit = {}

    private var lastDoc: DocComment = null
    // get last doc comment; reading it clears the slot so it is delivered once
    def flushDoc(): DocComment = try lastDoc finally lastDoc = null
    def registerDocComment(raw: String, pos: Position) = {
      lastDoc = DocComment(raw, pos)
      signalParsedDocComment(raw, pos)
    }

    /** To prevent doc comments attached to expressions from leaking out of scope
     *  onto the next documentable entity, they are discarded upon passing a right
     *  brace, bracket, or parenthesis.
     */
    def discardDocBuffer(): Unit = {}
  }

  /** Builds a dense keyword lookup table indexed by `name.start - low`.
   *
   *  @param keywords     keyword names paired with their token codes
   *  @param defaultToken token stored in the unused slots of the table
   *  @return the lowest name-table offset among the keywords, and the table itself
   */
  def createKeywordArray(keywords: Seq[(Name, Token)], defaultToken: Token): (Token, Array[Token]) = {
    val names = keywords sortBy (_._1.start) map { case (k, v) => (k.start, v) }
    val low = names.head._1
    val high = names.last._1
    val arr = Array.fill(high - low + 1)(defaultToken)
    // NOTE(review): this previously stored at arr(k + low), which only coincides with
    // the (k - low) indexing the table is sized for when low == 0 (keywords are interned
    // first, so that held in practice) and would write out of bounds for any low > 0.
    names foreach { case (k, v) => arr(k - low) = v }
    (low, arr)
  }
}
trait Scanners extends ScannersCommon {
val global : Global
import global._
  /** Mutable per-token scanner state: the token code, its source offsets, and the
   *  decoded payload (identifier name, literal string value, numeric base).
   */
  trait TokenData extends CommonTokenData {
    /** the next token */
    var token: Token = EMPTY
    /** the offset of the first character of the current token */
    var offset: Offset = 0
    /** the offset of the character following the token preceding this one */
    var lastOffset: Offset = 0
    /** the name of an identifier */
    var name: TermName = null
    /** the string value of a literal */
    var strVal: String = null
    /** the base of a number */
    var base: Int = 0
    /** Overwrites this token's state with a field-by-field copy of `td`; returns this. */
    def copyFrom(td: TokenData): this.type = {
      this.token = td.token
      this.offset = td.offset
      this.lastOffset = td.lastOffset
      this.name = td.name
      this.strVal = td.strVal
      this.base = td.base
      this
    }
  }
/** An interface to most of mutable data in Scanner defined in TokenData
* and CharArrayReader (+ next, prev fields) with copyFrom functionality
* to backup/restore data (used by quasiquotes' lookingAhead).
*/
  trait ScannerData extends TokenData with CharArrayReaderData {
    /** we need one token lookahead and one token history
     */
    val next: TokenData = new TokenData{}
    val prev: TokenData = new TokenData{}
    /** Deep-copies `sd`'s lookahead/history tokens and both parent states; returns this.
     *  Used by quasiquotes' lookingAhead to back up and restore the scanner.
     */
    def copyFrom(sd: ScannerData): this.type = {
      this.next copyFrom sd.next
      this.prev copyFrom sd.prev
      super[CharArrayReaderData].copyFrom(sd)
      super[TokenData].copyFrom(sd)
      this
    }
  }
abstract class Scanner extends CharArrayReader with TokenData with ScannerData with ScannerCommon with DocScanner {
// --- Comment skipping -----------------------------------------------------

private def isDigit(c: Char) = java.lang.Character isDigit c

// Nesting depth of currently open /* ... */ comments.
private var openComments = 0

// Feed the current char to the doc-comment hook, then advance the reader.
final protected def putCommentChar(): Unit = { processCommentChar(); nextChar() }

// Consume a // comment up to (but not including) end of line / end of input.
@tailrec private def skipLineComment(): Unit = ch match {
  case SU | CR | LF =>
  case _ => nextChar() ; skipLineComment()
}

// After a '/', open a nested comment level if the next char is '*'.
private def maybeOpen(): Unit = {
  putCommentChar()
  if (ch == '*') {
    putCommentChar()
    openComments += 1
  }
}

// After a '*', close one comment level if the next char is '/'.
// Returns true only when the outermost comment has just been closed.
private def maybeClose(): Boolean = {
  putCommentChar()
  (ch == '/') && {
    putCommentChar()
    openComments -= 1
    openComments == 0
  }
}

// Consume a (possibly nested) block comment; signals an error on EOF.
@tailrec final def skipNestedComments(): Unit = ch match {
  case '/' => maybeOpen() ; skipNestedComments()
  case '*' => if (!maybeClose()) skipNestedComments()
  case SU => incompleteInputError("unclosed comment")
  case _ => putCommentChar() ; skipNestedComments()
}

// Skip the rest of a comment whose leading '/' has already been consumed;
// starts doc-comment collection for /** ... */ comments.
private def skipToCommentEnd(isLineComment: Boolean): Unit = {
  nextChar()
  if (isLineComment) skipLineComment()
  else {
    openComments = 1
    val isDocComment = (ch == '*') && { nextChar(); true }
    if (isDocComment) {
      // Check for the amazing corner case of /**/
      if (ch == '/')
        nextChar()
      else {
        beginDocComment("/**")
        skipNestedComments()
      }
    }
    else skipNestedComments()
  }
}

/** Returns true if a comment was skipped.
 *  @note Pre-condition: ch == '/'
 */
final def skipComment(): Boolean = ch match {
  case '/' | '*' => skipToCommentEnd(isLineComment = ch == '/') ; finishDocComment(); true
  case _ => false
}
// True when the reader has consumed the whole character buffer.
def isAtEnd = charOffset >= buf.length

// Resume scanning after an interruption (used by the XML literal parser handoff).
def resume(lastCode: Token) = {
  token = lastCode
  if (next.token != EMPTY && !reporter.hasErrors)
    syntaxError("unexpected end of input: possible missing '}' in XML block")
  nextToken()
}

/** A character buffer for literals
 */
val cbuf = Cbuf.create()

/** append Unicode character to "cbuf" buffer
 */
protected def putChar(c: Char): Unit = {
  // assert(cbuf.size < 10000, cbuf)
  cbuf.append(c)
}
/** Determines whether this scanner should emit identifier deprecation warnings,
 *  e.g. when seeing `macro` or `then`, which are planned to become keywords in future versions of Scala.
 */
protected def emitIdentifierDeprecationWarnings = true

/** Clear buffer and set name and token.
 *  For a plain IDENTIFIER, also consults the keyword table: a hit replaces the
 *  token with the keyword's token, unless `allowIdent` explicitly permits the
 *  name (used e.g. for soft keywords like `macro`).
 */
private def finishNamed(idtoken: Token = IDENTIFIER): Unit = {
  name = newTermName(cbuf.toCharArray)
  cbuf.clear()
  token = idtoken
  if (idtoken == IDENTIFIER) {
    // kwArray is indexed by the name's start index relative to kwOffset
    val idx = name.start - kwOffset
    if (idx >= 0 && idx < kwArray.length) {
      token = kwArray(idx)
      if (token == IDENTIFIER && allowIdent != name) {
        if (name == nme.MACROkw)
          syntaxError(s"$name is now a reserved word; usage as an identifier is disallowed")
        else if (emitIdentifierDeprecationWarnings)
          deprecationWarning(s"$name is a reserved word (since 2.10.0); usage as an identifier is deprecated", "2.10.0")
      }
    }
  }
}

/** Clear buffer and set string */
private def setStrVal(): Unit = {
  strVal = cbuf.toString
  cbuf.clear()
}
/** a stack of tokens which indicates whether line-ends can be statement separators
 *  also used for keeping track of nesting levels.
 *  We keep track of the closing symbol of a region. This can be
 *  RPAREN if region starts with '('
 *  RBRACKET if region starts with '['
 *  RBRACE if region starts with '{'
 *  ARROW if region starts with 'case'
 *  STRINGLIT if region is a string interpolation expression starting with '${'
 *  (the STRINGLIT appears twice in succession on the stack iff the
 *  expression is a multiline string literal).
 */
var sepRegions: List[Token] = List()

// Get next token ------------------------------------------------------------

/** Are we directly in a string interpolation expression?
 */
private def inStringInterpolation =
  sepRegions.nonEmpty && sepRegions.head == STRINGLIT

/** Are we directly in a multiline string interpolation expression?
 *  @pre inStringInterpolation
 */
private def inMultiLineInterpolation =
  inStringInterpolation && sepRegions.tail.nonEmpty && sepRegions.tail.head == STRINGPART

/** Are we in a `${ }` block? such that RBRACE exits back into multiline string. */
private def inMultiLineInterpolatedExpression = {
  sepRegions match {
    case RBRACE :: STRINGLIT :: STRINGPART :: rest => true
    case _ => false
  }
}

/** read next token and return last offset
 */
def skipToken(): Offset = {
  val off = offset
  nextToken()
  off
}

/** Allow an otherwise deprecated ident here */
private var allowIdent: Name = nme.EMPTY

/** Get next token, and allow the otherwise deprecated ident `name`.
 *  Restores the previous `allowIdent` even if nextToken() reports errors.
 */
def nextTokenAllow(name: Name) = {
  val prev = allowIdent
  allowIdent = name
  try {
    nextToken()
  } finally {
    allowIdent = prev
  }
}
/** Produce next token, filling TokenData fields of Scanner.
 *
 *  Also maintains `sepRegions`, inserts synthetic NEWLINE/NEWLINES tokens,
 *  fuses CASE+CLASS / CASE+OBJECT / SEMI+ELSE pairs, and drops SIP-27
 *  trailing commas.
 */
def nextToken(): Unit = {
  val lastToken = token
  // Adapt sepRegions according to last token
  (lastToken: @switch) match {
    case LPAREN =>
      sepRegions = RPAREN :: sepRegions
    case LBRACKET =>
      sepRegions = RBRACKET :: sepRegions
    case LBRACE =>
      sepRegions = RBRACE :: sepRegions
    case CASE =>
      sepRegions = ARROW :: sepRegions
    case RBRACE =>
      // pop everything up to and including the matching RBRACE region
      while (!sepRegions.isEmpty && sepRegions.head != RBRACE)
        sepRegions = sepRegions.tail
      if (!sepRegions.isEmpty)
        sepRegions = sepRegions.tail
      discardDocBuffer()
    case RBRACKET | RPAREN =>
      if (!sepRegions.isEmpty && sepRegions.head == lastToken)
        sepRegions = sepRegions.tail
      discardDocBuffer()
    case ARROW =>
      if (!sepRegions.isEmpty && sepRegions.head == lastToken)
        sepRegions = sepRegions.tail
    case STRINGLIT =>
      if (inMultiLineInterpolation)
        sepRegions = sepRegions.tail.tail
      else if (inStringInterpolation)
        sepRegions = sepRegions.tail
    case _ =>
  }
  // Read a token or copy it from `next` tokenData
  if (next.token == EMPTY) {
    lastOffset = charOffset - 1
    if (lastOffset > 0 && buf(lastOffset) == '\\n' && buf(lastOffset - 1) == '\\r') {
      lastOffset -= 1
    }
    if (inStringInterpolation) fetchStringPart() else fetchToken()
    if (token == ERROR) {
      // abandon the interpolation region(s) on a scan error
      if (inMultiLineInterpolation)
        sepRegions = sepRegions.tail.tail
      else if (inStringInterpolation)
        sepRegions = sepRegions.tail
    }
  } else {
    this copyFrom next
    next.token = EMPTY
  }
  /* Insert NEWLINE or NEWLINES if
   * - we are after a newline
   * - we are within a { ... } or on toplevel (wrt sepRegions)
   * - the current token can start a statement and the one before can end it
   * insert NEWLINES if we are past a blank line, NEWLINE otherwise
   */
  if (!applyBracePatch() && afterLineEnd() && inLastOfStat(lastToken) && inFirstOfStat(token) &&
      (sepRegions.isEmpty || sepRegions.head == RBRACE)) {
    next copyFrom this
    offset = if (lineStartOffset <= offset) lineStartOffset else lastLineStartOffset
    token = if (pastBlankLine()) NEWLINES else NEWLINE
  }
  // Join CASE + CLASS => CASECLASS, CASE + OBJECT => CASEOBJECT, SEMI + ELSE => ELSE
  if (token == CASE) {
    prev copyFrom this
    val nextLastOffset = charOffset - 1
    fetchToken()
    def resetOffset(): Unit = {
      offset = prev.offset
      lastOffset = prev.lastOffset
    }
    if (token == CLASS) {
      token = CASECLASS
      resetOffset()
    } else if (token == OBJECT) {
      token = CASEOBJECT
      resetOffset()
    } else {
      // not a fused pair: stash the fetched token and restore CASE
      lastOffset = nextLastOffset
      next copyFrom this
      this copyFrom prev
    }
  } else if (token == SEMI) {
    prev copyFrom this
    fetchToken()
    if (token != ELSE) {
      next copyFrom this
      this copyFrom prev
    }
  } else if (token == COMMA) {
    // SIP-27 Trailing Comma (multi-line only) support
    // If a comma is followed by a new line & then a closing paren, bracket or brace
    // then it is a trailing comma and is ignored
    val saved = new ScannerData {} copyFrom this
    fetchToken()
    if (afterLineEnd() && (token == RPAREN || token == RBRACKET || token == RBRACE)) {
      /* skip the trailing comma */
    } else if (token == EOF) { // e.g. when the REPL is parsing "val List(x, y, _*,"
      /* skip the trailing comma */
    } else this copyFrom saved
  }

  // print("["+this+"]")
}
/** Is current token first one after a newline? */
private def afterLineEnd(): Boolean =
  lastOffset < lineStartOffset &&
  (lineStartOffset <= offset ||
   lastOffset < lastLineStartOffset && lastLineStartOffset <= offset)

/** Is there a blank line between the current token and the last one?
 *  A "blank" line may contain whitespace but no other characters.
 *  @pre afterLineEnd().
 */
private def pastBlankLine(): Boolean = {
  var idx = lastOffset
  var ch = buf(idx)
  val end = offset
  while (idx < end) {
    if (ch == LF || ch == FF) {
      // saw one line break; scan whitespace for a second one before `end`
      do {
        idx += 1; ch = buf(idx)
        if (ch == LF || ch == FF) {
          // println("blank line found at "+lastOffset+":"+(lastOffset to idx).map(buf(_)).toList)
          return true
        }
        if (idx == end) return false
      } while (ch <= ' ')
    }
    idx += 1; ch = buf(idx)
  }
  false
}
/** read next token, filling TokenData fields of Scanner.
 *
 *  Dispatches on the current character: whitespace is skipped (tail call),
 *  identifiers/operators/literals/brackets each set `token` and related
 *  fields, comments are skipped and scanning resumes.
 */
@tailrec
protected final def fetchToken(): Unit = {
  offset = charOffset - 1
  (ch: @switch) match {
    case ' ' | '\\t' | CR | LF | FF =>
      nextChar()
      fetchToken()
    case 'A' | 'B' | 'C' | 'D' | 'E' |
         'F' | 'G' | 'H' | 'I' | 'J' |
         'K' | 'L' | 'M' | 'N' | 'O' |
         'P' | 'Q' | 'R' | 'S' | 'T' |
         'U' | 'V' | 'W' | 'X' | 'Y' |
         'Z' | '$' | '_' |
         'a' | 'b' | 'c' | 'd' | 'e' |
         'f' | 'g' | 'h' | 'i' | 'j' |
         'k' | 'l' | 'm' | 'n' | 'o' |
         'p' | 'q' | 'r' | 's' | 't' |
         'u' | 'v' | 'w' | 'x' | 'y' | // scala-mode: need to understand multi-line case patterns
         'z' =>
      putChar(ch)
      nextChar()
      getIdentRest()
      // an identifier immediately followed by '"' is an interpolator id, e.g. s"..."
      if (ch == '"' && token == IDENTIFIER)
        token = INTERPOLATIONID
    case '<' => // is XMLSTART?
      def fetchLT() = {
        val last = if (charOffset >= 2) buf(charOffset - 2) else ' '
        nextChar()
        last match {
          case ' ' | '\\t' | '\\n' | '{' | '(' | '>' if isNameStart(ch) || ch == '!' || ch == '?' =>
            token = XMLSTART
          case _ =>
            // Console.println("found '<', but last is '"+in.last+"'"); // DEBUG
            putChar('<')
            getOperatorRest()
        }
      }
      fetchLT()
    case '~' | '!' | '@' | '#' | '%' |
         '^' | '*' | '+' | '-' | /*'<' | */
         '>' | '?' | ':' | '=' | '&' |
         '|' | '\\\\' =>
      putChar(ch)
      nextChar()
      getOperatorRest()
    case '/' =>
      nextChar()
      if (skipComment()) {
        fetchToken()
      } else {
        putChar('/')
        getOperatorRest()
      }
    case '0' =>
      def fetchLeadingZero(): Unit = {
        nextChar()
        ch match {
          case 'x' | 'X' => base = 16 ; nextChar()
          case _ => base = 8 // single decimal zero, perhaps
        }
      }
      fetchLeadingZero()
      getNumber()
    case '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' =>
      base = 10
      getNumber()
    case '`' =>
      getBackquotedIdent()
    case '\\"' =>
      def fetchDoubleQuote() = {
        if (token == INTERPOLATIONID) {
          // interpolated string: single- or triple-quoted
          nextRawChar()
          if (ch == '\\"') {
            val lookahead = lookaheadReader
            lookahead.nextChar()
            if (lookahead.ch == '\\"') {
              nextRawChar() // now eat it
              offset += 3
              nextRawChar()
              getStringPart(multiLine = true)
              sepRegions = STRINGPART :: sepRegions // indicate string part
              sepRegions = STRINGLIT :: sepRegions // once more to indicate multi line string part
            } else {
              nextChar()
              token = STRINGLIT
              strVal = ""
            }
          } else {
            offset += 1
            getStringPart(multiLine = false)
            sepRegions = STRINGLIT :: sepRegions // indicate single line string part
          }
        } else {
          nextChar()
          if (ch == '\\"') {
            nextChar()
            if (ch == '\\"') {
              nextRawChar()
              getRawStringLit()
            } else {
              // empty string literal ""
              token = STRINGLIT
              strVal = ""
            }
          } else {
            getStringLit()
          }
        }
      }
      fetchDoubleQuote()
    case '\\'' =>
      def unclosedCharLit() = {
        val unclosed = "unclosed character literal"
        // advise if previous token was Symbol contiguous with the orphan single quote at offset
        val msg = {
          val maybeMistakenQuote =
            this match {
              case sfs: SourceFileScanner =>
                val wholeLine = sfs.source.lineToString(sfs.source.offsetToLine(offset))
                wholeLine.count(_ == '\\'') > 1
              case _ => false
            }
          if (token == SYMBOLLIT && offset == lastOffset) s"""$unclosed (or use " for string literal "$strVal")"""
          else if (maybeMistakenQuote) s"""$unclosed (or use " not ' for string literal)"""
          else unclosed
        }
        syntaxError(msg)
      }
      // a quote can begin a char literal or a symbol literal
      def fetchSingleQuote() = {
        nextChar()
        if (isIdentifierStart(ch))
          charLitOr(() => getIdentRest())
        else if (isOperatorPart(ch) && (ch != '\\\\'))
          charLitOr(() => getOperatorRest())
        else if (!isAtEnd && (ch != SU && ch != CR && ch != LF || isUnicodeEscape)) {
          val isEmptyCharLit = (ch == '\\'')
          getLitChar()
          if (ch == '\\'') {
            if (isEmptyCharLit && currentRun.isScala213)
              syntaxError("empty character literal (use '\\\\'' for single quote)")
            else {
              if (isEmptyCharLit)
                deprecationWarning("deprecated syntax for character literal (use '\\\\'' for single quote)", "2.12.2")
              nextChar()
              token = CHARLIT
              setStrVal()
            }
          } else if (isEmptyCharLit) {
            syntaxError("empty character literal")
          } else {
            unclosedCharLit()
          }
        }
        else unclosedCharLit()
      }
      fetchSingleQuote()
    case '.' =>
      nextChar()
      if ('0' <= ch && ch <= '9') {
        putChar('.'); getFraction()
      } else {
        token = DOT
      }
    case ';' =>
      nextChar(); token = SEMI
    case ',' =>
      nextChar(); token = COMMA
    case '(' =>
      nextChar(); token = LPAREN
    case '{' =>
      nextChar(); token = LBRACE
    case ')' =>
      nextChar(); token = RPAREN
    case '}' =>
      // use raw reading inside `${ }` so the closing quote logic sees raw chars
      if (inMultiLineInterpolatedExpression) nextRawChar() else nextChar()
      token = RBRACE
    case '[' =>
      nextChar(); token = LBRACKET
    case ']' =>
      nextChar(); token = RBRACKET
    case SU =>
      if (isAtEnd) token = EOF
      else {
        syntaxError("illegal character")
        nextChar()
      }
    case _ =>
      def fetchOther() = {
        if (ch == '\\u21D2') {
          deprecationWarning("The unicode arrow `⇒` is deprecated, use `=>` instead. If you still wish to display it as one character, consider using a font with programming ligatures such as Fira Code.", "2.13.0")
          nextChar(); token = ARROW
        } else if (ch == '\\u2190') {
          deprecationWarning("The unicode arrow `←` is deprecated, use `<-` instead. If you still wish to display it as one character, consider using a font with programming ligatures such as Fira Code.", "2.13.0")
          nextChar(); token = LARROW
        } else if (Character.isUnicodeIdentifierStart(ch)) {
          putChar(ch)
          nextChar()
          getIdentRest()
        } else if (isSpecial(ch)) {
          putChar(ch)
          nextChar()
          getOperatorRest()
        } else {
          syntaxError("illegal character '" + ("" + '\\\\' + 'u' + "%04x".format(ch.toInt)) + "'")
          nextChar()
        }
      }
      fetchOther()
  }
}
/** Can token start a statement? True unless the token can only continue or
 *  close an enclosing construct (separators, closers, infix-position keywords).
 */
def inFirstOfStat(token: Token) = {
  val cannotStart =
    token == EOF || token == CATCH || token == ELSE || token == EXTENDS ||
    token == FINALLY || token == FORSOME || token == MATCH || token == WITH ||
    token == YIELD || token == COMMA || token == SEMI || token == NEWLINE ||
    token == NEWLINES || token == DOT || token == COLON || token == EQUALS ||
    token == ARROW || token == LARROW || token == SUBTYPE || token == VIEWBOUND ||
    token == SUPERTYPE || token == HASH || token == RPAREN || token == RBRACKET ||
    token == RBRACE || token == LBRACKET
  !cannotStart
}
/** Can token end a statement? True for literals, identifiers, and tokens
 *  that syntactically terminate an expression (closers, `this`, `return`, ...).
 */
def inLastOfStat(token: Token) = {
  val canEnd =
    token == CHARLIT || token == INTLIT || token == LONGLIT || token == FLOATLIT ||
    token == DOUBLELIT || token == STRINGLIT || token == SYMBOLLIT ||
    token == IDENTIFIER || token == BACKQUOTED_IDENT || token == THIS ||
    token == NULL || token == TRUE || token == FALSE || token == RETURN ||
    token == USCORE || token == TYPE || token == XMLSTART || token == RPAREN ||
    token == RBRACKET || token == RBRACE
  canEnd
}
// Identifiers ---------------------------------------------------------------

// Scan a backquoted identifier; the opening '`' has been seen.
private def getBackquotedIdent(): Unit = {
  nextChar()
  getLitChars('`')
  if (ch == '`') {
    nextChar()
    finishNamed(BACKQUOTED_IDENT)
    if (name.length == 0) syntaxError("empty quoted identifier")
  }
  else syntaxError("unclosed quoted identifier")
}

// Scan the remainder of an alphanumeric identifier; '_' may switch to an
// operator suffix (e.g. `foo_+`).
@tailrec
private def getIdentRest(): Unit = (ch: @switch) match {
  case 'A' | 'B' | 'C' | 'D' | 'E' |
       'F' | 'G' | 'H' | 'I' | 'J' |
       'K' | 'L' | 'M' | 'N' | 'O' |
       'P' | 'Q' | 'R' | 'S' | 'T' |
       'U' | 'V' | 'W' | 'X' | 'Y' |
       'Z' | '$' |
       'a' | 'b' | 'c' | 'd' | 'e' |
       'f' | 'g' | 'h' | 'i' | 'j' |
       'k' | 'l' | 'm' | 'n' | 'o' |
       'p' | 'q' | 'r' | 's' | 't' |
       'u' | 'v' | 'w' | 'x' | 'y' |
       'z' |
       '0' | '1' | '2' | '3' | '4' |
       '5' | '6' | '7' | '8' | '9' =>
    putChar(ch)
    nextChar()
    getIdentRest()
  case '_' =>
    putChar(ch)
    nextChar()
    getIdentOrOperatorRest()
  case SU => // strangely enough, Character.isUnicodeIdentifierPart(SU) returns true!
    finishNamed()
  case _ =>
    if (Character.isUnicodeIdentifierPart(ch)) {
      putChar(ch)
      nextChar()
      getIdentRest()
    } else {
      finishNamed()
    }
}

// Scan the remainder of a symbolic operator; '/' needs special care because
// it may start a comment.
@tailrec
private def getOperatorRest(): Unit = (ch: @switch) match {
  case '~' | '!' | '@' | '#' | '%' |
       '^' | '*' | '+' | '-' | '<' |
       '>' | '?' | ':' | '=' | '&' |
       '|' | '\\\\' =>
    putChar(ch); nextChar(); getOperatorRest()
  case '/' =>
    nextChar()
    if (skipComment()) finishNamed()
    else { putChar('/'); getOperatorRest() }
  case _ =>
    if (isSpecial(ch)) { putChar(ch); nextChar(); getOperatorRest() }
    else finishNamed()
}

// After '_' in an identifier: continue as identifier or switch to operator.
private def getIdentOrOperatorRest(): Unit = {
  if (isIdentifierPart(ch))
    getIdentRest()
  else ch match {
    case '~' | '!' | '@' | '#' | '%' |
         '^' | '*' | '+' | '-' | '<' |
         '>' | '?' | ':' | '=' | '&' |
         '|' | '\\\\' | '/' =>
      getOperatorRest()
    case _ =>
      if (isSpecial(ch)) getOperatorRest()
      else finishNamed()
  }
}
// Literals -----------------------------------------------------------------

// Scan a single-line string literal; the opening quote has been consumed.
private def getStringLit() = {
  getLitChars('"')
  if (ch == '"') {
    setStrVal()
    nextChar()
    token = STRINGLIT
  } else unclosedStringLit()
}

private def unclosedStringLit(): Unit = syntaxError("unclosed string literal")

// Scan the body of a triple-quoted (raw) string literal; no escapes apply.
@tailrec private def getRawStringLit(): Unit = {
  if (ch == '\\"') {
    nextRawChar()
    if (isTripleQuote()) {
      setStrVal()
      token = STRINGLIT
    } else
      getRawStringLit()
  } else if (ch == SU) {
    incompleteInputError("unclosed multi-line string literal")
  } else {
    putChar(ch)
    nextRawChar()
    getRawStringLit()
  }
}
/** Scan one segment of an interpolated string: literal text up to the closing
 *  quote(s) or the next `$` splice. On a splice, emits STRINGPART and queues
 *  the spliced token (LBRACE, USCORE, or an identifier/keyword) in `next`.
 */
@tailrec private def getStringPart(multiLine: Boolean): Unit = {
  def finishStringPart() = {
    setStrVal()
    token = STRINGPART
    next.lastOffset = charOffset - 1
    next.offset = charOffset - 1
  }
  if (ch == '"') {
    if (multiLine) {
      nextRawChar()
      if (isTripleQuote()) {
        setStrVal()
        token = STRINGLIT
      } else
        getStringPart(multiLine)
    } else {
      nextChar()
      setStrVal()
      token = STRINGLIT
    }
  } else if (ch == '$') {
    nextRawChar()
    if (ch == '$') {
      // "$$" is an escaped literal dollar sign
      putChar(ch)
      nextRawChar()
      getStringPart(multiLine)
    } else if (ch == '{') {
      finishStringPart()
      nextRawChar()
      next.token = LBRACE
    } else if (ch == '_') {
      finishStringPart()
      nextRawChar()
      next.token = USCORE
    } else if (Character.isUnicodeIdentifierStart(ch)) {
      // "$ident" splice: collect the identifier and resolve keywords
      finishStringPart()
      do {
        putChar(ch)
        nextRawChar()
      } while (ch != SU && Character.isUnicodeIdentifierPart(ch))
      next.token = IDENTIFIER
      next.name = newTermName(cbuf.toString)
      cbuf.clear()
      val idx = next.name.start - kwOffset
      if (idx >= 0 && idx < kwArray.length) {
        next.token = kwArray(idx)
      }
    } else {
      syntaxError(s"invalid string interpolation $$$ch, expected: $$$$, $$identifier or $${expression}")
    }
  } else {
    // single-line interpolations end at EOL; multi-line only at EOF
    val isUnclosedLiteral = !isUnicodeEscape && (ch == SU || (!multiLine && (ch == CR || ch == LF)))
    if (isUnclosedLiteral) {
      if (multiLine)
        incompleteInputError("unclosed multi-line string literal")
      else
        unclosedStringLit()
    }
    else {
      putChar(ch)
      nextRawChar()
      getStringPart(multiLine)
    }
  }
}

// Entry point for resuming an interpolated string after a spliced expression.
private def fetchStringPart() = {
  offset = charOffset - 1
  getStringPart(multiLine = inMultiLineInterpolation)
}
/** Having seen one '"', decide whether the next two chars close a triple-quoted
 *  string. If not, the quotes already consumed are pushed back into cbuf as
 *  literal characters. Extra trailing quotes (e.g. """") are kept as content.
 */
private def isTripleQuote(): Boolean =
  if (ch == '"') {
    nextRawChar()
    if (ch == '"') {
      nextChar()
      while (ch == '"') {
        putChar('"')
        nextChar()
      }
      true
    } else {
      putChar('"')
      putChar('"')
      false
    }
  } else {
    putChar('"')
    false
  }
/** copy current character into cbuf, interpreting any escape sequences,
 *  and advance to next character.
 *  Octal escapes are rejected with an error suggesting the equivalent
 *  \\n or unicode escape, but the character is still buffered so scanning
 *  can continue.
 */
protected def getLitChar(): Unit =
  if (ch == '\\\\') {
    nextChar()
    if ('0' <= ch && ch <= '7') {
      // legacy octal escape: up to three octal digits (leading digit <= '3')
      val start = charOffset - 2
      val leadch: Char = ch
      var oct: Int = digit2int(ch, 8)
      nextChar()
      if ('0' <= ch && ch <= '7') {
        oct = oct * 8 + digit2int(ch, 8)
        nextChar()
        if (leadch <= '3' && '0' <= ch && ch <= '7') {
          oct = oct * 8 + digit2int(ch, 8)
          nextChar()
        }
      }
      val alt = if (oct == LF) "\\\\n" else "\\\\u%04x" format oct
      syntaxError(start, s"octal escape literals are unsupported: use $alt instead")
      putChar(oct.toChar)
    } else {
      ch match {
        case 'b' => putChar('\\b')
        case 't' => putChar('\\t')
        case 'n' => putChar('\\n')
        case 'f' => putChar('\\f')
        case 'r' => putChar('\\r')
        case '\\"' => putChar('\\"')
        case '\\'' => putChar('\\'')
        case '\\\\' => putChar('\\\\')
        case _ => invalidEscape()
      }
      nextChar()
    }
  } else {
    putChar(ch)
    nextChar()
  }

// Report a bad escape sequence; buffer the offending char and continue.
protected def invalidEscape(): Unit = {
  syntaxError(charOffset - 1, "invalid escape character")
  putChar(ch)
}

// Buffer literal characters until the delimiter, end of input, or an
// unescaped line end.
private def getLitChars(delimiter: Char) = {
  while (ch != delimiter && !isAtEnd && (ch != SU && ch != CR && ch != LF || isUnicodeEscape))
    getLitChar()
}
/** read fractional part and exponent of floating point number
 *  if one is present. Sets token to FLOATLIT or DOUBLELIT and stores the
 *  literal text in strVal. Digits may be grouped with '_' separators.
 */
protected def getFraction(): Unit = {
  while ('0' <= ch && ch <= '9' || isNumberSeparator(ch)) {
    putChar(ch)
    nextChar()
  }
  checkNoTrailingSeparator()
  if (ch == 'e' || ch == 'E') {
    // only treat 'e' as an exponent if a digit actually follows (lookahead),
    // so e.g. `1.2.exists` still lexes
    val lookahead = lookaheadReader
    lookahead.nextChar()
    if (lookahead.ch == '+' || lookahead.ch == '-') {
      lookahead.nextChar()
    }
    if ('0' <= lookahead.ch && lookahead.ch <= '9') {
      putChar(ch)
      nextChar()
      if (ch == '+' || ch == '-') {
        putChar(ch)
        nextChar()
      }
      while ('0' <= ch && ch <= '9' || isNumberSeparator(ch)) {
        putChar(ch)
        nextChar()
      }
      checkNoTrailingSeparator()
    }
    token = DOUBLELIT
  }
  // optional type suffix: d/D forces double, f/F forces float
  if (ch == 'd' || ch == 'D') {
    putChar(ch)
    nextChar()
    token = DOUBLELIT
  } else if (ch == 'f' || ch == 'F') {
    putChar(ch)
    nextChar()
    token = FLOATLIT
  } else
    token = DOUBLELIT
  checkNoLetter()
  setStrVal()
}
/** The value of the current character literal: the first char of strVal,
 *  or NUL when strVal is empty.
 */
def charVal: Char = if (strVal.isEmpty) 0 else strVal.head
/** Convert current strVal, base to long value.
 *  This is tricky because of max negative value.
 *
 *  Conversions in base 10 and 16 are supported. As a permanent migration
 *  path, attempts to write base 8 literals except `0` emit a verbose error.
 */
def intVal(negated: Boolean): Long = {
  def intConvert: Long = {
    def malformed: Long = { syntaxError("malformed integer number") ; 0 }
    def tooBig: Long = { syntaxError("integer number too large") ; 0 }
    val len = strVal.length
    if (len == 0) {
      if (base != 8) syntaxError("missing integer number") // e.g., 0x;
      0 // 0 still looks like octal prefix
    } else {
      if (base == 8) {
        // obsolete octal syntax: warn, then reinterpret as decimal
        if (settings.warnOctalLiteral)
          deprecationWarning("Decimal integer literals should not have a leading zero. (Octal syntax is obsolete.)" , since="2.10")
        base = 10
      }
      val divider = if (base == 10) 1 else 2
      val limit: Long = if (token == LONGLIT) Long.MaxValue else Int.MaxValue
      // accumulate digits left-to-right, skipping '_' separators and checking
      // for overflow before each multiply-add; negated min value is allowed
      @tailrec def convert(value: Long, i: Int): Long =
        if (i >= len) value
        else {
          val c = strVal.charAt(i)
          if (isNumberSeparator(c)) convert(value, i + 1)
          else {
            val d = digit2int(c, base)
            if (d < 0)
              malformed
            else if (value < 0 ||
                limit / (base / divider) < value ||
                limit - (d / divider) < value * (base / divider) &&
                !(negated && limit == value * base - 1 + d))
              tooBig
            else
              convert(value * base + d, i + 1)
          }
        }
      val result = convert(0, 0)
      if (negated) -result else result
    }
  }
  // a CHARLIT reaching here means the literal is used in a numeric position
  if (token == CHARLIT && !negated) charVal.toLong else intConvert
}

def intVal: Long = intVal(negated = false)
// Matches literals that are legitimately zero (so a parsed value of 0.0
// from a nonzero literal can be flagged as underflow).
private val zeroFloat = raw"[0.]+(?:[eE][+-]?[0-9]+)?[fFdD]?".r

/** Convert current strVal, base to float value.
 *  Reports overflow (parse result exceeds Float.MaxValue, i.e. infinity),
 *  underflow (nonzero literal parsed as 0.0), and malformed input.
 */
def floatVal(negated: Boolean): Float = {
  val text = removeNumberSeparators(strVal)
  try {
    val value: Float = java.lang.Float.parseFloat(text)
    if (value > Float.MaxValue)
      syntaxError("floating point number too large")
    if (value == 0.0f && !zeroFloat.pattern.matcher(text).matches)
      syntaxError("floating point number too small")
    if (negated) -value else value
  } catch {
    case _: NumberFormatException =>
      syntaxError("malformed floating point number")
      0.0f
  }
}

def floatVal: Float = floatVal(negated = false)

/** Convert current strVal, base to double value.
 *  Same overflow/underflow/malformed checks as floatVal, for Double.
 */
def doubleVal(negated: Boolean): Double = {
  val text = removeNumberSeparators(strVal)
  try {
    val value: Double = java.lang.Double.parseDouble(text)
    if (value > Double.MaxValue)
      syntaxError("double precision floating point number too large")
    if (value == 0.0d && !zeroFloat.pattern.matcher(text).matches)
      syntaxError("double precision floating point number too small")
    if (negated) -value else value
  } catch {
    case _: NumberFormatException =>
      syntaxError("malformed double precision floating point number")
      0.0
  }
}

def doubleVal: Double = doubleVal(negated = false)
// Reject a letter immediately following a number literal (e.g. `1z`).
def checkNoLetter(): Unit = {
  if (isIdentifierPart(ch) && ch >= ' ')
    syntaxError("Invalid literal number")
}

// '_' is the only recognized digit-group separator.
@inline private def isNumberSeparator(c: Char): Boolean = c == '_' //|| c == '\\''

// Strip '_' separators before handing the text to the Java numeric parsers.
@inline private def removeNumberSeparators(s: String): String =
  if (s.indexOf('_') > 0) s.replaceAllLiterally("_", "") /*.replaceAll("'","")*/ else s

// disallow trailing numeric separator char, but let lexing limp along
def checkNoTrailingSeparator(): Unit =
  if (!cbuf.isEmpty && isNumberSeparator(cbuf.last)) {
    syntaxError(offset + cbuf.length - 1, "trailing separator is not allowed")
    cbuf.setLength(cbuf.length - 1)
  }
/** Read a number into strVal.
 *
 *  The `base` can be 8, 10 or 16, where base 8 flags a leading zero.
 *  For ints, base 8 is legal only for the case of exactly one zero.
 */
protected def getNumber(): Unit = {
  // consume digits of a radix
  def consumeDigits(radix: Int): Unit =
    while (isNumberSeparator(ch) || digit2int(ch, radix) >= 0) {
      putChar(ch)
      nextChar()
    }
  // at dot with digit following
  def restOfNonIntegralNumber(): Unit = {
    putChar('.')
    nextChar()
    getFraction()
  }
  // 1l is an acknowledged bad practice
  def lintel(): Unit = {
    val msg = "Lowercase el for long is not recommended because it is easy to confuse with numeral 1; use uppercase L instead"
    if (ch == 'l') deprecationWarning(offset + cbuf.length, msg, since="2.13.0")
  }
  // after int: 5e7f, 42L, 42.toDouble but not 42b. Repair 0d.
  def restOfNumber(): Unit = {
    ch match {
      case 'e' | 'E' | 'f' | 'F' |
           'd' | 'D' => if (cbuf.isEmpty) putChar('0'); getFraction()
      case 'l' | 'L' => lintel() ; token = LONGLIT ; setStrVal() ; nextChar()
      case _ => token = INTLIT ; setStrVal() ; checkNoLetter()
    }
  }
  // consume leading digits, provisionally an Int
  consumeDigits(if (base == 16) 16 else 10)
  checkNoTrailingSeparator()
  // a '.' only makes this a float if a digit follows (so `1.toString` lexes)
  val detectedFloat: Boolean = base != 16 && ch == '.' && isDigit(lookaheadReader.getc)
  if (detectedFloat) restOfNonIntegralNumber() else restOfNumber()
}
/** Parse character literal if current character is followed by \\',
 *  or follow with given op and return a symbol literal token.
 *  The first char is already buffered; `op` scans the rest of the name.
 */
def charLitOr(op: () => Unit): Unit = {
  putChar(ch)
  nextChar()
  if (ch == '\\'') {
    nextChar()
    token = CHARLIT
    setStrVal()
  } else {
    op()
    token = SYMBOLLIT
    strVal = name.toString
  }
}
// Errors -----------------------------------------------------------------

/** generate an error at the given offset; current token becomes ERROR */
def syntaxError(off: Offset, msg: String): Unit = {
  error(off, msg)
  token = ERROR
}

/** generate an error at the current token offset */
def syntaxError(msg: String): Unit = syntaxError(offset, msg)

def deprecationWarning(msg: String, since: String): Unit = deprecationWarning(offset, msg, since)

/** signal an error where the input ended in the middle of a token */
def incompleteInputError(msg: String): Unit = {
  incompleteInputError(offset, msg)
  token = EOF
}
/** Debug rendering of the current token (as written, DOUBLELIT is rendered
 *  via floatVal).
 */
override def toString() = token match {
  case IDENTIFIER | BACKQUOTED_IDENT =>
    "id(" + name + ")"
  case CHARLIT =>
    "char(" + intVal + ")"
  case INTLIT =>
    "int(" + intVal + ")"
  case LONGLIT =>
    "long(" + intVal + ")"
  case FLOATLIT =>
    "float(" + floatVal + ")"
  case DOUBLELIT =>
    "double(" + floatVal + ")"
  case STRINGLIT =>
    "string(" + strVal + ")"
  case STRINGPART =>
    "stringpart(" + strVal + ")"
  case INTERPOLATIONID =>
    "interpolationid(" + name + ")"
  case SEMI =>
    ";"
  case NEWLINE =>
    ";"
  case NEWLINES =>
    ";;"
  case COMMA =>
    ","
  case _ =>
    token2string(token)
}
// ------------- brace counting and healing ------------------------------

/** overridden in UnitScanners:
 *  apply brace patch if one exists for this offset
 *  return true if subsequent end of line handling should be suppressed.
 */
def applyBracePatch(): Boolean = false

/** overridden in UnitScanners */
def parenBalance(token: Token) = 0

/** overridden in UnitScanners */
def healBraces(): List[BracePatch] = List()

/** Initialization method: read first char, then first token
 */
def init(): Unit = {
  nextChar()
  nextToken()
}
} // end Scanner
// ------------- keyword configuration -----------------------------------

// Every reserved word and symbolic keyword, paired with its token.
// MACROkw/THENkw map to IDENTIFIER: they are only soft/reserved-for-future
// words, handled specially in finishNamed.
private val allKeywords = List[(Name, Token)](
  nme.ABSTRACTkw -> ABSTRACT,
  nme.CASEkw -> CASE,
  nme.CATCHkw -> CATCH,
  nme.CLASSkw -> CLASS,
  nme.DEFkw -> DEF,
  nme.DOkw -> DO,
  nme.ELSEkw -> ELSE,
  nme.EXTENDSkw -> EXTENDS,
  nme.FALSEkw -> FALSE,
  nme.FINALkw -> FINAL,
  nme.FINALLYkw -> FINALLY,
  nme.FORkw -> FOR,
  nme.FORSOMEkw -> FORSOME,
  nme.IFkw -> IF,
  nme.IMPLICITkw -> IMPLICIT,
  nme.IMPORTkw -> IMPORT,
  nme.LAZYkw -> LAZY,
  nme.MATCHkw -> MATCH,
  nme.NEWkw -> NEW,
  nme.NULLkw -> NULL,
  nme.OBJECTkw -> OBJECT,
  nme.OVERRIDEkw -> OVERRIDE,
  nme.PACKAGEkw -> PACKAGE,
  nme.PRIVATEkw -> PRIVATE,
  nme.PROTECTEDkw -> PROTECTED,
  nme.RETURNkw -> RETURN,
  nme.SEALEDkw -> SEALED,
  nme.SUPERkw -> SUPER,
  nme.THISkw -> THIS,
  nme.THROWkw -> THROW,
  nme.TRAITkw -> TRAIT,
  nme.TRUEkw -> TRUE,
  nme.TRYkw -> TRY,
  nme.TYPEkw -> TYPE,
  nme.VALkw -> VAL,
  nme.VARkw -> VAR,
  nme.WHILEkw -> WHILE,
  nme.WITHkw -> WITH,
  nme.YIELDkw -> YIELD,
  nme.DOTkw -> DOT,
  nme.USCOREkw -> USCORE,
  nme.COLONkw -> COLON,
  nme.EQUALSkw -> EQUALS,
  nme.ARROWkw -> ARROW,
  nme.LARROWkw -> LARROW,
  nme.SUBTYPEkw -> SUBTYPE,
  nme.VIEWBOUNDkw -> VIEWBOUND,
  nme.SUPERTYPEkw -> SUPERTYPE,
  nme.HASHkw -> HASH,
  nme.ATkw -> AT,
  nme.MACROkw -> IDENTIFIER,
  nme.THENkw -> IDENTIFIER)

// Base offset of the keyword lookup table built below.
private var kwOffset: Offset = -1

// Dense table: kwArray(name.start - kwOffset) is the keyword token, or
// IDENTIFIER for non-keyword slots.
private val kwArray: Array[Token] = {
  val (offset, arr) = createKeywordArray(allKeywords, IDENTIFIER)
  kwOffset = offset
  arr
}

// Reverse mapping, used for error/debug rendering of tokens.
final val token2name = (allKeywords map (_.swap)).toMap
// Token representation ----------------------------------------------------

/** Returns the string representation of given token. */
def token2string(token: Token): String = (token: @switch) match {
  case IDENTIFIER | BACKQUOTED_IDENT => "identifier"
  case CHARLIT => "character literal"
  case INTLIT => "integer literal"
  case LONGLIT => "long literal"
  case FLOATLIT => "float literal"
  case DOUBLELIT => "double literal"
  case STRINGLIT | STRINGPART | INTERPOLATIONID => "string literal"
  case SYMBOLLIT => "symbol literal"
  case LPAREN => "'('"
  case RPAREN => "')'"
  case LBRACE => "'{'"
  case RBRACE => "'}'"
  case LBRACKET => "'['"
  case RBRACKET => "']'"
  case EOF => "eof"
  case ERROR => "something"
  case SEMI => "';'"
  case NEWLINE => "';'"
  case NEWLINES => "';'"
  case COMMA => "','"
  case CASECLASS => "case class"
  case CASEOBJECT => "case object"
  case XMLSTART => "$XMLSTART$<"
  case _ =>
    // fall back to the keyword table, then to the raw token number
    (token2name get token) match {
      case Some(name) => "'" + name + "'"
      case _ => "'<" + token + ">'"
    }
}
// Thrown by SourceFileScanner instead of reporting through a Reporter.
class MalformedInput(val offset: Offset, val msg: String) extends Exception

/** A scanner for a given source file not necessarily attached to a compilation unit.
 *  Useful for looking inside source files that are not currently compiled to see what's there
 */
class SourceFileScanner(val source: SourceFile) extends Scanner {
  val buf = source.content
  override val decodeUni: Boolean = !settings.nouescape

  // suppress warnings, throw exception on errors
  def deprecationWarning(off: Offset, msg: String, since: String): Unit = ()
  def error(off: Offset, msg: String): Unit = throw new MalformedInput(off, msg)
  def incompleteInputError(off: Offset, msg: String): Unit = throw new MalformedInput(off, msg)
}
/** A scanner over a given compilation unit
 *  Routes diagnostics through the unit's reporter and supports brace healing.
 */
class UnitScanner(val unit: CompilationUnit, patches: List[BracePatch]) extends SourceFileScanner(unit.source) {
  def this(unit: CompilationUnit) = this(unit, List())

  override def deprecationWarning(off: Offset, msg: String, since: String) = currentRun.reporting.deprecationWarning(unit.position(off), msg, since)
  override def error(off: Offset, msg: String) = reporter.error(unit.position(off), msg)
  override def incompleteInputError(off: Offset, msg: String) = currentRun.parsing.incompleteInputError(unit.position(off), msg)

  // Remaining patches to apply, in offset order; consumed by applyBracePatch.
  private var bracePatches: List[BracePatch] = patches

  lazy val parensAnalyzer = new ParensAnalyzer(unit, List())

  override def parenBalance(token: Token) = parensAnalyzer.balance(token)

  /** Propose insertions/deletions of right braces until the RBRACE balance
   *  reaches zero. Disabled when tabs were seen (column math is unreliable).
   */
  override def healBraces(): List[BracePatch] = {
    var patches: List[BracePatch] = List()
    if (!parensAnalyzer.tabSeen) {
      var bal = parensAnalyzer.balance(RBRACE)
      while (bal < 0) {
        patches = new ParensAnalyzer(unit, patches).insertRBrace()
        bal += 1
      }
      while (bal > 0) {
        patches = new ParensAnalyzer(unit, patches).deleteRBrace()
        bal -= 1
      }
    }
    patches
  }

  /** Insert or delete a brace, if a patch exists for this offset */
  override def applyBracePatch(): Boolean = {
    if (bracePatches.isEmpty || bracePatches.head.off != offset) false
    else {
      val patch = bracePatches.head
      bracePatches = bracePatches.tail
      // println("applying brace patch "+offset)//DEBUG
      if (patch.inserted) {
        // synthesize the missing RBRACE; the real token stays queued in `next`
        next copyFrom this
        error(offset, "Missing closing brace `}` assumed here")
        token = RBRACE
        true
      } else {
        // drop the spurious RBRACE and rescan
        error(offset, "Unmatched closing brace '}' ignored here")
        fetchToken()
        false
      }
    }
  }
}
/** Re-scans a unit (with any already-proposed `patches` applied) to compute
 *  bracket/brace balances and matching brace pairs, used to heal missing or
 *  superfluous closing braces. Relies on column positions, so results are
 *  only trusted when no tab characters were encountered (`tabSeen`).
 */
class ParensAnalyzer(unit: CompilationUnit, patches: List[BracePatch]) extends UnitScanner(unit, patches) {
  // per-closing-token balance; decremented on openers, incremented on closers
  val balance = mutable.Map(RPAREN -> 0, RBRACKET -> 0, RBRACE -> 0)

  /** The source code with braces and line starts annotated with [NN] showing the index */
  private def markedSource = {
    val code = unit.source.content
    val braces = code.indices.filter(idx => "{}\\n" contains code(idx)).toSet
    val mapped = code.indices map (idx => if (braces(idx)) s"${code(idx)}[$idx]" else "" + code(idx))
    mapped.mkString("")
  }

  init()
  log(s"ParensAnalyzer for ${unit.source} of length ${unit.source.content.length}\\n```\\n$markedSource\\n```")

  /** The offset of the first token on this line, or next following line if blank
   */
  val lineStart = new ArrayBuffer[Int]

  /** The list of matching top-level brace pairs (each of which may contain nested brace pairs).
   */
  val bracePairs: List[BracePair] = {

    var lineCount = 1
    var lastOffset = 0
    var indent = 0
    // snapshot of `balance` at the last line where indentation was (re)set
    val oldBalance = mutable.Map[Int, Int]()

    def markBalance() = for ((k, v) <- balance) oldBalance(k) = v
    markBalance()

    // consumes tokens until a closing brace or EOF, recording brace pairs in bpbuf;
    // returns (offset, indent) of the terminating RBRACE, or (-1, -1) at EOF
    def scan(bpbuf: ListBuffer[BracePair]): (Int, Int) = {
      if (token != NEWLINE && token != NEWLINES) {
        while (lastOffset < offset) {
          if (buf(lastOffset) == LF) lineCount += 1
          lastOffset += 1
        }
        while (lineCount > lineStart.length) {
          lineStart += offset
          // reset indentation unless there are new opening brackets or
          // braces since last ident line and at the same time there
          // are no new braces.
          if (balance(RPAREN) >= oldBalance(RPAREN) &&
              balance(RBRACKET) >= oldBalance(RBRACKET) ||
              balance(RBRACE) != oldBalance(RBRACE)) {
            indent = column(offset)
            markBalance()
          }
        }
      }

      token match {
        case LPAREN =>
          balance(RPAREN) -= 1; nextToken(); scan(bpbuf)
        case LBRACKET =>
          balance(RBRACKET) -= 1; nextToken(); scan(bpbuf)
        case RPAREN =>
          balance(RPAREN) += 1; nextToken(); scan(bpbuf)
        case RBRACKET =>
          balance(RBRACKET) += 1; nextToken(); scan(bpbuf)
        case LBRACE =>
          balance(RBRACE) -= 1
          val lc = lineCount
          val loff = offset
          val lindent = indent
          val bpbuf1 = new ListBuffer[BracePair]
          nextToken()
          val (roff, rindent) = scan(bpbuf1)
          // only record multi-line brace pairs; single-line pairs are irrelevant for healing
          if (lc != lineCount)
            bpbuf += BracePair(loff, lindent, roff, rindent, bpbuf1.toList)
          scan(bpbuf)
        case RBRACE =>
          balance(RBRACE) += 1
          val off = offset; nextToken(); (off, indent)
        case EOF =>
          (-1, -1)
        case _ =>
          nextToken(); scan(bpbuf)
      }
    }

    val bpbuf = new ListBuffer[BracePair]
    while (token != EOF) {
      val (roff, rindent) = scan(bpbuf)
      // an unmatched RBRACE at top level: wrap everything seen so far in a
      // synthetic pair with no left brace (loff = -1)
      if (roff != -1) {
        val current = BracePair(-1, -1, roff, rindent, bpbuf.toList)
        bpbuf.clear()
        bpbuf += current
      }
    }

    def bracePairString(bp: BracePair, indent: Int): String = {
      val rangeString = {
        import bp._
        val lline = line(loff)
        val rline = line(roff)
        val tokens = List(lline, lindent, rline, rindent) map (n => if (n < 0) "??" else "" + n)
        "%s:%s to %s:%s".format(tokens: _*)
      }
      val outer = (" " * indent) + rangeString
      val inners = bp.nested map (bracePairString(_, indent + 2))

      if (inners.isEmpty) outer
      else inners.mkString(outer + "\\n", "\\n", "")
    }
    def bpString = bpbuf.toList map ("\\n" + bracePairString(_, 0)) mkString ""
    def startString = lineStart.mkString("line starts: [", ", ", "]")

    log(s"\\n$startString\\n$bpString")
    bpbuf.toList
  }

  // set when column() encounters a tab; indentation info is then unreliable
  var tabSeen = false

  /** Binary-search `lineStart` for the line containing `offset`. */
  def line(offset: Offset): Int = {
    @tailrec
    def findLine(lo: Int, hi: Int): Int = {
      val mid = (lo + hi) / 2
      if (offset < lineStart(mid)) findLine(lo, mid - 1)
      else if (mid + 1 < lineStart.length && offset >= lineStart(mid + 1)) findLine(mid + 1, hi)
      else mid
    }
    if (offset <= 0) 0
    else findLine(0, lineStart.length - 1)
  }

  /** Column of `offset`, counting back to the previous line break; flags tabs. */
  def column(offset: Offset): Int = {
    var col = 0
    var i = offset - 1
    while (i >= 0 && buf(i) != CR && buf(i) != LF) {
      if (buf(i) == '\\t') tabSeen = true
      col += 1
      i -= 1
    }
    col
  }

  // insert `patch` into `patches`, keeping the list sorted by offset
  def insertPatch(patches: List[BracePatch], patch: BracePatch): List[BracePatch] = patches match {
    case List() => List(patch)
    case bp :: bps => if (patch.off < bp.off) patch :: patches
                      else bp :: insertPatch(bps, patch)
  }

  /** Propose one insertion of a closing brace: find the innermost pair whose
   *  left indent exceeds its right indent, then patch at the first following
   *  line that dedents back to (or past) the left indent.
   */
  def insertRBrace(): List[BracePatch] = {
    def insert(bps: List[BracePair]): List[BracePatch] = bps match {
      case List() => patches
      case (bp @ BracePair(loff, lindent, roff, rindent, nested)) :: bps1 =>
        if (lindent <= rindent) insert(bps1)
        else {
          // println("patch inside "+bp+"/"+line(loff)+"/"+lineStart(line(loff))+"/"+lindent"/"+rindent)//DEBUG
          val patches1 = insert(nested)
          if (patches1 ne patches) patches1
          else {
            var lin = line(loff) + 1
            while (lin < lineStart.length && column(lineStart(lin)) > lindent)
              lin += 1
            if (lin < lineStart.length) {
              val patches1 = insertPatch(patches, BracePatch(lineStart(lin), inserted = true))
              //println("patch for "+bp+"/"+imbalanceMeasure+"/"+new ParensAnalyzer(unit, patches1).imbalanceMeasure)
              /*if (improves(patches1))*/
              patches1
              /*else insert(bps1)*/
              // (this test did not seem to work very well in practice)
            } else patches
          }
        }
    }
    insert(bracePairs)
  }

  /** Propose one deletion of a closing brace: the innermost pair whose right
   *  indent exceeds its left indent marks the brace to drop.
   */
  def deleteRBrace(): List[BracePatch] = {
    def delete(bps: List[BracePair]): List[BracePatch] = bps match {
      case List() => patches
      case BracePair(loff, lindent, roff, rindent, nested) :: bps1 =>
        if (lindent >= rindent) delete(bps1)
        else {
          val patches1 = delete(nested)
          if (patches1 ne patches) patches1
          else insertPatch(patches, BracePatch(roff, inserted = false))
        }
    }
    delete(bracePairs)
  }

  // don't emit deprecation warnings about identifiers like `macro` or `then`
  // when skimming through the source file trying to heal braces
  override def emitIdentifierDeprecationWarnings = false

  // errors are irrelevant while only measuring balances
  override def error(offset: Offset, msg: String): Unit = ()
}
}
| martijnhoekstra/scala | src/compiler/scala/tools/nsc/ast/parser/Scanners.scala | Scala | apache-2.0 | 53,111 |
package org.elasticsearch.spark.rdd
import scala.collection.JavaConverters.mapAsJavaMapConverter
import scala.collection.Map
import org.apache.commons.logging.LogFactory
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_INPUT_JSON
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_OUTPUT_JSON
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_QUERY
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_RESOURCE_READ
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_RESOURCE_WRITE
import org.elasticsearch.hadoop.cfg.PropertiesSettings
import org.elasticsearch.spark.cfg.SparkSettingsManager
/** Scala entry points for reading RDDs from and writing RDDs to Elasticsearch.
 *
 *  Read overloads build a `ScalaEsRDD`, write overloads run an `EsRDDWriter` job.
 *  `resource` is the ES index/type to target (`ES_RESOURCE_READ` / `ES_RESOURCE_WRITE`),
 *  `query` an optional ES query string, and `cfg` extra connector settings that are
 *  merged on top of the SparkConf-derived configuration.
 */
object EsSpark {

  //
  // Load methods
  //

  def esRDD(sc: SparkContext): RDD[(String, Map[String, AnyRef])] = new ScalaEsRDD[Map[String, AnyRef]](sc)
  def esRDD(sc: SparkContext, cfg: Map[String, String]): RDD[(String, Map[String, AnyRef])] =
    new ScalaEsRDD[Map[String, AnyRef]](sc, cfg)

  def esRDD(sc: SparkContext, resource: String): RDD[(String, Map[String, AnyRef])] =
    new ScalaEsRDD[Map[String, AnyRef]](sc, Map(ES_RESOURCE_READ -> resource))
  def esRDD(sc: SparkContext, resource: String, query: String): RDD[(String, Map[String, AnyRef])] =
    new ScalaEsRDD[Map[String, AnyRef]](sc, Map(ES_RESOURCE_READ -> resource, ES_QUERY -> query))
  def esRDD(sc: SparkContext, resource: String, cfg: Map[String, String]): RDD[(String, Map[String, AnyRef])] =
    new ScalaEsRDD[Map[String, AnyRef]](sc, collection.mutable.Map(cfg.toSeq: _*) += (ES_RESOURCE_READ -> resource))
  def esRDD(sc: SparkContext, resource: String, query: String, cfg: Map[String, String]): RDD[(String, Map[String, AnyRef])] =
    new ScalaEsRDD[Map[String, AnyRef]](sc, collection.mutable.Map(cfg.toSeq: _*) += (ES_RESOURCE_READ -> resource, ES_QUERY -> query))

  // load data as JSON: values are the raw document source strings instead of parsed maps
  def esJsonRDD(sc: SparkContext): RDD[(String, String)] = new ScalaEsRDD[String](sc, Map(ES_OUTPUT_JSON -> true.toString))
  def esJsonRDD(sc: SparkContext, cfg: Map[String, String]): RDD[(String, String)] =
    new ScalaEsRDD[String](sc, collection.mutable.Map(cfg.toSeq: _*) += (ES_OUTPUT_JSON -> true.toString))

  def esJsonRDD(sc: SparkContext, resource: String): RDD[(String, String)] =
    new ScalaEsRDD[String](sc, Map(ES_RESOURCE_READ -> resource, ES_OUTPUT_JSON -> true.toString))
  def esJsonRDD(sc: SparkContext, resource: String, query: String): RDD[(String, String)] =
    new ScalaEsRDD[String](sc, Map(ES_RESOURCE_READ -> resource, ES_QUERY -> query, ES_OUTPUT_JSON -> true.toString))
  def esJsonRDD(sc: SparkContext, resource: String, cfg: Map[String, String]): RDD[(String, String)] =
    new ScalaEsRDD[String](sc, collection.mutable.Map(cfg.toSeq: _*) += (ES_RESOURCE_READ -> resource, ES_OUTPUT_JSON -> true.toString))
  def esJsonRDD(sc: SparkContext, resource: String, query: String, cfg: Map[String, String]): RDD[(String, String)] =
    new ScalaEsRDD[String](sc, collection.mutable.Map(cfg.toSeq: _*) += (ES_RESOURCE_READ -> resource, ES_QUERY -> query, ES_OUTPUT_JSON -> true.toString))

  //
  // Save methods
  //

  def saveToEs(rdd: RDD[_], resource: String) { saveToEs(rdd, Map(ES_RESOURCE_WRITE -> resource)) }
  def saveToEs(rdd: RDD[_], resource: String, cfg: Map[String, String]) {
    saveToEs(rdd, collection.mutable.Map(cfg.toSeq: _*) += (ES_RESOURCE_WRITE -> resource))
  }
  def saveToEs(rdd: RDD[_], cfg: Map[String, String]) {
    CompatUtils.warnSchemaRDD(rdd, LogFactory.getLog("org.elasticsearch.spark.rdd.EsSpark"))

    // NOTE(review): rdd.take(1) presumably triggers a (small) Spark job just to
    // detect emptiness; also note saveToEsWithMeta below skips this check — verify
    // whether the asymmetry is intentional.
    if (rdd == null || rdd.partitions.length == 0 || rdd.take(1).length == 0) {
      return
    }
    val sparkCfg = new SparkSettingsManager().load(rdd.sparkContext.getConf)
    val config = new PropertiesSettings().load(sparkCfg.save())
    config.merge(cfg.asJava)

    rdd.sparkContext.runJob(rdd, new EsRDDWriter(config.save()).write _)
  }

  // Save with metadata: the key of each pair is treated as per-document metadata
  def saveToEsWithMeta[K,V](rdd: RDD[(K,V)], resource: String) { saveToEsWithMeta(rdd, Map(ES_RESOURCE_WRITE -> resource)) }
  def saveToEsWithMeta[K,V](rdd: RDD[(K,V)], resource: String, cfg: Map[String, String]) {
    saveToEsWithMeta(rdd, collection.mutable.Map(cfg.toSeq: _*) += (ES_RESOURCE_WRITE -> resource))
  }
  def saveToEsWithMeta[K,V](rdd: RDD[(K,V)], cfg: Map[String, String]) {
    CompatUtils.warnSchemaRDD(rdd, LogFactory.getLog("org.elasticsearch.spark.rdd.EsSpark"))

    if (rdd == null || rdd.partitions.length == 0) {
      return
    }

    val sparkCfg = new SparkSettingsManager().load(rdd.sparkContext.getConf)
    val config = new PropertiesSettings().load(sparkCfg.save())
    config.merge(cfg.asJava)

    // second constructor argument enables metadata handling in the writer
    rdd.sparkContext.runJob(rdd, new EsRDDWriter(config.save(), true).write _)
  }

  // JSON variant: input RDD elements are already-serialized JSON documents
  def saveJsonToEs(rdd: RDD[_], resource: String) { saveToEs(rdd, resource, Map(ES_INPUT_JSON -> true.toString)) }
  def saveJsonToEs(rdd: RDD[_], resource: String, cfg: Map[String, String]) {
    saveToEs(rdd, resource, collection.mutable.Map(cfg.toSeq: _*) += (ES_INPUT_JSON -> true.toString))
  }
  def saveJsonToEs(rdd: RDD[_], cfg: Map[String, String]) {
    saveToEs(rdd, collection.mutable.Map(cfg.toSeq: _*) += (ES_INPUT_JSON -> true.toString))
  }
}
package org.openmole.gui.server.core
/*
* Copyright (C) 22/09/14 // mathieu.leclaire@openmole.org
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import java.io.File
import org.eclipse.jetty.server.Server
import org.eclipse.jetty.servlet.DefaultServlet
import org.eclipse.jetty.webapp.WebAppContext
import org.scalatra.servlet.ScalatraListener
import javax.servlet.ServletContext
import org.scalatra._
import org.eclipse.jetty.util.resource.{ Resource ⇒ Res }
/** Embedded Jetty server hosting the OpenMOLE GUI web application.
 *
 *  @param port   optional HTTP port; defaults to 8080 when absent
 *  @param webapp directory containing the web application resources
 */
class GUIServer(port: Option[Int], webapp: File) {

  // resolved listening port
  val p = port.getOrElse(8080)
  val server = new Server(p)

  // wire the webapp context at the server root
  val context = new WebAppContext()
  context.setContextPath("/")
  context.setResourceBase(webapp.getAbsolutePath)
  context.setClassLoader(classOf[GUIServer].getClassLoader)
  context.addEventListener(new ScalatraListener)
  server.setHandler(context)

  /** Starts the server and blocks the calling thread until it terminates. */
  def start(): Unit = {
    server.start()
    server.join()
  }

  /** Stops the server and waits for its threads to finish. */
  def end(): Unit = {
    server.stop()
    server.join()
  }
}
| ISCPIF/PSEExperiments | openmole-src/openmole/gui/server/org.openmole.gui.server.core/src/main/scala/org/openmole/gui/server/core/GUIServer.scala | Scala | agpl-3.0 | 1,563 |
package org.emailscript.dnsbl
object SpamHausDnsbl {
  /** Hostname of the Spamhaus domain block list (DBL). */
  val SpamHausHost = "dbl.spamhaus.org"

  /** Factory for a new lookup instance. */
  def apply(): SpamHausDnsbl = new SpamHausDnsbl
}
/** DNSBL lookup against the Spamhaus DBL (`dbl.spamhaus.org`). */
class SpamHausDnsbl extends DnsblLookup {

  /** Translates a DBL answer address into a human-readable description. */
  override def getDescriptionForResult(result: String): String =
    result match {
      case "127.0.1.1" => "Spam domain (SpamHaus)"
      case "127.0.1.2" => "spammed redirector domain (SpamHaus)"
      case _           => "Unrecognized result"
    }

  /** Builds the query name by appending the DBL zone to the host. */
  override def getLookupName(host: String): String =
    s"$host.${SpamHausDnsbl.SpamHausHost}"
}
| OdysseusLevy/emailscript | src/main/scala/org/emailscript/dnsbl/SpamHausDnsbl.scala | Scala | lgpl-3.0 | 533 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.benchmarks
import java.util.concurrent.TimeUnit
import monix.eval.Task
import org.openjdk.jmh.annotations._
import scala.concurrent.Await
import scala.concurrent.duration.Duration
/** To do comparative benchmarks between versions:
*
* benchmarks/run-benchmark TaskMapCallsBenchmark
*
* This will generate results in `benchmarks/results`.
*
* Or to run the benchmark from within SBT:
*
* jmh:run -i 10 -wi 10 -f 2 -t 1 monix.benchmarks.TaskMapCallsBenchmark
*
* Which means "10 iterations", "10 warm-up iterations", "2 forks", "1 thread".
* Please note that benchmarks should be usually executed at least in
* 10 iterations (as a rule of thumb), but more is better.
*/
@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class TaskMapCallsBenchmark {
  import TaskMapCallsBenchmark.test

  // each case performs ~12000 total map calls, split differently between
  // iterations and map-chain length (batch)
  @Benchmark
  def one(): Long = test(12000, 1)

  @Benchmark
  def batch30(): Long = test(12000 / 30, 30)

  @Benchmark
  def batch120(): Long = test(12000 / 120, 120)
}
object TaskMapCallsBenchmark {
  /** Builds a `Task` wrapped in `batch` chained `map` calls, then runs it
   *  `iterations` times, summing the results (each run yields `batch`).
   */
  def test(iterations: Int, batch: Int): Long = {
    val inc = (x: Int) => x + 1
    // chain `batch` map calls on top of the initial task
    val task = (0 until batch).foldLeft(Task.eval(0))((t, _) => t.map(inc))

    var sum = 0L
    var i = 0
    while (i < iterations) {
      sum += Await.result(task.runToFuture, Duration.Inf)
      i += 1
    }
    sum
  }
}
/** Macro that, for the static type of its argument, generates code printing
 *  one line per declared method of that type's class: "fullName : returnType",
 *  sorted alphabetically.
 */
object Macros {
  import scala.quoted.*

  inline def go[T](inline t: T) = ${ impl('t) }

  def impl[T](expr: Expr[T])(using Quotes) : Expr[Unit] = {
    import quotes.reflect.*
    val tree = expr.asTerm
    val methods =
      tree.tpe.classSymbol.get.declaredMethods.map { m =>
        // NOTE(review): `classSymbol.get` aborts if T's type has no class symbol,
        // and the match below only handles DefDef — presumably declaredMethods
        // always yields DefDef trees; verify for edge cases.
        val name = m.fullName
        m.tree match
          case ddef: DefDef =>
            val returnType = ddef.returnTpt.tpe.show
            s"$name : $returnType"
      }.sorted
    // splice a sequence of println calls, one per formatted method signature
    methods.foldLeft('{}) { (res, m) => '{ $res; println(${Expr(m)}) } }
  }
}
| dotty-staging/dotty | tests/run-macros/inferred-repeated-result/test_1.scala | Scala | apache-2.0 | 547 |
/*
* Copyright (c) 2014 - 2015 Contributor. All rights reserved.
*/
package org.scalaide.debug.internal.expression
package proxies.primitives
import org.scalaide.debug.internal.expression.Names.Java
import org.scalaide.debug.internal.expression.Names.Scala
import org.scalaide.debug.internal.expression.context.JdiContext
import com.sun.jdi.LongValue
/**
 * JdiProxy implementation for `long`, `java.lang.Long` and `scala.Long`.
 */
case class LongJdiProxy(__context: JdiContext, __value: LongValue)
  extends PrimitiveJdiProxy[Long, LongJdiProxy, LongValue](LongJdiProxy) {

  // unchecked cast: callers are expected to request the matching primitive type
  override def __primitiveValue[I] = __value.value.asInstanceOf[I]
}
/** Companion providing mirroring of plain `Long` values into JDI `LongValue`s. */
object LongJdiProxy extends PrimitiveJdiProxyCompanion[Long, LongJdiProxy, LongValue](TypeNames.Long) {
  protected def mirror(value: Long, context: JdiContext): LongValue = context.mirrorOf(value)
}
| romanowski/scala-ide | org.scala-ide.sdt.debug.expression/src/org/scalaide/debug/internal/expression/proxies/primitives/LongJdiProxy.scala | Scala | bsd-3-clause | 853 |
/*
* Contributions:
* Jean-Francois GUENA: implement "suffixed collection name" feature (issue #39 partially fulfilled)
* ...
*/
package akka.contrib.persistence.mongodb
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
// Runs the snapshot TCK against the Casbah driver with default collection names
@RunWith(classOf[JUnitRunner])
class CasbahPersistenceSnapshotterTckSpec extends SnapshotTckSpec(classOf[CasbahPersistenceExtension], "casbah")
// Same TCK, but with the "suffixed collection name" feature enabled (issue #39)
@RunWith(classOf[JUnitRunner])
class CasbahSuffixPersistenceSnapshotterTckSpec extends SnapshotTckSpec(classOf[CasbahPersistenceExtension], "casbah", SuffixCollectionNamesTest.extendedConfig)
package model.repositories.anorm
import anorm.SqlParser._
import anorm._
import com.mohiva.play.silhouette.impl.providers.OAuth2Info
import model.dtos._
import model.dtos.{DBPasswordInfo, DBLoginInfo}
import model.repositories.anorm._
import play.api.db.DB
import scala.collection.mutable
import scala.concurrent.Future
import play.api.Play.current
import _root_.anorm._
import _root_.anorm.SqlParser._
import play.api.db.DB
/**
* Created by pisaris on 9/7/2015.
*/
/** Anorm row parser turning an OAuth2 DB row into an `OAuth2Info`.
 *  The `id` and `logininfoid` columns are read from the row but not carried
 *  over into the resulting value.
 */
object OAuth2InfoParser {
  val Parse: RowParser[OAuth2Info] = {
    long("id") ~
      str("accesstoken") ~
      get[Option[String]]("tokentype") ~
      get[Option[Int]]("expiresin") ~
      get[Option[String]]("refreshtoken") ~
      long("logininfoid") map {
        case id ~ accessToken ~ tokenType ~ expiresIn ~ refreshToken ~ loginInfoId =>
          OAuth2Info(accessToken, tokenType, expiresIn, refreshToken)
      }
  }
}
| scify/DemocracIT-Web | app/model/repositories/anorm/OAuth2InfoParser.scala | Scala | apache-2.0 | 912 |
package com.blinkbox.books.catalogue.searchv1
import com.blinkbox.books.catalogue.searchv1.V1SearchService.{Book => BookResponse, BookSearchResponse}
import com.blinkbox.books.catalogue.common.Events.{Book => BookMessage}
import com.blinkbox.books.catalogue.common.BookFixtures
import org.scalatest.{FlatSpec, Matchers}
import spray.http.StatusCodes
/** Specs for title-based search relevance: stop-word handling and ranking of
 *  title matches vs. content matches against the v1 search endpoint.
 */
class TitleSearchSpecs extends FlatSpec with Matchers with ApiSpecBase {

  val f = BookFixtures

  // issues a GET against the search endpoint and runs `f` in the response-check context
  private def queryAndCheck[T](q: String)(f: => T) = Get(s"/catalogue/search/books?q=$q") ~> routes ~> check(f)

  // builds the expected response payload from book fixtures, keeping only
  // contributors whose role is "author"; an empty book list maps to None
  private def toBookResponse(q: String, total: Int, books: BookMessage*): BookSearchResponse = {
    val booksList = if (books.size == 0) None else Some(books.map { b =>
      BookResponse(b.isbn, b.title, b.contributors.filter(_.role.toLowerCase == "author").map(_.displayName))
    }.toList)

    BookSearchResponse(q, booksList, total)
  }

  "Matching a document" should "ignore stop-words in the document title" in {
    catalogueIndex indexAndCheck(f.theUniverse, f.universe, f.theUniverseAndOtherThings) andAfter { _ =>
      queryAndCheck("universe") {
        status should equal(StatusCodes.OK)
        responseAs[BookSearchResponse].numberOfResults should equal(3)
      }
    }
  }

  it should "ignore stop-words in the provided query" in {
    catalogueIndex indexAndCheck(f.theUniverse, f.universe, f.theUniverseAndOtherThings) andAfter { _ =>
      queryAndCheck("a%20the%20for%20universe") {
        status should equal(StatusCodes.OK)
        responseAs[BookSearchResponse].numberOfResults should equal(3)
      }
    }
  }

  it should "rank documents that perfectly match the title on top of documents that match but include also stop-words" in {
    catalogueIndex indexAndCheck(f.theUniverseAndOtherThings, f.theUniverse, f.universe) andAfter { _ =>
      queryAndCheck("universe") {
        status should equal(StatusCodes.OK)
        val respBooks = responseAs[BookSearchResponse]
        respBooks.numberOfResults should equal(3)
        respBooks should equal(toBookResponse("universe", 3, f.universe, f.theUniverse, f.theUniverseAndOtherThings))
      }
    }
  }

  it should "rank documents that perfectly match the title including stop-words in the query on top of documents that match but do not have the provided stop-words in the title" in {
    catalogueIndex indexAndCheck(f.theUniverseAndOtherThings, f.theUniverse, f.universe) andAfter { _ =>
      queryAndCheck("the%20universe") {
        status should equal(StatusCodes.OK)
        val respBooks = responseAs[BookSearchResponse]
        respBooks.numberOfResults should equal(3)
        respBooks should equal(toBookResponse("the universe", 3, f.theUniverse, f.theUniverseAndOtherThings, f.universe))
      }
    }
  }

  it should "rank documents that match on the title on top of documents that match on the content" in {
    catalogueIndex indexAndCheck(f.theUniverseAndOtherThings, f.everything, f.theUniverse, f.universe) andAfter { _ =>
      queryAndCheck("universe") {
        status should equal(StatusCodes.OK)
        val respBooks = responseAs[BookSearchResponse]
        respBooks.numberOfResults should equal(4)
        respBooks should equal(toBookResponse("universe", 4, f.universe, f.theUniverse, f.theUniverseAndOtherThings, f.everything))
      }
    }
  }

  it should "rank a document that matches perfectly on the title above a document that matches both title (partially) and content" in {
    catalogueIndex indexAndCheck(f.universeAndOtherThingsWithDescription, f.theUniverse, f.universe) andAfter { _ =>
      queryAndCheck("universe") {
        status should equal(StatusCodes.OK)
        val respBooks = responseAs[BookSearchResponse]
        respBooks.numberOfResults should equal(3)
        respBooks should equal(toBookResponse("universe", 3, f.universe, f.theUniverse, f.universeAndOtherThingsWithDescription))
      }
    }
  }

  it should "work correctly with title permutations (see CAT-77)" in {
    catalogueIndex indexAndCheck f.titlePermutationsBook andAfter { _ =>
      queryAndCheck("apple%20banana") {
        status should equal(StatusCodes.OK)
        responseAs[BookSearchResponse].numberOfResults should equal(1)
      }

      queryAndCheck("apple%20banana%20pear") {
        status should equal(StatusCodes.OK)
        responseAs[BookSearchResponse].numberOfResults should equal(1)
      }

      queryAndCheck("apple%20pear") {
        status should equal(StatusCodes.OK)
        responseAs[BookSearchResponse].numberOfResults should equal(1)
      }
    }
  }
}
| blinkboxbooks/catalogue-v2.scala | catalogue2-search-public/src/test/scala/com/blinkbox/books/catalogue/searchv1/TitleSearchSpecs.scala | Scala | mit | 4,575 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst
import java.beans.{Introspector, PropertyDescriptor}
import java.lang.{Iterable => JIterable}
import java.lang.reflect.Type
import java.util.{Iterator => JIterator, List => JList, Map => JMap}
import scala.language.existentials
import com.google.common.reflect.TypeToken
import org.apache.spark.sql.catalyst.analysis.{GetColumnByOrdinal, UnresolvedAttribute, UnresolvedExtractValue}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.objects._
import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, DateTimeUtils, GenericArrayData}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
* Type-inference utilities for POJOs and Java collections.
*/
object JavaTypeInference {
  // TypeTokens for the collection interfaces recognized during inference
  private val iterableType = TypeToken.of(classOf[JIterable[_]])
  private val mapType = TypeToken.of(classOf[JMap[_, _]])
  private val listType = TypeToken.of(classOf[JList[_]])
  // generic return types used to resolve element/key/value types of collections
  private val iteratorReturnType = classOf[JIterable[_]].getMethod("iterator").getGenericReturnType
  private val nextReturnType = classOf[JIterator[_]].getMethod("next").getGenericReturnType
  private val keySetReturnType = classOf[JMap[_, _]].getMethod("keySet").getGenericReturnType
  private val valuesReturnType = classOf[JMap[_, _]].getMethod("values").getGenericReturnType
/**
* Infers the corresponding SQL data type of a JavaBean class.
* @param beanClass Java type
* @return (SQL data type, nullable)
*/
def inferDataType(beanClass: Class[_]): (DataType, Boolean) = {
inferDataType(TypeToken.of(beanClass))
}
/**
* Infers the corresponding SQL data type of a Java type.
* @param beanType Java type
* @return (SQL data type, nullable)
*/
private[sql] def inferDataType(beanType: Type): (DataType, Boolean) = {
inferDataType(TypeToken.of(beanType))
}
  /**
   * Infers the corresponding SQL data type of a Java type.
   * @param typeToken Java type
   * @return (SQL data type, nullable)
   */
  private def inferDataType(typeToken: TypeToken[_]): (DataType, Boolean) = {
    typeToken.getRawType match {
      // user-defined types: annotation-registered first, then the global registry
      case c: Class[_] if c.isAnnotationPresent(classOf[SQLUserDefinedType]) =>
        (c.getAnnotation(classOf[SQLUserDefinedType]).udt().newInstance(), true)

      case c: Class[_] if UDTRegistration.exists(c.getName) =>
        val udt = UDTRegistration.getUDTFor(c.getName).get.newInstance()
          .asInstanceOf[UserDefinedType[_ >: Null]]
        (udt, true)

      case c: Class[_] if c == classOf[java.lang.String] => (StringType, true)
      case c: Class[_] if c == classOf[Array[Byte]] => (BinaryType, true)

      // Java primitives: non-nullable
      case c: Class[_] if c == java.lang.Short.TYPE => (ShortType, false)
      case c: Class[_] if c == java.lang.Integer.TYPE => (IntegerType, false)
      case c: Class[_] if c == java.lang.Long.TYPE => (LongType, false)
      case c: Class[_] if c == java.lang.Double.TYPE => (DoubleType, false)
      case c: Class[_] if c == java.lang.Byte.TYPE => (ByteType, false)
      case c: Class[_] if c == java.lang.Float.TYPE => (FloatType, false)
      case c: Class[_] if c == java.lang.Boolean.TYPE => (BooleanType, false)

      // boxed primitives: nullable
      case c: Class[_] if c == classOf[java.lang.Short] => (ShortType, true)
      case c: Class[_] if c == classOf[java.lang.Integer] => (IntegerType, true)
      case c: Class[_] if c == classOf[java.lang.Long] => (LongType, true)
      case c: Class[_] if c == classOf[java.lang.Double] => (DoubleType, true)
      case c: Class[_] if c == classOf[java.lang.Byte] => (ByteType, true)
      case c: Class[_] if c == classOf[java.lang.Float] => (FloatType, true)
      case c: Class[_] if c == classOf[java.lang.Boolean] => (BooleanType, true)

      case c: Class[_] if c == classOf[java.math.BigDecimal] => (DecimalType.SYSTEM_DEFAULT, true)
      case c: Class[_] if c == classOf[java.math.BigInteger] => (DecimalType.BigIntDecimal, true)
      case c: Class[_] if c == classOf[java.sql.Date] => (DateType, true)
      case c: Class[_] if c == classOf[java.sql.Timestamp] => (TimestampType, true)

      // arrays and iterables become ArrayType of the recursively inferred element type
      case _ if typeToken.isArray =>
        val (dataType, nullable) = inferDataType(typeToken.getComponentType)
        (ArrayType(dataType, nullable), true)

      case _ if iterableType.isAssignableFrom(typeToken) =>
        val (dataType, nullable) = inferDataType(elementType(typeToken))
        (ArrayType(dataType, nullable), true)

      case _ if mapType.isAssignableFrom(typeToken) =>
        val (keyType, valueType) = mapKeyValueType(typeToken)
        val (keyDataType, _) = inferDataType(keyType)
        val (valueDataType, nullable) = inferDataType(valueType)
        (MapType(keyDataType, valueDataType, nullable), true)

      case _ =>
        // TODO: we should only collect properties that have getter and setter. However, some tests
        // pass in scala case class as java bean class which doesn't have getter and setter.
        val beanInfo = Introspector.getBeanInfo(typeToken.getRawType)
        val properties = beanInfo.getPropertyDescriptors.filterNot(_.getName == "class")
        val fields = properties.map { property =>
          val returnType = typeToken.method(property.getReadMethod).getReturnType
          val (dataType, nullable) = inferDataType(returnType)
          new StructField(property.getName, dataType, nullable)
        }
        (new StructType(fields), true)
    }
  }
private def getJavaBeanProperties(beanClass: Class[_]): Array[PropertyDescriptor] = {
val beanInfo = Introspector.getBeanInfo(beanClass)
beanInfo.getPropertyDescriptors
.filter(p => p.getReadMethod != null && p.getWriteMethod != null)
}
private def elementType(typeToken: TypeToken[_]): TypeToken[_] = {
val typeToken2 = typeToken.asInstanceOf[TypeToken[_ <: JIterable[_]]]
val iterableSuperType = typeToken2.getSupertype(classOf[JIterable[_]])
val iteratorType = iterableSuperType.resolveType(iteratorReturnType)
iteratorType.resolveType(nextReturnType)
}
private def mapKeyValueType(typeToken: TypeToken[_]): (TypeToken[_], TypeToken[_]) = {
val typeToken2 = typeToken.asInstanceOf[TypeToken[_ <: JMap[_, _]]]
val mapSuperType = typeToken2.getSupertype(classOf[JMap[_, _]])
val keyType = elementType(mapSuperType.resolveType(keySetReturnType))
val valueType = elementType(mapSuperType.resolveType(valuesReturnType))
keyType -> valueType
}
  /**
   * Returns the Spark SQL DataType for a given java class. Where this is not an exact mapping
   * to a native type, an ObjectType is returned.
   *
   * Unlike `inferDataType`, this function doesn't do any massaging of types into the Spark SQL type
   * system. As a result, ObjectType will be returned for things like boxed Integers.
   */
  private def inferExternalType(cls: Class[_]): DataType = cls match {
    // only unboxed primitives and byte arrays map to native SQL types here
    case c if c == java.lang.Boolean.TYPE => BooleanType
    case c if c == java.lang.Byte.TYPE => ByteType
    case c if c == java.lang.Short.TYPE => ShortType
    case c if c == java.lang.Integer.TYPE => IntegerType
    case c if c == java.lang.Long.TYPE => LongType
    case c if c == java.lang.Float.TYPE => FloatType
    case c if c == java.lang.Double.TYPE => DoubleType
    case c if c == classOf[Array[Byte]] => BinaryType
    case _ => ObjectType(cls)
  }
/**
* Returns an expression that can be used to deserialize an internal row to an object of java bean
* `T` with a compatible schema. Fields of the row will be extracted using UnresolvedAttributes
* of the same name as the constructor arguments. Nested classes will have their fields accessed
* using UnresolvedExtractValue.
*/
def deserializerFor(beanClass: Class[_]): Expression = {
deserializerFor(TypeToken.of(beanClass), None)
}
  /**
   * Recursively builds the deserializer expression for `typeToken`.
   *
   * @param typeToken the (possibly generic) java type being deserialized
   * @param path expression pointing at the current field within the row;
   *             `None` means the value is the top-level column itself
   */
  private def deserializerFor(typeToken: TypeToken[_], path: Option[Expression]): Expression = {
    /** Returns the current path with a sub-field extracted. */
    def addToPath(part: String): Expression = path
      .map(p => UnresolvedExtractValue(p, expressions.Literal(part)))
      .getOrElse(UnresolvedAttribute(part))

    /** Returns the current path or `GetColumnByOrdinal`. */
    def getPath: Expression = path.getOrElse(GetColumnByOrdinal(0, inferDataType(typeToken)._1))

    typeToken.getRawType match {
      // Natively representable types (primitives etc.) need no conversion at all.
      case c if !inferExternalType(c).isInstanceOf[ObjectType] => getPath

      // Boxed java.lang wrappers: re-box the extracted primitive via the wrapper's constructor.
      case c if c == classOf[java.lang.Short] =>
        NewInstance(c, getPath :: Nil, ObjectType(c))
      case c if c == classOf[java.lang.Integer] =>
        NewInstance(c, getPath :: Nil, ObjectType(c))
      case c if c == classOf[java.lang.Long] =>
        NewInstance(c, getPath :: Nil, ObjectType(c))
      case c if c == classOf[java.lang.Double] =>
        NewInstance(c, getPath :: Nil, ObjectType(c))
      case c if c == classOf[java.lang.Byte] =>
        NewInstance(c, getPath :: Nil, ObjectType(c))
      case c if c == classOf[java.lang.Float] =>
        NewInstance(c, getPath :: Nil, ObjectType(c))
      case c if c == classOf[java.lang.Boolean] =>
        NewInstance(c, getPath :: Nil, ObjectType(c))

      // Temporal types are converted back from Catalyst's internal representation
      // by the static helpers on DateTimeUtils.
      case c if c == classOf[java.sql.Date] =>
        StaticInvoke(
          DateTimeUtils.getClass,
          ObjectType(c),
          "toJavaDate",
          getPath :: Nil,
          propagateNull = true)

      case c if c == classOf[java.sql.Timestamp] =>
        StaticInvoke(
          DateTimeUtils.getClass,
          ObjectType(c),
          "toJavaTimestamp",
          getPath :: Nil,
          propagateNull = true)

      case c if c == classOf[java.lang.String] =>
        Invoke(getPath, "toString", ObjectType(classOf[String]))

      case c if c == classOf[java.math.BigDecimal] =>
        Invoke(getPath, "toJavaBigDecimal", ObjectType(classOf[java.math.BigDecimal]))

      case c if c.isArray =>
        val elementType = c.getComponentType
        // Arrays of JVM primitives have dedicated bulk conversion methods;
        // everything else goes through the generic per-element MapObjects path.
        val primitiveMethod = elementType match {
          case c if c == java.lang.Boolean.TYPE => Some("toBooleanArray")
          case c if c == java.lang.Byte.TYPE => Some("toByteArray")
          case c if c == java.lang.Short.TYPE => Some("toShortArray")
          case c if c == java.lang.Integer.TYPE => Some("toIntArray")
          case c if c == java.lang.Long.TYPE => Some("toLongArray")
          case c if c == java.lang.Float.TYPE => Some("toFloatArray")
          case c if c == java.lang.Double.TYPE => Some("toDoubleArray")
          case _ => None
        }

        primitiveMethod.map { method =>
          Invoke(getPath, method, ObjectType(c))
        }.getOrElse {
          Invoke(
            MapObjects(
              p => deserializerFor(typeToken.getComponentType, Some(p)),
              getPath,
              inferDataType(elementType)._1),
            "array",
            ObjectType(c))
        }

      // java.util.List: deserialize elements into an Array[Any], then wrap via Arrays.asList.
      case c if listType.isAssignableFrom(typeToken) =>
        val et = elementType(typeToken)
        val array =
          Invoke(
            MapObjects(
              p => deserializerFor(et, Some(p)),
              getPath,
              inferDataType(et)._1),
            "array",
            ObjectType(classOf[Array[Any]]))

        StaticInvoke(classOf[java.util.Arrays], ObjectType(c), "asList", array :: Nil)

      // java.util.Map: keys and values are deserialized independently and
      // recombined into a java map by ArrayBasedMapData.toJavaMap.
      case _ if mapType.isAssignableFrom(typeToken) =>
        val (keyType, valueType) = mapKeyValueType(typeToken)
        val keyDataType = inferDataType(keyType)._1
        val valueDataType = inferDataType(valueType)._1

        val keyData =
          Invoke(
            MapObjects(
              p => deserializerFor(keyType, Some(p)),
              Invoke(getPath, "keyArray", ArrayType(keyDataType)),
              keyDataType),
            "array",
            ObjectType(classOf[Array[Any]]))

        val valueData =
          Invoke(
            MapObjects(
              p => deserializerFor(valueType, Some(p)),
              Invoke(getPath, "valueArray", ArrayType(valueDataType)),
              valueDataType),
            "array",
            ObjectType(classOf[Array[Any]]))

        StaticInvoke(
          ArrayBasedMapData.getClass,
          ObjectType(classOf[JMap[_, _]]),
          "toJavaMap",
          keyData :: valueData :: Nil)

      // Fallback: treat the class as a java bean — instantiate with the no-arg
      // constructor, then populate each property through its setter.
      case other =>
        val properties = getJavaBeanProperties(other)
        assert(properties.length > 0)

        val setters = properties.map { p =>
          val fieldName = p.getName
          val fieldType = typeToken.method(p.getReadMethod).getReturnType
          val (_, nullable) = inferDataType(fieldType)
          val constructor = deserializerFor(fieldType, Some(addToPath(fieldName)))
          // Non-nullable fields get a runtime null check wrapped around them.
          val setter = if (nullable) {
            constructor
          } else {
            AssertNotNull(constructor, Seq("currently no type path record in java"))
          }
          p.getWriteMethod.getName -> setter
        }.toMap

        val newInstance = NewInstance(other, Nil, ObjectType(other), propagateNull = false)
        val result = InitializeJavaBean(newInstance, setters)

        // For nested beans a null struct must deserialize to a null object,
        // not to a bean with all-null fields.
        if (path.nonEmpty) {
          expressions.If(
            IsNull(getPath),
            expressions.Literal.create(null, ObjectType(other)),
            result
          )
        } else {
          result
        }
    }
  }
  /**
   * Returns an expression for serializing an object of the given type to an internal row.
   *
   * @param beanClass the java bean class to build a serializer expression for
   * @return a [[CreateNamedStruct]] whose fields mirror the bean's properties;
   *         the bean instance itself is read from bound input slot 0
   */
  def serializerFor(beanClass: Class[_]): CreateNamedStruct = {
    val inputObject = BoundReference(0, ObjectType(beanClass), nullable = true)
    serializerFor(inputObject, TypeToken.of(beanClass)).asInstanceOf[CreateNamedStruct]
  }
  /**
   * Recursively builds the serializer expression converting `inputObject`
   * (of java type `typeToken`) into Catalyst's internal representation.
   */
  private def serializerFor(inputObject: Expression, typeToken: TypeToken[_]): Expression = {

    /** Serializes a sequence-like input into a Catalyst array value. */
    def toCatalystArray(input: Expression, elementType: TypeToken[_]): Expression = {
      val (dataType, nullable) = inferDataType(elementType)
      if (ScalaReflection.isNativeType(dataType)) {
        // Native element types can be wrapped wholesale; no per-element conversion needed.
        NewInstance(
          classOf[GenericArrayData],
          input :: Nil,
          dataType = ArrayType(dataType, nullable))
      } else {
        MapObjects(serializerFor(_, elementType), input, ObjectType(elementType.getRawType))
      }
    }

    // Inputs that are not ObjectType are already in the internal representation.
    if (!inputObject.dataType.isInstanceOf[ObjectType]) {
      inputObject
    } else {
      typeToken.getRawType match {
        case c if c == classOf[String] =>
          StaticInvoke(
            classOf[UTF8String],
            StringType,
            "fromString",
            inputObject :: Nil)

        case c if c == classOf[java.sql.Timestamp] =>
          StaticInvoke(
            DateTimeUtils.getClass,
            TimestampType,
            "fromJavaTimestamp",
            inputObject :: Nil)

        case c if c == classOf[java.sql.Date] =>
          StaticInvoke(
            DateTimeUtils.getClass,
            DateType,
            "fromJavaDate",
            inputObject :: Nil)

        case c if c == classOf[java.math.BigDecimal] =>
          StaticInvoke(
            Decimal.getClass,
            DecimalType.SYSTEM_DEFAULT,
            "apply",
            inputObject :: Nil)

        // Boxed java.lang wrappers: unbox to the underlying primitive.
        case c if c == classOf[java.lang.Boolean] =>
          Invoke(inputObject, "booleanValue", BooleanType)
        case c if c == classOf[java.lang.Byte] =>
          Invoke(inputObject, "byteValue", ByteType)
        case c if c == classOf[java.lang.Short] =>
          Invoke(inputObject, "shortValue", ShortType)
        case c if c == classOf[java.lang.Integer] =>
          Invoke(inputObject, "intValue", IntegerType)
        case c if c == classOf[java.lang.Long] =>
          Invoke(inputObject, "longValue", LongType)
        case c if c == classOf[java.lang.Float] =>
          Invoke(inputObject, "floatValue", FloatType)
        case c if c == classOf[java.lang.Double] =>
          Invoke(inputObject, "doubleValue", DoubleType)

        case _ if typeToken.isArray =>
          toCatalystArray(inputObject, typeToken.getComponentType)

        case _ if listType.isAssignableFrom(typeToken) =>
          toCatalystArray(inputObject, elementType(typeToken))

        case _ if mapType.isAssignableFrom(typeToken) =>
          val (keyType, valueType) = mapKeyValueType(typeToken)

          ExternalMapToCatalyst(
            inputObject,
            ObjectType(keyType.getRawType),
            serializerFor(_, keyType),
            ObjectType(valueType.getRawType),
            serializerFor(_, valueType)
          )

        // Fallback: serialize a java bean by reading every property through its
        // getter and emitting (name, serialized value) pairs for a named struct.
        case other =>
          val properties = getJavaBeanProperties(other)
          if (properties.length > 0) {
            CreateNamedStruct(properties.flatMap { p =>
              val fieldName = p.getName
              val fieldType = typeToken.method(p.getReadMethod).getReturnType
              val fieldValue = Invoke(
                inputObject,
                p.getReadMethod.getName,
                inferExternalType(fieldType.getRawType))
              expressions.Literal(fieldName) :: serializerFor(fieldValue, fieldType) :: Nil
            })
          } else {
            throw new UnsupportedOperationException(
              s"Cannot infer type for class ${other.getName} because it is not bean-compliant")
          }
      }
    }
  }
}
| Panos-Bletsos/spark-cost-model-optimizer | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala | Scala | apache-2.0 | 17,924 |
package helloscala.common.util
import java.time.{LocalDateTime, ZoneOffset, ZonedDateTime}
import org.scalatest.{MustMatchers, WordSpec}
/**
* Created by yangbajing(yangbajing@gmail.com) on 2017-03-20.
*/
class TimeUtilsTest extends WordSpec with MustMatchers {

  // UTC+08:00, the offset the parser is expected to fall back to when the
  // input string carries no explicit zone information.
  private val East8 = ZoneOffset.ofHours(8)

  /** Builds the expected [[ZonedDateTime]] with zero nanoseconds. */
  private def zdt(year: Int, month: Int, day: Int,
                  hour: Int, minute: Int, second: Int,
                  offset: ZoneOffset = East8): ZonedDateTime =
    ZonedDateTime.of(year, month, day, hour, minute, second, 0, offset)

  "TimeUtilsTest" must {
    "toLocalDate" in {
      TimeUtils.toLocalDateTime("2017-01-23 22:32:11") mustBe LocalDateTime.of(2017, 1, 23, 22, 32, 11)
    }

    "toZonedDateTime" in {
      // Offsets given as "+08", "+08:00" and "-08" must all be understood.
      TimeUtils.toZonedDateTime("2018-1-2 11:11:11+08") mustBe zdt(2018, 1, 2, 11, 11, 11)
      TimeUtils.toZonedDateTime("2018-1-2 11:11:11+08:00") mustBe zdt(2018, 1, 2, 11, 11, 11)
      TimeUtils.toZonedDateTime("2018-1-2 11:11:11-08") mustBe zdt(2018, 1, 2, 11, 11, 11, ZoneOffset.ofHours(-8))
      // Both "T" and space separators between date and time are accepted.
      TimeUtils.toZonedDateTime("2018-1-2T11:11:11-08:00") mustBe zdt(2018, 1, 2, 11, 11, 11, ZoneOffset.ofHours(-8))
      TimeUtils.toZonedDateTime("2018-11-2T11:11:11-08:30") mustBe
        ZonedDateTime.of(2018, 11, 2, 11, 11, 11, 0, ZoneOffset.of("-08:30"))
      // Missing offset defaults to +08:00.
      TimeUtils.toZonedDateTime("2018-1-2T11:11:11") mustBe zdt(2018, 1, 2, 11, 11, 11)
      TimeUtils.toZonedDateTime("2018-1-22 11:11:11") mustBe zdt(2018, 1, 22, 11, 11, 11)
      TimeUtils.toZonedDateTime("2018-01-22 11:11:11") mustBe zdt(2018, 1, 22, 11, 11, 11)
      // Fractional seconds are parsed as milliseconds.
      TimeUtils.toZonedDateTime("2018-01-22 11:11:11.321") mustBe
        ZonedDateTime.of(2018, 1, 22, 11, 11, 11, 321 * 1000 * 1000, East8)
      TimeUtils.toZonedDateTime("2018-01-22 11:11:11.321+8") mustBe
        ZonedDateTime.of(2018, 1, 22, 11, 11, 11, 321 * 1000 * 1000, East8)
    }
  }
}
| helloscala/helloscala | hs-test/src/test/scala/helloscala/common/util/TimeUtilsTest.scala | Scala | apache-2.0 | 4,650 |
package com.greencatsoft.angularjs.core
import com.greencatsoft.angularjs.injectable
import scala.scalajs.js
/**
 * Use the \\$locationProvider to configure how the application deep linking paths are stored.
 *
 * This is a Scala.js facade: every method body is `js.native` and the actual
 * implementation is supplied by AngularJS at runtime.
 *
 * @see https://docs.angularjs.org/api/ng/provider/\\$locationProvider
 */
@injectable("$locationProvider")
trait LocationProvider extends js.Object {

  /**
   * Getter/setter for the prefix used in hash-bang URLs.
   *
   * @param prefix Prefix for hash part (containing path and search)
   * @return current value if used as getter or itself (chaining) if used as setter
   */
  def hashPrefix(prefix: String = null): String = js.native

  /**
   * Enables or disables HTML5 mode with a simple boolean flag
   * (sets `html5Mode.enabled` to the given value).
   *
   * @param mode whether HTML5 (history API based) URLs should be used
   * @return html5Mode object if used as getter or itself (chaining) if used as setter
   */
  def html5Mode(mode: Boolean): Html5ModeInfo = js.native

  /**
   * Configures HTML5 mode from a full settings object, setting `enabled`,
   * `requireBase` and `rewriteLinks` to the respective values.
   *
   * @param mode the settings object to apply
   * @return html5Mode object if used as getter or itself (chaining) if used as setter
   */
  def html5Mode(mode: Html5ModeInfo): Html5ModeInfo = js.native
}
/**
 * Settings object accepted by `LocationProvider.html5Mode`.
 * This is a plain JavaScript object; see the companion for a factory.
 */
trait Html5ModeInfo extends js.Object {

  /**
   * (default: false) If true, will rely on history.pushState to change urls where supported. Will fall back to
   * hash-prefixed paths in browsers that do not support pushState.
   */
  var enabled: Boolean = js.native

  /**
   * (default: true) When html5Mode is enabled, specifies whether or not a `<base>` tag is required to be present.
   * If enabled and requireBase are true, and a `<base>` tag is not present, an error will be thrown when \\$location
   * is injected. See the \\$location guide for more information
   */
  var requireBase: Boolean = js.native

  /**
   * (default: true) When html5Mode is enabled, enables/disables url rewriting for relative links.
   */
  var rewriteLinks: Boolean = js.native
}
object Html5ModeInfo {

  /**
   * Creates an [[Html5ModeInfo]] settings object, defaulting every field to the
   * AngularJS defaults (html5Mode off, base tag required, link rewriting on).
   */
  def apply(enabled: Boolean = false, requireBase: Boolean = true, rewriteLinks: Boolean = true): Html5ModeInfo = {
    // Start from an empty JS object and populate the three expected fields.
    val info = (new js.Object).asInstanceOf[Html5ModeInfo]
    info.enabled = enabled
    info.requireBase = requireBase
    info.rewriteLinks = rewriteLinks
    info
  }
}
/**
 * Scala.js facade for the AngularJS \\$location service.
 * All bodies are `js.native`; AngularJS supplies the implementation at runtime.
 */
@injectable("$location")
trait Location extends js.Object {

  /** Returns the full URL of the current page. */
  def absUrl(): String = js.native

  /** Getter (no args) / setter (with args) for the URL; returns the current url. */
  def url(url: String = null, replace: String = null): String = js.native

  /** Returns the protocol of the current URL (e.g. "http"). */
  def protocol(): String = js.native

  /** Returns the host of the current URL. */
  def host(): String = js.native

  /** Returns the port of the current URL. */
  def port(): Int = js.native

  /** Returns the path of the current URL. */
  def path(): String = js.native

  /** Sets the path; returns the service itself for chaining. */
  def path(path: String): Location = js.native

  // TODO: refine argument types?
  /** Getter/setter for the search (query string) part of the URL. */
  def search(search: js.Any, paramValue: js.Any = null): js.Object = js.native

  /** Getter (no args) / setter for the hash fragment; returns the current hash. */
  def hash(hash: String = null): String = js.native

  /** Replaces the latest history record instead of pushing a new one on the next sync. */
  def replace(): Unit = js.native
}
| rankomat/scalajs-angular | src/main/scala/com/greencatsoft/angularjs/core/Location.scala | Scala | apache-2.0 | 2,902 |
package io.github.mandar2812.PlasmaML.omni
import breeze.linalg.{DenseMatrix, DenseVector}
import io.github.mandar2812.dynaml.DynaMLPipe
import io.github.mandar2812.dynaml.evaluation.RegressionMetrics
import io.github.mandar2812.dynaml.kernels._
import io.github.mandar2812.dynaml.models.gp.GPRegression
import io.github.mandar2812.dynaml.optimization.{
GradBasedGlobalOptimizer,
GridSearch
}
import io.github.mandar2812.dynaml.pipes.DataPipe
import scala.util.Random
/**
* @author mandar2812 datum 19/11/15.
*
* Train and evaluate a "vanilla"
* GP regression model f(x): R_n --> R
*/
object TestGPOmni {

  /**
   * Convenience entry point: forwards to [[runExperiment]], packing the
   * optimizer settings (`stepSize`, `maxIt`) into the options map it expects.
   */
  def apply(
    kernel: LocalScalarKernel[DenseVector[Double]],
    year: Int,
    yeartest: Int,
    bandwidth: Double,
    noise: LocalScalarKernel[DenseVector[Double]],
    num_training: Int,
    num_test: Int,
    columns: List[Int],
    grid: Int,
    step: Double,
    randomSample: Boolean,
    globalOpt: String,
    stepSize: Double,
    maxIt: Int
  ): Unit = {
    runExperiment(
      year,
      yeartest,
      kernel,
      bandwidth,
      noise,
      num_training,
      num_test,
      columns,
      grid,
      step,
      globalOpt,
      randomSample,
      Map(
        "tolerance" -> "0.0001",
        "step" -> stepSize.toString,
        "maxIterations" -> maxIt.toString
      )
    )
  }

  /**
   * Trains and evaluates a GP regression model on OMNI data.
   *
   * Training data is read from `data/omni2_<year>.csv` and test data from
   * `data/omni2_<yeartest>.csv`.
   *
   * @param kernel covariance kernel for the GP
   * @param noise noise kernel for the GP
   * @param columns column indices to extract from the OMNI CSV files; the last
   *        extracted column is treated as the target
   * @param globalOpt "GS" for grid search, "ML" for gradient based optimization
   * @param randomSample if true, training/test rows are sampled at random
   *        instead of taking the first/last rows
   * @param opt extra options passed to the hyper-parameter optimizer
   */
  def runExperiment(
    year: Int = 2006,
    yeartest: Int = 2007,
    kernel: LocalScalarKernel[DenseVector[Double]],
    bandwidth: Double = 0.5,
    noise: LocalScalarKernel[DenseVector[Double]],
    num_training: Int = 200,
    num_test: Int = 50,
    columns: List[Int] = List(40, 16, 21, 23, 24, 22, 25),
    grid: Int = 5,
    step: Double = 0.2,
    globalOpt: String = "ML",
    randomSample: Boolean = false,
    opt: Map[String, String]
  ): Unit = {

    //function to train and test a GP Regression model
    //accepts training and test splits separately.
    val modelTrainTest =
      (trainTest: (
        (
          Iterable[(DenseVector[Double], Double)],
          Iterable[(DenseVector[Double], Double)]
        ),
        (DenseVector[Double], DenseVector[Double])
      )) => {

        val model =
          new GPRegression(kernel, noise, trainingdata = trainTest._1._1.toSeq)

        // Pick the hyper-parameter tuning strategy requested by the caller.
        val gs = globalOpt match {
          case "GS" =>
            new GridSearch[model.type](model)
              .setGridSize(grid)
              .setStepSize(step)
              .setLogScale(false)

          case "ML" => new GradBasedGlobalOptimizer[GPRegression](model)
        }

        val startConf = kernel.state ++ noise.state
        val (_, conf) = gs.optimize(startConf, opt)

        model.setState(conf)

        val res = model.test(trainTest._1._2.toSeq)

        // Extract (prediction, label) pairs and undo the standardization of the
        // target column (trainTest._2 holds the (mean, stddev) vectors).
        val scoresAndLabelsPipe =
          DataPipe(
            (res: Seq[(DenseVector[Double], Double, Double, Double, Double)]) =>
              res.map(i => (i._3, i._2)).toList
          ) > DataPipe(
            (list: List[(Double, Double)]) =>
              list.map { l =>
                (
                  l._1 * trainTest._2._2(-1) + trainTest._2._1(-1),
                  l._2 * trainTest._2._2(-1) + trainTest._2._1(-1)
                )
              }
          )

        val scoresAndLabels = scoresAndLabelsPipe.run(res)

        val metrics =
          new RegressionMetrics(scoresAndLabels, scoresAndLabels.length)

        //println(scoresAndLabels)
        metrics.print()
        metrics.generatePlots()
      }

    //Load Omni data into a stream
    //Extract the time and Dst values
    //separate data into training and test
    //pipe training data to model and then generate test predictions
    //create RegressionMetrics instance and produce plots

    // The map below declares the OMNI "missing value" sentinel per column, so
    // that rows containing them can be discarded by removeMissingLines.
    val preProcessPipe = DynaMLPipe.fileToStream >
      DynaMLPipe.replaceWhiteSpaces >
      DynaMLPipe.extractTrainingFeatures(
        columns,
        Map(
          16 -> "999.9",
          21 -> "999.9",
          24 -> "9999.",
          23 -> "999.9",
          40 -> "99999",
          22 -> "9999999.",
          25 -> "999.9",
          28 -> "99.99",
          27 -> "9.999",
          39 -> "999"
        )
      ) >
      DynaMLPipe.removeMissingLines >
      DynaMLPipe.splitFeaturesAndTargets

    /*
     * Create the final pipe composed as follows
     *
     * train, test
     * |       |
     * |-------|
     * |       |
     * v       v
     * p_train, p_test : pre-process
     * |       |
     * |-------|
     * |       |
     * v       v
     * s_train, s_test : sub-sample
     * |       |
     * |-------|
     * |       |
     * v       v
     * norm_tr, norm_te : mean center and standardize
     * |       |
     * |-------|
     * |       |
     * v       v
     * |       |
     * |-----------------|
     * | Train, tune and |
     * | test the model. |
     * | Output graphs,  |
     * | metrics         |
     * |_________________|
     *
     * */
    val trainTestPipe = DynaMLPipe.duplicate(preProcessPipe) >
      DataPipe(
        (data: (
          Iterable[(DenseVector[Double], Double)],
          Iterable[(DenseVector[Double], Double)]
        )) => {
          // Either take a deterministic prefix/suffix, or a Bernoulli sample
          // whose expected size matches the requested counts.
          if (!randomSample)
            (data._1.take(num_training), data._2.takeRight(num_test))
          else
            (
              data._1.filter(
                _ => Random.nextDouble() <= num_training / data._1.size.toDouble
              ),
              data._2.filter(
                _ => Random.nextDouble() <= num_test / data._2.size.toDouble
              )
            )
        }
      ) >
      DynaMLPipe.trainTestGaussianStandardization >
      DataPipe(modelTrainTest)

    trainTestPipe.run(
      ("data/omni2_" + year + ".csv", "data/omni2_" + yeartest + ".csv")
    )
  }
}
| mandar2812/PlasmaML | omni/src/main/scala/io/github/mandar2812/PlasmaML/omni/TestGPOmni.scala | Scala | lgpl-2.1 | 5,767 |
package com.stefansavev.core.serialization
import java.io.{InputStream, OutputStream}
/**
 * Implicit extensions of input/output streams used to read/write values.
 *
 * Each wrapper extends `AnyVal`, making it a value class: calling one of these
 * extension methods does not allocate a wrapper object at runtime.
 */
object StreamExtensions {

  /** Adds `writeInt` to any [[java.io.OutputStream]]. */
  implicit class IntSerializerExt(val outputStream: OutputStream) extends AnyVal {
    /** Writes a single Int to the stream via [[IntSerializer]]. */
    def writeInt(value: Int): Unit = {
      IntSerializer.write(outputStream, value)
    }
  }

  /** Adds `readInt` to any [[java.io.InputStream]]. */
  implicit class IntDeSerializerExt(val inputStream: InputStream) extends AnyVal {
    /** Reads a single Int from the stream via [[IntSerializer]]. */
    def readInt(): Int = {
      IntSerializer.read(inputStream)
    }
  }

  /** Adds `writeDoubleArray` to any [[java.io.OutputStream]]. */
  implicit class DoubleArraySerializerExt(val outputStream: OutputStream) extends AnyVal {
    /** Writes a double array to the stream via [[DoubleArraySerializer]]. */
    def writeDoubleArray(values: Array[Double]): Unit = {
      DoubleArraySerializer.write(outputStream, values)
    }
  }

  /** Adds `readDoubleArray` to any [[java.io.InputStream]]. */
  implicit class DoubleArrayDeSerializerExt(val inputStream: InputStream) extends AnyVal {
    /** Reads a double array from the stream via [[DoubleArraySerializer]]. */
    def readDoubleArray(): Array[Double] = {
      DoubleArraySerializer.read(inputStream)
    }
  }
}
| stefansavev/random-projections-at-berlinbuzzwords | src/main/scala/com/stefansavev/core/serialization/StreamExtensions.scala | Scala | apache-2.0 | 871 |
package component
import javafx.fxml.FXML
import javafx.scene.layout.Pane
import me.mtrupkin.console.Screen
import me.mtrupkin.control.ConsoleFx
import me.mtrupkin.core.{Size, Point}
import model.EntityViewer
import model.space.{Coordinate, Ship, Entity}
import scalafx.Includes._
import scalafx.beans.property.{ObjectProperty, Property}
import scalafx.scene.input.MouseButton
import scalafx.scene.{control => sfxc, input => sfxi, layout => sfxl, shape => sfxs, text => sfxt}
/**
* Created by mtrupkin on 5/22/2015.
*/
/**
 * JavaFX controller that renders an [[Entity]] onto a text-console control and
 * publishes mouse interactions (hover, left click, right click) as observable
 * properties for other components to bind to.
 */
class EntityController {
  // Parent pane injected from FXML; the console control is added to it.
  @FXML var consoleParent: Pane = _

  // Entity currently under the mouse cursor.
  val entityHighlighted = new ObjectProperty[Entity]
  // Entity chosen with the primary (left) mouse button.
  val entityPrimarySelected = new ObjectProperty[Entity]
  // World coordinate chosen with the secondary (right) mouse button.
  val secondarySelected = new ObjectProperty[Coordinate]

  var console: ConsoleFx = _
  var screen: Screen = _
  var entityViewer: EntityViewer = _

  /** Called by the FXML loader: builds the console and wires mouse handlers. */
  def initialize(): Unit = {
    val consoleSize = Size(40, 20)
    console = new ConsoleFx(consoleSize, fontSize = 23)
    console.setStyle("-fx-border-color: white")

    screen = Screen(consoleSize)

    consoleParent.onMouseMoved = (e: sfxi.MouseEvent) => handleMouseMove(e)
    consoleParent.onMouseClicked = (e: sfxi.MouseEvent) => handleMouseClicked(e)
    consoleParent.onMouseExited = (e: sfxi.MouseEvent) => handleMouseExit(e)

    consoleParent.getChildren.add(console)
    consoleParent.setFocusTraversable(true)
  }

  /** Re-renders the console for the given entity as seen by the player's ship. */
  def setEntity(entity: Entity, player: Ship) {
    screen.clear()
    this.entityViewer = EntityViewer(entity, player)
    entityViewer.render(screen)
    console.draw(screen)
  }

  /** Updates the highlighted entity as the cursor moves over the console. */
  def handleMouseMove(mouseEvent: sfxi.MouseEvent): Unit = {
    for( s <- mouseToPoint(mouseEvent)) {
      updateMouseInfo(s)
    }
  }

  /**
   * Primary click selects the entity under the cursor (if any); secondary click
   * publishes the clicked world coordinate regardless of whether an entity is there.
   */
  def handleMouseClicked(mouseEvent: sfxi.MouseEvent): Unit = {
    for {
      s <- mouseToPoint(mouseEvent)
      target <- entityViewer.target(s)
    } {
      mouseEvent.button match {
        case MouseButton.PRIMARY => entityPrimarySelected.update(target)
        case _ =>
      }
    }

    for {
      s <- mouseToPoint(mouseEvent)
      target = entityViewer.toWorld(s)
    } {
      mouseEvent.button match {
        case MouseButton.SECONDARY => secondarySelected.update(Coordinate(entityViewer.id, target))
        case _ =>
      }
    }
  }

  // Currently a no-op; kept so the FXML wiring stays in place.
  def handleMouseExit(mouseEvent: sfxi.MouseEvent): Unit = {
  }

  /** Publishes the entity at screen point `w`, if one is present. */
  def updateMouseInfo(w: Point): Unit = {
    for {
      target <- entityViewer.target(w)
    } {
      entityHighlighted.update(target)
    }
  }

  // Converts scene pixel coordinates to console cell coordinates; presumably
  // None when the cursor is outside the console grid — confirm in ConsoleFx.
  def mouseToPoint(mouseEvent: sfxi.MouseEvent): Option[Point] = console.toScreen(mouseEvent.x, mouseEvent.y)
}
| mtrupkin/flagship | src/main/scala/component/EntityController.scala | Scala | mit | 2,675 |
package converter
import text.{StringNone, StringOption, StringSome}
import scala.util.matching.Regex
/**
* <pre>
* Created on 7/5/15.
* </pre>
* @author K.Sakamoto
*/
/**
 * Parses numbers written with kanji numerals (e.g. "三千二百万") into a Long.
 *
 * The grammar is handled in two layers: notation 1 splits on the myriad
 * markers 兆 (10^12), 億 (10^8) and 万 (10^4); notation 2 splits the remainder
 * on 千 (1000), 百 (100) and 十 (10).
 */
object KanjiNumeralParser {
  // Capture groups correspond to ("cho", "oku", "man", "num").
  private val numeralNotation1Regex: Regex = """([^兆億万]+兆)?([^兆億万]+億)?([^兆億万]+万)?([^兆億万]*)""".r//("cho", "oku", "man", "num")
  // Capture groups correspond to ("thousand", "hundred", "ten", "num").
  private val numeralNotation2Regex: Regex = """([^千百十]*千)?([^千百十]*百)?([^千百十]*十)?([^千百十]*)""".r//("thousand", "hundred", "ten", "num")
  private val zero: String = "〇"

  /**
   * Entry point: normalizes the zero variants then parses.
   * Returns None when the input is absent or unparsable.
   */
  def parseKanjiNumerals(kanjiNumerals: StringOption): Option[Long] = {
    kanjiNumerals match {
      case StringSome(kanji) =>
        numeralNotation1Parser(StringOption(kanji.
          replaceAll("ゼロ-zero", zero).// workaround for UniDic tokenizer output
          replaceAll("零", zero)
        ))
      case StringNone =>
        None
    }
  }

  /** Splits on 兆/億/万 and sums each part weighted by powers of 10000. */
  private def numeralNotation1Parser(numerals: StringOption): Option[Long] = {
    numerals match {
      case StringSome(n) =>
        n match {
          case numeralNotation1Regex(cho, oku, man, num) =>
            val base: Int = 10000
            // "a" is a sentinel meaning "group absent"; .init strips the
            // trailing marker character (兆/億/万) from each matched group.
            val defaultString: String = "a"
            val choStr: String = StringOption(cho).getOrElse(defaultString).init
            val okuStr: String = StringOption(oku).getOrElse(defaultString).init
            val manStr: String = StringOption(man).getOrElse(defaultString).init
            var value: Long = 0L
            value += numeralNotation2Parser(StringOption(choStr)).getOrElse(0L) * base * base * base
            value += numeralNotation2Parser(StringOption(okuStr)).getOrElse(0L) * base * base
            value += numeralNotation2Parser(StringOption(manStr)).getOrElse(0L) * base
            value += numeralNotation2Parser(StringOption(num)).getOrElse(0L)
            Option(value)
          case _ =>
            None
        }
      case StringNone =>
        None
    }
  }

  /** Splits on 千/百/十; falls back to positional notation if no markers found. */
  private def numeralNotation2Parser(numerals: StringOption): Option[Long] = {
    if (numerals.isEmpty || numerals.get == "a") {
      return None
    }

    numerals match {
      case StringSome(n) =>
        n match {
          case numeralNotation2Regex(thousand, hundred, ten, num) =>
            if (StringOption(thousand).nonEmpty ||
              StringOption(hundred).nonEmpty ||
              StringOption(ten).nonEmpty) {
              val base: Int = 10
              // Same sentinel/.init trick as in numeralNotation1Parser; here a
              // bare marker (e.g. "十" alone) defaults its coefficient to 1.
              val defaultString: String = "a"
              val thousandStr: String = StringOption(thousand).getOrElse(defaultString).init
              val hundredStr: String = StringOption(hundred).getOrElse(defaultString).init
              val tenStr: String = StringOption(ten).getOrElse(defaultString).init
              var value: Long = 0L
              value += numeralNotation3Parser(StringOption(thousandStr), hasDefault = true ).getOrElse(0) * base * base * base
              value += numeralNotation3Parser(StringOption(hundredStr), hasDefault = true ).getOrElse(0) * base * base
              value += numeralNotation3Parser(StringOption(tenStr), hasDefault = true ).getOrElse(0) * base
              value += numeralNotation3Parser(StringOption(num), hasDefault = false).getOrElse(0)
              Option(value)
            } else {
              // No positional markers: digits written out one by one ("二〇一八").
              numeralNotation4Parser(numerals) map {_.toLong}
            }
          case _ =>
            None
        }
      case StringNone =>
        None
    }
  }

  /** Maps each kanji digit to its arabic digit and strips non-digits and leading zeros. */
  private def replaceAllKanjiToArabic(numerals: StringOption): StringOption = {
    numerals match {
      case StringSome(n) =>
        StringOption(n.
          replaceAll("一", "1").
          replaceAll("二", "2").
          replaceAll("三", "3").
          replaceAll("四", "4").
          replaceAll("五", "5").
          replaceAll("六", "6").
          replaceAll("七", "7").
          replaceAll("八", "8").
          replaceAll("九", "9").
          replaceAll(zero, "0").
          replaceAll("[^0123456789]", "").
          replaceAll("^0+", ""))
      case StringNone =>
        StringNone
    }
  }

  /**
   * Parses a single coefficient. With `hasDefault = true` an absent coefficient
   * means 1 (e.g. "十" is 1 x 10); otherwise it contributes nothing.
   */
  private def numeralNotation3Parser(numerals: StringOption, hasDefault: Boolean): Option[Int] = {
    if (numerals.isEmpty) {
      if (hasDefault) {
        return Some(1)
      } else {
        return None
      }
    }

    try {
      replaceAllKanjiToArabic(numerals) match {
        case StringSome(n) =>
          Option(n.toInt)
        case StringNone =>
          None
      }
    } catch {
      // e.g. NumberFormatException when the cleaned string is empty or non-numeric.
      case e: Exception =>
        e.printStackTrace()
        None
    }
  }

  /** Positional parsing: each kanji digit is weighted by its decimal position. */
  private def numeralNotation4Parser(numerals: StringOption): Option[Int] = {
    replaceAllKanjiToArabic(numerals) match {
      case StringSome(n) =>
        // Reverse so index i corresponds to weight 10^i.
        val numeralArray: Array[String] = n.toCharArray.reverse.map(_.toString)
        var num: Int = 0
        for (i <- numeralArray.indices) {
          try {
            num += (numeralArray(i).toDouble * math.pow(10, i)).toInt
          } catch {
            case e: NumberFormatException =>
              e.printStackTrace()
          }
        }
        Option(num)
      case StringNone =>
        None
    }
  }
}
| ktr-skmt/FelisCatusZero | src/main/scala/converter/KanjiNumeralParser.scala | Scala | apache-2.0 | 5,130 |
package models.domain
/** Records whether JavaScript is enabled on the client; defaults to `false`. */
case class JSEnabled(jsEnabled: Boolean = false)
| Department-for-Work-and-Pensions/ClaimCapture | c3/app/models/domain/JSEnabled.scala | Scala | mit | 72 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.system
import org.apache.samza.serializers.SerdeManager
import org.apache.samza.util.Logging
import org.apache.samza.SamzaException
/**
 * Fans task output out to the [[SystemProducer]] responsible for each
 * envelope's target system, serializing messages on the way.
 *
 * @param producers map from system name to the producer that writes to it
 * @param serdeManager serializes outgoing envelopes to raw bytes
 * @param metrics counters for sends, flushes and serialization failures
 * @param dropSerializationError if set to true, Samza will drop the messages
 *        that have serialization errors and keep running. If set to false,
 *        Samza will throw the SamzaException to fail the container.
 *        Default is false.
 */
class SystemProducers(
  producers: Map[String, SystemProducer],
  serdeManager: SerdeManager,
  metrics: SystemProducersMetrics = new SystemProducersMetrics,
  dropSerializationError: Boolean = false) extends Logging {

  /** Starts every underlying system producer. */
  def start {
    debug("Starting producers.")

    producers.values.foreach(_.start)
  }

  /** Stops every underlying system producer. */
  def stop {
    debug("Stopping producers.")

    producers.values.foreach(_.stop)
  }

  /** Registers `source` with the metrics and with every producer. */
  def register(source: String) {
    debug("Registering source: %s" format source)

    metrics.registerSource(source)

    producers.values.foreach(_.register(source))
  }

  /** Flushes any buffered output for `source` on every producer. */
  def flush(source: String) {
    debug("Flushing source: %s" format source)

    metrics.flushes.inc
    metrics.sourceFlushes(source).inc

    producers.values.foreach(_.flush(source))
  }

  /**
   * Serializes `envelope` and hands it to the producer for its target system.
   *
   * @throws SamzaException if serialization fails and `dropSerializationError`
   *         is false, or if no producer is configured for the target system.
   */
  def send(source: String, envelope: OutgoingMessageEnvelope) {
    trace("Sending message from source: %s, %s" format (envelope, source))

    metrics.sends.inc
    metrics.sourceSends(source).inc

    val bytesEnvelope = try {
      Some(serdeManager.toBytes(envelope))
    } catch {
      case e: Throwable if !dropSerializationError => throw new SamzaException("can not serialize the message", e)
      case ex: Throwable =>
        debug("Serialization fails: %s . Drop the error message" format ex)
        metrics.serializationError.inc
        None
    }

    // foreach replaces the old !isEmpty/get pair: the Option is never unwrapped unsafely.
    bytesEnvelope.foreach { bytes =>
      val system = envelope.getSystemStream.getSystem
      val producer = producers.getOrElse(system, throw new SamzaException("Attempting to produce to unknown system: %s. Available systems: %s. Please add the system to your configuration, or update outgoing message envelope to send to a defined system." format (system, producers.keySet)))
      producer.send(source, bytes)
    }
  }
}
| fredji97/samza | samza-core/src/main/scala/org/apache/samza/system/SystemProducers.scala | Scala | apache-2.0 | 2,962 |
package org.broadinstitute.dsde.vault.datamanagement.services
import org.broadinstitute.dsde.vault.datamanagement.DataManagementDatabaseFreeSpec
import org.broadinstitute.dsde.vault.datamanagement.model.UnmappedBAM
import org.broadinstitute.dsde.vault.datamanagement.model.Properties._
import org.broadinstitute.dsde.vault.datamanagement.services.JsonImplicits._
import spray.http.StatusCodes._
import spray.httpx.SprayJsonSupport._
/**
 * Route tests for [[UnmappedBAMService]]: exercises POST/GET of single uBAMs
 * and GET of uBAM lists across each supported API version.
 */
class UnmappedBAMServiceSpec extends DataManagementDatabaseFreeSpec with UnmappedBAMService {

  "UnmappedBAMService" - {

    // API versions under test; None means the unversioned path.
    val versions = Table(
      "version",
      None,
      Option(1),
      Option(2)
    )

    val pageLimits = Table(
      "pageLimit",
      0,
      1
    )

    forAll(versions) { (version: Option[Int]) =>
      val pathBase = "/ubams" + v(version)

      s"when accessing the $pathBase path" - {
        val files = Map("bam" -> "/path/to/bam", "bai" -> "/path/to/bai")
        val metadata = Map("someKey" -> "someValue")
        // Mutable state carried from the POST test into the subsequent GET tests.
        var properties: Option[Map[String, String]] = None
        var createdId: Option[String] = None

        "POST should store a new unmapped BAM" in {
          Post(s"$pathBase", UnmappedBAM(files, metadata)) ~> openAMSession ~> ingestRoute ~> check {
            val unmappedBAM = responseAs[UnmappedBAM]
            unmappedBAM.files should be(files)
            unmappedBAM.metadata should be(metadata)
            unmappedBAM.id shouldNot be(empty)
            createdId = unmappedBAM.id
            // Audit properties (createdBy/createdDate) only exist from v2 onwards.
            version match {
              case Some(x) if x > 1 =>
                unmappedBAM.properties.get(CreatedBy) shouldNot be(empty)
                unmappedBAM.properties.get(CreatedDate) shouldNot be(empty)
                properties = unmappedBAM.properties
              case _ => unmappedBAM.properties should be(empty)
            }
          }
        }

        "GET should retrieve the previously stored unmapped BAM" in {
          // Depends on the POST test above having captured an id.
          assume(createdId.isDefined)
          Get(s"$pathBase/" + createdId.get) ~> describeRoute ~> check {
            val unmappedBAM = responseAs[UnmappedBAM]
            unmappedBAM.files should be(files)
            unmappedBAM.metadata should be(metadata)
            unmappedBAM.id should be(createdId)
            version match {
              case Some(x) if x > 1 =>
                unmappedBAM.properties should be(properties)
                unmappedBAM.properties.get.get(CreatedBy) shouldNot be(empty)
                unmappedBAM.properties.get.get(CreatedDate) shouldNot be(empty)
              case _ => unmappedBAM.properties should be(empty)
            }
          }
        }

        "GET of an unknown id should return a not found error" in {
          Get(s"$pathBase/unknown-id") ~> sealRoute(describeRoute) ~> check {
            status should be(NotFound)
          }
        }

        "GET should retrieve a list of unmapped BAMs or a not found error" in {
          // Listing is only supported on versioned paths; unversioned returns 404.
          if (version.isDefined) {
            Get(s"$pathBase") ~> openAMSession ~> describeRouteList ~> check {
              val unmappedBAMs = responseAs[List[UnmappedBAM]]
              unmappedBAMs shouldNot be(empty)
              unmappedBAMs.foreach(unmappedBAM => {
                unmappedBAM.id shouldNot be(empty)
                version match {
                  case Some(x) if x > 1 =>
                    unmappedBAM.properties.get.get(CreatedBy) shouldNot be(empty)
                    unmappedBAM.properties.get.get(CreatedDate) shouldNot be(empty)
                  case _ => unmappedBAM.properties should be(empty)
                }
              })
            }
          } else {
            Get(s"$pathBase") ~> openAMSession ~>sealRoute(describeRouteList) ~> check {
              status should be(NotFound)
            }
          }
        }

        forAll(pageLimits) { (pageLimit: Int) =>
          s"GET with ?page[limit]=$pageLimit should retrieve $pageLimit unmapped BAMs or a not found error" in {
            if (version.isDefined) {
              Get(s"$pathBase?page[limit]=$pageLimit") ~> openAMSession ~> describeRouteList ~> check {
                val unmappedBAMs = responseAs[List[UnmappedBAM]]
                unmappedBAMs should have size pageLimit
              }
            } else {
              Get(s"$pathBase?page[limit]=$pageLimit") ~> openAMSession ~> sealRoute(describeRouteList) ~> check {
                status should be(NotFound)
              }
            }
          }
        }
      }
    }
  }
}
| broadinstitute/vault-datamanagement | src/test/scala/org/broadinstitute/dsde/vault/datamanagement/services/UnmappedBAMServiceSpec.scala | Scala | bsd-3-clause | 4,488 |
/*
Facsimile: A Discrete-Event Simulation Library
Copyright © 2004-2020, Michael J Allen.
This file is part of Facsimile.
Facsimile is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later
version.
Facsimile is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with Facsimile. If not, see
http://www.gnu.org/licenses/lgpl.
The developers welcome all comments, suggestions and offers of assistance. For further information, please visit the
project home page at:
http://facsim.org/
Thank you for your interest in the Facsimile project!
IMPORTANT NOTE: All patches (modifications to existing files and/or the addition of new files) submitted for inclusion
as part of the official Facsimile code base, must comply with the published Facsimile Coding Standards. If your code
fails to comply with the standard, then your patches will be rejected. For further information, please visit the coding
standards at:
http://facsim.org/Documentation/CodingStandards/
========================================================================================================================
Scala source file from the org.facsim.anim.cell package.
*/
package org.facsim.anim.cell
import org.facsim.{assertNonNull, LibResource}
import org.facsim.anim.Point3D
/**
Class representing a basic cell point in 3D space.
@constructor Construct a new basic point from the cell data stream.
@param scene Reference to the CellScene of which this point is a part.
@param pointType Type of point represented by this instance.
@throws org.facsim.anim.cell.IncorrectFormatException if the file supplied is
not an ''AutoMod® cell'' file.
@throws org.facsim.anim.cell.ParsingErrorException if errors are encountered
during parsing of the file.
*/
private[cell] class Point(scene: CellScene, pointType: Point.Value) {

  /*
  Sanity checks.
  */
  // Guard against null arguments via the project's assertion helper.
  assertNonNull(scene)
  assertNonNull(pointType)

  /**
  Read the 3D point from the cell data stream.
  */
  // Note: evaluated during construction, so creating a Point consumes the
  // three coordinate values from the scene's data stream as a side effect.
  private[cell] final val point = Point.read(scene, pointType)
}
/**
Utility enumeration object for processing cell file points.
*/
private[cell] object Point
extends Enumeration {

  /**
  Point belonging to a polyhedron definition.
  */
  val Polyhedron = Value

  /**
  Point belonging to a text list.
  */
  val TextList = Value

  /**
  Point belonging to a vector list.
  */
  val VectorList = Value

  /**
  Resource key used to build the prompt for each coordinate read.
  */
  private val ReadKey = "anim.cell.Point.read"

  /**
  Read a point from the cell scene and return it.

  @param scene Reference to the CellScene of which this point is a part.

  @param pointType Type of point represented by this instance.

  @return Point read from the scene.

  @throws org.facsim.anim.cell.IncorrectFormatException if the file supplied is
  not an ''AutoMod® cell'' file.

  @throws org.facsim.anim.cell.ParsingErrorException if errors are encountered
  during parsing of the file.
  */
  def read(scene: CellScene, pointType: Value): Point3D = {

    /*
    Sanity checks.
    */
    assertNonNull(scene)
    assertNonNull(pointType)

    /*
    Helper reading the next coordinate from the stream; axisIndex selects the
    X (0), Y (1) or Z (2) variant of the resource message.
    */
    def readCoordinate(axisIndex: Int): Double =
      scene.readDouble(LibResource(ReadKey, pointType.id, axisIndex))

    /*
    Coordinates must be consumed from the data stream in X, Y, Z order.
    */
    val xCoordinate = readCoordinate(0)
    val yCoordinate = readCoordinate(1)
    val zCoordinate = readCoordinate(2)

    /*
    Assemble and return the point just read.
    */
    Point3D(xCoordinate, yCoordinate, zCoordinate)
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.expressions
import _root_.java.math.{BigDecimal => JBigDecimal}
import _root_.java.util.{List => JList}
import org.apache.flink.api.common.typeinfo.{SqlTimeTypeInfo, TypeInformation}
import org.apache.flink.table.api._
import org.apache.flink.table.delegation.PlannerExpressionParser
import ApiExpressionUtils._
import org.apache.flink.table.functions.BuiltInFunctionDefinitions
import org.apache.flink.table.types.utils.TypeConversions.fromLegacyInfoToDataType
import _root_.scala.collection.JavaConversions._
import _root_.scala.language.implicitConversions
import _root_.scala.util.parsing.combinator.{JavaTokenParsers, PackratParsers}
/**
* The implementation of a [[PlannerExpressionParser]] which parses expressions inside a String.
*
* <p><strong>WARNING</strong>: please keep this class in sync with PlannerExpressionParserImpl
* variant in flink-table-planner-blink module.
*/
class PlannerExpressionParserImpl extends PlannerExpressionParser {

  /**
    * Parses a single expression string into an [[Expression]].
    * Delegates to the companion-object parser.
    */
  // Consistency fix: `parseExpressionList` below already carried the
  // `override` modifier while this method did not; both implement the
  // [[PlannerExpressionParser]] interface, so mark both explicitly.
  override def parseExpression(exprString: String): Expression = {
    PlannerExpressionParserImpl.parseExpression(exprString)
  }

  /**
    * Parses a comma-separated list of expressions.
    * Delegates to the companion-object parser.
    */
  override def parseExpressionList(expression: String): JList[Expression] = {
    PlannerExpressionParserImpl.parseExpressionList(expression)
  }
}
/**
* Parser for expressions inside a String. This parses exactly the same expressions that
* would be accepted by the Scala Expression DSL.
*
* See [[org.apache.flink.table.api.bridge.scala.ImplicitExpressionConversions]] and
* [[org.apache.flink.table.api.bridge.scala.ImplicitExpressionOperations]] for the constructs
* available in the Scala Expression DSL. This parser must be kept in sync with the Scala DSL
* defined in the above files.
*/
object PlannerExpressionParserImpl extends JavaTokenParsers
  with PackratParsers
  with PlannerExpressionParser {

  // Wrapper for a reserved word; implicitly convertible to a Parser below.
  case class Keyword(key: String)

  // Convert the keyword into a case-insensitive Parser.
  // The pattern ensures that the keyword is not matched as a prefix, i.e.,
  // the keyword is not followed by a Java identifier character.
  implicit def keyword2Parser(kw: Keyword): Parser[String] = {
    ("""(?i)\\Q""" + kw.key + """\\E(?![_$\\p{javaJavaIdentifierPart}])""").r
  }

  // Keyword
  lazy val AS: Keyword = Keyword("as")
  lazy val CAST: Keyword = Keyword("cast")
  lazy val ASC: Keyword = Keyword("asc")
  lazy val DESC: Keyword = Keyword("desc")
  lazy val NULL: Keyword = Keyword("Null")
  lazy val NULL_OF: Keyword = Keyword("nullOf")
  lazy val IF: Keyword = Keyword("?")
  lazy val TO_DATE: Keyword = Keyword("toDate")
  lazy val TO_TIME: Keyword = Keyword("toTime")
  lazy val TO_TIMESTAMP: Keyword = Keyword("toTimestamp")
  lazy val TRIM: Keyword = Keyword("trim")
  lazy val EXTRACT: Keyword = Keyword("extract")
  lazy val TIMESTAMP_DIFF: Keyword = Keyword("timestampDiff")
  lazy val FLOOR: Keyword = Keyword("floor")
  lazy val CEIL: Keyword = Keyword("ceil")
  lazy val LOG: Keyword = Keyword("log")
  lazy val YEARS: Keyword = Keyword("years")
  lazy val YEAR: Keyword = Keyword("year")
  lazy val QUARTERS: Keyword = Keyword("quarters")
  lazy val QUARTER: Keyword = Keyword("quarter")
  lazy val MONTHS: Keyword = Keyword("months")
  lazy val MONTH: Keyword = Keyword("month")
  lazy val WEEKS: Keyword = Keyword("weeks")
  lazy val WEEK: Keyword = Keyword("week")
  lazy val DAYS: Keyword = Keyword("days")
  lazy val DAY: Keyword = Keyword("day")
  lazy val HOURS: Keyword = Keyword("hours")
  lazy val HOUR: Keyword = Keyword("hour")
  lazy val MINUTES: Keyword = Keyword("minutes")
  lazy val MINUTE: Keyword = Keyword("minute")
  lazy val SECONDS: Keyword = Keyword("seconds")
  lazy val SECOND: Keyword = Keyword("second")
  lazy val MILLIS: Keyword = Keyword("millis")
  lazy val MILLI: Keyword = Keyword("milli")
  lazy val ROWS: Keyword = Keyword("rows")
  lazy val STAR: Keyword = Keyword("*")
  lazy val GET: Keyword = Keyword("get")
  lazy val FLATTEN: Keyword = Keyword("flatten")
  lazy val OVER: Keyword = Keyword("over")
  lazy val DISTINCT: Keyword = Keyword("distinct")
  lazy val CURRENT_ROW: Keyword = Keyword("current_row")
  lazy val CURRENT_RANGE: Keyword = Keyword("current_range")
  lazy val UNBOUNDED_ROW: Keyword = Keyword("unbounded_row")
  lazy val UNBOUNDED_RANGE: Keyword = Keyword("unbounded_range")
  lazy val ROWTIME: Keyword = Keyword("rowtime")
  lazy val PROCTIME: Keyword = Keyword("proctime")
  lazy val TRUE: Keyword = Keyword("true")
  lazy val FALSE: Keyword = Keyword("false")
  lazy val PRIMITIVE_ARRAY: Keyword = Keyword("PRIMITIVE_ARRAY")
  lazy val OBJECT_ARRAY: Keyword = Keyword("OBJECT_ARRAY")
  lazy val MAP: Keyword = Keyword("MAP")
  lazy val BYTE: Keyword = Keyword("BYTE")
  lazy val SHORT: Keyword = Keyword("SHORT")
  lazy val INTERVAL_MONTHS: Keyword = Keyword("INTERVAL_MONTHS")
  lazy val INTERVAL_MILLIS: Keyword = Keyword("INTERVAL_MILLIS")
  lazy val INT: Keyword = Keyword("INT")
  lazy val LONG: Keyword = Keyword("LONG")
  lazy val FLOAT: Keyword = Keyword("FLOAT")
  lazy val DOUBLE: Keyword = Keyword("DOUBLE")
  lazy val BOOLEAN: Keyword = Keyword("BOOLEAN")
  lazy val STRING: Keyword = Keyword("STRING")
  lazy val SQL_DATE: Keyword = Keyword("SQL_DATE")
  lazy val SQL_TIMESTAMP: Keyword = Keyword("SQL_TIMESTAMP")
  lazy val SQL_TIME: Keyword = Keyword("SQL_TIME")
  lazy val DECIMAL: Keyword = Keyword("DECIMAL")
  lazy val TRIM_MODE_LEADING: Keyword = Keyword("LEADING")
  lazy val TRIM_MODE_TRAILING: Keyword = Keyword("TRAILING")
  lazy val TRIM_MODE_BOTH: Keyword = Keyword("BOTH")
  lazy val TO: Keyword = Keyword("TO")

  // Identifier rule used for function names; reuses JavaTokenParsers' `ident`.
  def functionIdent: PlannerExpressionParserImpl.Parser[String] = super.ident

  // symbols

  // One alternative per enum constant, folded into a single parser with `|`.
  lazy val timeIntervalUnit: PackratParser[Expression] = TimeIntervalUnit.values map {
    unit: TimeIntervalUnit => literal(unit.toString) ^^^ valueLiteral(unit)
  } reduceLeft(_ | _)

  lazy val timePointUnit: PackratParser[Expression] = TimePointUnit.values map {
    unit: TimePointUnit => literal(unit.toString) ^^^ valueLiteral(unit)
  } reduceLeft(_ | _)

  lazy val currentRange: PackratParser[Expression] = CURRENT_RANGE ^^ {
    _ => unresolvedCall(BuiltInFunctionDefinitions.CURRENT_RANGE)
  }

  lazy val currentRow: PackratParser[Expression] = CURRENT_ROW ^^ {
    _ => unresolvedCall(BuiltInFunctionDefinitions.CURRENT_ROW)
  }

  lazy val unboundedRange: PackratParser[Expression] = UNBOUNDED_RANGE ^^ {
    _ => unresolvedCall(BuiltInFunctionDefinitions.UNBOUNDED_RANGE)
  }

  lazy val unboundedRow: PackratParser[Expression] = UNBOUNDED_ROW ^^ {
    _ => unresolvedCall(BuiltInFunctionDefinitions.UNBOUNDED_ROW)
  }

  // Window-boundary constants usable wherever an expression is expected.
  lazy val overConstant: PackratParser[Expression] =
    currentRange | currentRow | unboundedRange | unboundedRow

  lazy val trimMode: PackratParser[String] =
    TRIM_MODE_LEADING | TRIM_MODE_TRAILING | TRIM_MODE_BOTH

  // data types
  lazy val dataType: PackratParser[TypeInformation[_]] =
    PRIMITIVE_ARRAY ~ "(" ~> dataType <~ ")" ^^ { ct => Types.PRIMITIVE_ARRAY(ct) } |
      OBJECT_ARRAY ~ "(" ~> dataType <~ ")" ^^ { ct => Types.OBJECT_ARRAY(ct) } |
      MAP ~ "(" ~> dataType ~ "," ~ dataType <~ ")" ^^ { mt => Types.MAP(mt._1._1, mt._2)} |
      BYTE ^^ { e => Types.BYTE } |
      SHORT ^^ { e => Types.SHORT } |
      INTERVAL_MONTHS ^^ { e => Types.INTERVAL_MONTHS } |
      INTERVAL_MILLIS ^^ { e => Types.INTERVAL_MILLIS } |
      INT ^^ { e => Types.INT } |
      LONG ^^ { e => Types.LONG } |
      FLOAT ^^ { e => Types.FLOAT } |
      DOUBLE ^^ { e => Types.DOUBLE } |
      BOOLEAN ^^ { { e => Types.BOOLEAN } } |
      STRING ^^ { e => Types.STRING } |
      SQL_DATE ^^ { e => Types.SQL_DATE } |
      SQL_TIMESTAMP ^^ { e => Types.SQL_TIMESTAMP } |
      SQL_TIME ^^ { e => Types.SQL_TIME } |
      DECIMAL ^^ { e => Types.DECIMAL }

  // literals

  // same as floatingPointNumber but we do not allow trailing dot "12.d" or "2."
  lazy val floatingPointNumberFlink: Parser[String] =
    """-?(\\d+(\\.\\d+)?|\\d*\\.\\d+)([eE][+-]?\\d+)?[fFdD]?""".r

  // Suffixes select the literal type: l/L -> Long, p/P -> BigDecimal,
  // f/F -> Float; an unsuffixed integer becomes Int, otherwise Double.
  lazy val numberLiteral: PackratParser[Expression] =
    (wholeNumber <~ ("l" | "L")) ^^ { n => valueLiteral(n.toLong) } |
      (decimalNumber <~ ("p" | "P")) ^^ { n => valueLiteral(new JBigDecimal(n)) } |
      (floatingPointNumberFlink | decimalNumber) ^^ {
        n =>
          if (n.matches("""-?\\d+""")) {
            valueLiteral(n.toInt)
          } else if (n.endsWith("f") || n.endsWith("F")) {
            valueLiteral(n.toFloat)
          } else {
            valueLiteral(n.toDouble)
          }
      }

  // string with single quotes such as 'It''s me.'
  lazy val singleQuoteStringLiteral: Parser[Expression] = "'(?:''|[^'])*'".r ^^ {
    str =>
      val escaped = str.substring(1, str.length - 1).replace("''", "'")
      valueLiteral(escaped)
  }

  // string with double quotes such as "I ""like"" dogs."
  lazy val doubleQuoteStringLiteral: PackratParser[Expression] = "\\"(?:\\"\\"|[^\\"])*\\"".r ^^ {
    str =>
      val escaped = str.substring(1, str.length - 1).replace("\\"\\"", "\\"")
      valueLiteral(escaped)
  }

  lazy val boolLiteral: PackratParser[Expression] = (TRUE | FALSE) ^^ {
    str => valueLiteral(str.toBoolean)
  }

  // Typed null: Null(TYPE) / nullOf(TYPE) produces a nullable literal.
  lazy val nullLiteral: PackratParser[Expression] = (NULL | NULL_OF) ~ "(" ~> dataType <~ ")" ^^ {
    dt => valueLiteral(null, fromLegacyInfoToDataType(dt).nullable())
  }

  lazy val literalExpr: PackratParser[Expression] =
    numberLiteral | doubleQuoteStringLiteral | singleQuoteStringLiteral | boolLiteral

  lazy val fieldReference: PackratParser[UnresolvedReferenceExpression] = (STAR | ident) ^^ {
    sym => unresolvedRef(sym)
  }

  // `|||` tries both alternatives and keeps the longer successful match.
  lazy val atom: PackratParser[Expression] =
    ( "(" ~> expression <~ ")" ) | (fieldReference ||| literalExpr)

  lazy val over: PackratParser[Expression] = composite ~ OVER ~ fieldReference ^^ {
    case agg ~ _ ~ windowRef =>
      unresolvedCall(BuiltInFunctionDefinitions.OVER, agg, windowRef)
  }

  // suffix operators
  lazy val suffixAsc : PackratParser[Expression] = composite <~ "." ~ ASC ~ opt("()") ^^ { e =>
    unresolvedCall(BuiltInFunctionDefinitions.ORDER_ASC, e)
  }

  lazy val suffixDesc : PackratParser[Expression] = composite <~ "." ~ DESC ~ opt("()") ^^ { e =>
    unresolvedCall(BuiltInFunctionDefinitions.ORDER_DESC, e)
  }

  lazy val suffixCast: PackratParser[Expression] =
    composite ~ "." ~ CAST ~ "(" ~ dataType ~ ")" ^^ {
      case e ~ _ ~ _ ~ _ ~ dt ~ _ =>
        unresolvedCall(
          BuiltInFunctionDefinitions.CAST,
          e,
          typeLiteral(fromLegacyInfoToDataType(dt)))
    }

  // TRIM call args: (removeLeading, removeTrailing, trimCharacter, operand).
  lazy val suffixTrim: PackratParser[Expression] =
    composite ~ "." ~ TRIM ~ "(" ~ trimMode ~
      "," ~ expression ~ ")" ^^ {
      case operand ~ _ ~ _ ~ _ ~ mode ~ _ ~ trimCharacter ~ _ =>
        unresolvedCall(
          BuiltInFunctionDefinitions.TRIM,
          valueLiteral(mode == TRIM_MODE_LEADING.key || mode == TRIM_MODE_BOTH.key),
          valueLiteral(mode == TRIM_MODE_TRAILING.key || mode == TRIM_MODE_BOTH.key),
          trimCharacter,
          operand)
    }

  // No-arg trim defaults to trimming spaces on both sides.
  lazy val suffixTrimWithoutArgs: PackratParser[Expression] =
    composite <~ "." ~ TRIM ~ opt("()") ^^ {
      e =>
        unresolvedCall(
          BuiltInFunctionDefinitions.TRIM,
          valueLiteral(true),
          valueLiteral(true),
          valueLiteral(" "),
          e)
    }

  lazy val suffixIf: PackratParser[Expression] =
    composite ~ "." ~ IF ~ "(" ~ expression ~ "," ~ expression ~ ")" ^^ {
      case condition ~ _ ~ _ ~ _ ~ ifTrue ~ _ ~ ifFalse ~ _ =>
        unresolvedCall(BuiltInFunctionDefinitions.IF, condition, ifTrue, ifFalse)
    }

  lazy val suffixExtract: PackratParser[Expression] =
    composite ~ "." ~ EXTRACT ~ "(" ~ timeIntervalUnit ~ ")" ^^ {
      case operand ~ _ ~ _ ~ _ ~ unit ~ _ =>
        unresolvedCall(BuiltInFunctionDefinitions.EXTRACT, unit, operand)
    }

  lazy val suffixFloor: PackratParser[Expression] =
    composite ~ "." ~ FLOOR ~ "(" ~ timeIntervalUnit ~ ")" ^^ {
      case operand ~ _ ~ _ ~ _ ~ unit ~ _ =>
        unresolvedCall(BuiltInFunctionDefinitions.FLOOR, operand, unit)
    }

  lazy val suffixCeil: PackratParser[Expression] =
    composite ~ "." ~ CEIL ~ "(" ~ timeIntervalUnit ~ ")" ^^ {
      case operand ~ _ ~ _ ~ _ ~ unit ~ _ =>
        unresolvedCall(BuiltInFunctionDefinitions.CEIL, operand, unit)
    }

  // required because op.log(base) changes order of a parameters
  lazy val suffixLog: PackratParser[Expression] =
    composite ~ "." ~ LOG ~ "(" ~ expression ~ ")" ^^ {
      case operand ~ _ ~ _ ~ _ ~ base ~ _ =>
        unresolvedCall(BuiltInFunctionDefinitions.LOG, base, operand)
    }

  lazy val suffixFunctionCall: PackratParser[Expression] =
    composite ~ "." ~ functionIdent ~ "(" ~ repsep(expression, ",") ~ ")" ^^ {
      case operand ~ _ ~ name ~ _ ~ args ~ _ =>
        lookupCall(name, operand :: args: _*)
    }

  lazy val suffixFunctionCallOneArg: PackratParser[Expression] =
    composite ~ "." ~ functionIdent ^^ {
      case operand ~ _ ~ name =>
        lookupCall(name, operand)
    }

  lazy val suffixToDate: PackratParser[Expression] =
    composite <~ "." ~ TO_DATE ~ opt("()") ^^ { e =>
      unresolvedCall(
        BuiltInFunctionDefinitions.CAST,
        e,
        typeLiteral(fromLegacyInfoToDataType(SqlTimeTypeInfo.DATE)))
    }

  lazy val suffixToTimestamp: PackratParser[Expression] =
    composite <~ "." ~ TO_TIMESTAMP ~ opt("()") ^^ { e =>
      unresolvedCall(
        BuiltInFunctionDefinitions.CAST,
        e,
        typeLiteral(fromLegacyInfoToDataType(SqlTimeTypeInfo.TIMESTAMP)))
    }

  lazy val suffixToTime: PackratParser[Expression] =
    composite <~ "." ~ TO_TIME ~ opt("()") ^^ { e =>
      unresolvedCall(
        BuiltInFunctionDefinitions.CAST,
        e,
        typeLiteral(fromLegacyInfoToDataType(SqlTimeTypeInfo.TIME)))
    }

  // Maps time-unit suffixes to month- or millisecond-based interval literals.
  lazy val suffixTimeInterval : PackratParser[Expression] =
    composite ~ "." ~ (YEARS | QUARTERS | MONTHS | WEEKS | DAYS |  HOURS | MINUTES |
      SECONDS | MILLIS | YEAR | QUARTER | MONTH | WEEK | DAY | HOUR | MINUTE | SECOND | MILLI) ^^ {

    case expr ~ _ ~ (YEARS.key | YEAR.key) => toMonthInterval(expr, 12)

    case expr ~ _ ~ (QUARTERS.key | QUARTER.key) => toMonthInterval(expr, 3)

    case expr ~ _ ~ (MONTHS.key | MONTH.key) => toMonthInterval(expr, 1)

    case expr ~ _ ~ (WEEKS.key | WEEK.key) => toMilliInterval(expr, 7 * MILLIS_PER_DAY)

    case expr ~ _ ~ (DAYS.key | DAY.key) => toMilliInterval(expr, MILLIS_PER_DAY)

    case expr ~ _ ~ (HOURS.key | HOUR.key) => toMilliInterval(expr, MILLIS_PER_HOUR)

    case expr ~ _ ~ (MINUTES.key | MINUTE.key) => toMilliInterval(expr, MILLIS_PER_MINUTE)

    case expr ~ _ ~ (SECONDS.key | SECOND.key) => toMilliInterval(expr, MILLIS_PER_SECOND)

    case expr ~ _ ~ (MILLIS.key | MILLI.key)=> toMilliInterval(expr, 1)
  }

  lazy val suffixRowInterval : PackratParser[Expression] =
    composite <~ "." ~ ROWS ^^ { e => toRowInterval(e) }

  lazy val suffixGet: PackratParser[Expression] =
    composite ~ "." ~ GET ~ "(" ~ literalExpr ~ ")" ^^ {
      case e ~ _ ~ _ ~ _ ~ index ~ _ =>
        unresolvedCall(BuiltInFunctionDefinitions.GET, e, index)
    }

  lazy val suffixFlattening: PackratParser[Expression] =
    composite <~ "." ~ FLATTEN ~ opt("()") ^^ { e =>
      unresolvedCall(BuiltInFunctionDefinitions.FLATTEN, e)
    }

  lazy val suffixDistinct: PackratParser[Expression] =
    composite <~ "." ~ DISTINCT ~ opt("()") ^^ { e =>
      unresolvedCall(BuiltInFunctionDefinitions.DISTINCT, e)
    }

  lazy val suffixAs: PackratParser[Expression] =
    composite ~ "." ~ AS ~ "(" ~ rep1sep(fieldReference, ",") ~ ")" ^^ {
      case e ~ _ ~ _ ~ _ ~ names ~ _ =>
        unresolvedCall(
          BuiltInFunctionDefinitions.AS,
          e :: names.map(n => valueLiteral(n.getName)): _*)
    }

  // Ordered choice: alternatives are tried top to bottom, first success wins.
  lazy val suffixed: PackratParser[Expression] =
    // expressions that need to be resolved early
    suffixFlattening |
      // expressions that need special expression conversion
      suffixAs | suffixTimeInterval | suffixRowInterval | suffixToTimestamp | suffixToTime |
      suffixToDate |
      // expression for log
      suffixLog |
      // expression for ordering
      suffixAsc | suffixDesc |
      // expressions that take enumerations
      suffixCast | suffixTrim | suffixTrimWithoutArgs | suffixExtract | suffixFloor | suffixCeil |
      // expressions that take literals
      suffixGet |
      // expression with special identifier
      suffixIf |
      // expression with distinct suffix modifier
      suffixDistinct |
      // function call must always be at the end
      suffixFunctionCall | suffixFunctionCallOneArg |
      // rowtime or proctime
      timeIndicator

  // prefix operators
  lazy val prefixCast: PackratParser[Expression] =
    CAST ~ "(" ~ expression ~ "," ~ dataType ~ ")" ^^ {
      case _ ~ _ ~ e ~ _ ~ dt ~ _ =>
        unresolvedCall(
          BuiltInFunctionDefinitions.CAST,
          e,
          typeLiteral(fromLegacyInfoToDataType(dt)))
    }

  lazy val prefixIf: PackratParser[Expression] =
    IF ~ "(" ~ expression ~ "," ~ expression ~ "," ~ expression ~ ")" ^^ {
      case _ ~ _ ~ condition ~ _ ~ ifTrue ~ _ ~ ifFalse ~ _ =>
        unresolvedCall(BuiltInFunctionDefinitions.IF, condition, ifTrue, ifFalse)
    }

  lazy val prefixFunctionCall: PackratParser[Expression] =
    functionIdent ~ "(" ~ repsep(expression, ",") ~ ")" ^^ {
      case name ~ _ ~ args ~ _ =>
        lookupCall(name, args: _*)
    }

  lazy val prefixFunctionCallOneArg: PackratParser[Expression] =
    functionIdent ~ "(" ~ expression ~ ")" ^^ {
      case name ~ _ ~ arg ~ _ =>
        lookupCall(name, arg)
    }

  lazy val prefixTrim: PackratParser[Expression] =
    TRIM ~ "(" ~ trimMode ~ "," ~ expression ~ "," ~ expression ~ ")" ^^ {
      case _ ~ _ ~ mode ~ _ ~ trimCharacter ~ _ ~ operand ~ _ =>
        unresolvedCall(
          BuiltInFunctionDefinitions.TRIM,
          valueLiteral(mode == TRIM_MODE_LEADING.key || mode == TRIM_MODE_BOTH.key),
          valueLiteral(mode == TRIM_MODE_TRAILING.key || mode == TRIM_MODE_BOTH.key),
          trimCharacter,
          operand)
    }

  lazy val prefixTrimWithoutArgs: PackratParser[Expression] =
    TRIM ~ "(" ~ expression ~ ")" ^^ {
      case _ ~ _ ~ operand ~ _ =>
        unresolvedCall(
          BuiltInFunctionDefinitions.TRIM,
          valueLiteral(true),
          valueLiteral(true),
          valueLiteral(" "),
          operand)
    }

  lazy val prefixExtract: PackratParser[Expression] =
    EXTRACT ~ "(" ~ expression ~ "," ~ timeIntervalUnit ~ ")" ^^ {
      case _ ~ _ ~ operand ~ _ ~ unit ~ _ =>
        unresolvedCall(BuiltInFunctionDefinitions.EXTRACT, unit, operand)
    }

  lazy val prefixTimestampDiff: PackratParser[Expression] =
    TIMESTAMP_DIFF ~ "(" ~ timePointUnit ~ "," ~ expression ~ "," ~ expression ~ ")" ^^ {
      case _ ~ _ ~ unit ~ _ ~ operand1 ~ _ ~ operand2 ~ _ =>
        unresolvedCall(BuiltInFunctionDefinitions.TIMESTAMP_DIFF, unit, operand1, operand2)
    }

  lazy val prefixFloor: PackratParser[Expression] =
    FLOOR ~ "(" ~ expression ~ "," ~ timeIntervalUnit ~ ")" ^^ {
      case _ ~ _ ~ operand ~ _ ~ unit ~ _ =>
        unresolvedCall(BuiltInFunctionDefinitions.FLOOR, operand, unit)
    }

  lazy val prefixCeil: PackratParser[Expression] =
    CEIL ~ "(" ~ expression ~ "," ~ timeIntervalUnit ~ ")" ^^ {
      case _ ~ _ ~ operand ~ _ ~ unit ~ _ =>
        unresolvedCall(BuiltInFunctionDefinitions.CEIL, operand, unit)
    }

  lazy val prefixGet: PackratParser[Expression] =
    GET ~ "(" ~ composite ~ ","  ~ literalExpr ~ ")" ^^ {
      case _ ~ _ ~ e ~ _ ~ index ~ _ =>
        unresolvedCall(BuiltInFunctionDefinitions.GET, e, index)
    }

  lazy val prefixFlattening: PackratParser[Expression] =
    FLATTEN ~ "(" ~> composite <~ ")" ^^ { e =>
      unresolvedCall(BuiltInFunctionDefinitions.FLATTEN, e)
    }

  lazy val prefixToDate: PackratParser[Expression] =
    TO_DATE ~ "(" ~> expression <~ ")" ^^ { e =>
      unresolvedCall(
        BuiltInFunctionDefinitions.CAST,
        e,
        typeLiteral(fromLegacyInfoToDataType(SqlTimeTypeInfo.DATE)))
    }

  lazy val prefixToTimestamp: PackratParser[Expression] =
    TO_TIMESTAMP ~ "(" ~> expression <~ ")" ^^ { e =>
      unresolvedCall(
        BuiltInFunctionDefinitions.CAST,
        e,
        typeLiteral(fromLegacyInfoToDataType(SqlTimeTypeInfo.TIMESTAMP)))
    }

  lazy val prefixToTime: PackratParser[Expression] =
    TO_TIME ~ "(" ~> expression <~ ")" ^^ { e =>
      unresolvedCall(
        BuiltInFunctionDefinitions.CAST,
        e,
        typeLiteral(fromLegacyInfoToDataType(SqlTimeTypeInfo.TIME)))
    }

  lazy val prefixDistinct: PackratParser[Expression] =
    functionIdent ~ "." ~ DISTINCT ~ "(" ~ repsep(expression, ",") ~ ")" ^^ {
      case name ~ _ ~ _ ~ _ ~ args ~ _ =>
        unresolvedCall(BuiltInFunctionDefinitions.DISTINCT, lookupCall(name, args: _*))
    }

  lazy val prefixAs: PackratParser[Expression] =
    AS ~ "(" ~ expression ~ "," ~ rep1sep(fieldReference, ",") ~ ")" ^^ {
      case _ ~ _ ~ e ~ _ ~ names ~ _ =>
        unresolvedCall(
          BuiltInFunctionDefinitions.AS,
          e :: names.map(n => valueLiteral(n.getName)): _*)
    }

  // Ordered choice: alternatives are tried top to bottom, first success wins.
  lazy val prefixed: PackratParser[Expression] =
    // expressions that need to be resolved early
    prefixFlattening |
      // expressions that need special expression conversion
      prefixAs| prefixToTimestamp | prefixToTime | prefixToDate |
      // expressions that take enumerations
      prefixCast | prefixTrim | prefixTrimWithoutArgs | prefixExtract | prefixFloor | prefixCeil |
      prefixTimestampDiff |
      // expressions that take literals
      prefixGet |
      // expression with special identifier
      prefixIf |
      // expression with prefix distinct
      prefixDistinct |
      // function call must always be at the end
      prefixFunctionCall | prefixFunctionCallOneArg

  // suffix/prefix composite
  lazy val composite: PackratParser[Expression] =
    over | suffixed | nullLiteral | prefixed | atom |
      failure("Composite expression expected.")

  // unary ops
  lazy val unaryNot: PackratParser[Expression] = "!" ~> composite ^^ { e =>
    unresolvedCall(BuiltInFunctionDefinitions.NOT, e)
  }

  lazy val unaryMinus: PackratParser[Expression] = "-" ~> composite ^^ { e =>
    unresolvedCall(BuiltInFunctionDefinitions.MINUS_PREFIX, e)
  }

  lazy val unaryPlus: PackratParser[Expression] = "+" ~> composite ^^ { e => e }

  lazy val unary: PackratParser[Expression] = composite | unaryNot | unaryMinus | unaryPlus |
    failure("Unary expression expected.")

  // arithmetic

  // `p * q` parses a left-associative chain of `p` separated by `q`, where
  // `q` yields the binary combining function (chainl1 semantics).
  lazy val product: PackratParser[Expression] = unary * (
    "*" ^^^ {
      (a:Expression, b:Expression) => unresolvedCall(BuiltInFunctionDefinitions.TIMES, a, b)
    } | "/" ^^^ {
      (a:Expression, b:Expression) => unresolvedCall(BuiltInFunctionDefinitions.DIVIDE, a, b)
    } | "%" ^^^ {
      (a:Expression, b:Expression) => unresolvedCall(BuiltInFunctionDefinitions.MOD, a, b)
    }) | failure("Product expected.")

  lazy val term: PackratParser[Expression] = product * (
    "+" ^^^ {
      (a:Expression, b:Expression) => unresolvedCall(BuiltInFunctionDefinitions.PLUS, a, b)
    } | "-" ^^^ {
      (a:Expression, b:Expression) => unresolvedCall(BuiltInFunctionDefinitions.MINUS, a, b)
    }) | failure("Term expected.")

  // comparison
  lazy val equalTo: PackratParser[Expression] = term ~ ("===" | "==" | "=") ~ term ^^ {
    case l ~ _ ~ r => unresolvedCall(BuiltInFunctionDefinitions.EQUALS, l, r)
  }

  lazy val notEqualTo: PackratParser[Expression] = term ~ ("!==" | "!=" | "<>") ~ term ^^ {
    case l ~ _ ~ r => unresolvedCall(BuiltInFunctionDefinitions.NOT_EQUALS, l, r)
  }

  lazy val greaterThan: PackratParser[Expression] = term ~ ">" ~ term ^^ {
    case l ~ _ ~ r => unresolvedCall(BuiltInFunctionDefinitions.GREATER_THAN, l, r)
  }

  lazy val greaterThanOrEqual: PackratParser[Expression] = term ~ ">=" ~ term ^^ {
    case l ~ _ ~ r => unresolvedCall(BuiltInFunctionDefinitions.GREATER_THAN_OR_EQUAL, l, r)
  }

  lazy val lessThan: PackratParser[Expression] = term ~ "<" ~ term ^^ {
    case l ~ _ ~ r => unresolvedCall(BuiltInFunctionDefinitions.LESS_THAN, l, r)
  }

  lazy val lessThanOrEqual: PackratParser[Expression] = term ~ "<=" ~ term ^^ {
    case l ~ _ ~ r => unresolvedCall(BuiltInFunctionDefinitions.LESS_THAN_OR_EQUAL, l, r)
  }

  lazy val comparison: PackratParser[Expression] =
    equalTo | notEqualTo |
      greaterThan | greaterThanOrEqual |
      lessThan | lessThanOrEqual | term |
      failure("Comparison expected.")

  // logic
  lazy val logic: PackratParser[Expression] = comparison * (
    "&&" ^^^ {
      (a:Expression, b:Expression) => unresolvedCall(BuiltInFunctionDefinitions.AND, a, b)
    } | "||" ^^^ {
      (a:Expression, b:Expression) => unresolvedCall(BuiltInFunctionDefinitions.OR, a, b)
    }) | failure("Logic expected.")

  // time indicators
  lazy val timeIndicator: PackratParser[Expression] = proctime | rowtime

  lazy val proctime: PackratParser[Expression] = fieldReference ~ "." ~ PROCTIME ^^ {
    case f ~ _ ~ _ => unresolvedCall(BuiltInFunctionDefinitions.PROCTIME, f)
  }

  lazy val rowtime: PackratParser[Expression] = fieldReference ~ "." ~ ROWTIME ^^ {
    case f ~ _ ~ _ => unresolvedCall(BuiltInFunctionDefinitions.ROWTIME, f)
  }

  // alias
  lazy val alias: PackratParser[Expression] = logic ~ AS ~ fieldReference ^^ {
    case e ~ _ ~ name =>
      unresolvedCall(BuiltInFunctionDefinitions.AS, e, valueLiteral(name.getName))
  } | logic ~ AS ~ "(" ~ rep1sep(fieldReference, ",") ~ ")" ^^ {
    case e ~ _ ~ _ ~ names ~ _ =>
      unresolvedCall(
        BuiltInFunctionDefinitions.AS,
        e :: names.map(n => valueLiteral(n.getName)): _*)
  } | logic

  lazy val aliasMapping: PackratParser[Expression] =
    fieldReference ~ AS ~ fieldReference ^^ {
      case e ~ _ ~ name =>
        unresolvedCall(BuiltInFunctionDefinitions.AS, e, valueLiteral(name.getName))
    }

  // columns
  lazy val fieldNameRange: PackratParser[Expression] = fieldReference ~ TO ~ fieldReference ^^ {
    case start ~ _ ~ end => unresolvedCall(BuiltInFunctionDefinitions.RANGE_TO, start, end)
  }

  lazy val fieldIndexRange: PackratParser[Expression] = numberLiteral ~ TO ~ numberLiteral ^^ {
    case start ~ _ ~ end => unresolvedCall(BuiltInFunctionDefinitions.RANGE_TO, start, end)
  }

  lazy val range = fieldNameRange | fieldIndexRange

  // Top-level rule for a single expression.
  lazy val expression: PackratParser[Expression] = range | overConstant | alias |
    failure("Invalid expression.")

  lazy val expressionList: Parser[List[Expression]] = rep1sep(expression, ",")

  // Throws ExpressionParserException (via throwError) if the whole input
  // cannot be consumed as a comma-separated expression list.
  def parseExpressionList(expression: String): JList[Expression] = {
    parseAll(expressionList, expression) match {
      case Success(lst, _) => lst

      case NoSuccess(msg, next) =>
        throwError(msg, next)
    }
  }

  // Throws ExpressionParserException (via throwError) if the whole input
  // cannot be consumed as a single expression.
  def parseExpression(exprString: String): Expression = {
    parseAll(expression, exprString) match {
      case Success(lst, _) => lst

      case NoSuccess(msg, next) =>
        throwError(msg, next)
    }
  }

  // Replaces the cryptic end-of-input message produced by the regex parsers
  // and reports the failing column together with the offending line.
  private def throwError(msg: String, next: Input): Nothing = {
    val improvedMsg = msg.replace("string matching regex `\\\\z'", "End of expression")

    throw new ExpressionParserException(
      s"""Could not parse expression at column ${next.pos.column}: $improvedMsg
         |${next.pos.longString}""".stripMargin)
  }
}
| tzulitai/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/expressions/PlannerExpressionParserImpl.scala | Scala | apache-2.0 | 28,093 |
/*
* Copyright (C) 2010 Lalit Pant <pant.lalit@gmail.com>
*
* The contents of this file are subject to the GNU General Public License
* Version 3 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.gnu.org/copyleft/gpl.html
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
*/
package net.kogics.kojo
package turtle
import org.junit.After
import org.junit.Before
import org.junit.Test
import org.junit.Assert._
import org.scalacheck.{Test => SCTest}
import org.scalacheck.Prop.forAll
import edu.umd.cs.piccolo.PCanvas
import net.kogics.kojo.util.Utils._
import core.Style
import java.awt.Color
class TurtleUndoTest extends KojoTestBase {

  val turtle = new Turtle(SpriteCanvas.instance, "/images/turtle32.png")

  @Before
  def setUp: Unit = {
    // Reset the turtle and disable animation delay so tests run fast.
    turtle.init()
    turtle.setAnimationDelay(0)
  }

  @After
  def tearDown: Unit = {
  }

  /** A single forward move followed by undo restores the initial state. */
  @Test
  def testForwardUndo1 {
    val s0 = turtle.state
    turtle.forward(100)
    turtle.undo()
    val s1 = turtle.state
    assertEquals(s0, s1)
  }

  /** Property: forward followed by undo restores the state for any step size. */
  @Test
  def testManyForwardsUndo {
    val propForwardUndo = forAll { stepSize: Int =>
      val s0 = turtle.state
      turtle.forward(stepSize)
      turtle.undo()
      val s1 = turtle.state
      s0 == s1
    }
    assertTrue(SCTest.check(propForwardUndo).passed)
  }

  /** A single turn followed by undo restores the initial state. */
  @Test
  def testTurnUndo1 {
    val s0 = turtle.state
    turtle.turn(40)
    turtle.undo()
    val s1 = turtle.state
    assertEquals(s0, s1)
  }

  /** Property: turn followed by undo restores the state for any angle. */
  @Test
  def testManyTurnsUndo {
    val propTurn = forAll { turnSize: Int =>
      val s0 = turtle.state
      turtle.turn(turnSize)
      turtle.undo()
      val s1 = turtle.state
      s0 == s1
    }
    assertTrue(SCTest.check(propTurn).passed)
  }

  /**
   * Draws a 360-step colorful circle, recording the turtle state after every
   * command, then undoes all 1800 commands and checks each undo restores the
   * corresponding recorded state in reverse order.
   */
  @Test
  def testColorfulCircleUndo {
    val random = new java.util.Random
    def randomColor = new java.awt.Color(random.nextInt(255), random.nextInt(255), random.nextInt(255))

    var states: List[SpriteState] = Nil
    states = turtle.state :: states

    for (idx <- 1 to 360) {
      turtle.setPenColor(randomColor)
      states = turtle.state :: states
      turtle.setPenThickness(idx)
      states = turtle.state :: states
      turtle.setFillColor(randomColor)
      states = turtle.state :: states
      turtle.forward(1)
      states = turtle.state :: states
      turtle.turn(1)
      states = turtle.state :: states
    }

    // Drop the current (latest) state; each undo should match the previous one.
    states = states.tail
    for (idx <- 1 to 360*5) {
      turtle.undo()
      assertEquals(states.head, turtle.state)
      states = states.tail
    }
  }

  /** A single moveTo followed by undo restores the initial state. */
  @Test
  def testMoveToUndo1 {
    val s0 = turtle.state
    turtle.moveTo(100, 100)
    turtle.undo()
    val s1 = turtle.state
    assertEquals(s0, s1)
  }

  /**
   * Property: moveTo followed by undo restores the state for any target.
   *
   * Bug fix: the property previously ignored the generated `stepSize` and
   * always called `moveTo(100, 100)`, so ScalaCheck just repeated one fixed
   * case; it now moves to a generated position. The copy-pasted name
   * `propForwardUndo` is also renamed to `propMoveToUndo`.
   */
  @Test
  def testManyMoveToUndo {
    val propMoveToUndo = forAll { stepSize: Int =>
      val s0 = turtle.state
      turtle.moveTo(stepSize, stepSize)
      turtle.undo()
      val s1 = turtle.state
      s0 == s1
    }
    assertTrue(SCTest.check(propMoveToUndo).passed)
  }

  /**
   * Saving style 1, switching to style 2, restoring, and undoing walks the
   * style history back correctly through both the restore and the three
   * individual style mutations.
   */
  @Test
  def testStyleRestoreUndo1 {
    // style 1
    turtle.setPenThickness(1)
    turtle.setPenColor(Color.blue)
    turtle.setFillColor(Color.green)
    turtle.saveStyle()
    // style 2
    turtle.setPenThickness(3)
    turtle.setPenColor(Color.green)
    turtle.setFillColor(Color.blue)
    assertEquals(Style(Color.green, 3, Color.blue), turtle.style)
    // change to style 1
    turtle.restoreStyle()
    assertEquals(Style(Color.blue, 1, Color.green), turtle.style)
    // undo style 1 change. Back to style 2
    turtle.undo()
    assertEquals(Style(Color.green, 3, Color.blue), turtle.style)
    // undo 3 steps of setting style 2
    turtle.undo()
    turtle.undo()
    turtle.undo()
    // back to style 1
    assertEquals(Style(Color.blue, 1, Color.green), turtle.style)
  }

  /** Undoing a restore re-applies the pre-restore pen thickness, and re-restoring works. */
  @Test
  def testStyleRestoreUndo2 {
    turtle.setPenThickness(5)
    turtle.saveStyle()
    turtle.setPenThickness(10)
    assertEquals(10, turtle.style.penThickness, 0.001)
    turtle.restoreStyle()
    assertEquals(5, turtle.style.penThickness, 0.001)
    // undo restore
    turtle.undo()
    assertEquals(10, turtle.style.penThickness, 0.001)
    // make sure re-restore works
    turtle.restoreStyle()
    assertEquals(5, turtle.style.penThickness, 0.001)
    // undo restore back to 10
    turtle.undo()
    // undo setting to 10
    turtle.undo()
    assertEquals(5, turtle.style.penThickness, 0.001)
  }

  /** Style restore/undo also updates the stroke thickness of the current pen path. */
  @Test
  def testStyleRestoreUndo3 {
    // style 1
    turtle.setPenThickness(1)
    turtle.setPenColor(Color.blue)
    turtle.setFillColor(Color.green)
    turtle.saveStyle()
    // style 2
    turtle.setPenThickness(3)
    turtle.setPenColor(Color.green)
    turtle.setFillColor(Color.blue)
    assertEquals(Style(Color.green, 3, Color.blue), turtle.style)
    assertEquals(3, turtle.penPaths.last.strokeThickness, 0.001)
    // change to style 1
    turtle.restoreStyle()
    assertEquals(Style(Color.blue, 1, Color.green), turtle.style)
    assertEquals(1, turtle.penPaths.last.strokeThickness, 0.001)
    // undo style 1 change. Back to style 2
    turtle.undo()
    assertEquals(Style(Color.green, 3, Color.blue), turtle.style)
    assertEquals(3, turtle.penPaths.last.strokeThickness, 0.001)
  }
}
| richardfontana/fontana2007-t | KojoEnv/test/unit/src/net/kogics/kojo/turtle/TurtleUndoTest.scala | Scala | gpl-3.0 | 5,434 |
package models.annotation
import com.scalableminds.util.accesscontext.DBAccessContext
import com.scalableminds.util.tools.{Fox, FoxImplicits}
import com.scalableminds.webknossos.datastore.rpc.RPC
import com.scalableminds.webknossos.schema.Tables._
import com.typesafe.scalalogging.LazyLogging
import javax.inject.Inject
import models.binary.DataSet
import play.api.i18n.{Messages, MessagesProvider}
import play.api.libs.json.{JsObject, Json}
import play.api.mvc.{Result, Results}
import slick.jdbc.PostgresProfile.api._
import slick.lifted.Rep
import utils.{SQLClient, SQLDAO}
import scala.concurrent.{ExecutionContext, Future}
/**
  * A registered tracing store.
  *
  * @param name      unique store name (also the row id in the DB)
  * @param url       internal URL used for server-to-server communication
  * @param publicUrl URL handed out to browser clients
  * @param key       shared secret the store uses to authenticate requests
  * @param isDeleted soft-deletion flag
  */
case class TracingStore(name: String,
                        url: String,
                        publicUrl: String,
                        key: String,
                        isDeleted: Boolean = false)

object TracingStore {

  /** Builds a store from the user-editable update form; the secret key is left blank. */
  def fromUpdateForm(name: String, url: String, publicUrl: String): TracingStore =
    TracingStore(name = name, url = url, publicUrl = publicUrl, key = "")
}
/** Application-level operations on tracing stores: serialization, request
  * authentication, and client construction for remote calls. */
class TracingStoreService @Inject()(tracingStoreDAO: TracingStoreDAO, rpc: RPC)(implicit ec: ExecutionContext)
    extends FoxImplicits
    with LazyLogging
    with Results {

  /** JSON representation exposed to web clients; the secret key is deliberately omitted. */
  def publicWrites(tracingStore: TracingStore): Fox[JsObject] =
    Fox.successful(
      Json.obj(
        "name" -> tracingStore.name,
        "url" -> tracingStore.publicUrl
      ))

  /** Authenticates a request from a tracing store by its key and, if valid, runs the
    * given action. Unknown keys are logged and answered with 403 Forbidden. */
  def validateAccess[A](name: String, key: String)(block: TracingStore => Future[Result])(
      implicit m: MessagesProvider): Fox[Result] =
    tracingStoreDAO
      .findOneByKey(key) // Check if key is valid
      .flatMap(tracingStore => block(tracingStore)) // Run underlying action
      .getOrElse {
        logger.info(s"Denying tracing store request from $name due to unknown key.")
        Forbidden(Messages("tracingStore.notFound"))
      } // Default error

  // NOTE(review): always picks the first store found and ignores dataSet —
  // presumably a single-tracing-store deployment is assumed; confirm before
  // supporting multiple stores.
  def clientFor(dataSet: DataSet)(implicit ctx: DBAccessContext): Fox[WKRemoteTracingStoreClient] =
    for {
      tracingStore <- tracingStoreDAO.findFirst ?~> "tracingStore.notFound"
    } yield new WKRemoteTracingStoreClient(tracingStore, dataSet, rpc)
}
/** Slick-based persistence for tracing stores. Deletion is soft (isDeleted flag);
  * all lookups filter out deleted rows via notdel / the read-access query. */
class TracingStoreDAO @Inject()(sqlClient: SQLClient)(implicit ec: ExecutionContext)
    extends SQLDAO[TracingStore, TracingstoresRow, Tracingstores](sqlClient) {
  val collection = Tracingstores

  // The store name doubles as the primary key column.
  def idColumn(x: Tracingstores): Rep[String] = x.name
  def isDeletedColumn(x: Tracingstores): Rep[Boolean] = x.isdeleted

  /** Maps a database row to the domain object; never fails. */
  def parse(r: TracingstoresRow): Fox[TracingStore] =
    Fox.successful(
      TracingStore(
        r.name,
        r.url,
        r.publicurl,
        r.key,
        r.isdeleted
      ))

  /** Looks up a non-deleted store by its secret key (used to authenticate store requests). */
  def findOneByKey(key: String): Fox[TracingStore] =
    for {
      rOpt <- run(Tracingstores.filter(r => notdel(r) && r.key === key).result.headOption)
      r <- rOpt.toFox
      parsed <- parse(r)
    } yield {
      parsed
    }

  /** Looks up a non-deleted store by its unique name. */
  def findOneByName(name: String): Fox[TracingStore] =
    for {
      rOpt <- run(Tracingstores.filter(r => notdel(r) && r.name === name).result.headOption)
      r <- rOpt.toFox
      parsed <- parse(r)
    } yield {
      parsed
    }

  /** Looks up a store by internal URL, honoring the caller's read-access restrictions. */
  def findOneByUrl(url: String)(implicit ctx: DBAccessContext): Fox[TracingStore] =
    for {
      accessQuery <- readAccessQuery
      r <- run(
        sql"select #$columns from webknossos.tracingstores_ where url = $url and #$accessQuery".as[TracingstoresRow])
      parsed <- parseFirst(r, url)
    } yield parsed

  /** Returns an arbitrary accessible store (the head of findAll); fails if none exists. */
  def findFirst(implicit ctx: DBAccessContext): Fox[TracingStore] =
    for {
      all <- findAll
      first <- all.headOption.toFox
    } yield first

  def insertOne(t: TracingStore): Fox[Unit] =
    for {
      _ <- run(sqlu"""insert into webknossos.tracingStores(name, url, publicUrl, key, isDeleted)
                        values(${t.name}, ${t.url}, ${t.publicUrl}, ${t.key}, ${t.isDeleted})""")
    } yield ()

  /** Soft delete: the row is flagged, not removed. */
  def deleteOneByName(name: String): Fox[Unit] =
    for {
      _ <- run(sqlu"""update webknossos.tracingStores set isDeleted = true where name = $name""")
    } yield ()

  // Only the URLs are updatable here; name is the key, and the secret key and
  // deletion flag are managed by dedicated operations.
  def updateOne(t: TracingStore): Fox[Unit] =
    for {
      _ <- run(
        sqlu""" update webknossos.tracingStores set url = ${t.url}, publicUrl = ${t.publicUrl} where name = ${t.name}""")
    } yield ()
}
| scalableminds/webknossos | app/models/annotation/TracingStore.scala | Scala | agpl-3.0 | 4,125 |
import _root_.io.gatling.core.scenario.Simulation
import ch.qos.logback.classic.{Level, LoggerContext}
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
/**
* Performance test for the DataCounter entity.
*/
class DataCounterGatlingTest extends Simulation {

    val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]

    // Log all HTTP requests
    //context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE"))
    // Log failed HTTP requests
    //context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG"))

    // Target host is overridable from the command line: -DbaseURL=http://host:port
    val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://127.0.0.1:8080"""

    // Shared HTTP protocol configuration for every request in this simulation.
    val httpConf = http
        .baseURL(baseURL)
        .inferHtmlResources()
        .acceptHeader("*/*")
        .acceptEncodingHeader("gzip, deflate")
        .acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3")
        .connection("keep-alive")
        .userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0")

    // Headers for unauthenticated JSON requests.
    val headers_http = Map(
        "Accept" -> """application/json"""
    )

    // Headers for the login request.
    val headers_http_authentication = Map(
        "Content-Type" -> """application/json""",
        "Accept" -> """application/json"""
    )

    // Headers for authenticated requests; the JWT captured during login is
    // injected via the Gatling session attribute ${access_token}.
    val headers_http_authenticated = Map(
        "Accept" -> """application/json""",
        "Authorization" -> "${access_token}"
    )

    // Scenario: verify auth is enforced, log in, then exercise the DataCounter
    // CRUD endpoints (list, create, read the created entity, delete it).
    val scn = scenario("Test the DataCounter entity")
        .exec(http("First unauthenticated request")
        .get("/api/account")
        .headers(headers_http)
        .check(status.is(401))).exitHereIfFailed
        .pause(10)
        .exec(http("Authentication")
        .post("/api/authenticate")
        .headers(headers_http_authentication)
        .body(StringBody("""{"username":"admin", "password":"admin"}""")).asJSON
        .check(header.get("Authorization").saveAs("access_token"))).exitHereIfFailed
        .pause(1)
        .exec(http("Authenticated request")
        .get("/api/account")
        .headers(headers_http_authenticated)
        .check(status.is(200)))
        .pause(10)
        .repeat(2) {
            exec(http("Get all dataCounters")
            .get("/api/data-counters")
            .headers(headers_http_authenticated)
            .check(status.is(200)))
            .pause(10 seconds, 20 seconds)
            .exec(http("Create new dataCounter")
            .post("/api/data-counters")
            .headers(headers_http_authenticated)
            .body(StringBody("""{"id":null, "vtype":"SAMPLE_TEXT", "value":"0"}""")).asJSON
            .check(status.is(201))
            .check(headerRegex("Location", "(.*)").saveAs("new_dataCounter_url"))).exitHereIfFailed
            .pause(10)
            .repeat(5) {
                exec(http("Get created dataCounter")
                .get("${new_dataCounter_url}")
                .headers(headers_http_authenticated))
                .pause(10)
            }
            .exec(http("Delete created dataCounter")
            .delete("${new_dataCounter_url}")
            .headers(headers_http_authenticated))
            .pause(10)
        }

    val users = scenario("Users").exec(scn)

    // Load profile: ramp 100 virtual users over one minute.
    setUp(
        users.inject(rampUsers(100) over (1 minutes))
    ).protocols(httpConf)
}
| chrislovecnm/gpmr | pet-race-ui/src/test/gatling/simulations/DataCounterGatlingTest.scala | Scala | apache-2.0 | 3,342 |
//: ----------------------------------------------------------------------------
//: Copyright (C) 2015 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//: http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package quiver
import scodec._
/** scodec codecs for (de)serializing quiver graphs. */
object GraphCodecs {

  /** Codec for a labeled edge: source node, destination node, then the edge label. */
  def ledge[N: Codec, A: Codec]: Codec[LEdge[N,A]] =
    (implicitly[Codec[N]] ::
      implicitly[Codec[N]] ::
      implicitly[Codec[A]]).as[LEdge[N,A]]

  /** Codec for a labeled node: the node value followed by its label. */
  def lnode[N: Codec, A: Codec]: Codec[LNode[N,A]] =
    (implicitly[Codec[N]] ::
      implicitly[Codec[A]]).as[LNode[N,A]]

  // needed because sometimes the codecs are greedy which
  // makes the whole graph not decode correctly.
  // (variableSizeBytes prefixes the payload with a 32-bit byte count so the
  // decoder knows exactly where the sequence ends.)
  private def indexedSeq[A : Codec]: Codec[IndexedSeq[A]] =
    codecs.variableSizeBytes(
      codecs.int32,
      codecs.vector(implicitly[Codec[A]]).xmap(a => a, _.toVector))

  /** Codec for a whole graph, encoded as its labeled nodes followed by its
    * labeled edges; decoding rebuilds the graph with safeMkGraph (which
    * tolerates edges referencing missing nodes). */
  def graph[N: Codec, A: Codec, B: Codec]: Codec[Graph[N,A,B]] =
    (indexedSeq(lnode[N,A]) ~ indexedSeq(ledge[N,B])).xmap(
      q => safeMkGraph(q._1,q._2),
      g => (g.labNodes, g.labEdges)
    )
}
| oncue/quiver | codecs/src/main/scala/GraphCodecs.scala | Scala | apache-2.0 | 1,632 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.benchmarks
import java.util.concurrent.TimeUnit
import org.openjdk.jmh.annotations._
/** To do comparative benchmarks between Monix versions:
*
* benchmarks/run-benchmark CoevalShallowBindBenchmark
*
* This will generate results in `benchmarks/results`.
*
* Or to run the benchmark from within SBT:
*
* jmh:run monix.benchmarks.TaskShiftBenchmark
* The above test will take default values as "10 iterations", "10 warm-up iterations",
* "2 forks", "1 thread".
*
* Or to specify custom values use below format:
*
* jmh:run -i 20 -wi 20 -f 4 -t 2 monix.benchmarks.TaskShiftBenchmark
*
* Which means "20 iterations", "20 warm-up iterations", "4 forks", "2 thread".
* Please note that benchmarks should be usually executed at least in
* 10 iterations (as a rule of thumb), but more is better.
*/
@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
@Measurement(iterations = 10)
@Warmup(iterations = 10)
@Fork(2)
@Threads(1)
class CoevalShallowBindBenchmark {
  @Param(Array("10000"))
  var size: Int = _

  /** Throughput of a shallow flatMap chain built from strict `Coeval.now` values. */
  @Benchmark
  def now(): Int = {
    import monix.eval.Coeval

    // Counts up to `size`, binding one Coeval.now per step.
    def step(n: Int): Coeval[Int] =
      if (n < size) Coeval.now(n + 1).flatMap(step)
      else Coeval.now(n)

    Coeval.now(0).flatMap(step).value()
  }

  /** Throughput of a shallow flatMap chain built from lazy `Coeval.eval` thunks. */
  @Benchmark
  def eval(): Int = {
    import monix.eval.Coeval

    // Same counting loop as `now`, but each step suspends in Coeval.eval.
    def step(n: Int): Coeval[Int] =
      if (n < size) Coeval.eval(n + 1).flatMap(step)
      else Coeval.eval(n)

    Coeval.eval(0).flatMap(step).value()
  }
}
| alexandru/monifu | benchmarks/shared/src/main/scala/monix/benchmarks/CoevalShallowBindBenchmark.scala | Scala | apache-2.0 | 2,246 |
package lila.chat
import chess.Color
import lila.db.dsl._
import lila.user.{ User, UserRepo }
/** Chat read/write API. `userChat` handles chats of registered users (with
  * timeouts and moderation), `playerChat` handles anonymous per-color game
  * chats. Lines are stored capped per chat and broadcast on the lila bus. */
final class ChatApi(
    coll: Coll,
    chatTimeout: ChatTimeout,
    flood: lila.security.Flood,
    shutup: akka.actor.ActorSelection,
    modLog: akka.actor.ActorSelection,
    lilaBus: lila.common.Bus,
    maxLinesPerChat: Int,
    netDomain: String) {

  import Chat.userChatBSONHandler

  object userChat {

    def findOption(chatId: ChatId): Fu[Option[UserChat]] =
      coll.byId[UserChat](chatId)

    // Missing chats are materialized as empty ones rather than failing.
    def find(chatId: ChatId): Fu[UserChat] =
      findOption(chatId) map (_ | Chat.makeUser(chatId))

    def findAll(chatIds : List[ChatId]) : Fu[List[UserChat]] =
      coll.byIds[UserChat](chatIds)

    // Returns the chat as seen by `me` (troll filtering) plus whether `me` is
    // currently timed out in it. The timeout lookup is skipped for empty chats.
    def findMine(chatId: ChatId, me: User): Fu[UserChat.Mine] = find(chatId) flatMap { chat =>
      (!chat.isEmpty ?? chatTimeout.isActive(chatId, me.id)) map {
        UserChat.Mine(chat forUser me.some, _)
      }
    }

    def findMine(chatId: ChatId, me: Option[User]): Fu[UserChat.Mine] = me match {
      case Some(user) => findMine(chatId, user)
      case None => find(chatId) map { UserChat.Mine(_, false) }
    }

    // Persists a user line (if it passes flood/timeout/ban checks), reports it
    // to the shutup actor for abuse detection, and broadcasts it on the bus.
    def write(chatId: ChatId, userId: String, text: String, public: Boolean): Funit =
      makeLine(chatId, userId, text) flatMap {
        _ ?? { line =>
          pushLine(chatId, line) >>- {
            shutup ! {
              import lila.hub.actorApi.shutup._
              if (public) RecordPublicChat(chatId, userId, text)
              else RecordPrivateChat(chatId, userId, text)
            }
            lilaBus.publish(actorApi.ChatLine(chatId, line), channelOf(chatId))
          }
        }
      }

    // Writes a system line (attributed to the system user) and broadcasts it.
    def system(chatId: ChatId, text: String) = {
      val line = UserLine(systemUserId, Writer delocalize text, troll = false, deleted = false)
      pushLine(chatId, line) >>-
        lilaBus.publish(actorApi.ChatLine(chatId, line), channelOf(chatId)) inject line.some
    }

    // Applies a moderation timeout; silently does nothing if the chat, mod, or
    // user is missing, or if modId lacks the ChatTimeout permission.
    def timeout(chatId: ChatId, modId: String, userId: String, reason: ChatTimeout.Reason): Funit =
      coll.byId[UserChat](chatId) zip UserRepo.byId(modId) zip UserRepo.byId(userId) flatMap {
        case ((Some(chat), Some(mod)), Some(user)) if isMod(mod) => doTimeout(chat, mod, user, reason)
        case _ => fuccess(none)
      }

    // Last 20 timeout records for a user, for the mod UI.
    def userModInfo(username: String): Fu[Option[UserModInfo]] =
      UserRepo named username flatMap {
        _ ?? { user =>
          chatTimeout.history(user, 20) map { UserModInfo(user, _).some }
        }
      }

    // Marks the user's lines deleted, appends a system notice, records the
    // timeout, then notifies the bus (timeout + new line) and the mod log.
    private def doTimeout(c: UserChat, mod: User, user: User, reason: ChatTimeout.Reason): Funit = {
      val line = UserLine(
        username = systemUserId,
        text = s"${user.username} was timed out 10 minutes for ${reason.name}.",
        troll = false, deleted = false)
      val chat = c.markDeleted(user) add line
      coll.update($id(chat.id), chat).void >>
        chatTimeout.add(c, mod, user, reason) >>- {
          lilaBus.publish(actorApi.OnTimeout(user.username), channelOf(chat.id))
          lilaBus.publish(actorApi.ChatLine(chat.id, line), channelOf(chat.id))
          modLog ! lila.hub.actorApi.mod.ChatTimeout(
            mod = mod.id, user = user.id, reason = reason.key)
        }
    }

    // Broadcasts expired timeouts so clients can re-enable chat input.
    def reinstate(list: List[ChatTimeout.Reinstate]) = list.foreach { r =>
      lilaBus.publish(actorApi.OnReinstate(r.user), Symbol(s"chat-${r.chat}"))
    }

    private def isMod(user: User) = lila.security.Granter(_.ChatTimeout)(user)

    // Builds a UserLine from raw input, or None when the user is missing,
    // disabled, timed out, the text is empty after trimming, or flood-blocked.
    private[ChatApi] def makeLine(chatId: String, userId: String, t1: String): Fu[Option[UserLine]] =
      UserRepo.byId(userId) zip chatTimeout.isActive(chatId, userId) map {
        case (Some(user), false) if !user.disabled => Writer cut t1 flatMap { t2 =>
          flood.allowMessage(user.id, t2) option
            UserLine(user.username, Writer preprocessUserInput t2, troll = user.troll, deleted = false)
        }
        case _ => none
      }
  }

  object playerChat {

    def findOption(chatId: ChatId): Fu[Option[MixedChat]] =
      coll.byId[MixedChat](chatId)

    def find(chatId: ChatId): Fu[MixedChat] =
      findOption(chatId) map (_ | Chat.makeMixed(chatId))

    def findNonEmpty(chatId: ChatId): Fu[Option[MixedChat]] =
      findOption(chatId) map (_ filter (_.nonEmpty))

    // Writes an anonymous player line, keyed by color; flood-checked per
    // chatId/color pair instead of per user id.
    def write(chatId: ChatId, color: Color, text: String): Funit =
      makeLine(chatId, color, text) ?? { line =>
        pushLine(chatId, line) >>-
          lilaBus.publish(actorApi.ChatLine(chatId, line), channelOf(chatId))
      }

    private def makeLine(chatId: ChatId, color: Color, t1: String): Option[Line] =
      Writer cut t1 flatMap { t2 =>
        flood.allowMessage(s"$chatId/${color.letter}", t2) option
          PlayerLine(color, Writer preprocessUserInput t2)
      }
  }

  // Appends a line to the chat document, creating it if needed; $slice keeps
  // only the newest maxLinesPerChat lines. Also bumps the chat message metric.
  private def pushLine(chatId: ChatId, line: Line): Funit = coll.update(
    $id(chatId),
    $doc("$push" -> $doc(
      Chat.BSONFields.lines -> $doc(
        "$each" -> List(Line.lineBSONHandler(false).write(line)),
        "$slice" -> -maxLinesPerChat)
    )),
    upsert = true
  ).void >>- lila.mon.chat.message()

  private def channelOf(id: String) = Symbol(s"chat-$id")

  // Input sanitization: trim/limit length, strip localized variants of the
  // site domain, rewrite full game URLs to their public 8-char form, and
  // lowercase all-caps shouting.
  private object Writer {

    import java.util.regex.Matcher.quoteReplacement

    def preprocessUserInput(in: String) = noShouting(delocalize(noPrivateUrl(in)))

    // Max 140 chars; empty-after-trim input yields None.
    def cut(text: String) = Some(text.trim take 140) filter (_.nonEmpty)

    val delocalize = new lila.common.String.Delocalizer(netDomain)
    val domainRegex = netDomain.replace(".", """\\.""")
    val gameUrlRegex = (domainRegex + """\\b/([\\w]{8})[\\w]{4}\\b""").r
    // Replaces a 12-char private game URL with the 8-char public game URL.
    def noPrivateUrl(str: String): String =
      gameUrlRegex.replaceAllIn(str, m => quoteReplacement(netDomain + "/" + (m group 1)))
  }

  private object noShouting {
    import java.lang.Character.isUpperCase
    private val onlyLettersRegex = """[^\\w]""".r
    // Lowercases the message when more than half of its (first 80) word
    // characters are uppercase; short messages are left alone.
    def apply(text: String) = if (text.size < 5) text else {
      val onlyLetters = onlyLettersRegex.replaceAllIn(text take 80, "")
      if (onlyLetters.count(isUpperCase) > onlyLetters.size / 2)
        text.toLowerCase
      else text
    }
  }
}
| clarkerubber/lila | modules/chat/src/main/ChatApi.scala | Scala | agpl-3.0 | 6,074 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.calcite
import org.apache.flink.sql.parser.ExtendedSqlNode
import org.apache.flink.sql.parser.dql.{SqlShowCatalogs, SqlShowDatabases, SqlShowFunctions, SqlShowTables}
import org.apache.flink.table.api.{TableException, ValidationException}
import org.apache.flink.table.catalog.CatalogReader
import org.apache.calcite.plan.RelOptTable.ViewExpander
import org.apache.calcite.plan._
import org.apache.calcite.rel.RelRoot
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rex.RexBuilder
import org.apache.calcite.sql.advise.{SqlAdvisor, SqlAdvisorValidator}
import org.apache.calcite.sql.{SqlKind, SqlNode, SqlOperatorTable}
import org.apache.calcite.sql2rel.{SqlRexConvertletTable, SqlToRelConverter}
import org.apache.calcite.tools.{FrameworkConfig, RelConversionException}
import _root_.java.lang.{Boolean => JBoolean}
import _root_.java.util
import _root_.java.util.function.{Function => JFunction}
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
/**
* NOTE: this is heavily inspired by Calcite's PlannerImpl.
* We need it in order to share the planner between the Table API relational plans
* and the SQL relation plans that are created by the Calcite parser.
* The main difference is that we do not create a new RelOptPlanner in the ready() method.
*/
class FlinkPlannerImpl(
    val config: FrameworkConfig,
    val catalogReaderSupplier: JFunction[JBoolean, CatalogReader],
    planner: RelOptPlanner,
    val typeFactory: FlinkTypeFactory)
  extends ViewExpander {

  val operatorTable: SqlOperatorTable = config.getOperatorTable
  val parser: CalciteParser = new CalciteParser(config.getParserConfig)
  val convertletTable: SqlRexConvertletTable = config.getConvertletTable
  val sqlToRelConverterConfig: SqlToRelConverter.Config = config.getSqlToRelConverterConfig

  // Lazily created, cached validator; see getOrCreateSqlValidator.
  var validator: FlinkCalciteSqlValidator = _

  /** Returns SQL auto-completion hints for the given statement and cursor position. */
  def getCompletionHints(sql: String, cursor: Int): Array[String] = {
    val advisorValidator = new SqlAdvisorValidator(
      operatorTable,
      catalogReaderSupplier.apply(true), // ignore cases for lenient completion
      typeFactory,
      config.getParserConfig.conformance())
    val advisor = new SqlAdvisor(advisorValidator, config.getParserConfig)
    val replaced = Array[String](null)
    val hints = advisor.getCompletionHints(sql, cursor, replaced)
      .map(item => item.toIdentifier.toString)
    hints.toArray
  }

  /**
    * Get the [[FlinkCalciteSqlValidator]] instance from this planner, create a new instance
    * if current validator has not been initialized, or returns the validator
    * instance directly.
    *
    * <p>The validator instance creation is not thread safe.
    *
    * @return a new validator instance or current existed one
    */
  def getOrCreateSqlValidator(): FlinkCalciteSqlValidator = {
    if (validator == null) {
      val catalogReader = catalogReaderSupplier.apply(false)
      validator = createSqlValidator(catalogReader)
    }
    validator
  }

  private def createSqlValidator(catalogReader: CatalogReader) = {
    val validator = new FlinkCalciteSqlValidator(
      operatorTable,
      catalogReader,
      typeFactory)
    validator.setIdentifierExpansion(true)
    // Disable implicit type coercion for now.
    validator.setEnableTypeCoercion(false)
    validator
  }

  /** Validates a parsed SQL node using the shared validator instance. */
  def validate(sqlNode: SqlNode): SqlNode = {
    val validator = getOrCreateSqlValidator()
    validateInternal(sqlNode, validator)
  }

  // Runs pre-validation rewriting, extended (Flink-specific) validation, and —
  // for query-like statements only — Calcite validation. Validation errors are
  // rethrown as ValidationException.
  private def validateInternal(sqlNode: SqlNode, validator: FlinkCalciteSqlValidator): SqlNode = {
    try {
      sqlNode.accept(new PreValidateReWriter(
        validator.getCatalogReader.unwrap(classOf[CatalogReader]), typeFactory))
      // do extended validation.
      sqlNode match {
        case node: ExtendedSqlNode =>
          node.validate()
        case _ =>
      }
      // no need to validate row type for DDL and insert nodes.
      if (sqlNode.getKind.belongsTo(SqlKind.DDL)
        || sqlNode.getKind == SqlKind.INSERT
        || sqlNode.getKind == SqlKind.CREATE_FUNCTION
        || sqlNode.getKind == SqlKind.DROP_FUNCTION
        || sqlNode.getKind == SqlKind.OTHER_DDL
        || sqlNode.isInstanceOf[SqlShowCatalogs]
        || sqlNode.isInstanceOf[SqlShowDatabases]
        || sqlNode.isInstanceOf[SqlShowTables]
        || sqlNode.isInstanceOf[SqlShowFunctions]) {
        return sqlNode
      }
      validator.validate(sqlNode)
    }
    catch {
      case e: RuntimeException =>
        throw new ValidationException(s"SQL validation failed. ${e.getMessage}", e)
    }
  }

  /** Converts a validated SQL node to a relational expression tree. */
  def rel(validatedSqlNode: SqlNode): RelRoot = {
    rel(validatedSqlNode, getOrCreateSqlValidator())
  }

  private def rel(validatedSqlNode: SqlNode, sqlValidator: FlinkCalciteSqlValidator) = {
    try {
      assert(validatedSqlNode != null)
      val rexBuilder: RexBuilder = createRexBuilder
      val cluster: RelOptCluster = FlinkRelOptClusterFactory.create(planner, rexBuilder)
      val sqlToRelConverter: SqlToRelConverter = new SqlToRelConverter(
        this,
        sqlValidator,
        sqlValidator.getCatalogReader.unwrap(classOf[CatalogReader]),
        cluster,
        convertletTable,
        sqlToRelConverterConfig)
      sqlToRelConverter.convertQuery(validatedSqlNode, false, true)
      // we disable automatic flattening in order to let composite types pass without modification
      // we might enable it again once Calcite has better support for structured types
      // root = root.withRel(sqlToRelConverter.flattenTypes(root.rel, true))

      // TableEnvironment.optimize will execute the following
      // root = root.withRel(RelDecorrelator.decorrelateQuery(root.rel))

      // convert time indicators
      // root = root.withRel(RelTimeIndicatorConverter.convert(root.rel, rexBuilder))
    } catch {
      case e: RelConversionException => throw new TableException(e.getMessage)
    }
  }

  // ViewExpander callback: re-parses and re-validates the view's SQL with the
  // catalog path adjusted to the view's schema, then converts it to a RelRoot.
  override def expandView(
      rowType: RelDataType,
      queryString: String,
      schemaPath: util.List[String],
      viewPath: util.List[String])
    : RelRoot = {
    val parsed = parser.parse(queryString)
    val originalReader = catalogReaderSupplier.apply(false)
    val readerWithPathAdjusted = new CatalogReader(
      originalReader.getRootSchema,
      List(schemaPath, schemaPath.subList(0, 1)).asJava,
      originalReader.getTypeFactory,
      originalReader.getConfig
    )
    val validator = createSqlValidator(readerWithPathAdjusted)
    val validated = validateInternal(parsed, validator)
    rel(validated, validator)
  }

  private def createRexBuilder: RexBuilder = {
    new RexBuilder(typeFactory)
  }
}
| bowenli86/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/calcite/FlinkPlannerImpl.scala | Scala | apache-2.0 | 7,477 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and returns a sample of code snippets matching specific criteria, giving a quick overview of the dataset's contents rather than an in-depth analysis.