code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2005-2010, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala.actors
package remote
/**
* This object provides methods for creating, registering, and
* selecting remotely accessible actors.
*
* A remote actor is typically created like this:
* {{{
* actor {
* alive(9010)
* register('myName, self)
*
* // behavior
* }
* }}}
* It can be accessed by an actor running on a (possibly)
* different node by selecting it in the following way:
* {{{
* actor {
* // ...
* val c = select(Node("127.0.0.1", 9010), 'myName)
* c ! msg
* // ...
* }
* }}}
*
* @author Philipp Haller
*/
object RemoteActor {

  /** Maps each local actor to the `NetKernel` that services its remote
   *  communication. Access is guarded by this object's monitor
   *  (see `alive`, `register`, `select`).
   */
  private val kernels = new scala.collection.mutable.HashMap[Actor, NetKernel]

  /* If set to <code>null</code> (default), the default class loader
   * of <code>java.io.ObjectInputStream</code> is used for deserializing
   * objects sent as messages.
   */
  private var cl: ClassLoader = null

  /** The class loader used to deserialize incoming messages (may be `null`). */
  def classLoader: ClassLoader = cl

  /** Sets the class loader used to deserialize incoming messages. */
  def classLoader_=(x: ClassLoader) { cl = x }

  /**
   * Makes <code>self</code> remotely accessible on TCP port
   * <code>port</code>.
   */
  def alive(port: Int): Unit = synchronized {
    createNetKernelOnPort(port)
  }

  /** Starts (or reuses) a `TcpService` on `port` and maps the calling actor
   *  to its kernel. When the actor terminates, its mapping is removed and the
   *  kernel is terminated once no other actor still uses it.
   */
  private def createNetKernelOnPort(port: Int): NetKernel = {
    val serv = TcpService(port, cl)
    val kern = serv.kernel
    val s = Actor.self
    kernels += Pair(s, kern)

    s.onTerminate {
      Debug.info("alive actor "+s+" terminated")
      // remove mapping for `s`
      kernels -= s
      // terminate `kern` when it does
      // not appear as value any more
      if (!kernels.valuesIterator.contains(kern)) {
        Debug.info("terminating "+kern)
        // terminate NetKernel
        kern.terminate()
      }
    }
    kern
  }

  @deprecated("this member is going to be removed in a future release")
  def createKernelOnPort(port: Int): NetKernel =
    createNetKernelOnPort(port)

  /**
   * Registers <code>a</code> under <code>name</code> on this
   * node.
   */
  def register(name: Symbol, a: Actor): Unit = synchronized {
    val kernel = kernels.get(Actor.self) match {
      case None =>
        // no kernel associated with the calling actor yet:
        // start a TcpService on a freshly generated port
        val serv = TcpService(TcpService.generatePort, cl)
        kernels += Pair(Actor.self, serv.kernel)
        serv.kernel
      case Some(k) =>
        k
    }
    kernel.register(name, a)
  }

  /** The kernel associated with the calling actor, creating one on a
   *  generated port if none exists yet.
   */
  private def selfKernel = kernels.get(Actor.self) match {
    case None =>
      // establish remotely accessible
      // return path (sender)
      createNetKernelOnPort(TcpService.generatePort)
    case Some(k) =>
      k
  }

  /**
   * Returns (a proxy for) the actor registered under
   * <code>name</code> on <code>node</code>.
   */
  def select(node: Node, sym: Symbol): AbstractActor = synchronized {
    selfKernel.getOrCreateProxy(node, sym)
  }

  // NOTE(review): `valuesIterator.next` throws NoSuchElementException when no
  // kernel has been created yet — callers apparently rely on `alive`/`register`
  // having been invoked first; confirm before using this elsewhere.
  private[remote] def someNetKernel: NetKernel =
    kernels.valuesIterator.next

  @deprecated("this member is going to be removed in a future release")
  def someKernel: NetKernel =
    someNetKernel
}
/**
* This class represents a machine node on a TCP network.
*
* @param address the host name, or <code>null</code> for the loopback address.
* @param port the port number.
*
* @author Philipp Haller
*/
case class Node(address: String, port: Int)
| cran/rkafkajars | java/scala/actors/remote/RemoteActor.scala | Scala | apache-2.0 | 3,738 |
package com.github.vonnagy.service.container.health
import akka.actor._
import akka.testkit.TestActorRef
import com.github.vonnagy.service.container.AkkaTestkitSpecs2Support
import org.specs2.mutable.SpecificationLike
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
class RegisteredHealthCheckActorSpec extends AkkaTestkitSpecs2Support with SpecificationLike {

  // Run examples in declaration order: both examples register checks against
  // the shared Health extension, so the expected counts (1, then 2) depend on
  // execution order.
  sequential

  "Health check registration" should {

    "allow for the creation of a registered health check" in {
      val r = new TestRegisteredHealthCheck()(system)
      // The check resolves asynchronously; block briefly and inspect the state.
      Await.result[HealthInfo](r.getHealth, 1 second).state must be equalTo HealthState.OK
      Health(system).getChecks.length must be equalTo 1
    }

    "allow for the creation of a registered health check actor" in {
      val ext = Health(system)
      // Actor-based check that answers GetHealth with a fixed healthy response.
      val act = TestActorRef(new Actor with RegisteredHealthCheckActor {
        def receive = {
          case GetHealth => sender ! HealthInfo("test", HealthState.OK, "details")
        }
      })
      Await.result[HealthInfo](act.underlyingActor.getHealth, 1 second).state must be equalTo HealthState.OK
      // Second check overall (the previous example registered the first).
      ext.getChecks.length must be equalTo 2
    }
  }
}
/** Minimal `RegisteredHealthCheck` used for testing: always reports OK. */
class TestRegisteredHealthCheck(implicit val system: ActorSystem) extends RegisteredHealthCheck {

  import system.dispatcher

  // Completes asynchronously on the actor system's dispatcher.
  def getHealth: Future[HealthInfo] = Future {
    HealthInfo("test", HealthState.OK, "details")
  }
}
| vonnagy/service-container | service-container/src/test/scala/com/github/vonnagy/service/container/health/RegisteredHealthCheckActorSpec.scala | Scala | apache-2.0 | 1,418 |
package demo
package pages
import chandu0101.scalajs.react.components.mixins.AsyncLoad
import demo.components.LeftNavPage
import demo.routes.{LeftRoute, SpinnerRouteModule}
import japgolly.scalajs.react.extra.router2.RouterCtl
import japgolly.scalajs.react.{BackendScope, ReactComponentB}
object SpinnerPage {

  class Backend(t: BackendScope[_, _]) extends AsyncLoad {
    // Script bundle loaded lazily via the AsyncLoad mixin.
    override val jsResources: Vector[String] = Vector("assets/spinnerPage-bundle.js")
  }

  // Stateless component: renders the spinner demo inside the left-nav layout.
  val component = ReactComponentB[Props]("SpinnerPage")
    .stateless
    .backend(new Backend(_))
    .render((P,S,B) => {
      LeftNavPage(SpinnerRouteModule.menu, P.selectedPage, P.ctrl)
    })
    .configure(AsyncLoad.mixin)
    .build

  /** @param selectedPage currently selected route in the left navigation
   *  @param ctrl         router control used to navigate between routes
   */
  case class Props(selectedPage: LeftRoute, ctrl: RouterCtl[LeftRoute])

  def apply(selectedPage: LeftRoute, ctrl: RouterCtl[LeftRoute]) = component(Props(selectedPage, ctrl))
}
| tpdi/scalajs-react-components | demo/src/main/scala/demo/pages/SpinnerPage.scala | Scala | apache-2.0 | 882 |
package com.sammyrulez.ocs.api
import org.scalatra._
import scalate.ScalateSupport
/** Example Scalatra servlet serving a static HTML greeting at "/". */
class MyServlet extends ScalatraServlet with ScalateSupport {

  get("/") {
    // The XML literal is the route's return value and becomes the response body.
    <html>
      <body>
        <h1>Hello, world!</h1>
        Say <a href="hello-scalate">hello to Scalate</a>.
      </body>
    </html>
  }
}
| sammyrulez/octo-crypto-samurai | src/main/scala/com/sammyrulez/ocs/api/MyServlet.scala | Scala | mit | 305 |
package org.http4s.ember.server.internal
import fs2._
import fs2.concurrent._
import fs2.io.tcp._
import cats.effect._
import cats.implicits._
import scala.concurrent.duration._
import java.net.InetSocketAddress
import org.http4s._
import _root_.org.http4s.ember.core.{Encoder, Parser}
import _root_.org.http4s.ember.core.Util.readWithTimeout
import _root_.io.chrisdavenport.log4cats.Logger
private[server] object ServerHelpers {

  /** Runs an HTTP server on `bindAddress`, serving each accepted connection
   *  with `httpApp`. The returned stream emits nothing and runs until the
   *  termination signal (if supplied) is set to true.
   *
   *  @param onError                     converts an unhandled throwable into a response
   *  @param onWriteFailure              called when writing a response to the socket fails
   *  @param terminationSignal           optional switch; setting it true interrupts the server
   *  @param maxConcurrency              maximum number of connections handled in parallel
   *  @param requestHeaderReceiveTimeout time budget for reading the request head
   */
  def server[F[_]: Concurrent: ContextShift](
      bindAddress: InetSocketAddress,
      httpApp: HttpApp[F],
      sg: SocketGroup,
      // Defaults
      onError: Throwable => Response[F] = { _: Throwable =>
        Response[F](Status.InternalServerError)
      },
      onWriteFailure: (Option[Request[F]], Response[F], Throwable) => F[Unit],
      terminationSignal: Option[SignallingRef[F, Boolean]] = None,
      maxConcurrency: Int = Int.MaxValue,
      receiveBufferSize: Int = 256 * 1024,
      maxHeaderSize: Int = 10 * 1024,
      requestHeaderReceiveTimeout: Duration = 5.seconds,
      additionalSocketOptions: List[SocketOptionMapping[_]] = List.empty,
      logger: Logger[F]
  )(implicit C: Clock[F]): Stream[F, Nothing] = {
    // Termination Signal, if not present then does not terminate.
    val termSignal: F[SignallingRef[F, Boolean]] =
      terminationSignal.fold(SignallingRef[F, Boolean](false))(_.pure[F])

    // Reads and parses one request from the socket. The timeout applies only
    // while the head is being read; it is switched off once parsing succeeds.
    def socketReadRequest(
        socket: Socket[F],
        requestHeaderReceiveTimeout: Duration,
        receiveBufferSize: Int
    ): F[Request[F]] = {
      // Non-finite timeout => the timeout signal starts disabled.
      val (initial, readDuration) = requestHeaderReceiveTimeout match {
        case fin: FiniteDuration => (true, fin)
        case _ => (false, 0.millis)
      }
      SignallingRef[F, Boolean](initial).flatMap { timeoutSignal =>
        C.realTime(MILLISECONDS)
          .flatMap(now =>
            Parser.Request
              .parser(maxHeaderSize)(
                readWithTimeout[F](socket, now, readDuration, timeoutSignal.get, receiveBufferSize)
              )(logger)
              .flatMap { req =>
                // Sync[F].delay(logger.debug(s"Request Processed $req")) *>
                // Head fully read: disable the timeout for the request body.
                timeoutSignal.set(false).as(req)
              })
      }
    }

    Stream
      .eval(termSignal)
      .flatMap(
        terminationSignal =>
          sg.server[F](bindAddress, additionalSocketOptions = additionalSocketOptions)
            .map(connect =>
              Stream.eval(
                connect.use { socket =>
                  // Read one request and run the application on it; errors from
                  // the app are turned into responses via onError.
                  val app: F[(Request[F], Response[F])] = for {
                    req <- socketReadRequest(socket, requestHeaderReceiveTimeout, receiveBufferSize)
                    resp <- httpApp
                      .run(req)
                      .handleError(onError)
                    // .flatTap(resp => Sync[F].delay(logger.debug(s"Response Created $resp")))
                  } yield (req, resp)

                  // Encodes and writes the response; write failures are routed
                  // to onWriteFailure instead of being raised.
                  def send(request: Option[Request[F]], resp: Response[F]): F[Unit] =
                    Stream(resp)
                      .covary[F]
                      .flatMap(Encoder.respToBytes[F])
                      .through(socket.writes())
                      .compile
                      .drain
                      .attempt
                      .flatMap {
                        case Left(err) => onWriteFailure(request, resp, err)
                        case Right(()) => Sync[F].pure(())
                      }

                  // Even a failed request/parse still gets a response written.
                  app.attempt.flatMap {
                    case Right((request, response)) => send(Some(request), response)
                    case Left(err) => send(None, onError(err))
                  }
                }
              ))
            .parJoin(maxConcurrency)
            .interruptWhen(terminationSignal)
            .drain)
  }
}
| ChristopherDavenport/http4s | ember-server/src/main/scala/org/http4s/ember/server/internal/ServerHelpers.scala | Scala | apache-2.0 | 3,761 |
/**
* Generated by Scrooge
* version: 4.7.0
* rev: d9d56174937f524a1981b38ebd6280eef7eeda4a
* built at: 20160427-121531
*/
package com.komanov.serialization.domain.thriftscala
import com.twitter.scrooge.{
LazyTProtocol,
TFieldBlob, ThriftException, ThriftStruct, ThriftStructCodec3, ThriftStructFieldInfo,
ThriftStructMetaData, ThriftUtil}
import org.apache.thrift.protocol._
import org.apache.thrift.transport.{TMemoryBuffer, TTransport}
import java.nio.ByteBuffer
import java.util.Arrays
import scala.collection.immutable.{Map => immutable$Map}
import scala.collection.mutable.Builder
import scala.collection.mutable.{
ArrayBuffer => mutable$ArrayBuffer, Buffer => mutable$Buffer,
HashMap => mutable$HashMap, HashSet => mutable$HashSet}
import scala.collection.{Map, Set}
object DomainEntryPointAddedPb extends ThriftStructCodec3[DomainEntryPointAddedPb] {
private val NoPassthroughFields = immutable$Map.empty[Short, TFieldBlob]
val Struct = new TStruct("DomainEntryPointAddedPb")
val DomainField = new TField("domain", TType.STRING, 1)
val DomainFieldManifest = implicitly[Manifest[String]]
/**
* Field information in declaration order.
*/
lazy val fieldInfos: scala.List[ThriftStructFieldInfo] = scala.List[ThriftStructFieldInfo](
new ThriftStructFieldInfo(
DomainField,
true,
false,
DomainFieldManifest,
_root_.scala.None,
_root_.scala.None,
immutable$Map.empty[String, String],
immutable$Map.empty[String, String]
)
)
lazy val structAnnotations: immutable$Map[String, String] =
immutable$Map.empty[String, String]
/**
* Checks that all required fields are non-null.
*/
def validate(_item: DomainEntryPointAddedPb): Unit = {
}
def withoutPassthroughFields(original: DomainEntryPointAddedPb): DomainEntryPointAddedPb =
new Immutable(
domain =
{
val field = original.domain
field.map { field =>
field
}
}
)
override def encode(_item: DomainEntryPointAddedPb, _oproto: TProtocol): Unit = {
_item.write(_oproto)
}
private[this] def lazyDecode(_iprot: LazyTProtocol): DomainEntryPointAddedPb = {
var domainOffset: Int = -1
var _passthroughFields: Builder[(Short, TFieldBlob), immutable$Map[Short, TFieldBlob]] = null
var _done = false
val _start_offset = _iprot.offset
_iprot.readStructBegin()
while (!_done) {
val _field = _iprot.readFieldBegin()
if (_field.`type` == TType.STOP) {
_done = true
} else {
_field.id match {
case 1 =>
_field.`type` match {
case TType.STRING =>
domainOffset = _iprot.offsetSkipString
case _actualType =>
val _expectedType = TType.STRING
throw new TProtocolException(
"Received wrong type for field 'domain' (expected=%s, actual=%s).".format(
ttypeToString(_expectedType),
ttypeToString(_actualType)
)
)
}
case _ =>
if (_passthroughFields == null)
_passthroughFields = immutable$Map.newBuilder[Short, TFieldBlob]
_passthroughFields += (_field.id -> TFieldBlob.read(_field, _iprot))
}
_iprot.readFieldEnd()
}
}
_iprot.readStructEnd()
new LazyImmutable(
_iprot,
_iprot.buffer,
_start_offset,
_iprot.offset,
domainOffset,
if (_passthroughFields == null)
NoPassthroughFields
else
_passthroughFields.result()
)
}
override def decode(_iprot: TProtocol): DomainEntryPointAddedPb =
_iprot match {
case i: LazyTProtocol => lazyDecode(i)
case i => eagerDecode(i)
}
private[this] def eagerDecode(_iprot: TProtocol): DomainEntryPointAddedPb = {
var domain: _root_.scala.Option[String] = _root_.scala.None
var _passthroughFields: Builder[(Short, TFieldBlob), immutable$Map[Short, TFieldBlob]] = null
var _done = false
_iprot.readStructBegin()
while (!_done) {
val _field = _iprot.readFieldBegin()
if (_field.`type` == TType.STOP) {
_done = true
} else {
_field.id match {
case 1 =>
_field.`type` match {
case TType.STRING =>
domain = _root_.scala.Some(readDomainValue(_iprot))
case _actualType =>
val _expectedType = TType.STRING
throw new TProtocolException(
"Received wrong type for field 'domain' (expected=%s, actual=%s).".format(
ttypeToString(_expectedType),
ttypeToString(_actualType)
)
)
}
case _ =>
if (_passthroughFields == null)
_passthroughFields = immutable$Map.newBuilder[Short, TFieldBlob]
_passthroughFields += (_field.id -> TFieldBlob.read(_field, _iprot))
}
_iprot.readFieldEnd()
}
}
_iprot.readStructEnd()
new Immutable(
domain,
if (_passthroughFields == null)
NoPassthroughFields
else
_passthroughFields.result()
)
}
def apply(
domain: _root_.scala.Option[String] = _root_.scala.None
): DomainEntryPointAddedPb =
new Immutable(
domain
)
def unapply(_item: DomainEntryPointAddedPb): _root_.scala.Option[_root_.scala.Option[String]] = _root_.scala.Some(_item.domain)
@inline private def readDomainValue(_iprot: TProtocol): String = {
_iprot.readString()
}
@inline private def writeDomainField(domain_item: String, _oprot: TProtocol): Unit = {
_oprot.writeFieldBegin(DomainField)
writeDomainValue(domain_item, _oprot)
_oprot.writeFieldEnd()
}
@inline private def writeDomainValue(domain_item: String, _oprot: TProtocol): Unit = {
_oprot.writeString(domain_item)
}
object Immutable extends ThriftStructCodec3[DomainEntryPointAddedPb] {
override def encode(_item: DomainEntryPointAddedPb, _oproto: TProtocol): Unit = { _item.write(_oproto) }
override def decode(_iprot: TProtocol): DomainEntryPointAddedPb = DomainEntryPointAddedPb.decode(_iprot)
override lazy val metaData: ThriftStructMetaData[DomainEntryPointAddedPb] = DomainEntryPointAddedPb.metaData
}
/**
* The default read-only implementation of DomainEntryPointAddedPb. You typically should not need to
* directly reference this class; instead, use the DomainEntryPointAddedPb.apply method to construct
* new instances.
*/
class Immutable(
val domain: _root_.scala.Option[String],
override val _passthroughFields: immutable$Map[Short, TFieldBlob])
extends DomainEntryPointAddedPb {
def this(
domain: _root_.scala.Option[String] = _root_.scala.None
) = this(
domain,
Map.empty
)
}
/**
* This is another Immutable, this however keeps strings as lazy values that are lazily decoded from the backing
* array byte on read.
*/
private[this] class LazyImmutable(
_proto: LazyTProtocol,
_buf: Array[Byte],
_start_offset: Int,
_end_offset: Int,
domainOffset: Int,
override val _passthroughFields: immutable$Map[Short, TFieldBlob])
extends DomainEntryPointAddedPb {
override def write(_oprot: TProtocol): Unit = {
_oprot match {
case i: LazyTProtocol => i.writeRaw(_buf, _start_offset, _end_offset - _start_offset)
case _ => super.write(_oprot)
}
}
lazy val domain: _root_.scala.Option[String] =
if (domainOffset == -1)
None
else {
Some(_proto.decodeString(_buf, domainOffset))
}
/**
* Override the super hash code to make it a lazy val rather than def.
*
* Calculating the hash code can be expensive, caching it where possible
* can provide significant performance wins. (Key in a hash map for instance)
* Usually not safe since the normal constructor will accept a mutable map or
* set as an arg
* Here however we control how the class is generated from serialized data.
* With the class private and the contract that we throw away our mutable references
* having the hash code lazy here is safe.
*/
override lazy val hashCode = super.hashCode
}
/**
* This Proxy trait allows you to extend the DomainEntryPointAddedPb trait with additional state or
* behavior and implement the read-only methods from DomainEntryPointAddedPb using an underlying
* instance.
*/
trait Proxy extends DomainEntryPointAddedPb {
protected def _underlying_DomainEntryPointAddedPb: DomainEntryPointAddedPb
override def domain: _root_.scala.Option[String] = _underlying_DomainEntryPointAddedPb.domain
override def _passthroughFields = _underlying_DomainEntryPointAddedPb._passthroughFields
}
}
trait DomainEntryPointAddedPb
extends ThriftStruct
with scala.Product1[Option[String]]
with java.io.Serializable
{
import DomainEntryPointAddedPb._
def domain: _root_.scala.Option[String]
def _passthroughFields: immutable$Map[Short, TFieldBlob] = immutable$Map.empty
def _1 = domain
/**
* Gets a field value encoded as a binary blob using TCompactProtocol. If the specified field
* is present in the passthrough map, that value is returned. Otherwise, if the specified field
* is known and not optional and set to None, then the field is serialized and returned.
*/
def getFieldBlob(_fieldId: Short): _root_.scala.Option[TFieldBlob] = {
lazy val _buff = new TMemoryBuffer(32)
lazy val _oprot = new TCompactProtocol(_buff)
_passthroughFields.get(_fieldId) match {
case blob: _root_.scala.Some[TFieldBlob] => blob
case _root_.scala.None => {
val _fieldOpt: _root_.scala.Option[TField] =
_fieldId match {
case 1 =>
if (domain.isDefined) {
writeDomainValue(domain.get, _oprot)
_root_.scala.Some(DomainEntryPointAddedPb.DomainField)
} else {
_root_.scala.None
}
case _ => _root_.scala.None
}
_fieldOpt match {
case _root_.scala.Some(_field) =>
val _data = Arrays.copyOfRange(_buff.getArray, 0, _buff.length)
_root_.scala.Some(TFieldBlob(_field, _data))
case _root_.scala.None =>
_root_.scala.None
}
}
}
}
/**
* Collects TCompactProtocol-encoded field values according to `getFieldBlob` into a map.
*/
def getFieldBlobs(ids: TraversableOnce[Short]): immutable$Map[Short, TFieldBlob] =
(ids flatMap { id => getFieldBlob(id) map { id -> _ } }).toMap
/**
* Sets a field using a TCompactProtocol-encoded binary blob. If the field is a known
* field, the blob is decoded and the field is set to the decoded value. If the field
* is unknown and passthrough fields are enabled, then the blob will be stored in
* _passthroughFields.
*/
def setField(_blob: TFieldBlob): DomainEntryPointAddedPb = {
var domain: _root_.scala.Option[String] = this.domain
var _passthroughFields = this._passthroughFields
_blob.id match {
case 1 =>
domain = _root_.scala.Some(readDomainValue(_blob.read))
case _ => _passthroughFields += (_blob.id -> _blob)
}
new Immutable(
domain,
_passthroughFields
)
}
/**
* If the specified field is optional, it is set to None. Otherwise, if the field is
* known, it is reverted to its default value; if the field is unknown, it is removed
* from the passthroughFields map, if present.
*/
def unsetField(_fieldId: Short): DomainEntryPointAddedPb = {
var domain: _root_.scala.Option[String] = this.domain
_fieldId match {
case 1 =>
domain = _root_.scala.None
case _ =>
}
new Immutable(
domain,
_passthroughFields - _fieldId
)
}
/**
* If the specified field is optional, it is set to None. Otherwise, if the field is
* known, it is reverted to its default value; if the field is unknown, it is removed
* from the passthroughFields map, if present.
*/
def unsetDomain: DomainEntryPointAddedPb = unsetField(1)
override def write(_oprot: TProtocol): Unit = {
DomainEntryPointAddedPb.validate(this)
_oprot.writeStructBegin(Struct)
if (domain.isDefined) writeDomainField(domain.get, _oprot)
if (_passthroughFields.nonEmpty) {
_passthroughFields.values.foreach { _.write(_oprot) }
}
_oprot.writeFieldStop()
_oprot.writeStructEnd()
}
def copy(
domain: _root_.scala.Option[String] = this.domain,
_passthroughFields: immutable$Map[Short, TFieldBlob] = this._passthroughFields
): DomainEntryPointAddedPb =
new Immutable(
domain,
_passthroughFields
)
override def canEqual(other: Any): Boolean = other.isInstanceOf[DomainEntryPointAddedPb]
override def equals(other: Any): Boolean =
canEqual(other) &&
_root_.scala.runtime.ScalaRunTime._equals(this, other) &&
_passthroughFields == other.asInstanceOf[DomainEntryPointAddedPb]._passthroughFields
override def hashCode: Int = _root_.scala.runtime.ScalaRunTime._hashCode(this)
override def toString: String = _root_.scala.runtime.ScalaRunTime._toString(this)
override def productArity: Int = 1
override def productElement(n: Int): Any = n match {
case 0 => this.domain
case _ => throw new IndexOutOfBoundsException(n.toString)
}
override def productPrefix: String = "DomainEntryPointAddedPb"
} | dkomanov/scala-serialization | scala-serialization/src/main/scala/com/komanov/serialization/domain/thriftscala/DomainEntryPointAddedPb.scala | Scala | mit | 13,673 |
import de.tototec.sbuild._
// SBuild project definition: requires SBuild 0.7.0 and puts the locally
// built Adept plugin jar on the build classpath.
@version("0.7.0")
@classpath("target/org.sbuild.plugins.adept-0.0.9000.jar")
class Test(implicit _project: Project) {
  import org.sbuild.plugins.adept._
  // Activate the Adept plugin with its default configuration.
  Plugin[Adept]
}
| lefou/sbuild-adept | org.sbuild.plugins.adept/Test.scala | Scala | apache-2.0 | 202 |
package coursier.cli.options
import caseapp.{ExtraName => Short, HelpMessage => Help, ValueDescription => Value, _}
// format: off
/** Command-line options controlling dependency resolution, parsed by
 *  case-app. The annotations supply help text, value descriptions and
 *  short option names and are part of the CLI contract.
 */
final case class DependencyOptions(

  @Help("Exclude module")
  @Value("organization:name")
  @Short("E")
  @Help("Global level exclude")
    exclude: List[String] = Nil,

  @Short("x")
  @Help("Path to the local exclusion file. " +
    "Syntax: <org:name>--<org:name>. `--` means minus. Example file content:\n\t" +
    "\tcom.twitter.penguin:korean-text--com.twitter:util-tunable-internal_2.11\n\t" +
    "\torg.apache.commons:commons-math--com.twitter.search:core-query-nodes\n\t" +
    "Behavior: If root module A excludes module X, but root module B requires X, module X will still be fetched."
  )
    localExcludeFile: String = "",

  @Help("If --sbt-plugin options are passed: default sbt version (short version X.Y is enough - note that for sbt 1.x, this should be passed 1.0)")
  @Value("sbt version")
    sbtVersion: String = "1.0",

  @Help("Add intransitive dependencies")
    intransitive: List[String] = Nil,

  @Help("Add sbt plugin dependencies")
    sbtPlugin: List[String] = Nil,

  scalaJs: Boolean = false,

  @Help("Enable scala-native")
  @Short("S")
    native: Boolean = false

)
// format: on
object DependencyOptions {
  // case-app derivations, made implicit so callers can parse args and
  // render help for this options class.
  implicit val parser = Parser[DependencyOptions]
  implicit val help = caseapp.core.help.Help[DependencyOptions]
}
| alexarchambault/coursier | modules/cli/src/main/scala/coursier/cli/options/DependencyOptions.scala | Scala | apache-2.0 | 1,400 |
package im.actor.config
import java.nio.file.{ Path, Paths }
import java.util.concurrent.TimeUnit
import akka.actor.ActorSystem
import com.typesafe.config.{ Config, ConfigException, ConfigFactory }
import scala.collection.JavaConversions._
import scala.concurrent.duration._
import scala.util.{ Failure, Success, Try }
object ActorConfig {

  /** Loads the effective configuration. Precedence (highest first):
   *  `defaults`, the hard-coded Akka/cluster/persistence settings below,
   *  the standard application config, then `runtime.conf` resources.
   */
  def load(defaults: Config = ConfigFactory.empty()): Config = {
    val mainConfig = ConfigFactory.load()
    val config = defaults.withFallback(ConfigFactory.parseString(
      s"""
        |akka {
        | actor {
        | provider: "akka.cluster.ClusterActorRefProvider"
        | }
        |
        | extensions: [
        | "im.actor.server.db.DbExtension",
        | "akka.cluster.client.ClusterClientReceptionist",
        | "im.actor.server.push.actor.ActorPush"
        | ] $${akka.extensions}
        |
        | loggers = ["akka.event.slf4j.Slf4jLogger"]
        | logging-filter = "akka.event.slf4j.Slf4jLoggingFilter"
        |
        | cluster.sharding.state-store-mode = "ddata"
        |
        | persistence {
        | journal.plugin: "jdbc-journal"
        | snapshot-store.plugin: "jdbc-snapshot-store"
        | }
        |
        | stream {
        | materializer {
        | auto-fusing: off
        | }
        | }
        |}
        |
        |akka-persistence-jdbc {
        | tables {
        | journal {
        | tableName = "persistence_journal"
        | }
        | deletedTo {
        | tableName = "persistence_deleted_to"
        | }
        | snapshot {
        | tableName = "persistence_snapshot"
        | }
        | }
        |}
      """.stripMargin
    ))
      .withFallback(mainConfig)
      .withFallback(ConfigFactory.parseResources("runtime.conf"))
      .resolve()

    // Compatibility with old config which used "enabled-modules"
    Try(config.getConfig("enabled-modules")) match {
      case Success(oldModConfig) ⇒
        // Re-expose the legacy section under the new "modules" key.
        ConfigFactory.parseMap(Map("modules" → oldModConfig.root())).withFallback(config).resolve()
      case Failure(_: ConfigException.Missing) ⇒ config
      case Failure(e) ⇒ throw e
    }
  }

  // Reloads the config once at class-initialization time to read the timeout.
  val defaultTimeout: FiniteDuration = ActorConfig.load().getDuration("common.default-timeout", TimeUnit.MILLISECONDS).millis

  /** Project name as configured in the actor system's settings. */
  def projectName(implicit system: ActorSystem) = system.settings.config.getString("project-name")

  /** Externally visible base URI as configured under `http.base-uri`. */
  def baseUrl(implicit system: ActorSystem) = {
    val config = system.settings.config
    config.getString("http.base-uri")
  }
}
| ljshj/actor-platform | actor-server/actor-runtime/src/main/scala/im/actor/config/ActorConfig.scala | Scala | mit | 2,588 |
package no.nr.edvard.convergence
import models.{JavaTypeName, JavaElement, JavaType, JavaPackage}
import scala.math._
import processing.core._
import collection.mutable
/** Processing sketch that renders a Java codebase as a tree: packages fan
 *  out from a bottom anchor, types are dots, and inheritance/call
 *  relationships are drawn as colored edges. Renders one frame, saves it
 *  to `<name>.jpg`, then invokes `stopFn`.
 */
class Renderer(
  name: String,
  stopFn: () => Unit,
  classMap: Map[JavaTypeName, JavaType],
  packageRoot: JavaPackage,
  width: Int = 1200,
  height: Int = 900
) extends PApplet {

  /** A screen-space coordinate assigned to a laid-out element. */
  case class Position(x: Float, y: Float)
  type PositionMap = Map[JavaElement, Position]

  // NOTE(review): Color is not referenced within this class.
  case class Color(r: Float, g: Float, b: Float, a: Float)

  // Processing's API takes Floats; these let Double literals (Pi, ratios)
  // and Positions be passed directly.
  implicit def double2Float(d: Double) = d.toFloat
  implicit def position2PVector(pos: Position) = new PVector(pos.x, pos.y)

  override def setup() {
    size(width, height, PConstants.JAVA2D)
    smooth()
    // Single-shot rendering: draw() runs once, saves the frame, then stops.
    noLoop()
  }

  override def draw() {
    // Lay out the whole package tree, collecting one position per element.
    val positions = {
      val accum = mutable.ListBuffer[(JavaElement, Position)]()
      layOut(packageRoot, accum, Pi/1.1)
      println(accum.size)
      accum.toMap
    }

    background(0)
    resetMatrix()
    // Anchor the tree near the bottom-center, tilted slightly.
    translate(width/2.0, 7.8*height/9.0)
    //stroke(255, 10)
    //line(-400, 10, 400, 10)
    rotate(-Pi/8.0)

    // Draws package-to-subpackage edges; labels packages up to depth 2
    // (counter-rotated so the text stays horizontal).
    def drawPackages(p: JavaPackage) {
      val fromPos = positions(p)
      if (p.name.count(_.equals('.')) <= 2) {
        pushMatrix()
        textSize(9)
        translate(fromPos.x, fromPos.y)
        rotate(Pi/8.0)
        fill(250, 220)
        text(p.name.split("\\.").last, 0, 0)
        popMatrix()
      }
      fill(0, 0)
      p.subPackages.foreach(sp => {
        val toPos = positions(sp)
        line(fromPos.x, fromPos.y, toPos.x, toPos.y)
      })
      p.subPackages.foreach(drawPackages)
    }

    stroke(100, 50, 200, 60)
    //stroke(220, 120)
    drawPackages(packageRoot)

    // Draw each type as a small dot, with an occasional jittered speckle.
    stroke(0, 0)
    for ((el, pos) <- positions)
      el match {
        case t: JavaType => {
          if (random(0, 10) > 8) {
            fill(noise(pos.x) + noise(pos.y), 150)
            ellipse(pos.x+random(0, 3), pos.y+random(0, 3), 1, 1)
          }
          fill(255, 255, 255, 150)
          ellipse(pos.x, pos.y, 1, 1)
        }
        case _ =>
      }

    // Inheritance edges between positioned sub/superclass pairs.
    stroke(50, 250, 10, 20)
    classMap.foreach({ case (_, _class) => {
      val potentialSuperclass = _class.superClass
      positions.get(potentialSuperclass) match {
        case Some(superClassPos) =>
          positions.get(_class) match {
            case Some(subClassPos) => {
              line(
                subClassPos.x, subClassPos.y,
                superClassPos.x, superClassPos.y
              )
            }
            case _ =>
          }
        case _ =>
      }
    }})

    // Call edges, with a brighter stub on the caller's end of each edge.
    classMap.foreach({ case (_, caller: JavaType) =>
      caller.callees.foreach(callee => {
        // NOTE(review): null is used as an "absent" sentinel here; the
        // inheritance pass above uses Option for the same purpose.
        val callerPos = positions.getOrElse(caller, null)
        val calleePos = positions.getOrElse(callee, null)
        if (callerPos != null && calleePos != null) {
          stroke(220, 150, 30, 16)
          line(callerPos.x, callerPos.y, calleePos.x, calleePos.y)
          val dxDiv = (calleePos.x - callerPos.x)/10.0
          val dyDiv = (calleePos.y - callerPos.y)/10.0
          stroke(255, 255, 255, 20)
          line(
            callerPos.x, callerPos.y,
            callerPos.x + dxDiv, callerPos.y + dyDiv
          )
        }
      })
    })

    save(name + ".jpg")
    stopFn()
  }

  // Vertical distance between a package and its subpackages.
  // NOTE(review): packageHorSpacing is not referenced below.
  val (packageHorSpacing, packageVertSpacing) = (80.0, 100.0)

  /** Recursively positions `_package`, its types and its subpackages,
   *  fanning the children across `spread` radians and narrowing the fan
   *  (factor 0.7) at each level.
   */
  def layOut(
    _package: JavaPackage,
    accum: mutable.ListBuffer[(JavaElement, Position)],
    spread: Float
  ){
    for (t <- _package.types)
      layOut(t, accum)
    val numSubPackages = _package.subPackages.size
    accum.append((_package -> curZeroVec()))
    pushMatrix()
    val spreadPer = spread/numSubPackages
    // A single child stays straight ahead; multiple children are fanned out.
    if (numSubPackages != 1)
      rotate(-spread/2.0)
    for (p <- _package.subPackages) {
      if (numSubPackages != 1)
        rotate(spreadPer)
      pushMatrix()
      translate(0, -packageVertSpacing)
      layOut(p, accum, spread*0.7)
      popMatrix()
    }
    popMatrix()
  }

  /** Positions a single type with a small random offset around the current
   *  origin; the transform is restored before returning.
   */
  def layOut(
    _type: JavaType,
    accum: mutable.ListBuffer[(JavaElement, Position)]
  ) {
    fill(255, 150)
    stroke(0, 0)
    val (r, h) = (random(0.0, 2*Pi), random(0.0, 4.5))
    rotate(r)
    translate(0, h)
    accum.append((_type -> curZeroVec()))
    ellipse(random(-20, 20), random(-20, 20), 1, 1)
    translate(0, -h)
    rotate(-r)
  }

  /** The current transform's image of the origin, in screen coordinates. */
  def curZeroVec(): Position = {
    val dest = new PVector(0, 0)
    getMatrix().mult(new PVector(0, 0), dest)
    Position(dest.x, dest.y)
  }
}
// object Test {
// val x: Int => Int = identity
// }
// Higher-kinded fixture: the type parameter bound `I[X] <: X` and the
// abstract type member bound `Id[X] <: X` both constrain the applied
// result to be a subtype of its argument.
trait Foo[F[_], I[X] <: X] {
  type Id[X] <: X
  // Result type is a supertype constructor of F applied to Unit.
  def foo[G[x] >: F[x]]: G[Unit]
  def foo2[X >: F[String]]: Id[X]
  def foo3[X >: F[String]]: I[X]
}
/** Fixture trait exposing a constant member and a higher-order application. */
trait M[A] {
  // Constant member.
  def bla: Int = 1

  // Applies the supplied function to the fixed seed value 1.
  def baz(f: Int => Int): Int = {
    val seed = 1
    f(seed)
  }
}
object Test {
  // Type-checks only if each selection resolves to an M[...]: foo via the
  // G[x] >: F[x] bound, foo2 via the Id[X] <: X member bound, foo3 via the
  // identity type lambda instantiating I.
  def bar(x: Foo[M, [X] =>> X]): Unit = {
    x.foo.bla
    x.foo.baz(x => x)
    x.foo2.bla
    x.foo3.bla
  }
}
| dotty-staging/dotty | tests/pos/i9567.scala | Scala | apache-2.0 | 407 |
package lila.pref
import play.api.libs.json._
import lila.db.JsTube
import lila.db.JsTube.Helpers._
import lila.user.User
/** Per-user display and gameplay preferences. Int-valued fields encode
 *  choices declared in the `Pref` companion (Animation, Takeback, etc.).
 */
case class Pref(
  _id: String, // user id
  dark: Boolean,
  transp: Boolean,
  bgImg: Option[String],
  is3d: Boolean,
  theme: String,
  pieceSet: String,
  theme3d: String,
  pieceSet3d: String,
  soundSet: String,
  blindfold: Int,
  autoQueen: Int,
  autoThreefold: Int,
  takeback: Int,
  clockTenths: Int,
  clockBar: Boolean,
  clockSound: Boolean,
  premove: Boolean,
  animation: Int,
  captured: Boolean,
  follow: Boolean,
  highlight: Boolean,
  destination: Boolean,
  coords: Int,
  replay: Int,
  challenge: Int,
  message: Int,
  coordColor: Int,
  puzzleDifficulty: Int,
  submitMove: Int,
  confirmResign: Int,
  insightShare: Int,
  tags: Map[String, String] = Map.empty) {

  import Pref._

  def id = _id

  // Resolve stored names to their typed counterparts.
  def realTheme = Theme(theme)
  def realPieceSet = PieceSet(pieceSet)
  def realTheme3d = Theme3d(theme3d)
  def realPieceSet3d = PieceSet3d(pieceSet3d)
  def realSoundSet = SoundSet(soundSet)

  def coordColorName = Color.choices.toMap.get(coordColor).fold("random")(_.toLowerCase)

  def hasSeenVerifyTitle = tags contains Tag.verifyTitle

  /** Reads one preference by name; None for unknown names.
   *  NOTE(review): Boolean `fold(ifTrue, ifFalse)` here appears to come from
   *  a project/scalaz enrichment — first argument is the true branch.
   */
  def get(name: String): Option[String] = name match {
    case "bg" => transp.fold("transp", dark.fold("dark", "light")).some
    case "bgImg" => bgImg
    case "theme" => theme.some
    case "pieceSet" => pieceSet.some
    case "theme3d" => theme3d.some
    case "pieceSet3d" => pieceSet3d.some
    case "is3d" => is3d.toString.some
    case "soundSet" => soundSet.some
    case _ => none
  }

  /** Returns a copy with one preference updated; None when the name or
   *  value is not recognized.
   */
  def set(name: String, value: String): Option[Pref] = name match {
    case "bg" =>
      if (value == "transp") copy(dark = true, transp = true).some
      else Pref.bgs get value map { b => copy(dark = b, transp = false) }
    case "bgImg" => copy(bgImg = value.some).some
    case "theme" => Theme.allByName get value map { t => copy(theme = t.name) }
    case "pieceSet" => PieceSet.allByName get value map { p => copy(pieceSet = p.name) }
    case "theme3d" => Theme3d.allByName get value map { t => copy(theme3d = t.name) }
    case "pieceSet3d" => PieceSet3d.allByName get value map { p => copy(pieceSet3d = p.name) }
    case "is3d" => copy(is3d = value == "true").some
    case "soundSet" => SoundSet.allByKey get value map { s => copy(soundSet = s.name) }
    case _ => none
  }

  // Multiplier applied to animation durations; unknown codes fall back to 1.
  def animationFactor = animation match {
    case Animation.NONE => 0
    case Animation.FAST => 0.5f
    case Animation.NORMAL => 1
    case Animation.SLOW => 2
    case _ => 1
  }

  def isBlindfold = blindfold == Pref.Blindfold.YES

  // `|` is Option#getOrElse via enrichment: user image or the default.
  def bgImgOrDefault = bgImg | Pref.defaultBgImg
}
/**
 * Companion of the `Pref` case class: preference constant groups, their
 * user-facing choice lists, and the site-wide default preferences.
 */
object Pref {

  val defaultBgImg = "http://lichess1.org/assets/images/background/landscape.jpg"

  object Tag {
    val verifyTitle = "verifyTitle"
  }

  object Difficulty {
    val EASY = 1
    val NORMAL = 2
    val HARD = 3
    val choices = Seq(
      EASY -> "Easy",
      NORMAL -> "Normal",
      HARD -> "Hard")
  }

  object Color {
    val WHITE = 1
    val RANDOM = 2
    val BLACK = 3
    val choices = Seq(
      WHITE -> "White",
      RANDOM -> "Random",
      BLACK -> "Black")
  }

  object AutoQueen {
    val NEVER = 1
    val PREMOVE = 2
    val ALWAYS = 3
    val choices = Seq(
      NEVER -> "Never",
      ALWAYS -> "Always",
      PREMOVE -> "When premoving")
  }

  object SubmitMove {
    val NEVER = 0
    val CORRESPONDENCE_ONLY = 4
    val CORRESPONDENCE_UNLIMITED = 1
    val ALWAYS = 2
    val choices = Seq(
      NEVER -> "Never",
      CORRESPONDENCE_ONLY -> "Correspondence games only",
      CORRESPONDENCE_UNLIMITED -> "Correspondence and unlimited",
      ALWAYS -> "Always")
  }

  object ConfirmResign {
    val NO = 0
    val YES = 1
    val choices = Seq(
      NO -> "No",
      YES -> "Yes")
  }

  object InsightShare {
    val NOBODY = 0
    val FRIENDS = 1
    val EVERYBODY = 2
    val choices = Seq(
      NOBODY -> "With nobody",
      FRIENDS -> "With friends",
      EVERYBODY -> "With everybody")
  }

  object Blindfold {
    val NO = 0
    val YES = 1
    val choices = Seq(
      NO -> "What? No!",
      YES -> "Yes, hide the pieces")
  }

  object AutoThreefold {
    val NEVER = 1
    val TIME = 2
    val ALWAYS = 3
    val choices = Seq(
      NEVER -> "Never",
      ALWAYS -> "Always",
      TIME -> "When time remaining < 30 seconds")
  }

  object Takeback {
    val NEVER = 1
    val CASUAL = 2
    val ALWAYS = 3
    val choices = Seq(
      NEVER -> "Never",
      ALWAYS -> "Always",
      CASUAL -> "In casual games only")
  }

  object Animation {
    val NONE = 0
    val FAST = 1
    val NORMAL = 2
    val SLOW = 3
    val choices = Seq(
      NONE -> "None",
      FAST -> "Fast",
      NORMAL -> "Normal",
      SLOW -> "Slow")
  }

  object Coords {
    val NONE = 0
    val INSIDE = 1
    val OUTSIDE = 2
    val choices = Seq(
      NONE -> "No",
      INSIDE -> "Inside the board",
      OUTSIDE -> "Outside the board")
  }

  object Replay {
    val NEVER = 0
    val SLOW = 1
    val ALWAYS = 2
    val choices = Seq(
      NEVER -> "Never",
      SLOW -> "On slow games",
      ALWAYS -> "Always")
  }

  object ClockTenths {
    val NEVER = 0
    val LOWTIME = 1
    val ALWAYS = 2
    val choices = Seq(
      NEVER -> "Never",
      LOWTIME -> "When time remaining < 10 seconds",
      ALWAYS -> "Always")
  }

  object Challenge {
    val NEVER = 1
    val RATING = 2
    val FRIEND = 3
    val ALWAYS = 4
    private val ratingThreshold = 500
    val choices = Seq(
      NEVER -> "Never",
      RATING -> s"If rating is ± $ratingThreshold",
      FRIEND -> "Only friends",
      ALWAYS -> "Always")

    /**
     * Returns a rejection message when `from` may not challenge `to`
     * according to `to`'s challenge preference, or None when the
     * challenge is allowed. The "{{user}}" placeholder is substituted
     * by the caller.
     */
    def block(from: User, to: User, pref: Int, follow: Boolean): Option[String] = pref match {
      case NEVER => "{{user}} doesn't accept challenges.".some
      // challenging a lower-rated player is always allowed; only
      // challenging upward is capped by the rating threshold
      case RATING if from.perfs.bestRating > to.perfs.bestRating => none
      case RATING if math.abs(from.perfs.bestRating - to.perfs.bestRating) > ratingThreshold =>
        s"{{user}} only accepts challenges if rating is ± $ratingThreshold.".some
      case FRIEND if !follow => "{{user}} only accepts challenges from friends.".some
      case _ => none
    }
  }

  object Message {
    val NEVER = 1
    val FRIEND = 2
    val ALWAYS = 3
    val choices = Seq(
      NEVER -> "Never",
      FRIEND -> "Only friends",
      ALWAYS -> "Always")
  }

  /** Creates a fresh preference set for the given user id. */
  def create(id: String) = default.copy(_id = id)

  lazy val default = Pref(
    _id = "",
    dark = false,
    transp = false,
    bgImg = none,
    is3d = false,
    theme = Theme.default.name,
    pieceSet = PieceSet.default.name,
    theme3d = Theme3d.default.name,
    pieceSet3d = PieceSet3d.default.name,
    soundSet = SoundSet.default.name,
    blindfold = Blindfold.NO,
    autoQueen = AutoQueen.PREMOVE,
    autoThreefold = AutoThreefold.TIME,
    takeback = Takeback.ALWAYS,
    clockBar = true,
    clockSound = true,
    premove = true,
    animation = Animation.NORMAL, // was a bare `2`; use the named constant like every other default
    captured = true,
    follow = true,
    highlight = true,
    destination = true,
    coords = Coords.OUTSIDE,
    replay = Replay.ALWAYS,
    clockTenths = ClockTenths.LOWTIME,
    challenge = Challenge.ALWAYS,
    message = Message.ALWAYS,
    coordColor = Color.RANDOM,
    puzzleDifficulty = Difficulty.NORMAL,
    submitMove = SubmitMove.CORRESPONDENCE_ONLY,
    confirmResign = ConfirmResign.YES,
    insightShare = InsightShare.FRIENDS,
    tags = Map.empty)

  import ornicar.scalalib.Zero
  implicit def PrefZero: Zero[Pref] = Zero.instance(default)

  // recognized values for the "bg" setting, mapped to the `dark` flag
  private val bgs = Map("light" -> false, "dark" -> true)
}
| terokinnunen/lila | modules/pref/src/main/Pref.scala | Scala | mit | 7,770 |
package com.twitter.finagle.postgresql
import com.twitter.finagle.postgresql.BackendMessage.CommandTag
import com.twitter.io.Buf
import com.twitter.io.Reader
import com.twitter.util.Await
/** Integration tests exercising the rich PostgreSQL client API. */
class RichClientSpec extends PgSqlIntegrationSpec {

  "Rich client" should {

    "support multi-line queries" in withRichClient() { client =>
      // Two statements in one query string should yield two result sets.
      Reader
        .toAsyncStream(client.multiQuery("select 1;select 2;"))
        .mapF(Client.Expect.ResultSet(_))
        .mapF(_.toSeq)
        .toSeq()
        .map { resultSets =>
          resultSets must haveSize(2)
        }
    }

    "read" in withRichClient() { client =>
      client
        .read("select 1;")
        .map(_.rows must haveSize(1))
    }

    "modify" in withRichClient() { client =>
      client
        .modify("create user fake;")
        .map(_ must beEqualTo(Response.Command(CommandTag.Other("CREATE ROLE"))))
    }

    // TODO: COPY in CRDB cannot be done within a prepared statement
    // https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/pgwire/conn.go#L848-L855
    "copy from" in withRichClient() { client =>
      withTmpTable() { tableName =>
        client
          .modify(s"COPY $tableName FROM STDIN;")
          .map(_ => ok)
      }
    }.pendingUntilFixed()

    // TODO: COPY in CRDB cannot be done within a prepared statement
    // https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/pgwire/conn.go#L848-L855
    "copy to" in withRichClient() { client =>
      withTmpTable() { tableName =>
        client
          .modify(s"COPY $tableName TO STDOUT;")
          .map(_ => ok)
      }
    }.pendingUntilFixed()

    "prepare read" in withRichClient() { client =>
      client
        .prepare("select 1")
        .read(Nil)
        .map(_.rows must haveSize(1))
    }

    "prepare modify" in withRichClient() { client =>
      client
        .prepare("create user another;")
        .modify(Nil)
        .map(_ must beEqualTo(Response.Command(CommandTag.Other("CREATE ROLE"))))
    }

    "prepare param" in withRichClient() { client =>
      // Bind one bool and one bytea parameter and read them back.
      val params = Parameter(true) :: Parameter(Buf.ByteArray(0, 1, 2, 3, 4)) :: Nil
      client
        .prepare("select $1::bool, $2::bytea")
        .read(params)
        .map { resultSet =>
          resultSet.rows must haveSize(1)
          resultSet.rows.head.get[Boolean](0) must be(true)
          resultSet.rows.head.get[Buf](1) must_== Buf.ByteArray(0, 1, 2, 3, 4)
        }
    }

    "prepare reuse" in withRichClient() { client =>
      val prepared = client.prepare("select $1::bool, $2::bytea")
      // Execute the same prepared statement twice with different bindings.
      def check(flag: Boolean, payload: Buf) =
        prepared
          .read(Parameter(flag) :: Parameter(payload) :: Nil)
          .map { resultSet =>
            resultSet.rows must haveSize(1)
            resultSet.rows.head.get[Boolean](0) must_== flag
            resultSet.rows.head.get[Buf](1) must_== payload
          }
      Await.result(check(false, Buf.ByteArray(-1, 0, 1, 2)))
      Await.result(check(true, Buf.ByteArray(4, 3, 2, 1)))
    }
  }
}
| twitter/finagle | finagle-postgresql/src/it/scala/com/twitter/finagle/postgresql/RichClientSpec.scala | Scala | apache-2.0 | 2,927 |
package org.qbproject.schema.internal.json
import play.api.libs.json.{JsNumber, JsString, JsValue}
/**
* Convenience partial functions that may be passed into the map function of a type mapper.
*/
trait JsValueUpdateOps {

  /** Strips leading and trailing whitespace from JSON string values. */
  val trim: PartialFunction[JsValue, JsValue] = {
    case JsString(value) => JsString(value.trim)
  }

  /** Lower-cases JSON string values. */
  val toLowerCase: PartialFunction[JsValue, JsValue] = {
    case JsString(value) => JsString(value.toLowerCase)
  }

  /** Upper-cases JSON string values. */
  val toUpperCase: PartialFunction[JsValue, JsValue] = {
    case JsString(value) => JsString(value.toUpperCase)
  }

  /** Adds `i` to JSON number values. */
  def inc(i: Int): PartialFunction[JsValue, JsValue] = {
    case JsNumber(value) => JsNumber(value + i)
  }
}
| edgarmueller/qbproject | qbschema/src/main/scala/org/qbproject/schema/internal/json/JsValueUpdateOps.scala | Scala | apache-2.0 | 646 |
import sbt._
import sbt.Keys._
import sbt.{ThisBuild, Project}
/** sbt 0.13-style build definition for the sbt-scalaparse plugin. */
object SbtScalaParseBuild extends Build {
  val repoKind = SettingKey[String]("repo-kind", "Maven repository kind (\"snapshots\" or \"releases\")")
  val projectName = "sbt-scalaparse"
  val sbtScalariform: Project = Project(
    projectName,
    file("."),
    settings = Defaults.defaultSettings ++ Seq(
      sbtPlugin := true,
      // `name` and `organization` were each set twice with identical
      // values; the redundant duplicates have been removed.
      name := projectName,
      description := "sbt plugin for better parse errors using @lihaoyi's ScalaParse",
      organization := "org.cvogt",
      version in ThisBuild := "0.2",
      libraryDependencies ++= Seq(
        "com.lihaoyi" %% "scalaparse" % "0.2.1"
      ),
      scalacOptions ++= List(
        "-unchecked",
        "-deprecation",
        "-feature"
      ),
      resolvers ++= Seq(Resolver.sonatypeRepo("releases"), Resolver.sonatypeRepo("snapshots")),
      organizationName := "Jan Christopher Vogt",
      // publish snapshots and releases to different Sonatype repositories
      repoKind <<= (version)(v => if (v.trim.endsWith("SNAPSHOT")) "snapshots" else "releases"),
      //publishTo <<= (repoKind)(r => Some(Resolver.file("test", file("c:/temp/repo/"+r)))),
      publishTo <<= (repoKind) {
        case "snapshots" => Some("snapshots" at "https://oss.sonatype.org/content/repositories/snapshots")
        case "releases" => Some("releases" at "https://oss.sonatype.org/service/local/staging/deploy/maven2")
      },
      publishMavenStyle := true,
      publishArtifact in Test := false,
      pomIncludeRepository := { _ => false },
      makePomConfiguration ~= { _.copy(configurations = Some(Seq(Compile, Runtime, Optional))) },
      licenses += ("Two-clause BSD-style license", url("http://github.com/cvogt/"+projectName+"/blob/master/LICENSE.txt")),
      homepage := Some(url("http://github.com/cvogt/"+projectName)),
      startYear := Some(2015),
      pomExtra :=
        <developers>
          <developer>
            <id>cvogt</id>
            <name>Jan Christopher Vogt</name>
            <timezone>-5</timezone>
            <url>https://github.com/cvogt/</url>
          </developer>
        </developers>
          <scm>
            <url>git@github.com:cvogt/{projectName}.git</url>
            <connection>scm:git:git@github.com:cvogt/{projectName}.git</connection>
          </scm>
    )
  )
}
| cvogt/sbt-scalaparse | project/Build.scala | Scala | bsd-2-clause | 2,334 |
package com.twitter.scalding.reducer_estimation
import com.twitter.scalding._
import com.twitter.scalding.platform.{ HadoopPlatformJobTest, HadoopSharedPlatformTest }
import org.scalatest.{ Matchers, WordSpec }
import scala.collection.JavaConverters._
import scala.util.{ Failure, Success, Try }
/**
 * Word-count job used by the estimator tests below. It deliberately does
 * not set the number of reducers itself, leaving the decision to the
 * configured reducer estimator.
 */
class SimpleJobWithNoSetReducers(args: Args, customConfig: Config) extends Job(args) {
  import HipJob._

  // Merge the test-specific settings into the job configuration.
  // (The original called `.toMap.toMap`; a single conversion suffices.)
  override def config = super.config ++ customConfig.toMap

  TypedPipe.from(inSrc)
    .flatMap(_.split("[^\\w]+"))
    .map(_.toLowerCase -> 1)
    .group
    .sum
    .write(counts)
}
/** Stub history service that always succeeds with an empty history. */
object EmptyHistoryService extends HistoryService {
  def fetchHistory(info: FlowStrategyInfo, maxHistory: Int): Try[Seq[FlowStepHistory]] =
    Success(Seq.empty)
}
/** Stub history service whose fetch always fails. */
object ErrorHistoryService extends HistoryService {
  def fetchHistory(info: FlowStrategyInfo, maxHistory: Int): Try[Seq[FlowStepHistory]] = {
    val error = new RuntimeException("Failed to fetch job history")
    Failure(error)
  }
}
/** Helpers for building synthetic flow-step histories for the tests. */
object HistoryServiceWithData {

  // RatioBasedEstimator only inspects the two input-size counters, so
  // most callers don't need task runtimes.
  def makeHistory(inputHdfsBytesRead: Long, inputHdfsReduceFileBytesRead: Long): FlowStepHistory =
    makeHistory(inputHdfsBytesRead, inputHdfsReduceFileBytesRead, Seq())

  def makeHistory(inputHdfsBytesRead: Long, inputHdfsReduceFileBytesRead: Long, taskRuntimes: Seq[Long]): FlowStepHistory = {
    // Fixed seed keeps the generated start times deterministic across runs.
    val rng = new scala.util.Random(123)
    val tasks = taskRuntimes map { runtime =>
      val start = rng.nextLong
      Task(
        taskType = "REDUCE",
        status = "SUCCEEDED",
        startTime = start,
        finishTime = start + runtime)
    }
    // Everything except the two input-size counters (and tasks) is zeroed.
    FlowStepHistory(
      keys = null,
      submitTime = 0,
      launchTime = 0L,
      finishTime = 0L,
      totalMaps = 0L,
      totalReduces = 0L,
      finishedMaps = 0L,
      finishedReduces = 0L,
      failedMaps = 0L,
      failedReduces = 0L,
      mapFileBytesRead = 0L,
      mapFileBytesWritten = 0L,
      reduceFileBytesRead = inputHdfsReduceFileBytesRead,
      hdfsBytesRead = inputHdfsBytesRead,
      hdfsBytesWritten = 0L,
      mapperTimeMillis = 0L,
      reducerTimeMillis = 0L,
      reduceShuffleBytes = 0L,
      cost = 1.1,
      tasks = tasks)
  }

  /** Size of the shared test input source. */
  def inputSize = HipJob.InSrcFileSize
}
// Marker base class for the stub history services wired into the
// RatioBasedEstimator variants below.
abstract class HistoryServiceWithData extends HistoryService
/** History with a usable past reducer ratio of 0.5. */
object ValidHistoryService extends HistoryServiceWithData {
  import HistoryServiceWithData._

  def fetchHistory(info: FlowStrategyInfo, maxHistory: Int): Try[Seq[FlowStepHistory]] = {
    // One entry below the input-size threshold (ignored by the estimator)
    // followed by three entries with a reducer ratio of 0.5.
    val belowThreshold = makeHistory(10, 1)
    val halfRatio = Seq.fill(3)(makeHistory(inputSize, inputSize / 2))
    Success(belowThreshold +: halfRatio)
  }
}
/** History with a huge output ratio over data that is still small overall. */
object SmallDataExplosionHistoryService extends HistoryServiceWithData {
  import HistoryServiceWithData._

  def fetchHistory(info: FlowStrategyInfo, maxHistory: Int): Try[Seq[FlowStepHistory]] = {
    // Output is 1000x the input: a huge ratio, but the absolute size stays small.
    val outSize = inputSize * 1000
    Success(Seq.fill(3)(makeHistory(inputSize, outSize)))
  }
}
/** History whose entries are all too small to be usable. */
object InvalidHistoryService extends HistoryServiceWithData {
  import HistoryServiceWithData._

  def fetchHistory(info: FlowStrategyInfo, maxHistory: Int): Try[Seq[FlowStepHistory]] =
    // Every entry falls below the 10% input-size threshold, so none count.
    Success(Seq.fill(3)(makeHistory(10, 1)))
}
// Concrete RatioBasedEstimator variants, each wired to one of the stub
// history services above so the tests can exercise a specific scenario.
class EmptyHistoryBasedEstimator extends RatioBasedEstimator {
  override val historyService = EmptyHistoryService
}
class ErrorHistoryBasedEstimator extends RatioBasedEstimator {
  override val historyService = ErrorHistoryService
}
class ValidHistoryBasedEstimator extends RatioBasedEstimator {
  override val historyService = ValidHistoryService
}
class SmallDataExplosionHistoryBasedEstimator extends RatioBasedEstimator {
  override val historyService = SmallDataExplosionHistoryService
}
class InvalidHistoryBasedEstimator extends RatioBasedEstimator {
  override val historyService = InvalidHistoryService
}
// End-to-end tests: run a real job on the shared Hadoop mini-cluster and
// inspect the reducer count the estimator configured for the single step.
class RatioBasedReducerEstimatorTest extends WordSpec with Matchers with HadoopSharedPlatformTest {
  import HipJob._
  "Single-step job with ratio-based reducer estimator" should {
    "not set reducers when no history is found" in {
      // With no history the estimator abstains; Hadoop's default (1) is kept.
      val customConfig = Config.empty.addReducerEstimator(classOf[EmptyHistoryBasedEstimator]) +
        (InputSizeReducerEstimator.BytesPerReducer -> "1k") +
        (RatioBasedEstimator.inputRatioThresholdKey -> 0.10f.toString)
      HadoopPlatformJobTest(new SimpleJobWithNoSetReducers(_, customConfig), cluster)
        .inspectCompletedFlow { flow =>
          val steps = flow.getFlowSteps.asScala
          steps should have size 1
          val conf = steps.head.getConfig
          conf.getNumReduceTasks should equal (1) // default
        }
        .run
    }
    "not set reducers when error fetching history" in {
      // A failing history fetch must not break the job; the default is kept.
      val customConfig = Config.empty.addReducerEstimator(classOf[ErrorHistoryBasedEstimator]) +
        (InputSizeReducerEstimator.BytesPerReducer -> "1k") +
        (RatioBasedEstimator.inputRatioThresholdKey -> 0.10f.toString)
      HadoopPlatformJobTest(new SimpleJobWithNoSetReducers(_, customConfig), cluster)
        .inspectCompletedFlow { flow =>
          val steps = flow.getFlowSteps.asScala
          steps should have size 1
          val conf = steps.head.getConfig
          conf.getNumReduceTasks should equal (1) // default
        }
        .run
    }
    "set reducers correctly when there is valid history" in {
      val customConfig = Config.empty
        .addReducerEstimator(classOf[ValidHistoryBasedEstimator]) +
        (InputSizeReducerEstimator.BytesPerReducer -> "1k") +
        (RatioBasedEstimator.inputRatioThresholdKey -> 0.10f.toString)
      HadoopPlatformJobTest(new SimpleJobWithNoSetReducers(_, customConfig), cluster)
        .inspectCompletedFlow { flow =>
          val steps = flow.getFlowSteps.asScala
          steps should have size 1
          // base estimate from input size reducer = 3
          // reducer ratio from history = 0.5
          // final estimate = ceil(3 * 0.5) = 2
          val conf = steps.head.getConfig
          conf.getNumReduceTasks should equal (2)
        }
        .run
    }
    /*
     * If the InputSizeReducerEstimator decides that less than 1 reducer is necessary, it
     * rounds up to 1. If the RatioBasedEstimator relies on this, it will use the rounded-up
     * value to calculate the number of reducers. In the case of data explosion on a small dataset,
     * you end up with a very large number of reducers because this rounding error is multiplied.
     * This regression test ensures that this is no longer the case.
     *
     * see https://github.com/twitter/scalding/issues/1541 for more details.
     */
    "handle mapper output explosion over small data correctly" in {
      val customConfig = Config.empty
        .addReducerEstimator(classOf[SmallDataExplosionHistoryBasedEstimator]) +
        // set the bytes per reducer to to 500x input size, so that we estimate needing 2 reducers,
        // even though there's a very large explosion in input data size, the data is still pretty small
        (InputSizeReducerEstimator.BytesPerReducer -> (HistoryServiceWithData.inputSize * 500).toString) +
        (RatioBasedEstimator.inputRatioThresholdKey -> 0.10f.toString)
      HadoopPlatformJobTest(new SimpleJobWithNoSetReducers(_, customConfig), cluster)
        .inspectCompletedFlow { flow =>
          val steps = flow.getFlowSteps.asScala
          steps should have size 1
          val conf = steps.head.getConfig
          conf.getNumReduceTasks should equal (2) // used to pick 1000 with the rounding error
        }.run
    }
    "not set reducers when there is no valid history" in {
      // History exists but every entry is below the ratio threshold.
      val customConfig = Config.empty.addReducerEstimator(classOf[InvalidHistoryBasedEstimator]) +
        (InputSizeReducerEstimator.BytesPerReducer -> "1k") +
        (RatioBasedEstimator.inputRatioThresholdKey -> 0.10f.toString)
      HadoopPlatformJobTest(new SimpleJobWithNoSetReducers(_, customConfig), cluster)
        .inspectCompletedFlow { flow =>
          val steps = flow.getFlowSteps.asScala
          steps should have size 1
          val conf = steps.head.getConfig
          conf.getNumReduceTasks should equal (1) // default
        }
        .run
    }
  }
}
| tglstory/scalding | scalding-hadoop-test/src/test/scala/com/twitter/scalding/reducer_estimation/RatioBasedEstimatorTest.scala | Scala | apache-2.0 | 8,543 |
package extracells.render.block
import cpw.mods.fml.client.registry.{ISimpleBlockRenderingHandler, RenderingRegistry}
import extracells.tileentity.TileEntityHardMeDrive
import net.minecraft.block.Block
import net.minecraft.client.Minecraft
import net.minecraft.client.renderer.texture.TextureMap
import net.minecraft.client.renderer.{Tessellator, RenderBlocks}
import net.minecraft.util.{IIcon, ResourceLocation}
import net.minecraft.world.IBlockAccess
import org.lwjgl.opengl.GL11
// Custom block renderer for the hard ME drive: standard cube faces plus
// three LED "bay" strips drawn on the block's front face.
// NOTE(review): immediate-mode GL calls below are order-sensitive; do not
// reorder draw/startDrawingQuads pairs.
object RendererHardMEDrive extends ISimpleBlockRenderingHandler {
  // Render id assigned by Forge's RenderingRegistry in registerRenderer().
  var renderID = 0
  // Texture holding the drive-front strips.
  val tex = new ResourceLocation("extracells", "textures/blocks/hardmedrive.png")
  // Fixed u/v sub-regions of `tex` (args are minU, maxU, minV, maxV).
  val i = new Icon(5, 11, 5, 7)
  val i2 = new Icon(5, 11, 8, 10)
  val i3 = new Icon(5, 11, 11, 13)
  /** Registers this handler with Forge and records the assigned render id. */
  def registerRenderer() = {
    renderID = RenderingRegistry.getNextAvailableRenderId
    RenderingRegistry.registerBlockHandler(this)
  }
  override def getRenderId = renderID
  override def shouldRender3DInInventory(modelId: Int): Boolean = true
  // Draws the block in the inventory: six cube faces, plus three strips
  // (icon `i`) overlaid on the Z+ face using the custom texture.
  override def renderInventoryBlock(block: Block, metadata: Int, modelId: Int, renderer: RenderBlocks) = {
    val tessellator = Tessellator.instance
    renderer.setRenderBounds(0.0D, 0.0D, 0.0D, 1.0D, 1.0D, 1.0D)
    GL11.glTranslatef(-0.5F, -0.5F, -0.5F)
    tessellator.startDrawingQuads
    tessellator.setNormal(0.0F, -1.0F, 0.0F)
    renderer.renderFaceYNeg(block, 0.0D, 0.0D, 0.0D, renderer.getBlockIconFromSideAndMetadata(block, 0, 3))
    tessellator.draw
    tessellator.startDrawingQuads
    tessellator.setNormal(0.0F, 1.0F, 0.0F)
    renderer.renderFaceYPos(block, 0.0D, 0.0D, 0.0D, renderer.getBlockIconFromSideAndMetadata(block, 1, 3))
    tessellator.draw
    tessellator.startDrawingQuads
    tessellator.setNormal(0.0F, 0.0F, -1.0F)
    renderer.renderFaceZNeg(block, 0.0D, 0.0D, 0.0D, renderer.getBlockIconFromSideAndMetadata(block, 2, 3) )
    tessellator.draw
    tessellator.startDrawingQuads
    tessellator.setNormal(0.0F, 0.0F, 1.0F)
    renderer.renderFaceZPos(block, 0.0D, 0.0D, 0.0D, renderer.getBlockIconFromSideAndMetadata(block, 3, 3))
    tessellator.draw
    // Switch to the drive texture for the three front strips.
    Minecraft.getMinecraft.renderEngine.bindTexture(tex)
    tessellator.startDrawingQuads
    tessellator.setNormal(0.0F, 0.0F, 1.0F)
    renderer.renderMinX = .3125D
    renderer.renderMinY = .25D
    renderer.renderMaxX = .6875D
    renderer.renderMaxY = .375D
    renderer.renderFaceZPos(block, 0.0D, 0.0D, 0.0D, i)
    tessellator.draw
    renderer.renderMinY = .43525D
    renderer.renderMaxY = .56025D
    tessellator.startDrawingQuads
    tessellator.setNormal(0.0F, 0.0F, 1.0F)
    renderer.renderFaceZPos(block, 0.0D, 0.0D, 0.0D, i)
    tessellator.draw
    renderer.renderMinY = .62275D
    renderer.renderMaxY = .75D
    tessellator.startDrawingQuads
    tessellator.setNormal(0.0F, 0.0F, 1.0F)
    renderer.renderFaceZPos(block, 0.0D, 0.0D, 0.0D, i)
    // Restore full render bounds before drawing the remaining faces.
    renderer.renderMinX = 0.0D
    renderer.renderMinY = 0.0D
    renderer.renderMaxX = 1.0D
    renderer.renderMaxY = 1.0D
    tessellator.draw
    Minecraft.getMinecraft.renderEngine.bindTexture(TextureMap.locationBlocksTexture)
    tessellator.startDrawingQuads
    tessellator.setNormal(-1.0F, 0.0F, 0.0F)
    renderer.renderFaceXNeg(block, 0.0D, 0.0D, 0.0D, renderer.getBlockIconFromSideAndMetadata(block, 4, 3))
    tessellator.draw
    tessellator.startDrawingQuads
    tessellator.setNormal(1.0F, 0.0F, 0.0F)
    renderer.renderFaceXPos(block, 0.0D, 0.0D, 0.0D, renderer.getBlockIconFromSideAndMetadata(block, 5, 3))
    tessellator.draw
    GL11.glTranslatef(0.5F, 0.5F, 0.5F)
  }
  // Draws the block in the world, then overlays the status strips on the
  // face selected by the block metadata (2..5 = Z-/Z+/X-/X+).
  override def renderWorldBlock(world: IBlockAccess, x: Int, y: Int, z: Int, block: Block, modelId: Int, renderer: RenderBlocks): Boolean = {
    val tessellator = Tessellator.instance
    renderer.renderStandardBlock(block, x, y, z)
    tessellator.addTranslation(x, y, z)
    val meta = world.getBlockMetadata(x, y, z)
    val tileEntity = world.getTileEntity(x, y, z)
    if( tileEntity == null || (!tileEntity.isInstanceOf[TileEntityHardMeDrive]))
      return false
    val tileEntityHardMeDrive = tileEntity.asInstanceOf[TileEntityHardMeDrive]
    // Flush any batch the engine already has open; remember whether one
    // was open so it can be restarted after our custom drawing.
    var b = true
    try {
      Tessellator.instance.draw
    } catch {
      case e: IllegalStateException => b = false
    }
    GL11.glPushMatrix();
    GL11.glEnable(GL11.GL_BLEND);
    GL11.glDisable(GL11.GL_CULL_FACE);
    tessellator.setColorOpaque_I(block.colorMultiplier(world, x, y, z));
    tessellator.setBrightness(240)
    Minecraft.getMinecraft.renderEngine.bindTexture(tex)
    meta match {
      case 2 => renderZNeg(renderer, block, generateRenderInformations(tileEntityHardMeDrive))
      case 3 => renderZPos(renderer, block, generateRenderInformations(tileEntityHardMeDrive))
      case 4 => renderXNeg(renderer, block, generateRenderInformations(tileEntityHardMeDrive))
      case 5 => renderXPos(renderer, block, generateRenderInformations(tileEntityHardMeDrive))
      case _ =>
    }
    Minecraft.getMinecraft.renderEngine.bindTexture(TextureMap.locationBlocksTexture)
    GL11.glPopMatrix();
    if(b)
      tessellator.startDrawingQuads
    tessellator.addTranslation(-x, -y, -z)
    true
  }
  // Builds one entry per drive bay; `pos` is the strip's vertical offset
  // in 1/16ths of a block, the color comes from the tile entity's status.
  def generateRenderInformations(tileEntity: TileEntityHardMeDrive): Array[RenderInformation] = {
    val renderInformations = new Array[RenderInformation](3)
    renderInformations(2) = new RenderInformation(4, tileEntity.getColorByStatus(2))
    renderInformations(1) = new RenderInformation(7, tileEntity.getColorByStatus(1))
    renderInformations(0) = new RenderInformation(10, tileEntity.getColorByStatus(0))
    renderInformations
  }
  // Draws the bay strips on the X+ face, then restores full render bounds.
  def renderXPos(renderer : RenderBlocks, block: Block, renderInformations: Array[RenderInformation]){
    val tessellator = Tessellator.instance
    renderer.renderMinZ = .3125D
    renderer.renderMaxZ = .6875D
    val it = renderInformations.iterator
    while(it.hasNext){
      val renderInformation = it.next
      renderer.renderMinY = 1.0D / 16.0D * renderInformation.getPos
      renderer.renderMaxY = 1.0D / 16.0D * (renderInformation.getPos + 2)
      tessellator.startDrawingQuads
      tessellator.setNormal(1.0F, 0.0F, 0.0F)
      renderer.renderFaceXPos(block, 0.0D, 0.0D, 0.0D, renderInformation.getIcon)
      tessellator.draw
      // second pass: tinted overlay with the bay's status color
      tessellator.startDrawingQuads
      tessellator.setNormal(1.0F, 0.0F, 0.0F)
      tessellator.setColorOpaque_I(renderInformation.getColor)
      renderer.renderFaceXPos(block, 0.0D, 0.0D, 0.0D, renderInformation.getIcon2)
      tessellator.draw
    }
    renderer.renderMinX = 0.0D
    renderer.renderMinY = 0.0D
    renderer.renderMinZ = 0.0D
    renderer.renderMaxX = 1.0D
    renderer.renderMaxY = 1.0D
    renderer.renderMaxZ = 1.0D
  }
  // Draws the bay strips on the X- face, then restores full render bounds.
  def renderXNeg(renderer : RenderBlocks, block: Block, renderInformations: Array[RenderInformation]){
    val tessellator = Tessellator.instance
    renderer.renderMinZ = .3125D
    renderer.renderMaxZ = .6875D
    val it = renderInformations.iterator
    while(it.hasNext){
      val renderInformation = it.next
      renderer.renderMinY = 1.0D / 16.0D * renderInformation.getPos
      renderer.renderMaxY = 1.0D / 16.0D * (renderInformation.getPos + 2)
      tessellator.startDrawingQuads
      tessellator.setNormal(-1.0F, 0.0F, 0.0F)
      renderer.renderFaceXNeg(block, 0.0D, 0.0D, 0.0D, renderInformation.getIcon)
      tessellator.draw
      tessellator.startDrawingQuads
      tessellator.setNormal(-1.0F, 0.0F, 0.0F)
      tessellator.setColorOpaque_I(renderInformation.getColor)
      renderer.renderFaceXNeg(block, 0.0D, 0.0D, 0.0D, renderInformation.getIcon2)
      tessellator.draw
    }
    renderer.renderMinX = 0.0D
    renderer.renderMinY = 0.0D
    renderer.renderMinZ = 0.0D
    renderer.renderMaxX = 1.0D
    renderer.renderMaxY = 1.0D
    renderer.renderMaxZ = 1.0D
  }
  // Draws the bay strips on the Z+ face, then restores full render bounds.
  def renderZPos(renderer : RenderBlocks, block: Block, renderInformations: Array[RenderInformation]){
    val tessellator = Tessellator.instance
    renderer.renderMinX = .3125D
    renderer.renderMaxX = .6875D
    val it = renderInformations.iterator
    while(it.hasNext){
      val renderInformation = it.next
      renderer.renderMinY = 1.0D / 16.0D * renderInformation.getPos
      renderer.renderMaxY = 1.0D / 16.0D * (renderInformation.getPos + 2.0D)
      tessellator.startDrawingQuads
      tessellator.setNormal(0.0F, 0.0F, 1.0F)
      renderer.renderFaceZPos(block, 0.0D, 0.0D, 0.0D, renderInformation.getIcon)
      tessellator.draw
      tessellator.startDrawingQuads
      tessellator.setNormal(0.0F, 0.0F, 1.0F)
      tessellator.setColorOpaque_I(renderInformation.getColor)
      renderer.renderFaceZPos(block, 0.0D, 0.0D, 0.0D, renderInformation.getIcon2)
      tessellator.draw
    }
    renderer.renderMinX = 0.0D
    renderer.renderMinY = 0.0D
    renderer.renderMinZ = 0.0D
    renderer.renderMaxX = 1.0D
    renderer.renderMaxY = 1.0D
    renderer.renderMaxZ = 1.0D
  }
  // Draws the bay strips on the Z- face, then restores full render bounds.
  def renderZNeg(renderer : RenderBlocks, block: Block, renderInformations: Array[RenderInformation]){
    val tessellator = Tessellator.instance
    renderer.renderMinX = .3125D
    renderer.renderMaxX = .6875D
    val it = renderInformations.iterator
    while(it.hasNext){
      val renderInformation = it.next
      renderer.renderMinY = 1.0D / 16.0D * renderInformation.getPos
      renderer.renderMaxY = 1.0D / 16.0D * (renderInformation.getPos + 2.0D)
      tessellator.startDrawingQuads
      tessellator.setNormal(0.0F, 0.0F, -1.0F)
      renderer.renderFaceZNeg(block, 0.0D, 0.0D, 0.0D, renderInformation.getIcon)
      tessellator.draw
      tessellator.startDrawingQuads
      tessellator.setNormal(0.0F, 0.0F, -1.0F)
      tessellator.setColorOpaque_I(renderInformation.getColor)
      renderer.renderFaceZNeg(block, 0.0D, 0.0D, 0.0D, renderInformation.getIcon2)
      tessellator.draw
    }
    renderer.renderMinX = 0.0D
    renderer.renderMinY = 0.0D
    renderer.renderMinZ = 0.0D
    renderer.renderMaxX = 1.0D
    renderer.renderMaxY = 1.0D
    renderer.renderMaxZ = 1.0D
  }
  // One drive bay to draw: vertical position (in 1/16ths) and status color.
  // NOTE(review): both accessors return `i3`; icons `i` and `i2` are unused
  // here — possibly intentional, confirm against the texture layout.
  class RenderInformation(pos: Double, color: Int){
    def getIcon = i3
    def getIcon2 = i3
    def getPos = pos
    def getColor = color
  }
  // Fixed-UV icon wrapper; the interpolation methods ignore their argument
  // (the scaling factor is commented out) and getIconHeight is unimplemented.
  protected class Icon(minU: Float, maxU: Float, minV: Float, maxV: Float) extends IIcon {
    override def getIconHeight: Int = ???
    override def getMinU: Float = minU
    override def getMaxU: Float = maxU
    override def getInterpolatedV (p_94207_1_ : Double):Float = {
      val f: Float = this.getMaxV - this.getMinV
      this.getMinV + f// * (p_94207_1_.toFloat / 16.0F)
    }
    override def getIconName: String = ""
    override def getIconWidth: Int = 0
    override def getMinV: Float = minV
    override def getMaxV: Float = maxV
    override def getInterpolatedU (p_94214_1_ : Double):Float = {
      val f: Float = this.getMaxU - this.getMinU
      this.getMinU + f// * (p_94214_1_.toFloat / 16.0F)
    }
  }
}
| AmethystAir/ExtraCells2 | src/main/scala/extracells/render/block/RendererHardMEDrive.scala | Scala | mit | 10,837 |
package bignum.benchmark
import com.google.caliper.Param
import com.google.caliper.{Runner => CaliperRunner}
import bignum.BigInt2
import annotation.tailrec
/** Entry point: delegates to Caliper's benchmark runner. */
object DivideBenchmark {
  def main(args: Array[String]) {
    CaliperRunner.main(classOf[DivideBenchmark], args: _*)
  }
}
// Caliper benchmark comparing division across scala.BigInt, bignum.BigInt2
// and java.math.BigInteger. Timing semantics depend on the exact statement
// sequence, so the bodies are left untouched.
class DivideBenchmark extends SimpleScalaBenchmark {

  // Bit length of the divisor; the dividend is built with 3x this length.
  @Param(Array("200", "500", "1000"))
  val length: Int = 0

  // Dividends (3x length) and divisors (1x length), one pair per library.
  var bigint = BigInt("0")
  var bigint2 = BigInt2("0")
  var biginteger = new java.math.BigInteger("0")

  var bigint0 = BigInt("0")
  var bigint20 = BigInt2("0")
  var biginteger0 = new java.math.BigInteger("0")

  // Generates fresh random operands of the configured bit lengths.
  override def setUp() {
    val rng = new java.util.Random()
    val a0 = new java.math.BigInteger(length, rng).toString
    val a = new java.math.BigInteger(length*3, rng).toString
    bigint = BigInt(a)
    bigint2 = BigInt2(a)
    biginteger = new java.math.BigInteger(a)
    bigint0 = BigInt(a0)
    bigint20 = BigInt2(a0)
    biginteger0 = new java.math.BigInteger(a0)
  }

  // Each timed method divides twice in a row; note the running `result`
  // shrinks after the first division, so the two divisions differ in size.
  def timeBigInt(reps: Int) = repeat(reps) {
    var result = bigint
    var result0 = bigint0
    tfor(0)(_ < 2, _ + 1) { i =>
      result = result / result0
    }
    result
  }

  def timeBigInt2(reps: Int) = repeat(reps) {
    var result = bigint2
    var result0 = bigint20
    tfor(0)(_ < 2, _ + 1) { i =>
      result = result / result0
    }
    result
  }

  def timeBigInteger(reps: Int) = repeat(reps) {
    var result = biginteger
    var result0 = biginteger0
    tfor(0)(_ < 2, _ + 1) { i =>
      result = result.divide(result0)
    }
    result
  }

  // Generic tail-recursive for-loop: start at `i`, continue while `test`
  // holds, advance with `inc`, applying `f` to each value.
  @tailrec
  final def tfor[@specialized T](i: T)(test: T => Boolean, inc: T => T)(f: T => Unit) {
    if(test(i)) {
      f(i)
      tfor(inc(i))(test, inc)(f)
    }
  }
}
| techaddict/bignum | benchmark/src/main/scala/benchmark/Division.scala | Scala | mit | 1,735 |
package org.ausdigital.apecconnect.common.model
/**
 * Contract for enumeration entries: a stable integer value plus a
 * human-readable name.
 */
trait ApecConnectEnumEntry {
  def value: Int
  def name: String
}
| TeamAusDigital/apec-connect | server/modules/common/src/main/scala/org/ausdigital/apecconnect/common/model/ApecConnectEnumEntry.scala | Scala | apache-2.0 | 116 |
package org.jetbrains.plugins.scala.codeInspection.typeChecking
import com.intellij.codeInspection.{InspectionManager, LocalQuickFix, ProblemDescriptor, ProblemHighlightType}
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.codeInspection.{AbstractRegisteredInspection, ScalaInspectionBundle}
// Inspection that highlights `isInstanceOf` calls lacking an explicit
// type argument.
class IsInstanceOfInspection extends AbstractRegisteredInspection {
  /**
   * Returns a problem descriptor for matching elements, None otherwise
   * (leaving all other elements unhighlighted).
   */
  override protected def problemDescriptor(
    element: PsiElement,
    maybeQuickFix: Option[LocalQuickFix],
    descriptionTemplate: String,
    highlightType: ProblemHighlightType
  )(implicit manager: InspectionManager, isOnTheFly: Boolean): Option[ProblemDescriptor] = {
    element match {
      case IsInstanceOfCall.withoutExplicitType() =>
        val message = ScalaInspectionBundle.message("missing.explicit.type.in.isinstanceof.call")
        // no quick fix is offered, hence None instead of maybeQuickFix
        super.problemDescriptor(element, None, message, highlightType)
      case _ => None
    }
  }
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/codeInspection/typeChecking/IsInstanceOfInspection.scala | Scala | apache-2.0 | 937 |
package ru.freefry.furniture_factory.core
import akka.actor.ActorSystem
import akka.util.Timeout
import com.typesafe.config.{Config, ConfigFactory}
/**
* Core trait containing member `system: ActorSystem`.
* @author freefry
*/
trait Core {
  // Actor system provided by the concrete implementation (see BootedCore).
  implicit def system: ActorSystem
  // Default ask timeout made available implicitly.
  implicit def timeout: Timeout
  // Application configuration.
  implicit def conf: Config
  // Execution context backed by the actor system's dispatcher.
  implicit lazy val executionContext = system.dispatcher
}
/**
* Implements [[ru.freefry.furniture_factory.core.Core]], contains reference to actual [[akka.actor.ActorSystem]].
*/
trait BootedCore extends Core
  with FactoryImpl
  with MastersPoolImpl
  with LegMasterImpl
  with SeatAndBackrestMasterImpl
  with TabletopMasterImpl
  with AssemblyMasterImpl
  with AssemblyMasterWorkerImpl {
  import scala.concurrent.duration._
  // Lazily initialized so mixin order does not matter.
  implicit lazy val system = ActorSystem("furniture-factory")
  implicit lazy val timeout: Timeout = Timeout(5 seconds)
  implicit lazy val conf = ConfigFactory.load()
  /** Terminates ActorSystem at shutdown */
  // NOTE(review): `system.shutdown()` is deprecated in newer Akka releases
  // (replaced by `terminate()`); presumably fine for the Akka version in
  // use here — confirm against the project's Akka dependency.
  sys.addShutdownHook(system.shutdown())
}
| freefry/furniture-factory | src/main/scala/ru/freefry/furniture_factory/core/core.scala | Scala | apache-2.0 | 1,072 |
package ch.ninecode.model
import com.esotericsoftware.kryo.Kryo
import com.esotericsoftware.kryo.Serializer
import com.esotericsoftware.kryo.io.Input
import com.esotericsoftware.kryo.io.Output
import org.apache.spark.sql.Row
import ch.ninecode.cim.CIMClassInfo
import ch.ninecode.cim.CIMContext
import ch.ninecode.cim.CIMParseable
import ch.ninecode.cim.CIMRelationship
import ch.ninecode.cim.CIMSerializer
/**
* Resistive and reactive components of compensation for generator associated with IEEE type 2 voltage compensator for current flow out of another generator in the interconnection.
*
* @param IdentifiedObject [[ch.ninecode.model.IdentifiedObject IdentifiedObject]] Reference to the superclass object.
* @param rcij <font color="#0f0f0f">Resistive component of compensation of generator associated with this IEEE type 2 voltage compensator for current flow out of another generator (<i>Rcij</i>).</font>
* @param xcij <font color="#0f0f0f">Reactive component of compensation of generator associated with this IEEE type 2 voltage compensator for current flow out of another generator (<i>Xcij</i>).</font>
* @param SynchronousMachineDynamics [[ch.ninecode.model.SynchronousMachineDynamics SynchronousMachineDynamics]] Standard synchronous machine out of which current flow is being compensated for.
* @param VcompIEEEType2 [[ch.ninecode.model.VCompIEEEType2 VCompIEEEType2]] The standard IEEE type 2 voltage compensator of this compensation.
* @group VoltageCompensatorDynamics
* @groupname VoltageCompensatorDynamics Package VoltageCompensatorDynamics
* @groupdesc VoltageCompensatorDynamics <font color="#0f0f0f">Synchronous machine terminal voltage transducer and current compensator models</font> adjust the terminal voltage feedback to the excitation system by adding a quantity that is proportional to the terminal current of the generator. It is linked to a specific generator (synchronous machine).
* <font color="#0f0f0f">Several types of compensation are available on most excitation systems. Synchronous machine active and reactive current compensation are the most common. Either reactive droop compensation and/or line-drop compensation can be used, simulating an impedance drop and effectively regulating at some point other than the terminals of the machine. The impedance or range of adjustment and type of compensation should be specified for different types. </font>
* <font color="#0f0f0f">Care shall be taken to ensure that a consistent PU system is utilized for the compensator parameters and the synchronous machine current base.</font>
* <font color="#0f0f0f">For further information see IEEE 421.5-2005, 4.</font>
*
*
* <font color="#0f0f0f">
* </font>
*/
final case class GenICompensationForGenJ
(
    IdentifiedObject: IdentifiedObject = null,
    rcij: Double = 0.0,
    xcij: Double = 0.0,
    SynchronousMachineDynamics: String = null,
    VcompIEEEType2: String = null
)
extends
    Element
{
    /**
     * Return the superclass object.
     *
     * @return The typed superclass nested object.
     * @group Hierarchy
     * @groupname Hierarchy Class Hierarchy Related
     * @groupdesc Hierarchy Members related to the nested hierarchy of CIM classes.
     */
    override def sup: IdentifiedObject = IdentifiedObject

    //
    // Row overrides
    //

    /**
     * Return a copy of this object as a Row.
     *
     * Creates a clone of this object for use in Row manipulations.
     *
     * @return The copy of the object.
     * @group Row
     * @groupname Row SQL Row Implementation
     * @groupdesc Row Members related to implementing the SQL Row interface
     */
    override def copy (): Row =
    {
        clone().asInstanceOf[Row]
    }

    // Serialize this object's own fields as CIM XML; superclass fields are
    // emitted first via sup.export_fields. Emit positions must line up with
    // the companion object's `fields` array, and `mask` skips unset fields.
    override def export_fields: String =
    {
        implicit val s: StringBuilder = new StringBuilder(sup.export_fields)
        implicit val clz: String = GenICompensationForGenJ.cls
        def emitelem (position: Int, value: Any): Unit = if (mask(position)) emit_element(GenICompensationForGenJ.fields(position), value)
        def emitattr (position: Int, value: Any): Unit = if (mask(position)) emit_attribute(GenICompensationForGenJ.fields(position), value)
        emitelem(0, rcij)
        emitelem(1, xcij)
        emitattr(2, SynchronousMachineDynamics)
        emitattr(3, VcompIEEEType2)
        s.toString
    }

    // Wrap the exported fields in the element's RDF tag, choosing rdf:about
    // or rdf:ID depending on the `about` flag.
    override def export: String =
    {
        "\\t<cim:GenICompensationForGenJ rdf:%s=\\"%s\\">\\n%s\\t</cim:GenICompensationForGenJ>".format(if (about) "about" else "ID", id, export_fields)
    }
}
object GenICompensationForGenJ
extends
    CIMParseable[GenICompensationForGenJ]
{
    // Field names in position order; positions are shared by the parsers,
    // export_fields and the bitfield mask.
    override val fields: Array[String] = Array[String](
        "rcij",
        "xcij",
        "SynchronousMachineDynamics",
        "VcompIEEEType2"
    )
    // Relationships to other CIM classes with their multiplicities.
    override val relations: List[CIMRelationship] = List(
        CIMRelationship("SynchronousMachineDynamics", "SynchronousMachineDynamics", "1", "0..*"),
        CIMRelationship("VcompIEEEType2", "VCompIEEEType2", "1", "2..*")
    )
    val rcij: Fielder = parse_element(element(cls, fields(0)))
    val xcij: Fielder = parse_element(element(cls, fields(1)))
    val SynchronousMachineDynamics: Fielder = parse_attribute(attribute(cls, fields(2)))
    val VcompIEEEType2: Fielder = parse_attribute(attribute(cls, fields(3)))

    // Parse one GenICompensationForGenJ from the CIM context; `mask` records
    // which positions were present into the implicit bitfields accumulator.
    def parse (context: CIMContext): GenICompensationForGenJ =
    {
        implicit val ctx: CIMContext = context
        implicit val bitfields: Array[Int] = Array(0)
        val ret = GenICompensationForGenJ(
            IdentifiedObject.parse(context),
            toDouble(mask(rcij(), 0)),
            toDouble(mask(xcij(), 1)),
            mask(SynchronousMachineDynamics(), 2),
            mask(VcompIEEEType2(), 3)
        )
        ret.bitfields = bitfields
        ret
    }

    def serializer: Serializer[GenICompensationForGenJ] = GenICompensationForGenJSerializer
}
// Kryo serializer; the write order (parent, bitfields, fields) and the field
// order within toSerialize must match `read` exactly.
object GenICompensationForGenJSerializer extends CIMSerializer[GenICompensationForGenJ]
{
    def write (kryo: Kryo, output: Output, obj: GenICompensationForGenJ): Unit =
    {
        val toSerialize: Array[() => Unit] = Array(
            () => output.writeDouble(obj.rcij),
            () => output.writeDouble(obj.xcij),
            () => output.writeString(obj.SynchronousMachineDynamics),
            () => output.writeString(obj.VcompIEEEType2)
        )
        IdentifiedObjectSerializer.write(kryo, output, obj.sup)
        implicit val bitfields: Array[Int] = obj.bitfields
        writeBitfields(output)
        // Only fields whose bitfield bit is set are actually written.
        writeFields(toSerialize)
    }

    def read (kryo: Kryo, input: Input, cls: Class[GenICompensationForGenJ]): GenICompensationForGenJ =
    {
        val parent = IdentifiedObjectSerializer.read(kryo, input, classOf[IdentifiedObject])
        implicit val bitfields: Array[Int] = readBitfields(input)
        // Unset fields fall back to the case-class defaults.
        val obj = GenICompensationForGenJ(
            parent,
            if (isSet(0)) input.readDouble else 0.0,
            if (isSet(1)) input.readDouble else 0.0,
            if (isSet(2)) input.readString else null,
            if (isSet(3)) input.readString else null
        )
        obj.bitfields = bitfields
        obj
    }
}
/**
* <font color="#0f0f0f">Terminal voltage transducer and load compensator as defined in IEEE 421.5-2005, 4.
*
* This model is common to all excitation system models described in the IEEE Standard. </font>
* <font color="#0f0f0f">Parameter details:</font>
* <ol>
* <li><font color="#0f0f0f">If <i>Rc</i> and <i>Xc</i> are set to zero, the l</font>oad compensation is not employed and the behaviour is as a simple sensing circuit.</li>
* </ol>
* <ol>
* <li>If all parameters (<i>Rc</i>, <i>Xc</i> and <i>Tr</i>) are set to zero, the standard model VCompIEEEType1 is bypassed.</li>
* </ol>
* Reference: IEEE 421.5-2005 4.
*
* @param VoltageCompensatorDynamics [[ch.ninecode.model.VoltageCompensatorDynamics VoltageCompensatorDynamics]] Reference to the superclass object.
* @param rc <font color="#0f0f0f">Resistive component of compensation of a generator (<i>Rc</i>) (>= 0).</font>
* @param tr <font color="#0f0f0f">Time constant which is used for the combined voltage sensing and compensation signal (<i>Tr</i>) (>= 0).</font>
* @param xc <font color="#0f0f0f">Reactive component of compensation of a generator (<i>Xc</i>) (>= 0).</font>
* @group VoltageCompensatorDynamics
* @groupname VoltageCompensatorDynamics Package VoltageCompensatorDynamics
* @groupdesc VoltageCompensatorDynamics <font color="#0f0f0f">Synchronous machine terminal voltage transducer and current compensator models</font> adjust the terminal voltage feedback to the excitation system by adding a quantity that is proportional to the terminal current of the generator. It is linked to a specific generator (synchronous machine).
* <font color="#0f0f0f">Several types of compensation are available on most excitation systems. Synchronous machine active and reactive current compensation are the most common. Either reactive droop compensation and/or line-drop compensation can be used, simulating an impedance drop and effectively regulating at some point other than the terminals of the machine. The impedance or range of adjustment and type of compensation should be specified for different types. </font>
* <font color="#0f0f0f">Care shall be taken to ensure that a consistent PU system is utilized for the compensator parameters and the synchronous machine current base.</font>
* <font color="#0f0f0f">For further information see IEEE 421.5-2005, 4.</font>
*
*
* <font color="#0f0f0f">
* </font>
*/
final case class VCompIEEEType1
(
    VoltageCompensatorDynamics: VoltageCompensatorDynamics = null,
    rc: Double = 0.0,
    tr: Double = 0.0,
    xc: Double = 0.0
)
extends
    Element
{
    /**
     * Return the superclass object.
     *
     * @return The typed superclass nested object.
     * @group Hierarchy
     * @groupname Hierarchy Class Hierarchy Related
     * @groupdesc Hierarchy Members related to the nested hierarchy of CIM classes.
     */
    override def sup: VoltageCompensatorDynamics = VoltageCompensatorDynamics

    //
    // Row overrides
    //

    /**
     * Return a copy of this object as a Row.
     *
     * Creates a clone of this object for use in Row manipulations.
     *
     * @return The copy of the object.
     * @group Row
     * @groupname Row SQL Row Implementation
     * @groupdesc Row Members related to implementing the SQL Row interface
     */
    override def copy (): Row =
    {
        clone().asInstanceOf[Row]
    }

    // Serialize this object's own fields as CIM XML after the superclass
    // fields; positions must match the companion object's `fields` array.
    override def export_fields: String =
    {
        implicit val s: StringBuilder = new StringBuilder(sup.export_fields)
        implicit val clz: String = VCompIEEEType1.cls
        def emitelem (position: Int, value: Any): Unit = if (mask(position)) emit_element(VCompIEEEType1.fields(position), value)
        emitelem(0, rc)
        emitelem(1, tr)
        emitelem(2, xc)
        s.toString
    }

    // Wrap the exported fields in the element's RDF tag (rdf:about or rdf:ID).
    override def export: String =
    {
        "\\t<cim:VCompIEEEType1 rdf:%s=\\"%s\\">\\n%s\\t</cim:VCompIEEEType1>".format(if (about) "about" else "ID", id, export_fields)
    }
}
object VCompIEEEType1
extends
    CIMParseable[VCompIEEEType1]
{
    // Field names in position order; shared by parsers, export and bitfields.
    override val fields: Array[String] = Array[String](
        "rc",
        "tr",
        "xc"
    )
    val rc: Fielder = parse_element(element(cls, fields(0)))
    val tr: Fielder = parse_element(element(cls, fields(1)))
    val xc: Fielder = parse_element(element(cls, fields(2)))

    // Parse one VCompIEEEType1; `mask` records present positions into the
    // implicit bitfields accumulator.
    def parse (context: CIMContext): VCompIEEEType1 =
    {
        implicit val ctx: CIMContext = context
        implicit val bitfields: Array[Int] = Array(0)
        val ret = VCompIEEEType1(
            VoltageCompensatorDynamics.parse(context),
            toDouble(mask(rc(), 0)),
            toDouble(mask(tr(), 1)),
            toDouble(mask(xc(), 2))
        )
        ret.bitfields = bitfields
        ret
    }

    def serializer: Serializer[VCompIEEEType1] = VCompIEEEType1Serializer
}
// Kryo serializer; write order (parent, bitfields, fields) must match `read`.
object VCompIEEEType1Serializer extends CIMSerializer[VCompIEEEType1]
{
    def write (kryo: Kryo, output: Output, obj: VCompIEEEType1): Unit =
    {
        val toSerialize: Array[() => Unit] = Array(
            () => output.writeDouble(obj.rc),
            () => output.writeDouble(obj.tr),
            () => output.writeDouble(obj.xc)
        )
        VoltageCompensatorDynamicsSerializer.write(kryo, output, obj.sup)
        implicit val bitfields: Array[Int] = obj.bitfields
        writeBitfields(output)
        // Only fields whose bitfield bit is set are actually written.
        writeFields(toSerialize)
    }

    def read (kryo: Kryo, input: Input, cls: Class[VCompIEEEType1]): VCompIEEEType1 =
    {
        val parent = VoltageCompensatorDynamicsSerializer.read(kryo, input, classOf[VoltageCompensatorDynamics])
        implicit val bitfields: Array[Int] = readBitfields(input)
        // Unset fields fall back to the case-class defaults.
        val obj = VCompIEEEType1(
            parent,
            if (isSet(0)) input.readDouble else 0.0,
            if (isSet(1)) input.readDouble else 0.0,
            if (isSet(2)) input.readDouble else 0.0
        )
        obj.bitfields = bitfields
        obj
    }
}
/**
* <font color="#0f0f0f">Terminal voltage transducer and load compensator as defined in IEEE 421.5-2005, 4.
*
* This model is designed to cover the following types of compensation: </font>
* <ul>
* <li><font color="#0f0f0f">reactive droop;</font></li>
* <li><font color="#0f0f0f">transformer-drop or line-drop compensation;</font></li>
* <li><font color="#0f0f0f">reactive differential compensation known also as cross-current compensation.</font></li>
* </ul>
* <font color="#0f0f0f">Reference: IEEE 421.5-2005, 4.</font>
*
* @param VoltageCompensatorDynamics [[ch.ninecode.model.VoltageCompensatorDynamics VoltageCompensatorDynamics]] Reference to the superclass object.
* @param tr <font color="#0f0f0f">Time constant which is used for the combined voltage sensing and compensation signal (<i>Tr</i>) (>= 0).</font>
* @param GenICompensationForGenJ [[ch.ninecode.model.GenICompensationForGenJ GenICompensationForGenJ]] Compensation of this voltage compensator's generator for current flow out of another generator.
* @group VoltageCompensatorDynamics
* @groupname VoltageCompensatorDynamics Package VoltageCompensatorDynamics
* @groupdesc VoltageCompensatorDynamics <font color="#0f0f0f">Synchronous machine terminal voltage transducer and current compensator models</font> adjust the terminal voltage feedback to the excitation system by adding a quantity that is proportional to the terminal current of the generator. It is linked to a specific generator (synchronous machine).
* <font color="#0f0f0f">Several types of compensation are available on most excitation systems. Synchronous machine active and reactive current compensation are the most common. Either reactive droop compensation and/or line-drop compensation can be used, simulating an impedance drop and effectively regulating at some point other than the terminals of the machine. The impedance or range of adjustment and type of compensation should be specified for different types. </font>
* <font color="#0f0f0f">Care shall be taken to ensure that a consistent PU system is utilized for the compensator parameters and the synchronous machine current base.</font>
* <font color="#0f0f0f">For further information see IEEE 421.5-2005, 4.</font>
*
*
* <font color="#0f0f0f">
* </font>
*/
final case class VCompIEEEType2
(
    VoltageCompensatorDynamics: VoltageCompensatorDynamics = null,
    tr: Double = 0.0,
    GenICompensationForGenJ: List[String] = null
)
extends
    Element
{
    /**
     * Return the superclass object.
     *
     * @return The typed superclass nested object.
     * @group Hierarchy
     * @groupname Hierarchy Class Hierarchy Related
     * @groupdesc Hierarchy Members related to the nested hierarchy of CIM classes.
     */
    override def sup: VoltageCompensatorDynamics = VoltageCompensatorDynamics

    //
    // Row overrides
    //

    /**
     * Return a copy of this object as a Row.
     *
     * Creates a clone of this object for use in Row manipulations.
     *
     * @return The copy of the object.
     * @group Row
     * @groupname Row SQL Row Implementation
     * @groupdesc Row Members related to implementing the SQL Row interface
     */
    override def copy (): Row =
    {
        clone().asInstanceOf[Row]
    }

    // Serialize this object's own fields as CIM XML after the superclass
    // fields; emitattrs repeats the attribute tag once per list element.
    override def export_fields: String =
    {
        implicit val s: StringBuilder = new StringBuilder(sup.export_fields)
        implicit val clz: String = VCompIEEEType2.cls
        def emitelem (position: Int, value: Any): Unit = if (mask(position)) emit_element(VCompIEEEType2.fields(position), value)
        def emitattrs (position: Int, value: List[String]): Unit = if (mask(position) && (null != value)) value.foreach(x => emit_attribute(VCompIEEEType2.fields(position), x))
        emitelem(0, tr)
        emitattrs(1, GenICompensationForGenJ)
        s.toString
    }

    // Wrap the exported fields in the element's RDF tag (rdf:about or rdf:ID).
    override def export: String =
    {
        "\\t<cim:VCompIEEEType2 rdf:%s=\\"%s\\">\\n%s\\t</cim:VCompIEEEType2>".format(if (about) "about" else "ID", id, export_fields)
    }
}
object VCompIEEEType2
extends
    CIMParseable[VCompIEEEType2]
{
    // Field names in position order; shared by parsers, export and bitfields.
    override val fields: Array[String] = Array[String](
        "tr",
        "GenICompensationForGenJ"
    )
    // Relationships to other CIM classes with their multiplicities.
    override val relations: List[CIMRelationship] = List(
        CIMRelationship("GenICompensationForGenJ", "GenICompensationForGenJ", "2..*", "1")
    )
    val tr: Fielder = parse_element(element(cls, fields(0)))
    // Multi-valued attribute: may occur repeatedly in the source XML.
    val GenICompensationForGenJ: FielderMultiple = parse_attributes(attribute(cls, fields(1)))

    // Parse one VCompIEEEType2; `mask`/`masks` record present positions into
    // the implicit bitfields accumulator.
    def parse (context: CIMContext): VCompIEEEType2 =
    {
        implicit val ctx: CIMContext = context
        implicit val bitfields: Array[Int] = Array(0)
        val ret = VCompIEEEType2(
            VoltageCompensatorDynamics.parse(context),
            toDouble(mask(tr(), 0)),
            masks(GenICompensationForGenJ(), 1)
        )
        ret.bitfields = bitfields
        ret
    }

    def serializer: Serializer[VCompIEEEType2] = VCompIEEEType2Serializer
}
// Kryo serializer; write order (parent, bitfields, fields) must match `read`.
object VCompIEEEType2Serializer extends CIMSerializer[VCompIEEEType2]
{
    def write (kryo: Kryo, output: Output, obj: VCompIEEEType2): Unit =
    {
        val toSerialize: Array[() => Unit] = Array(
            () => output.writeDouble(obj.tr),
            () => writeList(obj.GenICompensationForGenJ, output)
        )
        VoltageCompensatorDynamicsSerializer.write(kryo, output, obj.sup)
        implicit val bitfields: Array[Int] = obj.bitfields
        writeBitfields(output)
        // Only fields whose bitfield bit is set are actually written.
        writeFields(toSerialize)
    }

    def read (kryo: Kryo, input: Input, cls: Class[VCompIEEEType2]): VCompIEEEType2 =
    {
        val parent = VoltageCompensatorDynamicsSerializer.read(kryo, input, classOf[VoltageCompensatorDynamics])
        implicit val bitfields: Array[Int] = readBitfields(input)
        // Unset fields fall back to the case-class defaults.
        val obj = VCompIEEEType2(
            parent,
            if (isSet(0)) input.readDouble else 0.0,
            if (isSet(1)) readList(input) else null
        )
        obj.bitfields = bitfields
        obj
    }
}
/**
* Voltage compensator function block whose behaviour is described by reference to a standard model <font color="#0f0f0f">or by definition of a user-defined model.</font>
*
* @param DynamicsFunctionBlock [[ch.ninecode.model.DynamicsFunctionBlock DynamicsFunctionBlock]] Reference to the superclass object.
* @param ExcitationSystemDynamics [[ch.ninecode.model.ExcitationSystemDynamics ExcitationSystemDynamics]] Excitation system model with which this voltage compensator is associated.
* @param RemoteInputSignal [[ch.ninecode.model.RemoteInputSignal RemoteInputSignal]] Remote input signal used by this voltage compensator model.
* @group VoltageCompensatorDynamics
* @groupname VoltageCompensatorDynamics Package VoltageCompensatorDynamics
* @groupdesc VoltageCompensatorDynamics <font color="#0f0f0f">Synchronous machine terminal voltage transducer and current compensator models</font> adjust the terminal voltage feedback to the excitation system by adding a quantity that is proportional to the terminal current of the generator. It is linked to a specific generator (synchronous machine).
* <font color="#0f0f0f">Several types of compensation are available on most excitation systems. Synchronous machine active and reactive current compensation are the most common. Either reactive droop compensation and/or line-drop compensation can be used, simulating an impedance drop and effectively regulating at some point other than the terminals of the machine. The impedance or range of adjustment and type of compensation should be specified for different types. </font>
* <font color="#0f0f0f">Care shall be taken to ensure that a consistent PU system is utilized for the compensator parameters and the synchronous machine current base.</font>
* <font color="#0f0f0f">For further information see IEEE 421.5-2005, 4.</font>
*
*
* <font color="#0f0f0f">
* </font>
*/
final case class VoltageCompensatorDynamics
(
    DynamicsFunctionBlock: DynamicsFunctionBlock = null,
    ExcitationSystemDynamics: String = null,
    RemoteInputSignal: String = null
)
extends
    Element
{
    /**
     * Return the superclass object.
     *
     * @return The typed superclass nested object.
     * @group Hierarchy
     * @groupname Hierarchy Class Hierarchy Related
     * @groupdesc Hierarchy Members related to the nested hierarchy of CIM classes.
     */
    override def sup: DynamicsFunctionBlock = DynamicsFunctionBlock

    //
    // Row overrides
    //

    /**
     * Return a copy of this object as a Row.
     *
     * Creates a clone of this object for use in Row manipulations.
     *
     * @return The copy of the object.
     * @group Row
     * @groupname Row SQL Row Implementation
     * @groupdesc Row Members related to implementing the SQL Row interface
     */
    override def copy (): Row =
    {
        clone().asInstanceOf[Row]
    }

    // Serialize this object's own fields as CIM XML after the superclass
    // fields; positions must match the companion object's `fields` array.
    override def export_fields: String =
    {
        implicit val s: StringBuilder = new StringBuilder(sup.export_fields)
        implicit val clz: String = VoltageCompensatorDynamics.cls
        def emitattr (position: Int, value: Any): Unit = if (mask(position)) emit_attribute(VoltageCompensatorDynamics.fields(position), value)
        emitattr(0, ExcitationSystemDynamics)
        emitattr(1, RemoteInputSignal)
        s.toString
    }

    // Wrap the exported fields in the element's RDF tag (rdf:about or rdf:ID).
    override def export: String =
    {
        "\\t<cim:VoltageCompensatorDynamics rdf:%s=\\"%s\\">\\n%s\\t</cim:VoltageCompensatorDynamics>".format(if (about) "about" else "ID", id, export_fields)
    }
}
object VoltageCompensatorDynamics
extends
    CIMParseable[VoltageCompensatorDynamics]
{
    // Field names in position order; shared by parsers, export and bitfields.
    override val fields: Array[String] = Array[String](
        "ExcitationSystemDynamics",
        "RemoteInputSignal"
    )
    // Relationships to other CIM classes with their multiplicities.
    override val relations: List[CIMRelationship] = List(
        CIMRelationship("ExcitationSystemDynamics", "ExcitationSystemDynamics", "1", "1"),
        CIMRelationship("RemoteInputSignal", "RemoteInputSignal", "0..1", "0..1")
    )
    val ExcitationSystemDynamics: Fielder = parse_attribute(attribute(cls, fields(0)))
    val RemoteInputSignal: Fielder = parse_attribute(attribute(cls, fields(1)))

    // Parse one VoltageCompensatorDynamics; `mask` records present positions
    // into the implicit bitfields accumulator.
    def parse (context: CIMContext): VoltageCompensatorDynamics =
    {
        implicit val ctx: CIMContext = context
        implicit val bitfields: Array[Int] = Array(0)
        val ret = VoltageCompensatorDynamics(
            DynamicsFunctionBlock.parse(context),
            mask(ExcitationSystemDynamics(), 0),
            mask(RemoteInputSignal(), 1)
        )
        ret.bitfields = bitfields
        ret
    }

    def serializer: Serializer[VoltageCompensatorDynamics] = VoltageCompensatorDynamicsSerializer
}
// Kryo serializer; write order (parent, bitfields, fields) must match `read`.
object VoltageCompensatorDynamicsSerializer extends CIMSerializer[VoltageCompensatorDynamics]
{
    def write (kryo: Kryo, output: Output, obj: VoltageCompensatorDynamics): Unit =
    {
        val toSerialize: Array[() => Unit] = Array(
            () => output.writeString(obj.ExcitationSystemDynamics),
            () => output.writeString(obj.RemoteInputSignal)
        )
        DynamicsFunctionBlockSerializer.write(kryo, output, obj.sup)
        implicit val bitfields: Array[Int] = obj.bitfields
        writeBitfields(output)
        // Only fields whose bitfield bit is set are actually written.
        writeFields(toSerialize)
    }

    def read (kryo: Kryo, input: Input, cls: Class[VoltageCompensatorDynamics]): VoltageCompensatorDynamics =
    {
        val parent = DynamicsFunctionBlockSerializer.read(kryo, input, classOf[DynamicsFunctionBlock])
        implicit val bitfields: Array[Int] = readBitfields(input)
        // Unset fields fall back to the case-class defaults.
        val obj = VoltageCompensatorDynamics(
            parent,
            if (isSet(0)) input.readString else null,
            if (isSet(1)) input.readString else null
        )
        obj.bitfields = bitfields
        obj
    }
}
private[ninecode] object _VoltageCompensatorDynamics
{
    // Registers every CIM class defined in this file with the parser
    // framework so the reader can dispatch on the class name.
    def register: List[CIMClassInfo] =
    {
        List(
            GenICompensationForGenJ.register,
            VCompIEEEType1.register,
            VCompIEEEType2.register,
            VoltageCompensatorDynamics.register
        )
    }
}
package org.bitcoins.core.script
import org.bitcoins.core.crypto._
import org.bitcoins.core.script.constant._
import org.bitcoins.core.script.control.{OP_ELSE, OP_ENDIF, OP_IF, OP_NOTIF}
import org.bitcoins.core.script.flag.ScriptFlag
import org.bitcoins.core.script.result._
import org.bitcoins.core.util.BitcoinScriptUtil
/** Created by chris on 2/3/16.
*/
/** The state of a Bitcoin script being run by the interpreter: the signature
  * context, the stacks, the remaining script and the validation flags.
  */
sealed trait ScriptProgram {

  /** This contains all relevant information for hashing and checking a
    * [[org.bitcoins.core.protocol.script.ScriptSignature ScriptSignature]] for
    * a [[org.bitcoins.core.protocol.transaction.Transaction Transaction]].
    */
  def txSignatureComponent: TxSigComponent

  /** The current state of the stack for execution of the
    * [[org.bitcoins.core.script.ScriptProgram ScriptProgram]].
    */
  def stack: List[ScriptToken]

  /** The script operations that need to still be executed. */
  def script: List[ScriptToken]

  /** The original script that was given. */
  def originalScript: List[ScriptToken]

  /** The alternative stack is used in some Script op codes. */
  def altStack: List[ScriptToken]

  /** [[org.bitcoins.core.script.flag.ScriptFlag ScriptFlag]] that are run with the script.
    * These flags indicate special conditions that a script needs to be run with.
    * [[https://github.com/bitcoin/bitcoin/blob/master/src/script/interpreter.h#L31]]
    * @return
    */
  def flags: Seq[ScriptFlag]

  /** Returns true if the stack top is true */
  def stackTopIsTrue: Boolean =
    stack.nonEmpty && BitcoinScriptUtil.castToBool(stack.head)

  /** Returns true if the stack top is false (including the empty-stack case). */
  def stackTopIsFalse: Boolean = !stackTopIsTrue

  /** Sets a [[org.bitcoins.core.script.result.ScriptError ScriptError]] on a given
    * [[org.bitcoins.core.script.ScriptProgram ScriptProgram]].
    * @param error the error that the program hit while being executed in the script interpreter
    * @return the ExecutedScriptProgram with the given error set inside of the trait
    */
  def failExecution(error: ScriptError): ExecutedScriptProgram
}
/** This represents a [[org.bitcoins.core.script.ScriptProgram ScriptProgram]]
* before any script operations have been executed in the
* [[org.bitcoins.core.script.interpreter.ScriptInterpreter ScriptInterpreter]].
*/
case class PreExecutionScriptProgram(
    txSignatureComponent: TxSigComponent,
    stack: List[ScriptToken],
    script: List[ScriptToken],
    originalScript: List[ScriptToken],
    altStack: List[ScriptToken],
    flags: Seq[ScriptFlag])
    extends ScriptProgram {

  /** Transitions this program into the in-progress state, with no code
    * separator seen yet and an empty conditional tree.
    */
  def toExecutionInProgress: ExecutionInProgressScriptProgram =
    ExecutionInProgressScriptProgram(
      txSignatureComponent = txSignatureComponent,
      stack = stack,
      script = script,
      originalScript = originalScript,
      altStack = altStack,
      flags = flags,
      lastCodeSeparator = None,
      conditionalCounter = ConditionalCounter.empty
    )

  /** Fails the program by first moving it into the in-progress state. */
  override def failExecution(error: ScriptError): ExecutedScriptProgram =
    toExecutionInProgress.failExecution(error)

  /** Replaces the main stack. */
  def updateStack(tokens: Seq[ScriptToken]): PreExecutionScriptProgram =
    copy(stack = tokens.toList)

  /** Replaces the alt stack. */
  def updateAltStack(tokens: Seq[ScriptToken]): PreExecutionScriptProgram =
    copy(altStack = tokens.toList)

  /** Replaces the remaining script. */
  def updateScript(tokens: Seq[ScriptToken]): PreExecutionScriptProgram =
    copy(script = tokens.toList)

  /** Replaces the original script. */
  def updateOriginalScript(
      tokens: Seq[ScriptToken]): PreExecutionScriptProgram =
    copy(originalScript = tokens.toList)

  /** Replaces the main stack and the remaining script in one step. */
  def updateStackAndScript(
      stackTokens: Seq[ScriptToken],
      scriptTokens: Seq[ScriptToken]): PreExecutionScriptProgram = {
    val updated = updateStack(stackTokens).updateScript(scriptTokens)
    // Sanity checks carried over from the original implementation.
    require(updated.stack == stackTokens)
    require(updated.script == scriptTokens)
    updated
  }
}
object PreExecutionScriptProgram {

  /** Builds a fresh program from a [[TxSigComponent]]: empty stacks, with the
    * scriptSig's asm as both the script to run and the original script.
    */
  def apply(txSigComponent: TxSigComponent): PreExecutionScriptProgram = {
    // Hoisted: the asm list was previously converted twice (once for `script`
    // and once for `originalScript`); compute it a single time.
    val asm = txSigComponent.scriptSignature.asm.toList
    PreExecutionScriptProgram(
      txSignatureComponent = txSigComponent,
      stack = Nil,
      script = asm,
      originalScript = asm,
      altStack = Nil,
      flags = txSigComponent.flags
    )
  }
}
/** This represents any ScriptProgram that is not PreExecution, i.e. a program
  * that is either mid-execution or has finished executing.
  */
sealed trait StartedScriptProgram extends ScriptProgram
/** Implements the counting required for O(1) handling of conditionals in Bitcoin Script.
* @see [[https://github.com/bitcoin/bitcoin/pull/16902]]
*
* @param trueCount The depth of OP_IFs/OP_NOTIFs we've entered on the true condition before the first false.
* @param falseAndIgnoreCount The depth of OP_IFs/OP_NOTIFs we've entered after and including the first false condition.
* Every OP_IF/OP_NOTIF adds to trueCount or falseAndIgnoreCount.
* OP_ELSE has an effect only when falseAndIgnoreCount == 0 or 1, in which case it moves
* 1 from trueCount to falseAndIgnoreCount or vice versa.
* OP_ENDIF subtracts one from either falseAndIgnoreCount or trueCount if falseAndIgnoreCount == 0.
* trueCount + falseAndIgnoreCount represents the current depth in the conditional tree.
* falseAndIgnoreCount == 0 represents whether operations should be executed.
*/
case class ConditionalCounter(trueCount: Int, falseAndIgnoreCount: Int) {
  // Negative counts can only arise from unbalanced conditionals, which the
  // interpreter is expected to have rejected before reaching this point.
  require(trueCount >= 0, "Should have failed as unbalanced")
  require(falseAndIgnoreCount >= 0, "Should have failed as unbalanced")

  /** True when we have not yet entered any false (skipped) branch. */
  def noFalseEncountered: Boolean = falseAndIgnoreCount == 0

  /** True when we have not entered any executing (true) branch. */
  def noTrueEncountered: Boolean = trueCount == 0

  /** True when we are outside of every conditional. */
  def noConditionEncountered: Boolean =
    noTrueEncountered && noFalseEncountered

  /** Current nesting depth in the conditional tree. */
  def totalDepth: Int = trueCount + falseAndIgnoreCount

  /** Should be called for every OP_IF and OP_NOTIF with whether the first (true)
    * or second (false) branch should be taken.
    */
  def addCondition(condition: Boolean): ConditionalCounter =
    if (condition && noFalseEncountered)
      copy(trueCount = trueCount + 1)
    else
      copy(falseAndIgnoreCount = falseAndIgnoreCount + 1)

  /** Should be called on for every OP_ELSE.
    *
    * It is assumed that !noConditionEncountered
    */
  def invertCondition(): ConditionalCounter =
    falseAndIgnoreCount match {
      case 0 =>
        // We were executing; the else-branch is skipped from here on.
        ConditionalCounter(trueCount - 1, 1)
      case 1 =>
        // The innermost skipped branch flips back to executing.
        ConditionalCounter(trueCount + 1, 0)
      case _ =>
        // Nested inside an already-skipped branch: nothing changes.
        this
    }

  /** Should be called on for every OP_ENDIF.
    *
    * It is assumed that !noConditionEncountered
    */
  def removeCondition(): ConditionalCounter =
    if (falseAndIgnoreCount == 0)
      copy(trueCount = trueCount - 1)
    else
      copy(falseAndIgnoreCount = falseAndIgnoreCount - 1)
}
object ConditionalCounter {

  /** The state outside of any conditional: zero depth on both branches. */
  val empty: ConditionalCounter =
    ConditionalCounter(trueCount = 0, falseAndIgnoreCount = 0)
}
/** Type for a [[org.bitcoins.core.script.ScriptProgram ScriptProgram]] that is currently being
* evaluated by the [[org.bitcoins.core.script.interpreter.ScriptInterpreter ScriptInterpreter]].
*
* @param lastCodeSeparator The index of the last [[org.bitcoins.core.script.crypto.OP_CODESEPARATOR OP_CODESEPARATOR]]
* @param conditionalCounter Keeps track of where we are within a conditional tree.
*/
case class ExecutionInProgressScriptProgram(
    txSignatureComponent: TxSigComponent,
    stack: List[ScriptToken],
    script: List[ScriptToken],
    originalScript: List[ScriptToken],
    altStack: List[ScriptToken],
    flags: Seq[ScriptFlag],
    lastCodeSeparator: Option[Int],
    conditionalCounter: ConditionalCounter)
    extends StartedScriptProgram {

  /** Finalizes execution. A still-open conditional (totalDepth > 0) is an
    * unbalanced OP_IF/OP_NOTIF, which becomes ScriptErrorUnbalancedConditional.
    */
  def toExecutedProgram: ExecutedScriptProgram = {
    val errorOpt = if (conditionalCounter.totalDepth > 0) {
      Some(ScriptErrorUnbalancedConditional)
    } else {
      None
    }

    ExecutedScriptProgram(
      txSignatureComponent,
      stack,
      script,
      originalScript,
      altStack,
      flags,
      errorOpt
    )
  }

  /** Finalizes execution with the given error set. */
  override def failExecution(error: ScriptError): ExecutedScriptProgram = {
    this.toExecutedProgram.failExecution(error)
  }

  /** Replaces the script flags wholesale. */
  def replaceFlags(
      newFlags: Seq[ScriptFlag]): ExecutionInProgressScriptProgram = {
    this.copy(flags = newFlags)
  }

  /** Non-conditional opcodes should be executed only if this is true */
  def isInExecutionBranch: Boolean = {
    conditionalCounter.noFalseEncountered
  }

  /** ScriptInterpreter should look at the script head only if this is true.
    *
    * Note that OP_IF, OP_NOTIF, OP_ELSE, and OP_ENDIF must be executed even if
    * isInExecutionBranch is false as they must modify the states of trueCount and falseAndIgnoreCount.
    */
  def shouldExecuteNextOperation: Boolean = {
    script.headOption match {
      case None                                        => false
      case Some(OP_IF | OP_NOTIF | OP_ELSE | OP_ENDIF) => true
      case Some(_)                                     => isInExecutionBranch
    }
  }

  /** Should be called for every OP_IF and OP_NOTIF with whether the first (true)
    * or second (false) branch should be taken.
    */
  def addCondition(condition: Boolean): ExecutionInProgressScriptProgram = {
    this.copy(conditionalCounter = conditionalCounter.addCondition(condition))
  }

  /** Should be called on for every OP_ELSE; an OP_ELSE with no matching
    * OP_IF/OP_NOTIF fails with ScriptErrorUnbalancedConditional.
    */
  def invertCondition(): StartedScriptProgram = {
    if (conditionalCounter.noConditionEncountered) {
      this.failExecution(ScriptErrorUnbalancedConditional)
    } else {
      this.copy(conditionalCounter = conditionalCounter.invertCondition())
    }
  }

  /** Should be called on for every OP_ENDIF; an OP_ENDIF with no matching
    * OP_IF/OP_NOTIF fails with ScriptErrorUnbalancedConditional.
    */
  def removeCondition(): StartedScriptProgram = {
    if (conditionalCounter.noConditionEncountered) {
      this.failExecution(ScriptErrorUnbalancedConditional)
    } else {
      this.copy(conditionalCounter = conditionalCounter.removeCondition())
    }
  }

  /** Removes the flags on the given [[org.bitcoins.core.script.ScriptProgram ScriptProgram]]
    *
    * @return
    */
  def removeFlags(): ExecutionInProgressScriptProgram = {
    this.replaceFlags(Seq.empty)
  }

  /** Replaces the main stack. */
  def updateStack(
      tokens: Seq[ScriptToken]): ExecutionInProgressScriptProgram = {
    this.copy(stack = tokens.toList)
  }

  /** Replaces the alt stack. */
  def updateAltStack(
      tokens: Seq[ScriptToken]): ExecutionInProgressScriptProgram = {
    this.copy(altStack = tokens.toList)
  }

  /** Replaces the remaining script. */
  def updateScript(
      tokens: Seq[ScriptToken]): ExecutionInProgressScriptProgram = {
    this.copy(script = tokens.toList)
  }

  /** Replaces the main stack and the remaining script in one step. */
  def updateStackAndScript(
      stack: Seq[ScriptToken],
      script: Seq[ScriptToken]): ExecutionInProgressScriptProgram = {
    this
      .updateStack(stack)
      .updateScript(script)
  }

  /** Replaces the original script. */
  def updateOriginalScript(
      tokens: Seq[ScriptToken]): ExecutionInProgressScriptProgram = {
    this.copy(originalScript = tokens.toList)
  }

  /** Records the index of the most recent OP_CODESEPARATOR executed. */
  def updateLastCodeSeparator(
      newLastCodeSeparator: Int): ExecutionInProgressScriptProgram = {
    this.copy(lastCodeSeparator = Some(newLastCodeSeparator))
  }
}
/** A [[org.bitcoins.core.script.ScriptProgram ScriptProgram]] that has been
  * evaluated completely by the
  * [[org.bitcoins.core.script.interpreter.ScriptInterpreter ScriptInterpreter]].
  *
  * @param error the [[org.bitcoins.core.script.result.ScriptError ScriptError]],
  *              if any, that the program encountered during its execution
  */
case class ExecutedScriptProgram(
    txSignatureComponent: TxSigComponent,
    stack: List[ScriptToken],
    script: List[ScriptToken],
    originalScript: List[ScriptToken],
    altStack: List[ScriptToken],
    flags: Seq[ScriptFlag],
    error: Option[ScriptError])
    extends StartedScriptProgram {

  /** Failing an already-executed program simply records the error. */
  override def failExecution(error: ScriptError): ExecutedScriptProgram =
    copy(error = Some(error))
}
| bitcoin-s/bitcoin-s | core/src/main/scala/org/bitcoins/core/script/ScriptProgram.scala | Scala | mit | 12,339 |
/**
* Swaggy Jenkins
* Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: blah@cliffano.com
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
*/
package org.openapitools.app
import org.scalatra.swagger.{ ApiInfo, SwaggerWithAuth, Swagger }
import org.scalatra.swagger.{ JacksonSwaggerBase, Swagger }
import org.scalatra.ScalatraServlet
import org.json4s.{ DefaultFormats, Formats }
// Servlet that serves the generated Swagger/OpenAPI resource listing as JSON
// (via JacksonSwaggerBase), using the implicitly provided OpenAPIApp registry.
class ResourcesApp(implicit protected val swagger: OpenAPIApp)
  extends ScalatraServlet with JacksonSwaggerBase {
  before() {
    // Allow cross-origin reads of the API docs from any host.
    response.headers += ("Access-Control-Allow-Origin" -> "*")
  }
}
// Swagger registry carrying the API metadata defined in OpenAPIInfo; instances
// are provided implicitly to swagger-aware servlets such as ResourcesApp.
class OpenAPIApp extends Swagger(apiInfo = OpenAPIInfo.apiInfo, apiVersion = "1.0", swaggerVersion = Swagger.SpecVersion)
object OpenAPIInfo {
  // Static API metadata: title, description, docs URL, contact, license name,
  // and license URL (in that positional order) shown in the Swagger listing.
  val apiInfo = ApiInfo(
    """Swaggy Jenkins""",
    """Jenkins API clients generated from Swagger / Open API specification""",
    """http://github.com/cliffano/swaggy-jenkins""",
    """blah@cliffano.com""",
    """All rights reserved""",
    """http://apache.org/licenses/LICENSE-2.0.html""")
}
| cliffano/swaggy-jenkins | clients/scalatra/generated/src/main/scala/ServletApp.scala | Scala | mit | 1,195 |
package twotails
import org.scalatest.{ FlatSpec, Matchers }
// Mutually recursive methods using default arguments. The @mutualrec annotation
// is expected to make the mutual tail calls stack-safe (see ArgumentListTest,
// which runs them 400k deep).
// NOTE(review): the macro operates on this exact `final` + tail-call shape —
// do not restyle these bodies.
class Bippy{
  @mutualrec final def one(x: Int, y: Int = 1): Int = if(0 < x) two(y, x) else 0
  @mutualrec final def two(x: Int, y: Int = 1): Int = if(0 < x) one(x-1, y-1) else 0
}
// Mutually recursive methods with two parameter lists; @mutualrec is expected
// to make the mutual tail calls stack-safe (exercised by ArgumentListTest).
class Baz{
  @mutualrec final def one(x: Int)(y: Int): Int = if(0 < x) two(y)(x) else 0
  @mutualrec final def two(x: Int)(y: Int): Int = if(0 < x) one(x-1)(y-1) else 0
}
// Mutually recursive methods with three parameter lists; the third argument `z`
// is threaded through and counts the recursion depth (returned when x reaches 0).
class Bazooka{
  @mutualrec final def one(x: Int)(y: Int)(z: Int): Int = if(0 < x) two(y)(x)(z) else z
  @mutualrec final def two(x: Int)(y: Int)(z: Int): Int = if(0 < x) one(x-1)(y-1)(z+1) else z
}
class ArgumentListTest extends FlatSpec with Matchers{
  // Deep enough that un-eliminated mutual recursion would overflow the stack.
  val fourK = 400000
  // NOTE(review): `Bippy` (default-argument variant) is defined above but never
  // exercised here — confirm whether a test case for it is missing.
  "Two mutually recursive, double-argument list, annotated methods" should "not throw a StackOverflow" in{
    val c = new Baz
    c.one(fourK)(fourK) should equal (0)
  }

  "Two mutually recursive, multi-argument list, annotated methods" should "not throw a StackOverflow" in{
    val baz = new Bazooka
    // z starts at 0 and is incremented once per one->two round trip.
    baz.one(fourK)(fourK)(0) should equal (fourK)
  }
}
package com.github.agourlay.cornichon.steps
import com.github.agourlay.cornichon.core.Engine
import com.github.agourlay.cornichon.dsl.ProvidedInstances
import com.github.agourlay.cornichon.resolver.PlaceholderResolver
import monix.execution.Scheduler
/** Shared fixtures for step tests: a global scheduler, a placeholder resolver
  * without custom extractors, and an engine built from that resolver.
  */
trait StepUtilSpec extends ProvidedInstances {

  // Global Monix scheduler used to run steps in tests.
  implicit val scheduler: Scheduler = Scheduler.Implicits.global

  val resolver = PlaceholderResolver.withoutExtractor()
  val engine = Engine.withStepTitleResolver(resolver)
}
| OlegIlyenko/cornichon | cornichon-core/src/test/scala/com/github/agourlay/cornichon/steps/StepUtilSpec.scala | Scala | apache-2.0 | 469 |
package controller
import skinny._
import skinny.validator._
import model.{ Gender, Employee }
/** Skinny resource controller for Employee CRUD.
  *
  * Create and update share identical validation rules and strong parameters,
  * so both are factored into single private definitions instead of being
  * duplicated per action.
  */
class EmployeesController extends SkinnyResource with ApplicationController {
  protectFromForgery()

  override def model = Employee
  override def resourcesName = "employees"
  override def resourceName = "employee"
  override def resourcesBasePath = s"/${toSnakeCase(resourcesName)}"
  override def useSnakeCasedParamKeys = true
  override def viewsDirectoryPath = s"/${resourcesName}"

  // Validation rules shared by createForm and updateForm.
  private def employeeValidationForm(employeeParams: Params) = validation(employeeParams,
    paramKey("first_name") is required & maxLength(512),
    paramKey("middle_name") is required & maxLength(512),
    paramKey("last_name") is required & maxLength(512),
    paramKey("gender") is required & numeric & intValue,
    paramKey("started_employment") is dateFormat,
    paramKey("left_employment") is dateFormat,
    paramKey("other_detail") is maxLength(512)
  )

  // Strong parameters shared by create and update.
  private val employeeStrongParameters = Seq(
    "first_name" -> ParamType.String,
    "middle_name" -> ParamType.String,
    "last_name" -> ParamType.String,
    "gender" -> ParamType.Int,
    "started_employment" -> ParamType.LocalDate,
    "left_employment" -> ParamType.LocalDate,
    "other_detail" -> ParamType.String
  )

  override def createParams = Params(params).withDate("started_employment").withDate("left_employment")
  override def createForm = employeeValidationForm(createParams)
  override def createFormStrongParameters = employeeStrongParameters

  // Ensures `key` is present (as null) when absent from the request, so an
  // update clears the stored date instead of silently keeping the old value.
  private def withNullDefault(p: Params, key: String): Params =
    p.selectDynamic(key) match {
      case Some(_) => p
      case None => Params(p.underlying + (key -> null))
    }

  override def updateParams = {
    val p = Params(params).withDate("started_employment").withDate("left_employment")
    withNullDefault(withNullDefault(p, "started_employment"), "left_employment")
  }
  override def updateForm = employeeValidationForm(updateParams)
  override def updateFormStrongParameters = employeeStrongParameters

  // Gender options are needed by every view that renders the employee form.
  beforeAction(only = Seq('index, 'indexWithSlash, 'new, 'create, 'createWithSlash, 'edit, 'update)) {
    set("genderElements", Gender.all())
  }
}
| grimrose/skinny-employee-schedule-sample | src/main/scala/controller/EmployeesController.scala | Scala | mit | 2,715 |
package com.sksamuel.scrimage
import org.scalatest.{WordSpec, Matchers}
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
/** Regression test: many concurrent scale operations on the same image must
  * all complete rather than deadlocking.
  */
class ScalingDeadlockTest extends WordSpec with Matchers {

  import scala.concurrent.ExecutionContext.Implicits.global

  // Single shared source image used by every concurrent scaling task.
  val in = Image(getClass.getResourceAsStream("/com/sksamuel/scrimage/bird.jpg"))

  "image scale" should {
    "not deadlock on multiple concurrent scales" in {
      // Launch 50 scaling tasks concurrently against the same image.
      val tasks = IndexedSeq.fill(50)(Future(in.scaleTo(200, 200)))
      val images = Await.result(Future.sequence(tasks), 1.minute)
      images.size shouldBe 50
    }
  }
}
| davenatx/scrimage | scrimage-core/src/test/scala/com/sksamuel/scrimage/ScalingDeadlockTest.scala | Scala | apache-2.0 | 661 |
package cn.hjmao.rosalind.hamm
/** Pure Hamming-distance computation, extracted so it is testable without
  * touching the filesystem.
  */
object Hamming {

  /** Number of positions at which `a` and `b` differ.
    *
    * NOTE: `zip` truncates to the shorter string, whereas the previous
    * index-based version threw on a shorter second string; Rosalind inputs
    * are guaranteed equal-length so the observable result is unchanged.
    */
  def distance(a: String, b: String): Int =
    a.zip(b).count { case (x, y) => x != y }
}

object Main extends App {
  // Reads the two DNA strings from the dataset and prints their Hamming distance.
  val lines = scala.io.Source.fromFile("dataset.txt").getLines().toList
  println(Hamming.distance(lines(0), lines(1)))
}
| RepoCastle/rosalind | src/cn/hjmao/rosalind/hamm/answer.scala | Scala | apache-2.0 | 299 |
package lila.mod
import akka.actor.ActorSelection
import lila.analyse.{ Analysis, AnalysisRepo }
import lila.db.BSON.BSONJodaDateTimeHandler
import lila.db.Types.Coll
import lila.evaluation.Statistics
import lila.evaluation.{ AccountAction, Analysed, GameAssessment, PlayerAssessment, PlayerAggregateAssessment, PlayerFlags, PlayerAssessments, Assessible }
import lila.game.{ Game, Player, GameRepo, Source, Pov }
import lila.user.{ User, UserRepo }
import org.joda.time.DateTime
import reactivemongo.bson._
import scala.concurrent._
import scala.util.Random
import chess.Color
/** Computes, persists and aggregates cheat-detection assessments for games and
  * players, and triggers mod/report actions from the aggregate result.
  */
final class AssessApi(
    collAssessments: Coll,
    logApi: ModlogApi,
    modApi: ModApi,
    reporter: ActorSelection,
    analyser: ActorSelection,
    userIdsSharingIp: String => Fu[List[String]]) {

  import PlayerFlags.playerFlagsBSONHandler

  private implicit val playerAssessmentBSONhandler = Macros.handler[PlayerAssessment]

  /** Upserts a single per-game, per-color assessment. */
  def createPlayerAssessment(assessed: PlayerAssessment) =
    collAssessments.update(BSONDocument("_id" -> assessed._id), assessed, upsert = true).void

  def getPlayerAssessmentById(id: String) =
    collAssessments.find(BSONDocument("_id" -> id))
      .one[PlayerAssessment]

  /** Most recent `nb` assessments for a user. */
  def getPlayerAssessmentsByUserId(userId: String, nb: Int = 100) =
    collAssessments.find(BSONDocument("userId" -> userId))
      .sort(BSONDocument("date" -> -1))
      .cursor[PlayerAssessment]()
      .collect[List](nb)

  // Assessment ids are of the form "<gameId>/<color>".
  def getResultsByGameIdAndColor(gameId: String, color: Color) =
    getPlayerAssessmentById(gameId + "/" + color.name)

  def getGameResultsById(gameId: String) =
    getResultsByGameIdAndColor(gameId, Color.White) zip
      getResultsByGameIdAndColor(gameId, Color.Black) map {
        a => PlayerAssessments(a._1, a._2)
      }

  /** Aggregates a user's assessments together with related users sharing their
    * IP and any of those already marked as engines. Returns None when the user
    * is missing or has no assessed games.
    */
  def getPlayerAggregateAssessment(userId: String, nb: Int = 100): Fu[Option[PlayerAggregateAssessment]] = {
    val relatedUsers = userIdsSharingIp(userId)
    UserRepo.byId(userId) zip
      getPlayerAssessmentsByUserId(userId, nb) zip
      relatedUsers zip
      (relatedUsers flatMap UserRepo.filterByEngine) map {
        case (((Some(user), assessedGamesHead :: assessedGamesTail), relatedUs), relatedCheaters) =>
          Some(PlayerAggregateAssessment(
            user,
            assessedGamesHead :: assessedGamesTail,
            relatedUs,
            relatedCheaters))
        case _ => none
      }
  }

  def withGames(pag: PlayerAggregateAssessment): Fu[PlayerAggregateAssessment.WithGames] =
    GameRepo games pag.playerAssessments.map(_.gameId) map {
      PlayerAggregateAssessment.WithGames(pag, _)
    }

  def getPlayerAggregateAssessmentWithGames(userId: String, nb: Int = 100): Fu[Option[PlayerAggregateAssessment.WithGames]] =
    getPlayerAggregateAssessment(userId, nb) flatMap {
      case None => fuccess(none)
      case Some(pag) => withGames(pag).map(_.some)
    }

  /** Re-runs assessment on a user's recent assessable games that already have
    * an analysis, then re-assesses the user.
    */
  def refreshAssessByUsername(username: String): Funit = withUser(username) { user =>
    (GameRepo.gamesForAssessment(user.id, 100) flatMap { gs =>
      (gs map { g =>
        AnalysisRepo.doneById(g.id) flatMap {
          case Some(a) => onAnalysisReady(g, a, false)
          case _ => funit
        }
      }).sequenceFu.void
    }) >> assessUser(user.id)
  }

  /** Called when a game's computer analysis is complete: decides whether the
    * game is worth assessing, stores per-color assessments, and optionally
    * re-assesses both players.
    */
  def onAnalysisReady(game: Game, analysis: Analysis, thenAssessUser: Boolean = true): Funit = {
    def consistentMoveTimes(game: Game)(player: Player) = Statistics.consistentMoveTimes(Pov(game, player))
    val shouldAssess =
      if (!game.source.exists(assessableSources.contains)) false
      else if (game.players.exists(_.hasSuspiciousHoldAlert)) true
      else if (game.isCorrespondence) false
      else if (game.players exists consistentMoveTimes(game)) true
      else if (game.playedTurns < 40) false
      else if (game.mode.casual) false
      else true

    shouldAssess.?? {
      val assessible = Assessible(Analysed(game, analysis))
      createPlayerAssessment(assessible playerAssessment chess.White) >>
        createPlayerAssessment(assessible playerAssessment chess.Black)
    } >> ((shouldAssess && thenAssessUser) ?? {
      game.whitePlayer.userId.??(assessUser) >> game.blackPlayer.userId.??(assessUser)
    })
  }

  /** Maps the aggregate assessment to a concrete action: auto-mark as engine,
    * file a cheater report, or clear the user.
    */
  def assessUser(userId: String): Funit =
    getPlayerAggregateAssessment(userId) flatMap {
      case Some(playerAggregateAssessment) => playerAggregateAssessment.action match {
        case AccountAction.Engine | AccountAction.EngineAndBan =>
          modApi.autoAdjust(userId)
        case AccountAction.Report =>
          reporter ! lila.hub.actorApi.report.Cheater(userId, playerAggregateAssessment.reportText(3))
          funit
        case AccountAction.Nothing =>
          reporter ! lila.hub.actorApi.report.Clean(userId)
          funit
      }
      // BUGFIX: was `case none =>`, a lowercase variable pattern that matched
      // anything rather than the None constructor.
      case None => funit
    }

  private val assessableSources: Set[Source] = Set(Source.Lobby, Source.Tournament)

  /** Called when a game finishes: applies a series of heuristics to decide
    * whether to request an automatic computer analysis of the game.
    */
  def onGameReady(game: Game, white: User, black: User): Funit = {

    def manyBlurs(player: Player) =
      (player.blurs.toDouble / game.playerMoves(player.color)) >= 0.7

    def winnerGreatProgress(player: Player): Boolean = {
      game.winner ?? (player ==)
    } && game.perfType ?? { perfType =>
      player.color.fold(white, black).perfs(perfType).progress >= 140
    }

    def noFastCoefVariation(player: Player): Option[Double] =
      Statistics.noFastMoves(Pov(game, player)) ?? Statistics.moveTimeCoefVariation(Pov(game, player))

    def winnerUserOption = game.winnerColor.map(_.fold(white, black))
    def winnerNbGames = for {
      user <- winnerUserOption
      perfType <- game.perfType
    } yield user.perfs(perfType).nb

    // Low move-time variation is suspicious; between 0.45 and 0.5 only half of
    // the games are flagged (random sampling).
    def suspCoefVariation(c: Color) = {
      val x = noFastCoefVariation(game player c)
      x.filter(_ < 0.45) orElse x.filter(_ < 0.5).ifTrue(Random.nextBoolean)
    }
    val whiteSuspCoefVariation = suspCoefVariation(chess.White)
    val blackSuspCoefVariation = suspCoefVariation(chess.Black)

    val shouldAnalyse: Option[String] =
      if (!game.analysable) none
      else if (!game.source.exists(assessableSources.contains)) none
      // give up on correspondence games
      else if (game.isCorrespondence) none
      // stop here for short games
      else if (game.playedTurns < 36) none
      // stop here for long games
      else if (game.playedTurns > 90) none
      // stop here for casual games
      else if (!game.mode.rated) none
      // someone is using a bot
      else if (game.players.exists(_.hasSuspiciousHoldAlert)) "Hold alert".some
      // white has consistent move times
      else if (whiteSuspCoefVariation.isDefined) whiteSuspCoefVariation.map(x => s"White Move times ~ $x")
      // black has consistent move times
      else if (blackSuspCoefVariation.isDefined) blackSuspCoefVariation.map(x => s"Black Move times ~ $x")
      // don't analyse other bullet games
      else if (game.speed == chess.Speed.Bullet) none
      // someone blurs a lot
      else if (game.players exists manyBlurs) "Blurs".some
      // the winner shows a great rating progress
      else if (game.players exists winnerGreatProgress) "Winner progress".some
      // analyse some tourney games
      else if (game.isTournament) Random.nextInt(5) == 0 option "Tourney random"
      // analyse new player games
      else if (winnerNbGames.??(30 >) && Random.nextInt(2) == 0) "New winner".some
      else none

    shouldAnalyse foreach { reason =>
      // println(s"[autoanalyse] ${game.id} $reason")
      analyser ! lila.hub.actorApi.ai.AutoAnalyse(game.id)
    }

    funit
  }

  private def withUser[A](username: String)(op: User => Fu[A]): Fu[A] =
    UserRepo named username flatten "[mod] missing user " + username flatMap op

}
| pavelo65/lila | modules/mod/src/main/AssessApi.scala | Scala | mit | 7,666 |
package org.finra.datagenerator.scaffolding.config
/** A configuration value paired with its definition.
  *
  * @param conf the definition (provides the default value and the priority)
  * @param value the explicitly set value, if any
  */
case class Config[+T](conf: ConfigDefinition[T], value: Option[T]) extends Comparable[Config[_]] {

  // NOTE(review): true when no explicit value is set, i.e. the definition's
  // default WOULD be used — the name reads inverted; confirm intent with callers.
  def isNotDefault = value.isEmpty

  /** The explicit value if set, otherwise the definition's default.
    *
    * @throws NoSuchElementException if neither a value nor a default exists
    */
  def getValue(): T =
    value.orElse(conf.default).getOrElse(
      throw new NoSuchElementException(s"no value or default available for $conf"))

  /** Orders configs by the priority of their definitions. */
  override def compareTo(o: Config[_]): Int = conf.priority.compare(o.conf.priority)
}
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package breeze;
package serialization;
/** Reads a value spanning a fixed number of cells from a table row.
  *
  * `size` is the number of cells the reader consumes.
  *
  * @author dramage
  */
trait TableMultiCellReadable[@specialized V] extends Readable[TableRowReader,V] {
  def size : Int
}
/** Lowest-priority fallback: any single-cell readable value can be read as a
  * multi-cell value consuming exactly one cell.
  *
  * @author dramage
  */
trait LowPriorityTableMultiCellReadableImplicits {
  implicit def anyTableCellReadable[V](implicit rc : TableCellReadable[V])
  : TableMultiCellReadable[V] = new TableMultiCellReadable[V] {
    override def size = 1
    override def read(row : TableRowReader) = {
      val rv = rc.read(row.next)
      // A single-cell value must exhaust its (sliced) row.
      require(!row.hasNext, "Wrong number of cells in row.")
      rv
    }
  }
}
/** Tuple instances for [[TableMultiCellReadable]].
  *
  * BUGFIX: `size` was hard-coded to the tuple arity, which is wrong for nested
  * tuples — an outer reader slices `row.take(ra.size)` cells for a component,
  * so a component such as `((A,B),(C,D))` that consumes 4 cells was only given
  * 2. `size` is now the sum of the component sizes, which is identical to the
  * arity for tuples of single-cell values (backward compatible).
  */
object TableMultiCellReadable extends LowPriorityTableMultiCellReadableImplicits {
  type Input = TableRowReader

  implicit def forTuple2[A,B]
  (implicit ra : TableMultiCellReadable[A], rb : TableMultiCellReadable[B])
  : TableMultiCellReadable[(A,B)] = new TableMultiCellReadable[(A,B)] {
    override def size = ra.size + rb.size
    override def read(row : Input) =
      (ra.read(row.take(ra.size)),
       rb.read(row.take(rb.size)))
  }

  implicit def forTuple3[A,B,C]
  (implicit ra : TableMultiCellReadable[A], rb : TableMultiCellReadable[B],
   rc : TableMultiCellReadable[C])
  : TableMultiCellReadable[(A,B,C)] = new TableMultiCellReadable[(A,B,C)] {
    override def size = ra.size + rb.size + rc.size
    override def read(row : Input) =
      (ra.read(row.take(ra.size)),
       rb.read(row.take(rb.size)),
       rc.read(row.take(rc.size)))
  }

  implicit def forTuple4[A,B,C,D]
  (implicit ra : TableMultiCellReadable[A], rb : TableMultiCellReadable[B],
   rc : TableMultiCellReadable[C], rd : TableMultiCellReadable[D])
  : TableMultiCellReadable[(A,B,C,D)] = new TableMultiCellReadable[(A,B,C,D)] {
    override def size = ra.size + rb.size + rc.size + rd.size
    override def read(row : Input) =
      (ra.read(row.take(ra.size)),
       rb.read(row.take(rb.size)),
       rc.read(row.take(rc.size)),
       rd.read(row.take(rd.size)))
  }

  implicit def forTuple5[A,B,C,D,E]
  (implicit ra : TableMultiCellReadable[A], rb : TableMultiCellReadable[B],
   rc : TableMultiCellReadable[C], rd : TableMultiCellReadable[D], re : TableMultiCellReadable[E])
  : TableMultiCellReadable[(A,B,C,D,E)] = new TableMultiCellReadable[(A,B,C,D,E)] {
    override def size = ra.size + rb.size + rc.size + rd.size + re.size
    override def read(row : Input) =
      (ra.read(row.take(ra.size)),
       rb.read(row.take(rb.size)),
       rc.read(row.take(rc.size)),
       rd.read(row.take(rd.size)),
       re.read(row.take(re.size)))
  }
}
/** Writes a value spanning a fixed number of cells to a table row.
  *
  * `size` is the number of cells the writer emits.
  *
  * @author dramage
  */
trait TableMultiCellWritable[V] extends Writable[TableRowWriter, V] {
  def size : Int
}
/** Lowest-priority fallback: any single-cell writable value can be written as
  * a multi-cell value occupying exactly one cell.
  *
  * @author dramage
  */
trait LowPriorityTableMultiCellWritableImplicits {
  implicit def anyTableCellWritable[V](implicit wc : TableCellWritable[V])
  : TableMultiCellWritable[V] = new TableMultiCellWritable[V] {
    override def size = 1
    def write(writer : TableRowWriter, value : V) =
      wc.write(writer.next, value)
  }
}
/** Tuple instances for [[TableMultiCellWritable]].
  *
  * For consistency with the readable side, `size` is the sum of the component
  * sizes rather than the tuple arity, so nested tuples report the true number
  * of cells they emit (identical to the arity for single-cell components).
  */
object TableMultiCellWritable extends LowPriorityTableMultiCellWritableImplicits {
  type Output = TableRowWriter

  implicit def forTuple2[A,B]
  (implicit wa : TableMultiCellWritable[A],
   wb : TableMultiCellWritable[B])
  : TableMultiCellWritable[(A,B)] = new TableMultiCellWritable[(A,B)] {
    override def size = wa.size + wb.size
    def write(writer : Output, v : (A,B)) = {
      wa.write(writer, v._1)
      wb.write(writer, v._2)
    }
  }

  implicit def forTuple3[A,B,C]
  (implicit wa : TableMultiCellWritable[A],
   wb : TableMultiCellWritable[B],
   wc : TableMultiCellWritable[C])
  : TableMultiCellWritable[(A,B,C)] = new TableMultiCellWritable[(A,B,C)] {
    override def size = wa.size + wb.size + wc.size
    def write(writer : Output, v : (A,B,C)) = {
      wa.write(writer, v._1)
      wb.write(writer, v._2)
      wc.write(writer, v._3)
    }
  }

  implicit def forTuple4[A,B,C,D]
  (implicit wa : TableMultiCellWritable[A],
   wb : TableMultiCellWritable[B],
   wc : TableMultiCellWritable[C],
   wd : TableMultiCellWritable[D])
  : TableMultiCellWritable[(A,B,C,D)] = new TableMultiCellWritable[(A,B,C,D)] {
    override def size = wa.size + wb.size + wc.size + wd.size
    def write(writer : Output, v : (A,B,C,D)) = {
      wa.write(writer, v._1)
      wb.write(writer, v._2)
      wc.write(writer, v._3)
      wd.write(writer, v._4)
    }
  }

  implicit def forTuple5[A,B,C,D,E]
  (implicit wa : TableMultiCellWritable[A],
   wb : TableMultiCellWritable[B],
   wc : TableMultiCellWritable[C],
   wd : TableMultiCellWritable[D],
   we : TableMultiCellWritable[E])
  : TableMultiCellWritable[(A,B,C,D,E)] = new TableMultiCellWritable[(A,B,C,D,E)] {
    override def size = wa.size + wb.size + wc.size + wd.size + we.size
    def write(writer : Output, v : (A,B,C,D,E)) = {
      wa.write(writer, v._1)
      wb.write(writer, v._2)
      wc.write(writer, v._3)
      wd.write(writer, v._4)
      we.write(writer, v._5)
    }
  }
}
| tjhunter/scalanlp-core | process/src/main/scala/breeze/serialization/TableMultiCellSerialization.scala | Scala | apache-2.0 | 5,515 |
package org.joda.time.convert
import java.util.{Calendar, GregorianCalendar}
import org.joda.time.chrono.{
BuddhistChronology,
GJChronology,
GregorianChronology,
ISOChronology,
JulianChronology
}
import org.joda.time.{Chronology, DateTimeZone}
object CalendarConverter {
  // Shared singleton instance; the converter itself holds no state.
  val INSTANCE = new CalendarConverter()
}
/** Converts `java.util.Calendar` instances to instants and partials,
  * selecting a chronology that matches the calendar system in use.
  */
class CalendarConverter
    extends AbstractConverter
    with InstantConverter
    with PartialConverter {

  /** Uses the supplied chronology if non-null; otherwise derives the zone from
    * the calendar's time zone (falling back to the default zone) and delegates
    * to the zone-based overload.
    */
  override def getChronology(`object`: AnyRef,
                             chrono: Chronology): Chronology = {
    if (chrono != null) {
      chrono
    } else {
      val cal = `object`.asInstanceOf[Calendar]
      val zone =
        try DateTimeZone.forTimeZone(cal.getTimeZone)
        catch {
          case _: IllegalArgumentException => DateTimeZone.getDefault
        }
      getChronology(cal, zone)
    }
  }

  /** Maps the calendar system onto a chronology: Buddhist calendars first
    * (checked by class name, and before the GregorianCalendar case to preserve
    * the original precedence), then Gregorian/Julian/GJ depending on the
    * Gregorian cutover, and ISO for anything else.
    */
  override def getChronology(`object`: AnyRef,
                             zone: DateTimeZone): Chronology =
    `object` match {
      case b if b.getClass.getName.endsWith(".BuddhistCalendar") =>
        BuddhistChronology.getInstance(zone)
      case gc: GregorianCalendar =>
        val cutover = gc.getGregorianChange.getTime
        if (cutover == Long.MinValue) {
          // Cutover pushed to -infinity: proleptic Gregorian.
          GregorianChronology.getInstance(zone)
        } else if (cutover == Long.MaxValue) {
          // Cutover pushed to +infinity: pure Julian.
          JulianChronology.getInstance(zone)
        } else {
          GJChronology.getInstance(zone, cutover, 4)
        }
      case _ =>
        ISOChronology.getInstance(zone)
    }

  /** Millis since the epoch, taken directly from the calendar's time. */
  override def getInstantMillis(`object`: AnyRef, chrono: Chronology): Long =
    `object`.asInstanceOf[Calendar].getTime.getTime

  def getSupportedType(): Class[_] = classOf[Calendar]
}
| mdedetrich/soda-time | shared/src/main/scala/org/joda/time/convert/CalendarConverter.scala | Scala | bsd-2-clause | 1,791 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.index.attribute
import java.nio.charset.StandardCharsets
import com.typesafe.scalalogging.LazyLogging
import org.geotools.factory.Hints
import org.locationtech.geomesa.filter.{Bounds, FilterHelper, FilterValues, filterToString}
import org.locationtech.geomesa.index.geotools.GeoMesaDataStoreFactory.GeoMesaDataStoreConfig
import org.locationtech.geomesa.index.index.IndexKeySpace
import org.locationtech.geomesa.index.index.IndexKeySpace._
import org.locationtech.geomesa.index.utils.Explainer
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.index.ByteArrays
import org.locationtech.geomesa.utils.index.ByteArrays.{OneByteArray, ZeroByteArray}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
// Singleton instance of the attribute index key space.
object AttributeIndexKeySpace extends AttributeIndexKeySpace
trait AttributeIndexKeySpace extends IndexKeySpace[AttributeIndexValues[Any], AttributeIndexKey] with LazyLogging {
import org.locationtech.geomesa.utils.geotools.RichAttributeDescriptors.RichAttributeDescriptor
import scala.collection.JavaConverters._
override def supports(sft: SimpleFeatureType): Boolean =
sft.getAttributeDescriptors.asScala.exists(_.isIndexed)
override def indexKeyByteLength: Int =
throw new IllegalArgumentException("Attribute key space has variable length index keys")
override def toIndexKey(sft: SimpleFeatureType, lenient: Boolean): SimpleFeature => Seq[AttributeIndexKey] = {
val indexedAttributes =
SimpleFeatureTypes.getSecondaryIndexedAttributes(sft).map(d => (sft.indexOf(d.getName), d.isList))
(feature) => indexedAttributes.flatMap { case (i, list) =>
AttributeIndexKey.encodeForIndex(feature.getAttribute(i), list).map(v => AttributeIndexKey(i.toShort, v))
}
}
override def toIndexKeyBytes(sft: SimpleFeatureType, lenient: Boolean): ToIndexKeyBytes = {
val indexedAttributes = SimpleFeatureTypes.getSecondaryIndexedAttributes(sft).map { d =>
val i = sft.indexOf(d.getName)
(i, AttributeIndexKey.indexToBytes(i), d.isList)
}
(prefix, feature, suffix) => {
val baseLength = prefix.map(_.length).sum + suffix.length + 3 // 2 for attributed i, 1 for null byte
indexedAttributes.flatMap { case (idx, idxBytes, list) =>
AttributeIndexKey.encodeForIndex(feature.getAttribute(idx), list).map { encoded =>
val value = encoded.getBytes(StandardCharsets.UTF_8)
// create the byte array - allocate a single array up front to contain everything
val bytes = Array.ofDim[Byte](baseLength + value.length)
var i = 0
prefix.foreach { p => System.arraycopy(p, 0, bytes, i, p.length); i += p.length }
bytes(i) = idxBytes(0)
i += 1
bytes(i) = idxBytes(1)
i += 1
System.arraycopy(value, 0, bytes, i, value.length)
i += value.length
bytes(i) = ByteArrays.ZeroByte
System.arraycopy(suffix, 0, bytes, i + 1, suffix.length)
bytes
}
}
}
}
override def getIndexValues(sft: SimpleFeatureType, filter: Filter, explain: Explainer): AttributeIndexValues[Any] = {
val attribute = {
val names = FilterHelper.propertyNames(filter, sft)
require(names.lengthCompare(1) == 0,
s"Couldn't extract single attribute name from filter '${filterToString(filter)}'")
names.head
}
val i = sft.indexOf(attribute)
require(i != -1, s"Attribute '$attribute' from filter '${filterToString(filter)}' does not exist in '$sft'")
val descriptor = sft.getDescriptor(i)
val binding = if (descriptor.isList) { descriptor.getListType() } else { descriptor.getType.getBinding }
val bounds = FilterHelper.extractAttributeBounds(filter, attribute, binding)
if (bounds.isEmpty) {
// we have an attribute, but weren't able to extract any bounds
logger.warn(s"Unable to extract any attribute bounds from: ${filterToString(filter)}")
}
AttributeIndexValues(attribute, i, bounds.asInstanceOf[FilterValues[Bounds[Any]]], binding.asInstanceOf[Class[Any]])
}
override def getRanges(values: AttributeIndexValues[Any],
multiplier: Int): Iterator[ScanRange[AttributeIndexKey]] = {
import AttributeIndexKey.encodeForQuery
import org.locationtech.geomesa.filter.WILDCARD_SUFFIX
val AttributeIndexValues(_, i, fb, binding) = values
if (fb.isEmpty) {
// we have an attribute, but weren't able to extract any bounds... scan all values
Iterator.single(UnboundedRange(AttributeIndexKey(i.toShort, null, inclusive = false)))
} else {
fb.values.iterator.flatMap { bounds =>
bounds.bounds match {
case (None, None) => // not null
Iterator.single(UnboundedRange(AttributeIndexKey(i.toShort, null, inclusive = false)))
case (Some(lower), None) =>
val start = AttributeIndexKey(i.toShort, encodeForQuery(lower, binding), bounds.lower.inclusive)
Iterator.single(LowerBoundedRange(start))
case (None, Some(upper)) =>
val end = AttributeIndexKey(i.toShort, encodeForQuery(upper, binding), bounds.upper.inclusive)
Iterator.single(UpperBoundedRange(end))
case (Some(lower), Some(upper)) =>
if (lower == upper) {
val row = AttributeIndexKey(i.toShort, encodeForQuery(lower, binding), inclusive = true)
Iterator.single(SingleRowRange(row))
} else if (lower + WILDCARD_SUFFIX == upper) {
val row = AttributeIndexKey(i.toShort, encodeForQuery(lower, binding), inclusive = true)
Iterator.single(PrefixRange(row))
} else {
val start = AttributeIndexKey(i.toShort, encodeForQuery(lower, binding), bounds.lower.inclusive)
val end = AttributeIndexKey(i.toShort, encodeForQuery(upper, binding), bounds.upper.inclusive)
Iterator.single(BoundedRange(start, end))
}
}
}
}
}
override def getRangeBytes(ranges: Iterator[ScanRange[AttributeIndexKey]],
prefixes: Seq[Array[Byte]],
tier: Boolean): Iterator[ByteRange] = {
if (tier) {
getTieredRangeBytes(ranges, prefixes)
} else {
getStandardRangeBytes(ranges, prefixes)
}
}
override def useFullFilter(values: Option[AttributeIndexValues[Any]],
config: Option[GeoMesaDataStoreConfig],
hints: Hints): Boolean = {
// if we have an attribute, but weren't able to extract any bounds, values.values will be empty
values.forall(v => v.values.isEmpty || !v.values.precise)
}
private def getTieredRangeBytes(ranges: Iterator[ScanRange[AttributeIndexKey]],
prefixes: Seq[Array[Byte]]): Iterator[ByteRange] = {
import org.locationtech.geomesa.utils.index.ByteArrays.concat
val bytes = ranges.map {
case SingleRowRange(row) =>
SingleRowByteRange(lower(row))
case BoundedRange(lo, hi) =>
tieredUpper(hi) match {
case None => TieredByteRange(lower(lo), upper(hi), lowerTierable = true)
case Some(up) => BoundedByteRange(lower(lo), up)
}
case PrefixRange(prefix) =>
TieredByteRange(lower(prefix, prefix = true), upper(prefix, prefix = true))
case LowerBoundedRange(lo) =>
TieredByteRange(lower(lo), upper(AttributeIndexKey(lo.i, null)), lowerTierable = true)
case UpperBoundedRange(hi) =>
tieredUpper(hi) match {
case None => TieredByteRange(lower(AttributeIndexKey(hi.i, null)), upper(hi))
case Some(up) => TieredByteRange(lower(AttributeIndexKey(hi.i, null)), up, upperTierable = true)
}
case UnboundedRange(empty) =>
TieredByteRange(lower(empty), upper(empty))
}
if (prefixes.isEmpty) { bytes } else {
bytes.flatMap {
case SingleRowByteRange(row) => prefixes.map(p => SingleRowByteRange(concat(p, row)))
case BoundedByteRange(lo, hi) => prefixes.map(p => BoundedByteRange(concat(p, lo), concat(p, hi)))
case t: TieredByteRange => prefixes.map(p => t.copy(lower = concat(p, t.lower), upper = concat(p, t.upper)))
}
}
}
private def getStandardRangeBytes(ranges: Iterator[ScanRange[AttributeIndexKey]],
prefixes: Seq[Array[Byte]]): Iterator[ByteRange] = {
import org.locationtech.geomesa.utils.index.ByteArrays.concat
val bytes = ranges.map {
case SingleRowRange(row) => SingleRowByteRange(lower(row))
case BoundedRange(lo, hi) => BoundedByteRange(lower(lo), upper(hi))
case PrefixRange(prefix) => BoundedByteRange(lower(prefix, prefix = true), upper(prefix, prefix = true))
case LowerBoundedRange(lo) => BoundedByteRange(lower(lo), upper(AttributeIndexKey(lo.i, null)))
case UpperBoundedRange(hi) => BoundedByteRange(lower(AttributeIndexKey(hi.i, null)), upper(hi))
case UnboundedRange(empty) => BoundedByteRange(lower(empty), upper(empty))
case r => throw new IllegalArgumentException(s"Unexpected range type $r")
}
if (prefixes.isEmpty) { bytes } else {
bytes.flatMap {
case SingleRowByteRange(row) => prefixes.map(p => SingleRowByteRange(concat(p, row)))
case BoundedByteRange(lo, hi) => prefixes.map(p => BoundedByteRange(concat(p, lo), concat(p, hi)))
}
}
}
/**
* Gets a lower range bound for an attribute value. The bound can be used with additional tiering or not
*
* @param key attribute value
* @param prefix if this is a prefix scan or not
* @return
*/
private def lower(key: AttributeIndexKey, prefix: Boolean = false): Array[Byte] = {
  val index = AttributeIndexKey.indexToBytes(key.i)
  key.value match {
    case null => index
    case value =>
      val valueBytes = value.getBytes(StandardCharsets.UTF_8)
      if (prefix) {
        // note: inclusive doesn't make sense for prefix ranges
        ByteArrays.concat(index, valueBytes)
      } else {
        // inclusive bounds start right at the delimiter, exclusive ones just past it
        val delimiter = if (key.inclusive) { ZeroByteArray } else { OneByteArray }
        ByteArrays.concat(index, valueBytes, delimiter)
      }
  }
}
/**
* Gets an upper range bound for an attribute value. The bound is only suitable when there is no additional tiering
*
* @param key attribute value
* @param prefix if this is a prefix scan or not
* @return
*/
private def upper(key: AttributeIndexKey, prefix: Boolean = false): Array[Byte] = {
  val index = AttributeIndexKey.indexToBytes(key.i)
  key.value match {
    case null => ByteArrays.rowFollowingPrefix(index)
    case value =>
      val valueBytes = value.getBytes(StandardCharsets.UTF_8)
      if (prefix) {
        // get the row following the prefix, then get the next row
        // note: inclusiveness doesn't really make sense for prefix ranges
        ByteArrays.rowFollowingPrefix(ByteArrays.concat(index, valueBytes))
      } else if (key.inclusive) {
        // row following prefix, after the delimiter
        ByteArrays.concat(index, valueBytes, OneByteArray)
      } else {
        // exclude the row
        ByteArrays.concat(index, valueBytes, ZeroByteArray)
      }
  }
}
/**
* Gets an upper bound for a range that will be tiered. A bound will only be returned if it
* supports additional tiering.
*
* @param key attribute value
* @return
*/
private def tieredUpper(key: AttributeIndexKey): Option[Array[Byte]] = {
  // note: we can't tier exclusive end points, as we can't calculate previous rows
  val tierable = key.value != null && key.inclusive
  if (!tierable) { None } else {
    // match the final row, and count on remaining range to exclude the rest
    val index = AttributeIndexKey.indexToBytes(key.i)
    val valueBytes = key.value.getBytes(StandardCharsets.UTF_8)
    Some(ByteArrays.concat(index, valueBytes, ZeroByteArray))
  }
}
}
| ddseapy/geomesa | geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/index/attribute/AttributeIndexKeySpace.scala | Scala | apache-2.0 | 12,530 |
import play.api._
import models._
import anorm._
/** Play global settings: seeds the sample data whenever the application starts. */
object Global extends GlobalSettings {

  // delegate to the fixture loader; it is a no-op once data exists
  override def onStart(app: Application): Unit = InitialData.insert()
}
/**
 * Initial set of data to be imported
 * in the sample application.
 *
 * Seeds users, projects (with their member lists) and tasks. The insert only
 * runs when the User table is empty, so repeated startups do not duplicate data.
 */
object InitialData {

  // Parses a yyyy-MM-dd string into a java.util.Date; used for task due dates below.
  def date(str: String) = new java.text.SimpleDateFormat("yyyy-MM-dd").parse(str)

  def insert() = {

    // guard: only seed an empty database
    if(User.findAll.isEmpty) {

      Seq(
        User("guillaume@sample.com", "Guillaume Bort", "secret"),
        User("maxime@sample.com", "Maxime Dantec", "secret"),
        User("sadek@sample.com", "Sadek Drobi", "secret"),
        User("erwan@sample.com", "Erwan Loisant", "secret")
      ).foreach(User.create)

      // each project is paired with the emails of its team members
      Seq(
        Project(Some(1), "Play framework", "Play 2.0") -> Seq("guillaume@sample.com", "maxime@sample.com", "sadek@sample.com", "erwan@sample.com"),
        Project(Some(2), "Play framework", "Play 1.2.4") -> Seq("guillaume@sample.com", "erwan@sample.com"),
        Project(Some(3), "Play framework", "Website") -> Seq("guillaume@sample.com", "maxime@sample.com"),
        Project(Some(4), "Zenexity", "Secret project") -> Seq("guillaume@sample.com", "maxime@sample.com", "sadek@sample.com", "erwan@sample.com"),
        Project(Some(5), "Zenexity", "Playmate") -> Seq("maxime@sample.com"),
        Project(Some(6), "Personal", "Things to do") -> Seq("guillaume@sample.com"),
        Project(Some(7), "Zenexity", "Play samples") -> Seq("guillaume@sample.com", "maxime@sample.com"),
        Project(Some(8), "Personal", "Private") -> Seq("maxime@sample.com"),
        Project(Some(9), "Personal", "Private") -> Seq("guillaume@sample.com"),
        Project(Some(10), "Personal", "Private") -> Seq("erwan@sample.com"),
        Project(Some(11), "Personal", "Private") -> Seq("sadek@sample.com")
      ).foreach {
        case (project,members) => Project.create(project, members)
      }

      Seq(
        Task(None, "Todo", 1, "Fix the documentation", false, None, Some("guillaume@sample.com")),
        Task(None, "Urgent", 1, "Prepare the beta release", false, Some(date("2011-11-15")), None),
        Task(None, "Todo", 9, "Buy some milk", false, None, None),
        Task(None, "Todo", 2, "Check 1.2.4-RC2", false, Some(date("2011-11-18")), Some("guillaume@sample.com")),
        Task(None, "Todo", 7, "Finish zentask integration", true, Some(date("2011-11-15")), Some("maxime@sample.com")),
        Task(None, "Todo", 4, "Release the secret project", false, Some(date("2012-01-01")), Some("sadek@sample.com"))
      ).foreach(Task.create)
    }
  }
}
} | scoverage/scoverage-maven-samples | playframework/singlemodule/zentasks/zentasks-scala-2.10/app/Global.scala | Scala | apache-2.0 | 2,599 |
package com.arcusys.learn.liferay.update.version250
import com.arcusys.learn.liferay.LiferayClasses.LUpgradeProcess
import com.arcusys.learn.liferay.LogFactoryHelper
import com.arcusys.learn.liferay.update.version250.slide.SlideTableComponent
import com.arcusys.valamis.persistence.common.SlickDBInfo
import com.arcusys.valamis.web.configuration.ioc.Configuration
/**
 * Upgrade step 2415: inserts the built-in slide templates (text+image, text-only,
 * title+subtitle, video-only) and their layout elements into the default slide
 * set, i.e. the one bound to courseId == 0. Everything runs in one transaction.
 */
class DBUpdater2415(dbInfo: SlickDBInfo) extends LUpgradeProcess with SlideTableComponent {

  val logger = LogFactoryHelper.getLog(getClass)

  // upgrade-framework build number identifying this step
  override def getThreshold = 2415

  lazy val driver = dbInfo.slickDriver
  lazy val db = dbInfo.databaseDef

  import driver.simple._

  def this() = this(Configuration.inject[SlickDBInfo](None))

  override def doUpgrade(): Unit = {
    db.withTransaction { implicit session =>
      // the default slide set is the one with courseId == 0
      val slideSetId = slideSets.filter(_.courseId === 0L).first.id

      // insert the four template slides, keeping their generated ids for the elements below
      val textAndImageSlideId = slides returning slides.map(_.id) +=
        createSlideEntity("Text and image","text-and-image.png",slideSetId.get)
      val textSlideId = slides returning slides.map(_.id) +=
        createSlideEntity("Text only","text-only.png",slideSetId.get)
      val titleSlideId = slides returning slides.map(_.id) +=
        createSlideEntity("Title and subtitle","title-and-subtitle.png",slideSetId.get)
      val videoSlideId = slides returning slides.map(_.id) +=
        createSlideEntity("Video only","video-only.png",slideSetId.get)

      // all template elements, built as a single immutable Seq instead of
      // repeatedly appending to a var (same elements, same order as before)
      val elements = Seq(
        createSlideElementEntity("25", "80", "800", "80", "1",
          "<div><strong><span style=\\"font-size:48px\\">Page header</span></strong></div>",
          "text", textAndImageSlideId),
        createSlideElementEntity("160", "60", "320", "490", "2",
          "<div style=\\"text-align:left\\"><span style=\\"font-size:20px, font-weight: lighter\\">Page text</span></div>",
          "text", textAndImageSlideId),
        createSlideElementEntity("160", "420", "480", "490", "3", "", "image", textAndImageSlideId),
        createSlideElementEntity("25", "80", "800", "80", "1",
          "<div><span style=\\"font-size:48px\\">Page header</span></div>",
          "text", textSlideId),
        createSlideElementEntity("160", "80", "800", "490", "2",
          "<div style=\\"text-align:left\\"><span style=\\"font-size:20px\\">Page text</span></div>",
          "text", textSlideId),
        createSlideElementEntity("160", "80", "800", "80", "1",
          "<div><strong><span style=\\"font-size:48px\\">Page header</span></strong></div>",
          "text", titleSlideId),
        createSlideElementEntity("240", "80", "800", "80", "2",
          "<div><span style=\\"font-size:20px\\">Page subtitle</span></div>",
          "text", titleSlideId),
        createSlideElementEntity("25", "80", "800", "80", "1",
          "<div><span style=\\"font-size:48px\\">Page header</span></div>",
          "text", videoSlideId),
        createSlideElementEntity("160", "80", "800", "490", "2", "", "video", videoSlideId)
      )

      elements.foreach { element =>
        slideElements returning slideElements.map(_.id) insert element
      }
    }
  }

  /** Builds a template slide belonging to the given slide set. */
  private def createSlideEntity(title: String, bgImage: String, slideSetId: Long): Slide = {
    Slide(
      title = title,
      bgImage = Some(bgImage),
      slideSetId = slideSetId,
      isTemplate = true)
  }

  /** Builds a positioned slide element; coordinates and size are stored as strings. */
  private def createSlideElementEntity(top: String,
      left: String,
      width: String,
      height: String,
      zIndex: String,
      content: String,
      slideEntityType: String,
      slideId: Long): SlideElement = {
    SlideElement(
      top = top,
      left = left,
      width = width,
      height = height,
      zIndex = zIndex,
      content = content,
      slideEntityType = slideEntityType,
      slideId = slideId)
  }
}
} | arcusys/Valamis | learn-portlet/src/main/scala/com/arcusys/learn/liferay/update/version250/DBUpdater2415.scala | Scala | gpl-3.0 | 3,942 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package reflect
package internal
package pickling
import PickleFormat._
import scala.annotation.tailrec
import util.shortClassOfInstance
/** Maps the pickler's data model (symbols, types, trees, names, annotations)
 *  to the integer tags and subtags defined in [[PickleFormat]], which identify
 *  each entry's kind in the pickled byte stream.
 */
trait Translations {
  self: SymbolTable =>

  /** Whether a tree with the given pickle subtag carries a pickled symbol. */
  def isTreeSymbolPickled(code: Int): Boolean = (code: @annotation.switch) match {
    case PACKAGEtree | CLASStree | MODULEtree | VALDEFtree | DEFDEFtree | TYPEDEFtree | LABELtree => true
    case IMPORTtree | TEMPLATEtree | BINDtree | FUNCTIONtree | RETURNtree => true
    case APPLYDYNAMICtree | SUPERtree | THIStree | SELECTtree | IDENTtree => true
    case _ => false
  }

  /** This method should be equivalent to tree.hasSymbolField, but that method
   *  doesn't do us any good when we're unpickling because we need to know based
   *  on the Int tag - the tree doesn't exist yet. Thus, this method is documentation only.
   */
  def isTreeSymbolPickled(tree: Tree): Boolean = isTreeSymbolPickled(picklerSubTag(tree))

  // The ad hoc pattern matching of tuples out of AnyRefs is a
  // truly terrible idea. It reaches the height of its powers in
  // combination with scala's insistence on helpfully tupling
  // multiple arguments passed to a single-arg AnyRef.
  /** Top-level pickle tag for an arbitrary pickled entry. */
  final def picklerTag(ref: AnyRef): Int = ref match {
    case tp: Type => picklerTag(tp)
    case sym: Symbol => picklerTag(sym)
    case const: Constant => LITERAL + const.tag
    case _: Tree => TREE // its sub tag more precisely identifies it
    case _: TermName => TERMname
    case _: TypeName => TYPEname
    case _: ArrayAnnotArg => ANNOTARGARRAY // an array of annotation arguments
    case _: AnnotationInfo => ANNOTINFO // annotations on types (not linked to a symbol)
    case (_: Symbol, _: AnnotationInfo) => SYMANNOT // symbol annotations, i.e. on terms
    case (_: Symbol, _: List[_]) => CHILDREN // the direct subclasses of a sealed symbol
    case _: Modifiers => MODIFIERS
    case _ => throw new IllegalStateException(s"unpicklable entry ${shortClassOfInstance(ref)} $ref")
  }

  /** Local symbols only. The assessment of locality depends
   *  on convoluted conditions which depends in part on the root
   *  symbol being pickled, so it cannot be reproduced here.
   *  The pickler tags at stake are EXTMODCLASSref and EXTref.
   *  Those tags are never produced here - such symbols must be
   *  excluded prior to calling this method.
   */
  def picklerTag(sym: Symbol): Int = sym match {
    case NoSymbol => NONEsym
    case _: ClassSymbol => CLASSsym
    case _: TypeSymbol if sym.isAbstractType => TYPEsym
    case _: TypeSymbol => ALIASsym
    case _: TermSymbol if sym.isModule => MODULEsym
    case _: TermSymbol => VALsym
    case x => throw new MatchError(x)
  }

  /** Pickle tag for a type. Annotated types whose annotations are not
   *  statically pickled recurse into their underlying type (hence @tailrec).
   */
  @tailrec
  final def picklerTag(tpe: Type): Int = tpe match {
    case NoType => NOtpe
    case NoPrefix => NOPREFIXtpe
    case _: ThisType => THIStpe
    case _: SingleType => SINGLEtpe
    case _: SuperType => SUPERtpe
    case _: ConstantType => CONSTANTtpe
    case _: TypeBounds => TYPEBOUNDStpe
    case _: TypeRef => TYPEREFtpe
    case _: RefinedType => REFINEDtpe
    case _: ClassInfoType => CLASSINFOtpe
    case _: MethodType => METHODtpe
    case _: PolyType => POLYtpe
    case _: NullaryMethodType => POLYtpe // bad juju, distinct ints are not at a premium!
    case _: ExistentialType => EXISTENTIALtpe
    case StaticallyAnnotatedType(_, _) => ANNOTATEDtpe
    case _: AnnotatedType => picklerTag(tpe.underlying)
    case x => throw new MatchError(x)
  }

  /** Pickle subtag identifying the precise kind of a pickled tree. */
  def picklerSubTag(tree: Tree): Int = tree match {
    case EmptyTree => EMPTYtree
    case _: PackageDef => PACKAGEtree
    case _: ClassDef => CLASStree
    case _: ModuleDef => MODULEtree
    case _: ValDef => VALDEFtree
    case _: DefDef => DEFDEFtree
    case _: TypeDef => TYPEDEFtree
    case _: LabelDef => LABELtree
    case _: Import => IMPORTtree
    // case _: DocDef => DOCDEFtree
    case _: Template => TEMPLATEtree
    case _: Block => BLOCKtree
    case _: CaseDef => CASEtree
    case _: Alternative => ALTERNATIVEtree
    case _: Star => STARtree
    case _: Bind => BINDtree
    case _: UnApply => UNAPPLYtree
    case _: ArrayValue => ARRAYVALUEtree
    case _: Function => FUNCTIONtree
    case _: Assign => ASSIGNtree
    case _: If => IFtree
    case _: Match => MATCHtree
    case _: Return => RETURNtree
    case _: Try => TREtree // TREtree?
    case _: Throw => THROWtree
    case _: New => NEWtree
    case _: Typed => TYPEDtree
    case _: TypeApply => TYPEAPPLYtree
    case _: Apply => APPLYtree
    case _: ApplyDynamic => APPLYDYNAMICtree
    case _: Super => SUPERtree
    case _: This => THIStree
    case _: Select => SELECTtree
    case _: Ident => IDENTtree
    case _: Literal => LITERALtree
    case _: TypeTree => TYPEtree
    case _: Annotated => ANNOTATEDtree
    case _: SingletonTypeTree => SINGLETONTYPEtree
    case _: SelectFromTypeTree => SELECTFROMTYPEtree
    case _: CompoundTypeTree => COMPOUNDTYPEtree
    case _: AppliedTypeTree => APPLIEDTYPEtree
    case _: TypeBoundsTree => TYPEBOUNDStree
    case _: ExistentialTypeTree => EXISTENTIALTYPEtree
    case x => throw new MatchError(x)
  }
}
| scala/scala | src/reflect/scala/reflect/internal/pickling/Translations.scala | Scala | apache-2.0 | 6,649 |
package org.janzhou.LSH
/** Factory and similarity helpers for the locality-sensitive hashing classes. */
trait LSHHelper extends NumberConversions {

  private val rand = new scala.util.Random(System.nanoTime)

  /** A hyperplane LSH backed by `dim` uniformly random integer coordinates. */
  def hyperplane(dim:Int):HyperplaneLSH[Int] = new HyperplaneLSH(Array.fill(dim)(rand.nextInt))

  // Random hyperplane whose coordinates are uniform over the centered interval.
  private def seed(dim:Int, min:Int, max:Int):Iterable[Int] = {
    val span = max - min
    val half = span / 2
    Array.fill(dim)(rand.nextInt(span) - half).toIterable
  }

  // Random +1/-1 sign vector, derived from Gaussian draws.
  private def seed(dim:Int):Iterable[Int] =
    Array.fill(dim)(rand.nextGaussian()).map(g => if (g < 0) -1 else 1).toIterable

  /** Re-centers `data` around the midpoint of [min, max]. */
  def move(min:Int, max:Int, data:Iterable[Int]) = {
    val center = (max + min) / 2
    data.map(_ - center)
  }

  def forIntVector(dim:Int, min:Int, max:Int, repeating:Int):RepeatingLSH[Int] =
    new RepeatingLSH(new HyperplaneSignatureLSH(Array.fill(repeating)(seed(dim, min, max))))

  def forIntVector(dim:Int, repeating:Int):RepeatingLSH[Int] =
    new RepeatingLSH(new HyperplaneSignatureLSH(Array.fill(repeating)(seed(dim))))

  def forIntVectorSignature(dim:Int, size:Int):HyperplaneSignatureLSH[Int] =
    new HyperplaneSignatureLSH(Array.fill(size)(seed(dim)))

  /** Absolute value of the cosine similarity between two integer vectors. */
  def CosineSimilarity(a:Iterable[Int], b:Iterable[Int]):Double = {
    val normA = math.sqrt(Number.dot(a, a))
    val normB = math.sqrt(Number.dot(b, b))
    math.abs(Number.dot(a, b) / (normA * normB))
  }

  def forIntSetSignature(dim:Int, max:Int, prime:Int):MinSignatureLSH[Int] =
    new MinSignatureLSH(Array.fill(dim)((rand.nextInt(max), rand.nextInt(prime))), prime)

  /** Dice-style overlap: shared elements (counted from both sides) over combined size. */
  def SetSimilarity(a:Iterable[Int], b:Iterable[Int]):Double = {
    val aInB:Double = a.toStream.filter(b.toStream.contains(_)).length.toDouble
    val bInA:Double = b.toStream.filter(a.toStream.contains(_)).length.toDouble
    val combined:Double = ( a ++ b ).toStream.length.toDouble
    (aInB + bInA) / combined
  }
}
| janzhou/scala-lsh | src/main/scala/org/janzhou/lsh/LSHHelper.scala | Scala | mit | 1,926 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.datamap
import java.io.{File, IOException}
import java.text.SimpleDateFormat
import java.util
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import org.apache.commons.lang3.ArrayUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapreduce.{Job, TaskAttemptID, TaskType}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.spark.{CarbonInputMetrics, Partition, TaskContext}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.Decimal
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datamap.{DataMapStoreManager, Segment}
import org.apache.carbondata.core.datamap.dev.DataMapBuilder
import org.apache.carbondata.core.datastore.block.SegmentProperties
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.indexstore.blockletindex.BlockletDataMapFactory
import org.apache.carbondata.core.indexstore.SegmentPropertiesFetcher
import org.apache.carbondata.core.keygenerator.KeyGenerator
import org.apache.carbondata.core.keygenerator.mdkey.MultiDimKeyVarLengthGenerator
import org.apache.carbondata.core.metadata.datatype.{DataType, DataTypes}
import org.apache.carbondata.core.metadata.encoder.Encoding
import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, DataMapSchema, TableInfo}
import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn
import org.apache.carbondata.core.scan.wrappers.ByteArrayWrapper
import org.apache.carbondata.core.statusmanager.SegmentStatusManager
import org.apache.carbondata.core.util.{CarbonUtil, TaskMetricsMap}
import org.apache.carbondata.core.util.path.CarbonTablePath
import org.apache.carbondata.datamap.bloom.DataConvertUtil
import org.apache.carbondata.events.{BuildDataMapPostExecutionEvent, BuildDataMapPreExecutionEvent, OperationContext, OperationListenerBus}
import org.apache.carbondata.hadoop.{CarbonInputSplit, CarbonMultiBlockSplit, CarbonProjection, CarbonRecordReader}
import org.apache.carbondata.hadoop.api.{CarbonInputFormat, CarbonTableInputFormat}
import org.apache.carbondata.hadoop.readsupport.CarbonReadSupport
import org.apache.carbondata.spark.{RefreshResult, RefreshResultImpl}
import org.apache.carbondata.spark.rdd.{CarbonRDDWithTableInfo, CarbonSparkPartition}
import org.apache.carbondata.spark.util.SparkDataTypeConverterImpl
/**
* Helper object to rebuild the index DataMap
*/
/**
 * Helper object to rebuild the index DataMap
 */
object IndexDataMapRebuildRDD {

  private val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)

  /**
   * Rebuild the datamap for all existing data in the table.
   *
   * Fires the pre-build event, creates the per-segment datamap store
   * directories (only for segments that do not have one yet), runs the
   * rebuild RDD over those segments, cleans up the store directories of any
   * segment whose rebuild failed, then clears cached datamaps and fires the
   * post-build event.
   */
  def rebuildDataMap(
      sparkSession: SparkSession,
      carbonTable: CarbonTable,
      schema: DataMapSchema
  ): Unit = {
    val tableIdentifier = carbonTable.getAbsoluteTableIdentifier
    val segmentStatusManager = new SegmentStatusManager(tableIdentifier)
    val validAndInvalidSegments = segmentStatusManager.getValidAndInvalidSegments()
    val validSegments = validAndInvalidSegments.getValidSegments
    val indexedCarbonColumns = carbonTable.getIndexedColumns(schema)
    val operationContext = new OperationContext()
    val buildDataMapPreExecutionEvent = new BuildDataMapPreExecutionEvent(sparkSession,
      tableIdentifier,
      mutable.Seq[String](schema.getDataMapName))
    OperationListenerBus.getInstance().fireEvent(buildDataMapPreExecutionEvent, operationContext)

    // segment -> datamap store path, keeping only segments whose store does not exist yet
    val segments2DmStorePath = validSegments.asScala.map { segment =>
      val dataMapStorePath = CarbonTablePath.getDataMapStorePath(carbonTable.getTablePath,
        segment.getSegmentNo, schema.getDataMapName)
      segment -> dataMapStorePath
    }.filter(p => !FileFactory.isFileExist(p._2)).toMap

    segments2DmStorePath.foreach { case (_, dmPath) =>
      if (!FileFactory.mkdirs(dmPath, FileFactory.getFileType(dmPath))) {
        throw new IOException(
          s"Failed to create directory $dmPath for rebuilding datamap ${ schema.getDataMapName }")
      }
    }

    // run the rebuild; each task yields (taskId -> (segmentId, rebuildStatus))
    val status = new IndexDataMapRebuildRDD[String, (String, Boolean)](
      sparkSession,
      new RefreshResultImpl(),
      carbonTable.getTableInfo,
      schema.getDataMapName,
      indexedCarbonColumns.asScala.toArray,
      segments2DmStorePath.keySet
    ).collect

    // for failed segments, clean the result
    val failedSegments = status
      .find { case (taskId, (segmentId, rebuildStatus)) =>
        !rebuildStatus
      }
      .map { task =>
        val segmentId = task._2._1
        val dmPath = segments2DmStorePath.filter(p => p._1.getSegmentNo.equals(segmentId)).values
        val cleanResult = dmPath.map(p =>
          FileFactory.deleteAllCarbonFilesOfDir(FileFactory.getCarbonFile(p)))
        if (cleanResult.exists(!_)) {
          LOGGER.error(s"Failed to clean up datamap store for segment_$segmentId")
          false
        } else {
          true
        }
      }

    if (failedSegments.nonEmpty) {
      throw new Exception(s"Failed to refresh datamap ${ schema.getDataMapName }")
    }
    // drop cached datamaps so the rebuilt ones are reloaded on next access
    DataMapStoreManager.getInstance().clearDataMaps(tableIdentifier)

    val buildDataMapPostExecutionEvent = new BuildDataMapPostExecutionEvent(sparkSession,
      tableIdentifier)
    OperationListenerBus.getInstance().fireEvent(buildDataMapPostExecutionEvent, operationContext)
  }
}
/**
 * Read support that hands query rows back unchanged, except that values of
 * STRING-typed columns are normalized to java.lang.String in place.
 */
class OriginalReadSupport(dataTypes: Array[DataType]) extends CarbonReadSupport[Array[Object]] {

  // nothing to prepare
  override def initialize(carbonColumns: Array[CarbonColumn],
      carbonTable: CarbonTable): Unit = {
  }

  // mutate string columns in place and return the same array
  override def readRow(data: Array[Object]): Array[Object] = {
    var i = 0
    while (i < dataTypes.length) {
      if (dataTypes(i) == DataTypes.STRING && data(i) != null) {
        data(i) = data(i).toString
      }
      i += 1
    }
    data
  }

  override def close(): Unit = {
  }
}
/**
 * This class will generate row value which is raw bytes for the dimensions.
 * Dictionary dimensions are decoded to surrogate keys via a key generator;
 * measures are converted to the value forms the datamap builder expects.
 */
class RawBytesReadSupport(segmentProperties: SegmentProperties, indexColumns: Array[CarbonColumn])
  extends CarbonReadSupport[Array[Object]] {

  // decodes the packed dictionary key into surrogate values;
  // stays null when no index column is dictionary encoded
  var dimensionKeyGenerator: KeyGenerator = _
  // for the dictionary dimensions
  var indexCol2IdxInDictArray: Map[String, Int] = Map()
  // for the non dictionary dimensions
  var indexCol2IdxInNoDictArray: Map[String, Int] = Map()
  // for the measures
  var indexCol2IdxInMeasureArray: Map[String, Int] = Map()

  /**
   * rebuild process get data from query, if some columns added to table but not in this segment
   * it will be filled with default value and generate new key for dict dimension.
   * Here we use same way as `RowIdRestructureBasedRawResultCollector` to prepare
   * key generator to get surrogate value of dict column result.
   * So we do not need to make a fake mdk to split when adding row to datamap
   */
  def prepareKeyGenForDictIndexColumns(carbonTable: CarbonTable,
      dictIndexColumns: ListBuffer[CarbonColumn]): Unit = {

    val columnCardinality = new util.ArrayList[Integer](dictIndexColumns.length)
    val columnPartitioner = new util.ArrayList[Integer](dictIndexColumns.length)

    dictIndexColumns.foreach { col =>
      val dim = carbonTable.getDimensionByName(carbonTable.getTableName, col.getColName)
      val currentBlockDimension = segmentProperties.getDimensionFromCurrentBlock(dim)
      if (null != currentBlockDimension) {
        // column exists in this segment: reuse its recorded cardinality/partitioning
        columnCardinality.add(segmentProperties.getDimColumnsCardinality.apply(
          currentBlockDimension.getKeyOrdinal))
        columnPartitioner.add(segmentProperties.getDimensionPartitions.apply(
          currentBlockDimension.getKeyOrdinal
        ))
      } else {
        // column added after this segment was written: derive default cardinality
        columnPartitioner.add(1)
        if (col.hasEncoding(Encoding.DIRECT_DICTIONARY)) {
          columnCardinality.add(Integer.MAX_VALUE)
        } else {
          val defaultValue = col.getDefaultValue
          if (null != col.getDefaultValue) {
            columnCardinality.add(CarbonCommonConstants.DICTIONARY_DEFAULT_CARDINALITY + 1)
          } else {
            columnCardinality.add(CarbonCommonConstants.DICTIONARY_DEFAULT_CARDINALITY)
          }
        }
      }
    }

    if (!columnCardinality.isEmpty) {
      val latestColumnCardinality = ArrayUtils.toPrimitive(columnCardinality.toArray(
        new Array[Integer](columnCardinality.size)))
      val latestColumnPartitioner = ArrayUtils.toPrimitive(columnPartitioner.toArray(
        new Array[Integer](columnPartitioner.size)))
      val dimensionBitLength = CarbonUtil.getDimensionBitLength(
        latestColumnCardinality, latestColumnPartitioner)
      this.dimensionKeyGenerator = new MultiDimKeyVarLengthGenerator(dimensionBitLength)
    }
  }

  // Classifies each index column as dict dimension / no-dict dimension / measure
  // and records its position within the corresponding part of the query row.
  override def initialize(carbonColumns: Array[CarbonColumn],
      carbonTable: CarbonTable): Unit = {
    val dictIndexColumns = new ListBuffer[CarbonColumn]()

    // prepare index info to extract data from query result
    indexColumns.foreach { col =>
      if (col.isDimension) {
        val dim = carbonTable.getDimensionByName(carbonTable.getTableName, col.getColName)
        if (!dim.isGlobalDictionaryEncoding && !dim.isDirectDictionaryEncoding) {
          indexCol2IdxInNoDictArray =
            indexCol2IdxInNoDictArray + (col.getColName -> indexCol2IdxInNoDictArray.size)
        } else {
          dictIndexColumns.append(col)
          indexCol2IdxInDictArray =
            indexCol2IdxInDictArray + (col.getColName -> indexCol2IdxInDictArray.size)
        }
      } else {
        indexCol2IdxInMeasureArray =
          indexCol2IdxInMeasureArray + (col.getColName -> indexCol2IdxInMeasureArray.size)
      }
    }

    if (dictIndexColumns.size > 0) {
      prepareKeyGenForDictIndexColumns(carbonTable, dictIndexColumns)
    }
  }

  /**
   * input: all the dimensions are bundled in one ByteArrayWrapper in position 0,
   * then comes the measures one by one; last 3 elements are block/page/row id
   * output: all the dimensions and measures comes one after another
   */
  override def readRow(data: Array[Object]): Array[Object] = {
    var surrogatKeys = new Array[Long](0)
    if(null != dimensionKeyGenerator) {
      surrogatKeys = dimensionKeyGenerator.getKeyArray(
        data(0).asInstanceOf[ByteArrayWrapper].getDictionaryKey)
    }

    // fill return row from data
    val rtn = new Array[Object](indexColumns.length + 3)
    indexColumns.zipWithIndex.foreach { case (col, i) =>
      rtn(i) = if (indexCol2IdxInDictArray.contains(col.getColName)) {
        surrogatKeys(indexCol2IdxInDictArray(col.getColName)).toInt.asInstanceOf[Integer]
      } else if (indexCol2IdxInNoDictArray.contains(col.getColName)) {
        data(0).asInstanceOf[ByteArrayWrapper].getNoDictionaryKeyByIndex(
          indexCol2IdxInNoDictArray(col.getColName))
      } else {
        // measures start from 1
        val value = data(1 + indexCol2IdxInMeasureArray(col.getColName))
        if (null == value) {
          DataConvertUtil.getNullValueForMeasure(col.getDataType,
            col.getColumnSchema.getScale)
        } else if (DataTypes.isDecimal(col.getDataType)) {
          // In rebuild process, value is built for spark
          // here we need to convert it to java BigDecimal for carbon
          value.asInstanceOf[Decimal].toBigDecimal.bigDecimal
        } else {
          value
        }
      }
    }
    // copy the trailing block id / page id / row id through unchanged
    rtn(indexColumns.length) = data(data.length - 3)
    rtn(indexColumns.length + 1) = data(data.length - 2)
    rtn(indexColumns.length + 2) = data(data.length - 1)
    rtn
  }

  override def close(): Unit = {
  }
}
/**
 * RDD that scans the given segments of the table and feeds every row (with its
 * blocklet/page/row position) into a [[DataMapBuilder]]; each partition yields
 * a single (taskId -> (segmentId, success)) result.
 */
class IndexDataMapRebuildRDD[K, V](
    session: SparkSession,
    result: RefreshResult[K, V],
    @transient tableInfo: TableInfo,
    dataMapName: String,
    indexColumns: Array[CarbonColumn],
    segments: Set[Segment]
  ) extends CarbonRDDWithTableInfo[(K, V)](
    session.sparkContext, Nil, tableInfo.serialize()) {

  private val dataMapSchema = DataMapStoreManager.getInstance().getDataMapSchema(dataMapName)
  private val queryId = sparkContext.getConf.get("queryId", System.nanoTime() + "")
  // job tracker id derived from the current time, in Hadoop's expected format
  private val jobTrackerId: String = {
    val formatter = new SimpleDateFormat("yyyyMMddHHmm")
    formatter.format(new util.Date())
  }

  override def internalCompute(split: Partition, context: TaskContext): Iterator[(K, V)] = {
    val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
    val carbonTable = CarbonTable.buildFromTableInfo(getTableInfo)
    val dataMapFactory = DataMapManager.get().getDataMapProvider(
      carbonTable, dataMapSchema, session).getDataMapFactory
    var status = false
    val inputMetrics = new CarbonInputMetrics
    TaskMetricsMap.getInstance().registerThreadCallback()
    val inputSplit = split.asInstanceOf[CarbonSparkPartition].split.value
    val segmentId = inputSplit.getAllSplits.get(0).getSegment.getSegmentNo
    val segment = segments.find(p => p.getSegmentNo.equals(segmentId))
    // skip splits whose segment is not in the requested rebuild set
    if (segment.isDefined) {

      inputMetrics.initBytesReadCallback(context, inputSplit)

      val attemptId = new TaskAttemptID(jobTrackerId, id, TaskType.MAP, split.index, 0)
      val attemptContext = new TaskAttemptContextImpl(new Configuration(), attemptId)
      val format = createInputFormat(segment.get, attemptContext)

      val model = format.createQueryModel(inputSplit, attemptContext)
      // one query id per table
      model.setQueryId(queryId)
      model.setVectorReader(false)
      model.setRequiredRowId(true)

      var reader: CarbonRecordReader[Array[Object]] = null
      var refresher: DataMapBuilder = null
      try {
        val segmentPropertiesFetcher = DataMapStoreManager.getInstance().getDataMap(carbonTable,
          BlockletDataMapFactory.DATA_MAP_SCHEMA).getDataMapFactory
          .asInstanceOf[SegmentPropertiesFetcher]
        val segmentProperties = segmentPropertiesFetcher.getSegmentProperties(segment.get)

        // we use task name as shard name to create the folder for this datamap
        val shardName = CarbonTablePath.getShardName(inputSplit.getAllSplits.get(0).getBlockPath)
        refresher = dataMapFactory.createBuilder(segment.get, shardName, segmentProperties)
        refresher.initialize()

        // raw-bytes builders need the raw detail query and the matching read support
        model.setForcedDetailRawQuery(refresher.isIndexForCarbonRawBytes)
        val readSupport = if (refresher.isIndexForCarbonRawBytes) {
          new RawBytesReadSupport(segmentProperties, indexColumns)
        } else {
          new OriginalReadSupport(indexColumns.map(_.getDataType))
        }
        reader = new CarbonRecordReader[Array[Object]](model, readSupport, inputMetrics)
        reader.initialize(inputSplit, attemptContext)
        // skip clear datamap and we will do this after rebuild
        reader.setSkipClearDataMapAtClose(true)

        // track the current blocklet: a (page 0, row 0) position after the
        // first row signals the start of a new blocklet
        var blockletId = 0
        var firstRow = true
        while (reader.nextKeyValue()) {
          val rowWithPosition = reader.getCurrentValue
          val size = rowWithPosition.length
          val pageId = rowWithPosition(size - 2).asInstanceOf[Int]
          val rowId = rowWithPosition(size - 1).asInstanceOf[Int]

          if (!firstRow && pageId == 0 && rowId == 0) {
            // new blocklet started, increase blockletId
            blockletId = blockletId + 1
          } else {
            firstRow = false
          }
          refresher.addRow(blockletId, pageId, rowId, rowWithPosition)
        }

        refresher.finish()

        status = true
      } finally {
        // close both resources, logging (not rethrowing) close failures
        if (reader != null) {
          try {
            reader.close()
          } catch {
            case ex: Throwable =>
              LOGGER.error(ex, "Failed to close reader")
          }
        }

        if (refresher != null) {
          try {
            refresher.close()
          } catch {
            case ex: Throwable =>
              LOGGER.error(ex, "Failed to close index writer")
          }
        }
      }
    }

    // single-element iterator reporting this partition's outcome
    new Iterator[(K, V)] {
      var finished = false

      override def hasNext: Boolean = {
        !finished
      }

      override def next(): (K, V) = {
        finished = true
        result.getKey(split.index.toString, (segmentId, status))
      }
    }
  }

  // Builds a table input format restricted to one segment and projecting
  // only the indexed columns.
  private def createInputFormat(segment: Segment,
      attemptContext: TaskAttemptContextImpl) = {
    val format = new CarbonTableInputFormat[Object]
    val tableInfo1 = getTableInfo
    val conf = attemptContext.getConfiguration
    CarbonInputFormat.setTableInfo(conf, tableInfo1)
    CarbonInputFormat.setDatabaseName(conf, tableInfo1.getDatabaseName)
    CarbonInputFormat.setTableName(conf, tableInfo1.getFactTable.getTableName)
    CarbonInputFormat.setDataTypeConverter(conf, classOf[SparkDataTypeConverterImpl])

    val identifier = tableInfo1.getOrCreateAbsoluteTableIdentifier()
    CarbonInputFormat.setTablePath(
      conf,
      identifier.appendWithLocalPrefix(identifier.getTablePath))

    CarbonInputFormat.setSegmentsToAccess(
      conf,
      List(segment).asJava)

    CarbonInputFormat.setColumnProjection(
      conf,
      new CarbonProjection(indexColumns.map(_.getColName)))
    format
  }

  // One partition per (segment, task) group of input splits.
  override protected def getPartitions = {
    if (!dataMapSchema.isIndexDataMap) {
      throw new UnsupportedOperationException
    }
    val conf = new Configuration()
    val jobConf = new JobConf(conf)
    SparkHadoopUtil.get.addCredentials(jobConf)
    val job = Job.getInstance(jobConf)
    job.getConfiguration.set("query.id", queryId)

    val format = new CarbonTableInputFormat[Object]

    CarbonInputFormat.setSegmentsToAccess(
      job.getConfiguration,
      segments.toList.asJava)

    CarbonInputFormat.setTableInfo(
      job.getConfiguration,
      tableInfo)
    CarbonInputFormat.setTablePath(
      job.getConfiguration,
      tableInfo.getOrCreateAbsoluteTableIdentifier().getTablePath)
    CarbonInputFormat.setDatabaseName(
      job.getConfiguration,
      tableInfo.getDatabaseName)
    CarbonInputFormat.setTableName(
      job.getConfiguration,
      tableInfo.getFactTable.getTableName)

    format
      .getSplits(job)
      .asScala
      .map(_.asInstanceOf[CarbonInputSplit])
      .groupBy(p => (p.getSegmentId, p.taskId))
      .map { group =>
        new CarbonMultiBlockSplit(
          group._2.asJava,
          group._2.flatMap(_.getLocations).toArray)
      }
      .zipWithIndex
      .map { split =>
        new CarbonSparkPartition(id, split._2, split._1)
      }
      .toArray
  }
}
| jatin9896/incubator-carbondata | integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala | Scala | apache-2.0 | 19,401 |
package org.repwatch.config
import com.typesafe.config.ConfigFactory
/**
 * Application-level configuration, backed by the `repwatch.conf` resource
 * loaded from the classpath via Typesafe Config.
 */
class ApplicationConfig {
  // Parsed configuration tree; kept private so callers only see typed keys.
  private val settings = ConfigFactory.load("repwatch.conf")

  /** API key for Google services (config path `google.api-key`). */
  val googleApiKey: String = settings.getString("google.api-key")

  /** API key for the ProPublica Congress API (config path `propublica.api-key`). */
  val propublicaApiKey: String = settings.getString("propublica.api-key")
}
| csunwold/repwatch | alexa/src/main/scala/org/repwatch/config/ApplicationConfig.scala | Scala | gpl-3.0 | 296 |
package dotty.tools.dotc
package transform
import core._
import Contexts.Context
import Decorators._
import tasty._
import config.Printers.{noPrinter, pickling}
import java.io.PrintStream
import Periods._
import Phases._
import Symbols._
import Flags.Module
import reporting.ThrowingReporter
import collection.mutable
/** Companion holding constants shared by the [[Pickler]] phase. */
object Pickler {
  // Name under which this phase is registered in the compiler pipeline.
  val name: String = "pickler"
}
/** This phase pickles trees: it serializes each top-level class of a
 *  compilation unit into the TASTY format and stores the bytes on the unit.
 *  With -Ytest-pickler it additionally unpickles the result and checks that
 *  the round trip reproduces the original trees.
 */
class Pickler extends Phase {
  import ast.tpd._

  override def phaseName: String = Pickler.name

  // No need to repickle trees coming from TASTY
  override def isRunnable(implicit ctx: Context): Boolean =
    super.isRunnable && !ctx.settings.fromTasty.value

  // Writes `msg` to a file named `name` (used to dump pickling diffs).
  private def output(name: String, msg: String) = {
    val s = new PrintStream(name)
    s.print(msg)
    s.close
  }

  // Maps that keep a record if -Ytest-pickler is set.
  // beforePickling: pretty-printed tree per class, captured before pickling.
  private val beforePickling = new mutable.HashMap[ClassSymbol, String]
  // picklers: the TastyPickler used for each class, so it can be unpickled later.
  private val picklers = new mutable.HashMap[ClassSymbol, TastyPickler]

  /** Drop any elements of this list that are linked module classes of other elements in the list */
  private def dropCompanionModuleClasses(clss: List[ClassSymbol])(implicit ctx: Context): List[ClassSymbol] = {
    val companionModuleClasses =
      clss.filterNot(_.is(Module)).map(_.linkedClass).filterNot(_.isAbsent())
    clss.filterNot(companionModuleClasses.contains)
  }

  /** Pickles every top-level class slice of the current compilation unit. */
  override def run(implicit ctx: Context): Unit = {
    val unit = ctx.compilationUnit
    pickling.println(i"unpickling in run ${ctx.runId}")
    for {
      cls <- dropCompanionModuleClasses(topLevelClasses(unit.tpdTree))
      tree <- sliceTopLevel(unit.tpdTree, cls)
    }
    {
      val pickler = new TastyPickler(cls)
      if (ctx.settings.YtestPickler.value) {
        // Remember the printed tree and the pickler for the round-trip test.
        beforePickling(cls) = tree.show
        picklers(cls) = pickler
      }
      val treePkl = pickler.treePkl
      treePkl.pickle(tree :: Nil)
      treePkl.compactify()
      pickler.addrOfTree = treePkl.buf.addrOfTree
      pickler.addrOfSym = treePkl.addrOfSym
      // Positions and doc comments are written as separate TASTY sections.
      if (tree.span.exists)
        new PositionPickler(pickler, treePkl.buf.addrOfTree).picklePositions(tree :: Nil)
      if (!ctx.settings.YdropComments.value)
        new CommentPickler(pickler, treePkl.buf.addrOfTree).pickleComment(tree)
      // other pickle sections go here.
      val pickled = pickler.assembleParts()
      unit.pickled += (cls -> pickled)
      def rawBytes = // not needed right now, but useful to print raw format.
        pickled.iterator.grouped(10).toList.zipWithIndex.map {
          case (row, i) => s"${i}0: ${row.mkString(" ")}"
        }
      // println(i"rawBytes = \n$rawBytes%\n%") // DEBUG
      if (pickling ne noPrinter) {
        println(i"**** pickled info of $cls")
        println(new TastyPrinter(pickled).printContents())
      }
    }
  }

  /** Runs the phase over all units, then — under -Ytest-pickler — unpickles
   *  everything in a fresh context of the next run and compares the results.
   */
  override def runOn(units: List[CompilationUnit])(implicit ctx: Context): List[CompilationUnit] = {
    val result = super.runOn(units)
    if (ctx.settings.YtestPickler.value)
      testUnpickler(
        ctx.fresh
          .setPeriod(Period(ctx.runId + 1, FirstPhaseId))
          .setReporter(new ThrowingReporter(ctx.reporter))
          .addMode(Mode.ReadPositions)
          .addMode(Mode.ReadComments)
          .addMode(Mode.PrintShowExceptions))
    result
  }

  /** Unpickles every recorded class and checks the trees print identically
   *  to the pre-pickling snapshot.
   */
  private def testUnpickler(implicit ctx: Context): Unit = {
    pickling.println(i"testing unpickler at run ${ctx.runId}")
    ctx.initialize()
    val unpicklers =
      for ((cls, pickler) <- picklers) yield {
        val unpickler = new DottyUnpickler(pickler.assembleParts())
        unpickler.enter(roots = Set.empty)
        cls -> unpickler
      }
    pickling.println("************* entered toplevel ***********")
    for ((cls, unpickler) <- unpicklers) {
      val unpickled = unpickler.rootTrees
      testSame(i"$unpickled%\n%", beforePickling(cls), cls)
    }
  }

  // Reports an error (and dumps both versions to files) when the unpickled
  // tree does not print the same as the original.
  private def testSame(unpickled: String, previous: String, cls: ClassSymbol)(implicit ctx: Context) =
    if (previous != unpickled) {
      output("before-pickling.txt", previous)
      output("after-pickling.txt", unpickled)
      ctx.error(s"""pickling difference for $cls in ${cls.source}, for details:
                   |
                   | diff before-pickling.txt after-pickling.txt""".stripMargin)
    }
}
| som-snytt/dotty | compiler/src/dotty/tools/dotc/transform/Pickler.scala | Scala | apache-2.0 | 4,288 |
import sbt._
/**
 * sbt 0.7-style project definition for Recursivity Commons.
 * Declares dependencies, publishing targets (Sonatype OSS), extra
 * resolvers, and the POM metadata required for Maven Central sync.
 */
class CommonsProject(info: ProjectInfo) extends DefaultProject(info){//} with ChecksumPlugin{

  // Pick the scalatest artifact matching the cross-build Scala version.
  val scalatest = {
    if(buildScalaVersion.contains("2.9"))
      "org.scalatest" % "scalatest_2.9.0" % "1.4.1" % "test"
    else
      "org.scalatest" % "scalatest" % "1.3" % "test"
  }

  // scalap, versioned in lockstep with the Scala compiler being used.
  val asm = "org.scala-lang" % "scalap" % buildScalaVersion

  // Publishing credentials read from the user's local ivy settings.
  Credentials(Path.userHome / ".ivy2" / ".credentials", log)

  // Snapshots and releases go to the corresponding Sonatype repository.
  val publishTo = {
    if(version.toString.endsWith("-SNAPSHOT"))
      "Sonatype Nexus Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots"
    else "Sonatype Nexus Staging" at "https://oss.sonatype.org/service/local/staging/deploy/maven2"
  }

  override def managedStyle = ManagedStyle.Maven
  override def deliverProjectDependencies = Nil

  // Javadoc and sources jar names/artifacts required by Maven Central.
  override def packageDocsJar = defaultJarPath("-javadoc.jar")
  override def packageSrcJar= defaultJarPath("-sources.jar")
  lazy val sourceArtifact = Artifact.sources(artifactID)
  lazy val docsArtifact = Artifact.javadoc(artifactID)

  // Additional resolvers for test/dependency artifacts.
  val scalaTestRepo = "Scala Test Repo" at "http://scala-tools.org/repo-snapshots"
  val typesafeRepo = "Typesafe Repo" at "http://repo.typesafe.com/typesafe/releases"

  // Also build the doc and source jars whenever publishing.
  override def packageToPublishActions = super.packageToPublishActions ++ Seq(packageDocs, packageSrc)

  // POM metadata (license, SCM, developers) required for Maven Central.
  override def pomExtra = {
    // If these aren't lazy, then the build crashes looking for
    // ${moduleName}/project/build.properties.
    (
    <name>{name}</name>
    <description>Recursivity Commons Project POM</description>
    <url>http://github.com/wfaler/recursivity-commons</url>
    <inceptionYear>2010</inceptionYear>
    <organization>
      <name>Recursivity Commons Project</name>
      <url>http://github.com/wfaler/recursivity-commons</url>
    </organization>
    <licenses>
      <license>
        <name>BSD</name>
        <url>http://github.com/wfaler/recursivity-commons/LICENSE</url>
        <distribution>repo</distribution>
      </license>
    </licenses>
    <scm>
      <connection>scm:git:git://github.com/wfaler/recursivity-commons.git</connection>
      <url>http://github.com/wfaler/recursivity-commons</url>
    </scm>
    <developers>
      <developer>
        <id>wfaler</id>
        <name>Wille Faler</name>
        <url>http://blog.recursivity.com</url>
      </developer>
    </developers>)
  }
}
| bowler-framework/recursivity-commons | project/build/CommonsProject.scala | Scala | bsd-3-clause | 2,349 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
package monix.benchmarks
import java.util.concurrent.TimeUnit
import monix.eval.Task
import org.openjdk.jmh.annotations._
import scala.concurrent.Await
import scala.concurrent.duration.Duration
/** To do comparative benchmarks between versions:
*
* benchmarks/run-benchmark TaskMapCallsBenchmark
*
* This will generate results in `benchmarks/results`.
*
* Or to run the benchmark from within SBT:
*
* jmh:run -i 10 -wi 10 -f 2 -t 1 monix.benchmarks.TaskMapCallsBenchmark
*
* Which means "10 iterations", "10 warm-up iterations", "2 forks", "1 thread".
* Please note that benchmarks should be usually executed at least in
* 10 iterations (as a rule of thumb), but more is better.
*/
@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class TaskMapCallsBenchmark {
import TaskMapCallsBenchmark.test
@Benchmark
def one(): Long = test(12000, 1)
@Benchmark
def batch30(): Long = test(12000 / 30, 30)
@Benchmark
def batch120(): Long = test(12000 / 120, 120)
}
object TaskMapCallsBenchmark {
def test(iterations: Int, batch: Int): Long = {
val f = (x: Int) => x + 1
var task = Task.eval(0)
var j = 0
while (j < batch) { task = task.map(f); j += 1 }
var sum = 0L
var i = 0
while (i < iterations) {
sum += Await.result(task.runToFuture, Duration.Inf)
i += 1
}
sum
}
}
*/ | alexandru/monifu | benchmarks/shared/src/main/scala/monix/benchmarks/TaskMapCallsBenchmark.scala | Scala | apache-2.0 | 2,090 |
package es.weso.shex
import org.scalatest.FunSpec
import com.typesafe.config._
import com.hp.hpl.jena.rdf.model.ModelFactory
import java.io.FileOutputStream
import java.io.FileInputStream
import scala.collection.JavaConverters._
import org.scalatest.Matchers
import es.weso.shacl.Report
/**
 * Runs the ShEx test-suite with the derivatives-based validator and
 * generates a W3C EARL conformance report.
 */
class RunTestsDeriv extends FunSpec with Matchers {

  // Test-suite runner wired to the derivatives validator; the report is
  // computed eagerly at construction time.
  val runner = RunTestsFolder(ShapeValidatorWithDeriv)
  val report = runner.createReport

  describe("test-suite report") {
    info("Running tests from " + runner.testsDir)
    info("Manifest file: " + runner.manifestFile)
    // One ScalaTest case per report item, numbered 1..N in sorted order.
    for ((r, n) <- report.items.sortWith(Report.sortReport) zip (1 to report.items.length))
      it(r.name + ". " + n) {
        if (!r.passed)
          fail("Test failed: " + r)
      }
  }

  describe("Generate W3c EARL report") {
    it("Should Generate EARL report ") {
      // Write the EARL RDF model as Turtle to the configured output file.
      val earlModel = report.generateEARL
      val conf: Config = ConfigFactory.load()
      val outFile = conf.getString("EarlReportFile")
      earlModel.write(new FileOutputStream(outFile), "TURTLE")
      // Read the file back with Jena to sanity-check it and count outcomes.
      val readModel = ModelFactory.createDefaultModel()
      readModel.read(new FileInputStream(outFile), "", "TURTLE")
      val earl = "http://www.w3.org/ns/earl#"
      val earl_outcome = readModel.createProperty(earl + "outcome")
      val earl_passed = readModel.createResource(earl + "passed")
      val earl_failed = readModel.createResource(earl + "failed")
      val passed = readModel.listResourcesWithProperty(earl_outcome, earl_passed).toList.asScala
      val failed = readModel.listResourcesWithProperty(earl_outcome, earl_failed).toList.asScala
      info("Generated report " + passed.length + " passed. " + failed.length + " failed. File: " + outFile)
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* THIS CODE WAS ORIGINALLY COPIED DIRECTLY FROM THE OPEN SOURCE PROJECT TAP (Trusted Analytics Platform)
* which has an Apache V2.0. IT WAS LATER UPDATED TO SUPPORT SPARK 2.1 SparkSession
*/
package org.apache.spot.testutils
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterAll, WordSpec}
/**
 * Mix-in for WordSpec suites that need a local SparkSession: the session is
 * created before the first test and stopped after the last one.
 */
trait TestingSparkContextWordSpec extends WordSpec with BeforeAndAfterAll {

  // Shared session for all tests in the suite; assigned in beforeAll and
  // torn down in afterAll. Left at its default (null) until beforeAll runs.
  var sparkSession: SparkSession = _

  /** Creates a single-threaded local SparkSession for the test run. */
  override def beforeAll(): Unit = {
    // The original code also called .config("", "") — a no-op that set an
    // empty-named property; it has been dropped.
    sparkSession = SparkSession.builder()
      .appName("spot-ml-testing")
      .master("local")
      .getOrCreate()
  }

  /**
   * Clean up after the test is done
   */
  override def afterAll(): Unit = {
    sparkSession.stop()
  }
}
| brandon-edwards/incubator-spot | spot-ml/src/test/scala/org/apache/spot/testutils/TestingSparkContextWordSpec.scala | Scala | apache-2.0 | 1,533 |
package ml.sparkling.graph.operators.predicates
import ml.sparkling.graph.api.operators.IterativeComputation.{SimpleVertexPredicate, VertexPredicate}
import org.apache.spark.graphx.VertexId
/**
 * Created by Roman Bartusiak (roman.bartusiak@pwr.edu.pl http://riomus.github.io).
 * Always true predicate: accepts every vertex (and any attached data),
 * so path computations are not restricted to any subset of vertices.
 */
object AllPathPredicate extends VertexPredicate[Any] with Serializable with SimpleVertexPredicate {
  // Accepts every (vertexId, data) pair; the data value is ignored.
  override def apply[B<:Any](v1: VertexId, v2: B): Boolean = true
  // Accepts every vertex id.
  override def apply(id: VertexId): Boolean = true
}
| sparkling-graph/sparkling-graph | operators/src/main/scala/ml/sparkling/graph/operators/predicates/AllPathPredicate.scala | Scala | bsd-2-clause | 529 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// Licence: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.sexp.formats
import shapeless._
import org.ensime.sexp._
/**
* Helper methods for generating wrappers for types in a family, also
* known as "type hints".
*
* See https://gist.github.com/fommil/3a04661116c899056197
*
* Will be replaced by a port of spray-json-shapeless.
*/
trait FamilyFormats {

  /** Carries the symbol used as the serialized tag ("type hint") for `T`. */
  case class TypeHint[T](hint: SexpSymbol)

  // Default hint: ":" followed by the type's description, with the `.type`
  // suffix of singleton types stripped off.
  implicit def typehint[T](implicit t: Typeable[T]): TypeHint[T] =
    TypeHint(SexpSymbol(":" + t.describe.replaceAll("\\\\.type$", "")))

  // always serialises to Nil, and is differentiated by the TraitFormat
  // scala names https://github.com/milessabin/shapeless/issues/256
  implicit def singletonFormat[T <: Singleton](implicit w: Witness.Aux[T]): SexpFormat[T] = new SexpFormat[T] {
    def write(t: T) = SexpNil
    def read(v: Sexp) =
      if (v == SexpNil) w.value
      else deserializationError(v)
  }

  /**
   * Base format for a sealed family: subclasses implement `write` (wrapping
   * each implementation via `wrap`) and the hint-dispatching `read` overload;
   * the public `read` below handles unwrapping the hint envelope.
   */
  abstract class TraitFormat[T] extends SexpFormat[T] {

    // Wraps a serialized implementation in its type hint: empty contents
    // become the one-element list (hint), otherwise the map {hint -> contents}.
    protected def wrap[E](t: E)(implicit th: TypeHint[E], sf: SexpFormat[E]): Sexp = {
      val contents = t.toSexp
      // special cases: empty case classes, and case objects (hopefully)
      if (contents == SexpNil) SexpList(th.hint)
      else SexpData(th.hint -> contents)
    }

    // implement by matching on the implementations and passing off to wrap
    // def write(t: T): Sexp

    final def read(sexp: Sexp): T = sexp match {
      case SexpList(List(hint @ SexpSymbol(_))) => read(hint, SexpNil)
      case SexpData(map) if map.size == 1 =>
        map.head match {
          case (hint, value) => read(hint, value)
        }
      case x => deserializationError(x)
    }

    // implement by matching on the hint and passing off to convertTo[Impl]
    protected def read(hint: SexpSymbol, value: Sexp): T
  }
}
| d1egoaz/ensime-sbt | src/sbt-test/sbt-ensime/ensime-server/s-express/src/main/scala/org/ensime/sexp/formats/FamilyFormats.scala | Scala | apache-2.0 | 1,898 |
// Databricks notebook source
// MAGIC %md
// MAGIC
// MAGIC # [SDS-2.2, Scalable Data Science](https://lamastex.github.io/scalable-data-science/sds/2/2/)
// COMMAND ----------
// MAGIC %md
// MAGIC Archived YouTube video of this live unedited lab-lecture:
// MAGIC
// MAGIC [](https://www.youtube.com/embed/GF-VFR39dIU?start=0&end=410&autoplay=1) [](https://www.youtube.com/embed/atwM-8fXNQY?start=0&end=2372&autoplay=1) [](https://www.youtube.com/embed/Uh5J9hqk12I?start=0&end=2858&autoplay=1) [](https://www.youtube.com/embed/Se_E00wrdqM?start=0&end=2242&autoplay=1)
// COMMAND ----------
// MAGIC %md
// MAGIC #Topic Modeling with Latent Dirichlet Allocation
// MAGIC
// MAGIC This is an augmentation of a notebook from Databricks Guide.
// MAGIC This notebook will provide a brief algorithm summary, links for further reading, and an example of how to use LDA for Topic Modeling.
// COMMAND ----------
// MAGIC %md
// MAGIC ##Algorithm Summary
// MAGIC
// MAGIC - **Task**: Identify topics from a collection of text documents
// MAGIC - **Input**: Vectors of word counts
// MAGIC - **Optimizers**:
// MAGIC - EMLDAOptimizer using [Expectation Maximization](https://en.wikipedia.org/wiki/Expectation%E2%80%93maximization_algorithm)
// MAGIC - OnlineLDAOptimizer using Iterative Mini-Batch Sampling for [Online Variational Bayes](https://www.cs.princeton.edu/~blei/papers/HoffmanBleiBach2010b.pdf)
// COMMAND ----------
// MAGIC %md
// MAGIC ## Intro to LDA by David Blei
// MAGIC Watch at least the first 25 or so minutes of this video by David Blei on a crash introduction to topic modeling via Latent Dirichlet Allocation (LDA).
// MAGIC
// MAGIC [](https://www.youtube.com/watch?v=FkckgwMHP2s)
// COMMAND ----------
// MAGIC %md
// MAGIC ## Links
// MAGIC
// MAGIC - Spark API docs
// MAGIC - Scala: [LDA](https://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.mllib.clustering.LDA)
// MAGIC - Python: [LDA](https://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.clustering.LDA)
// MAGIC - [MLlib Programming Guide](http://spark.apache.org/docs/latest/mllib-clustering.html#latent-dirichlet-allocation-lda)
// MAGIC - [ML Feature Extractors & Transformers](http://spark.apache.org/docs/latest/ml-features.html)
// MAGIC - [Wikipedia: Latent Dirichlet Allocation](https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation)
// COMMAND ----------
// MAGIC %md
// MAGIC ## Readings for LDA
// MAGIC
// MAGIC * A high-level introduction to the topic from Communications of the ACM
// MAGIC * [http://www.cs.columbia.edu/~blei/papers/Blei2012.pdf](http://www.cs.columbia.edu/~blei/papers/Blei2012.pdf)
// MAGIC * A very good high-level humanities introduction to the topic (recommended by Chris Thomson in English Department at UC, Ilam):
// MAGIC * [http://journalofdigitalhumanities.org/2-1/topic-modeling-and-digital-humanities-by-david-m-blei/](http://journalofdigitalhumanities.org/2-1/topic-modeling-and-digital-humanities-by-david-m-blei/)
// MAGIC
// MAGIC Also read the methodological and more formal papers cited in the above links if you want to know more.
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC Let's get a bird's eye view of LDA from [http://www.cs.columbia.edu/~blei/papers/Blei2012.pdf](http://www.cs.columbia.edu/~blei/papers/Blei2012.pdf) next.
// MAGIC
// MAGIC * See pictures (hopefully you read the paper last night!)
// MAGIC * Algorithm of the generative model (this is unsupervised clustering)
// MAGIC * For a careful introduction to the topic see Sections 27.3 and 27.4 (pages 950-970) of Murphy's *Machine Learning: A Probabilistic Perspective, MIT Press, 2012*.
// MAGIC * We will be quite application focussed or applied here!
// MAGIC * To understand the Expectation Maximization Algorithm, read *Section 8.5 The EM Algorithm* in *The Elements of Statistical Learning* by Hastie, Tibshirani and Friedman (2001, Springer Series in Statistics). Read from free 21MB PDF of the book available from here [https://web.stanford.edu/~hastie/Papers/ESLII.pdf](https://web.stanford.edu/~hastie/Papers/ESLII.pdf) or from its backup here [http://lamastex.org/research_events/Readings/StatLearn/ESLII.pdf](http://lamastex.org/research_events/Readings/StatLearn/ESLII.pdf).
// COMMAND ----------
//This allows easy embedding of publicly available information into any other notebook
//when viewing in git-book just ignore this block - you may have to manually chase the URL in frameIt("URL").
//Example usage:
// displayHTML(frameIt("https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation#Topics_in_LDA",250))
/**
 * Renders an HTML snippet embedding the given URL in a sandboxed iframe,
 * for use with `displayHTML` in Databricks notebooks.
 *
 * @param u the URL to embed
 * @param h the iframe height in pixels
 * @return the iframe HTML, including a fallback link for browsers
 *         that do not support frames
 */
def frameIt(u: String, h: Int): String =
  s"""<iframe 
src="$u"
width="95%" height="$h"
sandbox>
<p>
<a href="http://spark.apache.org/docs/latest/index.html">
Fallback link for browsers that, unlikely, don't support frames
</a>
</p>
</iframe>"""
displayHTML(frameIt("https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation#Topics_in_LDA",200))
// COMMAND ----------
displayHTML(frameIt("https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation#Model",600))
// COMMAND ----------
displayHTML(frameIt("https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation#Mathematical_definition",910))
// COMMAND ----------
// MAGIC %md
// MAGIC ## Probabilistic Topic Modeling Example
// MAGIC
// MAGIC This is an outline of our Topic Modeling workflow. Feel free to jump to any subtopic to find out more.
// MAGIC
// MAGIC - Step 0. Dataset Review
// MAGIC - Step 1. Downloading and Loading Data into DBFS
// MAGIC - (Step 1. only needs to be done once per shard - see details at the end of the notebook for Step 1.)
// MAGIC - Step 2. Loading the Data and Data Cleaning
// MAGIC - Step 3. Text Tokenization
// MAGIC - Step 4. Remove Stopwords
// MAGIC - Step 5. Vector of Token Counts
// MAGIC - Step 6. Create LDA model with Online Variational Bayes
// MAGIC - Step 7. Review Topics
// MAGIC - Step 8. Model Tuning - Refilter Stopwords
// MAGIC - Step 9. Create LDA model with Expectation Maximization
// MAGIC - Step 10. Visualize Results
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC ## Step 0. Dataset Review
// MAGIC
// MAGIC In this example, we will use the mini [20 Newsgroups dataset](http://kdd.ics.uci.edu/databases/20newsgroups/20newsgroups.html), which is a random subset of the original 20 Newsgroups dataset. Each newsgroup is stored in a subdirectory, with each article stored as a separate file.
// MAGIC
// MAGIC ***
// MAGIC ***
// MAGIC
// MAGIC The following is the markdown file `20newsgroups.data.md` of the original details on the dataset, obtained as follows:
// MAGIC
// MAGIC ```%sh
// MAGIC $ wget -k http://kdd.ics.uci.edu/databases/20newsgroups/20newsgroups.data.html
// MAGIC --2016-04-07 10:31:51-- http://kdd.ics.uci.edu/databases/20newsgroups/20newsgroups.data.html
// MAGIC Resolving kdd.ics.uci.edu (kdd.ics.uci.edu)... 128.195.1.95
// MAGIC Connecting to kdd.ics.uci.edu (kdd.ics.uci.edu)|128.195.1.95|:80... connected.
// MAGIC HTTP request sent, awaiting response... 200 OK
// MAGIC Length: 4371 (4.3K) [text/html]
// MAGIC Saving to: '20newsgroups.data.html’
// MAGIC
// MAGIC 100%[======================================>] 4,371 --.-K/s in 0s
// MAGIC
// MAGIC 2016-04-07 10:31:51 (195 MB/s) - '20newsgroups.data.html’ saved [4371/4371]
// MAGIC
// MAGIC Converting 20newsgroups.data.html... nothing to do.
// MAGIC Converted 1 files in 0 seconds.
// MAGIC
// MAGIC $ pandoc -f html -t markdown 20newsgroups.data.html > 20newsgroups.data.md
// MAGIC ```
// MAGIC ### 20 Newsgroups
// MAGIC
// MAGIC #### Data Type
// MAGIC
// MAGIC text
// MAGIC
// MAGIC #### Abstract
// MAGIC
// MAGIC This data set consists of 20000 messages taken from 20 newsgroups.
// MAGIC
// MAGIC #### Sources
// MAGIC
// MAGIC ##### Original Owner and Donor
// MAGIC
// MAGIC Tom Mitchell
// MAGIC School of Computer Science
// MAGIC Carnegie Mellon University
// MAGIC tom.mitchell@cmu.edu
// MAGIC
// MAGIC **Date Donated:** September 9, 1999
// MAGIC
// MAGIC #### Data Characteristics
// MAGIC
// MAGIC One thousand Usenet articles were taken from each of the following 20
// MAGIC newsgroups.
// MAGIC
// MAGIC alt.atheism
// MAGIC comp.graphics
// MAGIC comp.os.ms-windows.misc
// MAGIC comp.sys.ibm.pc.hardware
// MAGIC comp.sys.mac.hardware
// MAGIC comp.windows.x
// MAGIC misc.forsale
// MAGIC rec.autos
// MAGIC rec.motorcycles
// MAGIC rec.sport.baseball
// MAGIC rec.sport.hockey
// MAGIC sci.crypt
// MAGIC sci.electronics
// MAGIC sci.med
// MAGIC sci.space
// MAGIC soc.religion.christian
// MAGIC talk.politics.guns
// MAGIC talk.politics.mideast
// MAGIC talk.politics.misc
// MAGIC talk.religion.misc
// MAGIC
// MAGIC Approximately 4% of the articles are crossposted. The articles are
// MAGIC typical postings and thus have headers including subject lines,
// MAGIC signature files, and quoted portions of other articles.
// MAGIC
// MAGIC #### Data Format
// MAGIC
// MAGIC Each newsgroup is stored in a subdirectory, with each article stored as
// MAGIC a separate file.
// MAGIC
// MAGIC #### Past Usage
// MAGIC
// MAGIC T. Mitchell. Machine Learning, McGraw Hill, 1997.
// MAGIC
// MAGIC T. Joachims (1996). [A probabilistic analysis of the Rocchio algorithm
// MAGIC with TFIDF for text
// MAGIC categorization](http://reports-archive.adm.cs.cmu.edu/anon/1996/CMU-CS-96-118.ps),
// MAGIC Computer Science Technical Report CMU-CS-96-118. Carnegie Mellon
// MAGIC University.
// MAGIC
// MAGIC #### Acknowledgements, Copyright Information, and Availability
// MAGIC
// MAGIC You may use this material free of charge for any educational purpose,
// MAGIC provided attribution is given in any lectures or publications that make
// MAGIC use of this material.
// MAGIC
// MAGIC #### References and Further Information
// MAGIC
// MAGIC Naive Bayes code for text classification is available from:
// MAGIC [http://www.cs.cmu.edu/afs/cs/project/theo-11/www/naive-bayes.html](http://www.cs.cmu.edu/afs/cs/project/theo-11/www/naive-bayes.html)
// MAGIC
// MAGIC * * * * *
// MAGIC
// MAGIC [The UCI KDD Archive](http://kdd.ics.uci.edu/) \\
// MAGIC [Information and Computer Science](http://www.ics.uci.edu/) \\
// MAGIC [University of California, Irvine](http://www.uci.edu/) \\
// MAGIC Irvine, CA 92697-3425 \\
// MAGIC
// MAGIC Last modified: September 9, 1999
// MAGIC
// MAGIC ***
// MAGIC ***
// COMMAND ----------
// MAGIC %md
// MAGIC **NOTE:** The mini dataset consists of 100 articles from the following 20 Usenet newsgroups:
// MAGIC
// MAGIC alt.atheism
// MAGIC comp.graphics
// MAGIC comp.os.ms-windows.misc
// MAGIC comp.sys.ibm.pc.hardware
// MAGIC comp.sys.mac.hardware
// MAGIC comp.windows.x
// MAGIC misc.forsale
// MAGIC rec.autos
// MAGIC rec.motorcycles
// MAGIC rec.sport.baseball
// MAGIC rec.sport.hockey
// MAGIC sci.crypt
// MAGIC sci.electronics
// MAGIC sci.med
// MAGIC sci.space
// MAGIC soc.religion.christian
// MAGIC talk.politics.guns
// MAGIC talk.politics.mideast
// MAGIC talk.politics.misc
// MAGIC talk.religion.misc
// MAGIC
// MAGIC Some of the newsgroups seem pretty similar on first glance, such as *comp.sys.ibm.pc.hardware* and *comp.sys.mac.hardware*, which may affect our results.
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC **NOTE:** A simpler and slicker version of the analysis is available in this notebook:
// MAGIC
// MAGIC * [https://docs.cloud.databricks.com/docs/latest/sample_applications/07%20Sample%20ML/MLPipeline%20Newsgroup%20Dataset.html](https://docs.cloud.databricks.com/docs/latest/sample_applications/07%20Sample%20ML/MLPipeline%20Newsgroup%20Dataset.html)
// MAGIC
// MAGIC But, let's do it the hard way here so that we can do it on other arbitrary datasets.
// COMMAND ----------
// MAGIC %md
// MAGIC ## Step 2. Loading the Data and Data Cleaning
// MAGIC
// MAGIC We have already used the wget command to download the file, and put it in our distributed file system (this process takes about 10 minutes). To repeat these steps or to download data from another source follow the steps at the bottom of this worksheet on **Step 1. Downloading and Loading Data into DBFS**.
// MAGIC
// MAGIC Let's make sure these files are in dbfs now:
// COMMAND ----------
display(dbutils.fs.ls("dbfs:/datasets/mini_newsgroups")) // this is where the data resides in dbfs (see below to download it first, if you go to a new shard!)
// COMMAND ----------
// MAGIC %md
// MAGIC Now let us read in the data using `wholeTextFiles()`.
// MAGIC
// MAGIC Recall that the `wholeTextFiles()` command will read in the entire directory of text files, and return a key-value pair of (filePath, fileContent).
// MAGIC
// MAGIC As we do not need the file paths in this example, we will apply a map function to extract the file contents, and then convert everything to lowercase.
// COMMAND ----------
// Load text file, leave out file paths, convert all strings to lowercase.
// wholeTextFiles yields (path, content) pairs; we keep only the contents
// and cache the RDD since several actions below reuse it.
val corpus = sc.wholeTextFiles("/datasets/mini_newsgroups/*").map(_._2).map(_.toLowerCase()).cache() // let's cache
// COMMAND ----------
corpus.count // there are 2000 documents in total - this action will take about 2 minutes
// COMMAND ----------
// MAGIC %md
// MAGIC Review first 5 documents to get a sense for the data format.
// COMMAND ----------
corpus.take(5)
// COMMAND ----------
// MAGIC %md
// MAGIC To review a random document in the corpus uncomment and evaluate the following cell.
// COMMAND ----------
corpus.takeSample(false, 1)
// COMMAND ----------
// MAGIC %md
// MAGIC Note that the document begins with a header containing some metadata that we don't need, and we are only interested in the body of the document. We can do a bit of simple data cleaning here by removing the metadata of each document, which reduces the noise in our dataset. This is an important step as the accuracy of our models depend greatly on the quality of data used.
// COMMAND ----------
// Strip each document's metadata header: split on blank lines (double
// newlines), drop the leading header block, rejoin the remaining
// paragraphs with spaces, and cache the cleaned RDD.
val corpus_body = corpus
  .map(doc => doc.split("\\\\n\\\\n").drop(1).mkString(" "))
  .cache()
// COMMAND ----------
corpus_body.count() // there should still be the same count, but now without meta-data block
// COMMAND ----------
// MAGIC %md
// MAGIC Let's review first 5 documents with metadata removed.
// COMMAND ----------
corpus_body.take(5)
// COMMAND ----------
// MAGIC %md
// MAGIC ## Feature extraction and transformation APIs
// COMMAND ----------
// MAGIC %md
// MAGIC See [http://spark.apache.org/docs/latest/ml-features.html](http://spark.apache.org/docs/latest/ml-features.html)
// COMMAND ----------
// MAGIC %md
// MAGIC To use the convenient [Feature extraction and transformation APIs](http://spark.apache.org/docs/latest/ml-features.html), we will convert our RDD into a DataFrame.
// MAGIC
// MAGIC We will also create an ID for every document using `zipWithIndex`
// MAGIC
// MAGIC * for syntax and details search for `zipWithIndex` in [https://spark.apache.org/docs/latest/api/scala/org/apache/spark/rdd/RDD.html](https://spark.apache.org/docs/latest/api/scala/org/apache/spark/rdd/RDD.html)
// COMMAND ----------
// Convert RDD to DF with ID for every document.
// zipWithIndex assigns a stable Long index used as the document id column.
val corpus_df = corpus_body.zipWithIndex.toDF("corpus", "id")
// COMMAND ----------
//display(corpus_df) // uncomment to see corpus
// this was commented out after a member of the new group requested to remain anonymous on 20160525
// COMMAND ----------
// MAGIC %md
// MAGIC ## Step 3. Text Tokenization
// MAGIC
// MAGIC We will use the RegexTokenizer to split each document into tokens. We can setMinTokenLength() here to indicate a minimum token length, and filter away all tokens that fall below the minimum.
// COMMAND ----------
// MAGIC %md
// MAGIC See [http://spark.apache.org/docs/latest/ml-features.html#tokenizer](http://spark.apache.org/docs/latest/ml-features.html#tokenizer).
// COMMAND ----------
import org.apache.spark.ml.feature.RegexTokenizer

// Set params for RegexTokenizer: split on runs of non-word characters
// (and underscores) and keep only tokens of length >= 4.
val tokenizer = new RegexTokenizer()
  .setPattern("[\\\\W_]+") // break by white space character(s) - try to remove emails and other patterns
  .setMinTokenLength(4) // Filter away tokens with length < 4
  .setInputCol("corpus") // name of the input column
  .setOutputCol("tokens") // name of the output column

// Tokenize document: adds the "tokens" array column to the DataFrame.
val tokenized_df = tokenizer.transform(corpus_df)
// COMMAND ----------
//display(tokenized_df) // uncomment to see tokenized_df
// this was commented out after a member of the new group requested to remain anonymous on 20160525
// COMMAND ----------
display(tokenized_df.select("tokens"))
// COMMAND ----------
// MAGIC %md
// MAGIC ## Step 4. Remove Stopwords
// MAGIC
// MAGIC We can easily remove stopwords using the StopWordsRemover().
// COMMAND ----------
// MAGIC %md
// MAGIC See [http://spark.apache.org/docs/latest/ml-features.html#stopwordsremover](http://spark.apache.org/docs/latest/ml-features.html#stopwordsremover).
// COMMAND ----------
// MAGIC %md
// MAGIC If a list of stopwords is not provided, the StopWordsRemover() will use [this list of stopwords](http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words), also shown below, by default.
// MAGIC
// MAGIC ``` a,about,above,across,after,afterwards,again,against,all,almost,alone,along,already,also,although,always,am,among,amongst,amoungst,amount,an,and,another,any,anyhow,anyone,anything,anyway,anywhere,
// MAGIC are,around,as,at,back,be,became,because,become,becomes,becoming,been,before,beforehand,behind,being,below,beside,besides,between,beyond,bill,both,bottom,but,by,call,can,cannot,cant,co,computer,con,could,
// MAGIC couldnt,cry,de,describe,detail,do,done,down,due,during,each,eg,eight,either,eleven,else,elsewhere,empty,enough,etc,even,ever,every,everyone,everything,everywhere,except,few,fifteen,fify,fill,find,fire,first,
// MAGIC five,for,former,formerly,forty,found,four,from,front,full,further,get,give,go,had,has,hasnt,have,he,hence,her,here,hereafter,hereby,herein,hereupon,hers,herself,him,himself,his,how,however,hundred,i,ie,if,
// MAGIC in,inc,indeed,interest,into,is,it,its,itself,keep,last,latter,latterly,least,less,ltd,made,many,may,me,meanwhile,might,mill,mine,more,moreover,most,mostly,move,much,must,my,myself,name,namely,neither,never,
// MAGIC nevertheless,next,nine,no,nobody,none,noone,nor,not,nothing,now,nowhere,of,off,often,on,once,one,only,onto,or,other,others,otherwise,our,ours,ourselves,out,over,own,part,per,perhaps,please,put,rather,re,same,
// MAGIC see,seem,seemed,seeming,seems,serious,several,she,should,show,side,since,sincere,six,sixty,so,some,somehow,someone,something,sometime,sometimes,somewhere,still,such,system,take,ten,than,that,the,their,them,
// MAGIC themselves,then,thence,there,thereafter,thereby,therefore,therein,thereupon,these,they,thick,thin,third,this,those,though,three,through,throughout,thru,thus,to,together,too,top,toward,towards,twelve,twenty,two,
// MAGIC un,under,until,up,upon,us,very,via,was,we,well,were,what,whatever,when,whence,whenever,where,whereafter,whereas,whereby,wherein,whereupon,wherever,whether,which,while,whither,who,whoever,whole,whom,whose,why,will,
// MAGIC with,within,without,would,yet,you,your,yours,yourself,yourselves
// MAGIC ```
// MAGIC
// MAGIC You can use `getStopWords()` to see the list of stopwords that will be used.
// MAGIC
// MAGIC In this example, we will specify a list of stopwords for the StopWordsRemover() to use. We do this so that we can add on to the list later on.
// COMMAND ----------
display(dbutils.fs.ls("dbfs:/tmp/stopwords")) // check if the file already exists from earlier wget and dbfs-load (if this errors, run the next two cells to download it)
// COMMAND ----------
// MAGIC %md
// MAGIC If the file `dbfs:/tmp/stopwords` already exists then skip the next two cells, otherwise download and load it into DBFS by uncommenting and evaluating the next two cells.
// COMMAND ----------
//%sh wget http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words -O /tmp/stopwords # uncomment '//' at the beginning and repeat only if needed again
// COMMAND ----------
//%fs cp file:/tmp/stopwords dbfs:/tmp/stopwords # uncomment '//' at the beginning and repeat only if needed again
// COMMAND ----------
// List of stopwords: read from DBFS into a local Array[String], one word per line
// (collect() is safe here because the stopword list is tiny)
val stopwords = sc.textFile("/tmp/stopwords").collect()
// COMMAND ----------
stopwords.length // find the number of stopwords in the scala Array[String]
// COMMAND ----------
// MAGIC %md
// MAGIC Finally, we can just remove the stopwords using the `StopWordsRemover` as follows:
// COMMAND ----------
import org.apache.spark.ml.feature.StopWordsRemover
// Set params for StopWordsRemover
val remover = new StopWordsRemover()
.setStopWords(stopwords) // This parameter is optional - omitting it falls back to a built-in English stopword list
.setInputCol("tokens")
.setOutputCol("filtered")
// Create new DF with Stopwords removed: adds a "filtered" column of tokens
val filtered_df = remover.transform(tokenized_df)
// COMMAND ----------
// MAGIC %md
// MAGIC ## Step 5. Vector of Token Counts
// MAGIC
// MAGIC LDA takes in a vector of token counts as input. We can use the `CountVectorizer()` to easily convert our text documents into vectors of token counts.
// MAGIC
// MAGIC The `CountVectorizer` will return `(VocabSize, Array(Indexed Tokens), Array(Token Frequency))`.
// MAGIC
// MAGIC Two handy parameters to note:
// MAGIC
// MAGIC - `setMinDF`: Specifies the minimum number of different documents a term must appear in to be included in the vocabulary.
// MAGIC - `setMinTF`: Specifies the minimum number of times a term has to appear in a document to be counted in that document's term-frequency vector.
// COMMAND ----------
// MAGIC %md
// MAGIC See [http://spark.apache.org/docs/latest/ml-features.html#countvectorizer](http://spark.apache.org/docs/latest/ml-features.html#countvectorizer).
// COMMAND ----------
import org.apache.spark.ml.feature.CountVectorizer
// Set params for CountVectorizer
val vectorizer = new CountVectorizer()
.setInputCol("filtered")
.setOutputCol("features")
.setVocabSize(10000) // keep at most the 10000 most frequent terms
.setMinDF(5) // the minimum number of different documents a term must appear in to be included in the vocabulary.
.fit(filtered_df) // fit() learns the vocabulary, returning a CountVectorizerModel
// COMMAND ----------
// Create vector of token counts
val countVectors = vectorizer.transform(filtered_df).select("id", "features")
// COMMAND ----------
// see the first countVectors
countVectors.take(2)
// COMMAND ----------
// MAGIC %md
// MAGIC To use the LDA algorithm in the MLlib library, we have to convert the DataFrame back into an RDD.
// COMMAND ----------
// Convert DF to RDD
import org.apache.spark.ml.linalg.Vector
// NOTE(review): assumes `Row` is already imported by an earlier cell; each row is (id: Long, token-count Vector)
val lda_countVector = countVectors.map { case Row(id: Long, countVector: Vector) => (id, countVector) }
// COMMAND ----------
// format: Array(id, (VocabSize, Array(indexedTokens), Array(Token Frequency)))
lda_countVector.take(1)
// COMMAND ----------
// MAGIC %md
// MAGIC ## Let's get an overview of LDA in Spark's MLLIB
// COMMAND ----------
// MAGIC %md
// MAGIC See [http://spark.apache.org/docs/latest/mllib-clustering.html#latent-dirichlet-allocation-lda](http://spark.apache.org/docs/latest/mllib-clustering.html#latent-dirichlet-allocation-lda)
// COMMAND ----------
// MAGIC %md
// MAGIC ## Create LDA model with Online Variational Bayes
// MAGIC
// MAGIC We will now set the parameters for LDA. We will use the OnlineLDAOptimizer() here, which implements Online Variational Bayes.
// MAGIC
// MAGIC Choosing the number of topics for your LDA model requires a bit of domain knowledge. As we know that there are 20 unique newsgroups in our dataset, we will set numTopics to be 20.
// COMMAND ----------
// Number of LDA topics: chosen to match the 20 known newsgroups in this dataset
val numTopics = 20
// COMMAND ----------
// MAGIC %md
// MAGIC We will set the parameters needed to build our LDA model. We can also setMiniBatchFraction for the OnlineLDAOptimizer, which sets the fraction of corpus sampled and used at each iteration. In this example, we will set this to 0.8.
// COMMAND ----------
import org.apache.spark.mllib.clustering.{LDA, OnlineLDAOptimizer}
// Set LDA params
val lda = new LDA()
.setOptimizer(new OnlineLDAOptimizer().setMiniBatchFraction(0.8)) // Online Variational Bayes; sample 80% of the corpus per iteration
.setK(numTopics)
.setMaxIterations(3) // deliberately few iterations for a quick first pass
.setDocConcentration(-1) // use default values
.setTopicConcentration(-1) // use default values
// COMMAND ----------
// MAGIC %md
// MAGIC Create the LDA model with Online Variational Bayes.
// COMMAND ----------
// convert ML vectors into MLlib vectors
// (the RDD-based mllib LDA API predates spark.ml; Vectors.fromML bridges the two vector types)
val lda_countVector_mllib = lda_countVector.map { case (id, vector) => (id, org.apache.spark.mllib.linalg.Vectors.fromML(vector)) }.rdd
val ldaModel = lda.run(lda_countVector_mllib) // train; OnlineLDAOptimizer yields a LocalLDAModel
// COMMAND ----------
// MAGIC %md
// MAGIC Watch **Online Learning for Latent Dirichlet Allocation** in NIPS2010 by Matt Hoffman (right click and open in new tab)
// MAGIC
// MAGIC [![Matt Hoffman's NIPS 2010 Talk Online LDA](http://videolectures.net/nips2010_hoffman_oll/thumb.jpg)](http://videolectures.net/nips2010_hoffman_oll/)
// MAGIC
// MAGIC Also see the paper on *Online variational Bayes* by Matt linked for more details (from the above URL): [http://videolectures.net/site/normal_dl/tag=83534/nips2010_1291.pdf](http://videolectures.net/site/normal_dl/tag=83534/nips2010_1291.pdf)
// COMMAND ----------
// MAGIC %md
// MAGIC Note that using the OnlineLDAOptimizer returns us a [LocalLDAModel](http://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.mllib.clustering.LocalLDAModel), which stores the inferred topics of your corpus.
// COMMAND ----------
// MAGIC %md
// MAGIC ## Review Topics
// MAGIC
// MAGIC We can now review the results of our LDA model. We will print out all 20 topics with their corresponding term probabilities.
// MAGIC
// MAGIC Note that you will get slightly different results every time you run an LDA model since LDA includes some randomization.
// MAGIC
// MAGIC Let us review results of LDA model with Online Variational Bayes, step by step.
// COMMAND ----------
// Top 5 most heavily weighted terms (as vocabulary indices) per topic
val topicIndices = ldaModel.describeTopics(maxTermsPerTopic = 5)
// COMMAND ----------
// The learned vocabulary: index -> term
val vocabList = vectorizer.vocabulary
// COMMAND ----------
// Resolve each topic's term indices to words, paired with their weights
val topics = topicIndices.map { case (terms, termWeights) =>
terms.map(vocabList(_)).zip(termWeights)
}
// COMMAND ----------
// MAGIC %md
// MAGIC Feel free to take things apart to understand!
// COMMAND ----------
topicIndices(0) // first topic: (term indices, term weights)
// COMMAND ----------
topicIndices(0)._1 // term indices of the first topic
// COMMAND ----------
topicIndices(0)._1(0) // index of the top term of the first topic
// COMMAND ----------
vocabList(topicIndices(0)._1(0)) // the top term itself
// COMMAND ----------
// MAGIC %md
// MAGIC Review Results of LDA model with Online Variational Bayes - Doing all four steps earlier at once.
// COMMAND ----------
// The same four steps as above in one cell: describe topics, fetch the
// vocabulary, resolve indices to words, and print every topic's top terms.
val topicIndices = ldaModel.describeTopics(maxTermsPerTopic = 5)
val vocabList = vectorizer.vocabulary
val topics = topicIndices.map { case (terms, termWeights) =>
terms.map(vocabList(_)).zip(termWeights)
}
println(s"$numTopics topics:")
topics.zipWithIndex.foreach { case (topic, i) =>
println(s"TOPIC $i")
topic.foreach { case (term, weight) => println(s"$term\\t$weight") }
println(s"==========")
}
// COMMAND ----------
// MAGIC %md
// MAGIC Going through the results, you may notice that some of the topic words returned are actually stopwords that are specific to our dataset (for eg: "writes", "article"...). Let's try improving our model.
// COMMAND ----------
// MAGIC %md
// MAGIC ## Step 8. Model Tuning - Refilter Stopwords
// MAGIC
// MAGIC We will try to improve the results of our model by identifying some stopwords that are specific to our dataset. We will filter these stopwords out and rerun our LDA model to see if we get better results.
// COMMAND ----------
// Dataset-specific stopwords spotted in the first LDA run (newsgroup boilerplate etc.)
val add_stopwords = Array("article", "writes", "entry", "date", "udel", "said", "tell", "think", "know", "just", "newsgroup", "line", "like", "does", "going", "make", "thanks")
// COMMAND ----------
// Combine newly identified stopwords to our existing list of stopwords
val new_stopwords = stopwords.union(add_stopwords)
// COMMAND ----------
import org.apache.spark.ml.feature.StopWordsRemover
// Set Params for StopWordsRemover with new_stopwords
// (shadows the earlier `remover`; re-applied to the original tokenized_df)
val remover = new StopWordsRemover()
.setStopWords(new_stopwords)
.setInputCol("tokens")
.setOutputCol("filtered")
// Create new df with new list of stopwords removed
val new_filtered_df = remover.transform(tokenized_df)
// COMMAND ----------
// Set Params for CountVectorizer
// (re-fit on the refiltered tokens; shadows the earlier `vectorizer`)
val vectorizer = new CountVectorizer()
.setInputCol("filtered")
.setOutputCol("features")
.setVocabSize(10000)
.setMinDF(5)
.fit(new_filtered_df)
// Create new df of countVectors
val new_countVectors = vectorizer.transform(new_filtered_df).select("id", "features")
// COMMAND ----------
// Convert DF to RDD of (id, token-count vector) pairs, as in the first run
val new_lda_countVector = new_countVectors.map { case Row(id: Long, countVector: Vector) => (id, countVector) }
// COMMAND ----------
// MAGIC %md
// MAGIC We will also increase MaxIterations to 10 to see if we get better results.
// COMMAND ----------
// Set LDA parameters
// Same online optimizer as before, but with more iterations for better convergence
val new_lda = new LDA()
.setOptimizer(new OnlineLDAOptimizer().setMiniBatchFraction(0.8))
.setK(numTopics)
.setMaxIterations(10) // more than 3 this time
.setDocConcentration(-1) // use default values
.setTopicConcentration(-1) // use default values
// COMMAND ----------
// MAGIC %md
// MAGIC #### How to find what the default values are?
// MAGIC
// MAGIC Dive into the source!!!
// MAGIC
// MAGIC 1. Let's find the default value for `docConcentration` now.
// MAGIC * Go to the Apache Spark package Root: [https://spark.apache.org/docs/latest/api/scala/#package](https://spark.apache.org/docs/latest/api/scala/#package)
// MAGIC * search for 'ml' in the search box on the top left (ml is for ml library)
// MAGIC * Then find the `LDA` by scrolling below on the left to mllib's `clustering` methods and click on `LDA`
// MAGIC * Then click on the source code link which should take you here:
// MAGIC * [https://github.com/apache/spark/blob/v2.2.0/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala](https://github.com/apache/spark/blob/v2.2.0/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala)
// MAGIC * Now, simply go to the right function and see the following comment block:
// MAGIC
// MAGIC ```
// MAGIC /**
// MAGIC * Concentration parameter (commonly named "alpha") for the prior placed on documents'
// MAGIC * distributions over topics ("theta").
// MAGIC *
// MAGIC * This is the parameter to a Dirichlet distribution, where larger values mean more smoothing
// MAGIC * (more regularization).
// MAGIC *
// MAGIC * If not set by the user, then docConcentration is set automatically. If set to
// MAGIC * singleton vector [alpha], then alpha is replicated to a vector of length k in fitting.
// MAGIC * Otherwise, the [[docConcentration]] vector must be length k.
// MAGIC * (default = automatic)
// MAGIC *
// MAGIC * Optimizer-specific parameter settings:
// MAGIC * - EM
// MAGIC * - Currently only supports symmetric distributions, so all values in the vector should be
// MAGIC * the same.
// MAGIC * - Values should be > 1.0
// MAGIC * - default = uniformly (50 / k) + 1, where 50/k is common in LDA libraries and +1 follows
// MAGIC * from Asuncion et al. (2009), who recommend a +1 adjustment for EM.
// MAGIC * - Online
// MAGIC * - Values should be >= 0
// MAGIC * - default = uniformly (1.0 / k), following the implementation from
// MAGIC * [[https://github.com/Blei-Lab/onlineldavb]].
// MAGIC * @group param
// MAGIC */
// MAGIC ```
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC **HOMEWORK:** Try to find the default value for `TopicConcentration`.
// COMMAND ----------
// convert ML vectors into MLlib vectors (same bridging as for the first model)
val new_lda_countVector_mllib = new_lda_countVector.map { case (id, vector) => (id, org.apache.spark.mllib.linalg.Vectors.fromML(vector)) }.rdd
// Create LDA model with stopwords refiltered
val new_ldaModel = new_lda.run(new_lda_countVector_mllib)
// COMMAND ----------
// Review the refiltered model: top 5 terms per topic, resolved against the
// (re-fit) vocabulary, printed with their weights. Output format is identical
// to the earlier review cell.
val topicIndices = new_ldaModel.describeTopics(maxTermsPerTopic = 5)
val vocabList = vectorizer.vocabulary
val topics = for ((terms, termWeights) <- topicIndices)
  yield terms.map(idx => vocabList(idx)).zip(termWeights)
println(s"$numTopics topics:")
for ((topic, i) <- topics.zipWithIndex) {
  println(s"TOPIC $i")
  for ((term, weight) <- topic) println(s"$term\\t$weight")
  println(s"==========")
}
// COMMAND ----------
// MAGIC %md
// MAGIC We managed to get better results here. We can easily infer that topic 3 is about space, topic 7 is about religion, etc.
// MAGIC
// MAGIC ```
// MAGIC ==========
// MAGIC TOPIC 3
// MAGIC station 0.0022184815200582244
// MAGIC launch 0.0020621309179376145
// MAGIC shuttle 0.0019305627762549198
// MAGIC space 0.0017600147075534092
// MAGIC redesign 0.0014972130065346592
// MAGIC ==========
// MAGIC TOPIC 7
// MAGIC people 0.0038165245379908675
// MAGIC church 0.0036902650900400543
// MAGIC jesus 0.0029942866750178893
// MAGIC paul 0.0026144777524277044
// MAGIC bible 0.0020476251853453016
// MAGIC ==========
// MAGIC ```
// COMMAND ----------
// MAGIC %md
// MAGIC ## Step 9. Create LDA model with Expectation Maximization
// MAGIC
// MAGIC Let's try creating an LDA model with Expectation Maximization on the data that has been refiltered for additional stopwords. We will also increase MaxIterations here to 100 to see if that improves results.
// COMMAND ----------
// MAGIC %md
// MAGIC See [http://spark.apache.org/docs/latest/mllib-clustering.html#latent-dirichlet-allocation-lda](http://spark.apache.org/docs/latest/mllib-clustering.html#latent-dirichlet-allocation-lda)
// COMMAND ----------
import org.apache.spark.mllib.clustering.EMLDAOptimizer
// Set LDA parameters
val em_lda = new LDA()
.setOptimizer(new EMLDAOptimizer()) // Expectation-Maximization instead of Online Variational Bayes
.setK(numTopics)
.setMaxIterations(100)
.setDocConcentration(-1) // use default values
.setTopicConcentration(-1) // use default values
// COMMAND ----------
// Train on the same refiltered corpus; EM yields a DistributedLDAModel
val em_ldaModel = em_lda.run(new_lda_countVector_mllib)
// COMMAND ----------
// MAGIC %md
// MAGIC Note that the EMLDAOptimizer produces a DistributedLDAModel, which stores not only the inferred topics but also the full training corpus and topic distributions for each document in the training corpus.
// COMMAND ----------
// Top 5 terms per topic from the EM-trained model
val topicIndices = em_ldaModel.describeTopics(maxTermsPerTopic = 5)
// COMMAND ----------
val vocabList = vectorizer.vocabulary
// COMMAND ----------
vocabList.size // vocabulary size (bounded above by setVocabSize)
// COMMAND ----------
// Resolve term indices to words, paired with weights
val topics = topicIndices.map { case (terms, termWeights) =>
terms.map(vocabList(_)).zip(termWeights)
}
// COMMAND ----------
vocabList(47) // 47 is the index of the term 'university' or the first term in topics - this may change due to randomness in algorithm
// COMMAND ----------
// MAGIC %md
// MAGIC This is just doing it all at once.
// COMMAND ----------
// Repeat the topic review for the EM model, all in one cell: describe topics,
// fetch the vocabulary, resolve indices to words, and print terms with weights.
val topicIndices = em_ldaModel.describeTopics(maxTermsPerTopic = 5)
val vocabList = vectorizer.vocabulary
val topics = topicIndices.map { case (terms, termWeights) =>
terms.map(vocabList(_)).zip(termWeights)
}
println(s"$numTopics topics:")
topics.zipWithIndex.foreach { case (topic, i) =>
println(s"TOPIC $i")
topic.foreach { case (term, weight) => println(s"$term\\t$weight") }
println(s"==========")
}
// COMMAND ----------
// MAGIC %md
// MAGIC We've managed to get some good results here. For example, we can easily infer that Topic 0 is about computers, Topic 8 is about space, etc.
// MAGIC
// MAGIC
// MAGIC We still get some ambiguous results like Topic 17.
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC To improve our results further, we could employ some of the below methods:
// MAGIC
// MAGIC - Refilter data for additional data-specific stopwords
// MAGIC - Use Stemming or Lemmatization to preprocess data
// MAGIC - Experiment with a smaller number of topics, since some of these topics in the 20 Newsgroups are pretty similar
// MAGIC - Increase model's MaxIterations
// COMMAND ----------
// MAGIC %md
// MAGIC ## Visualize Results
// MAGIC
// MAGIC We will try visualizing the results obtained from the EM LDA model with a d3 bubble chart.
// COMMAND ----------
// Zip each topic's (term, probability) array with its topic index
val termArray = topics.zipWithIndex
// COMMAND ----------
// Flatten into one record per term: (term, probability, topicId)
val termRDD = sc.parallelize(termArray)
val termRDD2 = termRDD.flatMap { case (termWeights, topicId) =>
  termWeights.map { case (term, probability) => (term, probability, topicId) }
}
// COMMAND ----------
// Name the tuple columns so the DataFrame is self-describing
val termDF = termRDD2.toDF
  .withColumnRenamed("_1", "term")
  .withColumnRenamed("_2", "probability")
  .withColumnRenamed("_3", "topicId")
// COMMAND ----------
display(termDF)
// COMMAND ----------
// MAGIC %md
// MAGIC We will convert the DataFrame into a JSON format, which will be passed into d3.
// COMMAND ----------
// Create JSON data: one JSON object per (term, probability, topicId) row,
// joined with comma+newline so it can be spliced into the D3 page below
val rawJson = termDF.toJSON.collect().mkString(",\\n")
// COMMAND ----------
// MAGIC %md
// MAGIC We are now ready to use D3 on the rawJson data.
// COMMAND ----------
// Render a D3 bubble chart of the topics inside the notebook. `rawJson`
// (built in the previous cell) is interpolated into the page as the children
// of the "topics" node; each leaf row becomes one bubble whose size is the
// term's probability and whose color is keyed by its topicId.
displayHTML(s"""
<!DOCTYPE html>
<meta charset="utf-8">
<style>
circle {
fill: rgb(31, 119, 180);
fill-opacity: 0.5;
stroke: rgb(31, 119, 180);
stroke-width: 1px;
}
.leaf circle {
fill: #ff7f0e;
fill-opacity: 1;
}
text {
font: 14px sans-serif;
}
</style>
<body>
<script src="https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"></script>
<script>
var json = {
"name": "data",
"children": [
{
"name": "topics",
"children": [
${rawJson}
]
}
]
};
var r = 1000,
format = d3.format(",d"),
fill = d3.scale.category20c();
var bubble = d3.layout.pack()
.sort(null)
.size([r, r])
.padding(1.5);
var vis = d3.select("body").append("svg")
.attr("width", r)
.attr("height", r)
.attr("class", "bubble");
var node = vis.selectAll("g.node")
.data(bubble.nodes(classes(json))
.filter(function(d) { return !d.children; }))
.enter().append("g")
.attr("class", "node")
.attr("transform", function(d) { return "translate(" + d.x + "," + d.y + ")"; })
color = d3.scale.category20();
node.append("title")
.text(function(d) { return d.className + ": " + format(d.value); });
node.append("circle")
.attr("r", function(d) { return d.r; })
.style("fill", function(d) {return color(d.topicName);});
var text = node.append("text")
.attr("text-anchor", "middle")
.attr("dy", ".3em")
.text(function(d) { return d.className.substring(0, d.r / 3)});
text.append("tspan")
.attr("dy", "1.2em")
.attr("x", 0)
.text(function(d) {return Math.ceil(d.value * 10000) /10000; });
// Returns a flattened hierarchy containing all leaf nodes under the root.
function classes(root) {
var classes = [];
function recurse(term, node) {
if (node.children) node.children.forEach(function(child) { recurse(node.term, child); });
else classes.push({topicName: node.topicId, className: node.term, value: node.probability});
}
recurse(null, root);
return {children: classes};
}
</script>
""")
// COMMAND ----------
// MAGIC %md
// MAGIC ### You try!
// MAGIC
// MAGIC **NOW or Later as HOMEWORK**
// MAGIC
// MAGIC 1. Try to do the same process for the State of the Union Addresses dataset from Week1. As a first step, first locate where that data is... Go to week1 and try to see if each SoU can be treated as a document for topic modeling and whether there is temporal clustering of SoU's within the same topic.
// MAGIC
// MAGIC 2. Try to improve the tuning by elaborating the pipeline with stemming, lemmatization, etc in this news-group dataset (if you want to do a project based on this, perhaps). You can also parse the input to bring in the newsgroup id's from the directories (consider exploiting the file names in the `wholeTextFiles` method) as this will let you explore how well your unsupervised algorithm is doing relative to the known newsgroups each document falls in (note you generally won't have the luxury of knowing the topic labels for typical datasets in the unsupervised topic modeling domain).
// MAGIC
// MAGIC 3. Try to parse the data closer to the clean dataset available in `/databricks-datasets/news20.binary/*` and walk through the following notebook (*but in Scala!*):
// MAGIC * [https://docs.cloud.databricks.com/docs/latest/sample_applications/07%20Sample%20ML/MLPipeline%20Newsgroup%20Dataset.html](https://docs.cloud.databricks.com/docs/latest/sample_applications/07%20Sample%20ML/MLPipeline%20Newsgroup%20Dataset.html)
// MAGIC
// COMMAND ----------
// MAGIC %fs ls /databricks-datasets/news20.binary/data-001
// COMMAND ----------
// MAGIC %md
// MAGIC ## Step 1. Downloading and Loading Data into DBFS
// MAGIC
// MAGIC **you don't have to do the download in databricks if above cell has contents in `/databricks-datasets/news20.binary/data-001`**
// MAGIC
// MAGIC Here are the steps taken for downloading and saving data to the distributed file system. Uncomment them for repeating this process on your databricks cluster or for downloading a new source of data.
// COMMAND ----------
//%sh wget http://kdd.ics.uci.edu/databases/20newsgroups/mini_newsgroups.tar.gz -O /tmp/newsgroups.tar.gz
// COMMAND ----------
// MAGIC %md
// MAGIC Untar the file into the /tmp/ folder.
// COMMAND ----------
//%sh tar xvfz /tmp/newsgroups.tar.gz -C /tmp/
// COMMAND ----------
// MAGIC %md
// MAGIC The below cell takes about 10mins to run.
// MAGIC
// MAGIC NOTE: It is slow partly because each file is small and we are facing the 'small files problem' with distributed file systems that need meta-data for each file. If the file name is not needed then it may be better to create one large stream of the contents of all the files into dbfs. We leave this as it is to show what happens when we upload a dataset of lots of little files into dbfs.
// COMMAND ----------
//%fs cp -r file:/tmp/mini_newsgroups dbfs:/datasets/mini_newsgroups
// COMMAND ----------
display(dbutils.fs.ls("dbfs:/datasets/mini_newsgroups")) | raazesh-sainudiin/scalable-data-science | db/2/2/034_LDA_20NewsGroupsSmall.scala | Scala | unlicense | 43,041 |
/*
* Copyright (c) 2012-2016 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.iglu.ctl
import cats.Show
import cats.data.{EitherNel, EitherT}
import cats.implicits._
import cats.effect.{ExitCode, IO, IOApp}
import com.snowplowanalytics.iglu.ctl.commands._
import com.snowplowanalytics.iglu.ctl.Common.Error
/**
 * Command-line entry point for igluctl. Parses the arguments into a
 * [[Command]], dispatches to the matching subcommand processor, and folds
 * the accumulated errors/messages into a process exit code.
 */
object Main extends IOApp {
// Check -> Folder does not exist. Common. Input folder presence is checked in Main
// generate
// Read -> (inaccessible, not a JSON, not a Schema) -- filter out a file entirely, but do not short-circuit the process
// Read -> (incompatible path) -- short-circuit the process
// Read -> load everything into memory
// Lint -> on very permissive set of checks (only errors)
// Write -> short circuits at any point
// Write -> takes force into account
// static push
// Read -> (inaccessible, not a JSON, not a Schema) -- filter out a file entirely, but do not short-circuit the process
// Read -> (incompatible path) -- short-circuit the process
// Read -> load everything into memory
// Lint -> on very permissive set of checks (only errors)
// Write -> short circuits at any point
// Write -> takes force into account
// static s3cp
// Read -> (inaccessible) -- short-circuit the process
// Read -> does not load into memory (it can be big)
// Read -> does not even parse them
// Dispatch the parsed command; an unparseable command line short-circuits
// into a single error message wrapped in the same Result type.
def run(args: List[String]): IO[ExitCode] = {
val result: Result = Command.parse(args) match {
case Right(Command.Lint(input, skipWarnings, skipChecks)) =>
Lint.process(input, skipChecks, skipWarnings)
case Right(Command.StaticGenerate(in, out, schema, own, size, jp, raw, split, noheader, f)) =>
// NOTE(review): the argument order passed to Generate.process differs from
// the pattern order above - presumably matches its signature; verify there.
Generate.process(in, out, jp, raw, schema, size, split, noheader, f, own)
case Right(Command.StaticPush(input, registryRoot, apikey, public)) =>
Push.process(input, registryRoot, apikey, public)
case Right(Command.StaticS3Cp(input, bucket, s3Path, accessKey, secretKey, profile, region)) =>
S3cp.process(input, bucket, s3Path, accessKey, secretKey, profile, region)
case Right(Command.StaticDeploy(config)) =>
Deploy.process(config)
case Left(e) =>
EitherT.fromEither[IO](Error.Message(e.toString).asLeft[List[String]].toEitherNel)
}
result.value.flatMap(processResult[String])
}
/**
 * Render the outcome: every error to stderr with ExitCode.Error, or every
 * message to stdout with ExitCode.Success, using the Show instance.
 */
def processResult[A: Show](either: EitherNel[Error, List[A]]): IO[ExitCode] =
either.fold(
errors => errors.traverse_(e => IO(System.err.println(e.show))) *> IO.pure(ExitCode.Error),
messages => messages.traverse_(e => IO(System.out.println(e.show))) *> IO.pure(ExitCode.Success)
)
}
| snowplow/iglu | 0-common/igluctl/src/main/scala/com.snowplowanalytics.iglu/ctl/Main.scala | Scala | apache-2.0 | 3,305 |
package com.twitter.finagle.mysql.param
import com.twitter.finagle.mysql.MysqlCharset.Utf8_general_ci
import com.twitter.finagle.Stack
/**
 * A class eligible for configuring a mysql client's credentials during
 * the Handshake phase.
 *
 * @param username MySQL username; `None` means no username is sent.
 * @param password MySQL password; `None` means no password is sent.
 */
case class Credentials(username: Option[String], password: Option[String])
object Credentials {
implicit val param: Stack.Param[Credentials] = new Stack.Param[Credentials] {
val default: Credentials = Credentials(None, None)
// Only the username is exposed in Stack.Param diagnostics.
override def show(p: Credentials): Seq[(String, () => String)] = {
// do not show the password for security reasons
Seq(("username", () => p.username.getOrElse("")))
}
}
}
/**
 * A class eligible for configuring a mysql client's database during
 * the Handshake phase.
 *
 * @param db database to select on connect; `None` (the default) selects none.
 */
case class Database(db: Option[String])
object Database {
implicit val param: Stack.Param[Database] = Stack.Param(Database(None))
}
/**
 * A class eligible for configuring a mysql client's charset during
 * the Handshake phase.
 *
 * @param charset MySQL character-set id; defaults to `Utf8_general_ci`.
 */
case class Charset(charset: Short)
object Charset {
implicit val param: Stack.Param[Charset] = Stack.Param(Charset(Utf8_general_ci))
}
/**
 * A class eligible for configuring a mysql client's CLIENT_FOUND_ROWS flag
 * during the Handshake phase. Enabled by default. (Per MySQL's protocol
 * docs, CLIENT_FOUND_ROWS makes affected-row counts report matched rows
 * rather than changed rows.)
 */
case class FoundRows(enabled: Boolean)
object FoundRows {
implicit val param: Stack.Param[FoundRows] = Stack.Param(FoundRows(true))
}
/**
 * A class eligible for configuring the maximum number of prepare
 * statements. After creating `num` prepare statements, we'll start purging
 * old ones.
 *
 * @param num maximum number of cached prepare statements; must be positive.
 */
case class MaxConcurrentPrepareStatements(num: Int) {
  // `num` is an Int, so it can never exceed Int.MaxValue: the previous
  // upper-bound assertion was vacuous (and its message wrongly said "bytes"),
  // so only the positivity invariant is checked.
  assert(num > 0, s"$num must be positive")

  /** Pair this configuration with its Stack.Param evidence. */
  def mk(): (MaxConcurrentPrepareStatements, Stack.Param[MaxConcurrentPrepareStatements]) =
    (this, MaxConcurrentPrepareStatements.param)
}
object MaxConcurrentPrepareStatements {
  // Default: cache up to 20 prepare statements before purging.
  implicit val param: Stack.Param[MaxConcurrentPrepareStatements] =
    Stack.Param(MaxConcurrentPrepareStatements(20))
}
/**
 * Configure whether to support unsigned integer fields when returning elements of a [[Row]].
 * If not supported, unsigned fields will be decoded as if they were signed, potentially
 * resulting in corruption in the case of overflowing the signed representation. Because
 * Java doesn't support unsigned integer types widening may be necessary to support the
 * unsigned variants. For example, an unsigned Int is represented as a Long.
 *
 * `Value` representations of unsigned columns which are widened when enabled:
 * `ByteValue` -> `ShortValue`
 * `ShortValue` -> `IntValue`
 * `LongValue` -> `LongLongValue`
 * `LongLongValue` -> `BigIntValue`
 */
case class UnsignedColumns(supported: Boolean)
object UnsignedColumns {
implicit val param: Stack.Param[UnsignedColumns] = Stack.Param(UnsignedColumns(false))
}
| luciferous/finagle | finagle-mysql/src/main/scala/com/twitter/finagle/mysql/param/params.scala | Scala | apache-2.0 | 2,853 |
package slick.relational
import scala.language.existentials
import slick.ast._
import slick.SlickException
import slick.util.SlickLogger
import org.slf4j.LoggerFactory
/** Create a ResultConverter for parameters and result sets. Subclasses have
* to provide profile-specific createColumnConverter implementations. */
/** Create a ResultConverter for parameters and result sets. Subclasses have
 * to provide profile-specific createColumnConverter implementations. */
trait ResultConverterCompiler[Domain <: ResultConverterDomain] {
/** Recursively compile a mapping AST node into a ResultConverter tree. */
def compile(n: Node): ResultConverter[Domain, _] = n match {
// Insert column addressed by one or more element paths: one column
// converter per path, combined into a width-1 compound when necessary.
case InsertColumn(paths, fs, _) =>
val pathConvs = paths.map { case Select(_, ElementSymbol(idx)) => createColumnConverter(n, idx, Some(fs)) }
if(pathConvs.length == 1) pathConvs.head else CompoundResultConverter(1, pathConvs: _*)
// Optional insert column: same treatment as the non-optional case; the
// OptionApply wrapper node is passed through to createColumnConverter.
case OptionApply(InsertColumn(paths, fs, _)) =>
val pathConvs = paths.map { case Select(_, ElementSymbol(idx)) => createColumnConverter(n, idx, Some(fs)) }
if(pathConvs.length == 1) pathConvs.head else CompoundResultConverter(1, pathConvs: _*)
// Plain positional column reference.
case Select(_, ElementSymbol(idx)) => createColumnConverter(n, idx, None)
// Silent cast over a column reference: retype the Select with the cast's
// type so the column converter is created for the target type.
case cast @ Library.SilentCast(sel @ Select(_, ElementSymbol(idx))) =>
createColumnConverter(sel.nodeTypedOrCopy(cast.nodeType), idx, None)
case OptionApply(Select(_, ElementSymbol(idx))) => createColumnConverter(n, idx, None)
// Tuple-like node: compile each child; the empty product maps to Unit.
case ProductNode(ch) =>
if(ch.isEmpty) new UnitResultConverter
else new ProductResultConverter(ch.map(n => compile(n))(collection.breakOut): _*)
// Option with a fallback value.
case GetOrElse(ch, default) =>
createGetOrElseResultConverter(compile(ch).asInstanceOf[ResultConverter[Domain, Option[Any]]], default)
// User-defined mapping (e.g. mapped case classes) over a compiled child.
case TypeMapping(ch, mapper, _) =>
createTypeMappingResultConverter(compile(ch).asInstanceOf[ResultConverter[Domain, Any]], mapper)
// Rebuild an Option value from a discriminator column (0 = None) and data.
case RebuildOption(disc, data) =>
val discConv = createGetOrElseResultConverter(compile(disc).asInstanceOf[ResultConverter[Domain, Option[Int]]], () => 0)
val dataConv = compile(data).asInstanceOf[ResultConverter[Domain, Any]]
createOptionRebuildingConverter(discConv, dataConv)
case n =>
throw new SlickException("Unexpected node in ResultSetMapping: "+n)
}
/** Wrap an Option converter with a fallback for the None case. */
def createGetOrElseResultConverter[T](rc: ResultConverter[Domain, Option[T]], default: () => T): ResultConverter[Domain, T] =
new GetOrElseResultConverter[Domain, T](rc, default)
/** Apply a user-defined to/from-base mapping around a compiled converter. */
def createTypeMappingResultConverter(rc: ResultConverter[Domain, Any], mapper: MappedScalaType.Mapper): ResultConverter[Domain, Any] =
new TypeMappingResultConverter(rc, mapper.toBase, mapper.toMapped)
/** Combine a discriminator and a data converter into an Option converter. */
def createOptionRebuildingConverter(discriminator: ResultConverter[Domain, Int], data: ResultConverter[Domain, Any]): ResultConverter[Domain, Option[Any]] =
new OptionRebuildingConverter(discriminator, data)
def createColumnConverter(n: Node, idx: Int, column: Option[FieldSymbol]): ResultConverter[Domain, _]
/** Compile a node and wrap the result (with its type) in a CompiledMapping. */
def compileMapping(n: Node): CompiledMapping = {
val rc = compile(n)
ResultConverterCompiler.logger.debug("Compiled ResultConverter", rc)
CompiledMapping(rc, n.nodeType)
}
}
object ResultConverterCompiler {
  /** Logger shared by all compiler instances; used by `compileMapping` for debug output. */
  protected lazy val logger: SlickLogger =
    new SlickLogger(LoggerFactory.getLogger(classOf[ResultConverterCompiler[_]]))
}
/** An AST leaf node that carries an already-compiled ResultConverter together
  * with the type of the values it produces. */
final case class CompiledMapping(converter: ResultConverter[_ <: ResultConverterDomain, _], tpe: Type) extends NullaryNode with TypedNode {
  type Self = CompiledMapping

  def nodeRebuild = copy()

  /** Extends the default dump info with the wrapped converter and suppresses
    * the main info string (the converter child carries all the detail). */
  override def getDumpInfo = {
    val base = super.getDumpInfo
    base.copy(mainInfo = "", children = base.children ++ Vector(("converter", converter)))
  }
}
| easel/slick | slick/src/main/scala/slick/relational/ResultConverterCompiler.scala | Scala | bsd-2-clause | 3,569 |
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala
import java.util.concurrent.TimeUnit
import scala.concurrent.duration.Duration
import com.mongodb.CursorType
import com.mongodb.async.SingleResultCallback
import com.mongodb.async.client.FindIterable
import org.mongodb.scala.bson.conversions.Bson
import org.mongodb.scala.internal.ObservableHelper._
import org.mongodb.scala.model.Collation
/**
* Observable interface for Find.
*
* @param wrapped the underlying java FindObservable
* @tparam TResult The type of the result.
* @since 1.0
*/
case class FindObservable[TResult](private val wrapped: FindIterable[TResult]) extends Observable[TResult] {

  /** Runs the given configuration call against the wrapped Java iterable and
    * returns this observable, so that the setters below can be chained fluently. */
  private def configured(set: => Any): FindObservable[TResult] = {
    set
    this
  }

  /**
   * Returns an Observable that emits only the first result of the query.
   *
   * **Note:** a limit of 1 is applied in the background.
   *
   * @return a Observable which will return the first item
   */
  def first(): SingleObservable[TResult] = observe(wrapped.first(_: SingleResultCallback[TResult]))

  /**
   * Sets the query filter to apply to the query.
   *
   * [[http://docs.mongodb.org/manual/reference/method/db.collection.find/ Filter]]
   * @param filter the filter, which may be null
   * @return this
   */
  def filter(filter: Bson): FindObservable[TResult] =
    configured(wrapped.filter(filter))

  /**
   * Sets the maximum number of documents to return.
   *
   * [[http://docs.mongodb.org/manual/reference/method/cursor.limit/#cursor.limit Limit]]
   * @param limit the limit
   * @return this
   */
  def limit(limit: Int): FindObservable[TResult] =
    configured(wrapped.limit(limit))

  /**
   * Sets the number of documents to skip before returning results.
   *
   * [[http://docs.mongodb.org/manual/reference/method/cursor.skip/#cursor.skip Skip]]
   * @param skip the number of documents to skip
   * @return this
   */
  def skip(skip: Int): FindObservable[TResult] =
    configured(wrapped.skip(skip))

  /**
   * Sets the maximum execution time on the server for this operation.
   *
   * [[http://docs.mongodb.org/manual/reference/operator/meta/maxTimeMS/ Max Time]]
   * @param duration the duration
   * @return this
   */
  def maxTime(duration: Duration): FindObservable[TResult] =
    configured(wrapped.maxTime(duration.toMillis, TimeUnit.MILLISECONDS))

  /**
   * Sets the maximum time the server waits for new documents on a
   * TAILABLE_AWAIT cursor; ignored for other cursor types and for zero values.
   * Sent to the server as "maxTimeMS" on getMore (servers >= 3.2 only).
   *
   * [[http://docs.mongodb.org/manual/reference/operator/meta/maxTimeMS/ Max Time]]
   * @param duration the duration
   * @return this
   * @since 1.1
   */
  def maxAwaitTime(duration: Duration): FindObservable[TResult] =
    configured(wrapped.maxAwaitTime(duration.toMillis, TimeUnit.MILLISECONDS))

  /**
   * Sets the query modifiers to apply to this operation.
   *
   * [[http://docs.mongodb.org/manual/reference/operator/query-modifier/ Query Modifiers]]
   * @param modifiers the query modifiers to apply, which may be null
   * @return this
   * @deprecated use the individual setters instead
   */
  @deprecated("use the individual setters instead", "2.2")
  def modifiers(modifiers: Bson): FindObservable[TResult] =
    configured(wrapped.modifiers(modifiers))

  /**
   * Sets a document describing the fields to return for all matching documents.
   *
   * [[http://docs.mongodb.org/manual/reference/method/db.collection.find/ Projection]]
   * @param projection the projection document, which may be null
   * @return this
   */
  def projection(projection: Bson): FindObservable[TResult] =
    configured(wrapped.projection(projection))

  /**
   * Sets the sort criteria to apply to the query.
   *
   * [[http://docs.mongodb.org/manual/reference/method/cursor.sort/ Sort]]
   * @param sort the sort criteria, which may be null
   * @return this
   */
  def sort(sort: Bson): FindObservable[TResult] =
    configured(wrapped.sort(sort))

  /**
   * Disables the server's idle-cursor timeout (normally 10 minutes) for this cursor.
   *
   * @param noCursorTimeout true if cursor timeout is disabled
   * @return this
   */
  def noCursorTimeout(noCursorTimeout: Boolean): FindObservable[TResult] =
    configured(wrapped.noCursorTimeout(noCursorTimeout))

  /**
   * Enables oplog replay. Users should not set this under normal circumstances.
   *
   * @param oplogReplay if oplog replay is enabled
   * @return this
   */
  def oplogReplay(oplogReplay: Boolean): FindObservable[TResult] =
    configured(wrapped.oplogReplay(oplogReplay))

  /**
   * Allows partial results from a sharded cluster when one or more shards are
   * unreachable, instead of raising an error.
   *
   * @param partial if partial results for sharded clusters is enabled
   * @return this
   */
  def partial(partial: Boolean): FindObservable[TResult] =
    configured(wrapped.partial(partial))

  /**
   * Sets the cursor type.
   *
   * @param cursorType the cursor type
   * @return this
   */
  def cursorType(cursorType: CursorType): FindObservable[TResult] =
    configured(wrapped.cursorType(cursorType))

  /**
   * Sets the collation options.
   *
   * @param collation the collation options to use
   * @return this
   * @since 1.2
   * @note A null value represents the server default.
   * @note Requires MongoDB 3.4 or greater
   */
  def collation(collation: Collation): FindObservable[TResult] =
    configured(wrapped.collation(collation))

  /**
   * Sets the comment to attach to the query. A null value means no comment.
   *
   * @param comment the comment
   * @return this
   * @since 2.2
   */
  def comment(comment: String): FindObservable[TResult] =
    configured(wrapped.comment(comment))

  /**
   * Sets the hint for which index to use. A null value means no hint is set.
   *
   * @param hint the hint
   * @return this
   * @since 2.2
   */
  def hint(hint: Bson): FindObservable[TResult] =
    configured(wrapped.hint(hint))

  /**
   * Sets the index hint by name. If [[hint]] is set, it takes precedence over
   * this hint string.
   *
   * @param hint the name of the index which should be used for the operation
   * @return this
   * @since 2.8
   */
  def hintString(hint: String): FindObservable[TResult] =
    configured(wrapped.hintString(hint))

  /**
   * Sets the exclusive upper bound for a specific index. A null value means no max is set.
   *
   * @param max the max
   * @return this
   * @since 2.2
   */
  def max(max: Bson): FindObservable[TResult] =
    configured(wrapped.max(max))

  /**
   * Sets the inclusive lower bound for a specific index. A null value means no min is set.
   *
   * @param min the min
   * @return this
   * @since 2.2
   */
  def min(min: Bson): FindObservable[TResult] =
    configured(wrapped.min(min))

  /**
   * Sets the maximum number of documents or index keys to scan for this query.
   * Zero or negative values are ignored (server default applies).
   *
   * @param maxScan the maxScan
   * @return this
   * @since 2.2
   */
  @deprecated("Deprecated as of MongoDB 4.0 release", "2.4")
  def maxScan(maxScan: Long): FindObservable[TResult] =
    configured(wrapped.maxScan(maxScan))

  /**
   * Sets returnKey. If true, the operation returns only the index keys of the
   * matching documents.
   *
   * @param returnKey the returnKey
   * @return this
   * @since 2.2
   */
  def returnKey(returnKey: Boolean): FindObservable[TResult] =
    configured(wrapped.returnKey(returnKey))

  /**
   * Sets showRecordId. If true, a `\$recordId` field is added to the returned documents.
   *
   * @param showRecordId the showRecordId
   * @return this
   * @since 2.2
   */
  def showRecordId(showRecordId: Boolean): FindObservable[TResult] =
    configured(wrapped.showRecordId(showRecordId))

  /**
   * Sets the number of documents to return per batch.
   *
   * @param batchSize the batch size
   * @return this
   * @since 2.7
   */
  def batchSize(batchSize: Int): FindObservable[TResult] =
    configured(wrapped.batchSize(batchSize))

  /**
   * Sets the snapshot flag. If true, the cursor will not return a document
   * more than once because of an intervening write operation.
   *
   * @param snapshot the snapshot
   * @return this
   * @since 2.2
   */
  @deprecated("Deprecated in MongoDB 3.6 release and removed in MongoDB 4.0 release", "2.4")
  def snapshot(snapshot: Boolean): FindObservable[TResult] =
    configured(wrapped.snapshot(snapshot))

  override def subscribe(observer: Observer[_ >: TResult]): Unit = observe(wrapped).subscribe(observer)
}
| rozza/mongo-scala-driver | driver/src/main/scala/org/mongodb/scala/FindObservable.scala | Scala | apache-2.0 | 9,749 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.memory
import org.scalatest.PrivateMethodTester
import org.apache.spark.SparkConf
import org.apache.spark.storage.{MemoryStore, TestBlockId}
/**
 * Tests for [[UnifiedMemoryManager]]: execution and storage share one memory
 * region. Execution may grow by evicting cached blocks, but storage can never
 * evict memory already claimed by execution, and the storageFraction portion
 * of the region is immune to eviction by execution.
 */
class UnifiedMemoryManagerSuite extends MemoryManagerSuite with PrivateMethodTester {
  // Block id used for every storage request in this suite.
  private val dummyBlock = TestBlockId("--")
  // Value passed as spark.memory.storageFraction when building managers below.
  private val storageFraction: Double = 0.5

  /**
   * Make a [[UnifiedMemoryManager]] and a [[MemoryStore]] with limited class dependencies.
   */
  private def makeThings(maxMemory: Long): (UnifiedMemoryManager, MemoryStore) = {
    val mm = createMemoryManager(maxMemory)
    val ms = makeMemoryStore(mm)
    (mm, ms)
  }

  // Builds a manager whose entire heap (spark.memory.fraction = 1) is unified memory.
  override protected def createMemoryManager(
      maxOnHeapExecutionMemory: Long,
      maxOffHeapExecutionMemory: Long): UnifiedMemoryManager = {
    val conf = new SparkConf()
      .set("spark.memory.fraction", "1")
      .set("spark.testing.memory", maxOnHeapExecutionMemory.toString)
      .set("spark.memory.offHeap.size", maxOffHeapExecutionMemory.toString)
      .set("spark.memory.storageFraction", storageFraction.toString)
    UnifiedMemoryManager(conf, numCores = 1)
  }

  test("basic execution memory") {
    val maxMemory = 1000L
    val taskAttemptId = 0L
    val (mm, _) = makeThings(maxMemory)
    assert(mm.executionMemoryUsed === 0L)
    assert(mm.acquireExecutionMemory(10L, taskAttemptId, MemoryMode.ON_HEAP) === 10L)
    assert(mm.executionMemoryUsed === 10L)
    assert(mm.acquireExecutionMemory(100L, taskAttemptId, MemoryMode.ON_HEAP) === 100L)
    // Acquire up to the max
    assert(mm.acquireExecutionMemory(1000L, taskAttemptId, MemoryMode.ON_HEAP) === 890L)
    assert(mm.executionMemoryUsed === maxMemory)
    assert(mm.acquireExecutionMemory(1L, taskAttemptId, MemoryMode.ON_HEAP) === 0L)
    assert(mm.executionMemoryUsed === maxMemory)
    mm.releaseExecutionMemory(800L, taskAttemptId, MemoryMode.ON_HEAP)
    assert(mm.executionMemoryUsed === 200L)
    // Acquire after release
    assert(mm.acquireExecutionMemory(1L, taskAttemptId, MemoryMode.ON_HEAP) === 1L)
    assert(mm.executionMemoryUsed === 201L)
    // Release beyond what was acquired
    mm.releaseExecutionMemory(maxMemory, taskAttemptId, MemoryMode.ON_HEAP)
    assert(mm.executionMemoryUsed === 0L)
  }

  test("basic storage memory") {
    val maxMemory = 1000L
    val (mm, ms) = makeThings(maxMemory)
    assert(mm.storageMemoryUsed === 0L)
    assert(mm.acquireStorageMemory(dummyBlock, 10L, evictedBlocks))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.storageMemoryUsed === 10L)
    assert(mm.acquireStorageMemory(dummyBlock, 100L, evictedBlocks))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.storageMemoryUsed === 110L)
    // Acquire more than the max, not granted
    assert(!mm.acquireStorageMemory(dummyBlock, maxMemory + 1L, evictedBlocks))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.storageMemoryUsed === 110L)
    // Acquire up to the max, requests after this are still granted due to LRU eviction
    assert(mm.acquireStorageMemory(dummyBlock, maxMemory, evictedBlocks))
    assertEvictBlocksToFreeSpaceCalled(ms, 110L)
    assert(mm.storageMemoryUsed === 1000L)
    assert(evictedBlocks.nonEmpty)
    evictedBlocks.clear()
    assert(mm.acquireStorageMemory(dummyBlock, 1L, evictedBlocks))
    assertEvictBlocksToFreeSpaceCalled(ms, 1L)
    assert(evictedBlocks.nonEmpty)
    evictedBlocks.clear()
    // Note: We evicted 1 byte to put another 1-byte block in, so the storage memory used remains at
    // 1000 bytes. This is different from real behavior, where the 1-byte block would have evicted
    // the 1000-byte block entirely. This is set up differently so we can write finer-grained tests.
    assert(mm.storageMemoryUsed === 1000L)
    mm.releaseStorageMemory(800L)
    assert(mm.storageMemoryUsed === 200L)
    // Acquire after release
    assert(mm.acquireStorageMemory(dummyBlock, 1L, evictedBlocks))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.storageMemoryUsed === 201L)
    mm.releaseAllStorageMemory()
    assert(mm.storageMemoryUsed === 0L)
    assert(mm.acquireStorageMemory(dummyBlock, 1L, evictedBlocks))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.storageMemoryUsed === 1L)
    // Release beyond what was acquired
    mm.releaseStorageMemory(100L)
    assert(mm.storageMemoryUsed === 0L)
  }

  test("execution evicts storage") {
    val maxMemory = 1000L
    val taskAttemptId = 0L
    val (mm, ms) = makeThings(maxMemory)
    // Acquire enough storage memory to exceed the storage region
    assert(mm.acquireStorageMemory(dummyBlock, 750L, evictedBlocks))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.executionMemoryUsed === 0L)
    assert(mm.storageMemoryUsed === 750L)
    // Execution needs to request 250 bytes to evict storage memory
    assert(mm.acquireExecutionMemory(100L, taskAttemptId, MemoryMode.ON_HEAP) === 100L)
    assert(mm.executionMemoryUsed === 100L)
    assert(mm.storageMemoryUsed === 750L)
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    // Execution wants 200 bytes but only 150 are free, so storage is evicted
    assert(mm.acquireExecutionMemory(200L, taskAttemptId, MemoryMode.ON_HEAP) === 200L)
    assert(mm.executionMemoryUsed === 300L)
    assert(mm.storageMemoryUsed === 700L)
    assertEvictBlocksToFreeSpaceCalled(ms, 50L)
    assert(evictedBlocks.nonEmpty)
    evictedBlocks.clear()
    mm.releaseAllStorageMemory()
    require(mm.executionMemoryUsed === 300L)
    require(mm.storageMemoryUsed === 0, "bad test: all storage memory should have been released")
    // Acquire some storage memory again, but this time keep it within the storage region
    assert(mm.acquireStorageMemory(dummyBlock, 400L, evictedBlocks))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.storageMemoryUsed === 400L)
    assert(mm.executionMemoryUsed === 300L)
    // Execution cannot evict storage because the latter is within the storage fraction,
    // so grant only what's remaining without evicting anything, i.e. 1000 - 300 - 400 = 300
    assert(mm.acquireExecutionMemory(400L, taskAttemptId, MemoryMode.ON_HEAP) === 300L)
    assert(mm.executionMemoryUsed === 600L)
    assert(mm.storageMemoryUsed === 400L)
    assertEvictBlocksToFreeSpaceNotCalled(ms)
  }

  test("execution memory requests smaller than free memory should evict storage (SPARK-12165)") {
    val maxMemory = 1000L
    val taskAttemptId = 0L
    val (mm, ms) = makeThings(maxMemory)
    // Acquire enough storage memory to exceed the storage region size
    assert(mm.acquireStorageMemory(dummyBlock, 700L, evictedBlocks))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.executionMemoryUsed === 0L)
    assert(mm.storageMemoryUsed === 700L)
    // SPARK-12165: previously, MemoryStore would not evict anything because it would
    // mistakenly think that the 300 bytes of free space was still available even after
    // using it to expand the execution pool. Consequently, no storage memory was released
    // and the following call granted only 300 bytes to execution.
    assert(mm.acquireExecutionMemory(500L, taskAttemptId, MemoryMode.ON_HEAP) === 500L)
    assertEvictBlocksToFreeSpaceCalled(ms, 200L)
    assert(mm.storageMemoryUsed === 500L)
    assert(mm.executionMemoryUsed === 500L)
    assert(evictedBlocks.nonEmpty)
  }

  test("storage does not evict execution") {
    val maxMemory = 1000L
    val taskAttemptId = 0L
    val (mm, ms) = makeThings(maxMemory)
    // Acquire enough execution memory to exceed the execution region
    assert(mm.acquireExecutionMemory(800L, taskAttemptId, MemoryMode.ON_HEAP) === 800L)
    assert(mm.executionMemoryUsed === 800L)
    assert(mm.storageMemoryUsed === 0L)
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    // Storage should not be able to evict execution
    assert(mm.acquireStorageMemory(dummyBlock, 100L, evictedBlocks))
    assert(mm.executionMemoryUsed === 800L)
    assert(mm.storageMemoryUsed === 100L)
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(!mm.acquireStorageMemory(dummyBlock, 250L, evictedBlocks))
    assert(mm.executionMemoryUsed === 800L)
    assert(mm.storageMemoryUsed === 100L)
    // Do not attempt to evict blocks, since evicting will not free enough memory:
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    mm.releaseExecutionMemory(maxMemory, taskAttemptId, MemoryMode.ON_HEAP)
    mm.releaseStorageMemory(maxMemory)
    // Acquire some execution memory again, but this time keep it within the execution region
    assert(mm.acquireExecutionMemory(200L, taskAttemptId, MemoryMode.ON_HEAP) === 200L)
    assert(mm.executionMemoryUsed === 200L)
    assert(mm.storageMemoryUsed === 0L)
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    // Storage should still not be able to evict execution
    assert(mm.acquireStorageMemory(dummyBlock, 750L, evictedBlocks))
    assert(mm.executionMemoryUsed === 200L)
    assert(mm.storageMemoryUsed === 750L)
    assertEvictBlocksToFreeSpaceNotCalled(ms) // since there were 800 bytes free
    assert(!mm.acquireStorageMemory(dummyBlock, 850L, evictedBlocks))
    assert(mm.executionMemoryUsed === 200L)
    assert(mm.storageMemoryUsed === 750L)
    // Do not attempt to evict blocks, since evicting will not free enough memory:
    assertEvictBlocksToFreeSpaceNotCalled(ms)
  }

  test("small heap") {
    val systemMemory = 1024 * 1024
    val reservedMemory = 300 * 1024
    val memoryFraction = 0.8
    val conf = new SparkConf()
      .set("spark.memory.fraction", memoryFraction.toString)
      .set("spark.testing.memory", systemMemory.toString)
      .set("spark.testing.reservedMemory", reservedMemory.toString)
    val mm = UnifiedMemoryManager(conf, numCores = 1)
    // Usable memory is (system - reserved) scaled by the memory fraction.
    val expectedMaxMemory = ((systemMemory - reservedMemory) * memoryFraction).toLong
    assert(mm.maxMemory === expectedMaxMemory)
    // Try using a system memory that's too small
    val conf2 = conf.clone().set("spark.testing.memory", (reservedMemory / 2).toString)
    val exception = intercept[IllegalArgumentException] {
      UnifiedMemoryManager(conf2, numCores = 1)
    }
    assert(exception.getMessage.contains("larger heap size"))
  }

  test("execution can evict cached blocks when there are multiple active tasks (SPARK-12155)") {
    val conf = new SparkConf()
      .set("spark.memory.fraction", "1")
      .set("spark.memory.storageFraction", "0")
      .set("spark.testing.memory", "1000")
    val mm = UnifiedMemoryManager(conf, numCores = 2)
    val ms = makeMemoryStore(mm)
    assert(mm.maxMemory === 1000)
    // Have two tasks each acquire some execution memory so that the memory pool registers that
    // there are two active tasks:
    assert(mm.acquireExecutionMemory(100L, 0, MemoryMode.ON_HEAP) === 100L)
    assert(mm.acquireExecutionMemory(100L, 1, MemoryMode.ON_HEAP) === 100L)
    // Fill up all of the remaining memory with storage.
    assert(mm.acquireStorageMemory(dummyBlock, 800L, evictedBlocks))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.storageMemoryUsed === 800)
    assert(mm.executionMemoryUsed === 200)
    // A task should still be able to allocate 100 bytes execution memory by evicting blocks
    assert(mm.acquireExecutionMemory(100L, 0, MemoryMode.ON_HEAP) === 100L)
    assertEvictBlocksToFreeSpaceCalled(ms, 100L)
    assert(mm.executionMemoryUsed === 300)
    assert(mm.storageMemoryUsed === 700)
    assert(evictedBlocks.nonEmpty)
  }
}
| chenc10/Spark-PAF | core/src/test/scala/org/apache/spark/memory/UnifiedMemoryManagerSuite.scala | Scala | apache-2.0 | 12,208 |
object Foo:
  // NOTE(review): compiler-crash regression fixture — the `???` bodies are
  // intentional and must stay for the initial clean compile. `Bar` is
  // presumably declared in a sibling source of this sbt test; confirm there.
  opaque type BlaBla[+T, D] = Int
  extension [T, D](token: BlaBla[T, D]) def data: D = ???
  //To cause the crash, after initial clean compilation
  //replace `???` with `value.data` to cause the compiler crash
  def foo[W <: Int](value: Bar.BlaBla[W]): Unit = ??? //value.data
| dotty-staging/dotty | sbt-test/opaques/i12927/src/main/scala/Foo.scala | Scala | apache-2.0 | 286 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.impl.storage
import slamdata.Predef.{Int, String}
import quasar.contrib.cats.stateT._
import quasar.contrib.cats.effect.stateT.catsStateTEffect
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.Random
import cats.data.StateT
import cats.effect.{Resource, IO}
import cats.syntax.applicative._
import scalaz.IMap
import scalaz.std.anyVal._
import scalaz.std.string._
import shims.{monadToScalaz, monoidToCats}
// Runs the generic IndexedStoreSpec against the purely-functional store
// implementation, using StateT over IO with an IMap as the backing state.
final class PureIndexedStoreSpec extends
  IndexedStoreSpec[StateT[IO, IMap[Int, String], ?], Int, String] {
  // A fresh, empty store lifted into Resource as required by the parent spec.
  val emptyStore =
    PureIndexedStore[StateT[IO, IMap[Int, String], ?], Int, String]
    .pure[Resource[StateT[IO, IMap[Int, String], ?], ?]]
  // Random Int keys keep individual test cases independent of each other.
  val freshIndex = StateT.liftF(IO(Random.nextInt()))
  val valueA = "A"
  val valueB = "B"
}
| slamdata/quasar | impl/src/test/scala/quasar/impl/storage/PureIndexedStoreSpec.scala | Scala | apache-2.0 | 1,414 |
object Driver {
  /**
   * Entry point: runs `Solution.candy` on a few sample rating arrays and
   * prints each result, one per line.
   *
   * Note: the original used the deprecated procedure syntax
   * (`def main(...) { ... }`); an explicit `: Unit =` is used instead.
   */
  def main(args: Array[String]): Unit = {
    val solution = new Solution()
    val samples = Seq(
      Array(1, 0, 2),
      Array(1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1),
      Array(5, 5, 4, 3, 3, 5, 8, 9, 7)
    )
    samples.foreach(ratings => println(solution.candy(ratings)))
  }
}
| shengmin/coding-problem | leetcode/candy/Driver.scala | Scala | mit | 233 |
package com.github.symcal
import spire.algebra._
import spire.implicits._
import scala.language.implicitConversions
/**
 * Base class of the symbolic-expression AST over a spire Ring `T`.
 * Arithmetic operators build (and eagerly flatten) n-ary Sum/Product nodes;
 * `diff`, `subs`, and `expand` always return simplified results.
 */
sealed abstract class Expr[T: Ring : Eq] {
  /** Addition; adjacent [[Sum]] nodes are flattened into a single n-ary sum. */
  def +(x: Expr[T]): Sum[T] = (this, x) match {
    case (Sum(y@_*), Sum(z@_*)) => Sum(y ++ z: _*)
    case (Sum(y@_*), _) => Sum(y :+ x: _*)
    case (_, Sum(z@_*)) => Sum(Seq(this) ++ z: _*)
    case (_, _) => Sum(this, x)
  }
  /** Subtraction, represented as a flattened Sum with negated right-hand terms. */
  def -(x: Expr[T]): Sum[T] = (this, x) match {
    case (Sum(y@_*), Sum(z@_*)) => Sum(y ++ z.map(-_): _*)
    case (Sum(y@_*), _) => Sum(y :+ -x: _*)
    case (_, Sum(z@_*)) => Sum(Seq(this) ++ z.map(-_): _*)
    case (_, _) => Sum(this, -x)
  }
  /** Negation; a double negation is cancelled immediately instead of nesting. */
  def unary_- : Expr[T] = this match {
    case Minus(x) ⇒ x
    case y ⇒ Minus(y)
  }
  /** Multiplication; adjacent [[Product]] nodes are flattened into one n-ary product. */
  def *(x: Expr[T]): Product[T] = (this, x) match {
    case (Product(y@_*), Product(z@_*)) => Product(y ++ z: _*)
    case (Product(y@_*), _) => Product(y :+ x: _*)
    case (_, Product(z@_*)) => Product(Seq(this) ++ z: _*)
    case (_, _) => Product(this, x)
  }
  // The '#' character is needed for precedence
  /** Integer power with a constant exponent `d`. */
  def #^(d: Int): IntPow[T] = IntPow(this, Const(d))
  /** Numeric value of a closed expression; [[Var]] subexpressions make this throw. */
  def toValue: T
  /** Derivative with respect to `x`, returned in simplified form. */
  final def diff(x: Var[T]): Expr[T] = diffInternal(x).simplify
  private[symcal] def diffInternal(x: Var[T]): Expr[T]
  def simplify: Expr[T]
  /** Substitute expression `e` for variable `v`, then simplify. */
  final def subs(v: Var[T], e: Expr[T]): Expr[T] = subsInternal(v, e).simplify
  private[symcal] def subsInternal(v: Var[T], e: Expr[T]): Expr[T]
  // Parenthesize when this node binds more loosely than the surrounding context.
  final private[symcal] def stringForm(level: Int): String =
    if (precedenceLevel < level)
      "(" + printInternal + ")"
    else
      printInternal
  def precedenceLevel: Int
  protected def printInternal: String
  /** Render the expression as a string with no enclosing parentheses. */
  final def print: String = stringForm(0)
  /** Set of all variables occurring anywhere in this expression. */
  def freeVars: Set[Var[T]] = Expr.freeVars(this)
  def isConst: Boolean = false
  def isSum: Boolean = false
  def isProduct: Boolean = false
  /** Fully expand products of sums and non-negative integer powers into a sum of monomials. */
  def expand: Expr[T] = expandInternal.simplify
  private[symcal] def expandInternal: Sum[T]
}
object Expr {
  /** If this implicit conversion is moved to `package.scala`, the expression 4 + Var('x) does not compile.
    */
  implicit def valueToConst[T: Ring : Eq](x: T): Const[T] = Const(x)
  // Precedence levels consumed by stringForm; larger values bind more tightly.
  final val precedenceOfAdd = 20
  final val precedenceOfSubtract = 30
  final val precedenceOfMinus = 40
  final val precedenceOfMultiply = 50
  final val precedenceOfIntPow = 60
  final val precedenceOfConst = 100
  /** Collect every variable occurring in `e` by structural recursion over the AST. */
  def freeVars[T: Ring : Eq](e: Expr[T]): Set[Var[T]] = e match {
    case Const(_) ⇒ Set()
    case Minus(x) ⇒ freeVars(x)
    case Var(name) ⇒ Set(Var(name))
    case IntPow(x, _) ⇒ freeVars(x)
    case Sum(es@_*) ⇒ es.flatMap(e ⇒ freeVars(e)).toSet
    case Product(es@_*) ⇒ es.flatMap(e ⇒ freeVars(e)).toSet
  }
  /** The additive identity of the ring `T`, as a constant expression. */
  def constZero[T: Ring : Eq] = Const(implicitly[Ring[T]].zero)
  /** The multiplicative identity of the ring `T`, as a constant expression. */
  def constOne[T: Ring : Eq] = Const(implicitly[Ring[T]].one)
  /** Embed an `Int` into the ring `T` as a constant expression. */
  def intToConst[T: Ring : Eq](x: Int): Const[T] = Const(implicitly[Ring[T]].fromInt(x))
  /** Auxiliary function. Returns the sequence of coefficients and powers for multinomial expansion.
    * For example, to compute the expansion `(x + y)^3` we call `getTermCoeffs(2, 3)`, which returns the sequence
    * {{{
    *   Seq(
    *     (1, Seq(3, 0)),
    *     (3, Seq(2, 1)),
    *     (3, Seq(1, 2)),
    *     (1, Seq(0, 3))
    *   )
    * }}}
    * This allows us to build the expansion as `x^3 + 3*x^2*y + 3*x*y^2 + y^3`.
    *
    * @param len   Length of the sum list to be expanded.
    * @param power Power of the expansion.
    * @return Sequence of coefficients and power indices for individual subterms.
    */
  private[symcal] def getTermCoeffs(len: Int, power: Int): Seq[(Int, Seq[Int])] = {
    /** Auxiliary function. Returns the sequence of combination numbers, together with the sequence of indices.
      * For example, `getCombinationNumbers(4)` returns `Seq( (4, 1), (3, 4), (2, 6), (1, 4), (0, 1) )`
      * which corresponds to the coefficients in the expansion `(x + 1) ^ 4 = x^4 + 4*x^3 + 6*x^2 + 4*x + 1`.
      *
      * @param total The number of elements.
      * @return Sequence of pairs `(i, c)` where `i` goes from `total` to `0` and
      *         `c` is equal to the number of combinations of `i` from `total`.
      *         `c == total! / (i! * (total - i)! )`
      */
    def getCombinationNumbers(total: Int): Seq[(Int, Int)] = {
      val ordering = total to 0 by -1
      // scanLeft computes binomial coefficients incrementally via
      // C(total, i-1) = C(total, i) * i / (total - i + 1); `zip` drops the
      // one extra element produced by scanLeft.
      ordering zip ordering.scanLeft(1) { case (c, i) ⇒ c * i / (total - i + 1) }
    }
    // Recursion over the m elements of the sum: assign a power p to the first
    // element (with its binomial coefficient), then distribute the remaining
    // `total - p` over the other m - 1 elements.
    def getTermCoeffsRec(m: Int, total: Int): Seq[(Int, List[Int])] = {
      if (m <= 1 || total == 0) {
        Seq((1, List.fill[Int](m)(total)))
      } else {
        val result: Seq[(Int, List[Int])] = for {
          pC <- getCombinationNumbers(total)
          (p, c) = pC
          termCoeffs <- getTermCoeffsRec(m - 1, total - p)
          (coeff, indices) = termCoeffs
        } yield
          (c * coeff, p :: indices)
        result
      }
    }
    getTermCoeffsRec(len, power)
  }
}
/** A literal constant holding a value of the ring type `T`. */
final case class Const[T: Ring : Eq](value: T) extends Expr[T] {
  /** Evaluating a constant simply yields the wrapped value. */
  override def toValue: T = value

  /** A constant is already in its simplest form. */
  override def simplify: Expr[T] = this

  override def isConst: Boolean = true

  /** The derivative of a constant with respect to any variable is zero. */
  private[symcal] def diffInternal(x: Var[T]): Expr[T] = Expr.constZero[T]

  /** Substitution never affects a constant. */
  override def subsInternal(v: Var[T], e: Expr[T]): Expr[T] = this

  override def printInternal: String = value.toString

  /** Constants bind most tightly and never need parentheses. */
  override def precedenceLevel: Int = Expr.precedenceOfConst

  /** Viewed as an expansion, a constant is the one-term sum of itself. */
  override def expandInternal: Sum[T] = Sum(this)
}
/** Arithmetic negation of a subexpression. */
final case class Minus[T: Ring : Eq](x: Expr[T]) extends Expr[T] {
  override def toValue: T = -x.toValue

  /** d(-f)/dz = -(df/dz). */
  private[symcal] def diffInternal(z: Var[T]): Expr[T] = -x.diff(z)

  override def subsInternal(v: Var[T], e: Expr[T]): Expr[T] = (-x.subsInternal(v, e)).simplify

  override def precedenceLevel: Int = Expr.precedenceOfMinus

  override protected def printInternal: String = "-" + x.stringForm(precedenceLevel)

  /** Simplifies the operand, then folds constants and collapses double negation. */
  override def simplify: Expr[T] = x.simplify match {
    case Const(value) => Const(-value)
    case Minus(inner) => inner
    case simplified => Minus(simplified)
  }

  /** Expands by pushing the negation into every term of the operand's expansion. */
  override def expandInternal: Sum[T] = Sum(x.expandInternal.es.map(term => -term): _*)
}
/** A named symbolic variable. */
final case class Var[T: Ring : Eq](name: Symbol) extends Expr[T] {
  /** Variables have no numeric value, so evaluation is an error. */
  override def toValue: T =
    throw new Exception(s"Cannot evaluate toValue for an expression containing a variable ${name.name}.")

  /** d(x)/dx = 1; the derivative with respect to any other variable is 0. */
  private[symcal] def diffInternal(x: Var[T]): Expr[T] =
    if (x.name == name) Expr.constOne[T] else Expr.constZero[T]

  /** Replaces this variable by `e` when `v` carries the same name. */
  override def subsInternal(v: Var[T], e: Expr[T]): Expr[T] =
    if (v.name == name) e else this

  override def printInternal: String = name.name

  override def precedenceLevel: Int = Expr.precedenceOfConst

  /** A bare variable is already simplified. */
  override def simplify: Expr[T] = this

  override def expandInternal: Sum[T] = Sum(this)
}
/** A base expression raised to a constant integer power. */
final case class IntPow[T: Ring : Eq](x: Expr[T], d: Const[Int]) extends Expr[T] {
  override def toValue: T = implicitly[Ring[T]].pow(x.toValue, d.value)
  /** Power rule: d(x^n)/dz = n * x' * x^(n-1), with shortcuts for n = 0 and n = 1. */
  private[symcal] def diffInternal(z: Var[T]): Expr[T] = d match {
    case Const(0) => Expr.constZero[T]
    case Const(1) => x.diffInternal(z) // no need to `diff` here because `simplify` will follow
    case _ => Expr.intToConst[T](d.value) * x.diff(z) * IntPow(x, Const(d.value - 1))
  }
  /** Folds constant bases into a constant and eliminates exponents 0 and 1. */
  override def simplify: Expr[T] = (x.simplify, d) match {
    case (Const(a), _) => Const(IntPow(Const(a), d).toValue)
    case (xs, Const(1)) => xs
    case (_, Const(0)) => Expr.constOne[T]
    case (xs, _) ⇒ IntPow(xs, d)
  }
  override def subsInternal(v: Var[T], e: Expr[T]): Expr[T] = IntPow(x.subsInternal(v, e), d).simplify
  override def printInternal: String = x.stringForm(precedenceLevel + 1) + "^" + d.print
  override def precedenceLevel: Int = Expr.precedenceOfIntPow
  /** Multinomial expansion of (a1 + ... + ak)^d via Expr.getTermCoeffs. */
  override def expandInternal: Sum[T] =
    if (d.toValue >= 0) {
      // We can expand only if the exponent is non-negative.
      val xs = x.expandInternal.es
      (xs.headOption, xs.drop(1)) match {
        case (None, _) ⇒ Sum(IntPow(Expr.constZero[T], d)) // Empty Sum is equivalent to 0.
        case (Some(head), tail) if tail.isEmpty ⇒ // If x.expand has only one term, we have nothing to expand.
          Sum(IntPow(head, d))
        case _ ⇒ // x.expand has at least 2 terms, need to expand
          // Each multinomial term is Product(a1^p1, ..., ak^pk, coeff).
          val newMonomials = Expr.getTermCoeffs(xs.length, d.toValue) map {
            case (coeff, powers) ⇒
              val newTerms = (xs zip powers).map { case (e, i) ⇒ IntPow(e, Const(i)) } :+ Expr.intToConst[T](coeff)
              Product(newTerms: _*)
          }
          Sum(newMonomials: _*)
      }
    } else {
      // Cannot expand a negative power.
      Sum(this)
    }
}
/** Sum of zero or more expressions; an empty `Sum()` represents 0. */
final case class Sum[T: Ring : Eq](es: Expr[T]*) extends Expr[T] {
  override def isSum: Boolean = true
  // An empty sum evaluates to the Ring's additive identity.
  override def toValue: T = es.map(_.toValue).reduceOption(_ + _).getOrElse(implicitly[Ring[T]].zero)
  // Differentiation is linear: differentiate each term independently.
  private[symcal] def diffInternal(x: Var[T]): Expr[T] = Sum(es.map(_.diff(x)): _*)
  override def subsInternal(v: Var[T], e: Expr[T]): Expr[T] = Sum(es.map(_.subsInternal(v, e)): _*)
  override def precedenceLevel: Int = Expr.precedenceOfAdd
  // Terms are joined by " + ", except negated terms which print as " - t".
  override protected def printInternal: String = (es.headOption, es.drop(1)) match {
    case (None, _) => "0" // empty Sum()
    case (Some(head), tail) ⇒ head.stringForm(precedenceLevel) +
      tail.map {
        case Minus(t) ⇒ " - " + t.stringForm(Expr.precedenceOfMultiply)
        case t ⇒ " + " + t.stringForm(precedenceLevel)
      }
      .mkString("")
  }
  /** Simplifies each term, flattens nested sums, and folds all constants into a single
   * trailing constant (omitted when it is zero). A one-term result is unwrapped, and
   * an empty result becomes the constant 0.
   */
  override def simplify: Expr[T] = {
    val (constants, nonconstants) = es.map(_.simplify).partition(_.isConst)
    val nonconstantsFlattened: Seq[Expr[T]] = nonconstants.flatMap {
      case Sum(es@_*) ⇒ es
      case x => Seq(x) // not a sum
    }
    // mergedConstants is the sum of all constants in the list; also could be 0.
    val mergedConstants: T = constants
      .collect { case x@Const(_) ⇒ x.value } // This converts into Seq[T]. We know that we are not losing any values here.
      .reduceOption(_ + _) // This may yield `None` if sequence is empty.
      .getOrElse(implicitly[Ring[T]].zero) // An empty sequence of `constants` will produce 0 here.
    val mergedExprs: Seq[Expr[T]] = if (mergedConstants.isZero)
      nonconstantsFlattened
    else
      nonconstantsFlattened :+ Const(mergedConstants) // Constant should be last in `Sum`, e.g. `x + y + 2`.
    // There are three cases now: empty sequence, one expr, and more than one expr.
    mergedExprs.headOption match {
      case Some(e) ⇒
        if (mergedExprs.length == 1) {
          // In this case, the simplified result is a `Sum` of just one expression, so should not be a `Sum`.
          e
        } else Sum(mergedExprs: _*)
      case None ⇒ Expr.constZero[T] // Empty `Sum` is transformed into zero.
    }
  }
  // Expanding a sum concatenates the expansions of its terms.
  override def expandInternal: Sum[T] = Sum(es.flatMap(_.expandInternal.es): _*)
}
/** Product of zero or more expressions; an empty `Product()` represents 1. */
final case class Product[T: Ring : Eq](es: Expr[T]*) extends Expr[T] {
  override def isProduct: Boolean = true
  // An empty product evaluates to the Ring's multiplicative identity.
  override def toValue: T = es.map(_.toValue).reduceOption(_ * _).getOrElse(implicitly[Ring[T]].one)
  // Product rule: sum over all factors, differentiating one factor at a time.
  override def diffInternal(x: Var[T]): Expr[T] = {
    val diffs = es.map(_.diff(x))
    val replaced = diffs.zipWithIndex.map { case (expr, index) ⇒ Product(es.updated(index, expr): _*) }
    Sum(replaced: _*)
  }
  override def subsInternal(v: Var[T], e: Expr[T]): Expr[T] = Product(es.map(_.subsInternal(v, e)): _*)
  override def precedenceLevel: Int = Expr.precedenceOfMultiply
  override protected def printInternal: String = es.map(_.stringForm(precedenceLevel)).mkString(" * ")
  /** Simplifies each factor, flattens nested products, and folds all constants into a
   * single leading constant (omitted when it is one). A zero constant collapses the
   * whole product to 0; a one-factor result is unwrapped; an empty result becomes 1.
   */
  override def simplify: Expr[T] = {
    val (constants, nonconstants) = es.map(_.simplify).partition(_.isConst)
    val nonconstantsFlattened: Seq[Expr[T]] = nonconstants.flatMap {
      case Product(es@_*) ⇒ es
      case x => Seq(x) // not a product
    }
    // mergedConstants is the product of all constants in the list; also could be 0 or 1.
    val mergedConstants: T = constants
      .collect { case x@Const(_) ⇒ x.value } // This converts into Seq[T]. We know that we are not losing any values here.
      .reduceOption(_ * _) // This may yield `None` if sequence is empty.
      .getOrElse(implicitly[Ring[T]].one) // An empty sequence of `constants` will produce 1 here.
    val mergedExprs: Seq[Expr[T]] = if (mergedConstants.isZero)
      Seq(Expr.constZero[T])
    else if (mergedConstants.isOne)
      nonconstantsFlattened
    else Seq(Const(mergedConstants)) ++ nonconstantsFlattened // Constant should be first in `Product`, e.g. `2 * x * y`.
    // There are three cases now: empty sequence, one expr, and more than one expr.
    mergedExprs.headOption match {
      case Some(e) ⇒
        if (mergedExprs.length == 1) {
          // In this case, the simplified result is a `Product` of just one expression, so should not be a `Product`.
          e
        } else Product(mergedExprs: _*)
      case None ⇒ Expr.constOne[T] // Empty `Product` is transformed into 1.
    }
  }
  // Distributes the head factor over the expansion of the remaining factors (recursively).
  override def expandInternal: Sum[T] = (es.headOption, es.drop(1)) match {
    case (None, _) ⇒ Sum(Expr.constOne[T]) // empty Product()
    case (Some(head), tail) ⇒ // Product(head, ....tail....)
      val terms = for {
        t <- head.expandInternal.es
        z <- Product(tail: _*).expandInternal.es
      } yield Product(t, z).flatten
      Sum(terms: _*)
  }
  /** If any of the multiplicands are a [[Product]], the list is flattened.
   * This auxiliary function is used to expand a [[Product]] that may contain nested [[Sum]] and [[Product]] expressions.
   *
   * @return A simplified (flattened) but equivalent [[Product]] term.
   */
  private[symcal] def flatten: Product[T] = {
    val newTerms = es.flatMap {
      case Product(fs@_*) ⇒ fs
      case t ⇒ Seq(t)
    }
    Product(newTerms: _*)
  }
}
| hamdi-jenzri/symcal | src/main/scala/com/github/symcal/Expr.scala | Scala | apache-2.0 | 13,729 |
package org.karps.row
import scala.util.{Failure, Success, Try}
import org.apache.spark.sql.Row
import org.apache.spark.sql.types._
import karps.core.{row => R}
/**
* A representation of a row that is easy to manipulate with
* algebraic datatypes.
*/
case class AlgebraicRow(cells: Seq[Cell])
/** Conversions between [[AlgebraicRow]] and Spark rows / protobuf rows. */
object AlgebraicRow {

  /** Converts a Spark [[Row]] together with its schema into an [[AlgebraicRow]]. */
  def fromRow(r: Row, st: StructType): Try[AlgebraicRow] =
    Cell.from(r.toSeq, st).flatMap {
      case RowCell(c) => Success(c)
      case x => Failure(new Exception(s"Got $x from $st -> $r"))
    }

  /** Rebuilds a Spark [[Row]] from the cells. */
  def toRow(ar: AlgebraicRow): Row = {
    val values = ar.cells.map(Cell.toAny)
    Row(values: _*)
  }

  /** Decodes a protobuf row. */
  def fromProto(r: R.Row): AlgebraicRow = AlgebraicRow(r.values.map(Cell.fromProto))

  /** Encodes to a protobuf row. */
  def toProto(ar: AlgebraicRow): R.Row = R.Row(ar.cells.map(Cell.toProto))

  /**
   * Attempts to denormalize the row: succeeds only when the row has exactly one cell.
   */
  def denormalize(ar: AlgebraicRow): Try[Cell] = ar.cells match {
    case Seq(single) => Success(single)
    case x => Failure(new Exception(s"Expected single cell, got $x"))
  }

  // Defines a canonical ordering across any row and any cell.
  // The content need not match.
  object RowOrdering extends Ordering[AlgebraicRow] {
    override def compare(x: AlgebraicRow, y: AlgebraicRow): Int =
      Cell.CellOrdering.compare(RowArray(x.cells), RowArray(y.cells))
  }
}
| tjhunter/karps | src/main/scala/org/karps/row/AlgebraicRow.scala | Scala | apache-2.0 | 1,380 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import org.apache.spark.SparkException
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.datasources._
/**
* A command used to write the result of a query to a directory.
*
* The syntax of using this command in SQL is:
* {{{
* INSERT OVERWRITE DIRECTORY (path=STRING)?
* USING format OPTIONS ([option1_name "option1_value", option2_name "option2_value", ...])
* SELECT ...
* }}}
*
* @param storage storage format used to describe how the query result is stored.
* @param provider the data source type to be used
* @param query the logical plan representing data to write to
* @param overwrite whether overwrites existing directory
*/
case class InsertIntoDataSourceDirCommand(
    storage: CatalogStorageFormat,
    provider: String,
    query: LogicalPlan,
    overwrite: Boolean) extends RunnableCommand {

  override def innerChildren: Seq[LogicalPlan] = query :: Nil

  override def run(sparkSession: SparkSession): Seq[Row] = {
    assert(storage.locationUri.nonEmpty, "Directory path is required")
    assert(provider.nonEmpty, "Data source is required")

    // Create the relation based on the input logical plan: `query`.
    val pathOption = storage.locationUri.map("path" -> CatalogUtils.URIToString(_))

    val dataSource = DataSource(
      sparkSession,
      className = provider,
      options = storage.properties ++ pathOption,
      catalogTable = None)

    // Only file-based sources can back an INSERT OVERWRITE DIRECTORY.
    val isFileFormat = classOf[FileFormat].isAssignableFrom(dataSource.providingClass)
    if (!isFileFormat) {
      throw new SparkException(
        "Only Data Sources providing FileFormat are supported: " + dataSource.providingClass)
    }

    val saveMode = if (overwrite) SaveMode.Overwrite else SaveMode.ErrorIfExists
    try {
      sparkSession.sessionState.executePlan(dataSource.planForWriting(saveMode, query)).toRdd
    } catch {
      case ex: AnalysisException =>
        // BUG FIX: `storage.locationUri` is an Option, so calling `.toString` on it
        // logged "Some(<uri>)" rather than the directory URI itself.
        logError("Failed to write to directory " +
          storage.locationUri.map(CatalogUtils.URIToString).getOrElse("<undefined>"), ex)
        throw ex
    }

    Seq.empty[Row]
  }
}
| witgo/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/command/InsertIntoDataSourceDirCommand.scala | Scala | apache-2.0 | 2,989 |
package im.tox.antox.utils
import java.io.{File, FilenameFilter}
import java.util
import java.util.{ArrayList, List}
import android.app.{Activity, AlertDialog, Dialog}
import android.content.DialogInterface
import android.os.Environment
import android.util.Log
import im.tox.antox.utils.FileDialog._
import im.tox.antox.utils.ListenerList._
//remove if not needed
import scala.collection.JavaConversions._
object FileDialog {
  // Label of the synthetic "go up one directory" list entry.
  private val PARENT_DIR = ".."
  // Callback invoked when the user picks a regular file.
  trait FileSelectedListener {
    def fileSelected(file: File): Unit
  }
  // Callback invoked when the user confirms the currently displayed directory.
  trait DirectorySelectedListener {
    def directorySelected(directory: File): Unit
  }
}
/**
 * A simple file-chooser dialog. Starts at `path` (falling back to external storage
 * when that path does not exist) and notifies registered listeners on selection.
 */
class FileDialog(private val activity: Activity, path: File) {
  private val TAG = getClass.getName
  // Entries currently shown in the dialog: file/directory names, plus ".." when applicable.
  private var fileList: Array[String] = _
  // Directory whose contents are currently listed.
  private var currentPath: File = _
  private var fileListenerList: ListenerList[FileSelectedListener] = new ListenerList[FileDialog.FileSelectedListener]()
  private var dirListenerList: ListenerList[DirectorySelectedListener] = new ListenerList[FileDialog.DirectorySelectedListener]()
  // NOTE(review): these two fields are never assigned anywhere in this class, so they
  // keep their defaults (false / null) — confirm whether setters were intended.
  private var selectDirectoryOption: Boolean = _
  private var fileEndsWith: String = _

  // Fall back to external storage when the requested start path does not exist.
  val newPath = if (!path.exists()) {
    Environment.getExternalStorageDirectory
  } else {
    path
  }
  loadFileList(newPath)

  /** Builds (and shows) the dialog listing the current directory. */
  def createFileDialog(): Dialog = {
    val builder = new AlertDialog.Builder(activity)
    builder.setTitle(currentPath.getPath)
    if (selectDirectoryOption) {
      builder.setPositiveButton("Select directory", new DialogInterface.OnClickListener() {
        def onClick(dialog: DialogInterface, which: Int) {
          Log.d(TAG, currentPath.getPath)
          fireDirectorySelectedEvent(currentPath)
        }
      })
    }
    builder.setItems(fileList.map(x => x: CharSequence), new DialogInterface.OnClickListener() {
      def onClick(dialog: DialogInterface, which: Int) {
        val fileChosen = fileList(which)
        val chosenFile = getChosenFile(fileChosen)
        if (chosenFile.isDirectory) {
          // Descend into the directory and reopen the dialog with the new listing.
          loadFileList(chosenFile)
          dialog.cancel()
          dialog.dismiss()
          showDialog()
        } else fireFileSelectedEvent(chosenFile)
      }
    })
    // builder.show() both displays the dialog and returns it.
    builder.show()
  }

  /** Registers a listener that is notified when a file is chosen. */
  def addFileListener(listener: FileSelectedListener) {
    fileListenerList.add(listener)
  }

  def showDialog() {
    createFileDialog().show()
  }

  private def fireFileSelectedEvent(file: File) {
    fileListenerList.fireEvent(new ListenerList.FireHandler[FileSelectedListener]() {
      def fireEvent(listener: FileSelectedListener) {
        listener.fileSelected(file)
      }
    })
  }

  private def fireDirectorySelectedEvent(directory: File) {
    dirListenerList.fireEvent(new ListenerList.FireHandler[DirectorySelectedListener]() {
      def fireEvent(listener: DirectorySelectedListener) {
        listener.directorySelected(directory)
      }
    })
  }

  /** Reads the given directory and rebuilds `fileList` (readable entries only). */
  private def loadFileList(path: File) {
    this.currentPath = path
    val r = new util.ArrayList[CharSequence]()
    if (path.exists()) {
      if (path.getParentFile != null) r.add(PARENT_DIR)
      val filter = new FilenameFilter() {
        def accept(dir: File, filename: String): Boolean = {
          val sel = new File(dir, filename)
          if (!sel.canRead) return false
          if (selectDirectoryOption) return sel.isDirectory else {
            // With no suffix filter configured, every readable entry is accepted.
            val endsWith = if (fileEndsWith != null) filename.toLowerCase.endsWith(fileEndsWith) else true
            return endsWith || sel.isDirectory
          }
        }
      }
      val fileList1 = path.list(filter)
      for (file <- fileList1) {
        r.add(file)
      }
    }
    fileList = r.toArray(Array[String]())
    // BUG FIX: removed a dead statement (`fileList.map(x => x: CharSequence)`) whose
    // result was discarded; the CharSequence conversion happens in createFileDialog.
  }

  /** Resolves a clicked entry: ".." maps to the parent, anything else to a child. */
  private def getChosenFile(fileChosen: String): File = {
    if (fileChosen == PARENT_DIR) currentPath.getParentFile else new File(currentPath, fileChosen)
  }
}
object ListenerList {
  // Visitor invoked for each registered listener by ListenerList.fireEvent.
  trait FireHandler[L] {
    def fireEvent(listener: L): Unit
  }
}
/** Minimal listener registry; callbacks are fired over a defensive snapshot copy. */
class ListenerList[L] {
  // Registered listeners, in insertion order.
  private var listenerList: util.List[L] = new util.ArrayList[L]()

  /** Registers a listener. */
  def add(listener: L) {
    listenerList.add(listener)
  }

  /** Invokes the handler on every listener, iterating over a copy of the list. */
  def fireEvent(fireHandler: FireHandler[L]) {
    val snapshot = new util.ArrayList[L](listenerList)
    snapshot.foreach(fireHandler.fireEvent)
  }
}
| 0xPoly/Antox | app/src/main/scala/im/tox/antox/utils/FileDialog.scala | Scala | gpl-3.0 | 4,335 |
package org.zouzias.qclocktwo.examples
import org.zouzias.qclocktwo.examples.ExamplePhrases._
/**
* Phrases iterator
*/
/**
 * Iterates over all (hour-phrase, minute-phrase) pairs from [[ExamplePhrases]],
 * advancing the minute phrase first and rolling over into the hour phrase.
 */
class ExamplePhrasesIterator extends Iterator[Array[String]]{

  // Current pair; slot 0 is the hour phrase, slot 1 (last) the minute phrase.
  var currentTime = ExamplePhrases.initTime()

  // Sentinel stored in both slots once the iteration is exhausted.
  val END_CONST: String = "__END__"

  // True until both slots hold the end sentinel.
  override def hasNext: Boolean = {
    currentTime.head.compareTo(END_CONST) != 0 && currentTime.last.compareTo(END_CONST) != 0
  }

  def next(): Array[String] = {
    // TODO: check if head / last exist
    val minsIndex = minutes.indexOf(currentTime.last)
    val hoursIndex = hours.indexOf(currentTime.head)

    // Increment minutes index; roll over into the next hour; otherwise terminate.
    if (minsIndex < minsLength - 1)
      currentTime = Array(currentTime.head, minutes(minsIndex + 1))
    else if (hoursIndex < hoursLength - 1)
      currentTime = Array(hours(hoursIndex + 1), minutes(0))
    else currentTime = Array(END_CONST, END_CONST)

    // BUG FIX: interpolating the Array directly printed its JVM identity
    // (e.g. "[Ljava.lang.String;@1f2a3b"), not its contents.
    println(s"Current time is : ${currentTime.mkString(" ")}")

    currentTime
  }
}
| zouzias/qlocktwo-grid-generator | src/main/scala/org/zouzias/qclocktwo/examples/ExamplePhrasesIterator.scala | Scala | apache-2.0 | 948 |
package shrimp
import tables._
import models._
import slick.dbio.DBIOAction
import slick.dbio.Effect.{Read, Write}
import slick.driver.H2Driver.api._
import scala.concurrent.ExecutionContext.Implicits.global
package object stores {
  /** Persists a [[Car]] and its colors as a composed Slick DBIO action. */
  object CarStore {
    // Returns a DBIO that: (1) inserts the car unless it already has an id,
    // (2) inserts each color that lacks an id, (3) links car and colors in
    // CarsColors, and finally yields the stored Car with all ids filled in.
    // NOTE(review): ids are allocated as max(id)+1, which is not atomic —
    // concurrent calls can collide; consider DB auto-increment instead.
    def storeCar(car: Car) = {
      (car.id match {
        case Some(carId) => DBIO.successful(carId)
        case None => {
          tables.Cars.map(_.id).max.result
            .map({
              case Some(maxCarId) => maxCarId + 1
              case None => 0
            })
            .flatMap({ carId => {
              println(s"carId=${carId}")
              (tables.Cars += (carId, car.name))
                .flatMap({ _ =>
                  DBIO.successful(carId)
                })
            }})
        }
      })
      .flatMap({ carId =>
        // For each color: reuse its id, or allocate one and insert the color row.
        DBIO.sequence(
          car.colors.map({ color =>
            (color match {
              case Color(Some(colorId), _) => DBIO.successful(colorId)
              case Color(None, _) => {
                tables.Colors.map(_.id).max.result
                  .map({
                    case Some(maxColorId) => maxColorId + 1
                    case None => 0
                  })
                  .flatMap({ colorId =>
                    println(s"colorId=${colorId}")
                    (tables.Colors += (colorId, color.name))
                      .flatMap({ _ =>
                        DBIO.successful(colorId)
                      })
                  })
              }
            })
            .flatMap({ colorId =>
              // Link the car to this color in the join table.
              println(s"carId=${carId} / colorId=${colorId}")
              (tables.CarsColors += (carId, colorId))
                .map({ _ => Color(Some(colorId), color.name) })
            })
          })
        ).map({ colors =>
          Car(Some(carId), car.name, colors)
        })
      })
    }
  }
}
| radium226/snippets | scala/slick-transactions/src/main/scala/shrimp/stores/package.scala | Scala | gpl-2.0 | 1,937 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600e.v2
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600e.v2.retriever.CT600EBoxRetriever
/** CT600E (v2) box E5: total turnover from exempt trading activities; optional, must be >= 0 when present. */
case class E5(value: Option[Int]) extends CtBoxIdentifier("Total turnover from exempt trading activities") with CtOptionalInteger with Input with ValidatableBox[CT600EBoxRetriever] {
  // The only validation rule for this box: zero or a positive integer.
  override def validate(boxRetriever: CT600EBoxRetriever): Set[CtValidation] = validateZeroOrPositiveInteger(this)
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600e/v2/E5.scala | Scala | apache-2.0 | 1,029 |
package de.tu_berlin.dima.bdapro.flink.palindrome.omarflores16
import org.apache.flink.api.scala._
import org.apache.flink.util.Collector
import scala.collection.mutable.Queue
/** Flink job that finds the longest palindromic sentence(s) in the input file. */
object Palindrome {

  def main(args: Array[String]){
    if (args.length != 1) {
      // BUG FIX: exactly one argument is accepted (the output path is hard-coded to
      // "Output" below), so the usage message must not advertise an outputPath.
      Console.err.println("Usage: <jar> inputPath")
      System.exit(-1)
    }

    var maxStringPalindrome = 0
    val inputPath = args(0)
    val env = ExecutionEnvironment.getExecutionEnvironment

    // (length, sentence, isPalindrome) for every non-empty lower-cased line.
    val lineText = env.readTextFile(inputPath)
      .flatMap( _.toLowerCase.trim.split("\\n") filter { _.nonEmpty } )
      .map{ strPalindrome => (strPalindrome.size,strPalindrome,palindromeFunc(strPalindrome))}

    // NOTE(review): this is the maximum length over ALL sentences, not only the
    // palindromic ones; if the longest sentence is not a palindrome nothing is
    // emitted below — confirm this is the intended semantics.
    for (t <- lineText.max(0).collect())
      maxStringPalindrome = t._1

    // Emit every palindrome whose length reaches the maximum.
    val finalTuple = lineText
      .reduceGroup{
        (in, out: Collector[(String)]) =>
          for (t <- in) {
            if (t._3 && t._1 >= maxStringPalindrome)
              out.collect("The biggest palindrome sentence: " + t._2)
          }
      }

    finalTuple.writeAsText("Output")
    env.execute("bdapro-ws1617-flink")
  }

  /**
   * A phrase is a palindrome when, ignoring spaces and letter case, it reads the
   * same forwards and backwards. (Replaces the previous Queue-based scan, which
   * reversed the queue twice per character and used `var`/`return`.)
   */
  def palindromeFunc(phrase: String): Boolean = {
    val letters = phrase.filterNot(_.isSpaceChar).map(_.toLower)
    letters == letters.reverse
  }
}
| cristiprg/BDAPRO.GlobalStateML | bdapro-ws1617-flink-jobs/src/main/scala/de/tu_berlin/dima/bdapro/flink/palindrome/omarflores16/Palindrome.scala | Scala | apache-2.0 | 1,640 |
/*
* Copyright (c) 2015, Nightfall Group
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package moe.nightfall.srails
import moe.nightfall.srails.common.Proxy
import net.minecraftforge.fml.common.event.{FMLInitializationEvent, FMLPostInitializationEvent, FMLPreInitializationEvent}
import net.minecraftforge.fml.common.{Mod, SidedProxy}
import org.apache.logging.log4j.LogManager
/** Mod entry point: wires the FML lifecycle events to the side-specific proxy. */
@Mod(modid = SRails.ID, name = SRails.Name, modLanguage = "scala")
object SRails {
  final val ID = "srails"
  final val Name = "Steve on Rails"
  // Bootstrap logger; replaced by FML's mod-specific logger in preInit.
  var log = LogManager.getLogger(Name)
  // Side-specific proxy, injected by FML before the event handlers run.
  @SidedProxy(serverSide = "moe.nightfall.srails.server.Proxy", clientSide = "moe.nightfall.srails.client.Proxy")
  var proxy: Proxy = null
  @Mod.EventHandler
  def preInit(e: FMLPreInitializationEvent) {
    log = e.getModLog
    proxy.preInit(e)
    log.info("Done with pre init phase.")
  }
  @Mod.EventHandler
  def init(e: FMLInitializationEvent) {
    proxy.init(e)
    log.info("Done with init phase.")
  }
  @Mod.EventHandler
  def postInit(e: FMLPostInitializationEvent) {
    proxy.postInit(e)
    // NOTE(review): message reads "pre post init" — likely meant "post init".
    log.info("Done with pre post init phase.")
  }
}
| Nightfall/SRails | src/main/scala/moe/nightfall/srails/SRails.scala | Scala | bsd-2-clause | 2,369 |
package io.continuum.bokeh
package sampledata
import java.io.{File,InputStream,FileInputStream,InputStreamReader,FileNotFoundException}
import java.util.zip.{ZipInputStream,GZIPInputStream}
import java.net.URL
import scala.collection.JavaConverters._
import scalax.io.JavaConverters._
import scalax.file.Path
import au.com.bytecode.opencsv.CSVReader
import net.fortuna.ical4j.model.{Calendar,Component}
import net.fortuna.ical4j.model.component.VEvent
import net.fortuna.ical4j.data.CalendarBuilder
object FileName {
  // Lets plain strings be passed wherever a FileName is expected.
  implicit def stringToFileName(fileName: String): FileName = Simple(fileName)
}
/** A sample-data file name; `realName` is the name of the artifact actually stored or downloaded. */
sealed trait FileName {
  val name: String
  // By default the stored artifact carries the logical name unchanged.
  def realName: String = name
}
case class Simple(name: String) extends FileName
/** A gzip-compressed file: the stored artifact is "<name>.gz". */
case class GZip(name: String) extends FileName {
  override def realName = name + ".gz"
}
/** A file stored inside a zip archive named "<base>.zip", where base is `name` without its extension. */
case class Zip(name: String) extends FileName {
  override def realName = {
    // BUG FIX: `name.substring(0, name.lastIndexOf("."))` threw
    // StringIndexOutOfBoundsException when `name` had no extension.
    val dot = name.lastIndexOf(".")
    val base = if (dot >= 0) name.substring(0, dot) else name
    base + ".zip"
  }
}
/** Locates sample-data files: classpath resources first, then ~/.bokeh/data, downloading from S3 on a miss. */
object SampleData {

  // Local cache directory (~/.bokeh/data), created on first use.
  lazy val dataPath: Path = {
    val home = Path.fromString(System.getProperty("user.home"))
    val path = home / ".bokeh" / "data"
    if (!path.exists) path.createDirectory()
    path
  }

  // Looks the artifact up on the classpath.
  def getStreamFromResources(fileName: FileName): Option[InputStream] = {
    Option(getClass.getClassLoader.getResourceAsStream(fileName.realName))
  }

  // Looks the artifact up in the local cache, downloading it when absent.
  def getStreamFromFile(fileName: FileName): Option[InputStream] = {
    val filePath = dataPath / fileName.realName
    val fileOption = if (filePath.exists) filePath.fileOption else download(fileName)
    fileOption.map(new FileInputStream(_))
  }

  // Resources take precedence over the local cache / download.
  def getFileStream(fileName: FileName): Option[InputStream] = {
    getStreamFromResources(fileName) orElse getStreamFromFile(fileName)
  }

  def getGZipStream(fileName: FileName): Option[InputStream] = {
    getFileStream(fileName).map(new GZIPInputStream(_))
  }

  // Scans the zip archive for the entry whose name matches the logical file name;
  // the returned stream is positioned at that entry. Closes the archive on a miss.
  def getZipStream(fileName: FileName): Option[InputStream] = {
    getFileStream(fileName).flatMap { stream =>
      val zip = new ZipInputStream(stream)
      var entry = zip.getNextEntry()
      var found = false
      while (entry != null) {
        found = !entry.isDirectory && entry.getName == fileName.name
        if (found) {
          // Setting entry to null ends the loop while keeping the stream positioned here.
          entry = null
        } else {
          zip.closeEntry()
          entry = zip.getNextEntry()
        }
      }
      if (found) {
        Some(zip)
      } else {
        zip.close()
        None
      }
    }
  }

  /** Opens the named file, dispatching on its compression kind; throws if it cannot be found anywhere. */
  def getStream(fileName: FileName): InputStream = {
    val streamOpt = fileName match {
      case Simple(_) => getFileStream(fileName)
      case GZip(_) => getGZipStream(fileName)
      case Zip(_) => getZipStream(fileName)
    }
    streamOpt getOrElse {
      throw new FileNotFoundException(s"can't locate ${fileName.name} in resources, .bokeh/data or S3")
    }
  }

  // Remote bucket holding the sample-data artifacts.
  val dataUrl = new URL("https://s3.amazonaws.com/bokeh_data/")

  // Downloads the artifact into the local cache; None when the remote size is unknown.
  def download(fileName: FileName): Option[File] = {
    val url = new URL(dataUrl, fileName.realName)
    val input = url.asInput
    val output = dataPath / fileName.realName
    input.size match {
      case Some(size) =>
        println(s"Downloading $url to ${output.path} (${size} bytes) ...")
        output.write(input.bytes)
        output.fileOption
      case None =>
        None
    }
  }
}
trait SampleData
/** Sample datasets backed by CSV files. */
trait CSVSampleData extends SampleData {
  // Parses the file with opencsv (comma-separated, double-quoted, backslash-escaped),
  // skipping the single header line; each cell is trimmed.
  protected def loadRows(fileName: FileName): List[List[String]] = {
    val input = new InputStreamReader(SampleData.getStream(fileName))
    val reader = new CSVReader(input, ',', '"', '\\\\', 1)
    reader.readAll().asScala.map(_.map(_.trim).toList).toList
  }
}
/** Sample datasets backed by iCalendar (.ics) files. */
trait ICalSampleData {
  // Parses the calendar with ical4j and returns its VEVENT components only.
  protected def loadEvents(fileName: FileName): List[VEvent] = {
    val input = SampleData.getStream(fileName)
    val builder = new CalendarBuilder()
    val calendar = builder.build(input)
    val components = calendar.getComponents(Component.VEVENT)
    components.asScala.toList.collect { case event: VEvent => event }
  }
}
| bokeh/bokeh-scala | sampledata/src/main/scala/SampleData.scala | Scala | mit | 4,287 |
/*******************************************************************************
* Copyright (c) 2019. Carl Minden
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package com.anathema_roguelike
package entities.characters.actions.attacks
import com.anathema_roguelike.entities.characters.Character
import com.anathema_roguelike.entities.characters.actions.costs.{ActionCosts, EnergyCost}
import com.anathema_roguelike.entities.characters.perks.actions.targetingstrategies.TargetEffect
import com.anathema_roguelike.main.utilities.Utils
import com.anathema_roguelike.stats.characterstats.CharacterStat
import com.anathema_roguelike.stats.characterstats.resources.{CurrentHealth, Damage}
import com.anathema_roguelike.stats.characterstats.secondarystats.AttackSpeed
import com.anathema_roguelike.stats.effects.Effect
object WeaponAttack {
  // The energy cost of a weapon attack is driven by the attacker's AttackSpeed stat.
  private def getEnergyCost(character: Character) = new EnergyCost(character, character.getStatAmount[AttackSpeed])
}
/**
 * Base class for attacks made with a weapon: energy cost derives from the attacker's
 * attack speed, and a damage effect (primary weapon damage) is registered on construction.
 */
abstract class WeaponAttack(
  attacker: Character,
  additionalCosts: ActionCosts,
  targetEffects: TargetEffect[Character, _]*
) extends Attack[Character](
  attacker,
  WeaponAttack.getEnergyCost(attacker),
  additionalCosts, targetEffects:_*) {

  // Constructor side effect: the weapon-damage effect is always applied to targets.
  addTargetEffect(getWeaponAttackEffect)

  def getWeaponAttackEffect: TargetEffect[Character, _ <: CharacterStat] = {
    new TargetEffect[Character, CurrentHealth](Utils.getName(this)) {
      override def getEffect: Option[Effect[Character, CurrentHealth]] = {
        // NOTE(review): a Damage instance is returned where an Option is expected —
        // presumably an implicit conversion wraps it; confirm in the Effect API.
        // The damage amount is read lazily from the attacker's primary weapon.
        new Damage[CurrentHealth](getAttacker, this, () => {
          getAttacker.getPrimaryWeaponDamage.toDouble
        })
      }
    }
  }
}
| carlminden/anathema-roguelike | src/com/anathema_roguelike/entities/characters/actions/attacks/WeaponAttack.scala | Scala | gpl-3.0 | 2,355 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.math.{BigDecimal => JBigDecimal}
import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import org.apache.parquet.filter2.predicate.{FilterApi, FilterPredicate, Operators}
import org.apache.parquet.filter2.predicate.FilterApi._
import org.apache.parquet.filter2.predicate.Operators.{Column => _, _}
import org.apache.spark.SparkException
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.execution.datasources.{DataSourceStrategy, HadoopFsRelation, LogicalRelation}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.ParquetOutputTimestampType
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
import org.apache.spark.util.{AccumulatorContext, AccumulatorV2}
/**
* A test suite that tests Parquet filter2 API based filter pushdown optimization.
*
* NOTE:
*
* 1. `!(a cmp b)` is always transformed to its negated form `a cmp' b` by the
* `BooleanSimplification` optimization rule whenever possible. As a result, predicate `!(a < 1)`
* results in a `GtEq` filter predicate rather than a `Not`.
*
* 2. `Tuple1(Option(x))` is used together with `AnyVal` types like `Int` to ensure the inferred
* data type is nullable.
*
* NOTE:
*
* This file intendedly enables record-level filtering explicitly. If new test cases are
* dependent on this configuration, don't forget you better explicitly set this configuration
* within the test.
*/
class ParquetFilterSuite extends QueryTest with ParquetTest with SharedSQLContext {
  // ParquetFilters built from the session's current pushdown settings; used to check
  // that Catalyst predicates translate into Parquet filter predicates.
  private lazy val parquetFilters =
    new ParquetFilters(conf.parquetFilterPushDownDate, conf.parquetFilterPushDownTimestamp,
      conf.parquetFilterPushDownDecimal, conf.parquetFilterPushDownStringStartWith,
      conf.parquetFilterPushDownInFilterThreshold, conf.caseSensitiveAnalysis)
  /** Enables record-level Parquet filtering before every test. */
  override def beforeEach(): Unit = {
    super.beforeEach()
    // Note that there are many tests here that require record-level filtering set to be true.
    spark.conf.set(SQLConf.PARQUET_RECORD_FILTER_ENABLED.key, "true")
  }
  /** Restores the record-level filtering setting after every test. */
  override def afterEach(): Unit = {
    try {
      spark.conf.unset(SQLConf.PARQUET_RECORD_FILTER_ENABLED.key)
    } finally {
      // Run the superclass teardown even if unset fails.
      super.afterEach()
    }
  }
  /**
   * Runs `predicate` against `df` with all Parquet pushdown options enabled, asserts
   * that a Parquet filter of class `filterClass` is actually generated for the pushed
   * predicate, and verifies the query result (with Spark-side filtering stripped, so
   * only Parquet-level filtering remains) via `checker` against `expected`.
   */
  private def checkFilterPredicate(
      df: DataFrame,
      predicate: Predicate,
      filterClass: Class[_ <: FilterPredicate],
      checker: (DataFrame, Seq[Row]) => Unit,
      expected: Seq[Row]): Unit = {
    val output = predicate.collect { case a: Attribute => a }.distinct

    withSQLConf(
      SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true",
      SQLConf.PARQUET_FILTER_PUSHDOWN_DATE_ENABLED.key -> "true",
      SQLConf.PARQUET_FILTER_PUSHDOWN_TIMESTAMP_ENABLED.key -> "true",
      SQLConf.PARQUET_FILTER_PUSHDOWN_DECIMAL_ENABLED.key -> "true",
      SQLConf.PARQUET_FILTER_PUSHDOWN_STRING_STARTSWITH_ENABLED.key -> "true",
      SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
      val query = df
        .select(output.map(e => Column(e)): _*)
        .where(Column(predicate))

      // Collect the pushed-down filters from the optimized plan's scan node.
      var maybeRelation: Option[HadoopFsRelation] = None
      val maybeAnalyzedPredicate = query.queryExecution.optimizedPlan.collect {
        case PhysicalOperation(_, filters,
                               LogicalRelation(relation: HadoopFsRelation, _, _, _)) =>
          maybeRelation = Some(relation)
          filters
      }.flatten.reduceLeftOption(_ && _)
      assert(maybeAnalyzedPredicate.isDefined, "No filter is analyzed from the given query")

      val (_, selectedFilters, _) =
        DataSourceStrategy.selectFilters(maybeRelation.get, maybeAnalyzedPredicate.toSeq)
      assert(selectedFilters.nonEmpty, "No filter is pushed down")

      selectedFilters.foreach { pred =>
        val maybeFilter = parquetFilters.createFilter(
          new SparkToParquetSchemaConverter(conf).convert(df.schema), pred)
        assert(maybeFilter.isDefined, s"Couldn't generate filter predicate for $pred")
        // Doesn't bother checking type parameters here (e.g. `Eq[Integer]`)
        maybeFilter.exists(_.getClass === filterClass)
      }
      checker(stripSparkFilter(query), expected)
    }
  }
  // Convenience overload: uses the implicit DataFrame and checkAnswer as the checker.
  private def checkFilterPredicate
      (predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: Seq[Row])
      (implicit df: DataFrame): Unit = {
    checkFilterPredicate(df, predicate, filterClass, checkAnswer(_, _: Seq[Row]), expected)
  }
  // Convenience overload for a single expected scalar value (wrapped into one Row).
  private def checkFilterPredicate[T]
      (predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: T)
      (implicit df: DataFrame): Unit = {
    checkFilterPredicate(predicate, filterClass, Seq(Row(expected)))(df)
  }
  // Like checkFilterPredicate, but compares Array[Byte] columns by their rendered,
  // sorted contents (arrays do not define a useful equals for checkAnswer).
  private def checkBinaryFilterPredicate
      (predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: Seq[Row])
      (implicit df: DataFrame): Unit = {
    def checkBinaryAnswer(df: DataFrame, expected: Seq[Row]) = {
      assertResult(expected.map(_.getAs[Array[Byte]](0).mkString(",")).sorted) {
        df.rdd.map(_.getAs[Array[Byte]](0).mkString(",")).collect().toSeq.sorted
      }
    }

    checkFilterPredicate(df, predicate, filterClass, checkBinaryAnswer _, expected)
  }
/** Single-value convenience variant for binary columns. */
private def checkBinaryFilterPredicate
    (predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: Array[Byte])
    (implicit df: DataFrame): Unit = {
  val expectedRows = Seq(Row(expected))
  checkBinaryFilterPredicate(predicate, filterClass, expectedRows)(df)
}
/**
 * Runs the standard suite of comparison-predicate pushdown checks over a
 * timestamp column. `data` must contain exactly 4 ordered timestamps; each
 * check asserts both the generated Parquet filter class and the filtered rows.
 */
private def testTimestampPushdown(data: Seq[Timestamp]): Unit = {
assert(data.size === 4)
val ts1 = data.head
val ts2 = data(1)
val ts3 = data(2)
val ts4 = data(3)
withParquetDataFrame(data.map(i => Tuple1(i))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], data.map(i => Row.apply(i)))
checkFilterPredicate('_1 === ts1, classOf[Eq[_]], ts1)
checkFilterPredicate('_1 <=> ts1, classOf[Eq[_]], ts1)
checkFilterPredicate('_1 =!= ts1, classOf[NotEq[_]],
Seq(ts2, ts3, ts4).map(i => Row.apply(i)))
checkFilterPredicate('_1 < ts2, classOf[Lt[_]], ts1)
checkFilterPredicate('_1 > ts1, classOf[Gt[_]], Seq(ts2, ts3, ts4).map(i => Row.apply(i)))
checkFilterPredicate('_1 <= ts1, classOf[LtEq[_]], ts1)
checkFilterPredicate('_1 >= ts4, classOf[GtEq[_]], ts4)
// Literal-on-the-left forms exercise the commuted comparisons.
checkFilterPredicate(Literal(ts1) === '_1, classOf[Eq[_]], ts1)
checkFilterPredicate(Literal(ts1) <=> '_1, classOf[Eq[_]], ts1)
checkFilterPredicate(Literal(ts2) > '_1, classOf[Lt[_]], ts1)
checkFilterPredicate(Literal(ts3) < '_1, classOf[Gt[_]], ts4)
checkFilterPredicate(Literal(ts1) >= '_1, classOf[LtEq[_]], ts1)
checkFilterPredicate(Literal(ts4) <= '_1, classOf[GtEq[_]], ts4)
// Negation and disjunction must also be translated into Parquet predicates.
checkFilterPredicate(!('_1 < ts4), classOf[GtEq[_]], ts4)
checkFilterPredicate('_1 < ts2 || '_1 > ts3, classOf[Operators.Or], Seq(Row(ts1), Row(ts4)))
}
}
/**
 * Writes `data` to a temporary Parquet file and runs `f` on the re-read
 * DataFrame, so decimal pushdown is exercised against a real file on disk.
 */
private def testDecimalPushDown(data: DataFrame)(f: DataFrame => Unit): Unit = {
  withTempPath { location =>
    data.write.parquet(location.getCanonicalPath)
    readParquetFile(location.toString)(f)
  }
}
// Verifies that StringStartsWith pushdown goes through Parquet's `canDrop` and
// `inverseCanDrop` row-group pruning paths: with pushdown enabled, every row
// group should be dropped (accumulator stays 0); with it disabled, row groups
// are read (accumulator > 0).
private def testStringStartsWith(dataFrame: DataFrame, filter: String): Unit = {
withTempPath { dir =>
val path = dir.getCanonicalPath
// Small block size forces many row groups so pruning is observable.
dataFrame.write.option("parquet.block.size", 512).parquet(path)
Seq(true, false).foreach { pushDown =>
withSQLConf(
SQLConf.PARQUET_FILTER_PUSHDOWN_STRING_STARTSWITH_ENABLED.key -> pushDown.toString) {
val accu = new NumRowGroupsAcc
sparkContext.register(accu)
val df = spark.read.parquet(path).filter(filter)
// Touch every partition so the accumulator reflects row groups actually read.
df.foreachPartition((it: Iterator[Row]) => it.foreach(v => accu.add(0)))
if (pushDown) {
assert(accu.value == 0)
} else {
assert(accu.value > 0)
}
AccumulatorContext.remove(accu.id)
}
}
}
}
// Pushdown of equality predicates on a boolean column; each check asserts both
// the generated Parquet filter class and the filtered result.
test("filter pushdown - boolean") {
withParquetDataFrame((true :: false :: Nil).map(b => Tuple1.apply(Option(b)))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], Seq(Row(true), Row(false)))
checkFilterPredicate('_1 === true, classOf[Eq[_]], true)
checkFilterPredicate('_1 <=> true, classOf[Eq[_]], true)
checkFilterPredicate('_1 =!= true, classOf[NotEq[_]], false)
}
}
// Pushdown of comparison predicates on a ByteType (tinyint) column over values 1..4.
test("filter pushdown - tinyint") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toByte)))) { implicit df =>
assert(df.schema.head.dataType === ByteType)
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 === 1.toByte, classOf[Eq[_]], 1)
checkFilterPredicate('_1 <=> 1.toByte, classOf[Eq[_]], 1)
checkFilterPredicate('_1 =!= 1.toByte, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 < 2.toByte, classOf[Lt[_]], 1)
checkFilterPredicate('_1 > 3.toByte, classOf[Gt[_]], 4)
checkFilterPredicate('_1 <= 1.toByte, classOf[LtEq[_]], 1)
checkFilterPredicate('_1 >= 4.toByte, classOf[GtEq[_]], 4)
// Literal-on-the-left forms exercise the commuted comparisons.
checkFilterPredicate(Literal(1.toByte) === '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1.toByte) <=> '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2.toByte) > '_1, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3.toByte) < '_1, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1.toByte) >= '_1, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4.toByte) <= '_1, classOf[GtEq[_]], 4)
checkFilterPredicate(!('_1 < 4.toByte), classOf[GtEq[_]], 4)
checkFilterPredicate('_1 < 2.toByte || '_1 > 3.toByte,
classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
// Pushdown of comparison predicates on a ShortType (smallint) column over values 1..4.
test("filter pushdown - smallint") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toShort)))) { implicit df =>
assert(df.schema.head.dataType === ShortType)
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 === 1.toShort, classOf[Eq[_]], 1)
checkFilterPredicate('_1 <=> 1.toShort, classOf[Eq[_]], 1)
checkFilterPredicate('_1 =!= 1.toShort, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 < 2.toShort, classOf[Lt[_]], 1)
checkFilterPredicate('_1 > 3.toShort, classOf[Gt[_]], 4)
checkFilterPredicate('_1 <= 1.toShort, classOf[LtEq[_]], 1)
checkFilterPredicate('_1 >= 4.toShort, classOf[GtEq[_]], 4)
// Literal-on-the-left forms exercise the commuted comparisons.
checkFilterPredicate(Literal(1.toShort) === '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1.toShort) <=> '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2.toShort) > '_1, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3.toShort) < '_1, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1.toShort) >= '_1, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4.toShort) <= '_1, classOf[GtEq[_]], 4)
checkFilterPredicate(!('_1 < 4.toShort), classOf[GtEq[_]], 4)
checkFilterPredicate('_1 < 2.toShort || '_1 > 3.toShort,
classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
// Pushdown of comparison predicates on an integer column over values 1..4.
test("filter pushdown - integer") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i)))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 <=> 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 =!= 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
// Literal-on-the-left forms exercise the commuted comparisons.
checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1) <=> '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
// Pushdown of comparison predicates on a long column over values 1..4.
test("filter pushdown - long") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toLong)))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 <=> 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 =!= 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
// Literal-on-the-left forms exercise the commuted comparisons.
checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1) <=> '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
// Pushdown of comparison predicates on a float column over values 1..4.
test("filter pushdown - float") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toFloat)))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 <=> 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 =!= 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
// Literal-on-the-left forms exercise the commuted comparisons.
checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1) <=> '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
// Pushdown of comparison predicates on a double column over values 1..4.
test("filter pushdown - double") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toDouble)))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 <=> 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 =!= 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
// Literal-on-the-left forms exercise the commuted comparisons.
checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1) <=> '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
// Pushdown of comparison predicates on a string column over values "1".."4".
test("filter pushdown - string") {
withParquetDataFrame((1 to 4).map(i => Tuple1(i.toString))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate(
'_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(i => Row.apply(i.toString)))
checkFilterPredicate('_1 === "1", classOf[Eq[_]], "1")
checkFilterPredicate('_1 <=> "1", classOf[Eq[_]], "1")
checkFilterPredicate(
'_1 =!= "1", classOf[NotEq[_]], (2 to 4).map(i => Row.apply(i.toString)))
checkFilterPredicate('_1 < "2", classOf[Lt[_]], "1")
checkFilterPredicate('_1 > "3", classOf[Gt[_]], "4")
checkFilterPredicate('_1 <= "1", classOf[LtEq[_]], "1")
checkFilterPredicate('_1 >= "4", classOf[GtEq[_]], "4")
// Literal-on-the-left forms exercise the commuted comparisons.
checkFilterPredicate(Literal("1") === '_1, classOf[Eq[_]], "1")
checkFilterPredicate(Literal("1") <=> '_1, classOf[Eq[_]], "1")
checkFilterPredicate(Literal("2") > '_1, classOf[Lt[_]], "1")
checkFilterPredicate(Literal("3") < '_1, classOf[Gt[_]], "4")
checkFilterPredicate(Literal("1") >= '_1, classOf[LtEq[_]], "1")
checkFilterPredicate(Literal("4") <= '_1, classOf[GtEq[_]], "4")
checkFilterPredicate(!('_1 < "4"), classOf[GtEq[_]], "4")
checkFilterPredicate('_1 < "2" || '_1 > "3", classOf[Operators.Or], Seq(Row("1"), Row("4")))
}
}
// Pushdown of comparison predicates on a binary column. Uses the dedicated
// checkBinaryFilterPredicate helper since byte arrays need content comparison.
test("filter pushdown - binary") {
// Local helper: `i.b` is the UTF-8 bytes of i's decimal string.
implicit class IntToBinary(int: Int) {
def b: Array[Byte] = int.toString.getBytes(StandardCharsets.UTF_8)
}
withParquetDataFrame((1 to 4).map(i => Tuple1(i.b))) { implicit df =>
checkBinaryFilterPredicate('_1 === 1.b, classOf[Eq[_]], 1.b)
checkBinaryFilterPredicate('_1 <=> 1.b, classOf[Eq[_]], 1.b)
checkBinaryFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkBinaryFilterPredicate(
'_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(i => Row.apply(i.b)).toSeq)
checkBinaryFilterPredicate(
'_1 =!= 1.b, classOf[NotEq[_]], (2 to 4).map(i => Row.apply(i.b)).toSeq)
checkBinaryFilterPredicate('_1 < 2.b, classOf[Lt[_]], 1.b)
checkBinaryFilterPredicate('_1 > 3.b, classOf[Gt[_]], 4.b)
checkBinaryFilterPredicate('_1 <= 1.b, classOf[LtEq[_]], 1.b)
checkBinaryFilterPredicate('_1 >= 4.b, classOf[GtEq[_]], 4.b)
// Literal-on-the-left forms exercise the commuted comparisons.
checkBinaryFilterPredicate(Literal(1.b) === '_1, classOf[Eq[_]], 1.b)
checkBinaryFilterPredicate(Literal(1.b) <=> '_1, classOf[Eq[_]], 1.b)
checkBinaryFilterPredicate(Literal(2.b) > '_1, classOf[Lt[_]], 1.b)
checkBinaryFilterPredicate(Literal(3.b) < '_1, classOf[Gt[_]], 4.b)
checkBinaryFilterPredicate(Literal(1.b) >= '_1, classOf[LtEq[_]], 1.b)
checkBinaryFilterPredicate(Literal(4.b) <= '_1, classOf[GtEq[_]], 4.b)
checkBinaryFilterPredicate(!('_1 < 4.b), classOf[GtEq[_]], 4.b)
checkBinaryFilterPredicate(
'_1 < 2.b || '_1 > 3.b, classOf[Operators.Or], Seq(Row(1.b), Row(4.b)))
}
}
// Pushdown of comparison predicates on a DateType column (requires
// spark.sql.parquet.filterPushdown.date to be enabled, set in the shared conf).
test("filter pushdown - date") {
// Local helper: "yyyy-MM-dd".date parses to java.sql.Date.
implicit class StringToDate(s: String) {
def date: Date = Date.valueOf(s)
}
val data = Seq("2018-03-18", "2018-03-19", "2018-03-20", "2018-03-21")
withParquetDataFrame(data.map(i => Tuple1(i.date))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], data.map(i => Row.apply(i.date)))
checkFilterPredicate('_1 === "2018-03-18".date, classOf[Eq[_]], "2018-03-18".date)
checkFilterPredicate('_1 <=> "2018-03-18".date, classOf[Eq[_]], "2018-03-18".date)
checkFilterPredicate('_1 =!= "2018-03-18".date, classOf[NotEq[_]],
Seq("2018-03-19", "2018-03-20", "2018-03-21").map(i => Row.apply(i.date)))
checkFilterPredicate('_1 < "2018-03-19".date, classOf[Lt[_]], "2018-03-18".date)
checkFilterPredicate('_1 > "2018-03-20".date, classOf[Gt[_]], "2018-03-21".date)
checkFilterPredicate('_1 <= "2018-03-18".date, classOf[LtEq[_]], "2018-03-18".date)
checkFilterPredicate('_1 >= "2018-03-21".date, classOf[GtEq[_]], "2018-03-21".date)
// Literal-on-the-left forms exercise the commuted comparisons.
checkFilterPredicate(
Literal("2018-03-18".date) === '_1, classOf[Eq[_]], "2018-03-18".date)
checkFilterPredicate(
Literal("2018-03-18".date) <=> '_1, classOf[Eq[_]], "2018-03-18".date)
checkFilterPredicate(
Literal("2018-03-19".date) > '_1, classOf[Lt[_]], "2018-03-18".date)
checkFilterPredicate(
Literal("2018-03-20".date) < '_1, classOf[Gt[_]], "2018-03-21".date)
checkFilterPredicate(
Literal("2018-03-18".date) >= '_1, classOf[LtEq[_]], "2018-03-18".date)
checkFilterPredicate(
Literal("2018-03-21".date) <= '_1, classOf[GtEq[_]], "2018-03-21".date)
checkFilterPredicate(!('_1 < "2018-03-21".date), classOf[GtEq[_]], "2018-03-21".date)
checkFilterPredicate(
'_1 < "2018-03-19".date || '_1 > "2018-03-20".date,
classOf[Operators.Or],
Seq(Row("2018-03-18".date), Row("2018-03-21".date)))
}
}
// Timestamp pushdown works for the TIMESTAMP_MILLIS and TIMESTAMP_MICROS output
// types, but not for INT96 (legacy Impala encoding), which must yield no filter.
test("filter pushdown - timestamp") {
// spark.sql.parquet.outputTimestampType = TIMESTAMP_MILLIS
val millisData = Seq(Timestamp.valueOf("2018-06-14 08:28:53.123"),
Timestamp.valueOf("2018-06-15 08:28:53.123"),
Timestamp.valueOf("2018-06-16 08:28:53.123"),
Timestamp.valueOf("2018-06-17 08:28:53.123"))
withSQLConf(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key ->
ParquetOutputTimestampType.TIMESTAMP_MILLIS.toString) {
testTimestampPushdown(millisData)
}
// spark.sql.parquet.outputTimestampType = TIMESTAMP_MICROS
val microsData = Seq(Timestamp.valueOf("2018-06-14 08:28:53.123456"),
Timestamp.valueOf("2018-06-15 08:28:53.123456"),
Timestamp.valueOf("2018-06-16 08:28:53.123456"),
Timestamp.valueOf("2018-06-17 08:28:53.123456"))
withSQLConf(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key ->
ParquetOutputTimestampType.TIMESTAMP_MICROS.toString) {
testTimestampPushdown(microsData)
}
// spark.sql.parquet.outputTimestampType = INT96 doesn't support pushdown:
// createFilter must return None for any predicate on an INT96 column.
withSQLConf(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key ->
ParquetOutputTimestampType.INT96.toString) {
withParquetDataFrame(millisData.map(i => Tuple1(i))) { implicit df =>
assertResult(None) {
parquetFilters.createFilter(
new SparkToParquetSchemaConverter(conf).convert(df.schema), sources.IsNull("_1"))
}
}
}
}
// Decimal pushdown across all three physical encodings (int32-, int64-, and
// byte-array-backed decimals), in both legacy and standard Parquet formats.
test("filter pushdown - decimal") {
Seq(true, false).foreach { legacyFormat =>
withSQLConf(SQLConf.PARQUET_WRITE_LEGACY_FORMAT.key -> legacyFormat.toString) {
Seq(
s"a decimal(${Decimal.MAX_INT_DIGITS}, 2)", // 32BitDecimalType
s"a decimal(${Decimal.MAX_LONG_DIGITS}, 2)", // 64BitDecimalType
"a decimal(38, 18)" // ByteArrayDecimalType
).foreach { schemaDDL =>
val schema = StructType.fromDDL(schemaDDL)
val rdd =
spark.sparkContext.parallelize((1 to 4).map(i => Row(new java.math.BigDecimal(i))))
val dataFrame = spark.createDataFrame(rdd, schema)
testDecimalPushDown(dataFrame) { implicit df =>
assert(df.schema === schema)
checkFilterPredicate('a.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('a.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('a === 1, classOf[Eq[_]], 1)
checkFilterPredicate('a <=> 1, classOf[Eq[_]], 1)
checkFilterPredicate('a =!= 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('a < 2, classOf[Lt[_]], 1)
checkFilterPredicate('a > 3, classOf[Gt[_]], 4)
checkFilterPredicate('a <= 1, classOf[LtEq[_]], 1)
checkFilterPredicate('a >= 4, classOf[GtEq[_]], 4)
// Literal-on-the-left forms exercise the commuted comparisons.
checkFilterPredicate(Literal(1) === 'a, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1) <=> 'a, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2) > 'a, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3) < 'a, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1) >= 'a, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4) <= 'a, classOf[GtEq[_]], 4)
checkFilterPredicate(!('a < 4), classOf[GtEq[_]], 4)
checkFilterPredicate('a < 2 || 'a > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
}
}
}
// A decimal filter value whose scale does not match the column's Parquet scale
// must NOT be pushed down (createFilter returns None); a matching scale maps to
// the expected int/long comparison on the physical column.
test("Ensure that filter value matched the parquet file schema") {
val scale = 2
val schema = StructType(Seq(
StructField("cint", IntegerType),
StructField("cdecimal1", DecimalType(Decimal.MAX_INT_DIGITS, scale)),
StructField("cdecimal2", DecimalType(Decimal.MAX_LONG_DIGITS, scale)),
StructField("cdecimal3", DecimalType(DecimalType.MAX_PRECISION, scale))
))
val parquetSchema = new SparkToParquetSchemaConverter(conf).convert(schema)
// decimal matches the column scale; decimal1 deliberately does not.
val decimal = new JBigDecimal(10).setScale(scale)
val decimal1 = new JBigDecimal(10).setScale(scale + 1)
assert(decimal.scale() === scale)
assert(decimal1.scale() === scale + 1)
// 10.00 with scale 2 is stored as unscaled value 1000 in the int32-backed column.
assertResult(Some(lt(intColumn("cdecimal1"), 1000: Integer))) {
parquetFilters.createFilter(parquetSchema, sources.LessThan("cdecimal1", decimal))
}
assertResult(None) {
parquetFilters.createFilter(parquetSchema, sources.LessThan("cdecimal1", decimal1))
}
assertResult(Some(lt(longColumn("cdecimal2"), 1000L: java.lang.Long))) {
parquetFilters.createFilter(parquetSchema, sources.LessThan("cdecimal2", decimal))
}
assertResult(None) {
parquetFilters.createFilter(parquetSchema, sources.LessThan("cdecimal2", decimal1))
}
assert(parquetFilters.createFilter(
parquetSchema, sources.LessThan("cdecimal3", decimal)).isDefined)
assertResult(None) {
parquetFilters.createFilter(parquetSchema, sources.LessThan("cdecimal3", decimal1))
}
}
// Regression test: predicates on partition columns must not be pushed to Parquet,
// because the partition column does not exist inside the data files.
test("SPARK-6554: don't push down predicates which reference partition columns") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/part=1"
(1 to 3).map(i => (i, i.toString)).toDF("a", "b").write.parquet(path)
// If the "part = 1" filter gets pushed down, this query will throw an exception since
// "part" is not a valid column in the actual Parquet file
checkAnswer(
spark.read.parquet(dir.getCanonicalPath).filter("part = 1"),
(1 to 3).map(i => Row(i, i.toString, 1)))
}
}
}
// Regression test: a filter mixing a partition-column predicate with a data-column
// predicate must still produce correct results.
test("SPARK-10829: Filter combine partition key and attribute doesn't work in DataSource scan") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/part=1"
(1 to 3).map(i => (i, i.toString)).toDF("a", "b").write.parquet(path)
// If the "part = 0" part of the predicate gets pushed down, this query will throw
// an exception since "part" is not a valid column in the actual Parquet file
checkAnswer(
spark.read.parquet(dir.getCanonicalPath).filter("a > 0 and (part = 0 or a > 1)"),
(2 to 3).map(i => Row(i, i.toString, 1)))
}
}
}
// Regression test: a non-pushed filter over a partitioned scan combined with an
// empty projection (count only) must not break query planning.
test("SPARK-12231: test the filter and empty project in partitioned DataSource scan") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}"
(1 to 3).map(i => (i, i + 1, i + 2, i + 3)).toDF("a", "b", "c", "d").
write.partitionBy("a").parquet(path)
// The filter "a > 1 or b < 2" will not get pushed down, and the projection is empty;
// without the fix this query throws because the projection built from the combined
// filter expects two projected columns while the count() scan provides none.
val df1 = spark.read.parquet(dir.getCanonicalPath)
assert(df1.filter("a > 1 or b < 2").count() == 2)
}
}
}
// Regression test: the regenerated projection over a partitioned scan must include
// both partition and normal columns in the right order.
test("SPARK-12231: test the new projection in partitioned DataSource scan") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}"
(1 to 3).map(i => (i, i + 1, i + 2, i + 3)).toDF("a", "b", "c", "d").
write.partitionBy("a").parquet(path)
// test the generate new projection case
// when projects != partitionAndNormalColumnProjs
val df1 = spark.read.parquet(dir.getCanonicalPath)
checkAnswer(
df1.filter("a > 1 or b > 2").orderBy("a").selectExpr("a", "b", "c", "d"),
(2 to 3).map(i => Row(i, i + 1, i + 2, i + 3)))
}
}
}
// With schema merging on, a filter on a column that exists only in some of the
// merged files must not fail the query (missing values come back as null),
// both for top-level and nested (struct) columns, with and without the
// vectorized reader.
test("Filter applied on merged Parquet schema with new column should work") {
import testImplicits._
Seq("true", "false").foreach { vectorized =>
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true",
SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "true",
SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> vectorized) {
withTempPath { dir =>
val path1 = s"${dir.getCanonicalPath}/table1"
(1 to 3).map(i => (i, i.toString)).toDF("a", "b").write.parquet(path1)
val path2 = s"${dir.getCanonicalPath}/table2"
(1 to 3).map(i => (i, i.toString)).toDF("c", "b").write.parquet(path2)
// No matter "c = 1" gets pushed down or not, this query should work without exception.
val df = spark.read.parquet(path1, path2).filter("c = 1").selectExpr("c", "b", "a")
checkAnswer(
df,
Row(1, "1", null))
// Same scenario with the differing column nested inside a struct.
val path3 = s"${dir.getCanonicalPath}/table3"
val dfStruct = sparkContext.parallelize(Seq((1, 1))).toDF("a", "b")
dfStruct.select(struct("a").as("s")).write.parquet(path3)
val path4 = s"${dir.getCanonicalPath}/table4"
val dfStruct2 = sparkContext.parallelize(Seq((1, 1))).toDF("c", "b")
dfStruct2.select(struct("c").as("s")).write.parquet(path4)
// No matter "s.c = 1" gets pushed down or not, this query should work without exception.
val dfStruct3 = spark.read.parquet(path3, path4).filter("s.c = 1")
.selectExpr("s")
checkAnswer(dfStruct3, Row(Row(null, 1)))
}
}
}
}
// The unsafe row RecordReader does not support row by row filtering, so run this
// with the vectorized reader disabled.
test("SPARK-11661 Still pushdown filters returned by unhandledFilters") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/part=1"
(1 to 3).map(i => (i, i.toString)).toDF("a", "b").write.parquet(path)
val df = spark.read.parquet(path).filter("a = 2")
// The result should be single row.
// When a filter is pushed to Parquet, Parquet can apply it to every row.
// So, we can check the number of rows returned from the Parquet
// to make sure our filter pushdown work.
assert(stripSparkFilter(df).count == 1)
}
}
}
}
// Regression test: negated predicates ('Not' over equality / IN) must not be
// mistranslated during pushdown — all 5 rows satisfy each disjunctive condition.
test("SPARK-12218: 'Not' is included in Parquet filter pushdown") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/table1"
(1 to 5).map(i => (i, (i % 2).toString)).toDF("a", "b").write.parquet(path)
checkAnswer(
spark.read.parquet(path).where("not (a = 2) or not(b in ('1'))"),
(1 to 5).map(i => Row(i, (i % 2).toString)))
checkAnswer(
spark.read.parquet(path).where("not (a = 2 and b in ('1'))"),
(1 to 5).map(i => Row(i, (i % 2).toString)))
}
}
}
// A conjunction is only convertible when BOTH sides convert (dropping one side of
// an And — or of a negated And — would change semantics), so mixed convertible/
// unconvertible conjunctions must yield None.
test("SPARK-12218 Converting conjunctions into Parquet filter predicates") {
val schema = StructType(Seq(
StructField("a", IntegerType, nullable = false),
StructField("b", StringType, nullable = true),
StructField("c", DoubleType, nullable = true)
))
val parquetSchema = new SparkToParquetSchemaConverter(conf).convert(schema)
// Both sides convertible: the whole And converts.
assertResult(Some(and(
lt(intColumn("a"), 10: Integer),
gt(doubleColumn("c"), 1.5: java.lang.Double)))
) {
parquetFilters.createFilter(
parquetSchema,
sources.And(
sources.LessThan("a", 10),
sources.GreaterThan("c", 1.5D)))
}
// StringContains is not convertible, so the And must not convert.
assertResult(None) {
parquetFilters.createFilter(
parquetSchema,
sources.And(
sources.LessThan("a", 10),
sources.StringContains("b", "prefix")))
}
// Same under negation: Not(And(...)) with an unconvertible side yields None.
assertResult(None) {
parquetFilters.createFilter(
parquetSchema,
sources.Not(
sources.And(
sources.GreaterThan("a", 1),
sources.StringContains("b", "prefix"))))
}
}
// Regression test: an outer struct column sharing the name of its inner field
// must not trigger pushdown on the (struct-typed) outer column.
test("SPARK-16371 Do not push down filters when inner name and outer name are the same") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Tuple1(i)))) { implicit df =>
// Here the schema becomes as below:
//
// root
// |-- _1: struct (nullable = true)
// | |-- _1: integer (nullable = true)
//
// The inner column name, `_1` and outer column name `_1` are the same.
// Obviously this should not push down filters because the outer column is struct.
assert(df.filter("_1 IS NOT NULL").count() === 4)
}
}
// With the vectorized reader, a pushed-down filter that matches no data should
// prune every row group (accumulator stays 0); without pushdown, row groups are
// read (accumulator > 0).
test("Filters should be pushed down for vectorized Parquet reader at row group level") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "true",
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/table"
// All values of "a" are 101, so "a < 100" can drop every row group via stats.
(1 to 1024).map(i => (101, i)).toDF("a", "b").write.parquet(path)
Seq(true, false).foreach { enablePushDown =>
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> enablePushDown.toString) {
val accu = new NumRowGroupsAcc
sparkContext.register(accu)
val df = spark.read.parquet(path).filter("a < 100")
df.foreachPartition((it: Iterator[Row]) => it.foreach(v => accu.add(0)))
if (enablePushDown) {
assert(accu.value == 0)
} else {
assert(accu.value > 0)
}
AccumulatorContext.remove(accu.id)
}
}
}
}
}
// Regression test: string comparisons pushed to Parquet must use unsigned
// byte-wise ordering so non-ASCII strings compare correctly.
test("SPARK-17213: Broken Parquet filter push-down for string columns") {
Seq(true, false).foreach { vectorizedEnabled =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> vectorizedEnabled.toString) {
withTempPath { dir =>
import testImplicits._
val path = dir.getCanonicalPath
// scalastyle:off nonascii
Seq("a", "é").toDF("name").write.parquet(path)
// scalastyle:on nonascii
assert(spark.read.parquet(path).where("name > 'a'").count() == 1)
assert(spark.read.parquet(path).where("name >= 'a'").count() == 2)
// scalastyle:off nonascii
assert(spark.read.parquet(path).where("name < 'é'").count() == 1)
assert(spark.read.parquet(path).where("name <= 'é'").count() == 2)
// scalastyle:on nonascii
}
}
}
}
// Regression test: a column name containing a dot ("col.dots") must not be
// misinterpreted as a nested-field path during pushdown; the query must still
// return correct results.
test("SPARK-20364: Disable Parquet predicate pushdown for fields having dots in the names") {
import testImplicits._
Seq(true, false).foreach { vectorized =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> vectorized.toString,
SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> true.toString,
SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
withTempPath { path =>
Seq(Some(1), None).toDF("col.dots").write.parquet(path.getAbsolutePath)
val readBack = spark.read.parquet(path.getAbsolutePath).where("`col.dots` IS NOT NULL")
assert(readBack.count() == 1)
}
}
}
}
// Row-group-level pruning must still apply even when record-level filtering
// ('spark.sql.parquet.recordFilter') is disabled.
test("Filters should be pushed down for Parquet readers at row group level") {
import testImplicits._
withSQLConf(
// Makes sure disabling 'spark.sql.parquet.recordFilter' still enables
// row group level filtering.
SQLConf.PARQUET_RECORD_FILTER_ENABLED.key -> "false",
SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true",
SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
withTempPath { path =>
val data = (1 to 1024)
// Small block size produces multiple row groups from a single file.
data.toDF("a").coalesce(1)
.write.option("parquet.block.size", 512)
.parquet(path.getAbsolutePath)
val df = spark.read.parquet(path.getAbsolutePath).filter("a == 500")
// Here, we strip the Spark side filter and check the actual results from Parquet.
val actual = stripSparkFilter(df).collect().length
// Since those are filtered at row group level, the result count should be less
// than the total length but should not be a single record.
// Note that, if record level filtering is enabled, it should be a single record.
// If no filter is pushed down to Parquet, it should be the total length of data.
assert(actual > 1 && actual < data.length)
}
}
}
// Regression test against PARQUET-1217: row groups with null counts but no
// min/max stats must not be wrongly dropped by pushed-down filters.
test("SPARK-23852: Broken Parquet push-down for partially-written stats") {
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
// parquet-1217.parquet contains a single column with values -1, 0, 1, 2 and null.
// The row-group statistics include null counts, but not min and max values, which
// triggers PARQUET-1217.
val df = readResourceParquetFile("test-data/parquet-1217.parquet")
// Will return 0 rows if PARQUET-1217 is not fixed.
assert(df.where("col > 0").count() === 2)
}
}
// StringStartsWith is pushed down as a user-defined Parquet predicate; covers
// matching/non-matching prefixes, negation, a null pattern (no filter generated),
// and the canDrop/inverseCanDrop row-group pruning paths.
test("filter pushdown - StringStartsWith") {
withParquetDataFrame((1 to 4).map(i => Tuple1(i + "str" + i))) { implicit df =>
// Empty prefix matches everything.
checkFilterPredicate(
'_1.startsWith("").asInstanceOf[Predicate],
classOf[UserDefinedByInstance[_, _]],
Seq("1str1", "2str2", "3str3", "4str4").map(Row(_)))
// Progressively longer prefixes of "2str2" all select exactly that row.
Seq("2", "2s", "2st", "2str", "2str2").foreach { prefix =>
checkFilterPredicate(
'_1.startsWith(prefix).asInstanceOf[Predicate],
classOf[UserDefinedByInstance[_, _]],
"2str2")
}
// Case mismatch, literal "null", and over-long prefixes match nothing.
Seq("2S", "null", "2str22").foreach { prefix =>
checkFilterPredicate(
'_1.startsWith(prefix).asInstanceOf[Predicate],
classOf[UserDefinedByInstance[_, _]],
Seq.empty[Row])
}
// Negated variants of the three cases above.
checkFilterPredicate(
!'_1.startsWith("").asInstanceOf[Predicate],
classOf[UserDefinedByInstance[_, _]],
Seq().map(Row(_)))
Seq("2", "2s", "2st", "2str", "2str2").foreach { prefix =>
checkFilterPredicate(
!'_1.startsWith(prefix).asInstanceOf[Predicate],
classOf[UserDefinedByInstance[_, _]],
Seq("1str1", "3str3", "4str4").map(Row(_)))
}
Seq("2S", "null", "2str22").foreach { prefix =>
checkFilterPredicate(
!'_1.startsWith(prefix).asInstanceOf[Predicate],
classOf[UserDefinedByInstance[_, _]],
Seq("1str1", "2str2", "3str3", "4str4").map(Row(_)))
}
// A null pattern must produce no Parquet filter at all.
assertResult(None) {
parquetFilters.createFilter(
new SparkToParquetSchemaConverter(conf).convert(df.schema),
sources.StringStartsWith("_1", null))
}
}
import testImplicits._
// Test canDrop() has taken effect
testStringStartsWith(spark.range(1024).map(_.toString).toDF(), "value like 'a%'")
// Test inverseCanDrop() has taken effect
testStringStartsWith(spark.range(1024).map(c => "100").toDF(), "value not like '10%'")
}
// IN predicates are converted to OR-chains of equality filters, de-duplicated,
// and only pushed down up to the configured threshold; the second half verifies
// end-to-end row-group pruning behavior and null handling.
test("SPARK-17091: Convert IN predicate to Parquet filter push-down") {
val schema = StructType(Seq(
StructField("a", IntegerType, nullable = false)
))
val parquetSchema = new SparkToParquetSchemaConverter(conf).convert(schema)
// IN (null) becomes an equality against null.
assertResult(Some(FilterApi.eq(intColumn("a"), null: Integer))) {
parquetFilters.createFilter(parquetSchema, sources.In("a", Array(null)))
}
assertResult(Some(FilterApi.eq(intColumn("a"), 10: Integer))) {
parquetFilters.createFilter(parquetSchema, sources.In("a", Array(10)))
}
// Remove duplicates
assertResult(Some(FilterApi.eq(intColumn("a"), 10: Integer))) {
parquetFilters.createFilter(parquetSchema, sources.In("a", Array(10, 10)))
}
// Multiple values become a left-nested OR chain.
assertResult(Some(or(or(
FilterApi.eq(intColumn("a"), 10: Integer),
FilterApi.eq(intColumn("a"), 20: Integer)),
FilterApi.eq(intColumn("a"), 30: Integer)))
) {
parquetFilters.createFilter(parquetSchema, sources.In("a", Array(10, 20, 30)))
}
// At the threshold the filter is generated; one past it, pushdown is skipped.
assert(parquetFilters.createFilter(parquetSchema, sources.In("a",
Range(0, conf.parquetFilterPushDownInFilterThreshold).toArray)).isDefined)
assert(parquetFilters.createFilter(parquetSchema, sources.In("a",
Range(0, conf.parquetFilterPushDownInFilterThreshold + 1).toArray)).isEmpty)
import testImplicits._
withTempPath { path =>
val data = 0 to 1024
data.toDF("a").selectExpr("if (a = 1024, null, a) AS a") // convert 1024 to null
.coalesce(1).write.option("parquet.block.size", 512)
.parquet(path.getAbsolutePath)
val df = spark.read.parquet(path.getAbsolutePath)
Seq(true, false).foreach { pushEnabled =>
withSQLConf(
SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> pushEnabled.toString) {
Seq(1, 5, 10, 11).foreach { count =>
val filter = s"a in(${Range(0, count).mkString(",")})"
assert(df.where(filter).count() === count)
// With pushdown under the threshold, some (but not all) row groups are pruned;
// otherwise Parquet returns everything and Spark filters the rows.
val actual = stripSparkFilter(df.where(filter)).collect().length
if (pushEnabled && count <= conf.parquetFilterPushDownInFilterThreshold) {
assert(actual > 1 && actual < data.length)
} else {
assert(actual === data.length)
}
}
assert(df.where("a in(null)").count() === 0)
assert(df.where("a = null").count() === 0)
assert(df.where("a is null").count() === 1)
}
}
}
}
test("SPARK-25207: Case-insensitive field resolution for pushdown when reading parquet") {
  // Builds a ParquetFilters mirroring the session's push-down settings,
  // varying only case sensitivity.
  def createParquetFilter(caseSensitive: Boolean): ParquetFilters = {
    new ParquetFilters(conf.parquetFilterPushDownDate, conf.parquetFilterPushDownTimestamp,
      conf.parquetFilterPushDownDecimal, conf.parquetFilterPushDownStringStartWith,
      conf.parquetFilterPushDownInFilterThreshold, caseSensitive)
  }
  val caseSensitiveParquetFilters = createParquetFilter(caseSensitive = true)
  val caseInsensitiveParquetFilters = createParquetFilter(caseSensitive = false)

  // Asserts that a source filter referencing an upper-cased column resolves to
  // the lower-cased physical Parquet column when case-insensitive, and that
  // the case-sensitive builder produces no filter at all.
  def testCaseInsensitiveResolution(
      schema: StructType,
      expected: FilterPredicate,
      filter: sources.Filter): Unit = {
    val parquetSchema = new SparkToParquetSchemaConverter(conf).convert(schema)
    assertResult(Some(expected)) {
      caseInsensitiveParquetFilters.createFilter(parquetSchema, filter)
    }
    assertResult(None) {
      caseSensitiveParquetFilters.createFilter(parquetSchema, filter)
    }
  }

  val schema = StructType(Seq(StructField("cint", IntegerType)))

  // One case per supported predicate type, each referencing "CINT" against the
  // physical column "cint".
  testCaseInsensitiveResolution(
    schema, FilterApi.eq(intColumn("cint"), null.asInstanceOf[Integer]), sources.IsNull("CINT"))
  testCaseInsensitiveResolution(
    schema,
    FilterApi.notEq(intColumn("cint"), null.asInstanceOf[Integer]),
    sources.IsNotNull("CINT"))
  testCaseInsensitiveResolution(
    schema, FilterApi.eq(intColumn("cint"), 1000: Integer), sources.EqualTo("CINT", 1000))
  testCaseInsensitiveResolution(
    schema,
    FilterApi.notEq(intColumn("cint"), 1000: Integer),
    sources.Not(sources.EqualTo("CINT", 1000)))
  testCaseInsensitiveResolution(
    schema, FilterApi.eq(intColumn("cint"), 1000: Integer), sources.EqualNullSafe("CINT", 1000))
  testCaseInsensitiveResolution(
    schema,
    FilterApi.notEq(intColumn("cint"), 1000: Integer),
    sources.Not(sources.EqualNullSafe("CINT", 1000)))
  testCaseInsensitiveResolution(
    schema,
    FilterApi.lt(intColumn("cint"), 1000: Integer), sources.LessThan("CINT", 1000))
  testCaseInsensitiveResolution(
    schema,
    FilterApi.ltEq(intColumn("cint"), 1000: Integer),
    sources.LessThanOrEqual("CINT", 1000))
  testCaseInsensitiveResolution(
    schema, FilterApi.gt(intColumn("cint"), 1000: Integer), sources.GreaterThan("CINT", 1000))
  testCaseInsensitiveResolution(
    schema,
    FilterApi.gtEq(intColumn("cint"), 1000: Integer),
    sources.GreaterThanOrEqual("CINT", 1000))
  testCaseInsensitiveResolution(
    schema,
    FilterApi.or(
      FilterApi.eq(intColumn("cint"), 10: Integer),
      FilterApi.eq(intColumn("cint"), 20: Integer)),
    sources.In("CINT", Array(10, 20)))

  // Ambiguity guard: when two physical fields differ only by case, even the
  // case-insensitive builder must refuse to create a filter.
  val dupFieldSchema = StructType(
    Seq(StructField("cint", IntegerType), StructField("cINT", IntegerType)))
  val dupParquetSchema = new SparkToParquetSchemaConverter(conf).convert(dupFieldSchema)
  assertResult(None) {
    caseInsensitiveParquetFilters.createFilter(
      dupParquetSchema, sources.EqualTo("CINT", 1000))
  }
}
test("SPARK-25207: exception when duplicate fields in case-insensitive mode") {
  withTempPath { dir =>
    val count = 10
    val tableName = "spark_25207"
    val tableDir = dir.getAbsoluteFile + "/table"
    withTable(tableName) {
      // Write a file whose schema contains columns that differ only in case
      // ("B" and "b"); this is only writable while case sensitivity is on.
      withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
        spark.range(count).selectExpr("id as A", "id as B", "id as b")
          .write.mode("overwrite").parquet(tableDir)
      }
      sql(
        s"""
           |CREATE TABLE $tableName (A LONG, B LONG) USING PARQUET LOCATION '$tableDir'
         """.stripMargin)

      // In case-insensitive mode a filter on "b" is ambiguous between the
      // physical columns "B" and "b" and must fail the query.
      withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
        val e = intercept[SparkException] {
          sql(s"select a from $tableName where b > 0").collect()
        }
        assert(e.getCause.isInstanceOf[RuntimeException] && e.getCause.getMessage.contains(
          """Found duplicate field(s) "B": [B, b] in case-insensitive mode"""))
      }

      // Case-sensitive mode resolves "B" unambiguously; ids 1..count-1 satisfy B > 0.
      withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
        checkAnswer(sql(s"select A from $tableName where B > 0"), (1 until count).map(Row(_)))
      }
    }
  }
}
}
/** Accumulator summing the number of Parquet row groups observed by tasks. */
class NumRowGroupsAcc extends AccumulatorV2[Integer, Integer] {

  // Running total of row groups counted so far.
  private var count = 0

  override def isZero: Boolean = count == 0

  override def copy(): AccumulatorV2[Integer, Integer] = {
    val cloned = new NumRowGroupsAcc()
    cloned.count = count
    cloned
  }

  override def reset(): Unit = {
    count = 0
  }

  override def add(v: Integer): Unit = {
    count += v
  }

  override def merge(other: AccumulatorV2[Integer, Integer]): Unit =
    other match {
      case that: NumRowGroupsAcc =>
        count += that.count
      case _ =>
        throw new UnsupportedOperationException(
          s"Cannot merge ${this.getClass.getName} with ${other.getClass.getName}")
    }

  override def value: Integer = count
}
| rekhajoshm/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala | Scala | apache-2.0 | 49,082 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.asyncmvc.controllers
import akka.actor.{Props, ActorSystem, ActorRef}
import akka.stream.ActorMaterializer
import uk.gov.hmrc.play.asyncmvc.async.{Cache, TimedEvent}
import uk.gov.hmrc.play.asyncmvc.example.connectors.{Stock, StockConnector}
import uk.gov.hmrc.play.asyncmvc.example.controllers.{ExampleAsyncController, InputForm}
import uk.gov.hmrc.play.asyncmvc.model.TaskCache
import play.api.test.FakeRequest
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import uk.gov.hmrc.http.HeaderCarrier
/**
 * Test fixtures for exercising [[ExampleAsyncController]] in its various async
 * modes (blocking/non-blocking, timeouts, failures, throttling).
 *
 * Each `SetupXxx` trait wires a controller with a specific stock connector,
 * cache and timeout configuration. `testSessionId` doubles as the actor name,
 * so it must be unique per fixture within the shared ActorSystem.
 */
trait AsyncSetup {

  /** Placeholder for collaborators a fixture never touches; fails fast if used. */
  def notImplemented = throw new IllegalArgumentException("Not implemented!")

  trait Setup {
    implicit val system = ActorSystem()
    implicit val materializer = ActorMaterializer()

    // Overridden per fixture; also used as the async actor's name.
    val testSessionId = "TestId"
    lazy val ident = s"Example-$testSessionId"

    implicit val hc = HeaderCarrier()
    implicit val req = FakeRequest()

    /** Connector that always succeeds immediately. */
    val successStockConnector = new StockConnector {
      lazy val baseUrl: String = notImplemented
      override def getStock(id: Long): Future[Stock] = {
        Future.successful(Stock("SUCCESS", id))
      }
    }

    /**
     * Connector whose response is delayed. With `dynamic = true` the delay is a
     * random value below `rangeNum`, otherwise exactly `rangeNum`.
     */
    def dynamicDelayStockConnector(rangeNum: Int, dynamic: Boolean = false) = new StockConnector {
      lazy val baseUrl: String = notImplemented
      // Fixed: previously the random value was computed and discarded, so
      // `delay` was always `rangeNum` regardless of the `dynamic` flag.
      val delay = if (dynamic) scala.util.Random.nextInt(rangeNum) else rangeNum
      override def getStock(id: Long): Future[Stock] = {
        TimedEvent.delayedSuccess(delay, 0).map(_ => {
          Stock("TEST DYNAMIC DELAY", id)
        })
      }
    }

    /** Connector that always fails. */
    val errorStockConnector = new StockConnector {
      lazy val baseUrl: String = notImplemented
      // NOTE(review): `throw` inside the argument means this throws
      // synchronously when called rather than returning a failed Future.
      // Kept as-is; confirm callers before changing to
      // Future.failed(new Exception("FAILURE!")).
      override def getStock(id: Long) = Future.failed(throw new Exception("FAILURE!"))
    }

    /** Base controller wiring: shared cache, non-blocking by default, named actor. */
    trait ControllerUnderTest extends ExampleAsyncController {
      def getName: String = notImplemented
      override lazy val taskCache = cache
      override def waitMode = false
      override lazy val asyncActor: ActorRef = system.actorOf(Props(new AsyncMVCAsyncActor(taskCache, CLIENT_TIMEOUT)), name = getName)
    }

    /** Single-slot in-memory cache standing in for the real task cache. */
    val cache = new Cache[TaskCache] {
      var bodyCache: Option[TaskCache] = None
      def put(id: String, value: TaskCache)(implicit hc: HeaderCarrier): Future[Unit] = {
        bodyCache = Some(value)
        // Was Future.successful(Unit) — the Unit companion object; use the unit value.
        Future.successful(())
      }
      def get(id: String)(implicit hc: HeaderCarrier): Future[Option[TaskCache]] = {
        Future.successful(bodyCache)
      }
    }

    lazy val controller: ExampleAsyncController = notImplemented
  }

  /** Blocking (wait-mode) controller with an immediately successful connector. */
  trait Blocking extends Setup {
    override val testSessionId = "TestIdBlock"
    val form = uk.gov.hmrc.play.asyncmvc.example.controllers.InputForm("Example Data", 11)
    override lazy val controller: ExampleAsyncController = new ControllerUnderTest {
      override def getName = testSessionId
      override def buildUniqueId() = testSessionId
      override lazy val stockConnector: StockConnector = successStockConnector
      override def waitMode = true
    }
  }

  /** Non-blocking controller with an immediately successful connector. */
  trait SetupNonBlocking extends Setup {
    override val testSessionId = "TestIdNonBlocking"
    val form = InputForm("Example Data", 11)
    override lazy val controller: ExampleAsyncController = new ControllerUnderTest {
      override def getName = testSessionId
      override lazy val stockConnector: StockConnector = successStockConnector
      override def buildUniqueId() = testSessionId
    }
  }

  /** Same as [[SetupNonBlocking]] with a different form payload. */
  trait SetupNonBlockingSafeGuard extends SetupNonBlocking {
    override val testSessionId = "SafeGuard"
    override val form = InputForm("Example Data", 12)
  }

  /** Non-blocking controller whose 1ms client timeout forces the timeout path. */
  trait SetupNonBlockingTimeout extends Setup {
    val form = InputForm("Example Data", 22)
    override val testSessionId = "TestIdNonBlockingTimeout"
    override lazy val controller: ExampleAsyncController = new ControllerUnderTest {
      override def getName = testSessionId
      override lazy val stockConnector: StockConnector = dynamicDelayStockConnector(1000)
      override def buildUniqueId() = testSessionId
      override def getClientTimeout = 1
      override lazy val asyncActor: ActorRef = system.actorOf(Props(new AsyncMVCAsyncActor(cache, 1000)), name = getName)
    }
  }

  /** Blocking controller whose tiny timeouts force the timeout path. */
  trait SetupBlockingTimeout extends Setup {
    val form = InputForm("Example Data", 16)
    override val testSessionId = "TestIdBlockingTimeout"
    override lazy val controller: ExampleAsyncController = new ControllerUnderTest {
      override def getName = testSessionId
      override lazy val stockConnector: StockConnector = dynamicDelayStockConnector(1000)
      override def buildUniqueId() = testSessionId
      override def waitMode = true
      override def getClientTimeout = 1
      override def blockingDelayTime = 1
      override lazy val asyncActor: ActorRef = system.actorOf(Props(new AsyncMVCAsyncActor(cache, 100)), name = getName)
    }
  }

  /** Non-blocking controller with a connector that always fails. */
  trait SetupNonBlockingError extends Setup {
    val form = InputForm("Example Data", 33)
    override val testSessionId = "TestIdSetupNonBlockingError"
    override lazy val controller: ExampleAsyncController = new ControllerUnderTest {
      override def getName = testSessionId
      override lazy val stockConnector: StockConnector = errorStockConnector
      override def buildUniqueId() = testSessionId
    }
  }

  /** Fixture whose cache fails on put, exercising the cache-error path. */
  trait SetupNonBlockingCacheError extends Setup {
    val form = InputForm("Example Data", 23)
    override val testSessionId = "TestIdSetupCacheError"
    override val cache = new Cache[TaskCache] {
      var bodyCache: Option[TaskCache] = None
      def put(id: String, value: TaskCache)(implicit hc: HeaderCarrier): Future[Unit] = {
        Future.failed(new Exception("Controlled explosion!"))
      }
      def get(id: String)(implicit hc: HeaderCarrier): Future[Option[TaskCache]] = {
        Future.failed(new Exception("Should not be called"))
      }
    }
    override lazy val controller: ExampleAsyncController = new ControllerUnderTest {
      override def getName = testSessionId
      override def buildUniqueId() = testSessionId
    }
  }

  /** Blocking variant of [[SetupNonBlockingError]]. */
  trait SetupBlockingError extends SetupNonBlockingError {
    override val testSessionId = "TestIdBlockingError"
    override lazy val controller: ExampleAsyncController = new ControllerUnderTest {
      override def getName = testSessionId
      override lazy val stockConnector = errorStockConnector
      override def buildUniqueId() = testSessionId
      override def waitMode = true
    }
  }

  /** Blocking controller with a zero throttle limit: every request is rejected
    * before any collaborator is touched, hence the notImplemented wiring. */
  trait SetupBlockingThrottle extends Setup {
    override val testSessionId = "TestIdBlockingThrottle"
    val form = InputForm("Example Data", 44)
    override lazy val controller: ExampleAsyncController = new ExampleAsyncController() {
      override lazy val stockConnector: StockConnector = notImplemented
      override lazy val taskCache = notImplemented
      override def buildUniqueId() = testSessionId
      override def waitMode = true
      override def throttleLimit = 0
      override lazy val asyncActor: ActorRef = notImplemented
    }
  }

  /** Non-blocking variant of [[SetupBlockingThrottle]]. */
  trait SetupNonBlockingThrottle extends SetupBlockingThrottle {
    override val testSessionId = "TestIdNonBlockingThrottle"
    override lazy val controller: ExampleAsyncController = new ExampleAsyncController() {
      override lazy val stockConnector: StockConnector = notImplemented
      override lazy val taskCache = notImplemented
      override def buildUniqueId() = testSessionId
      override def waitMode = false
      override def throttleLimit = 0
      override lazy val asyncActor: ActorRef = notImplemented
    }
  }

  /** Blocking controller with a slow connector and a generous client timeout. */
  trait SetupBlocking extends Setup {
    override val testSessionId = "TestIdBlocking"
    val form = InputForm("Example Data", 55)
    override lazy val controller: ExampleAsyncController = new ControllerUnderTest {
      override def getName = testSessionId
      override lazy val stockConnector: StockConnector = dynamicDelayStockConnector(1000)
      override def buildUniqueId() = testSessionId
      override def waitMode = true
      override def getClientTimeout = 5000
      override lazy val asyncActor: ActorRef = system.actorOf(Props(new AsyncMVCAsyncActor(cache, getClientTimeout)), name = getName)
    }
  }

  /** Blocking controller for concurrency tests; uses the default session id. */
  trait SetupConcurrencyDynamicBlocking extends Setup {
    val form = InputForm("Example Data", 1)
    override lazy val controller: ExampleAsyncController = new ControllerUnderTest {
      override def getName = testSessionId
      override lazy val stockConnector: StockConnector = successStockConnector
      override def buildUniqueId() = testSessionId
      override def waitMode = true
      override def getClientTimeout = 80000
      override lazy val asyncActor: ActorRef = system.actorOf(Props(new AsyncMVCAsyncActor(cache, getClientTimeout)), name = getName)
    }
  }
}
| hmrc/play-async | src/test/scala/uk/gov/hmrc/play/asyncmvc/controllers/AsyncSetup.scala | Scala | apache-2.0 | 9,392 |
package controllers.silhouette
import java.util.UUID
import com.google.inject.AbstractModule
import com.mohiva.play.silhouette.api.{ Environment, LoginInfo }
import com.mohiva.play.silhouette.impl.authenticators.CookieAuthenticator
import com.mohiva.play.silhouette.test._
import models.User
import net.codingwell.scalaguice.ScalaModule
import org.specs2.mock.Mockito
import org.specs2.specification.Scope
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.concurrent.Execution.Implicits._
import play.api.test.{ FakeRequest, PlaySpecification, WithApplication }
import utils.auth.DefaultEnv
/**
 * Test case for the [[controllers.ApplicationController]] class.
 */
class ApplicationControllerSpec extends PlaySpecification with Mockito {
  sequential

  "The `index` action" should {
    "redirect to login page if user is unauthorized" in new Context {
      new WithApplication(application) {
        // Request the index page with an authenticator that matches no known identity.
        val Some(redirectResult) = route(app, FakeRequest(routes.ApplicationController.index())
          .withAuthenticator[DefaultEnv](LoginInfo("invalid", "invalid"))
        )

        status(redirectResult) must be equalTo SEE_OTHER

        // The redirect must point at the sign-in page...
        val redirectURL = redirectLocation(redirectResult).getOrElse("")
        redirectURL must contain(routes.SignInController.view().toString)

        // ...which renders successfully for an anonymous request.
        val Some(unauthorizedResult) = route(app, FakeRequest(GET, redirectURL))

        status(unauthorizedResult) must be equalTo OK
        contentType(unauthorizedResult) must beSome("text/html")
        contentAsString(unauthorizedResult) must contain("Silhouette - Sign In")
      }
    }

    "return 200 if user is authorized" in new Context {
      new WithApplication(application) {
        // Same request, but authenticated as the fixture identity.
        val Some(result) = route(app, FakeRequest(routes.ApplicationController.index())
          .withAuthenticator[DefaultEnv](identity.loginInfo)
        )

        status(result) must beEqualTo(OK)
      }
    }
  }

  /**
   * The context.
   */
  trait Context extends Scope {

    /**
     * A fake Guice module that substitutes the fake Silhouette environment below.
     */
    class FakeModule extends AbstractModule with ScalaModule {
      def configure() = {
        bind[Environment[DefaultEnv]].toInstance(env)
      }
    }

    /**
     * An identity known to the fake environment.
     */
    val identity = User(
      userID = UUID.randomUUID(),
      loginInfo = LoginInfo("facebook", "user@facebook.com"),
      firstName = None,
      lastName = None,
      fullName = None,
      email = None,
      avatarURL = None,
      activated = true
    )

    /**
     * A Silhouette fake environment containing only the identity above.
     */
    implicit val env: Environment[DefaultEnv] = new FakeEnvironment[DefaultEnv](Seq(identity.loginInfo -> identity))

    /**
     * The application under test, built with the fake module overriding bindings.
     */
    lazy val application = new GuiceApplicationBuilder()
      .overrides(new FakeModule)
      .build()
  }
}
| raisercostin/play-silhouette-slick-seed | modules/silhouette/test/controllers/ApplicationControllerSpec.scala | Scala | apache-2.0 | 2,813 |
package org.backuity.matchete
import org.backuity.matchete.TestUtil._
import org.junit.{ComparisonFailure, Test}
/** Tests for diff reporting when comparing sequences element by element. */
class SeqDiffableTest extends JunitMatchers {

  @Test
  def beEqualNestedList_Ok(): Unit = {
    // A mismatch inside a list is reported with the element index and the
    // differing field path.
    {
      List(Person("John", 21), Person("Jane", 32)) must_== List(Person("John", 21), Person("Jane", 12))
    } must throwAn[AssertionError].withMessage(
      """List(Person(John,21), Person(Jane,32)) is not equal to List(Person(John,21), Person(Jane,12))
        |Got : (1).age = 32
        |Expected: (1).age = 12""".stripMargin)

    // A field-based Diffable drives the comparison for elements of that type.
    implicit val diffableNACC: Diffable[CustomEqual] = Diffable.forFields[CustomEqual](_.str, _.int)
    {
      List(new CustomEqual("one", 1), new CustomEqual("two", 2)) must_== List(new CustomEqual("one", 2), new CustomEqual("two", 2))
    } must throwAn[AssertionError].withMessage(
      """List(CE(one,1), CE(two,2)) is not equal to List(CE(one,2), CE(two,2))
        |Got : (0).int = 1
        |Expected: (0).int = 2""".stripMargin)
  }

  @Test
  def beEqualNestedList_Error(): Unit = {
    // Elements of different concrete types compare through the shared Diffable's fields.
    implicit val stuffDiffable: Diffable[Stuff] = Diffable.forFields(_.name, _.price)
    {
      Bucket(List(Flower("john", 12))) must_== Bucket(List(Bike("john", 21, "BMX")))
    } must throwAn[AssertionError].withMessage(
      """Bucket(List(Flower(john,12))) is not equal to Bucket(List(Bike(john,21,BMX)))
        |Got : stuffs.(0).price = 12
        |Expected: stuffs.(0).price = 21""".stripMargin)
  }

  @Test
  def beEqualNestedList_Error_DifferentSize(): Unit = {
    // A size mismatch is reported before any element-wise comparison.
    {
      Bucket(List(Flower("john", 12), Flower("dude", 12))) must_== Bucket(List(Bike("john", 21, "BMX")))
    } must throwAn[AssertionError].withMessage(
      """Bucket(List(Flower(john,12), Flower(dude,12))) is not equal to Bucket(List(Bike(john,21,BMX)))
        |Got : stuffs.size = 2
        |Expected: stuffs.size = 1""".stripMargin)
  }

  @Test
  def beEqualNestedList_ShouldThrowComparisonFailureForStringFields(): Unit = {
    implicit val stuffDiffable: Diffable[Stuff] = Diffable.forFields(_.name, _.price)
    // String field mismatches surface as JUnit ComparisonFailure so IDEs can
    // show a side-by-side diff; actual/expected carry the raw field values.
    {
      Bucket(List(Flower("x", 13), Flower("john toto", 12))) must_== Bucket(List(Flower("x", 13), Flower("john X toto", 21)))
    } must throwA[ComparisonFailure].suchAs {
      case c: ComparisonFailure =>
        c.getMessage must_==
          """
            | Bucket(List(Flower(x,13), Flower(john toto,12)))
            |
            |is not equal to
            |
            | Bucket(List(Flower(x,13), Flower(john X toto,21)))
            |
            |Got : stuffs.(1).name = 'john toto'
            |Expected: stuffs.(1).name = 'john X toto' expected:<john [X ]toto> but was:<john []toto>""".stripMargin
        c.getActual must_== "john toto"
        c.getExpected must_== "john X toto"
    }
  }

  @Test
  def beEqual_Seq() {
    // Equal sequences compare regardless of the declared collection type.
    List(1, 2, 3) must_== List(1, 2, 3)
    Seq(1, 2, 3) must_== List(1, 2, 3)

    // First differing element is reported by index.
    {
      Seq(1, 2, 3) must_== List(1, 3, 2)
    } must throwAn[AssertionError].withMessage(
      """List(1, 2, 3) is not equal to List(1, 3, 2)
        |Got : (1) = 2
        |Expected: (1) = 3""".stripMargin)

    // Size mismatches (longer, shorter, empty) all report via "size".
    {
      Seq(1, 2, 3) must_== List(1, 2, 3, 4)
    } must throwAn[AssertionError].withMessage(
      """List(1, 2, 3) is not equal to List(1, 2, 3, 4)
        |Got : size = 3
        |Expected: size = 4""".stripMargin)

    {
      Seq(1, 2, 3) must_== List(1)
    } must throwAn[AssertionError].withMessage(
      """List(1, 2, 3) is not equal to List(1)
        |Got : size = 3
        |Expected: size = 1""".stripMargin)

    {
      Seq.empty[Int] must_== List(1, 2, 3, 4)
    } must throwAn[AssertionError].withMessage(
      """List() is not equal to List(1, 2, 3, 4)
        |Got : size = 0
        |Expected: size = 4""".stripMargin)
  }
}
| backuity/matchete | core/src/test/scala/org/backuity/matchete/SeqDiffableTest.scala | Scala | apache-2.0 | 3,775 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ha
import java.io.File
import java.time.Instant
import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import scala.util.Try
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.Matchers
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.junit.JUnitRunner
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.ActorMaterializer
import common.TestUtils
import common.WhiskProperties
import common.rest.WskRest
import common.WskActorSystem
import common.WskProps
import common.WskTestHelpers
import whisk.core.WhiskConfig
import whisk.utils.retry
@RunWith(classOf[JUnitRunner])
/** Kills controller0 mid-load and verifies the remaining controller keeps serving. */
class ShootComponentsTests extends FlatSpec with Matchers with WskTestHelpers with ScalaFutures with WskActorSystem {

  implicit val wskprops = WskProps()
  val wsk = new WskRest
  val defaultAction = Some(TestUtils.getTestActionFilename("hello.js"))

  implicit val materializer = ActorMaterializer()

  // Futures awaited in this suite (pings, invokes) may take up to a minute.
  implicit val testConfig = PatienceConfig(1.minute)

  // Throttle requests to the remaining controllers to avoid getting 429s. (60 req/min)
  val amountOfControllers = WhiskProperties.getProperty(WhiskConfig.controllerInstances).toInt
  val limit = WhiskProperties.getProperty(WhiskConfig.actionInvokeConcurrentLimit).toDouble
  val limitPerController = limit / amountOfControllers
  // Budget as if one controller is already down, since the test kills one.
  val allowedRequestsPerMinute = (amountOfControllers - 1.0) * limitPerController
  val timeBeweenRequests = 60.seconds / allowedRequestsPerMinute

  // Docker endpoint of the host running controller0.
  val controller0DockerHost = WhiskProperties.getBaseControllerHost() + ":" + WhiskProperties.getProperty(
    WhiskConfig.dockerPort)

  /** Restarts the named container on the given docker host via the docker CLI. */
  def restartComponent(host: String, component: String) = {
    def file(path: String) = Try(new File(path)).filter(_.exists).map(_.getAbsolutePath).toOption
    // Fall back to a bare "docker" on PATH when neither known location exists.
    val docker = (file("/usr/bin/docker") orElse file("/usr/local/bin/docker")).getOrElse("docker")

    val cmd = Seq(docker, "--host", host, "restart", component)
    println(s"Running command: ${cmd.mkString(" ")}")

    TestUtils.runCmd(0, new File("."), cmd: _*)
  }

  /** GETs /ping on the component; returns status and body, or None when unreachable. */
  def ping(host: String, port: Int) = {
    val response = Try { Http().singleRequest(HttpRequest(uri = s"http://$host:$port/ping")).futureValue }.toOption

    response.map { res =>
      (res.status, Unmarshal(res).to[String].futureValue)
    }
  }

  /** A controller counts as alive iff it answers /ping with 200 "pong". */
  def isControllerAlive(instance: Int): Boolean = {
    require(instance >= 0 && instance < 2, "Controller instance not known.")

    val host = WhiskProperties.getProperty("controller.hosts").split(",")(instance)
    val port = WhiskProperties.getControllerBasePort + instance

    val res = ping(host, port)
    res == Some((StatusCodes.OK, "pong"))
  }

  /**
   * Fires `amount` invoke+get request pairs against the action, pacing each
   * pair to `timeBeweenRequests`; returns the (invoke, get) exit-code pairs.
   */
  def doRequests(amount: Int, actionName: String): Seq[(Int, Int)] = {
    (0 until amount).map { i =>
      val start = Instant.now

      // Do POSTs and GETs
      val invokeExit = Future { wsk.action.invoke(actionName, expectedExitCode = TestUtils.DONTCARE_EXIT).exitCode }
      val getExit = Future { wsk.action.get(actionName, expectedExitCode = TestUtils.DONTCARE_EXIT).exitCode }

      println(s"Done rerquests with responses: invoke: ${invokeExit.futureValue} and get: ${getExit.futureValue}")

      // Sleep off whatever remains of this pair's time slot to respect the rate limit.
      val remainingWait = timeBeweenRequests.toMillis - (Instant.now.toEpochMilli - start.toEpochMilli)
      Thread.sleep(if (remainingWait < 0) 0L else remainingWait)
      (invokeExit.futureValue, getExit.futureValue)
    }
  }

  behavior of "Controllers hot standby"

  it should "use controller1 if controller0 goes down" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
    // Only meaningful when at least two controllers are deployed.
    if (amountOfControllers >= 2) {
      val actionName = "shootcontroller"

      assetHelper.withCleaner(wsk.action, actionName) { (action, _) =>
        action.create(actionName, defaultAction)
      }

      // Produce some load on the system for 100 seconds. Kill the controller after 4 requests
      val totalRequests = (100.seconds / timeBeweenRequests).toInt

      val requestsBeforeRestart = doRequests(4, actionName)

      // Kill the controller
      restartComponent(controller0DockerHost, "controller0")

      // Wait until down
      retry({
        isControllerAlive(0) shouldBe false
      }, 100, Some(100.milliseconds))

      // Check that second controller is still up
      isControllerAlive(1) shouldBe true

      val requestsAfterRestart = doRequests(totalRequests - 4, actionName)

      val requests = requestsBeforeRestart ++ requestsAfterRestart

      val unsuccessfulInvokes = requests.map(_._1).count(_ != TestUtils.SUCCESS_EXIT)
      // Allow 3 failures for the 100 seconds
      unsuccessfulInvokes should be <= 3

      val unsuccessfulGets = requests.map(_._2).count(_ != TestUtils.SUCCESS_EXIT)
      // Only allow 1 failure in GET requests, because they are idempotent and they should be passed to the next controller if one crashes
      unsuccessfulGets shouldBe 0

      // Check that both controllers are up
      // controller0
      isControllerAlive(0) shouldBe true
      //controller1
      isControllerAlive(1) shouldBe true
    }
  }
}
| duynguyen/incubator-openwhisk | tests/src/test/scala/ha/ShootComponentsTests.scala | Scala | apache-2.0 | 5,977 |
package de.htwg.zeta.server.model.modelValidator.validator.rules.metaModelDependent
import scala.collection.immutable.Seq
import de.htwg.zeta.common.models.project.concept.Concept
import de.htwg.zeta.common.models.project.concept.elements.AttributeType
import de.htwg.zeta.common.models.project.concept.elements.AttributeType.BoolType
import de.htwg.zeta.common.models.project.concept.elements.AttributeType.DoubleType
import de.htwg.zeta.common.models.project.concept.elements.AttributeType.IntType
import de.htwg.zeta.common.models.project.concept.elements.AttributeType.StringType
import de.htwg.zeta.common.models.project.concept.elements.AttributeValue
import de.htwg.zeta.common.models.project.instance.elements.NodeInstance
import de.htwg.zeta.server.model.modelValidator.Util
import de.htwg.zeta.server.model.modelValidator.validator.rules.DslRule
import de.htwg.zeta.server.model.modelValidator.validator.rules.GeneratorRule
import de.htwg.zeta.server.model.modelValidator.validator.rules.SingleNodeRule
/**
* This file was created by Tobias Droth as part of his master thesis at HTWG Konstanz (03/2017 - 09/2017).
*/
/**
 * Validation rule: every value of attribute `attributeType` on nodes of class
 * `nodeType` must have the scalar data type `attributeDataType`.
 */
class NodeAttributeScalarTypes(val nodeType: String, val attributeType: String, val attributeDataType: AttributeType) extends SingleNodeRule with DslRule {

  override val name: String = getClass.getSimpleName
  override val description: String =
    s"Attributes of type $attributeType in nodes of type $nodeType must be of data type ${attributeDataType.asString}."
  override val possibleFix: String =
    s"Remove attribute values of attribute $attributeType in node $nodeType which are not of data type ${attributeDataType.asString}."

  // Rules for other node classes do not apply, hence Option.
  override def isValid(node: NodeInstance): Option[Boolean] =
    if (node.className == nodeType) Some(rule(node)) else None

  // A node without this attribute is vacuously valid; otherwise every value
  // must carry the expected data type. (Known duplication with
  // EdgeAttributeScalarTypes; kept as part of a recent master thesis.)
  def rule(node: NodeInstance): Boolean =
    node.attributeValues.get(attributeType)
      .forall(values => values.forall(_.attributeType == attributeDataType))

  override val dslStatement: String =
    s"""Attributes ofType "$attributeType" inNodes "$nodeType" areOfScalarType "${attributeDataType.asString}""""
}
object NodeAttributeScalarTypes {

  // placeholder
}
| Zeta-Project/zeta | api/server/app/de/htwg/zeta/server/model/modelValidator/validator/rules/metaModelDependent/NodeAttributeScalarTypes.scala | Scala | bsd-2-clause | 2,902 |
package service
import play.api.Logger
import play.api.libs.Files.TemporaryFile
import play.api.mvc.{MultipartFormData, Request}
/**
 * Handles multipart file uploads, moving the received file part to /tmp.
 *
 * Created by supriya on 15/2/16.
 */
class UploadService {

  private val log: Logger = Logger(this.getClass)

  /**
   * Extracts the file part named "file" from the multipart request and moves
   * it to /tmp under its original filename.
   *
   * @param request the multipart request carrying the uploaded file
   * @return "File uploaded" on success, "Missing file" when no "file" part exists
   */
  def uploadFile(request: Request[MultipartFormData[TemporaryFile]]): String = {
    // Fixed: these traces were logged at ERROR level; they are informational.
    log.debug("Called uploadFile function" + request)
    request.body.file("file").map { picture =>
      import java.io.File
      val filename = picture.filename
      val contentType = picture.contentType
      log.info(s"File name : $filename, content type : $contentType")
      picture.ref.moveTo(new File(s"/tmp/$filename"))
      "File uploaded"
    }.getOrElse {
      "Missing file"
    }
  }
}
| rklick-solutions/bigdata-tutorial | app/service/UploadService.scala | Scala | apache-2.0 | 853 |
/*
* Copyright 2013 Joachim Hofer
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.schedulers
import akka.actor._
import akka.pattern.ask
import akka.util.Timeout
import rx.Scheduler
import rx.Subscription
import rx.schedulers.actor.SchedulerActor
import rx.schedulers.actor.SchedulerActor._
import rx.subscriptions.Subscriptions
import rx.util.functions.Action0
import scala.concurrent.duration._
import scala.concurrent.Future
/** Provides RxJava schedulers which schedule actions in an akka actor.
* This actor is created in the given context (which may be an actor system or a parent actor context).
* The scheduler has to be explicitly shut down after usage in order for the internally used actor to stop.
*/
object AkkaScheduler {

  /** Scheduler with an unnamed actor and the default timeout. */
  def forParent(parent: ActorRefFactory): AkkaScheduler =
    new AkkaScheduler(parent)

  /** Scheduler whose internal actor is created under the given name. */
  def forParentWithName(parent: ActorRefFactory, name: String): AkkaScheduler =
    new AkkaScheduler(parent, actorName = Some(name))

  /** Scheduler with an unnamed actor and a custom ask timeout. */
  def forParentWithTimeout(parent: ActorRefFactory, timeout: FiniteDuration): AkkaScheduler =
    new AkkaScheduler(parent, timeout = timeout)

  /** Java-friendly overload taking the timeout as a length/unit pair. */
  def forParentWithTimeout(parent: ActorRefFactory, timeout: Long, unit: TimeUnit): AkkaScheduler =
    new AkkaScheduler(parent, timeout = FiniteDuration(timeout, unit))

  /** Scheduler with both a named actor and a custom ask timeout. */
  def forParentWithNameAndTimeout(parent: ActorRefFactory, name: String, timeout: FiniteDuration): AkkaScheduler =
    new AkkaScheduler(parent, actorName = Some(name), timeout = timeout)

  /** Java-friendly overload taking the timeout as a length/unit pair. */
  def forParentWithNameAndTimeout(parent: ActorRefFactory, name: String, timeout: Long, unit: TimeUnit): AkkaScheduler =
    new AkkaScheduler(parent, actorName = Some(name), timeout = FiniteDuration(timeout, unit))
}
class AkkaScheduler(context: ActorRefFactory,
                    actorName: Option[String] = None,
                    timeout: FiniteDuration = 1.second) extends Scheduler {

  // Single actor through which all actions are scheduled; named only when a
  // name was supplied. The scheduler passes itself to the actor's constructor
  // (presumably so scheduled actions run back through this Scheduler — confirm
  // against SchedulerActor).
  val actor = actorName
    .map (context.actorOf(Props(classOf[SchedulerActor], this), _))
    .getOrElse (context.actorOf(Props(classOf[SchedulerActor], this)))

  /** Stops the scheduling actor; the scheduler must not be used afterwards. */
  def shutdown(): Unit = context stop actor

  /** Schedules an immediate action. The returned subscription is inert: an
    * already-dispatched immediate action cannot be cancelled. */
  def schedule[T](state: T, action: Action[T]): Subscription = {
    actor ! StatefulAction(state, action)
    Subscriptions.empty
  }

  /** Schedules a one-shot delayed action; unsubscribing cancels the pending task. */
  def schedule[T](state: T, action: Action[T], delayTime: Long, unit: TimeUnit): Subscription = {
    implicit val timeout0 = Timeout(timeout)
    // Ask the actor to register the delayed task; it replies with akka's Cancellable.
    val cancellable = actor ? Delayed(StatefulAction(state, action), Duration(delayTime, unit))
    subscriptionFor(cancellable.mapTo[Cancellable])
  }

  /** Schedules a repeating action; unsubscribing cancels further repetitions. */
  override def schedulePeriodically[T](state: T, action: Action[T], initialDelay: Long, period: Long, unit: TimeUnit): Subscription = {
    implicit val timeout0 = Timeout(timeout)
    val cancellable = actor ? Periodic[T](StatefulAction[T](state, action), Duration(initialDelay, unit), Duration(period, unit))
    subscriptionFor(cancellable.mapTo[Cancellable])
  }

  // Adapts the (future) Cancellable into an Rx Subscription whose unsubscribe
  // cancels the task once the actor's reply arrives.
  private def subscriptionFor(cancellable: Future[Cancellable]) = Subscriptions create new Action0 {
    def call(): Unit = {
      import context.dispatcher
      cancellable foreach (_.cancel())
    }
  }
}
| jmhofer/rxjava-akka | src/main/scala/rx/schedulers/AkkaScheduler.scala | Scala | apache-2.0 | 3,537 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.typed
import java.io.{ OutputStream, InputStream, Serializable }
import java.util.Random
import cascading.flow.FlowDef
import cascading.pipe.{ Each, Pipe }
import cascading.tap.Tap
import cascading.tuple.{ Fields, Tuple => CTuple, TupleEntry }
import com.twitter.algebird.{ Aggregator, Monoid, Semigroup }
import com.twitter.scalding.TupleConverter.{ TupleEntryConverter, singleConverter, tuple2Converter }
import com.twitter.scalding.TupleSetter.{ singleSetter, tup2Setter }
import com.twitter.scalding._
import com.twitter.scalding.serialization.OrderedSerialization
import com.twitter.scalding.serialization.OrderedSerialization.Result
import com.twitter.scalding.serialization.macros.impl.BinaryOrdering
import com.twitter.scalding.serialization.macros.impl.BinaryOrdering._
import scala.util.Try
/**
* factory methods for TypedPipe, which is the typed representation of distributed lists in scalding.
* This object is here rather than in the typed package because a lot of code was written using
* the functions in the object, which we do not see how to hide with package object tricks.
*/
object TypedPipe extends Serializable {
  import Dsl.flowDefToRichFlowDef

  /**
   * Create a TypedPipe from a cascading Pipe, some Fields and the type T
   * Avoid this if you can. Prefer from(TypedSource).
   */
  def from[T](pipe: Pipe, fields: Fields)(implicit flowDef: FlowDef, mode: Mode, conv: TupleConverter[T]): TypedPipe[T] = {
    // capture only the portion of the FlowDef that feeds this pipe, so
    // unrelated branches of the flow are not dragged along with this TypedPipe
    val localFlow = flowDef.onlyUpstreamFrom(pipe)
    new TypedPipeInst[T](pipe, fields, localFlow, mode, Converter(conv))
  }

  /**
   * Create a TypedPipe from a TypedSource. This is the preferred way to make a TypedPipe
   */
  def from[T](source: TypedSource[T]): TypedPipe[T] =
    // defer reading the source until a FlowDef/Mode are available at plan time
    TypedPipeFactory({ (fd, mode) =>
      val pipe = source.read(fd, mode)
      from(pipe, source.sourceFields)(fd, mode, source.converter)
    })

  /**
   * Create a TypedPipe from an Iterable in memory.
   */
  def from[T](iter: Iterable[T]): TypedPipe[T] =
    IterablePipe[T](iter)

  /**
   * Input must be a Pipe with exactly one Field
   * Avoid this method and prefer from(TypedSource) if possible
   */
  def fromSingleField[T](pipe: Pipe)(implicit fd: FlowDef, mode: Mode): TypedPipe[T] =
    from(pipe, new Fields(0))(fd, mode, singleConverter[T])

  /**
   * Create an empty TypedPipe. This is sometimes useful when a method must return
   * a TypedPipe, but sometimes at runtime we can check a condition and see that
   * it should be empty.
   * This is the zero of the Monoid[TypedPipe]
   */
  def empty: TypedPipe[Nothing] = EmptyTypedPipe

  /**
   * This enables pipe.hashJoin(that) or pipe.join(that) syntax
   * This is a safe enrichment because hashJoinable and CoGroupable are
   * only used in the argument position or to give cogroup, join, leftJoin, rightJoin, outerJoin
   * methods. Since those methods are unlikely to be used on TypedPipe in the future, this
   * enrichment seems safe.
   *
   * This method is the Vitaly-was-right method.
   */
  implicit def toHashJoinable[K, V](pipe: TypedPipe[(K, V)])(implicit ord: Ordering[K]): HashJoinable[K, V] =
    new HashJoinable[K, V] {
      def mapped = pipe
      def keyOrdering = ord
      def reducers = None
      // record the caller's line for readable step descriptions in the planner UI
      val descriptions: Seq[String] = LineNumber.tryNonScaldingCaller.map(_.toString).toList
      def joinFunction = CoGroupable.castingJoinFunction[V]
    }

  /**
   * TypedPipe instances are monoids. They are isomorphic to multisets.
   */
  implicit def typedPipeMonoid[T]: Monoid[TypedPipe[T]] = new Monoid[TypedPipe[T]] {
    def zero = empty
    def plus(left: TypedPipe[T], right: TypedPipe[T]): TypedPipe[T] =
      left ++ right
  }

  /*
   * An OrderedSerialization[Int] that delegates everything to the
   * macro-generated ordering, except hash which is the identity function.
   * Used by groupRandomly/shard, where keys are already uniformly random
   * ints and need no further mixing before partitioning.
   */
  private val identityOrdering: OrderedSerialization[Int] = {
    val delegate = BinaryOrdering.ordSer[Int]
    new OrderedSerialization[Int] {
      override def compareBinary(a: InputStream, b: InputStream): Result = delegate.compareBinary(a, b)
      override def compare(x: Int, y: Int): Int = delegate.compare(x, y)
      override def dynamicSize(t: Int): Option[Int] = delegate.dynamicSize(t)
      override def write(out: OutputStream, t: Int): Try[Unit] = delegate.write(out, t)
      override def read(in: InputStream): Try[Int] = delegate.read(in)
      override def staticSize: Option[Int] = delegate.staticSize
      override def hash(x: Int): Int = x
    }
  }
}
/**
* Think of a TypedPipe as a distributed unordered list that may or may not yet
* have been materialized in memory or disk.
*
* Represents a phase in a distributed computation on an input data source
* Wraps a cascading Pipe object, and holds the transformation done up until that point
*/
trait TypedPipe[+T] extends Serializable {

  /**
   * Implements a cross product. The right side should be tiny
   * This gives the same results as
   * {code for { l <- list1; l2 <- list2 } yield (l, l2) }
   */
  def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)]

  /**
   * This is the fundamental mapper operation.
   * It behaves in a way similar to List.flatMap, which means that each
   * item is fed to the input function, which can return 0, 1, or many outputs
   * (as a TraversableOnce) per input. The returned results will be iterated through once
   * and then flattened into a single TypedPipe which is passed to the next step in the
   * pipeline.
   *
   * This behavior makes it a powerful operator -- it can be used to filter records
   * (by returning 0 items for a given input), it can be used the way map is used
   * (by returning 1 item per input), it can be used to explode 1 input into many outputs,
   * or even a combination of all of the above at once.
   */
  def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U]

  /**
   * Export back to a raw cascading Pipe. useful for interop with the scalding
   * Fields API or with Cascading code.
   * Avoid this if possible. Prefer to write to TypedSink.
   */
  final def toPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe = {
    import Dsl._
    // Ensure we hook into all pipes coming out of the typed API to apply the FlowState's properties on their pipes
    val pipe = asPipe[U](fieldNames).applyFlowConfigProperties(flowDef)
    RichPipe.setPipeDescriptionFrom(pipe, LineNumber.tryNonScaldingCaller)
  }

  /**
   * Provide the internal implementation to get from a typed pipe to a cascading Pipe
   */
  private[typed] def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe

  /////////////////////////////////////////////
  //
  // The following have default implementations in terms of the above
  //
  /////////////////////////////////////////////

  import Dsl._

  /**
   * Merge two TypedPipes (no order is guaranteed)
   * This is only realized when a group (or join) is
   * performed.
   */
  def ++[U >: T](other: TypedPipe[U]): TypedPipe[U] = other match {
    // short-circuit: merging with an empty pipe is a no-op
    case EmptyTypedPipe => this
    case IterablePipe(thatIter) if thatIter.isEmpty => this
    case _ => MergedTypedPipe(this, other)
  }

  /**
   * Aggregate all items in this pipe into a single ValuePipe
   *
   * Aggregators are composable reductions that allow you to glue together
   * several reductions and process them in one pass.
   *
   * Same as groupAll.aggregate.values
   */
  def aggregate[B, C](agg: Aggregator[T, B, C]): ValuePipe[C] =
    ComputedValue(groupAll.aggregate(agg).values)

  /**
   * Put the items in this into the keys, and unit as the value in a Group
   * in some sense, this is the dual of groupAll
   */
  @annotation.implicitNotFound(msg = "For asKeys method to work, the type in TypedPipe must have an Ordering.")
  def asKeys[U >: T](implicit ord: Ordering[U]): Grouped[U, Unit] =
    map((_, ())).group

  /**
   * If T <:< U, then this is safe to treat as TypedPipe[U] due to covariance
   */
  protected def raiseTo[U](implicit ev: T <:< U): TypedPipe[U] =
    this.asInstanceOf[TypedPipe[U]]

  /**
   * Filter and map. See scala.collection.List.collect.
   * {@code
   * collect { case Some(x) => fn(x) }
   * }
   */
  def collect[U](fn: PartialFunction[T, U]): TypedPipe[U] =
    filter(fn.isDefinedAt(_)).map(fn)

  /**
   * Attach a ValuePipe to each element this TypedPipe
   */
  def cross[V](p: ValuePipe[V]): TypedPipe[(T, V)] =
    p match {
      case EmptyValue => EmptyTypedPipe
      case LiteralValue(v) => map { (_, v) }
      case ComputedValue(pipe) => cross(pipe)
    }

  /** prints the current pipe to stdout */
  def debug: TypedPipe[T] = onRawSingle(_.debug)

  /** adds a description to the pipe */
  def withDescription(description: String): TypedPipe[T] = new WithDescriptionTypedPipe[T](this, description)

  /**
   * Returns the set of distinct elements in the TypedPipe
   * This is the same as: .map((_, ())).group.sum.keys
   * If you want a distinct while joining, consider:
   * instead of:
   * {@code
   * a.join(b.distinct.asKeys)
   * }
   * manually do the distinct:
   * {@code
   * a.join(b.asKeys.sum)
   * }
   * The latter creates 1 map/reduce phase rather than 2
   */
  @annotation.implicitNotFound(msg = "For distinct method to work, the type in TypedPipe must have an Ordering.")
  def distinct(implicit ord: Ordering[_ >: T]): TypedPipe[T] =
    // cast is safe: Ordering is not contravariant, but should be
    asKeys(ord.asInstanceOf[Ordering[T]]).sum.keys

  /**
   * Returns the set of distinct elements identified by a given lambda extractor in the TypedPipe
   */
  @annotation.implicitNotFound(msg = "For distinctBy method to work, the type to distinct on in the TypedPipe must have an Ordering.")
  def distinctBy[U](fn: T => U, numReducers: Option[Int] = None)(implicit ord: Ordering[_ >: U]): TypedPipe[T] = {
    // cast because Ordering is not contravariant, but should be (and this cast is safe)
    implicit val ordT: Ordering[U] = ord.asInstanceOf[Ordering[U]]
    // Semigroup to handle duplicates for a given key might have different values.
    // Picks the "last" item (arbitrary since order is not guaranteed).
    implicit val sg = new Semigroup[T] {
      def plus(a: T, b: T) = b
    }
    val op = map { tup => (fn(tup), tup) }.sumByKey
    val reduced = numReducers match {
      case Some(red) => op.withReducers(red)
      case None => op
    }
    reduced.map(_._2)
  }

  /** Merge two TypedPipes of different types by using Either */
  def either[R](that: TypedPipe[R]): TypedPipe[Either[T, R]] =
    map(Left(_)) ++ (that.map(Right(_)))

  /**
   * Sometimes useful for implementing custom joins with groupBy + mapValueStream when you know
   * that the value/key can fit in memory. Beware.
   */
  def eitherValues[K, V, R](that: TypedPipe[(K, R)])(implicit ev: T <:< (K, V)): TypedPipe[(K, Either[V, R])] =
    mapValues { (v: V) => Left(v) } ++ (that.mapValues { (r: R) => Right(r) })

  /**
   * If you are going to create two branches or forks,
   * it may be more efficient to call this method first
   * which will create a node in the cascading graph.
   * Without this, both full branches of the fork will be
   * put into separate cascading pipes, which can, in some cases,
   * be slower.
   *
   * Ideally the planner would see this
   */
  def fork: TypedPipe[T] = onRawSingle(identity)

  /**
   * limit the output to at most count items, if at least count items exist.
   */
  def limit(count: Int): TypedPipe[T] =
    groupAll.bufferedTake(count).values

  /** Transform each element via the function f */
  def map[U](f: T => U): TypedPipe[U] = flatMap { t => Iterator(f(t)) }

  /** Transform only the values (sometimes requires giving the types due to scala type inference) */
  def mapValues[K, V, U](f: V => U)(implicit ev: T <:< (K, V)): TypedPipe[(K, U)] =
    raiseTo[(K, V)].map { case (k, v) => (k, f(v)) }

  /** Similar to mapValues, but allows to return a collection of outputs for each input value */
  def flatMapValues[K, V, U](f: V => TraversableOnce[U])(implicit ev: T <:< (K, V)): TypedPipe[(K, U)] =
    raiseTo[(K, V)].flatMap { case (k, v) => f(v).map { v2 => k -> v2 } }

  /**
   * Keep only items that satisfy this predicate
   */
  def filter(f: T => Boolean): TypedPipe[T] =
    flatMap { t => if (f(t)) Iterator(t) else Iterator.empty }

  // This is just to appease for comprehension
  def withFilter(f: T => Boolean): TypedPipe[T] = filter(f)

  /**
   * If T is a (K, V) for some V, then we can use this function to filter.
   * Prefer to use this if your filter only touches the key.
   *
   * This is here to match the function in KeyedListLike, where it is optimized
   */
  def filterKeys[K](fn: K => Boolean)(implicit ev: T <:< (K, Any)): TypedPipe[T] =
    // cast rather than ev to avoid capturing a possibly non-serializable ev
    filter { ka => fn(ka.asInstanceOf[(K, Any)]._1) }

  /**
   * Keep only items that don't satisfy the predicate.
   * `filterNot` is the same as `filter` with a negated predicate.
   */
  def filterNot(f: T => Boolean): TypedPipe[T] =
    filter(!f(_))

  /** flatten an Iterable */
  def flatten[U](implicit ev: T <:< TraversableOnce[U]): TypedPipe[U] =
    flatMap { _.asInstanceOf[TraversableOnce[U]] } // don't use ev which may not be serializable

  /**
   * flatten just the values
   * This is more useful on KeyedListLike, but added here to reduce asymmetry in the APIs
   */
  def flattenValues[K, U](implicit ev: T <:< (K, TraversableOnce[U])): TypedPipe[(K, U)] =
    raiseTo[(K, TraversableOnce[U])].flatMap { case (k, us) => us.map((k, _)) }

  // Round-trips this pipe through a raw single-field cascading Pipe so that
  // `onPipe` can be applied at the Fields-API level; deferred via TypedPipeFactory.
  protected def onRawSingle(onPipe: Pipe => Pipe): TypedPipe[T] = {
    val self = this
    TypedPipeFactory({ (fd, m) =>
      val pipe = self.toPipe[T](new Fields(java.lang.Integer.valueOf(0)))(fd, m, singleSetter)
      TypedPipe.fromSingleField[T](onPipe(pipe))(fd, m)
    })
  }

  /**
   * Force a materialization of this pipe prior to the next operation.
   * This is useful if you filter almost everything before a hashJoin, for instance.
   * This is useful for experts who see some heuristic of the planner causing
   * slower performance.
   */
  def forceToDisk: TypedPipe[T] = onRawSingle(_.forceToDisk)

  /**
   * This is the default means of grouping all pairs with the same key. Generally this triggers 1 Map/Reduce transition
   */
  def group[K, V](implicit ev: <:<[T, (K, V)], ord: Ordering[K]): Grouped[K, V] =
    //If the type of T is not (K,V), then at compile time, this will fail. It uses implicits to do
    //a compile time check that one type is equivalent to another. If T is not (K,V), we can't
    //automatically group. We cast because it is safe to do so, and we need to convert to K,V, but
    //the ev is not needed for the cast. In fact, you can do the cast with ev(t) and it will return
    //it as (K,V), but the problem is, ev is not serializable. So we do the cast, which due to ev
    //being present, will always pass.
    Grouped(raiseTo[(K, V)]).withDescription(LineNumber.tryNonScaldingCaller.map(_.toString))

  /** Send all items to a single reducer */
  def groupAll: Grouped[Unit, T] = groupBy(x => ())(ordSer[Unit]).withReducers(1)

  /** Given a key function, add the key, then call .group */
  def groupBy[K](g: T => K)(implicit ord: Ordering[K]): Grouped[K, T] =
    map { t => (g(t), t) }.group

  /** Group using an explicit Ordering on the key. */
  def groupWith[K, V](ord: Ordering[K])(implicit ev: <:<[T, (K, V)]): Grouped[K, V] = group(ev, ord)

  /**
   * Forces a shuffle by randomly assigning each item into one
   * of the partitions.
   *
   * This is for the case where your mappers take a long time, and
   * it is faster to shuffle them to more reducers and then operate.
   *
   * You probably want shard if you are just forcing a shuffle.
   */
  def groupRandomly(partitions: Int): Grouped[Int, T] = {
    // Make it lazy so all mappers get their own:
    lazy val rng = new java.util.Random(123) // seed this so it is repeatable
    groupBy { _ => rng.nextInt(partitions) }(TypedPipe.identityOrdering)
      .withReducers(partitions)
  }

  /**
   * Partitions this into two pipes according to a predicate.
   *
   * Sometimes what you really want is a groupBy in these cases.
   */
  def partition(p: T => Boolean): (TypedPipe[T], TypedPipe[T]) = {
    // fork first so the upstream pipeline is not rendered twice
    val forked = fork
    (forked.filter(p), forked.filterNot(p))
  }

  // per-instance, time-varying seed so independent samples are uncorrelated
  private[this] def defaultSeed: Long = System.identityHashCode(this) * 2654435761L ^ System.currentTimeMillis

  /**
   * Sample a fraction (between 0 and 1) uniformly independently at random each element of the pipe
   * does not require a reduce step.
   */
  def sample(fraction: Double): TypedPipe[T] = sample(fraction, defaultSeed)

  /**
   * Sample a fraction (between 0 and 1) uniformly independently at random each element of the pipe with
   * a given seed.
   * Does not require a reduce step.
   */
  def sample(fraction: Double, seed: Long): TypedPipe[T] = {
    require(0.0 <= fraction && fraction <= 1.0, s"got $fraction which is an invalid fraction")
    // Make sure to fix the seed, otherwise restarts cause subtle errors
    lazy val rand = new Random(seed)
    filter(_ => rand.nextDouble < fraction)
  }

  /**
   * This does a sum of values WITHOUT triggering a shuffle.
   * the contract is, if followed by a group.sum the result is the same
   * with or without this present, and it never increases the number of
   * items. BUT due to the cost of caching, it might not be faster if
   * there is poor key locality.
   *
   * It is only useful for expert tuning,
   * and best avoided unless you are struggling with performance problems.
   * If you are not sure you need this, you probably don't.
   *
   * The main use case is to reduce the values down before a key expansion
   * such as is often done in a data cube.
   */
  def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]): TypedPipe[(K, V)] = {
    val fields: Fields = ('key, 'value)
    val selfKV = raiseTo[(K, V)]
    TypedPipeFactory({ (fd, mode) =>
      val pipe = selfKV.toPipe(fields)(fd, mode, tup2Setter)
      // MapsideReduce caches and combines values per key on the map side
      val msr = new MapsideReduce(sg, 'key, 'value, None)(singleConverter[V], singleSetter[V])
      TypedPipe.from[(K, V)](pipe.eachTo(fields -> fields) { _ => msr }, fields)(fd, mode, tuple2Converter)
    })
  }

  /**
   * Used to force a shuffle into a given size of nodes.
   * Only use this if your mappers are taking far longer than
   * the time to shuffle.
   */
  def shard(partitions: Int): TypedPipe[T] = groupRandomly(partitions).forceToReducers.values

  /**
   * Reasonably common shortcut for cases of total associative/commutative reduction
   * returns a ValuePipe with only one element if there is any input, otherwise EmptyValue.
   */
  def sum[U >: T](implicit plus: Semigroup[U]): ValuePipe[U] = ComputedValue(groupAll.sum[U].values)

  /**
   * Reasonably common shortcut for cases of associative/commutative reduction by Key
   */
  def sumByKey[K, V](implicit ev: T <:< (K, V), ord: Ordering[K], plus: Semigroup[V]): UnsortedGrouped[K, V] =
    group[K, V].sum[V]

  /**
   * This is used when you are working with Execution[T] to create loops.
   * You might do this to checkpoint and then flatMap Execution to continue
   * from there. Probably only useful if you need to flatMap it twice to fan
   * out the data into two children jobs.
   *
   * This writes the current TypedPipe into a temporary file
   * and then opens it after complete so that you can continue from that point
   */
  def forceToDiskExecution: Execution[TypedPipe[T]] = {
    // fixed once per call so the write and subsequent read target the same file
    val cachedRandomUUID = java.util.UUID.randomUUID
    lazy val inMemoryDest = new MemorySink[T]
    def hadoopTypedSource(conf: Config): TypedSource[T] with TypedSink[T] = {
      // come up with unique temporary filename, use the config here
      // TODO: refactor into TemporarySequenceFile class
      val tmpDir = conf.get("hadoop.tmp.dir")
        .orElse(conf.get("cascading.tmp.dir"))
        .getOrElse("/tmp")
      val tmpSeq = tmpDir + "/scalding/snapshot-" + cachedRandomUUID + ".seq"
      source.TypedSequenceFile[T](tmpSeq)
    }
    val writeFn = { (conf: Config, mode: Mode) =>
      mode match {
        case _: CascadingLocal => // Local or Test mode
          (this, inMemoryDest)
        case _: HadoopMode =>
          (this, hadoopTypedSource(conf))
      }
    }
    val readFn = { (conf: Config, mode: Mode) =>
      mode match {
        case _: CascadingLocal => // Local or Test mode
          TypedPipe.from(inMemoryDest.readResults)
        case _: HadoopMode =>
          TypedPipe.from(hadoopTypedSource(conf))
      }
    }
    Execution.write(writeFn, readFn)
  }

  /**
   * This gives an Execution that when run evaluates the TypedPipe,
   * writes it to disk, and then gives you an Iterable that reads from
   * disk on the submit node each time .iterator is called.
   * Because of how scala Iterables work, mapping/flatMapping/filtering
   * the Iterable forces a read of the entire thing. If you need it to
   * be lazy, call .iterator and use the Iterator inside instead.
   */
  def toIterableExecution: Execution[Iterable[T]] =
    forceToDiskExecution.flatMap(_.toIterableExecution)

  /** use a TupleUnpacker to flatten U out into a cascading Tuple */
  def unpackToPipe[U >: T](fieldNames: Fields)(implicit fd: FlowDef, mode: Mode, up: TupleUnpacker[U]): Pipe = {
    val setter = up.newSetter(fieldNames)
    toPipe[U](fieldNames)(fd, mode, setter)
  }

  /**
   * This attaches a function that is called at the end of the map phase on
   * EACH of the tasks that are executing.
   * This is for expert use only. You probably won't ever need it. Try hard
   * to avoid it. Execution also has onComplete that can run when an Execution
   * has completed.
   */
  def onComplete(fn: () => Unit): TypedPipe[T] = new WithOnComplete[T](this, fn)

  /**
   * Safely write to a TypedSink[T]. If you want to write to a Source (not a Sink)
   * you need to do something like: toPipe(fieldNames).write(dest)
   * @return a pipe equivalent to the current pipe.
   */
  def write(dest: TypedSink[T])(implicit flowDef: FlowDef, mode: Mode): TypedPipe[T] = {
    // Make sure that we don't render the whole pipeline twice:
    val res = fork
    dest.writeFrom(res.toPipe[T](dest.sinkFields)(flowDef, mode, dest.setter))
    res
  }

  /**
   * This is the functionally pure approach to building jobs. Note,
   * that you have to call run on the result or flatMap/zip it
   * into an Execution that is run for anything to happen here.
   */
  def writeExecution(dest: TypedSink[T]): Execution[Unit] =
    Execution.write(this, dest)

  /**
   * If you want to write to a specific location, and then read from
   * that location going forward, use this.
   */
  def writeThrough[U >: T](dest: TypedSink[T] with TypedSource[U]): Execution[TypedPipe[U]] =
    Execution.write(this, dest, TypedPipe.from(dest))

  /**
   * If you want to writeThrough to a specific file if it doesn't already exist,
   * and otherwise just read from it going forward, use this.
   */
  def make[U >: T](dest: FileSource with TypedSink[T] with TypedSource[U]): Execution[TypedPipe[U]] =
    Execution.getMode.flatMap { mode =>
      try {
        // if the taps validate, the file exists: just read it
        dest.validateTaps(mode)
        Execution.from(TypedPipe.from(dest))
      } catch {
        case ivs: InvalidSourceException => writeThrough(dest)
      }
    }

  /** Just keep the keys, or ._1 (if this type is a Tuple2) */
  def keys[K](implicit ev: <:<[T, (K, Any)]): TypedPipe[K] =
    // avoid capturing ev in the closure:
    raiseTo[(K, Any)].map(_._1)

  /** swap the keys with the values */
  def swap[K, V](implicit ev: <:<[T, (K, V)]): TypedPipe[(V, K)] =
    raiseTo[(K, V)].map(_.swap)

  /** Just keep the values, or ._2 (if this type is a Tuple2) */
  def values[V](implicit ev: <:<[T, (Any, V)]): TypedPipe[V] =
    raiseTo[(Any, V)].map(_._2)

  /**
   * ValuePipe may be empty, so, this attaches it as an Option
   * cross is the same as leftCross(p).collect { case (t, Some(v)) => (t, v) }
   */
  def leftCross[V](p: ValuePipe[V]): TypedPipe[(T, Option[V])] =
    p match {
      case EmptyValue => map { (_, None) }
      case LiteralValue(v) => map { (_, Some(v)) }
      case ComputedValue(pipe) => leftCross(pipe)
    }

  /** uses a map-side hash left join, attaching None if thatPipe is empty */
  def leftCross[V](thatPipe: TypedPipe[V]): TypedPipe[(T, Option[V])] =
    map(((), _)).hashLeftJoin(thatPipe.groupAll).values

  /**
   * common pattern of attaching a value and then map
   * recommended style:
   * {@code
   * mapWithValue(vpu) {
   * case (t, Some(u)) => op(t, u)
   * case (t, None) => // if you never expect this:
   * sys.error("unexpected empty value pipe")
   * }
   * }
   */
  def mapWithValue[U, V](value: ValuePipe[U])(f: (T, Option[U]) => V): TypedPipe[V] =
    leftCross(value).map(t => f(t._1, t._2))

  /**
   * common pattern of attaching a value and then flatMap
   * recommended style:
   * {@code
   * flatMapWithValue(vpu) {
   * case (t, Some(u)) => op(t, u)
   * case (t, None) => // if you never expect this:
   * sys.error("unexpected empty value pipe")
   * }
   * }
   */
  def flatMapWithValue[U, V](value: ValuePipe[U])(f: (T, Option[U]) => TraversableOnce[V]): TypedPipe[V] =
    leftCross(value).flatMap(t => f(t._1, t._2))

  /**
   * common pattern of attaching a value and then filter
   * recommended style:
   * {@code
   * filterWithValue(vpu) {
   * case (t, Some(u)) => op(t, u)
   * case (t, None) => // if you never expect this:
   * sys.error("unexpected empty value pipe")
   * }
   * }
   */
  def filterWithValue[U](value: ValuePipe[U])(f: (T, Option[U]) => Boolean): TypedPipe[T] =
    leftCross(value).filter(t => f(t._1, t._2)).map(_._1)

  /**
   * These operations look like joins, but they do not force any communication
   * of the current TypedPipe. They are mapping operations where this pipe is streamed
   * through one item at a time.
   *
   * WARNING These behave semantically very differently than cogroup.
   * This is because we handle (K,V) tuples on the left as we see them.
   * The iterable on the right is over all elements with a matching key K, and it may be empty
   * if there are no values for this key K.
   */
  def hashCogroup[K, V, W, R](smaller: HashJoinable[K, W])(joiner: (K, V, Iterable[W]) => Iterator[R])(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)]): TypedPipe[(K, R)] =
    smaller.hashCogroupOn(ev(this))(joiner)

  /** Do an inner-join without shuffling this TypedPipe, but replicating argument to all tasks */
  def hashJoin[K, V, W](smaller: HashJoinable[K, W])(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)]): TypedPipe[(K, (V, W))] =
    hashCogroup[K, V, W, (V, W)](smaller)(Joiner.hashInner2)

  /** Do a left join without shuffling this TypedPipe, but replicating argument to all tasks */
  def hashLeftJoin[K, V, W](smaller: HashJoinable[K, W])(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)]): TypedPipe[(K, (V, Option[W]))] =
    hashCogroup[K, V, W, (V, Option[W])](smaller)(Joiner.hashLeft2)

  /**
   * For each element, do a map-side (hash) left join to look up a value
   */
  def hashLookup[K >: T, V](grouped: HashJoinable[K, V]): TypedPipe[(K, Option[V])] =
    map((_, ()))
      .hashLeftJoin(grouped)
      .map { case (t, (_, optV)) => (t, optV) }

  /**
   * Enables joining when this TypedPipe has some keys with many many values and
   * but many with very few values. For instance, a graph where some nodes have
   * millions of neighbors, but most have only a few.
   *
   * We build a (count-min) sketch of each key's frequency, and we use that
   * to shard the heavy keys across many reducers.
   * This increases communication cost in order to reduce the maximum time needed
   * to complete the join.
   *
   * {@code pipe.sketch(100).join(thatPipe) }
   * will add an extra map/reduce job over a standard join to create the count-min-sketch.
   * This will generally only be beneficial if you have really heavy skew, where without
   * this you have 1 or 2 reducers taking hours longer than the rest.
   */
  def sketch[K, V](reducers: Int,
    eps: Double = 1.0E-5, //272k width = 1MB per row
    delta: Double = 0.01, //5 rows (= 5 hashes)
    seed: Int = 12345)(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)],
      serialization: K => Array[Byte],
      ordering: Ordering[K]): Sketched[K, V] =
    Sketched(ev(this), reducers, delta, eps, seed)

  /**
   * If any errors happen below this line, but before a groupBy, write to a TypedSink
   */
  def addTrap[U >: T](trapSink: Source with TypedSink[T])(implicit conv: TupleConverter[U]): TypedPipe[U] =
    TypedPipeFactory({ (flowDef, mode) =>
      val fields = trapSink.sinkFields
      // TODO: with diamonds in the graph, this might not be correct
      val pipe = RichPipe.assignName(fork.toPipe[T](fields)(flowDef, mode, trapSink.setter))
      flowDef.addTrap(pipe, trapSink.createTap(Write)(mode))
      TypedPipe.from[U](pipe, fields)(flowDef, mode, conv)
    })
}
/**
* This object is the EmptyTypedPipe. Prefer to create it with TypedPipe.empty
*/
final case object EmptyTypedPipe extends TypedPipe[Nothing] {
  // Empty is the identity of ++: merging just yields the other pipe.
  override def ++[U >: Nothing](other: TypedPipe[U]): TypedPipe[U] = other

  // Every element-wise transformation of nothing is still nothing, so we
  // short-circuit to `this` without planning any cascading work.
  override def cross[U](tiny: TypedPipe[U]): TypedPipe[(Nothing, U)] = this
  override def distinct(implicit ord: Ordering[_ >: Nothing]): TypedPipe[Nothing] = this
  override def flatMap[U](f: Nothing => TraversableOnce[U]): TypedPipe[U] = this
  override def fork: TypedPipe[Nothing] = this
  override def forceToDisk: TypedPipe[Nothing] = this
  override def leftCross[V](p: ValuePipe[V]): TypedPipe[(Nothing, Option[V])] = this
  override def limit(count: Int): TypedPipe[Nothing] = this
  override def debug: TypedPipe[Nothing] = this
  override def sumByLocalKeys[K, V](implicit ev: Nothing <:< (K, V), sg: Semigroup[V]): TypedPipe[(K, V)] = this
  override def hashCogroup[K, V, W, R](smaller: HashJoinable[K, W])(joiner: (K, V, Iterable[W]) => Iterator[R])(implicit ev: TypedPipe[Nothing] <:< TypedPipe[(K, V)]): TypedPipe[(K, R)] =
    this

  // Reductions over an empty pipe produce the empty value.
  override def aggregate[B, C](agg: Aggregator[Nothing, B, C]): ValuePipe[C] = EmptyValue
  override def sum[U >: Nothing](implicit plus: Semigroup[U]): ValuePipe[U] = EmptyValue

  // Executions complete immediately: there is nothing to write or read back.
  override def forceToDiskExecution: Execution[TypedPipe[Nothing]] = Execution.from(this)
  override def toIterableExecution: Execution[Iterable[Nothing]] = Execution.from(Iterable.empty)

  // When forced to the Fields API, read an empty IterableSource.
  override def asPipe[U >: Nothing](fieldNames: Fields)(implicit fd: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe =
    IterableSource(Iterable.empty, fieldNames)(setter, singleConverter[U]).read(fd, mode)
}
/**
* Creates a TypedPipe from an Iterable[T]. Prefer TypedPipe.from.
*
* If you avoid toPipe, this class is more efficient than IterableSource.
*/
final case class IterablePipe[T](iterable: Iterable[T]) extends TypedPipe[T] {
override def aggregate[B, C](agg: Aggregator[T, B, C]): ValuePipe[C] =
Some(iterable)
.filterNot(_.isEmpty)
.map(it => LiteralValue(agg(it)))
.getOrElse(EmptyValue)
override def ++[U >: T](other: TypedPipe[U]): TypedPipe[U] = other match {
case IterablePipe(thatIter) => IterablePipe(iterable ++ thatIter)
case EmptyTypedPipe => this
case _ if iterable.isEmpty => other
case _ => MergedTypedPipe(this, other)
}
override def cross[U](tiny: TypedPipe[U]) =
tiny.flatMap { u => iterable.map { (_, u) } }
override def filter(f: T => Boolean): TypedPipe[T] =
iterable.filter(f) match {
case eit if eit.isEmpty => EmptyTypedPipe
case filtered => IterablePipe(filtered)
}
/**
* When flatMap is called on an IterablePipe, we defer to make sure that f is
* applied lazily, which avoids OOM issues when the returned value from the
* map is larger than the input
*/
override def flatMap[U](f: T => TraversableOnce[U]) =
toSourcePipe.flatMap(f)
override def fork: TypedPipe[T] = this
override def forceToDisk = this
override def limit(count: Int): TypedPipe[T] = IterablePipe(iterable.take(count))
/**
* When map is called on an IterablePipe, we defer to make sure that f is
* applied lazily, which avoids OOM issues when the returned value from the
* map is larger than the input
*/
override def map[U](f: T => U): TypedPipe[U] =
toSourcePipe.map(f)
override def forceToDiskExecution: Execution[TypedPipe[T]] = Execution.from(this)
override def sum[U >: T](implicit plus: Semigroup[U]): ValuePipe[U] =
Semigroup.sumOption[U](iterable).map(LiteralValue(_))
.getOrElse(EmptyValue)
override def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]) = {
val kvit = raiseTo[(K, V)] match {
case IterablePipe(kviter) => kviter
case p => sys.error("This must be IterablePipe: " + p.toString)
}
IterablePipe(kvit.groupBy(_._1)
// use map to force this so it is not lazy.
.map {
case (k, kvs) =>
// These lists are never empty, get is safe.
(k, Semigroup.sumOption(kvs.iterator.map(_._2)).get)
})
}
// Reads the in-memory data through an IterableSource directly.
override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe =
// It is slightly more efficient to use this rather than toSourcePipe.toPipe(fieldNames)
IterableSource[U](iterable, fieldNames)(setter, singleConverter[U]).read(flowDef, mode)
// Wraps the iterable as a single-field source so map/flatMap can run
// as deferred pipe operations instead of eager in-memory ones.
private[this] def toSourcePipe =
TypedPipe.from(
IterableSource[T](iterable, new Fields("0"))(singleSetter, singleConverter))
// The iterable is already local; hand it back without any cluster work.
override def toIterableExecution: Execution[Iterable[T]] = Execution.from(iterable)
}
/**
 * This is an implementation detail (and should be marked private)
 */
object TypedPipeFactory {
  /**
   * Wraps `next` so it is evaluated at most once per FlowDef, memoizing
   * the resulting TypedPipe. Reusing the same FlowDef under a different
   * Mode is a planning error and aborts.
   */
  def apply[T](next: (FlowDef, Mode) => TypedPipe[T]): TypedPipeFactory[T] = {
    // weak keys: memo entries vanish once the FlowDef itself is collected
    val memo = new java.util.WeakHashMap[FlowDef, (Mode, TypedPipe[T])]()
    def build(fd: FlowDef, m: Mode): TypedPipe[T] = memo.synchronized {
      memo.get(fd) match {
        case null =>
          val fresh = next(fd, m)
          memo.put(fd, (m, fresh))
          fresh
        case (prevMode, cached) =>
          if (prevMode == m) cached
          else sys.error("FlowDef reused on different Mode. Original: %s, now: %s".format(prevMode, m))
      }
    }
    new TypedPipeFactory(NoStackAndThen((build _).tupled))
  }

  /** Extracts the deferred (FlowDef, Mode) => TypedPipe function, if any. */
  def unapply[T](tp: TypedPipe[T]): Option[NoStackAndThen[(FlowDef, Mode), TypedPipe[T]]] =
    tp match {
      case factory: TypedPipeFactory[_] =>
        Some(factory.asInstanceOf[TypedPipeFactory[T]].next)
      case _ => None
    }
}
/**
 * This is a TypedPipe that delays having access
 * to the FlowDef and Mode until toPipe is called
 */
class TypedPipeFactory[T] private (@transient val next: NoStackAndThen[(FlowDef, Mode), TypedPipe[T]]) extends TypedPipe[T] {
// Chains another transformation onto the deferred computation without
// evaluating anything yet (NoStackAndThen avoids stack growth on long chains).
private[this] def andThen[U](fn: TypedPipe[T] => TypedPipe[U]): TypedPipe[U] =
new TypedPipeFactory(next.andThen(fn))
// All standard ops are simply deferred via andThen.
override def cross[U](tiny: TypedPipe[U]) = andThen(_.cross(tiny))
override def filter(f: T => Boolean): TypedPipe[T] = andThen(_.filter(f))
override def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] = andThen(_.flatMap(f))
override def map[U](f: T => U): TypedPipe[U] = andThen(_.map(f))
override def limit(count: Int) = andThen(_.limit(count))
override def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]) =
andThen(_.sumByLocalKeys[K, V])
override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]) = {
// unwrap in a loop, without recursing
val (unwrapped, st) = unwrap(this, Array())
val pipe = unwrapped.asPipe[U](fieldNames)(flowDef, mode, setter)
// attach the first non-scalding caller frame as the pipe description
RichPipe.setPipeDescriptionFrom(pipe, LineNumber.tryNonScaldingCaller(st))
pipe
}
override def toIterableExecution: Execution[Iterable[T]] = Execution.getConfigMode.flatMap {
case (conf, mode) =>
// This can only terminate in TypedPipeInst, which will
// keep the reference to this flowDef
val flowDef = new FlowDef
val (nextPipe, stackTraces) = unwrap(this, Array())(flowDef, mode)
nextPipe.toIterableExecution
}
// Repeatedly forces nested factories with the ambient FlowDef/Mode until a
// concrete pipe appears, accumulating any recorded stack traces along the way.
@annotation.tailrec
private def unwrap(pipe: TypedPipe[T], st: Array[StackTraceElement])(implicit flowDef: FlowDef, mode: Mode): (TypedPipe[T], Array[StackTraceElement]) = pipe match {
case TypedPipeFactory(n) =>
val fullTrace = n match {
case NoStackAndThen.WithStackTrace(_, st) => st
case _ => Array[StackTraceElement]()
}
unwrap(n(flowDef, mode), st ++ fullTrace)
case tp => (tp, st)
}
}
/**
 * This is an instance of a TypedPipe that wraps a cascading Pipe
 */
class TypedPipeInst[T] private[scalding] (@transient inpipe: Pipe,
fields: Fields,
@transient localFlowDef: FlowDef,
@transient val mode: Mode,
flatMapFn: FlatMapFn[T]) extends TypedPipe[T] {
/**
 * If this TypedPipeInst represents a Source that was opened with no
 * filtering or mapping, returns the underlying Tap, its fields and the
 * pending flatMap function; errors out if the head pipe has no source.
 */
private[scalding] def openIfHead: Option[(Tap[_, _, _], Fields, FlatMapFn[T])] =
// Keep this local
if (inpipe.getPrevious.isEmpty) {
val srcs = localFlowDef.getSources
if (srcs.containsKey(inpipe.getName)) {
Some((srcs.get(inpipe.getName), fields, flatMapFn))
} else {
sys.error("Invalid head: pipe has no previous, but there is no registered source.")
}
} else None
// Asserts the Mode seen at read time is the Mode seen at toPipe time.
def checkMode(m: Mode): Unit =
// This check is not likely to fail unless someone does something really strange.
// for historical reasons, it is not checked by the typed system
assert(m == mode,
"Cannot switch Mode between TypedSource.read and toPipe calls. Pipe: %s, call: %s".format(mode, m))
override def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)] = tiny match {
case EmptyTypedPipe => EmptyTypedPipe
case MergedTypedPipe(l, r) => MergedTypedPipe(cross(l), cross(r))
// in-memory right side: pair locally inside the flatMap
case IterablePipe(iter) => flatMap { t => iter.map { (t, _) } }
// This should work for any, TODO, should we just call this?
case _ => map(((), _)).hashJoin(tiny.groupAll).values
}
// filter/flatMap/map fuse into flatMapFn, so the whole chain later runs
// in a single cascading Each (see asPipe below).
override def filter(f: T => Boolean): TypedPipe[T] =
new TypedPipeInst[T](inpipe, fields, localFlowDef, mode, flatMapFn.filter(f))
override def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] =
new TypedPipeInst[U](inpipe, fields, localFlowDef, mode, flatMapFn.flatMap(f))
override def map[U](f: T => U): TypedPipe[U] =
new TypedPipeInst[U](inpipe, fields, localFlowDef, mode, flatMapFn.map(f))
/**
 * Avoid this method if possible. Prefer to stay in the TypedAPI until
 * you write out.
 *
 * This actually runs all the pure map functions in one Cascading Each
 * This approach is more efficient than untyped scalding because we
 * don't use TupleConverters/Setters after each map.
 */
override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, m: Mode, setter: TupleSetter[U]): Pipe = {
import Dsl.flowDefToRichFlowDef
checkMode(m)
// fold this pipe's local sources/sinks into the caller's FlowDef
flowDef.mergeFrom(localFlowDef)
RichPipe(inpipe).flatMapTo[TupleEntry, U](fields -> fieldNames)(flatMapFn)
}
override def toIterableExecution: Execution[Iterable[T]] =
openIfHead match {
// TODO: it might be good to apply flatMaps locally,
// since we obviously need to iterate all,
// but filters we might want the cluster to apply
// for us. So unwind until you hit the first filter, snapshot,
// then apply the unwound functions
case Some((tap, fields, Converter(conv))) =>
// To convert from java iterator to scala below
import scala.collection.JavaConverters._
Execution.getConfigMode.map {
case (conf, m) =>
// Verify the mode has not changed due to invalid TypedPipe DAG construction
checkMode(m)
new Iterable[T] {
def iterator = m.openForRead(conf, tap).asScala.map(tup => conv(tup.selectEntry(fields)))
}
}
// not a pure head source: run the job, then iterate the materialized result
case _ => forceToDiskExecution.flatMap(_.toIterableExecution)
}
}
// A TypedPipe formed by concatenating two pipes; most ops distribute
// over both branches.
final case class MergedTypedPipe[T](left: TypedPipe[T], right: TypedPipe[T]) extends TypedPipe[T] {
override def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)] = tiny match {
case EmptyTypedPipe => EmptyTypedPipe
case _ => MergedTypedPipe(left.cross(tiny), right.cross(tiny))
}
override def debug: TypedPipe[T] =
MergedTypedPipe(left.debug, right.debug)
override def filter(f: T => Boolean): TypedPipe[T] =
MergedTypedPipe(left.filter(f), right.filter(f))
override def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] =
MergedTypedPipe(left.flatMap(f), right.flatMap(f))
override def sample(fraction: Double, seed: Long): TypedPipe[T] =
MergedTypedPipe(left.sample(fraction, seed), right.sample(fraction, seed))
override def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]): TypedPipe[(K, V)] =
MergedTypedPipe(left.sumByLocalKeys, right.sumByLocalKeys)
override def map[U](f: T => U): TypedPipe[U] =
MergedTypedPipe(left.map(f), right.map(f))
override def fork: TypedPipe[T] =
MergedTypedPipe(left.fork, right.fork)
// Tail-recursively collects all non-merge leaves of the merge tree into
// `acc`, expanding any TypedPipeFactory with the ambient FlowDef/Mode.
@annotation.tailrec
private def flattenMerge(toFlatten: List[TypedPipe[T]], acc: List[TypedPipe[T]])(implicit fd: FlowDef, m: Mode): List[TypedPipe[T]] =
toFlatten match {
case MergedTypedPipe(l, r) :: rest => flattenMerge(l :: r :: rest, acc)
case TypedPipeFactory(next) :: rest => flattenMerge(next(fd, m) :: rest, acc)
case nonmerge :: rest => flattenMerge(rest, nonmerge :: acc)
case Nil => acc
}
override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe = {
/*
 * Cascading can't handle duplicate pipes in merges. What we do here is see if any pipe appears
 * multiple times and if it does we can do self merges using flatMap.
 * Finally, if there is actually more than one distinct TypedPipe, we use the cascading
 * merge primitive. When using the merge primitive we rename all pipes going into it as
 * Cascading cannot handle multiple pipes with the same name.
 */
val merged = flattenMerge(List(this), Nil)
// check for repeated pipes
.groupBy(identity)
.mapValues(_.size)
.map {
case (pipe, 1) => pipe
// a pipe appearing cnt times is replaced by one copy emitting each
// element cnt times (a self-merge done via flatMap)
case (pipe, cnt) => pipe.flatMap(List.fill(cnt)(_).iterator)
}
.map(_.toPipe[U](fieldNames)(flowDef, mode, setter))
.toList
if (merged.size == 1) {
// there is no actual merging here, no need to rename:
merged.head
} else {
new cascading.pipe.Merge(merged.map(RichPipe.assignName): _*)
}
}
override def hashCogroup[K, V, W, R](smaller: HashJoinable[K, W])(joiner: (K, V, Iterable[W]) => Iterator[R])(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)]): TypedPipe[(K, R)] =
MergedTypedPipe(left.hashCogroup(smaller)(joiner), right.hashCogroup(smaller)(joiner))
}
// Wraps a pipe so `fn` is invoked when the underlying flow step completes
// (via CleanupIdentityFunction, an identity Each with a cleanup hook).
case class WithOnComplete[T](typedPipe: TypedPipe[T], fn: () => Unit) extends TypedPipe[T] {
override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]) = {
val pipe = typedPipe.toPipe[U](fieldNames)(flowDef, mode, setter)
new Each(pipe, Fields.ALL, new CleanupIdentityFunction(fn), Fields.REPLACE)
}
// transformations keep the completion hook attached to the result
override def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)] =
WithOnComplete(typedPipe.cross(tiny), fn)
override def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] =
WithOnComplete(typedPipe.flatMap(f), fn)
}
// Wraps a pipe so a human-readable description is attached to the
// generated cascading Pipe at planning time.
case class WithDescriptionTypedPipe[T](typedPipe: TypedPipe[T], description: String) extends TypedPipe[T] {
override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]) = {
val pipe = typedPipe.toPipe[U](fieldNames)(flowDef, mode, setter)
RichPipe.setPipeDescriptions(pipe, List(description))
}
// transformations keep the description attached to the result
override def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)] =
WithDescriptionTypedPipe(typedPipe.cross(tiny), description)
override def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] =
WithDescriptionTypedPipe(typedPipe.flatMap(f), description)
}
/**
 * This class is for the syntax enrichment enabling
 * .joinBy on TypedPipes. To access this, do
 * import Syntax.joinOnMappablePipe
 */
class MappablePipeJoinEnrichment[T](pipe: TypedPipe[T]) {
// Each joinBy variant groups both sides by the supplied key functions
// (g for the left/larger pipe, h for the right/smaller one), sets the
// reducer count on the left grouping, and delegates to the matching
// CoGrouped join (inner / left / right / outer).
def joinBy[K, U](smaller: TypedPipe[U])(g: (T => K), h: (U => K), reducers: Int = -1)(implicit ord: Ordering[K]): CoGrouped[K, (T, U)] = pipe.groupBy(g).withReducers(reducers).join(smaller.groupBy(h))
def leftJoinBy[K, U](smaller: TypedPipe[U])(g: (T => K), h: (U => K), reducers: Int = -1)(implicit ord: Ordering[K]): CoGrouped[K, (T, Option[U])] = pipe.groupBy(g).withReducers(reducers).leftJoin(smaller.groupBy(h))
def rightJoinBy[K, U](smaller: TypedPipe[U])(g: (T => K), h: (U => K), reducers: Int = -1)(implicit ord: Ordering[K]): CoGrouped[K, (Option[T], U)] = pipe.groupBy(g).withReducers(reducers).rightJoin(smaller.groupBy(h))
def outerJoinBy[K, U](smaller: TypedPipe[U])(g: (T => K), h: (U => K), reducers: Int = -1)(implicit ord: Ordering[K]): CoGrouped[K, (Option[T], Option[U])] = pipe.groupBy(g).withReducers(reducers).outerJoin(smaller.groupBy(h))
}
/**
 * These are named syntax extensions that users can optionally import.
 * Avoid import Syntax._
 */
object Syntax {
// enables pipe.joinBy(...) and friends on any TypedPipe
implicit def joinOnMappablePipe[T](p: TypedPipe[T]): MappablePipeJoinEnrichment[T] = new MappablePipeJoinEnrichment(p)
}
| sriramkrishnan/scalding | scalding-core/src/main/scala/com/twitter/scalding/typed/TypedPipe.scala | Scala | apache-2.0 | 45,782 |
package main.scala;
import MorseLanguageDetails._;
import GeneralFunc._;
object MorseFunc
{
/**
* MorseReader class used to interpret Morse code or translate to Morse code.
*
* METHODS:
* translate
* USE:
* PARAMETERS:
* s:String {Phrase to be translated to or from Morse.}
* If Translate to Morse mode is enabled, inserted string will be translated to Morse.
* Else code will attempt to be translated from Morse to English.
* USE_2:
* PARAMETERS:
* s:String {Phrase to be translated to or from Morse.}
* mode:String {Translation mode, either --mte or --etm.}
* RETURNS:
* A string of either English or Morse characters.
* EXCEPTIONS:
* If given string contains unsupported characters an IllegalArgumentException is thrown.
* USE_2: If given mode is not recognized an IllegalArgumentException is thrown.
* switchTranslationMode
* USE:
* Switching to and from Translate To Morse mode.
* Will switch from true to false or false to true depending on current setting.
* getTranslationMode
* USE:
* Returns what the current translation mode is.
* RETURNS:
* True if Translating to Morse
* False otherwise.
*/
class MorseReader() {
  private val MorseToEnglish = MorseLanguageDetails.MorseToEnglish
  private val EnglishToMorse = MorseLanguageDetails.EnglishToMorse
  // true => translating English to Morse; false => Morse to English
  private var TransToMorse = true

  /**
   * Translates an English phrase to written Morse: letters within a word are
   * separated by single spaces and words by " / ".
   *
   * @throws IllegalArgumentException listing every unsupported symbol found.
   */
  private def transPhraseToMorse(phrase: String): String = {
    // Real newlines become the two-character sequence "\n" so they survive
    // the per-character translation below.
    val inWords: List[String] =
      phrase.replace(MorseLanguageDetails.newLine, "\\n").split(' ').toList
    var outWords: String = ""
    var invalidChars: Array[String] = Array()
    var shouldThrow: Boolean = false
    for (i <- 0 until inWords.length) {
      val charStrings: Array[String] =
        GeneralFunc.CharArrayToStringArray(inWords(i).toCharArray())
      // Translate each letter of the word in place.
      for (j <- 0 until charStrings.length) {
        if (this.EnglishToMorse.contains(charStrings(j).toUpperCase())) {
          charStrings(j) = this.EnglishToMorse.apply(charStrings(j).toUpperCase())
        } else {
          // Remember the bad symbol but keep going so the final error
          // message can list all of them at once.
          invalidChars = invalidChars :+ charStrings(j)
          shouldThrow = true
        }
        // A space follows each letter except the last one of the word and
        // any newline marker.
        if (j != charStrings.length - 1 && !MorseLanguageDetails.newLines.contains(charStrings(j)))
          outWords += (charStrings(j) + " ")
        else
          outWords += charStrings(j)
      }
      // In written Morse, "/" separates words; none is needed after the last word.
      if (i != inWords.length - 1)
        outWords += " / "
    }
    if (shouldThrow)
      throw new IllegalArgumentException("ERROR: Unknown symbols:[" + GeneralFunc.ArrayStringToString(invalidChars, true) + "]")
    outWords
  }

  /**
   * Translates written Morse back to English. Each space-separated token is
   * treated as one Morse letter.
   *
   * @throws IllegalArgumentException listing every unsupported symbol found.
   */
  private def transPhraseToEnglish(phrase: String): String = {
    // NOTE(review): split("\\n") is a regex that matches a literal newline
    // character, which the preceding replace just removed; if splitting on
    // the two-character marker "\n" was intended, this needs
    // Pattern.quote("\\n") — verify against callers before changing.
    val inLines = phrase.replace(MorseLanguageDetails.newLine, "\\n").split("\\n").map(_.trim())
    var invalidChars: Array[String] = Array()
    var shouldThrow: Boolean = false
    for (i <- 0 until inLines.length) {
      val WordsInLine = inLines(i).split(' ')
      for (j <- 0 until WordsInLine.length) {
        if (this.MorseToEnglish.contains(WordsInLine(j))) {
          WordsInLine(j) = this.MorseToEnglish.apply(WordsInLine(j))
        } else {
          invalidChars = invalidChars :+ WordsInLine(j)
          shouldThrow = true
        }
      }
      inLines(i) = GeneralFunc.ArrayStringToString(WordsInLine)
    }
    if (shouldThrow)
      throw new IllegalArgumentException("ERROR: Unknown symbols:[" + GeneralFunc.ArrayStringToString(invalidChars, true) + "]")
    GeneralFunc.ArrayStringToStringInterpolateNewLines(inLines.map(_.trim()))
  }

  /** Translates `s` using the current mode (see getTranslationMode). */
  def translate(s: String): String =
    if (this.TransToMorse) this.transPhraseToMorse(s)
    else this.transPhraseToEnglish(s)

  /**
   * Translates `s` with an explicit mode: "--etm" (English to Morse) or
   * "--mte" (Morse to English).
   *
   * @throws IllegalArgumentException if the mode is not recognized.
   */
  def translate(s: String, mode: String): String =
    mode match {
      case "--etm" => this.transPhraseToMorse(s)
      case "--mte" => this.transPhraseToEnglish(s)
      case _ =>
        // Bug fix: the original match had no default case, so an unknown
        // mode threw MatchError and this documented exception was
        // unreachable dead code after the match.
        throw new IllegalArgumentException("Mode [" + mode + "] is not a recognized mode.")
    }

  /** Flips between English-to-Morse and Morse-to-English modes. */
  def switchTranslationMode(): Unit = {
    this.TransToMorse = !this.TransToMorse
  }

  /** @return true when translating to Morse, false otherwise. */
  def getTranslationMode(): Boolean = this.TransToMorse
}
} | Eli45/MorseGUI | src/main/scala/MorseFunc.scala | Scala | gpl-2.0 | 6,392 |
/*
* Copyright 2015 Paul Horn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ff7
package monsters
package midgar1
package reactor1
import Attacks._
import battle.{BattleAction, BattleAttack, Person, Team}
import monsters.Ai.{Setup, NoSetup}
import stats.MP
import algebras._, Algebras._
import scalaz._
import Scalaz._
import shapeless.contrib.scalaz._
// MP soldier: picks between tonfa and machine gun with weights that would
// depend on row position, except the row check is currently stubbed out.
object Mp extends SimpleAi with RandomTarget with StatelessAi {
def attack[F[_] : Random]: Effect[F, BattleAttack] =
// NOTE(review): condition hard-coded to true, so only the front-row
// weighting (1:2 tonfa vs machineGun, presumably) is ever used — the
// commented row-position check awaits implementation.
if (true) // if (heroes.rowPosition(self) == FrontRow)
Random.choose(1, 2, tonfa, machineGun)
else
Random.choose(1, 6, tonfa, machineGun)
}
// Guard Hound: random attack choice, but always targets the team member
// with the lowest HP.
object GuardHound extends SimpleAi with StatelessAi {
def attack[F[_] : Random]: Effect[F, BattleAttack] =
Random.choose(1, 3, tentacle, bite)
def target[F[_] : Random](targets: Team): Effect[F, Person] =
targets.toNel.minimumBy1(_.hp)
}
// Mono Drive: occasionally casts fire (paying its MP cost) at the target
// with the weakest magic defense; otherwise drill-drives the target with
// the weakest physical defense.
object MonoDrive extends Ai with NoSetup {
def apply[F[_] : Random](self: Monster, targets: Team): Effect[F, BattleAction] =
// NOTE(review): Random.chance(1, 3) presumably rolls true 1 time in 3 —
// confirm in algebras.Random.
Random.chance(1, 3).map { roll ⇒
if (roll && fire.availableFor(self)) {
val attack = fire
val target = targets.toNel.minimumBy1(_.asTarget.magicDefense)
// deduct the spell's MP cost before attacking
self
.copy(mp = MP(self.mp.x - fire.cost.fold(0)(_.x)))
.attacks(target, attack)
} else {
val attack = drillDrive
val target = targets.toNel.minimumBy1(_.asTarget.defense)
self.attacks(target, attack)
}
}
}
// Grunt: like Mp, the row-position check is stubbed out, so only the
// front-row weighting between beam gun and hand claw is ever used.
object Grunt extends SimpleAi with RandomTarget with StatelessAi {
def attack[F[_] : Random]: Effect[F, BattleAttack] = if (true) { // if (heroes.rowPosition(self) == FrontRow)
Random.choose(1, 2, beamGun, handClaw)
} else {
Random.choose(1, 12, handClaw, beamGun)
}
}
// First Ray alternates turns: when count == 0 it fires the laser cannon at
// the highest-HP target and flips to count 1; on the next turn it merely
// updates its state back (an idle/charging turn) via BattleAction.change.
class FirstRay(count: Int) extends Ai with NoSetup {
def apply[F[_] : Random](self: Monster, targets: Team): Effect[F, BattleAction] = {
if (count == 0) {
val tar = targets.toNel.maximumBy1(_.hp)
val att = laserCannon
self.copy(ai = new FirstRay(1)).attacks(tar, att).effect
} else {
BattleAction.change(self.copy(ai = new FirstRay(0))).effect
}
}
}
// Companion entry point: installs the stateful FirstRay AI in its
// attacking state (count 0) at battle setup.
object FirstRay extends Ai with Setup {
def setup[F[_] : Random](self: Monster): Effect[F, Monster] =
self.copy(ai = new FirstRay(0)).effect
}
// Sweeper cycles through three attack states (smoke shot at a random
// target -> machine gun at the weakest -> double machine gun at the
// weakest -> back to smoke shot); setup picks a random starting state.
object Sweeper extends Ai with Setup {
lazy val state1: Ai = new SimpleAi with RandomTarget {
def attack[F[_] : Random]: Effect[F, BattleAttack] = smokeShot
def modify(self: Monster): Monster = self.copy(ai = state2)
}
lazy val state2: Ai = new SimpleAi {
def attack[F[_] : Random]: Effect[F, BattleAttack] = machineGun
def target[F[_] : Random](targets: Team): Effect[F, Person] =
targets.toNel.minimumBy1(_.hp)
def modify(self: Monster): Monster = self.copy(ai = state3)
}
lazy val state3: Ai = new SimpleAi {
def attack[F[_] : Random]: Effect[F, BattleAttack] = doubleMachineGun
def target[F[_] : Random](targets: Team): Effect[F, Person] =
targets.toNel.minimumBy1(_.hp)
def modify(self: Monster): Monster = self.copy(ai = state1)
}
def setup[F[_] : Random](self: Monster): Effect[F, Monster] =
// NOTE(review): chooseInt(0, 3) presumably draws from {0, 1, 2}
// (exclusive upper bound) — confirm in algebras.Random.
Random.chooseInt(0, 3).map {
case 0 ⇒ self.copy(ai = state1)
case 1 ⇒ self.copy(ai = state2)
case _ ⇒ self.copy(ai = state3)
}
}
| knutwalker/ff7-simulator | items/src/main/scala/ff7/monsters/midgar1/reactor1/package.scala | Scala | apache-2.0 | 3,846 |
/*
* Copyright (c) 2012-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package common
package utils
// Scalaz
import scalaz._
import Scalaz._
// json4s
import org.json4s.{
DefaultFormats,
JValue,
JNothing,
MappingException
}
import org.json4s.JsonDSL._
// Iglu
import iglu.client.validation.ProcessingMessageMethods._
object ScalazJson4sUtils {
  implicit val formats = DefaultFormats

  /**
   * Returns a field of type A at the end of a
   * JSON path
   *
   * @tparam A Type of the field to extract
   * @param head The first field in the JSON path
   *        Exists to ensure the path is nonempty
   * @param tail The rest of the fields in the
   *        JSON path
   * @return the value extracted from the JSON on
   *         success or an error String on failure
   */
  def extract[A: Manifest](config: JValue, head: String, tail: String*): ValidatedMessage[A] = {
    val path = head +: tail
    def pathStr = path.mkString(".")
    // Presence must be verified first: following a nonexistent path yields
    // JNothing, which extract[List[String]] would silently read as Nil.
    if (!fieldExists(config, head, tail: _*))
      s"JSON path %s not found".format(pathStr).toProcessingMessage.fail
    else
      try {
        path.foldLeft(config)(_ \ _).extract[A].success
      } catch {
        case me: MappingException =>
          s"Could not extract %s as %s from supplied JSON".format(pathStr, manifest[A]).toProcessingMessage.fail
      }
  }

  /**
   * Determines whether a JSON contains a specific
   * JSON path
   *
   * @param head The first field in the JSON path
   *        Exists to ensure the path is nonempty
   * @param tail The rest of the fields in the
   *        JSON path
   * @return Whether the path is valid
   */
  def fieldExists(config: JValue, head: String, tail: String*): Boolean =
    (head +: tail).foldLeft(config)(_ \ _) != JNothing
}
| mdavid/lessig-bigdata | lib/snowplow/3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/utils/ScalazJson4sUtils.scala | Scala | mit | 2,657 |
/**
* Copyright (C) 2007 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms
import org.orbeon.oxf.util.IndentedLogger
import collection.mutable.HashMap
import processor.XFormsServer
// Global indented loggers
// Global indented loggers
object Loggers {
  private val LoggersByCategory = new HashMap[String, IndentedLogger]

  /**
   * Returns the indented logger for the given category, creating and
   * caching it on first use. Debug output is enabled only when the base
   * logger has debug on AND the category is listed in the debug-logging
   * property.
   */
  // FIXME: more than 1 thread access the returned indented logger, which is stateful → Use threadLocal?
  def getIndentedLogger(category: String): IndentedLogger = synchronized {
    LoggersByCategory.getOrElseUpdate(category, {
      val logger = XFormsServer.logger
      val isDebugEnabled = logger.isDebugEnabled && XFormsProperties.getDebugLogging.contains(category)
      new IndentedLogger(logger, isDebugEnabled)
    })
  }
}
| brunobuzzi/orbeon-forms | xforms/jvm/src/main/scala/org/orbeon/oxf/xforms/Loggers.scala | Scala | lgpl-2.1 | 1,424 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions.{IsNull, ListQuery, Not}
import org.apache.spark.sql.catalyst.plans.{ExistenceJoin, LeftSemi, PlanTest}
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.RuleExecutor
// Tests for the RewritePredicateSubquery optimizer rule: IN / NOT-IN
// subqueries should be rewritten into semi / existence joins.
class RewriteSubquerySuite extends PlanTest {
object Optimize extends RuleExecutor[LogicalPlan] {
val batches =
Batch("Column Pruning", FixedPoint(100), ColumnPruning) ::
Batch("Rewrite Subquery", FixedPoint(1),
RewritePredicateSubquery,
ColumnPruning,
CollapseProject,
RemoveNoopOperators) :: Nil
}
test("Column pruning after rewriting predicate subquery") {
val relation = LocalRelation('a.int, 'b.int)
val relInSubquery = LocalRelation('x.int, 'y.int, 'z.int)
// a IN (SELECT x FROM relInSubquery) should become a LeftSemi join,
// with only the referenced columns ('a and 'x) surviving pruning
val query = relation.where('a.in(ListQuery(relInSubquery.select('x)))).select('a)
val optimized = Optimize.execute(query.analyze)
val correctAnswer = relation
.select('a)
.join(relInSubquery.select('x), LeftSemi, Some('a === 'x))
.analyze
comparePlans(optimized, correctAnswer)
}
test("NOT-IN subquery nested inside OR") {
val relation1 = LocalRelation('a.int, 'b.int)
val relation2 = LocalRelation('c.int, 'd.int)
val exists = 'exists.boolean.notNull
// NOT IN under a disjunction cannot use LeftAnti; it is rewritten to an
// ExistenceJoin producing an `exists` flag consumed by the filter.
// The IsNull disjunct preserves NOT IN's three-valued NULL semantics.
val query = relation1.where('b === 1 || Not('a.in(ListQuery(relation2.select('c))))).select('a)
val correctAnswer = relation1
.join(relation2.select('c), ExistenceJoin(exists), Some('a === 'c || IsNull('a === 'c)))
.where('b === 1 || Not(exists))
.select('a)
.analyze
val optimized = Optimize.execute(query.analyze)
comparePlans(optimized, correctAnswer)
}
}
| witgo/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/RewriteSubquerySuite.scala | Scala | apache-2.0 | 2,682 |
/*
* BlobVoices.scala
* (SysSon)
*
* Copyright (c) 2013-2017 Institute of Electronic Music and Acoustics, Graz.
* Copyright (c) 2014-2019 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.fscape
package stream
import java.awt.Rectangle
import java.awt.geom.{Area, Path2D}
import akka.stream.stage.InHandler
import akka.stream.{Attributes, FanInShape11}
import de.sciss.fscape.stream.impl.{DemandAuxInHandler, DemandChunkImpl, DemandFilterLogic, DemandInOutImpl, DemandProcessInHandler, NodeImpl, Out1DoubleImpl, Out1LogicImpl, ProcessOutHandlerImpl, StageImpl}
import scala.annotation.{switch, tailrec}
object BlobVoices {
// Graph-builder entry point: instantiates the stage for the current layer
// and wires all eleven inlets, returning the single audio-rate outlet.
def apply(in: OutD, width: OutI, height: OutI, minWidth: OutI, minHeight: OutI, thresh: OutD, voices: OutI,
numBlobs: OutI, bounds: OutD, numVertices: OutI, vertices: OutD)(implicit b: Builder): OutD = {
val stage0 = new Stage(b.layer)
val stage = b.add(stage0)
b.connect(in , stage.in0 )
b.connect(width , stage.in1 )
b.connect(height , stage.in2 )
b.connect(minWidth , stage.in3 )
b.connect(minHeight , stage.in4 )
b.connect(thresh , stage.in5 )
b.connect(voices , stage.in6 )
b.connect(numBlobs , stage.in7 )
b.connect(bounds , stage.in8 )
b.connect(numVertices, stage.in9 )
b.connect(vertices , stage.in10)
stage.out
}
private final val name = "BlobVoices"
private type Shape = FanInShape11[BufD, BufI, BufI, BufI, BufI, BufD, BufI, BufI, BufD, BufI, BufD, BufD]
// Akka-streams stage descriptor: declares the 11-in / 1-out shape
// (inlet order must match the connect calls in `apply` above).
private final class Stage(layer: Layer)(implicit ctrl: Control) extends StageImpl[Shape](name) {
val shape = new FanInShape11(
in0 = InD (s"$name.in" ),
in1 = InI (s"$name.width" ),
in2 = InI (s"$name.height" ),
in3 = InI (s"$name.minWidth" ),
in4 = InI (s"$name.minHeight" ),
in5 = InD (s"$name.thresh" ),
in6 = InI (s"$name.voices" ),
in7 = InI (s"$name.numBlobs" ),
in8 = InD (s"$name.bounds" ),
in9 = InI (s"$name.numVertices"),
in10 = InD (s"$name.vertices" ),
out = OutD(s"$name.out" )
)
def createLogic(attr: Attributes) = new Logic(layer, shape)
}
// Mutable accumulator for one detected blob: its bounding box plus the
// contour vertices, filled in incrementally as blob metadata is read.
private final class Blob {
var xMin = 0.0
var xMax = 0.0
var yMin = 0.0
var yMax = 0.0
// bounding-box extents derived from the corners
def width : Double = xMax - xMin
def height: Double = yMax - yMin
// number of valid entries in vertexX/vertexY (arrays allocated lazily)
var numVertices = 0
var vertexX: Array[Double] = _
var vertexY: Array[Double] = _
override def toString = f"Blob(xMin = $xMin%g, xMax = $xMax%g, yMin = $yMin%g, yMax = $yMax%g, numVertices = $numVertices)"
}
private final class Logic(layer: Layer, shape: Shape)(implicit ctrl: Control)
extends NodeImpl(name, layer, shape)
with DemandFilterLogic[BufD, Shape]
with DemandChunkImpl [Shape]
with Out1LogicImpl [BufD, Shape]
with DemandInOutImpl [Shape]
with Out1DoubleImpl [Shape] {
private[this] var winSizeIn = 0
private[this] var winSizeOut = 0
private[this] var winBufIn : Array[Double] = _
private[this] var winBufOut: Array[Double] = _
private[this] var width = 0
private[this] var height = 0
private[this] var minWidth = 0
private[this] var minHeight = 0
private[this] var thresh = 0.0
private[this] var voices = 0
private[this] var writeToWinOff = 0
private[this] var writeToWinRemain = 0
private[this] var readFromWinOff = 0
private[this] var readFromWinRemain = 0
private[this] var isNextWindow = true
protected var bufIn0 : BufD = _ // in
private[this] var bufIn1 : BufI = _ // width
private[this] var bufIn2 : BufI = _ // height
private[this] var bufIn3 : BufI = _ // minWidth
private[this] var bufIn4 : BufI = _ // minHeight
private[this] var bufIn5 : BufD = _ // thresh
private[this] var bufIn6 : BufI = _ // voices
private[this] var bufIn7 : BufI = _ // numBlobs
private[this] var bufIn8 : BufD = _ // bounds
private[this] var bufIn9 : BufI = _ // numVertices
private[this] var bufIn10: BufD = _ // vertices
protected var bufOut0: BufD = _
protected def in0: InD = shape.in0
private[this] var _mainCanRead = false
private[this] var _auxCanRead = false
private[this] var _mainInValid = false
private[this] var _auxInValid = false
private[this] var _inValid = false
// private[this] var _blobNumCanRead = false
// private[this] var _blobBoundsCanRead = false
// private[this] var _blobNumVerticesCanRead = false
// private[this] var _blobVerticesCanRead = false
private[this] var blobNumOff = 0
private[this] var blobNumRemain = 0
private[this] var blobBoundsOff = 0
private[this] var blobBoundsRemain = 0
private[this] var blobNumVerticesOff = 0
private[this] var blobNumVerticesRemain = 0
private[this] var blobVerticesOff = 0
private[this] var blobVerticesRemain = 0
private[this] var blobs: Array[Blob] = _
private[this] var blobsBoundsRead = 0
private[this] var blobsNumVerticesRead = 0
private[this] var blobsVerticesMissing = 0
private[this] var blobsVerticesBlobIdx = 0
private[this] var blobsVerticesVertexIdx = 0
private object BlobNumInHandler extends InHandler {
def onPush(): Unit =
if (canReadBlobNum) {
readBlobNumIn()
process()
}
override def onUpstreamFinish(): Unit = {
if (canReadBlobNum) readBlobNumIn()
process()
}
}
private object BlobBoundsInHandler extends InHandler {
def onPush(): Unit =
if (canReadBlobBounds) {
readBlobBoundsIn()
process()
}
override def onUpstreamFinish(): Unit = {
if (canReadBlobBounds) readBlobBoundsIn()
process()
}
}
private object BlobNumVerticesInHandler extends InHandler {
def onPush(): Unit =
if (canReadBlobNumVertices) {
readBlobNumVerticesIn()
process()
}
override def onUpstreamFinish(): Unit = {
if (canReadBlobNumVertices) readBlobNumVerticesIn()
process()
}
}
private object BlobVerticesInHandler extends InHandler {
def onPush(): Unit =
if (canReadBlobVertices) {
readBlobVerticesIn()
process()
}
override def onUpstreamFinish(): Unit = {
if (canReadBlobVertices) readBlobVerticesIn()
process()
}
}
// Attach handlers: in0 carries the main (windowed) signal, in1..in6 are
// demand-driven auxiliary parameters, in7..in10 carry blob metadata, and
// `out` emits the voice matrix.
new DemandProcessInHandler(shape.in0 , this)
new DemandAuxInHandler    (shape.in1 , this)
new DemandAuxInHandler    (shape.in2 , this)
new DemandAuxInHandler    (shape.in3 , this)
new DemandAuxInHandler    (shape.in4 , this)
new DemandAuxInHandler    (shape.in5 , this)
new DemandAuxInHandler    (shape.in6 , this)
setInHandler(shape.in7 , BlobNumInHandler        )
setInHandler(shape.in8 , BlobBoundsInHandler     )
setInHandler(shape.in9 , BlobNumVerticesInHandler)
setInHandler(shape.in10, BlobVerticesInHandler   )
new ProcessOutHandlerImpl (shape.out , this)

// A new window chunk may be written only when nothing is pending for output,
// valid input has been seen, and no blob processing cycle is in flight.
@inline private[this] def canWriteToWindow = readFromWinRemain == 0 && inValid && !_statePrepareProcess

// A blob inlet can be read when its previous buffer is exhausted and new data is available.
@inline private[this] def canReadBlobNum         = blobNumRemain         == 0 && isAvailable(shape.in7 )
@inline private[this] def canReadBlobBounds      = blobBoundsRemain      == 0 && isAvailable(shape.in8 )
@inline private[this] def canReadBlobNumVertices = blobNumVerticesRemain == 0 && isAvailable(shape.in9 )
@inline private[this] def canReadBlobVertices    = blobVerticesRemain    == 0 && isAvailable(shape.in10)

// An inlet has terminated when it is closed and no pushed element remains.
@inline private[this] def blobNumEnded         = !isAvailable(shape.in7 ) && isClosed(shape.in7 )
@inline private[this] def blobBoundsEnded      = !isAvailable(shape.in8 ) && isClosed(shape.in8 )
@inline private[this] def blobNumVerticesEnded = !isAvailable(shape.in9 ) && isClosed(shape.in9 )
@inline private[this] def blobVerticesEnded    = !isAvailable(shape.in10) && isClosed(shape.in10)

protected def out0: OutD = shape.out

// Read-only views of the flags maintained by readMainIns / readAuxIns /
// updateMainCanRead / updateAuxCanRead below.
def mainCanRead : Boolean = _mainCanRead
def auxCanRead  : Boolean = _auxCanRead
def mainInValid : Boolean = _mainInValid
def auxInValid  : Boolean = _auxInValid
def inValid     : Boolean = _inValid
// override def preStart(): Unit = {
// val sh = shape
// pull(sh.in0)
// pull(sh.in1)
// pull(sh.in2)
// pull(sh.in3)
// pull(sh.in4)
// pull(sh.in5)
// pull(sh.in6)
// pull(sh.in7)
// pull(sh.in8)
// pull(sh.in9)
// pull(sh.in10)
// }
/** Releases all input/output buffers and drops window/blob arrays when the
  * stage stops, so no references outlive the logic.
  */
override protected def stopped(): Unit = {
  freeInputBuffers()
  freeOutputBuffers()
  winBufIn  = null
  winBufOut = null
  blobs     = null
}
/** Grabs the next main-input buffer from `in0`, re-issues demand, and updates
  * the validity flags. Returns the number of frames in the new buffer.
  */
protected def readMainIns(): Int = {
  freeMainInBuffers()         // release the previous buffer before grabbing a new one
  val sh = shape
  bufIn0 = grab(sh.in0)
  bufIn0.assertAllocated()
  tryPull(sh.in0)             // request the next chunk immediately
  if (!_mainInValid) {
    _mainInValid = true
    // overall input validity requires both main and aux to have delivered once
    _inValid = _auxInValid
  }
  _mainCanRead = false
  bufIn0.size
}
/** Grabs whichever auxiliary parameter inlets (in1..in6: width, height,
  * minWidth, minHeight, thresh, voices) currently have data, re-issues demand
  * on each, and returns the maximum buffer size seen. Inlets without data keep
  * their previous buffer value (parameters are sticky).
  */
protected def readAuxIns(): Int = {
  freeAuxInBuffers()
  val sh = shape
  var sz = 0
  if (isAvailable(sh.in1)) {   // width
    bufIn1 = grab(sh.in1)
    sz     = bufIn1.size
    tryPull(sh.in1)
  }
  if (isAvailable(sh.in2)) {   // height
    bufIn2 = grab(sh.in2)
    sz     = math.max(sz, bufIn2.size)
    tryPull(sh.in2)
  }
  if (isAvailable(sh.in3)) {   // minWidth
    bufIn3 = grab(sh.in3)
    sz     = math.max(sz, bufIn3.size)
    tryPull(sh.in3)
  }
  if (isAvailable(sh.in4)) {   // minHeight
    bufIn4 = grab(sh.in4)
    sz     = math.max(sz, bufIn4.size)
    tryPull(sh.in4)
  }
  if (isAvailable(sh.in5)) {   // thresh
    bufIn5 = grab(sh.in5)
    sz     = math.max(sz, bufIn5.size)
    tryPull(sh.in5)
  }
  if (isAvailable(sh.in6)) {   // voices
    bufIn6 = grab(sh.in6)
    sz     = math.max(sz, bufIn6.size)
    tryPull(sh.in6)
  }
  if (!_auxInValid) {
    _auxInValid = true
    // overall input validity requires both aux and main to have delivered once
    _inValid = _mainInValid
  }
  _auxCanRead = false
  sz
}
/** Grabs the next buffer from `in7` (blob counts) and resets its read cursor.
  * Must only be called when the previous buffer is fully consumed.
  */
private def readBlobNumIn(): Unit = {
  require(blobNumRemain == 0)
  freeBlobNumInBuffer()
  bufIn7        = grab(shape.in7)
  blobNumOff    = 0
  blobNumRemain = bufIn7.size
  tryPull(shape.in7)
}

/** Grabs the next buffer from `in8` (bounding boxes) and resets its read cursor. */
private def readBlobBoundsIn(): Unit = {
  require(blobBoundsRemain == 0)
  freeBlobBoundsInBuffer()
  bufIn8           = grab(shape.in8)
  blobBoundsOff    = 0
  blobBoundsRemain = bufIn8.size
  tryPull(shape.in8)
}

/** Grabs the next buffer from `in9` (per-blob vertex counts) and resets its read cursor. */
private def readBlobNumVerticesIn(): Unit = {
  require(blobNumVerticesRemain == 0)
  freeBlobNumVerticesInBuffer()
  bufIn9                = grab(shape.in9)
  blobNumVerticesOff    = 0
  blobNumVerticesRemain = bufIn9.size
  tryPull(shape.in9)
}

/** Grabs the next buffer from `in10` (vertex coordinates) and resets its read cursor. */
private def readBlobVerticesIn(): Unit = {
  require(blobVerticesRemain == 0)
  freeBlobVerticesInBuffer()
  bufIn10            = grab(shape.in10)
  blobVerticesOff    = 0
  blobVerticesRemain = bufIn10.size
  tryPull(shape.in10)
}
/** Releases every input buffer currently held (main, aux, and all blob inlets). */
private def freeInputBuffers(): Unit = {
  freeMainInBuffers()
  freeAuxInBuffers()
  freeBlobNumInBuffer()
  freeBlobBoundsInBuffer()
  freeBlobNumVerticesInBuffer()
  freeBlobVerticesInBuffer()
}

// Each `free*` helper releases its buffer back to the allocator (if held) and
// nulls the reference so a double release is impossible.

private def freeMainInBuffers(): Unit =
  if (bufIn0 != null) {
    bufIn0.release()
    bufIn0 = null
  }

private def freeAuxInBuffers(): Unit = {
  if (bufIn1 != null) {
    bufIn1.release()
    bufIn1 = null
  }
  if (bufIn2 != null) {
    bufIn2.release()
    bufIn2 = null
  }
  if (bufIn3 != null) {
    bufIn3.release()
    bufIn3 = null
  }
  if (bufIn4 != null) {
    bufIn4.release()
    bufIn4 = null
  }
  if (bufIn5 != null) {
    bufIn5.release()
    bufIn5 = null
  }
  if (bufIn6 != null) {
    bufIn6.release()
    bufIn6 = null
  }
}

private def freeBlobNumInBuffer(): Unit =
  if (bufIn7 != null) {
    bufIn7.release()
    bufIn7 = null
  }

private def freeBlobBoundsInBuffer(): Unit =
  if (bufIn8 != null) {
    bufIn8.release()
    bufIn8 = null
  }

private def freeBlobNumVerticesInBuffer(): Unit =
  if (bufIn9 != null) {
    bufIn9.release()
    bufIn9 = null
  }

private def freeBlobVerticesInBuffer(): Unit =
  if (bufIn10 != null) {
    bufIn10.release()
    bufIn10 = null
  }

/** Releases the output buffer, if currently held. */
protected def freeOutputBuffers(): Unit =
  if (bufOut0 != null) {
    bufOut0.release()
    bufOut0 = null
  }
/** Refreshes `_mainCanRead`: the main inlet is readable iff data is available. */
def updateMainCanRead(): Unit =
  _mainCanRead = isAvailable(in0)

/** Refreshes `_auxCanRead`. Each aux inlet counts as ready when data has been
  * pushed, or when it is closed and a valid aux value was already seen (its
  * last value then remains in effect).
  */
def updateAuxCanRead(): Unit = {
  val sh = shape
  _auxCanRead =
    ((isClosed(sh.in1) && _auxInValid) || isAvailable(sh.in1)) &&
    ((isClosed(sh.in2) && _auxInValid) || isAvailable(sh.in2)) &&
    ((isClosed(sh.in3) && _auxInValid) || isAvailable(sh.in3)) &&
    ((isClosed(sh.in4) && _auxInValid) || isAvailable(sh.in4)) &&
    ((isClosed(sh.in5) && _auxInValid) || isAvailable(sh.in5)) &&
    ((isClosed(sh.in6) && _auxInValid) || isAvailable(sh.in6))
}
// @inline private def updateBlobNumCanRead (): Unit = _blobNumCanRead = isAvailable(shape.in7)
// @inline private def updateBlobBoundsCanRead (): Unit = _blobBoundsCanRead = isAvailable(shape.in8)
// @inline private def updateBlobNumVerticesCanRead(): Unit = _blobNumVerticesCanRead = isAvailable(shape.in9)
// @inline private def updateBlobVerticesCanRead (): Unit = _blobVerticesCanRead = isAvailable(shape.in10)
// --- per-window state-machine flags, driven by processChunk() ---
private[this] var _statePrepareProcess      = false  // window filled; blob-metadata reads pending
private[this] var _stateReadBlobNum         = false  // waiting for the blob count (in7)
private[this] var _stateReadBlobBounds      = false  // waiting for bounding boxes (in8)
private[this] var _stateReadBlobNumVertices = false  // waiting for vertex counts (in9)
private[this] var _stateReadBlobVertices    = false  // waiting for vertex coordinates (in10)
private[this] var _stateProcessBlobs        = false  // all metadata in; run processWindow()
private[this] var _stateComplete            = false  // a blob inlet ended prematurely; stage should complete
private[this] var numBlobs = 0                       // blob count for the window being assembled
/** One step of the stage's state machine. Fills the input window, then runs the
  * blob-metadata pipeline (count -> bounds -> vertex counts -> vertices), then
  * processes the window and copies results to the output. Returns `true` if any
  * state changed (so the caller should iterate again).
  */
protected def processChunk(): Boolean = {
  var stateChange = false
  // --- stage 1: copy main input into the analysis window ---
  if (canWriteToWindow) {
    val flushIn0 = inputsEnded // inRemain == 0 && shouldComplete()
    if (isNextWindow && !flushIn0) {
      writeToWinRemain = startNextWindow()
      isNextWindow     = false
      stateChange      = true
      // logStream(s"startNextWindow(); writeToWinRemain = $writeToWinRemain")
    }
    val chunk   = math.min(writeToWinRemain, mainInRemain) // .toInt
    val flushIn = flushIn0 && writeToWinOff > 0            // partial window at end of input
    if (chunk > 0 || flushIn) {
      // logStream(s"writeToWindow(); inOff = $inOff, writeToWinOff = $writeToWinOff, chunk = $chunk")
      if (chunk > 0) {
        copyInputToWindow(writeToWinOff = writeToWinOff, chunk = chunk)
        mainInOff        += chunk
        mainInRemain     -= chunk
        writeToWinOff    += chunk
        writeToWinRemain -= chunk
        stateChange       = true
      }
      if (writeToWinRemain == 0 || flushIn) {
        // window complete (or flushing): start the blob-metadata reads
        _statePrepareProcess = true
        _stateReadBlobNum    = true
        stateChange          = true
        // logStream(s"processWindow(); readFromWinRemain = $readFromWinRemain")
      }
    }
  }
  // --- stage 2: read the blob count and (re)allocate the blobs array ---
  if (_stateReadBlobNum) {
    if (canReadBlobNum) readBlobNumIn()
    if (blobNumRemain > 0) {
      numBlobs = bufIn7.buf(blobNumOff)
      if (blobs == null || blobs.length < numBlobs) {
        blobs = Array.fill[Blob](numBlobs)(new Blob)
      }
      blobNumRemain -= 1
      blobNumOff    += 1
      // reset the per-window progress counters for stages 3-5
      blobsBoundsRead        = 0
      blobsNumVerticesRead   = 0
      blobsVerticesMissing   = 0
      blobsVerticesBlobIdx   = 0
      blobsVerticesVertexIdx = 0
      _stateReadBlobNum         = false
      _stateReadBlobBounds      = true
      _stateReadBlobNumVertices = true
      _stateReadBlobVertices    = true
      stateChange               = true
    } else if (blobNumEnded) {
      _stateComplete = true
      return stateChange
    }
  }
  // --- stage 3: read 4 bounding values per blob (xMin, xMax, yMin, yMax) ---
  if (_stateReadBlobBounds) {
    if (canReadBlobBounds) readBlobBoundsIn()
    val chunk = math.min(blobBoundsRemain, numBlobs * 4 - blobsBoundsRead)
    if (chunk > 0) {
      var _boundsOff  = blobBoundsOff
      var _boundsRead = blobsBoundsRead
      val _buf        = bufIn8.buf
      val stop        = _boundsOff + chunk
      while (_boundsOff < stop) {
        val blobIdx = _boundsRead / 4
        val blob    = blobs(blobIdx)
        val coord   = _buf(_boundsOff)
        (_boundsRead % 4: @switch) match {
          case 0 => blob.xMin = coord
          case 1 => blob.xMax = coord
          case 2 => blob.yMin = coord
          case 3 => blob.yMax = coord
        }
        _boundsOff  += 1
        _boundsRead += 1
      }
      blobBoundsOff     = _boundsOff
      blobBoundsRemain -= chunk
      blobsBoundsRead   = _boundsRead
      stateChange       = true
    }
    if (blobsBoundsRead == numBlobs * 4) {
      _stateReadBlobBounds = false
      // only when all three metadata streams are done can blobs be processed
      _stateProcessBlobs   = !(_stateReadBlobNumVertices || _stateReadBlobVertices)
      stateChange          = true
    } else if (blobBoundsRemain == 0 && blobBoundsEnded) {
      _stateComplete = true
      return stateChange
    }
  }
  // --- stage 4: read one vertex count per blob; allocate its vertex arrays ---
  if (_stateReadBlobNumVertices) {
    if (canReadBlobNumVertices) readBlobNumVerticesIn()
    val chunk = math.min(blobNumVerticesRemain, numBlobs - blobsNumVerticesRead)
    if (chunk > 0) {
      var _numVerticesOff  = blobNumVerticesOff
      var _numVerticesRead = blobsNumVerticesRead
      val _buf             = bufIn9.buf
      val stop             = _numVerticesOff + chunk
      while (_numVerticesOff < stop) {
        // val blobIdx = _numVerticesRead
        val blob = blobs(_numVerticesRead)
        val num  = _buf(_numVerticesOff)
        blob.numVertices = num
        blob.vertexX     = new Array[Double](num)
        blob.vertexY     = new Array[Double](num)
        blobsVerticesMissing += num * 2   // x and y per vertex
        _numVerticesOff  += 1
        _numVerticesRead += 1
      }
      blobNumVerticesOff     = _numVerticesOff
      blobNumVerticesRemain -= chunk
      blobsNumVerticesRead   = _numVerticesRead
      stateChange            = true
    }
    if (blobsNumVerticesRead == numBlobs) {
      _stateReadBlobNumVertices = false
      _stateProcessBlobs        = !(_stateReadBlobBounds || _stateReadBlobVertices)
      stateChange               = true
    } else if (blobNumVerticesRemain == 0 && blobNumVerticesEnded) {
      _stateComplete = true
      return stateChange
    }
  }
  // --- stage 5: read interleaved x/y vertex coordinates across all blobs ---
  if (_stateReadBlobVertices) {
    if (canReadBlobVertices) readBlobVerticesIn()
    val chunk = math.min(blobVerticesRemain, blobsVerticesMissing)
    if (chunk > 0) {
      var _verticesOff = blobVerticesOff
      val _buf         = bufIn10.buf
      var _blobIdx     = blobsVerticesBlobIdx
      var _vIdx        = blobsVerticesVertexIdx
      val stop         = _verticesOff + chunk
      while (_verticesOff < stop) {
        val blob   = blobs(_blobIdx)
        val num    = blob.numVertices
        // values still needed by the current blob, capped by buffer contents
        val chunk2 = math.min(num * 2 - _vIdx, stop - _verticesOff)
        if (chunk2 > 0) {
          val stop2 = _verticesOff + chunk2
          while (_verticesOff < stop2) {
            val coord = _buf(_verticesOff)
            // even indices are x coordinates, odd ones y
            val table = if (_vIdx % 2 == 0) blob.vertexX else blob.vertexY
            table(_vIdx / 2) = coord
            _verticesOff += 1
            _vIdx        += 1
          }
        } else {
          // current blob saturated; advance to the next blob
          _blobIdx += 1
          _vIdx     = 0
        }
      }
      blobsVerticesBlobIdx   = _blobIdx
      blobsVerticesVertexIdx = _vIdx
      blobsVerticesMissing  -= chunk
      blobVerticesOff        = _verticesOff
      blobVerticesRemain    -= chunk
      stateChange            = true
    }
    if (blobsVerticesMissing == 0 && !_stateReadBlobNumVertices) {
      _stateReadBlobVertices = false
      _stateProcessBlobs     = !_stateReadBlobBounds
      stateChange            = true
    } else if (blobVerticesRemain == 0 && blobVerticesEnded) {
      _stateComplete = true
      return stateChange
    }
  }
  // --- stage 6: all metadata present; run the actual voice assignment ---
  if (_stateProcessBlobs) {
    readFromWinRemain = processWindow(writeToWinOff = writeToWinOff) // , flush = flushIn)
    writeToWinOff  = 0
    readFromWinOff = 0
    isNextWindow   = true
    auxInOff      += 1
    auxInRemain   -= 1
    _stateProcessBlobs   = false
    _statePrepareProcess = false
    stateChange          = true
  }
  // --- stage 7: copy processed window data to the output buffer ---
  if (readFromWinRemain > 0) {
    val chunk = math.min(readFromWinRemain, outRemain) // .toInt
    if (chunk > 0) {
      // logStream(s"readFromWindow(); readFromWinOff = $readFromWinOff, outOff = $outOff, chunk = $chunk")
      copyWindowToOutput(readFromWinOff = readFromWinOff, outOff = outOff, chunk = chunk)
      readFromWinOff    += chunk
      readFromWinRemain -= chunk
      outOff            += chunk
      outRemain         -= chunk
      stateChange        = true
    }
  }
  stateChange
}
/** The stage completes when a blob inlet terminated mid-window, or when all
  * inputs ended and nothing is buffered for processing or output.
  */
protected def shouldComplete(): Boolean =
  _stateComplete || (inputsEnded && writeToWinOff == 0 && readFromWinRemain == 0)
/** Refreshes the auxiliary parameters (width, height, minWidth, minHeight,
  * thresh, voices) from their buffers at the current aux offset, re-allocates
  * the input/output window buffers if their sizes changed, and returns the
  * input window size (width * height).
  */
private def startNextWindow(): Int = {
  val oldWinSzIn  = winSizeIn
  val oldWinSzOut = winSizeOut
  val inOff       = auxInOff
  // each parameter keeps its previous value when its buffer has no sample at inOff
  if (bufIn1 != null && inOff < bufIn1.size) {
    width = math.max(1, bufIn1.buf(inOff))
  }
  if (bufIn2 != null && inOff < bufIn2.size) {
    height = math.max(1, bufIn2.buf(inOff))
  }
  if (bufIn3 != null && inOff < bufIn3.size) {
    minWidth = bufIn3.buf(inOff)
  }
  if (bufIn4 != null && inOff < bufIn4.size) {
    minHeight = bufIn4.buf(inOff)
  }
  if (bufIn5 != null && inOff < bufIn5.size) {
    thresh = bufIn5.buf(inOff)
  }
  if (bufIn6 != null && inOff < bufIn6.size) {
    voices = math.max(1, bufIn6.buf(inOff))
  }
  winSizeIn = width * height
  if (winSizeIn != oldWinSzIn) {
    winBufIn = new Array[Double](winSizeIn)
  }
  // output rows carry `voices` slots of `totalNumField` values each
  val blobDimSz = BlobVoice.totalNumField * voices
  // winSizeOut = blobDimSz * width
  winSizeOut = blobDimSz * height
  if (winSizeOut != oldWinSzOut) {
    winBufOut = new Array[Double](winSizeOut)
  }
  winSizeIn
}

/** Copies `chunk` frames from the main input buffer into the input window. */
private def copyInputToWindow(writeToWinOff: Int, chunk: Int): Unit =
  Util.copy(bufIn0.buf, mainInOff, winBufIn, writeToWinOff, chunk)

/** Copies `chunk` frames from the output window into the output buffer. */
private def copyWindowToOutput(readFromWinOff: Int, outOff: Int, chunk: Int): Unit =
  Util.copy(winBufOut, readFromWinOff, bufOut0.buf, outOff, chunk)
// ---- the fun bit ----
// this is mostly the translation from sysson-experiments/AnomaliesBlobs.scala
private[this] var WARNED_EXHAUSTED = false // XXX TODO --- should offer better fallback if running out of voices

/** Converts the decoded blobs of the current window into the voice matrix
  * written to `winBufOut`:
  *   1. filter blobs by minimum size and shape validity;
  *   2. for each kept blob, intersect its polygon with every horizontal scan
  *      row and compute per-row statistics (mean, std-dev, weighted center);
  *   3. drop blobs whose overlap count exceeds the voice budget;
  *   4. assign blobs to stable voice slots row by row.
  * Returns the number of output frames produced (`winSizeOut`).
  */
private def processWindow(writeToWinOff: Int): Int = {
  // if (writeToWinOff == 0) return writeToWinOff
  // println("BlobVoices.processWindow")
  val _blobs     = blobs
  val _width     = width
  val _height    = height
  val _minWidth  = minWidth
  val _minHeight = minHeight
  val _numBlobs  = numBlobs
  val _bufIn     = winBufIn
  val _thresh    = thresh

  val blobsAllB = Vector.newBuilder[BlobVoice]
  blobsAllB.sizeHint(_numBlobs)
  val path = new Path2D.Double
  val rect = new Rectangle
  val area = new Area
  var blobIdx = 0
  while (blobIdx < _numBlobs) {
    val blob      = _blobs(blobIdx)
    // keep only blobs that satisfy the size minima and have a real polygon
    val bigEnough = blob.width >= _minWidth && blob.height >= _minHeight
    val hasShape  = blob.numVertices > 1
    // if (!hasShape) {
    //   println(s"EMPTY: $blob")
    // } else {
    //   println(s"-----: $blob")
    // }
    val ok = bigEnough && hasShape
    if (ok) {
      // require(blob.numVertices > 1, blob.toString + s" ; minWidth = ${_minWidth}, minHeight = ${_minHeight}")
      // build the blob's outline as a closed Path2D
      path.reset()
      var vIdx = 0
      while (vIdx < blob.numVertices) {
        val x = blob.vertexX(vIdx)
        val y = blob.vertexY(vIdx)
        if (vIdx == 0) {
          path.moveTo(x, y)
        } else {
          path.lineTo(x, y)
        }
        vIdx += 1
      }
      path.closePath()
      area.reset()
      // clamp the blob's bounding box to the window
      val br         = path.getBounds
      val blobLeft   = math.max(0, br.x /* - 1 */)
      val blobWidth  = math.min(_width - blobLeft, br.width)
      val blobRight  = blobLeft + blobWidth
      val blobTop    = math.max(0, br.y /* - 1 */)
      val blobHeight = math.min(_height - blobTop , br.height)
      val blobBottom = blobTop + blobHeight
      val slices     = new Array[BlobSlice](blobHeight /* blobWidth */)
      // var x = blobLeft
      var y        = blobTop
      var sliceIdx = 0
      // one slice per scan row of the blob
      while (y < blobBottom /* x < blobRight */) {
        // rect.x = x
        // rect.y = 0
        // rect.width = 1
        // rect.height = height
        rect.x      = 0
        rect.y      = y
        rect.width  = width
        rect.height = 1
        // intersect the blob polygon with the one-pixel-high scan row
        val a = new Area(path)
        a.intersect(new Area(rect))
        val b = a.getBounds2D
        area.add(new Area(b))
        // val boxTop = math.max(blobTop , math.floor(b.getMinY).toInt)
        // val boxBottom = math.min(blobBottom, math.ceil (b.getMaxY).toInt)
        // val boxHeight = boxBottom - boxTop
        // var y = boxTop
        val boxLeft  = math.max(blobLeft , math.floor(b.getMinX).toInt)
        val boxRight = math.min(blobRight, math.ceil (b.getMaxX).toInt)
        val boxWidth = boxRight - boxLeft
        var x           = boxLeft
        var sliceSum    = 0.0
        var sliceCenter = 0.0
        var sliceCnt    = 0
        val offY        = y * _width
        // accumulate statistics over all above-threshold samples in the row span
        while (x < boxRight /* y < boxBottom */) {
          val value = _bufIn(x + offY /* _width * y */)
          if (value > _thresh) {
            sliceSum += value
            // sliceCenter += value * y
            sliceCenter += value * x
            sliceCnt    += 1
          }
          // y += 1
          x += 1
        }
        import de.sciss.numbers.Implicits._
        // val sliceMean = sliceSum / boxHeight
        // sliceCenter = (sliceCenter / sliceSum).clip(boxTop, boxBottom - 1)
        val sliceMean = if (sliceCnt > 0)
          sliceSum / sliceCnt
        else
          _bufIn((boxLeft + boxRight) / 2 + offY) // XXX TODO --- what else could we do?
        sliceCenter = if (sliceSum > 0)
          (sliceCenter / sliceSum).clip(boxLeft, boxRight - 1)
        else
          (boxLeft + boxRight) / 2 // XXX TODO --- what else could we do?
        // y = boxTop
        // second pass: standard deviation of the above-threshold samples
        x = boxLeft
        var sliceStdDev = 0.0
        while (x < boxRight /* y < boxBottom */) {
          val value = _bufIn(x + offY /* _width * y */)
          if (value > _thresh) {
            val d = value - sliceMean
            sliceStdDev += d * d
          }
          x /* y */ += 1
        }
        if (sliceCnt > 1 /* 0 */) sliceStdDev = math.sqrt(sliceStdDev / (sliceCnt - 1))
        val slice = BlobSlice(
          boxLeft     = boxLeft /* boxTop */,
          boxWidth    = boxWidth /* boxHeight */,
          sliceMean   = sliceMean,
          sliceStdDev = sliceStdDev,
          sliceCenter = sliceCenter
        )
        slices(sliceIdx) = slice
        y /* x */ += 1
        sliceIdx  += 1
      }
      // bloody floating point ops and rounding can lead to difference here
      // val ri = out.getBounds
      // assert(ri == br, s"ri = $ri; br = $br")
      val bv = BlobVoice(
        id         = -1,
        blobLeft   = blobLeft,
        blobTop    = blobTop,
        blobWidth  = blobWidth,
        blobHeight = blobHeight,
        slices     = slices
      )
      blobsAllB += bv
    }
    blobIdx += 1
  }
  val blobsAll: Vector[BlobVoice] = blobsAllB.result()

  val _voices = voices
  // call with shapes sorted by size in ascending order!
  // drop a blob when more than `_voices` larger blobs overlap it; survivors get ids
  @tailrec def filterOverlaps(rem: Vector[BlobVoice], out: Vector[BlobVoice], id: Int): Vector[BlobVoice] =
    rem match {
      case head +: tail =>
        val numOverlap = tail.count(_.overlapsV(head))
        val idNext     = if (numOverlap > _voices) id  else id + 1
        val outNext    = if (numOverlap > _voices) out else out :+ head.copy(id = id)
        filterOverlaps(rem = tail, out = outNext, id = idNext)
      case _ => out
    }

  val blobFlt = filterOverlaps(blobsAll.sortBy(_.blobSize), out = Vector.empty, id = 1)
    // .sortBy(b => (b.blobLeft, b.blobTop))
    .sortBy(b => (b.blobTop, b.blobLeft))

  val blobDimSz = BlobVoice.totalNumField * _voices
  val _bufOut   = winBufOut // Array.ofDim[Double](_width, blobDimSz)
  val idIndices = 0 until blobDimSz by BlobVoice.totalNumField

  // Row-by-row voice assignment: blobs already active keep the slot identified
  // by their id in the previous row; new blobs take the first empty slot.
  @tailrec def mkArray(y /* x */: Int, activeBefore: Vector[BlobVoice], rem: Vector[BlobVoice]): Unit =
    if (y < _height /* x < _width */) {
      val offY = y * blobDimSz
      // val active1 = activeBefore .filterNot(_.blobRight == x)
      // val (activeAdd, remRem) = rem.partition(_.blobLeft == x)
      val active1              = activeBefore .filterNot(_.blobBottom == y)
      val (activeAdd, remRem)  = rem.partition(_.blobTop == y)
      val activeNow            = active1 ++ activeAdd
      val (activeOld, activeNew) = activeNow.partition(activeBefore.contains)
      if (activeOld.nonEmpty) {
        // val xM = x - 1
        val yM    = y - 1
        val offYM = yM * blobDimSz
        activeOld.foreach { blob =>
          // val sliceIdx = x - blob.blobLeft
          // val outY = idIndices.find { y =>
          //   _bufOut(xM + y * _width) == blob.id
          // } .get // same slot as before
          // blob.fillSlice(sliceIdx = sliceIdx, out = _bufOut, off = x + outY * _width, scan = _width)
          val sliceIdx = y - blob.blobTop
          /* val opt = */ idIndices.collectFirst {
            case x if _bufOut(x + offYM /* yM * _width */) == blob.id => // same slot as before
              blob.fillSlice(sliceIdx = sliceIdx, out = _bufOut, off = x + offY, scan = 1)
          }
        }
      }
      if (activeNew.nonEmpty) {
        activeNew.foreach { blob =>
          // val sliceIdx = x - blob.blobLeft
          // val outY = idIndices.find { y =>
          //   _bufOut(x + y * _width) == 0
          // } .get // empty slot
          // blob.fillSlice(sliceIdx = sliceIdx, out = _bufOut, off = x + outY * _width, scan = _width)
          val sliceIdx = y - blob.blobTop
          val opt = idIndices.collectFirst {
            case x if _bufOut(x + offY) == 0 => // empty slot
              blob.fillSlice(sliceIdx = sliceIdx, out = _bufOut, off = x + offY, scan = 1)
          }
          if (opt.isEmpty && !WARNED_EXHAUSTED) {
            Console.err.println(s"Warning: BlobVoices - ran out of voices")
            WARNED_EXHAUSTED = true
          }
        }
      }
      mkArray(y = y + 1 /* x = x + 1 */, activeBefore = activeNow, rem = remRem)
    } else {
      assert(rem.isEmpty)
    }

  Util.clear(_bufOut, 0, winSizeOut)
  mkArray(0, Vector.empty, blobFlt)
  winSizeOut
}
}
private object BlobSlice {
  // number of case-class fields, derived from the product arity so it stays in
  // sync if the shape of BlobSlice ever changes
  final val numFields: Int = BlobSlice(0, 0, 0, 0, 0).productArity
}
/** Statistics of one horizontal scan-row slice of a blob.
  *
  * @param boxLeft      left edge of the slice's span
  * @param boxWidth     width of the slice's span
  * @param sliceMean    mean of the above-threshold values in the span
  * @param sliceStdDev  standard deviation of those values
  * @param sliceCenter  value-weighted horizontal center of the span
  */
private final case class BlobSlice(boxLeft: Int, boxWidth: Int, sliceMean: Double, sliceStdDev: Double,
                                   sliceCenter: Double) {
  /** Exclusive right edge of the span. */
  def boxRight: Int = boxLeft + boxWidth

  /** Writes the five field values into `out`, starting at `off` and advancing
    * by `scan` positions per field.
    */
  def fill(out: Array[Double], off: Int, scan: Int): Unit = {
    val fields = Array[Double](boxLeft, boxWidth, sliceMean, sliceStdDev, sliceCenter)
    var i = 0
    while (i < fields.length) {
      out(off + i * scan) = fields(i)
      i += 1
    }
  }
}
private object BlobVoice {
  // base fields of BlobVoice minus the `slices` array (product arity counts it)
  final val numBaseFields: Int = BlobVoice(0, 0, 0, 0, 0, Array.empty).productArity - 1
  // total values written per voice slot: base fields plus one slice's fields
  final val totalNumField: Int = numBaseFields + BlobSlice.numFields
}
/* @param id unique blob identifier, positive. if zero, blob data is invalid
* @param blobLeft blob beginning in time frames ("horizontally")
* @param blobTop blob beginning within time slice (vertically)
* @param blobWidth blob duration in time frames
* @param blobHeight blob extent within time slice
* @param slices blob form
*/
/** A blob prepared for voice assignment; see the parameter notes above.
  * `slices` holds one [[BlobSlice]] per row from `blobTop` (inclusive) to
  * `blobBottom` (exclusive).
  */
private final case class BlobVoice(id: Int, blobLeft: Int, blobTop: Int, blobWidth: Int, blobHeight: Int,
                                   slices: Array[BlobSlice]) {
  def blobRight  : Int = blobLeft + blobWidth
  def blobBottom : Int = blobTop  + blobHeight
  def blobSize   : Int = blobWidth * blobHeight

  /** Whether this blob and `that` truly overlap: their bounding boxes must
    * intersect, and in at least one shared row the two slices' horizontal
    * spans must intersect.
    */
  def overlapsV(that: BlobVoice): Boolean =
    this.blobLeft < that.blobRight  && this.blobRight  > that.blobLeft &&
    this.blobTop  < that.blobBottom && this.blobBottom > that.blobTop  && {
      val top     = math.max(this.blobTop   , that.blobTop   )
      val bottom  = math.min(this.blobBottom, that.blobBottom)
      var idx     = top
      var found   = false
      // BUG FIX: `found` used to be overwritten on every iteration, so only the
      // LAST shared row decided the result. An overlap in ANY shared row must
      // count; we can stop as soon as one is detected.
      while (idx < bottom && !found) {
        val thisSlice = this.slices(idx - this.blobTop)
        val thatSlice = that.slices(idx - that.blobTop)
        found = thisSlice.boxLeft < thatSlice.boxRight && thisSlice.boxRight > thatSlice.boxLeft
        idx += 1
      }
      found
    }

  /** Writes this blob's base fields followed by the fields of slice
    * `sliceIdx` into `out`, starting at `off` and advancing by `scan`
    * positions per value.
    */
  def fillSlice(sliceIdx: Int, out: Array[Double], off: Int, scan: Int): Unit = {
    var _off = off
    out(_off) = id         ; _off += scan
    out(_off) = blobLeft   ; _off += scan
    out(_off) = blobTop    ; _off += scan
    out(_off) = blobWidth  ; _off += scan
    out(_off) = blobHeight ; _off += scan
    val slice = slices(sliceIdx)
    slice.fill(out, off = _off, scan = scan)
  }
}
} | iem-projects/sysson | src/main/scala/de/sciss/fscape/stream/BlobVoices.scala | Scala | gpl-3.0 | 37,201 |
package com.airbnb.aerosolve.training
import java.io.BufferedWriter
import java.io.OutputStreamWriter
import java.util.concurrent.ConcurrentHashMap
import com.airbnb.aerosolve.core.{ModelRecord, ModelHeader, FeatureVector, Example}
import com.airbnb.aerosolve.core.models.LinearModel
import com.airbnb.aerosolve.core.util.Util
import com.typesafe.config.Config
import org.slf4j.{LoggerFactory, Logger}
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import scala.util.Try
import scala.util.Random
import scala.math.abs
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
/*
* A trainer that generates a linear ranker.
*
* Params:
* loss - "ranking" (sgd), "regression" (sgd)
* learning_rate - a number between 0 and 1
* num_bags - number of parallel models to make. sgd only.
*
* For ranking:
*
* The optimization objective is score(positive) > score(negative) + 1
* We model this as loss = 1 - pos_score + neg_score
* but since score(x) = w' . x
* loss = 1 - w'(pos - neg)
* and dloss/dw = -pos + neg
* Since we are doing gradient descent, -dloss/dw = pos - neg
* For binary features this is the set of positive and negative features
* that are different.
* For the gradient update step we use adagrad which dynamically adjusts
* the step size based on the magnitude of the previous gradients.
* http://www.magicbroom.info/Papers/DuchiHaSi10.pdf
*
* For regression:
* epsilon - a parameter such that if |target - prediction| < eps
* no update will be taken
*
* Otherwise the gradient is the epsilon insensitive loss
* i.e. if w * x - target > epsilon, the gradient step is -1, else +1
*
* For classification
* rank_threshold - anything smaller or equal to this is a negative.
*
* if w * x * label > 1 we are correct. Else take a step - sign(w * x)
*
*/
object LinearRankerTrainer {
private final val log: Logger = LoggerFactory.getLogger("LinearRankerTrainer")
// sentinel key under which the cumulative loss and example count are stored
// alongside the feature weights in the weight map
private final val lossKey = ("$loss", "$loss")
// hard cap on the number of weights a model may grow to (new weights are only
// admitted in iteration 1 while below this cap)
val MAX_WEIGHTS : Int = 1000000
/** Dispatches one training iteration to the implementation selected by `loss`
  * ("ranking", "regression", "hinge", or "logistic"). An unknown loss name is
  * logged and aborts the driver via `System.exit(-1)`.
  */
def pickTrainer(sc : SparkContext,
                input : RDD[Example],
                config : Config,
                key : String,
                loss : String,
                numBags : Int,
                weights : collection.mutable.Map[(String, String), (Double, Double)],
                iteration : Int) :
    RDD[((String, String), (Double, Double))] = {
  loss match {
    case "ranking"    => rankingTrain       (sc, input, config, key, numBags, weights, iteration)
    case "regression" => regressionTrain    (sc, input, config, key, numBags, weights, iteration)
    case "hinge"      => classificationTrain(sc, input, config, key, numBags, weights, iteration)
    case "logistic"   => logisticTrain      (sc, input, config, key, numBags, weights, iteration)
    case unknown =>
      log.error("Unknown loss type %s".format(unknown))
      System.exit(-1)
      // unreachable after exit; present only to satisfy the return type
      rankingTrain(sc, input, config, key, numBags, weights, iteration)
  }
}
/** One SGD pass of epsilon-insensitive regression, run as `numBags` parallel
  * bags. Each partition updates a local copy of the broadcast weights and the
  * resulting (feature -> (weight, gradient-count)) entries are returned for
  * merging by the caller. The loss/count accumulator lives under `lossKey`.
  */
def regressionTrain(sc : SparkContext,
                    input : RDD[Example],
                    config : Config,
                    key : String,
                    numBags : Int,
                    weights : collection.mutable.Map[(String, String), (Double, Double)],
                    iteration : Int) :
    RDD[((String, String), (Double, Double))] = {
  val rankKey: String      = config.getString(key + ".rank_key")
  val lambda : Double      = config.getDouble(key + ".lambda")
  val lambda2 : Double     = config.getDouble(key + ".lambda2")
  val epsilon : Double     = config.getDouble(key + ".epsilon")
  val learningRate: Double = config.getDouble(key + ".learning_rate")
  val weightsBC = sc.broadcast(weights)
  LinearRankerUtils
    .makePointwise(input, config, key, rankKey)
    .coalesce(numBags, true)
    .mapPartitions(partition => {
      // The keys the feature (family, value)
      // The values are the weight, sum of squared gradients.
      val weightMap = weightsBC.value
      val rnd = new Random()
      partition.foreach(examples => {
        examples
          .example
          .filter(x => x.stringFeatures != null &&
                       x.floatFeatures != null &&
                       x.floatFeatures.containsKey(rankKey))
          .foreach(sample => {
            val target     = sample.floatFeatures.get(rankKey).iterator.next()._2
            val features   = LinearRankerUtils.getFeatures(sample)
            val prediction = LinearRankerUtils.score(features, weightMap)
            // The loss function the epsilon insensitive loss L = max(0,|w'x - y| - epsilon)
            // So if prediction = w'x and prediction > y then
            // the dloss / dw is
            val diff      = prediction - target
            val loss      = Math.abs(diff) - epsilon
            val lossEntry = weightMap.getOrElse(lossKey, (0.0, 0.0))
            if (loss <= 0) {
              // No loss suffered: only bump the example count
              weightMap.put(lossKey, (lossEntry._1, lossEntry._2 + 1.0))
            } else {
              // subgradient of the epsilon-insensitive loss is +/-1
              val grad = if (diff > 0) 1.0 else -1.0
              features.foreach(v => {
                val wt         = weightMap.getOrElse(v, (0.0, 0.0))
                val newGradSum = wt._2 + 1.0
                val newWeight  = fobosUpdate(currWeight = wt._1,
                                             gradient = grad,
                                             eta = learningRate,
                                             l1Reg = lambda,
                                             l2Reg = lambda2,
                                             sum = newGradSum)
                weightMap.put(v, (newWeight, newGradSum))
              })
              weightMap.put(lossKey, (lossEntry._1 + loss, lossEntry._2 + 1.0))
            }
          })
      })
      weightMap
        .iterator
    })
}
/** One SGD pass of hinge-loss binary classification, run as `numBags` parallel
  * bags. Labels are derived from the rank feature via `rank_threshold`
  * (<= threshold -> -1, else +1). Feature dropout is applied per example, and
  * new weights are only admitted during iteration 1 while under `MAX_WEIGHTS`.
  */
def classificationTrain(sc : SparkContext,
                        input : RDD[Example],
                        config : Config,
                        key : String,
                        numBags : Int,
                        weights : collection.mutable.Map[(String, String), (Double, Double)],
                        iteration : Int) :
    RDD[((String, String), (Double, Double))] = {
  val rankKey: String = config.getString(key + ".rank_key")
  val weightsBC = sc.broadcast(weights)
  LinearRankerUtils
    .makePointwise(input, config, key, rankKey)
    .coalesce(numBags, true)
    .mapPartitions(partition => {
      // The keys the feature (family, value)
      // The values are the weight, sum of squared gradients.
      val weightMap = weightsBC.value
      val lambda : Double  = config.getDouble(key + ".lambda")
      val lambda2 : Double = config.getDouble(key + ".lambda2")
      var size = weightMap.size
      val rnd = new Random()
      val learningRate: Double = config.getDouble(key + ".learning_rate")
      val threshold: Double    = config.getDouble(key + ".rank_threshold")
      val dropout : Double     = config.getDouble(key + ".dropout")
      partition.foreach(examples => {
        examples
          .example
          .filter(x => x.stringFeatures != null &&
                       x.floatFeatures != null &&
                       x.floatFeatures.containsKey(rankKey))
          .foreach(sample => {
            val rank     = sample.floatFeatures.get(rankKey).iterator.next()._2
            // random feature dropout; the score is rescaled to stay unbiased
            val features   = LinearRankerUtils.getFeatures(sample).filter(x => rnd.nextDouble() > dropout)
            val prediction = LinearRankerUtils.score(features, weightMap) / (1.0 - dropout)
            val label = if (rank <= threshold) {
              -1.0
            } else {
              1.0
            }
            // hinge loss: margin violations (label * prediction < 1) incur loss
            val loss      = 1.0 - label * prediction
            val lossEntry = weightMap.getOrElse(lossKey, (0.0, 0.0))
            if (loss > 0.0) {
              features.foreach(v => {
                val wt = weightMap.getOrElse(v, (0.0, 0.0))
                // Allow puts only in the first iteration and size is less than MAX_SIZE
                // or if it already exists.
                if ((iteration == 1 && size < MAX_WEIGHTS) || wt._1 != 0.0) {
                  if (wt._1 == 0.0) {
                    // We added a weight increase the size.
                    size = size + 1
                  }
                  val newGradSum = wt._2 + 1.0
                  val newWeight  = fobosUpdate(currWeight = wt._1,
                                               gradient = -label,
                                               eta = learningRate,
                                               l1Reg = lambda,
                                               l2Reg = lambda2,
                                               sum = newGradSum)
                  if (newWeight == 0.0) {
                    // L1 drove the weight to zero: drop it to keep the map sparse
                    weightMap.remove(v)
                  } else {
                    weightMap.put(v, (newWeight, newGradSum))
                  }
                }
              })
              weightMap.put(lossKey, (lossEntry._1 + loss, lossEntry._2 + 1.0))
            } else {
              weightMap.put(lossKey, (lossEntry._1, lossEntry._2 + 1.0))
            }
          })
      })
      weightMap
        .iterator
    })
}
/** One SGD pass of logistic-loss binary classification on compressed pointwise
  * examples, run as `numBags` parallel bags. Uses the same thresholded label,
  * dropout rescaling, weight-cap, and sparsity rules as classificationTrain.
  */
def logisticTrain(sc : SparkContext,
                  input : RDD[Example],
                  config : Config,
                  key : String,
                  numBags : Int,
                  weights : collection.mutable.Map[(String, String), (Double, Double)],
                  iteration : Int) :
    RDD[((String, String), (Double, Double))] = {
  val weightsBC = sc.broadcast(weights)
  LinearRankerUtils
    .makePointwiseCompressed(input, config, key)
    .coalesce(numBags, true)
    .mapPartitions(partition => {
      // The keys the feature (family, value)
      // The values are the weight, sum of squared gradients.
      val weightMap = weightsBC.value
      var size = weightMap.size
      val rnd = new Random()
      val learningRate: Double = config.getDouble(key + ".learning_rate")
      val threshold: Double    = config.getDouble(key + ".rank_threshold")
      val lambda : Double      = config.getDouble(key + ".lambda")
      val lambda2 : Double     = config.getDouble(key + ".lambda2")
      val dropout : Double     = config.getDouble(key + ".dropout")
      partition.foreach(sample => {
        val prediction = LinearRankerUtils.score(sample.pos, weightMap) / (1.0 - dropout)
        val label = if (sample.label <= threshold) {
          -1.0
        } else {
          1.0
        }
        // To prevent blowup.
        val corr    = scala.math.min(10.0, label * prediction)
        val expCorr = scala.math.exp(corr)
        // logistic loss: log(1 + exp(-label * prediction)), with the margin capped above
        val loss      = scala.math.log(1.0 + 1.0 / expCorr)
        val lossEntry = weightMap.getOrElse(lossKey, (0.0, 0.0))
        sample.pos.foreach(v => {
          val wt = weightMap.getOrElse(v, (0.0, 0.0))
          // Allow puts only in the first iteration and size is less than MAX_SIZE
          // or if it already exists.
          if ((iteration == 1 && size < MAX_WEIGHTS) || wt._1 != 0.0) {
            if (wt._1 == 0.0) {
              // We added a weight increase the size.
              size = size + 1
            }
            val newGradSum = wt._2 + 1.0
            // derivative of the logistic loss w.r.t. the score
            val grad      = -label / (1.0 + expCorr)
            val newWeight = fobosUpdate(currWeight = wt._1,
                                        gradient = grad,
                                        eta = learningRate,
                                        l1Reg = lambda,
                                        l2Reg = lambda2,
                                        sum = newGradSum)
            if (newWeight == 0.0) {
              // L1 drove the weight to zero: drop it to keep the map sparse
              weightMap.remove(v)
            } else {
              weightMap.put(v, (newWeight, newGradSum))
            }
          }
        })
        weightMap.put(lossKey, (lossEntry._1 + loss, lossEntry._2 + 1.0))
      })
      weightMap
        .iterator
    })
}
// http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting.pdf
/** Performs one FOBOS (forward-backward splitting) weight update with
  * AdaGrad-style step-size decay.
  *
  * @param currWeight current weight value
  * @param gradient   loss gradient for this feature
  * @param eta        base learning rate
  * @param l1Reg      L1 regularization strength (soft-thresholding)
  * @param l2Reg      L2 regularization strength (shrinkage)
  * @param sum        gradient count used for the AdaGrad decay
  * @return the regularized, updated weight (zero if L1 clips it out)
  */
def fobosUpdate(currWeight : Double,
                gradient : Double,
                eta : Double,
                l1Reg : Double,
                l2Reg : Double,
                sum : Double) : Double = {
  // learning rates at step t and at the half step t + 1/2
  val rate     = eta / scala.math.sqrt(sum)
  val rateHalf = eta / scala.math.sqrt(sum + 0.5)
  // forward gradient step followed by closed-form L2 shrinkage
  val shrunk = (currWeight - gradient * rate) / (1.0 + l2Reg * rateHalf)
  // L1 soft-thresholding: pull the magnitude toward zero, never past it
  val magnitude = scala.math.max(0.0, scala.math.abs(shrunk) - l1Reg * rateHalf)
  if (shrunk > 0.0) magnitude else -magnitude
}
/**
 * One bagged SGD pass over pairwise ranking examples using a hinge loss.
 *
 * Each partition trains against its executor-local copy of the broadcast
 * weights (mutation below touches only that copy) and emits its final
 * (feature -> (weight, sum of squared gradients)) entries; the caller
 * aggregates and averages the bags.
 *
 * @param numBags   number of partitions, i.e. independently trained models
 * @param weights   current (weight, gradient-sum) per (family, name) feature
 * @param iteration 1-based iteration count; new features may only be added
 *                  during the first iteration (capacity permitting)
 */
def rankingTrain(sc : SparkContext,
                 input : RDD[Example],
                 config : Config,
                 key : String,
                 numBags : Int,
                 weights : collection.mutable.Map[(String, String), (Double, Double)],
                 iteration : Int) :
    RDD[((String, String), (Double, Double))] = {
  val examples = LinearRankerUtils.rankingTrain(input, config, key)
  val weightsBC = sc.broadcast(weights)
  examples
    .coalesce(numBags, true)
    .mapPartitions(partition => {
      // The keys the feature (family, value)
      // The values are the weight, sum of squared gradients.
      val weightMap = weightsBC.value
      var size = weightMap.size
      val rnd = new Random(java.util.Calendar.getInstance().getTimeInMillis)
      val learningRate: Double = config.getDouble(key + ".learning_rate")
      val lambda : Double = config.getDouble(key + ".lambda")
      val lambda2 : Double = config.getDouble(key + ".lambda2")
      val dropout : Double = config.getDouble(key + ".dropout")
      partition.foreach(ce => {
        // Drop features independently; divide scores by (1 - dropout) so the
        // expected score is unchanged (inverted dropout).
        val pos = ce.pos.filter(x => rnd.nextDouble() > dropout)
        val neg = ce.neg.filter(x => rnd.nextDouble() > dropout)
        val posScore = LinearRankerUtils.score(pos, weightMap) / (1.0 - dropout)
        val negScore = LinearRankerUtils.score(neg, weightMap) / (1.0 - dropout)
        // Pairwise hinge loss with margin 1.0.
        val loss = 1.0 - posScore + negScore
        val lossEntry = weightMap.getOrElse(lossKey, (0.0, 0.0))
        if (loss > 0.0) {
          // Applies one FOBOS step to feature `v` with raw gradient `grad`.
          def update(v : (String, String), grad : Double) = {
            val wt = weightMap.getOrElse(v, (0.0, 0.0))
            // Allow puts only in the first iteration and size is less than MAX_SIZE
            // or if it already exists.
            if ((iteration == 1 && size < MAX_WEIGHTS) || wt._1 != 0.0) {
              val newGradSum = wt._2 + 1.0
              val newWeight = fobosUpdate(currWeight = wt._1,
                                          gradient = grad,
                                          eta = learningRate,
                                          l1Reg = lambda,
                                          l2Reg = lambda2,
                                          sum = newGradSum)
              // l1 regularization may zero the weight out entirely; drop it.
              if (newWeight == 0.0) {
                weightMap.remove(v)
              } else {
                weightMap.put(v, (newWeight, newGradSum))
              }
              // NOTE(review): `size` is incremented even when the new weight
              // was immediately removed above, so it is an upper bound on the
              // true map size — confirm this is intended.
              if (wt._1 == 0.0) {
                size = size + 1
              }
            }
          }
          // Positive features get gradient -1, negative ones +1.
          pos.foreach(v => {
            update(v, -1.0)
          })
          neg.foreach(v => {
            update(v, 1.0)
          })
          weightMap.put(lossKey, (lossEntry._1 + loss, lossEntry._2 + 1.0))
        } else {
          // No margin violation: only bump the example count for reporting.
          weightMap.put(lossKey, (lossEntry._1, lossEntry._2 + 1.0))
        }
      })
      // Emit this bag's (feature -> (weight, sum of squared gradients)).
      weightMap
        .iterator
    })
}
/**
 * Entry point: reads the loss type from the config and dispatches to
 * [[sgdTrain]].
 *
 * @return feature -> weight pairs sorted by descending absolute weight
 */
def train(sc : SparkContext,
          input : RDD[Example],
          config : Config,
          key : String) : Array[((String, String), Double)] = {
  // Default to "ranking" when the config key is absent. Try only catches
  // non-fatal exceptions (the previous catch-all on Throwable would also
  // have swallowed fatal JVM errors); this matches the subsample handling
  // in sgdTrain.
  val loss: String = Try(config.getString(key + ".loss")).getOrElse("ranking")
  log.info("Training using " + loss)
  sgdTrain(sc, input, config, key, loss)
}
/**
 * Seeds initial weights from the "&lt;key&gt;.prior" config list.
 *
 * Each entry has the form "family,name,weight"; entries that do not split
 * into exactly three tokens are silently skipped. If the key is absent (or
 * an entry fails to parse) the remaining priors are abandoned and a message
 * is logged.
 */
def setPrior(config : Config,
             key : String,
             weights : collection.mutable.Map[(String, String), (Double, Double)]) = {
  import scala.util.control.NonFatal
  try {
    val priors = config.getStringList(key + ".prior")
    for (prior <- priors) {
      val tokens : Array[String] = prior.split(",")
      if (tokens.length == 3) {
        val family = tokens(0)
        val name = tokens(1)
        val weight = tokens(2).toDouble
        log.info("Setting prior %s:%s = %f".format(family, name, weight))
        // Prior entries start with a gradient sum of 1.0.
        weights.put((family, name), (weight, 1.0))
      }
    }
  } catch {
    // NonFatal instead of Throwable: fatal JVM errors must not be swallowed.
    case NonFatal(_) => log.info("No prior given")
  }
}
/**
 * Bagged SGD driver: each iteration trains numBags models in parallel
 * (dispatched by pickTrainer), sums the per-bag results, keeps the top
 * MAX_WEIGHTS features by absolute weight (scaled by 1/numBags to average
 * the bags), and logs the average loss.
 *
 * Fix: removed the dead `else { 0.0 }` branch whose value was discarded.
 *
 * @param loss trainer selector (e.g. "ranking"), forwarded to pickTrainer
 * @return feature -> weight pairs sorted by descending absolute weight
 */
def sgdTrain(sc : SparkContext,
             input : RDD[Example],
             config : Config,
             key : String,
             loss : String) : Array[((String, String), Double)] = {
  val numBags : Int = config.getInt(key + ".num_bags")
  val iterations : Int = config.getInt(key + ".iterations")
  val subsample : Double = Try(config.getDouble(key + ".subsample")).getOrElse(1.0)
  // The keys the feature (family, value)
  // The values are the weight.
  var weights = new ConcurrentHashMap[(String, String), (Double, Double)]().asScala
  setPrior(config, key, weights)
  // Since we are bagging models, average them by numBags
  val scale : Double = 1.0 / numBags.toDouble
  val filteredInput = input
    .filter(examples => examples != null)
    .sample(false, subsample)
  for (i <- 1 to iterations) {
    log.info("Iteration %d".format(i))
    val resultsRDD = pickTrainer(sc, filteredInput, config, key, loss, numBags, weights, i)
      .reduceByKey((a,b) => (a._1 + b._1, a._2 + b._2))
      .persist()
    // The loss entry accumulates (sum of losses, example count) across bags.
    val lossV = resultsRDD.filter(x => x._1 == lossKey).take(1)
    var lossSum = 0.0
    var count = 0.0
    if (!lossV.isEmpty) {
      lossSum = lossV.head._2._1
      count = lossV.head._2._2
    }
    // Keep only the strongest MAX_WEIGHTS features by absolute weight.
    val results = resultsRDD
      .filter(x => x._1 != lossKey)
      .map(x => (scala.math.abs(x._2._1), (x._1, x._2)))
      .top(LinearRankerTrainer.MAX_WEIGHTS)
      .map(x => x._2)
    // Nuke the old weights
    weights = new ConcurrentHashMap[(String, String), (Double, Double)]().asScala
    var sz = 0
    results
      .foreach(value => {
        sz = sz + 1
        weights.put(value._1, (value._2._1 * scale, value._2._2))
      })
    log.info("Average loss = %f count = %f weight size = %d".format(lossSum / count, count, sz))
    resultsRDD.unpersist()
  }
  weights
    // Strip off the sum of squared gradients
    .map(x => (x._1, x._2._1))
    .toBuffer
    .sortWith((x, y) => abs(x._2) > abs(y._2))
    .toArray
}
/**
 * Serializes the trained weights: a header record with the model type and
 * record count, followed by one record per feature weight. Logs the first
 * 50 weights and closes the writer when done.
 */
def save(writer : BufferedWriter, weights : Array[((String, String), Double)]) = {
  // Header record first: tells the reader how many weight records follow.
  val header = new ModelHeader()
  header.setModelType("linear")
  header.setNumRecords(weights.size)
  val headerRecord = new ModelRecord()
  headerRecord.setModelHeader(header)
  writer.write(Util.encode(headerRecord))
  writer.write('\\n')
  log.info("Top 50 weights")
  weights.zipWithIndex.foreach { case (((family, name), wt), idx) =>
    if (idx < 50) {
      log.info("%s : %s = %f".format(family, name, wt))
    }
    val record = new ModelRecord()
    record.setFeatureFamily(family)
    record.setFeatureName(name)
    record.setFeatureWeight(wt)
    writer.write(Util.encode(record))
    writer.write('\\n')
  }
  writer.close()
}
/**
 * Trains the model and writes it to the path given by
 * "&lt;key&gt;.model_output", overwriting any existing file.
 */
def trainAndSaveToFile(sc : SparkContext,
                       input : RDD[Example],
                       config : Config,
                       key : String) = {
  val trainedWeights = train(sc, input, config, key)
  val outputPath : String = config.getString(key + ".model_output")
  // Resolve the filesystem from the URI so hdfs:// and file:// both work.
  val fs = FileSystem.get(new java.net.URI(outputPath), new Configuration())
  val stream = fs.create(new Path(outputPath), true)
  save(new BufferedWriter(new OutputStreamWriter(stream)), trainedWeights)
}
}
| dmoliveira/aerosolve | training/src/main/scala/com/airbnb/aerosolve/training/LinearRankerTrainer.scala | Scala | apache-2.0 | 20,178 |
package tryp
import shapeless._, poly._, ops.hlist._, ops.coproduct.ExtendBy
/**
 * Type class: flat-maps the poly function `HF` over every element of the
 * HList `L`, feeding the same extra argument `In` to each case, and
 * concatenates the per-element HList results into `Out`.
 */
trait FlatMapWith[HF <: Poly2, L <: HList, In]
{
  // Concatenation of the HLists produced for each element of `L`.
  type Out <: HList
  def apply(l: L, in: In): Out
}
/**
 * Derivation rules for [[FlatMapWith]]: the empty HList maps to HNil, and a
 * cons cell maps its head through `HF` and prepends the result onto the
 * recursively mapped tail.
 */
object FlatMapWith
{
  /** Summons an instance, refining `Out` via the Aux pattern. */
  def apply[HF <: Poly2, L <: HList, In]
  (implicit fm: FlatMapWith[HF, L, In]): Aux[HF, L, In, fm.Out] = fm

  /** Aux alias exposing the result type as a type parameter. */
  type Aux[HF <: Poly2, L <: HList, A, Out0] =
    FlatMapWith[HF, L, A] { type Out = Out0 }

  // Base case: flat-mapping over HNil yields HNil.
  implicit def hnilFlatMapWith[HF <: Poly2, In]: Aux[HF, HNil, In, HNil] =
    new FlatMapWith[HF, HNil, In] {
      type Out = HNil
      def apply(l: HNil, in: In): Out = HNil
    }

  // Inductive case: apply HF to the head (producing an HList) and prepend
  // that onto the flat-mapped tail.
  implicit def hlistFlatMapWith
  [HF <: Poly2, H, T <: HList, In, OutH <: HList, OutT <: HList]
  (implicit
    f: Case2.Aux[HF, H, In, OutH],
    ft: FlatMapWith.Aux[HF, T, In, OutT],
    prepend: Prepend[OutH, OutT]
  ): Aux[HF, H :: T, In, prepend.Out] =
    new FlatMapWith[HF, H :: T, In] {
      type Out = prepend.Out
      def apply(l: H :: T, in: In): Out =
        prepend(f(l.head, in), ft(l.tail, in))
    }
}
/**
 * Poly2 whose single case applies a plain function `A => B` to an `A`;
 * used by `flatMapAnon` to flat-map with an anonymous function.
 */
object anonFlatMapper
  extends Poly2
{
  implicit def cse[A, B <: HList] = at[A, A => B]((a, f) => f(a))
}
/** Syntax wrapper: `hlist.flatMapWith(poly)(arg)` and `hlist.flatMapAnon(f)`. */
final class FlatMapWithOps[L <: HList](self: L)
{
  /** Flat-maps `f` over the HList, passing `a` as the second argument to every case. */
  def flatMapWith[A, F <: Poly2](f: F)(a: A)
  (implicit fmw: FlatMapWith[F, L, A]): fmw.Out =
    fmw(self, a)

  /** Flat-maps a plain function over an HList via [[anonFlatMapper]]. */
  def flatMapAnon[A, B <: HList](f: A => B)
  (implicit fmw: FlatMapWith[anonFlatMapper.type, L, A => B]) =
    flatMapWith(anonFlatMapper)(f)
}
/** Mix in to get the implicit conversion adding flatMapWith syntax to HLists. */
trait ToFlatMapWithOps
{
  implicit def ToFlatMapWithOps[A <: HList](a: A): FlatMapWithOps[A] =
    new FlatMapWithOps(a)
}
/**
 * Coproduct analogue of [[FlatMapWith]]: maps `HF` over whichever branch of
 * `C` is inhabited, widening the result into the combined output coproduct.
 */
trait CFlatMapWith[HF <: Poly2, C <: Coproduct, In]
{
  type Out <: Coproduct
  def apply(l: C, in: In): Out
}
/**
 * Derivation rules for [[CFlatMapWith]]: CNil maps to CNil, and a branch
 * `H :+: T` maps the inhabited side through `HF`, embedding the result into
 * the coproduct combining both possible outputs.
 */
object CFlatMapWith
{
  /** Summons an instance, refining `Out` via the Aux pattern. */
  def apply[HF <: Poly2, C <: Coproduct, In]
  (implicit mw: CFlatMapWith[HF, C, In]): Aux[HF, C, In, mw.Out] = mw

  /** Aux alias exposing the result type as a type parameter. */
  type Aux[HF <: Poly2, C <: Coproduct, A, Out0] =
    CFlatMapWith[HF, C, A] { type Out = Out0 }

  // Base case: CNil is uninhabited, so this apply can never actually run.
  implicit def cnilCFlatMapWith[HF <: Poly2, In]: Aux[HF, CNil, In, CNil] =
    new CFlatMapWith[HF, CNil, In] {
      type Out = CNil
      def apply(c: CNil, in: In): Out = c
    }

  // Inductive case: map the head branch with HF, or recurse into the tail;
  // ExtendBy widens either result into the combined coproduct of OutH and OutT.
  implicit def cpCFlatMapWith
  [HF <: Poly2, H, In, OutH <: Coproduct, OutT <: Coproduct, T <: Coproduct]
  (implicit
    f: Case2.Aux[HF, H, In, OutH],
    mt: CFlatMapWith.Aux[HF, T, In, OutT],
    extendBy: ExtendBy[OutH, OutT]
  ): Aux[HF, H :+: T, In, extendBy.Out] =
    new CFlatMapWith[HF, H :+: T, In] {
      type Out = extendBy.Out
      def apply(c: H :+: T, in: In): Out = c match {
        case Inl(h) => extendBy.right(f(h, in))
        case Inr(t) => extendBy.left(mt(t, in))
      }
    }
}
/** Syntax wrapper: `coproduct.flatMapWith(poly)(arg)`. */
final class CFlatMapWithOps[C <: Coproduct](self: C)
{
  /** Maps `f` over the inhabited branch, passing `a` as the second argument. */
  def flatMapWith[A, F <: Poly2](f: F)(a: A)
  (implicit mw: CFlatMapWith[F, C, A]): mw.Out =
    mw(self, a)
}
/** Mix in to get the implicit conversion adding flatMapWith syntax to Coproducts. */
trait ToCFlatMapWithOps
{
  implicit def ToCFlatMapWithOps[A <: Coproduct](a: A): CFlatMapWithOps[A] =
    new CFlatMapWithOps(a)
}
| tek/pulsar | main/src/lib_ext/shapeless/flat_map_with.scala | Scala | mit | 2,931 |
package im.actor.server.eventbus
import java.util.UUID
import akka.actor._
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings }
import akka.pattern.ask
import akka.util.Timeout
import im.actor.config.ActorConfig
import im.actor.types._
import scala.concurrent.Future
/** Client descriptors and event messages for the event bus. */
object EventBus {
  type EventBusId = String
  type DeviceId = Long

  /**
   * A participant on an event bus: either an internal actor (addressed by
   * ActorRef) or an external client (addressed by user id / auth id).
   */
  sealed trait Client {
    def isInternal: Boolean
    def isExternal: Boolean
    def externalUserId: Option[UserId]
    def externalAuthId: Option[AuthId]
    def internalActorRef: Option[ActorRef]
  }

  /** A server-side actor participating in the bus. */
  final case class InternalClient(ref: ActorRef) extends Client {
    override def isInternal: Boolean = true
    override def isExternal: Boolean = false
    override def externalUserId: Option[UserId] = None
    override def externalAuthId: Option[AuthId] = None
    override def internalActorRef = Some(ref)
  }

  /** An external (user device) client participating in the bus. */
  final case class ExternalClient(userId: UserId, authId: AuthId) extends Client {
    override def isInternal: Boolean = false
    override def isExternal: Boolean = true
    override def externalUserId: Option[UserId] = Some(userId)
    override def externalAuthId: Option[AuthId] = Some(authId)
    override def internalActorRef = None
  }

  // Events delivered to bus participants.
  final case class Message(id: String, client: Client, deviceId: Option[Long], message: Array[Byte])
  final case class Disposed(id: String)
  final case class Joined(id: String, client: Client, deviceId: Long)
  final case class Disconnected(id: String, client: Client, deviceId: Long)
}
/**
 * Akka extension exposing the event-bus API. Every operation is sent (ask
 * pattern) to a cluster-sharded EventBusMediator entity keyed by the bus id.
 */
final class EventBusExtension(system: ActorSystem) extends Extension {
  import EventBus._
  import EventBusMessages._
  import system.dispatcher

  private implicit val askTimeout = Timeout(ActorConfig.defaultTimeout)

  // Shard region hosting one EventBusMediator entity per bus id.
  private val region = ClusterSharding(system)
    .start(
      "EventBusMediator",
      EventBusMediator.props,
      ClusterShardingSettings(system),
      EventBusMediator.extractEntityId,
      EventBusMediator.extractShardId
    )

  /** Creates a bus for an external client; yields (bus id, creator's device id). */
  def create(
    clientUserId: UserId,
    clientAuthId: AuthId,
    timeout: Option[Long],
    isOwned: Option[Boolean]
  ): Future[(EventBusId, DeviceId)] =
    create(EventBus.ExternalClient(clientUserId, clientAuthId), timeout, isOwned)

  /** Creates a bus for an internal actor client. */
  def create(ref: ActorRef, timeout: Option[Long], isOwned: Option[Boolean]): Future[(String, EventBus.DeviceId)] =
    create(EventBus.InternalClient(ref), timeout, isOwned)

  /** Creates a bus under a fresh random UUID and asks the mediator to set it up. */
  def create(client: EventBus.Client, timeout: Option[Long], isOwned: Option[Boolean]): Future[(String, EventBus.DeviceId)] = {
    val id = UUID.randomUUID().toString
    (region ? EventBusEnvelope(id, Create(client, timeout, isOwned))).mapTo[CreateAck] map (ack ⇒ (id, ack.deviceId))
  }

  /** Sends Dispose for the bus `id` on behalf of an external client. */
  def dispose(clientUserId: UserId, clientAuthId: AuthId, id: String): Future[Unit] =
    (region ? EventBusEnvelope(id, Dispose(ExternalClient(clientUserId, clientAuthId)))) map (_ ⇒ ())

  /** Posts a message to the given destination devices on bus `id`. */
  def post(
    client: Client,
    id: String,
    destinations: Seq[Long],
    message: Array[Byte]
  ): Future[Unit] = (region ? EventBusEnvelope(id, Post(client, destinations, message))) map (_ ⇒ ())

  /** Sends KeepAlive for this client, optionally updating the timeout. */
  def keepAlive(client: Client, id: String, timeout: Option[Long]): Future[Unit] =
    region ? EventBusEnvelope(id, KeepAlive(client, timeout)) map (_ ⇒ ())

  /** Joins bus `id`; resolves to the device id from the mediator's JoinAck. */
  def join(client: Client, id: String, timeout: Option[Long]): Future[DeviceId] =
    (region ? EventBusEnvelope(id, Join(client, timeout))).mapTo[JoinAck] map (_.deviceId)

  /** Fetches the owner's device id for bus `id`. */
  def fetchOwner(id: String): Future[DeviceId] =
    (region ? EventBusEnvelope(id, FetchInfo)).mapTo[FetchInfoAck] map (_.ownerDeviceId)
}
/** ExtensionId provider: `EventBusExtension(system)` installs or looks up the extension. */
object EventBusExtension extends ExtensionId[EventBusExtension] with ExtensionIdProvider {
  override def createExtension(system: ExtendedActorSystem): EventBusExtension = new EventBusExtension(system)
  override def lookup(): ExtensionId[_ <: Extension] = EventBusExtension
} | ljshj/actor-platform | actor-server/actor-core/src/main/scala/im/actor/server/eventbus/EventBusExtension.scala | Scala | mit | 3,876 |
package play.api.libs.json
import scala.language.reflectiveCalls
import scala.reflect.macros.Context
import language.experimental.macros
/**
 * Scala 2.10 macro implementations behind `Json.reads` / `Json.writes` /
 * `Json.format`. Each macro locates the companion object's matching
 * `apply`/`unapply` pair, infers a Reads/Writes/Format implicit per
 * constructor parameter, and assembles a `(__ \ name).read/write/format`
 * combinator tree joined with `and`. Recursive case classes are supported
 * by routing self-references through a `LazyHelper` member named
 * "lazyStuff" on an anonymous class.
 */
object JsMacroImpl {

  /**
   * Materializes `Reads[A]` for a case class `A`. Option fields use
   * `readNullable`; recursive fields use `lazyRead` (wrapped in
   * `Reads.list/set/seq/map` for recursive collections). Aborts compilation
   * when apply/unapply or a required implicit is missing.
   */
  def readsImpl[A : c.WeakTypeTag](c: Context) : c.Expr[Reads[A]] = {
    import c.universe._
    import c.universe.Flag._

    val companioned = weakTypeOf[A].typeSymbol
    val companionSymbol = companioned.companionSymbol
    val companionType = companionSymbol.typeSignature

    val libsPkg = Select(Select(Ident(newTermName("play")), "api"), "libs")
    val jsonPkg = Select(libsPkg, "json")
    val functionalSyntaxPkg = Select(Select(libsPkg, "functional"), "syntax")
    val utilPkg = Select(jsonPkg, "util")

    val jsPathSelect = Select(jsonPkg, "JsPath")
    val readsSelect = Select(jsonPkg, "Reads")
    val unliftIdent = Select(functionalSyntaxPkg, "unlift")
    val lazyHelperSelect = Select(utilPkg, newTypeName("LazyHelper"))

    companionType.declaration(stringToTermName("unapply")) match {
      case NoSymbol => c.abort(c.enclosingPosition, "No unapply function found")
      case s =>
        val unapply = s.asMethod
        val unapplyReturnTypes = unapply.returnType match {
          case TypeRef(_, _, args) =>
            args.head match {
              case t @ TypeRef(_, _, Nil) => Some(List(t))
              case t @ TypeRef(_, _, args) =>
                if(t <:< typeOf[Option[_]]) Some(List(t))
                else if(t <:< typeOf[Seq[_]]) Some(List(t))
                else if(t <:< typeOf[Set[_]]) Some(List(t))
                else if(t <:< typeOf[Map[_,_]]) Some(List(t))
                else if(t <:< typeOf[Product]) Some(args)
              case _ => None
            }
          case _ => None
        }
        //println("Unapply return type:" + unapply.returnType)

        companionType.declaration(stringToTermName("apply")) match {
          case NoSymbol => c.abort(c.enclosingPosition, "No apply function found")
          case s =>
            // searches apply method corresponding to unapply
            val applies = s.asMethod.alternatives
            val apply = applies.collectFirst{
              case (apply: MethodSymbol) if(apply.paramss.headOption.map(_.map(_.asTerm.typeSignature)) == unapplyReturnTypes) => apply
            }
            apply match {
              case Some(apply) =>
                //println("apply found:" + apply)
                val params = apply.paramss.head //verify there is a single parameter group

                val inferedImplicits = params.map(_.typeSignature).map{ implType =>
                  val (isRecursive, tpe) = implType match {
                    case TypeRef(_, t, args) =>
                      // Option[_] needs special treatment because we need to use XXXOpt
                      if(implType.typeConstructor <:< typeOf[Option[_]].typeConstructor)
                        (args.exists{ a => a.typeSymbol == companioned }, args.head)
                      else (args.exists{ a => a.typeSymbol == companioned }, implType)
                    case TypeRef(_, t, _) =>
                      (false, implType)
                  }

                  // builds reads implicit from expected type
                  val neededImplicitType = appliedType(weakTypeOf[Reads[_]].typeConstructor, tpe::Nil)
                  // infers implicit
                  val neededImplicit = c.inferImplicitValue(neededImplicitType)
                  (implType, neededImplicit, isRecursive, tpe)
                }

                // if any implicit is missing, abort
                // else goes on
                inferedImplicits.collect { case (t, impl, rec, _) if(impl == EmptyTree && !rec) => t } match {
                  case List() =>
                    val namedImplicits = params.map(_.name).zip(inferedImplicits)
                    //println("Found implicits:"+namedImplicits)

                    val helperMember = Select( This(tpnme.EMPTY), "lazyStuff")
                    var hasRec = false

                    // combines all reads into CanBuildX
                    val canBuild = namedImplicits.map {
                      case (name, (t, impl, rec, tpe)) =>
                        // inception of (__ \ name).read(impl)
                        val jspathTree = Apply(
                          Select( jsPathSelect, scala.reflect.NameTransformer.encode("\\")),
                          List(Literal(Constant(name.decoded)))
                        )

                        if(!rec) {
                          val readTree =
                            if(t.typeConstructor <:< typeOf[Option[_]].typeConstructor)
                              Apply(
                                Select( jspathTree, "readNullable" ),
                                List( impl )
                              )
                            else Apply(
                              Select( jspathTree, "read" ),
                              List( impl )
                            )

                          readTree
                        } else {
                          hasRec = true
                          val readTree =
                            if(t.typeConstructor <:< typeOf[Option[_]].typeConstructor)
                              Apply(
                                Select( jspathTree, "readNullable" ),
                                List(
                                  Apply(
                                    Select(Apply(jsPathSelect, List()), "lazyRead"),
                                    List(helperMember)
                                  )
                                )
                              )
                            else {
                              Apply(
                                Select( jspathTree, "lazyRead" ),
                                if(tpe.typeConstructor <:< typeOf[List[_]].typeConstructor)
                                  List(
                                    Apply(
                                      Select(readsSelect, "list"),
                                      List(helperMember)
                                    )
                                  )
                                else if(tpe.typeConstructor <:< typeOf[Set[_]].typeConstructor)
                                  List(
                                    Apply(
                                      Select(readsSelect, "set"),
                                      List(helperMember)
                                    )
                                  )
                                else if(tpe.typeConstructor <:< typeOf[Seq[_]].typeConstructor)
                                  List(
                                    Apply(
                                      Select(readsSelect, "seq"),
                                      List(helperMember)
                                    )
                                  )
                                else if(tpe.typeConstructor <:< typeOf[Map[_, _]].typeConstructor)
                                  List(
                                    Apply(
                                      Select(readsSelect, "map"),
                                      List(helperMember)
                                    )
                                  )
                                else List(helperMember)
                              )
                            }
                          readTree
                        }
                    }.reduceLeft{ (acc, r) =>
                      Apply(
                        Select(acc, "and"),
                        List(r)
                      )
                    }

                    // builds the final Reads using apply method
                    val applyMethod =
                      Function(
                        params.foldLeft(List[ValDef]())((l, e) =>
                          l :+ ValDef(Modifiers(PARAM), newTermName(e.name.encoded), TypeTree(), EmptyTree)
                        ),
                        Apply(
                          Select(Ident(companionSymbol.name), newTermName("apply")),
                          params.foldLeft(List[Tree]())((l, e) =>
                            l :+ Ident(newTermName(e.name.encoded))
                          )
                        )
                      )

                    val unapplyMethod = Apply(
                      unliftIdent,
                      List(
                        Select( Ident( companionSymbol.name ), unapply.name )
                      )
                    )

                    // if case class has one single field, needs to use inmap instead of canbuild.apply
                    val finalTree = if(params.length > 1) {
                      Apply(
                        Select(canBuild, "apply"),
                        List(applyMethod)
                      )
                    } else {
                      Apply(
                        Select(canBuild, "map"),
                        List(applyMethod)
                      )
                    }
                    //println("finalTree: "+finalTree)

                    if(!hasRec) {
                      val block = Block(
                        Import(functionalSyntaxPkg, List(ImportSelector(nme.WILDCARD, -1, null, -1))),
                        finalTree
                      )
                      //println("block:"+block)
                      /*val reif = reify(
                        /*new play.api.libs.json.util.LazyHelper[Format, A] {
                          override lazy val lazyStuff: Format[A] = null
                        }*/
                      )
                      println("RAW:"+showRaw(reif.tree, printKinds = true))*/
                      c.Expr[Reads[A]](block)
                    } else {
                      val helper = newTermName("helper")
                      val helperVal = ValDef(
                        Modifiers(),
                        helper,
                        TypeTree(weakTypeOf[play.api.libs.json.util.LazyHelper[Reads, A]]),
                        Apply(lazyHelperSelect, List(finalTree))
                      )

                      val block = Select(
                        Block(
                          Import(functionalSyntaxPkg, List(ImportSelector(nme.WILDCARD, -1, null, -1))),
                          ClassDef(
                            Modifiers(Flag.FINAL),
                            newTypeName("$anon"),
                            List(),
                            Template(
                              List(
                                AppliedTypeTree(
                                  lazyHelperSelect,
                                  List(
                                    Ident(weakTypeOf[Reads[A]].typeSymbol),
                                    Ident(weakTypeOf[A].typeSymbol)
                                  )
                                )
                              ),
                              emptyValDef,
                              List(
                                DefDef(
                                  Modifiers(),
                                  nme.CONSTRUCTOR,
                                  List(),
                                  List(List()),
                                  TypeTree(),
                                  Block(
                                    Apply(
                                      Select(Super(This(tpnme.EMPTY), tpnme.EMPTY), nme.CONSTRUCTOR),
                                      List()
                                    )
                                  )
                                ),
                                ValDef(
                                  Modifiers(Flag.OVERRIDE | Flag.LAZY),
                                  newTermName("lazyStuff"),
                                  AppliedTypeTree(Ident(weakTypeOf[Reads[A]].typeSymbol), List(TypeTree(weakTypeOf[A]))),
                                  finalTree
                                )
                              )
                            )
                          ),
                          Apply(Select(New(Ident(newTypeName("$anon"))), nme.CONSTRUCTOR), List())
                        ),
                        newTermName("lazyStuff")
                      )
                      //println("block:"+block)
                      c.Expr[Reads[A]](block)
                    }
                  case l => c.abort(c.enclosingPosition, s"No implicit Reads for ${l.mkString(", ")} available.")
                }
              case None => c.abort(c.enclosingPosition, "No apply function found matching unapply return types")
            }
        }
    }
  }

  /**
   * Materializes `Writes[A]`. Mirrors [[readsImpl]] but builds
   * `write`/`writeNullable`/`lazyWrite` trees combined with the companion's
   * unlifted `unapply` (via `contramap` for single-field case classes).
   */
  def writesImpl[A : c.WeakTypeTag](c: Context) : c.Expr[Writes[A]] = {
    import c.universe._
    import c.universe.Flag._

    val companioned = weakTypeOf[A].typeSymbol
    val companionSymbol = companioned.companionSymbol
    val companionType = companionSymbol.typeSignature

    val libsPkg = Select(Select(Ident(newTermName("play")), "api"), "libs")
    val jsonPkg = Select(libsPkg, "json")
    val functionalSyntaxPkg = Select(Select(libsPkg, "functional"), "syntax")
    val utilPkg = Select(jsonPkg, "util")

    val jsPathSelect = Select(jsonPkg, "JsPath")
    val writesSelect = Select(jsonPkg, "Writes")
    val unliftIdent = Select(functionalSyntaxPkg, "unlift")
    val lazyHelperSelect = Select(utilPkg, newTypeName("LazyHelper"))

    companionType.declaration(stringToTermName("unapply")) match {
      case NoSymbol => c.abort(c.enclosingPosition, "No unapply function found")
      case s =>
        val unapply = s.asMethod
        val unapplyReturnTypes = unapply.returnType match {
          case TypeRef(_, _, args) =>
            args.head match {
              case t @ TypeRef(_, _, Nil) => Some(List(t))
              case t @ TypeRef(_, _, args) =>
                if(t <:< typeOf[Option[_]]) Some(List(t))
                else if(t <:< typeOf[Seq[_]]) Some(List(t))
                else if(t <:< typeOf[Set[_]]) Some(List(t))
                else if(t <:< typeOf[Map[_,_]]) Some(List(t))
                else if(t <:< typeOf[Product]) Some(args)
              case _ => None
            }
          case _ => None
        }
        //println("Unapply return type:" + unapplyReturnTypes)

        companionType.declaration(stringToTermName("apply")) match {
          case NoSymbol => c.abort(c.enclosingPosition, "No apply function found")
          case s =>
            // searches apply method corresponding to unapply
            val applies = s.asMethod.alternatives
            val apply = applies.collectFirst{
              case (apply: MethodSymbol) if(apply.paramss.headOption.map(_.map(_.asTerm.typeSignature)) == unapplyReturnTypes) => apply
            }
            apply match {
              case Some(apply) =>
                //println("apply found:" + apply)
                val params = apply.paramss.head //verify there is a single parameter group

                val inferedImplicits = params.map(_.typeSignature).map{ implType =>
                  val (isRecursive, tpe) = implType match {
                    case TypeRef(_, t, args) =>
                      // Option[_] needs special treatment because we need to use XXXOpt
                      if(implType.typeConstructor <:< typeOf[Option[_]].typeConstructor)
                        (args.exists{ a => a.typeSymbol == companioned }, args.head)
                      else (args.exists{ a => a.typeSymbol == companioned }, implType)
                    case TypeRef(_, t, _) =>
                      (false, implType)
                  }

                  // builds reads implicit from expected type
                  val neededImplicitType = appliedType(weakTypeOf[Writes[_]].typeConstructor, tpe::Nil)
                  // infers implicit
                  val neededImplicit = c.inferImplicitValue(neededImplicitType)
                  (implType, neededImplicit, isRecursive, tpe)
                }

                // if any implicit is missing, abort
                // else goes on
                inferedImplicits.collect { case (t, impl, rec, _) if(impl == EmptyTree && !rec) => t } match {
                  case List() =>
                    val namedImplicits = params.map(_.name).zip(inferedImplicits)
                    //println("Found implicits:"+namedImplicits)

                    val helperMember = Select( This(tpnme.EMPTY), "lazyStuff")
                    var hasRec = false

                    // combines all reads into CanBuildX
                    val canBuild = namedImplicits.map {
                      case (name, (t, impl, rec, tpe)) =>
                        // inception of (__ \ name).read(impl)
                        val jspathTree = Apply(
                          Select( jsPathSelect, scala.reflect.NameTransformer.encode("\\")),
                          List(Literal(Constant(name.decoded)))
                        )

                        if(!rec) {
                          val writesTree =
                            if(t.typeConstructor <:< typeOf[Option[_]].typeConstructor)
                              Apply(
                                Select( jspathTree, "writeNullable" ),
                                List( impl )
                              )
                            else Apply(
                              Select( jspathTree, "write" ),
                              List( impl )
                            )

                          writesTree
                        } else {
                          hasRec = true
                          val writesTree =
                            if(t.typeConstructor <:< typeOf[Option[_]].typeConstructor)
                              Apply(
                                Select( jspathTree, "writeNullable" ),
                                List(
                                  Apply(
                                    Select(Apply(jsPathSelect, List()), "lazyWrite"),
                                    List(helperMember)
                                  )
                                )
                              )
                            else {
                              Apply(
                                Select( jspathTree, "lazyWrite" ),
                                if(tpe.typeConstructor <:< typeOf[List[_]].typeConstructor)
                                  List(
                                    Apply(
                                      Select(writesSelect, "list"),
                                      List(helperMember)
                                    )
                                  )
                                else if(tpe.typeConstructor <:< typeOf[Set[_]].typeConstructor)
                                  List(
                                    Apply(
                                      Select(writesSelect, "set"),
                                      List(helperMember)
                                    )
                                  )
                                else if(tpe.typeConstructor <:< typeOf[Seq[_]].typeConstructor)
                                  List(
                                    Apply(
                                      Select(writesSelect, "seq"),
                                      List(helperMember)
                                    )
                                  )
                                else if(tpe.typeConstructor <:< typeOf[Map[_, _]].typeConstructor)
                                  List(
                                    Apply(
                                      Select(writesSelect, "map"),
                                      List(helperMember)
                                    )
                                  )
                                else List(helperMember)
                              )
                            }
                          writesTree
                        }
                    }.reduceLeft{ (acc, r) =>
                      Apply(
                        Select(acc, "and"),
                        List(r)
                      )
                    }

                    // builds the final Reads using apply method
                    //val applyMethod = Ident( companionSymbol.name )
                    val applyMethod =
                      Function(
                        params.foldLeft(List[ValDef]())((l, e) =>
                          l :+ ValDef(Modifiers(PARAM), newTermName(e.name.encoded), TypeTree(), EmptyTree)
                        ),
                        Apply(
                          Select(Ident(companionSymbol.name), newTermName("apply")),
                          params.foldLeft(List[Tree]())((l, e) =>
                            l :+ Ident(newTermName(e.name.encoded))
                          )
                        )
                      )

                    val unapplyMethod = Apply(
                      unliftIdent,
                      List(
                        Select( Ident( companionSymbol.name ), unapply.name )
                      )
                    )

                    // if case class has one single field, needs to use inmap instead of canbuild.apply
                    val finalTree = if(params.length > 1) {
                      Apply(
                        Select(canBuild, "apply"),
                        List(unapplyMethod)
                      )
                    } else {
                      Apply(
                        Select(canBuild, "contramap"),
                        List(unapplyMethod)
                      )
                    }
                    //println("finalTree: "+finalTree)

                    if(!hasRec) {
                      val block = Block(
                        Import(functionalSyntaxPkg, List(ImportSelector(nme.WILDCARD, -1, null, -1))),
                        finalTree
                      )
                      //println("block:"+block)
                      c.Expr[Writes[A]](block)
                    } else {
                      val helper = newTermName("helper")
                      val helperVal = ValDef(
                        Modifiers(),
                        helper,
                        TypeTree(weakTypeOf[play.api.libs.json.util.LazyHelper[Writes, A]]),
                        Apply(lazyHelperSelect, List(finalTree))
                      )

                      val block = Select(
                        Block(
                          Import(functionalSyntaxPkg, List(ImportSelector(nme.WILDCARD, -1, null, -1))),
                          ClassDef(
                            Modifiers(Flag.FINAL),
                            newTypeName("$anon"),
                            List(),
                            Template(
                              List(
                                AppliedTypeTree(
                                  lazyHelperSelect,
                                  List(
                                    Ident(weakTypeOf[Writes[A]].typeSymbol),
                                    Ident(weakTypeOf[A].typeSymbol)
                                  )
                                )
                              ),
                              emptyValDef,
                              List(
                                DefDef(
                                  Modifiers(),
                                  nme.CONSTRUCTOR,
                                  List(),
                                  List(List()),
                                  TypeTree(),
                                  Block(
                                    Apply(
                                      Select(Super(This(tpnme.EMPTY), tpnme.EMPTY), nme.CONSTRUCTOR),
                                      List()
                                    )
                                  )
                                ),
                                ValDef(
                                  Modifiers(Flag.OVERRIDE | Flag.LAZY),
                                  newTermName("lazyStuff"),
                                  AppliedTypeTree(Ident(weakTypeOf[Writes[A]].typeSymbol), List(TypeTree(weakTypeOf[A]))),
                                  finalTree
                                )
                              )
                            )
                          ),
                          Apply(Select(New(Ident(newTypeName("$anon"))), nme.CONSTRUCTOR), List())
                        ),
                        newTermName("lazyStuff")
                      )
                      //println("block:"+block)
                      /*val reif = reify(
                        new play.api.libs.json.util.LazyHelper[Format, A] {
                          override lazy val lazyStuff: Format[A] = null
                        }
                      )
                      //println("RAW:"+showRaw(reif.tree, printKinds = true))*/
                      c.Expr[Writes[A]](block)
                    }
                  case l => c.abort(c.enclosingPosition, s"No implicit Writes for ${l.mkString(", ")} available.")
                }
              case None => c.abort(c.enclosingPosition, "No apply function found matching unapply parameters")
            }
        }
    }
  }

  /**
   * Materializes `Format[A]`. Combines the reads and writes tree shapes:
   * `format`/`formatNullable`/`lazyFormat`, with recursive collection fields
   * passing both a Reads and a Writes helper, joined via `apply` (or `inmap`
   * for single-field case classes) over apply/unlifted-unapply.
   */
  def formatImpl[A : c.WeakTypeTag](c: Context) : c.Expr[Format[A]] = {
    import c.universe._
    import c.universe.Flag._

    val companioned = weakTypeOf[A].typeSymbol
    val companionSymbol = companioned.companionSymbol
    val companionType = companionSymbol.typeSignature

    val libsPkg = Select(Select(Ident(newTermName("play")), "api"), "libs")
    val jsonPkg = Select(libsPkg, "json")
    val functionalSyntaxPkg = Select(Select(libsPkg, "functional"), "syntax")
    val utilPkg = Select(jsonPkg, "util")

    val jsPathSelect = Select(jsonPkg, "JsPath")
    val readsSelect = Select(jsonPkg, "Reads")
    val writesSelect = Select(jsonPkg, "Writes")
    val unliftIdent = Select(functionalSyntaxPkg, "unlift")
    val lazyHelperSelect = Select(utilPkg, newTypeName("LazyHelper"))

    companionType.declaration(stringToTermName("unapply")) match {
      case NoSymbol => c.abort(c.enclosingPosition, "No unapply function found")
      case s =>
        val unapply = s.asMethod
        val unapplyReturnTypes = unapply.returnType match {
          case TypeRef(_, _, args) =>
            args.head match {
              case t @ TypeRef(_, _, Nil) => Some(List(t))
              case t @ TypeRef(_, _, args) =>
                if(t <:< typeOf[Option[_]]) Some(List(t))
                else if(t <:< typeOf[Seq[_]]) Some(List(t))
                else if(t <:< typeOf[Set[_]]) Some(List(t))
                else if(t <:< typeOf[Map[_,_]]) Some(List(t))
                else if(t <:< typeOf[Product]) Some(args)
              case _ => None
            }
          case _ => None
        }
        //println("Unapply return type:" + unapplyReturnTypes)

        companionType.declaration(stringToTermName("apply")) match {
          case NoSymbol => c.abort(c.enclosingPosition, "No apply function found")
          case s =>
            // searches apply method corresponding to unapply
            val applies = s.asMethod.alternatives
            val apply = applies.collectFirst{
              case (apply: MethodSymbol) if(apply.paramss.headOption.map(_.map(_.asTerm.typeSignature)) == unapplyReturnTypes) => apply
            }
            apply match {
              case Some(apply) =>
                //println("apply found:" + apply)
                val params = apply.paramss.head //verify there is a single parameter group

                val inferedImplicits = params.map(_.typeSignature).map{ implType =>
                  val (isRecursive, tpe) = implType match {
                    case TypeRef(_, t, args) =>
                      // Option[_] needs special treatment because we need to use XXXOpt
                      if(implType.typeConstructor <:< typeOf[Option[_]].typeConstructor)
                        (args.exists{ a => a.typeSymbol == companioned }, args.head)
                      else (args.exists{ a => a.typeSymbol == companioned }, implType)
                    case TypeRef(_, t, _) =>
                      (false, implType)
                  }

                  // builds reads implicit from expected type
                  val neededImplicitType = appliedType(weakTypeOf[Format[_]].typeConstructor, tpe::Nil)
                  // infers implicit
                  val neededImplicit = c.inferImplicitValue(neededImplicitType)
                  (implType, neededImplicit, isRecursive, tpe)
                }

                // if any implicit is missing, abort
                // else goes on
                inferedImplicits.collect { case (t, impl, rec, _) if(impl == EmptyTree && !rec) => t } match {
                  case List() =>
                    val namedImplicits = params.map(_.name).zip(inferedImplicits)
                    //println("Found implicits:"+namedImplicits)

                    val helperMember = Select( This(tpnme.EMPTY), "lazyStuff")
                    var hasRec = false

                    // combines all reads into CanBuildX
                    val canBuild = namedImplicits.map {
                      case (name, (t, impl, rec, tpe)) =>
                        // inception of (__ \ name).read(impl)
                        val jspathTree = Apply(
                          Select( jsPathSelect, scala.reflect.NameTransformer.encode("\\")),
                          List(Literal(Constant(name.decoded)))
                        )

                        if(!rec) {
                          val formatTree =
                            if(t.typeConstructor <:< typeOf[Option[_]].typeConstructor)
                              Apply(
                                Select( jspathTree, "formatNullable" ),
                                List( impl )
                              )
                            else Apply(
                              Select( jspathTree, "format" ),
                              List( impl )
                            )

                          formatTree
                        } else {
                          hasRec = true
                          val formatTree =
                            if(t.typeConstructor <:< typeOf[Option[_]].typeConstructor)
                              Apply(
                                Select( jspathTree, "formatNullable" ),
                                List(
                                  Apply(
                                    Select(Apply(jsPathSelect, List()), "lazyFormat"),
                                    List(helperMember)
                                  )
                                )
                              )
                            else {
                              Apply(
                                Select( jspathTree, "lazyFormat" ),
                                if(tpe.typeConstructor <:< typeOf[List[_]].typeConstructor)
                                  List(
                                    Apply(
                                      Select(readsSelect, "list"),
                                      List(helperMember)
                                    ),
                                    Apply(
                                      Select(writesSelect, "list"),
                                      List(helperMember)
                                    )
                                  )
                                else if(tpe.typeConstructor <:< typeOf[Set[_]].typeConstructor)
                                  List(
                                    Apply(
                                      Select(readsSelect, "set"),
                                      List(helperMember)
                                    ),
                                    Apply(
                                      Select(writesSelect, "set"),
                                      List(helperMember)
                                    )
                                  )
                                else if(tpe.typeConstructor <:< typeOf[Seq[_]].typeConstructor)
                                  List(
                                    Apply(
                                      Select(readsSelect, "seq"),
                                      List(helperMember)
                                    ),
                                    Apply(
                                      Select(writesSelect, "seq"),
                                      List(helperMember)
                                    )
                                  )
                                else if(tpe.typeConstructor <:< typeOf[Map[_, _]].typeConstructor)
                                  List(
                                    Apply(
                                      Select(readsSelect, "map"),
                                      List(helperMember)
                                    ),
                                    Apply(
                                      Select(writesSelect, "map"),
                                      List(helperMember)
                                    )
                                  )
                                else List(helperMember)
                              )
                            }
                          formatTree
                        }
                    }.reduceLeft{ (acc, r) =>
                      Apply(
                        Select(acc, "and"),
                        List(r)
                      )
                    }

                    // builds the final Reads using apply method
                    //val applyMethod = Ident( companionSymbol.name )
                    val applyMethod =
                      Function(
                        params.foldLeft(List[ValDef]())((l, e) =>
                          l :+ ValDef(Modifiers(PARAM), newTermName(e.name.encoded), TypeTree(), EmptyTree)
                        ),
                        Apply(
                          Select(Ident(companionSymbol.name), newTermName("apply")),
                          params.foldLeft(List[Tree]())((l, e) =>
                            l :+ Ident(newTermName(e.name.encoded))
                          )
                        )
                      )

                    val unapplyMethod = Apply(
                      unliftIdent,
                      List(
                        Select( Ident( companionSymbol.name ), unapply.name )
                      )
                    )

                    // if case class has one single field, needs to use inmap instead of canbuild.apply
                    val finalTree = if(params.length > 1) {
                      Apply(
                        Select(canBuild, "apply"),
                        List(applyMethod, unapplyMethod)
                      )
                    } else {
                      Apply(
                        Select(canBuild, "inmap"),
                        List(applyMethod, unapplyMethod)
                      )
                    }
                    //println("finalTree: "+finalTree)

                    if(!hasRec) {
                      val block = Block(
                        Import(functionalSyntaxPkg, List(ImportSelector(nme.WILDCARD, -1, null, -1))),
                        finalTree
                      )
                      //println("block:"+block)
                      c.Expr[Format[A]](block)
                    } else {
                      val helper = newTermName("helper")
                      val helperVal = ValDef(
                        Modifiers(),
                        helper,
                        Ident(weakTypeOf[play.api.libs.json.util.LazyHelper[Format, A]].typeSymbol),
                        Apply(Ident(newTermName("LazyHelper")), List(finalTree))
                      )

                      val block = Select(
                        Block(
                          Import(functionalSyntaxPkg, List(ImportSelector(nme.WILDCARD, -1, null, -1))),
                          ClassDef(
                            Modifiers(Flag.FINAL),
                            newTypeName("$anon"),
                            List(),
                            Template(
                              List(
                                AppliedTypeTree(
                                  lazyHelperSelect,
                                  List(
                                    Ident(weakTypeOf[Format[A]].typeSymbol),
                                    Ident(weakTypeOf[A].typeSymbol)
                                  )
                                )
                              ),
                              emptyValDef,
                              List(
                                DefDef(
                                  Modifiers(),
                                  nme.CONSTRUCTOR,
                                  List(),
                                  List(List()),
                                  TypeTree(),
                                  Block(
                                    Apply(
                                      Select(Super(This(tpnme.EMPTY), tpnme.EMPTY), nme.CONSTRUCTOR),
                                      List()
                                    )
                                  )
                                ),
                                ValDef(
                                  Modifiers(Flag.OVERRIDE | Flag.LAZY),
                                  newTermName("lazyStuff"),
                                  AppliedTypeTree(Ident(weakTypeOf[Format[A]].typeSymbol), List(TypeTree(weakTypeOf[A]))),
                                  finalTree
                                )
                              )
                            )
                          ),
                          Apply(Select(New(Ident(newTypeName("$anon"))), nme.CONSTRUCTOR), List())
                        ),
                        newTermName("lazyStuff")
                      )
                      //println("block:"+block)
                      /*val reif = reify(
                        new play.api.libs.json.util.LazyHelper[Format, A] {
                          override lazy val lazyStuff: Format[A] = null
                        }
                      )
                      //println("RAW:"+showRaw(reif.tree, printKinds = true))*/
                      c.Expr[Format[A]](block)
                    }
                  case l => c.abort(c.enclosingPosition, s"No implicit format for ${l.mkString(", ")} available.")
                }
              case None => c.abort(c.enclosingPosition, "No apply function found matching unapply parameters")
            }
        }
    }
  }
} | noel-yap/setter-for-catan | play-2.1.1/framework/src/play/src/main/scala/play/api/libs/json/JsMacroImpl.scala | Scala | apache-2.0 | 39,435 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.utils.minhash
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
/**
* This object presents several methods for determining approximate pair-wise
* Jaccard similarity through the use of MinHash signatures. A description of
* this algorithm can be found in chapter 3 of:
*
* Rajaraman, Anand, and Jeffrey David Ullman. Mining of massive datasets.
* Cambridge University Press, 2011.
*
* This chapter may be freely (and legally) downloaded from:
*
* http://infolab.stanford.edu/~ullman/mmds/ch3.pdf
*/
object MinHash extends Serializable {

  /**
   * Performs an exhaustive ("all-pairs") MinHash similarity computation.
   * Every element is signed once, then every pair of signed elements is
   * scored; the signature comparison itself still only approximates
   * Jaccard similarity.
   *
   * @note The cartesian product is expensive (O(n^2) pairs) and includes
   *       self-pairs as well as both orderings of each pair.
   *
   * @tparam T any type implementing the MinHashable trait
   * @param rdd the RDD of data points to compute similarity on
   * @param signatureLength the length of MinHash signature to use
   * @param randomSeed an optional seed for random number generation
   * @return an RDD of (similarity, (elem1, elem2)) tuples for all pairs
   */
  def exactMinHash[T <: MinHashable](rdd: RDD[T],
                                     signatureLength: Int,
                                     randomSeed: Option[Long] = None): RDD[(Double, (T, T))] = {
    val signed = generateSignatures(rdd, signatureLength, randomSeed)

    // pair every signed element with every other and score each pair
    signed.cartesian(signed).map {
      case ((sigA, itemA), (sigB, itemB)) =>
        (sigA.similarity(sigB), (itemA, itemB))
    }
  }

  /**
   * Performs an LSH-accelerated ("approximate all-pairs") MinHash similarity
   * computation, following section 3.4.1 of the Ullman text. Signatures are
   * split into _b_ bands of _r_ rows each (b * r = signature length); only
   * elements that share at least one band bucket are compared, which is
   * expected to catch all pairs with similarity above (1/b)^(1/r).
   *
   * @throws IllegalArgumentException if the band count does not divide the
   *                                  signature length evenly
   *
   * @tparam T any type implementing the MinHashable trait
   * @param rdd the RDD of data points to compute similarity on
   * @param signatureLength the length of MinHash signature to use
   * @param bands the number of bands to use for LSHing
   * @param randomSeed an optional seed for random number generation
   * @return an RDD of (similarity, (elem1, elem2)) tuples for candidate pairs
   */
  def approximateMinHash[T <: MinHashable](rdd: RDD[T],
                                           signatureLength: Int,
                                           bands: Int,
                                           randomSeed: Option[Long] = None): RDD[(Double, (T, T))] = {
    val signed = generateSignatures(rdd, signatureLength, randomSeed)

    if (signatureLength % bands != 0) {
      throw new IllegalArgumentException("Signature length must divide roundly by the band count.")
    }
    val bandLength = signatureLength / bands

    // replicate each element into one keyed record per band bucket, then
    // gather every record that landed in the same bucket
    val buckets = signed.flatMap {
      case (signature, item) =>
        signature.bucket(bandLength).map(b => (b, (signature, item)))
    }.groupByKey()

    // score each distinct pair within a bucket
    buckets.flatMap {
      case (bucket, entries) =>
        val members = entries.toArray
        val scored = for {
          i <- (0 until members.length).toList
          j <- (i + 1) until members.length
          (sigA, itemA) = members(i)
          (sigB, itemB) = members(j)
          // only emit from the first bucket the two signatures share, so a
          // pair colliding in several bands is not reported multiple times
          if MinHashSignature.firstBucket(sigA, sigB, bandLength).exists(_.equals(bucket))
        } yield (sigA.similarity(sigB), (itemA, itemB))
        // the original accumulated results by prepending to a list; reverse
        // the yielded list to preserve the exact emission order
        scored.reverse
    }
  }

  /**
   * @param randomSeed an optional random seed
   * @return the provided seed, or the current system time in milliseconds
   *         when none was given
   */
  private def getSeed(randomSeed: Option[Long]): Long =
    randomSeed.getOrElse(System.currentTimeMillis())

  /**
   * Keys each element of the input RDD by its MinHash signature, using a
   * freshly generated hash store.
   *
   * @param rdd the RDD to generate signatures for
   * @param signatureLength the desired signature length
   * @param randomSeed an optional seed for randomization
   * @return an RDD of (signature, element) pairs
   */
  private def generateSignatures[T <: MinHashable](rdd: RDD[T],
                                                   signatureLength: Int,
                                                   randomSeed: Option[Long]): RDD[(MinHashSignature, T)] = {
    val hashes = HashStore(signatureLength, getSeed(randomSeed))
    rdd.keyBy(_.minHash(hashes))
  }
}
| tdanford/bdg-utils | utils-minhash/src/main/scala/org/bdgenomics/utils/minhash/MinHash.scala | Scala | apache-2.0 | 7,267 |
package com.nitin.nizhawan.decompiler.structures.constantpool
import com.nitin.nizhawan.decompiler.main.ByteReader
/**
* Created by nitin on 13/12/15.
*/
class ConstantPool(br:ByteReader,size:Int){

  // Slot 0 is filled with a placeholder entry; the remaining entries are read
  // sequentially from the byte stream (the reader is consumed in index order,
  // so the iteration order below matters).
  val poolEntries = new Array[ConstPoolEntry](size)
  poolEntries(0) = new UnknownConstPoolEntry(0,br,this)
  (1 until size).foreach { idx =>
    poolEntries(idx) = ConstPoolEntry(br, this)
  }

  /** Info string of the constant-pool entry at index `idx`. */
  def str(idx: Int) = poolEntries(idx).info

  /** All real entries (placeholder at slot 0 excluded), one per line. */
  override def toString =
    poolEntries.drop(1).map(_.toString).mkString("\\n")
} | nitin-nizhawan/jedi | src/com/nitin/nizhawan/decompiler/structures/constantpool/ConstantPool.scala | Scala | artistic-2.0 | 513 |
/*
* Copyright 2015 – 2016 Martin Seeler
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.oanda.orders
import cats.data.Xor
import io.circe.DecodingFailure
import io.circe.parser._
import org.scalatest._
import rx.oanda.utils.Buy
class OrderDecoderSpec extends FlatSpec with Matchers {

  // NOTE: the class-level `json` and `ordersJson` fixture vals that used to
  // live here were dead code — every test defines its own local fixture
  // (shadowing `json`) and `ordersJson` was never referenced — so they have
  // been removed.

  behavior of "The Order Decoder"

  // A complete order payload must decode into an Order with every field set.
  it must "parse an order from valid json" in {
    val json =
      """
        |{
        |  "id": 43211,
        |  "instrument": "EUR_USD",
        |  "units": 5,
        |  "side": "buy",
        |  "type": "limit",
        |  "time": "1453326442000000",
        |  "price": 1.45123,
        |  "takeProfit": 1.7,
        |  "stopLoss": 1.4,
        |  "expiry": "1453330035000000",
        |  "upperBound": 0,
        |  "lowerBound": 0,
        |  "trailingStop": 10
        |}
      """.stripMargin

    decode[Order](json) should matchPattern {
      case Xor.Right(Order(43211, "EUR_USD", 5, Buy, "limit", 1453326442000000L, 1.45123, 1.7, 1.4, 1453330035000000L, 0, 0, 10)) ⇒
    }
  }

  // A payload missing a required field ("time" here) must yield a DecodingFailure.
  it must "fail on missing property" in {
    val json =
      """
        |{
        |  "id": 43211,
        |  "instrument": "EUR_USD",
        |  "units": 5,
        |  "side": "buy",
        |  "type": "limit",
        |  "price": 1.45123,
        |  "takeProfit": 1.7,
        |  "stopLoss": 1.4,
        |  "expiry": "1453330035000000",
        |  "upperBound": 0,
        |  "lowerBound": 0,
        |  "trailingStop": 10
        |}
      """.stripMargin

    decode[Order](json) should matchPattern {
      case Xor.Left(e: DecodingFailure) ⇒ //...
    }
  }

  // A wrapped "orders" array must decode into a Vector[Order] preserving order.
  it must "parse the orders list from valid json" in {
    val json =
      """
        |{
        |  "orders" : [
        |    {
        |      "id" : 175427639,
        |      "instrument" : "EUR_USD",
        |      "units" : 20,
        |      "side" : "buy",
        |      "type" : "marketIfTouched",
        |      "time" : 1453326442000000,
        |      "price" : 1,
        |      "takeProfit" : 0,
        |      "stopLoss" : 0,
        |      "expiry" : 1453330035000000,
        |      "upperBound" : 0,
        |      "lowerBound" : 0,
        |      "trailingStop" : 0
        |    },
        |    {
        |      "id": 43211,
        |      "instrument": "EUR_CAD",
        |      "units": 5,
        |      "side": "buy",
        |      "type": "limit",
        |      "time": "1453326442000000",
        |      "price": 1.45123,
        |      "takeProfit": 1.7,
        |      "stopLoss": 1.4,
        |      "expiry": "1453330035000000",
        |      "upperBound": 0,
        |      "lowerBound": 0,
        |      "trailingStop": 10
        |    }
        |  ]
        |}
      """.stripMargin

    decode[Vector[Order]](json) should matchPattern {
      case Xor.Right(Vector(
        Order(175427639, "EUR_USD", 20, Buy, "marketIfTouched", 1453326442000000L, 1.0, 0, 0, 1453330035000000L, 0, 0, 0),
        Order(43211, "EUR_CAD", 5, Buy, "limit", 1453326442000000L, 1.45123, 1.7, 1.4, 1453330035000000L, 0, 0, 10)
      )) ⇒ //...
    }
  }
}
| MartinSeeler/rx-oanda | src/test/scala/rx/oanda/orders/OrderDecoderSpec.scala | Scala | apache-2.0 | 4,850 |
package org.amcgala.agent
import org.amcgala.agent.World.{ CellWithIndex, InformationObject }
import org.amcgala.agent.Agent.{ OwnerPheromone, Pheromone, AgentStates }
import akka.actor.{ PoisonPill, ActorRef }
/**
* Ein WorldContraintsChecker überprüft während der Simulation die Aktionen der Agenten und kann entscheiden, ob eine Aktion in
* der Simulation von einem Agenten ausgeführt werden kann.
* Hierfür überprüft die Simulation die Anfrage des Agenten mithilfe des ConstraintsChecker und führt eine Aktion aus, kann diese
* aber auch zurückweisen.
*/
trait WorldConstraintsChecker {

  /**
   * Checks whether the agent may move from its current cell to the target cell.
   *
   * @param requester the actor reference of the agent requesting the move
   * @param current the cell the agent currently occupies
   * @param target the destination cell
   * @param states the states of all agents known to the simulation
   * @return true if the agent is allowed to move onto the target cell
   */
  def checkMove(requester: ActorRef, current: CellWithIndex, target: CellWithIndex, states: List[AgentStates]): Boolean

  /**
   * Checks whether the agent may release a pheromone.
   *
   * @param agent the state of the agent
   * @param pheromone the pheromone the agent wants to release
   * @return true if the agent may release the pheromone
   */
  def checkPheromone(agent: AgentStates, pheromone: Pheromone): Boolean

  /**
   * Checks whether the agent may change the value of a cell.
   *
   * @param currentValue the current value of the cell
   * @param newValue the value the agent wants to set
   * @return true if the agent may change the value
   */
  def checkValueChange(currentValue: Double, newValue: Double): Boolean

  /**
   * Checks whether an [[org.amcgala.agent.World.InformationObject]] may be
   * added to the cell.
   *
   * @param agent the state of the agent placing the object
   * @param informationObject the object to be placed
   * @return true if the object may be placed
   */
  def checkInformationObject(agent: AgentStates, informationObject: InformationObject): Boolean
}
/**
* Standardbeschraenkungen der Simulation.
*
*/
class RaindropConstraints extends WorldConstraintsChecker {

  /**
   * A move is legal only to an unoccupied cell at most one step away in each
   * direction. Any illegal request kills the requesting agent via PoisonPill.
   */
  def checkMove(requester: ActorRef, current: CellWithIndex, target: CellWithIndex, states: List[AgentStates]): Boolean = {
    val targetOccupied = states.exists(_.position == target.index)
    val dx = math.abs(current.index.x - target.index.x)
    val dy = math.abs(current.index.y - target.index.y)
    val legal = !targetOccupied && dx <= 1 && dy <= 1
    if (!legal) requester ! PoisonPill
    legal
  }

  /** Owner pheromones may only be released by their owning agent. */
  def checkPheromone(agent: AgentStates, pheromone: Pheromone): Boolean =
    pheromone match {
      case owner: OwnerPheromone ⇒ owner.id == agent.id
      case _ ⇒ true
    }

  /** Cell values must stay within the closed interval [0, 1]. */
  def checkValueChange(oldValue: Double, newValue: Double): Boolean =
    0 <= newValue && newValue <= 1

  /** Any information object may be placed. */
  def checkInformationObject(agent: AgentStates, informationObject: InformationObject): Boolean = true
}
class DefaultConstraints extends WorldConstraintsChecker {

  /** Every move is permitted. */
  def checkMove(requester: ActorRef, current: CellWithIndex, target: CellWithIndex, states: List[AgentStates]): Boolean = true

  /** Every pheromone release is permitted. */
  def checkPheromone(agent: AgentStates, pheromone: Pheromone): Boolean = true

  /** Every value change is permitted. */
  def checkValueChange(currentValue: Double, newValue: Double): Boolean = true

  /**
   * Everything except a `Base` object may be placed on a cell; placing a
   * `Base` is rejected with a console message.
   */
  def checkInformationObject(agent: Agent.AgentStates, informationObject: World.InformationObject): Boolean =
    informationObject match {
      case _: Base.type ⇒
        println("Es ist nicht erlaubt ein Objekt dieses Typs zu platzieren!")
        false
      case _ ⇒ true
    }
}
| th-koeln/amcgala | src/main/scala/org/amcgala/agent/WorldConstraints.scala | Scala | apache-2.0 | 4,522 |
// scalac: -feature -language:implicitConversions -language:higherKinds -language:-implicitConversions -Xfatal-warnings
// showing that multiple settings are respected, and explicit enablement has precedence
class X {
  // Higher-kinded type parameter: compiles because -language:higherKinds is enabled.
  def hk[M[_]] = ???
  // Implicit conversion: -language:implicitConversions is both enabled and later
  // negated on the command line (see the scalac header); per this test, the
  // explicit enablement takes precedence, so this must compile cleanly.
  implicit def imp(x: X): Int = x.hashCode
}
| loskutov/intellij-scala | testdata/scalacTests/pos/t8736.scala | Scala | apache-2.0 | 285 |
// Resolve-test fixture: `f` is defined inside the block scope below, so the
// reference on the marker line is expected NOT to resolve (marker: resolved: false).
{
  def f = {}
}
/* resolved: false */ f
| LPTK/intellij-scala | testdata/resolve2/scope/InnerBlock.scala | Scala | apache-2.0 | 41 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.cfclerk.services
import com.normation.cfclerk.domain.TechniqueId
import com.normation.eventlog.EventActor
/**
* A trait that allows its implementation to get notification
* about Reference Policy Template Library update.
*
* The implementation must be registered to a TechniqueRepository
* that allows such notification to be shared.
*
*/
trait TechniquesLibraryUpdateNotification {

  /**
   * A name to identify that callback.
   */
  def name:String

  /**
   * That method will be called when techniques are updated.
   * TODO: perhaps we want something more useful as a return type.
   *
   * Description is a log description to explain why techniques should be updated
   * (user action, commit, etc).
   *
   * @param TechniqueIds the identifiers of the updated techniques.
   *                     NOTE(review): the name breaks the lowerCamelCase
   *                     convention, but renaming it would break callers using
   *                     named arguments, so it is left as-is.
   * @param actor the actor that triggered the update
   * @param reason an optional human-readable explanation for the update
   */
  def updatedTechniques(TechniqueIds:Seq[TechniqueId], actor: EventActor, reason: Option[String]) : Unit
} | fanf/cf-clerk | src/main/scala/com/normation/cfclerk/services/TechniqueLibraryUpdateNotification.scala | Scala | agpl-3.0 | 2,537 |
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.service
import com.waz.content.{AssetsStorage, ConversationStorage, MembersStorage, UsersStorage}
import com.waz.model._
import com.waz.service.conversation.ConversationsUiService
import com.waz.service.messages.MessagesService
import com.waz.specs.AndroidFreeSpec
import com.waz.sync.client.IntegrationsClient
import com.waz.sync.{SyncRequestService, SyncResult, SyncServiceHandle}
import com.waz.threading.{CancellableFuture, Threading}
import scala.concurrent.Future
class IntegrationsServiceSpec extends AndroidFreeSpec {
  implicit val ctx = Threading.Background

  // Mocked collaborators injected into the service under test.
  val sync = mock[SyncServiceHandle]
  val client = mock[IntegrationsClient]
  val teamId = TeamId()
  val srs = mock[SyncRequestService]
  val assets = mock[AssetsStorage]
  val users = mock[UsersStorage]
  val members = mock[MembersStorage]
  val messages = mock[MessagesService]
  val convsUi = mock[ConversationsUiService]
  val convs = mock[ConversationStorage]

  // lazy: constructed on first use, after the mock fields above are initialised
  lazy val service = new IntegrationsServiceImpl(account1Id, Some(teamId), client, assets, sync, users, members, messages, convs, convsUi, srs)

  // Two integrations reference assets with the same remote id; the asset
  // already present in storage must be reused, only the new one inserted.
  scenario("Previously downloaded AssetData are not recreated if found in database") {
    val asset1 = AssetData(id = AssetId("asset-1"), remoteId = Some(RAssetId("asset-1")))
    val asset1Copy = AssetData(id = AssetId("asset-1-copy"), remoteId = Some(RAssetId("asset-1")))
    val asset2 = AssetData(id = AssetId("asset-2"), remoteId = Some(RAssetId("asset-2")))

    val service1 = IntegrationData(id = IntegrationId("service-1"), provider = ProviderId(""), asset = Some(asset1Copy.id))
    val service2 = IntegrationData(id = IntegrationId("service-2"), provider = ProviderId(""), asset = Some(asset2.id))

    // simulated backend search response
    val beResponse = Map(
      service1 -> Some(asset1Copy), //get some integration a second time from the backend
      service2 -> Some(asset2)
    )

    // asset-1 is already known locally under its remote id
    val fromDatabase = Set(asset1)

    (client.searchTeamIntegrations _).expects(None, *).returning(CancellableFuture.successful(Right(beResponse)))
    (assets.findByRemoteIds _).expects(Set(asset1Copy.remoteId.get, asset2.remoteId.get)).returning(Future.successful(fromDatabase))
    // only the previously unseen asset may be inserted
    (assets.insertAll _).expects(Set(asset2)).returning(Future.successful(Set()))

    val res = result(service.searchIntegrations())
    // service1 must point at the stored asset (asset1), not the backend copy
    res.right.get.find(_.id == service1.id).get shouldEqual service1.copy(asset = Some(asset1.id))
    res.right.get.find(_.id == service2.id).get shouldEqual service2.copy(asset = Some(asset2.id))
  }

  // No existing conversation contains the service, so the service must create
  // a new group conversation, add the bot, and post a connect-request message.
  scenario("create new conversation with service if none exists") {
    val pId = ProviderId("provider-id")
    val serviceId = IntegrationId("service-id")

    val createConvSyncId = SyncId("create-conv")
    val addedBotSyncId = SyncId("added-bot")
    val createdConv = ConversationData(ConvId("created-conv-id"))
    val serviceUserId = UserId("service-user")

    (users.findUsersForService _).expects(serviceId).returning(Future.successful(Set.empty))
    (convsUi.createGroupConversation _).expects(Option.empty[Name], Set.empty[UserId], false, 0).returning(Future.successful(createdConv, createConvSyncId))
    // one await for conversation creation, one for the add-bot request
    (srs.await (_: SyncId)).expects(*).twice().returning(Future.successful(SyncResult.Success))
    (sync.postAddBot _).expects(createdConv.id, pId, serviceId).returning(Future.successful(addedBotSyncId))
    (members.getActiveUsers _).expects(createdConv.id).returning(Future.successful(Seq(account1Id, serviceUserId)))
    (messages.addConnectRequestMessage _).expects(createdConv.id, account1Id, serviceUserId, "", Name.Empty, true).returning(Future.successful(null))

    result(service.getOrCreateConvWithService(pId, serviceId)) shouldEqual Right(createdConv.id)
  }

  // The service exists only in a group conversation with a third member, which
  // must NOT be reused — a fresh 1:1-with-service conversation is created.
  scenario("Opening conversation with service does not return group conversations in which that service is located, but creates a new conv") {
    val pId = ProviderId("provider-id")
    val serviceId = IntegrationId("service-id")

    val serviceUserInGroupId = UserId("service-user-in-group")
    val serviceUserInGroup = UserData(serviceUserInGroupId, name = Name("service"), searchKey = SearchKey.simple("service"), providerId = Some(pId), integrationId = Some(serviceId))
    val groupConvId = ConvId("group-conv")
    // three members => not a candidate conversation
    val membersInGroupConv = Set(
      ConversationMemberData(account1Id, groupConvId),
      ConversationMemberData(serviceUserInGroupId, groupConvId),
      ConversationMemberData(UserId("some other user"), groupConvId)
    )

    val createConvSyncId = SyncId("create-conv")
    val addedBotSyncId = SyncId("added-bot")
    val createdConv = ConversationData(ConvId("created-conv-id"))
    val serviceUserId = UserId("service-user")

    (users.findUsersForService _).expects(serviceId).returning(Future.successful(Set(serviceUserInGroup)))
    (members.getByUsers _).expects(Set(serviceUserInGroupId)).returning(Future.successful(membersInGroupConv.filter(_.userId == serviceUserInGroupId).toIndexedSeq))
    (members.getByConvs _).expects(Set(groupConvId)).returning(Future.successful(membersInGroupConv.toIndexedSeq))
    (convs.getAll _).expects(*).onCall { (ids: Traversable[ConvId]) =>
      if (ids.nonEmpty) fail("Should be no matching conversations")
      Future.successful(Seq.empty)
    }

    (convsUi.createGroupConversation _).expects(Option.empty[Name], Set.empty[UserId], false, 0).returning(Future.successful(createdConv, createConvSyncId))
    (srs.await (_: SyncId)).expects(*).twice().returning(Future.successful(SyncResult.Success))
    (sync.postAddBot _).expects(createdConv.id, pId, serviceId).returning(Future.successful(addedBotSyncId))
    (members.getActiveUsers _).expects(createdConv.id).returning(Future.successful(Seq(account1Id, serviceUserId)))
    (messages.addConnectRequestMessage _).expects(createdConv.id, account1Id, serviceUserId, "", Name.Empty, true).returning(Future.successful(null))

    result(service.getOrCreateConvWithService(pId, serviceId)) shouldEqual Right(createdConv.id)
  }

  // A 1:1 conversation with only the user and the service already exists and
  // must be returned instead of creating a new one.
  scenario("Open previous conversation with service if one exists") {
    val pId = ProviderId("provider-id")
    val serviceId = IntegrationId("service-id")

    val serviceUserId = UserId("service-user-in-group")
    val serviceUser = UserData(serviceUserId, name = Name("service"), searchKey = SearchKey.simple("service"), providerId = Some(pId), integrationId = Some(serviceId))
    val existingConvId = ConvId("existing-conv")
    val existingConv = ConversationData(existingConvId, team = Some(teamId), name = None)
    // exactly two members: the account and the service user
    val membersInGroupConv = Set(
      ConversationMemberData(account1Id, existingConvId),
      ConversationMemberData(serviceUserId, existingConvId)
    )

    (users.findUsersForService _).expects(serviceId).returning(Future.successful(Set(serviceUser)))
    (members.getByUsers _).expects(Set(serviceUserId)).returning(Future.successful(membersInGroupConv.filter(_.userId == serviceUserId).toIndexedSeq))
    (members.getByConvs _).expects(Set(existingConvId)).returning(Future.successful(membersInGroupConv.toIndexedSeq))
    (convs.getAll _).expects(*).onCall { (ids: Traversable[ConvId]) =>
      if (ids.isEmpty) fail("Should be 1 matching conversations")
      Future.successful(Seq(Some(existingConv)))
    }

    result(service.getOrCreateConvWithService(pId, serviceId)) shouldEqual Right(existingConv.id)
  }
} | wireapp/wire-android-sync-engine | zmessaging/src/test/scala/com/waz/service/IntegrationsServiceSpec.scala | Scala | gpl-3.0 | 8,047 |
//
// EventCounter.scala -- Scala object EventCounter
// Project OrcScala
//
// Created by jthywiss on Jul 18, 2017.
//
// Copyright (c) 2019 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.util
import java.io.OutputStreamWriter
/** Rudimentary event counting facility.
*
* Each thread accumulates counts in a thread-local counting map. At JVM
* shutdown, the count maps are dumped to stdout.
*
* Counts are accumulated per "event type"s, which are [[scala.Symbol]]s,
* using the `count` method.
*
* @author jthywiss
*/
object EventCounter {

  /* Because of aggressive inlining, changing this flag requires a clean rebuild */
  // Master switch: when false, count() compiles down to a no-op.
  final val counterOn = false

  // Per-thread accumulator; null when counting is disabled so no ThreadLocal
  // is ever allocated.
  private final val eventCountsTL = if (counterOn) new ThreadLocal[EventCounts]() {
    override protected def initialValue = new EventCounts(Thread.currentThread().getId)
  }
  else null

  /** Add one occurrence of `eventType` to the calling thread's count map. */
  @inline
  def count[A](eventType: Symbol) = {
    if (counterOn) {
      eventCountsTL.get().add(eventType, 1L)
    }
  }

  // Thread-local count accumulator; registers itself with the dump thread on
  // construction so its counts are included in the shutdown report.
  @inline
  private final class EventCounts(val javaThreadId: Long) {
    // eventType -> single-element Long array used as a mutable accumulator cell
    final val countMap = new scala.collection.mutable.HashMap[Symbol, Array[Long]]
    @inline
    def add(eventType: Symbol, intervalCount: Long) {
      val accums = countMap.get(eventType)
      if (accums.isDefined) {
        accums.get(0) += intervalCount
      } else {
        val newAccums = new Array[Long](1)
        newAccums(0) = intervalCount
        countMap.put(eventType, newAccums)
      }
    }
    EventCountDumpThread.register(this)
  }

  // Shutdown hook: sums per-thread counts and writes a table to stderr plus an
  // optional CSV file. Synchronized so register() and run cannot interleave.
  private object EventCountDumpThread extends ShutdownHook("EventCountDumpThread") {
    private val accums = scala.collection.mutable.Set[EventCounts]()
    override def run = synchronized {
      // merge every thread's counts into one map, tracking the widest name
      // for column alignment
      val sumMap = new scala.collection.mutable.HashMap[Symbol, Array[Long]]
      var eventTypeColWidth = 14
      for (ec <- accums) {
        for (e <- ec.countMap) {
          val sums = sumMap.getOrElseUpdate(e._1, Array(0L))
          sums(0) += e._2(0)
          if (e._1.name.length > eventTypeColWidth) eventTypeColWidth = e._1.name.length
        }
      }
      /* Convention: synchronize on System.err during output of block */
      System.err synchronized {
        System.err.append(s"Event Counters: begin, ${sumMap.size} entries\\n")
        System.err.append("Event-Type".padTo(eventTypeColWidth, '-'))
        System.err.append("\\t-------Count--------\\n")
        for (e <- sumMap) {
          System.err.append(e._1.name.padTo(eventTypeColWidth, ' '))
          System.err.append(f"\\t${e._2(0)}%20d\\n")
        }
        System.err.append(f"Event Counters: end\\n")
      }
      // also emit a CSV file when an execution-log output stream is available
      val csvOut = ExecutionLogOutputStream("eventCount", "csv", "Event count output file")
      if (csvOut.isDefined) {
        val eventCountCsv = new OutputStreamWriter(csvOut.get, "UTF-8")
        val csvWriter = new CsvWriter(eventCountCsv.append(_))
        val tableColumnTitles = Seq("Event Type", "Count")
        csvWriter.writeHeader(tableColumnTitles)
        val rows = sumMap.map(e => ((e._1.name, e._2(0))))
        csvWriter.writeRows(rows)
        eventCountCsv.close()
      }
    }
    def register(ec: EventCounts) = synchronized {
      accums += ec
    }
  }

  // Install the dump hook only when counting is active.
  if (counterOn) Runtime.getRuntime().addShutdownHook(EventCountDumpThread)
}
| orc-lang/orc | OrcScala/src/orc/util/EventCounter.scala | Scala | bsd-3-clause | 3,508 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author Matthew Saltz, John Miller, Ayushi Jain
* @version 1.3
* @date Thu Jul 25 11:28:31 EDT 2013
* @see LICENSE (MIT style license file).
*/
package scalation.graphalytics.mutable
import scala.collection.mutable.Map
import scala.collection.mutable.{Set => SET}
import scala.reflect.ClassTag
import scalation.graphalytics.mutable.{ExampleMGraphD => EX_GRAPH}
import scalation.util.time
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `MDualIso` class provides an implementation for Subgraph Isomorphism
 *  that uses Dual Graph Simulation for pruning.
 *  @param g  the data graph  G(V, E, l)
 *  @param q  the query graph Q(U, D, k)
 */
class MDualIso [TLabel: ClassTag] (g: MGraph [TLabel], q: MGraph [TLabel])
      extends GraphMatcher (g, q)
{
    private val duals        = new MDualSim (g, q)          // object for Dual Simulation algorithm
    private var t0           = 0.0                          // start time for timer (not used within this class)
    private var matches      = SET [Array [SET [Int]]] ()   // complete match set found so far
    private var noBijections = true                         // no results computed yet
    private var limit        = 1000000                      // limit on number of matches

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Set an upper bound on the number of matches to allow before quitting.
     *  @param _limit  the number of matches before quitting
     */
    def setLimit (_limit: Int) { limit = _limit }

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Apply the Dual Subgraph Isomorphism algorithm to find subgraphs of data
     *  graph 'g' that isomorphically match query graph 'q'.  These are represented
     *  by a set of single-valued bijections {'psi'} where each 'psi' function
     *  maps each query graph vertex 'u' to a data graph vertex 'v'.
     */
    override def bijections (): SET [Array [Int]] =
    {
        matches = SET [Array [SET [Int]]] ()                // initialize matches to empty
        val phi = duals.feasibleMates ()                    // initial mappings from label match
        saltzDualIso (duals.saltzDualSim (phi), 0)          // recursively find all bijections
        val psi = simplify (matches)                        // pull bijections out of matches
        noBijections = false                                // results now available
        psi                                                 // return the set of bijections
    } // bijections

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Apply the Dual Subgraph Isomorphism pattern matching algorithm to find
     *  the mappings from the query graph 'q' to the data graph 'g'.  These are
     *  represented by a multi-valued function 'phi' that maps each query graph
     *  vertex 'u' to a set of data graph vertices '{v}'.
     */
    def mappings (): Array [SET [Int]] =
    {
        val psi = if (noBijections) bijections ()           // if no results yet, compute them
                  else simplify (matches)                   // otherwise reuse the stored match set
        merge (psi)                                         // merge bijections to create mappings
    } // mappings

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Return the count of the number of matches.
     */
    def numMatches (): Int = matches.size

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Refine the mappings 'phi' using the Dual Subgraph Isomorphism algorithm.
     *  Enumerate bijections by using an Ullmann-like recursion that uses Dual
     *  Graph Simulation for pruning.
     *  @param phi    array of mappings from a query vertex u_q to { graph vertices v_g }
     *  @param depth  the depth of recursion
     */
    private def saltzDualIso (phi: Array [SET [Int]], depth: Int)
    {
        if (depth == q.size) {
            if (! phi.isEmpty) {
                matches += phi                              // all query vertices bound => record match
                if (matches.size % CHECK == 0) println ("dualIso: matches so far = " + matches.size)
            } // if
        } else if (! phi.isEmpty) {
            for (i <- phi (depth) if (! contains (phi, depth, i))) {
                // NOTE (review): shallow copy -- the element sets are shared with phi;
                // safe only if saltzDualSim does not mutate its argument's sets in
                // place -- confirm against MDualSim
                val phiCopy = phi.map (x => x)              // make a copy of phi
                phiCopy (depth) = SET [Int] (i)             // isolate vertex i
                if (matches.size >= limit) return           // quit if at LIMIT
                saltzDualIso (duals.saltzDualSim (phiCopy), depth + 1)   // solve recursively for the next depth
            } // for
        } // if
    } // saltzDualIso

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Determine whether vertex 'j' is contained in any 'phi(i)' for the previous depths.
     *  @param phi    array of mappings from a query vertex u_q to { graph vertices v_g }
     *  @param depth  the current depth of recursion
     *  @param j      the vertex j to check
     */
    private def contains (phi: Array [SET [Int]], depth: Int, j: Int): Boolean =
    {
        for (i <- 0 until depth if phi(i) contains j) return true
        false
    } // contains

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Create an array to hold matches for each vertex 'u' in the query graph
     *  'q' and initialize it to contain all empty sets.  Then for each bijection,
     *  add each element of the bijection to its corresponding match set.
     *  FIX: iterate over the 'psi' parameter rather than re-invoking 'bijections',
     *  which silently recomputed the entire match set on every call.
     *  @param psi  the set of bijections
     */
    private def merge (psi: SET [Array [Int]]): Array [SET [Int]] =
    {
        val matchSets = Array.ofDim [SET [Int]] (q.size).map (_ => SET [Int] ())
        for (b <- psi; i <- b.indices) matchSets(i) += b(i)
        matchSets
    } // merge

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Pull the bijections out of the complete match set by taking one element
     *  from each (singleton) match set.
     *  @param matches  the complete match set embedding all bijections
     */
    private def simplify (matches: SET [Array [SET [Int]]]): SET [Array [Int]] =
    {
        matches.map (m => m.map (set => set.iterator.next))
    } // simplify

} // MDualIso class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `MDualIsoTest` object is used to test the `MDualIso` class on the
 *  example graph/query pair (g1, q1).
 *  > run-main scalation.graphalytics.mutable.MDualIsoTest
 */
object MDualIsoTest extends App
{
    val g = EX_GRAPH.g1                                      // example data graph 1
    val q = EX_GRAPH.q1                                      // example query graph 1
    println (s"g.checkEdges = ${g.checkEdges}")
    g.printG ()
    println (s"q.checkEdges = ${q.checkEdges}")
    q.printG ()
    val matcher = new MDualIso (g, q)                        // Dual Subgraph Isomorphism Pattern Matcher
    val psi = time { matcher.bijections () }                 // time the matcher
    println ("Number of Matches: " + matcher.numMatches)
    for (p <- psi) println (s"psi = ${p.deep}")
} // MDualIsoTest
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `MDualIsoTest2` object is used to test the `MDualIso` class on the
 *  example graph/query pair (g2, q2).
 *  > run-main scalation.graphalytics.mutable.MDualIsoTest2
 */
object MDualIsoTest2 extends App
{
    val g = EX_GRAPH.g2                                      // example data graph 2
    val q = EX_GRAPH.q2                                      // example query graph 2
    println (s"g.checkEdges = ${g.checkEdges}")
    g.printG ()
    println (s"q.checkEdges = ${q.checkEdges}")
    q.printG ()
    val matcher = new MDualIso (g, q)                        // Dual Subgraph Isomorphism Pattern Matcher
    val psi = time { matcher.bijections () }                 // time the matcher
    println ("Number of Matches: " + matcher.numMatches)
    for (p <- psi) println (s"psi = ${p.deep}")
} // MDualIsoTest2
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `MDualIsoTest3` object is used to test the `MDualIso` class on randomly
 *  generated data and query graphs.
 *  > run-main scalation.graphalytics.mutable.MDualIsoTest3
 */
object MDualIsoTest3 extends App
{
    val mgGen = new MGraphGen [Double]
    val gSize     = 1000         // size of the data graph
    val qSize     = 10           // size of the query graph
    val nLabels   = 100          // number of distinct vertex labels
    val eLabels   = 10           // number of distinct edge labels
    val gAvDegree = 5            // average vertex out degree for data graph
    val qAvDegree = 2            // average vertex out degree for query graph
    val g = mgGen.genRandomGraph (gSize, nLabels, eLabels, gAvDegree, false, "g")
    val q = mgGen.genBFSQuery (qSize, qAvDegree, g, false, "q")
    println (s"q.checkEdges = ${q.checkEdges}")
    q.printG ()
    val matcher = new MDualIso (g, q)                        // Dual Subgraph Isomorphism Pattern Matcher
    val psi = time { matcher.bijections () }                 // time the matcher
    println ("Number of Matches: " + matcher.numMatches)
    for (p <- psi) println (s"psi = ${p.deep}")
} // MDualIsoTest3
| NBKlepp/fda | scalation_1.3/scalation_modeling/src/main/scala/scalation/graphalytics/mutable/MDualIso.scala | Scala | mit | 9,037 |
package com.avsystem.scex
package compiler
import java.lang.annotation.RetentionPolicy
import com.avsystem.scex.compiler.ScexCompiler.CompilationFailedException
import com.avsystem.scex.compiler.TestUtils.CustomBooleanConversionRoot
import com.avsystem.scex.util.{PredefinedAccessSpecs, SimpleContext}
import org.scalatest.FunSuite
/**
 * Test suite for literal (non-template) expression handling in the SCEX
 * compiler: string/boolean/enum literals, escaping rules, and custom
 * literal-to-value conversions.
 *
 * Created: 04-04-2014
 * Author: ghik
 */
class LiteralExpressionsTest extends FunSuite with CompilationTest {

  import com.avsystem.scex.validation.SymbolValidator._

  // --- string literals -----------------------------------------------------

  test("string literal test") {
    assert("trololo dafuq" == evaluateTemplate[String]("trololo dafuq"))
  }

  // `$$` in a template is the escape for a literal `$`
  test("string literal with money test") {
    assert("hajs$hajs" == evaluateTemplate[String]("hajs$$hajs"))
  }

  test("string literal with fancy characters test") {
    assert(raw"""trololo "" \\" \\\\ '' dafuq\\n""" == evaluateTemplate[String](raw"""trololo "" \\" \\\\ '' dafuq\\n"""))
  }

  // --- boolean literals ----------------------------------------------------

  test("boolean literal test") {
    assert(evaluateTemplate[Boolean]("true"))
  }

  // surrounding whitespace makes it a non-literal and must be rejected
  test("boolean literal test with surrounding whitespaces") {
    intercept[CompilationFailedException] {
      evaluateTemplate[Boolean](" true ")
    }
  }

  test("invalid boolean literal test") {
    intercept[CompilationFailedException] {
      evaluateTemplate[Boolean]("hueheuahueh")
    }
  }

  // a full expression is not accepted where a bare literal is expected
  test("invalid boolean literal test - expression as literal") {
    intercept[CompilationFailedException] {
      evaluateTemplate[Boolean]("true && false")
    }
  }

  // --- enum literals -------------------------------------------------------

  test("enum literal test") {
    assert(RetentionPolicy.RUNTIME == evaluateTemplate[RetentionPolicy]("RUNTIME"))
  }

  test("bad enum literal test 1") {
    intercept[CompilationFailedException] {
      evaluateTemplate[RetentionPolicy](" RUNTIME ")
    }
  }

  test("bad enum literal test 2") {
    intercept[CompilationFailedException] {
      evaluateTemplate[RetentionPolicy]("HUEHUE")
    }
  }

  // --- custom literal conversions ------------------------------------------

  test("no literal conversion found test") {
    intercept[CompilationFailedException] {
      evaluateTemplate[List[String]]("hueheuehuaheh")
    }
  }

  // conversion works when the conversion's symbols are on the ACL
  test("custom literal conversion test") {
    val acl = PredefinedAccessSpecs.basicOperations ++ allow {
      TestUtils.all.introduced.members
    }
    val header = "import com.avsystem.scex.compiler.TestUtils.zeroOneLiteralToBoolean"
    assert(true == evaluateTemplate[Boolean]("1", acl, header))
  }

  // ... and fails when the conversion is imported but not allowed by the ACL
  test("disallowed custom literal conversion test") {
    intercept[CompilationFailedException] {
      val header = "import com.avsystem.scex.compiler.TestUtils.zeroOneLiteralToBoolean"
      evaluateTemplate[Boolean]("1", header = header)
    }
  }

  // conversion whose result depends on the expression-context root object
  test("input dependent custom literal conversion test") {
    val acl = PredefinedAccessSpecs.basicOperations ++ allow {
      on { r: CustomBooleanConversionRoot =>
        r.all.introduced.members
      }
    }
    val cexpr = compiler.getCompiledExpression[SimpleContext[CustomBooleanConversionRoot], Boolean](
      createProfile(acl), "TRÓ", template = true, header = "")
    assert(cexpr(SimpleContext(new CustomBooleanConversionRoot("ZUO", "TRÓ"))))
  }

  test("java inner enum test") {
    val acl = allow {
      on { ei: EnumInside =>
        ei.all.members
      }
    }
    assert(EnumInside.TheEnum.THIS == evaluateTemplate[EnumInside.TheEnum]("THIS", acl))
  }
}
| AVSystem/scex | scex-core/src/test/scala/com/avsystem/scex/compiler/LiteralExpressionsTest.scala | Scala | mit | 3,268 |
package debop4s.timeperiod.timerange
import java.util
import debop4s.timeperiod.TimeSpec._
import debop4s.timeperiod._
import debop4s.timeperiod.utils.Times
import org.joda.time.DateTime
import scala.beans.BeanProperty
import scala.collection.SeqView
/**
* debop4s.timeperiod.timerange.HalfyearTimeRange
* @author 배성혁 sunghyouk.bae@gmail.com
* @since 2013. 12. 29. 오후 5:49
*/
class HalfyearTimeRange(@BeanProperty val year: Int,
                        @BeanProperty val halfyear: Halfyear,
                        @BeanProperty val halfyearCount: Int,
                        private[this] val _calendar: ITimeCalendar = DefaultTimeCalendar)
  extends CalendarTimeRange(HalfyearTimeRange.getPeriodOf(year, halfyear, halfyearCount), _calendar) {

  /** Half-year in which this range starts. */
  val startHalfyear: Halfyear = halfyear

  /** Half-year in which this range ends, derived from the range's last month. */
  val endHalfyear: Halfyear = Times.halfyearOfMonth(endMonthOfYear)

  def getStartHalfyear = startHalfyear

  def getEndHalfyear = endHalfyear

  /** True when the range spans more than one calendar year. */
  def isMultipleCalendarYears: Boolean = startYear != endYear

  /** Lazy view of the quarter ranges covered by this half-year range. */
  def quartersView: SeqView[QuarterRange, Seq[_]] = {
    val totalQuarters = halfyearCount * QuartersPerHalfyear
    val firstQuarter = Times.quarterOf(startMonthOfYear)
    (0 until totalQuarters).view.map { offset =>
      val yq = Times.addQuarter(startYear, firstQuarter, offset)
      QuarterRange(yq.year, yq.quarter, calendar)
    }
  }

  /** Eagerly-built Java list of the quarter ranges covered by this range. */
  def quarters: util.List[QuarterRange] = {
    val totalQuarters = halfyearCount * QuartersPerHalfyear
    val firstQuarter = Times.quarterOf(startMonthOfYear)
    val acc = new util.ArrayList[QuarterRange](totalQuarters)
    for (offset <- 0 until totalQuarters) {
      val yq = Times.addQuarter(startYear, firstQuarter, offset)
      acc.add(QuarterRange(yq.year, yq.quarter, calendar))
    }
    acc
  }

  /** Lazy view of the month ranges covered by this half-year range. */
  def monthsView: SeqView[MonthRange, Seq[_]] = {
    val totalMonths = halfyearCount * MonthsPerHalfyear
    (0 until totalMonths).view.map { offset =>
      val ym = Times.addMonth(startYear, startMonthOfYear, offset)
      MonthRange(ym.year, ym.monthOfYear, calendar)
    }
  }

  /** Eagerly-built Java list of the month ranges covered by this range. */
  def months: util.List[MonthRange] = {
    val totalMonths = halfyearCount * MonthsPerHalfyear
    val acc = new util.ArrayList[MonthRange](totalMonths)
    for (offset <- 0 until totalMonths) {
      val ym = Times.addMonth(startYear, startMonthOfYear, offset)
      acc.add(MonthRange(ym.year, ym.monthOfYear, calendar))
    }
    acc
  }
}
object HalfyearTimeRange {

  /** Build a range starting at the half-year that contains `moment`. */
  def apply(moment: DateTime, halfyearCount: Int, calendar: ITimeCalendar): HalfyearTimeRange =
    new HalfyearTimeRange(moment.getYear,
                          Times.halfyearOfMonth(moment.getMonthOfYear),
                          halfyearCount,
                          calendar)

  /** As above, with the default calendar. */
  def apply(moment: DateTime, halfyearCount: Int): HalfyearTimeRange =
    apply(moment, halfyearCount, DefaultTimeCalendar)

  /** Compute the [start, end) period covering `halfyearCount` half-years
   *  beginning at the given `halfyear` of `year`. */
  def getPeriodOf(year: Int, halfyear: Halfyear, halfyearCount: Int): TimeRange = {
    val start = Times.startTimeOfYear(year).plusMonths((halfyear.getValue - 1) * MonthsPerHalfyear)
    TimeRange(start, start.plusMonths(halfyearCount * MonthsPerHalfyear))
  }
}
| debop/debop4s | debop4s-timeperiod/src/main/scala/debop4s/timeperiod/timerange/HalfyearTimeRange.scala | Scala | apache-2.0 | 3,209 |
/** Factory/entry point for the random Halite bot. */
object RandomBot extends BotFactory {
  /** Entry point: starts the bot under the name "scalaRandom". */
  def main(args: Array[String]): Unit = {
    Runner.run("scalaRandom", this)
  }

  /** Build a bot instance for player `id`. */
  override def make(id: Int): Bot = new RandomBot(id)
}
/** A bot that issues one random move per site it owns. */
class RandomBot(myId: Int) extends Bot {
  override def getMoves(grid: Grid): Iterable[Move] =
    grid.getMine(myId).map(site => Move(site.location, Direction.getRandomDir))
}
package amphip.base
import scala.collection.{immutable, generic, GenTraversableOnce}
import scala.collection.generic.CanBuildFrom
/**
 * Immutable map that remembers the order in which keys were first inserted
 * and iterates in that order.
 */
class LinkedMap[K, +V] private (
  private val theVect: Vector[K],
  private val theMap: Map[K, V]
) extends immutable.Map[K, V] with immutable.MapLike[K, V, LinkedMap[K, V]] {

  override def empty = LinkedMap.empty

  /** Look up `key` in the underlying hash map. */
  def get(key: K): Option[V] = theMap.get(key)

  /** Add (or overwrite) a binding; a brand-new key goes to the back of the order,
   *  while overwriting an existing key keeps its original position. */
  def +[V1 >: V](kv: (K, V1)): LinkedMap[K, V1] = {
    val orderedKeys = if (theMap.contains(kv._1)) theVect else theVect :+ kv._1
    new LinkedMap(orderedKeys, theMap + kv)
  }

  override def ++[V1 >: V](xs: GenTraversableOnce[(K, V1)]): LinkedMap[K, V1] =
    xs.seq.foldLeft[LinkedMap[K, V1]](repr)(_ + _)

  /** Remove a binding, dropping the key from the insertion order as well. */
  def -(key: K): LinkedMap[K, V] =
    new LinkedMap[K, V](theVect.filterNot(_ == key), theMap - key)

  /** Iterate bindings in first-insertion order. */
  def iterator: Iterator[(K, V)] = theVect.iterator.map(k => k -> theMap(k))

  /** alternative version of `updated`, not great wrt type inference though ... */
  def updated[V1 >: V](key: K, f: V => V1): LinkedMap[K, V1] =
    get(key) match {
      case Some(value) => this + (key -> f(value))
      case None        => this
    }
}

object LinkedMap extends generic.ImmutableMapFactory[LinkedMap] {

  implicit def canBuildFrom[A, B]: CanBuildFrom[Coll, (A, B), LinkedMap[A, B]] = new MapCanBuildFrom[A, B]

  override def empty[A, B]: LinkedMap[A, B] = EmptyLinkedMap.asInstanceOf[LinkedMap[A, B]]

  private object EmptyLinkedMap extends LinkedMap[Any, Nothing](Vector.empty[Nothing], Map.empty[Any, Nothing]) {}
}
} | gerferra/amphip | core/src/main/scala/amphip/base/LinkedMap.scala | Scala | mpl-2.0 | 1,721 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, offering a quick overview of the dataset's contents without deeper analysis.