code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package skuber
import java.io._
import org.apache.commons.io.IOUtils
/**
* @author David O'Riordan
*/
/**
 * A Kubernetes Secret resource: named binary data entries plus standard object metadata.
 * `data` maps entry keys to raw byte values; `type` is the (optional) Kubernetes secret type.
 *
 * Instances are immutable; the `add` methods return an updated copy.
 */
case class Secret(
  val kind: String = "Secret",
  override val apiVersion: String = v1,
  val metadata: ObjectMeta,
  data: Map[String, Array[Byte]] = Map(),
  val `type`: String = "")
  extends ObjectResource {

  /**
   * Returns a copy of this secret with the fully-read contents of `is` stored under `key`.
   * The stream is read eagerly; the caller remains responsible for closing it.
   */
  def add(key: String, is: InputStream): Secret =
    add(key, IOUtils.toByteArray(is))

  /**
   * Returns a copy of this secret with `bytes` stored under `key` (replacing any existing entry).
   * Fix: the previous version declared `Unit` and discarded the result of `copy`, so the
   * secret was never actually updated — case classes are immutable, `copy` does not mutate.
   */
  def add(key: String, bytes: Array[Byte]): Secret =
    this.copy(data = data + (key -> bytes))
}
object Secret {

  /** Core (un-grouped) resource specification: namespaced, served under the `secrets` plural name. */
  val specification = CoreResourceSpecification(
    scope = ResourceSpecification.Scope.Namespaced,
    names = ResourceSpecification.Names(
      plural = "secrets",
      singular = "secret",
      kind = "Secret",
      shortNames = Nil
    )
  )

  /** Resource definition for single `Secret` objects, used by the client for API path construction. */
  implicit val secDef: ResourceDefinition[Secret] = new ResourceDefinition[Secret] {
    def spec = specification
  }

  /** Resource definition for `SecretList` collections; shares the same specification. */
  implicit val secListDef: ResourceDefinition[SecretList] = new ResourceDefinition[SecretList] {
    def spec = specification
  }
}
| minatjanster/skuber | client/src/main/scala/skuber/Secret.scala | Scala | apache-2.0 | 995 |
package org.scalacoin.currency
import org.scalatest.{MustMatchers, Matchers, FlatSpec}
/**
* Created by chris on 12/21/15.
*/
/**
 * Unit conversions between Satoshis, Bits, MilliBitcoins and Bitcoins.
 * Reference rates exercised below: 1 BTC = 100,000,000 satoshis = 1,000,000 bits,
 * hence 1 bit = 100 satoshis and 1 satoshi = 0.01 bits.
 */
class CurrencyUnitsTest extends FlatSpec with MustMatchers {

  "One satoshi" should ("be equivalent to 0.00000001 BTC") in {
    CurrencyUnits.sataoshisToBitcoin(Satoshis(1)).value must be (0.00000001)
  }

  // Fix: this description previously claimed "0.001 bits", contradicting both the
  // assertion below and the "one bit = 100 satoshis" test; 1 satoshi = 0.01 bits.
  "One satoshi" should ("be equivalent to 0.01 bits") in {
    CurrencyUnits.satoshisToBits(Satoshis(1)).value must be (0.01)
  }

  "One bit" should ("be equivalent to 100 Satoshis") in {
    CurrencyUnits.bitsToSatoshis(Bits(1)).value must be (100)
  }

  "One bit" should ("be equivalent to 0.000001 BTC") in {
    CurrencyUnits.bitsToBitcoins(Bits(1)).value must be (0.000001)
  }

  "One bitcoin" should ("be equivalent to 100,000,000 Satoshis") in {
    CurrencyUnits.bitcoinsToSatoshis(Bitcoins(1)).value must be (100000000)
  }

  "One bitcoin" should ("be equivalent to 1,000,000 bits") in {
    CurrencyUnits.bitcoinsToBits(Bitcoins(1)).value must be (1000000)
  }

  // Round-trip consistency: converting via an intermediate unit must equal the direct conversion.
  "The conversion of 1 BTC -> Bits -> Satoshis" should ("equivalent to BTC -> Satoshis") in {
    val btcToBitsToSatoshis = CurrencyUnits.bitsToSatoshis(CurrencyUnits.bitcoinsToBits(Bitcoins(1)))
    val btcToSatoshis = CurrencyUnits.bitcoinsToSatoshis(Bitcoins(1))
    btcToBitsToSatoshis.value must be (btcToSatoshis.value)
  }

  "The conversion of 100,000,000 Satoshis -> Bits -> BTC" should ("be equivalent to 100,000,000 Satoshis -> BTC") in {
    val satoshisToBitsToBTC = CurrencyUnits.bitsToBitcoins(CurrencyUnits.satoshisToBits(Satoshis(100000000)))
    val satoshisToBTC = CurrencyUnits.sataoshisToBitcoin(Satoshis(100000000))
    satoshisToBTC.value must be (satoshisToBitsToBTC.value)
  }

  "The conversion of 1,000,000 Bits -> Satoshis -> BTC" should ("be equivalent to 1,000,000 Bits -> BTC") in {
    val bits = Bits(1000000)
    val bitsToSatoshisToBTC = CurrencyUnits.sataoshisToBitcoin(CurrencyUnits.bitsToSatoshis(bits))
    val bitsToBTC = CurrencyUnits.bitsToBitcoins(bits)
    bitsToSatoshisToBTC.value must be (bitsToBTC.value)
  }

  it must "convert bitcoins to satoshies" in {
    val bitcoins = Bitcoins(0.75)
    val expectedValue = Satoshis(75000000)
    CurrencyUnits.bitcoinsToSatoshis(bitcoins) must be (expectedValue)
  }

  it must "convert bits to satoshies" in {
    val bits = Bits(75)
    val expectedValue = Satoshis(7500)
    CurrencyUnits.bitsToSatoshis(bits) must be (expectedValue)
  }

  it must "convert bitcoins to bits" in {
    val bitcoins = Bitcoins(0.75)
    val expectedValue = Bits(750000)
    CurrencyUnits.bitcoinsToBits(bitcoins) must be (expectedValue)
  }

  // Display formatting: BTC rounds to 5 decimal places; whole amounts drop the fraction.
  it must "display bitcoins correctly" in {
    val bitcoins = Bitcoins(1.23423523523526)
    bitcoins.toString must be ("1.23424 BTC")
    val bitcoinsRoundUp = Bitcoins(5.2321223523623)
    bitcoinsRoundUp.toString must be ("5.23213 BTC")
    val roundBitcoins = Bitcoins(2.0)
    roundBitcoins.toString must be("2 BTC")
    val zeroBitcoins = Bitcoins(0)
    zeroBitcoins.toString must be ("0 BTC")
  }

  it must "display satoshis correctly" in {
    val satoshis = Satoshis(1)
    satoshis.toString must be ("1 Satoshis")
  }

  it must "display bits correctly" in {
    val bits = Bits(5.23)
    bits.toString must be ("5.23 Bits")
  }

  it must "display milliBits correctly" in {
    val milliBits = MilliBitcoins(1.232312352)
    milliBits.toString must be ("1.23232 mBTC")
  }

  it must "say that 1 BTC is equal to 100,000,000 satoshis" in {
    val satoshis = Satoshis(100000000)
    (satoshis == CurrencyUnits.oneBTC) must be (true)
  }

  // Satoshis are indivisible: fractional construction must be rejected.
  it must "throw a requirement failed exception when instantiating satoshis with a double with decimal points" in {
    val invalidSatoshis = 1.11111
    intercept[IllegalArgumentException] {
      Satoshis(invalidSatoshis)
    }
  }

  it must "evaluate one satoshis is less than one millibit" in {
    (CurrencyUnits.oneSatoshi < CurrencyUnits.oneMilliBit) must be (true)
  }

  it must "evalute one satoshi is less than or equal to one satoshis" in {
    (CurrencyUnits.oneSatoshi <= CurrencyUnits.oneSatoshi) must be (true)
  }

  it must "say one BTC is greater than one hundred millibit" in {
    (CurrencyUnits.oneBTC > CurrencyUnits.oneHundredMilliBits) must be (true)
  }

  it must "say one satoshis is greater than or equal to one satoshis" in {
    (CurrencyUnits.oneSatoshi >= CurrencyUnits.oneSatoshi) must be (true)
  }

  it must "say one satoshis is not equal to one btc" in {
    (CurrencyUnits.oneSatoshi != CurrencyUnits.oneBTC) must be (true)
  }

  it must "add one satoshis to another satoshi" in {
    (CurrencyUnits.oneSatoshi + CurrencyUnits.oneSatoshi).value must be (2)
  }

  it must "subtract one satoshi from one satoshi" in {
    (CurrencyUnits.oneSatoshi - CurrencyUnits.oneSatoshi).value must be (0)
  }

  it must "multiply one satoshi by one satoshis" in {
    (CurrencyUnits.oneSatoshi * CurrencyUnits.oneSatoshi).value must be (1)
  }
}
package com.karasiq.shadowcloud.crypto
import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
import akka.Done
import com.karasiq.shadowcloud.ShadowCloudExtension
import com.karasiq.shadowcloud.model.keys.{KeyChain, KeyId, KeyProps, KeySet}
import com.karasiq.shadowcloud.model.keys.KeyProps.RegionSet
import com.karasiq.shadowcloud.providers.KeyProvider
/**
 * In-memory [[KeyProvider]] used for tests: key sets live in a concurrent map and are
 * never persisted. Lazily generates one key set on first `getKeyChain()` call.
 */
private[crypto] final class TestKeyProvider(sc: ShadowCloudExtension) extends KeyProvider {
// Internal record pairing a key set with its region restriction and usage flags.
private[this] case class KeySetContainer(keySet: KeySet, regionSet: RegionSet, forEncryption: Boolean, forDecryption: Boolean)
// Lock-free concurrent storage keyed by key id.
private[this] val keys = TrieMap.empty[KeyId, KeySetContainer]
// Adds a key set; fails if a key with the same id already exists.
// putIfAbsent makes the existence check and insert a single atomic step.
def addKeySet(key: KeySet, regionSet: RegionSet, forEncryption: Boolean, forDecryption: Boolean): Future[KeySet] = {
val result = keys.putIfAbsent(key.id, KeySetContainer(key, regionSet, forEncryption, forDecryption))
if (result.isEmpty) Future.successful(key) else Future.failed(new IllegalArgumentException("Key already exists"))
}
// Updates region/usage flags of an existing key; fails if the id is unknown.
// NOTE(review): the get-then-update below is not atomic — a concurrent modify of the
// same id can be lost. Acceptable for a test-only provider, but worth knowing.
def modifyKeySet(keyId: KeyId, regionSet: RegionSet, forEncryption: Boolean, forDecryption: Boolean) = {
keys.get(keyId) match {
case Some(key) ⇒
keys += keyId → key.copy(key.keySet, regionSet, forEncryption, forDecryption)
Future.successful(Done)
case None ⇒
Future.failed(new NoSuchElementException(keyId.toString))
}
}
// Returns the full key chain, generating a first key set if the store is empty.
// The Future returned by addKeySet is discarded, which is safe here because addKeySet
// completes synchronously (Future.successful/failed) and the key is inserted before
// keysSeq is built. The one-argument addKeySet call presumably relies on default
// arguments declared on KeyProvider for the remaining parameters — TODO confirm.
def getKeyChain(): Future[KeyChain] = {
if (keys.isEmpty) {
addKeySet(sc.keys.generateKeySet())
}
val keysSeq = keys.values.toVector.map(ksp ⇒ KeyProps(ksp.keySet, ksp.regionSet, ksp.forEncryption, ksp.forDecryption))
Future.successful(KeyChain(keysSeq))
}
}
| Karasiq/shadowcloud | core/src/main/scala/com/karasiq/shadowcloud/crypto/TestKeyProvider.scala | Scala | apache-2.0 | 1,678 |
package net.sf.latexdraw.actions.shape
import org.malai.action.Action
import org.malai.undo.Undoable
import net.sf.latexdraw.actions.Modifying
import net.sf.latexdraw.actions.ShapeAction
import net.sf.latexdraw.glib.models.interfaces.GLibUtilities
import net.sf.latexdraw.glib.models.interfaces.IPoint
import net.sf.latexdraw.glib.models.interfaces.IShape
/**
* This action increments to rotation angle of shapes.<br>
* <br>
* This file is part of LaTeXDraw.<br>
* Copyright (c) 2005-2013 Arnaud BLOUIN<br>
* <br>
* LaTeXDraw is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your option) any later version.
* <br>
* LaTeXDraw is distributed without any warranty; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.<br>
* <br>
* 2012-04-20<br>
* @author Arnaud BLOUIN
* @since 3.0
*/
/**
 * Action that increments the rotation angle of the selected shape(s) around a gravity centre.
 * Supports undo/redo and repeated execution (only the delta since the last run is applied).
 */
class RotateShapes extends Action with ShapeAction[IShape] with Undoable with Modifying {
	/** The rotation angle to apply. */
	protected var _rotationAngle : Double = Double.NaN

	/** The gravity centre used for the rotation. */
	protected var _gc : Option[IPoint] = None

	/** The last increment performed on shapes. Used to execute several times the action. */
	private var lastRotationAngle : Double = 0.0

	// Executable only when a shape and a valid gravity centre are set and the angle is a real number.
	override def canDo() = _shape.isDefined && _gc.isDefined && GLibUtilities.INSTANCE.isValidCoordinate(_rotationAngle) &&
		GLibUtilities.INSTANCE.isValidPoint(_gc.get)

	override def isRegisterable() = true

	override def doActionBody() {
		// Apply only the delta since the previous execution so repeated runs accumulate correctly.
		rotateShapes(_rotationAngle-lastRotationAngle)
		lastRotationAngle = _rotationAngle
	}

	/**
	 * Rotates the shape.
	 * @param angleIncrement The increment to add to the rotation angle of the shape.
	 */
	private def rotateShapes(angleIncrement : Double) {
		_shape.get.addToRotationAngle(_gc.get, angleIncrement)
		_shape.get.setModified(true)
	}

	override def undo() {
		// Fix: the body had been commented out (injected fault labelled "Mutant22"),
		// making undo a silent no-op. Undo must rotate back by the applied angle.
		rotateShapes(-_rotationAngle)
	}

	override def redo() {
		rotateShapes(_rotationAngle)
	}

	override def getUndoName() = "rotation"

	/**
	 * @param rotationAngle The rotation angle to apply.
	 */
	def setRotationAngle(rotationAngle : Double) {
		_rotationAngle = rotationAngle
	}

	def rotationAngle = _rotationAngle

	/**
	 * @param gc The gravity centre used for the rotation; `null` clears it.
	 */
	def setGravityCentre(gc : IPoint) {
		if(gc!=null)
			_gc = Some(gc)
		else _gc = None
	}

	def gc = _gc
}
| arnobl/latexdraw-mutants | GUImutants/mutant22/net.sf.latexdraw.mutant22/src/main/net/sf/latexdraw/actions/shape/RotateShapes.scala | Scala | gpl-2.0 | 2,596 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt
import java.io.File
import java.net.URI
import java.util.Locale
import Project._
import BasicKeys.serverLogLevel
import Keys.{
stateBuildStructure,
bspEnabled,
colorShellPrompt,
commands,
configuration,
historyPath,
projectCommand,
sessionSettings,
shellPrompt,
templateResolverInfos,
autoStartServer,
serverHost,
serverIdleTimeout,
serverLog,
serverPort,
serverUseJni,
serverAuthentication,
serverConnectionType,
fullServerHandlers,
logLevel,
windowsServerSecurityLevel,
}
import Scope.{ Global, ThisScope }
import sbt.SlashSyntax0._
import Def.{ Flattened, Initialize, ScopedKey, Setting }
import sbt.internal.{
Load,
BuildStructure,
LoadedBuild,
LoadedBuildUnit,
SettingGraph,
SettingCompletions,
SessionSettings
}
import sbt.internal.util.{ AttributeKey, AttributeMap, Dag, Relation, Settings, ~> }
import sbt.internal.util.Types.{ const, idFun }
import sbt.internal.util.complete.DefaultParsers
import sbt.internal.server.ServerHandler
import sbt.librarymanagement.Configuration
import sbt.util.{ Show, Level }
import sjsonnew.JsonFormat
import language.experimental.macros
import scala.concurrent.TimeoutException
import scala.concurrent.duration.FiniteDuration
/**
 * Common read-only view of a project definition, parameterized by the kind of
 * project reference it holds (`ProjectReference` before resolution, `ProjectRef` after).
 */
sealed trait ProjectDefinition[PR <: ProjectReference] {
/**
* The project ID is used to uniquely identify a project within a build.
* It is used to refer to a project from the command line and in the scope of keys.
*/
def id: String
/** The base directory for the project.*/
def base: File
/**
* The configurations for this project. These are groups of related tasks and the main reason
* to list them here is when one configuration extends another. In this case, a setting lookup
* in one configuration will fall back to the configurations it extends configuration if the setting doesn't exist.
*/
def configurations: Seq[Configuration]
/**
* The explicitly defined sequence of settings that configure this project.
* These do not include the automatically appended settings as configured by `auto`.
*/
def settings: Seq[Setting[_]]
/**
* The references to projects that are aggregated by this project.
* When a task is run on this project, it will also be run on aggregated projects.
*/
def aggregate: Seq[PR]
/** The references to projects that are classpath dependencies of this project. */
def dependencies: Seq[ClasspathDep[PR]]
/** The references to projects that are aggregate and classpath dependencies of this project. */
def uses: Seq[PR] = aggregate ++ dependencies.map(_.project)
/** Alias for [[uses]]. */
def referenced: Seq[PR] = uses
/**
* The defined [[Plugins]] associated with this project.
* A [[AutoPlugin]] is a common label that is used by plugins to determine what settings, if any, to add to a project.
*/
def plugins: Plugins
/** Indicates whether the project was created organically, or was generated synthetically. */
def projectOrigin: ProjectOrigin
/** The [[AutoPlugin]]s enabled for this project. This value is only available on a loaded Project. */
private[sbt] def autoPlugins: Seq[AutoPlugin]
// Identity is (runtime class, id, base) only; settings, dependencies, etc. are
// deliberately excluded so resolved and unresolved forms of the "same" project compare stably.
override final def hashCode: Int = id.hashCode ^ base.hashCode ^ getClass.hashCode
override final def equals(o: Any) = o match {
case p: ProjectDefinition[_] => p.getClass == this.getClass && p.id == id && p.base == base
case _ => false
}
// Human-readable summary; empty collections are omitted via ifNonEmpty.
override def toString = {
val agg = ifNonEmpty("aggregate", aggregate)
val dep = ifNonEmpty("dependencies", dependencies)
val conf = ifNonEmpty("configurations", configurations)
val autos = ifNonEmpty("autoPlugins", autoPlugins.map(_.label))
val fields = s"id $id" :: s"base: $base" :: agg ::: dep ::: conf ::: (s"plugins: List($plugins)" :: autos)
s"Project(${fields.mkString(", ")})"
}
// Renders "label: values" as a single-element list, or Nil when the collection is empty.
private[this] def ifNonEmpty[T](label: String, ts: Iterable[T]): List[String] =
if (ts.isEmpty) Nil else s"$label: $ts" :: Nil
}
/** A definition that expands into one or more [[Project]]s (e.g. cross-built projects). */
trait CompositeProject {
/** The concrete projects this composite contributes to the build. */
def componentProjects: Seq[Project]
}
private[sbt] object CompositeProject {
/**
* Expand user defined projects with the component projects of `compositeProjects`.
*
* If two projects with the same id appear in the user defined projects and
* in `compositeProjects.componentProjects`, the user defined project wins.
* This is necessary for backward compatibility with the idioms:
* {{{
* lazy val foo = crossProject
* lazy val fooJS = foo.js.settings(...)
* lazy val fooJVM = foo.jvm.settings(...)
* }}}
* and the rarer:
* {{{
* lazy val fooJS = foo.js.settings(...)
* lazy val foo = crossProject
* lazy val fooJVM = foo.jvm.settings(...)
* }}}
*/
def expand(compositeProjects: Seq[CompositeProject]): Seq[Project] = {
// Plain Projects are themselves composites (componentProjects = this :: Nil);
// collect them separately so they take precedence by id below.
val userProjects = compositeProjects.collect { case p: Project => p }
for (p <- compositeProjects.flatMap(_.componentProjects)) yield {
userProjects.find(_.id == p.id) match {
case Some(userProject) => userProject
case None => p
}
}
// distinct applies to the whole substituted sequence, deduplicating the
// user-defined project that replaced its generated counterpart(s).
}.distinct
}
/**
 * An unresolved project definition: references to other projects are still symbolic
 * ([[ProjectReference]]) and are turned into concrete [[ProjectRef]]s by [[resolve]].
 * All builder methods return a new Project; instances are immutable.
 */
sealed trait Project extends ProjectDefinition[ProjectReference] with CompositeProject {
// A plain Project is a trivial composite consisting of itself.
def componentProjects: Seq[Project] = this :: Nil
// Narrow copy: rebuilds the project with the listed fields changed, preserving
// plugins, autoPlugins and projectOrigin via copy2.
private[sbt] def copy(
id: String = id,
base: File = base,
aggregate: Seq[ProjectReference] = aggregate,
dependencies: Seq[ClasspathDep[ProjectReference]] = dependencies,
settings: Seq[Setting[_]] = settings,
configurations: Seq[Configuration] = configurations
): Project =
copy2(id, base, aggregate, dependencies, settings, configurations)
// Full copy: every field replaceable; the single place that calls the unresolved factory.
private[this] def copy2(
id: String = id,
base: File = base,
aggregate: Seq[ProjectReference] = aggregate,
dependencies: Seq[ClasspathDep[ProjectReference]] = dependencies,
settings: Seq[Setting[_]] = settings,
configurations: Seq[Configuration] = configurations,
plugins: Plugins = plugins,
autoPlugins: Seq[AutoPlugin] = autoPlugins,
projectOrigin: ProjectOrigin = projectOrigin,
): Project =
unresolved(
id,
base,
aggregate = aggregate,
dependencies = dependencies,
settings = settings,
configurations,
plugins,
autoPlugins,
projectOrigin
)
/** Resolves all symbolic references with `resolveRef`, producing a [[ResolvedProject]]. */
def resolve(resolveRef: ProjectReference => ProjectRef): ResolvedProject = {
def resolveRefs(prs: Seq[ProjectReference]) = prs map resolveRef
def resolveDeps(ds: Seq[ClasspathDep[ProjectReference]]) = ds map resolveDep
def resolveDep(d: ClasspathDep[ProjectReference]) =
ResolvedClasspathDependency(resolveRef(d.project), d.configuration)
resolved(
id,
base,
aggregate = resolveRefs(aggregate),
dependencies = resolveDeps(dependencies),
settings,
configurations,
plugins,
autoPlugins,
projectOrigin
)
}
/** Rewrites references (e.g. to fix the build URI) while staying unresolved. */
def resolveBuild(resolveRef: ProjectReference => ProjectReference): Project = {
def resolveRefs(prs: Seq[ProjectReference]) = prs map resolveRef
def resolveDeps(ds: Seq[ClasspathDep[ProjectReference]]) = ds map resolveDep
def resolveDep(d: ClasspathDep[ProjectReference]) =
ClasspathDependency(resolveRef(d.project), d.configuration)
copy2(aggregate = resolveRefs(aggregate), dependencies = resolveDeps(dependencies))
}
/**
* Applies the given functions to this Project.
* The second function is applied to the result of applying the first to this Project and so on.
* The intended use is a convenience for applying default configuration provided by a plugin.
*/
def configure(transforms: (Project => Project)*): Project = Function.chain(transforms)(this)
/** Returns a copy of this project with a new project ID. */
def withId(id: String) = copy(id = id)
/** Sets the base directory for this project.*/
def in(dir: File): Project = copy(base = dir)
/** Adds configurations to this project. Added configurations replace existing configurations with the same name.*/
def overrideConfigs(cs: Configuration*): Project =
copy(configurations = Defaults.overrideConfigs(cs: _*)(configurations))
/**
* Adds configuration at the *start* of the configuration list for this project. Previous configurations replace this prefix
* list with the same name.
*/
private[sbt] def prefixConfigs(cs: Configuration*): Project =
copy(configurations = Defaults.overrideConfigs(configurations: _*)(cs))
/** Adds new configurations directly to this project. To override an existing configuration, use `overrideConfigs`. */
def configs(cs: Configuration*): Project = copy(configurations = configurations ++ cs)
/** Adds classpath dependencies on internal or external projects. */
def dependsOn(deps: ClasspathDep[ProjectReference]*): Project =
copy(dependencies = dependencies ++ deps)
/**
* Adds projects to be aggregated. When a user requests a task to run on this project from the command line,
* the task will also be run in aggregated projects.
*/
def aggregate(refs: ProjectReference*): Project =
copy(aggregate = (aggregate: Seq[ProjectReference]) ++ refs)
/** Appends settings to the current settings sequence for this project. */
def settings(ss: Def.SettingsDefinition*): Project =
copy(settings = (settings: Seq[Def.Setting[_]]) ++ Def.settings(ss: _*))
/**
* Sets the [[AutoPlugin]]s of this project.
* A [[AutoPlugin]] is a common label that is used by plugins to determine what settings, if any, to enable on a project.
*/
def enablePlugins(ns: Plugins*): Project = setPlugins(ns.foldLeft(plugins)(Plugins.and))
/** Disable the given plugins on this project. */
def disablePlugins(ps: AutoPlugin*): Project =
setPlugins(Plugins.and(plugins, Plugins.And(ps.map(p => Plugins.Exclude(p)).toList)))
private[sbt] def setPlugins(ns: Plugins): Project = copy2(plugins = ns)
/** Definitively set the [[AutoPlugin]]s for this project. */
private[sbt] def setAutoPlugins(autos: Seq[AutoPlugin]): Project = copy2(autoPlugins = autos)
/** Definitively set the [[ProjectOrigin]] for this project. */
private[sbt] def setProjectOrigin(origin: ProjectOrigin): Project = copy2(projectOrigin = origin)
}
/** A project whose references have been resolved to concrete [[ProjectRef]]s. */
sealed trait ResolvedProject extends ProjectDefinition[ProjectRef] {
/** The [[AutoPlugin]]s enabled for this project as computed from [[plugins]].*/
def autoPlugins: Seq[AutoPlugin]
}
/** A classpath dependency on a project, optionally restricted to a configuration mapping. */
sealed trait ClasspathDep[PR <: ProjectReference] {
def project: PR; def configuration: Option[String]
}
/** Classpath dependency whose target project has been resolved. */
final case class ResolvedClasspathDependency(project: ProjectRef, configuration: Option[String])
extends ClasspathDep[ProjectRef]
/** Classpath dependency on a still-symbolic project reference. */
final case class ClasspathDependency(project: ProjectReference, configuration: Option[String])
extends ClasspathDep[ProjectReference]
object Project extends ProjectExtra {
// Shared concrete backing class for both Project and ResolvedProject instances;
// the factories below mix in the appropriate trait.
private abstract class ProjectDef[PR <: ProjectReference](
val id: String,
val base: File,
val aggregate: Seq[PR],
val dependencies: Seq[ClasspathDep[PR]],
val settings: Seq[Def.Setting[_]],
val configurations: Seq[Configuration],
val plugins: Plugins,
val autoPlugins: Seq[AutoPlugin],
val projectOrigin: ProjectOrigin
) extends ProjectDefinition[PR] {
// checks for cyclic references here instead of having to do it in Scope.delegates
Dag.topologicalSort(configurations)(_.extendsConfigs)
}
/** Creates a bare, organically-defined project with the given id and base directory. */
def apply(id: String, base: File): Project =
unresolved(id, base, Nil, Nil, Nil, Nil, Plugins.empty, Nil, ProjectOrigin.Organic)
/** Key display relative to the current project, without colorization. */
def showContextKey(state: State): Show[ScopedKey[_]] =
showContextKey(state, None)
/** Key display relative to the current project; falls back to full keys before a build is loaded. */
def showContextKey(state: State, keyNameColor: Option[String]): Show[ScopedKey[_]] =
if (isProjectLoaded(state)) showContextKey2(session(state), keyNameColor)
else Def.showFullKey
@deprecated("Use showContextKey2 which doesn't take the unused structure param", "1.1.1")
def showContextKey(
session: SessionSettings,
structure: BuildStructure,
keyNameColor: Option[String] = None
): Show[ScopedKey[_]] =
showContextKey2(session, keyNameColor)
/** Key display relative to the session's current project. */
def showContextKey2(
session: SessionSettings,
keyNameColor: Option[String] = None
): Show[ScopedKey[_]] =
Def.showRelativeKey2(session.current, keyNameColor)
/** Key display relative to the root project of the loaded build (used during loading). */
def showLoadingKey(
loaded: LoadedBuild,
keyNameColor: Option[String] = None
): Show[ScopedKey[_]] =
Def.showRelativeKey2(
ProjectRef(loaded.root, loaded.units(loaded.root).rootProjects.head),
keyNameColor
)
/** This is a variation of def apply that mixes in GeneratedRootProject. */
private[sbt] def mkGeneratedRoot(
id: String,
base: File,
aggregate: Seq[ProjectReference]
): Project = {
// Fail fast on an invalid id rather than producing an unusable project.
validProjectID(id).foreach(errMsg => sys.error(s"Invalid project ID: $errMsg"))
val plugins = Plugins.empty
val origin = ProjectOrigin.GenericRoot
new ProjectDef(id, base, aggregate, Nil, Nil, Nil, plugins, Nil, origin)
with Project
with GeneratedRootProject
}
/** Returns None if `id` is a valid Project ID or Some containing the parser error message if it is not.*/
def validProjectID(id: String): Option[String] =
DefaultParsers.parse(id, DefaultParsers.ID).left.toOption
// True when `id` begins with a character that may start a project ID.
private[this] def validProjectIDStart(id: String): Boolean =
DefaultParsers.parse(id, DefaultParsers.IDStart).isRight
/** Constructs a valid Project ID based on `id` and returns it in Right or returns the error message in Left if one cannot be constructed.*/
def normalizeProjectID(id: String): Either[String, String] = {
val normalized = normalizeBase(id)
val refined = normalized match {
case "" => "root"
case s if !validProjectIDStart(s.take(1)) => s"root-$s"
case s => s
}
validProjectID(refined).toLeft(refined)
}
// Lowercases (English locale) and collapses every run of non-word characters into a single hyphen.
private[this] def normalizeBase(s: String) =
s.toLowerCase(Locale.ENGLISH).replaceAll("""\W+""", "-")
/**
* Normalize a String so that it is suitable for use as a dependency management module identifier.
* This is a best effort implementation, since valid characters are not documented or consistent.
*/
def normalizeModuleID(id: String): String = normalizeBase(id)
// Factory for resolved projects: instantiates the shared ProjectDef backing class
// with the ResolvedProject marker trait. No id validation — ids were checked at creation.
private def resolved(
id: String,
base: File,
aggregate: Seq[ProjectRef],
dependencies: Seq[ClasspathDep[ProjectRef]],
settings: Seq[Def.Setting[_]],
configurations: Seq[Configuration],
plugins: Plugins,
autoPlugins: Seq[AutoPlugin],
origin: ProjectOrigin
): ResolvedProject =
new ProjectDef[ProjectRef](
id,
base,
aggregate,
dependencies,
settings,
configurations,
plugins,
autoPlugins,
origin
) with ResolvedProject
// Factory for unresolved projects; validates the id eagerly so errors surface at definition site.
private def unresolved(
id: String,
base: File,
aggregate: Seq[ProjectReference],
dependencies: Seq[ClasspathDep[ProjectReference]],
settings: Seq[Def.Setting[_]],
configurations: Seq[Configuration],
plugins: Plugins,
autoPlugins: Seq[AutoPlugin],
origin: ProjectOrigin
): Project = {
validProjectID(id).foreach(errMsg => sys.error("Invalid project ID: " + errMsg))
new ProjectDef[ProjectReference](
id,
base,
aggregate,
dependencies,
settings,
configurations,
plugins,
autoPlugins,
origin
) with Project
}
// Enables the `project % configuration` syntax for building classpath dependencies.
final class Constructor(p: ProjectReference) {
def %(conf: Configuration): ClasspathDependency = %(conf.name)
def %(conf: String): ClasspathDependency = ClasspathDependency(p, Some(conf))
}
/** Reads `key` from the state's attributes, or aborts with `msg` if absent. */
def getOrError[T](state: State, key: AttributeKey[T], msg: String): T =
state get key getOrElse sys.error(msg)
/** The loaded build structure; fails if no build has been loaded yet. */
def structure(state: State): BuildStructure =
getOrError(state, stateBuildStructure, "No build loaded.")
/** The current session settings; fails if the session has not been initialized. */
def session(state: State): SessionSettings =
getOrError(state, sessionSettings, "Session not initialized.")
/** True once both the session and the build structure are attached to the state. */
def isProjectLoaded(state: State): Boolean =
(state has sessionSettings) && (state has stateBuildStructure)
/** Convenience bundle of session + structure + current project for evaluation. */
def extract(state: State): Extracted = extract(session(state), structure(state))
private[sbt] def extract(se: SessionSettings, st: BuildStructure): Extracted =
Extracted(st, se, se.current)(showContextKey2(se))
/** Looks up the resolved project for a reference; only ProjectRefs can resolve to a project. */
def getProjectForReference(ref: Reference, structure: BuildStructure): Option[ResolvedProject] =
ref match { case pr: ProjectRef => getProject(pr, structure); case _ => None }
def getProject(ref: ProjectRef, structure: BuildStructure): Option[ResolvedProject] =
getProject(ref, structure.units)
def getProject(ref: ProjectRef, structure: LoadedBuild): Option[ResolvedProject] =
getProject(ref, structure.units)
/** Looks the project up in the build unit identified by the reference's build URI. */
def getProject(ref: ProjectRef, units: Map[URI, LoadedBuildUnit]): Option[ResolvedProject] =
(units get ref.build).flatMap(_.defined get ref.project)
/** Runs registered exit hooks and then the previously installed onUnload hook. */
def runUnloadHooks(s: State): State = {
val previousOnUnload = orIdentity(s get Keys.onUnload.key)
previousOnUnload(s.runExitHooks())
}
/** Installs a new session and build structure on the state (no extra pre-load step). */
def setProject(session: SessionSettings, structure: BuildStructure, s: State): State =
setProject(session, structure, s, identity)
/**
 * Installs a new session and build structure on the state: unloads the previous build,
 * attaches the new structure/session/unload-hook, then runs `preOnLoad` followed by
 * the build's onLoad hook.
 */
def setProject(
session: SessionSettings,
structure: BuildStructure,
s: State,
preOnLoad: State => State
): State = {
val unloaded = runUnloadHooks(s)
val (onLoad, onUnload) = getHooks(structure.data)
val newAttrs = unloaded.attributes
.put(stateBuildStructure, structure)
.put(sessionSettings, session)
.put(Keys.onUnload.key, onUnload)
val newState = unloaded.copy(attributes = newAttrs)
// TODO: Fix this
onLoad(
preOnLoad(updateCurrent(newState)) /*LogManager.setGlobalLogLevels(updateCurrent(newState), structure.data)*/
)
}
/** Unwraps an optional state transformer, defaulting to the identity function. */
def orIdentity[T](opt: Option[T => T]): T => T = opt getOrElse idFun
/** Reads a hook setting from the Global scope, defaulting to identity when unset. */
def getHook[T](key: SettingKey[T => T], data: Settings[Scope]): T => T =
orIdentity((Global / key) get data)
/** The (onLoad, onUnload) hook pair configured in the build. */
def getHooks(data: Settings[Scope]): (State => State, State => State) =
(getHook(Keys.onLoad, data), getHook(Keys.onUnload, data))
/** The currently selected project of the session. */
def current(state: State): ProjectRef = session(state).current
/**
 * Refreshes state attributes and registered commands for the session's current project:
 * prints the project's onLoadMessage, copies per-project/server settings into state
 * attributes, and replaces previously project-tagged commands with the current ones.
 */
def updateCurrent(s: State): State = {
val structure = Project.structure(s)
val ref = Project.current(s)
// Validates that the current reference exists in the loaded units (throws otherwise);
// the returned project itself is not needed here.
Load.getProject(structure.units, ref.build, ref.project)
val msg = (ref / Keys.onLoadMessage) get structure.data getOrElse ""
if (!msg.isEmpty) s.log.info(msg)
def get[T](k: SettingKey[T]): Option[T] = (ref / k) get structure.data
def commandsIn(axis: ResolvedReference) = (axis / commands) get structure.data toList
// Commands come from the project, its build, and the Global scope.
val allCommands = commandsIn(ref) ++ commandsIn(BuildRef(ref.build)) ++ ((Global / commands) get structure.data toList)
val history = get(historyPath) flatMap idFun
val prompt = get(shellPrompt)
val newPrompt = get(colorShellPrompt)
val trs = ((Global / templateResolverInfos) get structure.data).toList.flatten
val startSvr: Option[Boolean] = get(autoStartServer)
val host: Option[String] = get(serverHost)
val port: Option[Int] = get(serverPort)
val enabledBsp: Option[Boolean] = get(bspEnabled)
val timeout: Option[Option[FiniteDuration]] = get(serverIdleTimeout)
val authentication: Option[Set[ServerAuthentication]] = get(serverAuthentication)
val connectionType: Option[ConnectionType] = get(serverConnectionType)
val srvLogLevel: Option[Level.Value] = (ref / serverLog / logLevel).get(structure.data)
val hs: Option[Seq[ServerHandler]] = get(ThisBuild / fullServerHandlers)
// Tag project-scoped commands so they can be removed wholesale on the next update.
val commandDefs = allCommands.distinct.flatten[Command].map(_ tag (projectCommand, true))
val newDefinedCommands = commandDefs ++ BasicCommands.removeTagged(
s.definedCommands,
projectCommand
)
val winSecurityLevel = get(windowsServerSecurityLevel).getOrElse(2)
val useJni = get(serverUseJni).getOrElse(false)
val newAttrs =
s.attributes
.put(historyPath.key, history)
.put(windowsServerSecurityLevel.key, winSecurityLevel)
.put(serverUseJni.key, useJni)
.setCond(bspEnabled.key, enabledBsp)
.setCond(autoStartServer.key, startSvr)
.setCond(serverPort.key, port)
.setCond(serverHost.key, host)
.setCond(serverAuthentication.key, authentication)
.setCond(serverConnectionType.key, connectionType)
.setCond(serverIdleTimeout.key, timeout)
// Fix: a second, redundant `.put(historyPath.key, history)` previously appeared
// here; historyPath is already stored at the top of this chain.
.put(templateResolverInfos.key, trs)
.setCond(shellPrompt.key, prompt)
.setCond(colorShellPrompt.key, newPrompt)
.setCond(serverLogLevel, srvLogLevel)
.setCond(fullServerHandlers.key, hs)
s.copy(
attributes = newAttrs,
definedCommands = newDefinedCommands
)
}
/** Sets `key` to the value inside `vopt` when defined; otherwise leaves the map unchanged. */
def setCond[T](key: AttributeKey[T], vopt: Option[T], attributes: AttributeMap): AttributeMap =
attributes.setCond(key, vopt)
// Returns an error message when two or more projects share the same target directory,
// which would let their outputs clobber each other; None when all targets are distinct.
private[sbt] def checkTargets(data: Settings[Scope]): Option[String] = {
val dups = overlappingTargets(allTargets(data))
if (dups.isEmpty)
None
else {
val dupStrs = dups map {
case (dir, scopes) =>
s"${dir.getAbsolutePath}:\n\t${scopes.mkString("\n\t")}"
}
Some(s"Overlapping output directories:${dupStrs.mkString}")
}
}
// Groups (project, target) pairs by target and keeps only directories used by >1 project.
private[this] def overlappingTargets(
targets: Seq[(ProjectRef, File)]
): Map[File, Seq[ProjectRef]] =
targets.groupBy(_._2).filter(_._2.size > 1).mapValues(_.map(_._1)).toMap
// Collects the target directory of every project in the build (skipping projects
// where the optional lookup yields nothing).
private[this] def allTargets(data: Settings[Scope]): Seq[(ProjectRef, File)] = {
import ScopeFilter._
val allProjects = ScopeFilter(Make.inAnyProject)
val targetAndRef = Def.setting { (Keys.thisProjectRef.value, Keys.target.value) }
new SettingKeyAll(Def.optional(targetAndRef)(idFun))
.all(allProjects)
.evaluate(data)
.flatMap(x => x)
}
/** Key equality under a mask: compares the underlying keys and only the scope axes enabled in `mask`. */
def equal(a: ScopedKey[_], b: ScopedKey[_], mask: ScopeMask): Boolean =
a.key == b.key && Scope.equal(a.scope, b.scope, mask)
/** Fills in the task axis of the key's scope from the key itself when unspecified. */
def fillTaskAxis(scoped: ScopedKey[_]): ScopedKey[_] =
ScopedKey(Scope.fillTaskAxis(scoped.scope, scoped.key), scoped.key)
/** Lifts a scope transformation to a natural transformation over scoped keys. */
def mapScope(f: Scope => Scope) = λ[ScopedKey ~> ScopedKey](k => ScopedKey(f(k.scope), k.key))
// Rewrites both the defining key and all referenced keys of each setting.
def transform(g: Scope => Scope, ss: Seq[Def.Setting[_]]): Seq[Def.Setting[_]] = {
val f = mapScope(g)
ss.map(_ mapKey f mapReferenced f)
}
// Rewrites only the referenced keys, leaving each setting's own key untouched.
def transformRef(g: Scope => Scope, ss: Seq[Def.Setting[_]]): Seq[Def.Setting[_]] = {
val f = mapScope(g)
ss.map(_ mapReferenced f)
}
/** The delegation chain for `scope`, each paired with `key` (most specific first, per structure.delegates). */
def delegates(structure: BuildStructure, scope: Scope, key: AttributeKey[_]): Seq[ScopedKey[_]] =
structure.delegates(scope).map(d => ScopedKey(d, key))
/** The value stored for (scope, key), wrapped with its key, or None when undefined. */
def scopedKeyData(
structure: BuildStructure,
scope: Scope,
key: AttributeKey[_]
): Option[ScopedKeyData[_]] =
structure.data.get(scope, key) map { v =>
ScopedKeyData(ScopedKey(scope, key), v)
}
  /** Renders a human-readable report for `key` in `scope`: its value/type, description,
   * providing scope, definition location, forward and reverse dependencies (derived
   * settings marked with "D "), delegate scopes, and the same key in related scopes. */
  def details(structure: BuildStructure, actual: Boolean, scope: Scope, key: AttributeKey[_])(
      implicit display: Show[ScopedKey[_]]
  ): String = {
    val scoped = ScopedKey(scope, key)
    // Value/type summary, or a placeholder when nothing is defined for this scoped key.
    val data = scopedKeyData(structure, scope, key) map { _.description } getOrElse {
      "No entry for key."
    }
    val description = key.description match {
      case Some(desc) => "Description:\n\t" + desc + "\n"; case None => ""
    }
    // The scope the value is actually provided by, after delegation is applied.
    val definingScope = structure.data.definingScope(scope, key)
    val providedBy = definingScope match {
      case Some(sc) => "Provided by:\n\t" + Scope.display(sc, key.label) + "\n"
      case None => ""
    }
    val definingScoped = definingScope match {
      case Some(sc) => ScopedKey(sc, key); case None => scoped
    }
    val comp =
      Def.compiled(structure.settings, actual)(structure.delegates, structure.scopeLocal, display)
    val definedAt = comp get definingScoped map { c =>
      Def.definedAtString(c.settings).capitalize
    } getOrElse ""
    val cMap = Def.flattenLocals(comp)
    // The same attribute key defined in scopes other than the requested one.
    val related = cMap.keys.filter(k => k.key == key && k.scope != scope)
    // Dependencies contributed by derived settings of `c`.
    def derivedDependencies(c: ScopedKey[_]): List[ScopedKey[_]] =
      comp
        .get(c)
        .map(_.settings.flatMap(s => if (s.isDerived) s.dependencies else Nil))
        .toList
        .flatten
    val depends = cMap.get(scoped) match {
      case Some(c) => c.dependencies.toSet; case None => Set.empty
    }
    val derivedDepends: Set[ScopedKey[_]] = derivedDependencies(definingScoped).toSet
    val reverse = reverseDependencies(cMap, scoped)
    val derivedReverse = reverse.filter(r => derivedDependencies(r).contains(definingScoped)).toSet
    // Prints a dependency section, prefixing derived entries with "D " and explaining
    // the marker in the section label.
    def printDepScopes(
        baseLabel: String,
        derivedLabel: String,
        scopes: Iterable[ScopedKey[_]],
        derived: Set[ScopedKey[_]]
    ): String = {
      val label = s"$baseLabel${if (derived.isEmpty) "" else s" (D=$derivedLabel)"}"
      val prefix: ScopedKey[_] => String =
        if (derived.isEmpty) const("") else sk => if (derived(sk)) "D " else "  "
      printScopes(label, scopes, prefix = prefix)
    }
    // Prints a labelled, tab-indented list of keys, truncated at `max` entries.
    def printScopes(
        label: String,
        scopes: Iterable[ScopedKey[_]],
        max: Int = Int.MaxValue,
        prefix: ScopedKey[_] => String = const("")
    ) =
      if (scopes.isEmpty) ""
      else {
        val (limited, more) =
          if (scopes.size <= max) (scopes, "\n") else (scopes.take(max), "\n...\n")
        limited.map(sk => prefix(sk) + display.show(sk)).mkString(label + ":\n\t", "\n\t", more)
      }
    data + "\n" +
      description +
      providedBy +
      definedAt +
      printDepScopes("Dependencies", "derived from", depends, derivedDepends) +
      printDepScopes("Reverse dependencies", "derives", reverse, derivedReverse) +
      printScopes("Delegates", delegates(structure, scope, key)) +
      printScopes("Related", related, 10)
  }
  /** Builds the dependency graph rooted at `scoped` for display, starting at depth 0. */
  def settingGraph(structure: BuildStructure, basedir: File, scoped: ScopedKey[_])(
      implicit display: Show[ScopedKey[_]]
  ): SettingGraph =
    SettingGraph(structure, basedir, scoped, 0)
  /** Writes both the actual and the declared settings dependency graphs as .dot files
   * under `basedir`. */
  def graphSettings(structure: BuildStructure, basedir: File)(
      implicit display: Show[ScopedKey[_]]
  ): Unit = {
    def graph(actual: Boolean, name: String) =
      graphSettings(structure, actual, name, new File(basedir, name + ".dot"))
    graph(true, "actual_dependencies")
    graph(false, "declared_dependencies")
  }
  /** Writes the settings dependency relation as a GraphViz .dot file named `graphName`. */
  def graphSettings(structure: BuildStructure, actual: Boolean, graphName: String, file: File)(
      implicit display: Show[ScopedKey[_]]
  ): Unit = {
    val rel = relation(structure, actual)
    val keyToString = display.show _
    DotGraph.generateGraph(file, graphName, rel, keyToString, keyToString)
  }
  /** Computes the dependency relation between scoped keys for the loaded build. */
  def relation(structure: BuildStructure, actual: Boolean)(
      implicit display: Show[ScopedKey[_]]
  ): Relation[ScopedKey[_], ScopedKey[_]] =
    relation(structure.settings, actual)(structure.delegates, structure.scopeLocal, display)
private[sbt] def relation(settings: Seq[Def.Setting[_]], actual: Boolean)(
implicit delegates: Scope => Seq[Scope],
scopeLocal: Def.ScopeLocal,
display: Show[ScopedKey[_]]
): Relation[ScopedKey[_], ScopedKey[_]] = {
val cMap = Def.flattenLocals(Def.compiled(settings, actual))
val emptyRelation = Relation.empty[ScopedKey[_], ScopedKey[_]]
cMap.foldLeft(emptyRelation) { case (r, (key, value)) => r + (key, value.dependencies) }
}
def showDefinitions(key: AttributeKey[_], defs: Seq[Scope])(
implicit display: Show[ScopedKey[_]]
): String =
showKeys(defs.map(scope => ScopedKey(scope, key)))
def showUses(defs: Seq[ScopedKey[_]])(implicit display: Show[ScopedKey[_]]): String =
showKeys(defs)
private[this] def showKeys(s: Seq[ScopedKey[_]])(implicit display: Show[ScopedKey[_]]): String =
s.map(display.show).sorted.mkString("\n\t", "\n\t", "\n\n")
def definitions(structure: BuildStructure, actual: Boolean, key: AttributeKey[_])(
implicit display: Show[ScopedKey[_]]
): Seq[Scope] =
relation(structure, actual)(display)._1s.toSeq flatMap { sk =>
if (sk.key == key) sk.scope :: Nil else Nil
}
def usedBy(structure: BuildStructure, actual: Boolean, key: AttributeKey[_])(
implicit display: Show[ScopedKey[_]]
): Seq[ScopedKey[_]] =
relation(structure, actual)(display).all.toSeq flatMap {
case (a, b) => if (b.key == key) List[ScopedKey[_]](a) else Nil
}
def reverseDependencies(
cMap: Map[ScopedKey[_], Flattened],
scoped: ScopedKey[_]
): Iterable[ScopedKey[_]] =
for ((key, compiled) <- cMap; dep <- compiled.dependencies if dep == scoped) yield key
  /** Applies `settings` to the current session and returns the resulting session. */
  def setAll(extracted: Extracted, settings: Seq[Def.Setting[_]]): SessionSettings =
    SettingCompletions.setAll(extracted, settings).session
  // State attribute holding build URIs to load in addition to the project's own builds.
  val ExtraBuilds = AttributeKey[List[URI]](
    "extra-builds",
    "Extra build URIs to load in addition to the ones defined by the project."
  )
  def extraBuilds(s: State): List[URI] = getOrNil(s, ExtraBuilds)
  /** Reads a list-valued state attribute, defaulting to the empty list when unset. */
  def getOrNil[T](s: State, key: AttributeKey[List[T]]): List[T] = s get key getOrElse Nil
  def setExtraBuilds(s: State, extra: List[URI]): State = s.put(ExtraBuilds, extra)
  /** Prepends `extra` to the currently configured extra builds. */
  def addExtraBuilds(s: State, extra: List[URI]): State =
    setExtraBuilds(s, extra ::: extraBuilds(s))
  /** Removes every URI in `remove` from the configured extra builds. */
  def removeExtraBuilds(s: State, remove: List[URI]): State =
    updateExtraBuilds(s, _.filterNot(remove.toSet))
  def updateExtraBuilds(s: State, f: List[URI] => List[URI]): State =
    setExtraBuilds(s, f(extraBuilds(s)))
  // used by Coursier integration
  /** Computes the transitive closure of inter-project dependencies of `projectRef`,
   * excluding the project itself, in the order of `allProjectRefs`. */
  private[sbt] def transitiveInterDependencies(
      state: State,
      projectRef: ProjectRef
  ): Seq[ProjectRef] = {
    def dependencies(map: Map[ProjectRef, Seq[ProjectRef]], id: ProjectRef): Set[ProjectRef] = {
      // Iteratively expands `acc` with the dependencies of its members. `map` shrinks on
      // each round, so this terminates even if the dependency graph contains cycles.
      def helper(map: Map[ProjectRef, Seq[ProjectRef]], acc: Set[ProjectRef]): Set[ProjectRef] =
        if (acc.exists(map.contains)) {
          val (kept, rem) = map.partition { case (k, _) => acc(k) }
          helper(rem, acc ++ kept.valuesIterator.flatten)
        } else
          acc
      helper(map - id, map.getOrElse(id, Nil).toSet)
    }
    val allProjectsDeps: Map[ProjectRef, Seq[ProjectRef]] =
      (for {
        (p, ref) <- Project.structure(state).allProjectPairs
      } yield ref -> p.dependencies.map(_.project)).toMap
    val deps = dependencies(allProjectsDeps.toMap, projectRef)
    // Filter allProjectRefs (rather than returning the set) to keep a stable ordering.
    Project.structure(state).allProjectRefs.filter(p => deps(p))
  }
  /** The kinds of `reload` actions: return to the previous build, reload the current
   * build, or switch into the plugin (meta) build. */
  object LoadAction extends Enumeration {
    val Return, Current, Plugins = Value
  }
  import LoadAction._
  import DefaultParsers._
  // Parses an optional "plugins"/"return" argument to `reload`, defaulting to Current.
  val loadActionParser = token(Space ~> ("plugins" ^^^ Plugins | "return" ^^^ Return)) ?? Current
  val ProjectReturn =
    AttributeKey[List[File]]("project-return", "Maintains a stack of builds visited using reload.")
  /** The stack of build base directories visited via `reload`, most recent first. */
  def projectReturn(s: State): List[File] = getOrNil(s, ProjectReturn)
  /** True when more than one build is on the reload stack, i.e. a plugin build was entered. */
  def inPluginProject(s: State): Boolean = projectReturn(s).length > 1
  def setProjectReturn(s: State, pr: List[File]): State =
    s.copy(attributes = s.attributes.put(ProjectReturn, pr))
  /** Resolves a load action to the new state and the base directory of the build to
   * load: Return pops the reload stack, Current pushes the current base directory if
   * the stack is empty, and Plugins pushes the plugin (meta) build's base directory. */
  def loadAction(s: State, action: LoadAction.Value): (State, File) = action match {
    case Return =>
      projectReturn(s) match {
        case _ /* current */ :: returnTo :: rest =>
          (setProjectReturn(s, returnTo :: rest), returnTo)
        case _ => sys.error("Not currently in a plugin definition")
      }
    case Current =>
      val base = s.configuration.baseDirectory
      projectReturn(s) match {
        case Nil => (setProjectReturn(s, base :: Nil), base); case x :: _ => (s, x)
      }
    case Plugins =>
      val (newBase, oldStack) =
        if (Project.isProjectLoaded(s))
          (Project.extract(s).currentUnit.unit.plugins.base, projectReturn(s))
        else // support changing to the definition project if it fails to load
          (BuildPaths.projectStandard(s.baseDir), s.baseDir :: Nil)
      val newS = setProjectReturn(s, newBase :: oldStack)
      (newS, newBase)
  }
  /** Evaluates `taskKey` against `state` using the execution configuration derived from
   * the loaded build (cancel strategy, progress reporting, restrictions, GC settings).
   * Returns None when the task is undefined. */
  def runTask[T](
      taskKey: ScopedKey[Task[T]],
      state: State,
      checkCycles: Boolean = false
  ): Option[(State, Result[T])] = {
    val extracted = Project.extract(state)
    val ch = EvaluateTask.cancelStrategy(extracted, extracted.structure, state)
    val p = EvaluateTask.executeProgress(extracted, extracted.structure, state)
    val r = EvaluateTask.restrictions(state)
    val fgc = EvaluateTask.forcegc(extracted, extracted.structure)
    val mfi = EvaluateTask.minForcegcInterval(extracted, extracted.structure)
    runTask(taskKey, state, EvaluateTaskConfig(r, checkCycles, p, ch, fgc, mfi))
  }
  /** Evaluates `taskKey` against `state` with an explicit execution configuration.
   * Returns None when the task is undefined in the current build. */
  def runTask[T](
      taskKey: ScopedKey[Task[T]],
      state: State,
      config: EvaluateTaskConfig
  ): Option[(State, Result[T])] = {
    val extracted = Project.extract(state)
    EvaluateTask(extracted.structure, taskKey, state, extracted.currentRef, config)
  }
  /** Refers to a project in the current build by its ID. */
  def projectToRef(p: Project): ProjectReference = LocalProject(p.id)
  // Implicit form of the above, used where a LocalProject is expected.
  implicit def projectToLocalProject(p: Project): LocalProject = LocalProject(p.id)
  /** Adds session-variable combinators to a task: persisting or caching its result
   * under a key, or updating the build state from its result. */
  final class RichTaskSessionVar[S](i: Def.Initialize[Task[S]]) {
    import SessionVar.{ persistAndSet, resolveContext, set, transform => tx }
    /** Applies `f` to the state and the task's result after the task completes. */
    def updateState(f: (State, S) => State): Def.Initialize[Task[S]] = i(t => tx(t, f))
    /** Persists the task's result under `key` (and keeps it in the session), using the
     * given JsonFormat to serialize the value. */
    def storeAs(key: TaskKey[S])(implicit f: JsonFormat[S]): Def.Initialize[Task[S]] = {
      Keys.resolvedScoped.zipWith(i) { (scoped, task) =>
        tx(
          task,
          (state, value) => persistAndSet(resolveContext(key, scoped.scope, state), state, value)(f)
        )
      }
    }
    /** Keeps the task's result under `key` for the current session only (not persisted). */
    def keepAs(key: TaskKey[S]): Def.Initialize[Task[S]] = {
      i.zipWith(Keys.resolvedScoped) { (t, scoped) =>
        tx(t, (state, value) => set(resolveContext(key, scoped.scope, state), state, value))
      }
    }
  }
/** implicitly injected to tasks that return PromiseWrap.
*/
final class RichTaskPromise[A](i: Def.Initialize[Task[PromiseWrap[A]]]) {
import scala.concurrent.Await
import scala.concurrent.duration._
def await: Def.Initialize[Task[A]] = await(Duration.Inf)
def await(atMost: Duration): Def.Initialize[Task[A]] =
(Def
.task {
val p = i.value
var result: Option[A] = None
if (atMost == Duration.Inf) {
while (result.isEmpty) {
try {
result = Some(Await.result(p.underlying.future, Duration("1s")))
Thread.sleep(10)
} catch {
case _: TimeoutException => ()
}
}
} else {
result = Some(Await.result(p.underlying.future, atMost))
}
result.get
})
.tag(Tags.Sentinel)
}
  import scala.reflect.macros._

  /** Implements the `project` macro: the name of the val the macro result is assigned
   * to becomes both the project ID and the name of the project's base directory. */
  def projectMacroImpl(c: blackbox.Context): c.Expr[Project] = {
    import c.universe._
    val enclosingValName = std.KeyMacro.definingValName(
      c,
      methodName =>
        s"""$methodName must be directly assigned to a val, such as `val x = $methodName`. Alternatively, you can use `sbt.Project.apply`"""
    )
    val name = c.Expr[String](Literal(Constant(enclosingValName)))
    reify { Project(name.splice, new File(name.splice)) }
  }
}
private[sbt] trait GeneratedRootProject
/** Implicit conversions and configuration/task-scoping helpers mixed into the sbt
 * package object so they are available in build definitions without imports. */
trait ProjectExtra {
  // Enables `project % config` style dependency construction.
  implicit def configDependencyConstructor[T](
      p: T
  )(implicit ev: T => ProjectReference): Constructor =
    new Constructor(p)
  // Enables using a project (reference) directly as a classpath dependency.
  implicit def classpathDependency[T](
      p: T
  )(implicit ev: T => ProjectReference): ClasspathDependency = ClasspathDependency(p, None)
  // These used to be in Project so that they didn't need to get imported (due to Initialize being nested in Project).
  // Moving Initialize and other settings types to Def and decoupling Project, Def, and Structure means these go here for now
  implicit def richInitializeTask[T](init: Initialize[Task[T]]): Scoped.RichInitializeTask[T] =
    new Scoped.RichInitializeTask(init)
  implicit def richInitializeInputTask[T](
      init: Initialize[InputTask[T]]
  ): Scoped.RichInitializeInputTask[T] =
    new Scoped.RichInitializeInputTask(init)
  implicit def richInitialize[T](i: Initialize[T]): Scoped.RichInitialize[T] =
    new Scoped.RichInitialize[T](i)
  implicit def richTaskSessionVar[T](init: Initialize[Task[T]]): Project.RichTaskSessionVar[T] =
    new Project.RichTaskSessionVar(init)
  implicit def sbtRichTaskPromise[A](
      i: Initialize[Task[PromiseWrap[A]]]
  ): Project.RichTaskPromise[A] =
    new Project.RichTaskPromise(i)
  /** Scopes `ss` to the ThisBuild (build-level) axis. */
  def inThisBuild(ss: Seq[Setting[_]]): Seq[Setting[_]] =
    inScope(ThisScope.copy(project = Select(ThisBuild)))(ss)
  /** Scopes `ss` to configuration `conf`, also setting the `configuration` key. */
  def inConfig(conf: Configuration)(ss: Seq[Setting[_]]): Seq[Setting[_]] =
    inScope(ThisScope.copy(config = Select(conf)))((configuration :== conf) +: ss)
  /** Scopes `ss` to task `t`'s axis. */
  def inTask(t: Scoped)(ss: Seq[Setting[_]]): Seq[Setting[_]] =
    inScope(ThisScope.copy(task = Select(t.key)))(ss)
  /** Replaces `This` axes in the keys and references of `ss` with `scope`. */
  def inScope(scope: Scope)(ss: Seq[Setting[_]]): Seq[Setting[_]] =
    Project.transform(Scope.replaceThis(scope), ss)
  private[sbt] def inThisBuild[T](i: Initialize[T]): Initialize[T] =
    inScope(ThisScope.copy(project = Select(ThisBuild)), i)
  private[sbt] def inConfig[T](conf: Configuration, i: Initialize[T]): Initialize[T] =
    inScope(ThisScope.copy(config = Select(conf)), i)
  private[sbt] def inTask[T](t: Scoped, i: Initialize[T]): Initialize[T] =
    inScope(ThisScope.copy(task = Select(t.key)), i)
  private[sbt] def inScope[T](scope: Scope, i: Initialize[T]): Initialize[T] =
    i mapReferenced Project.mapScope(Scope.replaceThis(scope))
  /**
   * Creates a new Project. This is a macro that expects to be assigned directly to a val.
   * The name of the val is used as the project ID and the name of the base directory of the project.
   */
  def project: Project = macro Project.projectMacroImpl
}
| sbt/sbt | main/src/main/scala/sbt/Project.scala | Scala | apache-2.0 | 37,837 |
// Negative compilation test: `*` may only spread a repeated parameter into a vararg
// argument position; using it in an ordinary expression position is rejected.
// The `// error` markers are checked by the compiler test suite and must remain on
// the same lines as the offending expressions.
object O {
  def m1(a: Int*) = (a*) // error
  def m2(a: Int*) = {
    val b = (a*) // error
    b
  }
  def m3(a: Int*): Any = {
    val b = (a*) // error
    b
  }
  def m4(a: 2*) = (a*) // error
}
class O(a: Int*) {
  val m = (a*) // error
} | dotty-staging/dotty | tests/neg/i7972.scala | Scala | apache-2.0 | 247 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.perf
import joptsimple.OptionParser
/** Command-line options shared by the Kafka performance tools; subclasses register
 * tool-specific options on the same `parser` before parsing `args`. */
class PerfConfig(args: Array[String]) {
  val parser = new OptionParser
  // Total number of messages to send or consume before the tool exits.
  val numMessagesOpt = parser.accepts("messages", "The number of messages to send or consume")
    .withRequiredArg
    .describedAs("count")
    .ofType(classOf[java.lang.Long])
    .defaultsTo(Long.MaxValue)
  // How often (in messages/intervals) progress statistics are printed.
  val reportingIntervalOpt = parser.accepts("reporting-interval", "Interval at which to print progress info.")
    .withRequiredArg
    .describedAs("size")
    .ofType(classOf[java.lang.Integer])
    .defaultsTo(5000)
  val dateFormatOpt = parser.accepts("date-format", "The date format to use for formatting the time field. " +
    "See java.text.SimpleDateFormat for options.")
    .withRequiredArg
    .describedAs("date format")
    .ofType(classOf[String])
    .defaultsTo("yyyy-MM-dd HH:mm:ss:SSS")
  val showDetailedStatsOpt = parser.accepts("show-detailed-stats", "If set, stats are reported for each reporting " +
    "interval as configured by reporting-interval")
  val hideHeaderOpt = parser.accepts("hide-header", "If set, skips printing the header for the stats ")
  // Payload size in bytes of each produced message.
  val messageSizeOpt = parser.accepts("message-size", "The size of each message.")
    .withRequiredArg
    .describedAs("size")
    .ofType(classOf[java.lang.Integer])
    .defaultsTo(100)
  val batchSizeOpt = parser.accepts("batch-size", "Number of messages to write in a single batch.")
    .withRequiredArg
    .describedAs("size")
    .ofType(classOf[java.lang.Integer])
    .defaultsTo(200)
  // Compression codec id; 0 means no compression.
  val compressionCodecOpt = parser.accepts("compression-codec", "If set, messages are sent compressed")
    .withRequiredArg
    .describedAs("compression codec ")
    .ofType(classOf[java.lang.Integer])
    .defaultsTo(0)
  val helpOpt = parser.accepts("help", "Print usage.")
}
| dchenbecker/kafka-sbt | perf/src/main/scala/kafka/perf/PerfConfig.scala | Scala | apache-2.0 | 2,594 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.api.scala
import org.apache.flink.annotation.{Internal, Public, PublicEvolving}
import org.apache.flink.api.common.ExecutionConfig
import org.apache.flink.api.common.eventtime.{TimestampAssigner, WatermarkGenerator, WatermarkStrategy}
import org.apache.flink.api.common.functions.{FilterFunction, FlatMapFunction, MapFunction, Partitioner}
import org.apache.flink.api.common.io.OutputFormat
import org.apache.flink.api.common.operators.{ResourceSpec, SlotSharingGroup}
import org.apache.flink.api.common.serialization.SerializationSchema
import org.apache.flink.api.common.state.MapStateDescriptor
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.connector.sink.Sink
import org.apache.flink.api.java.functions.KeySelector
import org.apache.flink.api.java.tuple.{Tuple => JavaTuple}
import org.apache.flink.api.java.typeutils.ResultTypeQueryable
import org.apache.flink.api.scala.operators.ScalaCsvOutputFormat
import org.apache.flink.core.fs.{FileSystem, Path}
import org.apache.flink.streaming.api.datastream.{AllWindowedStream => JavaAllWindowedStream, DataStream => JavaStream, KeyedStream => JavaKeyedStream, _}
import org.apache.flink.streaming.api.functions.sink.SinkFunction
import org.apache.flink.streaming.api.functions.timestamps.{AscendingTimestampExtractor, BoundedOutOfOrdernessTimestampExtractor}
import org.apache.flink.streaming.api.functions.{AssignerWithPeriodicWatermarks, AssignerWithPunctuatedWatermarks, ProcessFunction}
import org.apache.flink.streaming.api.operators.OneInputStreamOperator
import org.apache.flink.streaming.api.windowing.assigners._
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.{GlobalWindow, TimeWindow, Window}
import org.apache.flink.util.{CloseableIterator, Collector}
import scala.collection.JavaConverters._
/**
 * The Scala wrapper around a Java [[JavaStream]]: all operations delegate to the
 * wrapped stream, exposing the streaming API with Scala-friendly types.
 *
 * @param stream the wrapped Java DataStream
 */
@Public
class DataStream[T](stream: JavaStream[T]) {
  /**
   * Returns the [[StreamExecutionEnvironment]] associated with the current [[DataStream]].
   *
   * @return associated execution environment
   * @deprecated Use [[executionEnvironment]] instead
   */
  @deprecated
  @PublicEvolving
  def getExecutionEnvironment: StreamExecutionEnvironment =
    new StreamExecutionEnvironment(stream.getExecutionEnvironment)
  /**
   * Returns the TypeInformation for the elements of this DataStream.
   *
   * @deprecated Use [[dataType]] instead.
   */
  @deprecated
  @PublicEvolving
  def getType(): TypeInformation[T] = stream.getType()
  /**
   * Returns the parallelism of this operation.
   *
   * @deprecated Use [[parallelism]] instead.
   */
  @deprecated
  @PublicEvolving
  def getParallelism = stream.getParallelism
  /**
   * Returns the execution config.
   *
   * @deprecated Use [[executionConfig]] instead.
   */
  @deprecated
  @PublicEvolving
  def getExecutionConfig = stream.getExecutionConfig
  /**
   * Returns the unique ID of this DataStream, as assigned by the underlying API.
   */
  @Internal
  private[flink] def getId = stream.getId()
  // --------------------------------------------------------------------------
  //  Scala-esque accessors (non-deprecated counterparts of the getters above)
  // --------------------------------------------------------------------------
  /**
   * Gets the underlying java DataStream object.
   */
  def javaStream: JavaStream[T] = stream
  /**
   * Returns the TypeInformation for the elements of this DataStream.
   */
  def dataType: TypeInformation[T] = stream.getType()
  /**
   * Returns the execution config.
   */
  def executionConfig: ExecutionConfig = stream.getExecutionConfig()
  /**
   * Returns the [[StreamExecutionEnvironment]] associated with this data stream.
   */
  def executionEnvironment: StreamExecutionEnvironment =
    new StreamExecutionEnvironment(stream.getExecutionEnvironment())
  /**
   * Returns the parallelism of this operation.
   */
  def parallelism: Int = stream.getParallelism()
  /**
   * Sets the parallelism of this operation. This must be at least 1.
   * Only supported when the underlying stream is a SingleOutputStreamOperator.
   */
  def setParallelism(parallelism: Int): DataStream[T] = {
    stream match {
      case ds: SingleOutputStreamOperator[T] => ds.setParallelism(parallelism)
      case _ =>
        throw new UnsupportedOperationException(
          "Operator " + stream + " cannot set the parallelism.")
    }
    this
  }
def setMaxParallelism(maxParallelism: Int): DataStream[T] = {
stream match {
case ds: SingleOutputStreamOperator[T] => ds.setMaxParallelism(maxParallelism)
case _ =>
throw new UnsupportedOperationException("Operator " + stream + " cannot set the maximum" +
"paralllelism")
}
this
}
  /**
   * Returns the minimum resources of this operation.
   */
  @PublicEvolving
  def minResources: ResourceSpec = stream.getMinResources()
  /**
   * Returns the preferred resources of this operation.
   */
  @PublicEvolving
  def preferredResources: ResourceSpec = stream.getPreferredResources()
  // ---------------------------------------------------------------------------
  // Fine-grained resource profiles are an incomplete work-in-progress feature
  // The setters are hence commented out at this point.
  // ---------------------------------------------------------------------------
  //  /**
  //   * Sets the minimum and preferred resources of this operation.
  //   */
  //  @PublicEvolving
  //  def resources(minResources: ResourceSpec, preferredResources: ResourceSpec) : DataStream[T] =
  //    stream match {
  //      case stream : SingleOutputStreamOperator[T] => asScalaStream(
  //        stream.setResources(minResources, preferredResources))
  //      case _ =>
  //        throw new UnsupportedOperationException("Operator does not support " +
  //          "configuring custom resources specs.")
  //        this
  //    }
  //
  //  /**
  //   * Sets the resource of this operation.
  //   */
  //  @PublicEvolving
  //  def resources(resources: ResourceSpec) : Unit = {
  //    this.resources(resources, resources)
  //  }
  /**
   * Gets the name of the current data stream. This name is
   * used by the visualization and logging during runtime.
   * Only supported when the underlying stream is an operator.
   *
   * @return Name of the stream.
   */
  def name: String = stream match {
    case stream : SingleOutputStreamOperator[T] => stream.getName
    case _ => throw new
        UnsupportedOperationException("Only supported for operators.")
  }
  // --------------------------------------------------------------------------
  /**
   * Gets the name of the current data stream. This name is
   * used by the visualization and logging during runtime.
   *
   * @return Name of the stream.
   * @deprecated Use [[name]] instead
   */
  @deprecated
  @PublicEvolving
  def getName : String = name
/**
* Sets the name of the current data stream. This name is
* used by the visualization and logging during runtime.
*
* @return The named operator
*/
def name(name: String) : DataStream[T] = stream match {
case stream : SingleOutputStreamOperator[T] => asScalaStream(stream.name(name))
case _ => throw new UnsupportedOperationException("Only supported for operators.")
this
}
/**
* Sets an ID for this operator.
*
* The specified ID is used to assign the same operator ID across job
* submissions (for example when starting a job from a savepoint).
*
* <strong>Important</strong>: this ID needs to be unique per
* transformation and job. Otherwise, job submission will fail.
*
* @param uid The unique user-specified ID of this transformation.
* @return The operator with the specified ID.
*/
@PublicEvolving
def uid(uid: String) : DataStream[T] = javaStream match {
case stream : SingleOutputStreamOperator[T] => asScalaStream(stream.uid(uid))
case _ => throw new UnsupportedOperationException("Only supported for operators.")
this
}
@PublicEvolving
def getSideOutput[X: TypeInformation](tag: OutputTag[X]): DataStream[X] = javaStream match {
case stream : SingleOutputStreamOperator[X] =>
asScalaStream(stream.getSideOutput(tag: OutputTag[X]))
}
/**
* Sets an user provided hash for this operator. This will be used AS IS the create
* the JobVertexID.
* <p/>
* <p>The user provided hash is an alternative to the generated hashes, that is
* considered when identifying an operator through the default hash mechanics fails
* (e.g. because of changes between Flink versions).
* <p/>
* <p><strong>Important</strong>: this should be used as a workaround or for trouble
* shooting. The provided hash needs to be unique per transformation and job. Otherwise,
* job submission will fail. Furthermore, you cannot assign user-specified hash to
* intermediate nodes in an operator chain and trying so will let your job fail.
*
* @param hash the user provided hash for this operator.
* @return The operator with the user provided hash.
*/
@PublicEvolving
def setUidHash(hash: String) : DataStream[T] = javaStream match {
case stream : SingleOutputStreamOperator[T] =>
asScalaStream(stream.setUidHash(hash))
case _ => throw new UnsupportedOperationException("Only supported for operators.")
this
}
/**
* Turns off chaining for this operator so thread co-location will not be
* used as an optimization. </p> Chaining can be turned off for the whole
* job by [[StreamExecutionEnvironment.disableOperatorChaining()]]
* however it is not advised for performance considerations.
*
*/
@PublicEvolving
def disableChaining(): DataStream[T] = {
stream match {
case ds: SingleOutputStreamOperator[T] => ds.disableChaining()
case _ =>
throw new UnsupportedOperationException("Only supported for operators.")
}
this
}
/**
* Starts a new task chain beginning at this operator. This operator will
* not be chained (thread co-located for increased performance) to any
* previous tasks even if possible.
*
*/
@PublicEvolving
def startNewChain(): DataStream[T] = {
stream match {
case ds: SingleOutputStreamOperator[T] => ds.startNewChain()
case _ =>
throw new UnsupportedOperationException("Only supported for operators.")
}
this
}
/**
* Sets the slot sharing group of this operation. Parallel instances of
* operations that are in the same slot sharing group will be co-located in the same
* TaskManager slot, if possible.
*
* Operations inherit the slot sharing group of input operations if all input operations
* are in the same slot sharing group and no slot sharing group was explicitly specified.
*
* Initially an operation is in the default slot sharing group. An operation can be put into
* the default group explicitly by setting the slot sharing group to `"default"`.
*
* @param slotSharingGroup The slot sharing group name.
*/
@PublicEvolving
def slotSharingGroup(slotSharingGroup: String): DataStream[T] = {
stream match {
case ds: SingleOutputStreamOperator[T] => ds.slotSharingGroup(slotSharingGroup)
case _ =>
throw new UnsupportedOperationException("Only supported for operators.")
}
this
}
/**
* Sets the slot sharing group of this operation. Parallel instances of
* operations that are in the same slot sharing group will be co-located in the same
* TaskManager slot, if possible.
*
* Operations inherit the slot sharing group of input operations if all input operations
* are in the same slot sharing group and no slot sharing group was explicitly specified.
*
* Initially an operation is in the default slot sharing group. An operation can be put into
* the default group explicitly by setting the slot sharing group to `"default"`.
*
* @param slotSharingGroup Which contains name and its resource spec.
*/
@PublicEvolving
def slotSharingGroup(slotSharingGroup: SlotSharingGroup): DataStream[T] = {
stream match {
case ds: SingleOutputStreamOperator[T] => ds.slotSharingGroup(slotSharingGroup)
case _ =>
throw new UnsupportedOperationException("Only supported for operators.")
}
this
}
/**
* Sets the maximum time frequency (ms) for the flushing of the output
* buffer. By default the output buffers flush only when they are full.
*
* @param timeoutMillis
* The maximum time between two output flushes.
* @return The operator with buffer timeout set.
*/
def setBufferTimeout(timeoutMillis: Long): DataStream[T] = {
stream match {
case ds: SingleOutputStreamOperator[T] => ds.setBufferTimeout(timeoutMillis)
case _ =>
throw new UnsupportedOperationException("Only supported for operators.")
}
this
}
  // --------------------------------------------------------------------------
  //  Stream Transformations
  // --------------------------------------------------------------------------
  /**
   * Creates a new DataStream by merging DataStream outputs of
   * the same type with each other. The DataStreams merged using this operator
   * will be transformed simultaneously.
   */
  def union(dataStreams: DataStream[T]*): DataStream[T] =
    asScalaStream(stream.union(dataStreams.map(_.javaStream): _*))
  /**
   * Creates a new ConnectedStreams by connecting
   * DataStream outputs of different type with each other. The
   * DataStreams connected using this operator can be used with CoFunctions.
   */
  def connect[T2](dataStream: DataStream[T2]): ConnectedStreams[T, T2] =
    asScalaStream(stream.connect(dataStream.javaStream))
  /**
   * Creates a new [[BroadcastConnectedStream]] by connecting the current
   * [[DataStream]] or [[KeyedStream]] with a [[BroadcastStream]].
   *
   * The latter can be created using the [[broadcast(MapStateDescriptor[])]] method.
   *
   * The resulting stream can be further processed using the
   * ``broadcastConnectedStream.process(myFunction)``
   * method, where ``myFunction`` can be either a
   * [[org.apache.flink.streaming.api.functions.co.KeyedBroadcastProcessFunction]]
   * or a [[org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction]]
   * depending on the current stream being a [[KeyedStream]] or not.
   *
   * @param broadcastStream The broadcast stream with the broadcast state to be
   *                        connected with this stream.
   * @return The [[BroadcastConnectedStream]].
   */
  @PublicEvolving
  def connect[R](broadcastStream: BroadcastStream[R]): BroadcastConnectedStream[T, R] =
    asScalaStream(stream.connect(broadcastStream))
  /**
   * Groups the elements of a DataStream by the given key positions (for tuple/array types) to
   * be used with grouped operators like grouped reduce or grouped aggregations.
   */
  @deprecated("use [[DataStream.keyBy(KeySelector)]] instead")
  def keyBy(fields: Int*): KeyedStream[T, JavaTuple] = asScalaStream(stream.keyBy(fields: _*))
  /**
   * Groups the elements of a DataStream by the given field expressions to
   * be used with grouped operators like grouped reduce or grouped aggregations.
   */
  @deprecated("use [[DataStream.keyBy(KeySelector)]] instead")
  def keyBy(firstField: String, otherFields: String*): KeyedStream[T, JavaTuple] =
    asScalaStream(stream.keyBy(firstField +: otherFields.toArray: _*))
  /**
   * Groups the elements of a DataStream by the key extracted by `fun` to
   * be used with grouped operators like grouped reduce or grouped aggregations.
   */
  def keyBy[K: TypeInformation](fun: T => K): KeyedStream[T, K] = {
    val cleanFun = clean(fun)
    val keyType: TypeInformation[K] = implicitly[TypeInformation[K]]
    // Wrap the Scala function as a KeySelector that also reports its produced type.
    val keyExtractor = new KeySelector[T, K] with ResultTypeQueryable[K] {
      def getKey(in: T) = cleanFun(in)
      override def getProducedType: TypeInformation[K] = keyType
    }
    asScalaStream(new JavaKeyedStream(stream, keyExtractor, keyType))
  }
  /**
   * Groups the elements of a DataStream by the given KeySelector to
   * be used with grouped operators like grouped reduce or grouped aggregations.
   */
  def keyBy[K: TypeInformation](fun: KeySelector[T, K]): KeyedStream[T, K] = {
    val cleanFun = clean(fun)
    val keyType: TypeInformation[K] = implicitly[TypeInformation[K]]
    asScalaStream(new JavaKeyedStream(stream, cleanFun, keyType))
  }
  /**
   * Partitions a tuple DataStream on the specified key fields using a custom partitioner.
   * This method takes the key position to partition on, and a partitioner that accepts the key
   * type.
   *
   * Note: This method works only on single field keys.
   */
  @deprecated("Use [[DataStream.partitionCustom(Partitioner, Function1)]] instead")
  def partitionCustom[K: TypeInformation](partitioner: Partitioner[K], field: Int) : DataStream[T] =
    asScalaStream(stream.partitionCustom(partitioner, field))
  /**
   * Partitions a POJO DataStream on the specified key fields using a custom partitioner.
   * This method takes the key expression to partition on, and a partitioner that accepts the key
   * type.
   *
   * Note: This method works only on single field keys.
   */
  @deprecated("Use [[DataStream.partitionCustom(Partitioner, Function1)]] instead")
  def partitionCustom[K: TypeInformation](partitioner: Partitioner[K], field: String)
      : DataStream[T] =
    asScalaStream(stream.partitionCustom(partitioner, field))
  /**
   * Partitions a DataStream on the key returned by the selector, using a custom partitioner.
   * This method takes the key selector to get the key to partition on, and a partitioner that
   * accepts the key type.
   *
   * Note: This method works only on single field keys, i.e. the selector cannot return tuples
   * of fields.
   */
  def partitionCustom[K: TypeInformation](partitioner: Partitioner[K], fun: T => K)
      : DataStream[T] = {
    val keyType = implicitly[TypeInformation[K]]
    val cleanFun = clean(fun)
    // Wrap the Scala function as a KeySelector that also reports its produced type.
    val keyExtractor = new KeySelector[T, K] with ResultTypeQueryable[K] {
      def getKey(in: T) = cleanFun(in)
      override def getProducedType(): TypeInformation[K] = keyType
    }
    asScalaStream(stream.partitionCustom(partitioner, keyExtractor))
  }
/**
 * Sets the partitioning of the DataStream so that the output elements
 * are broadcast to every parallel instance of the next operation.
 */
def broadcast: DataStream[T] = {
  val broadcasted = stream.broadcast()
  asScalaStream(broadcasted)
}

/**
 * Sets the partitioning of the [[DataStream]] so that the output elements
 * are broadcast to every parallel instance of the next operation, and implicitly
 * creates one [[org.apache.flink.api.common.state.BroadcastState broadcast state]]
 * per supplied descriptor, which can be used to store the elements of the stream.
 *
 * @param broadcastStateDescriptors the descriptors of the broadcast states to create.
 * @return A [[BroadcastStream]] which can be used in
 *         [[DataStream.connect(BroadcastStream)]] to create a
 *         [[BroadcastConnectedStream]] for further processing of the elements.
 */
@PublicEvolving
def broadcast(broadcastStateDescriptors: MapStateDescriptor[_, _]*): BroadcastStream[T] = {
  // Preserve the Java API's contract of rejecting a null descriptor list.
  if (broadcastStateDescriptors == null) {
    throw new NullPointerException("State Descriptors must not be null.")
  }
  javaStream.broadcast(broadcastStateDescriptors: _*)
}
/**
 * Sets the partitioning of the DataStream so that all output values go to
 * the first instance of the next processing operator. Use with care, since
 * it can become a serious performance bottleneck.
 */
@PublicEvolving
def global: DataStream[T] = {
  val repartitioned = stream.global()
  asScalaStream(repartitioned)
}

/**
 * Sets the partitioning of the DataStream so that the output elements
 * are shuffled to the next operation.
 */
@PublicEvolving
def shuffle: DataStream[T] = {
  val repartitioned = stream.shuffle()
  asScalaStream(repartitioned)
}

/**
 * Sets the partitioning of the DataStream so that the output elements
 * are forwarded to the local subtask of the next operation (whenever possible).
 */
def forward: DataStream[T] = {
  val repartitioned = stream.forward()
  asScalaStream(repartitioned)
}

/**
 * Sets the partitioning of the DataStream so that the output elements
 * are distributed evenly (round-robin) to the next operation.
 */
def rebalance: DataStream[T] = {
  val repartitioned = stream.rebalance()
  asScalaStream(repartitioned)
}

/**
 * Sets the partitioning of the [[DataStream]] so that the output elements
 * are distributed evenly to a subset of instances of the downstream operation.
 *
 * Which downstream subtasks receive elements depends on the parallelism of both
 * the upstream and downstream operation: e.g. upstream parallelism 2 and
 * downstream parallelism 4 means each upstream subtask feeds two downstream
 * subtasks; with the ratio reversed, pairs of upstream subtasks share one
 * downstream subtask. When the parallelisms are not multiples of each other,
 * some downstream subtasks will receive input from a differing number of
 * upstream subtasks.
 */
@PublicEvolving
def rescale: DataStream[T] = {
  val repartitioned = stream.rescale()
  asScalaStream(repartitioned)
}
/**
 * Initiates an iterative part of the program that creates a loop by feeding
 * back data streams. The step function transforms the iteration head into a
 * pair of streams: the first is fed back into the head of the loop, the
 * second is the output of the iterative part.
 *
 * stepfunction: initialStream => (feedback, output)
 *
 * A common pattern is to use side outputs of a [[ProcessFunction]] to split
 * the stream into feedback and output.
 *
 * By default an iterating DataStream never terminates; set maxWaitTimeMillis
 * to a positive value to terminate the iteration head when no data has been
 * received for that long.
 *
 * Parallelism of the feedback stream must match the parallelism of the
 * original stream; see [[setParallelism]] for parallelism modification.
 */
@PublicEvolving
def iterate[R](stepFunction: DataStream[T] => (DataStream[T], DataStream[R]),
      maxWaitTimeMillis:Long = 0) : DataStream[R] = {
  val javaIteration = stream.iterate(maxWaitTimeMillis)
  val iterationHead = new DataStream[T](javaIteration)
  stepFunction(iterationHead) match {
    case (feedback, output) =>
      // Close the loop by wiring the feedback stream back into the head.
      javaIteration.closeWith(feedback.javaStream)
      output
  }
}
/**
 * Initiates an iterative part of the program whose feedback channel has a
 * different type than the input. The step function receives a
 * [[ConnectedStreams]] of the original input connected with the feedback
 * stream, letting the user distinguish standard input from feedback input,
 * and must return the feedback stream and the output stream.
 *
 * stepfunction: initialStream => (feedback, output)
 *
 * The max waiting time for the iteration head must be set: if no data is
 * received within that time the stream terminates. With a value of 0 the
 * iteration sources run indefinitely, so the job must be killed to stop.
 */
@PublicEvolving
def iterate[R, F: TypeInformation](
    stepFunction: ConnectedStreams[T, F] => (DataStream[F], DataStream[R]),
    maxWaitTimeMillis:Long): DataStream[R] = {
  val feedbackTypeInfo = implicitly[TypeInformation[F]]
  val connectedIteration =
    stream.iterate(maxWaitTimeMillis).withFeedbackType(feedbackTypeInfo)
  stepFunction(asScalaStream(connectedIteration)) match {
    case (feedback, output) =>
      // Close the loop by wiring the feedback stream back into the head.
      connectedIteration.closeWith(feedback.javaStream)
      output
  }
}
/**
 * Creates a new DataStream by applying the given function to every element of
 * this DataStream.
 */
def map[R: TypeInformation](fun: T => R): DataStream[R] = {
  if (fun == null) {
    throw new NullPointerException("Map function must not be null.")
  }
  val cleanedFun = clean(fun)
  // Delegate to the MapFunction overload so type extraction happens in one place.
  map(new MapFunction[T, R] {
    def map(in: T): R = cleanedFun(in)
  })
}

/**
 * Creates a new DataStream by applying the given [[MapFunction]] to every
 * element of this DataStream.
 */
def map[R: TypeInformation](mapper: MapFunction[T, R]): DataStream[R] = {
  if (mapper == null) {
    throw new NullPointerException("Map function must not be null.")
  }
  val resultType = implicitly[TypeInformation[R]]
  asScalaStream(stream.map(mapper, resultType).asInstanceOf[JavaStream[R]])
}
/**
 * Creates a new DataStream by applying the given [[FlatMapFunction]] to every
 * element and flattening the results.
 */
def flatMap[R: TypeInformation](flatMapper: FlatMapFunction[T, R]): DataStream[R] = {
  if (flatMapper == null) {
    throw new NullPointerException("FlatMap function must not be null.")
  }
  val outType : TypeInformation[R] = implicitly[TypeInformation[R]]
  asScalaStream(stream.flatMap(flatMapper, outType).asInstanceOf[JavaStream[R]])
}

/**
 * Creates a new DataStream by applying the given function to every element and
 * flattening the results.
 *
 * @param fun a function invoked once per element together with a [[Collector]]
 *            used to emit zero or more output records
 */
def flatMap[R: TypeInformation](fun: (T, Collector[R]) => Unit): DataStream[R] = {
  if (fun == null) {
    throw new NullPointerException("FlatMap function must not be null.")
  }
  val cleanFun = clean(fun)
  val flatMapper = new FlatMapFunction[T, R] {
    // Explicit ": Unit =" instead of deprecated procedure syntax.
    def flatMap(in: T, out: Collector[R]): Unit = cleanFun(in, out)
  }
  flatMap(flatMapper)
}

/**
 * Creates a new DataStream by applying the given function to every element and
 * flattening the results.
 *
 * @param fun a function producing a collection of output records per input element
 */
def flatMap[R: TypeInformation](fun: T => TraversableOnce[R]): DataStream[R] = {
  if (fun == null) {
    throw new NullPointerException("FlatMap function must not be null.")
  }
  val cleanFun = clean(fun)
  val flatMapper = new FlatMapFunction[T, R] {
    // Explicit ": Unit =" instead of deprecated procedure syntax.
    def flatMap(in: T, out: Collector[R]): Unit = cleanFun(in).foreach(out.collect)
  }
  flatMap(flatMapper)
}
/**
 * Applies the given [[ProcessFunction]] on the input stream, thereby
 * creating a transformed output stream. The function is called for every
 * element in the stream and can produce zero or more output records.
 *
 * @param processFunction The [[ProcessFunction]] that is called for each element
 *                        in the stream.
 */
@PublicEvolving
def process[R: TypeInformation](
    processFunction: ProcessFunction[T, R]): DataStream[R] = {
  if (processFunction == null) {
    throw new NullPointerException("ProcessFunction must not be null.")
  }
  val outTypeInfo = implicitly[TypeInformation[R]]
  asScalaStream(javaStream.process(processFunction, outTypeInfo))
}

/**
 * Creates a new DataStream containing only the elements that satisfy the given
 * [[FilterFunction]] predicate.
 */
def filter(filter: FilterFunction[T]): DataStream[T] = {
  if (filter == null) {
    throw new NullPointerException("Filter function must not be null.")
  }
  asScalaStream(stream.filter(filter))
}

/**
 * Creates a new DataStream containing only the elements that satisfy the given
 * filter predicate.
 */
def filter(fun: T => Boolean): DataStream[T] = {
  if (fun == null) {
    throw new NullPointerException("Filter function must not be null.")
  }
  val cleanedPredicate = clean(fun)
  // Delegate to the FilterFunction overload.
  filter(new FilterFunction[T] {
    def filter(in: T): Boolean = cleanedPredicate(in)
  })
}
/**
 * Windows this DataStream into tumbling time windows.
 *
 * Shortcut for either `.window(TumblingEventTimeWindows.of(size))` or
 * `.window(TumblingProcessingTimeWindows.of(size))` depending on the time
 * characteristic set via [[StreamExecutionEnvironment.setStreamTimeCharacteristic]].
 *
 * Note: This operation can be inherently non-parallel since all elements have to pass
 * through the same operator instance. (Only for special cases, such as aligned time
 * windows, can it be performed in parallel.)
 *
 * @param size The size of the window.
 *
 * @deprecated Please use [[windowAll()]] with either [[TumblingEventTimeWindows]] or
 *             [[TumblingProcessingTimeWindows]]. For more information, see the
 *             deprecation notice on [[org.apache.flink.streaming.api.TimeCharacteristic]].
 */
@deprecated
def timeWindowAll(size: Time): AllWindowedStream[T, TimeWindow] = {
  val javaWindowed = javaStream.timeWindowAll(size)
  new AllWindowedStream(javaWindowed)
}

/**
 * Windows this DataStream into sliding time windows.
 *
 * Shortcut for either `.window(SlidingEventTimeWindows.of(size, slide))` or
 * `.window(SlidingProcessingTimeWindows.of(size, slide))` depending on the time
 * characteristic set via [[StreamExecutionEnvironment.setStreamTimeCharacteristic]].
 *
 * Note: This operation can be inherently non-parallel since all elements have to pass
 * through the same operator instance. (Only for special cases, such as aligned time
 * windows, can it be performed in parallel.)
 *
 * @param size The size of the window.
 *
 * @deprecated Please use [[windowAll()]] with either [[SlidingEventTimeWindows]] or
 *             [[SlidingProcessingTimeWindows]]. For more information, see the
 *             deprecation notice on [[org.apache.flink.streaming.api.TimeCharacteristic]].
 */
@deprecated
def timeWindowAll(size: Time, slide: Time): AllWindowedStream[T, TimeWindow] = {
  val javaWindowed = javaStream.timeWindowAll(size, slide)
  new AllWindowedStream(javaWindowed)
}
/**
 * Windows this [[DataStream]] into sliding count windows.
 *
 * Note: This operation can be inherently non-parallel since all elements have to pass
 * through the same operator instance. (Only for special cases, such as aligned time
 * windows, can it be performed in parallel.)
 *
 * @param size The size of the windows in number of elements.
 * @param slide The slide interval in number of elements.
 */
def countWindowAll(size: Long, slide: Long): AllWindowedStream[T, GlobalWindow] = {
  val javaWindowed = stream.countWindowAll(size, slide)
  new AllWindowedStream(javaWindowed)
}

/**
 * Windows this [[DataStream]] into tumbling count windows.
 *
 * Note: This operation can be inherently non-parallel since all elements have to pass
 * through the same operator instance. (Only for special cases, such as aligned time
 * windows, can it be performed in parallel.)
 *
 * @param size The size of the windows in number of elements.
 */
def countWindowAll(size: Long): AllWindowedStream[T, GlobalWindow] = {
  val javaWindowed = stream.countWindowAll(size)
  new AllWindowedStream(javaWindowed)
}
/**
 * Windows this data stream to an [[AllWindowedStream]], which evaluates windows over
 * the whole (non-keyed) stream. Elements are put into windows by the given
 * [[WindowAssigner]].
 *
 * A [[org.apache.flink.streaming.api.windowing.triggers.Trigger]] can be defined to
 * specify when windows are evaluated; each `WindowAssigner` comes with a default
 * `Trigger` that is used if none is specified.
 *
 * Note: This operation can be inherently non-parallel since all elements have to pass
 * through the same operator instance. (Only for special cases, such as aligned time
 * windows, can it be performed in parallel.)
 *
 * @param assigner The `WindowAssigner` that assigns elements to windows.
 * @return The trigger windows data stream.
 */
@PublicEvolving
def windowAll[W <: Window](assigner: WindowAssigner[_ >: T, W]): AllWindowedStream[T, W] = {
  val javaWindowed = new JavaAllWindowedStream[T, W](stream, assigner)
  new AllWindowedStream[T, W](javaWindowed)
}
/**
 * Assigns timestamps to the elements in the data stream and generates watermarks to signal
 * event time progress. The given [[WatermarkStrategy]] is used to create a
 * [[TimestampAssigner]] and [[org.apache.flink.api.common.eventtime.WatermarkGenerator]].
 *
 * For each event in the data stream, the [[TimestampAssigner#extractTimestamp(Object, long)]]
 * method is called to assign an event timestamp.
 *
 * For each event in the data stream, the
 * [[WatermarkGenerator#onEvent(Object, long, WatermarkOutput)]] will be called.
 *
 * Periodically (defined by the [[ExecutionConfig#getAutoWatermarkInterval()]]), the
 * [[WatermarkGenerator#onPeriodicEmit(WatermarkOutput)]] method will be called.
 *
 * Common watermark generation patterns can be found as static methods in the
 * [[org.apache.flink.api.common.eventtime.WatermarkStrategy]] class.
 */
def assignTimestampsAndWatermarks(watermarkStrategy: WatermarkStrategy[T]): DataStream[T] = {
  // Closure-clean the strategy before handing it to the Java API.
  val cleanedStrategy = clean(watermarkStrategy)
  asScalaStream(stream.assignTimestampsAndWatermarks(cleanedStrategy))
}
/**
 * Assigns timestamps to the elements in the data stream and periodically creates
 * watermarks to signal event time progress.
 *
 * This method uses the deprecated watermark generator interfaces. Please switch to
 * [[assignTimestampsAndWatermarks(WatermarkStrategy)]] to use the
 * new interfaces instead. The new interfaces support watermark idleness and no longer need
 * to differentiate between "periodic" and "punctuated" watermarks.
 *
 * @deprecated please use [[assignTimestampsAndWatermarks()]]
 */
@deprecated
@PublicEvolving
def assignTimestampsAndWatermarks(assigner: AssignerWithPeriodicWatermarks[T]): DataStream[T] = {
  asScalaStream(stream.assignTimestampsAndWatermarks(assigner))
}
/**
 * Assigns timestamps to the elements in the data stream and creates watermarks
 * based on individual elements to signal event time progress.
 *
 * This method uses the deprecated watermark generator interfaces. Please switch to
 * [[assignTimestampsAndWatermarks(WatermarkStrategy)]] to use the
 * new interfaces instead. The new interfaces support watermark idleness and no longer need
 * to differentiate between "periodic" and "punctuated" watermarks.
 *
 * @deprecated please use [[assignTimestampsAndWatermarks()]]
 */
@deprecated
@PublicEvolving
def assignTimestampsAndWatermarks(assigner: AssignerWithPunctuatedWatermarks[T])
  : DataStream[T] = {
  asScalaStream(stream.assignTimestampsAndWatermarks(assigner))
}
/**
 * Assigns timestamps to the elements in the data stream and periodically creates
 * watermarks to signal event time progress.
 *
 * This is a shortcut for data streams where the element timestamps are known to be
 * monotonously ascending within each parallel stream; the system then generates
 * watermarks automatically by tracking the ascending timestamps.
 *
 * For timestamps that are not monotonously increasing, use the more general methods
 * [[assignTimestampsAndWatermarks(AssignerWithPeriodicWatermarks)]]
 * and [[assignTimestampsAndWatermarks(AssignerWithPunctuatedWatermarks)]].
 */
@PublicEvolving
def assignAscendingTimestamps(extractor: T => Long): DataStream[T] = {
  val cleanedExtractor = clean(extractor)
  val timestampExtractor = new AscendingTimestampExtractor[T] {
    def extractAscendingTimestamp(element: T): Long = cleanedExtractor(element)
  }
  asScalaStream(stream.assignTimestampsAndWatermarks(timestampExtractor))
}
/**
 * Creates a co-group operation with the given stream. See [[CoGroupedStreams]] for
 * an example of how the keys and window can be specified.
 */
def coGroup[T2](otherStream: DataStream[T2]): CoGroupedStreams[T, T2] =
  new CoGroupedStreams(this, otherStream)

/**
 * Creates a join operation with the given stream. See [[JoinedStreams]] for
 * an example of how the keys and window can be specified.
 */
def join[T2](otherStream: DataStream[T2]): JoinedStreams[T, T2] =
  new JoinedStreams(this, otherStream)
/**
 * Writes a DataStream to the standard output stream (stdout). For each
 * element of the DataStream the result of [[AnyRef.toString()]] is written.
 *
 * @return The closed DataStream.
 */
@PublicEvolving
def print(): DataStreamSink[T] = stream.print()

/**
 * Writes a DataStream to the standard error stream (stderr).
 *
 * For each element of the DataStream the result of
 * [[AnyRef.toString()]] is written.
 *
 * @return The closed DataStream.
 */
@PublicEvolving
// Explicit return type added for consistency with print().
def printToErr(): DataStreamSink[T] = stream.printToErr()

/**
 * Writes a DataStream to the standard output stream (stdout). For each
 * element of the DataStream the result of [[AnyRef.toString()]] is
 * written.
 *
 * @param sinkIdentifier The string to prefix the output with.
 * @return The closed DataStream.
 */
@PublicEvolving
def print(sinkIdentifier: String): DataStreamSink[T] = stream.print(sinkIdentifier)

/**
 * Writes a DataStream to the standard error stream (stderr).
 *
 * For each element of the DataStream the result of
 * [[AnyRef.toString()]] is written.
 *
 * @param sinkIdentifier The string to prefix the output with.
 * @return The closed DataStream.
 */
@PublicEvolving
// Explicit return type added for consistency with print(String).
def printToErr(sinkIdentifier: String): DataStreamSink[T] = stream.printToErr(sinkIdentifier)
/**
 * Writes a DataStream to the file specified by path in text format. For
 * every element of the DataStream the result of .toString is written.
 *
 * @param path The path pointing to the location the text file is written to
 * @return The closed DataStream
 *
 * @deprecated Please use the
 *             [[org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink]]
 *             explicitly using the [[addSink()]] method.
 */
@Deprecated
@PublicEvolving
def writeAsText(path: String): DataStreamSink[T] = stream.writeAsText(path)

/**
 * Writes a DataStream to the file specified by path in text format. For
 * every element of the DataStream the result of .toString is written.
 *
 * @param path The path pointing to the location the text file is written to
 * @param writeMode Controls the behavior for existing files. Options are NO_OVERWRITE and
 *                  OVERWRITE; a null value falls back to the default behavior.
 * @return The closed DataStream
 *
 * @deprecated Please use the
 *             [[org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink]]
 *             explicitly using the [[addSink()]] method.
 */
@Deprecated
@PublicEvolving
def writeAsText(path: String, writeMode: FileSystem.WriteMode): DataStreamSink[T] = {
  if (writeMode == null) {
    stream.writeAsText(path)
  } else {
    stream.writeAsText(path, writeMode)
  }
}
/**
 * Writes the DataStream in CSV format to the file specified by the path parameter,
 * using the default line and field delimiters.
 *
 * @param path Path to the location of the CSV file
 * @return The closed DataStream
 *
 * @deprecated Please use the
 *             [[org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink]]
 *             explicitly using the [[addSink()]] method.
 */
@Deprecated
@PublicEvolving
def writeAsCsv(path: String): DataStreamSink[T] =
  writeAsCsv(path, null, ScalaCsvOutputFormat.DEFAULT_LINE_DELIMITER,
    ScalaCsvOutputFormat.DEFAULT_FIELD_DELIMITER)

/**
 * Writes the DataStream in CSV format to the file specified by the path parameter,
 * using the default line and field delimiters.
 *
 * @param path Path to the location of the CSV file
 * @param writeMode Controls whether an existing file is overwritten or not
 * @return The closed DataStream
 *
 * @deprecated Please use the
 *             [[org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink]]
 *             explicitly using the [[addSink()]] method.
 */
@Deprecated
@PublicEvolving
def writeAsCsv(path: String, writeMode: FileSystem.WriteMode): DataStreamSink[T] =
  writeAsCsv(path, writeMode, ScalaCsvOutputFormat.DEFAULT_LINE_DELIMITER,
    ScalaCsvOutputFormat.DEFAULT_FIELD_DELIMITER)
/**
 * Writes the DataStream in CSV format to the file specified by the path parameter.
 *
 * @param path Path to the location of the CSV file
 * @param writeMode Controls whether an existing file is overwritten or not; may be null,
 *                  in which case the output format's default behavior is used
 * @param rowDelimiter Delimiter for consecutive rows
 * @param fieldDelimiter Delimiter for consecutive fields
 * @return The closed DataStream
 *
 * @deprecated Please use the
 *             [[org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink]]
 *             explicitly using the [[addSink()]] method.
 */
@Deprecated
@PublicEvolving
def writeAsCsv(
    path: String,
    writeMode: FileSystem.WriteMode,
    rowDelimiter: String,
    fieldDelimiter: String)
  : DataStreamSink[T] = {
  // Message fixed: this is the DataStream API, the old text said "DataSets".
  require(stream.getType.isTupleType, "CSV output can only be used with Tuple DataStreams.")
  val of = new ScalaCsvOutputFormat[Product](new Path(path), rowDelimiter, fieldDelimiter)
  if (writeMode != null) {
    of.setWriteMode(writeMode)
  }
  stream.writeUsingOutputFormat(of.asInstanceOf[OutputFormat[T]])
}
/**
 * Writes a DataStream using the given [[OutputFormat]].
 */
@PublicEvolving
def writeUsingOutputFormat(format: OutputFormat[T]): DataStreamSink[T] =
  stream.writeUsingOutputFormat(format)

/**
 * Writes the DataStream to a socket as a byte array, serializing each element
 * with the given [[SerializationSchema]].
 */
@PublicEvolving
def writeToSocket(
    hostname: String,
    port: Integer,
    schema: SerializationSchema[T]): DataStreamSink[T] =
  stream.writeToSocket(hostname, port, schema)
/**
 * Adds the given sink to this DataStream. Only streams with sinks added
 * will be executed once the StreamExecutionEnvironment.execute(...)
 * method is called.
 */
def addSink(sinkFunction: SinkFunction[T]): DataStreamSink[T] =
  stream.addSink(sinkFunction)

/**
 * Adds a sink defined by the given function to this DataStream. Only streams
 * with sinks added will be executed once the
 * StreamExecutionEnvironment.execute(...) method is called.
 */
def addSink(fun: T => Unit): DataStreamSink[T] = {
  if (fun == null) {
    throw new NullPointerException("Sink function must not be null.")
  }
  val cleanedFun = clean(fun)
  // Delegate to the SinkFunction overload.
  addSink(new SinkFunction[T] {
    override def invoke(in: T) = cleanedFun(in)
  })
}

/**
 * Adds the given [[Sink]] to this DataStream. Only streams with sinks added
 * will be executed once the StreamExecutionEnvironment.execute(...)
 * method is called.
 */
def sinkTo(sink: Sink[T, _, _, _]): DataStreamSink[T] =
  stream.sinkTo(sink)
/**
 * Triggers the distributed execution of the streaming dataflow and returns an iterator
 * over the elements of this DataStream.
 *
 * <p>The application executes in the regular distributed manner on the target
 * environment; events are polled back to this process through Flink's REST API.
 *
 * <p><b>IMPORTANT</b> The returned iterator must be closed to free all cluster resources.
 */
def executeAndCollect(): CloseableIterator[T] = {
  val javaIterator = stream.executeAndCollect()
  CloseableIterator.fromJava(javaIterator)
}

/**
 * Triggers the distributed execution of the streaming dataflow under the given job name
 * and returns an iterator over the elements of this DataStream.
 *
 * <p>The application executes in the regular distributed manner on the target
 * environment; events are polled back to this process through Flink's REST API.
 *
 * <p><b>IMPORTANT</b> The returned iterator must be closed to free all cluster resources.
 */
def executeAndCollect(jobExecutionName: String): CloseableIterator[T] = {
  val javaIterator = stream.executeAndCollect(jobExecutionName)
  CloseableIterator.fromJava(javaIterator)
}

/**
 * Triggers the distributed execution of the streaming dataflow and returns up to
 * `limit` elements of this DataStream as a list.
 *
 * <p>The application executes in the regular distributed manner on the target
 * environment; events are polled back to this process through Flink's REST API.
 */
def executeAndCollect(limit: Int): List[T] = {
  val collected = stream.executeAndCollect(limit)
  collected.asScala.toList
}

/**
 * Triggers the distributed execution of the streaming dataflow under the given job name
 * and returns up to `limit` elements of this DataStream as a list.
 *
 * <p>The application executes in the regular distributed manner on the target
 * environment; events are polled back to this process through Flink's REST API.
 */
def executeAndCollect(jobExecutionName: String, limit: Int): List[T] = {
  val collected = stream.executeAndCollect(jobExecutionName, limit)
  collected.asScala.toList
}
/**
 * Returns a "closure-cleaned" version of the given function. Cleans only if closure
 * cleaning is not disabled in the [[org.apache.flink.api.common.ExecutionConfig]].
 */
private[flink] def clean[F <: AnyRef](f: F): F = {
  val env = new StreamExecutionEnvironment(stream.getExecutionEnvironment)
  env.scalaClean(f)
}
/**
 * Transforms the [[DataStream]] by using a custom [[OneInputStreamOperator]].
 *
 * @param operatorName name of the operator, for logging purposes
 * @param operator the object containing the transformation logic
 * @tparam R the type of elements emitted by the operator
 */
@PublicEvolving
def transform[R: TypeInformation](
    operatorName: String,
    operator: OneInputStreamOperator[T, R]): DataStream[R] = {
  val outTypeInfo = implicitly[TypeInformation[R]]
  asScalaStream(stream.transform(operatorName, outTypeInfo, operator))
}
}
| StephanEwen/incubator-flink | flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/DataStream.scala | Scala | apache-2.0 | 47,673 |
package com.zobot.client.packet.definitions.serverbound.play
import com.zobot.client.packet.Packet
/**
 * Serverbound play-state packet carrying the client's status action.
 *
 * @param actionId the status action selected by the client
 */
case class ClientStatus(actionId: Int) extends Packet {
  // Protocol packet id for the ClientStatus message.
  override lazy val packetId = 0x03
  // Payload is the action id encoded as a VarInt.
  override lazy val packetData: Array[Byte] = fromVarInt(actionId)
}
| BecauseNoReason/zobot | src/main/scala/com/zobot/client/packet/definitions/serverbound/play/ClientStatus.scala | Scala | mit | 267 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.avro
import java.nio.ByteBuffer
import scala.collection.JavaConverters._
import org.apache.avro.LogicalTypes.{TimestampMicros, TimestampMillis}
import org.apache.avro.Schema
import org.apache.avro.Schema.Type
import org.apache.avro.generic.GenericData.{EnumSymbol, Fixed, Record}
import org.apache.avro.util.Utf8
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{SpecializedGetters, SpecificInternalRow}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.types._
/**
* A serializer to serialize data in catalyst format to data in avro format.
*/
class AvroSerializer(rootCatalystType: DataType, rootAvroType: Schema, nullable: Boolean) {
/** Converts a single value in Catalyst format into its Avro representation. */
def serialize(catalystData: Any): Any = converter(catalystData)
// Top-level converter, built once from the root Catalyst/Avro types. Struct roots
// use a row converter directly; any other root type is wrapped in a one-column
// temporary row so the generic field-converter machinery can be reused.
private val converter: Any => Any = {
  val actualAvroType = resolveNullableType(rootAvroType, nullable)
  val baseConverter: Any => Any = rootCatalystType match {
    case st: StructType =>
      newStructConverter(st, actualAvroType).asInstanceOf[Any => Any]
    case _ =>
      val tmpRow = new SpecificInternalRow(Seq(rootCatalystType))
      val fieldConverter = newConverter(rootCatalystType, actualAvroType)
      (data: Any) => {
        tmpRow.update(0, data)
        fieldConverter.apply(tmpRow, 0)
      }
  }
  if (nullable) {
    // Null-tolerant wrapper: nulls map to Avro null, everything else is converted.
    (data: Any) => if (data == null) null else baseConverter(data)
  } else {
    baseConverter
  }
}
// Converts the field at `ordinal` of a SpecializedGetters container (row or array)
// into its Avro representation.
private type Converter = (SpecializedGetters, Int) => Any

/**
 * Builds a converter that translates a single Catalyst value of `catalystType` into
 * the corresponding Avro value for `avroType`.
 *
 * Throws IncompatibleSchemaException when the Catalyst value cannot be written into
 * the given Avro type (unknown enum symbol, wrong FIXED size, unsupported logical
 * type or Catalyst type).
 */
private def newConverter(catalystType: DataType, avroType: Schema): Converter = {
  catalystType match {
    case NullType =>
      (getter, ordinal) => null
    case BooleanType =>
      (getter, ordinal) => getter.getBoolean(ordinal)
    case ByteType =>
      // widened to Int for Avro output
      (getter, ordinal) => getter.getByte(ordinal).toInt
    case ShortType =>
      // widened to Int for Avro output
      (getter, ordinal) => getter.getShort(ordinal).toInt
    case IntegerType =>
      (getter, ordinal) => getter.getInt(ordinal)
    case LongType =>
      (getter, ordinal) => getter.getLong(ordinal)
    case FloatType =>
      (getter, ordinal) => getter.getFloat(ordinal)
    case DoubleType =>
      (getter, ordinal) => getter.getDouble(ordinal)
    case d: DecimalType =>
      // decimals are written through their string representation
      (getter, ordinal) => getter.getDecimal(ordinal, d.precision, d.scale).toString
    case StringType => avroType.getType match {
      case Type.ENUM =>
        // NOTE(review): shadows the file-level JavaConverters import; kept as-is.
        import scala.collection.JavaConverters._
        val enumSymbols: Set[String] = avroType.getEnumSymbols.asScala.toSet
        (getter, ordinal) =>
          val data = getter.getUTF8String(ordinal).toString
          // reject strings that are not declared symbols of the target enum
          if (!enumSymbols.contains(data)) {
            throw new IncompatibleSchemaException(
              "Cannot write \\"" + data + "\\" since it's not defined in enum \\"" +
                enumSymbols.mkString("\\", \\"") + "\\"")
          }
          new EnumSymbol(avroType, data)
      case _ =>
        (getter, ordinal) => new Utf8(getter.getUTF8String(ordinal).getBytes)
    }
    case BinaryType => avroType.getType match {
      case Type.FIXED =>
        val size = avroType.getFixedSize()
        (getter, ordinal) =>
          val data: Array[Byte] = getter.getBinary(ordinal)
          // FIXED requires the byte array length to match the schema exactly
          if (data.length != size) {
            throw new IncompatibleSchemaException(
              s"Cannot write ${data.length} ${if (data.length > 1) "bytes" else "byte"} of " +
                "binary data into FIXED Type with size of " +
                s"$size ${if (size > 1) "bytes" else "byte"}")
          }
          new Fixed(avroType, data)
      case _ =>
        (getter, ordinal) => ByteBuffer.wrap(getter.getBinary(ordinal))
    }
    case DateType =>
      // Catalyst dates are stored as days-since-epoch ints; passed through unchanged
      (getter, ordinal) => getter.getInt(ordinal)
    case TimestampType => avroType.getLogicalType match {
      // Catalyst timestamps are microseconds; divide to get milliseconds
      case _: TimestampMillis => (getter, ordinal) => getter.getLong(ordinal) / 1000
      case _: TimestampMicros => (getter, ordinal) => getter.getLong(ordinal)
      // For backward compatibility, if the Avro type is Long and it is not logical type,
      // output the timestamp value as with millisecond precision.
      case null => (getter, ordinal) => getter.getLong(ordinal) / 1000
      case other => throw new IncompatibleSchemaException(
        s"Cannot convert Catalyst Timestamp type to Avro logical type ${other}")
    }
    case ArrayType(et, containsNull) =>
      val elementConverter = newConverter(
        et, resolveNullableType(avroType.getElementType, containsNull))
      (getter, ordinal) => {
        val arrayData = getter.getArray(ordinal)
        val len = arrayData.numElements()
        val result = new Array[Any](len)
        var i = 0
        while (i < len) {
          if (containsNull && arrayData.isNullAt(i)) {
            result(i) = null
          } else {
            result(i) = elementConverter(arrayData, i)
          }
          i += 1
        }
        // avro writer is expecting a Java Collection, so we convert it into
        // `ArrayList` backed by the specified array without data copying.
        java.util.Arrays.asList(result: _*)
      }
    case st: StructType =>
      val structConverter = newStructConverter(st, avroType)
      val numFields = st.length
      (getter, ordinal) => structConverter(getter.getStruct(ordinal, numFields))
    // only string-keyed maps are supported (Avro map keys are strings)
    case MapType(kt, vt, valueContainsNull) if kt == StringType =>
      val valueConverter = newConverter(
        vt, resolveNullableType(avroType.getValueType, valueContainsNull))
      (getter, ordinal) =>
        val mapData = getter.getMap(ordinal)
        val len = mapData.numElements()
        val result = new java.util.HashMap[String, Any](len)
        val keyArray = mapData.keyArray()
        val valueArray = mapData.valueArray()
        var i = 0
        while (i < len) {
          val key = keyArray.getUTF8String(i).toString
          if (valueContainsNull && valueArray.isNullAt(i)) {
            result.put(key, null)
          } else {
            result.put(key, valueConverter(valueArray, i))
          }
          i += 1
        }
        result
    case other =>
      throw new IncompatibleSchemaException(s"Unexpected type: $other")
  }
}
  /**
   * Builds a converter that turns a Catalyst `InternalRow` into an Avro `Record`
   * conforming to `avroStruct`. Fields are matched positionally, so the Avro
   * schema must declare exactly as many fields as the Catalyst struct.
   */
  private def newStructConverter(
      catalystStruct: StructType, avroStruct: Schema): InternalRow => Record = {
    val avroFields = avroStruct.getFields
    assert(avroFields.size() == catalystStruct.length)
    // One converter per field, pairing each Catalyst field with the Avro field
    // at the same position (unwrapping Avro's nullable union where needed).
    val fieldConverters = catalystStruct.zip(avroFields.asScala).map {
      case (f1, f2) => newConverter(f1.dataType, resolveNullableType(f2.schema(), f1.nullable))
    }
    val numFields = catalystStruct.length
    (row: InternalRow) =>
      val result = new Record(avroStruct)
      var i = 0
      while (i < numFields) {
        if (row.isNullAt(i)) {
          result.put(i, null)
        } else {
          result.put(i, fieldConverters(i).apply(row, i))
        }
        i += 1
      }
      result
  }
private def resolveNullableType(avroType: Schema, nullable: Boolean): Schema = {
if (nullable) {
// avro uses union to represent nullable type.
val fields = avroType.getTypes.asScala
assert(fields.length == 2)
val actualType = fields.filter(_.getType != Type.NULL)
assert(actualType.length == 1)
actualType.head
} else {
avroType
}
}
}
| eyalfa/spark | external/avro/src/main/scala/org/apache/spark/sql/avro/AvroSerializer.scala | Scala | apache-2.0 | 8,392 |
package com.fanhood.presentation.main
import org.scalatra._
import scalate.ScalateSupport
/** Scalatra servlet serving the application's root page. */
class Fanhood extends FanhoodUtilsStack {
  // GET / — responds with an inline XHTML greeting; the XML literal below is
  // rendered directly as the response body.
  get("/") {
    <html>
      <body>
        <h1>Hello, world!</h1>
        Say <a href="hello-scalate">hello to Scalate</a>.
      </body>
    </html>
  }
}
| gvillarroel/fanhood | fanhood-utils/src/main/scala/com/fanhood/presentation/main/Fanhood.scala | Scala | gpl-2.0 | 296 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2006-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala
package collection
package generic
import scala.language.higherKinds
/** A template for companion objects of `Traversable` and subclasses thereof.
 * This trait provides a set of operations to create `$Coll` objects.
 * It is typically inherited by companion objects of subclasses of `Traversable`.
 *
 * @since 2.8
 *
 * @define coll collection
 * @define Coll Traversable
 * @define factoryInfo
 * This object provides a set of operations to create `$Coll` values.
 * @author Martin Odersky
 * @version 2.8
 * @define canBuildFromInfo
 * The standard `CanBuildFrom` instance for $Coll objects.
 * @see CanBuildFrom
 * @define genericCanBuildFromInfo
 * The standard `CanBuildFrom` instance for $Coll objects.
 * The created value is an instance of class `GenericCanBuildFrom`,
 * which forwards calls to create a new builder to the
 * `genericBuilder` method of the requesting collection.
 * @see CanBuildFrom
 * @see GenericCanBuildFrom
 */
trait TraversableFactory[CC[X] <: Traversable[X] with GenericTraversableTemplate[X, CC]]
  extends GenTraversableFactory[CC] with GenericSeqCompanion[CC]
| felixmulder/scala | src/library/scala/collection/generic/TraversableFactory.scala | Scala | bsd-3-clause | 1,683 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.docgen.refcard
import org.neo4j.cypher.QueryStatisticsTestSupport
import org.neo4j.cypher.docgen.RefcardTest
import org.neo4j.cypher.internal.compiler.v2_3.executionplan.InternalExecutionResult
/**
 * Refcard documentation test for the (deprecated) `CREATE UNIQUE` clause.
 * The Cypher snippet embedded in `text` is executed against the fixture graph
 * and the resulting statistics are checked in `assert` below.
 */
class CreateUniqueTest extends RefcardTest with QueryStatisticsTestSupport {
  // Fixture graph: ROOT -> A -> B -> C -> ROOT, connected via LINK relationships.
  val graphDescription = List("ROOT LINK A", "A LINK B", "B LINK C", "C LINK ROOT")
  val title = "CREATE UNIQUE"
  val css = "col carddeprecation c2-1 c3-3 c4-4 c5-5 c6-6"
  override val linkId = "query-create-unique"
  // Invoked once per named assertion block in `text`; verifies query statistics.
  override def assert(name: String, result: InternalExecutionResult) {
    name match {
      case "create" =>
        // CREATE UNIQUE should create the missing node, relationship and property.
        assertStats(result, nodesCreated = 1, relationshipsCreated = 1, propertiesSet = 1)
        assert(result.toList.size === 1)
    }
  }
  // Supplies query parameters for snippets tagged `parameters=...`.
  override def parameters(name: String): Map[String, Any] =
    name match {
      case "parameters=aname" =>
        Map("value" -> "Bob")
      case "" =>
        Map()
    }
  // Initial node properties for the fixture graph.
  override val properties: Map[String, Map[String, Any]] = Map(
    "A" -> Map("value" -> 10),
    "B" -> Map("value" -> 20),
    "C" -> Map("value" -> 30))
  // Refcard body: the ### markers delimit the executable, asserted query.
  def text = """
###assertion=create parameters=aname
//
MATCH n WHERE id(n) = %A%
CREATE UNIQUE
(n)-[:KNOWS]->(m {property: {value}})
RETURN m###
Match pattern or create it if it does not exist.
The pattern can not include any optional parts.
"""
}
| HuangLS/neo4j | manual/cypher/refcard-tests/src/test/scala/org/neo4j/cypher/docgen/refcard/CreateUniqueTest.scala | Scala | apache-2.0 | 2,162 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.pattern.stream
import java.io.File
import akka.actor.{Props, ActorSystem}
import akka.stream.{Attributes, Outlet, Inlet, UniformFanOutShape}
import akka.stream.stage.{InHandler, OutHandler, GraphStageLogic, GraphStage}
import com.typesafe.config.Config
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory
/**
 * Fan-out stage backed by a [[PersistentQueue]]: every incoming element is
 * persisted and then broadcast to `queue.totalOutputPorts` independent output
 * ports, each dequeuing at its own pace. Subclasses define how a dequeued
 * [[Event]] maps to the output type `S` (`elementOut`) and whether reads are
 * committed automatically (`autoCommit`).
 */
abstract class BroadcastBufferBase[T, S] (private[stream] val queue: PersistentQueue[T],
                                          onPushCallback: () => Unit = () => {})(implicit serializer: QueueSerializer[T],
                                                                                system: ActorSystem)
  extends GraphStage[UniformFanOutShape[T, S]] {
  // Convenience constructors building the queue from a config or a directory.
  def this(config: Config)(implicit serializer: QueueSerializer[T],
                           system: ActorSystem) = this(new PersistentQueue[T](config))
  def this(persistDir: File)(implicit serializer: QueueSerializer[T],
                             system: ActorSystem) = this(new PersistentQueue[T](persistDir), () => {})
  private val outputPorts = queue.totalOutputPorts
  private val in = Inlet[T]("BroadcastBuffer.in")
  private val out = Vector.tabulate(outputPorts)(i => Outlet[S]("BroadcastBuffer.out" + i))
  private val outWithIndex = out.zipWithIndex
  val shape: UniformFanOutShape[T, S] = UniformFanOutShape(in, out: _*)
  // Volatile: observable from outside the stage's own execution context.
  @volatile protected var upstreamFailed = false
  @volatile protected var upstreamFinished = false
  // Actor responsible for closing the queue once all ports have drained (or on failure).
  protected val queueCloserActor = system.actorOf(Props(classOf[PersistentQueueCloserActor[T]], queue))
  // Maps a dequeued event to the element pushed downstream.
  protected def elementOut(e: Event[T]): S
  // Hook invoked after pushing on a port; default is a no-op (no auto-commit).
  protected def autoCommit(outputPortId: Int, index: Long) = {}
  def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
    // Per-port completion flags and index of the last element pushed per port.
    private val finished = Array.fill[Boolean](outputPorts)(false)
    private val lastPushed = Array.fill[Long](outputPorts)(0)
    override def preStart(): Unit = pull(in)
    def outHandler(outlet: Outlet[S], outputPortId: Int) = new OutHandler {
      override def onPull(): Unit = {
        queue.dequeue(outputPortId) match {
          // Queue drained for this port: if upstream is done the port is
          // finished; once every port has finished, complete the stage.
          case None => if (upstreamFinished) {
            finished(outputPortId) = true
            queueCloserActor ! PushedAndCommitted(outputPortId, lastPushed(outputPortId), queue.read(outputPortId))
            if (finished.reduce(_ && _)) {
              queueCloserActor ! UpstreamFinished
              completeStage()
            }
          }
          case Some(element) =>
            push(outlet, elementOut(element))
            lastPushed(outputPortId) = element.index
            autoCommit(outputPortId, element.index)
        }
      }
    }
    setHandler(in, new InHandler {
      override def onPush(): Unit = {
        val element = grab(in)
        // Persist first, then serve any ports that currently have demand.
        queue.enqueue(element)
        onPushCallback()
        outWithIndex foreach { case (port, id) =>
          if (isAvailable(port))
            queue.dequeue(id) foreach { element =>
              push(port, elementOut(element))
              lastPushed(id) = element.index
              autoCommit(id, element.index)
            }
        }
        pull(in)
      }
      override def onUpstreamFinish(): Unit = {
        upstreamFinished = true
        // Idle (available) ports are finished immediately; busy ports will
        // finish from onPull once they drain the queue.
        var isAllAvailable = true
        outWithIndex foreach { case (port, outportId) =>
          if (isAvailable(port)) {
            finished(outportId) = true
            queueCloserActor ! PushedAndCommitted(outportId, lastPushed(outportId), queue.read(outportId))
          } else {
            isAllAvailable = false
          }
        }
        if (isAllAvailable) {
          queueCloserActor ! UpstreamFinished
          completeStage()
        }
      }
      override def onUpstreamFailure(ex: Throwable): Unit = {
        val logger = Logger(LoggerFactory.getLogger(this.getClass))
        logger.error("Received upstream failure signal: " + ex)
        upstreamFailed = true
        queueCloserActor ! UpstreamFailed
        completeStage()
      }
    })
    outWithIndex foreach { case (currentOut, outputPortId) =>
      setHandler(currentOut, outHandler(currentOut, outputPortId))
    }
  }
}
| akara/squbs | squbs-pattern/src/main/scala/org/squbs/pattern/stream/BroadcastBufferBase.scala | Scala | apache-2.0 | 4,705 |
import scala.io.Source
object Test {
  /** Reads a small multi-line string through `Source` and echoes its lines. */
  def main(args: Array[String]) {
    // stripMargin removes the leading whitespace up to and including each '|'.
    val text =
      """|
         |This is a file
         |it is split on several lines.
         |
         |isn't it?
         |""".stripMargin
    val lines = Source.fromString(text).getLines.toList
    println("lines.size = " + lines.size)
    for (line <- lines) println(line)
  }
}
| felixmulder/scala | test/files/jvm/unittest_io_Jvm.scala | Scala | bsd-3-clause | 332 |
package org.jetbrains.plugins.scala.lang.psi.impl.base.types
import com.intellij.lang.ASTNode
import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor
import org.jetbrains.plugins.scala.lang.psi.api.base.types.{ScTypeElement, ScTypeLambdaTypeElement}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementImpl
import org.jetbrains.plugins.scala.lang.psi.types.api.TypeParameter
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.ScTypePolymorphicType
import org.jetbrains.plugins.scala.lang.psi.types.result.TypeResult
/**
 * PSI implementation of a type-lambda type element (e.g. Scala 3's
 * `[X] =>> F[X]` syntax). Its inferred type wraps the result type in a
 * [[ScTypePolymorphicType]] carrying the lambda's type parameters.
 */
class ScTypeLambdaTypeElementImpl(node: ASTNode)
  extends ScalaPsiElementImpl(node)
    with ScTypeLambdaTypeElement {
  // The element's type: the result type parameterized by the lambda's type
  // parameters, flagged so consumers know it originated from a type lambda.
  override protected def innerType: TypeResult =
    resultType.map(ScTypePolymorphicType(_, typeParameters.map(TypeParameter(_)), isLambdaTypeElement = true))
  // The type element forming the lambda's body/result, if present.
  override def resultTypeElement: Option[ScTypeElement] = findChild[ScTypeElement]
  override def resultType: TypeResult = this.flatMapType(resultTypeElement)
  override protected def acceptScala(visitor: ScalaElementVisitor): Unit =
    visitor.visitTypeLambdaTypeElement(this)
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/base/types/ScTypeLambdaTypeElementImpl.scala | Scala | apache-2.0 | 1,132 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools
import org.scalatest._
import javax.swing._
import java.awt.Color
import java.awt.Font
import java.awt.Component
import java.awt.BorderLayout
import java.net.URL
import javax.swing.border.EmptyBorder
import org.scalatest.events._
/**
* A ListCellRenderer for the event List in the GUI.
*
* @author Bill Venners
*/
private[tools] class IconEmbellishedListCellRenderer extends ListCellRenderer {
  private val defaultRenderer: DefaultListCellRenderer = new DefaultListCellRenderer()
  // Text colors for failure-like and ignored events, plus the selection background.
  private val DEEP_RED: Color = new Color(0xEE, 0x55, 0x66)
  private val UNCOMFORTABLE_GRAY: Color = new Color(0xaf, 0xaf, 0x9f)
  private val BACKGROUND_BLUE: Color = new Color(0x45, 0x76, 0xd4)
  private val myClassLoader: ClassLoader = classOf[IconEmbellishedListCellRenderer].getClassLoader
  // Dot icons for each event type, in unselected and selected (highlighted) variants.
  private object Icons {
    // Loads one dot image from the images directory on the classpath.
    private def load(fileName: String): ImageIcon =
      new ImageIcon(myClassLoader.getResource("images/" + fileName))
    // Unselected icon images
    private val purpleImageIcon: ImageIcon = load("purpledot.gif")
    private val greenImageIcon: ImageIcon = load("greendot.gif")
    private val redImageIcon: ImageIcon = load("reddot.gif")
    private val blueImageIcon: ImageIcon = load("bluedot.gif")
    private val grayImageIcon: ImageIcon = load("graydot.gif")
    private val cyanImageIcon: ImageIcon = load("cyandot.gif")
    private val yellowImageIcon: ImageIcon = load("yellowdot.gif")
    // Selected icon images
    private val purpleSelImageIcon: ImageIcon = load("purpledotsel.gif")
    private val greenSelImageIcon: ImageIcon = load("greendotsel.gif")
    private val redSelImageIcon: ImageIcon = load("reddotsel.gif")
    private val blueSelImageIcon: ImageIcon = load("bluedotsel.gif")
    private val graySelImageIcon: ImageIcon = load("graydotsel.gif")
    private val cyanSelImageIcon: ImageIcon = load("cyandotsel.gif")
    private val yellowSelImageIcon: ImageIcon = load("yellowdotsel.gif")
    // Unselected icons per event type
    val runStartingIcon = grayImageIcon
    val testStartingIcon = purpleImageIcon
    val testSucceededIcon = greenImageIcon
    val testIgnoredIcon = yellowImageIcon
    val testPendingIcon = yellowImageIcon
    val testCanceledIcon = yellowImageIcon
    val testFailedIcon = redImageIcon
    val suiteStartingIcon = cyanImageIcon
    val suiteCompletedIcon = cyanImageIcon
    val suiteAbortedIcon = redImageIcon
    val infoProvidedIcon = blueImageIcon
    val scopeOpenedIcon = blueImageIcon
    val scopeClosedIcon = blueImageIcon
    // NOTE(review): scopePendingIcon is yellow while scopePendingSelIcon below
    // is blue, unlike the other scope icons — preserved as-is, but confirm
    // whether the asymmetry is intended.
    val scopePendingIcon = yellowImageIcon
    val runStoppedIcon = grayImageIcon
    val runAbortedIcon = redImageIcon
    val runCompletedIcon = grayImageIcon
    // Selected icons per event type
    val runStartingSelIcon = graySelImageIcon
    val testStartingSelIcon = purpleSelImageIcon
    val testSucceededSelIcon = greenSelImageIcon
    val testIgnoredSelIcon = yellowSelImageIcon
    val testPendingSelIcon = yellowSelImageIcon
    val testCanceledSelIcon = yellowSelImageIcon
    val testFailedSelIcon = redSelImageIcon
    val suiteStartingSelIcon = cyanSelImageIcon
    val suiteCompletedSelIcon = cyanSelImageIcon
    val suiteAbortedSelIcon = redSelImageIcon
    val infoProvidedSelIcon = blueSelImageIcon
    val scopeOpenedSelIcon = blueSelImageIcon
    val scopeClosedSelIcon = blueSelImageIcon
    val scopePendingSelIcon = blueSelImageIcon
    val runStoppedSelIcon = graySelImageIcon
    val runAbortedSelIcon = redSelImageIcon
    val runCompletedSelIcon = graySelImageIcon
  }
  // Makes the label's text bold in the given color (used for failed/ignored rows).
  private def setRendererFont(renderer: JLabel, color: Color) {
    val font: Font = renderer.getFont()
    renderer.setFont(new Font(font.getFontName(), Font.BOLD, font.getSize()))
    renderer.setForeground(color)
  }
  // Installs the selected or unselected variant of an event's dot icon.
  private def setDotIcon(renderer: JLabel, isSelected: Boolean, selIcon: ImageIcon, icon: ImageIcon) {
    renderer.setIcon(if (isSelected) selIcon else icon)
  }
  /**
   * Renders one event row: a colored dot icon keyed on the event type,
   * bold colored text for failures/aborts/ignored tests, and left indentation
   * matching the event's `IndentedText` formatter, if any.
   */
  def getListCellRendererComponent(list: JList, value: Object, index: Int, isSelected: Boolean,
      cellHasFocus: Boolean): Component = {
    val renderer: JLabel =
      defaultRenderer.getListCellRendererComponent(list, value, index, isSelected, cellHasFocus).asInstanceOf[JLabel]
    // Setting to a specific background color because that color was used to make icons that
    // look nice when the row is selected.
    if (isSelected)
      renderer.setBackground(BACKGROUND_BLUE)
    val event: Event = value.asInstanceOf[EventHolder].event
    event match {
      case _: DiscoveryStarting => // no icon for discovery events
      case _: DiscoveryCompleted =>
      case _: RunStarting => setDotIcon(renderer, isSelected, Icons.runStartingSelIcon, Icons.runStartingIcon)
      case _: TestStarting => setDotIcon(renderer, isSelected, Icons.testStartingSelIcon, Icons.testStartingIcon)
      case _: TestSucceeded => setDotIcon(renderer, isSelected, Icons.testSucceededSelIcon, Icons.testSucceededIcon)
      case _: TestIgnored =>
        setDotIcon(renderer, isSelected, Icons.testIgnoredSelIcon, Icons.testIgnoredIcon)
        setRendererFont(renderer, UNCOMFORTABLE_GRAY)
      case _: TestPending => setDotIcon(renderer, isSelected, Icons.testPendingSelIcon, Icons.testPendingIcon)
      case _: TestCanceled => setDotIcon(renderer, isSelected, Icons.testCanceledSelIcon, Icons.testCanceledIcon)
      case _: TestFailed =>
        setDotIcon(renderer, isSelected, Icons.testFailedSelIcon, Icons.testFailedIcon)
        setRendererFont(renderer, DEEP_RED)
      case _: RunAborted =>
        setDotIcon(renderer, isSelected, Icons.runAbortedSelIcon, Icons.runAbortedIcon)
        setRendererFont(renderer, DEEP_RED)
      case _: SuiteAborted =>
        setDotIcon(renderer, isSelected, Icons.suiteAbortedSelIcon, Icons.suiteAbortedIcon)
        setRendererFont(renderer, DEEP_RED)
      case _: SuiteStarting => setDotIcon(renderer, isSelected, Icons.suiteStartingSelIcon, Icons.suiteStartingIcon)
      case _: SuiteCompleted => setDotIcon(renderer, isSelected, Icons.suiteCompletedSelIcon, Icons.suiteCompletedIcon)
      case _: InfoProvided => setDotIcon(renderer, isSelected, Icons.infoProvidedSelIcon, Icons.infoProvidedIcon)
      case _: MarkupProvided =>
        // Shouldn't get here because markup events aren't registered, but
        // render them like InfoProvided just in case.
        setDotIcon(renderer, isSelected, Icons.infoProvidedSelIcon, Icons.infoProvidedIcon)
      case _: ScopeOpened => setDotIcon(renderer, isSelected, Icons.scopeOpenedSelIcon, Icons.scopeOpenedIcon)
      case _: ScopeClosed => setDotIcon(renderer, isSelected, Icons.scopeClosedSelIcon, Icons.scopeClosedIcon)
      case _: ScopePending => setDotIcon(renderer, isSelected, Icons.scopePendingSelIcon, Icons.scopePendingIcon)
      case _: RunCompleted => setDotIcon(renderer, isSelected, Icons.runCompletedSelIcon, Icons.runCompletedIcon)
      case _: RunStopped => setDotIcon(renderer, isSelected, Icons.runStoppedSelIcon, Icons.runStoppedIcon)
    }
    // Indent the row to match the event's formatter indentation, if any.
    event.formatter match {
      case Some(IndentedText(_, _, indentationLevel)) =>
        if (indentationLevel > 0) {
          val panel = new JPanel(new BorderLayout)
          panel.setBackground(renderer.getBackground)
          val WidthOfIconInPixels = 12
          panel.setBorder(new EmptyBorder(0, WidthOfIconInPixels * indentationLevel, 0, 0))
          renderer.setBorder(new EmptyBorder(0, 0, 0, 0))
          panel.add(renderer, BorderLayout.CENTER)
          panel
        }
        else renderer
      case _ =>
        renderer
    }
  }
}
| svn2github/scalatest | src/main/scala/org/scalatest/tools/IconEmbellishedListCellRenderer.scala | Scala | apache-2.0 | 10,151 |
package net.categoricaldata.load
object SQLDumpLoader extends DataLoader {
  /** Parses the lines of an SQL dump into [[DataTables]]. Not yet implemented. */
  def fromLines(lines: Iterable[String]): DataTables = ???
} | JasonGross/categoricaldata | src/main/scala/net/categoricaldata/load/SQLDumpLoader.scala | Scala | mit | 135 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.jobs
import java.io.File
import com.typesafe.scalalogging.LazyLogging
import org.apache.hadoop.conf.Configuration
import org.locationtech.geomesa.utils.classpath.ClassPathUtils
object JobUtils extends LazyLogging {
  /**
   * Sets the libjars into a Hadoop configuration. Will search the environment first, then the
   * classpath, until all required jars have been found.
   *
   * @param conf job configuration
   * @param libJars jar prefixes to load
   * @param searchPath lazily-evaluated sources of candidate jar files, tried in order
   */
  def setLibJars(conf: Configuration, libJars: Seq[String], searchPath: Iterator[() => Seq[File]]): Unit = {
    val extra = ClassPathUtils.loadClassPathFromEnv("GEOMESA_EXTRA_CLASSPATHS")
    val found = ClassPathUtils.findJars(libJars, searchPath)
    // always prepend GEOMESA_EXTRA_CLASSPATHS first
    val paths = (extra ++ found).map(f => "file://" + f.getAbsolutePath)
    // tmpjars is the hadoop config that corresponds to libjars
    if (paths.nonEmpty) {
      conf.setStrings("tmpjars", paths: _*)
    }
    logger.debug(s"Job will use the following libjars:${paths.mkString("\\n\\t", "\\n\\t", "")}")
  }
}
| locationtech/geomesa | geomesa-jobs/src/main/scala/org/locationtech/geomesa/jobs/JobUtils.scala | Scala | apache-2.0 | 1,578 |
package org.scala_tools.maven.mojo.util
/**
 * A value of type `T` paired with a way to release it. `use` applies a function
 * to the value and guarantees `close` runs afterwards, whether the function
 * returns normally or throws.
 */
sealed trait Resource[+T] {
  val value: T
  def close: Unit
  def use[X](f: T => X) = {
    try {
      f(value)
    } finally {
      close
    }
  }
}
object Resource {
  // Structural type: anything exposing a no-argument close() method.
  // NOTE: calls through a structural type are dispatched reflectively at runtime.
  type Closable = { def close(); }
  // Wraps any close()-bearing value in a Resource, giving callers `use`.
  implicit def pimpClosable[A <: Closable](x : A) = new Resource[A] {
    override val value = x
    override def close = x.close()
  }
}
import java.io._
object RichBufferedReader {
  // Lets a BufferedReader be treated as an Iterable of its lines.
  // `elements` is the legacy (pre-Scala-2.8) Iterable accessor; `iterator`
  // forwards to it for the newer collection API.
  // NOTE(review): every traversal reads from the same underlying reader, so the
  // Iterable is effectively single-use — confirm callers only iterate once.
  implicit def pimpBufferedReader(reader : BufferedReader) = new Iterable[String] {
    override def elements = new BufferedReaderIterator(reader)
    def iterator = elements
  }
}
class BufferedReaderIterator(reader : BufferedReader) extends Iterator[String] {
  // One-line lookahead buffer: `currentLine` holds the line fetched by the most
  // recent readLine, and `isNextCalled` records whether that buffered line has
  // been read ahead but not yet consumed by next().
  private[this] var isNextCalled = false
  private[this] var currentLine : String = null
  // Returns the buffered line, reading a fresh one from the reader only when
  // the buffer is empty. Passing reset = true marks the buffer consumed (used
  // by next()); hasNext passes false so repeated hasNext calls do not advance
  // the underlying reader.
  private def getNextLine(reset : Boolean) = {
    if(!isNextCalled) {
      currentLine = reader.readLine
      isNextCalled = true
    }
    if(reset) {
      isNextCalled = false
    }
    currentLine
  }
  // BufferedReader.readLine returns null at end of stream, so a null lookahead
  // means the iterator is exhausted.
  def hasNext = getNextLine(false) != null
def next() = getNextLine(true)
} | jacekszymanski/mvnplugins | scala-mojo-support/src/main/java/org/scala_tools/maven/mojo/util/Resource.scala | Scala | apache-2.0 | 1,056 |
package org.GutenburgNLP.utils
import java.nio.charset.MalformedInputException
import java.util.zip.ZipException
import scala.annotation.tailrec
/**
* Collection of basic NLP-ish functions
*
*/
object NLPUtils {
  /**
   * Count the number of words in a .zip file
   *
   * Sums the word counts of every text chunk of the first `.txt` entry found in
   * the archive; unreadable or corrupt archives count as zero words.
   *
   * @param filename filepath of the file
   * @return Count the number of words contained within a text file
   */
  def wordCountZipSum(filename: String): Int = {
    try {
      val zip: CompressedFile = new CompressedFile(filename)
      // NOTE(review): assumes a CompressedFile entry iterates chunks of text — confirm.
      val textFile: Int = zip
        .find(e => e.getName endsWith ".txt")
        .map(x => x.map(y => stringSplit(y)).sum)
        .sum
      textFile
    } catch {
      // Catching Errors: bad encodings or corrupt zips are treated as empty files.
      case foo: MalformedInputException => 0
      case bar: ZipException => 0
    }
  }
  /**
   * Split a large body of text into words
   *
   * @param stringData body of text - in this case a book
   * @return Number of words (note: an empty string still yields 1, since
   *         String.split returns a single empty element for it)
   */
  def stringSplit(stringData: String): Int = {
    stringData.split("\\\\W+").length
  }
  /**
   * Get the number of lines in each of the text files
   *
   * @param filename filepath of the file
   * @return Returns a Int of the number of words
   */
  def getFileLines(filename: String): Int = {
    try {
      val zip: CompressedFile = new CompressedFile(filename)
      // NOTE(review): sums the lengths of the first .txt entry's chunks; whether
      // this is lines, characters, or something else depends on what unit
      // CompressedFile iterates — confirm against its implementation.
      val textFile: Int =
        zip.find(e => e.getName endsWith ".txt").map(x => x.length).sum
      textFile
    } catch {
      case foo: MalformedInputException => 0
      case bar: ZipException => 0
    }
  }
  /**
   * Recursive function to split a List pieces with N strings
   *
   * @param xs the list to partition
   * @param n maximum number of elements per chunk
   * @return the chunks of at most `n` elements, in the original order
   */
  def splitter[A](xs: List[A], n: Int): List[List[A]] = {
    @tailrec
    def splitInner(res: List[List[A]], lst: List[A], n: Int): List[List[A]] = {
      if (lst.isEmpty) res
      else {
        val headList: List[A] = lst.take(n)
        val tailList: List[A] = lst.drop(n)
        // Chunks are prepended, so `res` accumulates in reverse order.
        splitInner(headList :: res, tailList, n)
      }
    }
    // Reverse once at the end to restore the original chunk order.
    splitInner(Nil, xs, n).reverse
  }
  /**
   * Build a lower-cased word-frequency map for the first `.txt` entry of the
   * compressed file at the given path.
   *
   * @param stringData filepath of the compressed file (despite the name)
   * @return Map[Word, Count]; empty on unreadable or corrupt archives
   */
  def getMapWordCount(stringData: String): Map[String, Int] = {
    try {
      val zip: CompressedFile = new CompressedFile(stringData)
      // NOTE(review): `.head` throws NoSuchElementException when no .txt entry
      // exists, and that exception is NOT handled by the catch below — confirm.
      val textFile: Map[String, Int] = zip
        .find(e => e.getName endsWith ".txt")
        .head
        .flatMap(_.split("\\\\W+"))
        .map(x => x.toLowerCase)
        .foldLeft(Map.empty[String, Int]) { (count, word) =>
          count + (word -> (count.getOrElse(word, 0) + 1))
        }
      textFile
    } catch {
      case foo: MalformedInputException => Map()
      case bar: ZipException => Map()
    }
  }
  /**
   * Removing punctuation from the body of text and split into words.
   * Keeps only runs of ASCII letters, lower-cased.
   *
   * @param words body of text
   * @return Iterator[String] - split string by words
   **/
  def removePunctuation(words: String): Iterator[String] = {
    "[a-zA-Z]+".r findAllIn words map (_.toLowerCase)
  }
  /**
   * Remove the Header and Footer Function
   *
   * TODO - NOT WORKING AS EXPECTED DUE TO CHAR CONVERSION
   *
   * @param words String Data
   * @param headers List of Strings which are the header starters
   * @param footers List of Strings which are the header starters
   * @return String of data with the path removed
   **/
  def removeHeaderFooter(words: String,
                         headers: List[String],
                         footers: List[String]): String = {
    // Drops everything up to (and a bit past) the last occurrence of `st`.
    def splitStringLast(words: String, st: String): String = {
      words.indexOf(st) match {
        case -1 => words
        case _ => words.substring(words.indexOf(st) + st.length + 10)
      }
    }
    // Keeps everything before the first occurrence of `st`.
    def splitStringFirst(words: String, st: String): String = {
      words.indexOf(st) match {
        case -1 => words
        case _ => words.substring(0, words.indexOf(st) - 1)
      }
    }
    // Debugging
    // println(headers.map(x => splitStringLast(words, x)).map(x => x.length)
    // .reduceRight((x,y) => if (x > y && y > (words.length * 0.75)) y else x))
    // println(headers.map(x => splitStringLast(words, x)).map(x => x.length))
    // Removing the Headers: pick the shortest remainder that still keeps at
    // least 75% of the text (guards against matching inside the body).
    val woHeader: String = headers
      .map(x => splitStringLast(words, x))
      .reduceRight((x, y) =>
        if (x.length > y.length && y.length > (words.length * 0.75)) y else x)
    // Debugging
    // println(footers.map(x => splitStringFirst(woHeader, x)).map(x => x.length).reduceRight((x,y) => if (x > y) y else x))
    // println(footers.map(x => splitStringFirst(woHeader, x)).map(x => x.length))
    // Removing the Footers, with the same 75%-of-text guard.
    val woFooter: String = footers
      .map(x => splitStringFirst(woHeader, x))
      .reduceRight((x, y) =>
        if (x.length > y.length && y.length > (woHeader.length * 0.75)) y
        else x)
    // println(words.length, woFooter.length)
    // Return the Processed string
    woFooter
  }
}
| dandxy89/SparkScalaGutenburgNLP | src/main/scala/org/GutenburgNLP/utils/NLPUtils.scala | Scala | apache-2.0 | 5,038 |
/*
* Copyright (c) 2017-2022 Lymia Alusyia <lymia@lymiahugs.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package moe.lymia.princess
import moe.lymia.princess.core.state.{GuiContext, GuiLoop}
import moe.lymia.princess.views.frontend.SplashScreen
import moe.lymia.princess.views.mainframe.MainFrame
import org.eclipse.swt.widgets.Display
import java.nio.file.Paths
/** Error carrying a user-facing message; `main` prints the message without a stack trace. */
private case class CLIException(message: String) extends Exception
class CLI {
  private val logger = DefaultLogger.bind("CLI")
  // A command is a deferred action selected during option parsing.
  private type CommandFn = () => Unit
  // At most one command per invocation (enforced by setCmd below).
  private var command: Option[CommandFn] = None
  // Project path supplied either positionally or via --loadProject.
  private var loadTarget: Option[String] = None
  // scopt definition of the command line. Uses a Unit config: options mutate
  // `command`/`loadTarget` directly via side-effecting foreach callbacks.
  private val parser = new scopt.OptionParser[Unit]("./PrincessEdit") {
    help("help").text("Shows this help message.")
    note("")
    // Positional project path: sets the load target but leaves cmd_default in charge.
    arg[String]("<project.pedit-project>").foreach(x => loadTarget = Some(x)).hidden().optional()
    note("")
    opt[String]('l', "loadProject").valueName("<project.pedit-project>").text("Loads a project.").foreach{ x =>
      setCmd(cmd_load _)
      loadTarget = Some(x)
    }
    // Native-image tooling commands are only registered on a regular JVM build.
    if (!Environment.isNativeImage) opt[Unit]("nativeImageGenConfig").foreach { x =>
      setCmd(cmd_nativeImageGenConfig _)
    }
    if (!Environment.isNativeImage) opt[Unit]("nativeImageProcessConfig").foreach{ x =>
      setCmd(cmd_nativeImageProcessConfig _)
    }
  }
  // Aborts option handling with a user-facing message (caught and printed in main).
  private def error(s: String) = throw CLIException(s)
  // Runs `v` and logs how long it took, returning its result.
  private def time[T](what: String)(v: => T) = {
    val time = System.currentTimeMillis()
    val res = v
    logger.info(s"$what in ${System.currentTimeMillis() - time}ms.")
    res
  }
  // Records the selected command, rejecting conflicting command flags.
  private def setCmd(cmd: () => Unit): Unit = {
    if(command.isDefined) error("Command already set!")
    command = Some(cmd)
  }
  // Runs `f` inside the GUI main loop with a fresh GuiContext.
  private def mainLoop[T](f: GuiContext => Unit): Unit = new GuiLoop().mainLoop(f)
  // Default command: open the project given positionally, if any, otherwise
  // show the splash screen.
  private def cmd_default(): Unit = {
    mainLoop { ctx =>
      loadTarget match {
        case Some(x) =>
          // NOTE(review): `x` is bound but unused; the path is re-read via loadTarget.get.
          MainFrame.loadProject(null, ctx, Paths.get(loadTarget.get))
        case None =>
          new SplashScreen(ctx).open()
      }
    }
  }
  // --loadProject: open the project file recorded in loadTarget (set by the parser).
  private def cmd_load(): Unit = {
    mainLoop { ctx =>
      MainFrame.loadProject(null, ctx, Paths.get(loadTarget.get))
    }
  }
  // Generates GraalVM native-image configuration by exercising the UI;
  // only valid when running on a regular JVM (asserted below).
  private def cmd_nativeImageGenConfig(): Unit = {
    assert(!Environment.isNativeImage)
    mainLoop { ctx =>
      time("Generated configurations for native-image") {
        new NativeImageGenConfig(ctx).execute()
      }
    }
  }
  // Post-processes previously generated native-image configuration files.
  private def cmd_nativeImageProcessConfig(): Unit = {
    assert(!Environment.isNativeImage)
    NativeImageProcessConfig.processConfigs()
  }
def main(args: Seq[String]): Unit = {
try {
Display.setAppName(AppName.PrincessEdit)
if(parser.parse(args, ()).isDefined) command.getOrElse(cmd_default _)()
} catch {
case CLIException(e) => println(e)
case e: Exception => e.printStackTrace()
}
}
} | Lymia/PrincessEdit | modules/princess-edit/src/main/scala/moe/lymia/princess/CLI.scala | Scala | mit | 3,882 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.metrics.types
import io.gatling.commons.stats.{ KO, OK, Status }
import io.gatling.core.config.GatlingConfiguration
import org.HdrHistogram.{ IntCountsHistogram, AbstractHistogram }
class HistogramRequestMetricsBuffer(configuration: GatlingConfiguration) extends RequestMetricsBuffer {

  // Percentile ranks to report, read once from the charting configuration.
  private val p1 = configuration.charting.indicators.percentile1
  private val p2 = configuration.charting.indicators.percentile2
  private val p3 = configuration.charting.indicators.percentile3
  private val p4 = configuration.charting.indicators.percentile4

  // HDR histograms (2 significant value digits) for OK, KO and all responses.
  private val okTimings: AbstractHistogram = new IntCountsHistogram(2)
  private val koTimings: AbstractHistogram = new IntCountsHistogram(2)
  private val allTimings: AbstractHistogram = new IntCountsHistogram(2)

  /** Records one response time; values below 1 are clamped so the histogram accepts them. */
  override def add(status: Status, time: Long): Unit = {
    val clamped = math.max(time, 1L)
    allTimings.recordValue(clamped)
    val byStatus = status match {
      case OK => okTimings
      case KO => koTimings
    }
    byStatus.recordValue(clamped)
  }

  /** Drops every recorded value from all three histograms. */
  override def clear(): Unit =
    Seq(okTimings, koTimings, allTimings).foreach(_.reset())

  /** Summarises the three histograms into per-status metrics. */
  override def metricsByStatus: MetricByStatus =
    MetricByStatus(
      ok = summarise(okTimings),
      ko = summarise(koTimings),
      all = summarise(allTimings)
    )

  // Builds a Metrics snapshot from one histogram, or None when nothing was recorded.
  private def summarise(histogram: AbstractHistogram): Option[Metrics] = {
    val total = histogram.getTotalCount
    if (total == 0) {
      None
    } else {
      Some(Metrics(
        count = total,
        min = histogram.getMinValue.toInt,
        max = histogram.getMaxValue.toInt,
        mean = histogram.getMean.toInt,
        stdDev = histogram.getStdDeviation.toInt,
        percentile1 = histogram.getValueAtPercentile(p1).toInt,
        percentile2 = histogram.getValueAtPercentile(p2).toInt,
        percentile3 = histogram.getValueAtPercentile(p3).toInt,
        percentile4 = histogram.getValueAtPercentile(p4).toInt
      ))
    }
  }
}
| wiacekm/gatling | gatling-metrics/src/main/scala/io/gatling/metrics/types/HistogramRequestMetricsBuffer.scala | Scala | apache-2.0 | 2,742 |
package com.tribbloids.spookystuff.dsl
import com.tribbloids.spookystuff.caching.ExploreRunnerCache
import com.tribbloids.spookystuff.execution.ExplorePlan.Params
import com.tribbloids.spookystuff.execution.NodeKey
import com.tribbloids.spookystuff.row._
import com.tribbloids.spookystuff.utils.CachingUtils.ConcurrentMap
/**
  * Strategy for exploring a graph of fetched rows: given the plan parameters and
  * schema, produces the concrete [[ExploreAlgorithm.Impl]] that drives the search.
  */
sealed trait ExploreAlgorithm {
  def getImpl(
      params: Params,
      schema: SpookySchema
  ): ExploreAlgorithm.Impl
}
object ExploreAlgorithm {
  trait Impl extends Serializable {
    val params: Params
    val schema: SpookySchema
    /**
      * Merges two row groups keyed to the same node in the "open" frontier.
      */
    def openReducer: RowReducer
    def openReducerBetweenEpochs: RowReducer = openReducer
    // Picks — and removes — the next frontier entry to expand.
    def nextOpenSelector(
        open: ConcurrentMap[NodeKey, Iterable[DataRow]]
    ): (NodeKey, Iterable[DataRow])
    /**
      * Merges two row groups keyed to the same node in the "visited" set.
      */
    def visitedReducer: RowReducer //precede eliminator
    def visitedReducerBetweenEpochs: RowReducer = visitedReducer
  }
  trait EliminatingImpl extends Impl {
    /**
      * Orders frontier entries; the minimum under this ordering is expanded first.
      */
    val ordering: RowOrdering
    /**
      * Drops open rows that are dominated by already-visited rows for the same node.
      */
    def eliminator(
        open: Iterable[DataRow],
        visited: Iterable[DataRow]
    ): Iterable[DataRow]
    override final def nextOpenSelector(
        open: ConcurrentMap[NodeKey, Iterable[DataRow]]
    ): (NodeKey, Iterable[DataRow]) = {
      //Should I use pre-sorted collection? Or is it overengineering?
      // Take the best entry out of the frontier before consulting the shared cache.
      val bestOpenBeforeElimination: (NodeKey, Iterable[DataRow]) = open.min(ordering)
      val bestOpenNodeID = bestOpenBeforeElimination._1
      open -= bestOpenNodeID
      // Rows already recorded as visited for this node under the current execution.
      val existingVisitedOption: Option[Iterable[DataRow]] =
        ExploreRunnerCache
          .get(bestOpenNodeID -> params.executionID)
          .reduceOption(visitedReducer)
      val bestOpen: (NodeKey, Iterable[DataRow]) = existingVisitedOption match {
        case Some(allVisited) =>
          // Prune the chosen entry against everything already visited for that node.
          val dataRowsAfterElimination = eliminator(bestOpenBeforeElimination._2, allVisited)
          bestOpenBeforeElimination.copy(_2 = dataRowsAfterElimination)
        case None =>
          bestOpenBeforeElimination
      }
      bestOpen
    }
  }
}
object ExploreAlgorithms {
import ExploreAlgorithm._
  case object BreadthFirst extends ExploreAlgorithm {
    override def getImpl(
        params: Params,
        schema: SpookySchema
    ) = Impl(params, schema)
    case class Impl(
        override val params: Params,
        schema: SpookySchema
    ) extends EliminatingImpl {
      import params._
      // Brings in Option ordering (None sorts below Some) used by eliminator's `<`.
      import scala.Ordering.Implicits._
      // Keep, per groupID, only the group whose head sorts first by (depth, ordinal).
      override val openReducer: RowReducer = { (v1, v2) =>
        (v1 ++ v2)
          .groupBy(_.groupID)
          .values
          .minBy(_.head.sortIndex(Seq(depthField, ordinalField)))
      }
      override val visitedReducer: RowReducer = openReducer
      // Expand shallowest nodes first; nodes currently being fetched by an ongoing
      // runner are deprioritised to the back of the queue via Int.MaxValue.
      override val ordering: RowOrdering = Ordering.by { tuple: (NodeKey, Iterable[DataRow]) =>
        val inProgress = ExploreRunnerCache
          .getOnGoingRunners(params.executionID)
          .flatMap(_.fetchingInProgressOpt)
        val result = if (inProgress contains tuple._1) {
          Int.MaxValue
        } else {
          val v = tuple._2
          //          assert(v.size == 1)
          // Missing depth also sorts last.
          v.head
            .getInt(depthField)
            .getOrElse(Int.MaxValue)
        }
        result
      }
      // An open row survives only when strictly shallower than the visited depth
      // (Option[Int] comparison via Ordering.Implicits imported above).
      override def eliminator(
          open: Iterable[DataRow],
          visited: Iterable[DataRow]
      ): Iterable[DataRow] = {
        val visitedDepth = visited.head.getInt(depthField)
        open.filter { row =>
          row.getInt(depthField) < visitedDepth
        }
      }
    }
  }
//move reduce of openSet to elimination, should have identical result
//case class ShortestPathImpl2(
// depthField: IndexedField,
// ordinalField: IndexedField,
// extracts: Seq[Expression[Any]]
// ) extends ExploreAlgorithmImpl {
//
// import scala.Ordering.Implicits._
//
// override def openReducer: RowReducer = {
// _ ++ _
// }
//
// override def visitedReducer: RowReducer = {
// (v1, v2) =>
// Array((v1 ++ v2).minBy(_.sortIndex(Seq(depthField, ordinalField))))
// }
//
// override def ordering: RowOrdering = Ordering.by{
// v: Iterable[DataRow] =>
// assert(v.size == 1)
// v.head.getInt(depthField).get
// }
//
// override def eliminator: RowEliminator = {
// (v1, v2) =>
// assert(v2.size == 1)
// val visitedDepth = v2.head.getInt(depthField).get
// val filtered = v1.filter {
// row =>
// row.getInt(depthField).get < visitedDepth
// }
// if (filtered.isEmpty) filtered
// else Some(filtered.minBy(_.sortIndex(Seq(depthField, ordinalField))))
// }
//}
  // NOTE(review): DepthFirst is a declared-but-unimplemented placeholder — every
  // member below is ??? and throws scala.NotImplementedError if this algorithm is selected.
  case object DepthFirst extends ExploreAlgorithm {
    override def getImpl(params: Params, schema: SpookySchema): Impl =
      Impl(params, schema)
    case class Impl(params: Params, schema: SpookySchema) extends EliminatingImpl {
      /**
        * Unimplemented: ordering of the open frontier.
        */
      override val ordering: RowOrdering = ???
      /**
        * Unimplemented: elimination of dominated open rows.
        */
      override def eliminator(open: Iterable[DataRow], visited: Iterable[DataRow]): Iterable[DataRow] = ???
      /**
        * Unimplemented: reducer for the open frontier.
        */
      override val openReducer: RowReducer = ???
      /**
        * Unimplemented: reducer for the visited set.
        */
      override val visitedReducer: RowReducer = ???
    }
  }
}
//TODO: finish these
//case object AllSimplePath extends ExploreOptimizer
//case object AllPath extends ExploreOptimizer {
// override def openReducer: RowReducer = {_ ++ _}
//
// //precede eliminator
// override def ordering: RowOrdering = {_ ++ _}
//
// override def visitedReducer: RowReducer = {_ ++ _}
//
// override def eliminator: RowEliminator = {(v1, v2) => v1}
//}
//this won't merge identical traces and do lookup, only used in case each resolve may yield different result
//case class Dijkstra(
//
// ) extends ExploreOptimizer {
// override def openReducer: RowReducer = ???
//
// //precede eliminator
// override def ordering: RowOrdering = ???
//
// override def visitedReducer: RowReducer = ???
//
// override def eliminator: RowEliminator = ???
//}
| tribbloid/spookystuff | core/src/main/scala/com/tribbloids/spookystuff/dsl/ExploreAlgorithm.scala | Scala | apache-2.0 | 6,279 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.{File, FileNotFoundException}
import java.nio.file.{Files, StandardOpenOption}
import java.util.Locale
import scala.collection.mutable
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkException
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}
import org.apache.spark.sql.TestingUDT.{IntervalUDT, NullData, NullUDT}
import org.apache.spark.sql.catalyst.expressions.AttributeReference
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.catalyst.plans.logical.Filter
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
import org.apache.spark.sql.execution.datasources.FilePartition
import org.apache.spark.sql.execution.datasources.v2.{BatchScanExec, DataSourceV2ScanRelation, FileScan}
import org.apache.spark.sql.execution.datasources.v2.parquet.ParquetTable
import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
class FileBasedDataSourceSuite extends QueryTest
with SharedSparkSession
with AdaptiveSparkPlanHelper {
import testImplicits._
  override def beforeAll(): Unit = {
    super.beforeAll()
    // Force the native (non-Hive) ORC implementation for every test in this suite.
    spark.sessionState.conf.setConf(SQLConf.ORC_IMPLEMENTATION, "native")
  }
  override def afterAll(): Unit = {
    try {
      // Undo the ORC implementation override made in beforeAll.
      spark.sessionState.conf.unsetConf(SQLConf.ORC_IMPLEMENTATION)
    } finally {
      // Always let the shared-session teardown run, even if unsetConf fails.
      super.afterAll()
    }
  }
  // Every file-based source format exercised by the generic tests in this suite.
  private val allFileBasedDataSources = Seq("orc", "parquet", "csv", "json", "text")
  // A file name containing characters that need escaping, for path-handling tests.
  private val nameWithSpecialChars = "sp&cial%c hars"
allFileBasedDataSources.foreach { format =>
test(s"Writing empty datasets should not fail - $format") {
withTempPath { dir =>
Seq("str").toDS().limit(0).write.format(format).save(dir.getCanonicalPath)
}
}
}
// `TEXT` data source always has a single column whose name is `value`.
allFileBasedDataSources.filterNot(_ == "text").foreach { format =>
test(s"SPARK-23072 Write and read back unicode column names - $format") {
withTempPath { path =>
val dir = path.getCanonicalPath
// scalastyle:off nonascii
val df = Seq("a").toDF("한글")
// scalastyle:on nonascii
df.write.format(format).option("header", "true").save(dir)
val answerDf = spark.read.format(format).option("header", "true").load(dir)
assert(df.schema.sameType(answerDf.schema))
checkAnswer(df, answerDf)
}
}
}
// Only ORC/Parquet support this. `CSV` and `JSON` returns an empty schema.
// `TEXT` data source always has a single column whose name is `value`.
Seq("orc", "parquet").foreach { format =>
test(s"SPARK-15474 Write and read back non-empty schema with empty dataframe - $format") {
withTempPath { file =>
val path = file.getCanonicalPath
val emptyDf = Seq((true, 1, "str")).toDF().limit(0)
emptyDf.write.format(format).save(path)
val df = spark.read.format(format).load(path)
assert(df.schema.sameType(emptyDf.schema))
checkAnswer(df, emptyDf)
}
}
}
Seq("orc", "parquet").foreach { format =>
test(s"SPARK-23271 empty RDD when saved should write a metadata only file - $format") {
withTempPath { outputPath =>
val df = spark.emptyDataFrame.select(lit(1).as("i"))
df.write.format(format).save(outputPath.toString)
val partFiles = outputPath.listFiles()
.filter(f => f.isFile && !f.getName.startsWith(".") && !f.getName.startsWith("_"))
assert(partFiles.length === 1)
// Now read the file.
val df1 = spark.read.format(format).load(outputPath.toString)
checkAnswer(df1, Seq.empty[Row])
assert(df1.schema.equals(df.schema.asNullable))
}
}
}
allFileBasedDataSources.foreach { format =>
test(s"SPARK-23372 error while writing empty schema files using $format") {
withTempPath { outputPath =>
val errMsg = intercept[AnalysisException] {
spark.emptyDataFrame.write.format(format).save(outputPath.toString)
}
assert(errMsg.getMessage.contains(
"Datasource does not support writing empty or nested empty schemas"))
}
// Nested empty schema
withTempPath { outputPath =>
val schema = StructType(Seq(
StructField("a", IntegerType),
StructField("b", StructType(Nil)),
StructField("c", IntegerType)
))
val df = spark.createDataFrame(sparkContext.emptyRDD[Row], schema)
val errMsg = intercept[AnalysisException] {
df.write.format(format).save(outputPath.toString)
}
assert(errMsg.getMessage.contains(
"Datasource does not support writing empty or nested empty schemas"))
}
}
}
allFileBasedDataSources.foreach { format =>
test(s"SPARK-22146 read files containing special characters using $format") {
withTempDir { dir =>
val tmpFile = s"$dir/$nameWithSpecialChars"
spark.createDataset(Seq("a", "b")).write.format(format).save(tmpFile)
val fileContent = spark.read.format(format).load(tmpFile)
checkAnswer(fileContent, Seq(Row("a"), Row("b")))
}
}
}
// Separate test case for formats that support multiLine as an option.
Seq("json", "csv").foreach { format =>
test("SPARK-23148 read files containing special characters " +
s"using $format with multiline enabled") {
withTempDir { dir =>
val tmpFile = s"$dir/$nameWithSpecialChars"
spark.createDataset(Seq("a", "b")).write.format(format).save(tmpFile)
val reader = spark.read.format(format).option("multiLine", true)
val fileContent = reader.load(tmpFile)
checkAnswer(fileContent, Seq(Row("a"), Row("b")))
}
}
}
allFileBasedDataSources.foreach { format =>
testQuietly(s"Enabling/disabling ignoreMissingFiles using $format") {
def testIgnoreMissingFiles(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
Seq("0").toDF("a").write.format(format).save(new Path(basePath, "second").toString)
Seq("1").toDF("a").write.format(format).save(new Path(basePath, "fourth").toString)
val firstPath = new Path(basePath, "first")
val thirdPath = new Path(basePath, "third")
val fs = thirdPath.getFileSystem(spark.sessionState.newHadoopConf())
Seq("2").toDF("a").write.format(format).save(firstPath.toString)
Seq("3").toDF("a").write.format(format).save(thirdPath.toString)
val files = Seq(firstPath, thirdPath).flatMap { p =>
fs.listStatus(p).filter(_.isFile).map(_.getPath)
}
val df = spark.read.format(format).load(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString,
new Path(basePath, "third").toString,
new Path(basePath, "fourth").toString)
// Make sure all data files are deleted and can't be opened.
files.foreach(f => fs.delete(f, false))
assert(fs.delete(thirdPath, true))
for (f <- files) {
intercept[FileNotFoundException](fs.open(f))
}
checkAnswer(df, Seq(Row("0"), Row("1")))
}
}
for {
ignore <- Seq("true", "false")
sources <- Seq("", format)
} {
withSQLConf(SQLConf.IGNORE_MISSING_FILES.key -> ignore,
SQLConf.USE_V1_SOURCE_LIST.key -> sources) {
if (ignore.toBoolean) {
testIgnoreMissingFiles()
} else {
val exception = intercept[SparkException] {
testIgnoreMissingFiles()
}
assert(exception.getMessage().contains("does not exist"))
}
}
}
}
}
// Text file format only supports string type
test("SPARK-24691 error handling for unsupported types - text") {
withTempDir { dir =>
// write path
val textDir = new File(dir, "text").getCanonicalPath
var msg = intercept[AnalysisException] {
Seq(1).toDF.write.text(textDir)
}.getMessage
assert(msg.contains("Text data source does not support int data type"))
msg = intercept[AnalysisException] {
Seq(1.2).toDF.write.text(textDir)
}.getMessage
assert(msg.contains("Text data source does not support double data type"))
msg = intercept[AnalysisException] {
Seq(true).toDF.write.text(textDir)
}.getMessage
assert(msg.contains("Text data source does not support boolean data type"))
msg = intercept[AnalysisException] {
Seq(1).toDF("a").selectExpr("struct(a)").write.text(textDir)
}.getMessage
assert(msg.contains("Text data source does not support struct<a:int> data type"))
msg = intercept[AnalysisException] {
Seq((Map("Tesla" -> 3))).toDF("cars").write.mode("overwrite").text(textDir)
}.getMessage
assert(msg.contains("Text data source does not support map<string,int> data type"))
msg = intercept[AnalysisException] {
Seq((Array("Tesla", "Chevy", "Ford"))).toDF("brands")
.write.mode("overwrite").text(textDir)
}.getMessage
assert(msg.contains("Text data source does not support array<string> data type"))
// read path
Seq("aaa").toDF.write.mode("overwrite").text(textDir)
msg = intercept[AnalysisException] {
val schema = StructType(StructField("a", IntegerType, true) :: Nil)
spark.read.schema(schema).text(textDir).collect()
}.getMessage
assert(msg.contains("Text data source does not support int data type"))
msg = intercept[AnalysisException] {
val schema = StructType(StructField("a", DoubleType, true) :: Nil)
spark.read.schema(schema).text(textDir).collect()
}.getMessage
assert(msg.contains("Text data source does not support double data type"))
msg = intercept[AnalysisException] {
val schema = StructType(StructField("a", BooleanType, true) :: Nil)
spark.read.schema(schema).text(textDir).collect()
}.getMessage
assert(msg.contains("Text data source does not support boolean data type"))
}
}
// Unsupported data types of csv, json, orc, and parquet are as follows;
// csv -> R/W: Null, Array, Map, Struct
// json -> R/W: Interval
// orc -> R/W: Interval, W: Null
// parquet -> R/W: Interval, Null
test("SPARK-24204 error handling for unsupported Array/Map/Struct types - csv") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
var msg = intercept[AnalysisException] {
Seq((1, "Tesla")).toDF("a", "b").selectExpr("struct(a, b)").write.csv(csvDir)
}.getMessage
assert(msg.contains("CSV data source does not support struct<a:int,b:string> data type"))
msg = intercept[AnalysisException] {
val schema = StructType.fromDDL("a struct<b: Int>")
spark.range(1).write.mode("overwrite").csv(csvDir)
spark.read.schema(schema).csv(csvDir).collect()
}.getMessage
assert(msg.contains("CSV data source does not support struct<b:int> data type"))
msg = intercept[AnalysisException] {
Seq((1, Map("Tesla" -> 3))).toDF("id", "cars").write.mode("overwrite").csv(csvDir)
}.getMessage
assert(msg.contains("CSV data source does not support map<string,int> data type"))
msg = intercept[AnalysisException] {
val schema = StructType.fromDDL("a map<int, int>")
spark.range(1).write.mode("overwrite").csv(csvDir)
spark.read.schema(schema).csv(csvDir).collect()
}.getMessage
assert(msg.contains("CSV data source does not support map<int,int> data type"))
msg = intercept[AnalysisException] {
Seq((1, Array("Tesla", "Chevy", "Ford"))).toDF("id", "brands")
.write.mode("overwrite").csv(csvDir)
}.getMessage
assert(msg.contains("CSV data source does not support array<string> data type"))
msg = intercept[AnalysisException] {
val schema = StructType.fromDDL("a array<int>")
spark.range(1).write.mode("overwrite").csv(csvDir)
spark.read.schema(schema).csv(csvDir).collect()
}.getMessage
assert(msg.contains("CSV data source does not support array<int> data type"))
msg = intercept[AnalysisException] {
Seq((1, new TestUDT.MyDenseVector(Array(0.25, 2.25, 4.25)))).toDF("id", "vectors")
.write.mode("overwrite").csv(csvDir)
}.getMessage
assert(msg.contains("CSV data source does not support array<double> data type"))
msg = intercept[AnalysisException] {
val schema = StructType(StructField("a", new TestUDT.MyDenseVectorUDT(), true) :: Nil)
spark.range(1).write.mode("overwrite").csv(csvDir)
spark.read.schema(schema).csv(csvDir).collect()
}.getMessage
assert(msg.contains("CSV data source does not support array<double> data type."))
}
}
test("SPARK-24204 error handling for unsupported Interval data types - csv, json, parquet, orc") {
withTempDir { dir =>
val tempDir = new File(dir, "files").getCanonicalPath
// TODO: test file source V2 after write path is fixed.
Seq(true).foreach { useV1 =>
val useV1List = if (useV1) {
"csv,json,orc,parquet"
} else {
""
}
def validateErrorMessage(msg: String): Unit = {
val msg1 = "cannot save interval data type into external storage."
val msg2 = "data source does not support interval data type."
assert(msg.toLowerCase(Locale.ROOT).contains(msg1) ||
msg.toLowerCase(Locale.ROOT).contains(msg2))
}
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> useV1List) {
// write path
Seq("csv", "json", "parquet", "orc").foreach { format =>
val msg = intercept[AnalysisException] {
sql("select interval 1 days").write.format(format).mode("overwrite").save(tempDir)
}.getMessage
validateErrorMessage(msg)
}
// read path
Seq("parquet", "csv").foreach { format =>
var msg = intercept[AnalysisException] {
val schema = StructType(StructField("a", CalendarIntervalType, true) :: Nil)
spark.range(1).write.format(format).mode("overwrite").save(tempDir)
spark.read.schema(schema).format(format).load(tempDir).collect()
}.getMessage
validateErrorMessage(msg)
msg = intercept[AnalysisException] {
val schema = StructType(StructField("a", new IntervalUDT(), true) :: Nil)
spark.range(1).write.format(format).mode("overwrite").save(tempDir)
spark.read.schema(schema).format(format).load(tempDir).collect()
}.getMessage
validateErrorMessage(msg)
}
}
}
}
}
test("SPARK-24204 error handling for unsupported Null data types - csv, parquet, orc") {
// TODO: test file source V2 after write path is fixed.
Seq(true).foreach { useV1 =>
val useV1List = if (useV1) {
"csv,orc,parquet"
} else {
""
}
def errorMessage(format: String): String = {
s"$format data source does not support null data type."
}
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> useV1List) {
withTempDir { dir =>
val tempDir = new File(dir, "files").getCanonicalPath
Seq("parquet", "csv", "orc").foreach { format =>
// write path
var msg = intercept[AnalysisException] {
sql("select null").write.format(format).mode("overwrite").save(tempDir)
}.getMessage
assert(msg.toLowerCase(Locale.ROOT)
.contains(errorMessage(format)))
msg = intercept[AnalysisException] {
spark.udf.register("testType", () => new NullData())
sql("select testType()").write.format(format).mode("overwrite").save(tempDir)
}.getMessage
assert(msg.toLowerCase(Locale.ROOT)
.contains(errorMessage(format)))
// read path
msg = intercept[AnalysisException] {
val schema = StructType(StructField("a", NullType, true) :: Nil)
spark.range(1).write.format(format).mode("overwrite").save(tempDir)
spark.read.schema(schema).format(format).load(tempDir).collect()
}.getMessage
assert(msg.toLowerCase(Locale.ROOT)
.contains(errorMessage(format)))
msg = intercept[AnalysisException] {
val schema = StructType(StructField("a", new NullUDT(), true) :: Nil)
spark.range(1).write.format(format).mode("overwrite").save(tempDir)
spark.read.schema(schema).format(format).load(tempDir).collect()
}.getMessage
assert(msg.toLowerCase(Locale.ROOT)
.contains(errorMessage(format)))
}
}
}
}
}
Seq("parquet", "orc").foreach { format =>
test(s"Spark native readers should respect spark.sql.caseSensitive - ${format}") {
withTempDir { dir =>
val tableName = s"spark_25132_${format}_native"
val tableDir = dir.getCanonicalPath + s"/$tableName"
withTable(tableName) {
val end = 5
val data = spark.range(end).selectExpr("id as A", "id * 2 as b", "id * 3 as B")
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
data.write.format(format).mode("overwrite").save(tableDir)
}
sql(s"CREATE TABLE $tableName (a LONG, b LONG) USING $format LOCATION '$tableDir'")
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
checkAnswer(sql(s"select a from $tableName"), data.select("A"))
checkAnswer(sql(s"select A from $tableName"), data.select("A"))
// RuntimeException is triggered at executor side, which is then wrapped as
// SparkException at driver side
val e1 = intercept[SparkException] {
sql(s"select b from $tableName").collect()
}
assert(
e1.getCause.isInstanceOf[RuntimeException] &&
e1.getCause.getMessage.contains(
"""Found duplicate field(s) "b": [b, B] in case-insensitive mode"""))
val e2 = intercept[SparkException] {
sql(s"select B from $tableName").collect()
}
assert(
e2.getCause.isInstanceOf[RuntimeException] &&
e2.getCause.getMessage.contains(
"""Found duplicate field(s) "b": [b, B] in case-insensitive mode"""))
}
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
checkAnswer(sql(s"select a from $tableName"), (0 until end).map(_ => Row(null)))
checkAnswer(sql(s"select b from $tableName"), data.select("b"))
}
}
}
}
}
test("SPARK-25237 compute correct input metrics in FileScanRDD") {
// TODO: Test CSV V2 as well after it implements [[SupportsReportStatistics]].
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "csv") {
withTempPath { p =>
val path = p.getAbsolutePath
spark.range(1000).repartition(1).write.csv(path)
val bytesReads = new mutable.ArrayBuffer[Long]()
val bytesReadListener = new SparkListener() {
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
bytesReads += taskEnd.taskMetrics.inputMetrics.bytesRead
}
}
sparkContext.addSparkListener(bytesReadListener)
try {
spark.read.csv(path).limit(1).collect()
sparkContext.listenerBus.waitUntilEmpty()
assert(bytesReads.sum === 7860)
} finally {
sparkContext.removeSparkListener(bytesReadListener)
}
}
}
}
test("Do not use cache on overwrite") {
Seq("", "orc").foreach { useV1SourceReaderList =>
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> useV1SourceReaderList) {
withTempDir { dir =>
val path = dir.toString
spark.range(1000).write.mode("overwrite").orc(path)
val df = spark.read.orc(path).cache()
assert(df.count() == 1000)
spark.range(10).write.mode("overwrite").orc(path)
assert(df.count() == 10)
assert(spark.read.orc(path).count() == 10)
}
}
}
}
test("Do not use cache on append") {
Seq("", "orc").foreach { useV1SourceReaderList =>
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> useV1SourceReaderList) {
withTempDir { dir =>
val path = dir.toString
spark.range(1000).write.mode("append").orc(path)
val df = spark.read.orc(path).cache()
assert(df.count() == 1000)
spark.range(10).write.mode("append").orc(path)
assert(df.count() == 1010)
assert(spark.read.orc(path).count() == 1010)
}
}
}
}
test("UDF input_file_name()") {
Seq("", "orc").foreach { useV1SourceReaderList =>
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> useV1SourceReaderList) {
withTempPath { dir =>
val path = dir.getCanonicalPath
spark.range(10).write.orc(path)
val row = spark.read.orc(path).select(input_file_name).first()
assert(row.getString(0).contains(path))
}
}
}
}
test("Option pathGlobFilter: filter files correctly") {
withTempPath { path =>
val dataDir = path.getCanonicalPath
Seq("foo").toDS().write.text(dataDir)
Seq("bar").toDS().write.mode("append").orc(dataDir)
val df = spark.read.option("pathGlobFilter", "*.txt").text(dataDir)
checkAnswer(df, Row("foo"))
// Both glob pattern in option and path should be effective to filter files.
val df2 = spark.read.option("pathGlobFilter", "*.txt").text(dataDir + "/*.orc")
checkAnswer(df2, Seq.empty)
val df3 = spark.read.option("pathGlobFilter", "*.txt").text(dataDir + "/*xt")
checkAnswer(df3, Row("foo"))
}
}
test("Option pathGlobFilter: simple extension filtering should contains partition info") {
withTempPath { path =>
val input = Seq(("foo", 1), ("oof", 2)).toDF("a", "b")
input.write.partitionBy("b").text(path.getCanonicalPath)
Seq("bar").toDS().write.mode("append").orc(path.getCanonicalPath + "/b=1")
// If we use glob pattern in the path, the partition column won't be shown in the result.
val df = spark.read.text(path.getCanonicalPath + "/*/*.txt")
checkAnswer(df, input.select("a"))
val df2 = spark.read.option("pathGlobFilter", "*.txt").text(path.getCanonicalPath)
checkAnswer(df2, input)
}
}
test("Option recursiveFileLookup: recursive loading correctly") {
val expectedFileList = mutable.ListBuffer[String]()
def createFile(dir: File, fileName: String, format: String): Unit = {
val path = new File(dir, s"${fileName}.${format}")
Files.write(
path.toPath,
s"content of ${path.toString}".getBytes,
StandardOpenOption.CREATE, StandardOpenOption.WRITE
)
val fsPath = new Path(path.getAbsoluteFile.toURI).toString
expectedFileList.append(fsPath)
}
def createDir(path: File, dirName: String, level: Int): Unit = {
val dir = new File(path, s"dir${dirName}-${level}")
dir.mkdir()
createFile(dir, s"file${level}", "bin")
createFile(dir, s"file${level}", "text")
if (level < 4) {
// create sub-dir
createDir(dir, "sub0", level + 1)
createDir(dir, "sub1", level + 1)
}
}
withTempPath { path =>
path.mkdir()
createDir(path, "root", 0)
val dataPath = new File(path, "dirroot-0").getAbsolutePath
val fileList = spark.read.format("binaryFile")
.option("recursiveFileLookup", true)
.load(dataPath)
.select("path").collect().map(_.getString(0))
assert(fileList.toSet === expectedFileList.toSet)
val fileList2 = spark.read.format("binaryFile")
.option("recursiveFileLookup", true)
.option("pathGlobFilter", "*.bin")
.load(dataPath)
.select("path").collect().map(_.getString(0))
assert(fileList2.toSet === expectedFileList.filter(_.endsWith(".bin")).toSet)
}
}
test("Option recursiveFileLookup: disable partition inferring") {
val dataPath = Thread.currentThread().getContextClassLoader
.getResource("test-data/text-partitioned").toString
val df = spark.read.format("binaryFile")
.option("recursiveFileLookup", true)
.load(dataPath)
assert(!df.columns.contains("year"), "Expect partition inferring disabled")
val fileList = df.select("path").collect().map(_.getString(0))
val expectedFileList = Array(
dataPath + "/year=2014/data.txt",
dataPath + "/year=2015/data.txt"
).map(path => new Path(path).toString)
assert(fileList.toSet === expectedFileList.toSet)
}
// Partition-directory values must win over same-named data columns, resolved
// case-insensitively, for every built-in file format.
test("Return correct results when data columns overlap with partition columns") {
  Seq("parquet", "orc", "json").foreach { format =>
    withTempPath { path =>
      // Directory names encode partition columns whose names collide (modulo
      // case) with the data columns written below.
      val tablePath = new File(s"${path.getCanonicalPath}/cOl3=c/cOl1=a/cOl5=e")
      Seq((1, 2, 3, 4, 5)).toDF("cOl1", "cOl2", "cOl3", "cOl4", "cOl5")
        .write.format(format).save(tablePath.getCanonicalPath)

      // Select using yet another casing: the partition values "a", "e", "c"
      // shadow the data columns of the same (case-insensitive) names.
      val df = spark.read.format(format).load(path.getCanonicalPath)
        .select("CoL1", "Col2", "CoL5", "CoL3")
      checkAnswer(df, Row("a", 2, "e", "c"))
    }
  }
}

// Same overlap scenario, but with a nested struct column present and nested
// schema pruning enabled, selecting a nested field among the overlapping ones.
test("Return correct results when data columns overlap with partition columns (nested data)") {
  Seq("parquet", "orc", "json").foreach { format =>
    withSQLConf(SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key -> "true") {
      withTempPath { path =>
        val tablePath = new File(s"${path.getCanonicalPath}/c3=c/c1=a/c5=e")
        val inputDF = sql("SELECT 1 c1, 2 c2, 3 c3, named_struct('c4_1', 2, 'c4_2', 3) c4, 5 c5")
        inputDF.write.format(format).save(tablePath.getCanonicalPath)

        val resultDF = spark.read.format(format).load(path.getCanonicalPath)
          .select("c1", "c4.c4_1", "c5", "c3")
        // Partition values "a"/"e"/"c" override the written data columns.
        checkAnswer(resultDF, Row("a", 2, "e", "c"))
      }
    }
  }
}

// The optimizer's size estimate for a file-based relation must equal the sum
// of the on-disk file sizes (checked for both the v1 and v2 ORC readers).
test("sizeInBytes should be the total size of all files") {
  Seq("orc", "").foreach { useV1SourceReaderList =>
    withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> useV1SourceReaderList) {
      withTempDir { dir =>
        dir.delete()
        spark.range(1000).write.orc(dir.toString)
        val df = spark.read.orc(dir.toString)
        assert(df.queryExecution.optimizedPlan.stats.sizeInBytes === BigInt(getLocalDirSize(dir)))
      }
    }
  }
}

// The compression factor scales the estimated relation size: with a 486-byte
// file and a broadcast threshold of 250, factor 0.5 (estimate 243) allows a
// broadcast hash join, while factor 1.0 forces a sort-merge join.
test("SPARK-22790,SPARK-27668: spark.sql.sources.compressionFactor takes effect") {
  Seq(1.0, 0.5).foreach { compressionFactor =>
    withSQLConf(SQLConf.FILE_COMPRESSION_FACTOR.key -> compressionFactor.toString,
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "250") {
      withTempPath { workDir =>
        // the file size is 486 bytes
        val workDirPath = workDir.getAbsolutePath
        val data1 = Seq(100, 200, 300, 400).toDF("count")
        data1.write.orc(workDirPath + "/data1")
        val df1FromFile = spark.read.orc(workDirPath + "/data1")
        val data2 = Seq(100, 200, 300, 400).toDF("count")
        data2.write.orc(workDirPath + "/data2")
        val df2FromFile = spark.read.orc(workDirPath + "/data2")
        val joinedDF = df1FromFile.join(df2FromFile, Seq("count"))
        if (compressionFactor == 0.5) {
          // Estimated size falls below the broadcast threshold.
          val bJoinExec = collect(joinedDF.queryExecution.executedPlan) {
            case bJoin: BroadcastHashJoinExec => bJoin
          }
          assert(bJoinExec.nonEmpty)
          val smJoinExec = collect(joinedDF.queryExecution.executedPlan) {
            case smJoin: SortMergeJoinExec => smJoin
          }
          assert(smJoinExec.isEmpty)
        } else {
          // compressionFactor is 1.0: the estimate exceeds the threshold.
          val bJoinExec = collect(joinedDF.queryExecution.executedPlan) {
            case bJoin: BroadcastHashJoinExec => bJoin
          }
          assert(bJoinExec.isEmpty)
          val smJoinExec = collect(joinedDF.queryExecution.executedPlan) {
            case smJoin: SortMergeJoinExec => smJoin
          }
          assert(smJoinExec.nonEmpty)
        }
      }
    }
  }
}
// For v2 file sources, partition predicates must be pushed into the FileScan
// (pruning input partitions) and removed from the post-scan Filter node.
test("File source v2: support partition pruning") {
  withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "") {
    allFileBasedDataSources.foreach { format =>
      withTempPath { dir =>
        Seq(("a", 1, 2), ("b", 1, 2), ("c", 2, 1))
          .toDF("value", "p1", "p2")
          .write
          .format(format)
          .partitionBy("p1", "p2")
          .option("header", true)
          .save(dir.getCanonicalPath)
        val df = spark
          .read
          .format(format)
          .option("header", true)
          .load(dir.getCanonicalPath)
          .where("p1 = 1 and p2 = 2 and value != \"a\"")
        val filterCondition = df.queryExecution.optimizedPlan.collectFirst {
          case f: Filter => f.condition
        }
        assert(filterCondition.isDefined)
        // The partitions filters should be pushed down and no need to be reevaluated.
        assert(filterCondition.get.collectFirst {
          case a: AttributeReference if a.name == "p1" || a.name == "p2" => a
        }.isEmpty)

        val fileScan = df.queryExecution.executedPlan collectFirst {
          case BatchScanExec(_, f: FileScan) => f
        }
        assert(fileScan.nonEmpty)
        assert(fileScan.get.partitionFilters.nonEmpty)
        assert(fileScan.get.dataFilters.nonEmpty)
        // Only files under the matching partition directories may be planned.
        assert(fileScan.get.planInputPartitions().forall { partition =>
          partition.asInstanceOf[FilePartition].files.forall { file =>
            file.filePath.contains("p1=1") && file.filePath.contains("p2=2")
          }
        })
        checkAnswer(df, Row("b", 1, 2))
      }
    }
  }
}

// A query with only data predicates (no partition predicates) must still hand
// its data filters to the FileScan while leaving partitionFilters empty.
test("File source v2: support passing data filters to FileScan without partitionFilters") {
  withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "") {
    allFileBasedDataSources.foreach { format =>
      withTempPath { dir =>
        Seq(("a", 1, 2), ("b", 1, 2), ("c", 2, 1))
          .toDF("value", "p1", "p2")
          .write
          .format(format)
          .partitionBy("p1", "p2")
          .option("header", true)
          .save(dir.getCanonicalPath)
        val df = spark
          .read
          .format(format)
          .option("header", true)
          .load(dir.getCanonicalPath)
          .where("value = 'a'")
        val filterCondition = df.queryExecution.optimizedPlan.collectFirst {
          case f: Filter => f.condition
        }
        assert(filterCondition.isDefined)

        val fileScan = df.queryExecution.executedPlan collectFirst {
          case BatchScanExec(_, f: FileScan) => f
        }
        assert(fileScan.nonEmpty)
        assert(fileScan.get.partitionFilters.isEmpty)
        assert(fileScan.get.dataFilters.nonEmpty)
        checkAnswer(df, Row("a", 1, 2))
      }
    }
  }
}
// The v2 table's path list must combine the `path` option with the paths
// passed directly to the parquet(...) call.
test("File table location should include both values of option `path` and `paths`") {
  withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "") {
    withTempPaths(3) { paths =>
      paths.zipWithIndex.foreach { case (path, index) =>
        Seq(index).toDF("a").write.mode("overwrite").parquet(path.getCanonicalPath)
      }
      val df = spark
        .read
        .option("path", paths.head.getCanonicalPath)
        .parquet(paths(1).getCanonicalPath, paths(2).getCanonicalPath)
      df.queryExecution.optimizedPlan match {
        case PhysicalOperation(_, _, DataSourceV2ScanRelation(table: ParquetTable, _, _)) =>
          assert(table.paths.toSet == paths.map(_.getCanonicalPath).toSet)
        case _ =>
          throw new AnalysisException("Can not match ParquetTable in the query.")
      }

      checkAnswer(df, Seq(0, 1, 2).map(Row(_)))
    }
  }
}

// Hadoop filesystem settings supplied as data source options must reach the
// underlying FileSystem for both writes and reads (v1 and v2 code paths).
test("SPARK-31935: Hadoop file system config should be effective in data source options") {
  Seq("parquet", "").foreach { format =>
    withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> format) {
      withTempDir { dir =>
        val path = dir.getCanonicalPath
        val defaultFs = "nonexistFS://nonexistFS"
        val expectMessage = "No FileSystem for scheme nonexistFS"
        val message1 = intercept[java.io.IOException] {
          spark.range(10).write.option("fs.defaultFS", defaultFs).parquet(path)
        }.getMessage
        // Strip colons/quotes so the comparison is stable across Hadoop versions.
        assert(message1.filterNot(Set(':', '"').contains) == expectMessage)
        val message2 = intercept[java.io.IOException] {
          spark.read.option("fs.defaultFS", defaultFs).parquet(path)
        }.getMessage
        assert(message2.filterNot(Set(':', '"').contains) == expectMessage)
      }
    }
  }
}

// User-provided schemas over nested structs must resolve regardless of
// column-name casing (root and nested levels) when the session is
// case-insensitive, with nested schema pruning both on and off.
test("SPARK-31116: Select nested schema with case insensitive mode") {
  // This test case failed at only Parquet. ORC is added for test coverage parity.
  Seq("orc", "parquet").foreach { format =>
    Seq("true", "false").foreach { nestedSchemaPruningEnabled =>
      withSQLConf(
        SQLConf.CASE_SENSITIVE.key -> "false",
        SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key -> nestedSchemaPruningEnabled) {
        withTempPath { dir =>
          val path = dir.getCanonicalPath

          // Prepare values for testing nested parquet data
          spark
            .range(1L)
            .selectExpr("NAMED_STRUCT('lowercase', id, 'camelCase', id + 1) AS StructColumn")
            .write
            .format(format)
            .save(path)

          val exactSchema = "StructColumn struct<lowercase: LONG, camelCase: LONG>"
          checkAnswer(spark.read.schema(exactSchema).format(format).load(path), Row(Row(0, 1)))

          // In case insensitive manner, parquet's column cases are ignored
          val innerColumnCaseInsensitiveSchema =
            "StructColumn struct<Lowercase: LONG, camelcase: LONG>"
          checkAnswer(
            spark.read.schema(innerColumnCaseInsensitiveSchema).format(format).load(path),
            Row(Row(0, 1)))

          val rootColumnCaseInsensitiveSchema =
            "structColumn struct<lowercase: LONG, camelCase: LONG>"
          checkAnswer(
            spark.read.schema(rootColumnCaseInsensitiveSchema).format(format).load(path),
            Row(Row(0, 1)))
        }
      }
    }
  }
}
}
// Test-only user-defined types for this suite, backed by SQL types
// (CalendarIntervalType / NullType) that file-based sources do not support.
object TestingUDT {

  /** UDT-annotated payload class whose UDT maps to CalendarIntervalType. */
  @SQLUserDefinedType(udt = classOf[IntervalUDT])
  class IntervalData extends Serializable

  class IntervalUDT extends UserDefinedType[IntervalData] {
    override def sqlType: DataType = CalendarIntervalType
    // (De)serialization is never exercised by these tests, so it is left
    // deliberately unimplemented.
    override def serialize(obj: IntervalData): Any =
      throw new UnsupportedOperationException("Not implemented")
    override def deserialize(datum: Any): IntervalData =
      throw new UnsupportedOperationException("Not implemented")
    override def userClass: Class[IntervalData] = classOf[IntervalData]
  }

  /** UDT-annotated payload class whose UDT maps to NullType. */
  @SQLUserDefinedType(udt = classOf[NullUDT])
  private[sql] class NullData extends Serializable

  private[sql] class NullUDT extends UserDefinedType[NullData] {
    override def sqlType: DataType = NullType
    override def serialize(obj: NullData): Any =
      throw new UnsupportedOperationException("Not implemented")
    override def deserialize(datum: Any): NullData =
      throw new UnsupportedOperationException("Not implemented")
    override def userClass: Class[NullData] = classOf[NullData]
  }
}
| ConeyLiu/spark | sql/core/src/test/scala/org/apache/spark/sql/FileBasedDataSourceSuite.scala | Scala | apache-2.0 | 36,971 |
package scala.tools.testing
import scala.reflect.runtime._
import scala.tools.reflect.ToolBox
/**
 * Mix-in for JUnit suites that need to compile and run Scala snippets at
 * test time via a reflective toolbox.
 */
trait RunTesting extends ClearAfterClass {
  /** Extra compiler flags passed to the toolbox; override per suite. */
  def compilerArgs = "" // to be overridden
  // Cached per test class (and released by ClearAfterClass) because creating
  // a toolbox-backed Runner is expensive.
  val runner = cached("toolbox", () => Runner.make(compilerArgs))
}
/** Thin wrapper around a reflective ToolBox that compiles and evaluates source strings. */
class Runner(val toolBox: ToolBox[universe.type]) {
  /** Parse, compile and evaluate `code`, casting the result to the expected type. */
  def run[T](code: String): T = {
    val parsed = toolBox.parse(code)
    toolBox.eval(parsed).asInstanceOf[T]
  }
}
object Runner {
  /** Build a Runner whose toolbox uses this class's classloader and the given compiler flags. */
  def make(compilerArgs: String) = new Runner(universe.runtimeMirror(getClass.getClassLoader).mkToolBox(options = compilerArgs))
}
| felixmulder/scala | test/junit/scala/tools/testing/RunTesting.scala | Scala | bsd-3-clause | 536 |
/**
* Licensed to Gravity.com under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Gravity.com licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gravity.goose
import images.Image
import org.jsoup.nodes.{Element, Document}
import java.util.Date
import scala.collection._
/**
* Created by Jim Plush
* User: jim
* Date: 8/14/11
*/
class Article {

  /** Title of the article. */
  var title: String = null

  /**
   * The pure text of the article, stripped of HTML, formatting, etc. —
   * raw text with paragraphs separated by newlines. This is probably what
   * you want to use.
   */
  var cleanedArticleText: String = ""

  /**
   * Simplified HTML of the article body, stripped of markup noise.
   * NOTE(review): the original comment mentioned a map of positions to
   * paragraphs, but the field is a plain HTML string — verify intent.
   */
  var cleanedArticleSimpleHTML: String = ""

  // Parsed JSoup form of cleanedArticleSimpleHTML, when one has been built.
  var cleanedArticleSimpleHTMLDoc: Option[Document] = None

  // Output rendering mode; defaults to HTML-style output.
  var outputFormat: String = "HTML_STYLE"

  /** Meta description field from the HTML source. */
  var metaDescription: String = ""

  /** Meta keywords field from the HTML source. */
  var metaKeywords: String = ""

  /** The canonical link of this article, if found in the meta data. */
  var canonicalLink: String = ""

  /** The domain of the article being parsed. */
  var domain: String = ""

  /** The top Element we think is a candidate for the main body of the article. */
  var topNode: Element = null

  /** The top Image object that we think represents this article. */
  var topImage: Image = new Image

  /** Tags that may have been on the article page; these are not meta keywords. */
  var tags: Set[String] = null

  /** Any movie embeds found on the page (e.g. YouTube, Vimeo). */
  var movies: List[Element] = Nil

  /**
   * The final URL that we're going to try and fetch content against; this
   * would be expanded if any escaped fragments were found in the starting URL.
   */
  var finalUrl: String = "";

  /** MD5 hash of the URL, used for various identification tasks. */
  var linkhash: String = "";

  /** The raw HTML straight from the network connection. */
  var rawHtml: String = ""

  /** The (cleaned) JSoup Document object. */
  var doc: Document = null

  /**
   * The original JSoup document parsed from the original HTML, without any
   * cleaning options applied to it.
   */
  var rawDoc: Document = null

  /** Publish date of the article, when it can be determined. */
  var publishDate: Date = null

  /**
   * A property bucket for consumers of goose to store custom data extractions.
   * This is populated by an implementation of {@link com.gravity.goose.extractors.AdditionalDataExtractor}
   * which is executed before document cleansing within {@link com.gravity.goose.CrawlingActor#crawl}
   * @return a {@link Map Map<String,String>} of property name to property value (represented as a {@link String}).
   */
  var additionalData: Map[String, String] = Map.empty
}
} | davidyanez/goose | src/main/scala/com/gravity/goose/Article.scala | Scala | apache-2.0 | 3,751 |
/*
* Copyright 2011-2012 Jonathan Anderson
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.{FileInputStream,FileOutputStream}
import java.net.{URI,URL}
import java.security.{AccessController, AllPermission}
import java.util.logging.Level._
import java.util.logging.Logger
import javax.swing.JFileChooser
import scala.collection.{immutable,mutable}
import scala.collection.JavaConversions._
import me.footlights.api
import me.footlights.api.KernelInterface
import me.footlights.api.support.Either._
import me.footlights.core.data.store.CASClient
package me.footlights.core {
import apps.AppWrapper
import crypto.{Fingerprint,Keychain,MutableKeychain}
import data.store.{CASClient, DiskStore, Store}
/**
* The Footlights kernel, composed of:
* - a filesystem
* - application management
* - a UI manager (for dispatching events to the web UI, swing UI, etc.)
*
* It is abstract, since it does not implement:
* - openLocalFile()
*
* These methods should be mixed in (e.g. "with {@link SwingDialogs}") on instantiation, as
* should {@link KernelPrivilege} if running in a privilege-constrained environment.
*/
abstract class Kernel(
    protected val io: IO,
    protected val appLoader: ClassLoader,
    protected val prefs: FileBackedPreferences,
    protected val keychain: MutableKeychain,
    protected val loadedApps: mutable.HashMap[URI,AppWrapper],
    protected val uis: mutable.Set[UI],
    protected val cache: DiskStore)
  extends Footlights
  with data.store.Filesystem
  with apps.ApplicationManagement
  with users.IdentityManagement
  with Placeholders
  with UIManager
{
  // Used to resolve non-opaque URLs to CAS links (see localizeJar below).
  private val resolver = Resolver(io, keychain)
  protected val store = CASClient(Preferences(prefs), resolver, Option(cache)) // TODO: don't wrap?

  /**
   * Fetch the JAR file named by a {@link URI} (either directly by CAS hash-name or
   * indirectly by an indirection {@link URL}) and store it locally as a conventional
   * JAR file inside the OS' filesystem.
   *
   * Returns Left(exception) if the key is unknown, the URI has no scheme, or
   * the content is not in the store.
   */
  override def localizeJar(uri:URI) = {
    // An opaque URI names content by fingerprint: look its key up in the
    // keychain. Otherwise a scheme-ful URI is resolved via the resolver.
    val absoluteLink:Either[Exception,crypto.Link] =
      if (uri.isOpaque)
        keychain getLink Fingerprint.decode(uri) toRight {
          new Exception("Key for URI '%s' not stored in keychain" format uri)
        }
      else if (uri.getScheme != null) resolver.resolve(uri.toURL)
      else Left(new Exception("Tried to localize scheme-less URI '%s'" format uri.toString))

    absoluteLink flatMap {
      store fetch _ toRight(new Exception("%s not in store" format uri))
    } map { _.getInputStream } map {
      java.nio.channels.Channels.newChannel
    } map { in =>
      // Copy (at most MaxJarSize bytes of) the content into a temp file so it
      // can be opened with the standard JarFile machinery.
      val tmpFile = java.io.File.createTempFile("footlights-dep", ".jar")
      tmpFile.setWritable(true)
      new FileOutputStream(tmpFile).getChannel.transferFrom(in, 0, MaxJarSize)
      tmpFile
    } map { new java.util.jar.JarFile(_) }
  }

  private val log = Logger getLogger classOf[Kernel].getCanonicalName

  /** The largest JAR file which we are happy to open (1 GiB). */
  private val MaxJarSize = 1024 * 1024 * 1024
}
object Kernel {
  /**
   * Create a Footlights kernel which uses a given ClassLoader to load applications.
   *
   * Wires together preferences, I/O, the disk-persisted keychain, the app map,
   * the UI registry and the local disk cache, then mixes in Swing powerboxes
   * and the kernel-privilege wrapper.
   */
  def init(appLoader:ClassLoader): Kernel = {
    // This is the Footlights core, the security kernel; ensure that we can do anything.
    AccessController checkPermission { new AllPermission() }

    val fileBackedPrefs = FileBackedPreferences.loadFromDefaultLocation
    Flusher(fileBackedPrefs) start

    val prefs = Preferences.create(Option(fileBackedPrefs))

    val io = IO.direct

    val keychainFile = prefs getString { FileBackedPreferences.KEYCHAIN_KEY } map {
      new java.io.File(_) } get

    // Load an existing keychain if present; fall back to an empty one on error.
    val keystore =
      if (keychainFile.exists) {
        try { Keychain.importKeyStore(new FileInputStream(keychainFile).getChannel) }
        catch {
          case e:Exception =>
            log.log(SEVERE, "Error loading keychain", e)
            Keychain()
        }
      } else Keychain()

    // Persist keychain changes atomically: export to a temp file, then rename
    // over the old file (performed with kernel privilege).
    val keychain = new MutableKeychain(keystore, (k:Keychain) => security.Privilege.sudo { () =>
      val tmp = java.io.File.createTempFile("tmp-", "", keychainFile.getParentFile)
      k exportKeyStore { new FileOutputStream(tmp) getChannel }
      tmp renameTo keychainFile
    })

    val apps = new mutable.HashMap[URI,AppWrapper]
    val uis = new mutable.HashSet[UI]

    // Local disk cache for the network-based store.
    val cache =
      DiskStore.newBuilder
        .setPreferences(prefs)
        .setDefaultDirectory
        .build
    Flusher(cache) start

    new Kernel(io, appLoader, fileBackedPrefs, keychain, apps, uis, cache)
      with SwingPowerboxes
      with security.KernelPrivilege
  }

  /**
   * Look up a content-addressed-store location, first in preferences
   * (key "cas.<key>") and then in the optional setup data; parse it as a URL.
   *
   * Fix: the previous implementation called setupData.get unconditionally,
   * which threw NoSuchElementException whenever setupData was None; it now
   * safely yields None in that case.
   */
  private def getStoreLocation(
      key:String, prefs:Preferences, setupData:Option[Map[String,_]]) = {
    prefs getString("cas." + key) orElse {
      // Only keep String-valued entries, matching the original semantics.
      setupData flatMap { _ get key } collect { case s: String => s }
    } map {
      new URL(_)
    }
  }

  private val log = Logger getLogger { classOf[Kernel].getCanonicalName }
}
}
| trombonehero/Footlights | Client/Core/src/main/scala/me/footlights/core/kernel.scala | Scala | apache-2.0 | 5,341 |
package cz.jenda.pidifrky.data
import android.app.Activity
import cz.jenda.pidifrky.data.pojo.Card
/**
* Created <b>30.9.13</b><br>
*
* @author Jenda Kolena, jendakolena@gmail.com
* @version 0.1
* @since 0.2
*/
class CardTiles(context: Activity, card: Card) {
  // NOTE(review): the entire implementation below is the original (Java)
  // version kept as commented-out reference during the Scala port; the class
  // currently has no behaviour. The constructor parameters are retained for
  // source compatibility with callers.
//  private final val table: Array[Array[Int]] = new Array[Array[Int]](5, 5)
//  private final val cardsDao: Nothing = null
//  private final val showNumbers: Boolean = false
//  private final val showFound: Boolean = false
//
//  def this(context: Activity, card: Card) {
//    this()
//    cardsDao = CardsDao.getInstance(context)
//    val prefs: SharedPreferences = PreferenceManager.getDefaultSharedPreferences(context.getApplicationContext)
//    showNumbers = prefs.getBoolean(PidifrkyConstants.PREF_SHOW_TILES_NUMBERS, false)
//    showFound = prefs.getBoolean(PidifrkyConstants.PREF_SHOW_TILES_FOUND, false)
//    val nums: Array[String] = card.getNeighbours.split(",")
//    var t: Int = 0
//    {
//      var c: Int = 0
//      while (c <= 11) {
//        {
//          t = nums(c).toInt
//          table(c / 5)(c % 5) = t
//          val c2: Int = 13 + c
//          t = nums(c2 - 1).toInt
//          table(c2 / 5)(c2 % 5) = t
//        }
//        ({
//          c += 1; c - 1
//        })
//      }
//    }
//    table(2)(2) = card.getNumber
//  }
//
//  def getCode: String = {
//    val recs: Int = 3
//    val pics: Array[AnyRef] = new Array[String](25 * recs)
//    {
//      var i: Int = 0
//      var c: Int = 0
//      while (c < 25) {
//        {
//          try {
//            val n: Int = table(c / 5)(c % 5)
//            val path: String = Utils.getFullImageUri(n).getEncodedPath
//            pics(i) = n + ""
//            pics(i + 1) = path
//            pics(i + 2) = (if (showNumbers) n + "" else "") + (if (showFound && cardsDao.isOwner(n)) "<img src=\"file:///android_res/drawable/smiley.png\" />" else "")
//          }
//          catch {
//            case e: Exception => {
//              pics(i) = ""
//              pics(i + 1) = "file:///android_res/drawable/tile_empty.png"
//              pics(i + 2) = ""
//            }
//          }
//        }
//        i += recs
//        ({
//          c += 1; c - 1
//        })
//      }
//    }
//    return String.format(Utils.getContext.getString(R.string.tiles_code).replaceAll("url", "url('%s')").replaceAll("href=\"\"", "href=\"%s\"").replaceAll("></div", "><span>%s</span></div"), pics)
//  }
}
/*******************************************************************************
Copyright (c) 2013, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.debug.commands
import kr.ac.kaist.jsaf.analysis.typing.debug.DebugConsole
/**
 * Debug-console command that either lists every registered command (no
 * arguments) or delegates to a specific command's own help text.
 */
class CmdHelp extends Command {
  override val name = "help"

  override def run(c: DebugConsole, args: Array[String]): Unit =
    if (args.isEmpty) {
      // No argument: print every non-empty command name with its summary.
      System.out.println("Command list:")
      for (cmd <- c.command.keySet if !cmd.equals(""))
        System.out.println(cmd + "\t" + c.command(cmd).info)
      System.out.println("For more information, see 'help <command>'.")
    } else {
      // One argument: show that command's help, or complain if unknown.
      val str = args(0)
      c.command.get(str) match {
        case Some(cmd) => cmd.help()
        case None => System.out.println("'" + str + "' is not a command. See 'help'.")
      }
    }
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/analysis/typing/debug/commands/CmdHelp.scala | Scala | bsd-3-clause | 1,094 |
package is.hail.stats
import is.hail.utils._
import org.apache.commons.math3.util.CombinatoricsUtils.factorialLog
import org.scalatest.testng.TestNGSuite
import org.testng.annotations.Test
class LeveneHaldaneSuite extends TestNGSuite {

  // Reference implementation of the Levene-Haldane pmf: the probability of
  // observing nAB heterozygotes given n diploid samples and nA copies of
  // allele A, computed with log-factorials for numerical stability.
  def LH(n: Int, nA: Int)(nAB: Int): Double = {
    assert(nA >= 0 && nA <= n)
    // nAB must lie in [0, nA] and share parity with nA, else probability 0.
    if (nAB < 0 || nAB > nA || (nA - nAB) % 2 != 0) 0
    else {
      val nB = 2 * n - nA
      val nAA = (nA - nAB) / 2
      val nBB = (nB - nAB) / 2
      math.exp(nAB * math.log(2) + factorialLog(n) - (factorialLog(nAA) + factorialLog(nAB) + factorialLog(nBB)) - (factorialLog(2 * n) - (factorialLog(nA) + factorialLog(nB))))
    }
  }

  // (n, nA) pairs exercised by every test below.
  // val examples = List((15, 10), (15, 9), (15, 0), (15, 15), (1, 0), (1, 1), (0, 0), (1526, 431), (1526, 430), (10000,1500))
  // The above is commented out because ain't nobody got time for that.
  val examples = List((15, 10), (15, 9), (15, 0), (15, 15), (1, 0), (1, 1), (0, 0), (1526, 431), (1526, 430))

  // The distribution's pmf must agree with the reference implementation over
  // a slightly widened support (including out-of-range nAB values).
  @Test def pmfTest() {
    def test(e: (Int, Int)): Boolean = {
      val (n, nA) = e
      val p0 = LeveneHaldane(n, nA).probability _
      val p1 = LH(n, nA) _
      (-2 to nA + 2).forall(nAB => D_==(p0(nAB), p1(nAB)))
    }
    examples foreach { e => assert(test(e)) }
  }

  // The reported mode must attain the maximum pmf over the valid support
  // (values of nAB with the same parity as nA).
  @Test def modeTest() {
    def test(e: (Int, Int)): Boolean = {
      val (n, nA) = e
      val LH = LeveneHaldane(n, nA)
      D_==(LH.probability(LH.mode), (nA % 2 to nA by 2).map(LH.probability).max)
    }
    examples foreach {e => assert(test(e))}
  }

  // The analytic mean must match the mean computed directly from the pmf.
  @Test def meanTest() {
    def test(e: (Int, Int)): Boolean = {
      val (n, nA) = e
      val LH = LeveneHaldane(n, nA)
      D_==(LH.getNumericalMean, (LH.getSupportLowerBound to LH.getSupportUpperBound).map(i => i * LH.probability(i)).sum)
    }
    examples foreach {e => assert(test(e))}
  }

  // The analytic variance must satisfy Var + E[X]^2 == E[X^2] with moments
  // computed directly from the pmf.
  @Test def varianceTest() {
    def test(e: (Int, Int)): Boolean = {
      val (n, nA) = e
      val LH = LeveneHaldane(n, nA)
      D_==(LH.getNumericalVariance + LH.getNumericalMean * LH.getNumericalMean, (LH.getSupportLowerBound to LH.getSupportUpperBound).map(i => i * i * LH.probability(i)).sum)
    }
    examples foreach {e => assert(test(e))}
  }

  // Left/right mid-p values must be complementary, and the one-sided and
  // two-sided mid-p values must match their definitions from the pmf.
  @Test def exactTestsTest() {
    def test(e: (Int, Int)): Boolean = {
      val (n, nA) = e
      val LH = LeveneHaldane(n, nA)
      (-2 to nA + 2).forall(nAB => (
        D_==(LH.leftMidP(nAB) + LH.rightMidP(nAB), 1.0)
          && D_==(LH.leftMidP(nAB),
          0.5 * LH.probability(nAB) + (0 to nAB - 1).map(LH.probability).sum)
          && D_==(LH.exactMidP(nAB),
          {val p0 = LH.probability(nAB)
            (0 to nA).map(LH.probability).filter(D_<(_, p0, tolerance = 1.0E-12)).sum + 0.5 * (0 to nA).map(LH.probability).filter(D_==(_, p0, tolerance = 1.0E-12)).sum
          })
        ))
    }
    examples foreach {e => assert(test(e))}
  }
}
| cseed/hail | hail/src/test/scala/is/hail/stats/LeveneHaldaneSuite.scala | Scala | mit | 2,889 |
// See LICENSE.txt for license details.
package problems
import chisel3._
// Problem:
//
// Implement a 16-bit Fibonacci Linear-feedback shift register
// with polynomial x^16 + x^14 + x^13 + x^11 + 1
// State change is allowed only when 'inc' is asserted
//
class LFSR16 extends Module {
  val io = IO(new Bundle {
    val inc = Input(Bool())      // advance the register one step when asserted
    val out = Output(UInt(16.W)) // current 16-bit LFSR state
  })
  // Implement below ----------
  // Placeholder output left for the exercise; the student's LFSR logic
  // (taps per x^16 + x^14 + x^13 + x^11 + 1, gated by io.inc) replaces this.
  io.out := 0.U
  // Implement above ----------
}
| timtian090/Playground | chiselTutorial/src/main/scala/problems/LFSR16.scala | Scala | mit | 469 |
package sbt.std.neg
import org.scalatest.FunSuite
import sbt.std.TaskLinterDSLFeedback
import sbt.std.TestUtil._
class TaskNegSpec extends FunSuite {
import tools.reflect.ToolBoxError
/**
 * Compile `code` with the sbt task linter active and assert that compilation
 * fails with an error message containing `errorSnippet`.
 */
def expectError(errorSnippet: String,
                compileOptions: String = "",
                baseCompileOptions: String = s"-cp $toolboxClasspath")(code: String) = {
  val errorMessage = intercept[ToolBoxError] {
    eval(code, s"$compileOptions $baseCompileOptions")
    // Only reached when compilation unexpectedly succeeds; intercept then
    // fails the test, and this output explains why.
    println(s"Test failed -- compilation was successful! Expected:\n$errorSnippet")
  }.getMessage
  val userMessage =
    s"""
       |FOUND: $errorMessage
       |EXPECTED: $errorSnippet
    """.stripMargin
  assert(errorMessage.contains(errorSnippet), userMessage)
}
// A task key's `.value` may not appear inside a conditional within Def.task;
// the linter must report the keys used in both branches.
test("Fail on task invocation inside if it is used inside a regular task") {
  val fooNegError = TaskLinterDSLFeedback.useOfValueInsideIfExpression("fooNeg")
  val barNegError = TaskLinterDSLFeedback.useOfValueInsideIfExpression("barNeg")
  expectError(List(fooNegError, barNegError).mkString("\n")) {
    """
      |import sbt._
      |import sbt.Def._
      |
      |val fooNeg = taskKey[String]("")
      |val barNeg = taskKey[String]("")
      |var condition = true
      |
      |val bazNeg = Def.task[String] {
      |  if (condition) fooNeg.value
      |  else barNeg.value
      |}
    """.stripMargin
  }
}

// Same violation must be detected when `.value` appears nested inside larger
// expressions (string concatenation, a method argument) in the branches.
test("Fail on task invocation inside `if` if it is used inside a regular task") {
  val fooNegError = TaskLinterDSLFeedback.useOfValueInsideIfExpression("fooNeg")
  val barNegError = TaskLinterDSLFeedback.useOfValueInsideIfExpression("barNeg")
  expectError(List(fooNegError, barNegError).mkString("\n")) {
    """
      |import sbt._
      |import sbt.Def._
      |
      |val fooNeg = taskKey[String]("")
      |val barNeg = taskKey[String]("")
      |var condition = true
      |def bi(s: String) = s + " "
      |
      |val bazNeg = Def.task[String] {
      |  if (condition) "" + fooNeg.value
      |  else bi(barNeg.value)
      |}
    """.stripMargin
  }
}

// The check must also apply inside a Def.task returned from Def.taskDyn.
test("Fail on task invocation inside `if` of task returned by dynamic task") {
  expectError(TaskLinterDSLFeedback.useOfValueInsideIfExpression("fooNeg")) {
    """
      |import sbt._
      |import sbt.Def._
      |
      |val fooNeg = taskKey[String]("")
      |val barNeg = taskKey[String]("")
      |var condition = true
      |
      |val bazNeg = Def.taskDyn[String] {
      |  if (condition) {
      |    Def.task {
      |      if (condition) {
      |        fooNeg.value
      |      } else ""
      |    }
      |  } else Def.task("")
      |}
    """.stripMargin
  }
}
// Violations in deeply nested conditionals inside a dynamically returned task
// must all be reported (one per offending key).
test("Fail on task invocation inside nested `if` of task returned by dynamic task") {
  val fooNegCatch = TaskLinterDSLFeedback.useOfValueInsideIfExpression("fooNeg")
  val barNegCatch = TaskLinterDSLFeedback.useOfValueInsideIfExpression("barNeg")
  expectError(List(fooNegCatch, barNegCatch).mkString("\n")) {
    """
      |import sbt._
      |import sbt.Def._
      |
      |val fooNeg = taskKey[String]("")
      |val barNeg = taskKey[String]("")
      |var condition = true
      |
      |val bazNeg = Def.taskDyn[String] {
      |  if (condition) {
      |    Def.task {
      |      if (condition) {
      |        val first = if (!condition && condition) {
      |          fooNeg.value
      |        } else ""
      |        if ("true".toBoolean) first
      |        else {
      |          barNeg.value
      |        }
      |      } else ""
      |    }
      |  } else Def.task("")
      |}
    """.stripMargin
  }
}

// `.value` in the else branch alone must also be flagged.
test("Fail on task invocation inside else of task returned by dynamic task") {
  expectError(TaskLinterDSLFeedback.useOfValueInsideIfExpression("barNeg")) {
    """
      |import sbt._
      |import sbt.Def._
      |
      |val fooNeg = taskKey[String]("")
      |val barNeg = taskKey[String]("")
      |var condition = true
      |
      |val bazNeg = Def.taskDyn[String] {
      |  if (condition) {
      |    Def.task {
      |      if (condition) ""
      |      else barNeg.value
      |    }
      |  } else Def.task("")
    |}
    """.stripMargin
  }
}
// `.value` captured inside an anonymous function body must be rejected: the
// lambda defers evaluation, breaking the task DSL's dependency analysis.
test("Fail on task invocation inside anonymous function returned by regular task") {
  val fooNegError = TaskLinterDSLFeedback.useOfValueInsideAnon("fooNeg")
  expectError(fooNegError) {
    """
      |import sbt._
      |import sbt.Def._
      |
      |val fooNeg = taskKey[String]("")
      |val barNeg = taskKey[String]("")
      |var condition = true
      |
      |val bazNeg = Def.task[String] {
      |  val anon = () => fooNeg.value
      |  if (condition) anon()
      |  else anon()
      |}
    """.stripMargin
  }
}

// Both the inner and outer lambdas' `.value` uses must be reported.
test("Fail on task invocation inside nested anonymous function returned by regular task") {
  val fooNegError = TaskLinterDSLFeedback.useOfValueInsideAnon("fooNeg")
  val barNegError = TaskLinterDSLFeedback.useOfValueInsideAnon("barNeg")
  expectError(List(fooNegError, barNegError).mkString("\n")) {
    """
      |import sbt._
      |import sbt.Def._
      |
      |val fooNeg = taskKey[String]("")
      |val barNeg = taskKey[String]("")
      |var condition = true
      |
      |val bazNeg = Def.task[String] {
      |  val anon = () => { val _ = () => fooNeg.value; barNeg.value}
      |  if (condition) anon()
      |  else anon()
      |}
    """.stripMargin
  }
}

// `.value` embedded in a larger expression inside the lambda must be caught.
test("Fail on task invocation inside complex anonymous function returned by regular task") {
  val fooNegError = TaskLinterDSLFeedback.useOfValueInsideAnon("fooNeg")
  expectError(fooNegError) {
    """
      |import sbt._
      |import sbt.Def._
      |
      |val fooNeg = taskKey[String]("")
      |var condition = true
      |
      |val bazNeg = Def.task[String] {
      |  val anon = () => fooNeg.value + ""
      |  if (condition) anon()
      |  else anon()
      |}
    """.stripMargin
  }
}

// The anonymous-function check must also apply inside Def.taskDyn bodies.
test("Fail on task invocation inside anonymous function returned by dynamic task") {
  val fooNegError = TaskLinterDSLFeedback.useOfValueInsideAnon("fooNeg")
  expectError(fooNegError) {
    """
      |import sbt._
      |import sbt.Def._
      |
      |val fooNeg = taskKey[String]("")
      |val barNeg = taskKey[String]("")
      |var condition = true
      |
      |val bazNeg = Def.taskDyn[String] {
      |  if (condition) {
      |    val anon = () => fooNeg.value
      |    Def.task(anon())
      |  } else Def.task("")
      |}
    """.stripMargin
  }
}
// A task key referenced without `.value` is dead code; the linter must flag
// it when it appears as a bare statement inside Def.task.
test("Detect a missing `.value` inside a task") {
  expectError(TaskLinterDSLFeedback.missingValueForKey("fooNeg")) {
    """
      |import sbt._
      |import sbt.Def._
      |
      |val fooNeg = taskKey[String]("")
      |
      |def avoidDCE = {println(""); ""}
      |val bazNeg = Def.task[String] {
      |  fooNeg
      |  avoidDCE
      |}
    """.stripMargin
  }
}

// The same must be detected when the bare key is bound to a val.
test("Detect a missing `.value` inside a val definition of a task") {
  expectError(TaskLinterDSLFeedback.missingValueForKey("fooNeg2")) {
    """
      |import sbt._
      |import sbt.Def._
      |
      |val fooNeg2 = taskKey[String]("")
      |
      |def avoidDCE = {println(""); ""}
      |val bazNeg = Def.task[String] {
      |  val _ = fooNeg2
      |  avoidDCE
      |}
    """.stripMargin
  }
}

// ...and when that val lives inside a nested method of the task body.
test("Detect a missing `.value` inside a val definition of an inner method of a task") {
  expectError(TaskLinterDSLFeedback.missingValueForKey("fooNeg2")) {
    """
      |import sbt._
      |import sbt.Def._
      |
      |val fooNeg2 = taskKey[String]("")
      |
      |def avoidDCE = {println(""); ""}
      |val bazNeg = Def.task[String] {
      |  def inner = {
      |    val _ = fooNeg2
      |    avoidDCE
      |  }
      |  inner
      |}
    """.stripMargin
  }
}

// ...and when the bare key is a statement inside a nested method.
test("Detect a missing `.value` inside an inner method of a task") {
  expectError(TaskLinterDSLFeedback.missingValueForKey("fooNeg3")) {
    """
      |import sbt._
      |import sbt.Def._
      |
      |val fooNeg3 = taskKey[String]("")
      |def avoidDCE = {println(""); ""}
      |val bazNeg = Def.task[String] {
      |  def inner: String = {
      |    fooNeg3
      |    avoidDCE
      |  }
      |  inner
      |}
    """.stripMargin
  }
}

// A Unit-returning task can silently discard the bare key as its final
// expression; the linter must still flag it.
test("Detect a missing `.value` inside a task whose return type is Unit") {
  expectError(TaskLinterDSLFeedback.missingValueForKey("fooNeg4")) {
    """
      |import sbt._
      |import sbt.Def._
      |
      |val fooNeg4 = taskKey[String]("")
      |
      |val bazNeg = Def.task[Unit] {
      |  fooNeg4
      |}
    """.stripMargin
  }
}
// Enable these tests when https://github.com/scala/bug/issues/10340 is fixed
/*
test("Detect a missing `.value` inside a val of an inner method of a task returning a literal") {
expectError(TaskLinterDSLFeedback.missingValueForKey("fooNeg3")) {
"""
|import sbt._
|import sbt.Def._
|
|val fooNeg3 = taskKey[String]("")
|
|val bazNeg = Def.task[String] {
| def inner: String = {
| val _ = fooNeg3
| ""
| }
| inner
|}
""".stripMargin
}
}
test("Detect a missing `.value` inside a val of a task returning a literal") {
expectError(TaskLinterDSLFeedback.missingValueForKey("fooNeg3")) {
"""
|import sbt._
|import sbt.Def._
|
|val fooNeg3 = taskKey[String]("")
|
|val bazNeg = Def.task[String] {
| val _ = fooNeg3
| ""
|}
""".stripMargin
}
}
*/
}
| Duhemm/sbt | main-settings/src/test/scala/sbt/std/neg/TaskNegSpec.scala | Scala | bsd-3-clause | 9,965 |
package sparklyr
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataInputStream, DataOutputStream}
import scala.collection.mutable.HashMap
import scala.language.existentials
import io.netty.channel.{ChannelHandlerContext, SimpleChannelInboundHandler}
import io.netty.channel.ChannelHandler.Sharable
import sparklyr.Serializer._
object StreamHandler {
  /**
   * Decode one invocation request from `msg`, dispatch it, and return the
   * serialized response bytes.
   *
   * The wire format (read via Serializer) is: object id, static flag, method
   * name, argument count, then the arguments. The object id "Handler" selects
   * built-in operations (echo / rm / getHostContext); anything else is a
   * method call on a tracked JVM object or a class.
   *
   * Responses start with status 0 (success, followed by the result object)
   * or -1 (failure, followed by an error string).
   */
  def read(
    msg: Array[Byte],
    classMap: Map[String, Object],
    logger: Logger,
    hostContext: String): Array[Byte] = {

    val bis = new ByteArrayInputStream(msg)
    val dis = new DataInputStream(bis)

    val bos = new ByteArrayOutputStream()
    val dos = new DataOutputStream(bos)

    val objId = readString(dis)
    val isStatic = readBoolean(dis)
    val methodName = readString(dis)
    val numArgs = readInt(dis)

    if (objId == "Handler") {
      methodName match {
        case "echo" =>
          val args = readArgs(numArgs, dis)
          if (numArgs != 1) throw new IllegalArgumentException("echo should take a single argument")
          writeInt(dos, 0)
          writeObject(dos, args(0))
        case "rm" =>
          // Remove an object from the JVM-side tracker so it can be GC'd.
          try {
            val t = readObjectType(dis)
            if (t != 'c') throw new IllegalArgumentException("object removal expects a string")
            val objToRemove = readString(dis)
            JVMObjectTracker.remove(objToRemove)
            writeInt(dos, 0)
            writeObject(dos, null)
          } catch {
            case e: Exception =>
              logger.logError(s"failed to remove $objId", e)
              writeInt(dos, -1)
              writeString(dos, s"Removing $objId failed: ${e.getMessage}")
          }
        case "getHostContext" =>
          writeInt(dos, 0)
          writeObject(dos, hostContext.asInstanceOf[AnyRef])
        case _ =>
          // NOTE(review): other branches use Serializer.writeInt(dos, -1);
          // confirm equivalence before unifying this direct call.
          dos.writeInt(-1)
          writeString(dos, s"Error: unknown method $methodName")
      }
    } else {
      handleMethodCall(isStatic, objId, methodName, numArgs, dis, dos, classMap, logger)
    }

    bos.toByteArray
  }

  /**
   * Resolve the target (a classMap entry, a class by name, or a tracked
   * instance), invoke `methodName` with arguments read from `dis`, and write
   * the status plus result (or error string) to `dos`.
   */
  def handleMethodCall(
    isStatic: Boolean,
    objId: String,
    methodName: String,
    numArgs: Int,
    dis: DataInputStream,
    dos: DataOutputStream,
    classMap: Map[String, Object],
    logger: Logger): Unit = {
    var obj: Object = null
    try {
      val cls = if (isStatic) {
        // Static call: prefer a pre-registered singleton from classMap,
        // otherwise load the class by its fully qualified name.
        if (classMap != null && classMap.contains(objId)) {
          obj = classMap(objId)
          classMap(objId).getClass.asInstanceOf[Class[_]]
        }
        else {
          Class.forName(objId)
        }
      } else {
        // Instance call: look the receiver up in the object tracker.
        JVMObjectTracker.get(objId) match {
          case None => throw new IllegalArgumentException("Object not found " + objId)
          case Some(o) =>
            obj = o
            o.getClass
        }
      }

      val args = readArgs(numArgs, dis)
      val res = Invoke.invoke(cls, objId, obj, methodName, args, logger)

      writeInt(dos, 0)
      writeObject(dos, res.asInstanceOf[AnyRef])
    } catch {
      case e: Exception =>
        logger.logError(s"failed calling $methodName on $objId")
        writeInt(dos, -1)
        writeString(dos, Utils.exceptionString(
          if (e.getCause == null) e else e.getCause
        ))
      case e: NoClassDefFoundError =>
        // Fixed typo in the log message ("dound" -> "found").
        logger.logError(s"failed calling $methodName on $objId with no class found error")
        writeInt(dos, -1)
        writeString(dos, Utils.exceptionString(
          if (e.getCause == null) e else e.getCause
        ))
    }
  }

  /** Read `numArgs` serialized arguments from the data input stream. */
  def readArgs(numArgs: Int, dis: DataInputStream): Array[java.lang.Object] = {
    (0 until numArgs).map { _ =>
      readObject(dis)
    }.toArray
  }
}
| kevinykuo/sparklyr | java/spark-1.5.2/stream.scala | Scala | apache-2.0 | 3,771 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import org.apache.spark.sql.execution.streaming.StreamExecution
trait StateStoreMetricsTest extends StreamTest {

  // Index into recentProgress up to which updated-row counts were already
  // verified; -1 means nothing has been checked yet.
  private var lastCheckedRecentProgressIndex = -1
  // Query the index above refers to; the index is reset when a new query is seen.
  private var lastQuery: StreamExecution = null

  override def beforeEach(): Unit = {
    super.beforeEach()
    lastCheckedRecentProgressIndex = -1
  }

  /**
   * Returns an assertion that checks, per stateful operator, the current
   * total number of state rows and the number of rows updated since the
   * previous invocation of this check for the same query.
   */
  def assertNumStateRows(total: Seq[Long], updated: Seq[Long]): AssertOnQuery =
    AssertOnQuery(s"Check total state rows = $total, updated state rows = $updated") { q =>
      val recentProgress = q.recentProgress
      require(recentProgress.nonEmpty, "No progress made, cannot check num state rows")
      require(recentProgress.length < spark.sessionState.conf.streamingProgressRetention,
        "This test assumes that all progresses are present in q.recentProgress but " +
          "some may have been dropped due to retention limits")

      // Reset the incremental bookkeeping if a different query is being checked.
      if (q.ne(lastQuery)) lastCheckedRecentProgressIndex = -1
      lastQuery = q

      val numStateOperators = recentProgress.last.stateOperators.length
      val progressesSinceLastCheck = recentProgress
        .slice(lastCheckedRecentProgressIndex + 1, recentProgress.length)
        .filter(_.stateOperators.length == numStateOperators)

      val allNumUpdatedRowsSinceLastCheck =
        progressesSinceLastCheck.map(_.stateOperators.map(_.numRowsUpdated))

      lazy val debugString = "recent progresses:\\n" +
        progressesSinceLastCheck.map(_.prettyJson).mkString("\\n\\n")

      val numTotalRows = recentProgress.last.stateOperators.map(_.numRowsTotal)
      assert(numTotalRows === total, s"incorrect total rows, $debugString")

      val numUpdatedRows = arraySum(allNumUpdatedRowsSinceLastCheck, numStateOperators)
      assert(numUpdatedRows === updated, s"incorrect updates rows, $debugString")

      lastCheckedRecentProgressIndex = recentProgress.length - 1
      true
    }

  /** Single-operator convenience overload of [[assertNumStateRows]]. */
  def assertNumStateRows(total: Long, updated: Long): AssertOnQuery =
    assertNumStateRows(Seq(total), Seq(updated))

  /**
   * Element-wise sum of a sequence of equal-length arrays; an empty input
   * yields a zero vector of length `arrayLength`.
   *
   * Rewritten as a single expression instead of using an early `return`,
   * which is non-idiomatic in Scala.
   */
  def arraySum(arraySeq: Seq[Array[Long]], arrayLength: Int): Seq[Long] = {
    if (arraySeq.isEmpty) {
      Seq.fill(arrayLength)(0L)
    } else {
      assert(arraySeq.forall(_.length == arrayLength),
        "Arrays are of different lengths:\\n" + arraySeq.map(_.toSeq).mkString("\\n"))
      (0 until arrayLength).map { index => arraySeq.map(_.apply(index)).sum }
    }
  }
}
| bravo-zhang/spark | sql/core/src/test/scala/org/apache/spark/sql/streaming/StateStoreMetricsTest.scala | Scala | apache-2.0 | 3,183 |
package samples.scalaexchange.step6
import akka.http.scaladsl.Http
import akka.http.scaladsl.server._
import samples.scalaexchange.utils.SampleApp
import scala.io.StdIn
/**
 * Sample entry point: binds the routes provided by `IncomingStreamsService`
 * to an HTTP server on 127.0.0.1:8080 as a side effect of object
 * initialization. `bindingFuture` completes when the bind succeeds or fails.
 * NOTE(review): `scala.io.StdIn` is imported at file level but never used
 * here -- presumably a leftover from a "press enter to stop" pattern.
 */
object IncomingStreamsHttpServiceApp extends SampleApp
  with IncomingStreamsService {

  // our routes: supplied by the mixed-in IncomingStreamsService
  val route: Route =
    incomingStreams

  // start the http server:
  val bindingFuture = Http().bindAndHandle(route, "127.0.0.1", 8080)
} | ktoso/akka-scala-exchange | src/main/scala/samples/scalaexchange/step6/IncomingStreamsHttpServiceApp.scala | Scala | apache-2.0 | 419 |
package org.trpleo.thirstyyonath.matching
import org.trpleo.thirstyyonath.model.offer.Offer
import org.trpleo.thirstyyonath.model.orderbook.OrderBook
/**
* Created by ipapp on 02/03/16.
*/
trait MatchingAlgorithm {

  /** Runs one matching pass over `orderBook` and returns the resulting book. */
  def doMatching(orderBook: OrderBook): OrderBook

  // NOTE(review): both compare methods previously had no declared result
  // type, so the compiler inferred Unit -- which makes them useless as
  // comparators. The Unit result is made explicit here so the omission is
  // visible without changing the interface; they most likely should return
  // Int (ordering) or Boolean, to be confirmed with the implementors.
  def sellSideCompare(thisOffer: Offer, thatOffer: Offer): Unit

  def buySideCompare(thisOffer: Offer, thatOffer: Offer): Unit
}
| trpleo/thirsty-yonath | src/main/scala/org/trpleo/thirstyyonath/matching/MatchingAlgorithm.scala | Scala | mit | 386 |
package typeclass
import typeclass.Prelude._
trait Applicative[F[_]] extends Functor[F]{
  /** Lifts a plain value into the context F. */
  def pure[A](a: A): F[A]

  /** Applies a function inside F to a value inside F. */
  def ap[A, B](fab: F[A => B], fa: F[A]): F[B]

  // map is derived from pure and ap, satisfying Functor's abstract map.
  def map[A, B](fa: F[A])(f: A => B): F[B] =
    ap(pure(f), fa)

  /** Combines two F values with a binary function. */
  def map2[A, B, C](fa: F[A], fb: F[B])(f: (A, B) => C): F[C] =
    ap(map(fa)((a: A) => (b: B) => f(a, b)), fb)

  def map3[A, B, C, D](fa: F[A], fb: F[B], fc: F[C])(f: (A, B, C) => D): F[D] =
    ap(map2(fa, fb)((a, b) => (c: C) => f(a, b, c)), fc)

  /** Sequences two actions, keeping only the second result. */
  def *>[A, B](fa: F[A], fb: F[B]): F[B] =
    map2(fa, fb)((_, b) => b)

  /** Sequences two actions, keeping only the first result. */
  def <*[A, B](fa: F[A], fb: F[B]): F[A] =
    map2(fa, fb)((a, _) => a)

  // WARNING: `forever(fa)` is a by-value argument to *>, so this definition
  // recurses strictly before *> is ever invoked -- any call will throw
  // StackOverflowError. A usable definition needs laziness in F or a
  // by-name argument.
  def forever[A](fa: F[A]): F[A] =
    *>(fa, forever(fa))

  def tuple2[A, B](fa: F[A], fb: F[B]): F[(A, B)] =
    map2(fa, fb)((_, _))

  def tuple3[A, B, C](fa: F[A], fb: F[B], fc: F[C]): F[(A, B, C)] =
    map3(fa, fb, fc)((_, _, _))

  /** Lifts a binary function to operate on values inside F. */
  def lift2[A, B, C](f: (A, B) => C): (F[A], F[B]) => F[C] =
    map2(_, _)(f)

  def lift3[A, B, C, D](f: (A, B, C) => D): (F[A], F[B], F[C]) => F[D] =
    map3(_, _, _)(f)
}
object Applicative {
  /** Summons the implicit `Applicative` instance for `F`, e.g. `Applicative[List]`. */
  def apply[F[_]](implicit ev: Applicative[F]): Applicative[F] = implicitly[Applicative[F]]
}
/**
 * Property-based law checks for an `Applicative` instance of F, expressed
 * with scalaprops generators.
 */
case class ApplicativeLaws[F[_]](implicit F: Applicative[F]) {
  import typeclass.syntax.applicative._
  import typeclass.syntax.functor._

  import scalaprops.{Gen, Properties, Property}
  import scalaprops.Properties.properties
  import scalaprops.Property.forAll
  import scalaz.std.string._

  // Homomorphism law: pure(f) ap pure(a) == pure(f(a)).
  def liftFunction[A, B](implicit genA: Gen[A], genAB: Gen[A => B]): Property =
    forAll((a: A, f: A => B) =>
      f.pure.ap(a.pure) == f(a).pure
    )

  // Identity law: pure(identity) ap fa == fa.
  def apId[A](implicit genFA: Gen[F[A]]): Property =
    forAll((fa: F[A]) =>
      (identity[A] _).pure.ap(fa) == fa
    )

  // map must agree with its ap/pure derivation.
  def consistentMap[A, B](implicit genA: Gen[F[A]], genAB: Gen[A => B]): Property =
    forAll((fa: F[A], f: A => B) =>
      f.pure.ap(fa) == fa.map(f)
    )

  /** All Applicative laws instantiated at Int. */
  def laws(implicit genFI: Gen[F[Int]], genF: Gen[Int => Int]): Properties[String] =
    properties("Applicative")(
      ("liftFunction", liftFunction[Int, Int]),
      ("apId" , apId[Int]),
      ("consistentMap", consistentMap[Int, Int])
    )

  /** Applicative laws together with the underlying Functor laws. */
  def all(implicit genFI: Gen[F[Int]], genF: Gen[Int => Int]): Properties[String] =
    Properties.fromProps("Applicative-all", FunctorLaws[F].all, laws)
}
| julien-truffaut/Typeclass | answer/src/main/scala/typeclass/Applicative.scala | Scala | mit | 2,288 |
package system.cell.core
import java.net.InetAddress
import akka.actor.{ActorRef, Props}
import com.actors.TemplateActor
import com.utils.Practicability
import spray.json._
import system.cell.cluster.{CellClusterSupervisor, CellPublisher, CellSubscriber}
import system.cell.processor.route.actors.RouteManager
import system.cell.sensormanagement.SensorManager
import system.cell.userManagement.UserManager
import system.exceptions.IncorrectConfigurationException
import system.names.NamingSystem
import system.ontologies.messages.AriannaJsonProtocol._
import system.ontologies.messages.MessageType.Topology.Subtype.ViewedFromACell
import system.ontologies.messages.MessageType._
import system.ontologies.messages._
import scala.collection.mutable
import scala.concurrent.ExecutionContext.Implicits.global
import scala.io.Source
import scala.util.Random
/**
* This is the main actor of a cell, it provide the main cell management and
* the other cell's actors initialization
* Created by Matteo Gabellini on 14/07/2017.
*/
class CellCoreActor(mediator: ActorRef) extends TemplateActor {

    // Current number of people in this cell's room, reported by the UserManager.
    private var actualSelfLoad: Int = 0
    // This cell's own configuration, loaded in init() from the JSON config file.
    private var localCellInfo: CellInfo = CellInfo.empty
    // Sensors attached to this cell, reported by the SensorManager after start-up.
    private var sensorsMounted: List[SensorInfo] = List.empty[SensorInfo]

    // Room-id name -> room view, filled when the Topology message arrives from the master.
    private val topology: mutable.Map[String, RoomViewedFromACell] = mutable.HashMap.empty
    // Cell URI -> room-id name, secondary index into `topology`.
    private val indexByUri: mutable.Map[String, String] = mutable.HashMap.empty
    // Practicability values received for rooms currently marked in alarm
    // (practicability == +Inf); applied to `topology` only when the alarm ends,
    // to tolerate reordering of Alarm / Alarm End / Practicability messages.
    private val practicabilityToBeRestored: mutable.Map[String, Double] = mutable.HashMap.empty

    var clusterListener: ActorRef = _
    var cellPublisher: ActorRef = _
    var cellSubscriber: ActorRef = _
    var sensorManager: ActorRef = _
    var userActor: ActorRef = _
    var routeManager: ActorRef = _

    // Creates the child actors this cell is composed of.
    override def preStart: Unit = {
        super.preStart()
        cellSubscriber = context.actorOf(Props(new CellSubscriber(mediator)), NamingSystem.Subscriber)
        cellPublisher = context.actorOf(Props(new CellPublisher(mediator)), NamingSystem.Publisher)
        sensorManager = context.actorOf(Props[SensorManager], NamingSystem.SensorManager)
        userActor = context.actorOf(Props[UserManager], NamingSystem.UserManager)
        routeManager = context.actorOf(Props[RouteManager], NamingSystem.RouteManager)
    }

    // Loads the cell configuration from the file named by args.head and
    // forwards the sensor configuration to the SensorManager.
    override protected def init(args: List[String]): Unit = {
        log.debug("Hello there! the cell core is being initialized")
        clusterListener = context.actorOf(Props[CellClusterSupervisor], NamingSystem.CellClusterSupervisor)
        val cellConfiguration = Source.fromFile(args.head.asInstanceOf[String]).getLines.mkString
        val loadedConfig = cellConfiguration.parseJson.convertTo[CellConfig]
        if (loadedConfig.cellInfo == CellInfo.empty) throw IncorrectConfigurationException(this.name)
        localCellInfo = loadedConfig.cellInfo.copy(ip = InetAddress.getLocalHost.getHostAddress)
        sensorManager ! AriadneMessage(Init,
            Init.Subtype.Greetings,
            Location.PreMade.selfToSelf,
            Greetings(List(loadedConfig.sensors.toJson.toString())))
    }

    // Initial behavior: waits for sensor data and the topology from the master.
    override protected def receptive: Receive = {
        case msg@AriadneMessage(Info, Info.Subtype.Request, Location.PreMade.selfToSelf, cnt: SensorsInfoUpdate) => {
            // Information request from the cell publisher in order to complete
            // the handshake task with the system master.
            if (sensorsMounted.isEmpty) {
                log.debug("Sensor Data not yet ready, stash the info request")
                stash()
            } else {
                sender() ! msg.copy(
                    subtype = Info.Subtype.Response,
                    content = SensorsInfoUpdate(localCellInfo, sensorsMounted)
                )
            }
        }
        case msg@AriadneMessage(Update, Update.Subtype.Sensors, Location.PreMade.selfToSelf, cnt: SensorsInfoUpdate) => {
            // The first sensor update also unblocks any stashed handshake request.
            if (sensorsMounted.isEmpty) {
                sensorsMounted = cnt.sensors
                unstashAll()
            } else {
                sensorsMounted = cnt.sensors
            }
        }
        case msg@AriadneMessage(Error, Error.Subtype.CellMappingMismatch, _, cnt: Empty) =>
            log.error("Mapping Error")
        case msg@AriadneMessage(Topology, ViewedFromACell, Location.PreMade.masterToCell, cnt: AreaViewedFromACell) => {
            log.info(s"Topology arrived from Master $cnt")
            log.info("Sending ACK to Master for Topology...")
            cellPublisher ! AriadneMessage(
                Topology,
                Topology.Subtype.Acknowledgement,
                Location.PreMade.selfToSelf,
                localCellInfo
            )
            // Index the received rooms both by room-id name and by cell URI.
            cnt.rooms.foreach(room => topology += room.info.id.name -> room)
            cnt.rooms.foreach(room => indexByUri += room.cell.uri -> room.info.id.name)
            userActor ! AriadneMessage(
                Init,
                Init.Subtype.Greetings,
                Location.PreMade.selfToSelf,
                Greetings(List(localCellInfo.uri, localCellInfo.port.toString)))
            userActor ! msg.copy(direction = Location.PreMade.cellToUser)
            // Topology known: switch to normal operation.
            this.context.become(cultured, discardOld = true)
            log.info("I've become cultured")
        }
        case _ => desist _
    }

    // Normal-operation behavior, entered once the topology has been received.
    protected def cultured: Receive = ({
        case msg@AriadneMessage(Alarm, _, Location.PreMade.selfToSelf, _) => {
            //Alarm triggered in the current cell
            val currentCell: RoomViewedFromACell = topology(indexByUri(localCellInfo.uri))
            val msgToSend = msg.copy(
                content = AlarmContent(localCellInfo, currentCell.info)
            )
            cellPublisher ! msgToSend
            context.become(localEmergency, discardOld = true)
            log.info("Alarm triggered locally")
        }
        case msg@AriadneMessage(Alarm, Alarm.Subtype.End, _, _) =>
            // Alarm raised elsewhere has ended: restore buffered practicabilities.
            userActor ! msg
            this.updatePracticabilityOnAlarmEnd()
            log.info("Alarm deactiveted")
        case msg@AriadneMessage(Update, Update.Subtype.CurrentPeople, Location.PreMade.userToCell, cnt: CurrentPeopleUpdate) => {
            // Recompute this room's practicability from the new load and broadcast both.
            actualSelfLoad = cnt.currentPeople
            topology += indexByUri(localCellInfo.uri) -> topology(indexByUri(localCellInfo.uri)).copy(practicability = updatedPracticability())
            cellPublisher ! msg.copy(content = cnt.copy(room = topology(indexByUri(cnt.room.name)).info.id))
            cellPublisher ! AriadneMessage(
                Update,
                Update.Subtype.Practicability,
                Location.PreMade.selfToSelf,
                PracticabilityUpdate(
                    topology(indexByUri(localCellInfo.uri)).info.id,
                    topology(indexByUri(localCellInfo.uri)).practicability
                )
            )
        }
    }: Receive) orElse this.proactive

    // Behavior while an alarm triggered in THIS cell is active.
    protected def localEmergency: Receive = ({
        case msg@AriadneMessage(Alarm, Alarm.Subtype.End, _, _) => {
            userActor ! msg
            context.become(cultured, discardOld = true)
            // Buffer this room's own recomputed value, then restore everything
            // that was deferred while the alarm was active.
            practicabilityToBeRestored += indexByUri(localCellInfo.uri) -> updatedPracticability()
            this.updatePracticabilityOnAlarmEnd()
            cellPublisher ! AriadneMessage(
                Update,
                Update.Subtype.Practicability,
                Location.PreMade.selfToSelf,
                PracticabilityUpdate(
                    topology(indexByUri(localCellInfo.uri)).info.id,
                    topology(indexByUri(localCellInfo.uri)).practicability
                )
            )
            log.info("Alarm deactiveted")
        }
        case msg@AriadneMessage(Update, Update.Subtype.CurrentPeople, Location.PreMade.userToCell, cnt: CurrentPeopleUpdate) => {
            // During a local alarm only the people count is forwarded;
            // practicability updates are suspended.
            actualSelfLoad = cnt.currentPeople
            cellPublisher ! msg.copy(content = cnt.copy(room = topology(indexByUri(cnt.room.name)).info.id))
        }
    }: Receive) orElse this.proactive

    // Messages handled in every post-topology state (shared by cultured and localEmergency).
    private def proactive: Receive = {
        case msg@AriadneMessage(Init, Init.Subtype.Goodbyes, _, _) => {
            log.info("Ariadne system is shutting down...")
            context.system.terminate().onComplete(_ => println("Ariadne system has shutdown!"))
            System.exit(0)
        }
        case msg@AriadneMessage(Update, Update.Subtype.Sensors, Location.PreMade.selfToSelf, cnt: SensorsInfoUpdate) => {
            cellPublisher ! msg.copy(content = cnt.copy(cell = this.localCellInfo))
        }
        case AriadneMessage(Update, Update.Subtype.Practicability, Location.PreMade.cellToCell, cnt: PracticabilityUpdate) => {
            if (topology(cnt.room.name).practicability == Double.PositiveInfinity) {
                /*
                * save the practicability update of a cell considered in alarm to prevent
                * the receiving ordering problem between Alarm, Alarm End and Practicability Update messages
                * sent from a cell in alarm during the alarm deactivation
                * */
                practicabilityToBeRestored += cnt.room.name -> cnt.practicability
            } else {
                topology += cnt.room.name -> topology(cnt.room.name).copy(practicability = cnt.practicability)
            }
        }
        case AriadneMessage(Route, Route.Subtype.Request, Location.PreMade.userToCell, cnt: RouteRequest) => {
            //route request from user management
            routeManager ! AriadneMessage(
                Route,
                Route.Subtype.Info,
                Location.PreMade.selfToSelf,
                RouteInfo(
                    cnt,
                    AreaViewedFromACell(Random.nextInt(), topology.values.toList)
                )
            )
        }
        case msg@AriadneMessage(Route, Route.Subtype.Response, Location.PreMade.cellToUser, _) =>
            //route response from route manager for the user
            userActor ! msg
        case AriadneMessage(Alarm, _, _, alarm) => {
            // An alarm from another cell: mark the compromised cell unreachable
            // (practicability = +Inf) before computing the escape route.
            val (id, area) = alarm match {
                case AlarmContent(compromisedCell, _) =>
                    ("-1", topology.values.map(cell => {
                        if (cell.cell.uri == compromisedCell.uri)
                            cell.copy(practicability = Double.PositiveInfinity)
                        else cell
                    }).toList)
                case _ =>
                    ("0", topology.values.toList)
            }
            //request to the route manager the escape route
            routeManager ! AriadneMessage(
                Route,
                Route.Subtype.Info,
                Location.PreMade.selfToSelf,
                RouteInfo(
                    RouteRequest(id, topology(indexByUri(localCellInfo.uri)).info.id, RoomID.empty, isEscape = true),
                    AreaViewedFromACell(Random.nextInt(), area)
                )
            )
        }
        case msg@AriadneMessage(Topology, ViewedFromACell, Location.PreMade.masterToCell, cnt: AreaViewedFromACell) => {
            //The master did not receive the ack -> resend acknowledgement
            cellPublisher ! AriadneMessage(
                Topology,
                Topology.Subtype.Acknowledgement,
                Location.PreMade.selfToSelf,
                localCellInfo
            )
        }
        case _ => desist _
    }

    // Applies every practicability value buffered while rooms were in alarm.
    private def updatePracticabilityOnAlarmEnd(): Unit = {
        practicabilityToBeRestored.keys.foreach(X =>
            topology += X -> topology(X).copy(practicability = practicabilityToBeRestored(X))
        )
        practicabilityToBeRestored.clear()
    }

    // Practicability of this cell's room from its capacity, current load and passage count.
    private def updatedPracticability(): Double = {
        Practicability(
            topology(indexByUri(localCellInfo.uri)).info.capacity,
            actualSelfLoad,
            topology(indexByUri(localCellInfo.uri)).passages.length
        )
    }
}
| albertogiunta/arianna | src/main/scala/system/cell/core/CellCoreActor.scala | Scala | gpl-3.0 | 11,986 |
package com.atomicscala.Bodies
/** A body with no occupant; answers with a canned message. */
class NoBody {
  def who(): String = "Nobody's home"
}
/** A named person; announces itself on construction. */
class SomeBody {
  def name(): String = "Janet Doe"

  // Constructor side effect: print the announcement when an instance is created.
  println(name() + " is SomeBody")
}
/** Holds three freshly constructed SomeBody instances (each announces itself). */
class EveryBody {
  val all = Vector.fill(3)(new SomeBody)
}
| P7h/ScalaPlayground | Atomic Scala/atomic-scala-solutions/20_Summary2/ClassBodies.scala | Scala | apache-2.0 | 275 |
/*
* This file is part of Evo2DSim.
*
* Evo2DSim is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Evo2DSim is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Evo2DSim. If not, see <http://www.gnu.org/licenses/>.
*/
package org.vastness.evo2dsim.core.evolution.genomes.standard
import org.vastness.evo2dsim.core.evolution.genomes.Genome
import scala.util.Random
/**
 * Fixed-topology genome: a set of nodes and connections plus the evolution
 * manager supplying the random mutation source.
 */
case class STDGenome(nodes: Set[STDNode] = Set.empty,
                     connections: Set[STDConnection] = Set.empty,
                     em: STDEvolutionManager) extends Genome {
  val name = "STDGenome"

  type Self = STDGenome
  type SelfNode = STDNode
  type SelfConnection = STDConnection

  // TODO: Add a proper crossover; currently returns this genome unchanged
  // and ignores `other`.
  def crossover(other: STDGenome) = this

  /**
   * Mutates nodes and connections independently, each gene with probability
   * 1/l where l is the total gene count.
   * NOTE(review): an empty genome makes the probability infinite (division
   * by zero), in which case every gene would "mutate" -- confirm whether an
   * empty genome can occur here.
   */
  def mutate = {
    val probability = 1.0 / ( nodes.size + connections.size) // 1/l
    STDGenome(mutateNodes(probability), mutateConnections(probability), em)
  }

  private def mutateConnections(p: Double): Set[SelfConnection] =
    connections map {c => if(Random.nextDouble <= p) c.mutate(em.randSource.sample()) else c }

  private def mutateNodes(p: Double) =
    nodes map {n => if(Random.nextDouble <= p) n.mutate(em.randSource.sample()) else n }

  /**
   * Euclidean distance between this genome and `other`, computed over the
   * biases of paired nodes and the weights of paired connections.
   *
   * @param other the genome to compare against; must be an [[STDGenome]],
   *              any other Genome subtype throws NotImplementedError
   * @return the euclidean distance between the two genomes
   */
  def distance(other: Genome): Double = other match {
    case other: Self =>
      val nodesDistances = zipper(nodesMap, other.nodesMap) map {
        case (a, b) => math.pow(a.bias - b.bias, 2)
      }
      val connDistances = zipper(connectionMap, other.connectionMap) map {
        case (a, b) => math.pow(a.weight - b.weight, 2)
      }
      // The match itself is the method's result; no early `return` needed.
      math.sqrt(nodesDistances.sum + connDistances.sum)
    case _ => ??? // distance to non-STD genomes is intentionally unimplemented
  }
}
| vchuravy/Evo2DSim | core/src/main/scala/org/vastness/evo2dsim/core/evolution/genomes/standard/STDGenome.scala | Scala | mit | 2,191 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.k8s
import java.time.Instant
import java.util.concurrent.{ConcurrentHashMap, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.util.control.NonFatal
import io.fabric8.kubernetes.api.model.{HasMetadata, PersistentVolumeClaim, Pod, PodBuilder}
import io.fabric8.kubernetes.client.KubernetesClient
import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.deploy.k8s.Config._
import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.deploy.k8s.KubernetesConf
import org.apache.spark.deploy.k8s.KubernetesUtils.addOwnerReference
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.DYN_ALLOCATION_EXECUTOR_IDLE_TIMEOUT
import org.apache.spark.resource.ResourceProfile
import org.apache.spark.util.{Clock, Utils}
private[spark] class ExecutorPodsAllocator(
conf: SparkConf,
secMgr: SecurityManager,
executorBuilder: KubernetesExecutorBuilder,
kubernetesClient: KubernetesClient,
snapshotsStore: ExecutorPodsSnapshotsStore,
clock: Clock) extends Logging {
private val EXECUTOR_ID_COUNTER = new AtomicInteger(0)
// ResourceProfile id -> total expected executors per profile, currently we don't remove
// any resource profiles - https://issues.apache.org/jira/browse/SPARK-30749
private val totalExpectedExecutorsPerResourceProfileId = new ConcurrentHashMap[Int, Int]()
private val rpIdToResourceProfile = new mutable.HashMap[Int, ResourceProfile]
private val podAllocationSize = conf.get(KUBERNETES_ALLOCATION_BATCH_SIZE)
private val podAllocationDelay = conf.get(KUBERNETES_ALLOCATION_BATCH_DELAY)
private val maxPendingPods = conf.get(KUBERNETES_MAX_PENDING_PODS)
private val podCreationTimeout = math.max(
podAllocationDelay * 5,
conf.get(KUBERNETES_ALLOCATION_EXECUTOR_TIMEOUT))
private val driverPodReadinessTimeout = conf.get(KUBERNETES_ALLOCATION_DRIVER_READINESS_TIMEOUT)
private val executorIdleTimeout = conf.get(DYN_ALLOCATION_EXECUTOR_IDLE_TIMEOUT) * 1000
private val namespace = conf.get(KUBERNETES_NAMESPACE)
private val kubernetesDriverPodName = conf
.get(KUBERNETES_DRIVER_POD_NAME)
private val shouldDeleteExecutors = conf.get(KUBERNETES_DELETE_EXECUTORS)
val driverPod = kubernetesDriverPodName
.map(name => Option(kubernetesClient.pods()
.withName(name)
.get())
.getOrElse(throw new SparkException(
s"No pod was found named $name in the cluster in the " +
s"namespace $namespace (this was supposed to be the driver pod.).")))
// Executor IDs that have been requested from Kubernetes but have not been detected in any
// snapshot yet. Mapped to the (ResourceProfile id, timestamp) when they were created.
private val newlyCreatedExecutors = mutable.LinkedHashMap.empty[Long, (Int, Long)]
// Executor IDs that have been requested from Kubernetes but have not been detected in any POD
// snapshot yet but already known by the scheduler backend. Mapped to the ResourceProfile id.
private val schedulerKnownNewlyCreatedExecs = mutable.LinkedHashMap.empty[Long, Int]
private val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(conf)
// visible for tests
private[k8s] val numOutstandingPods = new AtomicInteger()
private var lastSnapshot = ExecutorPodsSnapshot()
// Executors that have been deleted by this allocator but not yet detected as deleted in
// a snapshot from the API server. This is used to deny registration from these executors
// if they happen to come up before the deletion takes effect.
@volatile private var deletedExecutorIds = Set.empty[Long]
def start(applicationId: String, schedulerBackend: KubernetesClusterSchedulerBackend): Unit = {
driverPod.foreach { pod =>
// Wait until the driver pod is ready before starting executors, as the headless service won't
// be resolvable by DNS until the driver pod is ready.
Utils.tryLogNonFatalError {
kubernetesClient
.pods()
.withName(pod.getMetadata.getName)
.waitUntilReady(driverPodReadinessTimeout, TimeUnit.SECONDS)
}
}
snapshotsStore.addSubscriber(podAllocationDelay) {
onNewSnapshots(applicationId, schedulerBackend, _)
}
}
def setTotalExpectedExecutors(resourceProfileToTotalExecs: Map[ResourceProfile, Int]): Unit = {
resourceProfileToTotalExecs.foreach { case (rp, numExecs) =>
rpIdToResourceProfile.getOrElseUpdate(rp.id, rp)
totalExpectedExecutorsPerResourceProfileId.put(rp.id, numExecs)
}
logDebug(s"Set total expected execs to $totalExpectedExecutorsPerResourceProfileId")
if (numOutstandingPods.get() == 0) {
snapshotsStore.notifySubscribers()
}
}
def isDeleted(executorId: String): Boolean = deletedExecutorIds.contains(executorId.toLong)
private def onNewSnapshots(
applicationId: String,
schedulerBackend: KubernetesClusterSchedulerBackend,
snapshots: Seq[ExecutorPodsSnapshot]): Unit = {
val k8sKnownExecIds = snapshots.flatMap(_.executorPods.keys)
newlyCreatedExecutors --= k8sKnownExecIds
schedulerKnownNewlyCreatedExecs --= k8sKnownExecIds
// Although we are going to delete some executors due to timeout in this function,
// it takes undefined time before the actual deletion. Hence, we should collect all PVCs
// in use at the beginning. False positive is okay in this context in order to be safe.
val k8sKnownPVCNames = snapshots.flatMap(_.executorPods.values.map(_.pod)).flatMap { pod =>
pod.getSpec.getVolumes.asScala
.flatMap { v => Option(v.getPersistentVolumeClaim).map(_.getClaimName) }
}
// transfer the scheduler backend known executor requests from the newlyCreatedExecutors
// to the schedulerKnownNewlyCreatedExecs
val schedulerKnownExecs = schedulerBackend.getExecutorIds().map(_.toLong).toSet
schedulerKnownNewlyCreatedExecs ++=
newlyCreatedExecutors.filterKeys(schedulerKnownExecs.contains(_)).mapValues(_._1)
newlyCreatedExecutors --= schedulerKnownNewlyCreatedExecs.keySet
// For all executors we've created against the API but have not seen in a snapshot
// yet - check the current time. If the current time has exceeded some threshold,
// assume that the pod was either never created (the API server never properly
// handled the creation request), or the API server created the pod but we missed
// both the creation and deletion events. In either case, delete the missing pod
// if possible, and mark such a pod to be rescheduled below.
val currentTime = clock.getTimeMillis()
val timedOut = newlyCreatedExecutors.flatMap { case (execId, (_, timeCreated)) =>
if (currentTime - timeCreated > podCreationTimeout) {
Some(execId)
} else {
logDebug(s"Executor with id $execId was not found in the Kubernetes cluster since it" +
s" was created ${currentTime - timeCreated} milliseconds ago.")
None
}
}
if (timedOut.nonEmpty) {
logWarning(s"Executors with ids ${timedOut.mkString(",")} were not detected in the" +
s" Kubernetes cluster after $podCreationTimeout ms despite the fact that a previous" +
" allocation attempt tried to create them. The executors may have been deleted but the" +
" application missed the deletion event.")
newlyCreatedExecutors --= timedOut
if (shouldDeleteExecutors) {
Utils.tryLogNonFatalError {
kubernetesClient
.pods()
.withLabel(SPARK_APP_ID_LABEL, applicationId)
.withLabel(SPARK_ROLE_LABEL, SPARK_POD_EXECUTOR_ROLE)
.withLabelIn(SPARK_EXECUTOR_ID_LABEL, timedOut.toSeq.map(_.toString): _*)
.delete()
}
}
}
if (snapshots.nonEmpty) {
lastSnapshot = snapshots.last
}
// Make a local, non-volatile copy of the reference since it's used multiple times. This
// is the only method that modifies the list, so this is safe.
var _deletedExecutorIds = deletedExecutorIds
if (snapshots.nonEmpty) {
val existingExecs = lastSnapshot.executorPods.keySet
_deletedExecutorIds = _deletedExecutorIds.filter(existingExecs.contains)
}
val notDeletedPods = lastSnapshot.executorPods.filterKeys(!_deletedExecutorIds.contains(_))
// Map the pods into per ResourceProfile id so we can check per ResourceProfile,
// add a fast path if not using other ResourceProfiles.
val rpIdToExecsAndPodState =
mutable.HashMap[Int, mutable.HashMap[Long, ExecutorPodState]]()
if (totalExpectedExecutorsPerResourceProfileId.size <= 1) {
rpIdToExecsAndPodState(ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID) =
mutable.HashMap.empty ++= notDeletedPods
} else {
notDeletedPods.foreach { case (execId, execPodState) =>
val rpId = execPodState.pod.getMetadata.getLabels.get(SPARK_RESOURCE_PROFILE_ID_LABEL).toInt
val execPods = rpIdToExecsAndPodState.getOrElseUpdate(rpId,
mutable.HashMap[Long, ExecutorPodState]())
execPods(execId) = execPodState
}
}
// sum of all the pending pods unknown by the scheduler (total for all the resources)
var totalPendingCount = 0
// total not running pods (including scheduler known & unknown, pending & newly requested ones)
var totalNotRunningPodCount = 0
val podsToAllocateWithRpId = totalExpectedExecutorsPerResourceProfileId
.asScala
.toSeq
.sortBy(_._1)
.flatMap { case (rpId, targetNum) =>
val podsForRpId = rpIdToExecsAndPodState.getOrElse(rpId, mutable.HashMap.empty)
val currentRunningCount = podsForRpId.values.count {
case PodRunning(_) => true
case _ => false
}
val (schedulerKnownPendingExecsForRpId, currentPendingExecutorsForRpId) = podsForRpId.filter {
case (_, PodPending(_)) => true
case _ => false
}.partition { case (k, _) =>
schedulerKnownExecs.contains(k)
}
// This variable is used later to print some debug logs. It's updated when cleaning up
// excess pod requests, since currentPendingExecutorsForRpId is immutable.
var pendingCountForRpId = currentPendingExecutorsForRpId.size
val newlyCreatedExecutorsForRpId =
newlyCreatedExecutors.filter { case (_, (waitingRpId, _)) =>
rpId == waitingRpId
}
val schedulerKnownNewlyCreatedExecsForRpId =
schedulerKnownNewlyCreatedExecs.filter { case (_, waitingRpId) =>
rpId == waitingRpId
}
if (podsForRpId.nonEmpty) {
logDebug(s"ResourceProfile Id: $rpId (" +
s"pod allocation status: $currentRunningCount running, " +
s"${currentPendingExecutorsForRpId.size} unknown pending, " +
s"${schedulerKnownPendingExecsForRpId.size} scheduler backend known pending, " +
s"${newlyCreatedExecutorsForRpId.size} unknown newly created, " +
s"${schedulerKnownNewlyCreatedExecsForRpId.size} scheduler backend known newly created)")
}
// It's possible that we have outstanding pods that are outdated when dynamic allocation
// decides to downscale the application. So check if we can release any pending pods early
// instead of waiting for them to time out. Drop them first from the unacknowledged list,
// then from the pending. However, in order to prevent too frequent fluctuation, newly
// requested pods are protected during executorIdleTimeout period.
//
// TODO: with dynamic allocation off, handle edge cases if we end up with more running
// executors than expected.
var notRunningPodCountForRpId =
currentPendingExecutorsForRpId.size + schedulerKnownPendingExecsForRpId.size +
newlyCreatedExecutorsForRpId.size + schedulerKnownNewlyCreatedExecsForRpId.size
val podCountForRpId = currentRunningCount + notRunningPodCountForRpId
if (podCountForRpId > targetNum) {
val excess = podCountForRpId - targetNum
val newlyCreatedToDelete = newlyCreatedExecutorsForRpId
.filter { case (_, (_, createTime)) =>
currentTime - createTime > executorIdleTimeout
}.keys.take(excess).toList
val pendingToDelete = currentPendingExecutorsForRpId
.filter(x => isExecutorIdleTimedOut(x._2, currentTime))
.take(excess - newlyCreatedToDelete.size)
.map { case (id, _) => id }
val toDelete = newlyCreatedToDelete ++ pendingToDelete
if (toDelete.nonEmpty) {
logInfo(s"Deleting ${toDelete.size} excess pod requests (${toDelete.mkString(",")}).")
_deletedExecutorIds = _deletedExecutorIds ++ toDelete
Utils.tryLogNonFatalError {
kubernetesClient
.pods()
.withField("status.phase", "Pending")
.withLabel(SPARK_APP_ID_LABEL, applicationId)
.withLabel(SPARK_ROLE_LABEL, SPARK_POD_EXECUTOR_ROLE)
.withLabelIn(SPARK_EXECUTOR_ID_LABEL, toDelete.sorted.map(_.toString): _*)
.delete()
newlyCreatedExecutors --= newlyCreatedToDelete
pendingCountForRpId -= pendingToDelete.size
notRunningPodCountForRpId -= toDelete.size
}
}
}
totalPendingCount += pendingCountForRpId
totalNotRunningPodCount += notRunningPodCountForRpId
// The code below just prints debug messages, which are only useful when there's a change
// in the snapshot state. Since the messages are a little spammy, avoid them when we know
// there are no useful updates.
if (log.isDebugEnabled && snapshots.nonEmpty) {
val outstanding = pendingCountForRpId + newlyCreatedExecutorsForRpId.size
if (currentRunningCount >= targetNum && !dynamicAllocationEnabled) {
logDebug(s"Current number of running executors for ResourceProfile Id $rpId is " +
"equal to the number of requested executors. Not scaling up further.")
} else {
if (newlyCreatedExecutorsForRpId.nonEmpty) {
logDebug(s"Still waiting for ${newlyCreatedExecutorsForRpId.size} executors for " +
s"ResourceProfile Id $rpId before requesting more.")
}
}
}
if (newlyCreatedExecutorsForRpId.isEmpty && podCountForRpId < targetNum) {
Some(rpId, podCountForRpId, targetNum)
} else {
// for this resource profile we do not request more PODs
None
}
}
val remainingSlotFromPendingPods = maxPendingPods - totalNotRunningPodCount
if (remainingSlotFromPendingPods > 0 && podsToAllocateWithRpId.size > 0) {
ExecutorPodsAllocator.splitSlots(podsToAllocateWithRpId, remainingSlotFromPendingPods)
.foreach { case ((rpId, podCountForRpId, targetNum), sharedSlotFromPendingPods) =>
val numMissingPodsForRpId = targetNum - podCountForRpId
val numExecutorsToAllocate =
math.min(math.min(numMissingPodsForRpId, podAllocationSize), sharedSlotFromPendingPods)
logInfo(s"Going to request $numExecutorsToAllocate executors from Kubernetes for " +
s"ResourceProfile Id: $rpId, target: $targetNum, known: $podCountForRpId, " +
s"sharedSlotFromPendingPods: $sharedSlotFromPendingPods.")
requestNewExecutors(numExecutorsToAllocate, applicationId, rpId, k8sKnownPVCNames)
}
}
deletedExecutorIds = _deletedExecutorIds
// Update the flag that helps the setTotalExpectedExecutors() callback avoid triggering this
// update method when not needed. PODs known by the scheduler backend are not counted here as
// they considered running PODs and they should not block upscaling.
numOutstandingPods.set(totalPendingCount + newlyCreatedExecutors.size)
}
/**
 * Collects the PVCs previously created for this application that are not
 * currently referenced by any executor and can therefore be handed to new
 * executors. Returns an empty buffer unless PVC ownership and reuse are both
 * enabled and a driver pod is known.
 *
 * @param applicationId Spark application id used as the PVC label selector
 * @param pvcsInUse     names of claims already attached to live executors
 */
private def getReusablePVCs(applicationId: String, pvcsInUse: Seq[String]) = {
  // Reuse only applies when the driver owns its PVCs, reuse is switched on,
  // and we actually know the driver pod (same short-circuit order as before).
  val reuseEnabled = conf.get(KUBERNETES_DRIVER_OWN_PVC) &&
    conf.get(KUBERNETES_DRIVER_REUSE_PVC) && driverPod.nonEmpty
  if (!reuseEnabled) {
    mutable.Buffer.empty[PersistentVolumeClaim]
  } else {
    // All PVCs labelled with this application id.
    val appPVCs = kubernetesClient
      .persistentVolumeClaims
      .withLabel("spark-app-selector", applicationId)
      .list()
      .getItems
      .asScala
    // Drop the ones currently claimed by running executors.
    val candidates = appPVCs.filterNot(p => pvcsInUse.contains(p.getMetadata.getName))
    logInfo(s"Found ${candidates.size} reusable PVCs from ${appPVCs.size} PVCs")
    candidates
  }
}
/**
 * Creates `numExecutorsToAllocate` executor pods in Kubernetes for the given
 * resource profile. For each pod: builds the executor spec, rewires its
 * volumes to reusable PVCs when possible, creates the pod, sets owner
 * references, creates any remaining (non-reused) PVCs, and records the pod in
 * `newlyCreatedExecutors`. If anything fails after pod creation, the pod is
 * deleted before the error is rethrown.
 *
 * @param numExecutorsToAllocate number of executor pods to request
 * @param applicationId          Spark application id, used for conf and labels
 * @param resourceProfileId      resource profile the new executors belong to
 * @param pvcsInUse              names of PVCs already attached to live executors
 */
private def requestNewExecutors(
    numExecutorsToAllocate: Int,
    applicationId: String,
    resourceProfileId: Int,
    pvcsInUse: Seq[String]): Unit = {
  // Check reusable PVCs for this executor allocation batch
  val reusablePVCs = getReusablePVCs(applicationId, pvcsInUse)
  for ( _ <- 0 until numExecutorsToAllocate) {
    // Globally unique, monotonically increasing executor id.
    val newExecutorId = EXECUTOR_ID_COUNTER.incrementAndGet()
    val executorConf = KubernetesConf.createExecutorConf(
      conf,
      newExecutorId.toString,
      applicationId,
      driverPod,
      resourceProfileId)
    val resolvedExecutorSpec = executorBuilder.buildFromFeatures(executorConf, secMgr,
      kubernetesClient, rpIdToResourceProfile(resourceProfileId))
    val executorPod = resolvedExecutorSpec.pod
    // Attach the executor container to the pod spec before submission.
    val podWithAttachedContainer = new PodBuilder(executorPod.pod)
      .editOrNewSpec()
      .addToContainers(executorPod.container)
      .endSpec()
      .build()
    // NOTE: this mutates podWithAttachedContainer's volume claim names in place
    // for reused PVCs, and returns the resource list minus the reused claims.
    // It must run before the pod is created below.
    val resources = replacePVCsIfNeeded(
      podWithAttachedContainer, resolvedExecutorSpec.executorKubernetesResources, reusablePVCs)
    val createdExecutorPod = kubernetesClient.pods().create(podWithAttachedContainer)
    try {
      // Make the executor pod the owner of its auxiliary resources so they are
      // garbage-collected with it.
      addOwnerReference(createdExecutorPod, resources)
      resources
        .filter(_.getKind == "PersistentVolumeClaim")
        .foreach { resource =>
          // Optionally make the driver pod a co-owner so PVCs outlive executors
          // (enables reuse) but are still cleaned up with the application.
          if (conf.get(KUBERNETES_DRIVER_OWN_PVC) && driverPod.nonEmpty) {
            addOwnerReference(driverPod.get, Seq(resource))
          }
          val pvc = resource.asInstanceOf[PersistentVolumeClaim]
          logInfo(s"Trying to create PersistentVolumeClaim ${pvc.getMetadata.getName} with " +
            s"StorageClass ${pvc.getSpec.getStorageClassName}")
          kubernetesClient.persistentVolumeClaims().create(pvc)
        }
      // Track the pod until a snapshot confirms it; used for timeout/excess cleanup.
      newlyCreatedExecutors(newExecutorId) = (resourceProfileId, clock.getTimeMillis())
      logDebug(s"Requested executor with id $newExecutorId from Kubernetes.")
    } catch {
      case NonFatal(e) =>
        // Roll back the pod we just created so we don't leak it, then rethrow.
        kubernetesClient.pods().delete(createdExecutorPod)
        throw e
    }
  }
}
/**
 * For each PVC in `resources`, tries to substitute an existing reusable claim
 * with the same storage class and requested storage size.
 *
 * Side effects: a matched claim is removed from `reusablePVCs` (so it cannot
 * be handed to two executors in the same batch), and the corresponding volume
 * in `pod` has its claim name rewritten in place to the reused claim's name.
 *
 * @param pod          the executor pod whose volumes may be rewired (mutated)
 * @param resources    auxiliary resources planned for creation with the pod
 * @param reusablePVCs pool of available claims; shared and mutated across calls
 * @return `resources` without the PVCs that were satisfied by reuse
 */
private def replacePVCsIfNeeded(
    pod: Pod,
    resources: Seq[HasMetadata],
    reusablePVCs: mutable.Buffer[PersistentVolumeClaim]): Seq[HasMetadata] = {
  val replacedResources = mutable.Set[HasMetadata]()
  resources.foreach {
    case pvc: PersistentVolumeClaim =>
      // Find one with the same storage class and size.
      val index = reusablePVCs.indexWhere { p =>
        p.getSpec.getStorageClassName == pvc.getSpec.getStorageClassName &&
          p.getSpec.getResources.getRequests.get("storage") ==
            pvc.getSpec.getResources.getRequests.get("storage")
      }
      if (index >= 0) {
        // Locate the pod volume that references the to-be-created claim.
        val volume = pod.getSpec.getVolumes.asScala.find { v =>
          v.getPersistentVolumeClaim != null &&
            v.getPersistentVolumeClaim.getClaimName == pvc.getMetadata.getName
        }
        if (volume.nonEmpty) {
          // Claim the reusable PVC (removing it from the pool) and point the
          // pod's volume at it instead of the new claim.
          val matchedPVC = reusablePVCs.remove(index)
          replacedResources.add(pvc)
          logInfo(s"Reuse PersistentVolumeClaim ${matchedPVC.getMetadata.getName}")
          volume.get.getPersistentVolumeClaim.setClaimName(matchedPVC.getMetadata.getName)
        }
      }
    case _ => // no-op
  }
  // Reused claims must not be created again, so filter them out.
  resources.filterNot(replacedResources.contains)
}
/**
 * Returns true when the pod behind `state` has been started longer ago than
 * `executorIdleTimeout`. If the pod's start time is missing or unparseable,
 * the pod is conservatively treated as timed out (eligible for deletion).
 */
private def isExecutorIdleTimedOut(state: ExecutorPodState, currentTime: Long): Boolean = {
  try {
    val podStartMillis = Instant.parse(state.pod.getStatus.getStartTime).toEpochMilli()
    val idleMillis = currentTime - podStartMillis
    idleMillis > executorIdleTimeout
  } catch {
    case _: Exception =>
      logDebug(s"Cannot get startTime of pod ${state.pod}")
      true
  }
}
}
private[spark] object ExecutorPodsAllocator {

  /**
   * A utility function to split the available slots among the specified consumers:
   * the first `slots % consumers.size` consumers receive one extra slot so the
   * distribution is as even as possible.
   *
   * Fix: the original computed `slots / consumers.size` unconditionally and
   * threw `ArithmeticException` for an empty consumer list; an empty input now
   * yields an empty result.
   *
   * @param consumers the entities competing for slots
   * @param slots     total number of slots to distribute
   * @return each consumer paired with its share of the slots
   */
  def splitSlots[T](consumers: Seq[T], slots: Int): Seq[(T, Int)] = {
    if (consumers.isEmpty) {
      Seq.empty
    } else {
      val d = slots / consumers.size
      val r = slots % consumers.size
      consumers.take(r).map((_, d + 1)) ++ consumers.takeRight(consumers.size - r).map((_, d))
    }
  }
}
| jiangxb1987/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocator.scala | Scala | apache-2.0 | 21,557 |
package lila.activity
import activities._
import model._
import org.joda.time.Interval
import lila.game.LightPov
import lila.practice.PracticeStudy
import lila.simul.Simul
import lila.study.Study
import lila.swiss.Swiss
import lila.tournament.LeaderboardApi.{ Entry => TourEntry }
import lila.ublog.UblogPost
/**
 * Aggregated view of one user's activity over a single time [[Interval]],
 * assembled for rendering on the activity timeline.
 *
 * Every section is optional: `None` means the section has no data for this
 * interval and should be omitted from the rendered output. The two booleans
 * (`stream`, `signup`) simply flag whether those events fall in the interval.
 *
 * NOTE(review): section semantics are inferred from field names/types; confirm
 * against the code that populates this view before relying on the details.
 */
case class ActivityView(
    interval: Interval,
    games: Option[Games] = None,
    puzzles: Option[Puzzles] = None,
    storm: Option[Storm] = None,
    racer: Option[Racer] = None,
    streak: Option[Streak] = None,
    // practice studies mapped to a per-study count (presumably moves/positions done)
    practice: Option[Map[PracticeStudy, Int]] = None,
    simuls: Option[List[Simul]] = None,
    patron: Option[Patron] = None,
    // forum topics with the posts the user made in each
    forumPosts: Option[Map[lila.forum.Topic, List[lila.forum.Post]]] = None,
    ublogPosts: Option[List[UblogPost.LightPost]] = None,
    // (number of moves played, sample of correspondence games)
    corresMoves: Option[(Int, List[LightPov])] = None,
    // (score over finished correspondence games, sample of those games)
    corresEnds: Option[(Score, List[LightPov])] = None,
    follows: Option[Follows] = None,
    studies: Option[List[Study.IdName]] = None,
    teams: Option[Teams] = None,
    tours: Option[ActivityView.Tours] = None,
    // swiss tournaments paired with an Int (presumably the user's rank/score — TODO confirm)
    swisses: Option[List[(Swiss.IdName, Int)]] = None,
    stream: Boolean = false,
    signup: Boolean = false
)
object ActivityView {

  /**
   * Tournament activity summary: total number of tournaments played (`nb`)
   * plus the best leaderboard entries for display.
   */
  case class Tours(
      nb: Int,
      best: List[TourEntry]
  )
}
| luanlv/lila | modules/activity/src/main/ActivityView.scala | Scala | mit | 1,278 |
package preact
import org.scalajs.dom
/**
 * Public entry point of the Scala.js Preact facade. Re-exports the types and
 * rendering functions users need so that importing `preact.Preact` suffices.
 */
object Preact {

  // Handle to the underlying raw (untyped) JS facade; all calls delegate to it.
  val raw = preact.raw.RawPreact

  // Type aliases re-exported from the raw facade so callers need not import preact.raw.
  type VNode = raw.VNode
  type Child = raw.Child

  // Alias for the stateful component base type defined in this package.
  type Component[Props, State] = preact.Component[Props, State]

  val FunctionFactory = preact.FunctionFactory

  /** Call signatures a function component may expose, by props/children shape. */
  object FunctionComponent {

    // Component taking only props.
    trait WithProps[Props] {
      def apply(props: Props): VNode
    }

    // Component taking only children.
    trait WithChildren {
      def apply(children: Child*): VNode
    }

    // Component taking both props and children.
    trait WithPropsAndChildren[Props] {
      def apply(props: Props, children: Child*): VNode
    }
  }

  /** Renders `node` into `parent`; delegates to raw Preact `render`. */
  def render(node: VNode, parent: dom.Element): dom.Element = raw.render(node, parent)

  /** Renders `node` into `parent`, passing `mergeWith` as the third argument of raw `render`. */
  def render(node: VNode, parent: dom.Element, mergeWith: dom.Element): dom.Element =
    raw.render(node, parent, mergeWith)

  /** Delegates to raw Preact's global `rerender`. */
  def rerender(): Unit = raw.rerender()
}
| LMnet/scala-js-preact | core/src/main/scala/preact/Preact.scala | Scala | mit | 794 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.vectorized
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.expressions.SpecificInternalRow
import org.apache.spark.sql.execution.columnar.ColumnAccessor
import org.apache.spark.sql.execution.columnar.compression.ColumnBuilderHelper
import org.apache.spark.sql.types._
import org.apache.spark.sql.vectorized.ColumnarArray
import org.apache.spark.unsafe.types.UTF8String
/**
 * Tests for the writable column vector implementations (on-heap and off-heap),
 * covering element round-trips for every primitive type, arrays, structs,
 * off-heap reallocation (SPARK-22092), and decompression of cached batches.
 */
class ColumnVectorSuite extends SparkFunSuite with BeforeAndAfterEach {

  /** Runs `block` against `vector`, always closing the vector afterwards. */
  private def withVector(
      vector: WritableColumnVector)(
      block: WritableColumnVector => Unit): Unit = {
    try block(vector) finally vector.close()
  }

  /** Runs `block` against both the on-heap and off-heap vector implementations. */
  private def withVectors(
      size: Int,
      dt: DataType)(
      block: WritableColumnVector => Unit): Unit = {
    withVector(new OnHeapColumnVector(size, dt))(block)
    withVector(new OffHeapColumnVector(size, dt))(block)
  }

  /** Registers a test named `name` that exercises `block` on both implementations. */
  private def testVectors(
      name: String,
      size: Int,
      dt: DataType)(
      block: WritableColumnVector => Unit): Unit = {
    test(name) {
      withVectors(size, dt)(block)
    }
  }

  testVectors("boolean", 10, BooleanType) { testVector =>
    (0 until 10).foreach { i =>
      testVector.appendBoolean(i % 2 == 0)
    }

    val array = new ColumnarArray(testVector, 0, 10)

    (0 until 10).foreach { i =>
      assert(array.get(i, BooleanType) === (i % 2 == 0))
    }
  }

  testVectors("byte", 10, ByteType) { testVector =>
    (0 until 10).foreach { i =>
      testVector.appendByte(i.toByte)
    }

    val array = new ColumnarArray(testVector, 0, 10)

    (0 until 10).foreach { i =>
      assert(array.get(i, ByteType) === i.toByte)
    }
  }

  testVectors("short", 10, ShortType) { testVector =>
    (0 until 10).foreach { i =>
      testVector.appendShort(i.toShort)
    }

    val array = new ColumnarArray(testVector, 0, 10)

    (0 until 10).foreach { i =>
      assert(array.get(i, ShortType) === i.toShort)
    }
  }

  testVectors("int", 10, IntegerType) { testVector =>
    (0 until 10).foreach { i =>
      testVector.appendInt(i)
    }

    val array = new ColumnarArray(testVector, 0, 10)

    (0 until 10).foreach { i =>
      assert(array.get(i, IntegerType) === i)
    }
  }

  testVectors("long", 10, LongType) { testVector =>
    (0 until 10).foreach { i =>
      testVector.appendLong(i)
    }

    val array = new ColumnarArray(testVector, 0, 10)

    (0 until 10).foreach { i =>
      assert(array.get(i, LongType) === i)
    }
  }

  testVectors("float", 10, FloatType) { testVector =>
    (0 until 10).foreach { i =>
      testVector.appendFloat(i.toFloat)
    }

    val array = new ColumnarArray(testVector, 0, 10)

    (0 until 10).foreach { i =>
      assert(array.get(i, FloatType) === i.toFloat)
    }
  }

  testVectors("double", 10, DoubleType) { testVector =>
    (0 until 10).foreach { i =>
      testVector.appendDouble(i.toDouble)
    }

    val array = new ColumnarArray(testVector, 0, 10)

    (0 until 10).foreach { i =>
      assert(array.get(i, DoubleType) === i.toDouble)
    }
  }

  testVectors("string", 10, StringType) { testVector =>
    (0 until 10).map { i =>
      val utf8 = s"str$i".getBytes("utf8")
      testVector.appendByteArray(utf8, 0, utf8.length)
    }

    val array = new ColumnarArray(testVector, 0, 10)

    (0 until 10).foreach { i =>
      assert(array.get(i, StringType) === UTF8String.fromString(s"str$i"))
    }
  }

  testVectors("binary", 10, BinaryType) { testVector =>
    (0 until 10).map { i =>
      val utf8 = s"str$i".getBytes("utf8")
      testVector.appendByteArray(utf8, 0, utf8.length)
    }

    val array = new ColumnarArray(testVector, 0, 10)

    (0 until 10).foreach { i =>
      val utf8 = s"str$i".getBytes("utf8")
      assert(array.get(i, BinaryType) === utf8)
    }
  }

  testVectors("mutable ColumnarRow", 10, IntegerType) { testVector =>
    val mutableRow = new MutableColumnarRow(Array(testVector))
    (0 until 10).foreach { i =>
      mutableRow.rowId = i
      mutableRow.setInt(0, 10 - i)
    }
    (0 until 10).foreach { i =>
      mutableRow.rowId = i
      assert(mutableRow.getInt(0) === (10 - i))
    }
  }

  val arrayType: ArrayType = ArrayType(IntegerType, containsNull = true)
  testVectors("array", 10, arrayType) { testVector =>

    val data = testVector.arrayData()
    var i = 0
    while (i < 6) {
      data.putInt(i, i)
      i += 1
    }

    // Populate it with arrays [0], [1, 2], [], [3, 4, 5]
    testVector.putArray(0, 0, 1)
    testVector.putArray(1, 1, 2)
    testVector.putArray(2, 3, 0)
    testVector.putArray(3, 3, 3)

    assert(testVector.getArray(0).toIntArray() === Array(0))
    assert(testVector.getArray(1).toIntArray() === Array(1, 2))
    assert(testVector.getArray(2).toIntArray() === Array.empty[Int])
    assert(testVector.getArray(3).toIntArray() === Array(3, 4, 5))
  }

  val structType: StructType = new StructType().add("int", IntegerType).add("double", DoubleType)
  testVectors("struct", 10, structType) { testVector =>
    val c1 = testVector.getChild(0)
    val c2 = testVector.getChild(1)
    c1.putInt(0, 123)
    c2.putDouble(0, 3.45)
    c1.putInt(1, 456)
    c2.putDouble(1, 5.67)

    assert(testVector.getStruct(0).get(0, IntegerType) === 123)
    assert(testVector.getStruct(0).get(1, DoubleType) === 3.45)
    assert(testVector.getStruct(1).get(0, IntegerType) === 456)
    assert(testVector.getStruct(1).get(1, DoubleType) === 5.67)
  }

  test("[SPARK-22092] off-heap column vector reallocation corrupts array data") {
    withVector(new OffHeapColumnVector(8, arrayType)) { testVector =>
      val data = testVector.arrayData()
      (0 until 8).foreach(i => data.putInt(i, i))
      (0 until 8).foreach(i => testVector.putArray(i, i, 1))

      // Increase vector's capacity and reallocate the data to new bigger buffers.
      testVector.reserve(16)

      // Check that none of the values got lost/overwritten.
      (0 until 8).foreach { i =>
        assert(testVector.getArray(i).toIntArray() === Array(i))
      }
    }
  }

  test("[SPARK-22092] off-heap column vector reallocation corrupts struct nullability") {
    withVector(new OffHeapColumnVector(8, structType)) { testVector =>
      (0 until 8).foreach(i => if (i % 2 == 0) testVector.putNull(i) else testVector.putNotNull(i))
      testVector.reserve(16)
      (0 until 8).foreach(i => assert(testVector.isNullAt(i) == (i % 2 == 0)))
    }
  }

  /**
   * Shared driver for the "CachedBatch <name> Apis" tests, replacing seven
   * copy-pasted test bodies that differed only in the typed setter/getter.
   * Builds a compressed column of 16 rows — a null followed by values written
   * for rows 1..15 via `setValue` — decompresses it into both vector
   * implementations, and verifies nulls and values via `checkValue`.
   */
  private def testCachedBatchApis(
      name: String,
      dataType: DataType)(
      setValue: (SpecificInternalRow, Int) => Unit)(
      checkValue: (WritableColumnVector, Int) => Unit): Unit = {
    test(s"CachedBatch $name Apis") {
      val columnBuilder = ColumnBuilderHelper(dataType, 1024, "col", true)
      val row = new SpecificInternalRow(Array(dataType))

      // Row 0 is a null; rows 1..15 carry type-specific values.
      row.setNullAt(0)
      columnBuilder.appendFrom(row, 0)
      for (i <- 1 until 16) {
        setValue(row, i)
        columnBuilder.appendFrom(row, 0)
      }

      withVectors(16, dataType) { testVector =>
        val columnAccessor = ColumnAccessor(dataType, columnBuilder.build)
        ColumnAccessor.decompress(columnAccessor, testVector, 16)
        // Accessors of CachedBatch should return the correct null flags and values.
        assert(testVector.isNullAt(0) == true)
        for (i <- 1 until 16) {
          assert(testVector.isNullAt(i) == false)
          checkValue(testVector, i)
        }
      }
    }
  }

  testCachedBatchApis("boolean", BooleanType)((row, i) => row.setBoolean(0, i % 2 == 0)) {
    (vector, i) => assert(vector.getBoolean(i) == (i % 2 == 0))
  }

  testCachedBatchApis("byte", ByteType)((row, i) => row.setByte(0, i.toByte)) {
    (vector, i) => assert(vector.getByte(i) == i)
  }

  testCachedBatchApis("short", ShortType)((row, i) => row.setShort(0, i.toShort)) {
    (vector, i) => assert(vector.getShort(i) == i)
  }

  testCachedBatchApis("int", IntegerType)((row, i) => row.setInt(0, i)) {
    (vector, i) => assert(vector.getInt(i) == i)
  }

  testCachedBatchApis("long", LongType)((row, i) => row.setLong(0, i.toLong)) {
    (vector, i) => assert(vector.getLong(i) == i.toLong)
  }

  testCachedBatchApis("float", FloatType)((row, i) => row.setFloat(0, i.toFloat)) {
    (vector, i) => assert(vector.getFloat(i) == i.toFloat)
  }

  testCachedBatchApis("double", DoubleType)((row, i) => row.setDouble(0, i.toDouble)) {
    (vector, i) => assert(vector.getDouble(i) == i.toDouble)
  }
}
| bravo-zhang/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnVectorSuite.scala | Scala | apache-2.0 | 12,379 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.3
* @date Fri Jan 29 15:43:08 EST 2016
* @see LICENSE (MIT style license file).
*/
package scalation.linalgebra
import scala.collection.Traversable
import scala.collection.mutable.IndexedSeq
import scala.util.Sorting.quickSort
import scalation.math.Rational.{abs => ABS, max => MAX, _}
import scalation.math.Rational
import scalation.util.Error
import scalation.util.SortingD.{iqsort, qsort2}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `VectoQ` class stores and operates on Numeric Vectors of base type `Rational`.
* It follows the framework of `gen.VectorN [T]` and is provided for performance.
* @param dim the dimension/size of the vector
* @param v the 1D array used to store vector elements
*/
trait VectoQ
extends Traversable [Rational] with PartiallyOrdered [VectoQ] with Vec with Error with Serializable
{
/** Vector dimension
*/
val dim: Int
/** Number of elements in the vector as a Rational
*/
val nd = dim.toDouble
/** Range for the storage array
*/
val range = 0 until dim
/** Format String used for printing vector values (change using 'setFormat')
* Ex: "%d,\\t", "%.6g,\\t" or "%12.6g,\\t"
*/
protected var fString = "%s,\\t"
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the size (number of elements) of 'this' vector.
*/
override def size: Int = dim
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Produce the range of all indices (0 to one less than dim).
*/
def indices: Range = 0 until dim
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Expand the size (dim) of 'this' vector by 'more' elements.
* @param more the number of new elements to add
*/
def expand (more: Int = dim): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a zero vector (all elements are zero) of length 'size'.
* @param size the size of the new vector
*/
def zero (size: Int): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a one vector (all elements are one) of length 'size'.
* @param size the size of the new vector
*/
def one (size: Int): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a vector of the form (0, ... 1, ... 0) where the 1 is at position j.
* @param j the position to place the 1
* @param size the size of the vector (upper bound = size - 1)
*/
def oneAt (j: Int, size: Int = dim): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a vector of the form (0, ... -1, ... 0) where the -1 is at position j.
* @param j the position to place the -1
* @param size the size of the vector (upper bound = size - 1)
*/
def _oneAt (j: Int, size: Int = dim): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert 'this' `VectoQ` into a `VectoI`.
*/
def toInt: VectoI
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert 'this' `VectoQ` into a `VectoL`.
*/
def toLong: VectoL
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert 'this' `VectoQ` into a `VectoD`.
*/
def toDouble: VectoD
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert 'this' `VectoQ` into a dense version.
*/
def toDense: VectorQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get 'this' vector's element at the 'i'-th index position.
* @param i the given index
*/
def apply (i: Int): Rational
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get 'this' vector's elements within the given range (vector slicing).
* @param r the given range
*/
def apply (r: Range): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get 'this' vector's entire sequence/array.
*/
def apply (): IndexedSeq [Rational]
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set 'this' vector's element at the 'i'-th index position.
* @param i the given index
* @param x the value to assign
*/
def update (i: Int, x: Rational)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set 'this' vector's elements over the given range (vector slicing).
* @param r the given range
* @param x the value to assign
*/
def update (r: Range, x: Rational)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set 'this' vector's elements over the given range (vector slicing).
* @param r the given range
* @param u the vector to assign
*/
def update (r: Range, u: VectoQ)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Iterate over 'this' vector element by element.
* @param f the function to apply
*/
def foreach [U] (f: Rational => U)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set each value in 'this' vector to 'x'.
* @param x the value to be assigned
*/
def set (x: Rational)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set the values in 'this' vector to the values in sequence 'u'.
* @param u the sequence of values to be assigned
*/
def set (u: Seq [Rational])
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create a copy of this Vector.
*/
def copy: VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Filter the elements of 'this' vector based on the predicate 'p', returning
* a new vector.
* @param p the predicate (`Boolean` function) to apply
*/
// def filter (p: Rational => Boolean): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Filter the elements of 'this' vector based on the predicate 'p', returning
* the index positions.
* @param p the predicate (`Boolean` function) to apply
*/
def filterPos (p: Rational => Boolean): IndexedSeq [Int]
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Map the elements of 'this' vector by applying the mapping function 'f'.
* @param f the function to apply
*/
def map (f: Rational => Rational): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Slice 'this' vector 'from' to 'end'.
* @param from the start of the slice (included)
* @param till the end of the slice (excluded)
*/
override def slice (from: Int, till: Int = dim): VectoQ = null
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Select a subset of elements of 'this' vector corresponding to a 'basis'.
* @param basis the set of index positions (e.g., 0, 2, 5)
*/
def select (basis: Array [Int]): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate 'this' vector and vector' b'.
* @param b the vector to be concatenated
*/
def ++ (b: VectoQ): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Concatenate 'this' vector and scalar 's'.
* @param s the scalar to be concatenated
*/
def ++ (s: Rational): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' vector and vector 'b'.
* @param b the vector to add
*/
def + (b: VectoQ): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' vector and scalar 's'.
* @param s the scalar to add
*/
def + (s: Rational): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add 'this' vector and scalar 's._2' only at position 's._1'.
* @param s the (position, scalar) to add
*/
def + (s: Tuple2 [Int, Rational]): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add in-place 'this' vector and vector 'b'.
* @param b the vector to add
*/
def += (b: VectoQ): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add in-place 'this' vector and scalar 's'.
* @param s the scalar to add
*/
def += (s: Rational): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the negative of 'this' vector (unary minus).
*/
def unary_- (): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' vector subtract vector 'b'.
* @param b the vector to subtract
*/
def - (b: VectoQ): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' vector subtract scalar 's'.
* @param s the scalar to subtract
*/
def - (s: Rational): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** From 'this' vector subtract scalar 's._2' only at position 's._1'.
* @param s the (position, scalar) to subtract
*/
def - (s: Tuple2 [Int, Rational]): VectoQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** From 'this' vector subtract in-place vector 'b'.
     *  @param b  the vector to subtract
     */
    def -= (b: VectoQ): VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** From 'this' vector subtract in-place scalar 's'.
     *  @param s  the scalar to subtract
     */
    def -= (s: Rational): VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Multiply 'this' vector by vector 'b' (element-by-element).
     *  @param b  the vector to multiply by
     */
    def * (b: VectoQ): VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Multiply 'this' vector by scalar 's'.
     *  @param s  the scalar to multiply by
     */
    def * (s: Rational): VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Multiply in-place 'this' vector and vector 'b' (element-by-element).
     *  @param b  the vector to multiply by
     */
    def *= (b: VectoQ): VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Multiply in-place 'this' vector and scalar 's'.
     *  @param s  the scalar to multiply by
     */
    def *= (s: Rational): VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Divide 'this' vector by vector 'b' (element-by-element).
     *  @param b  the vector to divide by
     */
    def / (b: VectoQ): VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Divide 'this' vector by scalar 's'.
     *  @param s  the scalar to divide by
     */
    def / (s: Rational): VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Divide in-place 'this' vector by vector 'b' (element-by-element).
     *  @param b  the vector to divide by
     */
    def /= (b: VectoQ): VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Divide in-place 'this' vector by scalar 's'.
     *  @param s  the scalar to divide by
     */
    def /= (s: Rational): VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Return the vector containing each element of 'this' vector raised to the
     *  s-th power.
     *  @param s  the scalar exponent
     */
    def ~^ (s: Rational): VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Raise in-place each element of 'this' vector to the 's'-th power.
     *  @param s  the scalar exponent
     */
    def ~^= (s: Rational): VectoQ
    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compare 'this' vector with that vector 'b' for inequality.
     *  @param b  that vector
     */
    def ≠ (b: VectoQ) = this != b

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compare 'this' vector with that vector 'b' for less than or equal to.
     *  @param b  that vector
     */
    def ≤ (b: VectoQ) = this <= b

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compare 'this' vector with that vector 'b' for greater than or equal to.
     *  @param b  that vector
     */
    def ≥ (b: VectoQ) = this >= b

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Return the vector containing the square of each element of 'this' vector.
     */
    def sq: VectoQ = this * this

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Return the vector containing the reciprocal of each element of 'this' vector.
     */
    def recip: VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Return the vector that is the element-wise absolute value of 'this' vector.
     */
    def abs: VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Sum the elements of 'this' vector.
     */
    def sum: Rational

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Sum the absolute value of the elements of 'this' vector.
     */
    def sumAbs: Rational

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Sum the elements of 'this' vector skipping the 'i'-th element (Not Equal 'i').
     *  @param i  the index of the element to skip
     */
    def sumNE (i: Int): Rational

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Sum the positive (> 0) elements of 'this' vector.
     */
    def sumPos: Rational

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compute the mean of the elements of 'this' vector ('nd' is the dimension as a number).
     */
    def mean = sum / nd

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compute the (unbiased) sample variance of the elements of 'this' vector
     *  (divides by 'nd' - 1).
     */
    def variance = (normSq - sum * sum / nd) / (nd-1.0)

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compute the population variance of the elements of 'this' vector (divides by 'nd').
     *  This is also the (biased) MLE estimator for sample variance.
     */
    def pvariance = (normSq - sum * sum / nd) / nd

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Establish the rank order of the elements in 'this' vector, e.g.,
     *  (8.0, 2.0, 4.0, 6.0) is (3, 0, 1, 2).
     */
    def rank: VectoI

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Cumulate the values of 'this' vector from left to right (e.g., create a
     *  CDF from a pmf).  Example: (4, 2, 3, 1) --> (4, 6, 9, 10)
     */
    def cumulate: VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Normalize 'this' vector so that it sums to one (like a probability vector).
     */
    def normalize: VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Normalize 'this' vector so its length is one (unit vector).
     */
    def normalizeU: VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Normalize 'this' vector to have a maximum of one.
     */
    def normalize1: VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compute the dot product (or inner product) of 'this' vector with vector 'b'.
     *  @param b  the other vector
     */
    def dot (b: VectoQ): Rational

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compute the dot product (or inner product) of 'this' vector with vector 'b'
     *  (Unicode alias for 'dot').
     *  @param b  the other vector
     */
    def ∙ (b: VectoQ): Rational = this dot b

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compute the Euclidean norm (2-norm) squared of 'this' vector.
     */
    def normSq: Rational = this dot this

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compute the Euclidean norm (2-norm) of 'this' vector.
     */
    def norm: Rational = sqrt (normSq).toRational

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compute the Manhattan norm (1-norm) of 'this' vector.
     */
    def norm1: Rational
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Find the maximum element in 'this' vector.
     *  @param e  the ending index (exclusive) for the search
     */
    def max (e: Int = dim): Rational

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Take the maximum of 'this' vector with vector 'b' (element-by-element).
     *  @param b  the other vector
     */
    def max (b: VectoQ): VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Find the minimum element in 'this' vector.
     *  @param e  the ending index (exclusive) for the search
     */
    def min (e: Int = dim): Rational

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Take the minimum of 'this' vector with vector 'b' (element-by-element).
     *  @param b  the other vector
     */
    def min (b: VectoQ): VectoQ

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Find the element with the greatest magnitude in 'this' vector
     *  (the larger of |max| and |min|).
     */
    def mag: Rational = ABS (max ()) max ABS (min ())

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Find the argument maximum of 'this' vector (index of maximum element).
     *  @param e  the ending index (exclusive) for the search
     */
    def argmax (e: Int = dim): Int

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Find the argument minimum of 'this' vector (index of minimum element).
     *  @param e  the ending index (exclusive) for the search
     */
    def argmin (e: Int = dim): Int

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Return the argument minimum of 'this' vector (-1 if it's not negative).
     *  @param e  the ending index (exclusive) for the search
     */
    def argminNeg (e: Int = dim): Int

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Return the argument maximum of 'this' vector (-1 if it's not positive).
     *  @param e  the ending index (exclusive) for the search
     */
    def argmaxPos (e: Int = dim): Int

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Return the index of the first negative element in 'this' vector (-1 otherwise).
     *  @param e  the ending index (exclusive) for the search
     */
    def firstNeg (e: Int = dim): Int

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Return the index of the first positive element in 'this' vector (-1 otherwise).
     *  @param e  the ending index (exclusive) for the search
     */
    def firstPos (e: Int = dim): Int

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Return the index of the first occurrence of element 'x' in 'this' vector,
     *  or -1 if not found.
     *  @param x  the given element
     *  @param e  the ending index (exclusive) for the search
     */
    def indexOf (x: Rational, e: Int = dim): Int

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Find and return index of first element satisfying predicate 'p', or
     *  -1 if not found.
     *  @param p  the predicate to check
     */
    def indexWhere (p: (Rational) => Boolean): Int

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Count the number of strictly negative elements in 'this' vector.
     */
    def countNeg: Int

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Count the number of strictly positive elements in 'this' vector.
     */
    def countPos: Int

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Create a vector consisting of the distinct elements in 'this' vector.
     */
    def distinct: VectoQ

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Count the number of distinct elements in 'this' vector.
     *  NOTE: the name 'countinct' (sic) is kept as-is for source compatibility
     *  with existing implementations/callers; it reads as "count distinct".
     */
    def countinct: Int

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Determine whether 'x' is contained in 'this' vector.
     *  @param x  the element to be checked
     */
    def contains (x: Rational): Boolean

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Sort 'this' vector in-place in ascending (non-decreasing) order.
     */
    def sort ()
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Sort 'this' vector in-place in descending (non-increasing) order.
     */
    def sort2 ()

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Swap elements 'i' and 'j' in 'this' vector (in-place).
     *  @param i  the first element in the swap
     *  @param j  the second element in the swap
     */
    def swap (i: Int, j: Int)

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Check whether the other vector 'b' is at least as long as 'this' vector.
     *  NOTE(review): despite the name, this checks 'dim <= b.dim', not strict
     *  equality of dimensions.
     *  @param b  the other vector
     */
    def sameDimensions (b: VectoQ): Boolean = dim <= b.dim

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Check whether 'this' vector is nonnegative (has no negative elements).
     */
    def isNonnegative: Boolean

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compare 'this' vector with vector 'b'.
     *  @param b  the other vector
     */
//  private def tryCompareTo [B >: VectoQ] (b: B)
//      (implicit view_1: (B) => PartiallyOrdered [B]): Option [Int]

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Override equals to determine whether 'this' vector equals vector 'b'.
     *  @param b  the vector to compare with this
     */
    override def equals (b: Any): Boolean

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Must also override hashCode for 'this' vector to be compatible with equals.
     */
    override def hashCode (): Int

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Set the format to the 'newFormat' (e.g., "%.6g,\\t" or "%12.6g,\\t").
     *  @param newFormat  the new format String
     */
    def setFormat (newFormat: String) { fString = newFormat }

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Convert 'this' vector to a String.
     */
    override def toString: String
} // VectoQ trait
| NBKlepp/fda | scalation_1.3/scalation_mathstat/src/main/scala/scalation/linalgebra/VectoQ.scala | Scala | mit | 24,237 |
import sbt._
// Version numbers shared across the build definition below.
object Versions {
  // Scala version the plugin is compiled against.
  val scalaVersion = "2.11.6"
  // IntelliJ IDEA build number this project targets — presumably consumed by
  // the packaging/SDK setup elsewhere in the build; confirm against build.sbt.
  val ideaVersion = "142.4859.6"
  val sbtStructureVersion = "4.2.0"
  val luceneVersion = "4.8.1"
  val aetherVersion = "1.0.0.v20140518"
  val sisuInjectVersion = "2.2.3"
  val wagonVersion = "2.6"
  val httpComponentsVersion = "4.3.1"
}
// Individual library coordinates (organization % artifact % version), grouped
// into usable sets by DependencyGroups below.
object Dependencies {
  import Versions._

  // sbt project-structure extractors and launcher.
  val sbtStructureExtractor012 = "org.jetbrains" % "sbt-structure-extractor-0-12" % sbtStructureVersion
  val sbtStructureExtractor013 = "org.jetbrains" % "sbt-structure-extractor-0-13" % sbtStructureVersion
  val sbtLaunch = "org.scala-sbt" % "sbt-launch" % "0.13.8"

  // Scala compiler/runtime and standard modules.
  val scalaLibrary = "org.scala-lang" % "scala-library" % scalaVersion
  val scalaReflect = "org.scala-lang" % "scala-reflect" % scalaVersion
  val scalaCompiler = "org.scala-lang" % "scala-compiler" % scalaVersion
  val scalaXml = "org.scala-lang.modules" %% "scala-xml" % "1.0.2"
  val scalaParserCombinators = "org.scala-lang.modules" %% "scala-parser-combinators" % "1.0.4"
  val sbtStructureCore = "org.jetbrains" % "sbt-structure-core_2.11" % sbtStructureVersion
  val evoInflector = "org.atteo" % "evo-inflector" % "1.2"
  val scalatestFindersPatched = "org.scalatest" % "scalatest-finders-patched" % "0.9.6"

  // Plexus IoC container (used by the Maven indexer).
  val plexusContainerDefault = "org.codehaus.plexus" % "plexus-container-default" % "1.5.5"
  val plexusClassworlds = "org.codehaus.plexus" % "plexus-classworlds" % "2.4"
  val plexusUtils = "org.codehaus.plexus" % "plexus-utils" % "3.0.8"
  val plexusComponentAnnotations = "org.codehaus.plexus" % "plexus-component-annotations" % "1.5.5"
  val xbeanReflect = "org.apache.xbean" % "xbean-reflect" % "3.4"

  // Lucene modules for the artifact search index.
  val luceneCore = "org.apache.lucene" % "lucene-core" % luceneVersion
  val luceneHighlighter = "org.apache.lucene" % "lucene-highlighter" % luceneVersion
  val luceneMemory = "org.apache.lucene" % "lucene-memory" % luceneVersion
  val luceneQueries = "org.apache.lucene" % "lucene-queries" % luceneVersion
  val luceneQueryParser = "org.apache.lucene" % "lucene-queryparser" % luceneVersion
  val luceneAnalyzers = "org.apache.lucene" % "lucene-analyzers-common" % luceneVersion
  val luceneSandbox = "org.apache.lucene" % "lucene-sandbox" % luceneVersion

  // Eclipse Aether (repository system API).
  val aetherApi = "org.eclipse.aether" % "aether-api" % aetherVersion
  val aetherUtil = "org.eclipse.aether" % "aether-util" % aetherVersion

  // Sonatype Sisu dependency injection.
  val sisuInjectPlexus = "org.sonatype.sisu" % "sisu-inject-plexus" % sisuInjectVersion
  val sisuInjectBean = "org.sonatype.sisu" % "sisu-inject-bean" % sisuInjectVersion
  val sisuGuice = "org.sonatype.sisu" % "sisu-guice" % "3.0.3"

  // Maven Wagon transports plus their HTTP stack.
  val wagonHttp = "org.apache.maven.wagon" % "wagon-http" % wagonVersion
  val wagonHttpShared = "org.apache.maven.wagon" % "wagon-http-shared" % wagonVersion
  val wagonProviderApi = "org.apache.maven.wagon" % "wagon-provider-api" % wagonVersion
  val httpClient = "org.apache.httpcomponents" % "httpclient" % httpComponentsVersion
  val httpCore = "org.apache.httpcomponents" % "httpcore" % httpComponentsVersion
  val commonsLogging = "commons-logging" % "commons-logging" % "1.1.3"
  val commonsCodec = "commons-codec" % "commons-codec" % "1.6"
  val commonsLang = "commons-lang" % "commons-lang" % "2.6"
  val commonsIo = "commons-io" % "commons-io" % "2.2"
  val jsoup = "org.jsoup" % "jsoup" % "1.7.2"

  // Maven indexer and model.
  val mavenIndexerCore = "org.apache.maven.indexer" % "indexer-core" % "6.0"
  val mavenModel = "org.apache.maven" % "maven-model" % "3.0.5"

  // Test-scoped JUnit runner integration.
  val junitInterface = "com.novocode" % "junit-interface" % "0.11" % "test"
}
// Named bundles of the coordinates above, as consumed by the build's
// libraryDependencies settings.
object DependencyGroups {
  import Dependencies._

  // Plexus IoC container + reflection support.
  val plexusContainer = Seq(
    plexusContainerDefault,
    plexusClassworlds,
    plexusUtils,
    plexusComponentAnnotations,
    xbeanReflect
  )

  // All Lucene modules needed by the indexer.
  val lucene = Seq(
    luceneCore,
    luceneHighlighter,
    luceneMemory,
    luceneQueries,
    luceneQueryParser,
    luceneAnalyzers,
    luceneSandbox
  )

  val aether = Seq(
    aetherApi,
    aetherUtil
  )

  val sisu = Seq(
    sisuInjectPlexus,
    sisuInjectBean,
    sisuGuice
  )

  // Wagon transports plus the full HTTP/commons stack they require.
  val wagon = Seq(
    wagonHttp,
    wagonHttpShared,
    wagonProviderApi,
    httpClient,
    httpCore,
    commonsCodec,
    commonsLogging,
    commonsLang,
    commonsIo,
    jsoup
  )

  // Maven indexer core together with everything it transitively needs.
  val mavenIndexer = Seq(
    mavenIndexerCore,
    mavenModel
  ) ++ plexusContainer ++ lucene ++ aether ++ sisu ++ wagon

  // Main plugin classpath.
  val scalaCommunity = Seq(
    scalaLibrary,
    scalaReflect,
    scalaXml,
    scalaParserCombinators,
    sbtStructureCore,
    evoInflector,
    scalatestFindersPatched
  ) ++ mavenIndexer

  val scalap = Seq(
    scalaLibrary,
    scalaReflect,
    scalaCompiler
  )

  // NOTE: the specs2 coordinate below is duplicated in 'runners'; keep the two
  // in sync when bumping versions.
  val scalaRunner = Seq(
    "org.specs2" %% "specs2" % "2.3.11" % "provided" excludeAll ExclusionRule(organization = "org.ow2.asm")
  )

  // Test-framework runners, all "provided" (supplied by the user project).
  val runners = Seq(
    "org.specs2" %% "specs2" % "2.3.11" % "provided" excludeAll ExclusionRule(organization = "org.ow2.asm"),
    "org.scalatest" % "scalatest_2.11" % "2.2.1" % "provided",
    "com.lihaoyi" %% "utest" % "0.1.3" % "provided"
  )

  // sbt launchers for every sbt version exercised by the tests.
  val sbtLaunchTestDownloader =
    Seq("0.12.4", "0.13.0", "0.13.1", "0.13.2",
        "0.13.5", "0.13.6", "0.13.7", "0.13.8",
        "0.13.9")
      .map(v => "org.scala-sbt" % "sbt-launch" % v)

  // Artifacts pre-fetched for integration tests.
  val testDownloader = Seq(
    "org.scalatest" % "scalatest_2.11" % "2.2.1",
    "org.scalatest" % "scalatest_2.10" % "2.2.1",
    "org.specs2" % "specs2_2.11" % "2.4.15",
    "org.scalaz" % "scalaz-core_2.11" % "7.1.0",
    "org.scalaz" % "scalaz-concurrent_2.11" % "7.1.0",
    "org.scala-lang.modules" % "scala-xml_2.11" % "1.0.2",
    "org.specs2" % "specs2_2.10" % "2.4.6",
    "org.scalaz" % "scalaz-core_2.10" % "7.1.0",
    "org.scalaz" % "scalaz-concurrent_2.10" % "7.1.0",
    "org.scalaz.stream" % "scalaz-stream_2.11" % "0.6a",
    "com.chuusai" % "shapeless_2.11" % "2.0.0",
    "org.typelevel" % "scodec-bits_2.11" % "1.1.0-SNAPSHOT",
    "org.typelevel" % "scodec-core_2.11" % "1.7.0-SNAPSHOT",
    "org.scalatest" % "scalatest_2.11" % "2.1.7",
    "org.scalatest" % "scalatest_2.10" % "2.1.7",
    "org.scalatest" % "scalatest_2.10" % "1.9.2",
    "com.github.julien-truffaut" %% "monocle-core" % "1.2.0-SNAPSHOT",
    "com.github.julien-truffaut" %% "monocle-generic" % "1.2.0-SNAPSHOT",
    "com.github.julien-truffaut" %% "monocle-macro" % "1.2.0-SNAPSHOT",
    "io.spray" %% "spray-routing" % "1.3.1"
  )

  // Extractors/launcher bundled into the plugin distribution.
  val sbtRuntime = Seq(
    sbtStructureExtractor012,
    sbtStructureExtractor013,
    sbtLaunch
  )
}
| SergeevPavel/intellij-scala | project/dependencies.scala | Scala | apache-2.0 | 6,428 |
package com.heluna.filter
import com.typesafe.scalalogging.slf4j.Logging
import com.heluna.cache.Redis
import com.heluna.util.MailDropConfig
import com.heluna.model.{Continue, Reject}
import java.util.Date
import com.redis.serialization.Parse.Implicits.parseLong
/**
* Created with IntelliJ IDEA.
* User: mark
* Date: 5/1/13
* Time: 7:45 PM
*/
object SubjectFloodFilter extends Object with Redis with Logging {

  /** Decide whether a message with the given subject should be accepted.
   *  The subject is normalized (null-safe, lower-cased) before checking; a
   *  banned or flooding subject is (re-)banned and rejected. Any Redis
   *  failure is logged and the message is allowed through.
   */
  def check(subject: String): Product = {
    val normalized = Option(subject).getOrElse("").toLowerCase
    try {
      if (isBanned(normalized) || isFlooding(normalized)) {
        // Ban this subject, or extend an existing ban.
        ban(normalized)
        Reject(MailDropConfig("maildrop.subject.ban-response").getOrElse("Banned."))
      } else {
        Continue()
      }
    } catch {
      // Redis is probably down here - just continue.
      case e: Exception =>
        logger error "Exception in SubjectFloodFilter: " + e.getMessage
        Continue()
    }
  }

  /** Redis key holding recent arrival timestamps for a subject. */
  def subjectKey(subject: String): String = "subj:" + subject

  /** Redis key marking a subject as banned. */
  def banKey(subject: String): String = "bansubj:" + subject

  /** A subject is banned while its ban key exists in Redis. */
  def isBanned(subject: String): Boolean = redis.get[String](banKey(subject)).isDefined

  /** Record one arrival for this subject and report whether it is flooding:
   *  more than the configured number of messages within the flood window.
   */
  def isFlooding(subject: String): Boolean = {
    val now = new Date().getTime
    val maxMessages = MailDropConfig.getInt("maildrop.subject.flood-messages").getOrElse(20)
    val key = subjectKey(subject)
    // Push this arrival, cap the list, and refresh its expiry.
    redis.lpush(key, now)
    redis.ltrim(key, 0, maxMessages)
    redis.expire(key, MailDropConfig.getSeconds("maildrop.subject.flood-time").getOrElse(120))
    // Only pop/inspect the oldest timestamp once the list is over the limit
    // (short-circuit keeps rpop from firing otherwise, as before).
    val overLimit = redis.llen(key).getOrElse(0L).toInt > maxMessages
    overLimit &&
      ((now - redis.rpop[Long](key).getOrElse(0L)) < MailDropConfig.getMilliseconds("maildrop.subject.flood-time").getOrElse(120000L))
  }

  /** Create or refresh the ban marker for this subject. */
  def ban(subject: String): Unit = {
    val key = banKey(subject)
    redis.set(key, "y")
    redis.expire(key, MailDropConfig.getSeconds("maildrop.subject.ban-time").getOrElse(300))
  }

}
| ministryofjustice/maildrop | smtp/src/main/scala/com/heluna/filter/SubjectFloodFilter.scala | Scala | mit | 1,919 |
package com.tirthal.learning.java2scala.scalaway.classobj
// Scala way - basic syntax
// Java developers to be aware that ---> read next comments...
object Abc {
def main(args: Array[String])
{
// In Scala, semicolons are pretty much optional.
/*
* Scala is a pure object-oriented language in the sense that everything is an object, including numbers or functions.
* It differs from Java in that respect, since Java distinguishes primitive types (such as boolean and int) from reference types,
* and does not enable one to manipulate functions as values (at least prior to Java 8).
*/
// --- Scala has no primitive (e.g. int, boolean, etc.), rather everything is object
// --- Type inference in Scala - automatically infers the type of variable based on value, and no need to explicitly tell the compiler
val age = 34 // just declare any variable using "val" keyword
val maxHeartRate = 210 - age * 0.5
assert(maxHeartRate == 193.0)
// Scala - Less code compare to java for adding two objects
val total = BigDecimal(10) + BigDecimal(20)
assert(total == 10+20)
// --- Equality check in Scala (== vs. eq)
val eq1 = new String("A") == new String("A") // == - value comparison (result = true)
val eq2 = new String("A").eq(new String("A")) // eq() - reference comparison (result = false)
assert(eq1 != eq2)
// --- Scala's type-inferred local function
/*
* Define a method with the "def" keyword
*
* Scala permits concept of the local function that isn't present in Java. This is a function that is defined (and is only in scope) within another function.
* This can be a simple way to have a helper function that the developer doesn't want to expose to the outside world.
* In Java there would be no recourse but to use a private method and have the function visible to other methods within the same class.
*
* There is no explicit return type specified. The compiler can figure out that it returns Int by examining the return code of java.lang.String#length, which is int.
*/
def len(obj : AnyRef) = {
obj.toString.length
}
assert(len("Tirthal") == 7)
}
} | tirthalpatel/Learning-Scala | ScalaQuickStart/src/main/scala/com/tirthal/learning/java2scala/scalaway/classobj/Abc.scala | Scala | mit | 2,229 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.examples.scala.basics
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
/**
* Simple example for demonstrating the use of the Table API for a Word Count in Scala.
*
* This example shows how to:
* - Convert DataSets to Tables
* - Apply group, aggregate, select, and filter operations
*
*/
object WordCountTable {

  // *************************************************************************
  //     PROGRAM
  // *************************************************************************

  def main(args: Array[String]): Unit = {

    // Set up the batch execution environment and wrap it in a table environment.
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tableEnv = BatchTableEnvironment.create(env)

    // Three words, two of which are "hello".
    val words = env.fromElements(WC("hello", 1), WC("hello", 1), WC("ciao", 1))

    // Group by word, sum the frequencies, and keep only words seen twice.
    val frequentWords = words
      .toTable(tableEnv)
      .groupBy($"word")
      .select($"word", $"frequency".sum as "frequency")
      .filter($"frequency" === 2)

    frequentWords.toDataSet[WC].print()
  }

  // *************************************************************************
  //     USER DATA TYPES
  // *************************************************************************

  case class WC(word: String, frequency: Long)

}
| rmetzger/flink | flink-examples/flink-examples-table/src/main/scala/org/apache/flink/table/examples/scala/basics/WordCountTable.scala | Scala | apache-2.0 | 2,135 |
package es.weso.rbe.deriv
import es.weso.rbe._
import es.weso.collection._
case class DerivChecker[A](rbe: Rbe[A]) extends BagChecker[A] {
def check(bag:Bag[A], open: Boolean): Either[String, Bag[A]] = {
val d = rbe.derivBag(bag, open, rbe.symbols)
if (d.nullable) Right(bag)
else {
d match {
case Fail(msg) => Left(msg)
case _ => Left(s"Non nullable expression: $d, bag: $bag, rbe: $rbe, open: $open")
}
}
}
} | labra/rbe | src/main/scala/es/weso/rbe/deriv/DerivChecker.scala | Scala | mit | 465 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail.internal
import cats.Eq
import cats.effect.Sync
import cats.syntax.all._
import monix.tail.Iterant
import monix.tail.Iterant.{Concat, Halt, Last, Next, NextBatch, NextCursor, Scope, Suspend}
import monix.tail.batches.BatchCursor
import scala.collection.mutable.ArrayBuffer
private[tail] object IterantDistinctUntilChanged {
  /**
    * Implementation for `distinctUntilChangedByKey`.
    */
  def apply[F[_], A, K](self: Iterant[F, A], f: A => K)
    (implicit F: Sync[F], K: Eq[K]): Iterant[F, A] = {

    // Suspend so each run of the stream gets its own (stateful) Loop instance.
    Suspend(F.delay(new Loop(f).apply(self)))
  }

  // Visitor that walks the iterant, emitting an element only when its key
  // differs (per Eq[K]) from the key of the previously emitted element.
  // Stateful: must not be shared between runs (see the F.delay above).
  private class Loop[F[_], A, K](f: A => K)(implicit F: Sync[F], K: Eq[K])
    extends Iterant.Visitor[F, A, Iterant[F, A]] {

    // Key of the last emitted element; null means nothing emitted yet,
    // hence the `current == null` checks before each Eq comparison.
    private[this] var current: K = null.asInstanceOf[K]

    def visit(ref: Next[F, A]): Iterant[F, A] = {
      val a = ref.item
      val k = f(a)
      // Emit the element when the key changed, otherwise skip it.
      if (current == null || K.neqv(current, k)) {
        current = k
        Next(a, ref.rest.map(this))
      } else {
        Suspend(ref.rest.map(this))
      }
    }

    def visit(ref: NextBatch[F, A]): Iterant[F, A] =
      processCursor(ref.toNextCursor())

    def visit(ref: NextCursor[F, A]): Iterant[F, A] =
      processCursor(ref)

    def visit(ref: Suspend[F, A]): Iterant[F, A] =
      Suspend(ref.rest.map(this))

    def visit(ref: Concat[F, A]): Iterant[F, A] =
      ref.runMap(this)

    def visit[S](ref: Scope[F, S, A]): Iterant[F, A] =
      ref.runMap(this)

    def visit(ref: Last[F, A]): Iterant[F, A] = {
      val a = ref.item
      val k = f(a)
      if (current == null || K.neqv(current, k)) {
        current = k
        ref
      } else {
        Iterant.empty
      }
    }

    def visit(ref: Halt[F, A]): Iterant[F, A] =
      ref

    def fail(e: Throwable): Iterant[F, A] =
      Iterant.raiseError(e)

    // Shared handling for cursor-backed nodes; consumes up to
    // `recommendedBatchSize` elements per step.
    private def processCursor(self: NextCursor[F, A]): Iterant[F, A] = {
      val NextCursor(cursor, rest) = self

      if (!cursor.hasNext()) {
        // Cursor exhausted: continue with the rest of the stream.
        Suspend(rest.map(this))
      }
      else if (cursor.recommendedBatchSize <= 1) {
        // Element-by-element path; re-visit the same node for the next item.
        val a = cursor.next()
        val k = f(a)
        if (current == null || K.neqv(current, k)) {
          current = k
          Next(a, F.delay(this(self)))
        }
        else
          Suspend(F.delay(this(self)))
      }
      else {
        // Batched path: collect up to recommendedBatchSize surviving elements
        // into a buffer, then emit them as a single NextCursor node.
        val buffer = ArrayBuffer.empty[A]
        var count = cursor.recommendedBatchSize

        // We already know hasNext == true
        do {
          val a = cursor.next()
          val k = f(a)
          count -= 1

          if (current == null || K.neqv(current, k)) {
            current = k
            buffer += a
          }
        } while (count > 0 && cursor.hasNext())

        // If the cursor still has elements, keep processing this node next;
        // otherwise move on to the rest of the stream.
        val next =
          if (cursor.hasNext())
            F.delay(this(self))
          else
            rest.map(this)

        if (buffer.isEmpty)
          Suspend(next)
        else {
          val ref = BatchCursor.fromArray(buffer.toArray[Any]).asInstanceOf[BatchCursor[A]]
          NextCursor(ref, next)
        }
      }
    }
  }
}
| Wogan/monix | monix-tail/shared/src/main/scala/monix/tail/internal/IterantDistinctUntilChanged.scala | Scala | apache-2.0 | 3,692 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.io.File
import java.util.Properties
import kafka.api.KAFKA_0_11_0_IV0
import kafka.api.{KAFKA_0_10_0_IV1, KAFKA_0_9_0}
import kafka.server.KafkaConfig
import kafka.server.checkpoints.OffsetCheckpointFile
import kafka.utils._
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.record._
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.extension.ExtensionContext
import org.junit.jupiter.params.ParameterizedTest
import org.junit.jupiter.params.provider.{Arguments, ArgumentsProvider, ArgumentsSource}
import scala.annotation.nowarn
import scala.collection._
import scala.jdk.CollectionConverters._
/**
* This is an integration test that tests the fully integrated log cleaner
*/
class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrationTest {
val time = new MockTime()
val topicPartitions = Array(new TopicPartition("log", 0), new TopicPartition("log", 1), new TopicPartition("log", 2))
  @ParameterizedTest
  @ArgumentsSource(classOf[LogCleanerParameterizedIntegrationTest.AllCompressions])
  def cleanerTest(codec: CompressionType): Unit = {
    // Size the cleaner's max message size off the largest record appended below,
    // so that record is still accepted.
    val largeMessageKey = 20
    val (largeMessageValue, largeMessageSet) = createLargeSingleMessageSet(largeMessageKey, RecordBatch.CURRENT_MAGIC_VALUE, codec)
    val maxMessageSize = largeMessageSet.sizeInBytes
    cleaner = makeCleaner(partitions = topicPartitions, maxMessageSize = maxMessageSize)

    val log = cleaner.logs.get(topicPartitions(0))

    // Write 100 keys, 3 records each, so compaction has duplicates to remove.
    val appends = writeDups(numKeys = 100, numDups = 3, log = log, codec = codec)
    val startSize = log.size
    cleaner.startup()

    // Wait until the cleaner has cleaned up to the start of the active segment,
    // then verify the log shrank and the surviving records are the latest dups.
    val firstDirty = log.activeSegment.baseOffset
    checkLastCleaned("log", 0, firstDirty)
    val compactedSize = log.logSegments.map(_.size).sum
    assertTrue(startSize > compactedSize, s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize")

    checkLogAfterAppendingDups(log, startSize, appends)

    // Append one large record plus more duplicates and run a second cleaning pass.
    val appendInfo = log.appendAsLeader(largeMessageSet, leaderEpoch = 0)
    // move LSO forward to increase compaction bound
    log.updateHighWatermark(log.logEndOffset)
    val largeMessageOffset = appendInfo.firstOffset.get.messageOffset

    val dups = writeDups(startKey = largeMessageKey + 1, numKeys = 100, numDups = 3, log = log, codec = codec)
    val appends2 = appends ++ Seq((largeMessageKey, largeMessageValue, largeMessageOffset)) ++ dups
    val firstDirty2 = log.activeSegment.baseOffset
    checkLastCleaned("log", 0, firstDirty2)

    checkLogAfterAppendingDups(log, startSize, appends2)

    // simulate deleting a partition, by removing it from logs
    // force a checkpoint
    // and make sure its gone from checkpoint file
    cleaner.logs.remove(topicPartitions(0))
    cleaner.updateCheckpoints(logDir, partitionToRemove = Option(topicPartitions(0)))
    val checkpoints = new OffsetCheckpointFile(new File(logDir, cleaner.cleanerManager.offsetCheckpointFile)).read()
    // we expect partition 0 to be gone
    assertFalse(checkpoints.contains(topicPartitions(0)))
  }
  @ParameterizedTest
  @ArgumentsSource(classOf[LogCleanerParameterizedIntegrationTest.AllCompressions])
  def testCleansCombinedCompactAndDeleteTopic(codec: CompressionType): Unit = {
    // Topic configured with both cleanup policies: compaction AND time-based retention.
    val logProps  = new Properties()
    val retentionMs: Integer = 100000
    logProps.put(LogConfig.RetentionMsProp, retentionMs: Integer)
    logProps.put(LogConfig.CleanupPolicyProp, "compact,delete")

    // Helper: build a fresh cleaner/log, write duplicated keys, run the cleaner,
    // and assert the log was compacted. Returns the log plus the written records.
    def runCleanerAndCheckCompacted(numKeys: Int): (UnifiedLog, Seq[(Int, String, Long)]) = {
      cleaner = makeCleaner(partitions = topicPartitions.take(1), propertyOverrides = logProps, backOffMs = 100L)
      val log = cleaner.logs.get(topicPartitions(0))

      val messages = writeDups(numKeys = numKeys, numDups = 3, log = log, codec = codec)
      val startSize = log.size

      log.updateHighWatermark(log.logEndOffset)
      val firstDirty = log.activeSegment.baseOffset
      cleaner.startup()

      // should compact the log
      checkLastCleaned("log", 0, firstDirty)
      val compactedSize = log.logSegments.map(_.size).sum
      assertTrue(startSize > compactedSize, s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize")
      (log, messages)
    }

    val (log, _) = runCleanerAndCheckCompacted(100)

    // Set the last modified time to an old value to force deletion of old segments
    val endOffset = log.logEndOffset
    log.logSegments.foreach(_.lastModified = time.milliseconds - (2 * retentionMs))
    TestUtils.waitUntilTrue(() => log.logStartOffset == endOffset,
      "Timed out waiting for deletion of old segments")
    assertEquals(1, log.numberOfSegments)

    cleaner.shutdown()

    // run the cleaner again to make sure if there are no issues post deletion
    val (log2, messages) = runCleanerAndCheckCompacted(20)
    val read = readFromLog(log2)
    assertEquals(toMap(messages), toMap(read), "Contents of the map shouldn't change")
  }
  // Exercises compaction on a log written with message format V0, then with mixed
  // V0/V1/V2 batches, to verify the cleaner handles legacy and mixed magic values.
  @nowarn("cat=deprecation")
  @ParameterizedTest
  @ArgumentsSource(classOf[LogCleanerParameterizedIntegrationTest.ExcludeZstd])
  def testCleanerWithMessageFormatV0(codec: CompressionType): Unit = {
    val largeMessageKey = 20
    val (largeMessageValue, largeMessageSet) = createLargeSingleMessageSet(largeMessageKey, RecordBatch.MAGIC_VALUE_V0, codec)
    val maxMessageSize = codec match {
      case CompressionType.NONE => largeMessageSet.sizeInBytes
      case _ =>
        // the broker assigns absolute offsets for message format 0 which potentially causes the compressed size to
        // increase because the broker offsets are larger than the ones assigned by the client
        // adding `5` to the message set size is good enough for this test: it covers the increased message size while
        // still being less than the overhead introduced by the conversion from message format version 0 to 1
        largeMessageSet.sizeInBytes + 5
    }

    cleaner = makeCleaner(partitions = topicPartitions, maxMessageSize = maxMessageSize)

    val log = cleaner.logs.get(topicPartitions(0))
    val props = logConfigProperties(maxMessageSize = maxMessageSize)
    props.put(LogConfig.MessageFormatVersionProp, KAFKA_0_9_0.version)
    log.updateConfig(new LogConfig(props))

    val appends = writeDups(numKeys = 100, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0)
    val startSize = log.size
    cleaner.startup()

    val firstDirty = log.activeSegment.baseOffset
    checkLastCleaned("log", 0, firstDirty)
    val compactedSize = log.logSegments.map(_.size).sum
    assertTrue(startSize > compactedSize, s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize")

    checkLogAfterAppendingDups(log, startSize, appends)

    // Second round: more V0 duplicates plus one oversized V0 record, then switch the
    // log to the 0.11 format and append V1/V2 duplicates for mixed-format cleaning.
    val appends2: Seq[(Int, String, Long)] = {
      val dupsV0 = writeDups(numKeys = 40, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0)
      val appendInfo = log.appendAsLeader(largeMessageSet, leaderEpoch = 0)
      // move LSO forward to increase compaction bound
      log.updateHighWatermark(log.logEndOffset)
      val largeMessageOffset = appendInfo.firstOffset.map(_.messageOffset).get

      // also add some messages with version 1 and version 2 to check that we handle mixed format versions correctly
      props.put(LogConfig.MessageFormatVersionProp, KAFKA_0_11_0_IV0.version)
      log.updateConfig(new LogConfig(props))
      val dupsV1 = writeDups(startKey = 30, numKeys = 40, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V1)
      val dupsV2 = writeDups(startKey = 15, numKeys = 5, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V2)
      appends ++ dupsV0 ++ Seq((largeMessageKey, largeMessageValue, largeMessageOffset)) ++ dupsV1 ++ dupsV2
    }
    val firstDirty2 = log.activeSegment.baseOffset
    checkLastCleaned("log", 0, firstDirty2)

    checkLogAfterAppendingDups(log, startSize, appends2)
  }
  // Writes compressed wrapper message sets (many records in one message) under the
  // V0 and V1 formats and checks the cleaner compacts the nested records correctly.
  @nowarn("cat=deprecation")
  @ParameterizedTest
  @ArgumentsSource(classOf[LogCleanerParameterizedIntegrationTest.ExcludeZstd])
  def testCleaningNestedMessagesWithV0AndV1(codec: CompressionType): Unit = {
    val maxMessageSize = 192
    cleaner = makeCleaner(partitions = topicPartitions, maxMessageSize = maxMessageSize, segmentSize = 256)

    val log = cleaner.logs.get(topicPartitions(0))
    val props = logConfigProperties(maxMessageSize = maxMessageSize, segmentSize = 256)
    props.put(LogConfig.MessageFormatVersionProp, KAFKA_0_9_0.version)
    log.updateConfig(new LogConfig(props))

    // with compression enabled, these messages will be written as a single message containing
    // all of the individual messages
    var appendsV0 = writeDupsSingleMessageSet(numKeys = 2, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0)
    appendsV0 ++= writeDupsSingleMessageSet(numKeys = 2, startKey = 3, numDups = 2, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0)

    props.put(LogConfig.MessageFormatVersionProp, KAFKA_0_10_0_IV1.version)
    log.updateConfig(new LogConfig(props))

    var appendsV1 = writeDupsSingleMessageSet(startKey = 4, numKeys = 2, numDups = 2, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V1)
    appendsV1 ++= writeDupsSingleMessageSet(startKey = 4, numKeys = 2, numDups = 2, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V1)
    appendsV1 ++= writeDupsSingleMessageSet(startKey = 6, numKeys = 2, numDups = 2, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V1)

    val appends = appendsV0 ++ appendsV1

    val startSize = log.size
    cleaner.startup()

    val firstDirty = log.activeSegment.baseOffset
    assertTrue(firstDirty > appendsV0.size) // ensure we clean data from V0 and V1

    checkLastCleaned("log", 0, firstDirty)
    val compactedSize = log.logSegments.map(_.size).sum
    assertTrue(startSize > compactedSize, s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize")

    checkLogAfterAppendingDups(log, startSize, appends)
  }
  // Starts the cleaner with a deliberately tiny io buffer so nothing can be cleaned,
  // then reconfigures it at runtime (more threads, larger buffer) and verifies that
  // cleaning proceeds under the new configuration.
  @ParameterizedTest
  @ArgumentsSource(classOf[LogCleanerParameterizedIntegrationTest.AllCompressions])
  def cleanerConfigUpdateTest(codec: CompressionType): Unit = {
    val largeMessageKey = 20
    val (_, largeMessageSet) = createLargeSingleMessageSet(largeMessageKey, RecordBatch.CURRENT_MAGIC_VALUE, codec)
    val maxMessageSize = largeMessageSet.sizeInBytes

    cleaner = makeCleaner(partitions = topicPartitions, backOffMs = 1, maxMessageSize = maxMessageSize,
      cleanerIoBufferSize = Some(1))
    val log = cleaner.logs.get(topicPartitions(0))

    writeDups(numKeys = 100, numDups = 3, log = log, codec = codec)
    val startSize = log.size
    cleaner.startup()
    assertEquals(1, cleaner.cleanerCount)

    // Verify no cleaning with LogCleanerIoBufferSizeProp=1
    val firstDirty = log.activeSegment.baseOffset
    val topicPartition = new TopicPartition("log", 0)
    cleaner.awaitCleaned(topicPartition, firstDirty, maxWaitMs = 10)
    assertTrue(cleaner.cleanerManager.allCleanerCheckpoints.isEmpty, "Should not have cleaned")

    // Builds a broker KafkaConfig embedding the given cleaner settings, as needed
    // by LogCleaner.reconfigure below.
    def kafkaConfigWithCleanerConfig(cleanerConfig: CleanerConfig): KafkaConfig = {
      val props = TestUtils.createBrokerConfig(0, "localhost:2181")
      props.put(KafkaConfig.LogCleanerThreadsProp, cleanerConfig.numThreads.toString)
      props.put(KafkaConfig.LogCleanerDedupeBufferSizeProp, cleanerConfig.dedupeBufferSize.toString)
      props.put(KafkaConfig.LogCleanerDedupeBufferLoadFactorProp, cleanerConfig.dedupeBufferLoadFactor.toString)
      props.put(KafkaConfig.LogCleanerIoBufferSizeProp, cleanerConfig.ioBufferSize.toString)
      props.put(KafkaConfig.MessageMaxBytesProp, cleanerConfig.maxMessageSize.toString)
      props.put(KafkaConfig.LogCleanerBackoffMsProp, cleanerConfig.backOffMs.toString)
      props.put(KafkaConfig.LogCleanerIoMaxBytesPerSecondProp, cleanerConfig.maxIoBytesPerSecond.toString)
      KafkaConfig.fromProps(props)
    }

    // Verify cleaning done with larger LogCleanerIoBufferSizeProp
    val oldConfig = kafkaConfigWithCleanerConfig(cleaner.currentConfig)
    val newConfig = kafkaConfigWithCleanerConfig(CleanerConfig(numThreads = 2,
      dedupeBufferSize = cleaner.currentConfig.dedupeBufferSize,
      dedupeBufferLoadFactor = cleaner.currentConfig.dedupeBufferLoadFactor,
      ioBufferSize = 100000,
      maxMessageSize = cleaner.currentConfig.maxMessageSize,
      maxIoBytesPerSecond = cleaner.currentConfig.maxIoBytesPerSecond,
      backOffMs = cleaner.currentConfig.backOffMs))

    cleaner.reconfigure(oldConfig, newConfig)
    assertEquals(2, cleaner.cleanerCount)

    checkLastCleaned("log", 0, firstDirty)
    val compactedSize = log.logSegments.map(_.size).sum
    assertTrue(startSize > compactedSize, s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize")
  }
private def checkLastCleaned(topic: String, partitionId: Int, firstDirty: Long): Unit = {
// wait until cleaning up to base_offset, note that cleaning happens only when "log dirty ratio" is higher than
// LogConfig.MinCleanableDirtyRatioProp
val topicPartition = new TopicPartition(topic, partitionId)
cleaner.awaitCleaned(topicPartition, firstDirty)
val lastCleaned = cleaner.cleanerManager.allCleanerCheckpoints(topicPartition)
assertTrue(lastCleaned >= firstDirty, s"log cleaner should have processed up to offset $firstDirty, but lastCleaned=$lastCleaned")
}
private def checkLogAfterAppendingDups(log: UnifiedLog, startSize: Long, appends: Seq[(Int, String, Long)]): Unit = {
val read = readFromLog(log)
assertEquals(toMap(appends), toMap(read), "Contents of the map shouldn't change")
assertTrue(startSize > log.size)
}
private def toMap(messages: Iterable[(Int, String, Long)]): Map[Int, (String, Long)] = {
messages.map { case (key, value, offset) => key -> (value, offset) }.toMap
}
private def readFromLog(log: UnifiedLog): Iterable[(Int, String, Long)] = {
for (segment <- log.logSegments; deepLogEntry <- segment.log.records.asScala) yield {
val key = TestUtils.readString(deepLogEntry.key).toInt
val value = TestUtils.readString(deepLogEntry.value)
(key, value, deepLogEntry.offset)
}
}
  // Appends numKeys * numDups key/value records as a single (possibly compressed)
  // message set, advances the high watermark, and returns one (key, value, offset)
  // tuple per individual record inside the set.
  private def writeDupsSingleMessageSet(numKeys: Int, numDups: Int, log: UnifiedLog, codec: CompressionType,
                                        startKey: Int = 0, magicValue: Byte): Seq[(Int, String, Long)] = {
    // Payloads come from the shared monotonically-increasing counter so values are unique.
    val kvs = for (_ <- 0 until numDups; key <- startKey until (startKey + numKeys)) yield {
      val payload = counter.toString
      incCounter()
      (key, payload)
    }

    val records = kvs.map { case (key, payload) =>
      new SimpleRecord(key.toString.getBytes, payload.getBytes)
    }

    val appendInfo = log.appendAsLeader(MemoryRecords.withRecords(magicValue, codec, records: _*), leaderEpoch = 0)
    // move LSO forward to increase compaction bound
    log.updateHighWatermark(log.logEndOffset)
    // Offsets inside the set are consecutive, so pair them back with the inputs in order.
    val offsets = appendInfo.firstOffset.get.messageOffset to appendInfo.lastOffset

    kvs.zip(offsets).map { case (kv, offset) => (kv._1, kv._2, offset) }
  }
}
object LogCleanerParameterizedIntegrationTest {

  /** Supplies every compression codec as a JUnit test argument. */
  class AllCompressions extends ArgumentsProvider {
    override def provideArguments(context: ExtensionContext): java.util.stream.Stream[_ <: Arguments] = {
      val args = CompressionType.values.map(codec => Arguments.of(codec))
      java.util.stream.Stream.of(args: _*)
    }
  }

  // zstd compression is not supported with older message formats (i.e supported by V0 and V1)
  /** Supplies every codec except zstd. */
  class ExcludeZstd extends ArgumentsProvider {
    override def provideArguments(context: ExtensionContext): java.util.stream.Stream[_ <: Arguments] = {
      val args = CompressionType.values.collect {
        case codec if codec != CompressionType.ZSTD => Arguments.of(codec)
      }
      java.util.stream.Stream.of(args: _*)
    }
  }
}
| TiVo/kafka | core/src/test/scala/unit/kafka/log/LogCleanerParameterizedIntegrationTest.scala | Scala | apache-2.0 | 16,692 |
package org.precompiler.scala101.ch03
/**
* Created by RL on 6/2/17.
*/
object StringDemo {

  /** Demonstrates triple-quoted multi-line strings and the s-interpolator. */
  def main(args: Array[String]): Unit = {
    // Raw multi-line literal: the embedded whitespace is part of the string.
    val rawQuery =
      """
        SELECT *
        FROM dual;
      """
    // stripMargin removes everything up to and including the '|' on each line.
    val marginQuery =
      """
        |SELECT *
        |FROM emp;
      """.stripMargin
    println(rawQuery)
    println(marginQuery)

    val discount = 10
    val price = 100
    println(s"Discount is $discount%")
    println(s"Price is $$${price * 0.9}")
  }
}
| precompiler/scala-101 | learning-scala/src/main/scala/org/precompiler/scala101/ch03/StringDemo.scala | Scala | apache-2.0 | 497 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.collector.processor
import com.twitter.zipkin.common.Span
import com.twitter.zipkin.storage.Storage
/**
 * Persists each incoming span to the configured storage backend.
 */
class StorageProcessor(storage: Storage) extends Processor[Span] {

  /** Stores the span; a failed store is reported through the inherited failure handler. */
  def process(span: Span) = {
    val stored = storage.storeSpan(span)
    stored.onFailure(failureHandler("storeSpan"))
  }

  /** Releases the underlying storage resources. */
  def shutdown() = storage.close()
}
| rodzyn0688/zipkin | zipkin-server/src/main/scala/com/twitter/zipkin/collector/processor/StorageProcessor.scala | Scala | apache-2.0 | 987 |
package com.catinthedark.lib
import com.badlogic.gdx.graphics.Texture
import com.badlogic.gdx.graphics.g2d.SpriteBatch
import com.badlogic.gdx.graphics.glutils.ShapeRenderer
import com.badlogic.gdx.graphics.glutils.ShapeRenderer.ShapeType
import com.badlogic.gdx.math.{Rectangle, Vector2}
/**
* Created by over on 13.12.14.
*/
object Magic {

  /**
   * Enriches [[SpriteBatch]] with convenience drawing helpers; made available
   * through the implicit conversion `richifySpriteBatch` below.
   */
  class RichSpriteBatch(val batch: SpriteBatch) {
    val debugBatch = new ShapeRenderer
    debugBatch.setProjectionMatrix(batch.getProjectionMatrix)

    /** Wraps `f` in a begin()/end() pair so callers cannot forget to close the batch. */
    def managed(f: SpriteBatch => Unit): Unit = {
      batch.begin()
      f(batch)
      batch.end()
    }

    /**
     * Use this method to draw some entities with real-view and physical position.
     * So in debug you can see debug rectangle for physical coordinates.
     * @param t - texture for real view
     * @param viewPos - container for real view position
     * @param physicalPos - container for physical position (currently unused; only
     *                      the commented-out debug overlay below would draw it)
     */
    def richDraw(t: Texture, viewPos: Rectangle, physicalPos: Rectangle): Unit = {
      batch.draw(t, viewPos.x, viewPos.y)
      //TODO: if some kind of debug??
      //debugBatch.begin(ShapeType.Line)
      //debugBatch.rect(viewPos.x, viewPos.y, viewPos.width, viewPos.height)
      //debugBatch.end()
    }

    /** Draws `tex` with its centre (on the requested axes) at (x, y). */
    def drawCentered(tex: Texture, x: Float, y: Float,
                     centerX: Boolean = true, centerY: Boolean = true) =
      batch.draw(tex,
        if (centerX) x - tex.getWidth / 2 else x,
        if (centerY) y - tex.getHeight / 2 else y
      )
  }

  // Implicit conversions. Explicit result types added: implicit methods without an
  // annotated result type are fragile under type inference and warned against by
  // the compiler/spec.
  implicit def richifySpriteBatch(batch: SpriteBatch): RichSpriteBatch = new RichSpriteBatch(batch)

  implicit def vector2ToTuple2(vec: Vector2): Tuple2[Float, Float] = (vec.x, vec.y)

  implicit def tuple2ToVector2(vec: Tuple2[Float, Float]): Vector2 = new Vector2(vec._1, vec._2)
}
| cat-in-the-dark/old48_33_game | src/main/scala/com/catinthedark/lib/Magic.scala | Scala | mit | 1,793 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.random
import java.nio.ByteBuffer
import java.util.{Random => JavaRandom}
import scala.util.hashing.MurmurHash3
/**
* This class implements a XORShift random number generator algorithm
* Source:
* Marsaglia, G. (2003). Xorshift RNGs. Journal of Statistical Software, Vol. 8, Issue 14.
* @see <a href="http://www.jstatsoft.org/v08/i14/paper">Paper</a>
* This implementation is approximately 3.5 times faster than
* {@link java.util.Random java.util.Random}, partly because of the algorithm, but also due
* to renouncing thread safety. JDK's implementation uses an AtomicLong seed, this class
* uses a regular Long. We can forgo thread safety since we use a new instance of the RNG
* for each thread.
*/
private[spark] class XORShiftRandom(init: Long) extends JavaRandom(init) {

  def this() = this(System.nanoTime)

  // 64-bit xorshift state; initial value is the hashed constructor seed.
  private var seed = XORShiftRandom.hashSeed(init)

  // we need to just override next - this will be called by nextInt, nextDouble,
  // nextGaussian, nextLong, etc.
  override protected def next(bits: Int): Int = {
    // One Marsaglia xorshift round: three shift/xor steps over the 64-bit state.
    var nextSeed = seed ^ (seed << 21)
    nextSeed ^= (nextSeed >>> 35)
    nextSeed ^= (nextSeed << 4)
    seed = nextSeed
    // Keep only the requested low-order bits. `.toInt` replaces the former
    // `asInstanceOf[Int]` cast — identical narrowing, clearer intent.
    (nextSeed & ((1L << bits) - 1)).toInt
  }

  // `: Unit =` replaces the deprecated procedure syntax `def setSeed(s: Long) { ... }`.
  override def setSeed(s: Long): Unit = {
    seed = XORShiftRandom.hashSeed(s)
  }
}
/** Companion holding the seed-hashing helper shared by all instances. */
private[spark] object XORShiftRandom {

  /**
   * Hash seeds to have 0/1 bits throughout.
   *
   * NOTE(review): the buffer is allocated with `java.lang.Long.SIZE`, which is 64 —
   * the size in *bits* — so the hashed array is 64 bytes: the 8-byte seed followed by
   * 56 zero bytes. `SIZE / 8` (8 bytes) was probably intended, but changing it now
   * would alter every generated seed and break reproducibility of existing streams.
   */
  private[random] def hashSeed(seed: Long): Long = {
    val bytes = ByteBuffer.allocate(java.lang.Long.SIZE).putLong(seed).array()
    // Two chained MurmurHash3 passes form the high and low 32-bit halves of the result.
    val lowBits = MurmurHash3.bytesHash(bytes)
    val highBits = MurmurHash3.bytesHash(bytes, lowBits)
    (highBits.toLong << 32) | (lowBits.toLong & 0xFFFFFFFFL)
  }
}
| yanboliang/spark | core/src/main/scala/org/apache/spark/util/random/XORShiftRandom.scala | Scala | apache-2.0 | 2,609 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import scala.beans.BeanInfo
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.ml.linalg.Vector
/**
 * :: Experimental ::
 *
 * Class that represents the features and labels of a data point.
 *
 * @param label Label for this data point.
 * @param features List of features for this data point.
 */
@Since("2.0.0")
@Experimental
@BeanInfo
case class LabeledPoint(@Since("2.0.0") label: Double, @Since("2.0.0") features: Vector) {

  // Renders as "(label,features)" with no space, e.g. "(1.0,[0.5,0.5])".
  override def toString: String = {
    s"($label,$features)"
  }
}
| gioenn/xSpark | mllib/src/main/scala/org/apache/spark/ml/feature/LabeledPoint.scala | Scala | apache-2.0 | 1,368 |
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.data
import java.io.Serializable
import java.util.{Map => JMap}
import org.apache.accumulo.core.client.mock.{MockConnector, MockInstance}
import org.apache.accumulo.core.client.security.tokens.{AuthenticationToken, PasswordToken}
import org.apache.accumulo.core.client.{Connector, ZooKeeperInstance}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapreduce.Job
import org.geotools.data.DataAccessFactory.Param
import org.geotools.data.DataStoreFactorySpi
import org.locationtech.geomesa.core.stats.StatWriter
import org.locationtech.geomesa.feature.FeatureEncoding
import org.locationtech.geomesa.security
import scala.collection.JavaConversions._
/**
 * GeoTools DataStoreFactorySpi for GeoMesa-on-Accumulo: turns a parameter map into
 * a configured AccumuloDataStore.
 */
class AccumuloDataStoreFactory extends DataStoreFactorySpi {

  import org.locationtech.geomesa.core.data.AccumuloDataStoreFactory._
  import org.locationtech.geomesa.core.data.AccumuloDataStoreFactory.params._

  // this is a pass-through required of the ancestor interface
  def createNewDataStore(params: JMap[String, Serializable]) = createDataStore(params)

  // Resolves the Accumulo connector, validates authorizations, and wires feature
  // encoding / statistics / caching options into a new data store instance.
  def createDataStore(params: JMap[String, Serializable]) = {

    val visStr = visibilityParam.lookUp(params).asInstanceOf[String]

    // Default visibility is the empty string, i.e. no visibility label applied on write.
    val visibility =
      if (visStr == null)
        ""
      else
        visStr

    val tableName = tableNameParam.lookUp(params).asInstanceOf[String]

    val useMock = java.lang.Boolean.valueOf(mockParam.lookUp(params).asInstanceOf[String])

    // A pre-built Connector may be passed directly (token is then null); otherwise one
    // is created from the instance/zookeepers/user/password parameters.
    val (connector, token) =
      if (params.containsKey(connParam.key)) (connParam.lookUp(params).asInstanceOf[Connector], null)
      else buildAccumuloConnector(params, useMock)

    // convert the connector authorizations into a string array - this is the maximum auths this connector can support
    val securityOps = connector.securityOperations
    val masterAuths = securityOps.getUserAuthorizations(connector.whoami)
    val masterAuthsStrings = masterAuths.map(b => new String(b))

    // get the auth params passed in as a comma-delimited string
    val configuredAuths = authsParam.lookupOpt[String](params).getOrElse("").split(",").filter(s => !s.isEmpty)

    // verify that the configured auths are valid for the connector we are using (fail-fast)
    configuredAuths.foreach(a => if(!masterAuthsStrings.contains(a) && !connector.isInstanceOf[MockConnector])
      throw new IllegalArgumentException(s"The authorization '$a' is not valid for the Accumulo connector being used"))

    // if no auths are specified we default to the connector auths
    // TODO would it be safer to default to no auths?
    val auths: List[String] =
      if (!configuredAuths.isEmpty)
        configuredAuths.toList
      else
        masterAuthsStrings.toList

    val authorizationsProvider = security.getAuthorizationsProvider(params, auths)

    val featureEncoding = featureEncParam.lookupOpt[String](params)
      .map(FeatureEncoding.withName)
      .getOrElse(DEFAULT_ENCODING)

    // stats defaults to true if not specified
    val collectStats = !useMock &&
      Option(statsParam.lookUp(params)).map(_.toString.toBoolean).forall(_ == true)

    // caching defaults to false if not specified
    val caching = Option(cachingParam.lookUp(params)).exists(_.toString.toBoolean)

    // The StatWriter mixin can only be added at construction time, hence two branches.
    if (collectStats) {
      new AccumuloDataStore(connector,
        token,
        tableName,
        authorizationsProvider,
        visibility,
        queryThreadsParam.lookupOpt(params),
        recordThreadsParam.lookupOpt(params),
        writeThreadsParam.lookupOpt(params),
        caching,
        featureEncoding) with StatWriter
    } else {
      new AccumuloDataStore(connector,
        token,
        tableName,
        authorizationsProvider,
        visibility,
        queryThreadsParam.lookupOpt(params),
        recordThreadsParam.lookupOpt(params),
        writeThreadsParam.lookupOpt(params),
        caching,
        featureEncoding)
    }
  }

  override def getDisplayName = "Accumulo (GeoMesa)"

  override def getDescription = "Apache Accumulo\u2122 distributed key/value store"

  // Parameters advertised to GeoTools for this factory (connection + tuning options).
  override def getParametersInfo =
    Array(
      instanceIdParam,
      zookeepersParam,
      userParam,
      passwordParam,
      authsParam,
      visibilityParam,
      tableNameParam,
      statsParam,
      cachingParam
    )

  def canProcess(params: JMap[String,Serializable]) = AccumuloDataStoreFactory.canProcess(params)

  override def isAvailable = true

  override def getImplementationHints = null
}
/** Companion: parameter definitions, connector construction, and MapReduce helpers. */
object AccumuloDataStoreFactory {

  import org.locationtech.geomesa.core.data.AccumuloDataStoreFactory.params._

  // Null-safe, typed Option lookup on GeoTools Params.
  implicit class RichParam(val p: Param) {
    def lookupOpt[A](params: JMap[String, Serializable]) =
      Option(p.lookUp(params)).asInstanceOf[Option[A]]
  }

  // All connection and tuning parameters understood by this factory.
  object params {
    val connParam           = new Param("connector", classOf[Connector], "The Accumulo connector", false)
    val instanceIdParam     = new Param("instanceId", classOf[String], "The Accumulo Instance ID", true)
    val zookeepersParam     = new Param("zookeepers", classOf[String], "Zookeepers", true)
    val userParam           = new Param("user", classOf[String], "Accumulo user", true)
    val passwordParam       = new Param("password", classOf[String], "Password", true)
    val authsParam          = org.locationtech.geomesa.security.authsParam
    val visibilityParam     = new Param("visibilities", classOf[String], "Accumulo visibilities to apply to all written data", false)
    val tableNameParam      = new Param("tableName", classOf[String], "The Accumulo Table Name", true)
    val queryThreadsParam   = new Param("queryThreads", classOf[Integer], "The number of threads to use per query", false)
    val recordThreadsParam  = new Param("recordThreads", classOf[Integer], "The number of threads to use for record retrieval", false)
    val writeThreadsParam   = new Param("writeThreads", classOf[Integer], "The number of threads to use for writing records", false)
    val statsParam          = new Param("collectStats", classOf[java.lang.Boolean], "Toggle collection of statistics", false)
    val cachingParam        = new Param("caching", classOf[java.lang.Boolean], "Toggle caching of results", false)
    val mockParam           = new Param("useMock", classOf[String], "Use a mock connection (for testing)", false)
    val featureEncParam     = new Param("featureEncoding", classOf[String], "The feature encoding format (kryo, avro or text). Default is Kryo", false, "kryo")
  }

  // Creates a (Connector, token) pair from the parameter map; uses a MockInstance
  // instead of a live ZooKeeper-backed instance when `useMock` is set.
  def buildAccumuloConnector(params: JMap[String,Serializable], useMock: Boolean): (Connector, AuthenticationToken) = {
    val zookeepers = zookeepersParam.lookUp(params).asInstanceOf[String]
    val instance = instanceIdParam.lookUp(params).asInstanceOf[String]
    val user = userParam.lookUp(params).asInstanceOf[String]
    val password = passwordParam.lookUp(params).asInstanceOf[String]

    val authToken = new PasswordToken(password.getBytes)
    if(useMock) {
      (new MockInstance(instance).getConnector(user, authToken), authToken)
    } else {
      (new ZooKeeperInstance(instance, zookeepers).getConnector(user, authToken), authToken)
    }
  }

  /**
   * Return true/false whether or not the catalog referenced by these params exists
   * already (aka the accumulo table has been created)
   */
  def catalogExists(params: JMap[String,Serializable], useMock: Boolean): Boolean = {
    val (conn, _) = buildAccumuloConnector(params, useMock)
    conn.tableOperations().exists(tableNameParam.lookUp(params).asInstanceOf[String])
  }

  // A parameter map is processable when it carries either an instance id or a connector.
  def canProcess(params: JMap[String,Serializable]): Boolean =
    params.containsKey(instanceIdParam.key) || params.containsKey(connParam.key)

  // Copies the connection parameters into a Hadoop job configuration.
  def configureJob(job: Job, params: JMap[String, Serializable]): Job = {
    val conf = job.getConfiguration

    conf.set(ZOOKEEPERS, zookeepersParam.lookUp(params).asInstanceOf[String])
    conf.set(INSTANCE_ID, instanceIdParam.lookUp(params).asInstanceOf[String])
    conf.set(ACCUMULO_USER, userParam.lookUp(params).asInstanceOf[String])
    conf.set(ACCUMULO_PASS, passwordParam.lookUp(params).asInstanceOf[String])
    conf.set(TABLE, tableNameParam.lookUp(params).asInstanceOf[String])
    // Optional parameters are only written when present.
    authsParam.lookupOpt[String](params).foreach(ap => conf.set(AUTHS, ap))
    visibilityParam.lookupOpt[String](params).foreach(vis => conf.set(VISIBILITY, vis))
    featureEncParam.lookupOpt[String](params).foreach(fep => conf.set(FEATURE_ENCODING, fep))

    job
  }

  // Reads back the parameters written by configureJob (inverse mapping).
  def getMRAccumuloConnectionParams(conf: Configuration): JMap[String, AnyRef] =
    Map(zookeepersParam.key -> conf.get(ZOOKEEPERS),
      instanceIdParam.key -> conf.get(INSTANCE_ID),
      userParam.key -> conf.get(ACCUMULO_USER),
      passwordParam.key -> conf.get(ACCUMULO_PASS),
      tableNameParam.key -> conf.get(TABLE),
      authsParam.key -> conf.get(AUTHS),
      visibilityParam.key -> conf.get(VISIBILITY),
      featureEncParam.key -> conf.get(FEATURE_ENCODING))
}
| mmatz-ccri/geomesa | geomesa-core/src/main/scala/org/locationtech/geomesa/core/data/AccumuloDataStoreFactory.scala | Scala | apache-2.0 | 9,576 |
// Compile-time scoping exercise: C's abstract type T must satisfy the bounds
// inherited from both A (T <: I) and B (T <: J), expressed as the intersection I with J.
object That {
  trait A {
    type T <: I
    trait I
  }
  trait B {
    type T <: J
    trait J
  }
  trait C extends A with B {
    type T <: I with J
  }
}
| AlexSikia/dotty | tests/untried/pos/scoping2.scala | Scala | bsd-3-clause | 207 |
/*
* Copyright 2015 eleflow.com.br.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eleflow.uberdata.core
import scala.collection.JavaConverters._
import java.io._
import eleflow.uberdata.core.listener.UberdataSparkListener
import eleflow.uberdata.core.data.Dataset
import eleflow.uberdata.core.util.ClusterSettings
import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
import org.apache.spark.sql.{SQLContext, SparkSession}
import org.apache.spark.sql.hive.thriftserver.HiveThriftServer2
import org.apache.spark.{SparkConf, SparkContext}
import ClusterSettings._
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types.DataType
import scala.annotation.tailrec
import scala.sys.process._
import scala.util.matching.Regex
import org.slf4j.Logger
import org.slf4j.LoggerFactory
/** Singleton access to the process-wide IUberdataContext. */
object IUberdataContext {
  // Mutable, process-wide Spark configuration used when building the singleton.
  var conf: SparkConf = new SparkConf

  // Lazy singleton: created with whatever `conf` holds at first access.
  private lazy val uc: IUberdataContext = new IUberdataContext(conf)

  def getUC: IUberdataContext = uc

  // NOTE(review): because `uc` is lazy, a config passed here only takes effect if
  // the singleton has not been materialized yet.
  def getUC(conf: SparkConf): IUberdataContext = {
    this.conf = conf
    uc
  }

  // Terminates the current context and re-creates the Spark session with `conf`.
  def getNewUC(conf: SparkConf = conf): IUberdataContext = {
    uc.terminate()
    this.conf = conf
    uc.sparkSession
    uc
  }
}
/**
* User: paulomagalhaes
* Date: 8/15/14 12:24 PM
*/
class IUberdataContext(@transient sparkConf: SparkConf) extends Serializable {
  // Auxiliary constructor kept for compatibility; the extra `data` argument is ignored.
  protected def this(sparkConf: SparkConf, data: String) = this(sparkConf)

  // @transient protected lazy val s3Client: AmazonS3 = new AmazonS3Client()

  // Library version, taken from the generated build metadata.
  val version: String = UberdataCoreVersion.gitVersion
  protected val basePath: String = "/"
  // Lazily-populated handles; None until the corresponding context has been created.
  @transient var _sqlContext: Option[SQLContext] = None
  @transient protected var sc: Option[SparkContext] = None
  // Shared SparkSession builder; full configuration is applied per access (see confBuild).
  protected lazy val builder: SparkSession.Builder = SparkSession.builder().
    appName(ClusterSettings.appName)
  // Master host of an auto-provisioned EC2 cluster, resolved lazily by `masterHost`.
  private var _masterHost: Option[String] = None
  val slf4jLogger: Logger = LoggerFactory.getLogger(IUberdataContext.getClass);

  // True once a SparkContext has been captured in `sc`.
  def initialized: Boolean = sc.isDefined

  def isContextDefined: Boolean = sc.isDefined
  // Stops the Spark session and, when no explicit master is configured (i.e. the
  // cluster was auto-provisioned), destroys the EC2 cluster via the spark-ec2 script.
  def terminate(): Unit = {
    clearContext()
    builder.getOrCreate().stop()
    val path = getSparkEc2Py
    // getOrElse is used for its side effect: destroy only when `master` is None.
    ClusterSettings.master.getOrElse(
      shellRun(Seq(path, "destroy", clusterName))
    )
    _masterHost = None
    ClusterSettings.resume = false
  }
  // Drops the cached SQLContext and stops the current session; marks the cluster as
  // resumable so a subsequent session reuses it instead of provisioning a new one.
  def clearContext(): Unit = {
    ClusterSettings.resume = true
    _sqlContext = None
    sparkSession.stop()
    //builder.getOrCreate().stop()
  }
  // Prints the provisioned cluster's master address via the spark-ec2 script (side effect only).
  def clusterInfo(): Unit = {
    val path = getSparkEc2Py
    shellRun(Seq(path, "get-master", clusterName))
  }
  // Runs an external command, logging both stdout and stderr; returns the captured
  // stdout. Note: lines are concatenated without separators, and stderr is logged
  // but not included in the returned string.
  def shellRun(command: Seq[String]): String = {
    val out = new StringBuilder
    val logger = ProcessLogger((o: String) => {
      out.append(o)
      slf4jLogger.info(o)
      //logInfo(o)
    }, (e: String) => {
      slf4jLogger.info(e)
      //logInfo(e)
    })
    command ! logger
    out.toString()
  }
  // Stops the current SparkContext, if any; a fresh one is created on next access.
  def reconnect(): Unit = {
    sc.foreach(_.stop())
  }
def getAllFilesRecursively(fullPath: Path): Seq[String] = {
val fs = fullPath.getFileSystem(new Configuration)
@tailrec
def iter(fs: FileSystem, paths: Seq[Path], result: Seq[String]): Seq[String] = paths match {
case path :: tail =>
val children: Seq[FileStatus] = try {
fs.listStatus(path)
} catch {
case e: FileNotFoundException =>
// listStatus throws FNFE if the dir is empty
Seq.empty[FileStatus]
}
val (files, directories) = children.partition(_.isFile)
iter(
fs,
tail ++ directories.map(_.getPath),
files.map(_.getPath.toString) ++ result
)
case _ =>
result
}
iter(fs, Seq(fullPath), Seq())
}
  // Materializes (or returns) the session from the fully-configured builder.
  def sparkSession: SparkSession = configuredBuilder.getOrCreate()

  // Builder with the current conf applied and Hive support enabled.
  def configuredBuilder: SparkSession.Builder = builder.config(confBuild).enableHiveSupport()
  @deprecated("user sparkSession instead")
  // Builds the SparkContext, registers the required jars on its classpath, and
  // attaches the Uberdata monitoring listener.
  def sparkContext: SparkContext = {
    val context = configuredBuilder
      .config("spark.sql.warehouse.dir", "file:///tmp/spark-warehouse").getOrCreate().sparkContext
    addClasspathToSparkContext(context)
    val listener = new UberdataSparkListener(context.getConf)
    context.addSparkListener(listener)
    context
  }
  // Chooses the conf strategy: an explicitly configured master means a provisioned
  // cluster; otherwise a new (EC2) cluster master is resolved.
  def confBuild: SparkConf = if (ClusterSettings.master.isDefined) {
    createSparkContextForProvisionedCluster(sparkConf)
  } else {
    createSparkContextForNewCluster(sparkConf)
  }
def addClasspathToSparkContext(context: SparkContext): Unit = {
val sqoop = "org.apache.sqoop.sqoop-.*jar".r
val jodaJar = "joda-time.joda-time-.*jar".r
val eleflowJar = "eleflow.*jar".r
val guavaJar = "com.google.guava.*".r
val mySqlDriver = "mysql-connector-java.*".r
val oracle = "ojdbc6.*".r
val sparkts = "com.cloudera.sparkts.*jar".r
val xgboost = "ml.dmlc.*xgboost4j.*jar".r
val csv = ".*csv.*jar".r
val iuberdata = "iuberdata.*jar".r
val urls = this.getClass.getClassLoader.asInstanceOf[java.net.URLClassLoader].getURLs
val jarUrls = urls.filter(
url =>
sqoop.findFirstIn(url.getFile).isDefined
|| jodaJar.findFirstIn(url.getFile).isDefined
|| eleflowJar.findFirstIn(url.getFile).isDefined
|| guavaJar.findFirstIn(url.getFile).isDefined
|| mySqlDriver.findFirstIn(url.getFile).isDefined
|| oracle.findFirstIn(url.getFile).isDefined
|| sparkts.findFirstIn(url.getFile).isDefined
|| xgboost.findFirstIn(url.getFile).isDefined
|| csv.findFirstIn(url.getFile).isDefined
|| iuberdata.findFirstIn(url.getFile).isDefined
)
jarUrls.foreach { url =>
//logInfo(s"adding ${url.getPath} to spark context jars")
slf4jLogger.info(s"adding ${url.getPath} to spark context jars")
context.addJar(url.getPath)
}
ClusterSettings.jarsToBeAdded.foreach(context.addJar)
}
  /** Points `conf` at the standalone master of the freshly provisioned cluster
    * (provisioning it on first access through [[masterHost]]) and applies the
    * common configuration.
    */
  def createSparkContextForNewCluster(conf: SparkConf): SparkConf = {
    //log.info(s"connecting to $masterHost")
    slf4jLogger.info(s"connecting to $masterHost")
    conf.setMaster(s"spark://$masterHost:7077")
    confSetup(conf)
  }
def masterHost: String = {
_masterHost match {
case Some(host) => host
case None =>
initHostNames()
_masterHost.get
}
}
  /** Provisions the cluster (if needed) and caches the resulting master host. */
  def initHostNames(): Unit = {
    _masterHost = createCluster
  }
def createCluster: Option[String] = {
val path = getSparkEc2Py
val mandatory = Seq(
path,
"--hadoop-major-version",
hadoopVersion,
"--master-instance-type",
masterInstanceType,
"--slaves",
coreInstanceCount.toString,
"--instance-type",
coreInstanceType
)
val command = mandatory ++ (ec2KeyName match {
case None => Seq[String]()
case Some(keyName) => Seq("--key-pair", keyName)
}) ++ (spotPriceFactor match {
case None => Seq[String]()
case Some(spotPrice) => Seq("--spot-price", spotPrice)
}) ++ (region match {
case None => Seq[String]()
case Some(awsRegion) => Seq("--region", awsRegion)
}) ++ (profile match {
case None => Seq[String]()
case Some(awsProfile) => Seq("--profile", awsProfile)
}) ++ (if (resume) Seq("--resume") else Seq())
val output = shellRun(command ++ Seq("launch", clusterName))
//log.info(s"Output:: $output")
slf4jLogger.info(s"Output:: $output")
val pattern = new Regex(
"Spark standalone cluster started at http://([^:]+):8080"
)
val host = pattern.findAllIn(output).matchData.map(_.group(1)).next
Some(host)
}
def masterHost_(host: String): Unit = _masterHost = Some(host)
private def confSetup(conf: SparkConf): SparkConf = {
ClusterSettings.additionalConfs.map {
case (key, value) => conf.set(key, value)
}
conf.set("spark.app.name", ClusterSettings.appName)
conf.set("spark.sql.parquet.compression.codec", "snappy")
conf.set("spark.local.dir", ClusterSettings.localDir)
conf.set("spark.externalBlockStore.baseDir", ClusterSettings.baseDir)
conf.set("spark.task.cpus", ClusterSettings.taskCpus.toString)
ClusterSettings.defaultParallelism.map(
value => conf.set("spark.default.parallelism", value.toString)
)
ClusterSettings.kryoBufferMaxSize.map(
value => conf.set("spark.kryoserializer.buffer.max", value.toString)
)
//according to keo, in Making Sense of Spark Performance webcast, this codec is better than default
conf.set("spark.io.compression.codec", "lzf")
conf.set("spark.driver.maxResultSize", ClusterSettings.maxResultSize)
conf.set(
"spark.serializer",
ClusterSettings.serializer.getOrElse(
"org.apache.spark.serializer.KryoSerializer"
)
)
val defaultConfStream =
this.getClass.getClassLoader.getResourceAsStream("spark-defaults.conf")
if (defaultConfStream != null) {
val defaultConf = IOUtils.readLines(defaultConfStream)
defaultConf.asScala.map { line =>
val keyValue = line.split("\\s+")
if (keyValue.size == 2)
conf.set(keyValue(0), keyValue(1))
}
}
//according to keo, in Making Sense of Spark Performance webcast, this codec is better than default
conf.set("spark.io.compression.codec", "lzf")
conf.set("spark.driver.maxResultSize", ClusterSettings.maxResultSize)
ClusterSettings.executorMemory.foreach(
conf.set("spark.executor.memory", _)
)
conf
}
  /** Uses the externally supplied master URL from [[ClusterSettings]] and
    * applies the common configuration.
    */
  def createSparkContextForProvisionedCluster(conf: SparkConf): SparkConf = {
    slf4jLogger.info("connecting to localhost")
    conf.setMaster(ClusterSettings.master.get)
    confSetup(conf)
  }
  /** Runs a SQL statement against the managed [[SQLContext]]. */
  def sql(sql: String): DataFrame = {
    sqlContext.sql(sql)
  }
  /** Lazily initialised SQLContext. On first access, when the master is not
    * YARN, a Hive Thrift server is also started against the context so
    * external JDBC clients can connect.
    */
  def sqlContext: SQLContext = _sqlContext match {
    case None =>
      // Forces session/context creation before inspecting the configuration.
      sparkSession.sparkContext
      _sqlContext = if (!sparkConf.get("spark.master").startsWith("yarn")) {
        val context = sparkSession.sqlContext
        HiveThriftServer2.startWithContext(context)
        Some(context)
      } else Some(sparkSession.sqlContext)
      _sqlContext.get
    case Some(ctx) => ctx
  }
  /** Loads a delimited file as a [[Dataset]] and applies the given column types.
    *
    * NOTE(review): the result of `applyColumnTypes` is discarded — if it returns
    * a transformed copy instead of mutating in place, the schema is never
    * applied; confirm against Dataset's implementation.
    */
  def load(file: String, separator: String, loadSchema: Seq[DataType]): Dataset = {
    val fileDataSet = Dataset(this, file, separator)
    fileDataSet.applyColumnTypes(loadSchema)
    fileDataSet
  }
  /** Loads a delimited file as a [[Dataset]], defaulting to comma separation. */
  def load(file: String, separator: String = ","): Dataset =
    Dataset(this, file, separator)
private def copyFromClasspath2Tmp(filePath: String) = {
val scriptPath = System.getProperty("java.io.tmpdir")
val classLoader: ClassLoader = getClass.getClassLoader
val out: File = new File(s"$scriptPath/$filePath")
if (out.exists && out.isDirectory) {
throw new RuntimeException(
"Can't create python script " + out.getAbsolutePath
)
}
if (!out.getParentFile.exists()) {
out.getParentFile.mkdirs()
}
try {
val outStream: FileOutputStream = new FileOutputStream(out)
IOUtils.copy(classLoader.getResourceAsStream(filePath), outStream)
outStream.close()
} catch {
case e: IOException =>
throw new RuntimeException(e)
}
out
}
  /** Extracts the bundled spark_ec2.py (and the ec2-variables.sh it depends on)
    * from the classpath into the temp directory and returns the script's path.
    */
  private def getSparkEc2Py = {
    // Copied only for its side effect: spark_ec2.py expects this file on disk.
    copyFromClasspath2Tmp(
      "python/deploy.generic/root/spark-ec2/ec2-variables.sh"
    ).toString
    val path = copyFromClasspath2Tmp("python/spark_ec2.py")
    path.setExecutable(true)
    //log.info(s"spark_ec2.py in $path")
    slf4jLogger.info(s"spark_ec2.py in $path")
    path.toString
  }
}
| eleflow/uberdata | iuberdata_core/src/main/scala/eleflow/uberdata/core/IUberdataContext.scala | Scala | apache-2.0 | 11,413 |
package org.scalawiki.dto.cmd

/** Marker trait for parameters that control how an API action affects the
  * user's watchlist.
  */
trait WatchParam[+T] extends Parameter[T]

/** Deprecated boolean `watch` flag (superseded by the `watchlist` parameter). */
case class Watch(override val arg: Boolean = true) extends BooleanParameter("watch",
  "Add the page to your watchlist. Deprecated. Use the watchlist argument") with WatchParam[Boolean]

/** Deprecated boolean `unwatch` flag (superseded by the `watchlist` parameter). */
case class UnWatch(override val arg: Boolean = true) extends BooleanParameter("unwatch",
  "Remove the page from your watchlist. Deprecated. Use the watchlist argument") with WatchParam[Boolean]

// TODO single Enum value arg
/** The `watchlist` enum parameter: how the watchlist is affected by this edit. */
case class WatchList(override val args: WatchListArg*) extends EnumParameter[WatchListArg]("watchlist",
  "Specify how the watchlist is affected by this edit") with WatchParam[WatchListArg]

/** Base type for the allowed values of the `watchlist` parameter. */
trait WatchListArg extends EnumArg[WatchListArg] { val param = WatchList }

object WLWatch extends EnumArgument[WatchListArg]("watch", "add the page to the watchlist.") with WatchListArg
object WLUnWatch extends EnumArgument[WatchListArg]("unwatch", "remove the page from the watchlist.") with WatchListArg
object WLPreferences extends EnumArgument[WatchListArg]("preferences", "use the preference settings (Default).") with WatchListArg
object WLNoChange extends EnumArgument[WatchListArg]("nochange", "don't change the watchlist.") with WatchListArg
/**
* Copyright (C) 2018 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fr.xbl
import org.orbeon.dom.{Element, QName}
import org.orbeon.oxf.fr.library.FRComponentParamSupport
import org.orbeon.oxf.fr.{AppForm, XMLNames}
import org.orbeon.oxf.util.CollectionUtils._
import org.orbeon.oxf.util.StringUtils._
import org.orbeon.oxf.xforms.analysis.PartAnalysisForXblSupport
import org.orbeon.oxf.xforms.xbl.XBLSupport
object FormRunnerXblSupport extends XBLSupport {

  private val FRKeepIfParamQName      = QName("keep-if-param-non-blank", XMLNames.FRNamespace)
  private val FRKeepIfDesignTimeQName = QName("keep-if-design-time", XMLNames.FRNamespace)

  /** Decides whether an XBL template element must be kept, based on two
    * optional control attributes on `elem`:
    *
    *  - `fr:keep-if-param-non-blank`: keep only when the named component
    *    parameter (from the bound element's attributes, or from metadata and
    *    properties) has a non-blank value;
    *  - `fr:keep-if-design-time`: keep only at design time (`"true"`) or only
    *    at runtime (`"false"`).
    *
    * An element without either attribute is always kept.
    */
  def keepElement(
    partAnalysisCtx : PartAnalysisForXblSupport,
    boundElement    : Element,
    directNameOpt   : Option[QName],
    elem            : Element
  ): Boolean = {

    // Parameter value taken directly from the bound element's attributes.
    def fromAttribute(paramName: QName) =
      boundElement.attributeValueOpt(paramName)

    // Parameter value resolved from form metadata and properties.
    def fromMetadataAndProperties(paramName: QName) =
      FRComponentParamSupport.fromMetadataAndProperties(
        partAnalysis  = partAnalysisCtx,
        directNameOpt = directNameOpt,
        paramName     = paramName
      ) map
        (_.getStringValue)

    def keepIfParamNonBlank =
      elem.attributeValueOpt(FRKeepIfParamQName) match {
        case Some(att) =>
          val paramName = QName(att)
          fromAttribute(paramName) orElse
            fromMetadataAndProperties(paramName) exists
            (_.nonAllBlank)
        case None => true
      }

    def isDesignTime =
      partAnalysisCtx.ancestorIterator.lastOption() flatMap
        FRComponentParamSupport.findConstantMetadataRootElem flatMap
        FRComponentParamSupport.appFormFromMetadata contains
        AppForm.FormBuilder

    def keepIfDesignTime =
      elem.attributeValueOpt(FRKeepIfDesignTimeQName) match {
        case Some("true")  => isDesignTime
        case Some("false") => ! isDesignTime
        case _             => true
      }

    // De Morgan simplification of the original `! (! a || ! b)`.
    keepIfParamNonBlank && keepIfDesignTime
  }
}
| orbeon/orbeon-forms | form-runner/jvm/src/main/scala/org/orbeon/oxf/fr/xbl/FormRunnerXblSupport.scala | Scala | lgpl-2.1 | 2,663 |
/*
* Copyright 2015-2018 Snowflake Computing
* Copyright 2015 Databricks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.snowflake.spark.snowflake
/**
 * Wrapper class for representing the name of a Snowflake table.
 *
 * Note, we don't do any escaping/unescaping for Snowflake tables,
 * we expect the user to do it.
 */
private[snowflake] case class TableName(name: String) {
  /** The raw table name, unquoted and unescaped. */
  override def toString: String = name

  /** Wraps the name as a statement [[Identifier]] for SQL generation. */
  def toStatement: Identifier = Identifier(name)
}
| snowflakedb/spark-snowflake | src/main/scala/net/snowflake/spark/snowflake/TableName.scala | Scala | apache-2.0 | 1,004 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io.File
import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Paths}
import scala.sys.process._
import scala.util.control.NonFatal
import org.apache.commons.lang3.{JavaVersion, SystemUtils}
import org.apache.hadoop.conf.Configuration
import org.scalatest.time.Span
import org.scalatest.time.SpanSugar._
import org.apache.spark.{SparkConf, TestUtils}
import org.apache.spark.deploy.SparkSubmitTestUtils
import org.apache.spark.internal.config.MASTER_REST_SERVER_ENABLED
import org.apache.spark.internal.config.UI.UI_ENABLED
import org.apache.spark.launcher.JavaModuleOptions
import org.apache.spark.sql.{QueryTest, Row, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.CatalogTableType
import org.apache.spark.sql.internal.StaticSQLConf.WAREHOUSE_PATH
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.tags.{ExtendedHiveTest, SlowHiveTest}
import org.apache.spark.util.Utils
/**
 * Test HiveExternalCatalog backward compatibility.
 *
 * Note that, this test suite will automatically download spark binary packages of different
 * versions to a local directory. If the `spark.test.cache-dir` system property is defined, this
 * directory will be used. If there is already a spark folder with expected version under this
 * local directory, e.g. `/{cache-dir}/spark-2.0.3`, downloading for this spark version will be
 * skipped. If the system property is not present, a temporary directory will be used and cleaned
 * up after the test.
 */
@SlowHiveTest
@ExtendedHiveTest
class HiveExternalCatalogVersionsSuite extends SparkSubmitTestUtils {
  import HiveExternalCatalogVersionsSuite._

  override protected val defaultSparkSubmitTimeout: Span = 5.minutes
  private val wareHousePath = Utils.createTempDir(namePrefix = "warehouse")
  private val tmpDataDir = Utils.createTempDir(namePrefix = "test-data")
  // For local test, you can set `spark.test.cache-dir` to a static value like `/tmp/test-spark`, to
  // avoid downloading Spark of different versions in each run.
  private val sparkTestingDir = Option(System.getProperty(SPARK_TEST_CACHE_DIR_SYSTEM_PROPERTY))
    .map(new File(_)).getOrElse(Utils.createTempDir(namePrefix = "test-spark"))
  private val unusedJar = TestUtils.createJarWithClasses(Seq.empty)

  // Hive 1.2.1 metastore jars cannot run on Java 9+, so use the builtin version there.
  val hiveVersion = if (SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_9)) {
    HiveUtils.builtinHiveVersion
  } else {
    "1.2.1"
  }

  override def afterAll(): Unit = {
    try {
      Utils.deleteRecursively(wareHousePath)
      Utils.deleteRecursively(tmpDataDir)
      // Only delete sparkTestingDir if it wasn't defined to a static location by the system prop
      if (Option(System.getProperty(SPARK_TEST_CACHE_DIR_SYSTEM_PROPERTY)).isEmpty) {
        Utils.deleteRecursively(sparkTestingDir)
      }
    } finally {
      super.afterAll()
    }
  }

  // Downloads and unpacks the given Spark release into `path`, trying Apache
  // mirrors first and falling back to the archive; fails the suite if no site works.
  private def tryDownloadSpark(version: String, path: String): Unit = {
    // Try a few mirrors first; fall back to Apache archive
    val mirrors =
      (0 until 2).flatMap { _ =>
        try {
          Some(getStringFromUrl("https://www.apache.org/dyn/closer.lua?preferred=true"))
        } catch {
          // If we can't get a mirror URL, skip it. No retry.
          case _: Exception => None
        }
      }
    val sites =
      mirrors.distinct :+ "https://archive.apache.org/dist" :+ PROCESS_TABLES.releaseMirror
    logInfo(s"Trying to download Spark $version from $sites")
    for (site <- sites) {
      val filename = if (version.startsWith("3")) {
        s"spark-$version-bin-hadoop3.2.tgz"
      } else {
        s"spark-$version-bin-hadoop2.7.tgz"
      }
      val url = s"$site/spark/spark-$version/$filename"
      logInfo(s"Downloading Spark $version from $url")
      try {
        getFileFromUrl(url, path, filename)
        val downloaded = new File(sparkTestingDir, filename).getCanonicalPath
        val targetDir = new File(sparkTestingDir, s"spark-$version").getCanonicalPath

        Seq("mkdir", targetDir).!
        val exitCode = Seq("tar", "-xzf", downloaded, "-C", targetDir, "--strip-components=1").!
        Seq("rm", downloaded).!

        // For a corrupted file, `tar` returns non-zero values. However, we also need to check
        // the extracted file because `tar` returns 0 for empty file.
        val sparkSubmit = new File(sparkTestingDir, s"spark-$version/bin/spark-submit")
        if (exitCode == 0 && sparkSubmit.exists()) {
          return
        } else {
          Seq("rm", "-rf", targetDir).!
        }
      } catch {
        case ex: Exception =>
          logWarning(s"Failed to download Spark $version from $url: ${ex.getMessage}")
      }
    }
    fail(s"Unable to download Spark $version")
  }

  // Absolute path for a per-test data directory under tmpDataDir.
  private def genDataDir(name: String): String = {
    new File(tmpDataDir, name).getCanonicalPath
  }

  // Fetches `urlString` into targetDir/filename, overwriting any existing file.
  private def getFileFromUrl(urlString: String, targetDir: String, filename: String): Unit = {
    val conf = new SparkConf
    // if the caller passes the name of an existing file, we want doFetchFile to write over it with
    // the contents from the specified url.
    conf.set("spark.files.overwrite", "true")
    val hadoopConf = new Configuration

    val outDir = new File(targetDir)
    if (!outDir.exists()) {
      outDir.mkdirs()
    }

    // propagate exceptions up to the caller of getFileFromUrl
    Utils.doFetchFile(urlString, outDir, filename, conf, hadoopConf)
  }

  // Downloads `urlString` into a temp file and returns its contents as UTF-8 text.
  private def getStringFromUrl(urlString: String): String = {
    val contentFile = File.createTempFile("string-", ".txt")
    contentFile.deleteOnExit()

    // exceptions will propagate to the caller of getStringFromUrl
    getFileFromUrl(urlString, contentFile.getParent, contentFile.getName)

    val contentPath = Paths.get(contentFile.toURI)
    new String(Files.readAllBytes(contentPath), StandardCharsets.UTF_8)
  }

  override def beforeAll(): Unit = {
    super.beforeAll()

    // For every tested Spark version, run a generated PySpark script under that
    // version to create the tables/views whose metadata the current build reads back.
    val tempPyFile = File.createTempFile("test", ".py")
    // scalastyle:off line.size.limit
    Files.write(tempPyFile.toPath,
      s"""
        |from pyspark.sql import SparkSession
        |import os
        |
        |spark = SparkSession.builder.enableHiveSupport().getOrCreate()
        |version_index = spark.conf.get("spark.sql.test.version.index", None)
        |
        |spark.sql("create table data_source_tbl_{} using json as select 1 i".format(version_index))
        |
        |spark.sql("create table hive_compatible_data_source_tbl_{} using parquet as select 1 i".format(version_index))
        |
        |json_file = "${genDataDir("json_")}" + str(version_index)
        |spark.range(1, 2).selectExpr("cast(id as int) as i").write.json(json_file)
        |spark.sql("create table external_data_source_tbl_{}(i int) using json options (path '{}')".format(version_index, json_file))
        |
        |parquet_file = "${genDataDir("parquet_")}" + str(version_index)
        |spark.range(1, 2).selectExpr("cast(id as int) as i").write.parquet(parquet_file)
        |spark.sql("create table hive_compatible_external_data_source_tbl_{}(i int) using parquet options (path '{}')".format(version_index, parquet_file))
        |
        |json_file2 = "${genDataDir("json2_")}" + str(version_index)
        |spark.range(1, 2).selectExpr("cast(id as int) as i").write.json(json_file2)
        |spark.sql("create table external_table_without_schema_{} using json options (path '{}')".format(version_index, json_file2))
        |
        |parquet_file2 = "${genDataDir("parquet2_")}" + str(version_index)
        |spark.range(1, 3).selectExpr("1 as i", "cast(id as int) as p", "1 as j").write.parquet(os.path.join(parquet_file2, "p=1"))
        |spark.sql("create table tbl_with_col_overlap_{} using parquet options(path '{}')".format(version_index, parquet_file2))
        |
        |spark.sql("create view v_{} as select 1 i".format(version_index))
      """.stripMargin.getBytes("utf8"))
    // scalastyle:on line.size.limit

    if (PROCESS_TABLES.testingVersions.isEmpty) {
      logError("Fail to get the latest Spark versions to test.")
    }

    PROCESS_TABLES.testingVersions.zipWithIndex.foreach { case (version, index) =>
      val sparkHome = new File(sparkTestingDir, s"spark-$version")
      if (!sparkHome.exists()) {
        tryDownloadSpark(version, sparkTestingDir.getCanonicalPath)
      }

      // Extract major.minor for testing Spark 3.1.x and 3.0.x with metastore 2.3.9 and Java 11.
      val hiveMetastoreVersion = """^\\d+\\.\\d+""".r.findFirstIn(hiveVersion).get
      val args = Seq(
        "--name", "prepare testing tables",
        "--master", "local[2]",
        "--conf", s"${UI_ENABLED.key}=false",
        "--conf", s"${MASTER_REST_SERVER_ENABLED.key}=false",
        "--conf", s"${HiveUtils.HIVE_METASTORE_VERSION.key}=$hiveMetastoreVersion",
        "--conf", s"${HiveUtils.HIVE_METASTORE_JARS.key}=maven",
        "--conf", s"${WAREHOUSE_PATH.key}=${wareHousePath.getCanonicalPath}",
        "--conf", s"spark.sql.test.version.index=$index",
        "--driver-java-options", s"-Dderby.system.home=${wareHousePath.getCanonicalPath} " +
          // TODO SPARK-37159 Consider to remove the following
          // JVM module options once the Spark 3.2 line is EOL.
          JavaModuleOptions.defaultModuleOptions(),
        tempPyFile.getCanonicalPath)
      runSparkSubmit(args, Some(sparkHome.getCanonicalPath), isSparkTesting = false)
    }

    tempPyFile.delete()
  }

  test("backward compatibility") {
    // Runs PROCESS_TABLES.main via spark-submit against the current build.
    val args = Seq(
      "--class", PROCESS_TABLES.getClass.getName.stripSuffix("$"),
      "--name", "HiveExternalCatalog backward compatibility test",
      "--master", "local[2]",
      "--conf", s"${UI_ENABLED.key}=false",
      "--conf", s"${MASTER_REST_SERVER_ENABLED.key}=false",
      "--conf", s"${HiveUtils.HIVE_METASTORE_VERSION.key}=$hiveVersion",
      "--conf", s"${HiveUtils.HIVE_METASTORE_JARS.key}=maven",
      "--conf", s"${WAREHOUSE_PATH.key}=${wareHousePath.getCanonicalPath}",
      "--driver-java-options", s"-Dderby.system.home=${wareHousePath.getCanonicalPath}",
      unusedJar.toString)
    if (PROCESS_TABLES.testingVersions.nonEmpty) runSparkSubmit(args)
  }
}
object PROCESS_TABLES extends QueryTest with SQLTestUtils {
  val releaseMirror = sys.env.getOrElse("SPARK_RELEASE_MIRROR",
    "https://dist.apache.org/repos/dist/release")
  // Tests the latest version of every release line.
  val testingVersions: Seq[String] = {
    import scala.io.Source
    // Scrape released version numbers from the mirror's directory listing.
    val versions: Seq[String] = try Utils.tryWithResource(
      Source.fromURL(s"$releaseMirror/spark")) { source =>
      source.mkString
        .split("\\n")
        .filter(_.contains("""<a href="spark-"""))
        .filterNot(_.contains("preview"))
        .map("""<a href="spark-(\\d.\\d.\\d)/">""".r.findFirstMatchIn(_).get.group(1))
        .filter(_ < org.apache.spark.SPARK_VERSION)
    } catch {
      // Do not throw exception during object initialization.
      case NonFatal(_) => Nil
    }
    // Drop versions that cannot run in the current Python/Java environment.
    versions
      .filter(v => v.startsWith("3") || !TestUtils.isPythonVersionAtLeast38())
      .filter(v => v.startsWith("3") || !SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_9))
      .filter(v => !((v.startsWith("3.0") || v.startsWith("3.1")) &&
        SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_17)))
  }

  protected var spark: SparkSession = _

  // Entry point executed via spark-submit on the current build: reads back the
  // tables created by each old Spark version and verifies they still behave.
  def main(args: Array[String]): Unit = {
    val session = SparkSession.builder()
      .enableHiveSupport()
      .getOrCreate()
    spark = session
    import session.implicits._

    testingVersions.indices.foreach { index =>
      Seq(
        s"data_source_tbl_$index",
        s"hive_compatible_data_source_tbl_$index",
        s"external_data_source_tbl_$index",
        s"hive_compatible_external_data_source_tbl_$index",
        s"external_table_without_schema_$index").foreach { tbl =>
        val tableMeta = spark.sharedState.externalCatalog.getTable("default", tbl)

        // make sure we can insert and query these tables.
        session.sql(s"insert into $tbl select 2")
        checkAnswer(session.sql(s"select * from $tbl"), Row(1) :: Row(2) :: Nil)
        checkAnswer(session.sql(s"select i from $tbl where i > 1"), Row(2))

        // make sure we can rename table.
        val newName = tbl + "_renamed"
        sql(s"ALTER TABLE $tbl RENAME TO $newName")
        val readBack = spark.sharedState.externalCatalog.getTable("default", newName)
        val actualTableLocation = readBack.storage.locationUri.get.getPath
        val expectedLocation = if (tableMeta.tableType == CatalogTableType.EXTERNAL) {
          tableMeta.storage.locationUri.get.getPath
        } else {
          spark.sessionState.catalog.defaultTablePath(TableIdentifier(newName, None)).getPath
        }
        assert(actualTableLocation == expectedLocation)

        // make sure we can alter table location.
        withTempDir { dir =>
          val path = dir.toURI.toString.stripSuffix("/")
          sql(s"ALTER TABLE ${tbl}_renamed SET LOCATION '$path'")
          val readBack = spark.sharedState.externalCatalog.getTable("default", tbl + "_renamed")
          val actualTableLocation = readBack.storage.locationUri.get.getPath
          val expected = dir.toURI.getPath.stripSuffix("/")
          assert(actualTableLocation == expected)
        }
      }

      // test permanent view
      checkAnswer(sql(s"select i from v_$index"), Row(1))

      // SPARK-22356: overlapped columns between data and partition schema in data source tables
      val tbl_with_col_overlap = s"tbl_with_col_overlap_$index"
      assert(spark.table(tbl_with_col_overlap).columns === Array("i", "p", "j"))
      checkAnswer(spark.table(tbl_with_col_overlap), Row(1, 1, 1) :: Row(1, 1, 1) :: Nil)
      assert(sql("desc " + tbl_with_col_overlap).select("col_name")
        .as[String].collect().mkString(",").contains("i,p,j"))
    }
  }
}
object HiveExternalCatalogVersionsSuite {
  // System property naming a persistent download cache shared across test runs.
  private val SPARK_TEST_CACHE_DIR_SYSTEM_PROPERTY = "spark.test.cache-dir"
}
| nchammas/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala | Scala | apache-2.0 | 14,823 |
package mdtags
import org.specs2.mutable.Specification
// Specs for the mdtags markdown-generation DSL.
class MarkDownSpec extends Specification {

  "Empty MarkDown" should {
    "return an empty string" in {
      MarkDown().toMarkdown() must equalTo("")
    }
  }

  "MarkDown" should {
    "have a h1 title and return the appropriate MarkDown" in {
      MarkDown(
        h1("Hello MdTags")
      ).toMarkdown() must equalTo("# Hello MdTags")
    }

    "have a h2 title and return the appropriate MarkDown" in {
      MarkDown(
        h2("So we meet again, MdTags...")
      ).toMarkdown() must equalTo("## So we meet again, MdTags...")
    }

    // Elements are joined with a blank line between them.
    "have a h1 and h2 title and return the appropriate MarkDown" in {
      MarkDown(
        h1("This example uses varargs"),
        h2("See the method MarkDown#apply() for as an example")
      ).toMarkdown() must equalTo("""# This example uses varargs
                                    |
                                    |## See the method MarkDown#apply() for as an example""".stripMargin)
    }

    "have a text element and return the text" in {
      MarkDown(
        "This is just plaintext, without special formatting."
      ).toMarkdown() must equalTo("This is just plaintext, without special formatting.")
    }

    "have titles and texts, etc combined and return the appropriate MarkDown" in {
      // NOTE(review): the expected output ends with a blank entry where the image
      // element's markdown would appear — confirm `image(...)` is meant to render
      // an empty string here (no `![My image](my-image.png)` in the expectation).
      MarkDown(
        h1("The first title"),
        h2("The first subtitle"),
        "This is the first paragraph.",
        h2("This is the second subtitle"),
        "This is the second paragraph.",
        h3("Level 3 Title"),
        h4("Level 4 Title"),
        h5("Level 5 Title"),
        h6("Level 6 Title"),
        mdtags.link("http://www.google.com/", "Google"),
        mdtags.link("http://www.gmail.com/"),
        image("my-image.png", "My image")
      ).toMarkdown() must equalTo("""# The first title
                                    |
                                    |## The first subtitle
                                    |
                                    |This is the first paragraph.
                                    |
                                    |## This is the second subtitle
                                    |
                                    |This is the second paragraph.
                                    |
                                    |### Level 3 Title
                                    |
                                    |#### Level 4 Title
                                    |
                                    |##### Level 5 Title
                                    |
                                    |###### Level 6 Title
                                    |
                                    |[Google](http://www.google.com/)
                                    |
                                    |[http://www.gmail.com/](http://www.gmail.com/)
                                    |
                                    |""".stripMargin)
    }

    "MarkDown can contain code elements and return the appropriate MarkDown" in {
      MarkDown(
        code(
          syntax = "java",
          code = """class Main {
                   | public static void main(String[] args) {
                   | System.out.println(args.length);
                   | }
                   |}""".stripMargin)
      ).toMarkdown() must equalTo(
        """```java
          |class Main {
          | public static void main(String[] args) {
          | System.out.println(args.length);
          | }
          |}
          |```""".stripMargin)
    }
  }
}
| timo-schmid/mdtags | src/test/scala/mdtags/MarkDownSpec.scala | Scala | apache-2.0 | 3,610 |
// A shape exposes the coordinates of its center point.
abstract class Shape {
  def centerPoint: (Double, Double)
}

// Axis-aligned rectangle anchored at its top-left corner (x, y).
class Rectangle(val x: Double, val y: Double, val width: Double, val height: Double) extends Shape {
  def centerPoint = {
    val centerX = x + width / 2
    val centerY = y + height / 2
    (centerX, centerY)
  }
}

// A circle's center is supplied directly, overriding the abstract member.
class Circle(override val centerPoint: (Double, Double), val radius: Double) extends Shape

println(new Rectangle(4, 4, 8, 6).centerPoint)
println(new Circle((5, 7), 8).centerPoint)
package latis.ops.filter
import latis.dm.Scalar
import latis.dm.Tuple
import latis.ops.OperationFactory
/** Removes all data points of a given name from the dataset that differ more
  * than `maxDelta` in value from their preceding (accepted) points.
  *
  * NOTE: this filter is stateful (`currentValue`), so an instance is not
  * reusable across datasets nor safe for concurrent application.
  */
class MaxDeltaFilter(val name: String, val maxDelta: Double) extends Filter {

  // Last accepted value of the named variable; NaN until the first sample is seen.
  var currentValue: Double = Double.NaN

  /** Keeps the scalar unless it is the named variable and jumped more than
    * `maxDelta` from the previously accepted value. Unnamed scalars pass through.
    */
  override def applyToScalar(scalar: Scalar): Option[Scalar] = {
    if (currentValue.isNaN && scalar.hasName(name)) {
      currentValue = scalar.getNumberData.doubleValue
      Some(scalar) //Assumes the first data point is always good data, which may not always be true
    } else {
      if (scalar.hasName(name)) {
        val nextValue = scalar.getNumberData.doubleValue
        val delta = scala.math.abs(currentValue - nextValue)
        if (delta > maxDelta) {
          None //Delta is greater than maxDelta, remove
        } else {
          currentValue = nextValue
          Some(scalar) //Acceptable delta, keep
        }
      } else { Some(scalar) }
    }
  }

  /** Keeps the tuple only if every contained variable passes the filter;
    * otherwise the entire tuple is excluded.
    *
    * NOTE(review): scalars processed before a rejected one may already have
    * updated `currentValue` even though the tuple is dropped — confirm this is
    * intended when a tuple holds more than one matching variable.
    */
  override def applyToTuple(tuple: Tuple): Option[Tuple] = {
    val x = tuple.getVariables.map(applyToVariable(_))
    x.find(_.isEmpty) match {
      case Some(_) => None //Found an unacceptable delta, exclude the entire tuple
      case None => Some(Tuple(x.map(_.get), tuple.getMetadata))
    }
  }
}
/** Factory for [[MaxDeltaFilter]] operations. */
object MaxDeltaFilter extends OperationFactory {

  /** Builds a filter from operation arguments: a variable name followed by the
    * maximum allowed delta.
    *
    * @throws IllegalArgumentException if the arguments are missing, extra, or
    *                                  the delta is not a valid number
    */
  override def apply(args: Seq[String]): MaxDeltaFilter = args match {
    case Seq(name, value) =>
      try new MaxDeltaFilter(name, value.toDouble)
      catch {
        case _: NumberFormatException =>
          throw new IllegalArgumentException(
            s"MaxDeltaFilter: '$value' is not a valid number")
      }
    case _ =>
      throw new IllegalArgumentException(
        s"MaxDeltaFilter expects exactly two arguments (name, maxDelta), got: $args")
  }

  def apply(name: String, maxDelta: Double): MaxDeltaFilter = new MaxDeltaFilter(name, maxDelta)
}
| dlindhol/LaTiS | src/main/scala/latis/ops/filter/MaxDeltaFilter.scala | Scala | epl-1.0 | 1,792 |
package feh.tec.nxt
import feh.tec.nxt.LegoRobotRubik.{Motors, remotely}
import feh.tec.rubik.RubikCube._
import feh.tec.rubik.RubikCubeImage
import feh.tec.rubik.RubikCubeImage.{Side, SidesMap, ColorMap}
import feh.util._
import lejos.nxt.LightSensor
/** Drives the NXT robot to scan all six faces of a Rubik's cube and turn the
  * sensed values into a [[RubikCubeImage]].
  */
object RubikCubeImageNXT{

  /** Scans the cube and assembles cubelets with orientation from the image. */
  def readCubes[T, C](gatherColor: => T)
                     (implicit motors: Motors,
                      ls: LightSensor,
                      rd: RobotDescriptor,
                      cMap: ColorMap[T, C],
                      sMap: SidesMap,
                      sName: WithSideName[C] ): Map[CubeId, CubeWithOrientation[C]] =
    RubikCubeImage.readCubes(readImage(gatherColor))

  /** Scans the cube and maps each raw reading to a color via the [[ColorMap]]. */
  def readImage[T, C](gatherColor: => T)
                     (implicit motors: Motors,
                      ls: LightSensor,
                      rd: RobotDescriptor,
                      cmap: ColorMap[T, C] ): RubikCubeImage[C] = readImage(readSomeImage(gatherColor))

  /** Maps an already captured raw image to colors via the [[ColorMap]]. */
  def readImage[T, C](img: RubikCubeImage[T])
                     (implicit motors: Motors,
                      ls: LightSensor,
                      rd: RobotDescriptor,
                      cmap: ColorMap[T, C] ): RubikCubeImage[C] = img.map(cmap.colorFor)

  /** Physically scans all six sides, flipping and rotating the cube between
    * face scans, and restores the rotation afterwards.
    *
    * NOTE(review): the face order below (Up, then Back/Down/Front via flips,
    * then Left/Right) must stay in sync with the [[SidesMap]] used by
    * `readCubes` — confirm when changing either.
    */
  def readSomeImage[T](gatherColor: => T)
                      (implicit motors: Motors,
                       ls: LightSensor,
                       rd: RobotDescriptor): RubikCubeImage[T] =
  {
    import rd._

    def flip(n: Int) = {
//      ls.setFloodlight(false)
      remotely.flipCube(n)
//      ls.setFloodlight(true)
    }

    // Up
    val up = gatherSideColors(gatherColor)

    // Back, Down, Front
    val sides1 = for(i <- 2 to 4) yield {
      flip(1)
      gatherSideColors(gatherColor)
    }

    flip(1)
    remotely.rotate.clockwise90()
    flip(1)
    val left = gatherSideColors(gatherColor)

    flip(2)
    val right = gatherSideColors(gatherColor)

    // Undo the rotation and park the light-sensor motor at its origin.
    flip(1)
    remotely.rotate.counterclockwise90()
    motors.lsm.get.rotateTo(0)

    RubikCubeImage(up +: (sides1 ++ Seq(left, right)))
  }

  /** Reads the nine stickers of the currently facing side: the center first,
    * then the remaining positions while spinning the cube, per the angles in
    * the [[ReadColorsSequenceDescriptor]].
    */
  def gatherSideColors[T](gatherColor: => T)
                         (implicit motors: Motors, rcd: ReadColorsSequenceDescriptor): Side[T] =
  {
    // Short pause lets the sensor settle over the center sticker before reading.
    val center = {motors.lsm.get.rotateTo(rcd.centerLightAbsAngle); Thread.sleep(100); gatherColor}

    def rotate(ar: Int, al: Int) = {
      motors.lsm.get.rotateTo(al, true)
      motors.crm.get.rotate(ar, false)
    }

    motors.crm.get.rotate(rcd.rotAngleDelta0)

    // Fixed-point recursion (Y) over the remaining sticker positions: read,
    // advance by the odd/even angle pair, accumulate position -> reading.
    val rest = Y[(Int, List[(Int, Int)]), Map[(Int, Int), T]](
      rec => {
        case (_, Nil) => Map.empty
        case (i, p :: ps) =>
          val g = gatherColor
          (rotate _).tupled( if (i % 2 == 1) rcd.oddAngles else rcd.evenAngles )
          rec(i+1, ps) + (p -> g)
      }
    )(1 -> rcd.indices.tail.toList)

    motors.crm.get.rotate(rcd.finalRotation, false)

    Side(rest + (rcd.indices.head -> center))
  }
}
| fehu/int-sis--Rubik | nxt/src/main/scala/feh/tec/nxt/RubikCubeImageNXT.scala | Scala | mit | 2,961 |
package net.xylophones.planetoid.game.logic
import net.xylophones.planetoid.game.model.GameEvent._
import net.xylophones.planetoid.game.model.{GameEvent, PlayerInput, GamePhysics, GameModelUpdateResult}
/**
  * Counts down the model's end-of-round timer (when one is active) and adds a
  * [[GameEvent.RoundComplete]] event once the countdown reaches zero.
  *
  * @param currentTimeSource clock abstraction, injectable for deterministic tests
  */
class RoundCompleteCountdownUpdater(currentTimeSource: CurrentTimeSource) extends GameModelResultUpdater {

  override def update(initialResults: GameModelUpdateResult, physics: GamePhysics, playerInputs: IndexedSeq[PlayerInput]): GameModelUpdateResult =
    initialResults.model.roundEndTimer.fold(initialResults) { timer =>
      // Advance the timer by the wall-clock time elapsed since its last tick,
      // clamping so the remaining time never goes negative.
      val now = currentTimeSource.currentTime()
      val elapsedMs = now - timer.lastTimeStampMs
      val remainingMs = math.max(timer.remainingTimeMs - elapsedMs, 0)
      val tickedTimer = timer.copy(lastTimeStampMs = now, remainingTimeMs = remainingMs)

      // Raise RoundComplete when the countdown has run out.
      val extraEvents: Set[GameEvent] =
        if (tickedTimer.isComplete) Set(GameEvent.RoundComplete) else Set.empty

      new GameModelUpdateResult(
        initialResults.model.copy(roundEndTimer = Some(tickedTimer)),
        initialResults.events ++ extraEvents)
    }
}
| wjsrobertson/planetoid3d | game/src/main/scala/net/xylophones/planetoid/game/logic/RoundCompleteCountdownUpdater.scala | Scala | apache-2.0 | 1,187 |
package org.scaladebugger.api.profiles.traits.info
import com.sun.jdi.{ThreadGroupReference, ThreadReference}
import scala.util.Try
/**
 * Represents the interface for thread-group-based interaction, wrapping a
 * JDI [[ThreadGroupReference]].
 */
trait ThreadGroupInfo extends ObjectInfo with CommonInfo {
  /**
   * Converts the current profile instance to a representation of
   * low-level Java instead of a higher-level abstraction.
   *
   * @return The profile instance providing an implementation corresponding
   *         to Java
   */
  override def toJavaInfo: ThreadGroupInfo

  /**
   * Returns the JDI representation this profile instance wraps.
   *
   * @return The JDI instance
   */
  override def toJdiInstance: ThreadGroupReference

  /**
   * Represents the name of the thread group.
   *
   * @return The thread group name as a string
   */
  def name: String

  /**
   * Represents the parent of this thread group.
   *
   * @return Some thread group if a parent exists, otherwise None if top-level
   */
  def parent: Option[ThreadGroupInfo]

  /**
   * Resumes all threads in the thread group and subgroups. This is not an
   * atomic operation, so new threads added to a group will be unaffected.
   */
  def resume(): Unit

  /**
   * Suspends all threads in the thread group and subgroups. This is not an
   * atomic operation, so new threads added to a group will be unaffected.
   */
  def suspend(): Unit

  /**
   * Returns all live (started, but not stopped) threads in this thread group.
   * Does not include any threads in subgroups.
   *
   * @return The collection of threads
   */
  def threads: Seq[ThreadInfo]

  /**
   * Returns all live thread groups in this thread group. Only immediate
   * subgroups to this group are returned.
   *
   * @return The collection of thread groups
   */
  def threadGroups: Seq[ThreadGroupInfo]

  /**
   * Returns a string presenting a better human-readable description of
   * the JDI instance.
   *
   * @return The human-readable description
   */
  override def toPrettyString: String = {
    // Include the unique object id so distinct groups with the same name
    // remain distinguishable in output.
    s"Thread Group $name (0x$uniqueIdHexString)"
  }
}
| ensime/scala-debugger | scala-debugger-api/src/main/scala/org/scaladebugger/api/profiles/traits/info/ThreadGroupInfo.scala | Scala | apache-2.0 | 2,080 |
package amora.backend.indexer
import org.junit.Test
import amora.converter.protocol.Artifact
import amora.converter.protocol.Project
class ScalaRefTest extends RestApiTest {
@Test
def return_type_at_members() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
val a: [[Int]] = 0
var b: [[Int]] = 0
def c: [[Int]] = 0
lazy val d: [[Int]] = 0
}
""")
}
@Test
def return_type_at_nested_members() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
def x: [[Int]] = {
val a: [[Int]] = {
val a: [[Int]] = 0
[[a]]
}
var b: [[Int]] = {
var a: [[Int]] = 0
[[a]]
}
def c: [[Int]] = {
def a: [[Int]] = 0
[[a]]
}
[[a]]
}
}
""")
}
@Test
def return_type_at_nested_lazy_vals() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
lazy val a: [[Int]] = {
lazy val a: [[Int]] = {
lazy val a: [[Int]] = 0
[[a]]
}
[[a]]
}
}
""")
}
@Test
def member_ref() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
val [[!Int]]a = 0
def [[!Int]]b = [[a]]
var [[!Int]]c = [[b]]
lazy val [[!Int]]d = [[c]]
}
""")
}
@Test
def classOf_ref() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
val [[!Class]]a = [[classOf]][ /* Int */ [[Int]] ]
}
""")
}
@Test
def refs_of_imports() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
import [[scala]].[[collection]].[[mutable]].[[Buffer]]
import [[scala]].[[collection]].[[mutable]].[[ListBuffer]]
class [[!AnyRef]]X {
[[Buffer]]
[[ListBuffer]]
}
""")
}
@Test
def refs_of_rename_imports() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
import [[scala]].[[collection]].[[mutable]].{ [[Buffer]] ⇒ [[B]], [[ListBuffer]] }
class [[!AnyRef]]X {
[[B]]
[[ListBuffer]]
}
""")
}
@Test
def refs_of_package_import() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
import [[scala]].[[collection]].[[mutable]]
class [[!AnyRef]]X {
val lb: [[mutable]].[[ListBuffer]] [ [[Int]] ] = null
}
""")
}
@Test
def self_ref_with_fully_qualified_name() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
trait [[!AnyRef]]X {
[[!X]]self: [[scala]].[[collection]].[[mutable]].[[AbstractSet]][ [[java]].[[io]].[[File]] ] ⇒
}
""")
}
@Test
def self_ref_reference_has_correct_position() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
trait [[!AnyRef]]X { [[!X]]self ⇒
}
""")
}
@Test
def self_ref_references_owner() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
prefix decl:<http://amora.center/kb/amora/Schema/Decl/>
select * where {
[a ref:] ref:refToDecl [decl:name ?name ; decl:posStart ?start ; decl:posEnd ?end] ; ref:owner [decl:name "self"] .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
trait [[X]] { self ⇒
}
""")
}
@Test
def self_ref_usage() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
trait [[!AnyRef]]X { [[!X]]self ⇒
def [[!X]]x = [[self]]
}
""")
}
@Test
def self_ref_and_this_ref_usage() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
trait [[!AnyRef]]X { [[!X]]self ⇒
def [[!X]]x1 = [[self]]
def [[!X]]x2 = [[this]]
}
""")
}
@Test
def self_ref_usages_reference_self_ref_decl() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
prefix decl:<http://amora.center/kb/amora/Schema/Decl/>
select * where {
[a ref:] ref:refToDecl [decl:name ?name ; decl:posStart ?start ; decl:posEnd ?end] ; ref:name "self" .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
trait X { [[self]] ⇒
def x = self
}
""")
}
@Test
def self_ref_usages_have_correct_positions() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
trait [[!AnyRef]]X { [[!X]]longSelfRefName ⇒
def [[!X]]x = [[longSelfRefName]]
}
""")
}
@Test
def compound_type_ref_in_self_ref() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
trait [[!AnyRef]]X {
[[!X]]self: [[scala]].[[collection]].[[SeqLike]] [ [[List]] [ [[Int]] ], [[List]] [ [[Int]] ] ]
with [[scala]].[[collection]].[[IterableLike]] [ [[List]] [ [[Int]] ], [[List]] [ [[Int]] ] ]
with [[scala]].[[collection]].[[GenSeqLike]] [ [[List]] [ [[Int]] ], [[List]] [ [[Int]] ] ] ⇒
}
""")
}
@Test
def self_ref_with_parent() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
trait [[!AnyRef]]X {
[[!X]]selfRef: [[scala]].[[collection]].[[mutable]].[[AbstractMap]] [ [[List]] [ [[Map]] [ [[Int]], [[Set]] [ [[Int]] ] ] ], [[Map]] [ [[Int]], [[String]] ] ] ⇒
}
""")
}
@Test
def refs_in_if_expr() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
val [[!Boolean]]b1 = true
val [[!Boolean]]b2 = true
val [[!Boolean]]b3 = true
def [[!Boolean]]f = if ([[b1]]) [[b2]] else [[b3]]
}
""")
}
@Test
def refs_of_single_method() = {
indexRegionData("""
prefix def:<http://amora.center/kb/amora/Schema/Def/>
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
?def def:jvmSignature "(IF)I" .
[a ref:] ref:owner ?def ; ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class X {
def f(i: Int) = i
def [[!Int]]f(i: Int, s: Float) = [[i]]
}
""")
}
@Test
def refs_of_parameter() = {
indexRegionData("""
prefix param:<http://amora.center/kb/amora/Flag/param>
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
prefix decl:<http://amora.center/kb/amora/Schema/Decl/>
select * where {
[a ref:] ref:refToDecl [decl:flag param:] ; ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class X {
def f(i: Int) = {
[[i]]
}
}
""")
}
@Test
def refs_of_local_value_with_same_name_as_parameter() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
prefix decl:<http://amora.center/kb/amora/Schema/Decl/>
select * where {
[a ref:] ref:refToDecl [decl:flag "param"] ; ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class X {
def f(i: Int) = {
val i = 0
// i doesn't point to the parameter
i
}
}
""")
}
@Test
def refs_to_local_value_when_parameter_of_same_name_exists() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
prefix v:<http://amora.center/kb/amora/Schema/Val/>
select * where {
?val a v: .
FILTER NOT EXISTS {
?val v:flag "param" .
}
[a ref:] ref:refToDecl ?val ; ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class X {
def f(i: Int) = {
val i = 0
[[i]]
}
}
""")
}
@Test
def refs_of_type_parameter() = {
indexRegionData("""
prefix tparam:<http://amora.center/kb/amora/Flag/tparam>
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
prefix decl:<http://amora.center/kb/amora/Schema/Decl/>
select * where {
[a ref:] ref:refToDecl [decl:flag tparam:] ; ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
trait X[A] {
def f[B](a: [[A]], b: [[B]]): [[A]]
}
""")
}
@Test
def refs_of_type_parameter_without_shadowed_type_parameter_refs() = {
indexRegionData("""
prefix tparam:<http://amora.center/kb/amora/Flag/tparam>
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
prefix decl:<http://amora.center/kb/amora/Schema/Decl/>
prefix t:<http://amora.center/kb/amora/Schema/Trait/>
select * where {
# find type parameter
?tparam decl:owner [a t:] ; decl:flag tparam: .
# find references of type parameter
[a ref:] ref:refToDecl ?tparam ; ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
trait X[A] {
def f(a: [[A]], b: [[A]]): [[A]]
def f[A](a: A): A
}
""")
}
@Test
def refs_of_shadowed_type_parameter() = {
indexRegionData("""
prefix tparam:<http://amora.center/kb/amora/Flag/tparam>
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
prefix decl:<http://amora.center/kb/amora/Schema/Decl/>
prefix def:<http://amora.center/kb/amora/Schema/Def/>
select * where {
# find type parameter
?tparam decl:owner [a def:; def:jvmSignature "(Ljava/lang/Object;)Ljava/lang/Object;"] ; decl:flag tparam: .
# find references of type parameter
[a ref:] ref:refToDecl ?tparam ; ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
trait X[A] {
def f(a: A, b: A): A
def f[A](a: [[A]]): [[A]]
}
""")
}
@Test
def refs_of_type_parameter_when_parameter_of_same_name_exists() = {
indexRegionData("""
prefix tparam:<http://amora.center/kb/amora/Flag/tparam>
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
prefix decl:<http://amora.center/kb/amora/Schema/Decl/>
prefix def:<http://amora.center/kb/amora/Schema/Def/>
select * where {
# find type parameter
?tparam decl:owner [a def:] ; decl:flag tparam: .
# find references of type parameter
[a ref:] ref:refToDecl ?tparam ; ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class X {
def [[!A]]f[A](A: [[A]]) = {
A
}
}
""")
}
@Test
def refs_of_type_parameter_when_local_val_decl_of_same_name_exists() = {
indexRegionData("""
prefix tparam:<http://amora.center/kb/amora/Flag/tparam>
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
prefix decl:<http://amora.center/kb/amora/Schema/Decl/>
prefix def:<http://amora.center/kb/amora/Schema/Def/>
select * where {
# find type parameter
?tparam decl:owner [a def:] ; decl:flag tparam: .
# find references of type parameter
[a ref:] ref:refToDecl ?tparam ; ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class X {
def f[A](A: [[A]]) = {
val A = 0
A
}
}
""")
}
@Test
def multiple_calls_to_def() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
def [[!Int]]f(i: [[Int]]) = 0
[[f]](0)
[[f]](0)
}
""")
}
@Test
def multiple_blocks_with_same_name() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
def [[!Int]]f(i: [[Int]]) = 0
[[f]]({val [[!Int]]i = 0; [[i]]})
[[f]]({val [[!Int]]i = 0; [[i]]})
}
""")
}
@Test
def explicit_apply_method() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
val [[!Option]]a = [[Option]].[[!Int]][[apply]](1)
}
""")
}
@Test
def implicit_apply_method() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
val [[!Option]]a = [[!apply]][[!Int]][[Option]](1)
}
""")
}
def class_annotation() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
@[[Ann]]([[!apply]][[!Class]][[Array]]([[classOf]] [ [[X]] ]))
class [[!AnyRef]]X
class Ann(arr: [[Array]][ [[Class]] [_] ]) extends [[scala]].[[annotation]].[[StaticAnnotation]]
""")
}
def multiple_annotations() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
@[[Ann1]]([[!apply]][[!Class]][[Array]]([[classOf]] [ [[X]] ]))
@[[Ann2]]
@[[Ann1]]([[!apply]][[!Class]][[Array]]([[classOf]] [ [[X]] ]))
class [[!AnyRef]]X
class Ann1(arr: [[Array]][ [[Class]] [_] ]) extends [[scala]].[[annotation]].[[StaticAnnotation]]
class Ann2 extends [[scala]].[[annotation]].[[StaticAnnotation]]
""")
}
@Test
def refs_of_lambda_decl() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
def [[!Function1]]f([[!Function1]]i: [[Int]] ⇒ [[Int]]) = [[i]]
}
""")
}
@Test
def refs_of_function_decl() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
def [[!Function1]]f(i: [[Function1]][ [[Int]], [[Int]] ]) = [[i]]
}
""")
}
@Test
def multiple_lambda_refs() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
def [[!Function1]]f([[!Function1]]i: [[Int]] ⇒ [[Int]]) = [[i]]
[[f]]([[!Int]]v ⇒ [[v]])
[[f]]([[!Int]]value ⇒ [[value]])
}
""")
}
@Test
def ref_with_qualifier() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"f1.scala" → """
package a.b
import [[d]].[[e]]
class [[!AnyRef]]X {
val f: [[e]].[[Y]] = null
}
""",
"f2.scala" → """
package d.e
class [[!AnyRef]]Y
""")
}
@Test
def ref_to_val_from_within_another_val() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
prefix decl:<http://amora.center/kb/amora/Schema/Decl/>
select * where {
[a ref:] ref:name "xs" ; ref:refToDecl [decl:name ?name ; decl:posStart ?start ; decl:posEnd ?end] .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class X {
val [[xs]] = 0
val ys = xs
}
""")
}
@Test
def this_ref_points_to_class() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
prefix decl:<http://amora.center/kb/amora/Schema/Decl/>
select * where {
[a ref:] ref:name "this" ; ref:refToDecl [decl:name ?name ; decl:posStart ?start ; decl:posEnd ?end] .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[X]] {
val value = this
}
""")
}
@Test
def this_ref_has_correct_position() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name "this" ; ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class X {
val value = [[this]]
}
""")
}
@Test
def type_alias_can_be_referenced() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
prefix decl:<http://amora.center/kb/amora/Schema/Decl/>
select * where {
[a ref:] ref:name "Type" ; ref:refToDecl [decl:name ?name ; decl:posStart ?start ; decl:posEnd ?end] .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
trait X {
type [[Type]] [A, B] = Map[A, B]
def f: Type[Int, Int]
}
""")
}
@Test
def type_alias_parameter_can_be_referenced() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
prefix decl:<http://amora.center/kb/amora/Schema/Decl/>
select * where {
values ?vals { "A" "B" }
[a ref:] ref:name ?vals ; ref:refToDecl [decl:name ?name ; decl:posStart ?start ; decl:posEnd ?end] .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class X {
type Type [ [[A]] , [[B]] ] = Map[A, B]
}
""")
}
@Test
def string_interpolation() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
val [[!Int]]x1 = 0
val [[!Int]]x2 = 0
val [[!String]]y = [[s]]"$[[x1]]$[[x2]]"
}
""")
}
@Test
def scope_nested_within_scope() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
def [[!Int]]f(h: [[Option]] [ [[Int]] ]) = [[h]] match {
case [[Some]](i) ⇒ [[i]]
}
}
""")
}
@Test
def ref_as_owner_of_scope() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
def [[!Int]]a([[!Function1]]f: [[Int]] ⇒ [[Int]]) = 0
}
class [[!AnyRef]]Y {
new [[X]]().[[a]] {
case i ⇒ [[i]]
}
}
""")
}
@Test
def repeated_args() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
def [[!Int]]f(i: [[Int]]*) = 0
}
""")
}
@Test
def explicit_type_ascription() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
val x: [[Int]] = 0
val [[!Int]]y = 0
}
""")
}
@Test
def ctor_with_parameter() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X([[!Int]]i: [[Int]], [[!Int]]j: [[Int]])
""")
}
@Test
def by_name_parameter() = {
indexRegionData("""
prefix ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a ref:] ref:name ?name ; ref:posStart ?start ; ref:posEnd ?end .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class [[!AnyRef]]X {
def [[!Int]]meth([[!Function0]]f: ⇒ [[Int]]) = 0
}
""")
}
@Test
def the_owner_of_a_ref_can_be_a_ref() = {
indexRegionData("""
prefix Ref:<http://amora.center/kb/amora/Schema/Ref/>
select * where {
[a Ref:] Ref:name "i" ; Ref:owner [Ref:name ?name ; Ref:posStart ?start ; Ref:posEnd ?end] .
}
""",
Artifact(Project("p"), "o", "n", "v1"),
"x.scala" → """
class X {
def f(i: Int) = [[println]](i)
}
""")
}
}
| sschaef/tooling-research | backend/src/test/scala/amora/backend/indexer/ScalaRefTest.scala | Scala | mit | 27,049 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package elements
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{IndexSink, StubElement, StubInputStream, StubOutputStream}
import com.intellij.util.io.StringRef
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScFunction, ScFunctionDeclaration, ScFunctionDefinition}
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.ScFunctionStubImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.index.ScalaIndexKeys.METHOD_NAME_KEY
/**
* User: Alexander Podkhalyuzin
* Date: 14.10.2008
*/
// Base stub element type for Scala functions (both declarations and
// definitions). Stubs cache just enough data for indexing and resolve
// without re-parsing function bodies.
abstract class ScFunctionElementType(debugName: String) extends ScStubElementType[ScFunctionStub, ScFunction](debugName) {
  // NOTE: the field write order here must stay in sync with the read order
  // in `deserialize` below.
  override def serialize(stub: ScFunctionStub, dataStream: StubOutputStream): Unit = {
    dataStream.writeName(stub.getName)
    dataStream.writeBoolean(stub.isDeclaration)
    dataStream.writeNames(stub.annotations)
    dataStream.writeOptionName(stub.typeText)
    dataStream.writeOptionName(stub.bodyText)
    dataStream.writeBoolean(stub.hasAssign)
    dataStream.writeBoolean(stub.isImplicit)
    dataStream.writeBoolean(stub.isLocal)
  }

  // Reads the fields back in exactly the order `serialize` wrote them.
  override def deserialize(dataStream: StubInputStream, parentStub: StubElement[_ <: PsiElement]): ScFunctionStub =
    new ScFunctionStubImpl(parentStub, this,
      nameRef = dataStream.readName,
      isDeclaration = dataStream.readBoolean,
      annotationsRefs = dataStream.readNames,
      typeTextRef = dataStream.readOptionName,
      bodyTextRef = dataStream.readOptionName,
      hasAssign = dataStream.readBoolean,
      isImplicit = dataStream.readBoolean,
      isLocal = dataStream.readBoolean)

  // Builds a stub from the PSI element, capturing the return-type text,
  // and — only when there is no explicit return type — the body text
  // (presumably needed later to infer the type; TODO confirm).
  override def createStub(function: ScFunction, parentStub: StubElement[_ <: PsiElement]): ScFunctionStub = {
    val maybeFunction = Option(function)
    val returnTypeText = maybeFunction.flatMap {
      _.returnTypeElement
    }.map {
      _.getText
    }

    val maybeDefinition = maybeFunction.collect {
      case definition: ScFunctionDefinition => definition
    }

    // Body text is stored only when the return type is absent.
    val bodyText = returnTypeText match {
      case Some(_) => None
      case None =>
        maybeDefinition.flatMap {
          _.body
        }.map {
          _.getText
        }
    }

    val hasAssign = maybeDefinition.exists {
      _.hasAssign
    }

    // Annotations are stored by simple name: the text after the last '.'.
    val annotations = function.annotations.map {
      _.annotationExpr.constr.typeElement.getText
    }.map { text =>
      text.substring(text.lastIndexOf('.') + 1)
    }.toArray

    new ScFunctionStubImpl(parentStub, this,
      nameRef = StringRef.fromString(function.name),
      isDeclaration = function.isInstanceOf[ScFunctionDeclaration],
      annotationsRefs = annotations.asReferences,
      typeTextRef = returnTypeText.asReference,
      bodyTextRef = bodyText.asReference,
      hasAssign = hasAssign,
      isImplicit = function.hasModifierProperty("implicit"),
      isLocal = function.containingClass == null)
  }

  // Registers the function under the method-name index; implicit functions
  // are additionally indexed for implicit search.
  override def indexStub(stub: ScFunctionStub, sink: IndexSink): Unit = {
    this.indexStub(Array(stub.getName), sink, METHOD_NAME_KEY)
    if (stub.isImplicit) {
      this.indexImplicit(sink)
    }
  }
} | ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/stubs/elements/ScFunctionElementType.scala | Scala | apache-2.0 | 3,155 |
package com.github.mdr.mash.ns.view
import com.github.mdr.mash.classes.{ AbstractObjectWrapper, Field, MashClass, NewStaticMethod }
import com.github.mdr.mash.inference.Type
import com.github.mdr.mash.ns.core.BooleanClass
import com.github.mdr.mash.runtime.{ MashBoolean, MashObject, MashValue }
import scala.collection.immutable.ListMap
object ViewClass extends MashClass("view.View") {

  // Recursively unwraps nested View objects down to the underlying data;
  // any non-View value is returned unchanged.
  def unpackView(value: MashValue): MashValue = value match {
    case obj@MashObject(_, Some(ViewClass)) ⇒ obj.get(ViewClass.Fields.Data).map(unpackView).getOrElse(obj)
    case _                                  ⇒ value
  }

  object Fields {
    val Data = Field("data", Some("Data to display"), Type.Any)
    val DisableCustomViews = Field("disableCustomViews", Some("If true, disable custom views for this data"), BooleanClass)
    val UseBrowser = Field("useBrowser", Some("If true, always use the object browser where possible"), BooleanClass)
    val UseTree = Field("useTree", Some("If true, always use the tree object browser where possible"), BooleanClass)
    val Print = Field("print", Some("If true, print"), BooleanClass)
  }

  import Fields._

  // NOTE: `Print` is intentionally absent here (it is set via `build`, not a
  // declared field) — TODO confirm this asymmetry is deliberate.
  override lazy val fields = Seq(Data, DisableCustomViews, UseBrowser, UseTree)

  // Typed accessor wrapper over a View object.
  case class Wrapper(x: MashValue) extends AbstractObjectWrapper(x) {
    def disableCustomViews: Boolean = getBooleanField(DisableCustomViews)
    def useBrowser: Boolean = getBooleanField(UseBrowser)
    def useTree: Boolean = getBooleanField(UseTree)
    def print: Boolean = getBooleanField(Print)
    def data = getField(Data)
  }

  // Builds a View object. If `data` is already a View, the given flags are
  // merged with the existing ones using logical OR (a flag set in either
  // place stays set) and the inner data is reused; otherwise a fresh View
  // is created around `data`.
  def build(data: MashValue,
            disableCustomViews: Boolean = false,
            useBrowser: Boolean = false,
            useTree: Boolean = false,
            print: Boolean = false) =
    data match {
      case obj: MashObject if obj.classOpt == Some(ViewClass) ⇒
        val wrapper = Wrapper(obj)
        MashObject.of(ListMap(
          Data -> wrapper.data,
          DisableCustomViews -> MashBoolean(disableCustomViews || wrapper.disableCustomViews),
          UseBrowser -> MashBoolean(useBrowser || wrapper.useBrowser),
          UseTree -> MashBoolean(useTree || wrapper.useTree),
          Print -> MashBoolean(print || wrapper.print)), ViewClass)
      case value: MashValue ⇒
        MashObject.of(ListMap(
          Data -> data,
          DisableCustomViews -> MashBoolean(disableCustomViews),
          UseBrowser -> MashBoolean(useBrowser),
          UseTree -> MashBoolean(useTree),
          Print -> MashBoolean(print)), ViewClass)
    }

  override val staticMethods = Seq(NewStaticMethod(this))

  override def summaryOpt = Some("Instructions on how to display data in Mash's output system")
} | mdr/mash | src/main/scala/com/github/mdr/mash/ns/view/ViewClass.scala | Scala | mit | 2,714 |
package io.hydrosphere.mist.api
/** Marker trait for ML-oriented Mist jobs; mixes in [[ContextSupport]]. */
trait MLMistJob extends ContextSupport
| KineticCookie/mist | mist-lib/src/main/scala/io/hydrosphere/mist/api/MLMistJob.scala | Scala | apache-2.0 | 72 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.client
import java.io.{ByteArrayOutputStream, File, PrintStream, PrintWriter}
import java.net.URI
import org.apache.commons.lang3.{JavaVersion, SystemUtils}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hive.common.StatsSetupConst
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
import org.apache.hadoop.hive.serde2.`lazy`.LazySimpleSerDe
import org.apache.hadoop.mapred.TextInputFormat
import org.apache.hadoop.security.UserGroupInformation
import org.apache.spark.sql.{AnalysisException, Row}
import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier}
import org.apache.spark.sql.catalyst.analysis.{DatabaseAlreadyExistsException, NoSuchDatabaseException, NoSuchPermanentFunctionException, PartitionsAlreadyExistException}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, EqualTo, Literal}
import org.apache.spark.sql.hive.HiveExternalCatalog
import org.apache.spark.sql.hive.test.TestHiveVersion
import org.apache.spark.sql.types.{IntegerType, StructType}
import org.apache.spark.util.{MutableURLClassLoader, Utils}
/**
 * Exercises a Hive metastore client built for the given Hive `version`:
 * database/table/partition/function APIs, plus end-to-end SQL checks run
 * through `versionSpark`.
 *
 * NOTE(review): the tests are order-dependent — `client` is initialized in
 * the "create client" test and later tests reuse databases, tables and
 * functions created by earlier tests, so they must run in declaration order.
 */
class HiveClientSuite(version: String, allVersions: Seq[String])
  extends HiveVersionSuite(version) {
  // Spark session wrapper around `client`; (re)created in "create client".
  private var versionSpark: TestHiveVersion = null

  // Empty directory used as a harmless source path for load(Table|Partition).
  private val emptyDir = Utils.createTempDir().getCanonicalPath

  /**
   * Drops the given tables after calling `f`.
   */
  protected def withTable(tableNames: String*)(f: => Unit): Unit = {
    try f finally {
      tableNames.foreach { name =>
        versionSpark.sql(s"DROP TABLE IF EXISTS $name")
      }
    }
  }

  test("create client") {
    client = null
    System.gc() // Hack to avoid SEGV on some JVM versions.
    val hadoopConf = new Configuration()
    // This entry is read back later by the "getConf" test.
    hadoopConf.set("test", "success")
    client = buildClient(hadoopConf)
    if (versionSpark != null) versionSpark.reset()
    versionSpark = TestHiveVersion(client)
    assert(versionSpark.sharedState.externalCatalog.unwrapped.asInstanceOf[HiveExternalCatalog]
      .client.version.fullVersion.startsWith(version))
  }
def table(database: String, tableName: String,
tableType: CatalogTableType = CatalogTableType.MANAGED): CatalogTable = {
CatalogTable(
identifier = TableIdentifier(tableName, Some(database)),
tableType = tableType,
schema = new StructType().add("key", "int"),
storage = CatalogStorageFormat(
locationUri = None,
inputFormat = Some(classOf[TextInputFormat].getName),
outputFormat = Some(classOf[HiveIgnoreKeyTextOutputFormat[_, _]].getName),
serde = Some(classOf[LazySimpleSerDe].getName),
compressed = false,
properties = Map.empty
))
}
  ///////////////////////////////////////////////////////////////////////////
  // Database related API
  ///////////////////////////////////////////////////////////////////////////

  // Location of the "temporary" database; reused by several tests below.
  private val tempDatabasePath = Utils.createTempDir().toURI

  test("createDatabase") {
    val defaultDB = CatalogDatabase("default", "desc", new URI("loc"), Map())
    client.createDatabase(defaultDB, ignoreIfExists = true)
    val tempDB = CatalogDatabase(
      "temporary", description = "test create", tempDatabasePath, Map())
    client.createDatabase(tempDB, ignoreIfExists = true)

    // Re-creating an existing database without ignoreIfExists must fail.
    intercept[DatabaseAlreadyExistsException] {
      client.createDatabase(tempDB, ignoreIfExists = false)
    }
  }

  test("create/get/alter database should pick right user name as owner") {
    if (version != "0.12") {
      val currentUser = UserGroupInformation.getCurrentUser.getUserName
      val ownerName = "SPARK_29425"
      val db1 = "SPARK_29425_1"
      val db2 = "SPARK_29425_2"
      val ownerProps = Map("owner" -> ownerName)

      // create database with owner
      val dbWithOwner = CatalogDatabase(db1, "desc", Utils.createTempDir().toURI, ownerProps)
      client.createDatabase(dbWithOwner, ignoreIfExists = true)
      val getDbWithOwner = client.getDatabase(db1)
      assert(getDbWithOwner.properties("owner") === ownerName)

      // alter database without owner
      client.alterDatabase(getDbWithOwner.copy(properties = Map()))
      assert(client.getDatabase(db1).properties("owner") === "")

      // create database without owner; the current user becomes the owner
      val dbWithoutOwner = CatalogDatabase(db2, "desc", Utils.createTempDir().toURI, Map())
      client.createDatabase(dbWithoutOwner, ignoreIfExists = true)
      val getDbWithoutOwner = client.getDatabase(db2)
      assert(getDbWithoutOwner.properties("owner") === currentUser)

      // alter database with owner
      client.alterDatabase(getDbWithoutOwner.copy(properties = ownerProps))
      assert(client.getDatabase(db2).properties("owner") === ownerName)
    }
  }

  test("createDatabase with null description") {
    withTempDir { tmpDir =>
      // A null description must round-trip as the empty string.
      val dbWithNullDesc =
        CatalogDatabase("dbWithNullDesc", description = null, tmpDir.toURI, Map())
      client.createDatabase(dbWithNullDesc, ignoreIfExists = true)
      assert(client.getDatabase("dbWithNullDesc").description == "")
    }
  }

  test("setCurrentDatabase") {
    client.setCurrentDatabase("default")
  }

  test("getDatabase") {
    // No exception should be thrown
    client.getDatabase("default")
    intercept[NoSuchDatabaseException](client.getDatabase("nonexist"))
  }

  test("databaseExists") {
    assert(client.databaseExists("default"))
    assert(!client.databaseExists("nonexist"))
  }

  test("listDatabases") {
    assert(client.listDatabases("defau.*") == Seq("default"))
  }

  test("alterDatabase") {
    val database = client.getDatabase("temporary").copy(properties = Map("flag" -> "true"))
    client.alterDatabase(database)
    assert(client.getDatabase("temporary").properties.contains("flag"))

    // test alter database location
    val tempDatabasePath2 = Utils.createTempDir().toURI
    // Hive support altering database location since HIVE-8472.
    if (version == "3.0" || version == "3.1") {
      client.alterDatabase(database.copy(locationUri = tempDatabasePath2))
      val uriInCatalog = client.getDatabase("temporary").locationUri
      assert("file" === uriInCatalog.getScheme)
      assert(new Path(tempDatabasePath2.getPath).toUri.getPath === uriInCatalog.getPath,
        "Failed to alter database location")
    } else {
      // Older clients must reject the location change with a clear message.
      val e = intercept[AnalysisException] {
        client.alterDatabase(database.copy(locationUri = tempDatabasePath2))
      }
      assert(e.getMessage.contains("does not support altering database location"))
    }
  }

  test("dropDatabase") {
    assert(client.databaseExists("temporary"))

    // A non-empty database cannot be dropped without cascade.
    client.createTable(table("temporary", tableName = "tbl"), ignoreIfExists = false)
    val ex = intercept[AnalysisException] {
      client.dropDatabase("temporary", ignoreIfNotExists = false, cascade = false)
      assert(false, "dropDatabase should throw HiveException")
    }
    assert(ex.message.contains("Cannot drop a non-empty database: temporary."))

    client.dropDatabase("temporary", ignoreIfNotExists = false, cascade = true)
    assert(!client.databaseExists("temporary"))
  }
  ///////////////////////////////////////////////////////////////////////////
  // Table related API
  ///////////////////////////////////////////////////////////////////////////

  test("createTable") {
    // These tables are reused by the get/alter/list/drop tests below.
    client.createTable(table("default", tableName = "src"), ignoreIfExists = false)
    client.createTable(table("default", tableName = "temporary"), ignoreIfExists = false)
    client.createTable(table("default", tableName = "view1", tableType = CatalogTableType.VIEW),
      ignoreIfExists = false)
  }

  test("loadTable") {
    client.loadTable(
      emptyDir,
      tableName = "src",
      replace = false,
      isSrcLocal = false)
  }

  test("tableExists") {
    // No exception should be thrown
    assert(client.tableExists("default", "src"))
    assert(!client.tableExists("default", "nonexistent"))
  }

  test("getTable") {
    // No exception should be thrown
    client.getTable("default", "src")
  }

  test("getTableOption") {
    assert(client.getTableOption("default", "src").isDefined)
  }

  test("getTablesByName") {
    assert(client.getTablesByName("default", Seq("src")).head
      == client.getTableOption("default", "src").get)
  }

  test("getTablesByName when multiple tables") {
    assert(client.getTablesByName("default", Seq("src", "temporary"))
      .map(_.identifier.table) == Seq("src", "temporary"))
  }

  test("getTablesByName when some tables do not exist") {
    // Missing tables are silently dropped from the result.
    assert(client.getTablesByName("default", Seq("src", "notexist"))
      .map(_.identifier.table) == Seq("src"))
  }

  test("getTablesByName when contains invalid name") {
    // scalastyle:off
    val name = "砖"
    // scalastyle:on
    assert(client.getTablesByName("default", Seq("src", name))
      .map(_.identifier.table) == Seq("src"))
  }

  test("getTablesByName when empty") {
    assert(client.getTablesByName("default", Seq.empty).isEmpty)
  }

  test("alterTable(table: CatalogTable)") {
    val newTable = client.getTable("default", "src").copy(properties = Map("changed" -> ""))
    client.alterTable(newTable)
    assert(client.getTable("default", "src").properties.contains("changed"))
  }

  test("alterTable - should respect the original catalog table's owner name") {
    val ownerName = "SPARK-29405"
    val originalTable = client.getTable("default", "src")
    // mocking the owner is what we declared
    val newTable = originalTable.copy(owner = ownerName)
    client.alterTable(newTable)
    assert(client.getTable("default", "src").owner === ownerName)
    // mocking the owner is empty; the client's user name should fill in
    val newTable2 = originalTable.copy(owner = "")
    client.alterTable(newTable2)
    assert(client.getTable("default", "src").owner === client.userName)
  }

  test("alterTable(dbName: String, tableName: String, table: CatalogTable)") {
    val newTable = client.getTable("default", "src").copy(properties = Map("changedAgain" -> ""))
    client.alterTable("default", "src", newTable)
    assert(client.getTable("default", "src").properties.contains("changedAgain"))
  }

  test("alterTable - rename") {
    // Rename src -> tgt within the same database.
    val newTable = client.getTable("default", "src")
      .copy(identifier = TableIdentifier("tgt", database = Some("default")))
    assert(!client.tableExists("default", "tgt"))

    client.alterTable("default", "src", newTable)
    assert(client.tableExists("default", "tgt"))
    assert(!client.tableExists("default", "src"))
  }

  test("alterTable - change database") {
    // Re-create the "temporary" database dropped earlier by "dropDatabase".
    val tempDB = CatalogDatabase(
      "temporary", description = "test create", tempDatabasePath, Map())
    client.createDatabase(tempDB, ignoreIfExists = true)

    val newTable = client.getTable("default", "tgt")
      .copy(identifier = TableIdentifier("tgt", database = Some("temporary")))
    assert(!client.tableExists("temporary", "tgt"))

    client.alterTable("default", "tgt", newTable)
    assert(client.tableExists("temporary", "tgt"))
    assert(!client.tableExists("default", "tgt"))
  }

  test("alterTable - change database and table names") {
    // Move temporary.tgt back to default.src for the remaining tests.
    val newTable = client.getTable("temporary", "tgt")
      .copy(identifier = TableIdentifier("src", database = Some("default")))
    assert(!client.tableExists("default", "src"))

    client.alterTable("temporary", "tgt", newTable)
    assert(client.tableExists("default", "src"))
    assert(!client.tableExists("temporary", "tgt"))
  }

  test("listTables(database)") {
    assert(client.listTables("default") === Seq("src", "temporary", "view1"))
  }

  test("listTables(database, pattern)") {
    assert(client.listTables("default", pattern = "src") === Seq("src"))
    assert(client.listTables("default", pattern = "nonexist").isEmpty)
  }

  test("listTablesByType(database, pattern, tableType)") {
    assert(client.listTablesByType("default", pattern = "view1",
      CatalogTableType.VIEW) === Seq("view1"))
    assert(client.listTablesByType("default", pattern = "nonexist",
      CatalogTableType.VIEW).isEmpty)
  }

  test("dropTable") {
    // Versions declared before "0.14" in allVersions do not support PURGE.
    val versionsWithoutPurge =
      if (allVersions.contains("0.14")) allVersions.takeWhile(_ != "0.14") else Nil
    // First try with the purge option set. This should fail if the version is < 0.14, in which
    // case we check the version and try without it.
    try {
      client.dropTable("default", tableName = "temporary", ignoreIfNotExists = false,
        purge = true)
      assert(!versionsWithoutPurge.contains(version))
    } catch {
      case _: UnsupportedOperationException =>
        assert(versionsWithoutPurge.contains(version))
        client.dropTable("default", tableName = "temporary", ignoreIfNotExists = false,
          purge = false)
    }
    // Drop table with type CatalogTableType.VIEW.
    try {
      client.dropTable("default", tableName = "view1", ignoreIfNotExists = false,
        purge = true)
      assert(!versionsWithoutPurge.contains(version))
    } catch {
      case _: UnsupportedOperationException =>
        client.dropTable("default", tableName = "view1", ignoreIfNotExists = false,
          purge = false)
    }
    assert(client.listTables("default") === Seq("src"))
  }
  ///////////////////////////////////////////////////////////////////////////
  // Partition related API
  ///////////////////////////////////////////////////////////////////////////

  // A storage descriptor with every field unset, used when creating partitions.
  private val storageFormat = CatalogStorageFormat(
    locationUri = None,
    inputFormat = None,
    outputFormat = None,
    serde = None,
    compressed = false,
    properties = Map.empty)

  test("sql create partitioned table") {
    // src_part(value) partitioned by (key1, key2); used by all tests below.
    val table = CatalogTable(
      identifier = TableIdentifier("src_part", Some("default")),
      tableType = CatalogTableType.MANAGED,
      schema = new StructType().add("value", "int").add("key1", "int").add("key2", "int"),
      partitionColumnNames = Seq("key1", "key2"),
      storage = CatalogStorageFormat(
        locationUri = None,
        inputFormat = Some(classOf[TextInputFormat].getName),
        outputFormat = Some(classOf[HiveIgnoreKeyTextOutputFormat[_, _]].getName),
        serde = Some(classOf[LazySimpleSerDe].getName),
        compressed = false,
        properties = Map.empty
      ))
    client.createTable(table, ignoreIfExists = false)
  }

  // Number of partitions created below: key1=1/key2=1 .. key1=1/key2=2.
  val testPartitionCount = 2

  test("createPartitions") {
    val partitions = (1 to testPartitionCount).map { key2 =>
      CatalogTablePartition(Map("key1" -> "1", "key2" -> key2.toString), storageFormat)
    }
    client.createPartitions(
      "default", "src_part", partitions, ignoreIfExists = true)
  }

  test("getPartitionNames(catalogTable)") {
    val partitionNames = (1 to testPartitionCount).map(key2 => s"key1=1/key2=$key2")
    assert(partitionNames == client.getPartitionNames(client.getTable("default", "src_part")))
  }

  test("getPartitions(db, table, spec)") {
    assert(testPartitionCount ==
      client.getPartitions("default", "src_part", None).size)
  }

  test("getPartitionsByFilter") {
    // Only one partition [1, 1] for key2 == 1
    val result = client.getPartitionsByFilter(client.getTable("default", "src_part"),
      Seq(EqualTo(AttributeReference("key2", IntegerType)(), Literal(1))))

    // Hive 0.12 doesn't support getPartitionsByFilter, it ignores the filter condition.
    if (version != "0.12") {
      assert(result.size == 1)
    } else {
      assert(result.size == testPartitionCount)
    }
  }

  test("getPartition") {
    // No exception should be thrown
    client.getPartition("default", "src_part", Map("key1" -> "1", "key2" -> "2"))
  }

  test("getPartitionOption(db: String, table: String, spec: TablePartitionSpec)") {
    val partition = client.getPartitionOption(
      "default", "src_part", Map("key1" -> "1", "key2" -> "2"))
    assert(partition.isDefined)
  }

  test("getPartitionOption(table: CatalogTable, spec: TablePartitionSpec)") {
    val partition = client.getPartitionOption(
      client.getTable("default", "src_part"), Map("key1" -> "1", "key2" -> "2"))
    assert(partition.isDefined)
  }

  test("getPartitions(db: String, table: String)") {
    assert(testPartitionCount == client.getPartitions("default", "src_part", None).size)
  }

  test("loadPartition") {
    // Hive expects an ordered spec map (partition column order matters).
    val partSpec = new java.util.LinkedHashMap[String, String]
    partSpec.put("key1", "1")
    partSpec.put("key2", "2")

    client.loadPartition(
      emptyDir,
      "default",
      "src_part",
      partSpec,
      replace = false,
      inheritTableSpecs = false,
      isSrcLocal = false)
  }

  test("loadDynamicPartitions") {
    val partSpec = new java.util.LinkedHashMap[String, String]
    partSpec.put("key1", "1")
    partSpec.put("key2", "") // Dynamic partition

    client.loadDynamicPartitions(
      emptyDir,
      "default",
      "src_part",
      partSpec,
      replace = false,
      numDP = 1)
  }

  test("renamePartitions") {
    val oldSpec = Map("key1" -> "1", "key2" -> "1")
    val newSpec = Map("key1" -> "1", "key2" -> "3")
    client.renamePartitions("default", "src_part", Seq(oldSpec), Seq(newSpec))

    // Checks the existence of the new partition (key1 = 1, key2 = 3)
    assert(client.getPartitionOption("default", "src_part", newSpec).isDefined)
  }

  test("alterPartitions") {
    val spec = Map("key1" -> "1", "key2" -> "2")
    val parameters = Map(StatsSetupConst.TOTAL_SIZE -> "0", StatsSetupConst.NUM_FILES -> "1")
    val newLocation = new URI(Utils.createTempDir().toURI.toString.stripSuffix("/"))
    val storage = storageFormat.copy(
      locationUri = Some(newLocation),
      // needed for 0.12 alter partitions
      serde = Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
    val partition = CatalogTablePartition(spec, storage, parameters)
    client.alterPartitions("default", "src_part", Seq(partition))
    // Both the new location and the stats parameters must be persisted.
    assert(client.getPartition("default", "src_part", spec)
      .storage.locationUri.contains(newLocation))
    assert(client.getPartition("default", "src_part", spec)
      .parameters.get(StatsSetupConst.TOTAL_SIZE).contains("0"))
  }

  test("dropPartitions") {
    val spec = Map("key1" -> "1", "key2" -> "3")
    val versionsWithoutPurge =
      if (allVersions.contains("1.2")) allVersions.takeWhile(_ != "1.2") else Nil
    // Similar to dropTable; try with purge set, and if it fails, make sure we're running
    // with a version that is older than the minimum (1.2 in this case).
    try {
      client.dropPartitions("default", "src_part", Seq(spec), ignoreIfNotExists = true,
        purge = true, retainData = false)
      assert(!versionsWithoutPurge.contains(version))
    } catch {
      case _: UnsupportedOperationException =>
        assert(versionsWithoutPurge.contains(version))
        client.dropPartitions("default", "src_part", Seq(spec), ignoreIfNotExists = true,
          purge = false, retainData = false)
    }

    assert(client.getPartitionOption("default", "src_part", spec).isEmpty)
  }

  test("createPartitions if already exists") {
    val partitions = Seq(CatalogTablePartition(
      Map("key1" -> "101", "key2" -> "102"),
      storageFormat))
    try {
      client.createPartitions("default", "src_part", partitions, ignoreIfExists = false)
      val errMsg = intercept[PartitionsAlreadyExistException] {
        client.createPartitions("default", "src_part", partitions, ignoreIfExists = false)
      }.getMessage
      assert(errMsg.contains("partitions already exists"))
    } finally {
      // Clean up so this test leaves the partition set unchanged.
      client.dropPartitions(
        "default",
        "src_part",
        partitions.map(_.spec),
        ignoreIfNotExists = true,
        purge = false,
        retainData = false)
    }
  }

  ///////////////////////////////////////////////////////////////////////////
  // Function related API
  ///////////////////////////////////////////////////////////////////////////
def function(name: String, className: String): CatalogFunction = {
CatalogFunction(
FunctionIdentifier(name, Some("default")), className, Seq.empty[FunctionResource])
}
  test("createFunction") {
    val functionClass = "org.apache.spark.MyFunc1"
    if (version == "0.12") {
      // Hive 0.12 doesn't support creating permanent functions
      intercept[AnalysisException] {
        client.createFunction("default", function("func1", functionClass))
      }
    } else {
      client.createFunction("default", function("func1", functionClass))
    }
  }

  test("functionExists") {
    if (version == "0.12") {
      // Hive 0.12 doesn't allow customized permanent functions
      assert(!client.functionExists("default", "func1"))
    } else {
      assert(client.functionExists("default", "func1"))
    }
  }

  test("renameFunction") {
    if (version == "0.12") {
      // Hive 0.12 doesn't allow customized permanent functions
      intercept[NoSuchPermanentFunctionException] {
        client.renameFunction("default", "func1", "func2")
      }
    } else {
      // func1 (created above) becomes func2; later tests operate on func2.
      client.renameFunction("default", "func1", "func2")
      assert(client.functionExists("default", "func2"))
    }
  }

  test("alterFunction") {
    val functionClass = "org.apache.spark.MyFunc2"
    if (version == "0.12") {
      // Hive 0.12 doesn't allow customized permanent functions
      intercept[NoSuchPermanentFunctionException] {
        client.alterFunction("default", function("func2", functionClass))
      }
    } else {
      client.alterFunction("default", function("func2", functionClass))
    }
  }

  test("getFunction") {
    if (version == "0.12") {
      // Hive 0.12 doesn't allow customized permanent functions
      intercept[NoSuchPermanentFunctionException] {
        client.getFunction("default", "func2")
      }
    } else {
      // No exception should be thrown; class name reflects "alterFunction".
      val func = client.getFunction("default", "func2")
      assert(func.className == "org.apache.spark.MyFunc2")
    }
  }

  test("getFunctionOption") {
    if (version == "0.12") {
      // Hive 0.12 doesn't allow customized permanent functions
      assert(client.getFunctionOption("default", "func2").isEmpty)
    } else {
      assert(client.getFunctionOption("default", "func2").isDefined)
      assert(client.getFunctionOption("default", "the_func_not_exists").isEmpty)
    }
  }

  test("listFunctions") {
    if (version == "0.12") {
      // Hive 0.12 doesn't allow customized permanent functions
      assert(client.listFunctions("default", "fun.*").isEmpty)
    } else {
      assert(client.listFunctions("default", "fun.*").size == 1)
    }
  }

  test("dropFunction") {
    if (version == "0.12") {
      // Hive 0.12 doesn't support creating permanent functions
      intercept[NoSuchPermanentFunctionException] {
        client.dropFunction("default", "func2")
      }
    } else {
      // No exception should be thrown
      client.dropFunction("default", "func2")
      assert(client.listFunctions("default", "fun.*").isEmpty)
    }
  }
  ///////////////////////////////////////////////////////////////////////////
  // SQL related API
  ///////////////////////////////////////////////////////////////////////////

  test("sql set command") {
    client.runSqlHive("SET spark.sql.test.key=1")
  }

  test("sql create index and reset") {
    // HIVE-18448 Since Hive 3.0, INDEX is not supported.
    // NOTE(review): despite the test name, nothing is reset here.
    if (version != "3.0" && version != "3.1") {
      client.runSqlHive("CREATE TABLE indexed_table (key INT)")
      client.runSqlHive("CREATE INDEX index_1 ON TABLE indexed_table(key) " +
        "as 'COMPACT' WITH DEFERRED REBUILD")
    }
  }

  test("sql read hive materialized view") {
    // HIVE-14249 Since Hive 2.3.0, materialized view is supported.
    if (version == "2.3" || version == "3.0" || version == "3.1") {
      // Since Hive 3.0(HIVE-19383), we can not run local MR by `client.runSqlHive` with JDK 11.
      assume(version == "2.3" || !SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_9))
      // Since HIVE-18394(Hive 3.1), "Create Materialized View" should default to rewritable ones
      val disableRewrite = if (version == "2.3" || version == "3.0") "" else "DISABLE REWRITE"
      client.runSqlHive("CREATE TABLE materialized_view_tbl (c1 INT)")
      client.runSqlHive(
        s"CREATE MATERIALIZED VIEW mv1 $disableRewrite AS SELECT * FROM materialized_view_tbl")
      // Spark itself must refuse to read a Hive materialized view.
      val e = intercept[AnalysisException](versionSpark.table("mv1").collect()).getMessage
      assert(e.contains("Hive materialized view is not supported"))
    }
  }

  ///////////////////////////////////////////////////////////////////////////
  // Miscellaneous API
  ///////////////////////////////////////////////////////////////////////////

  test("version") {
    assert(client.version.fullVersion.startsWith(version))
  }

  test("getConf") {
    // "test" -> "success" was set on the Configuration in "create client".
    assert("success" === client.getConf("test", null))
  }

  test("setOut") {
    client.setOut(new PrintStream(new ByteArrayOutputStream()))
  }

  test("setInfo") {
    client.setInfo(new PrintStream(new ByteArrayOutputStream()))
  }

  test("setError") {
    client.setError(new PrintStream(new ByteArrayOutputStream()))
  }

  test("newSession") {
    val newClient = client.newSession()
    assert(newClient != null)
  }

  test("withHiveState and addJar") {
    val newClassPath = "."
    client.addJar(newClassPath)
    client.withHiveState {
      // No exception should be thrown.
      // withHiveState changes the classloader to MutableURLClassLoader
      val classLoader = Thread.currentThread().getContextClassLoader
        .asInstanceOf[MutableURLClassLoader]
      val urls = classLoader.getURLs
      // NOTE(review): the result of `contains` is not asserted — consider
      // wrapping it in assert(...) so a missing URL actually fails the test.
      urls.contains(new File(newClassPath).toURI.toURL)
    }
  }

  test("reset") {
    // Clears all database, tables, functions...
    client.reset()
    assert(client.listTables("default").isEmpty)
  }
  ///////////////////////////////////////////////////////////////////////////
  // End-To-End tests
  ///////////////////////////////////////////////////////////////////////////

  test("CREATE TABLE AS SELECT") {
    withTable("tbl") {
      versionSpark.sql("CREATE TABLE tbl AS SELECT 1 AS a")
      assert(versionSpark.table("tbl").collect().toSeq == Seq(Row(1)))
      val tableMeta = versionSpark.sessionState.catalog.getTableMetadata(TableIdentifier("tbl"))
      val totalSize = tableMeta.stats.map(_.sizeInBytes)
      // Except 0.12, all the following versions will fill the Hive-generated statistics
      if (version == "0.12") {
        assert(totalSize.isEmpty)
      } else {
        assert(totalSize.nonEmpty && totalSize.get > 0)
      }
    }
  }

  test("CREATE Partitioned TABLE AS SELECT") {
    withTable("tbl") {
      versionSpark.sql(
        """
          |CREATE TABLE tbl(c1 string)
          |USING hive
          |PARTITIONED BY (ds STRING)
        """.stripMargin)
      versionSpark.sql("INSERT OVERWRITE TABLE tbl partition (ds='2') SELECT '1'")

      assert(versionSpark.table("tbl").collect().toSeq == Seq(Row("1", "2")))
      val partMeta = versionSpark.sessionState.catalog.getPartition(
        TableIdentifier("tbl"), spec = Map("ds" -> "2")).parameters
      val totalSize = partMeta.get(StatsSetupConst.TOTAL_SIZE).map(_.toLong)
      val numFiles = partMeta.get(StatsSetupConst.NUM_FILES).map(_.toLong)
      // Except 0.12, all the following versions will fill the Hive-generated statistics
      if (version == "0.12") {
        assert(totalSize.isEmpty && numFiles.isEmpty)
      } else {
        assert(totalSize.nonEmpty && numFiles.nonEmpty)
      }

      // Altering serde properties must not wipe out the partition statistics.
      versionSpark.sql(
        """
          |ALTER TABLE tbl PARTITION (ds='2')
          |SET SERDEPROPERTIES ('newKey' = 'vvv')
        """.stripMargin)
      val newPartMeta = versionSpark.sessionState.catalog.getPartition(
        TableIdentifier("tbl"), spec = Map("ds" -> "2")).parameters

      val newTotalSize = newPartMeta.get(StatsSetupConst.TOTAL_SIZE).map(_.toLong)
      val newNumFiles = newPartMeta.get(StatsSetupConst.NUM_FILES).map(_.toLong)
      // Except 0.12, all the following versions will fill the Hive-generated statistics
      if (version == "0.12") {
        assert(newTotalSize.isEmpty && newNumFiles.isEmpty)
      } else {
        assert(newTotalSize.nonEmpty && newNumFiles.nonEmpty)
      }
    }
  }

  test("Delete the temporary staging directory and files after each insert") {
    withTempDir { tmpDir =>
      withTable("tab") {
        versionSpark.sql(
          s"""
             |CREATE TABLE tab(c1 string)
             |location '${tmpDir.toURI.toString}'
           """.stripMargin)

        (1 to 3).map { i =>
          versionSpark.sql(s"INSERT OVERWRITE TABLE tab SELECT '$i'")
        }
        // Recursively collects all file names under `path`.
        def listFiles(path: File): List[String] = {
          val dir = path.listFiles()
          val folders = dir.filter(_.isDirectory).toList
          val filePaths = dir.map(_.getName).toList
          folders.flatMap(listFiles) ++: filePaths
        }
        // expect 2 files left: `.part-00000-random-uuid.crc` and `part-00000-random-uuid`
        // 0.12, 0.13, 1.0 and 1.1 also has another two more files ._SUCCESS.crc and _SUCCESS
        val metadataFiles = Seq("._SUCCESS.crc", "_SUCCESS")
        assert(listFiles(tmpDir).filterNot(metadataFiles.contains).length == 2)
      }
    }
  }

  test("SPARK-13709: reading partitioned Avro table with nested schema") {
    withTempDir { dir =>
      val path = dir.toURI.toString
      val tableName = "spark_13709"
      val tempTableName = "spark_13709_temp"

      new File(dir.getAbsolutePath, tableName).mkdir()
      new File(dir.getAbsolutePath, tempTableName).mkdir()

      val avroSchema =
        """{
          |  "name": "test_record",
          |  "type": "record",
          |  "fields": [ {
          |    "name": "f0",
          |    "type": "int"
          |  }, {
          |    "name": "f1",
          |    "type": {
          |      "type": "record",
          |      "name": "inner",
          |      "fields": [ {
          |        "name": "f10",
          |        "type": "int"
          |      }, {
          |        "name": "f11",
          |        "type": "double"
          |      } ]
          |    }
          |  } ]
          |}
        """.stripMargin

      withTable(tableName, tempTableName) {
        // Creates the external partitioned Avro table to be tested.
        versionSpark.sql(
          s"""CREATE EXTERNAL TABLE $tableName
             |PARTITIONED BY (ds STRING)
             |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
             |STORED AS
             |  INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
             |  OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
             |LOCATION '$path/$tableName'
             |TBLPROPERTIES ('avro.schema.literal' = '$avroSchema')
           """.stripMargin
        )

        // Creates an temporary Avro table used to prepare testing Avro file.
        versionSpark.sql(
          s"""CREATE EXTERNAL TABLE $tempTableName
             |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
             |STORED AS
             |  INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
             |  OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
             |LOCATION '$path/$tempTableName'
             |TBLPROPERTIES ('avro.schema.literal' = '$avroSchema')
           """.stripMargin
        )

        // Generates Avro data.
        versionSpark.sql(s"INSERT OVERWRITE TABLE $tempTableName SELECT 1, STRUCT(2, 2.5)")

        // Adds generated Avro data as a new partition to the testing table.
        versionSpark.sql(
          s"ALTER TABLE $tableName ADD PARTITION (ds = 'foo') LOCATION '$path/$tempTableName'")

        // The following query fails before SPARK-13709 is fixed. This is because when reading
        // data from table partitions, Avro deserializer needs the Avro schema, which is defined
        // in table property "avro.schema.literal". However, we only initializes the deserializer
        // using partition properties, which doesn't include the wanted property entry. Merging
        // two sets of properties solves the problem.
        assert(versionSpark.sql(s"SELECT * FROM $tableName").collect() ===
          Array(Row(1, Row(2, 2.5D), "foo")))
      }
    }
  }

  test("CTAS for managed data source tables") {
    withTable("t", "t1") {
      versionSpark.range(1).write.saveAsTable("t")
      assert(versionSpark.table("t").collect() === Array(Row(0)))
      versionSpark.sql("create table t1 using parquet as select 2 as a")
      assert(versionSpark.table("t1").collect() === Array(Row(2)))
    }
  }
  test("Decimal support of Avro Hive serde") {
    val tableName = "tab1"
    // TODO: add the other logical types. For details, see the link:
    // https://avro.apache.org/docs/1.8.1/spec.html#Logical+Types
    val avroSchema =
      """{
        |  "name": "test_record",
        |  "type": "record",
        |  "fields": [ {
        |    "name": "f0",
        |    "type": [
        |      "null",
        |      {
        |        "precision": 38,
        |        "scale": 2,
        |        "type": "bytes",
        |        "logicalType": "decimal"
        |      }
        |    ]
        |  } ]
        |}
      """.stripMargin

    Seq(true, false).foreach { isPartitioned =>
      withTable(tableName) {
        val partitionClause = if (isPartitioned) "PARTITIONED BY (ds STRING)" else ""
        // Creates the (non-)partitioned Avro table
        versionSpark.sql(
          s"""
             |CREATE TABLE $tableName
             |$partitionClause
             |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
             |STORED AS
             |  INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
             |  OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
             |TBLPROPERTIES ('avro.schema.literal' = '$avroSchema')
           """.stripMargin
        )
        // On 0.12/0.13 the insert is expected to fail the decimal-to-binary cast;
        // later versions round the value to the schema's scale (1.3 -> 1.30).
        val errorMsg = "Cannot safely cast 'f0': decimal(2,1) to binary"

        if (isPartitioned) {
          val insertStmt = s"INSERT OVERWRITE TABLE $tableName partition (ds='a') SELECT 1.3"
          if (version == "0.12" || version == "0.13") {
            val e = intercept[AnalysisException](versionSpark.sql(insertStmt)).getMessage
            assert(e.contains(errorMsg))
          } else {
            versionSpark.sql(insertStmt)
            assert(versionSpark.table(tableName).collect() ===
              versionSpark.sql("SELECT 1.30, 'a'").collect())
          }
        } else {
          val insertStmt = s"INSERT OVERWRITE TABLE $tableName SELECT 1.3"
          if (version == "0.12" || version == "0.13") {
            val e = intercept[AnalysisException](versionSpark.sql(insertStmt)).getMessage
            assert(e.contains(errorMsg))
          } else {
            versionSpark.sql(insertStmt)
            assert(versionSpark.table(tableName).collect() ===
              versionSpark.sql("SELECT 1.30").collect())
          }
        }
      }
    }
  }

  test("read avro file containing decimal") {
    // Test fixture shipped on the test classpath.
    val url = Thread.currentThread().getContextClassLoader.getResource("avroDecimal")
    val location = new File(url.getFile).toURI.toString

    val tableName = "tab1"
    val avroSchema =
      """{
        |  "name": "test_record",
        |  "type": "record",
        |  "fields": [ {
        |    "name": "f0",
        |    "type": [
        |      "null",
        |      {
        |        "precision": 38,
        |        "scale": 2,
        |        "type": "bytes",
        |        "logicalType": "decimal"
        |      }
        |    ]
        |  } ]
        |}
      """.stripMargin
    withTable(tableName) {
      versionSpark.sql(
        s"""
           |CREATE TABLE $tableName
           |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
           |WITH SERDEPROPERTIES ('respectSparkSchema' = 'true')
           |STORED AS
           |  INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
           |  OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
           |LOCATION '$location'
           |TBLPROPERTIES ('avro.schema.literal' = '$avroSchema')
         """.stripMargin
      )
      assert(versionSpark.table(tableName).collect() ===
        versionSpark.sql("SELECT 1.30").collect())
    }
  }

  test("SPARK-17920: Insert into/overwrite avro table") {
    // skipped because it's failed in the condition on Windows
    assume(!(Utils.isWindows && version == "0.12"))
    withTempDir { dir =>
      val avroSchema =
        """
          |{
          |  "name": "test_record",
          |  "type": "record",
          |  "fields": [{
          |    "name": "f0",
          |    "type": [
          |      "null",
          |      {
          |        "precision": 38,
          |        "scale": 2,
          |        "type": "bytes",
          |        "logicalType": "decimal"
          |      }
          |    ]
          |  }]
          |}
        """.stripMargin
      // Write the schema to a file and reference it via avro.schema.url below.
      val schemaFile = new File(dir, "avroDecimal.avsc")
      Utils.tryWithResource(new PrintWriter(schemaFile)) { writer =>
        writer.write(avroSchema)
      }
      val schemaPath = schemaFile.toURI.toString

      val url = Thread.currentThread().getContextClassLoader.getResource("avroDecimal")
      val srcLocation = new File(url.getFile).toURI.toString
      val destTableName = "tab1"
      val srcTableName = "tab2"

      withTable(srcTableName, destTableName) {
        versionSpark.sql(
          s"""
             |CREATE EXTERNAL TABLE $srcTableName
             |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
             |WITH SERDEPROPERTIES ('respectSparkSchema' = 'true')
             |STORED AS
             |  INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
             |  OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
             |LOCATION '$srcLocation'
             |TBLPROPERTIES ('avro.schema.url' = '$schemaPath')
           """.stripMargin
        )

        versionSpark.sql(
          s"""
             |CREATE TABLE $destTableName
             |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
             |WITH SERDEPROPERTIES ('respectSparkSchema' = 'true')
             |STORED AS
             |  INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
             |  OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
             |TBLPROPERTIES ('avro.schema.url' = '$schemaPath')
           """.stripMargin
        )
        // INSERT OVERWRITE replaces the content ...
        versionSpark.sql(
          s"""INSERT OVERWRITE TABLE $destTableName SELECT * FROM $srcTableName""")
        val result = versionSpark.table(srcTableName).collect()
        assert(versionSpark.table(destTableName).collect() === result)
        // ... while INSERT INTO appends to it.
        versionSpark.sql(
          s"""INSERT INTO TABLE $destTableName SELECT * FROM $srcTableName""")
        assert(versionSpark.table(destTableName).collect().toSeq === result ++ result)
      }
    }
  }
  // TODO: add more tests.
}
| ueshin/apache-spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientSuite.scala | Scala | apache-2.0 | 39,915 |
package org.judal.storage.scala
import javax.jdo.FetchGroup
import javax.jdo.JDOUserException
import org.judal.storage.Param
import org.judal.storage.EngineFactory
import org.judal.storage.table.AbstractIndexableTableOperation
import org.judal.storage.table.Record
import org.judal.storage.table.RecordSet
import org.judal.storage.table.TableDataSource
import org.judal.storage.query.SortDirection.{ASC,DESC,same}
import scala.collection.JavaConverters._
class IndexableTableOperation[R >: Null <: Record](dataSource: TableDataSource, record: R) extends AbstractIndexableTableOperation[R](dataSource: TableDataSource, record: R) {
def this(record: R) = this(EngineFactory.getDefaultTableDataSource, record)
def this(dataSource: TableDataSource ) = this(dataSource, null);
  /** Fetches at most `maxrows` records starting at `offset` that match all `keys`,
    * using the current record's fetch group to select which columns are retrieved. */
  override def fetch(maxrows: Int, offset: Int, keys: Param*) : Iterable[R] = {
    getTable.fetch(getRecord.fetchGroup, maxrows, offset, keys: _*).asScala
  }

  /** Fetches every record whose `columnName` equals `valueSearched`, in storage order. */
  override def fetch(fetchGroup: FetchGroup, columnName: String, valueSearched: AnyRef) : Iterable[R] = {
    getTable.fetch(fetchGroup, columnName, valueSearched).asScala
  }

  /** Fetches every record whose `columnName` equals `valueSearched`, sorted
    * ascending by `sortByColumn` (sorting is applied to the result set in memory). */
  override def fetchAsc(fetchGroup: FetchGroup, columnName: String, valueSearched: AnyRef, sortByColumn: String) : Iterable[R] = {
    val retval : RecordSet[R] = getTable.fetch(fetchGroup, columnName, valueSearched)
    retval.sort(sortByColumn)
    retval.asScala
  }

  /** Same as [[fetchAsc]] but sorted descending by `sortByColumn`. */
  override def fetchDesc(fetchGroup: FetchGroup, columnName: String, valueSearched: AnyRef, sortByColumn: String) : Iterable[R] = {
    val retval : RecordSet[R] = getTable.fetch(fetchGroup, columnName, valueSearched)
    retval.sortDesc(sortByColumn)
    retval.asScala
  }
  /** Fetches the first record whose `columnName` equals `valueSearched`.
    *
    * `sortBy` may be empty (first record in storage order), a single column name
    * (sorted ascending), or a column name followed by a direction ("ASC"/"DESC").
    * Elements beyond the first two are ignored.
    *
    * @return the first matching record, or `null` when nothing matches
    * @throws JDOUserException if the second `sortBy` element is not a recognized direction
    */
  override def fetchFirst(fetchGroup: FetchGroup , columnName: String , valueSearched: Any, sortBy: String*) : R = {
    var retval: R = null
    var rst: RecordSet[R] = getTable.fetch(fetchGroup, columnName, valueSearched)
    if (rst.size()>0) {
      // A lone column name implies ascending order.
      if (sortBy!=null && sortBy.length==1)
        rst.sort(sortBy(0))
      else if (sortBy!=null && sortBy.length>1) {
        if (same(ASC,sortBy(1)))
          rst.sort(sortBy(0))
        else if (same(DESC,sortBy(1)))
          rst.sortDesc(sortBy(0))
        else
          throw new JDOUserException("Unrecognized sort direction " + sortBy(1))
      }
      retval = rst.get(0)
    }
    retval
  }
} | sergiomt/judal | scala-adaptor/src/main/scala/org/judal/storage/scala/IndexableTableOperation.scala | Scala | apache-2.0 | 2,263 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.serving.core.helpers
import java.io.File
import org.junit.runner.RunWith
import org.mockito.Mockito._
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FlatSpec, Matchers}
import scala.collection.mutable
@RunWith(classOf[JUnitRunner])
/**
 * Tests for JarsHelper.findDriverByPath using mocked directories.
 * Note: the mocks are configured at class-construction time (not inside the
 * tests), so both fixtures exist before either test runs.
 */
class JarsHelpersTest extends FlatSpec with Matchers with MockitoSugar {

  // Directory fixture containing exactly one file named sparta-driver.jar.
  val file = mock[File]
  when(file.exists).thenReturn(true)
  when(file.listFiles()).thenReturn(Array(
    new File("first.jar"),
    new File("second.jar"),
    new File("sparta-driver.jar")))

  it should "find the driver jar" in {
    val seqofJars = JarsHelper.findDriverByPath(
      file)
    // Only the file literally named sparta-driver.jar is expected back.
    seqofJars should be (mutable.ArraySeq(new File("sparta-driver.jar")))
  }

  // Directory fixture with near-miss names: wrong basename or wrong extension.
  val fileNoSpartaDriver = mock[File]
  when(fileNoSpartaDriver.exists).thenReturn(true)
  when(fileNoSpartaDriver.listFiles()).thenReturn(Array(
    new File("sparta.jar"),
    new File("driver.jar"),
    new File("sparta-driver.txt"))
  )

  it should "return an empty sequence" in {
    val retrievedDrivers = JarsHelper.findDriverByPath(fileNoSpartaDriver)
    retrievedDrivers should equal(Seq.empty)
  }
}
| fjsc/sparta | serving-core/src/test/scala/com/stratio/sparta/serving/core/helpers/JarsHelpersTest.scala | Scala | apache-2.0 | 1,801 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.metrics
import scala.collection.mutable.ArrayBuffer
import com.codahale.metrics.MetricRegistry
import org.scalatest.{BeforeAndAfter, PrivateMethodTester}
import org.apache.spark.{SecurityManager, SparkConf, SparkFunSuite}
import org.apache.spark.deploy.master.MasterSource
import org.apache.spark.metrics.source.{Source, StaticSources}
/**
 * Tests for [[MetricsSystem]]: construction from a metrics properties file and
 * the naming rules of `buildRegistryName`, which prefixes a source's metric name
 * with "appId.executorId" only for the "driver" and "executor" instances when
 * both spark.app.id and spark.executor.id are set.
 */
class MetricsSystemSuite extends SparkFunSuite with BeforeAndAfter with PrivateMethodTester{
  var filePath: String = _
  var conf: SparkConf = null
  var securityMgr: SecurityManager = null

  before {
    // Fresh conf per test, pointing at the bundled test_metrics_system.properties.
    filePath = getClass.getClassLoader.getResource("test_metrics_system.properties").getFile
    conf = new SparkConf(false).set("spark.metrics.conf", filePath)
    securityMgr = new SecurityManager(conf)
  }

  test("MetricsSystem with default config") {
    val metricsSystem = MetricsSystem.createMetricsSystem("default", conf, securityMgr)
    metricsSystem.start()
    // 'sources and 'sinks are private fields, read via PrivateMethodTester.
    val sources = PrivateMethod[ArrayBuffer[Source]]('sources)
    val sinks = PrivateMethod[ArrayBuffer[Source]]('sinks)

    assert(metricsSystem.invokePrivate(sources()).length === StaticSources.allSources.length)
    assert(metricsSystem.invokePrivate(sinks()).length === 0)
    assert(metricsSystem.getServletHandlers.nonEmpty)
  }

  test("MetricsSystem with sources add") {
    val metricsSystem = MetricsSystem.createMetricsSystem("test", conf, securityMgr)
    metricsSystem.start()
    val sources = PrivateMethod[ArrayBuffer[Source]]('sources)
    val sinks = PrivateMethod[ArrayBuffer[Source]]('sinks)

    assert(metricsSystem.invokePrivate(sources()).length === StaticSources.allSources.length)
    assert(metricsSystem.invokePrivate(sinks()).length === 1)
    assert(metricsSystem.getServletHandlers.nonEmpty)

    // Registering one more source grows the private source list by exactly one.
    val source = new MasterSource(null)
    metricsSystem.registerSource(source)
    assert(metricsSystem.invokePrivate(sources()).length === StaticSources.allSources.length + 1)
  }

  test("MetricsSystem with Driver instance") {
    val source = new Source {
      override val sourceName = "dummySource"
      override val metricRegistry = new MetricRegistry()
    }

    val appId = "testId"
    val executorId = "driver"
    conf.set("spark.app.id", appId)
    conf.set("spark.executor.id", executorId)

    val instanceName = "driver"
    val driverMetricsSystem = MetricsSystem.createMetricsSystem(instanceName, conf, securityMgr)

    val metricName = driverMetricsSystem.buildRegistryName(source)
    assert(metricName === s"$appId.$executorId.${source.sourceName}")
  }

  test("MetricsSystem with Driver instance and spark.app.id is not set") {
    val source = new Source {
      override val sourceName = "dummySource"
      override val metricRegistry = new MetricRegistry()
    }

    val executorId = "driver"
    conf.set("spark.executor.id", executorId)

    val instanceName = "driver"
    val driverMetricsSystem = MetricsSystem.createMetricsSystem(instanceName, conf, securityMgr)

    // Without spark.app.id the name is left unprefixed.
    val metricName = driverMetricsSystem.buildRegistryName(source)
    assert(metricName === source.sourceName)
  }

  test("MetricsSystem with Driver instance and spark.executor.id is not set") {
    val source = new Source {
      override val sourceName = "dummySource"
      override val metricRegistry = new MetricRegistry()
    }

    val appId = "testId"
    conf.set("spark.app.id", appId)

    val instanceName = "driver"
    val driverMetricsSystem = MetricsSystem.createMetricsSystem(instanceName, conf, securityMgr)

    // Without spark.executor.id the name is left unprefixed.
    val metricName = driverMetricsSystem.buildRegistryName(source)
    assert(metricName === source.sourceName)
  }

  test("MetricsSystem with Executor instance") {
    val source = new Source {
      override val sourceName = "dummySource"
      override val metricRegistry = new MetricRegistry()
    }

    val appId = "testId"
    val executorId = "1"
    conf.set("spark.app.id", appId)
    conf.set("spark.executor.id", executorId)

    val instanceName = "executor"
    val driverMetricsSystem = MetricsSystem.createMetricsSystem(instanceName, conf, securityMgr)

    val metricName = driverMetricsSystem.buildRegistryName(source)
    assert(metricName === s"$appId.$executorId.${source.sourceName}")
  }

  test("MetricsSystem with Executor instance and spark.app.id is not set") {
    val source = new Source {
      override val sourceName = "dummySource"
      override val metricRegistry = new MetricRegistry()
    }

    val executorId = "1"
    conf.set("spark.executor.id", executorId)

    val instanceName = "executor"
    val driverMetricsSystem = MetricsSystem.createMetricsSystem(instanceName, conf, securityMgr)

    val metricName = driverMetricsSystem.buildRegistryName(source)
    assert(metricName === source.sourceName)
  }

  test("MetricsSystem with Executor instance and spark.executor.id is not set") {
    val source = new Source {
      override val sourceName = "dummySource"
      override val metricRegistry = new MetricRegistry()
    }

    val appId = "testId"
    conf.set("spark.app.id", appId)

    val instanceName = "executor"
    val driverMetricsSystem = MetricsSystem.createMetricsSystem(instanceName, conf, securityMgr)

    val metricName = driverMetricsSystem.buildRegistryName(source)
    assert(metricName === source.sourceName)
  }

  test("MetricsSystem with instance which is neither Driver nor Executor") {
    val source = new Source {
      override val sourceName = "dummySource"
      override val metricRegistry = new MetricRegistry()
    }

    val appId = "testId"
    val executorId = "dummyExecutorId"
    conf.set("spark.app.id", appId)
    conf.set("spark.executor.id", executorId)

    val instanceName = "testInstance"
    val driverMetricsSystem = MetricsSystem.createMetricsSystem(instanceName, conf, securityMgr)

    val metricName = driverMetricsSystem.buildRegistryName(source)

    // Even if spark.app.id and spark.executor.id are set, they are not used for the metric name.
    assert(metricName != s"$appId.$executorId.${source.sourceName}")
    assert(metricName === source.sourceName)
  }
}
| gioenn/xSpark | core/src/test/scala/org/apache/spark/metrics/MetricsSystemSuite.scala | Scala | apache-2.0 | 6,868 |
package com.twitter.finagle
import com.twitter.app.Flaggable
import com.twitter.io.Buf
import com.twitter.finagle.util.Showable
import java.nio.charset.Charset
import java.util.BitSet
/**
* A Path comprises a sequence of byte buffers naming a
* hierarchically-addressed object.
*
* @see The [[http://twitter.github.io/finagle/guide/Names.html#paths user guide]]
* for further details.
*/
case class Path(elems: Buf*) {
  // Elements are validated on construction: an empty buffer is never a legal element.
  require(elems.forall(Path.nonemptyBuf))

  /** True when `other` is a prefix of this path. */
  def startsWith(other: Path) = elems.startsWith(other.elems)

  /** The path consisting of the first `n` elements. */
  def take(n: Int) = Path(elems.take(n): _*)

  /** The path with the first `n` elements removed. */
  def drop(n: Int) = Path(elems.drop(n): _*)

  /** Concatenation; reuses `this` when `that` contributes no elements. */
  def ++(that: Path) =
    if (that.isEmpty) this
    else Path(elems ++ that.elems: _*)

  def size = elems.size

  def isEmpty = elems.isEmpty

  // Rendering is cached: each element is escaped once, then joined as "/a/b/c".
  lazy val showElems = elems.map(Path.showElem)
  lazy val show = showElems.mkString("/", "/", "")

  override def toString = "Path(" + showElems.mkString(",") + ")"
}
object Path {
  // Guard used by Path's constructor require: no element may be the empty buffer.
  private val nonemptyBuf: Buf => Boolean = !_.isEmpty

  implicit val showable: Showable[Path] = new Showable[Path] {
    def show(path: Path) = path.show
  }

  /**
   * implicit conversion from [[com.twitter.finagle.Path]] to
   * [[com.twitter.app.Flaggable]], allowing Paths to be easily used as
   * [[com.twitter.app.Flag]]s
   */
  implicit val flaggable: Flaggable[Path] = new Flaggable[Path] {
    override def default = None
    def parse(s: String) = Path.read(s)
    override def show(path: Path) = path.show
  }

  // The zero-element path.
  val empty = Path()

  private val Utf8Charset = Charset.forName("UTF-8")

  // The characters that may appear literally (unescaped) in a rendered path element.
  val showableChars: Seq[Char] =
    ('0' to '9') ++ ('A' to 'Z') ++ ('a' to 'z') ++ "_:.#$%-".toSeq

  // Bit set keyed by character code, giving O(1) membership tests in isShowable.
  private val charSet = {
    val bits = new BitSet(Byte.MaxValue+1)
    for (c <- showableChars)
      bits.set(c.toInt)
    bits
  }

  /**
   * Path elements follow the lexical convention of DNS, plus a few
   * extensions: "protocols mandate that component hostname labels
   * may contain only the ASCII letters 'a' through 'z' (in a
   * case-insensitive manner), the digits '0' through '9', and the
   * hyphen ('-')."
   */
  def isShowable(ch: Char): Boolean = charSet.get(ch.toInt)

  // True when the first `size` bytes are all showable characters.
  private[finagle] def showableAsString(bytes: Array[Byte], size: Int): Boolean = {
    var i = 0
    while (i < size) {
      if (!isShowable(bytes(i).toChar))
        return false
      i += 1
    }

    true
  }

  // We're extra careful with allocation here because any time
  // there are nonbase delegations, we need to serialize the paths
  // to strings
  private[finagle] val showElem: Buf => String = { buf =>
    val nbuf = buf.length
    val bytes = Buf.ByteArray.Owned.extract(buf)

    if (Path.showableAsString(bytes, nbuf))
      new String(bytes, 0, nbuf, Path.Utf8Charset)
    else {
      // Escape hatch: render each byte as a two-digit hex escape sequence.
      val str = new StringBuilder(nbuf * 4)
      var i = 0
      while (i < nbuf) {
        str.append("\\\\x")
        str.append(Integer.toString((bytes(i) >> 4) & 0xf, 16))
        str.append(Integer.toString(bytes(i) & 0xf, 16))
        i += 1
      }

      str.toString
    }
  }

  /**
   * Parse `s` as a path with concrete syntax
   *
   * {{{
   * path ::= '/' labels | '/'
   *
   * labels ::= label '/' labels | label
   *
   * label ::= (\\\\x[a-f0-9][a-f0-9]|[0-9A-Za-z:.#$%-_])+
   *
   * }}}
   *
   * for example
   *
   * {{{
   * /foo/bar/baz
   * /
   * }}}
   *
   * parses into the path
   *
   * {{{
   * Path(foo,bar,baz)
   * Path()
   * }}}
   *
   * @throws IllegalArgumentException when `s` is not a syntactically valid path.
   */
  def read(s: String): Path = NameTreeParsers.parsePath(s)

  /**
   * Utilities for constructing and pattern matching over
   * Utf8-typed paths.
   */
  object Utf8 {
    def apply(elems: String*): Path = {
      val elems8 = elems map { el => Buf.Utf8(el) }
      Path(elems8:_*)
    }

    // Extractor succeeds only when every element decodes as UTF-8.
    def unapplySeq(path: Path): Option[Seq[String]] = {
      val Path(elems@_*) = path
      val n = elems.size
      val elemss = new Array[String](n)
      var i = 0
      while (i < n) {
        elems(i) match {
          case Buf.Utf8(s) =>
            elemss(i) = s
          case _ =>
            return None
        }
        i += 1
      }
      Some(elemss)
    }
  }
}
| sveinnfannar/finagle | finagle-core/src/main/scala/com/twitter/finagle/Path.scala | Scala | apache-2.0 | 4,195 |
package com.awesomesauce.lib
/** Factory so a pair can be built without `new`. */
object Duplex {
  def apply[A, B](o1: A, o2: B): Duplex[A, B] = new Duplex(o1, o2)
}

/** An immutable two-element tuple exposed through the [[Product2]] interface. */
class Duplex[A, B](o1: A, o2: B) extends Product2[A, B] {
  def _1: A = o1
  def _2: B = o2

  // NOTE: the element types cannot be inspected at runtime (erasure), so this
  // only checks that `that` is some Duplex.
  def canEqual(that: Any): Boolean = that.isInstanceOf[Duplex[A, B]]
}
} | AwesomeSauceMods/AwesomeSauceCore | main/scala/com/awesomesauce/lib/Duplex.scala | Scala | mit | 270 |
import org.jmotor.tools.MavenSearchClient
import org.jmotor.tools.dto.MavenSearchRequest
import org.scalatest._
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
/**
 * Tests for [[MavenSearchClient]] against the org.scala-lang:scala-library
 * coordinates. NOTE(review): these tests appear to hit the live Maven search
 * service (they block up to 10-60s on the client's futures) — confirm network
 * availability is assumed when running them.
 */
class AppTest extends FunSuite {

  private[this] val client = MavenSearchClient()

  private val orgId = "org.scala-lang"
  private val artId = "scala-library"

  test("latestVersion") {
    val future = client.latestVersion(orgId, artId)
    val result = Await.result(future, 10.seconds)
    assert(result.nonEmpty)
  }

  test("To parameters") {
    // Pure test: checks the serialized query-string form of a search request.
    val request = MavenSearchRequest(Some(orgId), Some(artId), None)
    assert("""q=g:"org.scala-lang" AND a:"scala-library"&core=gav&rows=20&wt=json&start=0""" == request.toParameter)
  }

  test("Select All") {
    val future = client.selectAll(orgId, artId)
    val results = Await.result(future, 60.seconds)
    // Every returned artifact must carry the requested group and artifact ids.
    results.foreach { artifact ⇒
      assert(artifact.g == orgId)
      assert(artifact.a == artId)
    }
  }

  test("Search") {
    val request = MavenSearchRequest(Some(orgId), Some(artId), None)
    val future = client.search(request)
    val results = Await.result(future, 60.seconds)
    results.foreach { artifact ⇒
      assert(artifact.g == orgId)
      assert(artifact.a == artId)
    }
  }
}
| aiyanbo/search.maven.org-scala-sdk | src/test/scala/AppTest.scala | Scala | apache-2.0 | 1,306 |
package org.atnos
package origami
import org.specs2._
import folds._
import cats.implicits._
/**
 * Regression specs for type-inference issue #53 (existential types).
 * Each example only needs to compile: the expression under test is evaluated
 * and the spec then returns `ok` unconditionally.
 */
class InferenceSpec extends Specification { def is = s2"""
 issue #53 with existential types $e1
 another example for issue #53 $e2
"""

  // Tupling a mapped fold with another fold must infer without ascription.
  def e1 = {
    (mean[Double].map(_ * 100), plus[Double]).tupled
    ok
  }

  // Zipping two folds over the same input type must infer without ascription.
  def e2 = {
    folds.countUnique[String].zip(folds.count[String])
    ok
  }
}
| atnos-org/origami | lib/src/test/scala-2.13/org/atnos/origami/InferenceSpec.scala | Scala | mit | 396 |
/**
* The MIT License (MIT)
*
* Copyright (c) 2015 Sergio Gutiérrez Mota
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.serchinastico.charades.base.math
/** Small numeric helpers: midpoint and tolerance-based double comparison. */
object MathExtension {

  // Tolerance used when comparing two doubles for "equality".
  val floatComparisonDelta: Double = 0.00001

  /** Returns the value halfway between `a` and `b`. */
  def getMidValue(a: Double, b: Double): Double = (a + b) / 2d

  /** True when `a` and `b` differ by strictly less than [[floatComparisonDelta]]. */
  def equalsFloat(a: Double, b: Double): Boolean =
    math.abs(a - b) < floatComparisonDelta
}
| Serchinastico/Charades | src/main/scala/com/serchinastico/charades/base/math/MathExtension.scala | Scala | mit | 1,448 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.pages.ip2016
import forms.PSODetailsForm
import org.jsoup.Jsoup
import testHelpers.ViewSpecHelpers.CommonViewSpecHelper
import testHelpers.ViewSpecHelpers.ip2016.PsoDetailsViewMessages
import uk.gov.hmrc.play.views.html.helpers.{ErrorSummary, FormWithCSRF}
import views.html.pages.ip2016.psoDetails
/**
 * View tests for the IP2016 PSO details page: renders the template with a
 * bound form, parses the HTML with Jsoup, and asserts on titles, labels,
 * the form target, and error rendering for an empty submission.
 */
class PsoDetailsViewSpec extends CommonViewSpecHelper with PsoDetailsViewMessages {

  implicit val errorSummary: ErrorSummary = app.injector.instanceOf[ErrorSummary]
  implicit val formWithCSRF: FormWithCSRF = app.injector.instanceOf[FormWithCSRF]

  "the PsoDetailsView" should{
    // Valid form binding used for the "happy path" document.
    val pensionsForm = PSODetailsForm.psoDetailsForm.bind(Map(
      "psoDay" -> "1",
      "psoMonth" -> "2",
      "psoYear" -> "2017",
      "psoAmt" -> "12345"))
    lazy val view = application.injector.instanceOf[psoDetails]
    lazy val doc = Jsoup.parse(view.apply(pensionsForm).body)

    // Empty binding used to exercise the error-summary rendering.
    val errorForm =  PSODetailsForm.psoDetailsForm.bind(Map.empty[String, String])
    lazy val errorView = application.injector.instanceOf[psoDetails]
    lazy val errorDoc = Jsoup.parse(errorView.apply(errorForm).body)
    lazy val form = doc.select("form")

    "have the correct title" in{
      doc.title() shouldBe plaPsoDetailsTitle
    }

    "have the correct and properly formatted header"in{
      doc.select("h1").text shouldBe plaPsoDetailsTitle
    }

    "have the right headers for the PSO date and PSO amount" in{
      doc.select("h2").eq(0).text shouldBe plaPsoDetailsDateQuestionText
      doc.select("h2").eq(1).text shouldBe plaPsoDetailsPsoAmountQuestion
    }

    "have the right date hint message" in{
      doc.select("span.form-hint").text shouldBe plaPsoDetailsDateHintText
    }

    "have the right explanatory paragraph" in{
      doc.select("p").text shouldBe plaPsoDetailsVisitPTA
    }

    "have the right text above each textbox" in{
      doc.select("[for=psoDay]").text shouldBe plaBaseDateFieldsDay
      doc.select("[for=psoMonth]").text shouldBe plaBaseDateFieldsMonth
      doc.select("[for=psoYear]").text shouldBe plaBaseDateFieldsYear
    }

    "have a valid form" in{
      form.attr("method") shouldBe "POST"
      form.attr("action") shouldBe controllers.routes.IP2016Controller.submitPSODetails().url
    }

    "have a £ symbol present" in{
      doc.select(".poundSign").text shouldBe "£"
    }

    "have a continue button" in{
      doc.select("button").text shouldBe plaBaseContinue
      doc.select("button").attr("type") shouldBe "submit"
    }

    "display the correct errors appropriately" in{
      errorForm.hasErrors shouldBe true
      errorDoc.select("h2.h3-heading").text shouldBe plaBaseErrorSummaryLabel
      // One notification per missing date field, plus the required-amount error.
      errorDoc.select("span.error-notification").eq(0).text shouldBe plaBaseErrorsDayEmpty
      errorDoc.select("span.error-notification").eq(1).text shouldBe plaBaseErrorsMonthEmpty
      errorDoc.select("span.error-notification").eq(2).text shouldBe plaBaseErrorsYearEmpty
      errorDoc.select("span.error-notification").eq(3).text shouldBe errorRequired
    }

    "not have errors on valid pages" in{
      pensionsForm.hasErrors shouldBe false
      doc.select("span.error-notification").eq(0).text shouldBe ""
      doc.select("span.error-notification").eq(1).text shouldBe ""
      doc.select("span.error-notification").eq(2).text shouldBe ""
      doc.select("span.error-notification").eq(3).text shouldBe ""
    }
  }
}
| hmrc/pensions-lifetime-allowance-frontend | test/views/pages/ip2016/PsoDetailsViewSpec.scala | Scala | apache-2.0 | 4,002 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.models.rnn
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.dataset.{DataSet, SampleToBatch}
import com.intel.analytics.bigdl.dataset.text.LabeledSentenceToSample
import com.intel.analytics.bigdl.dataset.text._
import com.intel.analytics.bigdl.dataset.text.utils.SentenceToken
import com.intel.analytics.bigdl.nn.{CrossEntropyCriterion, Module, TimeDistributedCriterion}
import com.intel.analytics.bigdl.optim._
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.{Engine, T, Table}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric._
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkContext
/**
 * Entry point for training a character/word-level RNN language model on text.
 * Pipeline: tokenize train/val corpora, build a vocabulary dictionary, convert
 * tokens to padded one-hot sample batches, then run a BigDL Optimizer with a
 * time-distributed cross-entropy criterion.
 */
object Train {
  // Quieten noisy third-party loggers; keep BigDL optimizer progress at INFO.
  Logger.getLogger("org").setLevel(Level.ERROR)
  Logger.getLogger("akka").setLevel(Level.ERROR)
  Logger.getLogger("breeze").setLevel(Level.ERROR)
  Logger.getLogger("com.intel.analytics.bigdl.optim").setLevel(Level.INFO)

  import Utils._
  val logger = Logger.getLogger(getClass)

  def main(args: Array[String]): Unit = {
    trainParser.parse(args, new TrainParams()).map(param => {
      val conf = Engine.createSparkConf()
        .setAppName("Train rnn on text")
        .set("spark.task.maxFailures", "1")
      val sc = new SparkContext(conf)
      Engine.init

      // Tokenize the training corpus and build the vocabulary from it.
      val tokens = SequencePreprocess(
        param.dataFolder + "/train.txt",
        sc = sc,
        param.sentFile,
        param.tokenFile)

      val dictionary = Dictionary(tokens, param.vocabSize)
      dictionary.save(param.saveFolder)

      val maxTrainLength = tokens.map(x => x.length).max

      val valtokens = SequencePreprocess(
        param.dataFolder + "/val.txt",
        sc = sc,
        param.sentFile,
        param.tokenFile)
      val maxValLength = valtokens.map(x => x.length).max

      logger.info(s"maxTrain length = ${maxTrainLength}, maxVal = ${maxValLength}")

      // +1 reserves a slot beyond the dictionary's own vocabulary.
      val totalVocabLength = dictionary.getVocabSize() + 1
      val startIdx = dictionary.getIndex(SentenceToken.start)
      val endIdx = dictionary.getIndex(SentenceToken.end)
      // One-hot feature used to pad short sentences up to the batch's fixed
      // length; assumes Tensor.setValue is 1-based, hence endIdx + 1 — TODO confirm.
      val padFeature = Tensor[Float]().resize(totalVocabLength)
      padFeature.setValue(endIdx + 1, 1.0f)
      val padLabel = startIdx

      // Train and validation sets share the same transform chain but use their
      // own maximum sentence length as the fixed batch length.
      val trainSet = DataSet.rdd(tokens)
        .transform(TextToLabeledSentence[Float](dictionary))
        .transform(LabeledSentenceToSample[Float](totalVocabLength))
        .transform(SampleToBatch[Float](batchSize = param.batchSize,
          featurePadding = Some(padFeature),
          labelPadding = Some(padLabel),
          fixedLength = Some(maxTrainLength)))

      val validationSet = DataSet.rdd(valtokens)
        .transform(TextToLabeledSentence[Float](dictionary))
        .transform(LabeledSentenceToSample[Float](totalVocabLength))
        .transform(SampleToBatch[Float](batchSize = param.batchSize,
          featurePadding = Some(padFeature),
          labelPadding = Some(padLabel),
          fixedLength = Some(maxValLength)))

      // Either resume from a serialized snapshot or build a fresh model.
      val model = if (param.modelSnapshot.isDefined) {
        Module.load[Float](param.modelSnapshot.get)
      } else {
        val curModel = SimpleRNN(
          inputSize = totalVocabLength,
          hiddenSize = param.hiddenSize,
          outputSize = totalVocabLength)
        curModel.reset()
        curModel
      }

      val optimMethod = if (param.stateSnapshot.isDefined) {
        OptimMethod.load[Float](param.stateSnapshot.get)
      } else {
        new SGD[Float](learningRate = param.learningRate, learningRateDecay = 0.0,
          weightDecay = param.weightDecay, momentum = param.momentum, dampening = param.dampening)
      }

      val optimizer = Optimizer(
        model = model,
        dataset = trainSet,
        criterion = TimeDistributedCriterion[Float](
          CrossEntropyCriterion[Float](), sizeAverage = true)
      )

      if (param.checkpoint.isDefined) {
        optimizer.setCheckpoint(param.checkpoint.get, Trigger.everyEpoch)
      }

      if(param.overWriteCheckpoint) {
        optimizer.overWriteCheckpoint()
      }

      // Validate (loss) after every epoch; stop after nEpochs.
      optimizer
        .setValidation(Trigger.everyEpoch, validationSet, Array(new Loss[Float](
          TimeDistributedCriterion[Float](CrossEntropyCriterion[Float](), sizeAverage = true))))
        .setOptimMethod(optimMethod)
        .setEndWhen(Trigger.maxEpoch(param.nEpochs))
        .optimize()
      sc.stop()
    })
  }
}
| 122689305/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/models/rnn/Train.scala | Scala | apache-2.0 | 4,984 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import scala.collection.JavaConverters._
import org.antlr.v4.runtime.{ParserRuleContext, Token}
import org.antlr.v4.runtime.tree.TerminalNode
import org.apache.spark.sql.{AnalysisException, SaveMode}
import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.parser._
import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, OneRowRelation, ScriptInputOutputSchema}
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.execution.datasources.{CreateTable, _}
import org.apache.spark.sql.internal.{HiveSerDe, SQLConf, VariableSubstitution}
import org.apache.spark.sql.types.StructType
/**
* Concrete parser for Spark SQL statements.
*/
class SparkSqlParser(conf: SQLConf) extends AbstractSqlParser {
  val astBuilder = new SparkSqlAstBuilder(conf)

  private val substitutor = new VariableSubstitution(conf)

  // Perform variable substitution on the raw SQL text before delegating to the
  // base parser, so substituted values take part in parsing.
  protected override def parse[T](command: String)(toResult: SqlBaseParser => T): T = {
    super.parse(substitutor.substitute(command))(toResult)
  }
}
/**
* Builder that converts an ANTLR ParseTree into a LogicalPlan/Expression/TableIdentifier.
*/
class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
import org.apache.spark.sql.catalyst.parser.ParserUtils._
  /**
   * Create a [[SetCommand]] logical plan.
   *
   * Note that we assume that everything after the SET keyword is assumed to be a part of the
   * key-value pair. The split between key and value is made by searching for the first `=`
   * character in the raw string.
   */
  override def visitSetConfiguration(ctx: SetConfigurationContext): LogicalPlan = withOrigin(ctx) {
    // Construct the command.
    val raw = remainder(ctx.SET.getSymbol)
    val keyValueSeparatorIndex = raw.indexOf('=')
    if (keyValueSeparatorIndex >= 0) {
      // "SET key = value": split on the first '='.
      val key = raw.substring(0, keyValueSeparatorIndex).trim
      val value = raw.substring(keyValueSeparatorIndex + 1).trim
      SetCommand(Some(key -> Option(value)))
    } else if (raw.nonEmpty) {
      // "SET key": no '=' present, so no value is supplied.
      SetCommand(Some(raw.trim -> None))
    } else {
      // Bare "SET" with no argument.
      SetCommand(None)
    }
  }
/**
* Create a [[ResetCommand]] logical plan.
* Example SQL :
* {{{
* RESET;
* }}}
*/
override def visitResetConfiguration(
ctx: ResetConfigurationContext): LogicalPlan = withOrigin(ctx) {
ResetCommand
}
  /**
   * Create an [[AnalyzeTableCommand]] command or an [[AnalyzeColumnCommand]] command.
   * Example SQL for analyzing table :
   * {{{
   *   ANALYZE TABLE table COMPUTE STATISTICS [NOSCAN];
   * }}}
   * Example SQL for analyzing columns :
   * {{{
   *   ANALYZE TABLE table COMPUTE STATISTICS FOR COLUMNS column1, column2;
   * }}}
   */
  override def visitAnalyze(ctx: AnalyzeContext): LogicalPlan = withOrigin(ctx) {
    // A partition spec is accepted by the grammar but has no effect here.
    if (ctx.partitionSpec != null) {
      logWarning(s"Partition specification is ignored: ${ctx.partitionSpec.getText}")
    }
    if (ctx.identifier != null) {
      // The only identifier allowed after COMPUTE STATISTICS is NOSCAN.
      if (ctx.identifier.getText.toLowerCase != "noscan") {
        throw new ParseException(s"Expected `NOSCAN` instead of `${ctx.identifier.getText}`", ctx)
      }
      AnalyzeTableCommand(visitTableIdentifier(ctx.tableIdentifier))
    } else if (ctx.identifierSeq() == null) {
      // No NOSCAN and no column list: full-scan table statistics.
      AnalyzeTableCommand(visitTableIdentifier(ctx.tableIdentifier), noscan = false)
    } else {
      // FOR COLUMNS col1, col2, ...
      AnalyzeColumnCommand(
        visitTableIdentifier(ctx.tableIdentifier),
        visitIdentifierSeq(ctx.identifierSeq()))
    }
  }
/**
* Create a [[SetDatabaseCommand]] logical plan.
*/
override def visitUse(ctx: UseContext): LogicalPlan = withOrigin(ctx) {
SetDatabaseCommand(ctx.db.getText)
}
/**
* Create a [[ShowTablesCommand]] logical plan.
* Example SQL :
* {{{
* SHOW TABLES [(IN|FROM) database_name] [[LIKE] 'identifier_with_wildcards'];
* }}}
*/
override def visitShowTables(ctx: ShowTablesContext): LogicalPlan = withOrigin(ctx) {
ShowTablesCommand(
Option(ctx.db).map(_.getText),
Option(ctx.pattern).map(string))
}
/**
* Create a [[ShowDatabasesCommand]] logical plan.
* Example SQL:
* {{{
* SHOW (DATABASES|SCHEMAS) [LIKE 'identifier_with_wildcards'];
* }}}
*/
override def visitShowDatabases(ctx: ShowDatabasesContext): LogicalPlan = withOrigin(ctx) {
ShowDatabasesCommand(Option(ctx.pattern).map(string))
}
/**
* A command for users to list the properties for a table. If propertyKey is specified, the value
* for the propertyKey is returned. If propertyKey is not specified, all the keys and their
* corresponding values are returned.
* The syntax of using this command in SQL is:
* {{{
* SHOW TBLPROPERTIES table_name[('propertyKey')];
* }}}
*/
override def visitShowTblProperties(
ctx: ShowTblPropertiesContext): LogicalPlan = withOrigin(ctx) {
ShowTablePropertiesCommand(
visitTableIdentifier(ctx.tableIdentifier),
Option(ctx.key).map(visitTablePropertyKey))
}
/**
* A command for users to list the column names for a table.
* This function creates a [[ShowColumnsCommand]] logical plan.
*
* The syntax of using this command in SQL is:
* {{{
* SHOW COLUMNS (FROM | IN) table_identifier [(FROM | IN) database];
* }}}
*/
override def visitShowColumns(ctx: ShowColumnsContext): LogicalPlan = withOrigin(ctx) {
ShowColumnsCommand(Option(ctx.db).map(_.getText), visitTableIdentifier(ctx.tableIdentifier))
}
/**
* A command for users to list the partition names of a table. If partition spec is specified,
* partitions that match the spec are returned. Otherwise an empty result set is returned.
*
* This function creates a [[ShowPartitionsCommand]] logical plan
*
* The syntax of using this command in SQL is:
* {{{
* SHOW PARTITIONS table_identifier [partition_spec];
* }}}
*/
override def visitShowPartitions(ctx: ShowPartitionsContext): LogicalPlan = withOrigin(ctx) {
val table = visitTableIdentifier(ctx.tableIdentifier)
val partitionKeys = Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec)
ShowPartitionsCommand(table, partitionKeys)
}
/**
* Creates a [[ShowCreateTableCommand]]
*/
override def visitShowCreateTable(ctx: ShowCreateTableContext): LogicalPlan = withOrigin(ctx) {
val table = visitTableIdentifier(ctx.tableIdentifier())
ShowCreateTableCommand(table)
}
/**
* Create a [[RefreshTable]] logical plan.
*/
override def visitRefreshTable(ctx: RefreshTableContext): LogicalPlan = withOrigin(ctx) {
RefreshTable(visitTableIdentifier(ctx.tableIdentifier))
}
/**
* Create a [[RefreshTable]] logical plan.
*/
override def visitRefreshResource(ctx: RefreshResourceContext): LogicalPlan = withOrigin(ctx) {
val resourcePath = remainder(ctx.REFRESH.getSymbol).trim
RefreshResource(resourcePath)
}
  /**
   * Create a [[CacheTableCommand]] logical plan.
   * CACHE TABLE name [AS SELECT ...]; a database-qualified name is rejected
   * for the AS SELECT form, and LAZY controls eager vs. lazy caching.
   */
  override def visitCacheTable(ctx: CacheTableContext): LogicalPlan = withOrigin(ctx) {
    val query = Option(ctx.query).map(plan)
    val tableIdent = visitTableIdentifier(ctx.tableIdentifier)
    if (query.isDefined && tableIdent.database.isDefined) {
      val database = tableIdent.database.get
      throw new ParseException(s"It is not allowed to add database prefix `$database` to " +
        s"the table name in CACHE TABLE AS SELECT", ctx)
    }
    CacheTableCommand(tableIdent, query, ctx.LAZY != null)
  }
/**
* Create an [[UncacheTableCommand]] logical plan.
*/
override def visitUncacheTable(ctx: UncacheTableContext): LogicalPlan = withOrigin(ctx) {
UncacheTableCommand(visitTableIdentifier(ctx.tableIdentifier), ctx.EXISTS != null)
}
/**
* Create a [[ClearCacheCommand]] logical plan.
*/
override def visitClearCache(ctx: ClearCacheContext): LogicalPlan = withOrigin(ctx) {
ClearCacheCommand
}
  /**
   * Create an [[ExplainCommand]] logical plan.
   * The syntax of using this command in SQL is:
   * {{{
   *   EXPLAIN (EXTENDED | CODEGEN) SELECT * FROM ...
   * }}}
   */
  override def visitExplain(ctx: ExplainContext): LogicalPlan = withOrigin(ctx) {
    // FORMATTED and LOGICAL are recognized by the grammar but not supported.
    if (ctx.FORMATTED != null) {
      operationNotAllowed("EXPLAIN FORMATTED", ctx)
    }
    if (ctx.LOGICAL != null) {
      operationNotAllowed("EXPLAIN LOGICAL", ctx)
    }

    val statement = plan(ctx.statement)
    if (statement == null) {
      null  // This is enough since ParseException will raise later.
    } else if (isExplainableStatement(statement)) {
      ExplainCommand(statement, extended = ctx.EXTENDED != null, codegen = ctx.CODEGEN != null)
    } else {
      // Non-explainable statements (see isExplainableStatement) get a no-op explain.
      ExplainCommand(OneRowRelation)
    }
  }
  /**
   * Determine if a plan should be explained at all.
   * Currently only [[DescribeTableCommand]] is excluded; everything else is explainable.
   */
  protected def isExplainableStatement(plan: LogicalPlan): Boolean = plan match {
    case _: DescribeTableCommand => false
    case _ => true
  }
  /**
   * Create a [[DescribeTableCommand]] logical plan.
   *
   * Supports an optional PARTITION spec and the EXTENDED / FORMATTED modifiers.
   */
  override def visitDescribeTable(ctx: DescribeTableContext): LogicalPlan = withOrigin(ctx) {
    // Describe column are not supported yet. Return null and let the parser decide
    // what to do with this (create an exception or pass it on to a different system).
    if (ctx.describeColName != null) {
      null
    } else {
      val partitionSpec = if (ctx.partitionSpec != null) {
        // According to the syntax, visitPartitionSpec returns `Map[String, Option[String]]`.
        // DESCRIBE requires every partition column to have a concrete value.
        visitPartitionSpec(ctx.partitionSpec).map {
          case (key, Some(value)) => key -> value
          case (key, _) =>
            throw new ParseException(s"PARTITION specification is incomplete: `$key`", ctx)
        }
      } else {
        Map.empty[String, String]
      }
      DescribeTableCommand(
        visitTableIdentifier(ctx.tableIdentifier),
        partitionSpec,
        ctx.EXTENDED != null,
        ctx.FORMATTED != null)
    }
  }
  /**
   * Type to keep track of a table header: (identifier, isTemporary, ifNotExists, isExternal).
   * Produced by [[visitCreateTableHeader]] and consumed by the CREATE TABLE visitors.
   */
  type TableHeader = (TableIdentifier, Boolean, Boolean, Boolean)
  /**
   * Validate a create table statement and return the [[TableIdentifier]].
   * Rejects the TEMPORARY + IF NOT EXISTS combination.
   */
  override def visitCreateTableHeader(
      ctx: CreateTableHeaderContext): TableHeader = withOrigin(ctx) {
    val temporary = ctx.TEMPORARY != null
    val ifNotExists = ctx.EXISTS != null
    if (temporary && ifNotExists) {
      operationNotAllowed("CREATE TEMPORARY TABLE ... IF NOT EXISTS", ctx)
    }
    (visitTableIdentifier(ctx.tableIdentifier), temporary, ifNotExists, ctx.EXTERNAL != null)
  }
  /**
   * Create a data source table, returning a [[CreateTable]] logical plan.
   *
   * Expected format:
   * {{{
   *   CREATE [EXTERNAL] TABLE [IF NOT EXISTS] [db_name.]table_name
   *   USING table_provider
   *   [OPTIONS table_property_list]
   *   [PARTITIONED BY (col_name, col_name, ...)]
   *   [CLUSTERED BY (col_name, col_name, ...)
   *    [SORTED BY (col_name [ASC|DESC], ...)]
   *    INTO num_buckets BUCKETS
   *   ]
   *   [AS select_statement];
   * }}}
   */
  override def visitCreateTableUsing(ctx: CreateTableUsingContext): LogicalPlan = withOrigin(ctx) {
    val (table, temp, ifNotExists, external) = visitCreateTableHeader(ctx.createTableHeader)
    if (external) {
      operationNotAllowed("CREATE EXTERNAL TABLE ... USING", ctx)
    }
    val options = Option(ctx.tablePropertyList).map(visitPropertyKeyValues).getOrElse(Map.empty)
    val provider = ctx.tableProvider.qualifiedName.getText
    if (provider.toLowerCase == DDLUtils.HIVE_PROVIDER) {
      // Hive serde tables must go through the plain CREATE TABLE path (visitCreateTable).
      throw new AnalysisException("Cannot create hive serde table with CREATE TABLE USING")
    }
    val schema = Option(ctx.colTypeList()).map(createSchema)
    val partitionColumnNames =
      Option(ctx.partitionColumnNames)
        .map(visitIdentifierList(_).toArray)
        .getOrElse(Array.empty[String])
    val bucketSpec = Option(ctx.bucketSpec()).map(visitBucketSpec)

    // TODO: this may be wrong for non file-based data source like JDBC, which should be external
    // even there is no `path` in options. We should consider allow the EXTERNAL keyword.
    val storage = DataSource.buildStorageFormatFromOptions(options)
    // A `path` option (location) makes the table EXTERNAL; otherwise it is MANAGED.
    val tableType = if (storage.locationUri.isDefined) {
      CatalogTableType.EXTERNAL
    } else {
      CatalogTableType.MANAGED
    }

    val tableDesc = CatalogTable(
      identifier = table,
      tableType = tableType,
      storage = storage,
      schema = schema.getOrElse(new StructType),
      provider = Some(provider),
      partitionColumnNames = partitionColumnNames,
      bucketSpec = bucketSpec
    )

    // Determine the storage mode.
    val mode = if (ifNotExists) SaveMode.Ignore else SaveMode.ErrorIfExists

    if (ctx.query != null) {
      // Get the backing query.
      val query = plan(ctx.query)

      if (temp) {
        operationNotAllowed("CREATE TEMPORARY TABLE ... USING ... AS query", ctx)
      }

      // Don't allow explicit specification of schema for CTAS
      if (schema.nonEmpty) {
        operationNotAllowed(
          "Schema may not be specified in a Create Table As Select (CTAS) statement",
          ctx)
      }
      CreateTable(tableDesc, mode, Some(query))
    } else {
      if (temp) {
        if (ifNotExists) {
          operationNotAllowed("CREATE TEMPORARY TABLE IF NOT EXISTS", ctx)
        }

        // Deprecated path: translated to a (replacing, session-local) temp view.
        logWarning(s"CREATE TEMPORARY TABLE ... USING ... is deprecated, please use " +
          "CREATE TEMPORARY VIEW ... USING ... instead")
        CreateTempViewUsing(table, schema, replace = true, global = false, provider, options)
      } else {
        CreateTable(tableDesc, mode, None)
      }
    }
  }
  /**
   * Creates a [[CreateTempViewUsing]] logical plan.
   * Handles CREATE [OR REPLACE] [GLOBAL] TEMPORARY VIEW ... USING provider [OPTIONS ...].
   */
  override def visitCreateTempViewUsing(
      ctx: CreateTempViewUsingContext): LogicalPlan = withOrigin(ctx) {
    CreateTempViewUsing(
      tableIdent = visitTableIdentifier(ctx.tableIdentifier()),
      // The column list is optional; None lets the data source infer the schema.
      userSpecifiedSchema = Option(ctx.colTypeList()).map(createSchema),
      replace = ctx.REPLACE != null,
      global = ctx.GLOBAL != null,
      provider = ctx.tableProvider.qualifiedName.getText,
      options = Option(ctx.tablePropertyList).map(visitPropertyKeyValues).getOrElse(Map.empty))
  }
  /**
   * Create a [[LoadDataCommand]] command.
   *
   * For example:
   * {{{
   *   LOAD DATA [LOCAL] INPATH 'filepath' [OVERWRITE] INTO TABLE tablename
   *   [PARTITION (partcol1=val1, partcol2=val2 ...)]
   * }}}
   */
  override def visitLoadData(ctx: LoadDataContext): LogicalPlan = withOrigin(ctx) {
    LoadDataCommand(
      table = visitTableIdentifier(ctx.tableIdentifier),
      path = string(ctx.path),
      isLocal = ctx.LOCAL != null,
      isOverwrite = ctx.OVERWRITE != null,
      // Requires a full spec (all partition columns given values) when present.
      partition = Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec)
    )
  }
  /**
   * Create a [[TruncateTableCommand]] command.
   *
   * For example:
   * {{{
   *   TRUNCATE TABLE tablename [PARTITION (partcol1=val1, partcol2=val2 ...)]
   * }}}
   */
  override def visitTruncateTable(ctx: TruncateTableContext): LogicalPlan = withOrigin(ctx) {
    TruncateTableCommand(
      visitTableIdentifier(ctx.tableIdentifier),
      // Optional partition spec: restricts truncation to the given partition when present.
      Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec))
  }
  /**
   * Create a [[AlterTableRecoverPartitionsCommand]] command.
   *
   * For example:
   * {{{
   *   MSCK REPAIR TABLE tablename
   * }}}
   */
  override def visitRepairTable(ctx: RepairTableContext): LogicalPlan = withOrigin(ctx) {
    AlterTableRecoverPartitionsCommand(
      visitTableIdentifier(ctx.tableIdentifier),
      // Command name used in error/log messages; distinguishes MSCK from ALTER TABLE ... RECOVER.
      "MSCK REPAIR TABLE")
  }
  /**
   * Convert a table property list into a key-value map.
   * This should be called through [[visitPropertyKeyValues]] or [[visitPropertyKeys]].
   *
   * Note: values may be null here (key-only entries); the two callers above enforce
   * whether values are required or forbidden.
   */
  override def visitTablePropertyList(
      ctx: TablePropertyListContext): Map[String, String] = withOrigin(ctx) {
    val properties = ctx.tableProperty.asScala.map { property =>
      val key = visitTablePropertyKey(property.key)
      val value = visitTablePropertyValue(property.value)
      key -> value
    }
    // Check for duplicate property names.
    checkDuplicateKeys(properties, ctx)
    properties.toMap
  }
/**
* Parse a key-value map from a [[TablePropertyListContext]], assuming all values are specified.
*/
private def visitPropertyKeyValues(ctx: TablePropertyListContext): Map[String, String] = {
val props = visitTablePropertyList(ctx)
val badKeys = props.collect { case (key, null) => key }
if (badKeys.nonEmpty) {
operationNotAllowed(
s"Values must be specified for key(s): ${badKeys.mkString("[", ",", "]")}", ctx)
}
props
}
/**
* Parse a list of keys from a [[TablePropertyListContext]], assuming no values are specified.
*/
private def visitPropertyKeys(ctx: TablePropertyListContext): Seq[String] = {
val props = visitTablePropertyList(ctx)
val badKeys = props.filter { case (_, v) => v != null }.keys
if (badKeys.nonEmpty) {
operationNotAllowed(
s"Values should not be specified for key(s): ${badKeys.mkString("[", ",", "]")}", ctx)
}
props.keys.toSeq
}
/**
* A table property key can either be String or a collection of dot separated elements. This
* function extracts the property key based on whether its a string literal or a table property
* identifier.
*/
override def visitTablePropertyKey(key: TablePropertyKeyContext): String = {
if (key.STRING != null) {
string(key.STRING)
} else {
key.getText
}
}
/**
* A table property value can be String, Integer, Boolean or Decimal. This function extracts
* the property value based on whether its a string, integer, boolean or decimal literal.
*/
override def visitTablePropertyValue(value: TablePropertyValueContext): String = {
if (value == null) {
null
} else if (value.STRING != null) {
string(value.STRING)
} else if (value.booleanValue != null) {
value.getText.toLowerCase
} else {
value.getText
}
}
  /**
   * Create a [[CreateDatabaseCommand]] command.
   *
   * For example:
   * {{{
   *   CREATE DATABASE [IF NOT EXISTS] database_name [COMMENT database_comment]
   *    [LOCATION path] [WITH DBPROPERTIES (key1=val1, key2=val2, ...)]
   * }}}
   */
  override def visitCreateDatabase(ctx: CreateDatabaseContext): LogicalPlan = withOrigin(ctx) {
    CreateDatabaseCommand(
      ctx.identifier.getText,
      ctx.EXISTS != null,
      Option(ctx.locationSpec).map(visitLocationSpec),
      Option(ctx.comment).map(string),
      Option(ctx.tablePropertyList).map(visitPropertyKeyValues).getOrElse(Map.empty))
  }
  /**
   * Create an [[AlterDatabasePropertiesCommand]] command.
   *
   * For example:
   * {{{
   *   ALTER (DATABASE|SCHEMA) database SET DBPROPERTIES (property_name=property_value, ...);
   * }}}
   */
  override def visitSetDatabaseProperties(
      ctx: SetDatabasePropertiesContext): LogicalPlan = withOrigin(ctx) {
    AlterDatabasePropertiesCommand(
      ctx.identifier.getText,
      // Every property must have a value here; key-only entries are rejected.
      visitPropertyKeyValues(ctx.tablePropertyList))
  }
  /**
   * Create a [[DropDatabaseCommand]] command.
   *
   * For example:
   * {{{
   *   DROP (DATABASE|SCHEMA) [IF EXISTS] database [RESTRICT|CASCADE];
   * }}}
   */
  override def visitDropDatabase(ctx: DropDatabaseContext): LogicalPlan = withOrigin(ctx) {
    // Flags: IF EXISTS and CASCADE (RESTRICT is the absence of CASCADE).
    DropDatabaseCommand(ctx.identifier.getText, ctx.EXISTS != null, ctx.CASCADE != null)
  }
  /**
   * Create a [[DescribeDatabaseCommand]] command.
   *
   * For example:
   * {{{
   *   DESCRIBE DATABASE [EXTENDED] database;
   * }}}
   */
  override def visitDescribeDatabase(ctx: DescribeDatabaseContext): LogicalPlan = withOrigin(ctx) {
    DescribeDatabaseCommand(ctx.identifier.getText, ctx.EXTENDED != null)
  }
  /**
   * Create a plan for a DESCRIBE FUNCTION command.
   * The function may be given as a string literal, a qualified name, or raw text
   * (e.g. an operator symbol).
   */
  override def visitDescribeFunction(ctx: DescribeFunctionContext): LogicalPlan = withOrigin(ctx) {
    import ctx._
    val functionName =
      if (describeFuncName.STRING() != null) {
        FunctionIdentifier(string(describeFuncName.STRING()), database = None)
      } else if (describeFuncName.qualifiedName() != null) {
        visitFunctionName(describeFuncName.qualifiedName)
      } else {
        // Fallback for symbolic names such as `+` which are neither strings nor identifiers.
        FunctionIdentifier(describeFuncName.getText, database = None)
      }
    DescribeFunctionCommand(functionName, EXTENDED != null)
  }
  /**
   * Create a plan for a SHOW FUNCTIONS command.
   * An optional scope (ALL | SYSTEM | USER) selects which function namespaces to list,
   * and an optional qualified name or string pattern filters the results.
   */
  override def visitShowFunctions(ctx: ShowFunctionsContext): LogicalPlan = withOrigin(ctx) {
    import ctx._
    // Default (no scope keyword) behaves like ALL: both user and system functions.
    val (user, system) = Option(ctx.identifier).map(_.getText.toLowerCase) match {
      case None | Some("all") => (true, true)
      case Some("system") => (false, true)
      case Some("user") => (true, false)
      case Some(x) => throw new ParseException(s"SHOW $x FUNCTIONS not supported", ctx)
    }
    val (db, pat) = if (qualifiedName != null) {
      // A qualified name acts as both database filter and name pattern.
      val name = visitFunctionName(qualifiedName)
      (name.database, Some(name.funcName))
    } else if (pattern != null) {
      (None, Some(string(pattern)))
    } else {
      (None, None)
    }

    ShowFunctionsCommand(db, pat, user, system)
  }
/**
* Create a [[CreateFunctionCommand]] command.
*
* For example:
* {{{
* CREATE [TEMPORARY] FUNCTION [db_name.]function_name AS class_name
* [USING JAR|FILE|ARCHIVE 'file_uri' [, JAR|FILE|ARCHIVE 'file_uri']];
* }}}
*/
override def visitCreateFunction(ctx: CreateFunctionContext): LogicalPlan = withOrigin(ctx) {
val resources = ctx.resource.asScala.map { resource =>
val resourceType = resource.identifier.getText.toLowerCase
resourceType match {
case "jar" | "file" | "archive" =>
FunctionResource(FunctionResourceType.fromString(resourceType), string(resource.STRING))
case other =>
operationNotAllowed(s"CREATE FUNCTION with resource type '$resourceType'", ctx)
}
}
// Extract database, name & alias.
val functionIdentifier = visitFunctionName(ctx.qualifiedName)
CreateFunctionCommand(
functionIdentifier.database,
functionIdentifier.funcName,
string(ctx.className),
resources,
ctx.TEMPORARY != null)
}
  /**
   * Create a [[DropFunctionCommand]] command.
   *
   * For example:
   * {{{
   *   DROP [TEMPORARY] FUNCTION [IF EXISTS] function;
   * }}}
   */
  override def visitDropFunction(ctx: DropFunctionContext): LogicalPlan = withOrigin(ctx) {
    val functionIdentifier = visitFunctionName(ctx.qualifiedName)
    DropFunctionCommand(
      functionIdentifier.database,
      functionIdentifier.funcName,
      ctx.EXISTS != null,
      ctx.TEMPORARY != null)
  }
  /**
   * Create a [[DropTableCommand]] command.
   * Handles both DROP TABLE and DROP VIEW (the VIEW flag below), with IF EXISTS and PURGE.
   */
  override def visitDropTable(ctx: DropTableContext): LogicalPlan = withOrigin(ctx) {
    DropTableCommand(
      visitTableIdentifier(ctx.tableIdentifier),
      ctx.EXISTS != null,
      ctx.VIEW != null,
      ctx.PURGE != null)
  }
  /**
   * Create a [[AlterTableRenameCommand]] command.
   *
   * For example:
   * {{{
   *   ALTER TABLE table1 RENAME TO table2;
   *   ALTER VIEW view1 RENAME TO view2;
   * }}}
   */
  override def visitRenameTable(ctx: RenameTableContext): LogicalPlan = withOrigin(ctx) {
    AlterTableRenameCommand(
      visitTableIdentifier(ctx.from),
      visitTableIdentifier(ctx.to),
      // True when the statement targeted a view rather than a table.
      ctx.VIEW != null)
  }
  /**
   * Create an [[AlterTableSetPropertiesCommand]] command.
   *
   * For example:
   * {{{
   *   ALTER TABLE table SET TBLPROPERTIES ('comment' = new_comment);
   *   ALTER VIEW view SET TBLPROPERTIES ('comment' = new_comment);
   * }}}
   */
  override def visitSetTableProperties(
      ctx: SetTablePropertiesContext): LogicalPlan = withOrigin(ctx) {
    AlterTableSetPropertiesCommand(
      visitTableIdentifier(ctx.tableIdentifier),
      // SET TBLPROPERTIES requires a value for every key.
      visitPropertyKeyValues(ctx.tablePropertyList),
      ctx.VIEW != null)
  }
  /**
   * Create an [[AlterTableUnsetPropertiesCommand]] command.
   *
   * For example:
   * {{{
   *   ALTER TABLE table UNSET TBLPROPERTIES [IF EXISTS] ('comment', 'key');
   *   ALTER VIEW view UNSET TBLPROPERTIES [IF EXISTS] ('comment', 'key');
   * }}}
   */
  override def visitUnsetTableProperties(
      ctx: UnsetTablePropertiesContext): LogicalPlan = withOrigin(ctx) {
    AlterTableUnsetPropertiesCommand(
      visitTableIdentifier(ctx.tableIdentifier),
      // UNSET takes keys only; entries with values are rejected.
      visitPropertyKeys(ctx.tablePropertyList),
      ctx.EXISTS != null,
      ctx.VIEW != null)
  }
  /**
   * Create an [[AlterTableSerDePropertiesCommand]] command.
   *
   * For example:
   * {{{
   *   ALTER TABLE table [PARTITION spec] SET SERDE serde_name [WITH SERDEPROPERTIES props];
   *   ALTER TABLE table [PARTITION spec] SET SERDEPROPERTIES serde_properties;
   * }}}
   */
  override def visitSetTableSerDe(ctx: SetTableSerDeContext): LogicalPlan = withOrigin(ctx) {
    AlterTableSerDePropertiesCommand(
      visitTableIdentifier(ctx.tableIdentifier),
      // Serde class name is absent for the SET SERDEPROPERTIES-only form.
      Option(ctx.STRING).map(string),
      Option(ctx.tablePropertyList).map(visitPropertyKeyValues),
      // TODO a partition spec is allowed to have optional values. This is currently violated.
      Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec))
  }
  /**
   * Create an [[AlterTableAddPartitionCommand]] command.
   *
   * For example:
   * {{{
   *   ALTER TABLE table ADD [IF NOT EXISTS] PARTITION spec [LOCATION 'loc1']
   *   ALTER VIEW view ADD [IF NOT EXISTS] PARTITION spec
   * }}}
   *
   * ALTER VIEW ... ADD PARTITION ... is not supported because the concept of partitioning
   * is associated with physical tables
   */
  override def visitAddTablePartition(
      ctx: AddTablePartitionContext): LogicalPlan = withOrigin(ctx) {
    if (ctx.VIEW != null) {
      operationNotAllowed("ALTER VIEW ... ADD PARTITION", ctx)
    }
    // Create partition spec to location mapping.
    val specsAndLocs = if (ctx.partitionSpec.isEmpty) {
      // Table form of the grammar: each partition spec may carry its own LOCATION.
      ctx.partitionSpecLocation.asScala.map {
        splCtx =>
          val spec = visitNonOptionalPartitionSpec(splCtx.partitionSpec)
          val location = Option(splCtx.locationSpec).map(visitLocationSpec)
          spec -> location
      }
    } else {
      // Alter View: the location clauses are not allowed.
      // (This branch matches the grammar's view form; the VIEW keyword itself was
      // already rejected above, so specs here simply get no location.)
      ctx.partitionSpec.asScala.map(visitNonOptionalPartitionSpec(_) -> None)
    }
    AlterTableAddPartitionCommand(
      visitTableIdentifier(ctx.tableIdentifier),
      specsAndLocs,
      ctx.EXISTS != null)
  }
  /**
   * Create an [[AlterTableRenamePartitionCommand]] command
   *
   * For example:
   * {{{
   *   ALTER TABLE table PARTITION spec1 RENAME TO PARTITION spec2;
   * }}}
   */
  override def visitRenameTablePartition(
      ctx: RenameTablePartitionContext): LogicalPlan = withOrigin(ctx) {
    AlterTableRenamePartitionCommand(
      visitTableIdentifier(ctx.tableIdentifier),
      // Both specs must be complete (all partition columns assigned values).
      visitNonOptionalPartitionSpec(ctx.from),
      visitNonOptionalPartitionSpec(ctx.to))
  }
  /**
   * Create an [[AlterTableDropPartitionCommand]] command
   *
   * For example:
   * {{{
   *   ALTER TABLE table DROP [IF EXISTS] PARTITION spec1[, PARTITION spec2, ...] [PURGE];
   *   ALTER VIEW view DROP [IF EXISTS] PARTITION spec1[, PARTITION spec2, ...];
   * }}}
   *
   * ALTER VIEW ... DROP PARTITION ... is not supported because the concept of partitioning
   * is associated with physical tables
   */
  override def visitDropTablePartitions(
      ctx: DropTablePartitionsContext): LogicalPlan = withOrigin(ctx) {
    if (ctx.VIEW != null) {
      operationNotAllowed("ALTER VIEW ... DROP PARTITION", ctx)
    }
    AlterTableDropPartitionCommand(
      visitTableIdentifier(ctx.tableIdentifier),
      ctx.partitionSpec.asScala.map(visitNonOptionalPartitionSpec),
      ifExists = ctx.EXISTS != null,
      purge = ctx.PURGE != null,
      // This syntax has no RETAIN-data variant, so data is never retained.
      retainData = false)
  }
  /**
   * Create an [[AlterTableRecoverPartitionsCommand]] command
   *
   * For example:
   * {{{
   *   ALTER TABLE table RECOVER PARTITIONS;
   * }}}
   */
  override def visitRecoverPartitions(
      ctx: RecoverPartitionsContext): LogicalPlan = withOrigin(ctx) {
    AlterTableRecoverPartitionsCommand(visitTableIdentifier(ctx.tableIdentifier))
  }
  /**
   * Create an [[AlterTableSetLocationCommand]] command
   *
   * For example:
   * {{{
   *   ALTER TABLE table [PARTITION spec] SET LOCATION "loc";
   * }}}
   */
  override def visitSetTableLocation(ctx: SetTableLocationContext): LogicalPlan = withOrigin(ctx) {
    AlterTableSetLocationCommand(
      visitTableIdentifier(ctx.tableIdentifier),
      // When present, only the given partition's location is changed.
      Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec),
      visitLocationSpec(ctx.locationSpec))
  }
  /**
   * Create location string.
   * Unescapes the quoted LOCATION literal into a plain path string.
   */
  override def visitLocationSpec(ctx: LocationSpecContext): String = withOrigin(ctx) {
    string(ctx.STRING)
  }
  /**
   * Create a [[BucketSpec]].
   * Parses the bucket count, bucketing columns, and optional sort columns.
   * Only ASC ordering is supported for the sort columns.
   */
  override def visitBucketSpec(ctx: BucketSpecContext): BucketSpec = withOrigin(ctx) {
    BucketSpec(
      ctx.INTEGER_VALUE.getText.toInt,
      visitIdentifierList(ctx.identifierList),
      Option(ctx.orderedIdentifierList)
        .toSeq
        .flatMap(_.orderedIdentifier.asScala)
        .map { orderedIdCtx =>
          // An explicit ordering other than ASC is rejected.
          Option(orderedIdCtx.ordering).map(_.getText).foreach { dir =>
            if (dir.toLowerCase != "asc") {
              operationNotAllowed(s"Column ordering must be ASC, was '$dir'", ctx)
            }
          }
          orderedIdCtx.identifier.getText
        })
  }
  /**
   * Convert a nested constants list into a sequence of string sequences.
   */
  override def visitNestedConstantList(
      ctx: NestedConstantListContext): Seq[Seq[String]] = withOrigin(ctx) {
    ctx.constantList.asScala.map(visitConstantList)
  }
  /**
   * Convert a constants list into a String sequence.
   */
  override def visitConstantList(ctx: ConstantListContext): Seq[String] = withOrigin(ctx) {
    ctx.constant.asScala.map(visitStringConstant)
  }
  /**
   * Fail an unsupported Hive native command.
   * Reconstructs the offending keyword sequence for the error message.
   */
  override def visitFailNativeCommand(
      ctx: FailNativeCommandContext): LogicalPlan = withOrigin(ctx) {
    val keywords = if (ctx.unsupportedHiveNativeCommands != null) {
      // Join the literal keyword tokens back into the statement prefix, e.g. "ALTER INDEX".
      ctx.unsupportedHiveNativeCommands.children.asScala.collect {
        case n: TerminalNode => n.getText
      }.mkString(" ")
    } else {
      // SET ROLE is the exception to the rule, because we handle this before other SET commands.
      "SET ROLE"
    }
    operationNotAllowed(keywords, ctx)
  }
/**
* Create a [[AddFileCommand]], [[AddJarCommand]], [[ListFilesCommand]] or [[ListJarsCommand]]
* command depending on the requested operation on resources.
* Expected format:
* {{{
* ADD (FILE[S] <filepath ...> | JAR[S] <jarpath ...>)
* LIST (FILE[S] [filepath ...] | JAR[S] [jarpath ...])
* }}}
*/
override def visitManageResource(ctx: ManageResourceContext): LogicalPlan = withOrigin(ctx) {
val mayebePaths = remainder(ctx.identifier).trim
ctx.op.getType match {
case SqlBaseParser.ADD =>
ctx.identifier.getText.toLowerCase match {
case "file" => AddFileCommand(mayebePaths)
case "jar" => AddJarCommand(mayebePaths)
case other => operationNotAllowed(s"ADD with resource type '$other'", ctx)
}
case SqlBaseParser.LIST =>
ctx.identifier.getText.toLowerCase match {
case "files" | "file" =>
if (mayebePaths.length > 0) {
ListFilesCommand(mayebePaths.split("\\\\s+"))
} else {
ListFilesCommand()
}
case "jars" | "jar" =>
if (mayebePaths.length > 0) {
ListJarsCommand(mayebePaths.split("\\\\s+"))
} else {
ListJarsCommand()
}
case other => operationNotAllowed(s"LIST with resource type '$other'", ctx)
}
case _ => operationNotAllowed(s"Other types of operation on resources", ctx)
}
}
  /**
   * Create a table, returning a [[CreateTable]] logical plan.
   *
   * This is not used to create datasource tables, which is handled through
   * "CREATE TABLE ... USING ...".
   *
   * Note: several features are currently not supported - temporary tables, bucketing,
   * skewed columns and storage handlers (STORED BY).
   *
   * Expected format:
   * {{{
   *   CREATE [EXTERNAL] TABLE [IF NOT EXISTS] [db_name.]table_name
   *   [(col1[:] data_type [COMMENT col_comment], ...)]
   *   [COMMENT table_comment]
   *   [PARTITIONED BY (col2[:] data_type [COMMENT col_comment], ...)]
   *   [ROW FORMAT row_format]
   *   [STORED AS file_format]
   *   [LOCATION path]
   *   [TBLPROPERTIES (property_name=property_value, ...)]
   *   [AS select_statement];
   * }}}
   */
  override def visitCreateTable(ctx: CreateTableContext): LogicalPlan = withOrigin(ctx) {
    val (name, temp, ifNotExists, external) = visitCreateTableHeader(ctx.createTableHeader)
    // TODO: implement temporary tables
    if (temp) {
      throw new ParseException(
        "CREATE TEMPORARY TABLE is not supported yet. " +
          "Please use CREATE TEMPORARY VIEW as an alternative.", ctx)
    }
    if (ctx.skewSpec != null) {
      operationNotAllowed("CREATE TABLE ... SKEWED BY", ctx)
    }
    if (ctx.bucketSpec != null) {
      operationNotAllowed("CREATE TABLE ... CLUSTERED BY", ctx)
    }
    val comment = Option(ctx.STRING).map(string)
    val dataCols = Option(ctx.columns).map(visitColTypeList).getOrElse(Nil)
    val partitionCols = Option(ctx.partitionColumns).map(visitColTypeList).getOrElse(Nil)
    val properties = Option(ctx.tablePropertyList).map(visitPropertyKeyValues).getOrElse(Map.empty)
    val selectQuery = Option(ctx.query).map(plan)

    // Note: Hive requires partition columns to be distinct from the schema, so we need
    // to include the partition columns here explicitly
    val schema = StructType(dataCols ++ partitionCols)

    // Storage format
    // Defaults derived from the session's "hive.default.fileformat" setting; used to fill
    // in any part of the storage format the statement did not specify.
    val defaultStorage: CatalogStorageFormat = {
      val defaultStorageType = conf.getConfString("hive.default.fileformat", "textfile")
      val defaultHiveSerde = HiveSerDe.sourceToSerDe(defaultStorageType)
      CatalogStorageFormat(
        locationUri = None,
        inputFormat = defaultHiveSerde.flatMap(_.inputFormat)
          .orElse(Some("org.apache.hadoop.mapred.TextInputFormat")),
        outputFormat = defaultHiveSerde.flatMap(_.outputFormat)
          .orElse(Some("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat")),
        serde = defaultHiveSerde.flatMap(_.serde),
        compressed = false,
        properties = Map())
    }
    validateRowFormatFileFormat(ctx.rowFormat, ctx.createFileFormat, ctx)
    val fileStorage = Option(ctx.createFileFormat).map(visitCreateFileFormat)
      .getOrElse(CatalogStorageFormat.empty)
    val rowStorage = Option(ctx.rowFormat).map(visitRowFormat)
      .getOrElse(CatalogStorageFormat.empty)
    val location = Option(ctx.locationSpec).map(visitLocationSpec)
    // If we are creating an EXTERNAL table, then the LOCATION field is required
    if (external && location.isEmpty) {
      operationNotAllowed("CREATE EXTERNAL TABLE must be accompanied by LOCATION", ctx)
    }
    // Merge precedence: explicit ROW FORMAT serde wins, then STORED AS, then the defaults.
    val storage = CatalogStorageFormat(
      locationUri = location,
      inputFormat = fileStorage.inputFormat.orElse(defaultStorage.inputFormat),
      outputFormat = fileStorage.outputFormat.orElse(defaultStorage.outputFormat),
      serde = rowStorage.serde.orElse(fileStorage.serde).orElse(defaultStorage.serde),
      compressed = false,
      properties = rowStorage.properties ++ fileStorage.properties)
    // If location is defined, we'll assume this is an external table.
    // Otherwise, we may accidentally delete existing data.
    val tableType = if (external || location.isDefined) {
      CatalogTableType.EXTERNAL
    } else {
      CatalogTableType.MANAGED
    }

    // TODO support the sql text - have a proper location for this!
    val tableDesc = CatalogTable(
      identifier = name,
      tableType = tableType,
      storage = storage,
      schema = schema,
      provider = Some(DDLUtils.HIVE_PROVIDER),
      partitionColumnNames = partitionCols.map(_.name),
      properties = properties,
      comment = comment)

    val mode = if (ifNotExists) SaveMode.Ignore else SaveMode.ErrorIfExists

    selectQuery match {
      case Some(q) =>
        // Hive does not allow to use a CTAS statement to create a partitioned table.
        if (tableDesc.partitionColumnNames.nonEmpty) {
          val errorMessage = "A Create Table As Select (CTAS) statement is not allowed to " +
            "create a partitioned table using Hive's file formats. " +
            "Please use the syntax of \\"CREATE TABLE tableName USING dataSource " +
            "OPTIONS (...) PARTITIONED BY ...\\" to create a partitioned table through a " +
            "CTAS statement."
          operationNotAllowed(errorMessage, ctx)
        }
        // Don't allow explicit specification of schema for CTAS.
        if (schema.nonEmpty) {
          operationNotAllowed(
            "Schema may not be specified in a Create Table As Select (CTAS) statement",
            ctx)
        }

        val hasStorageProperties = (ctx.createFileFormat != null) || (ctx.rowFormat != null)
        if (conf.convertCTAS && !hasStorageProperties) {
          // At here, both rowStorage.serdeProperties and fileStorage.serdeProperties
          // are empty Maps.
          // convertCTAS: create a data-source table (default provider) instead of a Hive table.
          val newTableDesc = tableDesc.copy(
            storage = CatalogStorageFormat.empty.copy(locationUri = location),
            provider = Some(conf.defaultDataSourceName))
          CreateTable(newTableDesc, mode, Some(q))
        } else {
          CreateTable(tableDesc, mode, Some(q))
        }
      case None => CreateTable(tableDesc, mode, None)
    }
  }
  /**
   * Create a [[CreateTableLikeCommand]] command.
   *
   * For example:
   * {{{
   *   CREATE TABLE [IF NOT EXISTS] [db_name.]table_name
   *   LIKE [other_db_name.]existing_table_name
   * }}}
   */
  override def visitCreateTableLike(ctx: CreateTableLikeContext): LogicalPlan = withOrigin(ctx) {
    val targetTable = visitTableIdentifier(ctx.target)
    val sourceTable = visitTableIdentifier(ctx.source)
    CreateTableLikeCommand(targetTable, sourceTable, ctx.EXISTS != null)
  }
/**
* Create a [[CatalogStorageFormat]] for creating tables.
*
* Format: STORED AS ...
*/
override def visitCreateFileFormat(
ctx: CreateFileFormatContext): CatalogStorageFormat = withOrigin(ctx) {
(ctx.fileFormat, ctx.storageHandler) match {
// Expected format: INPUTFORMAT input_format OUTPUTFORMAT output_format
case (c: TableFileFormatContext, null) =>
visitTableFileFormat(c)
// Expected format: SEQUENCEFILE | TEXTFILE | RCFILE | ORC | PARQUET | AVRO
case (c: GenericFileFormatContext, null) =>
visitGenericFileFormat(c)
case (null, storageHandler) =>
operationNotAllowed("STORED BY", ctx)
case _ =>
throw new ParseException("Expected either STORED AS or STORED BY, not both", ctx)
}
}
  /**
   * Create a [[CatalogStorageFormat]].
   * Handles the explicit INPUTFORMAT ... OUTPUTFORMAT ... form.
   */
  override def visitTableFileFormat(
      ctx: TableFileFormatContext): CatalogStorageFormat = withOrigin(ctx) {
    CatalogStorageFormat.empty.copy(
      inputFormat = Option(string(ctx.inFmt)),
      outputFormat = Option(string(ctx.outFmt)))
  }
  /**
   * Resolve a [[HiveSerDe]] based on the name given and return it as a [[CatalogStorageFormat]].
   * Unknown format names are rejected.
   */
  override def visitGenericFileFormat(
      ctx: GenericFileFormatContext): CatalogStorageFormat = withOrigin(ctx) {
    val source = ctx.identifier.getText
    HiveSerDe.sourceToSerDe(source) match {
      case Some(s) =>
        CatalogStorageFormat.empty.copy(
          inputFormat = s.inputFormat,
          outputFormat = s.outputFormat,
          serde = s.serde)
      case None =>
        operationNotAllowed(s"STORED AS with file format '$source'", ctx)
    }
  }
  /**
   * Create a [[CatalogStorageFormat]] used for creating tables.
   *
   * Example format:
   * {{{
   *   SERDE serde_name [WITH SERDEPROPERTIES (k1=v1, k2=v2, ...)]
   * }}}
   *
   * OR
   *
   * {{{
   *   DELIMITED [FIELDS TERMINATED BY char [ESCAPED BY char]]
   *   [COLLECTION ITEMS TERMINATED BY char]
   *   [MAP KEYS TERMINATED BY char]
   *   [LINES TERMINATED BY char]
   *   [NULL DEFINED AS char]
   * }}}
   */
  private def visitRowFormat(ctx: RowFormatContext): CatalogStorageFormat = withOrigin(ctx) {
    // NOTE(review): the match covers only the serde and delimited subclasses — presumably the
    // grammar produces no other RowFormatContext alternatives; confirm against SqlBase.g4.
    ctx match {
      case serde: RowFormatSerdeContext => visitRowFormatSerde(serde)
      case delimited: RowFormatDelimitedContext => visitRowFormatDelimited(delimited)
    }
  }
  /**
   * Create SERDE row format name and properties pair.
   */
  override def visitRowFormatSerde(
      ctx: RowFormatSerdeContext): CatalogStorageFormat = withOrigin(ctx) {
    import ctx._
    CatalogStorageFormat.empty.copy(
      serde = Option(string(name)),
      // WITH SERDEPROPERTIES is optional.
      properties = Option(tablePropertyList).map(visitPropertyKeyValues).getOrElse(Map.empty))
  }
  /**
   * Create a delimited row format properties object.
   * Each DELIMITED clause is translated into the corresponding serde property.
   */
  override def visitRowFormatDelimited(
      ctx: RowFormatDelimitedContext): CatalogStorageFormat = withOrigin(ctx) {
    // Collect the entries if any.
    def entry(key: String, value: Token): Seq[(String, String)] = {
      Option(value).toSeq.map(x => key -> string(x))
    }
    // TODO we need proper support for the NULL format.
    val entries =
      entry("field.delim", ctx.fieldsTerminatedBy) ++
        // The field delimiter also doubles as the serialization format.
        entry("serialization.format", ctx.fieldsTerminatedBy) ++
        entry("escape.delim", ctx.escapedBy) ++
        // The following typo is inherited from Hive...
        entry("colelction.delim", ctx.collectionItemsTerminatedBy) ++
        entry("mapkey.delim", ctx.keysTerminatedBy) ++
        Option(ctx.linesSeparatedBy).toSeq.map { token =>
          val value = string(token)
          validate(
            value == "\\n",
            s"LINES TERMINATED BY only supports newline '\\\\n' right now: $value",
            ctx)
          "line.delim" -> value
        }
    CatalogStorageFormat.empty.copy(properties = entries.toMap)
  }
  /**
   * Throw a [[ParseException]] if the user specified incompatible SerDes through ROW FORMAT
   * and STORED AS.
   *
   * The following are allowed. Anything else is not:
   *   ROW FORMAT SERDE ... STORED AS [SEQUENCEFILE | RCFILE | TEXTFILE]
   *   ROW FORMAT DELIMITED ... STORED AS TEXTFILE
   *   ROW FORMAT ... STORED AS INPUTFORMAT ... OUTPUTFORMAT ...
   */
  private def validateRowFormatFileFormat(
      rowFormatCtx: RowFormatContext,
      createFileFormatCtx: CreateFileFormatContext,
      parentCtx: ParserRuleContext): Unit = {
    // Nothing to cross-check unless both clauses are present.
    if (rowFormatCtx == null || createFileFormatCtx == null) {
      return
    }
    (rowFormatCtx, createFileFormatCtx.fileFormat) match {
      case (_, ffTable: TableFileFormatContext) => // OK
      case (rfSerde: RowFormatSerdeContext, ffGeneric: GenericFileFormatContext) =>
        ffGeneric.identifier.getText.toLowerCase match {
          case ("sequencefile" | "textfile" | "rcfile") => // OK
          case fmt =>
            operationNotAllowed(
              s"ROW FORMAT SERDE is incompatible with format '$fmt', which also specifies a serde",
              parentCtx)
        }
      case (rfDelimited: RowFormatDelimitedContext, ffGeneric: GenericFileFormatContext) =>
        ffGeneric.identifier.getText.toLowerCase match {
          case "textfile" => // OK
          case fmt => operationNotAllowed(
            s"ROW FORMAT DELIMITED is only compatible with 'textfile', not '$fmt'", parentCtx)
        }
      case _ =>
        // should never happen
        def str(ctx: ParserRuleContext): String = {
          (0 until ctx.getChildCount).map { i => ctx.getChild(i).getText }.mkString(" ")
        }
        operationNotAllowed(
          s"Unexpected combination of ${str(rowFormatCtx)} and ${str(createFileFormatCtx)}",
          parentCtx)
    }
  }
  /**
   * Create or replace a view. This creates a [[CreateViewCommand]] command.
   *
   * For example:
   * {{{
   *   CREATE [OR REPLACE] [[GLOBAL] TEMPORARY] VIEW [IF NOT EXISTS] [db_name.]view_name
   *   [(column_name [COMMENT column_comment], ...) ]
   *   [COMMENT view_comment]
   *   [TBLPROPERTIES (property_name = property_value, ...)]
   *   AS SELECT ...;
   * }}}
   */
  override def visitCreateView(ctx: CreateViewContext): LogicalPlan = withOrigin(ctx) {
    if (ctx.identifierList != null) {
      operationNotAllowed("CREATE VIEW ... PARTITIONED ON", ctx)
    } else {
      // Optional explicit column list, each column with an optional COMMENT.
      val userSpecifiedColumns = Option(ctx.identifierCommentList).toSeq.flatMap { icl =>
        icl.identifierComment.asScala.map { ic =>
          ic.identifier.getText -> Option(ic.STRING).map(string)
        }
      }

      // No TEMPORARY keyword -> persisted; TEMPORARY with GLOBAL -> global temp view;
      // plain TEMPORARY -> session-local temp view.
      val viewType = if (ctx.TEMPORARY == null) {
        PersistedView
      } else if (ctx.GLOBAL != null) {
        GlobalTempView
      } else {
        LocalTempView
      }

      CreateViewCommand(
        name = visitTableIdentifier(ctx.tableIdentifier),
        userSpecifiedColumns = userSpecifiedColumns,
        comment = Option(ctx.STRING).map(string),
        properties = Option(ctx.tablePropertyList).map(visitPropertyKeyValues).getOrElse(Map.empty),
        // Keep the original SQL text of the query alongside the parsed plan.
        originalText = Option(source(ctx.query)),
        child = plan(ctx.query),
        allowExisting = ctx.EXISTS != null,
        replace = ctx.REPLACE != null,
        viewType = viewType)
    }
  }
  /**
   * Alter the query of a view. This creates a [[AlterViewAsCommand]] command.
   *
   * For example:
   * {{{
   *   ALTER VIEW [db_name.]view_name AS SELECT ...;
   * }}}
   */
  override def visitAlterViewQuery(ctx: AlterViewQueryContext): LogicalPlan = withOrigin(ctx) {
    AlterViewAsCommand(
      name = visitTableIdentifier(ctx.tableIdentifier),
      // Preserve the raw SQL text of the new defining query as well as its parsed plan.
      originalText = source(ctx.query),
      query = plan(ctx.query))
  }
/**
* Create a [[ScriptInputOutputSchema]].
*/
  override protected def withScriptIOSchema(
      ctx: QuerySpecificationContext,
      inRowFormat: RowFormatContext,
      recordWriter: Token,
      recordReader: Token are rejected below, so the remaining params fully
      outRowFormat: RowFormatContext,
      recordReader: Token,
      schemaLess: Boolean): ScriptInputOutputSchema = {
    // Custom record reader/writer classes are not supported.
    if (recordWriter != null || recordReader != null) {
      // TODO: what does this message mean?
      throw new ParseException(
        "Unsupported operation: Used defined record reader/writer classes.", ctx)
    }

    // Decode and input/output format.
    // Format = (delimiter key/value pairs, serde class, serde properties, record handler).
    type Format = (Seq[(String, String)], Option[String], Seq[(String, String)], Option[String])
    def format(
        fmt: RowFormatContext,
        configKey: String,
        defaultConfigValue: String): Format = fmt match {
      case c: RowFormatDelimitedContext =>
        // TODO we should use the visitRowFormatDelimited function here. However HiveScriptIOSchema
        // expects a seq of pairs in which the old parsers' token names are used as keys.
        // Transforming the result of visitRowFormatDelimited would be quite a bit messier than
        // retrieving the key value pairs ourselves.
        // Only delimiters that were explicitly specified produce an entry.
        def entry(key: String, value: Token): Seq[(String, String)] = {
          Option(value).map(t => key -> t.getText).toSeq
        }
        val entries = entry("TOK_TABLEROWFORMATFIELD", c.fieldsTerminatedBy) ++
          entry("TOK_TABLEROWFORMATCOLLITEMS", c.collectionItemsTerminatedBy) ++
          entry("TOK_TABLEROWFORMATMAPKEYS", c.keysTerminatedBy) ++
          entry("TOK_TABLEROWFORMATLINES", c.linesSeparatedBy) ++
          entry("TOK_TABLEROWFORMATNULL", c.nullDefinedAs)
        (entries, None, Seq.empty, None)
      case c: RowFormatSerdeContext =>
        // Use a serde format.
        val CatalogStorageFormat(None, None, None, Some(name), _, props) = visitRowFormatSerde(c)
        // SPARK-10310: Special cases LazySimpleSerDe
        val recordHandler = if (name == "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe") {
          Option(conf.getConfString(configKey, defaultConfigValue))
        } else {
          None
        }
        (Seq.empty, Option(name), props.toSeq, recordHandler)
      case null =>
        // Use default (serde) format: LazySimpleSerDe with tab-delimited fields.
        val name = conf.getConfString("hive.script.serde",
          "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")
        val props = Seq("field.delim" -> "\\t")
        val recordHandler = Option(conf.getConfString(configKey, defaultConfigValue))
        (Nil, Option(name), props, recordHandler)
    }

    // Resolve the formats for the script's input and output sides independently.
    val (inFormat, inSerdeClass, inSerdeProps, reader) =
      format(
        inRowFormat, "hive.script.recordreader", "org.apache.hadoop.hive.ql.exec.TextRecordReader")

    val (outFormat, outSerdeClass, outSerdeProps, writer) =
      format(
        outRowFormat, "hive.script.recordwriter",
        "org.apache.hadoop.hive.ql.exec.TextRecordWriter")

    ScriptInputOutputSchema(
      inFormat, outFormat,
      inSerdeClass, outSerdeClass,
      inSerdeProps, outSerdeProps,
      reader, writer,
      schemaLess)
  }
}
| Panos-Bletsos/spark-cost-model-optimizer | sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala | Scala | apache-2.0 | 49,974 |
package de.hpi.asg.breezetestgen.testgeneration.constraintsolving
/** Operator tags for arithmetic/logic constraints handled by the constraint solver. */
sealed trait ArithOperator

/** Addition. */
case object Plus extends ArithOperator
/** Subtraction. */
case object Minus extends ArithOperator
/** AND operation (presumably bitwise, matching the data-path ops — TODO confirm in solver). */
case object And extends ArithOperator
/** OR operation (presumably bitwise — TODO confirm in solver). */
case object Or extends ArithOperator
| 0x203/BreezeTestGen | src/main/scala/de/hpi/asg/breezetestgen/testgeneration/constraintsolving/ArithOperator.scala | Scala | mit | 248 |
/*
* Copyright 2011 BigData Mx
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mx.bigdata.datalib.sql;
class QueryBuilder {

  // Mutable accumulator for the generated SQL text; instances are stateful
  // and not thread-safe.
  val builder = new StringBuilder()

  /** Appends `SELECT f1, f2, ...` for the given fields. */
  def select(fields: List[String]) = {
    builder.append("SELECT ")
    builder.append(fields.mkString(", "))
  }

  /** Appends ` FROM <table>`. */
  def from(from: String) = {
    builder.append(" FROM ")
    builder.append(from)
  }

  /** Appends ` JOIN <table>`. */
  def join(table: String) = {
    builder.append(" JOIN ")
    builder.append(table)
  }

  /**
   * Appends ` ON (<c1> AND <c2> ...)`, skipping empty clauses.
   * Appends nothing when every clause is empty.
   */
  def on(clauses: String*) = {
    val joined = clauses.filter(_.nonEmpty).mkString(" AND ")
    if (joined.nonEmpty) {
      builder.append(" ON (")
      builder.append(joined)
      builder.append(")")
    }
  }

  /**
   * Appends ` WHERE <c1> AND <c2> ...`, skipping empty clauses.
   * Appends nothing when every clause is empty.
   */
  def where(clauses: String*) = {
    val joined = clauses.filter(_.nonEmpty).mkString(" AND ")
    if (joined.nonEmpty) {
      builder.append(" WHERE ")
      builder.append(joined)
    }
  }

  /** Appends ` GROUP BY f1, f2, ...`. */
  def groupBy(fields: String*): Unit = {
    builder.append(" GROUP BY ")
    builder.append(fields.mkString(", "))
  }

  /** Appends ` ORDER BY f1, f2, ...`. */
  def orderBy(fields: String*): Unit = {
    builder.append(" ORDER BY ")
    builder.append(fields.mkString(", "))
  }

  /**
   * Builds an ` <key> IN ('v1', 'v2', ...)` clause, or the empty string when
   * `values` is null (so `where`/`on` drop it).
   *
   * WARNING: values are quoted but NOT escaped; do not feed untrusted input
   * (SQL injection risk). Also yields invalid SQL (`IN ()`) for an empty array.
   */
  def in(key: String, values: Array[String]): String = {
    if (values == null) ""
    else s" $key IN (${values.map(v => s"'$v'").mkString(", ")})"
  }

  /**
   * Builds an ` <key> BETWEEN '<from>' AND '<to>'` clause.
   * WARNING: bounds are quoted but not escaped (SQL injection risk).
   */
  def between(key: String, from: String, to: String): String =
    s" $key BETWEEN '$from' AND '$to'"

  /** Builds an ` <key> BETWEEN <from> AND <to>` clause for numeric bounds. */
  def between(key: String, from: Long, to: Long): String =
    s" $key BETWEEN $from AND $to"

  /**
   * Builds an ` <key> <op> <value>` comparison clause using the value's
   * string representation (unquoted, unescaped).
   */
  def op(key: String, op: String, value: Object): String =
    s" $key $op $value"

  /** Appends ` LIMIT <offset>, <rows>`. */
  def limit(offset: Int, rows: Int): Unit = {
    builder.append(" LIMIT ")
    builder.append(List(offset, rows).mkString(", "))
  }

  /** Returns the SQL accumulated so far. */
  def build = builder.toString()
}
| bigdata-mx/DataLib | src/main/scala/mx/bigdata/datalib/sql/QueryBuilder.scala | Scala | apache-2.0 | 3,049 |
package api
import com.typesafe.config.{ Config, ConfigFactory }
import com.typesafe.scalalogging.LazyLogging
import core.ConsoleEventPublisher
import org.specs2.mock.Mockito
import org.specs2.mutable.Specification
import persistence.dal.{ JobsDal, ProjectsDal, TestsDal }
import spray.testkit.{ RouteTest, Specs2Interface }
import utils.{ ActorModule, ConfigurationModuleImpl, PersistenceModule }
/** Combines spray-testkit's route-testing DSL with its specs2 interface. */
trait Specs2RouteTest extends RouteTest with Specs2Interface
/**
 * Base trait for API route specifications: wires the application's module
 * cake with mocked persistence DALs so routes can be tested in isolation.
 */
trait AbstractAPISpec extends Specification with Specs2RouteTest with Mockito {

  // Test-time replacement for the production module stack: real configuration
  // and event publishing, mocked DAL implementations.
  trait Modules extends ConfigurationModuleImpl with ActorModule with PersistenceModule
  with ConsoleEventPublisher with LazyLogging {

    // Reuse the ActorSystem provided by spray-testkit's RouteTest.
    val system = AbstractAPISpec.this.system

    override val projectsDal = mock[ProjectsDal]
    override val testsDal = mock[TestsDal]
    override val jobsDal = mock[JobsDal]

    // Spec-supplied settings take precedence over the default configuration.
    override def config = getConfig.withFallback(super.config)
  }

  // Override in concrete specs to layer extra configuration; empty by default.
  def getConfig: Config = ConfigFactory.empty()
}
| ShiftForward/ridgeback | src/test/scala/api/AbstractAPISpec.scala | Scala | mit | 981 |
package model
import play.api.libs.json._
/**
* Represents the Swagger definition for PipelineFolderImpl.
* @param additionalProperties Any additional properties this model may have.
*/
@javax.annotation.Generated(value = Array("org.openapitools.codegen.languages.ScalaPlayFrameworkServerCodegen"), date = "2022-02-13T02:38:35.589632Z[Etc/UTC]")
// Fixed generator output: the original declaration was missing the comma after
// `numberOfPipelines` and left `additionalProperties` without a type, which
// does not compile. The companion's Format reads/writes this field as a
// JsObject, so that is its type.
case class PipelineFolderImpl(
  `class`: Option[String],
  displayName: Option[String],
  fullName: Option[String],
  name: Option[String],
  organization: Option[String],
  numberOfFolders: Option[Int],
  numberOfPipelines: Option[Int],
  additionalProperties: JsObject
)
object PipelineFolderImpl {
  // Wraps the macro-generated format so that unknown JSON keys round-trip
  // through the `additionalProperties` field instead of being dropped.
  //
  // NOTE(review): `declaredPropNames` contains "`class`" with backticks, but
  // the macro-generated format presumably emits the plain key "class" — if so,
  // the `class` field is misrouted into additionalProperties; confirm.
  implicit lazy val pipelineFolderImplJsonFormat: Format[PipelineFolderImpl] = {
    val realJsonFormat = Json.format[PipelineFolderImpl]
    val declaredPropNames = Set("`class`", "displayName", "fullName", "name", "organization", "numberOfFolders", "numberOfPipelines")
    Format(
      Reads {
        case JsObject(xs) =>
          // Split incoming keys into declared fields and everything else,
          // then nest the leftovers under "additionalProperties" for the
          // generated Reads to pick up.
          val declaredProps = xs.filterKeys(declaredPropNames)
          val additionalProps = JsObject(xs -- declaredPropNames)
          val restructuredProps = declaredProps + ("additionalProperties" -> additionalProps)
          val newObj = JsObject(restructuredProps)
          realJsonFormat.reads(newObj)
        case _ =>
          JsError("error.expected.jsobject")
      },
      Writes { pipelineFolderImpl =>
        // Inverse of the Reads: flatten the nested additionalProperties
        // object back into the top-level JSON object.
        val jsObj = realJsonFormat.writes(pipelineFolderImpl)
        val additionalProps = jsObj.value("additionalProperties").as[JsObject]
        val declaredProps = jsObj - "additionalProperties"
        val newObj = declaredProps ++ additionalProps
        newObj
      }
    )
  }
}
| cliffano/swaggy-jenkins | clients/scala-play-server/generated/app/model/PipelineFolderImpl.scala | Scala | mit | 1,692 |
package asobu.distributed.gateway.enricher
import asobu.distributed.RequestEnricherDefinition
import asobu.distributed.RequestEnricherDefinition.{OrElse, AndThen}
import asobu.distributed.gateway._
import asobu.dsl.Extractor._
import play.api.mvc.{Request, AnyContent, Results}, Results.InternalServerError
import scala.concurrent.ExecutionContext
import scala.reflect._
/**
 * Turns a serializable enricher definition of shape `T` into an executable
 * [[RequestEnricher]].
 */
trait Interpreter[T <: Def] {
  def apply(enricherDef: T)(implicit exec: ExecutionContext): RequestEnricher
}
object Interpreter {

  /** Raised when a definition has no matching interpreter case below. */
  case class UnknownEnrichDefinition(defName: String) extends Exception(s"Support for EnricherDefinition $defName is not implemented yet.")

  /**
   * Recursively interprets a [[RequestEnricherDefinition]] tree: leaf
   * definitions of type `T` are handed to the implicit interpreter, and
   * AndThen/OrElse composites are interpreted and recombined with the
   * corresponding enricher combinators.
   */
  def interpret[T <: Def: ClassTag](enricherDefinition: RequestEnricherDefinition)(
    implicit
    interpreter: Interpreter[T],
    ec: ExecutionContext
  ): RequestEnricher = {
    import asobu.dsl.CatsInstances._
    enricherDefinition match {
      // The `t: T` type pattern alone is unchecked due to erasure; the
      // runtime-class guard performs the real membership test.
      case t: T if classTag[T].runtimeClass.isInstance(t) ⇒ interpreter(t)
      case AndThen(a, b) ⇒ interpret(a) andThen interpret(b)
      case OrElse(a, b) ⇒ interpret(a) orElse interpret(b)
      case unknown ⇒ throw new UnknownEnrichDefinition(unknown.getClass.getCanonicalName)
    }
  }
}
/**
 * No-op interpreter used when enrichment is disabled. Its parameter type is
 * `Nothing`, so `apply` can never actually be invoked with a value; the
 * `null` body is an unreachable placeholder.
 */
class DisabledInterpreter(implicit ex: ExecutionContext) extends Interpreter[Nothing] {
  def apply(ed: Nothing)(implicit exec: ExecutionContext): RequestEnricher = null
}
| iheartradio/asobu | distributed/src/main/scala/asobu/distributed/gateway/enricher/Interpreter.scala | Scala | apache-2.0 | 1,481 |
/*
* Happy Melly Teller
* Copyright (C) 2013 - 2016, Happy Melly http://www.happymelly.com
*
* This file is part of the Happy Melly Teller.
*
* Happy Melly Teller is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Happy Melly Teller is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Happy Melly Teller. If not, see <http://www.gnu.org/licenses/>.
*
* If you have questions concerning this license or the applicable additional
* terms, you may contact by email Sergey Kotlov, sergey.kotlov@happymelly.com or
* in writing Happy Melly One, Handelsplein 37, Rotterdam, The Netherlands, 3071 PR
*/
package models.repository.core.payment
import models.core.payment.{CreditCard, Charge}
import models.database.core.payment.{CreditCardTable, ChargeTable}
import org.joda.time.LocalDate
import play.api.Application
import play.api.db.slick.{DatabaseConfigProvider, HasDatabaseConfig}
import slick.driver.JdbcProfile
import scala.concurrent.Future
/** Provides operations with database related to credit cards */
class CreditCardRepository(app: Application) extends HasDatabaseConfig[JdbcProfile]
  with CreditCardTable {

  val dbConfig = DatabaseConfigProvider.get[JdbcProfile](app)

  import driver.api._

  private val cards = TableQuery[CreditCards]

  /** Deletes the cards with the given ids; yields the number of deleted rows. */
  def delete(ids: Seq[Long]): Future[Int] = db.run(cards.filter(_.id inSet ids).delete)

  /** Returns all cards belonging to the given customer. */
  def findByCustomer(customerId: Long): Future[Seq[CreditCard]] =
    db.run(cards.filter(_.customerId === customerId).result)

  /**
   * Returns cards expiring next month
   */
  def findExpiring: Future[Seq[CreditCard]] = {
    // A card "expires next month" when its stored expiry month and year match
    // those of today's date plus one month.
    val nextMonth = LocalDate.now().plusMonths(1)
    val expMonth = nextMonth.getMonthOfYear
    val expYear = nextMonth.getYear
    val query = cards.filter(_.expYear === expYear).filter(_.expMonth === expMonth)
    db.run(query.result)
  }

  /**
   * Inserts the given record to database
   *
   * @param card Object to insert
   * @return Returns member object with updated id
   */
  def insert(card: CreditCard): Future[CreditCard] = {
    // `returning ... into` yields the inserted row with its generated id set.
    val query = cards returning cards.map(_.id) into ((value, id) => value.copy(id = Some(id)))
    db.run(query += card)
  }
}
| HappyMelly/teller | app/models/repository/core/payment/CreditCardRepository.scala | Scala | gpl-3.0 | 2,601 |
package chehao.myscala.func
/** Small demo of Scala function idioms (exists, filter/map, folds). */
object Example {

  import scala.annotation.tailrec

  def main(args: Array[String]): Unit = {
    val list = List(1, 2, 3, 4)
    println("list contains Odd ? " + containsOdd(list))
    println("list exist Odd? " + list.exists { (x: Int) => x % 2 == 1 })
    println("list exist Odd? " + list.exists { _ % 2 == 1 })

    val file = List("debug 2013 msg", "debug 2015 msg", "error 2015 msg", "warn 2016 msg")
    println("cat file |grep 'debug' | grep '2013' | wc : " +
      file.filter(_.contains("debug")).filter(_.contains("2013")).size)

    val num = file.map(wordcount).reduceLeft(_ + _)
    println(num)

    val num2 = foldLeft(file.map(wordcount))(0)(_ + _)
    println(num2)
  }

  /** Counts occurrences of the token "msg" in a space-separated line. */
  def wordcount(str: String): Int = str.split(" ").count(_ == "msg")

  /**
   * Tail-recursive left fold over a list of ints.
   * (The leftover debug `println(head)` from the original was removed;
   * @tailrec now guarantees the tail call at compile time.)
   */
  @tailrec
  def foldLeft(list: List[Int])(init: Int)(f: (Int, Int) => Int): Int =
    list match {
      case Nil => init
      case head :: tail => foldLeft(tail)(f(init, head))(f)
    }

  /**
   * True if the list contains at least one odd number.
   * Replaces the original imperative loop with `return`. Note: `_ % 2 == 1`
   * is false for negative odd numbers — identical to the original predicate.
   */
  def containsOdd(list: List[Int]): Boolean = list.exists(_ % 2 == 1)
}
package org.skycastle.ui.components
import java.awt.event.{ActionEvent, ActionListener}
import javax.swing.event.{DocumentEvent, DocumentListener}
import javax.swing.JTextField
import org.skycastle.content.composite.CompositeEntity
import org.skycastle.ui.Ui
import org.skycastle.util.Parameters
/**
*
*
* @author Hans Haggstrom
*/
@serializable
@SerialVersionUID(1)
class FieldUi extends Ui {

  type ViewType = JTextField

  // The field's current text is stored in the parameters under the 'text key.
  override def getValue() = parameters.getString( 'text, "" )

  def createOwnView(composite: CompositeEntity) : ViewType = {
    val field = new JTextField( 20 )

    // Listen to text change
    field.getDocument.addDocumentListener( new DocumentListener {

      // Push every document mutation back into the parameters so the model
      // reflects what the user typed.
      def updateParameters() {
        parameters.setProperty( 'text, field.getText )
      }

      def changedUpdate(e: DocumentEvent) = updateParameters()
      def insertUpdate(e: DocumentEvent) = updateParameters()
      def removeUpdate(e: DocumentEvent) = updateParameters()
    } )

    field
  }

  // NOTE(review): setText fires document events, which call setProperty in
  // turn — presumably Parameters suppresses no-op updates; confirm there is
  // no model/view update loop.
  protected def updateViewProperties(view: ViewType, changedParameters: Parameters) {
    if (changedParameters.hasProperty('text)) {
      view.setText( parameters.getString( 'text, "") )
    }
  }
}
package controllers
/** Validates ID tokens received by the relying party. */
trait IdTokenValidator {

  /**
   * @param aud presumably the token's audience (`aud`) claim — confirm
   *            against the concrete implementation
   * @return true when the value is considered valid
   */
  def validate(aud: String): Boolean
}
| j5ik2o/forseti | app/relying-party/app/controllers/IdTokenValidator.scala | Scala | mit | 87 |
/*
* Copyright 2012-2014 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.xfinity.sirius.api.impl.status
import org.scalatest.BeforeAndAfterAll
import com.comcast.xfinity.sirius.NiceTest
import akka.actor.ActorSystem
import com.comcast.xfinity.sirius.admin.SiriusMonitorReader
import com.comcast.xfinity.sirius.api.SiriusConfiguration
import org.mockito.Mockito._
import akka.testkit.{TestProbe, TestActorRef}
import com.comcast.xfinity.sirius.api.impl.status.NodeStats.{NodeConfig, MonitorStats, MemoryUsage, FullNodeStatus}
import scala.concurrent.duration._
class StatusWorkerTest extends NiceTest with BeforeAndAfterAll {

  // One shared ActorSystem for the whole suite.
  implicit val actorSystem = ActorSystem("StatusWorkerTest")

  // Tear down the ActorSystem once all tests have run.
  override def afterAll() {
    actorSystem.shutdown()
  }

  describe("in response to a GetStatus message") {
    it ("must return everything it can") {
      val mockMonitorReader = mock[SiriusMonitorReader]
      val config = new SiriusConfiguration
      config.setProp("key1", "val1")
      config.setProp("key2", "val2")

      val underTest = TestActorRef(
        new StatusWorker(
          "akka://some-system@somehost:2552/user/sirius",
          config,
          mockMonitorReader
        )
      )

      // No monitor stats are available from the reader in this scenario.
      doReturn(None).when(mockMonitorReader).getMonitorStats(config)

      val senderProbe = TestProbe()
      senderProbe.send(underTest, StatusWorker.GetStatus)
      senderProbe.expectMsgPF(3 seconds) {
        case FullNodeStatus(nodeName, _: MemoryUsage, configInfo, stats) =>
          assert(nodeName === "akka://some-system@somehost:2552/user/sirius")
          // NodeConfig carries the configuration map with stringified values.
          val stringifiedConfigMap = config.getConfigMap.map(kv => (kv._1, kv._2.toString))
          assert(NodeConfig(stringifiedConfigMap) === configInfo)
          assert(MonitorStats(None) === stats)
      }
    }
  }
}
| mattinger/sirius | src/test/scala/com/comcast/xfinity/sirius/api/impl/status/StatusWorkerTest.scala | Scala | apache-2.0 | 2,381 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
private[spark] object ExecutorState extends Enumeration {

  val LAUNCHING, LOADING, RUNNING, KILLED, FAILED, LOST = Value

  type ExecutorState = Value

  /** An executor is finished once it has been killed, has failed, or is lost. */
  def isFinished(state: ExecutorState): Boolean = state match {
    case KILLED | FAILED | LOST => true
    case _ => false
  }
}
| zhangjunfang/eclipse-dir | spark/core/src/main/scala/org/apache/spark/deploy/ExecutorState.scala | Scala | bsd-2-clause | 1,083 |
package io.swagger.client.model
import io.swagger.client.core.ApiModel
import org.joda.time.DateTime
/**
 * Access grant describing which of a user's measurements another party may
 * read, bounded by variable, calendar time window, and time of day.
 */
case class Permission (
  /* Grant permission to target user or public so they may access measurements within the given parameters. TODO: Rename target to something more intuitive. */
  target: Int,
  /* ORIGINAL Variable name */
  variableName: String,
  /* Earliest time when measurements will be accessible in epoch seconds */
  minTimestamp: Int,
  /* Latest time when measurements will be accessible in epoch seconds */
  maxTimestamp: Int,
  /* Earliest time of day when measurements will be accessible in epoch seconds */
  minTimeOfDay: Int,
  /* Latest time of day when measurements will be accessible in epoch seconds */
  maxTimeOfDay: Int,
  /* Maybe specifies if only weekday measurements should be accessible */
  week: String)
  extends ApiModel
| QuantiModo/QuantiModo-SDK-Akka-Scala | src/main/scala/io/swagger/client/model/Permission.scala | Scala | gpl-2.0 | 868 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package vta.core
import chisel3._
import chisel3.util._
import vta.util.config._
import vta.shell._
/** TensorStore.
*
* Store 1D and 2D tensors from out-scratchpad (SRAM) to main memory (DRAM).
*/
class TensorStore(tensorType: String = "true", debug: Boolean = false)
    (implicit p: Parameters) extends Module {
  // Tensor geometry (derived from tensorType) and shell memory parameters.
  val tp = new TensorParams(tensorType)
  val mp = p(ShellKey).memParams
  val io = IO(new Bundle {
    val start = Input(Bool())                 // kicks off one store operation
    val done = Output(Bool())                 // asserted when the store completes
    val inst = Input(UInt(INST_BITS.W))       // raw instruction, decoded as MemDecode below
    val baddr = Input(UInt(mp.addrBits.W))    // DRAM base address
    val vme_wr = new VMEWriteMaster           // write channel towards main memory
    val tensor = new TensorClient(tensorType) // scratchpad-side port
  })
  val tensorLength = tp.tensorLength
  val tensorWidth = tp.tensorWidth
  val tensorElemBits = tp.tensorElemBits
  val memBlockBits = tp.memBlockBits
  val memDepth = tp.memDepth
  val numMemBlock = tp.numMemBlock

  val dec = io.inst.asTypeOf(new MemDecode)
  // DRAM write address bookkeeping: current burst address and start of the
  // current row (used to apply xstride between rows).
  val waddr_cur = Reg(chiselTypeOf(io.vme_wr.cmd.bits.addr))
  val waddr_nxt = Reg(chiselTypeOf(io.vme_wr.cmd.bits.addr))
  // Beat counters: xsize is the total number of data beats per row minus one
  // (each tensor row expands to tensorLength*numMemBlock memory blocks);
  // bursts are capped at xmax beats, with xrem tracking the remainder.
  val xcnt = Reg(chiselTypeOf(io.vme_wr.cmd.bits.len))
  val xlen = Reg(chiselTypeOf(io.vme_wr.cmd.bits.len))
  val xrem = Reg(chiselTypeOf(dec.xsize))
  val xsize = (dec.xsize << log2Ceil(tensorLength*numMemBlock)) - 1.U
  val xmax = (1 << mp.lenBits).U
  val xmax_bytes = ((1 << mp.lenBits)*mp.dataBits/8).U
  // Row counter for 2D transfers.
  val ycnt = Reg(chiselTypeOf(dec.ysize))
  val ysize = dec.ysize
  // tag selects the memory block within a tensor row; set selects the row
  // of the tensor scratchpad bank being streamed out.
  val tag = Reg(UInt(8.W))
  val set = Reg(UInt(8.W))

  val sIdle :: sWriteCmd :: sWriteData :: sReadMem :: sWriteAck :: Nil = Enum(5)
  val state = RegInit(sIdle)

  // control
  // State machine: issue a write command, stream data beats (revisiting the
  // SRAM via sReadMem when a tensor row is exhausted), wait for the write
  // acknowledge, then either continue the burst, start the next row, or finish.
  switch (state) {
    is (sIdle) {
      when (io.start) {
        state := sWriteCmd
        // Split the row into bursts of at most xmax beats.
        when (xsize < xmax) {
          xlen := xsize
          xrem := 0.U
        } .otherwise {
          xlen := xmax - 1.U
          xrem := xsize - xmax
        }
      }
    }
    is (sWriteCmd) {
      when (io.vme_wr.cmd.ready) {
        state := sWriteData
      }
    }
    is (sWriteData) {
      when (io.vme_wr.data.ready) {
        when (xcnt === xlen) {
          state := sWriteAck
        } .elsewhen (tag === (numMemBlock - 1).U) {
          // Last block of the current SRAM word: fetch the next one.
          state := sReadMem
        }
      }
    }
    is (sReadMem) {
      state := sWriteData
    }
    is (sWriteAck) {
      when (io.vme_wr.ack) {
        when (xrem === 0.U) {
          when (ycnt === ysize - 1.U) {
            state := sIdle
          } .otherwise {
            // Row finished; set up the bursts for the next row.
            state := sWriteCmd
            when (xsize < xmax) {
              xlen := xsize
              xrem := 0.U
            } .otherwise {
              xlen := xmax - 1.U
              xrem := xsize - xmax
            }
          }
        } .elsewhen (xrem < xmax) {
          state := sWriteCmd
          xlen := xrem
          xrem := 0.U
        } .otherwise {
          state := sWriteCmd
          xlen := xmax - 1.U
          xrem := xrem - xmax
        }
      }
    }
  }

  // write-to-sram
  // External writes fill the scratchpad: one SyncReadMem bank per tensor row,
  // each word holding numMemBlock blocks; writes are unmasked.
  val tensorFile = Seq.fill(tensorLength) { SyncReadMem(memDepth, Vec(numMemBlock, UInt(memBlockBits.W))) }
  val wdata_t = Wire(Vec(numMemBlock, UInt(memBlockBits.W)))
  val no_mask = Wire(Vec(numMemBlock, Bool()))
  wdata_t := DontCare
  no_mask.foreach { m => m := true.B }

  for (i <- 0 until tensorLength) {
    val inWrData = io.tensor.wr.bits.data(i).asUInt.asTypeOf(wdata_t)
    when (io.tensor.wr.valid) {
      tensorFile(i).write(io.tensor.wr.bits.idx, inWrData, no_mask)
    }
  }

  // read-from-sram
  // stride pulses once per completed row (except the last), advancing the
  // row counter and the SRAM/DRAM base addresses.
  val stride = state === sWriteAck &
    io.vme_wr.ack &
    xcnt === xlen + 1.U &
    xrem === 0.U &
    ycnt =/= ysize - 1.U

  when (state === sIdle) {
    ycnt := 0.U
  } .elsewhen (stride) {
    ycnt := ycnt + 1.U
  }

  // tag walks the blocks of one SRAM word; set walks the tensor rows.
  when (state === sWriteCmd || tag === (numMemBlock - 1).U) {
    tag := 0.U
  } .elsewhen (io.vme_wr.data.fire()) {
    tag := tag + 1.U
  }

  when (state === sWriteCmd || (set === (tensorLength - 1).U && tag === (numMemBlock - 1).U)) {
    set := 0.U
  } .elsewhen (io.vme_wr.data.fire() && tag === (numMemBlock - 1).U) {
    set := set + 1.U
  }

  // SRAM read address: advance one entry per fully-streamed tensor, and jump
  // by dec.xsize entries at each row boundary.
  val raddr_cur = Reg(UInt(tp.memAddrBits.W))
  val raddr_nxt = Reg(UInt(tp.memAddrBits.W))
  when (state === sIdle) {
    raddr_cur := dec.sram_offset
    raddr_nxt := dec.sram_offset
  } .elsewhen (io.vme_wr.data.fire() && set === (tensorLength - 1).U && tag === (numMemBlock - 1).U) {
    raddr_cur := raddr_cur + 1.U
  } .elsewhen (stride) {
    raddr_cur := raddr_nxt + dec.xsize
    raddr_nxt := raddr_nxt + dec.xsize
  }

  // Synchronous read of all banks; mdata picks the bank selected by `set`.
  val tread = Seq.tabulate(tensorLength) { i => i.U ->
    tensorFile(i).read(raddr_cur, state === sWriteCmd | state === sReadMem) }
  val mdata = MuxLookup(set, 0.U.asTypeOf(chiselTypeOf(wdata_t)), tread)

  // write-to-dram
  // DRAM address: advance by the burst size within a row, and by
  // xstride (scaled to bytes per tensor) between rows.
  when (state === sIdle) {
    waddr_cur := io.baddr + dec.dram_offset
    waddr_nxt := io.baddr + dec.dram_offset
  } .elsewhen (state === sWriteAck && io.vme_wr.ack && xrem =/= 0.U) {
    waddr_cur := waddr_cur + xmax_bytes
  } .elsewhen (stride) {
    waddr_cur := waddr_nxt + (dec.xstride << log2Ceil(tensorLength*tensorWidth))
    waddr_nxt := waddr_nxt + (dec.xstride << log2Ceil(tensorLength*tensorWidth))
  }

  io.vme_wr.cmd.valid := state === sWriteCmd
  io.vme_wr.cmd.bits.addr := waddr_cur
  io.vme_wr.cmd.bits.len := xlen

  io.vme_wr.data.valid := state === sWriteData
  io.vme_wr.data.bits := mdata(tag)

  when (state === sWriteCmd) {
    xcnt := 0.U
  } .elsewhen (io.vme_wr.data.fire()) {
    xcnt := xcnt + 1.U
  }

  // disable external read-from-sram requests
  io.tensor.tieoffRead()

  // done
  io.done := state === sWriteAck & io.vme_wr.ack & xrem === 0.U & ycnt === ysize - 1.U

  // debug
  if (debug) {
    when (io.vme_wr.cmd.fire()) {
      printf("[TensorStore] ysize:%x ycnt:%x raddr:%x waddr:%x len:%x rem:%x\\n", ysize, ycnt, raddr_cur, waddr_cur, xlen, xrem)
    }
    when (io.vme_wr.data.fire()) {
      printf("[TensorStore] data:%x\\n", io.vme_wr.data.bits)
    }
    when (io.vme_wr.ack) {
      printf("[TensorStore] ack\\n")
    }
  }
}
| mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/vta/hardware/chisel/src/main/scala/core/TensorStore.scala | Scala | apache-2.0 | 6,676 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.